""" PyTorch LayoutLMv2 model."""

import math
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_detectron2_available,
    logging,
    replace_return_docstrings,
    requires_backends,
)
from .configuration_layoutlmv2 import LayoutLMv2Config


# soft dependency
if is_detectron2_available():
    import detectron2
    from detectron2.modeling import META_ARCH_REGISTRY


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "microsoft/layoutlmv2-base-uncased"
_CONFIG_FOR_DOC = "LayoutLMv2Config"

LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/layoutlmv2-base-uncased",
    "microsoft/layoutlmv2-large-uncased",
]


class LayoutLMv2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super(LayoutLMv2Embeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)

        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def _calc_spatial_position_embeddings(self, bbox):
        try:
            left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
            upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
            right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
            lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e

        h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
        w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])

        spatial_position_embeddings = torch.cat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            dim=-1,
        )
        return spatial_position_embeddings


class LayoutLMv2SelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention heads"
                f" ({config.num_attention_heads})"
            )
        self.fast_qkv = config.fast_qkv
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if config.fast_qkv:
            self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
            self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
        else:
            self.query = nn.Linear(config.hidden_size, self.all_head_size)
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def compute_qkv(self, hidden_states):
        if self.fast_qkv:
            qkv = self.qkv_linear(hidden_states)
            q, k, v = torch.chunk(qkv, 3, dim=-1)
            if q.ndimension() == self.q_bias.ndimension():
                q = q + self.q_bias
                v = v + self.v_bias
            else:
                _sz = (1,) * (q.ndimension() - 1) + (-1,)
                q = q + self.q_bias.view(*_sz)
                v = v + self.v_bias.view(*_sz)
        else:
            q = self.query(hidden_states)
            k = self.key(hidden_states)
            v = self.value(hidden_states)
        return q, k, v

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        q, k, v = self.compute_qkv(hidden_states)

        # (B, L, H*D) -> (B, H, L, D)
        query_layer = self.transpose_for_scores(q)
        key_layer = self.transpose_for_scores(k)
        value_layer = self.transpose_for_scores(v)

        query_layer = query_layer / math.sqrt(self.attention_head_size)
        # [BSZ, NAT, L, L]
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        if self.has_relative_attention_bias:
            attention_scores += rel_pos
        if self.has_spatial_attention_bias:
            attention_scores += rel_2d_pos
        attention_scores = attention_scores.float().masked_fill_(
            attention_mask.to(torch.bool), torch.finfo(attention_scores.dtype).min
        )
        attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs


class LayoutLMv2Attention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = LayoutLMv2SelfAttention(config)
        self.output = LayoutLMv2SelfOutput(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class LayoutLMv2SelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class LayoutLMv2Intermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class LayoutLMv2Output(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class LayoutLMv2Layer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = LayoutLMv2Attention(config)
        self.intermediate = LayoutLMv2Intermediate(config)
        self.output = LayoutLMv2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = self_attention_outputs[0]

        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """
    Adapted from Mesh Tensorflow:
    https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
    Translate relative position to a bucket number for relative attention. The relative position is defined as
    memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
    position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small
    absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions
    >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should
    allow for more graceful generalization to longer sequences than the model has been trained on.

    Args:
        relative_position: an int32 Tensor
        bidirectional: a boolean - whether the attention is bidirectional
        num_buckets: an integer
        max_distance: an integer

    Returns:
        a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
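
    Example (an illustrative sanity check added for clarity, not part of the original docstring; with the default
    arguments, non-positive relative positions fall in the first `num_buckets // 2` buckets):

        >>> import torch
        >>> relative_position_bucket(torch.tensor([[0, -1, -4, -20, -80]]))
        tensor([[ 0,  1,  4, 10, 14]])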
    """
    ret = 0
    if bidirectional:
        num_buckets //= 2
        ret += (relative_position > 0).long() * num_buckets
        n = torch.abs(relative_position)
    else:
        n = torch.max(-relative_position, torch.zeros_like(relative_position))
    # now n is in the range [0, inf)

    # half of the buckets are for exact increments in positions
    max_exact = num_buckets // 2
    is_small = n < max_exact

    # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
    val_if_large = max_exact + (
        torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
    ).to(torch.long)
    val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))

    ret += torch.where(is_small, n, val_if_large)
    return ret


class LayoutLMv2Encoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if self.has_relative_attention_bias:
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False)

        if self.has_spatial_attention_bias:
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
            self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)

        self.gradient_checkpointing = False

    def _calculate_1d_position_embeddings(self, position_ids):
        rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
        rel_pos = relative_position_bucket(
            rel_pos_mat,
            num_buckets=self.rel_pos_bins,
            max_distance=self.max_rel_pos,
        )
        # the weight of the linear layer serves as a (num_buckets, num_heads) lookup table
        rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2)
        rel_pos = rel_pos.contiguous()
        return rel_pos

    def _calculate_2d_position_embeddings(self, bbox):
        position_coord_x = bbox[:, :, 0]
        position_coord_y = bbox[:, :, 3]
        rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
        rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
        rel_pos_x = relative_position_bucket(
            rel_pos_x_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_y = relative_position_bucket(
            rel_pos_y_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2)
        rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2)
        rel_pos_x = rel_pos_x.contiguous()
        rel_pos_y = rel_pos_y.contiguous()
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        bbox=None,
        position_ids=None,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        rel_pos = self._calculate_1d_position_embeddings(position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._calculate_2d_position_embeddings(bbox) if self.has_spatial_attention_bias else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                    rel_pos,
                    rel_2d_pos,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    output_attentions,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )

            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    all_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class LayoutLMv2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LayoutLMv2Config
    pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
    base_model_prefix = "layoutlmv2"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


def my_convert_sync_batchnorm(module, process_group=None):
    # same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but also allows converting from
    # `detectron2.layers.FrozenBatchNorm2d`
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
    module_output = module
    if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
        module_output = torch.nn.SyncBatchNorm(
            num_features=module.num_features,
            eps=module.eps,
            affine=True,
            track_running_stats=True,
            process_group=process_group,
        )
        module_output.weight = torch.nn.Parameter(module.weight)
        module_output.bias = torch.nn.Parameter(module.bias)
        module_output.running_mean = module.running_mean
        module_output.running_var = module.running_var
        module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
    for name, child in module.named_children():
        module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
    del module
    return module_output


class LayoutLMv2VisualBackbone(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.cfg = config.get_detectron2_config()
        meta_arch = self.cfg.MODEL.META_ARCHITECTURE
        model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
        assert isinstance(model.backbone, detectron2.modeling.FPN)
        self.backbone = model.backbone

        assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
        num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
        self.register_buffer(
            "pixel_mean",
            torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
            persistent=False,
        )
        self.register_buffer(
            "pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1), persistent=False
        )
        self.out_feature_key = "p2"
        if torch.are_deterministic_algorithms_enabled():
            logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
            input_shape = (224, 224)
            backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
            self.pool = nn.AvgPool2d(
                (
                    math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
                    math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
                )
            )
        else:
            self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
        if len(config.image_feature_pool_shape) == 2:
            config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
        assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]

    def forward(self, images):
        images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
        features = self.backbone(images_input)
        features = features[self.out_feature_key]
        features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
        return features

    def synchronize_batch_norm(self):
        if not (
            torch.distributed.is_available()
            and torch.distributed.is_initialized()
            and torch.distributed.get_rank() > -1
        ):
            raise RuntimeError("Make sure torch.distributed is set up properly.")

        self_rank = torch.distributed.get_rank()
        node_size = torch.cuda.device_count()
        world_size = torch.distributed.get_world_size()
        if not (world_size % node_size == 0):
            raise RuntimeError("Make sure the number of processes can be divided by the number of nodes")

        node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)]
        sync_bn_groups = [
            torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
        ]
        node_rank = self_rank // node_size

        self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])


LAYOUTLMV2_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`LayoutLMv2Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

LAYOUTLMV2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `{0}`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)

        bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*):
            Bounding boxes of each input sequence tokens. Selected in the range `[0,
            config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
            format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
            y1) represents the position of the lower right corner.
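
            As an illustrative example (not part of the original docstring), a word box covering pixels
            `(x0, y0, x1, y1)` on a page of width `W` and height `H` would be normalized to this 0-1000 scale as
            `[int(1000 * x0 / W), int(1000 * y0 / H), int(1000 * x1 / W), int(1000 * y1 / H)]`.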

        image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron2.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
            Batch of document images.

        attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `{0}`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class LayoutLMv2Pooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "pool" the model by simply taking the hidden state corresponding to the first token
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


@add_start_docstrings(
    "The bare LayoutLMv2 Model transformer outputting raw hidden-states without any specific head on top.",
    LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
    def __init__(self, config):
        requires_backends(self, "detectron2")
        super().__init__(config)
        self.config = config
        self.has_visual_segment_embedding = config.has_visual_segment_embedding
        self.embeddings = LayoutLMv2Embeddings(config)

        self.visual = LayoutLMv2VisualBackbone(config)
        self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
        if self.has_visual_segment_embedding:
            self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
        self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)

        self.encoder = LayoutLMv2Encoder(config)
        self.pooler = LayoutLMv2Pooler(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None):
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        if inputs_embeds is None:
            inputs_embeds = self.embeddings.word_embeddings(input_ids)
        position_embeddings = self.embeddings.position_embeddings(position_ids)
        spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
        token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings
        embeddings = self.embeddings.LayerNorm(embeddings)
        embeddings = self.embeddings.dropout(embeddings)
        return embeddings

    def _calc_img_embeddings(self, image, bbox, position_ids):
        visual_embeddings = self.visual_proj(self.visual(image))
        position_embeddings = self.embeddings.position_embeddings(position_ids)
        spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
        embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
        if self.has_visual_segment_embedding:
            embeddings += self.visual_segment_embedding
        embeddings = self.visual_LayerNorm(embeddings)
        embeddings = self.visual_dropout(embeddings)
        return embeddings

    def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape):
        # one normalized bounding box per cell of the pooled visual feature map
        visual_bbox_x = torch.div(
            torch.arange(
                0,
                1000 * (image_feature_pool_shape[1] + 1),
                1000,
                device=device,
                dtype=bbox.dtype,
            ),
            self.config.image_feature_pool_shape[1],
            rounding_mode="floor",
        )
        visual_bbox_y = torch.div(
            torch.arange(
                0,
                1000 * (self.config.image_feature_pool_shape[0] + 1),
                1000,
                device=device,
                dtype=bbox.dtype,
            ),
            self.config.image_feature_pool_shape[0],
            rounding_mode="floor",
        )
        visual_bbox = torch.stack(
            [
                visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),
                visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
                visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),
                visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
            ],
            dim=-1,
        ).view(-1, bbox.size(-1))

        visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
        return visual_bbox

    def _get_input_shape(self, input_ids, inputs_embeds):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            return input_ids.size()
        elif inputs_embeds is not None:
            return inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

    @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        image: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Return:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, LayoutLMv2Model, set_seed
        >>> from PIL import Image
        >>> import torch
        >>> from datasets import load_dataset

        >>> set_seed(88)

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
        >>> model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")


        >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
        >>> image_path = dataset["test"][0]["file"]
        >>> image = Image.open(image_path).convert("RGB")

        >>> encoding = processor(image, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> last_hidden_states = outputs.last_hidden_state

        >>> last_hidden_states.shape
        torch.Size([1, 342, 768])
        ```
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        input_shape = self._get_input_shape(input_ids, inputs_embeds)
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        visual_shape = list(input_shape)
        visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
        visual_shape = torch.Size(visual_shape)
        final_shape = list(self._get_input_shape(input_ids, inputs_embeds))
        final_shape[1] += visual_shape[1]
        final_shape = torch.Size(final_shape)

        visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)
        final_bbox = torch.cat([bbox, visual_bbox], dim=1)

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        visual_attention_mask = torch.ones(visual_shape, device=device)
        final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        if position_ids is None:
            seq_length = input_shape[1]
            position_ids = self.embeddings.position_ids[:, :seq_length]
            position_ids = position_ids.expand(input_shape)

        visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
            input_shape[0], 1
        )
        final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)

        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)

        text_layout_emb = self._calc_text_embeddings(
            input_ids=input_ids,
            bbox=bbox,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
        )

        visual_emb = self._calc_img_embeddings(
            image=image,
            bbox=visual_bbox,
            position_ids=visual_position_ids,
        )
        final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)

        extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)

        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min

        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers

        encoder_outputs = self.encoder(
            final_emb,
            extended_attention_mask,
            bbox=final_bbox,
            position_ids=final_position_ids,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the
    final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual
    embeddings, e.g. for document image classification tasks such as the
    [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
    """,
    LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # the [CLS] output, pooled initial visual embeddings and pooled final visual embeddings
        # are concatenated, hence the `3 *` input dimension
        self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.layoutlmv2.embeddings.word_embeddings

    @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        image: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoProcessor, LayoutLMv2ForSequenceClassification, set_seed
        >>> from PIL import Image
        >>> import torch
        >>> from datasets import load_dataset

        >>> set_seed(88)

        >>> dataset = load_dataset("rvl_cdip", split="train", streaming=True)
        >>> data = next(iter(dataset))
        >>> image = data["image"].convert("RGB")

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
        >>> model = LayoutLMv2ForSequenceClassification.from_pretrained(
        ...     "microsoft/layoutlmv2-base-uncased", num_labels=dataset.info.features["label"].num_classes
        ... )

        >>> encoding = processor(image, return_tensors="pt")
        >>> sequence_label = torch.tensor([data["label"]])

        >>> outputs = model(**encoding, labels=sequence_label)

        >>> loss, logits = outputs.loss, outputs.logits
        >>> predicted_idx = logits.argmax(dim=-1).item()
        >>> predicted_answer = dataset.info.features["label"].names[4]
        >>> predicted_idx, predicted_answer
        (4, 'advertisement')
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        visual_shape = list(input_shape)
        visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
        visual_shape = torch.Size(visual_shape)
        final_shape = list(input_shape)
        final_shape[1] += visual_shape[1]
        final_shape = torch.Size(final_shape)

        visual_bbox = self.layoutlmv2._calc_visual_bbox(
            self.config.image_feature_pool_shape, bbox, device, final_shape
        )

        visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
            input_shape[0], 1
        )

        initial_image_embeddings = self.layoutlmv2._calc_img_embeddings(
            image=image,
            bbox=visual_bbox,
            position_ids=visual_position_ids,
        )

        outputs = self.layoutlmv2(
            input_ids=input_ids,
            bbox=bbox,
            image=image,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]
        sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:]

        cls_final_output = sequence_output[:, 0, :]

        # average-pool the visual embeddings
        pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1)
        pooled_final_image_embeddings = final_image_embeddings.mean(dim=1)
        # concatenate with cls_final_output
        sequence_output = torch.cat(
            [cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1
        )
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden
    states) e.g. for sequence labeling (information extraction) tasks such as
    [FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13),
    [CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda).
    """,
    LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.layoutlmv2.embeddings.word_embeddings

    @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        image: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoProcessor, LayoutLMv2ForTokenClassification, set_seed
        >>> from PIL import Image
        >>> from datasets import load_dataset

        >>> set_seed(88)

        >>> datasets = load_dataset("nielsr/funsd", split="test")
        >>> labels = datasets.features["ner_tags"].feature.names
        >>> id2label = {v: k for v, k in enumerate(labels)}

        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
        >>> model = LayoutLMv2ForTokenClassification.from_pretrained(
        ...     "microsoft/layoutlmv2-base-uncased", num_labels=len(labels)
        ... )

        >>> data = datasets[0]
        >>> image = Image.open(data["image_path"]).convert("RGB")
        >>> words = data["words"]
        >>> boxes = data["bboxes"]  # make sure to normalize your bounding boxes
        >>> word_labels = data["ner_tags"]
        >>> encoding = processor(
        ...     image,
        ...     words,
        ...     boxes=boxes,
        ...     word_labels=word_labels,
        ...     padding="max_length",
        ...     truncation=True,
        ...     return_tensors="pt",
        ... )

        >>> outputs = model(**encoding)
        >>> logits, loss = outputs.logits, outputs.loss

        >>> predicted_token_class_ids = logits.argmax(-1)
        >>> predicted_tokens_classes = [id2label[t.item()] for t in predicted_token_class_ids[0]]
        >>> predicted_tokens_classes[:5]
        ['B-ANSWER', 'B-HEADER', 'B-HEADER', 'B-HEADER', 'B-HEADER']
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv2(
            input_ids=input_ids,
            bbox=bbox,
            image=image,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]
        # only take the text part of the output representations
        sequence_output = outputs[0][:, :seq_length]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    LayoutLMv2 Model with a span classification head on top for extractive question-answering tasks such as
    [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to
    compute `span start logits` and `span end logits`).
    """,
    LAYOUTLMV2_START_DOCSTRING,
)
class LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel):
    def __init__(self, config, has_visual_segment_embedding=True):
        super().__init__(config)
        self.num_labels = config.num_labels
        config.has_visual_segment_embedding = has_visual_segment_embedding
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.layoutlmv2.embeddings.word_embeddings

    @add_start_docstrings_to_model_forward(LAYOUTLMV2_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        image: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.

        Returns:

        Example:

        In this example below, we give the LayoutLMv2 model an image (of texts) and ask it a question. It will give us
        a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image).

        ```python
        >>> from transformers import AutoProcessor, LayoutLMv2ForQuestionAnswering, set_seed
        >>> import torch
        >>> from PIL import Image
        >>> from datasets import load_dataset

        >>> set_seed(88)
        >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
        >>> model = LayoutLMv2ForQuestionAnswering.from_pretrained("microsoft/layoutlmv2-base-uncased")

        >>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
        >>> image_path = dataset["test"][0]["file"]
        >>> image = Image.open(image_path).convert("RGB")
        >>> question = "When is coffee break?"
        >>> encoding = processor(image, question, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> predicted_start_idx = outputs.start_logits.argmax(-1).item()
        >>> predicted_end_idx = outputs.end_logits.argmax(-1).item()
        >>> predicted_start_idx, predicted_end_idx
        (154, 287)

        >>> predicted_answer_tokens = encoding.input_ids.squeeze()[predicted_start_idx : predicted_end_idx + 1]
        >>> predicted_answer = processor.tokenizer.decode(predicted_answer_tokens)
        >>> predicted_answer  # results are not very good without further fine-tuning
        'council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public ...
        ```

        ```python
        >>> target_start_index = torch.tensor([7])
        >>> target_end_index = torch.tensor([14])
        >>> outputs = model(**encoding, start_positions=target_start_index, end_positions=target_end_index)
        >>> predicted_answer_span_start = outputs.start_logits.argmax(-1).item()
        >>> predicted_answer_span_end = outputs.end_logits.argmax(-1).item()
        >>> predicted_answer_span_start, predicted_answer_span_end
        (154, 287)
        ```
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv2(
            input_ids=input_ids,
            bbox=bbox,
            image=image,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]
        # only take the text part of the output representations
        sequence_output = outputs[0][:, :seq_length]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )