""" LayoutLMv2 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import is_detectron2_available, logging


logger = logging.get_logger(__name__)

LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json",
    "layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json",
}

# Detectron2 is only needed to materialize the visual-backbone config object;
# a LayoutLMv2Config itself can be created without it.
if is_detectron2_available():
    import detectron2


class LayoutLMv2Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`LayoutLMv2Model`]. It is used to instantiate an
    LayoutLMv2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the LayoutLMv2
    [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the LayoutLMv2 model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`LayoutLMv2Model`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`LayoutLMv2Model`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        max_2d_position_embeddings (`int`, *optional*, defaults to 1024):
            The maximum value that the 2D position embedding might ever be used with. Typically set this to something
            large just in case (e.g., 1024).
        max_rel_pos (`int`, *optional*, defaults to 128):
            The maximum number of relative positions to be used in the self-attention mechanism.
        rel_pos_bins (`int`, *optional*, defaults to 32):
            The number of relative position bins to be used in the self-attention mechanism.
        fast_qkv (`bool`, *optional*, defaults to `True`):
            Whether or not to use a single matrix for the queries, keys, values in the self-attention layers.
        max_rel_2d_pos (`int`, *optional*, defaults to 256):
            The maximum number of relative 2D positions in the self-attention mechanism.
        rel_2d_pos_bins (`int`, *optional*, defaults to 64):
            The number of 2D relative position bins in the self-attention mechanism.
        image_feature_pool_shape (`List[int]`, *optional*, defaults to [7, 7, 256]):
            The shape of the average-pooled feature map.
        coordinate_size (`int`, *optional*, defaults to 128):
            Dimension of the coordinate embeddings.
        shape_size (`int`, *optional*, defaults to 128):
            Dimension of the width and height embeddings.
        has_relative_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a relative attention bias in the self-attention mechanism.
        has_spatial_attention_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to use a spatial attention bias in the self-attention mechanism.
        has_visual_segment_embedding (`bool`, *optional*, defaults to `False`):
            Whether or not to add visual segment embeddings.
        detectron2_config_args (`dict`, *optional*):
            Dictionary containing the configuration arguments of the Detectron2 visual backbone. Refer to [this
            file](https://github.com/microsoft/unilm/blob/master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py)
            for details regarding default values.

    Example:

    ```python
    >>> from transformers import LayoutLMv2Config, LayoutLMv2Model

    >>> # Initializing a LayoutLMv2 microsoft/layoutlmv2-base-uncased style configuration
    >>> configuration = LayoutLMv2Config()

    >>> # Initializing a model (with random weights) from the microsoft/layoutlmv2-base-uncased style configuration
    >>> model = LayoutLMv2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
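
    A minimal sketch of customizing the Detectron2 visual backbone arguments. Note that a `detectron2_config_args`
    dict passed to the constructor replaces the defaults wholesale, so the sketch copies the defaults returned by
    [`~LayoutLMv2Config.get_default_detectron2_config`] and edits them rather than passing a partial dict:

    ```python
    >>> # Start from the default Detectron2 arguments and override a single entry
    >>> # (ResNet-50 instead of the default ResNet-101 is just an illustrative choice)
    >>> detectron2_config_args = LayoutLMv2Config.get_default_detectron2_config()
    >>> detectron2_config_args["MODEL.RESNETS.DEPTH"] = 50
    >>> custom_configuration = LayoutLMv2Config(detectron2_config_args=detectron2_config_args)
    ```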
    """

    model_type = "layoutlmv2"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        max_2d_position_embeddings=1024,
        max_rel_pos=128,
        rel_pos_bins=32,
        fast_qkv=True,
        max_rel_2d_pos=256,
        rel_2d_pos_bins=64,
        convert_sync_batchnorm=True,
        image_feature_pool_shape=[7, 7, 256],
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        has_spatial_attention_bias=True,
        has_visual_segment_embedding=False,
        detectron2_config_args=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.max_rel_pos = max_rel_pos
        self.rel_pos_bins = rel_pos_bins
        self.fast_qkv = fast_qkv
        self.max_rel_2d_pos = max_rel_2d_pos
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.convert_sync_batchnorm = convert_sync_batchnorm
        self.image_feature_pool_shape = image_feature_pool_shape
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.has_visual_segment_embedding = has_visual_segment_embedding
        # Fall back to the default Detectron2 arguments when none are provided.
        self.detectron2_config_args = (
            detectron2_config_args if detectron2_config_args is not None else self.get_default_detectron2_config()
        )

    @classmethod
    def get_default_detectron2_config(self):
        return {
            "MODEL.MASK_ON": True,
            "MODEL.PIXEL_STD": [57.375, 57.120, 58.395],
            "MODEL.BACKBONE.NAME": "build_resnet_fpn_backbone",
            "MODEL.FPN.IN_FEATURES": ["res2", "res3", "res4", "res5"],
            "MODEL.ANCHOR_GENERATOR.SIZES": [[32], [64], [128], [256], [512]],
            "MODEL.RPN.IN_FEATURES": ["p2", "p3", "p4", "p5", "p6"],
            "MODEL.RPN.PRE_NMS_TOPK_TRAIN": 2000,
            "MODEL.RPN.PRE_NMS_TOPK_TEST": 1000,
            "MODEL.RPN.POST_NMS_TOPK_TRAIN": 1000,
            "MODEL.POST_NMS_TOPK_TEST": 1000,
            "MODEL.ROI_HEADS.NAME": "StandardROIHeads",
            "MODEL.ROI_HEADS.NUM_CLASSES": 5,
            "MODEL.ROI_HEADS.IN_FEATURES": ["p2", "p3", "p4", "p5"],
            "MODEL.ROI_BOX_HEAD.NAME": "FastRCNNConvFCHead",
            "MODEL.ROI_BOX_HEAD.NUM_FC": 2,
            "MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION": 14,
            "MODEL.ROI_MASK_HEAD.NAME": "MaskRCNNConvUpsampleHead",
            "MODEL.ROI_MASK_HEAD.NUM_CONV": 4,
            "MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION": 7,
            "MODEL.RESNETS.DEPTH": 101,
            "MODEL.RESNETS.SIZES": [[32], [64], [128], [256], [512]],
            "MODEL.RESNETS.ASPECT_RATIOS": [[0.5, 1.0, 2.0]],
            "MODEL.RESNETS.OUT_FEATURES": ["res2", "res3", "res4", "res5"],
            "MODEL.RESNETS.NUM_GROUPS": 32,
            "MODEL.RESNETS.WIDTH_PER_GROUP": 8,
            "MODEL.RESNETS.STRIDE_IN_1X1": False,
        }

    def get_detectron2_config(self):
        detectron2_config = detectron2.config.get_cfg()
        for k, v in self.detectron2_config_args.items():
            attributes = k.split(".")
            to_set = detectron2_config
            # Walk down the dotted config path (e.g. "MODEL.RPN") before setting the leaf attribute.
            for attribute in attributes[:-1]:
                to_set = getattr(to_set, attribute)
            setattr(to_set, attributes[-1], v)

        return detectron2_config