""" DETA model configuration"""


from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DETA
    [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
            detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        class_cost (`float`, *optional*, defaults to 1):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        num_feature_levels (`int`, *optional*, defaults to 5):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant
            of DETA and are further fed into the decoder for iterative bounding box refinement.
        two_stage_num_proposals (`int`, *optional*, defaults to 300):
            The number of region proposals to be generated, in case `two_stage` is set to `True`.
        with_box_refine (`bool`, *optional*, defaults to `True`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.

    Examples:

    ```python
    >>> from transformers import DetaConfig, DetaModel

    >>> # Initializing a DETA SenseTime/deformable-detr style configuration
    >>> configuration = DetaConfig()

    >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
    >>> model = DetaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
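
    >>> # A minimal extra sketch (not part of the upstream example): the backbone can also be passed
    >>> # as a plain dict, which is resolved through the auto config mapping; note that `two_stage=True`
    >>> # (the default) requires `with_box_refine=True`, otherwise a ValueError is raised
    >>> custom_configuration = DetaConfig(
    ...     backbone_config={"model_type": "resnet", "out_features": ["stage2", "stage3", "stage4"]},
    ...     num_queries=300,
    ... )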
    ```"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Instantiate the backbone config from a plain dict via the auto config mapping.
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # Deformable attention attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model