""" PyTorch Neighborhood Attention Transformer model."""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    OptionalDependencyNotAvailable,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_natten_available,
    logging,
    replace_return_docstrings,
    requires_backends,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_nat import NatConfig


if is_natten_available():
    from natten.functional import natten2dav, natten2dqkrpb
else:

    def natten2dav(*args, **kwargs):
        raise OptionalDependencyNotAvailable()

    def natten2dqkrpb(*args, **kwargs):
        raise OptionalDependencyNotAvailable()


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "NatConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224"
_EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"


NAT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "shi-labs/nat-mini-in1k-224",
]


@dataclass
class NatEncoderOutput(ModelOutput):
    """
    Nat encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class NatModelOutput(ModelOutput):
    """
    Nat model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class NatImageClassifierOutput(ModelOutput):
    """
    Nat outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None


class NatEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = NatPatchEmbeddings(config)

        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings


class NatPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        patch_size = config.patch_size
        num_channels, hidden_size = config.num_channels, config.embed_dim
        self.num_channels = num_channels

        if patch_size == 4:
            pass
        else:
            raise ValueError("Dinat only supports patch size of 4 at the moment.")

        self.projection = nn.Sequential(
            nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
        )

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embeddings = self.projection(pixel_values)
        embeddings = embeddings.permute(0, 2, 3, 1)

        return embeddings


class NatDownsampler(nn.Module):
    """
    Convolutional Downsampling Layer.

    Args:
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.dim = dim
        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.norm = norm_layer(2 * dim)

    def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
        input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        input_feature = self.norm(input_feature)
        return input_feature


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output

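
# A minimal numeric sketch of the stochastic-depth behavior implemented above; the helper
# below is illustrative and NOT part of the original module. With drop_prob=0.25, each
# sample's residual branch is either zeroed entirely or rescaled by 1 / keep_prob, so the
# expected activation is unchanged.
def _drop_path_sketch() -> torch.Tensor:  # hypothetical name, for illustration only
    batch = torch.ones(4, 3)
    out = drop_path(batch, drop_prob=0.25, training=True)
    # Each row of `out` is now either all 0.0 (dropped) or all 1 / 0.75 (about 1.33,
    # kept and rescaled); averaged over many draws, the output mean matches the input mean.
    return out
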

class NatDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class NeighborhoodAttention(nn.Module):
    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.kernel_size = kernel_size

        # rpb is learnable relative positional biases; the same concept is used in Swin.
        self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 3, 1, 2, 4)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Apply the scale factor before computing attention weights; scalars commute in
        # matrix multiplication, so the result is identical.
        query_layer = query_layer / math.sqrt(self.attention_head_size)

        # Compute neighborhood attention scores between "query" and "key",
        # adding the relative positional biases.
        attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        attention_probs = self.dropout(attention_probs)

        context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1)
        context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class NeighborhoodAttentionOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class NeighborhoodAttentionModule(nn.Module):
    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size)
        self.output = NeighborhoodAttentionOutput(config, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class NatIntermediate(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class NatOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class NatLayer(nn.Module):
    def __init__(self, config, dim, num_heads, drop_path_rate=0.0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.kernel_size = config.kernel_size
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size)
        self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = NatIntermediate(config, dim)
        self.output = NatOutput(config, dim)
        self.layer_scale_parameters = (
            nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
            if config.layer_scale_init_value > 0
            else None
        )

    def maybe_pad(self, hidden_states, height, width):
        window_size = self.kernel_size
        pad_values = (0, 0, 0, 0, 0, 0)
        if height < window_size or width < window_size:
            pad_l = pad_t = 0
            pad_r = max(0, window_size - width)
            pad_b = max(0, window_size - height)
            pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
            hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        batch_size, height, width, channels = hidden_states.size()
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states)
        # pad hidden_states if they are smaller than the kernel size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)

        _, height_pad, width_pad, _ = hidden_states.shape

        attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)

        attention_output = attention_outputs[0]

        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_output = attention_output[:, :height, :width, :].contiguous()

        if self.layer_scale_parameters is not None:
            attention_output = self.layer_scale_parameters[0] * attention_output

        hidden_states = shortcut + self.drop_path(attention_output)

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.output(self.intermediate(layer_output))

        if self.layer_scale_parameters is not None:
            layer_output = self.layer_scale_parameters[1] * layer_output

        layer_output = hidden_states + self.drop_path(layer_output)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs


class NatStage(nn.Module):
    def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        self.layers = nn.ModuleList(
            [
                NatLayer(
                    config=config,
                    dim=dim,
                    num_heads=num_heads,
                    drop_path_rate=drop_path_rate[i],
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        _, height, width, _ = hidden_states.size()
        for i, layer_module in enumerate(self.layers):
            layer_outputs = layer_module(hidden_states, output_attentions)
            hidden_states = layer_outputs[0]

        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            hidden_states = self.downsample(hidden_states_before_downsampling)

        stage_outputs = (hidden_states, hidden_states_before_downsampling)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs


class NatEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.num_levels = len(config.depths)
        self.config = config
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.levels = nn.ModuleList(
            [
                NatStage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None,
                )
                for i_layer in range(self.num_levels)
            ]
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, NatEncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if output_hidden_states:
            # rearrange b h w c -> b c h w
            reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.levels):
            layer_outputs = layer_module(hidden_states, output_attentions)

            hidden_states = layer_outputs[0]
            hidden_states_before_downsampling = layer_outputs[1]

            if output_hidden_states and output_hidden_states_before_downsampling:
                # rearrange b h w c -> b c h w
                reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                # rearrange b h w c -> b c h w
                reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[2:]

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return NatEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )


class NatPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = NatConfig
    base_model_prefix = "nat"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


NAT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`NatConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

NAT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.",
    NAT_START_DOCSTRING,
)
class NatModel(NatPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)

        requires_backends(self, ["natten"])

        self.config = config
        self.num_levels = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))

        self.embeddings = NatEmbeddings(config)
        self.encoder = NatEncoder(config)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=NatModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, NatModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)

        pooled_output = None
        if self.pooler is not None:
            pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]
            return output

        return NatModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )

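
# Minimal usage sketch for NatModel (illustration only, not part of the original module;
# requires the `natten` package and network access to fetch the checkpoint):
#
#     from transformers import AutoImageProcessor, NatModel
#     from PIL import Image
#     import requests
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
#     model = NatModel.from_pretrained("shi-labs/nat-mini-in1k-224")
#     outputs = model(**processor(images=image, return_tensors="pt"))
#     # outputs.last_hidden_state keeps the spatial layout: (1, 7, 7, 512) for a 224x224
#     # input, matching _EXPECTED_OUTPUT_SHAPE above.
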
r  z
    Nat Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.
    """,
    NAT_START_DOCSTRING,
)
class NatForImageClassification(NatPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        requires_backends(self, ["natten"])

        self.num_labels = config.num_labels
        self.nat = NatModel(config)

        # Classifier head
        self.classifier = (
            nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=NatImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, NatImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.nat(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return NatImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    "NAT backbone, to be used with frameworks like DETR and MaskFormer.",
    NAT_START_DOCSTRING,
)
class NatBackbone(NatPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        requires_backends(self, ["natten"])

        self.embeddings = NatEmbeddings(config)
        self.encoder = NatEncoder(config)
        self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]

        # Add layer norms to hidden states of out_features
        hidden_states_norms = {}
        for stage, num_channels in zip(self.out_features, self.channels):
            hidden_states_norms[stage] = nn.LayerNorm(num_channels)
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
        >>> model = AutoBackbone.from_pretrained(
        ...     "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 512, 7, 7]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_attentions=output_attentions,
            output_hidden_states=True,
            output_hidden_states_before_downsampling=True,
            return_dict=True,
        )

        hidden_states = outputs.reshaped_hidden_states

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                batch_size, num_channels, height, width = hidden_state.shape
                hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
                hidden_state = hidden_state.view(batch_size, height * width, num_channels)
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                hidden_state = hidden_state.view(batch_size, height, width, num_channels)
                hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
                feature_maps += (hidden_state,)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )