""" PyTorch VideoMAE (masked autoencoder) model."""

import collections.abc
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Optional, Set, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .configuration_videomae import VideoMAEConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "VideoMAEConfig"
_CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base"

VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "MCG-NJU/videomae-base",
]


@dataclass
class VideoMAEDecoderOutput(ModelOutput):
    """
    Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class VideoMAEForPreTrainingOutput(ModelOutput):
    """
    Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`):
            Pixel reconstruction loss.
        logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


# sin-cos position embedding
def get_sinusoid_encoding_table(n_position, d_hid):
    """Sinusoid position encoding table"""

    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1

    return torch.FloatTensor(sinusoid_table).unsqueeze(0)


class VideoMAEEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.

    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = VideoMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # fixed sin-cos position embedding
        self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
        self.config = config

    def forward(self, pixel_values, bool_masked_pos):
        # create patch embeddings
        embeddings = self.patch_embeddings(pixel_values)

        # add position embeddings
        embeddings = embeddings + self.position_embeddings.type_as(embeddings).to(embeddings.device).clone().detach()

        # only keep the visible patches (~bool_masked_pos means visible)
        if bool_masked_pos is not None:
            batch_size, _, num_channels = embeddings.shape
            embeddings = embeddings[~bool_masked_pos]
            embeddings = embeddings.reshape(batch_size, -1, num_channels)

        return embeddings


class VideoMAEPatchEmbeddings(nn.Module):
    """
    Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
    height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
    patch_size).

    """

    def __init__(self, config):
        super().__init__()

        image_size = config.image_size
        patch_size = config.patch_size
        num_channels = config.num_channels
        hidden_size = config.hidden_size
        num_frames = config.num_frames
        tubelet_size = config.tubelet_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.tubelet_size = int(tubelet_size)
        num_patches = (
            (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
        )
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.projection = nn.Conv3d(
            in_channels=num_channels,
            out_channels=hidden_size,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1]),
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings


class VideoMAESelfAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)

        if config.qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(self.all_head_size))
        else:
            self.q_bias = None
            self.v_bias = None

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        # the key projection deliberately gets a zero, non-trainable bias when q/v biases are used
        k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
        keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
        values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)

        key_layer = self.transpose_for_scores(keys)
        value_layer = self.transpose_for_scores(values)
        query_layer = self.transpose_for_scores(queries)

        # take the dot product between "query" and "key" to get the raw attention scores
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # normalize the attention scores to probabilities
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # this is actually dropping out entire tokens to attend to, as in the original Transformer paper
        attention_probs = self.dropout(attention_probs)

        # mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
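

# Shape sketch for the self-attention above (illustrative only, assuming a base-sized configuration with
# hidden_size=768 and num_attention_heads=12): transpose_for_scores() reshapes (batch, seq_len, 768) into
# (batch, 12, seq_len, 64), the attention score matrix is (batch, 12, seq_len, seq_len), and the context is
# merged back to (batch, seq_len, 768) before being passed to VideoMAESelfOutput below.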


class VideoMAESelfOutput(nn.Module):
    """
    The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VideoMAEAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.attention = VideoMAESelfAttention(config)
        self.output = VideoMAESelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class VideoMAEIntermediate(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class VideoMAEOutput(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class VideoMAELayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VideoMAEAttention(config)
        self.intermediate = VideoMAEIntermediate(config)
        self.output = VideoMAEOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in VideoMAE, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in VideoMAE, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs


class VideoMAEEncoder(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class VideoMAEPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VideoMAEConfig
    base_model_prefix = "videomae"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


VIDEOMAE_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`VideoMAEConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

VIDEOMAE_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`VideoMAEImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare VideoMAE Model transformer outputting raw hidden-states without any specific head on top.",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEModel(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = VideoMAEEmbeddings(config)
        self.encoder = VideoMAEEncoder(config)

        if config.use_mean_pooling:
            self.layernorm = None
        else:
            self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
            length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 1568, 768]
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Prepare head mask if needed (1.0 in head_mask indicates we keep the head)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if self.layernorm is not None:
            sequence_output = self.layernorm(sequence_output)

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class VideoMAEDecoder(nn.Module):
    def __init__(self, config, num_patches):
        super().__init__()

        decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2

        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = nn.ModuleList(
            [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
        )

        self.norm = nn.LayerNorm(config.decoder_hidden_size)
        self.head = (
            nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity()
        )

        self.gradient_checkpointing = False
        self.config = config

    def forward(
        self,
        hidden_states,
        return_token_num,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # apply Transformer layers (blocks)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for layer_module in self.decoder_layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    None,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if return_token_num > 0:
            # only keep the final hidden states of the masked tokens, whose pixels are predicted
            hidden_states = hidden_states[:, -return_token_num:]

        # predictor projection
        hidden_states = self.norm(hidden_states)

        # logits
        logits = self.head(hidden_states)

        if not return_dict:
            return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
        return VideoMAEDecoderOutput(
            logits=logits,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


@add_start_docstrings(
    "The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.videomae = VideoMAEModel(config)

        self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
        self.position_embeddings = get_sinusoid_encoding_table(
            self.videomae.embeddings.num_patches, config.decoder_hidden_size
        )

        self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: torch.BoolTensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, VideoMAEForPreTrainingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
            (image_size // patch_size) ** 2`.

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
        >>> import numpy as np
        >>> import torch

        >>> num_frames = 16
        >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")

        >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values

        >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
        >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
        >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss = outputs.loss
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.videomae(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        sequence_output = self.encoder_to_decoder(
            sequence_output
        )  # [batch_size, num_visible_patches, decoder_hidden_size]
        batch_size, seq_len, num_channels = sequence_output.shape

        # we don't unshuffle the correct visible token order, but shuffle the position embeddings accordingly
        if bool_masked_pos is None:
            raise ValueError("One must provide a boolean mask")
        expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
        expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach()
        pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
        pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)

        # [batch_size, num_patches, decoder_hidden_size]
        x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)

        # [batch_size, num_masked_patches, num_channels * patch_size * patch_size]
        decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
        logits = decoder_outputs.logits

        loss = None
        with torch.no_grad():
            # calculate the labels to be predicted
            if self.config.num_channels != 3:
                # can't unnormalize with the default ImageNet means/stds
                frames = pixel_values
            else:
                # first, unnormalize the frames
                device = pixel_values.device
                dtype = pixel_values.dtype
                mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
                std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
                frames = pixel_values * std + mean  # in [0, 1]

            batch_size, time, num_channels, height, width = frames.shape
            tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
            if self.config.norm_pix_loss:
                # step 1: split up dimensions (time by tubelet_size, height and width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate to patches of shape (tubelet_size * patch_size * patch_size) per channel
                frames = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size,
                    num_channels,
                )
                # step 4: normalize each patch over its spatio-temporal positions, per channel
                frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
                    frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
                )
                # step 5: reshape to (batch_size, num_patches, tubelet_size * patch_size * patch_size * num_channels)
                videos_patch = frames_norm.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )
            else:
                if self.config.num_channels != 3:
                    raise ValueError(
                        "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False."
                    )
                # step 1: split up dimensions (time by tubelet_size, height and width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate
                videos_patch = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )

            batch_size, _, num_channels = videos_patch.shape
            # only the masked patches are reconstructed and supervised
            labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels)

        loss_fct = MSELoss()
        loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return VideoMAEForPreTrainingOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden
    states of all tokens) e.g. for ImageNet.""",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.videomae = VideoMAEModel(config)

        # Classifier head
        self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import torch
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
        >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        eating spaghetti
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.videomae(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        if self.fc_norm is not None:
            sequence_output = self.fc_norm(sequence_output.mean(1))
        else:
            sequence_output = sequence_output[:, 0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
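

# ---------------------------------------------------------------------------------------------------------------
# Minimal pre-training sketch (added for illustration; not part of the library API). It builds a boolean mask in
# which every video hides the same number of patches -- a requirement stated in the docstrings above -- and runs a
# randomly initialized model so nothing is downloaded. The 90% masking ratio is an illustrative choice, not a
# value prescribed by this file.
if __name__ == "__main__":
    config = VideoMAEConfig()
    model = VideoMAEForPreTraining(config)

    num_patches_per_frame = (config.image_size // config.patch_size) ** 2
    seq_length = (config.num_frames // config.tubelet_size) * num_patches_per_frame

    # mask a fixed number of randomly chosen patches per video
    num_masked = int(0.9 * seq_length)
    shuffled = torch.rand(1, seq_length).argsort(dim=-1)
    bool_masked_pos = torch.zeros(1, seq_length, dtype=torch.bool)
    bool_masked_pos.scatter_(1, shuffled[:, :num_masked], True)

    pixel_values = torch.randn(1, config.num_frames, config.num_channels, config.image_size, config.image_size)
    outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
    # logits have shape (1, num_masked, tubelet_size * patch_size**2 * num_channels)
    print(outputs.loss, outputs.logits.shape)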