# coding=utf-8
""" PyTorch Persimmon model."""
import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from ...modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_persimmon import PersimmonConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "PersimmonConfig"


class PersimmonRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()

        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)

        # Build the cos/sin cache here so that `torch.jit.trace` works.
        self._set_cos_sin_cache(
            seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
        )

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.outer(t, self.inv_freq)
        # Different from the paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)

    def forward(self, x, seq_len=None):
        # x: [batch_size, num_attention_heads, seq_len, head_size]
        if seq_len > self.max_seq_len_cached:
            self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)

        return (
            self.cos_cached[:seq_len].to(dtype=x.dtype),
            self.sin_cached[:seq_len].to(dtype=x.dtype),
        )


class PersimmonLinearScalingRotaryEmbedding(PersimmonRotaryEmbedding):
    """PersimmonRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
        t = t / self.scaling_factor

        freqs = torch.outer(t, self.inv_freq)
        # Different from the paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


class PersimmonDynamicNTKScalingRotaryEmbedding(PersimmonRotaryEmbedding):
    """PersimmonRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        self.scaling_factor = scaling_factor
        super().__init__(dim, max_position_embeddings, base, device)

    def _set_cos_sin_cache(self, seq_len, device, dtype):
        self.max_seq_len_cached = seq_len

        if seq_len > self.max_position_embeddings:
            base = self.base * (
                (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
            ) ** (self.dim / (self.dim - 2))
            inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
            self.register_buffer("inv_freq", inv_freq, persistent=False)

        t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)

        freqs = torch.outer(t, self.inv_freq)
        # Different from the paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
        self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`):
            The position indices of the tokens corresponding to the query and key tensors. For example, this can be
            used to pass offsetted position ids when working with a KV-cache.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos[position_ids].unsqueeze(unsqueeze_dim)
    sin = sin[position_ids].unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class PersimmonMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states


class PersimmonAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
                "lead to errors during the forward call, if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.partial_rotary_factor = config.partial_rotary_factor
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )

        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
        self.qk_layernorm = config.qk_layernorm

        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self._init_rope()

    def _init_rope(self):
        if self.config.rope_scaling is None:
            self.rotary_emb = PersimmonRotaryEmbedding(
                int(self.partial_rotary_factor * self.head_dim),
                max_position_embeddings=self.max_position_embeddings,
                base=self.rope_theta,
            )
        else:
            scaling_type = self.config.rope_scaling["type"]
            scaling_factor = self.config.rope_scaling["factor"]
            if scaling_type == "linear":
                self.rotary_emb = PersimmonLinearScalingRotaryEmbedding(
                    int(self.partial_rotary_factor * self.head_dim),
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            elif scaling_type == "dynamic":
                self.rotary_emb = PersimmonDynamicNTKScalingRotaryEmbedding(
                    int(self.partial_rotary_factor * self.head_dim),
                    max_position_embeddings=self.max_position_embeddings,
                    scaling_factor=scaling_factor,
                    base=self.rope_theta,
                )
            else:
                raise ValueError(f"Unknown RoPE scaling type {scaling_type}")

    def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
        storage as `fused_qkv`

        Args:
            fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        # [batch_size, seq_length, 3 x hidden_size]
        fused_qkv = self.query_key_value(hidden_states)

        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_states, key_states, value_states) = self._split_heads(fused_qkv)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        # [batch_size, seq_length, num_heads, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            if self.layer_idx is None:
                raise ValueError(
                    f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
                    "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
                    "with a layer index."
                )
            kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        # Partial rotary embedding
        query_rot, query_pass = (
            query_states[..., : self.rotary_emb.dim],
            query_states[..., self.rotary_emb.dim :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_emb.dim],
            key_states[..., self.rotary_emb.dim :],
        )
        # [batch_size, num_heads, seq_length, head_dim // config.partial_rotary_factor]
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_value is not None:
            # Specific to RoPE models with partial rotation support
            cache_kwargs = {"sin": sin, "cos": cos, "partial_rotation_size": self.rotary_emb.dim}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights + attention_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query_states.dtype)
        attn_weights = self.attention_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)

        attn_output = self.dense(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
class PersimmonDecoderLayer(nn.Module):
    def __init__(self, config: PersimmonConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)
        self.mlp = PersimmonMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.

                [What are position IDs?](../glossary#position-ids)
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
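

# --- Illustrative sketch (not part of the upstream module) -----------------------------------------
# Hedged example of driving a single decoder layer by hand, the way `PersimmonModel.forward` below
# does: the 2D padding mask is first expanded into the additive 4D causal mask the attention layer
# expects. The helper name `_sketch_decoder_layer_forward` and the toy sizes are assumptions.
def _sketch_decoder_layer_forward():
    config = PersimmonConfig(
        vocab_size=128, hidden_size=64, intermediate_size=128, num_hidden_layers=1, num_attention_heads=4
    )
    layer = PersimmonDecoderLayer(config, layer_idx=0)

    batch_size, seq_len = 2, 7
    hidden_states = torch.randn(batch_size, seq_len, config.hidden_size)
    padding_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
    causal_mask = _prepare_4d_causal_attention_mask(padding_mask, (batch_size, seq_len), hidden_states, 0)
    position_ids = torch.arange(seq_len).unsqueeze(0)

    (hidden_states,) = layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids)
    return hidden_states.shape  # torch.Size([2, 7, 64])

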
zPersimmonDecoderLayer.forwardr   )rH   rI   rJ   r   r   r&   r*   r   r   r   r   r   FloatTensorrF   rK   r4   r4   r2   r5   r   h  s,    r   aN  
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`PersimmonConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare Persimmon Model outputting raw hidden-states without any specific head on top.",
    PERSIMMON_START_DOCSTRING,
)
class PersimmonPreTrainedModel(PreTrainedModel):
    config_class = PersimmonConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PersimmonDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _supports_cache_class = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


PERSIMMON_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
            cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Persimmon Model outputting raw hidden-states without any specific head on top.",
    PERSIMMON_START_DOCSTRING,
)
class PersimmonModel(PersimmonPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]

    Args:
        config: PersimmonConfig
    """

    def __init__(self, config: PersimmonConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        seq_length_with_past = seq_length
        past_key_values_length = 0

        use_legacy_cache = False
        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)
            seq_length_with_past = seq_length_with_past + past_key_values_length

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
            )
        attention_mask = _prepare_4d_causal_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
            )
            use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class PersimmonForCausalLM(PersimmonPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = PersimmonModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(PERSIMMON_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PersimmonForCausalLM

        >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

        >>> prompt = "human: Hey, what should I eat for dinner?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, self.config.vocab_size)
            shift_labels = shift_labels.view(-1)
            # Enable model parallelism
            shift_labels = shift_labels.to(shift_logits.device)
            loss = loss_fct(shift_logits, shift_labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
    ):
        if past_key_values is not None:
            if isinstance(past_key_values, Cache):
                cache_length = past_key_values.get_seq_length()
                past_length = past_key_values.seen_tokens
                max_cache_length = past_key_values.get_max_length()
            else:
                cache_length = past_length = past_key_values[0][0].shape[2]
                max_cache_length = None

            # Keep only the unprocessed tokens:
            # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
            # some of the inputs are exclusively passed as part of the cache (e.g. when passing inputs_embeds as input)
            if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
                input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
            # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
            # input_ids based on the past_length.
            elif past_length < input_ids.shape[1]:
                input_ids = input_ids[:, past_length:]
            # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.

            # If we are about to go beyond the maximum cache length, we need to crop the input attention mask.
            if (
                max_cache_length is not None
                and attention_mask is not None
                and cache_length + input_ids.shape[1] > max_cache_length
            ):
                attention_mask = attention_mask[:, -max_cache_length:]

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -input_ids.shape[1] :]

        # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "position_ids": position_ids,
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "attention_mask": attention_mask,
            }
        )
        return model_inputs

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        reordered_past = ()
        for layer_past in past_key_values:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
            )
        return reordered_past
    The Persimmon transformer with a sequence classification head on top (linear layer).

    [`PersimmonForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    c                       s   e Zd Z fddZdd Zdd Zee										ddej	d	e
ej d
e
ej	 de
eej  de
ej de
ej	 de
e de
e de
e de
e deeef fddZ  ZS )"PersimmonForSequenceClassificationc                    s@   t  | |j| _t|| _tj|j| jdd| _| 	  d S r  )
r%   r&   
num_labelsr   r   r   re   rf   scorer   rl   r2   r4   r5   r&     s
   
z+PersimmonForSequenceClassification.__init__c                 C   r  rG   r  r   r4   r4   r5   r     r  z7PersimmonForSequenceClassification.get_input_embeddingsc                 C   r  rG   r  r   r4   r4   r5   r     r  z7PersimmonForSequenceClassification.set_input_embeddingsNr   r   r_   r   r   r"  r   r   r   r   r   c                 C   s  |
dur|
n| j j}
| j||||||||	|
d	}|d }| |}|dur+|jd }n|jd }| j jdu r>|dkr>td| j jdu rGd}n|dur_t|| j j	 
dd |j}nd}|tj||jd|f }d}|dur||j}| j jdu r| jdkrd| j _n| jdkr|jtjks|jtj	krd	| j _nd
| j _| j jdkrt }| jdkr|| | }n+|||}n%| j jd	krt }||d| j|d}n| j jd
krt }|||}|
s|f|dd  }|dur|f| S |S t|||j|j|jdS )a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).long().argmax(-1) - 1).to(
                    logits.device
                )
            else:
                sequence_lengths = -1

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]

        loss = None
        if labels is not None:
            labels = labels.to(logits.device)
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )