import uuid
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """
    Utility class containing a conversation and its history. This class is meant to be used as an input to the
    [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user
    inputs and generated model responses.

    Arguments:
        messages (Union[str, List[Dict[str, str]]], *optional*):
            The initial messages to start the conversation, either a string, or a list of dicts containing "role" and
            "content" keys. If a string is passed, it is interpreted as a single message with the "user" role.
        conversation_id (`uuid.UUID`, *optional*):
            Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
            conversation.

    Usage:

    ```python
    conversation = Conversation("Going to the movies tonight - any suggestions?")
    conversation.add_message({"role": "assistant", "content": "The Big lebowski."})
    conversation.add_message({"role": "user", "content": "Is it good?"})
    ```"""

    def __init__(
        self, messages: Union[str, List[Dict[str, str]]] = None, conversation_id: uuid.UUID = None, **deprecated_kwargs
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()

        if messages is None:
            text = deprecated_kwargs.pop("text", None)
            if text is not None:
                messages = [{"role": "user", "content": text}]
            else:
                messages = []
        elif isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]

        # This block deals with the legacy args - new code should just totally
        # avoid past_user_inputs and generated_responses
        self._num_processed_user_inputs = 0
        generated_responses = deprecated_kwargs.pop("generated_responses", None)
        past_user_inputs = deprecated_kwargs.pop("past_user_inputs", None)
        if generated_responses is not None and past_user_inputs is None:
            raise ValueError("generated_responses cannot be passed without past_user_inputs!")
        if past_user_inputs is not None:
            legacy_messages = []
            if generated_responses is None:
                generated_responses = []
            # We structure it this way instead of using zip() because the lengths may differ by 1
            for i in range(max(len(past_user_inputs), len(generated_responses))):
                if i < len(past_user_inputs):
                    legacy_messages.append({"role": "user", "content": past_user_inputs[i]})
                if i < len(generated_responses):
                    legacy_messages.append({"role": "assistant", "content": generated_responses[i]})
            messages = legacy_messages + messages

        self.uuid = conversation_id
        self.messages = messages
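
    # Illustrative note on the legacy kwargs handled above (a sketch, not a doctest):
    # these two constructions produce the same message history:
    #
    #     Conversation(past_user_inputs=["Hi"], generated_responses=["Hello!"])
    #     Conversation([{"role": "user", "content": "Hi"},
    #                   {"role": "assistant", "content": "Hello!"}])
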
zConversation.__init__c                 C   s&   t |tsdS | j|jkp| j|jkS )NF)r   r   r   r   )r$   otherr(   r(   r)   __eq__M   s   
zConversation.__eq__messagec                 C   s@   t | ddhkstd|d dvrtd| j| d S )Nr   r   z6Message should contain only 'role' and 'content' keys!)r   r   systemzBOnly 'user', 'assistant' and 'system' roles are supported for now!)setkeysr   r   r#   r$   r-   r(   r(   r)   add_messageR   s
   zConversation.add_messageFr   	overwritec                 C   s   t | dkr:| d d dkr:|r(td| d d  d| d || d d< dS td| d d  d	| d
 dS | jd|d dS )a3  
        Add a user input to the conversation for the next round. This is a legacy method that assumes that inputs must
        alternate user/assistant/user/assistant, and so will not add multiple user messages in succession. We recommend
        just using `add_message` with role "user" instead.
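
        Example (illustrative sketch, not a doctest):

        ```python
        conversation = Conversation()
        conversation.add_user_input("Hi there!")
        conversation.add_user_input("This is ignored")  # warns: previous input is still unprocessed
        conversation.add_user_input("Used instead", overwrite=True)
        ```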
        """
        if len(self) > 0 and self[-1]["role"] == "user":
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self[-1]["content"]}" was overwritten '
                    f'with: "{text}".'
                )
                self[-1]["content"] = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self[-1]["content"]}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.messages.append({"role": "user", "content": text})

    def append_response(self, response: str):
        """
        This is a legacy method. We recommend just using `add_message` with an appropriate role instead.
        """
        self.messages.append({"role": "assistant", "content": response})

    def mark_processed(self):
        """
        This is a legacy method, as the Conversation no longer distinguishes between processed and unprocessed user
        input. We set a counter here to keep behaviour mostly backward-compatible, but in general you should just read
        the messages directly when writing new code.
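
        Example (illustrative sketch):

        ```python
        conversation = Conversation()
        conversation.add_user_input("Hi!")
        assert conversation.past_user_inputs == []  # newest input not yet processed
        conversation.mark_processed()
        assert conversation.past_user_inputs == ["Hi!"]
        ```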
        """
        self._num_processed_user_inputs = len(self._user_messages)

    def __iter__(self):
        for message in self.messages:
            yield message

    def __getitem__(self, item):
        return self.messages[item]

    def __setitem__(self, key, value):
        self.messages[key] = value

    def __len__(self):
        return len(self.messages)

    def __repr__(self):
        """
        Generates a string representation of the conversation.

        Returns:
            `str`:

        Example:
            Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user: Going to the movies tonight - any suggestions?
            bot: The Big Lebowski
        zConversation id: 
r   z: r   )r   r   )r$   outputr-   r(   r(   r)   __repr__   s   
zConversation.__repr__c                 c   s(    | j D ]}|d dk|d fV  qd S )Nr   r   r   r>   r1   r(   r(   r)   
iter_texts   s   
zConversation.iter_textsc                 C      dd | j D S )Nc                 S       g | ]}|d  dkr|d qS )r   r   r   r(   .0r-   r(   r(   r)   
<listcomp>        z/Conversation._user_messages.<locals>.<listcomp>r>   r;   r(   r(   r)   r:         zConversation._user_messagesc                 C   s@   | j sg S | jd d dks| jt| j kr| j S | j d d S )Nr4   r   r   )r:   r   r   r"   r;   r(   r(   r)   r      s
   "zConversation.past_user_inputsc                 C   rK   )Nc                 S   rL   )r   r   r   r(   rM   r(   r(   r)   rO      rP   z4Conversation.generated_responses.<locals>.<listcomp>r>   r;   r(   r(   r)   r      rQ   z Conversation.generated_responsesc                 C   s
   | j d S )Nr4   )r:   r;   r(   r(   r)   new_user_input   s   
zConversation.new_user_input)NN)F)__name__
__module____qualname____doc__r   r   r   r   r   UUIDr*   r,   r2   boolr7   r9   r<   r?   rA   rE   rF   rI   rJ   propertyr:   r   r   rR   r(   r(   r(   r)   r      s6    
%


r   a  
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline.

    Example:

    ```python
    >>> from transformers import pipeline, Conversation
    # Any model with a chat template can be used in a ConversationalPipeline.

    >>> chatbot = pipeline(model="facebook/blenderbot-400M-distill")
    >>> # Conversation objects initialized with a string will treat it as a user message
    >>> conversation = Conversation("I'm looking for a movie - what's your favourite one?")
    >>> conversation = chatbot(conversation)
    >>> conversation.messages[-1]["content"]
    " I don't really have a favorite movie, but I do like action movies. What about you?"

    >>> conversation.add_message({"role": "user", "content": "That's interesting, why do you like action movies?"})
    >>> conversation = chatbot(conversation)
    >>> conversation.messages[-1]["content"]
    " I think it's just because they're so fast-paced and action-fantastic."
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"conversational"`.

    This pipeline can be used with any model that has a [chat
    template](https://huggingface.co/docs/transformers/chat_templating) set.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[List[Dict], Conversation, List[Conversation]], num_workers=0, **kwargs):
        r"""
        Generate responses for the conversation(s) given as inputs.

        Args:
            conversations (a [`Conversation`] or a list of [`Conversation`]):
                Conversation to generate responses for. Inputs can also be passed as a list of dictionaries with `role`
                and `content` keys - in this case, they will be converted to `Conversation` objects automatically.
                Multiple conversations in either format may be passed as a list.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./model#generative-models)).

        Returns:
            [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those
            containing a new user input.
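
        Example (illustrative sketch; assumes the same checkpoint as the class docstring):

        ```python
        >>> chatbot = pipeline(model="facebook/blenderbot-400M-distill")
        >>> conversation = chatbot(Conversation("What's a good book to read?"))
        ```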
        """
        # num_workers==0 is required for backward compatibility: with worker threads,
        # each worker would otherwise need its own copy of the Conversation.
        if isinstance(conversations, list) and isinstance(conversations[0], dict):
            conversations = Conversation(conversations)
        elif isinstance(conversations, list) and isinstance(conversations[0], list):
            conversations = [Conversation(conv) for conv in conversations]
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        input_ids = self.tokenizer.apply_chat_template(conversation, add_generation_prompt=True)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
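
    # Note on the slicing in `_forward` below: encoder-decoder models emit only new tokens
    # (position 0 is the decoder start token), while decoder-only models echo the prompt,
    # so the generated ids are sliced from index 1 or from the prompt length respectively.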
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        n = model_inputs["input_ids"].shape[1]
        conversation = model_inputs.pop("conversation")
        if "max_length" not in generate_kwargs and "max_new_tokens" not in generate_kwargs:
            generate_kwargs["max_new_tokens"] = 256
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.add_message({"role": "assistant", "content": answer})
        return conversation