"""
 Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library). For slow (python) tokenizers
 see tokenization_utils.py
    N)defaultdict)AnyDictListOptionalTupleUnion)Encoding)	Tokenizer)Decoder)
BpeTrainerUnigramTrainerWordLevelTrainerWordPieceTrainer   )convert_slow_tokenizer)PreTrainedTokenizer)
INIT_TOKENIZER_DOCSTRING
AddedTokenBatchEncodingPreTokenizedInputPreTokenizedInputPairPreTrainedTokenizerBaseSpecialTokensMixin	TextInputTextInputPairTruncationStrategy)PaddingStrategyadd_end_docstringsloggingztokenizer.jsonzspecial_tokens_map.jsonztokenizer_config.jsonzadded_tokens.jsonu  
        tokenizer_object ([`tokenizers.Tokenizer`]):
            A [`tokenizers.Tokenizer`] object from 🤗 tokenizers to instantiate from. See [Using tokenizers from 🤗
            tokenizers](../fast_tokenizers) for more information.
        tokenizer_file ([`str`]):
            A path to a local JSON file representing a previously serialized [`tokenizers.Tokenizer`] object from 🤗
            tokenizers.
"""

MODEL_TO_TRAINER_MAPPING = {
    "BPE": BpeTrainer,
    "Unigram": UnigramTrainer,
    "WordLevel": WordLevelTrainer,
    "WordPiece": WordPieceTrainer,
}

VOCAB_FILES_NAMES = {"tokenizer_file": TOKENIZER_FILE}


@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
    """
    Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).

    Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].

    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.

    This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
    specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
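
    Example (an illustrative sketch, not a shipped artifact: it assumes a previously serialized `tokenizer.json`
    file exists at the hypothetical local path below):

    ```python
    >>> from transformers import PreTrainedTokenizerFast

    >>> # Wrap an existing `tokenizers` serialization file into a fast tokenizer
    >>> tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json")  # doctest: +SKIP
    >>> tokenizer("Hello world")["input_ids"]  # doctest: +SKIP
    ```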
    Nslow_tokenizer_classc                    s  | dd }| dd }| dd }| dd}| di }|r-|d u r-jd u r-td|d ur7t|}n+|d urC|sCt|}n|d urLt|}njd ur^j|i |}t|}ntd|_|d uro|	|j
 d_jj}	|	d urjjdi |	 |d	|	d	  |d
|	d  |d|	d  |d|	d  nj  jj}
|
d urjjdi |
 |d|
d  |d|
d  |d|
d  |d	|
d  |d|
d  t jdi | fddt| dd dD tj dd D    fddjD 7 tdkrid }g }j}D ]5}t|tr9|jp8t||v nt||v }|d u sI||krO| | n
j!||d |g}|}q'|rkj!||d d S d S d S )Ntokenizer_object__slow_tokenizerr$   	from_slowFadded_tokens_decoderzCannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you have sentencepiece installed.a-  Couldn't instantiate the backend tokenizer from one of: 
(1) a `tokenizers` library serialization file, 
(2) a slow tokenizer instance to convert or 
(3) an equivalent slow tokenizer class to instantiate and convert. 
You need to have sentencepiece installed to convert a slow tokenizer to a fast one.
max_lengthtruncation_side	directionstridetruncation_strategystrategy	pad_tokenpad_token_type_idpad_type_idpadding_sidelengthpad_to_multiple_ofc                    s   g | ]\}}| j vr|qS  )r*   ).0indextokenselfr7   Z/var/www/html/ai/venv/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py
<listcomp>   s
    
z4PreTrainedTokenizerFast.__init__.<locals>.<listcomp>c                 S      | d S Nr   r7   )xr7   r7   r=   <lambda>       z2PreTrainedTokenizerFast.__init__.<locals>.<lambda>keyc                 S   s   g | ]}t |qS r7   )strr8   r:   r7   r7   r=   r>      s    c                    s    g | ]}| vr|vr|qS r7   r7   rG   )encodertokens_to_addr7   r=   r>      s    r   )special_tokensr7   )"popr&   
ValueErrorcopydeepcopyTokenizerFast	from_filer   
_tokenizerupdateinit_kwargs_decode_use_source_tokenizer
truncationenable_truncation
setdefaultno_truncationpaddingenable_paddingsuper__init__sorteditemslistadded_tokens_encoderkeysall_special_tokens_extendedlenall_special_tokens
isinstancer   specialrF   append_add_tokens)r<   argskwargsr'   slow_tokenizerfast_tokenizer_filer)   r*   fast_tokenizer_truncation_paddingis_last_specialtokensrJ   r:   
is_special	__class__)rH   r<   rI   r=   r\   ^   s   





z PreTrainedTokenizerFast.__init__returnc                 C      dS )NTr7   r;   r7   r7   r=   is_fast   s   zPreTrainedTokenizerFast.is_fastc                 C   rv   )z
        `bool`: Whether or not the slow tokenizer can be saved. Usually for sentencepiece based slow tokenizer, this
        can only be `True` if the original `"sentencepiece.model"` was not deleted.
        """
        return True

    @property
    def vocab_size(self) -> int:
        """
        `int`: Size of the base vocabulary (without the added tokens).
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=False)

    def get_vocab(self) -> Dict[str, int]:
        return self._tokenizer.get_vocab(with_added_tokens=True)

    @property
    def vocab(self) -> Dict[str, int]:
        return self.get_vocab()

    @property
    def added_tokens_encoder(self) -> Dict[str, int]:
        """
        Returns the sorted mapping from string to index. The added tokens encoder is cached for performance
        optimisation in `self._added_tokens_encoder` for the slow tokenizers.
        """
        return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}

    @property
    def added_tokens_decoder(self) -> Dict[int, AddedToken]:
        """
        Returns the added tokens in the vocabulary as a dictionary of index to AddedToken.

        Returns:
            `Dict[str, int]`: The added tokens.
        """
        return self._tokenizer.get_added_tokens_decoder()

    def get_added_vocab(self) -> Dict[str, int]:
        """
        Returns the added tokens in the vocabulary as a dictionary of token to index.

        Returns:
            `Dict[str, int]`: The added tokens.
        """
        return {k.content: v for v, k in sorted(self.added_tokens_decoder.items(), key=lambda item: item[0])}

    def __len__(self) -> int:
        """
        Size of the full vocabulary with the added tokens.
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=True)

    @property
    def backend_tokenizer(self) -> TokenizerFast:
        """
        `tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend.
        """
        return self._tokenizer

    @property
    def decoder(self) -> DecoderFast:
        """
        `tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer.
        """
        return self._tokenizer.decoder

    def _convert_encoding(
        self,
        encoding: EncodingFast,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> Tuple[Dict[str, Any], List[EncodingFast]]:
        """
        Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict and a list
        of encodings, take care of building a batch from overflowing tokens.

        Overflowing tokens are converted to additional examples (like batches) so the output values of the dict are
        lists (overflows) of lists (tokens).

        Output shape: (overflows, sequence length)
        """
        if return_token_type_ids is None:
            return_token_type_ids = "token_type_ids" in self.model_input_names
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_overflowing_tokens and encoding.overflowing is not None:
            encodings = [encoding] + encoding.overflowing
        else:
            encodings = [encoding]

        encoding_dict = defaultdict(list)
        for e in encodings:
            encoding_dict["input_ids"].append(e.ids)

            if return_token_type_ids:
                encoding_dict["token_type_ids"].append(e.type_ids)
            if return_attention_mask:
                encoding_dict["attention_mask"].append(e.attention_mask)
            if return_special_tokens_mask:
                encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
            if return_offsets_mapping:
                encoding_dict["offset_mapping"].append(e.offsets)
            if return_length:
                encoding_dict["length"].append(len(e.ids))

        return encoding_dict, encodings

    def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
        """
        Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the
        vocabulary.

        Args:
            tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).

        Returns:
            `int` or `List[int]`: The token id or list of token ids.
        """
        if tokens is None:
            return None

        if isinstance(tokens, str):
            return self._convert_token_to_id_with_added_voc(tokens)

        return [self._convert_token_to_id_with_added_voc(token) for token in tokens]

    def _convert_token_to_id_with_added_voc(self, token: str) -> int:
        index = self._tokenizer.token_to_id(token)
        if index is None:
            return self.unk_token_id
        return index

    def _convert_id_to_token(self, index: int) -> Optional[str]:
        return self._tokenizer.id_to_token(int(index))

    def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int:
        if special_tokens:
            return self._tokenizer.add_special_tokens(new_tokens)

        return self._tokenizer.add_tokens(new_tokens)

    def num_special_tokens_to_add(self, pair: bool = False) -> int:
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        <Tip>

        This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
        this inside your training loop.

        </Tip>

        Args:
            pair (`bool`, *optional*, defaults to `False`):
                Whether the number of added tokens should be computed in the case of a sequence pair or a single
                sequence.

        Returns:
            `int`: Number of special tokens added to sequences.
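
        Example (illustrative; `"bert-base-uncased"` is only a placeholder checkpoint):

        ```python
        >>> from transformers import AutoTokenizer

        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # doctest: +SKIP
        >>> tokenizer.num_special_tokens_to_add(pair=False)  # [CLS] and [SEP]  # doctest: +SKIP
        ```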
        """
        return self._tokenizer.num_special_tokens_to_add(pair)

    def convert_ids_to_tokens(
        self, ids: Union[int, List[int]], skip_special_tokens: bool = False
    ) -> Union[str, List[str]]:
        """
        Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
        added tokens.

        Args:
            ids (`int` or `List[int]`):
                The token id (or token ids) to convert to tokens.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.

        Returns:
            `str` or `List[str]`: The decoded token(s).
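
        Example (illustrative; `tokenizer` stands for any instantiated fast tokenizer):

        ```python
        >>> ids = tokenizer("Hello world")["input_ids"]  # doctest: +SKIP
        >>> tokenizer.convert_ids_to_tokens(ids)  # doctest: +SKIP
        ```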
        """
        if isinstance(ids, int):
            return self._tokenizer.id_to_token(ids)
        tokens = []
        for index in ids:
            index = int(index)
            if skip_special_tokens and index in self.all_special_ids:
                continue
            tokens.append(self._tokenizer.id_to_token(index))
        return tokens

    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
        return self.encode_plus(text=text, text_pair=pair, add_special_tokens=add_special_tokens, **kwargs).tokens()

    def set_truncation_and_padding(
        self,
        padding_strategy: PaddingStrategy,
        truncation_strategy: TruncationStrategy,
        max_length: int,
        stride: int,
        pad_to_multiple_of: Optional[int],
    ):
        """
        Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers
        library) and restore the tokenizer settings afterwards.

        The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a
        padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed
        section.

        Args:
            padding_strategy ([`~utils.PaddingStrategy`]):
                The kind of padding that will be applied to the input
            truncation_strategy ([`~tokenization_utils_base.TruncationStrategy`]):
                The kind of truncation that will be applied to the input
            max_length (`int`):
                The maximum size of a sequence.
            stride (`int`):
                The stride to use when handling overflow.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
                the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
        """
        _truncation = self._tokenizer.truncation
        _padding = self._tokenizer.padding

        # Set truncation and padding on the backend tokenizer
        if truncation_strategy == TruncationStrategy.DO_NOT_TRUNCATE:
            if _truncation is not None:
                self._tokenizer.no_truncation()
        else:
            target = {
                "max_length": max_length,
                "stride": stride,
                "strategy": truncation_strategy.value,
                "direction": self.truncation_side,
            }

            # `_truncation` might contain more keys than the target supports; compare on the target keys only so
            # this works across `tokenizers` versions.
            if _truncation is None:
                current = None
            else:
                current = {k: _truncation.get(k, None) for k in target}

            if current != target:
                self._tokenizer.enable_truncation(**target)

        if padding_strategy == PaddingStrategy.DO_NOT_PAD:
            if _padding is not None:
                self._tokenizer.no_padding()
        else:
            length = max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None
            target = {
                "length": length,
                "direction": self.padding_side,
                "pad_id": self.pad_token_id,
                "pad_token": self.pad_token,
                "pad_type_id": self.pad_token_type_id,
                "pad_to_multiple_of": pad_to_multiple_of,
            }
            if _padding != target:
                self._tokenizer.enable_padding(**target)

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
        ],
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        if not isinstance(batch_text_or_text_pairs, (tuple, list)):
            raise TypeError(
                f"batch_text_or_text_pairs has to be a list or a tuple (got {type(batch_text_or_text_pairs)})"
            )

        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
        )

        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=is_split_into_words,
        )

        # Convert each low-level `EncodingFast` into a (dict, encodings) pair, with nested dimensions
        # corresponding to (batch, overflows, sequence length)
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]

        # Convert the output from list[dict] to dict[list] and flatten the overflows dimension:
        # from (batch, overflows, sequence length) to roughly (batch * overflows, sequence length)
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]

        # If returning overflowing tokens, we need a mapping from each flattened sample back to its batch index
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping

        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_output = self._batch_encode_plus(
            batched_input,
            is_split_into_words=is_split_into_words,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        # If return_tensors is None, unpack the batch dimension but keep the additional information about
        # the overflowing tokens
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )

        self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)

        return batched_output

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return self.backend_tokenizer.decoder.decode(tokens)

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        if isinstance(token_ids, int):
            token_ids = [token_ids]
        text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def _save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        file_names: Tuple[str],
        legacy_format: Optional[bool] = None,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens as well as in a unique JSON
        file containing {config + vocab + added-tokens}.
        """
        save_directory = str(save_directory)

        if self.slow_tokenizer_class is None and legacy_format is True:
            raise ValueError(
                "Your tokenizer does not have a legacy version defined and therefore cannot register this version. You"
                " might consider leaving the legacy_format at `None` or setting it to `False`."
            )

        save_slow = (
            (legacy_format is None or legacy_format is True)
            and self.slow_tokenizer_class is not None
            and self.can_save_slow_tokenizer
        )
        save_fast = legacy_format is None or legacy_format is False

        if save_slow:
            added_tokens_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
            )
            # make sure to be forward compatible
            added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size}
            if added_vocab:
                with open(added_tokens_file, "w", encoding="utf-8") as f:
                    out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
                    f.write(out_str)

            vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
            file_names = file_names + vocab_files + (added_tokens_file,)

        if save_fast:
            tokenizer_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_FILE
            )
            self.backend_tokenizer.save(tokenizer_file)
            file_names = file_names + (tokenizer_file,)

        return file_names

    def train_new_from_iterator(
        self,
        text_iterator,
        vocab_size,
        length=None,
        new_special_tokens=None,
        special_tokens_map=None,
        **kwargs,
    ):
        """
        Trains a tokenizer on a new corpus with the same defaults (in terms of special tokens or tokenization pipeline)
        as the current one.

        Args:
            text_iterator (generator of `List[str]`):
                The training corpus. Should be a generator of batches of texts, for instance a list of lists of texts
                if you have everything in memory.
            vocab_size (`int`):
                The size of the vocabulary you want for your tokenizer.
            length (`int`, *optional*):
                The total number of sequences in the iterator. This is used to provide meaningful progress tracking
            new_special_tokens (list of `str` or `AddedToken`, *optional*):
                A list of new special tokens to add to the tokenizer you are training.
            special_tokens_map (`Dict[str, str]`, *optional*):
                If you want to rename some of the special tokens this tokenizer uses, pass along a mapping old special
                token name to new special token name in this argument.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the trainer from the 🤗 Tokenizers library.

        Returns:
            [`PreTrainedTokenizerFast`]: A new tokenizer of the same type as the original one, trained on
            `text_iterator`.
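
        Example (an illustrative sketch; the in-memory corpus and the `"gpt2"` checkpoint are placeholders):

        ```python
        >>> from transformers import AutoTokenizer

        >>> old_tokenizer = AutoTokenizer.from_pretrained("gpt2")  # doctest: +SKIP
        >>> corpus = [["First batch of training texts."], ["Second batch of training texts."]]
        >>> new_tokenizer = old_tokenizer.train_new_from_iterator(corpus, vocab_size=1000)  # doctest: +SKIP
        ```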

        """
        tokenizer_json = json.loads(self._tokenizer.to_str())
        # Remove added tokens for now (uses IDs of tokens)
        added_tokens = tokenizer_json.pop("added_tokens")
        # Remove post processor for now (uses IDs of tokens)
        post_processor = tokenizer_json.pop("post_processor")

        unk_token = None
        # Remove vocab
        if tokenizer_json["model"]["type"] == "BPE":
            tokenizer_json["model"]["vocab"] = {}
            tokenizer_json["model"]["merges"] = []
        elif tokenizer_json["model"]["type"] == "Unigram":
            if tokenizer_json["model"]["unk_id"] is not None:
                unk_id = tokenizer_json["model"]["unk_id"]
                unk_token = tokenizer_json["model"]["vocab"][unk_id][0]
                if special_tokens_map is not None and unk_token in special_tokens_map:
                    unk_token = special_tokens_map[unk_token]
                tokenizer_json["model"]["unk_id"] = 0
                tokenizer_json["model"]["vocab"] = [[unk_token, 0.0]]
        elif tokenizer_json["model"]["type"] in ["WordLevel", "WordPiece"]:
            tokenizer_json["model"]["vocab"] = {}
        else:
            raise ValueError(
                f"This method does not support this type of tokenizer (found {tokenizer_json['model']['type']}) "
                "only BPE, Unigram, WordLevel and WordPiece."
            )

        if (
            special_tokens_map is not None
            and "unk_token" in tokenizer_json["model"]
            and tokenizer_json["model"]["unk_token"] in special_tokens_map
        ):
            tokenizer_json["model"]["unk_token"] = special_tokens_map[tokenizer_json["model"]["unk_token"]]

        tokenizer = TokenizerFast.from_str(json.dumps(tokenizer_json))

        # Get the special tokens from the current tokenizer if none are specified.
        special_tokens = []
        for added_token in added_tokens:
            special = added_token.pop("special", None)
            _ = added_token.pop("id", None)
            if tokenizer_json["model"]["type"] != "Unigram" and not special:
                continue
            if special_tokens_map is not None and added_token["content"] in special_tokens_map:
                added_token["content"] = special_tokens_map[added_token["content"]]
            special_tokens.append(AddedToken(**added_token))

        if new_special_tokens is not None:
            special_tokens.extend(new_special_tokens)

        # Trainer needs to know the end of word / continuing subword suffixes and prefixes for BPE
        if (
            tokenizer_json["model"]["type"] == "BPE"
            and "continuing_subword_prefix" not in kwargs
            and tokenizer_json["model"]["continuing_subword_prefix"] is not None
        ):
            kwargs["continuing_subword_prefix"] = tokenizer_json["model"]["continuing_subword_prefix"]
        if (
            tokenizer_json["model"]["type"] == "BPE"
            and "end_of_word_suffix" not in kwargs
            and tokenizer_json["model"]["end_of_word_suffix"] is not None
        ):
            kwargs["end_of_word_suffix"] = tokenizer_json["model"]["end_of_word_suffix"]
        if tokenizer_json["model"]["type"] == "Unigram" and unk_token is not None:
            kwargs["unk_token"] = unk_token
        if tokenizer_json["pre_tokenizer"] is not None and tokenizer_json["pre_tokenizer"]["type"] == "ByteLevel":
            kwargs["initial_alphabet"] = pre_tokenizers_fast.ByteLevel.alphabet()

        trainer_class = MODEL_TO_TRAINER_MAPPING[tokenizer_json["model"]["type"]]
        trainer = trainer_class(vocab_size=vocab_size, special_tokens=special_tokens, **kwargs)
        tokenizer.train_from_iterator(text_iterator, length=length, trainer=trainer)

        if post_processor is not None:
            trained_tokenizer_json = json.loads(tokenizer.to_str())
            # Almost done, we just have to adjust the token IDs in the post processor
            if "special_tokens" in post_processor:
                for key in post_processor["special_tokens"]:
                    tokens = post_processor["special_tokens"][key]["tokens"]
                    if special_tokens_map is not None:
                        tokens = [special_tokens_map.get(token, token) for token in tokens]
                    post_processor["special_tokens"][key]["tokens"] = tokens
                    post_processor["special_tokens"][key]["ids"] = [tokenizer.token_to_id(token) for token in tokens]

            for special_token in ["cls", "sep"]:
                if special_token in post_processor:
                    token, _ = post_processor[special_token]
                    if special_tokens_map is not None and token in special_tokens_map:
                        token = special_tokens_map[token]
                    token_id = tokenizer.token_to_id(token)
                    post_processor[special_token] = [token, token_id]

            trained_tokenizer_json["post_processor"] = post_processor
            tokenizer = TokenizerFast.from_str(json.dumps(trained_tokenizer_json))

        kwargs = self.init_kwargs.copy()
        # Map pad/cls/mask token at the Transformers level
        special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy()
        special_tokens_list.remove("additional_special_tokens")
        for token in special_tokens_list:
            # Get the private one to avoid unnecessary warnings.
            if getattr(self, f"_{token}") is not None:
                special_token = getattr(self, token)
                if special_tokens_map is not None and special_token in special_tokens_map:
                    special_token = special_tokens_map[special_token]

                special_token_full = getattr(self, f"_{token}")
                if isinstance(special_token_full, AddedToken):
                    # Create an added token with the same parameters except the content
                    kwargs[token] = AddedToken(
                        special_token,
                        single_word=special_token_full.single_word,
                        lstrip=special_token_full.lstrip,
                        rstrip=special_token_full.rstrip,
                        normalized=special_token_full.normalized,
                        special=True,
                    )
                else:
                    kwargs[token] = special_token

        additional_special_tokens = self.additional_special_tokens
        if new_special_tokens is not None:
            additional_special_tokens.extend(new_special_tokens)
        if len(additional_special_tokens) > 0:
            kwargs["additional_special_tokens"] = additional_special_tokens

        return self.__class__(tokenizer_object=tokenizer, **kwargs)