"""
The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.
    N)Mapping)Path)TYPE_CHECKINGAnyCallableDictListOptionalTupleUnion   )#get_reporting_integration_callbacks	hp_params)	ModelCardcreate_repoupload_folder)version)nn)
DataLoaderDatasetRandomSamplerSequentialSampler)__version__)PretrainedConfig)DataCollatorDataCollatorWithPaddingdefault_data_collator)DebugOptionDebugUnderflowOverflow)"ALL_HYPERPARAMETER_SEARCH_BACKENDSdefault_hp_search_backend)deepspeed_initdeepspeed_load_checkpointis_deepspeed_available)TrainingSummary)PreTrainedModelload_sharded_checkpointunwrap_model)!MODEL_FOR_CAUSAL_LM_MAPPING_NAMESMODEL_MAPPING_NAMES)	Adafactorget_scheduler)ALL_LAYERNORM_LAYERSis_torch_less_than_1_11)PreTrainedTokenizerBase)CallbackHandlerDefaultFlowCallbackPrinterCallbackProgressCallbackTrainerCallbackTrainerControlTrainerState)DistributedTensorGathererIterableDatasetShardLabelSmootherLengthGroupedSamplerSequentialDistributedSamplerdistributed_broadcast_scalarsdistributed_concatfind_batch_sizeget_dataloader_samplerget_model_param_countget_module_class_from_nameget_parameter_namesnested_concatnested_detachnested_numpifynested_xla_mesh_reducereissue_pt_warningsremove_dummy_checkpoint)PREFIX_CHECKPOINT_DIRBestRunEvalLoopOutputEvalPredictionHPSearchBackendHubStrategyIntervalStrategyPredictionOutputRemoveColumnsCollatorTrainerMemoryTrackerTrainOutputdefault_compute_objectivedenumpify_detensorizeenable_full_determinismfind_executable_batch_sizeget_last_checkpoint
has_lengthneftune_post_forward_hooknumber_of_argumentsseed_workerset_seedspeed_metrics)OptimizerNamesParallelModeTrainingArguments)ADAPTER_CONFIG_NAMEADAPTER_SAFE_WEIGHTS_NAMEADAPTER_WEIGHTS_NAMECONFIG_NAMESAFE_WEIGHTS_INDEX_NAMESAFE_WEIGHTS_NAMEWEIGHTS_INDEX_NAMEWEIGHTS_NAMEPushInProgresscan_return_lossfind_labelsis_accelerate_availableis_apex_availableis_bitsandbytes_availableis_datasets_availableis_in_notebookis_ipex_availableis_peft_availableis_safetensors_availableis_sagemaker_dp_enabledis_sagemaker_mp_enabledis_torch_compile_availableis_torch_neuroncore_availableis_torch_npu_availableis_torch_tpu_availablelogging	strtobool)QuantizationMethod)NotebookProgressCallback)ampF)check_devicez1.10)smp_forward_backwardsmp_forward_only
smp_gathersmp_nested_concat)	PeftModel)Acceleratorskip_first_batches)DistributedDataParallelKwargsGradientAccumulationPluginload_fsdp_modelload_fsdp_optimizersave_fsdp_modelsave_fsdp_optimizer0.23.0)SeedableRandomSampler)DeepSpeedSchedulerWrapperztraining_args.binztrainer_state.jsonzoptimizer.ptzoptimizer.binzscheduler.ptz	scaler.ptpytorch_model_fsdpc                   @   s  e Zd ZdZddlmZmZmZmZm	Z	 											dde
eejf dedee d	ee d
ee
eeeef f  dee deeg ef  deeegef  deee  deejjejjjf deeejejgejf  fddZ dd Z!dd Z"dd Z#dd Z$dd Z%dd Z&dd  Z'dd!d"d#ee fd$d%Z(	dded#ee d&efd'd(Z)d&eej*j+j, fd)d*Z-d&e.fd+d,Z/d
ed&eej*j+j, fd-d.Z0dd
ee d&e.fd/d0Z1d1ed&e.fd2d3Z2d4e3fd5d6Z4d&ee fd7d8Z5d9d: Z6e7ded&ee8e8f fd;d<Z9dd4e3d=ejjfd>d?Z:d@e.d&e3fdAdBZ;ddCe.dDee3 d&e3fdEdFZ<dGe
dHeee8f f fdIdJZ=dGe
dHeee8f f dKe3dLeee>f fdMdNZ?dOefdPdQZ@ddRdSZAddUdVZBdTejCfdWdXZDddZd[ZE			dd\ee
eeFf  dGe
dHeee8f f d]eee  fd^d_ZG	dd`daZHdbdc ZIddddeZJdfdg ZKdhdi ZLdjdk ZMdldm ZNddndoZOdpdq ZPdrds ZQdtdu ZR			v	w		ddxeedHgeee>f f  dyeeeee>f ge>f  dze3d{e
eee f d|ee
d}eSf  d~eedHgef  d&e
eTeeT f fddZUdeee>f d&dfddZVde
eje8f d&e
eje8f fddZWdeee
eje8f f d&eee
eje8f f fddZXdd ZYddeeF fddZZdejdeee
eje8f f d&ejfddZ[dddZ\d&eFfddZ]d&eFfddZ^ddee deFfddZ_ddee fddZ`ddee fddZadd ZbdecdTfd&ee fddZdddddZe			dd
ee deee  ded&eee>f fddZf	dd1edeee  ded&egfddZh			dd@e.d#edeeF deee  ded&eifddZjdddZk	ddejdeee
eje8f f deFdeee  d&eeej eej eej f f
ddZldeee
eje8f f fddZmdd Zn									ddee dee de
eee df dee dee de
eee df de
eee df d!e
eee df de
eee df fddZoddÄ Zpddń Zqddee deFd&efddʄZr			dd@e.d#edeeF deee  ded&eifdd̄Zsdd΄ ZtdddЄZudd҄ ZvdS )Trainerul  
    Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.

    Args:
        model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
            The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.

            <Tip>

            [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
            your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
            models.

            </Tip>

        args ([`TrainingArguments`], *optional*):
            The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
            `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
        data_collator (`DataCollator`, *optional*):
            The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
            default to [`default_data_collator`] if no `tokenizer` is provided, an instance of
            [`DataCollatorWithPadding`] otherwise.
        train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):
            The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
            `model.forward()` method are automatically removed.

            Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
            distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
            `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
            manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
            sets the seed of the RNGs used.
        eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]], *optional*):
            The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
            `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
            dataset, prepending the dictionary key to the metric name.
        tokenizer ([`PreTrainedTokenizerBase`], *optional*):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
            interrupted training or reuse the fine-tuned model.
        model_init (`Callable[[], PreTrainedModel]`, *optional*):
            A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
            from a new instance of the model as given by this function.

            The function may have zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object,
            to be able to choose different architectures according to hyperparameters (such as layer count, sizes of
            inner layers, dropout probabilities, etc.).
        compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
            The function that will be used to compute metrics at evaluation. Must take an [`EvalPrediction`] and
            return a dictionary mapping metric names to metric values (see the example below).
        callbacks (List of [`TrainerCallback`], *optional*):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in [here](callback).

            If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
        optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your
            model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
            A function that preprocesses the logits right before caching them at each evaluation step. Must take two
            tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
            by this function will be reflected in the predictions received by `compute_metrics`.

            Note that the labels (second parameter) will be `None` if the dataset does not have them.
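
    Example of a `compute_metrics` function (a minimal sketch; the accuracy computation is illustrative and not part
    of this module):

    ```python
    import numpy as np


    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        predictions = np.argmax(logits, axis=-1)
        return {"accuracy": float((predictions == labels).mean())}
    ```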

    Important attributes:

        - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
          subclass.
        - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
          original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
          the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
          model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
        - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
          data parallelism, this means some of the model layers are split on different GPUs).
        - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
          to `False` if model parallel or deepspeed is used, or if the default
          `TrainingArguments.place_model_on_device` is overridden to return `False`.
        - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
          in `train`)

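    Example (a minimal sketch; the checkpoint name, the toy dataset and the output directory are illustrative
    placeholders, not part of this module):

    ```python
    from datasets import Dataset
    from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments

    # Tiny illustrative dataset; any dataset with tokenized inputs and a "label" column works.
    raw = Dataset.from_dict({"text": ["great movie", "terrible movie"], "label": [1, 0]})
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    dataset = raw.map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True)

    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
    args = TrainingArguments(output_dir="tmp_trainer", num_train_epochs=1, per_device_train_batch_size=2)

    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=dataset,
        eval_dataset=dataset,
        tokenizer=tokenizer,  # enables dynamic padding through `DataCollatorWithPadding`
    )
    trainer.train()
    trainer.evaluate()
    ```
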
    """

    # Those are used as methods of the Trainer in examples.
    from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state

    def __init__(
        self,
        model: Union[PreTrainedModel, nn.Module] = None,
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        model_init: Optional[Callable[[], PreTrainedModel]] = None,
        compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
    ):
        # Highlights of the initialization logic: when no `args` are passed, a default
        # `TrainingArguments(output_dir="tmp_trainer")` is created; exactly one of `model`/`model_init` is required;
        # purely quantized models are rejected unless trainable adapters are attached; `--fsdp` cannot be combined
        # with DeepSpeed; passing `optimizers` is rejected together with `model_init`, DeepSpeed or FSDP. The data
        # collator, callback handler, memory tracker, label smoother and accelerator are then set up.
        ...

    def _activate_neftune(self, model):
        """
        Activates the neftune as presented in this code: https://github.com/neelsjain/NEFTune and paper:
        https://arxiv.org/abs/2310.05914
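
        Example (illustrative; `model` and `train_ds` are placeholders — NEFTune is activated internally by `train()`
        when `neftune_noise_alpha` is set in the training arguments):

        ```python
        args = TrainingArguments(output_dir="tmp_trainer", neftune_noise_alpha=5.0)
        trainer = Trainer(model=model, args=args, train_dataset=train_ds)
        trainer.train()  # the noise hook is registered at the start of training and removed at the end
        ```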
        """
        # Unwraps the model (handling PEFT wrappers), registers `neftune_post_forward_hook` on its input embeddings
        # and keeps the handle in `self.neftune_hook_handle`.
        ...

    def _deactivate_neftune(self, model):
        """
        Deactivates the neftune method. Make sure to call `_activate_neftune` first.
        """
        # Removes the forward hook installed by `_activate_neftune` and clears `self.neftune_hook_handle`.
        ...

    def add_callback(self, callback):
        """
        Add a callback to the current list of [`~transformers.TrainerCallback`].

        Args:
           callback (`type` or [`~transformers.TrainerCallback`]):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will instantiate a member of that class.
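
        Example (a minimal sketch; the callback below is illustrative and assumes an existing `trainer` instance):

        ```python
        from transformers import TrainerCallback


        class LogEpochCallback(TrainerCallback):
            def on_epoch_end(self, args, state, control, **kwargs):
                print(f"finished epoch {int(state.epoch)} at step {state.global_step}")


        trainer.add_callback(LogEpochCallback)  # a class or an instance both work
        ```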
        N)r   r   r  callbackr   r   r   r        	zTrainer.add_callbackc                 C   s   | j |S )aK  
        Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.

        If the callback is not found, returns `None` (and no error is raised).

        Args:
           callback (`type` or [`~transformers.TrainerCallback`]):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            [`~transformers.TrainerCallback`]: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of [`~transformers.TrainerCallback`].

        Args:
           callback (`type` or [`~transformers.TrainerCallback`]):
               A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
               first case, will remove the first member of that class found in the list of callbacks.
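
        Example (illustrative; assumes an existing `trainer` instance):

        ```python
        from transformers.trainer_callback import ProgressCallback

        trainer.remove_callback(ProgressCallback)  # drop the default progress-bar callback
        ```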
        N)r   remove_callbackr6  r   r   r   r:    r8  zTrainer.remove_callbackc                 C   s6   | |}| jjtjkrt|dr|  d S d S d S )Ntie_weights)tor   r   r_   TPUr   r;  )r  r   r   r   r   r   r     s   
zTrainer._move_model_to_devicec                 C   sP   | j d u r&t| jj}t|j | _ |  j ttddg| j	 7  _ d S d S )Nlabel	label_ids)
r  inspect	signaturer   forwardlistr   keysr   r  )r  rA  r   r   r    _set_signature_columns_if_needed  s
   
$z(Trainer._set_signature_columns_if_neededdatasetzdatasets.Datasetdescriptionc                    s   | j js S |   | j}tt jt| }t|dkrK|d u r$dnd| d}t	d| d| j
jj dd| d	d| d
| j
jj d  fdd|D }ttjtdk ro j jd | jd d  S  |S )Nr    zin the z setzThe following columns z) don't have a corresponding argument in `z!.forward` and have been ignored: , z. If z are not expected by `z/.forward`,  you can safely ignore this message.c                    s   g | ]	}| j v r|qS r   )column_namesr   krF  r   r   r         z2Trainer._remove_unused_columns.<locals>.<listcomp>z1.4.0typeformat_kwargs)rO  columnsrP  )r   remove_unused_columnsrE  r  rC  r   rJ  r   r   r   r   r   r   joinr   parsedatasetsr   
set_formatformatremove_columns)r  rF  rG  signature_columnsignored_columnsdset_descriptionrQ  r   rM  r   _remove_unused_columns  s2   
zTrainer._remove_unused_columnsreturnc                 C   s6   | j js|S |   | j}t||t|| jjjd}|S )z=Wrap the data collator in a callable removing unused columns.)r   rY  r   rG  
model_name)	r   rR  rE  r  rP   r   r   r   r   )r  r   rG  rY  remove_columns_collatorr   r   r   "_get_collator_with_removed_columns  s   z*Trainer._get_collator_with_removed_columnsc                 C   s   | j d u s
t| j sd S | jjrJt r,t| j tjr,| jj| j j	v r)| j | jj nd }nd }| j
d ur9| j
jd nd }t| jj| jj | j ||dS t| j S )Nr   )rF  lengthsmodel_input_name)r   rX   r   r  ro   r   rU  r   length_column_namerJ  r   model_input_namesr9   r  gradient_accumulation_stepsr   )r  ra  rb  r   r   r   _get_train_sampler  s"   
zTrainer._get_train_samplerc                 C   s   | j du r	td| j }| j}t r t|tjr | j|dd}n| j|dd}| j	|| j
j| j
j| j
jd}t|tjjjsN|  |d< | j
j|d< t|d< | jt|fi |S )	a@  
        Returns the training [`~torch.utils.data.DataLoader`].

        Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
        training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
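
        Example (a minimal sketch of a subclass override; `MyTrainer` is illustrative):

        ```python
        class MyTrainer(Trainer):
            def get_train_dataloader(self) -> DataLoader:
                dataloader = super().get_train_dataloader()
                print(f"training on {len(self.train_dataset)} examples")
                return dataloader
        ```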
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")
        # Builds a `DataLoader` over `self.train_dataset` with the training sampler, the (column-stripping) data
        # collator and the dataloader-related `TrainingArguments`, then lets `self.accelerator` prepare it.
        ...

    def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
        # Sequential sampler for evaluation, sharded across processes for the legacy prediction loop.
        ...

    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
        """
        Returns the evaluation [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (`torch.utils.data.Dataset`, *optional*):
                If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
                by the `model.forward()` method are automatically removed. It must implement `__len__`.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        # Same construction as the training dataloader, but with the evaluation sampler and `args.eval_batch_size`.
        ...

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """
        Returns the test [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (`torch.utils.data.Dataset`, *optional*):
                The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
                `model.forward()` method are automatically removed. It must implement `__len__`.
        testrh  ri  ro  rp  )r   ro   r   rU  r   r\  r`  r   r  rr  rs  rt  r   r  r  r  r  ru  rv  rw  r   )r  r  r   rx  r   r   r   get_test_dataloader_  s   zTrainer.get_test_dataloadernum_training_stepsc                 C   s8   |    trtjjjr| jj}n| j}| j||d dS )aZ  
        """
        # Same construction as the evaluation dataloader, built over `test_dataset`.
        ...

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
        `create_scheduler`) in a subclass.
        """
        # Calls `create_optimizer()` and then `create_scheduler(...)` with the resulting optimizer.
        ...

    def get_decay_parameter_names(self, model) -> List[str]:
        """
        Get all parameter names that weight decay will be applied to

        Note that some models implement their own layernorm instead of calling nn.LayerNorm, weight decay could still
        apply to those modules since this function only filters out instances of nn.LayerNorm.
        """
        decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
        decay_parameters = [name for name in decay_parameters if "bias" not in name]
        return decay_parameters

    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.
        """
        # Groups parameters into weight-decay / no-weight-decay sets (see `get_decay_parameter_names`) and
        # instantiates the optimizer class returned by `get_optimizer_cls_and_kwargs`. 8-bit bitsandbytes optimizers
        # additionally register their embedding modules to be optimized in fp32.
        ...

    @staticmethod
    def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
        """
        Returns the optimizer class and optimizer parameters based on the training arguments.

        Args:
            args (`transformers.training_args.TrainingArguments`):
                The training arguments for the training session.

        """
        # Maps every `OptimizerNames` value (AdamW variants, Adafactor, fused/8-bit/paged optimizers, Lion, SGD,
        # Adagrad, RMSprop, ...) to its optimizer class and keyword arguments, raising when the matching backend
        # (torch_xla, torch_npu, apex, bitsandbytes, torchdistx, ...) is not installed.
        ...

    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
        """
        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
        passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        """
        # Only creates the scheduler once: `get_scheduler(args.lr_scheduler_type, ...)` with warmup steps derived
        # from `args.get_warmup_steps(num_training_steps)`.
        ...

    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can.
        """
        ...

    def num_tokens(self, train_dl: DataLoader, max_steps: Optional[int] = None) -> int:
        """
        Helper to get number of tokens in a [`~torch.utils.data.DataLoader`] by enumerating the dataloader.
        """
        ...

    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """HP search setup code"""
        # Copies the hyperparameters suggested by the optuna / Ray Tune / SigOpt / W&B trial onto `self.args`.
        ...

    def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
        # Reports the current objective back to the hyperparameter-search backend (and prunes optuna trials).
        ...

    def _tune_save_checkpoint(self, checkpoint_dir: str):
        # Saves the model, trainer state, optimizer and scheduler for Ray Tune checkpoints.
        ...

    def call_model_init(self, trial=None):
        # Calls `self.model_init` with zero or one argument and checks that it returns a model.
        ...

    def torch_jit_model_eval(self, model, dataloader, training=False):
        # Best-effort `torch.jit.trace` of the model for evaluation; falls back to the original model on failure.
        ...

    def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
        # Applies `intel_extension_for_pytorch.optimize` when `args.use_ipex` is set.
        ...

    def _wrap_model(self, model, training=True, dataloader=None):
        # Wraps the model for SageMaker MP, apex AMP, DataParallel, XLA FSDP or DistributedDataParallel as needed.
        ...

    def train(
        self,
        resume_from_checkpoint: Optional[Union[str, bool]] = None,
        trial: Union["optuna.Trial", Dict[str, Any]] = None,
        ignore_keys_for_eval: Optional[List[str]] = None,
        **kwargs,
    ):
        """
        Main training entry point.

        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
                of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments used to hide deprecated arguments.
        """
        # Resolves the checkpoint to resume from, seeds the RNGs, re-creates the model through `model_init` for
        # hyperparameter search, then runs `_inner_training_loop` (wrapped by `find_executable_batch_size` when
        # `args.auto_find_batch_size` is set).
        ...

    def _inner_training_loop(
        self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
    ):
        # The main optimization loop: builds the training dataloader, creates (or defers) the optimizer and
        # scheduler, wraps the model (DeepSpeed, FSDP, DDP, apex, IPEX or JIT as configured), iterates over epochs
        # and steps with gradient accumulation and clipping, and triggers logging, evaluation and checkpointing
        # through `_maybe_log_save_evaluate`. When `args.load_best_model_at_end` is set, the best checkpoint is
        # reloaded at the end, and a `TrainOutput(global_step, train_loss, metrics)` is returned.
        ...

    def _get_output_dir(self, trial):
        # A `run-<trial id>` subfolder of `args.output_dir` during hyperparameter search, `args.output_dir` otherwise.
        ...

    def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
        # Restores model weights from a checkpoint folder (safetensors or PyTorch, sharded, FSDP or PEFT adapter
        # formats), warning when the checkpoint was produced by a different Transformers version.
        ...

    def _load_best_model(self):
        # Reloads the weights recorded in `self.state.best_model_checkpoint` at the end of training.
        ...

    def _issue_warnings_after_load(self, load_result):
        # Warns about missing and unexpected keys reported by `load_state_dict`.
        ...

    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        # Emits the training logs, runs evaluation and saves a checkpoint according to the `self.control` flags set
        # by the callbacks.
        ...

    def _load_rng_state(self, checkpoint):
        # Restores the python, numpy, torch and accelerator (CUDA/NPU/XLA) RNG states saved with a checkpoint, so
        # that resumed runs stay reproducible.
        ...

    def _save_checkpoint(self, model, trial, metrics=None):
        # Writes the model, optimizer, scheduler, trainer state and RNG state under a `checkpoint-<global step>`
        # folder, tracks the best metric/checkpoint and rotates older checkpoints according to
        # `args.save_total_limit`.
        ...

    def _save_rng_state(self, output_dir):
        # Saves the RNG states of all relevant libraries and devices into `rng_state.pth` (or
        # `rng_state_<process index>.pth` per process in distributed runs).
        ...
rng_statesr   r   r   r  U	  s$   
&zTrainer._save_rng_statec                 C   s  t  r?td t| j tj|t	 t
jdd}t| j tj|t t| W d    n1 s9w   Y  net rj| jjdd}t  t dksXtjjjritj|tj|t	dtjjjd n:| jrt| j| n0| jrt| jjj| j| j| t| jjj| j| j| j| n| j j!rt"| j tj|t	 | jot#| jt$ }| j j!r| jr|rt  st
jdd}t"| j tj|t W d    n1 sw   Y  t| d S d S d S d S )Nsaving_optimizer_statesTrecordF)gather_if_shardr   )r  v3)%ry   r}  r  rN  r   rO  r   rI  rS  rP  r   catch_warningsr   rQ  rF   ru   local_state_dictr
  r  rdp_rankr  r  shard_optimizer_stater   r   save_checkpointr   r   rv  rb  r   r   r   r   r   r   r   )r  r   caught_warningsopt_state_dictis_deepspeed_custom_schedulerr   r   r   r  u	  sX   

z%Trainer._save_optimizer_and_schedulerc              	      s   du rdS | j r9t| jts7tjdd}| jtt	j
 t W d   n1 s.w   Y  t| dS t rHtt	j
 td n)t	j
t	j
 tpqt	j
t	j
 tpqt	j
 oqt fddt	 D }|rQt	j
t	j
 trSt rtjt	j
 tdd}tjdd}tjt	j
 tdd}W d   n1 sw   Y  t| t|| jj t|| jj | j| | j| dS t rt	j
t	j
 d	r fd
d}n fdd}| j| n0| jjdkr| jjnd}| jrt | j!j"j#| j!| j| j$  n| jtjt	j
 t|d tjdd}| jtt	j
 t W d   n	1 sFw   Y  t| dS dS dS )z3If optimizer and scheduler states exist, load them.NTr  _*c                 3   s8    | ]}t jt j |rtd d |v V  qdS )rZ  r   N)r   rI  rF  rS  OPTIMIZER_NAME_BINr  rH  r1  r   r   rK  	  s    
z8Trainer._load_optimizer_and_scheduler.<locals>.<genexpr>r   rQ  rM  c                    s"   | tjtj tdd d S )NTr  )ra  r
  r`  r   rI  rS  rP  modoptr1  r   r   opt_load_hook	  s   "z<Trainer._load_optimizer_and_scheduler.<locals>.opt_load_hookc                    sJ   t r|tjtj tddd d S |tjtj tdd d S )NT)r  back_compatr  )r	  ra  r
  r`  r   rI  rS  rP  r  r1  r   r   r  	  s
   "r   )%r   r   r   r   r   r  ra  r   r`  r   rI  rS  rQ  rF   ru   globrP  r  r  rF  r\  r]  ry   r}  send_cpu_data_to_devicer   r   r   r   register_post_step_hookr  r   r   rv  r  rb  r   )r  r2  r  checkpoint_file_existsoptimizer_statelr_scheduler_stater  rR  r   r1  r   r  	  sh   z%Trainer._load_optimizer_and_scheduler   minimizer  r4  n_trials	directionbackendr  r   c           
      K   s   |du rt  }t|}t|  }|  || _| jdu r td|du r'|jn|| _|| _	|du r3t
n|| _|j| ||fi |}	d| _|	S )aD  
        Launch a hyperparameter search using `optuna`, `Ray Tune` or `SigOpt`. The optimized quantity is determined
        by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
        the sum of all metrics otherwise.

        <Tip warning={true}>

        To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
        reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
        subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
        optimizer/scheduler.

        </Tip>

        Args:
            hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
                A function that defines the hyperparameter search space. Will default to
                [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
                [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
            compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
                A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
                method. Will default to [`~trainer_utils.default_compute_objective`].
            n_trials (`int`, *optional*, defaults to 100):
                The number of trial runs to test.
            direction (`str` or `List[str]`, *optional*, defaults to `"minimize"`):
                For single-objective optimization, `direction` is a `str` and can be `"minimize"` or `"maximize"`;
                pick `"minimize"` when optimizing the validation loss and `"maximize"` when optimizing one or several
                metrics. For multi-objective optimization, `direction` is a `List[str]` containing `"minimize"` and/or
                `"maximize"` entries; again, pick `"minimize"` when optimizing the validation loss and `"maximize"`
                when optimizing one or several metrics.
            backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
                on which one is installed. If all are installed, will default to optuna.
            hp_name (`Callable[["optuna.Trial"], str]]`, *optional*):
                A function that defines the trial/run name. Will default to None.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
                information see:

                - the documentation of
                  [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
                - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
                - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)

        Returns:
            [`trainer_utils.BestRun` or `List[trainer_utils.BestRun]`]: All the information about the best run or best
            runs for multi-objective optimization. Experiment summary can be found in `run_summary` attribute for Ray
            backend.
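# Illustrative sketch of the call described above, using the optuna backend. The names
# `model_init_example`, `hp_space_example`, `train_ds` and `eval_ds` are hypothetical and exist
# only for this example; the Trainer/optuna calls themselves follow the docstring above.
def _example_hyperparameter_search(train_ds, eval_ds):
    from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

    def model_init_example():
        # Re-created at every trial so each run starts from the same initial weights.
        return AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)

    def hp_space_example(trial):
        # `trial` is an `optuna.Trial`; sample only names that TrainingArguments accepts.
        return {
            "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
            "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32]),
        }

    trainer = Trainer(
        model=None,  # the model is built by `model_init_example` for every run
        model_init=model_init_example,
        args=TrainingArguments(output_dir="hp_search_output"),
        train_dataset=train_ds,
        eval_dataset=eval_ds,
    )
    # Minimizes the objective (by default the evaluation loss) over 10 trials.
    return trainer.hyperparameter_search(
        hp_space=hp_space_example, backend="optuna", n_trials=10, direction="minimize"
    )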
        NzXTo use hyperparameter search, you need to pass your model through a model_init function.)r    rL   r   ensure_availabler  r   r   default_hp_spacer  r   rS   r4  rC  )
r  r  r4  r  r  r  r   r  backend_objbest_runr   r   r   hyperparameter_search	  s    ;

zTrainer.hyperparameter_searchr  c                 C   sr   | j jdurt| j jd|d< | jjr| j j|d< i |d| j ji}| j j| | j	
| j| j | j|| _dS )z
        Log `logs` on the various objects watching training.

        Subclass and override this method to inject custom behavior.

        Args:
            logs (`Dict[str, float]`):
                The values to log.
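# A minimal sketch of the subclassing pattern mentioned above; `LoggingTrainer` and the extra
# "gpu_mem_mb" key are hypothetical names used only for this example.
def _example_log_override():
    import torch
    from transformers import Trainer

    class LoggingTrainer(Trainer):
        def log(self, logs):
            # Attach an extra value before delegating to the stock implementation.
            if torch.cuda.is_available():
                logs["gpu_mem_mb"] = torch.cuda.memory_allocated() / 1024**2
            super().log(logs)

    return LoggingTrainer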
        N   r  r  r  )r  r  r  r   r  r  rJ  log_historyrg  r   on_logr  )r  r  outputr   r   r   r  B
  s   
zTrainer.logr  c                    s   t |trt| fdd| D S t |ttfr(t| fdd|D S t |tjrVd jj	i} j
rNt|sAt|rN|d jjjj i |jdi |S |S )	z|
        Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
        c                    s   i | ]
\}}|  |qS r   _prepare_inputr  r  r   r   r  Z
  s    z*Trainer._prepare_input.<locals>.<dictcomp>c                 3       | ]}  |V  qd S r  r  )r   r  r  r   r   rK  \
      z)Trainer._prepare_input.<locals>.<genexpr>r   rr  Nr   )r   r   rO  r$  rh  rC  r   Tensorr   r   r   is_floating_point
is_complexr  rv  r  r+  r  rr  r<  )r  r  r  r   r  r   r  U
  s   
zTrainer._prepare_inputr9  c                 C   sR   |  |}t|dkrtdd| j d| jjdkr'| jdur'| j|d< |S )z
        Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
        handling potential state.
        r   zThe batch received was empty, your model won't be able to train on it. Double-check that your training dataset contains keys expected by the model: r  rZ  Nmems)r  r   r   rS  r  r   r  r  r  r9  r   r   r   r]  g
  s   


zTrainer._prepare_inputsc                 C   s   |   S )zF
        A helper wrapper to group together context managers.
        )autocast_smart_context_managerr  r   r   r   compute_loss_context_managerw
  s   z$Trainer.compute_loss_context_managerrT  c                 C   s*   | j rtjjj|| jd}|S t }|S )z
        A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
        arguments, depending on the situation.
        )rT  rr  )r  r   r   r~   r`  r  
contextlibnullcontext)r  rT  ctx_managerr   r   r   r  }
  s
   z&Trainer.autocast_smart_context_managerc                 C   s   |   | |}t rt||| jj}|  | jj	S | 
  | ||}W d   n1 s4w   Y  | jjdkrC| }| jrbt|| j}|  W d   n1 s\w   Y  n| j| | | jj S )aq  
        Perform a training step on a batch of inputs.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to train.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.

        Return:
            `torch.Tensor`: The tensor with training loss on this batch.
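# A hedged sketch of the "subclass and override" pattern the docstring describes: wrap the stock
# training step to time it. `TimedStepTrainer` and the `last_step_seconds` attribute are hypothetical.
def _example_training_step_override():
    import time
    from transformers import Trainer

    class TimedStepTrainer(Trainer):
        def training_step(self, model, inputs):
            start = time.perf_counter()
            loss = super().training_step(model, inputs)  # forward + backward as usual
            self.last_step_seconds = time.perf_counter() - start
            return loss

    return TimedStepTrainer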
        Nr   )r@  r]  ru   r   r   re  reduce_meandetachr<  r   r  compute_lossr  r  r  r~   
scale_lossr   backwardrv  )r  r   r9  loss_mbr  scaled_lossr   r   r   r  
  s"   


zTrainer.training_stepc           	      C   s  | j durd|v r|d}nd}|di |}| jjdkr%|| jj | _|durVt|}t r<t|tr<|j	j
 }n| }|t v rO| j ||dd}n2|  ||}n+t|trtd|vrttdd|  d	d|  d
t|tr}|d n|d }|r||fS |S )z
        How the loss is computed by Trainer. By default, all models return the loss in the first element.

        Subclass and override for custom behavior.
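# A sketch of overriding `compute_loss` for custom behavior, here a class-weighted cross-entropy.
# `WeightedLossTrainer` and the weights [1.0, 3.0] are hypothetical; the example assumes a 2-class
# classification model that returns `logits` and a batch that contains "labels".
def _example_compute_loss_override():
    import torch
    from transformers import Trainer

    class WeightedLossTrainer(Trainer):
        def compute_loss(self, model, inputs, return_outputs=False):
            labels = inputs.pop("labels")
            outputs = model(**inputs)
            logits = outputs.logits
            loss_fct = torch.nn.CrossEntropyLoss(weight=torch.tensor([1.0, 3.0], device=logits.device))
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            return (loss, outputs) if return_outputs else loss

    return WeightedLossTrainer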
        Nlabelsr   T)shift_labelsr  zJThe model did not return a loss from the inputs, only the following keys: r  z,. For reference, the inputs it received are rZ  r   )r  r!  r   r  r  r'   rr   r   r   r+  r   	_get_namer(   r   rc  r   rS  rD  )	r  r   r9  return_outputsr  outputsr/  r^  r  r   r   r   r  
  s0   zTrainer.compute_lossc                 C   s   | j jdkS )z
        Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
        machines) main process.
        r   )r   local_process_indexr  r   r   r   r   
  s   zTrainer.is_local_process_zeroc                 C   s   t  r	t dkS | jjdkS )z
        Whether or not this process is the global main process (when training in a distributed fashion on several
        machines, this is only going to be `True` for one process).
        r   )ru   r
  r{  r   r  r  r   r   r   r   
  s   zTrainer.is_world_process_zeror   rH  c                 C   s~  |du r| j j}t r| | nt r9tj|dd | j }| j j	r+| j
||d tr8ttj|d  nt| jrcdt| jjjjv rbtttdkrb| j| j}| j j	rb| j
||d nJ| jrz| j| j}| j j	ry| j
||d W n2 ty   td | j j	r| j
|i d t | j j	|t!t"g | j#| Y n
w | j j	r| 
| | j j$r|s| j$d	d
 dS dS dS )z
        Will save the model, so you can reload it using `from_pretrained()`.

        Will only save from the main process.
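# A minimal sketch of the round trip implied above: save with `save_model`, reload with
# `from_pretrained`. `trainer` and the output path are hypothetical; the tokenizer is saved too
# when one was passed to the Trainer.
def _example_save_and_reload(trainer):
    from transformers import AutoModelForSequenceClassification

    trainer.save_model("./my_finetuned_model")
    return AutoModelForSequenceClassification.from_pretrained("./my_finetuned_model")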
        NTr   )rO  rM  FULL_STATE_DICTz0.24.1z| stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use zero_to_fp32.py to recover weightsz
Model save)commit_message)%r   r   ry   	_save_tpuru   r   r   r   rO  r   _saver	  r   rI  rS  touchr   r  rv  r  rb  state_dict_typer   rT  r  get_state_dictr   r   r   r   r   r  rG   rh   rf   r  r   )r  r   rH  rO  r   r   r   rK  
  sN   

zTrainer.save_modelc                 C   s  |d ur|n| j j}td|  t r)tj|dd t	| j tj
|t td t| jtsktt| jtrRt| jdj|| j j| j tj	d n(td | j d}t	|tj
|t n| jdj|| j jtj	d | jd ur| j jr| j| d S d S d S )	NSaving model checkpoint to Tr   saving_checkpointr   )is_main_processrO  save_functionETrainer.model is not a `PreTrainedModel`, only saving its state dict.)r  r  )r   r   r   r   r}  is_master_ordinalr   r   r   rN  rI  rS  TRAINING_ARGS_NAMEr  r   r   r%   r'   r<  save_pretrainedr   rO  rh   r   )r  r   rO  r   r   r   r    s.   

zTrainer._save_tpuc                 C   s  |d ur|n| j j}tj|dd td|  t stfnttf}t	| j
|sj|d u r2| j
 }t	t| j
|rHt| j
j||| j jd n-td | j jr^tj|tj|t nt|tj|t n| j
j||| j jd | jd ur| j| t| j tj|t d S )NTr   r  )rO  safe_serializationr  )r   r   r   r   r   r   rr   r%   r   r   r   rO  r'   r  rc  rd  r   	save_filerI  rS  rf   rN  rh   r   r  )r  r   rO  supported_classesr   r   r   r  9  s*   





zTrainer._savec                 C   s\   | j jtjkr | j jt| jg| j jd	 
 7  _d| _d S | j j| j7  _d| _d S )NrT  r   )r   r   r_   r   r  r  r;   r  r   r  r  r  r   r   r   r  [  s   

zTrainer.store_flosc                 C   s  g }dd t || dD }|D ]1}|r#|tj||f qtd| d|}|d urD| d urD|t	| d |f qt
|}dd |D }| jjd urtt | jj|v r|tt | jj}	t|	t|d D ]}
||
d	  ||
 ||
< ||
d	 < qt|S )
Nc                 S   s    g | ]}t j|rt|qS r   )r   rI  rF  r  )r   xr   r   r   r   k  s     z/Trainer._sorted_checkpoints.<locals>.<listcomp>-*z.*z	-([0-9]+)r   c                 S   s   g | ]}|d  qS )r   r   )r   r2  r   r   r   r   v  s    r  r   )r   r  rg  r   rI  getmtimerematchgroupsr  sortedr  r  r  indexr  r   )r  r   checkpoint_prefixr  ordering_and_checkpoint_pathglob_checkpointsrI  regex_matchr>  best_model_indexir   r   r   r  f  s"   $zTrainer._sorted_checkpointsc                 C   s   | j jd u s| j jdkrd S | j||d}t|| j jkrd S | j j}| jjd ur9| j jdkr9|d | jjkr9d}tdt|| }|d | }|D ]}td| d t	j
|dd	 qJd S )
Nr   r  r   r  r  r  r  T)ignore_errors)r   r   r  r   r  r  r  r   r   r"  r#  )r  r  r   r>  r   number_of_checkpoints_to_deletecheckpoints_to_be_deletedr2  r   r   r   r    s    zTrainer._rotate_checkpointsr^  r  r  c           	   
   C   s  | j   | |}t }| jjr| jn| j}||d| jdu r"dnd||d}| jj	| jj
 }| d|jv rB||j| d 7 }|jt|||jt|j| d | |j tj| jjv ritt  | j| j| j| j|j| _| j |j |jS )a'  
        Run evaluation and return metrics.

        The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
        (pass it to the init `compute_metrics` argument).

        You can also subclass and override this method to inject custom behavior.

        Args:
            eval_dataset (`Dataset`, *optional*):
                Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
                not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
                method.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "eval_bleu" if the prefix is "eval" (the default).

        Returns:
            A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
            dictionary also contains the epoch number which comes from the training state.
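# A hedged usage sketch for `evaluate`. It assumes `trainer` was built with a `compute_metrics`
# function and that `tokenized_validation` (hypothetical) is a tokenized dataset with labels.
def _example_evaluate(trainer, tokenized_validation):
    metrics = trainer.evaluate(eval_dataset=tokenized_validation, metric_key_prefix="eval")
    # Keys are prefixed as documented above, e.g. "eval_loss", "eval_runtime",
    # plus whatever `compute_metrics` returned ("eval_accuracy", ...).
    return metrics["eval_loss"]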
        
EvaluationNT)rG  prediction_loss_onlyr  r  _jit_compilation_timer  r  )r   r   r  r  r   r|  prediction_loopevaluation_loopr   r  r  r.  r  r]   r  r  r  r  r   r  r  r}  r  r  r  r   on_evaluater  r  r  )	r  r   r  r  eval_dataloaderr  	eval_loopr  total_batch_sizer   r   r   r    s8   


	zTrainer.evaluater  c           	   
   C   s   | j   | |}t }| jjr| jn| j}||d||d}| jj| jj	 }| d|j
v r:||j
| d 7 }|j
t|||jt|j| d | j| j| j| j|j
| _| j |j
 t|j|j|j
dS )a  
        Run prediction and return predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
        will also return metrics, like in `evaluate()`.

        Args:
            test_dataset (`Dataset`):
                Dataset to run the predictions on. If it is a `datasets.Dataset`, columns not accepted by the
                `model.forward()` method are automatically removed. It has to implement the method `__len__`.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.
            metric_key_prefix (`str`, *optional*, defaults to `"test"`):
                An optional prefix to be used as the metrics key prefix. For example, the metric "bleu" will be named
                "test_bleu" if the prefix is "test" (the default).

        <Tip>

        If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
        padding in a token classification task), the predictions will be padded (on the right) to allow for
        concatenation into one array. The padding index is -100.

        </Tip>

        Returns: *NamedTuple* A namedtuple with the following keys:

            - predictions (`np.ndarray`): The predictions on `test_dataset`.
            - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
            - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
              labels).
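# A hedged sketch of consuming the named tuple described above. `trainer` and `tokenized_test`
# are hypothetical; the example assumes a classification model whose `predictions` are per-class logits.
def _example_predict(trainer, tokenized_test):
    import numpy as np

    output = trainer.predict(tokenized_test, metric_key_prefix="test")
    predicted_classes = np.argmax(output.predictions, axis=-1)
    return predicted_classes, output.label_ids, output.metrics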
        
Prediction)rG  r  r  r  r  )predictionsr?  r.  )r   r   r  r  r   r|  r  r   r  r  r.  r  r]   r  r  r  r   
on_predictr  r  r  rO   r&  r?  )	r  r  r  r  test_dataloaderr  r#  r  r$  r   r   r   predict  s*   
$
	zTrainer.predictr  c                  C   sh  | j }|dur	|n|j}| jr| jdu rt| ddd\}}| j| jd|d}t| jj	dkrY|| ju rY| jr<| j
|n| jj|dd}| jrJ|| _|| jurR|| _| jrY| j| _| jsu|jri|jtj|jd}n|jru|jtj|jd}| j j}	td	| d
 t|rtd| |  ntd td|	  |  || j_t|dd}
|jdkrd| _ d}d}d}d}d}d}d}d}d}t!|D ]K\}}t"|}|dur||7 }|	du r|}	| j#||||d\}}}t| jdd}|j$r| %|| nd}t& r	t'(  |dur$| )|*|	}|du r|nt+||dd}|dur2| jj,|ddd}|durS| jj,|ddd}| )|}|du rL|nt+||dd}|dur| jj,|ddd}| j-durm| -||}| )|}|du ry|nt+||dd}|dur| )|}|du r|nt+||dd}| j.|| j/| j0| _0|j1dur|d |j1 dkr|durt2|}|du r|nt3j4||fdd}|durt2|}|du r|nt+||dd}|durt2|}|du r|nt+||dd}|durt2|}|du r
|nt+||dd}d\}}}}q| jj5| _)|jr,t6| dr,t7| d |durEt2|}|du r<|nt3j4||fdd}|dur\t2|}|du rU|nt+||dd}|durst2|}|du rl|nt+||dd}|durt2|}|du r|nt+||dd}t|
rt|
}n t8|
t9rt|
dddkr|
j}nt|r| |}n|}|dkr|dkr|}| j:dur|dur|dur|j$r| :t;|||d}n| :t;||d}ni }t<|}|dur|= > || d< t6| dr| j?|| d< t@|A D ]}|B| ds*|C||| d| < qtD||||d S )!
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        Nr   Tr  	inferenceFrg  r  evaluation_moderr  r   ***** Running  *****r  z  Num examples: Unknown  Batch size = rF  r  r  r  padding_indexr   )dim	pad_index)axisNNNNr  r	  r&  r?  r9  r&  r?  _lossr  r  r3  r&  r?  r.  r  )Er   r  r   r   r!   r  r   r   rv  _modelsrw  prepare_modelr   r   r   r   r<  r   float16r   r   r  r  r   r   rX   r	  r^  r   r"  r   r  r  r  r=   prediction_stepinclude_inputs_for_metricsr  ry   r}  r  gather_functionrepeatrB   pad_across_processesr   on_prediction_stepr  r  eval_accumulation_stepsrD   r  concatenategather_for_metricsr   r  r   r7   r   rK   rT   r  r  r  rC  rD  r  r!  rJ   ) r  r  rG  r  r  r  r   r3  r   rj  r   losses_host
preds_hostlabels_hostinputs_host
all_losses	all_preds
all_labels
all_inputsobserved_num_examplesr  r9  observed_batch_sizer  logitsr  r  inputs_decodelossesr  r.  r  r   r   r   r     s  











 
 






 





 
zTrainer.evaluation_loopc                 C   s|   |du rdS t  r|du rd}t||}|S t rt|}|S | jjdur,| jjjdks8| jjdu r<| jjdkr<t|}|S )
        Gather the value of `tensors` (a tensor or a nested list/tuple of tensors) across processes and convert them
        to numpy before concatenating them to `gathered`.
        Nnested_gatherNOr  )	ry   rE   ru   r   r   distributed_statedistributed_type
local_rankr<   r  tensorsr  r   r   r   r    s   
zTrainer._nested_gatherc              	      s  t | jdkr	dntfdd| jD }dd}|du r"| j}t | jdkr-|r-dnd}|  du rIt| jdrGt| jj	d	g  ng  |sM|rft
tfd
d| jD }t |dkre|d }nd}t  t rt|}	|sy|rt|	tr|	d }
t fdd|	 D }n
|	d }
|	dd }|
   }t|}nd}t|	trt fdd|	 D }n|	}t|}n|s|r|   | j|dd\}}W d   n1 sw   Y  |  }t|trt fdd| D }nK|dd }nDd}|   |di }W d   n	1 sw   Y  t|tr8t fdd| D }n|}| jjdkrJ|| jjd  | _W d   n	1 sUw   Y  |rb|ddfS t
|}t |dkrq|d }|||fS )a  
        Perform an evaluation step on `model` using `inputs`.

        Subclass and override to inject custom behavior.

        Args:
            model (`nn.Module`):
                The model to evaluate.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (`bool`):
                Whether or not to return the loss only.
            ignore_keys (`List[str]`, *optional*):
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions.

        Return:
            Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
            logits and labels (each being optional).
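# A hedged sketch of wrapping `prediction_step`, e.g. to shrink what gets gathered during
# evaluation. `ArgmaxTrainer` is hypothetical, and the example assumes the model returns a single
# logits tensor (logits can also be a tuple, which this sketch does not handle).
def _example_prediction_step_override():
    import torch
    from transformers import Trainer

    class ArgmaxTrainer(Trainer):
        def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
            loss, logits, labels = super().prediction_step(
                model, inputs, prediction_loss_only, ignore_keys=ignore_keys
            )
            if logits is not None:
                logits = torch.argmax(logits, dim=-1)  # keep only predicted class ids
            return loss, logits, labels

    return ArgmaxTrainer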
        r   Fc                 3   s    | ]
}  |d uV  qd S r  r  rK  r9  r   r   rK  &  s    z*Trainer.prediction_step.<locals>.<genexpr>return_lossNTrp  keys_to_ignore_at_inferencec                 3   r  r  r`  r  ra  r   r   rK  8  r  r   r  c                 3   &    | ]\}}| d g vr|V  qdS r  Nr   r  r  r   r   rK  D     $ c                 3        | ]\}}| vr|V  qd S r  r   r  r  r   r   rK  N      )r  c                 3   rd  re  r   r  r  r   r   rK  Y  rf  c                 3   rg  r  r   r  r  r   r   rK  a  rh  r   )r   r  allr  rj   r]  r   r   r   rp  rC   rh  r   ra  ru   r   r   rc  r$  r  r  r   r   r  r  r  r   r  r  )r  r   r9  r  r  
has_labelsrb  loss_without_labelsr  raw_outputsr  	logits_mbr  rU  r  r   )r  r9  r   rB    sr   *










*

zTrainer.prediction_stepc                 C   s   t | jdr| j|S dS )a  
        For models that inherit from [`PreTrainedModel`], uses the model's own `floating_point_ops` method to compute
        the number of floating-point operations for every backward + forward pass. If using another model, either
        implement such a method in the model or subclass and override this method.

        Args:
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.

        Returns:
            `int`: The number of floating-point operations.
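# A hedged sketch of the override suggested above for models that do not inherit from
# `PreTrainedModel`. The 6 * parameters * tokens figure is a common forward+backward estimate,
# not a value taken from this file; `CustomFlopsTrainer` is hypothetical.
def _example_floating_point_ops_override():
    from transformers import Trainer

    class CustomFlopsTrainer(Trainer):
        def floating_point_ops(self, inputs):
            if "input_ids" not in inputs:
                return 0
            num_tokens = inputs["input_ids"].numel()
            num_params = sum(p.numel() for p in self.model.parameters())
            return 6 * num_params * num_tokens

    return CustomFlopsTrainer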
        r  r   )r   r   r  r  r   r   r   r  q  s   zTrainer.floating_point_opsc                 C   s^   |   sdS | jjdu rt| jj j}n| jj}t|| jj| jj	dd}|j
| _d| _dS )zE
        Initializes a git repo in `self.args.hub_model_id`.
        NT)tokenprivater   )r   r   r   r   r   absoluter  r   	hub_tokenhub_private_reporepo_idpush_in_progress)r  	repo_namerepo_urlr   r   r   r     s   
zTrainer.init_hf_repolanguagelicensetagsr^  finetuned_fromtasksdataset_tagsdataset_argsc
                 C   s   |   sdS tj| jjd}
d}tj|
r$t|
j	
d}|dk}tj| |||||||||	d
}| }t|
d}|| W d   n1 sLw   Y  |r_t| j| jj dS dS )a  
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            language (`str`, *optional*):
                The language of the model (if applicable)
            license (`str`, *optional*):
                The license of the model. Will default to the license of the pretrained model used, if the original
                model given to the `Trainer` comes from a repo on the Hub.
            tags (`str` or `List[str]`, *optional*):
                Some tags to be included in the metadata of the model card.
            model_name (`str`, *optional*):
                The name of the model.
            finetuned_from (`str`, *optional*):
                The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
                of the original model given to the `Trainer` (if it comes from the Hub).
            tasks (`str` or `List[str]`, *optional*):
                One or several task identifiers, to be included in the metadata of the model card.
            dataset_tags (`str` or `List[str]`, *optional*):
                One or several dataset tags, to be included in the metadata of the model card.
            dataset (`str` or `List[str]`, *optional*):
                One or several dataset identifiers, to be included in the metadata of the model card.
            dataset_args (`str` or `List[str]`, *optional*):
                One or several dataset arguments, to be included in the metadata of the model card.
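# A minimal usage sketch for `create_model_card`; every argument value below is a hypothetical
# example, and `trainer` is assumed to have finished training.
def _example_create_model_card(trainer):
    trainer.create_model_card(
        language="en",
        license="apache-2.0",
        tags=["text-classification"],
        model_name="my-finetuned-bert",
        finetuned_from="bert-base-uncased",
        tasks="text-classification",
    )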
        Nz	README.mdFlibrary_namepeft)	rw  rx  ry  r^  rz  r{  r|  rF  r}  w)r   r   rI  rS  r   r   rg  r   r`  r  r  r$   from_trainerto_model_cardopenwriter'   r   create_or_update_model_card)r  rw  rx  ry  r^  rz  r{  r|  rF  r}  model_card_filepathis_peft_libraryr~  training_summary
model_cardrL  r   r   r   create_model_card  s4   %zTrainer.create_model_cardc           
   	   C   s  |   r| jjtjkrd S | jjs| jd ur| j sd S | jj}t	t
tg}t r1|tttg |D ]}tjtj||rPttj||tj|| q3| jd ur\| j| t| jtj|t | jjtjkrwd| jj  }n	dt!| jj" }t#| j$||| jj%ddt& dgd}|g}| jjtj'tj(fv r| jjtj'krdnt)|j*}t#| j$|||d | jj%dd	}	|+|	 | jd u s| j rt,|| _d S | jj-| d S )
NzTraining in progress, step zTraining in progress, epoch Tr  r  rs  folder_pathr  rn  run_as_futureignore_patternszlast-checkpointz, checkpoint)rs  r  path_in_repor  rn  r  ).r   r   hub_strategyrM   ENDhub_always_pushrt  is_doner   rd   rh   rf   rr   extendra   rc   rb   r   rI  r  rS  r"  r3  r   r  r   rN  r  save_strategyrN   STEPSr  rJ  r  r  r   r   rq  rH   
CHECKPOINTALL_CHECKPOINTSr   r  rg  ri   jobs)
r  r  r   modeling_filesmodeling_filer  model_push_job	push_jobsr  checkpoint_pushr   r   r   r    sT   
 
	
zTrainer._push_from_checkpointc                 C   sB   t | dsd S | jd ur| j std | j  d S d S d S )Nrt  z\Waiting for the current checkpoint push to be finished, this might take a couple of minutes.)r   rt  r  r   r   wait_until_doner  r   r   r   r$    s   

zTrainer._finish_current_pushEnd of trainingr  blockingc              	   K   s   | dd}|du r%| jjr%| jjdu rt| jjj}n	| jjdd }| jdu r.|   | j	dd | 
 s:dS | jd
d|i| |   t| j| jj|| jj| dt dgd	S )u  
        Upload `self.model` and `self.tokenizer` to the 🤗 model hub on the repo `self.args.hub_model_id`.

        Parameters:
            commit_message (`str`, *optional*, defaults to `"End of training"`):
                Message to commit while pushing.
            blocking (`bool`, *optional*, defaults to `True`):
                Whether the function should return only when the `git push` has finished.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to [`~Trainer.create_model_card`].

        Returns:
            The URL of the repository where the model was pushed if `blocking=False`, or a `Future` object tracking the
            progress of the commit if `blocking=True`.
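# A hedged usage sketch for `push_to_hub`. It assumes the TrainingArguments were configured for
# the Hub (e.g. `hub_model_id` and a valid token) and that `trainer` (hypothetical) has finished
# training; extra keyword arguments are forwarded to `create_model_card` as documented above.
def _example_push_to_hub(trainer):
    return trainer.push_to_hub(commit_message="End of training", language="en", tasks="text-classification")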
        r^  N/r  TrG  r  r  r  r   )r!  r   r   r   r   r   r  r  r   rK  r   r  r$  r   rq  rH   )r  r  r  r  r^  r   r   r   r     s(   
zTrainer.push_to_hubc           #      C   s  | j }t|std|dur|n|j}| jr%| jdu r%t| ddd\}}| j| jd|d}t	| j
jdkra|| ju ra| jrD| j
|n| j
j|dd}| jrR|| _|| jurZ|| _| jra| j| _| js}|jrq|jtj|jd	}n|jr}|jtj|jd	}|j}	| |}
td
| d td|
  td|	  d}d}d}d}td|j}t||
|	d}|sd}t|drt |j!t"r|j!j}t||
|d}t||
|d}t||
|d}|#  |j$dkrd| _%|| j&_'t(|D ]\}}| j)||||d\}}}t*| jdd}|j+r| ,|| nd}|dur-|-|	}|du r$|ntj.||fdd}|dur@|du r9|nt/||dd}|durS|du rL|nt/||dd}|durf|du r_|nt/||dd}| j&0|| j1| j2| _2|j3dur|d |j3 dkr|4| 5|d |s|4| 5|d |4| 5|d |4| 5|d d\}}}}q|j$rt| drt6| d |4| 5|d |s|4| 5|d |4| 5|d |4| 5|d |7 }|s|7 nd}|s|7 nd}|s|7 nd} | j8dur-|dur-|dur-|j+r#| 8t9||| d}!n| 8t9||d}!ni }!t:|!}!|durC|; < |!| d< t=|!> D ]}"|"?| d s`|!@|"|!| d |" < qItA|||!|
d!S )"r*  z+dataloader must implement a working __len__Nr   Tr+  Fr-  r.  r0  r1  r2  r  r3  r   )make_multiple_ofro  r  r  r  )r7  r4  r5  eval_losses
eval_predseval_label_idseval_inputs_idsr:  r  r;  r<  r=  r3  r>  )Br   rX   r   r  r   r   r!   r  r   r   rv  r?  rw  r@  r   r   r   r   r<  r   rA  r   r   r  rj  r	  r   r   r  r  r6   r   r   ro  r:   r^  r  r  r   r"  r  rB  r   rC  r  rE  catrB   rG  r  r  rH  
add_arrays_gather_and_numpifyr  finalizer   rK   rT   r  r  rC  rD  r  r!  rJ   )#r  r  rG  r  r  r  r   r3  r   rj  r	  rK  rL  rM  rN  r  eval_losses_gathererr  preds_gathererlabels_gathererinputs_gathererr  r9  r  rU  r  r  rV  rW  	eval_losspredsr?  
inputs_idsr.  r  r   r   r   r  I  s   




 



 
 
zTrainer.prediction_loopc                 C   sX   |du rdS t  rt||}t	|S t rt|}t	|S | jjtjkr(t|}t	|S )rX  N)
ry   rE   ru   r   r   r   r_   r   r<   rD   r^  r   r   r   r    s   
zTrainer._gather_and_numpifyc                 C   sD  |   sdS ddg}tjtj| jjdr8ttj| jjdd}| }W d   n1 s2w   Y  nd}|}|D ]}||vrU|	drN||7 }q>|d| 7 }q>||krttj| jjdd}t
d	|  || W d   n1 s~w   Y  | jd td
 | j s| jd | j  dS dS )z8Add SageMaker Checkpointing patterns to .gitignore file.Nz*.sagemaker-uploadingz*.sagemaker-uploadedz
.gitignorerrH  
r  z"Writing .gitignore file. Content: g      ?z'Add *.sagemaker patterns to .gitignore.)r   r   rI  rg  rS  repo	local_dirr  readendswithr   r  r  git_addr  sleepis_repo_clean
git_commitgit_push)r  patternsrL  current_contentcontentpatternr   r   r   _add_sm_patterns_to_gitignore  s6   




z%Trainer._add_sm_patterns_to_gitignorec                 C   s,  d| j ji}d|d< tdi |}t| j j| j j| j j|d| _| jj| _	t
| jjdd d u| _t
| jjdd d u| _| jrf| jjj}| j jd|j|_tdrf| j jd	|j|_|jrf| j jrftd
| jrt
| j dd d u rddlm} | jjj}||jj|_|jj|_|j| j  d S d S d S )Nr  Fsync_with_dataloader)dispatch_batchessplit_batchesr+  gradient_accumulation_pluginr+  rb  limit_all_gathersr   activation_checkpointingzThe activation_checkpointing in FSDP config and the gradient_checkpointing in training arg can't be set to True simultaneously. Please use FSDP's activation_checkpointing logic when using FSDP.r)  r   r  r   )r   re  r   r   r  r  r+  rv  rJ  rD  r   r  r   r   rb  r   r  r  rl   r  r  r   r(  r  r  rp  deepspeed_configr*  )r  grad_acc_kwargsr  rb  r  	ds_pluginr   r   r   r     sD   



z*Trainer.create_accelerator_and_postprocess)NNNNNNNNNr   Nr  )F)TN)NNN)NNNNN)NNr  r  NN)T)NF)FN)r]  N)NNr^  )Nr  )	NNNNNNNNN)r  T)wr   
__module____qualname____doc__trainer_pt_utilsr   r   r   r   r   r   r%   r   Moduler`   r	   r   r   r   r  r.   r   rK   r   r3   r
   r   r  	Optimizerr   LambdaLRr  r*  r2  r4  r   r9  r:  r   rE  r\  r`  r  r  Samplerrf  r   ry  r  r  r  r  r  r  r  staticmethodr   r  r  r	  r  r-  r  rE  r?  r   rp  r  ry  r  r  r@  r  r  r  r  rf  r  r  r  r  r  r  rL   rI   r  r  r  r]  r  r  r  r  r   r   rK  r  r  r  rH   r  r  r  rO   r)  rJ   r   r  rB  r  r   r  r  r$  r   r  r  r  r   r   r   r   r   r      s   S	


  +
"#!0 .1

*
z

q
   M
lV0
4< ,T	
O&6,
'%3"



H

B

 
W

 i	

A98

 
'r   )r  r  r3  r  r  importlib.metadatar  r@  r  r   r  r  r"  r  r=  r  r   collections.abcr   pathlibr   typingr   r   r   r   r   r	   r
   r   integrationsr   r   huggingface_hub.utilsr  r  r  r  r   torch.distributeddistributedr  huggingface_hubr   r   r   	packagingr   r   torch.utils.datar   r   r   r   rH  r   configuration_utilsr   data.data_collatorr   r   r   debug_utilsr   r   r  r   r    integrations.deepspeedr!   r"   r#   	modelcardr$   modeling_utilsr%   r&   r'   models.auto.modeling_autor(   r)   r  r*   r+   pytorch_utilsr,   r-   tokenization_utils_baser.   trainer_callbackr/   r0   r1   r2   r3   r4   r5   r  r6   r7   r8   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   trainer_utilsrH   rI   rJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   training_argsr^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   rs   rt   ru   rv   rw   rx   ry   rz   r{   utils.quantization_configr|   r   r   utils.notebookr}   r   r~   rU  torch_xla.core.xla_modelcore	xla_modelr}  torch_xla.debug.metricsr  r.  r  !smdistributed.modelparallel.torchmodelparallelr
  smdistributed.modelparallelSMP_VERSIONrT  r	  r   r   r   r   safetensors.torchrd  r  r   
accelerater   r   r  r'  r   r   r   r   r   r   DATA_SAMPLERSaccelerate.data_loaderr   r   r5  
get_loggerr   r   r  rM  rP  r  rQ  SCALER_NAMErG  r   r   r   r   r   <module>   s   ($P	`t 	

