from typing import Dict, List, Optional

import torch
import torch.optim._functional as F
from torch import Tensor

__all__: List[str] = []


# A TorchScript-compatible functional Adagrad optimizer. Instead of reading
# gradients from `param.grad`, the caller passes gradients explicitly to
# `step()`, which keeps gradients and parameters separate so multithreaded
# trainers can update parameters without racing on a shared `.grad`.
# NOTE: this is intended for distributed optimizer internals and is not meant
# to be exposed to the user.
@torch.jit.script
class _FunctionalAdagrad:
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1e-2,
        lr_decay: float = 0.0,
        weight_decay: float = 0.0,
        initial_accumulator_value: float = 0.0,
        warmup_lr_multiplier: float = 1.0,
        warmup_num_iters: float = 0.0,
        eps: float = 1e-10,
        coalesce_grad: bool = True,
        foreach: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        self.defaults = {
            "lr": lr,
            "lr_decay": lr_decay,
            "eps": eps,
            "weight_decay": weight_decay,
            "initial_accumulator_value": initial_accumulator_value,
            "warmup_lr_multiplier": warmup_lr_multiplier,
            "warmup_num_iters": warmup_num_iters,
        }
        self.coalesce_grad = coalesce_grad
        self.foreach = foreach
        self.maximize = maximize
        self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: there is only one param_group; adding additional groups is not supported.
        self.param_group = {"params": params}

        # TorchScript has no union/Any types, so the per-parameter step count is kept
        # as a scalar tensor (this also allows sharing the step memory across processes).
        for p in self.param_group["params"]:
            self.state[p] = {
                "sum": torch.full_like(p.data, initial_accumulator_value),
                "step": torch.tensor(0.0),
            }

    def step(self, gradients: List[Optional[Tensor]]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        state_sums = []
        state_steps: List[Tensor] = []

        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )

        has_sparse_grad = False
        for param, gradient in zip(self.param_group["params"], gradients):
            if gradient is not None:
                if gradient.is_sparse:
                    has_sparse_grad = True
                params_with_grad.append(param)
                grads.append(gradient)
                state = self.state[param]
                state_sums.append(state["sum"])
                state_steps.append(state["step"])

        with torch.no_grad():
            F.adagrad(
                params,
                grads,
                state_sums,
                state_steps,
                lr=self.defaults["lr"],
                weight_decay=self.defaults["weight_decay"],
                lr_decay=self.defaults["lr_decay"],
                eps=self.defaults["eps"],
                has_sparse_grad=has_sparse_grad,
                foreach=self.foreach,
                maximize=self.maximize,
            )
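
# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). The distributed
# optimizer normally constructs this class internally and feeds it gradients
# explicitly, one per parameter and in the same order, rather than reading
# them from `param.grad`. Assuming two locally held parameters, a single
# update would look roughly like:
#
#   params = [torch.randn(4, requires_grad=True) for _ in range(2)]
#   opt = _FunctionalAdagrad(params, lr=0.1)
#   grads = [torch.ones_like(p) for p in params]  # one gradient per parameter
#   opt.step(grads)                               # in-place Adagrad update
# ----------------------------------------------------------------------------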