from typing import Dict, List, Optional

import torch
import torch.optim._functional as F
from torch import Tensor

__all__: List[str] = []


# Define a TorchScript compatible Functional Adadelta Optimizer
# where we use these optimizer in a functional way.
# Instead of using the `param.grad` when updating parameters,
# we explicitly allow the distributed optimizer pass gradients to
# the `step` function. In this way, we could separate the gradients
# and parameters and allow multithreaded trainer to update the
# parameters without data traces on accumulating to the same .grad.
# NOTE: This should be only used by distributed optimizer internals
# and not meant to expose to the user.
@torch.jit.script
class _FunctionalAdadelta:
    def __init__(
        self,
        params: List[Tensor],
        lr: float = 1.0,
        rho: float = 0.9,
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        foreach: bool = False,
        maximize: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        self.defaults = {
            "lr": lr,
            "rho": rho,
            "eps": eps,
            "weight_decay": weight_decay,
        }
        self.foreach = foreach
        self.maximize = maximize

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: we only have one param_group and don't allow user to add additional
        # param group as it's not a common use case.
        self.param_group = {"params": params}

        self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})

    def step(self, gradients: List[Optional[Tensor]]):
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        square_avgs = []
        acc_deltas = []
        lr = self.defaults["lr"]
        rho = self.defaults["rho"]
        eps = self.defaults["eps"]
        weight_decay = self.defaults["weight_decay"]

        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )

        for param, gradient in zip(params, gradients):
            if gradient is not None:
                params_with_grad.append(param)
                grads.append(gradient)
                # Lazy state initialization
                if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    state["step"] = torch.tensor(0.0)
                    state["square_avg"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    state["acc_delta"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                state = self.state[param]
                square_avgs.append(state["square_avg"])
                acc_deltas.append(state["acc_delta"])

        with torch.no_grad():
            F.adadelta(
                params_with_grad,
                grads,
                square_avgs,
                acc_deltas,
                lr=lr,
                rho=rho,
                eps=eps,
                weight_decay=weight_decay,
                foreach=self.foreach,
                maximize=self.maximize,
            )
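

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the upstream
# module): distributed optimizer internals construct this class and pass
# explicitly computed gradients to `step`, rather than reading them from
# `param.grad`. The tensor shapes and values below are placeholder
# assumptions for demonstration only.
if __name__ == "__main__":
    # One parameter tensor and one matching gradient computed elsewhere.
    param = torch.randn(2, 2)
    grad = torch.randn(2, 2)

    opt = _FunctionalAdadelta([param], lr=1.0)

    before = param.clone()
    # Gradients are supplied per-parameter; `None` entries are skipped.
    opt.step([grad])

    # The parameter is updated in place by the functional Adadelta kernel.
    assert not torch.equal(before, param)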