# torch/optim/adadelta.py
from typing import Any, Dict, List, Optional

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _maximize_doc,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["Adadelta", "adadelta"]


class Adadelta(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = 1.0,
        rho: float = 0.9,
        eps: float = 1e-6,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        *,
        capturable: bool = False,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= rho <= 1.0:
            raise ValueError(f"Invalid rho value: {rho}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            rho=rho,
            eps=eps,
            weight_decay=weight_decay,
            maximize=maximize,
            capturable=capturable,
            foreach=foreach,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group: Dict[str, Any],
        params_with_grad: List[Tensor],
        grads: List[Tensor],
        square_avgs: List[Tensor],
        acc_deltas: List[Tensor],
        state_steps: List[Tensor],
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adadelta does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # Lazy state initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["square_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
                state["acc_delta"] = torch.zeros_like(p, memory_format=torch.preserve_format)

            square_avgs.append(state["square_avg"])
            acc_deltas.append(state["acc_delta"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            square_avgs: List[Tensor] = []
            acc_deltas: List[Tensor] = []
            state_steps: List[Tensor] = []
            lr, rho, eps, weight_decay = (
                group["lr"], group["rho"], group["eps"], group["weight_decay"],
            )
            foreach, maximize, differentiable, capturable = (
                group["foreach"], group["maximize"],
                group["differentiable"], group["capturable"],
            )

            has_complex = self._init_group(
                group, params_with_grad, grads, square_avgs, acc_deltas, state_steps
            )

            adadelta(
                params_with_grad,
                grads,
                square_avgs,
                acc_deltas,
                state_steps,
                lr=lr,
                rho=rho,
                eps=eps,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                capturable=capturable,
                has_complex=has_complex,
            )

        return loss

Adadelta.__doc__ = (
    r"""Implements Adadelta algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)},
                \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)},
                \: \lambda \text{ (weight decay)}                                                \\
            &\textbf{initialize} :  v_0  \leftarrow 0 \: \text{ (square avg)},
                \: u_0 \leftarrow 0 \: \text{ (accumulate variables)}                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm} v_t      \leftarrow v_{t-1} \rho + g^2_t (1 - \rho)                    \\
            &\hspace{5mm}\Delta x_t    \leftarrow   \frac{\sqrt{u_{t-1} +
                \epsilon }}{ \sqrt{v_t + \epsilon}  }g_t \hspace{21mm}                           \\
            &\hspace{5mm} u_t  \leftarrow   u_{t-1}  \rho +
                 \Delta x^2_t  (1 - \rho)                                                        \\
            &\hspace{5mm}\theta_t      \leftarrow   \theta_{t-1} - \gamma  \Delta x_t            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.
    a  
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9). A higher value of `rho` will
            result in a slower average, which can be helpful for preventing
            oscillations in the learning process.
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6).
        lr (float, optional): coefficient that scales delta before it is applied
            to the parameters (default: 1.0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}

    .. _ADADELTA\: An Adaptive Learning Rate Method:
        https://arxiv.org/abs/1212.5701

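    Example (a minimal usage sketch; ``model``, ``loss_fn``, ``input`` and
    ``target`` are placeholders assumed to be defined by the caller)::

        >>> optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9, eps=1e-6)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
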
    """
)


def _single_tensor_adadelta(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    acc_deltas: List[Tensor],
    state_steps: List[Tensor],
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    # When capturable, params and state steps must live on a supported device
    # (skipped under torch.compile, which performs its own checks).
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    for param, grad, square_avg, acc_delta, step in zip(
        params, grads, square_avgs, acc_deltas, state_steps
    ):
        step += 1
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            square_avg = torch.view_as_real(square_avg)
            acc_delta = torch.view_as_real(acc_delta)
            grad = torch.view_as_real(grad)
            param = torch.view_as_real(param)

        # v_t = rho * v_{t-1} + (1 - rho) * g_t^2
        square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
        std = square_avg.add(eps).sqrt_()
        delta = acc_delta.add(eps).sqrt_()
        if differentiable:
            delta = delta.clone()
        # delta_t = sqrt(u_{t-1} + eps) / sqrt(v_t + eps) * g_t
        delta.div_(std).mul_(grad)
        # u_t = rho * u_{t-1} + (1 - rho) * delta_t^2
        acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)

        if torch.is_complex(param):
            delta = torch.view_as_complex(delta)
        param.add_(delta, alpha=-lr)
rt dtj sP|rPtdd t fddt| |D sPt d  dt| dkr`d S t	| ||||g}|
 D ]`\\}}}}}}|rt|||| |d jrtj|tjd	d
dd	d nt|d |	rt|}|dkr|	rtj|||d ntj|||d}t|| tj|||d| d t||}t| t||}t| t|| t|| t|| tj|||d| d |rt|tjrt||  t|| q|tj||| d q|d S )Nz#_foreach ops don't support autogradFrP   c                 3   s.   | ]&\}}|j j|j jko$|j j kV  qd S rQ   rR   rT   rV   r*   r+   rW   >  s   z)_multi_tensor_adadelta.<locals>.<genexpr>rX   rY   r   r   cpu)r/   rZ   r   r\   )rb   r7   r^   r_   r   r`   ra   r6   r   Z"_group_tensors_by_device_and_dtypevaluesr   Zis_cpuZ_foreach_add_r9   Z_foreach_negZ_foreach_addZ_foreach_mul_Z_foreach_addcmul_Z_foreach_sqrt_Z_foreach_div_
isinstancer   )r   r=   r>   r?   r@   r   r   r   r    r   r   r   rG   Zgrouped_tensorsZdevice_paramsZdevice_gradsZdevice_square_avgsZdevice_acc_deltasZdevice_state_steps_rf   Zdeltasr*   rV   r+   _multi_tensor_adadelta'  s    

	
   
  

     



@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adadelta)
def adadelta(
    params: List[Tensor],
    grads: List[Tensor],
    square_avgs: List[Tensor],
    acc_deltas: List[Tensor],
    state_steps: List[Tensor],
    # keyword-only args with defaults are not supported by torchscript-compiled
    # functions, so these stay positional with defaults for now
    capturable: bool = False,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    rho: float,
    eps: float,
    weight_decay: float,
    maximize: bool,
):
    r"""Functional API that performs Adadelta algorithm computation.

    See :class:`~torch.optim.Adadelta` for details.
    """
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adadelta
    else:
        func = _single_tensor_adadelta

    func(
        params,
        grads,
        square_avgs,
        acc_deltas,
        state_steps,
        lr=lr,
        rho=rho,
        eps=eps,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )
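

# A minimal sketch of driving the functional API above directly (illustrative
# only; the tensors are made-up placeholders, and in normal use the Adadelta
# class manages the square_avg/acc_delta/step state for you):
#
#   param = torch.randn(3)
#   grad = torch.randn(3)
#   square_avg = torch.zeros(3)
#   acc_delta = torch.zeros(3)
#   step = torch.zeros(())
#   adadelta([param], [grad], [square_avg], [acc_delta], [step],
#            lr=1.0, rho=0.9, eps=1e-6, weight_decay=0.0, maximize=False)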