from typing import cast, List, Optional, Tuple, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _dispatch_sqrt,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["RAdam", "radam"]


class RAdam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        decoupled_weight_decay: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            decoupled_weight_decay=decoupled_weight_decay,
            foreach=foreach,
            maximize=maximize,
            capturable=capturable,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("decoupled_weight_decay", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    state["step"] = (
                        torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(0.0, dtype=_get_scalar_dtype())
                    )
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            state_steps: List[Tensor] = []
            beta1, beta2 = cast(Tuple[float, float], group["betas"])

            has_complex = self._init_group(
                group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps
            )

            radam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                decoupled_weight_decay=group["decoupled_weight_decay"],
                has_complex=has_complex,
            )

        return loss


RAdam.__doc__ = (
    r"""Implements RAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \beta_1, \beta_2
                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
                \lambda \text{ (weight decay)}, \:\textit{maximize}                              \\
            &\hspace{13mm} \epsilon \text{ (epsilon)}, \textit{decoupled\_weight\_decay}         \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)},                                       \\
            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1                      \\[-1.ex]
            &\rule{110mm}{0.4pt}  \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{6mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{12mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{6mm} \theta_t \leftarrow \theta_{t-1}                                       \\
            &\hspace{6mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{12mm}\textbf{if} \: \textit{decoupled\_weight\_decay}                       \\
            &\hspace{18mm} \theta_t \leftarrow \theta_{t} - \gamma \lambda \theta_{t}            \\
            &\hspace{12mm}\textbf{else}                                                          \\
            &\hspace{18mm} g_t \leftarrow g_t + \lambda \theta_{t}                               \\
            &\hspace{6mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{6mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{6mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big)                                    \\[0.1ex]
            &\hspace{6mm}\textbf{if} \: \rho_t > 5                                               \\
            &\hspace{12mm} l_t \leftarrow \frac{\sqrt{ (1-\beta^t_2) }}{ \sqrt{v_t} +\epsilon  } \\
            &\hspace{12mm} r_t \leftarrow
      \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t} r_t l_t        \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}
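
    For the default :math:`\beta_2 = 0.999` this gives
    :math:`\rho_{\infty} = 2/(1 - 0.999) - 1 = 1999`, and :math:`\rho_t` first exceeds 5
    around the sixth step, so the rectified step (with :math:`r_t` and :math:`l_t`) is used
    from then on, while the very first steps fall back to the plain update
    :math:`\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}`.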

    For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.

    This implementation provides an option to use either the original weight_decay implementation as in Adam
    (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied
    to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False
    (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which
    corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information
    about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_.
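
    A minimal usage sketch (``model``, ``data``, ``target``, and ``loss_fn`` below are
    placeholder names, not defined by this module)::

        >>> optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3,
        ...                               weight_decay=1e-2, decoupled_weight_decay=True)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(data), target).backward()
        >>> optimizer.step()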

    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_weight_decay (bool, optional): whether to use decoupled weight
            decay as in AdamW to obtain RAdamW (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _On the variance of the adaptive learning rate and beyond:
        https://arxiv.org/abs/1908.03265
    .. _author's implementation:
        https://github.com/LiyuanLucasLiu/RAdam
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """
)


def _single_tensor_radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    differentiable: bool,
    capturable: bool,
    maximize: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        # update step
        step_t += 1
        step = step_t if capturable else _get_value(step_t)

        if weight_decay != 0:
            if decoupled_weight_decay:
                param.mul_(1 - lr * weight_decay)
            else:
                grad = grad.add(param, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        bias_correction1 = 1 - beta1**step
        bias_correction2 = 1 - beta2**step

        # correcting bias for the first moving moment
        bias_corrected_exp_avg = exp_avg / bias_correction1

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # compute the length of the approximated SMA
        rho_t = rho_inf - 2 * step * (beta2**step) / bias_correction2

        def _compute_rect():
            return (
                (rho_t - 4)
                * (rho_t - 2)
                * rho_inf
                / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
            ) ** 0.5

        def _compute_adaptive_lr():
            exp_avg_sq_sqrt = exp_avg_sq.sqrt()
            if differentiable:
                exp_avg_sq_sqrt = exp_avg_sq_sqrt.add(eps)
            else:
                exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps)

            return (bias_correction2**0.5) / exp_avg_sq_sqrt

        # Compute the variance rectification term and update parameters accordingly
        if capturable:
            update = torch.where(
                rho_t > 5.0, _compute_rect() * _compute_adaptive_lr(), 1.0
            )
            param.add_(bias_corrected_exp_avg * lr * update, alpha=-1.0)
        else:
            if rho_t > 5.0:
                param.add_(
                    bias_corrected_exp_avg
                    * lr
                    * _compute_adaptive_lr()
                    * _compute_rect(),
                    alpha=-1.0,
                )
            else:
                param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)


def _multi_tensor_radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    differentiable: bool,
    capturable: bool,
    maximize: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, state_steps]
    )
    for (
        grouped_params,
        grouped_grads,
        grouped_exp_avgs,
        grouped_exp_avg_sqs,
        grouped_state_steps,
    ), _ in grouped_tensors.values():
        # Update steps. If steps are on CPU, foreach falls back to a slow per-tensor loop,
        # so pass the increment as a pre-wrapped tensor with an explicit alpha.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if has_complex:
            _view_as_real(
                grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs
            )

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        # maximum length of the approximated SMA
        rho_inf = 2 / (1 - beta2) - 1
        # compute the length of the approximated SMA
        if capturable:
            # keep everything on device: rho_t = rho_inf - 2 * t * beta2^t / (1 - beta2^t)
            beta2_pow = torch._foreach_pow(beta2, grouped_state_steps)
            bias_correction2 = torch._foreach_neg(beta2_pow)
            torch._foreach_add_(bias_correction2, 1)
            rho_t_list = torch._foreach_mul(beta2_pow, grouped_state_steps)
            torch._foreach_mul_(rho_t_list, 2)
            torch._foreach_div_(rho_t_list, bias_correction2)
            torch._foreach_neg_(rho_t_list)
            torch._foreach_add_(rho_t_list, rho_inf)
        else:
            rho_t_list = [
                rho_inf
                - 2
                * _get_value(step)
                * (beta2 ** _get_value(step))
                / (1 - beta2 ** _get_value(step))
                for step in grouped_state_steps
            ]

        if weight_decay != 0:
            if decoupled_weight_decay:
                torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
            else:
                # Reuse the intermediate memory (grouped_grads) already allocated for maximize
                if maximize:
                    torch._foreach_add_(
                        grouped_grads, grouped_params, alpha=weight_decay
                    )
                else:
                    grouped_grads = torch._foreach_add(
                        grouped_grads, grouped_params, alpha=weight_decay
                    )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            grouped_exp_avg_sqs, grouped_grads, grouped_grads, value=1 - beta2
        )

        # Delete the local intermediate since it won't be used anymore to save on peak memory
        del grouped_grads

        if capturable:
            # rect = sqrt((rho_t-4)(rho_t-2)rho_inf / ((rho_inf-4)(rho_inf-2)rho_t)) where rho_t > 5, else 0
            num = torch._foreach_sub(rho_t_list, 4)
            sub2 = torch._foreach_sub(rho_t_list, 2)
            torch._foreach_mul_(num, sub2)
            del sub2
            torch._foreach_mul_(num, rho_inf)
            denom = torch._foreach_mul(rho_t_list, (rho_inf - 4) * (rho_inf - 2))
            torch._foreach_div_(num, denom)
            del denom
            torch._foreach_sqrt_(num)

            rect = [
                torch.where(rho_t > 5.0, n, 0.0) for n, rho_t in zip(num, rho_t_list)
            ]
            del num
            del rho_t_list
            unrect_step_size = [torch.where(r > 0, 0.0, 1.0) for r in rect]
            torch._foreach_mul_(unrect_step_size, lr)

            bias_correction1 = torch._foreach_pow(beta1, grouped_state_steps)
            torch._foreach_neg_(bias_correction1)
            torch._foreach_add_(bias_correction1, 1)

            # unrectified step size: -lr / (1 - beta1^t) where rho_t <= 5, else 0
            torch._foreach_div_(unrect_step_size, bias_correction1)
            torch._foreach_neg_(unrect_step_size)

            # rectified step size: -lr * rect * sqrt(1 - beta2^t) / (1 - beta1^t)
            torch._foreach_sqrt_(bias_correction2)
            torch._foreach_mul_(bias_correction2, lr)
            torch._foreach_mul_(bias_correction2, rect)
            del rect
            torch._foreach_neg_(bias_correction2)
            torch._foreach_div_(bias_correction2, bias_correction1)
            del bias_correction1
        else:
            rect = [
                (
                    _dispatch_sqrt(
                        (rho_t - 4)
                        * (rho_t - 2)
                        * rho_inf
                        / ((rho_inf - 4) * (rho_inf - 2) * rho_t)
                    )
                    if rho_t > 5
                    else 0
                )
                for rho_t in rho_t_list
            ]
            unrectified = [0 if rect > 0 else 1.0 for rect in rect]

            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in grouped_state_steps
            ]
            unrect_step_size = [
                (lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)
            ]
            bias_correction2 = [
                _dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1
                for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1)
            ]

        # buffer = rect_step_size / (sqrt(v_t) + eps) + unrect_step_size, with the minus sign
        # folded into the step sizes, so a single fused addcmul applies the RAdam update.
        buffer = torch._foreach_sqrt(grouped_exp_avg_sqs)
        torch._foreach_add_(buffer, eps)
        torch._foreach_div_(buffer, bias_correction2)
        torch._foreach_reciprocal_(buffer)
        torch._foreach_add_(buffer, unrect_step_size)

        torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, buffer)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_radam)
def radam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript,
    # so these are positional-or-keyword for now as the functional API is compiled elsewhere
    decoupled_weight_decay: bool = False,
    foreach: Optional[bool] = None,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    maximize: bool = False,
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
):
    r"""Functional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_radam
    else:
        func = _single_tensor_radam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        state_steps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        decoupled_weight_decay=decoupled_weight_decay,
        differentiable=differentiable,
        capturable=capturable,
        has_complex=has_complex,
    )
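

# A minimal smoke-test sketch (illustration only, not part of the public API): it fits a tiny
# least-squares problem with RAdam and checks that the single-tensor and foreach code paths
# agree. The data and model below are made up for the demo; run as
# `python -m torch.optim.radam` so the relative imports above resolve.
if __name__ == "__main__":
    torch.manual_seed(0)
    data = torch.randn(64, 3)
    target = data @ torch.tensor([[1.0], [-2.0], [0.5]])

    def _fit(use_foreach):
        # identical initialization for both runs so only the code path differs
        torch.manual_seed(0)
        model = torch.nn.Linear(3, 1)
        opt = RAdam(
            model.parameters(),
            lr=1e-2,
            weight_decay=1e-2,
            decoupled_weight_decay=True,
            foreach=use_foreach,
        )
        for _ in range(50):
            opt.zero_grad()
            torch.nn.functional.mse_loss(model(data), target).backward()
            opt.step()
        return torch.cat([p.detach().reshape(-1) for p in model.parameters()])

    print(torch.allclose(_fit(False), _fit(True), atol=1e-5))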