# torch/optim/adamw.py
from typing import cast, List, Optional, Tuple, Union

import torch
from torch import Tensor
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _dispatch_sqrt,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    Optimizer,
    ParamsT,
)

__all__ = ["AdamW", "adamw"]
 fddZdd ZedddZ  ZS )r   MbP?g?g+?:0yE>{Gz?FN)maximizeforeach
capturabledifferentiablefused)paramslrbetasepsweight_decayamsgradr!   r"   r#   r$   r%   c                   s8  d|kst d| t|tr0|r0|	s0t dd|ksFt d| d|d   kr^dk srn t d|d  d|d   krdk sn t d	|d  d|kst d
| t||||||||	|
|d
}t || |r4|
rtdd| _t  t	 fdd| j
D s&td  d|r4tdd S )N        zInvalid learning rate: Elr as a Tensor is not supported for capturable=False and foreach=TruezInvalid epsilon value: r         ?z#Invalid beta parameter at index 0: r	   z#Invalid beta parameter at index 1: zInvalid weight_decay value: )
r'   r(   r)   r*   r+   r"   r!   r#   r$   r%   z)`fused` does not support `differentiable`Tc                 3   s2   | ]*}|d  D ]}|j j ko&t|V  qqdS )r&   N)devicetypetorchZis_floating_point).0ZpgpZfused_supported_devices C/var/www/html/venv/lib/python3.8/site-packages/torch/optim/adamw.py	<genexpr>S   s   
 z!AdamW.__init__.<locals>.<genexpr>zX`fused=True` requires all the params to be floating point Tensors of supported devices: .z0`fused` and `foreach` cannot be `True` together.)
ValueError
isinstancer   dictsuper__init__RuntimeErrorZ_step_supports_amp_scalingr   allparam_groups)selfr&   r'   r(   r)   r*   r+   r!   r"   r#   r$   r%   defaults	__class__r4   r6   r=      sP    

zAdamW.__init__c                    s   t  | | jD ]}|dd |dd |dd  |dd |dd |dd }|d D ]t}| j|g }t|d	krft|d
 sft	|d
 }|d s|d rtj
|t|d|jdntj
|t d|d
< qfqd S )Nr+   Fr!   r"   r#   r$   r%   r&   r   stepZis_fuseddtyper/   rH   )r<   __setstate__r@   
setdefaultstategetlenr1   Z	is_tensorfloattensorr   r/   )rA   rL   groupr%   r3   Zp_stateZstep_valrC   r5   r6   rJ   _   s,    
zAdamW.__setstate__c	                 C   sz  d}	|d D ]f}
|
j d krq|	t|
O }	||
 |
j jrFtd||
j  | j|
 }t|dkr|d sx|d rtjdt	|d d|
j
d	ntjd
t	 d|d< tj|
tjd|d< tj|
tjd|d< |rtj|
tjd|d< ||d  ||d  |d r||d  |d r:|d jr:td|d rft|d trf|d sftd||d  q|	S )NFr&   z'AdamW does not support sparse gradientsr   r#   r%   r5   rF   rG   r,   rI   rE   )Zmemory_formatexp_avg
exp_avg_sqmax_exp_avg_sqr+   r$   zB`requires_grad` is not supported for `step` in differentiable moder"   r'   r-   )gradr1   
is_complexappendZ	is_sparser>   rL   rN   Zzerosr   r/   rP   Z
zeros_likeZpreserve_formatZrequires_gradr:   r   )rA   rQ   params_with_gradgradsr+   exp_avgsexp_avg_sqsmax_exp_avg_sqsstate_stepshas_complexr3   rL   r5   r5   r6   _init_groupv   sj    


	
 
 
 

zAdamW._init_groupc                 C   s   |    d}|dk	r.t  | }W 5 Q R X | jD ]}g }g }g }g }g }g }	|d }
ttttf |d \}}| ||||
||||	}t||||||	|
|||d |d |d |d |d |d	 |d
 |d t	| ddt	| dd|d q4|S )zPerform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr+   r(   r'   r*   r)   r!   r"   r#   r$   r%   
grad_scale	found_inf)r+   beta1beta2r'   r*   r)   r!   r"   r#   r$   r%   r`   ra   r^   )
Z _cuda_graph_capture_health_checkr1   Zenable_gradr@   r   r   rO   r_   r   getattr)rA   closureZlossrQ   rX   rY   rZ   r[   r\   r]   r+   rb   rc   r^   r5   r5   r6   rE      s^    



z
AdamW.step)r   r   r   r    F)N)__name__
__module____qualname__r   r   rO   r   r   boolr   r=   rJ   r_   r   rE   __classcell__r5   r5   rC   r6   r      s8        	

@Ia  Implements AdamW algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \beta_1, \beta_2
                \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)}                                                    \\
            &\hspace{13mm}      \lambda \text{(weight decay)},  \: \textit{amsgrad},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{ (second moment)}, \: \widehat{v_0}^{max}\leftarrow 0              \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}         \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
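    Example (illustrative; assumes ``model``, ``loss_fn``, ``input`` and ``target``
    are defined elsewhere):
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()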
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """
)


def _single_tensor_adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # A tensor lr is not scriptable here.
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        if capturable and not torch._utils.is_compiling():
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        step_t += 1

        # Decoupled weight decay: shrink the parameter directly instead of adding
        # the decay term to the gradient (this is what distinguishes AdamW from
        # Adam with L2 regularization).
        param.mul_(1 - lr * weight_decay)

        # Exponential moving averages of the gradient and its square.
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        # Abridged: the original also carries a capturable/differentiable branch
        # that keeps the bias corrections as tensor ops on the parameter's device;
        # only the plain host-side path is reproduced here.
        step = _get_value(step_t)
        bias_correction1 = 1 - beta1**step
        bias_correction2 = 1 - beta2**step
        step_size = lr / bias_correction1
        bias_correction2_sqrt = _dispatch_sqrt(bias_correction2)

        if amsgrad:
            # Maintain the maximum of all second-moment running averages so far.
            torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
            denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
        else:
            denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

        param.addcdiv_(exp_avg, denom, value=-step_size)

        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
def _multi_tensor_adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )

    if capturable and not torch._utils.is_compiling():
        capturable_supported_devices = _get_capturable_supported_devices(supports_xla=False)
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    assert not differentiable, "_foreach ops don't support autograd"
    assert grad_scale is None and found_inf is None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (
        device_params,
        device_grads,
        device_exp_avgs,
        device_exp_avg_sqs,
        device_max_exp_avg_sqs,
        device_state_steps,
    ), _ in grouped_tensors.values():
        # Abridged: the original additionally keeps CPU `step` tensors on a slower
        # increment path and has a capturable branch that computes the bias
        # corrections entirely with _foreach ops.
        if has_complex:
            _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)
        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        torch._foreach_add_(device_state_steps, 1)

        # Decoupled weight decay, then the Adam moment updates, batched per device.
        if weight_decay != 0:
            torch._foreach_mul_(device_params, 1 - lr * weight_decay)
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)
        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)

        bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
        bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]
        step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])
        bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

        if amsgrad:
            torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
            exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)
        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
        torch._foreach_add_(exp_avg_sq_sqrt, eps)
        torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)


def _fused_adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # needed to keep the kernel signatures consistent
    differentiable: bool,
    has_complex: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    # Cache per-device copies of the AMP scaling tensors and a (non-CPU) tensor lr.
    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (device, _), (
        (
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
        ),
        _,
    ) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr = lr_dict.setdefault(device, lr.to(device=device, non_blocking=True))
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adamw_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            # Undo the step increment for parameters whose gradients overflowed.
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamw)
def adamw(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # These arguments are kept positional-with-default because the functional API
    # is consumed by TorchScript, which does not support kw-only defaults.
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None and fused is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
        # Do not flip on foreach for the unsupported case of a tensor lr with capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adamw
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamw
    else:
        func = _single_tensor_adamw

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
        has_complex=has_complex,
    )
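# Minimal usage sketch (illustrative; not part of the upstream module). It relies
# only on the public AdamW class defined above and standard torch autograd.
if __name__ == "__main__":
    w = torch.nn.Parameter(torch.ones(3))
    opt = AdamW([w], lr=1e-2, weight_decay=1e-2)
    for _ in range(5):
        opt.zero_grad()
        loss = (w * w).sum()
        loss.backward()
        opt.step()
    # w is pulled toward zero by both the gradient step and the decoupled decay.
    print(w)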