from typing import List, Optional, Tuple, Union

import torch
from torch import Tensor
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _dispatch_sqrt,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    Optimizer,
    ParamsT,
)

__all__ = ["Adam", "adam"]
class Adam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        amsgrad: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if isinstance(lr, Tensor) and foreach and not capturable:
            raise ValueError(
                "lr as a Tensor is not supported for capturable=False and foreach=True"
            )
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            maximize=maximize,
            foreach=foreach,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
        )
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            fused_supported_devices = _get_fused_kernels_supported_devices()
            if not all(
                p.device.type in fused_supported_devices
                and torch.is_floating_point(p)
                for pg in self.param_groups
                for p in pg["params"]
            ):
                raise RuntimeError(
                    "`fused=True` requires all the params to be floating point Tensors of "
                    f"supported devices: {fused_supported_devices}."
                )
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")
zAdam.__init__c                    s   t  | | jD ]}|dd |dd |dd  |dd |dd |dd }|d D ]t}| j|g }t|d	krft|d
 sft	|d
 }|d s|d rtj
|t|d|jdntj
|t d|d
< qfqd S )Nr)   Fr    r   r!   r"   r#   r$   r   stepZis_fuseddtyper-   rF   )r:   __setstate__r>   
setdefaultstategetlenr/   	is_tensorfloattensorr   r-   )r?   rJ   groupr#   r1   Zp_stateZstep_valrA   r3   r4   rH   `   s,    
zAdam.__setstate__c                 C   s|  d}|d D ]h}	|	j d k	r|t|	O }||	 |	j jrDtd||	j  | j|	 }
t|
dkr|d sv|d rtjdt	|d d|	j
d	ntjd
t	 d|
d< tj|	tjd|
d< tj|	tjd|
d< |d rtj|	tjd|
d< ||
d  ||
d  |d r||
d  |d r<|
d jr<td|d rht|d rh|d shtd||
d  q|S )NFr$   zJAdam does not support sparse gradients, please consider SparseAdam insteadr   r!   r#   r3   rD   rE   r*   rG   rC   )Zmemory_formatexp_avg
exp_avg_sqr)   max_exp_avg_sqr"   zB`requires_grad` is not supported for `step` in differentiable moder   r%   r+   )gradr/   
is_complexappendZ	is_sparser<   rJ   rL   Zzerosr   r-   rO   Z
zeros_likeZpreserve_formatZrequires_gradrM   )r?   rP   params_with_gradgradsexp_avgsexp_avg_sqsmax_exp_avg_sqsstate_stepshas_complexr1   rJ   r3   r3   r4   _init_groupw   sl    





 
 
 

zAdam._init_groupc                 C   s   |    d}|dk	r.t  | }W 5 Q R X | jD ]}g }g }g }g }g }g }	|d \}
}| |||||||	}t||||||	|d ||
||d |d |d |d |d |d	 |d
 |d t| ddt| ddd q4|S )zPerform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr&   r)   r%   r(   r'   r    r   r!   r"   r#   
grad_scale	found_inf)r)   r]   beta1beta2r%   r(   r'   r    r   r!   r"   r#   r_   r`   )Z _cuda_graph_capture_health_checkr/   Zenable_gradr>   r^   r   getattr)r?   closureZlossrP   rW   rX   rY   rZ   r[   r\   ra   rb   r]   r3   r3   r4   rC      sZ    




z	Adam.step)r   r   r   r   F)N)__name__
__module____qualname__r   r   rN   r   r   boolr   r;   rH   r^   r   rC   __classcell__r3   r3   rA   r4   r      s8        	

AIa  Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}          \\
            &\hspace{13mm}      \lambda \text{ (weight decay)},  \: \textit{amsgrad},
                \:\textit{maximize}                                                              \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """
)

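# Illustrative usage sketch (not part of the original module): the class-based API above is
# typically driven as shown below. `_adam_usage_example` is a hypothetical helper added for
# exposition; the model and data are placeholders.
def _adam_usage_example():
    import torch.nn as nn

    model = nn.Linear(10, 1)
    optimizer = Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), amsgrad=False)
    inputs, targets = torch.randn(4, 10), torch.randn(4, 1)

    def closure():
        # step(closure) re-evaluates the model and returns the loss, as documented above.
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(inputs), targets)
        loss.backward()
        return loss

    for _ in range(3):
        loss = optimizer.step(closure)
    return loss
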
   !      C   s  |d kr|d kst tj r,t|ts,t t| D ]|\}}|sJ|| n||  }|| }|| }|| }tj s|rt	 }|j
j|j
jkr|j
j|kst d| d|d7 }|dkr|j||d}t|rt|}t|}t|}|rt|| ||< t|}||d|
  ||j|| d| d |sP|r|}d|
|  }d||  }|| }| }| }|r|r||  }n|| }|| t|| ||  ||  || } n| ||  || } |||  nt|}d|
|  }d||  }|| }t|}|rjtj|| ||| d ||  | |} n| | |} |j|| | d |r4t| | r4t|| ||< q4d S )NIIf capturable=True, params and state_steps must be on supported devices: r6   r   r   alpha)value)out)AssertionErrorr/   jitis_scriptingr8   rN   	enumerate_utilsis_compilingr   r-   r.   addrU   Zview_as_realZlerp_Zmul_Zaddcmul_ZconjnegsqrtcloneZcopy_maximumZadd_Zaddcdiv_r   r   Zview_as_complex)!r$   rX   rY   rZ   r[   r\   r_   r`   r)   r]   ra   rb   r%   r(   r'   r    r!   r"   iparamrT   rQ   rR   Zstep_tcapturable_supported_devicesrC   bias_correction1bias_correction2	step_sizeZstep_size_negbias_correction2_sqrtrS   denomr3   r3   r4   _single_tensor_adamA  sz    






r   c       
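# Illustrative sketch (not part of the original module): the non-capturable branch above
# performs theta <- theta - (lr / (1 - beta1^t)) * m_t / (sqrt(v_t) / sqrt(1 - beta2^t) + eps)
# with a single in-place addcdiv_. `_adam_update_sketch` is a hypothetical helper that spells
# the same arithmetic out for one real-valued parameter tensor without AMSGrad.
def _adam_update_sketch(param, grad, exp_avg, exp_avg_sq, step,
                        lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    import math

    exp_avg.lerp_(grad, 1 - beta1)                                # first moment m_t
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment v_t
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
    param.addcdiv_(exp_avg, denom, value=-lr / bias_correction1)
    return param
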
def _multi_tensor_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )

    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (
        device_params,
        device_grads,
        device_exp_avgs,
        device_exp_avg_sqs,
        device_max_exp_avg_sqs,
        device_state_steps,
    ), _ in grouped_tensors.values():
        if has_complex:
            if amsgrad:
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps. If steps live on CPU, wrap the scalar 1 once to avoid the slow
        # foreach fallback re-wrapping it per tensor.
        if device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(
                    device_grads, device_params, alpha=weight_decay
                )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            device_exp_avg_sqs, device_grads, device_grads, 1 - beta2
        )

        # Free the intermediate to save on peak memory
        del device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            torch._foreach_neg_(bias_correction2)

            # step_size = -lr / (1 - beta1^t); bias_correction2_sqrt = sqrt(1 - beta2^t)
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)
            torch._foreach_sqrt_(bias_correction2)

            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
            )

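# Illustrative sketch (not part of the original module): the foreach (multi-tensor) path above
# and the fused path below are selected through the constructor flags; when both `foreach` and
# `fused` are left as None, the functional `adam` below picks a default implementation.
# `_adam_impl_selection_example` is a hypothetical helper; the model is a placeholder.
def _adam_impl_selection_example():
    import torch.nn as nn

    model = nn.Linear(8, 8)
    # Default: implementation chosen by the heuristic in the functional `adam` dispatcher.
    opt_default = Adam(model.parameters(), lr=1e-3)
    # Force the single-tensor reference path.
    opt_single = Adam(model.parameters(), lr=1e-3, foreach=False)
    # Requesting the fused kernel raises at construction if any param is not a floating point
    # tensor on a supported device:
    # opt_fused = Adam(model.parameters(), lr=1e-3, fused=True)
    return opt_default, opt_single
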
def _fused_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # Only move a tensor lr around when it is not on CPU; otherwise treat it as a scalar.
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (device, _), (
        (
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
        ),
        _,
    ) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]

        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam)
def adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # these flags are keyword arguments with defaults, kept positional-friendly for
    # TorchScript compatibility
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Respect user-specified foreach/fused; only pick a default when neither is given.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case of a tensor lr with capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    # This check is slow during compilation, so it is skipped under torch.compile.
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
    )
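

# Illustrative sketch (not part of the original module): the functional `adam` above can be
# driven directly with caller-managed state lists. `_functional_adam_example` is a
# hypothetical helper; the tensors are placeholders and AMSGrad is disabled so the
# max_exp_avg_sqs list can stay empty.
def _functional_adam_example():
    param = torch.randn(4)
    grad = torch.randn(4)
    exp_avg = torch.zeros(4)
    exp_avg_sq = torch.zeros(4)
    step = torch.tensor(0.0)

    adam(
        [param],
        [grad],
        [exp_avg],
        [exp_avg_sq],
        [],  # max_exp_avg_sqs is only read when amsgrad=True
        [step],
        amsgrad=False,
        beta1=0.9,
        beta2=0.999,
        lr=1e-3,
        weight_decay=0.0,
        eps=1e-8,
        maximize=False,
    )
    return param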