from typing import List, Optional, Tuple, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc, _default_to_fused_or_foreach, _differentiable_doc,
    _disable_dynamo_if_unsupported, _foreach_doc, _get_capturable_supported_devices,
    _get_scalar_dtype, _get_value, _maximize_doc, _use_grad_for_differentiable,
    _view_as_real, Optimizer, ParamsT,
)

__all__ = ["Adamax", "adamax"]


class Adamax(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 2e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, foreach=foreach,
            maximize=maximize, differentiable=differentiable, capturable=capturable,
        )
        super().__init__(params, defaults)
zAdamax.__init__c                    s   t  | | jD ]}|dd  |dd |dd |dd |d D ]h}| j|g }t|dkrNt|d sNt	|d }|d rtj
|t |jd	ntj
|t d
|d< qNqd S )Nr"   r   Fr   r   r   r   stepdtypedevicer1   )r'   __setstate__param_groups
setdefaultstategetlentorchZ	is_tensorfloattensorr   r2   )r)   r7   grouppZp_stateZstep_valr+   r-   r.   r4   ?   s$    
  zAdamax.__setstate__c           
      C   s   d}|d D ]}|j d krq|t|O }|| |j jrDtd||j  | j| }	t|	dkr|d rtjdt	 |j
dntjdt	 d	|	d
< tj|tjd|	d< tj|tjd|	d< ||	d  ||	d  ||	d
  q|S )NFr   z(Adamax does not support sparse gradientsr   r   r-   r0   r#   r3   r/   )Zmemory_formatexp_avgexp_inf)gradr:   
is_complexappendZ	is_sparseRuntimeErrorr7   r9   Zzerosr   r2   r<   Z
zeros_likeZpreserve_format)
r)   r=   params_with_gradgradsexp_avgsexp_infsstate_stepshas_complexr>   r7   r-   r-   r.   _init_groupR   s6    


 
 
zAdamax._init_groupc                 C   s   |    d}|dk	r.t  | }W 5 Q R X | jD ]}g }g }g }g }g }|d \}	}
|d }|d }|d }|d }|d }|d }|d	 }| ||||||}t|||||||	|
|||||||d
 q4|S )zPerforms a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr   r    r   r!   r"   r   r   r   )
r    beta1beta2r   r!   r"   r   r   r   rJ   )Z _cuda_graph_capture_health_checkr:   Zenable_gradr5   rK   r   )r)   closureZlossr=   rE   rF   rG   rH   rI   rL   rM   r    r   r!   r"   r   r   r   rJ   r-   r-   r.   r/   u   sZ    

     zAdamax.step)r   r   r   r   N)N)__name__
__module____qualname__r   r;   r   r   boolr(   r4   rK   r   r/   __classcell__r-   r-   r+   r.   r      s0        	
$#a  Implements Adamax algorithm (a variant of Adam based on infinity norm).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
                \: \lambda \text{ (weight decay)},                                                \\
            &\hspace{13mm}    \epsilon \text{ (epsilon)}                                          \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                u_0 \leftarrow 0 \text{ ( infinity norm)}                                 \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t      \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t               \\
            &\hspace{5mm}u_t      \leftarrow   \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon)   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            the running average of the gradient and the exponentially weighted
            infinity norm (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
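
    Example (a minimal usage sketch; ``model``, ``input``, ``target``, and ``loss_fn``
    are assumed to be defined by the caller):
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.Adamax(model.parameters(), lr=2e-3)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()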

    """
)


def _single_tensor_adamax(
    params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
    exp_infs: List[Tensor], state_steps: List[Tensor], *,
    eps: float, beta1: float, beta2: float, lr: float, weight_decay: float,
    maximize: bool, differentiable: bool, capturable: bool, has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]

        # When capturable, params and step tensors must live on a device that
        # supports CUDA graph capture.
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_inf = torch.view_as_real(exp_inf)

        # Update biased first moment estimate.
        exp_avg.lerp_(grad, 1 - beta1)
        # Update the exponentially weighted infinity norm: u_t = max(beta2 * u_{t-1}, |g_t| + eps).
        if not differentiable:
            torch.maximum(exp_inf.mul_(beta2), grad.abs().add_(eps), out=exp_inf)
        else:
            norm_buf = torch.cat(
                [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
            )
            exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))

        if capturable:
            # Fold lr and the (negative) bias correction into the denominator so the
            # whole update stays in tensor ops, which is safe under CUDA graph capture.
            neg_bias_correction = beta1**step_t - 1
            neg_bias_correction.div_(lr)
            denom = exp_inf * neg_bias_correction
            param.addcdiv_(exp_avg, denom)
        else:
            bias_correction = 1 - beta1 ** _get_value(step_t)
            clr = lr / bias_correction
            param.addcdiv_(exp_avg, exp_inf, value=-clr)
def _multi_tensor_adamax(
    params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
    exp_infs: List[Tensor], state_steps: List[Tensor], *,
    eps: float, beta1: float, beta2: float, lr: float, weight_decay: float,
    maximize: bool, differentiable: bool, capturable: bool, has_complex: bool,
):
    assert not differentiable, "_foreach ops don't support autograd"

    if len(params) == 0:
        return

    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(supports_xla=False)
        assert all(
            p.device.type == step.device.type and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_infs, state_steps]
    )
    for (
        grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs, grouped_state_steps
    ), _ in grouped_tensors.values():
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        # Update steps. If the step tensors live on CPU, wrap the scalar 1 once instead of
        # re-wrapping it for every tensor in the slow foreach fallback.
        if not torch._utils.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate already allocated for maximize.
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Update biased first moment estimate: m_t = m_{t-1} + (1 - beta1) * (g_t - m_{t-1}).
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        # Update the exponentially weighted infinity norm: u_t = max(beta2 * u_{t-1}, |g_t| + eps).
        torch._foreach_mul_(grouped_exp_infs, beta2)
        if not maximize and weight_decay == 0:
            # No intermediate copy of the grads exists yet, so introduce one here.
            grouped_grads = torch._foreach_abs(grouped_grads)
        else:
            torch._foreach_abs_(grouped_grads)
        torch._foreach_add_(grouped_grads, eps)
        torch._foreach_maximum_(grouped_exp_infs, grouped_grads)

        if capturable:
            # Keep the bias correction in tensor ops (graph-capture safe) and fold lr into it.
            bias_corrections = torch._foreach_pow(beta1, grouped_state_steps)
            torch._foreach_sub_(bias_corrections, 1)
            torch._foreach_div_(bias_corrections, lr)
            denom = torch._foreach_mul(grouped_exp_infs, bias_corrections)
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, denom)
        else:
            bias_corrections = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
            step_size = [(_get_value(lr) / bc) * -1 for bc in bias_corrections]
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax)
def adamax(
    params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
    exp_infs: List[Tensor], state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript,
    # so these stay positional-or-keyword
    foreach: Optional[bool] = None, maximize: bool = False, differentiable: bool = False,
    capturable: bool = False, has_complex: bool = False, *,
    eps: float, beta1: float, beta2: float, lr: float, weight_decay: float,
):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """

    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamax
    else:
        func = _single_tensor_adamax

    func(
        params, grads, exp_avgs, exp_infs, state_steps,
        eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay,
        maximize=maximize, differentiable=differentiable, capturable=capturable,
        has_complex=has_complex,
    )