import torch
from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional

__all__ = ['Adamax', 'adamax']


class Adamax(Optimizer):
    r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
                \: \lambda \text{ (weight decay)},                                                \\
            &\hspace{13mm}    \epsilon \text{ (epsilon)}                                          \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                u_0 \leftarrow 0 \text{ ( infinity norm)}                                 \\[-1ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t      \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t               \\
            &\hspace{5mm}u_t      \leftarrow   \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon)   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
            &\rule{110mm}{0.4pt}                                                          \\[-1ex]
            &\bf{return} \:  \theta_t                                                     \\[-1ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        foreach (bool, optional): whether foreach implementation of optimizer is used (default: None)
        maximize (bool, optional): maximize the params based on the objective, instead of
            minimizing (default: False)

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
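
    A minimal usage sketch is shown below; the linear model and random input
    are illustrative placeholders, not part of this module.

    Example::

        >>> model = torch.nn.Linear(10, 1)
        >>> optimizer = torch.optim.Adamax(model.parameters(), lr=2e-3)
        >>> optimizer.zero_grad()
        >>> loss = model(torch.randn(8, 10)).sum()
        >>> loss.backward()
        >>> optimizer.step()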
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, foreach: Optional[bool] = None, *, maximize: bool = False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        foreach=foreach, maximize=maximize)
        super(Adamax, self).__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('foreach', None)
            group.setdefault('maximize', False)
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']))
zAdamax.__setstate__c                 C   sX  d}|dk	r&t   | }W 5 Q R X | jD ]$}g }g }g }g }g }|d \}	}
|d }|d }|d }|d }|d }|d D ]}|jdkrq|| |jjrtd	||j | j| }t|d
krt 	d|d< t j
|t jd|d< t j
|t jd|d< ||d  ||d  ||d  qt|||||||	|
||||d q,|S )zPerforms a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
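
        A closure is only needed when the loss must be re-evaluated inside
        ``step``; for Adamax it is optional. A minimal sketch, where ``model``,
        ``criterion``, ``input`` and ``target`` are illustrative placeholders::

            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = criterion(model(input), target)
            ...     loss.backward()
            ...     return loss
            >>> optimizer.step(closure)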
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_infs = []
            state_steps = []

            beta1, beta2 = group['betas']
            eps = group['eps']
            lr = group['lr']
            weight_decay = group['weight_decay']
            foreach = group['foreach']
            maximize = group['maximize']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('Adamax does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = torch.tensor(0.)
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_infs.append(state['exp_inf'])
                state_steps.append(state['step'])

            adamax(params_with_grad,
                   grads,
                   exp_avgs,
                   exp_infs,
                   state_steps,
                   eps=eps,
                   beta1=beta1,
                   beta2=beta2,
                   lr=lr,
                   weight_decay=weight_decay,
                   foreach=foreach,
                   maximize=maximize)

        return loss


def adamax(params: List[Tensor],
           grads: List[Tensor],
           exp_avgs: List[Tensor],
           exp_infs: List[Tensor],
           state_steps: List[Tensor],
           # foreach and maximize stay ordinary keyword args (not keyword-only)
           # because TorchScript does not support keyword-only args with defaults
           foreach: Optional[bool] = None,
           maximize: bool = False,
           *,
           eps: float,
           beta1: float,
           beta2: float,
           lr: float,
           weight_decay: float):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
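
    A minimal sketch of calling the functional form directly; the tensors below
    are illustrative placeholders, and :class:`Adamax` normally builds and
    maintains these per-parameter state lists itself::

        >>> p = torch.zeros(3)
        >>> g = torch.ones_like(p)
        >>> adamax([p], [g], [torch.zeros_like(p)], [torch.zeros_like(p)],
        ...        [torch.tensor(0.)], foreach=False, maximize=False,
        ...        eps=1e-8, beta1=0.9, beta2=0.999, lr=2e-3, weight_decay=0.0)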
    """

    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if foreach is None:
        # Placeholder for more complex foreach logic to be added when value is not set
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamax
    else:
        func = _single_tensor_adamax

    func(params,
         grads,
         exp_avgs,
         exp_infs,
         state_steps,
         eps=eps,
         beta1=beta1,
         beta2=beta2,
         lr=lr,
         weight_decay=weight_decay,
         maximize=maximize)


def _single_tensor_adamax(params: List[Tensor],
                          grads: List[Tensor],
                          exp_avgs: List[Tensor],
                          exp_infs: List[Tensor],
                          state_steps: List[Tensor],
                          *,
                          eps: float,
                          beta1: float,
                          beta2: float,
                          lr: float,
                          weight_decay: float,
                          maximize: bool):

    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]

        # update step
        step_t += 1
        step = step_t.item()

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_inf = torch.view_as_real(exp_inf)

        # Update biased first moment estimate.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

        # Update the exponentially weighted infinity norm.
        norm_buf = torch.cat([
            exp_inf.mul_(beta2).unsqueeze(0),
            grad.abs().add_(eps).unsqueeze_(0)
        ], 0)
        torch.amax(norm_buf, 0, keepdim=False, out=exp_inf)

        bias_correction = 1 - beta1 ** step
        clr = lr / bias_correction

        param.addcdiv_(exp_avg, exp_inf, value=-clr)


def _multi_tensor_adamax(params: List[Tensor],
                         grads: List[Tensor],
                         exp_avgs: List[Tensor],
                         exp_infs: List[Tensor],
                         state_steps: List[Tensor],
                         *,
                         beta1: float,
                         beta2: float,
                         lr: float,
                         weight_decay: float,
                         eps: float,
                         maximize: bool):

    if len(params) == 0:
        return

    if maximize:
        grads = torch._foreach_neg(grads)

    # Operate on the real view of any complex tensors.
    params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params]
    grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
    exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs]
    exp_infs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_infs]

    # Update steps
    torch._foreach_add_(state_steps, 1)

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    # Update biased first moment estimate.
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    # Update the exponentially weighted infinity norm.
    torch._foreach_mul_(exp_infs, beta2)
    for exp_inf, grad in zip(exp_infs, grads):
        norm_buf = torch.cat([
            exp_inf.unsqueeze(0),
            grad.abs().add_(eps).unsqueeze_(0)
        ], 0)
        torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long()))

    bias_corrections = [1 - beta1 ** step.item() for step in state_steps]
    clr = [-1 * (lr / bias_correction) for bias_correction in bias_corrections]
    torch._foreach_addcdiv_(params, exp_avgs, exp_infs, clr)