import math

import torch
from torch import Tensor

from .optimizer import Optimizer
from typing import List, Optional

__all__ = ['AdamW', 'adamw']


class AdamW(Optimizer):
    r"""Implements AdamW algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{(lr)}, \: \beta_1, \beta_2
                \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)},
                \: \epsilon \text{ (epsilon)}                                                    \\
            &\hspace{13mm}      \lambda \text{(weight decay)},  \: \textit{amsgrad},
                \: \textit{maximize}                                                             \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0
                \text{(second moment)}, \: \widehat{v_0}^{max}\leftarrow 0              \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}         \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        maximize (bool, optional): maximize the params based on the objective, instead of
            minimizing (default: False)
        foreach (bool, optional): whether the foreach (multi-tensor)
            implementation of the optimizer is used (default: None)
        capturable (bool, optional): whether this instance is safe to capture in a CUDA graph.
            Passing True can impair ungraphed performance, so if you don't intend to
            graph capture this instance, leave it False (default: False)
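
    Example (an illustrative sketch; ``model``, ``input``, ``target`` and
    ``loss_fn`` are placeholder names, not defined in this module)::

        >>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3,
        ...                               weight_decay=1e-2)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()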

    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=1e-2, amsgrad=False, *, maximize: bool = False,
                 foreach: Optional[bool] = None, capturable: bool = False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad,
                        foreach=foreach, maximize=maximize, capturable=capturable)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
            group.setdefault('maximize', False)
            group.setdefault('foreach', None)
            group.setdefault('capturable', False)
        # Older checkpoints stored `step` as a Python number; convert it to a
        # singleton tensor so the update functions can rely on tensor steps.
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']))
zAdamW.__setstate__c                 C   s®  |   ¡  d}|dk	r.t ¡  |ƒ }W 5 Q R X | jD ]r}g }g }g }g }g }g }	|d }
|d \}}|d D ]ú}|jdkr~qn| |¡ |jjr˜tdƒ‚| |j¡ | j| }t	|ƒdkr*| j
d rÚtjdtj|jd	nt d
¡|d< tj|tjd|d< tj|tjd|d< |
r*tj|tjd|d< | |d ¡ | |d ¡ |
rZ| |d ¡ |	 |d ¡ qnt||||||	|
|||d |d |d |d |d |d d q4|S )z±Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr   r   r   z'AdamW does not support sparse gradientsr   r   ©r   )ZdtypeÚdevicer   r"   )Zmemory_formatÚexp_avgÚ
exp_avg_sqZmax_exp_avg_sqr   r   r   r   r   )	r   Úbeta1Úbeta2r   r   r   r   r   r   )Z _cuda_graph_capture_health_checkr*   Zenable_gradr$   ÚgradÚappendZ	is_sparseÚRuntimeErrorr'   r)   r   Úzerosr,   r0   r+   Z
zeros_likeZpreserve_formatr   )r   ÚclosureZlossr-   Zparams_with_gradÚgradsÚexp_avgsÚexp_avg_sqsÚmax_exp_avg_sqsÚstate_stepsr   r3   r4   Úpr'   r    r    r!   r"   k   sj    



ÿÿòz


def adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor],
          exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor],
          state_steps: List[Tensor], foreach: Optional[bool] = None,
          capturable: bool = False, *, amsgrad: bool, beta1: float,
          beta2: float, lr: float, weight_decay: float, eps: float,
          maximize: bool):
    r"""Functional API that performs AdamW algorithm computation.

    See :class:`~torch.optim.AdamW` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if foreach is None:
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamw
    else:
        func = _single_tensor_adamw

    func(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps,
         amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=lr,
         weight_decay=weight_decay, eps=eps, maximize=maximize,
         capturable=capturable)


def _single_tensor_adamw(params: List[Tensor], grads: List[Tensor],
                         exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor],
                         max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor],
                         *, amsgrad: bool, beta1: float, beta2: float, lr: float,
                         weight_decay: float, eps: float, maximize: bool,
                         capturable: bool):

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        if capturable:
            assert param.is_cuda and step_t.is_cuda, "If capturable=True, params and state_steps must be CUDA tensors."

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            param = torch.view_as_real(param)

        # update step
        step_t += 1

        # Perform stepweight decay
        param.mul_(1 - lr * weight_decay)

        # Decay the first and second moment running average coefficient
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        if capturable:
            step = step_t

            # Keep the bias corrections as tensor ops so the whole update can
            # be captured in a CUDA graph.
            bias_correction1 = 1 - torch.pow(beta1, step)
            bias_correction2 = 1 - torch.pow(beta2, step)

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
                # Use the max. for normalizing running avg. of gradient; the
                # step size is folded into the denominator so a single
                # addcdiv_ applies the update.
                denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
            else:
                denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = step_t.item()

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1

            bias_correction2_sqrt = math.sqrt(bias_correction2)

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)


def _multi_tensor_adamw(params: List[Tensor], grads: List[Tensor],
                        exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor],
                        max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor],
                        *, amsgrad: bool, beta1: float, beta2: float, lr: float,
                        weight_decay: float, eps: float, maximize: bool,
                        capturable: bool):
    if len(params) == 0:
        return

    if capturable:
        assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
            "If capturable=True, params and state_steps must be CUDA tensors."

    if maximize:
        grads = torch._foreach_neg(tuple(grads))  # type: ignore[assignment]

    grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
    exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs]
    exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs]
    params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params]

    # update steps
    torch._foreach_add_(state_steps, 1)

    # Perform stepweight decay
    torch._foreach_mul_(params, 1 - lr * weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sqs, beta2)
    torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)

    if capturable:
        # Keep every intermediate as a tensor op so the update is CUDA-graph safe.
        bias_correction1 = [torch.pow(beta1, step) for step in state_steps]
        bias_correction2 = [torch.pow(beta2, step) for step in state_steps]
        # foreach_sub doesn't allow a scalar as the first arg
        torch._foreach_sub_(bias_correction1, 1)
        torch._foreach_sub_(bias_correction2, 1)
        torch._foreach_neg_(bias_correction1)
        torch._foreach_neg_(bias_correction2)

        # foreach_div doesn't allow a scalar as the first arg
        step_size = torch._foreach_div(bias_correction1, lr)
        torch._foreach_reciprocal_(step_size)
        torch._foreach_neg_(step_size)

        bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2)

        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs)

            # Use the max. for normalizing running avg. of gradient; fold the
            # step size into the denominator so one addcdiv applies the update.
            max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs)
            torch._foreach_div_(max_exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size))
            eps_over_step_size = torch._foreach_div(step_size, eps)
            torch._foreach_reciprocal_(eps_over_step_size)
            denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps_over_step_size)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
            torch._foreach_div_(exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size))
            eps_over_step_size = torch._foreach_div(step_size, eps)
            torch._foreach_reciprocal_(eps_over_step_size)
            denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size)

        torch._foreach_addcdiv_(params, exp_avgs, denom)
    else:
        bias_correction1 = [1 - beta1 ** step.item() for step in state_steps]
        bias_correction2 = [1 - beta2 ** step.item() for step in state_steps]

        step_size = [(lr / bc) * -1 for bc in bias_correction1]

        bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2]

        if amsgrad:
            # Maintains the maximum of all 2nd moment running avg. till now
            torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs)

            # Use the max. for normalizing running avg. of gradient
            max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs)
            torch._foreach_div_(max_exp_avg_sq_sqrt, bias_correction2_sqrt)
            denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps)
        else:
            exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

        torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
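

# Illustrative sketch: a minimal training loop exercising AdamW on a toy
# regression problem. The names `net`, `xs`, `ys`, and `opt` are placeholders
# introduced for this example only; because of the relative import at the top
# of this file, run this from within the `torch.optim` package rather than as
# a standalone script.
if __name__ == "__main__":
    import torch.nn as nn
    import torch.nn.functional as F

    torch.manual_seed(0)
    net = nn.Linear(4, 1)                            # toy model
    xs, ys = torch.randn(32, 4), torch.randn(32, 1)  # synthetic data
    opt = AdamW(net.parameters(), lr=1e-3, weight_decay=1e-2)
    for _ in range(100):
        opt.zero_grad()
        F.mse_loss(net(xs), ys).backward()           # populate .grad
        opt.step()                                   # decoupled weight-decay update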