import torch
from torch import Tensor
from .optimizer import Optimizer
from typing import List, Optional

__all__ = ['Rprop', 'rprop']


class Rprop(Optimizer):
    r"""Implements the resilient backpropagation algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta)
                \text{ (objective)},                                                             \\
            &\hspace{13mm}      \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
                \text{ (step sizes)}                                                             \\
            &\textbf{initialize} :   g^0_{prev} \leftarrow 0,
                \: \eta_0 \leftarrow \text{lr (learning rate)}                                   \\
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \textbf{for} \text{  } i = 0, 1, \ldots, d-1 \: \mathbf{do}            \\
            &\hspace{10mm}  \textbf{if} \:   g^i_{prev} g^i_t  > 0                               \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
                \Gamma_{max})                                                                    \\
            &\hspace{10mm}  \textbf{else if}  \:  g^i_{prev} g^i_t < 0                           \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
                \Gamma_{min})                                                                    \\
            &\hspace{15mm}  g^i_t \leftarrow 0                                                   \\
            &\hspace{10mm}  \textbf{else}  \:                                                    \\
            &\hspace{15mm}  \eta^i_t \leftarrow \eta^i_{t-1}                                     \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t)             \\
            &\hspace{5mm}g_{prev} \leftarrow  g_t                                                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to the paper
    `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
    <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), the
            multiplicative decrease and increase factors for the step size
            (default: (0.5, 1.2))
        step_sizes (Tuple[float, float], optional): a pair of minimal and
            maximal allowed step sizes (default: (1e-6, 50))
        foreach (bool, optional): whether foreach implementation of optimizer
            is used (default: None)
        maximize (bool, optional): maximize the params based on the objective, instead of
            minimizing (default: False)
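
    Example (a minimal sketch; ``model``, ``input``, ``target``, and
    ``loss_fn`` below are placeholders assumed to be defined by the caller):
        >>> optimizer = torch.optim.Rprop(model.parameters(), lr=0.01)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()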
    """

    def __init__(self, params, lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50),
                 foreach: Optional[bool] = None, maximize: bool = False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < etas[0] < 1.0 < etas[1]:
            raise ValueError("Invalid eta values: {}, {}".format(etas[0], etas[1]))

        defaults = dict(lr=lr, etas=etas, step_sizes=step_sizes,
                        foreach=foreach, maximize=maximize)
        super(Rprop, self).__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('foreach', None)
            group.setdefault('maximize', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params = []
            grads = []
            prevs = []
            step_sizes = []
            etaminus, etaplus = group['etas']
            step_size_min, step_size_max = group['step_sizes']
            foreach = group['foreach']
            maximize = group['maximize']

            for p in group['params']:
                if p.grad is None:
                    continue
                params.append(p)
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Rprop does not support sparse gradients')

                grads.append(grad)
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['prev'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if p.dtype.is_complex:
                        # Complex numbers are treated as two independent real numbers,
                        # so the step size must not be zero for the imaginary part.
                        state['step_size'] = grad.new().resize_as_(grad).fill_(complex(group['lr'], group['lr']))
                    else:
                        state['step_size'] = grad.new().resize_as_(grad).fill_(group['lr'])

                prevs.append(state['prev'])
                step_sizes.append(state['step_size'])

                state['step'] += 1

            rprop(params,
                  grads,
                  prevs,
                  step_sizes,
                  step_size_min=step_size_min,
                  step_size_max=step_size_max,
                  etaminus=etaminus,
                  etaplus=etaplus,
                  foreach=foreach,
                  maximize=maximize)

        return loss
def rprop(params: List[Tensor],
          grads: List[Tensor],
          prevs: List[Tensor],
          step_sizes: List[Tensor],
          # kwonly args with defaults are not supported by functions compiled
          # with torchscript, so foreach and maximize are plain kwargs for now
          foreach: Optional[bool] = None,
          maximize: bool = False,
          *,
          step_size_min: float,
          step_size_max: float,
          etaminus: float,
          etaplus: float):
    r"""Functional API that performs rprop algorithm computation.

    See :class:`~torch.optim.Rprop` for details.
    """
    if foreach is None:
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rprop
    else:
        func = _single_tensor_rprop

    func(params,
         grads,
         prevs,
         step_sizes,
         step_size_min=step_size_min,
         step_size_max=step_size_max,
         etaminus=etaminus,
         etaplus=etaplus,
         maximize=maximize)


def _single_tensor_rprop(params: List[Tensor],
                         grads: List[Tensor],
                         prevs: List[Tensor],
                         step_sizes: List[Tensor],
                         *,
                         step_size_min: float,
                         step_size_max: float,
                         etaminus: float,
                         etaplus: float,
                         maximize: bool):

    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        prev = prevs[i]
        step_size = step_sizes[i]

        # View complex tensors as real so the update acts elementwise
        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            prev = torch.view_as_real(prev)
            param = torch.view_as_real(param)
            step_size = torch.view_as_real(step_size)
        sign = grad.mul(prev).sign()
        sign[sign.gt(0)] = etaplus
        sign[sign.lt(0)] = etaminus
        sign[sign.eq(0)] = 1

        # update step sizes with step size updates
        step_size.mul_(sign).clamp_(step_size_min, step_size_max)

        # for dir<0, dfdx=0
        # for dir>=0, dfdx=dfdx
        grad = grad.clone(memory_format=torch.preserve_format)
        grad[sign.eq(etaminus)] = 0

        # update parameters
        param.addcmul_(grad.sign(), step_size, value=-1)
        prev.copy_(grad)


def _multi_tensor_rprop(params: List[Tensor],
                        grads: List[Tensor],
                        prevs: List[Tensor],
                        step_sizes: List[Tensor],
                        *,
                        step_size_min: float,
                        step_size_max: float,
                        etaminus: float,
                        etaplus: float,
                        maximize: bool):

    if len(params) == 0:
        return

    # Handle complex params by viewing them as real tensors
    def _view_complex_as_real(tensor_list):
        return [torch.view_as_real(t) if torch.is_complex(t) else t for t in tensor_list]

    grads = _view_complex_as_real(grads)
    prevs = _view_complex_as_real(prevs)
    params = _view_complex_as_real(params)
    step_sizes = _view_complex_as_real(step_sizes)

    if maximize:
        grads = torch._foreach_neg(grads)

    signs = torch._foreach_mul(grads, prevs)
    signs = [s.sign() for s in signs]
    for sign in signs:
        sign[sign.gt(0)] = etaplus
        sign[sign.lt(0)] = etaminus
        sign[sign.eq(0)] = 1

    # update step sizes with step size updates
    torch._foreach_mul_(step_sizes, signs)
    for step_size in step_sizes:
        step_size.clamp_(step_size_min, step_size_max)

    # for dir<0, dfdx=0
    # for dir>=0, dfdx=dfdx
    grads = list(grads)
    for i in range(len(grads)):
        grads[i] = grads[i].clone(memory_format=torch.preserve_format)
        grads[i][signs[i].eq(etaminus)] = 0

    # update parameters
    grad_signs = [grad.sign() for grad in grads]
    torch._foreach_addcmul_(params, grad_signs, step_sizes, value=-1)

    for i in range(len(prevs)):
        prevs[i].copy_(grads[i])