import warnings

import torch
from torch._six import inf
from typing import Union, Iterable

_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]

__all__ = ['clip_grad_norm_', 'clip_grad_norm', 'clip_grad_value_']


def clip_grad_norm_(
        parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0,
        error_if_nonfinite: bool = False) -> torch.Tensor:
    r"""Clips gradient norm of an iterable of parameters.

    The norm is computed over all gradients together, as if they were
    concatenated into a single vector. Gradients are modified in-place.

    Args:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        max_norm (float or int): max norm of the gradients
        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
            infinity norm.
        error_if_nonfinite (bool): if True, an error is thrown if the total
            norm of the gradients from :attr:`parameters` is ``nan``,
            ``inf``, or ``-inf``. Default: False (will switch to True in the future)

    Returns:
        Total norm of the parameter gradients (viewed as a single vector).
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad for p in parameters if p.grad is not None]
    max_norm = float(max_norm)
    norm_type = float(norm_type)
    if len(grads) == 0:
        return torch.tensor(0.)
    device = grads[0].device
    if norm_type == inf:
        norms = [g.detach().abs().max().to(device) for g in grads]
        total_norm = norms[0] if len(norms) == 1 else torch.max(torch.stack(norms))
    else:
        total_norm = torch.norm(torch.stack([torch.norm(g.detach(), norm_type).to(device) for g in grads]), norm_type)
    if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()):
        raise RuntimeError(
            f'The total norm of order {norm_type} for gradients from '
            '`parameters` is non-finite, so it cannot be clipped. To disable '
            'this error and scale the gradients by the non-finite norm anyway, '
            'set `error_if_nonfinite=False`')
    clip_coef = max_norm / (total_norm + 1e-6)
    # Multiplying by the clamped coefficient (instead of branching on clip_coef < 1)
    # avoids a CPU <-> device synchronization when the gradients live on an accelerator.
    clip_coef_clamped = torch.clamp(clip_coef, max=1.0)
    for g in grads:
        g.detach().mul_(clip_coef_clamped.to(g.device))
    return total_norm


def clip_grad_norm(
        parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.,
        error_if_nonfinite: bool = False) -> torch.Tensor:
    r"""Clips gradient norm of an iterable of parameters.

    .. warning::
        This method is now deprecated in favor of
        :func:`torch.nn.utils.clip_grad_norm_`.
    """
    warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor "
                  "of torch.nn.utils.clip_grad_norm_.", stacklevel=2)
    return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite)

clip_valuer   c                 C   sF   t | tjr| g} t|}tdd | D ]}|jjj| |d q(dS )a  Clips gradient of an iterable of parameters at specified value.

    Gradients are modified in-place.

    Args:
        parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
            single Tensor that will have gradients normalized
        clip_value (float or int): maximum allowed value of the gradients.
            The gradients are clipped in the range
            :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]`
    c                 S   s
   | j d k	S r   r   )r   r   r   r   <lambda>Y       z"clip_grad_value_.<locals>.<lambda>)minr   N)r!   r   r"   r#   filterr   dataZclamp_)r	   r/   r   r   r   r   r   J   s
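
# Illustrative usage sketch (not part of the torch.nn.utils API): unlike the
# global rescaling done by clip_grad_norm_, clip_grad_value_ clamps each
# gradient element independently to [-clip_value, clip_value]. The model,
# tensors, and the helper's name are placeholder assumptions for the example.
def _example_clip_grad_value_usage():
    model = torch.nn.Linear(4, 2)
    loss = model(torch.randn(3, 4)).sum()
    loss.backward()
    # Clamp every gradient element in-place to the range [-0.5, 0.5].
    clip_grad_value_(model.parameters(), clip_value=0.5)
    return [p.grad for p in model.parameters()]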
    )r   F)r   F)r-   r   Z
torch._sixr   typingr   r   r"   Z_tensor_or_tensors__all__r#   boolr   r   r   r   r   r   r   <module>   s0   
     3     