from .module import Module

from typing import Tuple, Union
from torch import Tensor
from torch.types import _size

__all__ = ['Flatten', 'Unflatten']


class Flatten(Module):
    r"""
    Flattens a contiguous range of dims into a tensor. For use with :class:`~nn.Sequential`.

    Shape:
        - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
          where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
          number of dimensions including none.
        - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.

    Args:
        start_dim: first dim to flatten (default = 1).
        end_dim: last dim to flatten (default = -1).

    Examples::
        >>> input = torch.randn(32, 1, 5, 5)
        >>> # With default parameters
        >>> m = nn.Flatten()
        >>> output = m(input)
        >>> output.size()
        torch.Size([32, 25])
        >>> # With non-default parameters
        >>> m = nn.Flatten(0, 2)
        >>> output = m(input)
        >>> output.size()
        torch.Size([160, 5])
    	start_dimend_dimr   N)r
   r   returnc                    s   t t|   || _|| _d S N)superr   __init__r
   r   )selfr
   r   	__class__ </tmp/pip-unpacked-wheel-gikjz4vx/torch/nn/modules/flatten.pyr   (   s    zFlatten.__init__inputr   c                 C   s   | | j| jS r   )flattenr
   r   r   r   r   r   r   forward-   s    zFlatten.forwardr   c                 C   s   d | j| jS )Nzstart_dim={}, end_dim={})formatr
   r   r   r   r   r   
extra_repr0   s     zFlatten.extra_repr)r   r   )__name__
__module____qualname____doc____constants__int__annotations__r   r   r   strr   __classcell__r   r   r   r   r   	   s   
c                       s   e Zd ZU dZeeeef  ZddgZe	eef e
d< e	eef e
d< e	eef e	eef dd fddZdd	 Zd
d ZeedddZedddZ  ZS )r	   a  
    Unflattens a tensor dim expanding it to a desired shape. For use with :class:`~nn.Sequential`.

    * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
      be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.

    * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor and it can be
      a `tuple` of ints or a `list` of ints or `torch.Size` for `Tensor` input;  a `NamedShape`
      (tuple of `(name, size)` tuples) for `NamedTensor` input.

    Shape:
        - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
          dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
        - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
          :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.

    Args:
        dim (Union[int, str]): Dimension to be unflattened
        unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension

    Examples:
        >>> input = torch.randn(2, 50)
        >>> # With tuple of ints
        >>> m = nn.Sequential(
        >>>     nn.Linear(50, 50),
        >>>     nn.Unflatten(1, (2, 5, 5))
        >>> )
        >>> output = m(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
        >>> # With torch.Size
        >>> m = nn.Sequential(
        >>>     nn.Linear(50, 50),
        >>>     nn.Unflatten(1, torch.Size([2, 5, 5]))
        >>> )
        >>> output = m(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
        >>> # With namedshape (tuple of tuples)
        >>> input = torch.randn(2, 50, names=('N', 'features'))
        >>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
        >>> output = unflatten(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
    dimunflattened_sizeN)r(   r)   r   c                    sR   t t|   t|tr$| | nt|tr:| | ntd|| _	|| _
d S )Nz'invalid argument type for dim parameter)r   r	   r   
isinstancer$   _require_tuple_intr&   _require_tuple_tuple	TypeErrorr(   r)   )r   r(   r)   r   r   r   r   j   s    

zUnflatten.__init__c                 C   s`   t |trDt|D ],\}}t |tstddt|j| qd S tddt|j d S )Nz*unflattened_size must be tuple of tuples, &but found element of type {} at pos {}z,unflattened_size must be a tuple of tuples, zbut found type {})r*   tuple	enumerater-   r   typer   r   r   idxelemr   r   r   r,   w   s    

zUnflatten._require_tuple_tuplec                 C   s`   t |ttfrHt|D ],\}}t |tstddt|j| qd S tdt|jd S )Nz(unflattened_size must be tuple of ints, r.   z;unflattened_size must be a tuple of ints, but found type {})	r*   r/   listr0   r$   r-   r   r1   r   r2   r   r   r   r+      s    
zUnflatten._require_tuple_intr   c                 C   s   | | j| jS r   )Z	unflattenr(   r)   r   r   r   r   r      s    zUnflatten.forwardr   c                 C   s   d | j| jS )Nzdim={}, unflattened_size={})r   r(   r)   r   r   r   r   r      s    zUnflatten.extra_repr)r   r    r!   r"   r   r&   r$   Z
NamedShaper#   r   r%   r   r   r,   r+   r   r   r   r'   r   r   r   r   r	   6   s   
-&
	N)moduler   typingr   r   Ztorchr   Ztorch.typesr   __all__r   r	   r   r   r   r   <module>   s   -