from typing import Iterator, Tuple, Union

import torch.nn as nn
from torch.distributed._shard.sharded_tensor import ShardedTensor

from .api import ShardedOptimizer


def named_params_with_sharded_tensor(
    module: nn.Module,
    prefix: str = '',
    recurse: bool = True,
) -> Iterator[Tuple[str, Union[nn.Parameter, ShardedTensor]]]:
    r"""Returns an iterator over module parameters (together with the
    ShardedTensor parameters), yielding both the name of the parameter
    as well as the parameter itself. This is typically passed to a
    :class:`torch.distributed._shard.sharded_optim.ShardedOptimizer`.

    Args:
        module (nn.Module): module whose parameters (and ShardedTensor
            attributes) should be yielded.
        prefix (str): prefix to prepend to all parameter names.
        recurse (bool): if True, then yields parameters of this module
            and all submodules. Otherwise, yields only parameters that
            are direct members of this module.

    Yields:
        (str, Union[Tensor, ShardedTensor]): Tuple containing
            the name and parameter (or ShardedTensor parameter)

    Example::

        >>> # xdoctest: +SKIP
        >>> model = torch.nn.Linear(*linear_size)
        >>> shard_parameter(model, "weight", spec)
        >>> for name, param in named_params_with_sharded_tensor(model):
        >>>    if name in ['weight']:
        >>>        print(param.size())

    """
    modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]

    memo = set()
    for mod_prefix, mod in modules:
        # find all ShardedTensors attached directly to each module,
        # deduplicating shared instances via the memo set
        for name, val in vars(mod).items():
            if isinstance(val, ShardedTensor) and val not in memo:
                memo.add(val)
                name = mod_prefix + ('.' if mod_prefix else '') + name
                yield name, val

    # find all regular nn.Parameters
    for name, val in module.named_parameters():
        yield name, val