import contextlib
from dataclasses import dataclass
from enum import auto, Enum
from itertools import accumulate, chain
from typing import (
    Any,
    cast,
    Dict,
    Generator,
    Iterator,
    List,
    NamedTuple,
    Optional,
    Sequence,
    Set,
    Tuple,
    Union,
)

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor

from ._fsdp_extensions import _ext_post_unflatten_transform, _ext_pre_flatten_transform
from ._utils import _alloc_storage, _free_storage, _set_fsdp_flattened, p_assert

__all__ = [
    "FlatParameter",
    "FlatParamHandle",
    "FlatParamShardMetadata",
    "ParamInfo",
    "SharedParamInfo",
    "HandleConfig",
    "HandleShardingStrategy",
    "HandleTrainingState",
]


class ParamInfo(NamedTuple):
    """Information for an original module parameter."""

    param_name: str
    module: nn.Module
    module_name: str


class SharedParamInfo(NamedTuple):
    """
    Additional information for a shared parameter.

    For each shared parameter, we designate one module and its parameter
    variable to be the primary owner, determined as the first one encountered
    in the parameter walk. These are prefixed with "prim". The primary module
    and parameter do not have their own :class:`SharedParamInfo` instance.
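
    Example (illustrative, not from the original docstring): with tied
    weights ``embed.weight is lm_head.weight``, the first occurrence in the
    module walk (say ``embed.weight``) becomes the primary owner, and
    ``lm_head.weight`` receives a :class:`SharedParamInfo` entry whose
    ``prim_module_name`` is ``"embed"``.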
    """

    param_name: str
    module: nn.Module
    module_name: str
    prim_param_name: str
    prim_module: nn.Module
    prim_module_name: str


class FlatParamShardMetadata(NamedTuple):
    """
    This holds metadata specific to this rank's shard of the flattened
    parameter.

    Attributes:
        param_names (Tuple[str, ...]): Prefixed parameter names of this rank's
            shard of the parameters; see :class:`FlatParameter`.
        param_shapes (Tuple[torch.Size, ...]): Parameter shapes of this rank's
            shard of the parameters; see :class:`FlatParameter`.
        param_numels (Tuple[int, ...]): Parameter numels of this rank's shard
            of the parameters; see :class:`FlatParameter`.
        param_offsets (Tuple[Tuple[int, int], ...]): [start, end] offsets (in
            units of numels) giving this rank's part of each flattened
            original module parameter.
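
    Example (illustrative values, not from the original docstring): if this
    rank's shard covers the first 5 numels of parameters ``w`` with shape
    ``(2, 2)`` and ``b`` with shape ``(2,)``, then the metadata is::

        FlatParamShardMetadata(
            param_names=("w", "b"),
            param_shapes=(torch.Size([2, 2]), torch.Size([2])),
            param_numels=(4, 2),
            param_offsets=((0, 3), (0, 0)),
        )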
    """

    param_names: Tuple[str, ...]
    param_shapes: Tuple[torch.Size, ...]
    param_numels: Tuple[int, ...]
    param_offsets: Tuple[Tuple[int, int], ...]


class HandleShardingStrategy(Enum):
    FULL_SHARD = auto()
    SHARD_GRAD_OP = auto()
    NO_SHARD = auto()


class HandleTrainingState(Enum):
    IDLE = auto()
    FORWARD = auto()
    BACKWARD_PRE = auto()
    BACKWARD_POST = auto()
    SUMMON_FULL_PARAMS = auto()


@dataclass
class HandleConfig:
    sharding_strategy: HandleShardingStrategy
    offload_params: bool
    param_dtype: Optional[torch.dtype]
    reduce_dtype: Optional[torch.dtype]
    keep_low_precision_grads: bool = False


class FlatParameter(nn.Parameter):
    """
    This is the flattened parameter used by :class:`FullyShardedDataParallel`.
    It is comprised of one or more original parameters, which are flattened
    and concatenated to construct the flattened parameter.

    Under the current design, this parameter logically represents both the
    unsharded and sharded flattened parameter, and its data changes storages
    dynamically.
        - In the :class:`FullyShardedDataParallel` constructor, the parameter
        is initialized as unsharded and then sharded in-place.
        - At runtime, the parameter is lazily (re)-initialized. The sharded
        parameter data is saved in ``self._local_shard``, and a new ``Tensor``
        ``self._full_param_padded`` is created, which is the all-gather
        destination and owns the unsharded parameter storage thereafter. (See
        :meth:`FullyShardedDataParallel._init_param_attributes`.)
        - Throughout runtime, the parameter data changes storages as needed,
        e.g. to the sharded flattened parameter, reduced-precision sharded
        flattened parameter, or the unsharded flattened parameter.

    Attributes:
        _unpadded_unsharded_size (torch.Size): Unsharded flattened parameter's
            size without padding.
        _padded_unsharded_size (torch.Size): Unsharded flattened parameter's
            size with padding. This is only set for sharded strategies since
            they require padding for the all-gather.

        _param_infos (Tuple[ParamInfo, ...]): Each parameter's parameter info
            entry; see :class:`ParamInfo`.
        _numels (Tuple[int, ...]): Each parameter's numel.
        _shapes (Tuple[torch.Size, ...]): Each parameter's shape.
        _prefixed_param_names (Tuple[str, ...]): Each parameter's name prefixed
            with the parent module names starting from the module passed to
            construct this flattened parameter via :class:`FlatParamHandle`;
            the prefixed names are guaranteed to be unique within the subtree
            rooted in that module.
        _num_params (int): Number of original parameters flattened into this
            flattened parameter; this is the length of ``_param_infos``,
            ``_numels``, ``_shapes``, and ``_prefixed_param_names``.
        _shared_param_infos (Tuple[SharedParamInfo, ...]): Shared parameter
            info entries; see :class:`SharedParamInfo`.
        _param_extensions (Tuple[Optional[Any], ...]): Parameter extensions
            (i.e. some per-parameter state) used to customize pre-flatten and
            post-unflatten behavior. This is experimental, and users should not
            depend on its existence in the future.

        _shard_param_offsets (List[Tuple[int, int]]): [start, end] offsets (in
            units of numel) giving this rank's part of each flattened original
            module parameter; for any parameter ``p`` that is not sharded
            across ranks, this will be [0, ``p.numel()``-1].
        _shard_indices (Tuple[int, int]): [start, end] indices (in units of
            parameters) for this rank's shard of the original model parameters,
            where the parameters follow the order in which they were originally
            flattened; this indexes appropriately into any data structure that
            follows the flattening order (e.g. ``_param_infos``, ``_numels``,
            etc.).
        _shard_numel_padded (int): Numel padded for this rank's sharded
            flattened parameter.

        _local_shard (Tensor): Sharded flattened parameter with padding if
            using a sharded strategy. If using ``NO_SHARD``, then this is the
            unpadded unsharded flattened parameter, and there is no notion of a
            sharded flattened parameter or padded unsharded flattened
            parameter.
        _full_param_padded (Tensor): Unsharded flattened parameter with
            padding. This is not defined for ``NO_SHARD``. When using mixed
            precision for parameters, this has the low precision.
        _full_prec_full_param_padded (Tensor): Full precision unsharded
            flattened parameter with padding. This is used for unsharding
            outside of computation when using mixed precision for parameters.
            This is never defined for ``NO_SHARD``.
        _post_backward_hook_state (Tuple[AccumulateGrad, RemovableHandle]):
            Flattened parameter's :class:`AccumulateGrad` object and
            post-backward hook handle.
        _mp_shard (Tensor): Low precision sharded flattened parameter with
            padding. This is only defined when parameter mixed precision is
            enabled. For ``NO_SHARD``, this is used for computation.
        _cpu_grad (Tensor): Sharded gradient with padding stored on CPU.
            This is only defined when offloading parameters is enabled.
        _saved_grad_shard (Tensor): Sharded gradient with padding from previous
            iterations for gradient accumulation without :meth:`no_sync`.
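
    Example (illustrative, added for clarity): flattening ``lin.weight`` with
    shape ``(4, 3)`` and ``lin.bias`` with shape ``(4,)`` yields a 1D
    parameter with 16 numel, ``_numels == (12, 4)``, ``_shapes ==
    (torch.Size([4, 3]), torch.Size([4]))``, and ``_prefixed_param_names ==
    ("lin.weight", "lin.bias")``.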
    N)param_infosnumelsshapesprefixed_param_namesshared_param_infosparam_extensionsreturnc                 C   s   t |t |kstt |t |ks(tt |t |ks<tt |t |ksPtt || _t|| _t|| _t|| _t|| _t|| _t|| _	| 
 | _t|  dS )a  
        Initializes attributes holding metadata about the original parameters
        comprising the flattened parameter.

        We expose this method separate from the constructor to keep the
        constructor only responsible for the flattened parameter's tensor data.
        This method should only be called once per model, while the constructor
        may be called multiple times, e.g. when reloading from a checkpoint, in
        which case only the tensor data needs to be passed to the constructor.
        Since :meth:`load_state_dict` is implemented via :meth:`copy_`, the
        metadata is correctly assumed to be unchanged.

        Args:
            See the Attributes in the class docstring.
        """
        assert len(param_infos) == len(numels)
        assert len(param_infos) == len(shapes)
        assert len(param_infos) == len(prefixed_param_names)
        assert len(param_infos) == len(param_extensions)
        self._num_params = len(param_infos)
        self._param_infos = tuple(param_infos)
        self._numels = tuple(numels)
        self._shapes = tuple(shapes)
        self._prefixed_param_names = tuple(prefixed_param_names)
        self._shared_param_infos = tuple(shared_param_infos)
        self._param_extensions = tuple(param_extensions)
        self._unpadded_unsharded_size = self.size()
        _set_fsdp_flattened(self)


class FlatParamHandle:
    """
    This handle manages a flattened parameter (:class:`FlatParameter`). This
    includes sharding and view management.

    Args:
        params (Sequence[nn.Parameter]): The parameters to use for the
            flattened parameter.
        module (nn.Module): A module that is the root of the subtree containing
            all parameters in ``params``; for non-recursive wrapping, this must
            be the top-level module, while for recursive wrapping, this may not
            necessarily be the top-level module.
        device (torch.device): The compute and communication device, which
            should be a non-CPU device. We refer to it as the compute device.
        config (HandleConfig): A config customizing the handle based on FSDP's
            available features.
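
    Example (a minimal sketch assuming a CUDA device and an initialized
    process group ``pg``; not part of the original docstring)::

        >>> module = nn.Linear(3, 3)
        >>> config = HandleConfig(
        ...     HandleShardingStrategy.FULL_SHARD, False, None, None, False
        ... )
        >>> handle = FlatParamHandle(
        ...     list(module.parameters()), module, torch.device("cuda"), config
        ... )
        >>> handle.shard(pg)  # frees the unsharded flattened parameter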
    N)paramsr$   deviceconfigrI   c                    s:   t    || _|| _tj| _| || | jdd d S )NF	as_params)	super__init__rX   _configr"   r9   _training_state_init_flat_param
_unflatten)rU   rW   r$   rX   rY   	__class__r/   r0   r]     s    
zFlatParamHandle.__init__)rW   r$   rI   c                 C   s  t |}|d t|dks&tdg }g }g }g }g }i }	g }
g }d}d}| D ]V\}}|jddD ]>\}}||krql||	kr|	| \}}}|t|||||| qlt|t	krt
d|dk	r|j|krt
d| d|j |dkr| st
d	|dk	r&|j|kr&t
d
t|\}}|| |j}|j}|||f|	|< |
| |t||| ||  ||j |r|d | n|}|| qlqV|dk	stt|
|| _| j|||||| dS )a  
        Initializes the flattened parameter ``self.flat_param`` by flattening
        the parameters in ``params`` into a single :class:`FlatParameter` and
        saves relevant metadata. Shared parameters are only included in the
        flattened parameter once.

        This checks that all comprising parameters have the same dtype and
        ``requires_grad`` and does not support nested construction of
        :class:`FlatParameter` s.

        Args:
            See the Args in the class docstring.
        """
        params_set = set(params)
        params_set.discard(None)
        if len(params_set) == 0:
            raise ValueError(
                "Cannot initialize a `FlatParameter` from an empty parameter list"
            )
        param_infos: List[ParamInfo] = []
        numels: List[int] = []
        shapes: List[torch.Size] = []
        prefixed_param_names: List[str] = []
        shared_param_infos: List[SharedParamInfo] = []
        shared_param_memo: Dict[nn.Parameter, Tuple[nn.Module, str, str]] = {}
        params_to_flatten: List[nn.Parameter] = []
        param_extensions: List[Any] = []
        dtype: Optional[torch.dtype] = None
        requires_grad: Optional[bool] = None
        for submodule_name, submodule in module.named_modules():
            for param_name, param in submodule.named_parameters(recurse=False):
                if param not in params_set:
                    continue
                if param in shared_param_memo:  # shared reference
                    prim_module, prim_module_name, prim_param_name = shared_param_memo[
                        param
                    ]
                    shared_param_infos.append(
                        SharedParamInfo(
                            param_name,
                            submodule,
                            submodule_name,
                            prim_param_name,
                            prim_module,
                            prim_module_name,
                        )
                    )
                else:
                    if type(param) is FlatParameter:
                        raise ValueError("`FlatParameter` does not support nesting")
                    if dtype is not None and param.dtype != dtype:
                        raise ValueError(
                            "`FlatParameter` requires uniform dtype but got "
                            f"{dtype} and {param.dtype}"
                        )
                    if dtype is None and not param.is_floating_point():
                        raise ValueError("Integer parameters are unsupported")
                    if (
                        requires_grad is not None
                        and param.requires_grad != requires_grad
                    ):
                        raise ValueError(
                            "`FlatParameter` requires uniform `requires_grad`"
                        )
                    param, extension = _ext_pre_flatten_transform(param)
                    param_extensions.append(extension)
                    dtype = param.dtype
                    requires_grad = param.requires_grad
                    shared_param_memo[param] = (submodule, submodule_name, param_name)
                    params_to_flatten.append(param)
                    param_infos.append(ParamInfo(param_name, submodule, submodule_name))
                    numels.append(param.numel())
                    shapes.append(param.shape)
                    prefixed_param_name = (
                        submodule_name + "." + param_name
                        if submodule_name
                        else param_name
                    )
                    prefixed_param_names.append(prefixed_param_name)
        assert requires_grad is not None
        self.flat_param = FlatParamHandle.flatten_params(
            params_to_flatten, requires_grad
        )
        self.flat_param._init_metadata(
            param_infos,
            numels,
            shapes,
            prefixed_param_names,
            shared_param_infos,
            param_extensions,
        )

    @staticmethod
    def flatten_params(
        params: Sequence[torch.Tensor],
        requires_grad: bool,
    ) -> FlatParameter:
        """
        Flattens the parameters in ``params`` into a single
        :class:`FlatParameter`. This should be the only way used to construct
        :class:`FlatParameter` s.

        We expose this factory method for checkpointing (e.g. sharded state
        dict). The flattened parameter's metadata should only be initialized
        once (see :meth:`_init_metadata`), but its tensor data may be reloaded.
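
        Example (illustrative)::

            >>> flat_param = FlatParamHandle.flatten_params(
            ...     [torch.ones(2, 2), torch.ones(3)], requires_grad=False
            ... )
            >>> flat_param.shape
            torch.Size([7])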
        """
        with torch.no_grad():
            flat_params = [
                p.detach().reshape(-1) if isinstance(p, nn.Parameter) else p.reshape(-1)
                for p in params
            ]
            flat_param_data = torch.cat(flat_params, dim=0)
        flat_param = FlatParameter(flat_param_data, requires_grad=requires_grad)
        return flat_param

    def shard(self, process_group: dist.ProcessGroup):
        """
        Shards the handle's ``FlatParameter``. In terms of memory, this
        allocates new memory for the sharded flattened parameter and frees the
        unsharded flattened parameter's storage.

        Postcondition: ``self.flat_param`` is the sharded flattened parameter.
        ``process_group``, ``rank``, and ``world_size`` attributes are set.

        TODO (awgu): Once we retire ``FlattenParamsWrapper``, we should pass
        the process group directly to the ``FlatParamHandle`` constructor. For
        now, we decouple ``FlattenParamsWrapper`` from a process group, but this
        makes the process-group-related attributes not necessarily defined.
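
        Worked example (illustrative numbers): a 10-numel ``FlatParameter``
        sharded over ``world_size=4`` is chunked into sizes ``(3, 3, 3, 1)``,
        so ranks 0-2 keep 3 numels each while rank 3 keeps its 1 numel plus 2
        numels of padding.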
        """
        if not self.uses_sharded_strategy:
            return
        flat_param = self.flat_param
        self.process_group = process_group
        self.rank = process_group.rank()
        self.world_size = process_group.size()
        assert (
            flat_param.storage_offset() == 0
        ), "The `FlatParameter` is not the sole occupant of its storage"
        orig_storage = flat_param.storage()
        local_shard, numel_padded = FlatParamHandle._get_shard(
            flat_param, self.rank, self.world_size
        )
        flat_param.set_(local_shard)  # type: ignore[call-overload]
        self._init_shard_metadata(local_shard.numel(), numel_padded, self.rank)
        if orig_storage.size() > 0:
            orig_storage.resize_(0)

    def _init_shard_metadata(
        self,
        sharded_flat_param_numel: int,
        numel_padded: int,
        rank: int,
    ) -> None:
        """
        Initializes shard-related metadata for this rank's shard of the
        flattened parameter: ``_shard_param_offsets``, ``_shard_indices``, and
        ``_shard_numel_padded``.

        Args:
            sharded_flat_param_numel (int): Numel of each rank's sharded
                flattened parameter with padding (i.e. including
                ``numel_padded``).
            numel_padded (int): Numel padded for this rank's sharded flattened
                parameter.
            rank (int): Caller's rank.
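
        Example (illustrative): with ``sharded_flat_param_numel=4``,
        ``numel_padded=1``, and ``rank=1``, this rank covers the closed
        interval ``[4, 6]`` of the unsharded flattened parameter, excluding
        its one padded numel.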
        """
        if numel_padded > sharded_flat_param_numel:
            raise ValueError(
                f"Sharded flattened parameter with {sharded_flat_param_numel} "
                f"numel cannot have {numel_padded} numel padded"
            )
        start = sharded_flat_param_numel * rank
        end = start + sharded_flat_param_numel - numel_padded - 1  # inclusive
        (
            self.flat_param._shard_param_offsets,
            self.flat_param._shard_indices,
        ) = self._get_shard_metadata(start, end)
        self.flat_param._shard_numel_padded = numel_padded

    def _get_shard_metadata(
        self,
        start: int,
        end: int,
    ) -> Tuple[Tuple[Tuple[int, int], ...], Tuple[int, int]]:
        """
        Computes the shard metadata based on ``start`` and ``end``, which give
        the closed interval of the unsharded flattened parameter specifying the
        shard.

        Args:
            start (int): Start index (in units of numel) of this rank's shard
                of the flattened parameter.
            end (int): End index (in units of numel and inclusive) of this
                rank's shard of the flattened parameter.

        Return:
            Tuple[Tuple[Tuple[int, int], ...], Tuple[int, int]]: See
            ``_shard_param_offsets`` and ``_shard_indices`` in
            :class:`FlatParameter` 's docstring.
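
        Example (illustrative): for parameter numels ``(4, 2)`` and a shard
        covering ``start=2``, ``end=4``, this returns offsets
        ``((2, 3), (0, 0))`` and indices ``(0, 1)``: numels 2-3 of the first
        parameter and numel 0 of the second.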
        """
        flat_param_offsets = self._get_flat_param_offsets()
        # Indices of the original parameters in this rank's shard
        shard_param_indices_range = []  # elements will be consecutive
        # [start, end] offsets giving this rank's part of each parameter
        shard_param_offsets = []
        for i, (param_start, param_end) in enumerate(flat_param_offsets):
            if start > param_end or end < param_start:
                continue
            if start <= param_start:
                intra_param_start = 0
            else:
                intra_param_start = start - param_start
            intra_param_end = min(param_end, end) - param_start
            shard_param_indices_range.append(i)
            shard_param_offsets.append(
                (intra_param_start, intra_param_end)  # both inclusive
            )
        if len(shard_param_indices_range) == 0:
            shard_param_indices = (0, 0)
            assert len(shard_param_offsets) == 0
        else:
            shard_param_indices = (
                shard_param_indices_range[0],
                shard_param_indices_range[-1],
            )
            assert (
                len(shard_param_offsets)
                == shard_param_indices[-1] - shard_param_indices[0] + 1
            )
        return tuple(shard_param_offsets), shard_param_indices

    @staticmethod
    def _get_unpadded_shard(
        tensor: Tensor,
        rank: int,
        world_size: int,
    ) -> Tuple[Tensor, int]:
        """
        Returns the shard of ``tensor`` without any padding for the given
        ``rank`` and ``world_size`` and the numel to pad for that shard.

        If ``tensor`` is already flattened or may be viewed in the flattened
        shape (which is true in the expected usage), then this method does not
        allocate any new tensor memory.
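
        Example (illustrative)::

            >>> FlatParamHandle._get_unpadded_shard(torch.arange(10), 3, 4)
            (tensor([9]), 2)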
        """
        chunks = torch.flatten(tensor).chunk(world_size)
        if len(chunks) < (rank + 1):
            # This rank gets an empty chunk since there are not enough chunks
            # across all ranks
            chunk = chunks[0].new_empty(0)
        else:
            chunk = chunks[rank]
        numel_to_pad = chunks[0].numel() - chunk.numel()
        assert (
            numel_to_pad >= 0
        ), "Chunk's size should be at most the first chunk's size"
        return chunk, numel_to_pad

    @staticmethod
    def _get_shard(tensor: Tensor, rank: int, world_size: int) -> Tuple[Tensor, int]:
        """
        Returns the shard of ``tensor`` with padding for the given ``rank`` and
        ``world_size`` and the numel padded for that shard.

        This method allocates new memory (via :meth:`clone`) since the
        unsharded ``tensor`` may be deallocated after this method returns.
        """
        chunk, numel_to_pad = FlatParamHandle._get_unpadded_shard(
            tensor, rank, world_size
        )
        shard = chunk.clone()
        if numel_to_pad > 0:
            shard = F.pad(shard, [0, numel_to_pad])
        return shard, numel_to_pad

    @staticmethod
    def _get_sharded_size(tensor: Tensor, rank: int, world_size: int) -> torch.Size:
        """
        Returns the shape of ``tensor`` after sharding including padding. This
        requires ``tensor`` to have 1D shape and ensures that the returned
        shape is 1D.
        """
        assert len(tensor.shape) == 1, f"{tensor.shape}"
        unpadded_sharded_tensor, numel_to_pad = FlatParamHandle._get_unpadded_shard(
            tensor, rank, world_size
        )
        unpadded_sharded_size = unpadded_sharded_tensor.size()
        assert len(unpadded_sharded_size) == 1, f"{unpadded_sharded_size}"
        return torch.Size([unpadded_sharded_size[0] + numel_to_pad])

    def _get_flat_param_offsets(self) -> List[Tuple[int, int]]:
        """Returns [start, end] offsets of each original parameter's flattened
        data in the unsharded flattened parameter (without padding)."""
        cumulative_sum = list(accumulate(self.flat_param._numels))
        starts = [0] + cumulative_sum[:-1]
        ends = [end - 1 for end in cumulative_sum]  # inclusive
        param_offsets = list(zip(starts, ends))
        return param_offsets

    def shard_metadata(self) -> FlatParamShardMetadata:
        """Returns shard-related metadata specific to this rank's shard of the
        flattened parameter."""
        assert hasattr(self.flat_param, "_shard_indices") and hasattr(
            self.flat_param, "_shard_param_offsets"
        ), "Shard metadata has not been initialized"
        shard_param_start_index = self.flat_param._shard_indices[0]
        shard_param_end_index = self.flat_param._shard_indices[1]
        sl = (
            slice(shard_param_start_index, shard_param_end_index + 1)
            if shard_param_start_index <= shard_param_end_index
            else slice(0, 0)
        )
        return FlatParamShardMetadata(
            self.flat_param._prefixed_param_names[sl],
            self.flat_param._shapes[sl],
            self.flat_param._numels[sl],
            self.flat_param._shard_param_offsets[:],
        )

    def pre_unshard(self) -> bool:
        """
        Returns: ``False`` if this is a no-op and ``True`` otherwise.

        Postcondition: ``self.flat_param`` 's data is on the device for
        communication and is what should be all-gathered. This means that it
        matches the dtype of the expected unsharded parameter.
        """
        ret = False
        if (
            self.uses_sharded_strategy
            and self._config.offload_params
            and not self.needs_unshard()
        ):
            pass  # no-op: the unsharded flattened parameter is already available
        elif self._uses_param_mixed_precision and not self._force_full_precision:
            self._use_low_precision_shard()
            ret = True
        elif self._config.offload_params and self.flat_param.device != self.device:
            # Move the sharded `FlatParameter` to the compute device
            self._flat_param_to(self.device, non_blocking=True)
            ret = True
        self._check_on_compute_device(self.flat_param)
        return ret

    def _use_low_precision_shard(self) -> None:
        """
        Allocates the low precision shard directly on the compute device and
        switches to using the low precision sharded flattened parameter.
        """
        self._check_low_precision_shard()
        flat_param = self.flat_param
        _alloc_storage(flat_param._mp_shard, flat_param._local_shard.size())
        # `copy_()` implicitly casts to the low precision dtype
        flat_param._mp_shard.copy_(
            flat_param._local_shard.to(self.device, non_blocking=True)
        )
        # Invariant: `_mp_shard` is always on the compute device
        flat_param.data = flat_param._mp_shard

    def unshard(self):
        """
        Runs the unshard logic. This includes all-gathering the flattened
        parameter and switching to using the unsharded flattened parameter. If
        the handle does not need unsharding, then this only switches to using
        the unsharded flattened parameter. For ``NO_SHARD``, this is a no-op.

        If FSDP is in :meth:`summon_full_params` and the handle uses parameter
        mixed precision, then the parameter is forced to full precision.
        N)r   r}    _get_padded_unsharded_flat_param_use_unsharded_flat_param"_alloc_padded_unsharded_flat_param_all_gather_flat_paramrU   unsharded_flat_paramr/   r/   r0   unshard  s    

zFlatParamHandle.unshardc                 C   s,   | j s
dS |  }|  | k}| S )zBReturns if the handle's flattened parameter needs to be unsharded.F)r}   r   r   rS   rk   )rU   r   Zalready_unshardedr/   r/   r0   r     s    zFlatParamHandle.needs_unshardc                 C   s0   |    | j}|  }| | t||j |S )a*  
        Allocates the *padded* unsharded flattened parameter. The unpadded
        unsharded flattened parameter is always a view into the padded one.
        This padded parameter is saved to a different attribute on the
        ``FlatParameter`` depending on if we force full precision.
        """
        self._check_sharded_strategy()
        flat_param = self.flat_param
        unsharded_flat_param = self._get_padded_unsharded_flat_param()
        self._check_storage_freed(unsharded_flat_param)
        _alloc_storage(unsharded_flat_param, flat_param._padded_unsharded_size)
        return unsharded_flat_param

    def _get_padded_unsharded_flat_param(self) -> Tensor:
        """
        Returns a reference to the padded unsharded flattened parameter
        depending on the calling context. This should only be called if using a
        sharded strategy.
        zExpects full precision but got )	r   rn   r   Z_full_prec_full_param_paddedr   rB   r^   r?   Z_full_param_paddedr   r/   r/   r0   r     s    z0FlatParamHandle._get_padded_unsharded_flat_param)padded_unsharded_flat_paramrI   c                 C   sp   t t| dot| dd | jj}| | j }t | |kd| d|   t||| j | 	| dS )z
        All-gathers the handle's flattened parameter to the destination
        ``padded_unsharded_flat_param``, and switches to using the all-gathered
        tensor.
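
        Worked example (illustrative numbers): with ``world_size=2`` and a
        6-numel padded sharded flattened parameter per rank, the destination
        tensor must have ``2 * 6 = 12`` numel before the all-gather.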
        """
        p_assert(
            hasattr(self, "process_group") and hasattr(self, "world_size"),
            "Expects a process group and world size to have been set via `shard()`",
        )
        sharded_flat_param = self.flat_param.data
        expected_numel = sharded_flat_param.numel() * self.world_size
        p_assert(
            padded_unsharded_flat_param.numel() == expected_numel,
            f"Expects {expected_numel} numel but got "
            f"{padded_unsharded_flat_param.numel()}",
        )
        dist._all_gather_base(
            padded_unsharded_flat_param,
            sharded_flat_param,
            self.process_group,
        )
        self._check_on_compute_device(padded_unsharded_flat_param)

    def _use_unsharded_flat_param(
        self,
        padded_unsharded_flat_param: Tensor,
    ) -> None:
        """
        Switches to using the *unpadded* unsharded flattened parameter, which
        is a view into the *padded* unsharded flattened parameter.
        N)rn   rT   rk   viewr   )rU   r   Zunsharded_sizer/   r/   r0   r     s    z)FlatParamHandle._use_unsharded_flat_paramc                 C   s$   | j r| jr|   | | j dS )zo
        Runs the post-unshard logic. This includes freeing the low precision
        shard if needed.
        N)r   r}   !_free_low_precision_sharded_paramr   rn   rU   r/   r/   r0   post_unshard  s    zFlatParamHandle.post_unshardc                 C   s   |    t| jj dS )z4Frees the low precision sharded flattened parameter.N)r   r   rn   r   r   r/   r/   r0   r     s    z1FlatParamHandle._free_low_precision_sharded_paramc                 C   s  t | jtjtjfkd | j}|jdk	r|j |jksJ|jj	|j	kr| 
| j |jj	| j	k}t | pr| jjd| j	 d|jj	  |j |j k}|r|s|jj|_| jjr|jj|jjkr|j|jj|_n,|j}t |j |kd| d|j   d|_dS )z
        Prepares the gradient for the backward computation by saving and
        clearing any existing sharded gradient in ``.grad`` to enable computing
        a new unsharded gradient.
        """
        p_assert(
            self._training_state
            in (HandleTrainingState.BACKWARD_PRE, HandleTrainingState.IDLE),
            "Expects to be in `BACKWARD_PRE` or `IDLE` (if prefetching)",
        )
        flat_param = self.flat_param
        if flat_param.grad is not None and (
            flat_param.grad.size() != flat_param._unpadded_unsharded_size
            or flat_param.grad.device != flat_param.device
        ):
            self._check_on_compute_device(self.flat_param)
            grad_offloaded = flat_param.grad.device != self.device
            p_assert(
                not grad_offloaded or self._config.offload_params,
                f"Expects the sharded gradient to be on {self.device} "
                f"but got {flat_param.grad.device}",
            )
            prev_iter_synced_gradients = (
                flat_param.grad.size() == flat_param._local_shard.size()
            )
            if prev_iter_synced_gradients:
                if not grad_offloaded:
                    # Save the sharded gradient from the previous iteration for
                    # gradient accumulation without `no_sync()`
                    flat_param._saved_grad_shard = flat_param.grad.data
                    if (
                        self._config.keep_low_precision_grads
                        and flat_param._saved_grad_shard.dtype != flat_param.dtype
                    ):
                        flat_param._saved_grad_shard = (
                            flat_param._saved_grad_shard.to(flat_param.dtype)
                        )
            else:
                padded_unsharded_size = flat_param._padded_unsharded_size
                p_assert(
                    flat_param.grad.size() == padded_unsharded_size,
                    "Expects `.grad` to be the unsharded gradient in "
                    f"`no_sync()` with size {padded_unsharded_size} "
                    f"but got size {flat_param.grad.size()}",
                )
            flat_param.grad = None

    @contextlib.contextmanager
    def to_cpu(self):
        """
        Moves the unpadded unsharded flattened parameter to CPU while in the
        context and moves it back to the previous device upon exit. For now,
        this assumes the ``FlatParameter`` is the unpadded unsharded flattened
        parameter since (1) there is no reason to include the padding in the
        copy and (2) there is no use case for the sharded flattened parameter.

        Precondition: ``self.flat_param`` 's data is the unpadded unsharded
        flattened parameter on the compute device, and the handle uses a
        sharded strategy.
        Postcondition: Same as the precondition.
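
        Example (a minimal usage sketch, not from the original docstring)::

            >>> with handle.to_cpu():
            ...     # The unpadded unsharded flattened parameter is on CPU here
            ...     cpu_copy = handle.flat_param.detach().clone()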
        zExpects size r   zEExpects the unpadded parameter to be a view into the padded parametercpuN)r   r   rn   rS   rT   r   r   Zdata_ptrr   r   r5   rX   _free_unsharded_flat_paramr   rk   r   r   )rU   Zunpadded_storage_ptrZpadded_storage_ptrr   r/   r/   r0   to_cpuD  s4    
zFlatParamHandle.to_cpu)free_unsharded_flat_paramc                 C   s   |r|    |   dS )z
        Runs the reshard logic. This includes freeing the unsharded flattened
        parameter if ``free_unsharded_flat_param`` and switching to using the
        sharded flattened parameter.
        N)r   _use_sharded_flat_param)rU   r   r/   r/   r0   reshardt  s    zFlatParamHandle.reshardc                 C   s   | j r| js| js|   dS )aE  
        Runs the post-reshard logic. This includes freeing any memory that
        can now be freed given that the ``FlatParameter`` points to the full
        precision sharded flattened parameter.

        Precondition: ``self.flat_param`` 's data points to the full precision
        sharded flattened parameter.
        N)r   r}   r   r   r   r/   r/   r0   post_reshard~  s    zFlatParamHandle.post_reshardc                 C   sJ   |    |  }| | | | |ttjjtj	
  t| dS )z
        Frees the padded unsharded flattened parameter. The tensor to free
        depends on the calling context since the unshard may have forced full
        precision, in which case a different tensor is used.
        """
        self._check_sharded_strategy()
        unsharded_flat_param = self._get_padded_unsharded_flat_param()
        self._check_storage_allocated(unsharded_flat_param)
        self._check_on_compute_device(unsharded_flat_param)
        # Do not free the memory until all ops in the current stream finish
        unsharded_flat_param.record_stream(
            cast(torch._C.Stream, torch.cuda.current_stream())
        )
        _free_storage(unsharded_flat_param)

    def _use_sharded_flat_param(self) -> None:
        """Switches to using the sharded flattened parameter."""
        flat_param = self.flat_param
        if self._config.offload_params:
            device = flat_param._local_shard.device
            p_assert(
                device == torch.device("cpu"),
                f"Expects the local shard to be on CPU but got {device}",
            )
        flat_param.data = flat_param._local_shard

    @staticmethod
    def _get_unflat_views(
        flat_param: FlatParameter,
        tensor: Optional[Tensor] = None,
    ) -> Iterator[Tensor]:
        """
        Returns unflattened ``Tensor`` views into ``tensor`` if it is not
        ``None`` or ``flat_param`` otherwise, where the unflattening is based
        on ``flat_param`` 's metadata.

        In other words, to get views into the unsharded flattened parameter,
        pass ``tensor`` as ``None``, but to get views into tensor optimizer
        state, pass ``tensor`` as the optimizer state tensor.
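
        Example (illustrative): for ``_numels == (4, 2)`` and ``_shapes ==
        (torch.Size([2, 2]), torch.Size([2]))``, a 6-numel input yields a
        ``(2, 2)`` view and a ``(2,)`` view, each passed through any
        registered parameter extension transform.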
        """
        if tensor is None:
            tensor = flat_param
        p_assert(
            tensor.numel() == flat_param._unpadded_unsharded_size.numel(),
            f"Expects {flat_param._unpadded_unsharded_size.numel()} numel but got "
            f"{tensor.numel()} numel",
        )
        views = (
            _ext_post_unflatten_transform(subtensor.view(shape), param_extension)
            for (subtensor, shape, param_extension) in zip(
                torch.split(tensor, flat_param._numels, dim=0),
                flat_param._shapes,
                flat_param._param_extensions,
            )
        )
        return views

    def _unflatten(self, as_params: bool) -> None:
        """
        Unflattens the unsharded flattened parameter by setting the original
        module parameter variables to be views into it.

        Args:
            as_params (bool): If ``True``, then registers the original
                parameters as ``nn.Parameter`` s; if ``False``, then registers
                the original parameters only as ``Tensor`` s. ``False`` should
                be used during forward/backward computation and when hiding the
                original parameters from :meth:`nn.Module.named_parameters`.
        """
        views = self._get_unflat_views(self.flat_param)
        for view, (param_name, module, _) in zip(views, self.flat_param._param_infos):
            if hasattr(module, param_name):
                delattr(module, param_name)
            if as_params:
                module.register_parameter(param_name, nn.Parameter(view))
            else:
                setattr(module, param_name, view)
        for (
            param_name,
            module,
            _,
            prim_param_name,
            prim_module,
            _,
        ) in self.flat_param._shared_param_infos:
            if hasattr(module, param_name):
                delattr(module, param_name)
            assert hasattr(prim_module, prim_param_name)
            param: Any = getattr(prim_module, prim_param_name)
            if as_params:
                assert isinstance(param, nn.Parameter)
                module.register_parameter(param_name, param)
            else:
                setattr(module, param_name, param)

    @contextlib.contextmanager
    def unflatten_as_params(self) -> Generator:
        """
        Assumes the flattened parameter is unsharded. When in the context,
        unflattens the original parameters as ``nn.Parameter`` views into the
        flattened parameter, and after the context, restores the original
        parameters as ``Tensor`` views into the flattened parameter.
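
        Example (a minimal usage sketch)::

            >>> with handle.unflatten_as_params():
            ...     # Original parameters are temporarily registered as
            ...     # ``nn.Parameter`` views into the flattened parameter
            ...     state_dict = module.state_dict()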
        """
        self._unflatten(as_params=True)
        try:
            yield
        finally:
            self._unflatten(as_params=False)

    def _flat_param_to(self, *args, **kwargs):
        """Wraps an in-place call to ``.to()`` for ``self.flat_param``."""
        self.flat_param.data = self.flat_param.data.to(*args, **kwargs)

    def _get_modules(self) -> Set[nn.Module]:
        """Returns a :class:`set` of the modules whose parameters are included
        in this handle's flattened parameter."""
        return set(pi.module for pi in self.flat_param._param_infos).union(
            set(spi.module for spi in self.flat_param._shared_param_infos)
        )

    def parameter_module_names(self) -> Iterator[Tuple[str, str]]:
        shared_param_infos = [
            ParamInfo(param_name, module, module_name)
            for (
                param_name,
                module,
                module_name,
                _,
                _,
                _,
            ) in self.flat_param._shared_param_infos
        ]
        for param_name, _, module_name in chain(
            self.flat_param._param_infos, shared_param_infos
        ):
            yield (param_name, module_name)

    def _check_sharded_strategy(self):
        p_assert(self.uses_sharded_strategy, "Expects sharded strategy")

    def _check_on_compute_device(self, tensor: Tensor):
        p_assert(
            tensor.device == self.device,
            f"Expects tensor to be on the compute device {self.device}",
        )

    def _check_storage_freed(self, tensor: Tensor):
        storage_size = tensor.storage().size()
        p_assert(
            storage_size == 0,
            f"Expects storage to be freed but got storage with size {storage_size}",
        )

    def _check_storage_allocated(self, tensor: Tensor):
        storage_size = tensor.storage().size()
        p_assert(storage_size > 0, "Expects storage to be allocated")

    def _check_low_precision_shard(self):
        p_assert(
            self._uses_param_mixed_precision,
            "Not using low precision for parameters",
        )
        p_assert(
            getattr(self.flat_param, "_mp_shard", None) is not None,
            "Expects `_mp_shard` to exist",
        )
        device = self.flat_param._mp_shard.device
        p_assert(
            device == self.device,
            f"Expects the low precision shard to be on {self.device} but got {device}",
        )

    @property
    def uses_sharded_strategy(self) -> bool:
        return self._config.sharding_strategy != HandleShardingStrategy.NO_SHARD

    @property
    def _uses_param_mixed_precision(self) -> bool:
        return self._config.param_dtype is not None

    @property
    def _force_full_precision(self) -> bool:
        return (
            self._training_state == HandleTrainingState.SUMMON_FULL_PARAMS
            and self._uses_param_mixed_precision
        )
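

# A rough end-to-end sketch of the handle lifecycle (hypothetical setup added
# for illustration; FSDP itself drives these calls in practice):
#
#   config = HandleConfig(HandleShardingStrategy.FULL_SHARD, False, None, None)
#   handle = FlatParamHandle(list(module.parameters()), module, device, config)
#   handle.shard(process_group)   # keep only this rank's padded shard
#   handle.pre_unshard()          # e.g. switch to the low precision shard
#   handle.unshard()              # all-gather into the padded unsharded tensor
#   handle.post_unshard()         # free the low precision shard if any
#   # ... forward/backward computation ...
#   handle.reshard(free_unsharded_flat_param=True)
#   handle.post_reshard()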