"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
"""
import torch
from torch import optim

from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamw import _FunctionalAdamW
from .functional_sgd import _FunctionalSGD
from .functional_adadelta import _FunctionalAdadelta
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_adamax import _FunctionalAdamax
from .utils import as_functional_optim

# DistributedOptimizer depends on torch.distributed.rpc, so it is only exported
# when the C extension was built with RPC support (torch._C._rpc_init exists).
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer