"""
:mod:`torch.distributed.optim` exposes DistributedOptimizer, which takes a list
of remote parameters (:class:`~torch.distributed.rpc.RRef`) and runs the
optimizer locally on the workers where the parameters live.  The distributed
optimizer can use any of the local optimizer :ref:`optimizer-algorithms` to
apply the gradients on each worker.
"""
import warnings

import torch
from torch import optim

from .apply_optimizer_in_backward import (
    _apply_optimizer_in_backward,
    _get_in_backward_optimizers,
)
from .functional_adadelta import _FunctionalAdadelta
from .functional_adagrad import _FunctionalAdagrad
from .functional_adam import _FunctionalAdam
from .functional_adamax import _FunctionalAdamax
from .functional_adamw import _FunctionalAdamW
from .functional_rmsprop import _FunctionalRMSprop
from .functional_rprop import _FunctionalRprop
from .functional_sgd import _FunctionalSGD
from .named_optimizer import _NamedOptimizer
from .utils import as_functional_optim


with warnings.catch_warnings():
    warnings.simplefilter("always")
    warnings.warn(
        "`TorchScript` support for functional optimizers is deprecated "
        "and will be removed in a future PyTorch release. "
        "Consider using the `torch.compile` optimizer instead.",
        DeprecationWarning,
        stacklevel=2,
    )

# DistributedOptimizer depends on torch.distributed.rpc, so only import it
# when the RPC extension is available in this build.
if hasattr(torch._C, "_rpc_init"):
    from .optimizer import DistributedOptimizer

from .post_localSGD_optimizer import PostLocalSGDOptimizer
from .zero_redundancy_optimizer import ZeroRedundancyOptimizer


__all__ = [
    "as_functional_optim",
    "DistributedOptimizer",
    "PostLocalSGDOptimizer",
    "ZeroRedundancyOptimizer",
]
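
# A minimal usage sketch for ``DistributedOptimizer`` (kept as a comment so it
# is not executed on import). It assumes an RPC group is already initialized
# with a peer named "worker1" and that a distributed autograd context wraps the
# forward/backward passes; the worker name and tensor values are illustrative.
#
#   import torch
#   import torch.distributed.autograd as dist_autograd
#   import torch.distributed.rpc as rpc
#   from torch import optim
#   from torch.distributed.optim import DistributedOptimizer
#
#   with dist_autograd.context() as context_id:
#       # Forward pass: the parameters live on "worker1" as RRefs.
#       rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
#       rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
#       loss = rref1.to_here() + rref2.to_here()
#
#       # Backward pass through the distributed autograd engine.
#       dist_autograd.backward(context_id, [loss.sum()])
#
#       # The distributed optimizer runs a local SGD step on each worker
#       # that owns one of the remote parameters.
#       dist_optim = DistributedOptimizer(optim.SGD, [rref1, rref2], lr=0.05)
#       dist_optim.step(context_id)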