import sys
import torch


def is_available():
    return hasattr(torch._C, "_dist_autograd_init")


if is_available() and not torch._C._dist_autograd_init():
    raise RuntimeError("Failed to initialize torch.distributed.autograd")

if is_available():
    # Only expose the C++ bindings when distributed autograd was built
    # into this copy of PyTorch.
    from torch._C._distributed_autograd import (
        get_gradients,
        backward,
        _init,
        _new_context,
        _release_context,
        _get_max_id,
        _is_valid_context,
        _retrieve_context,
        _current_context,
        _get_debug_info,
        DistAutogradContext,
    )


class context:
    """
    Context object to wrap forward and backward passes when using
    distributed autograd. The ``context_id`` generated in the ``with``
    statement is required to uniquely identify a distributed backward pass
    on all workers. Each worker stores metadata associated with this
    ``context_id``, which is required to correctly execute a distributed
    autograd pass.

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch.distributed.rpc as rpc
        >>> import torch.distributed.autograd as dist_autograd
        >>> with dist_autograd.context() as context_id:
        >>>     t1 = torch.rand((3, 3), requires_grad=True)
        >>>     t2 = torch.rand((3, 3), requires_grad=True)
        >>>     loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
        >>>     dist_autograd.backward(context_id, [loss])
    """

    def __enter__(self):
        # Create a new distributed autograd context and hand back its id,
        # which callers pass to ``backward`` and ``get_gradients``.
        self.autograd_context = _new_context()
        return self.autograd_context._context_id()

    def __exit__(self, type, value, traceback):
        # Release the context so workers can free the metadata recorded
        # for this distributed backward pass.
        _release_context(self.autograd_context._context_id())
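
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module's public code): after the
# distributed backward pass driven by ``backward(context_id, [loss])``, the
# gradients accumulated on the local worker can be read back with
# ``get_gradients(context_id)``, which returns a dict mapping each local
# Tensor to its gradient. The worker name "worker1" and the tensor shapes
# below are assumptions for the example; an RPC agent must already be
# initialized on every participating worker.
#
#   import torch
#   import torch.distributed.rpc as rpc
#   import torch.distributed.autograd as dist_autograd
#
#   with dist_autograd.context() as context_id:
#       t1 = torch.rand((3, 3), requires_grad=True)
#       t2 = torch.rand((3, 3), requires_grad=True)
#       loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
#       dist_autograd.backward(context_id, [loss])
#       # Tensor -> gradient mapping for tensors owned by this worker.
#       grads = dist_autograd.get_gradients(context_id)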