import os
import sys
from enum import Enum
import pdb
import io

import torch


def is_available() -> bool:
    """
    Return ``True`` if the distributed package is available.

    Otherwise,
    ``torch.distributed`` does not expose any other APIs. Currently,
    ``torch.distributed`` is available on Linux, MacOS and Windows. Set
    ``USE_DISTRIBUTED=1`` to enable it when building PyTorch from source.
    Currently, the default value is ``USE_DISTRIBUTED=1`` for Linux and Windows,
    ``USE_DISTRIBUTED=0`` for MacOS.
    
    """
    return hasattr(torch._C, "_c10d_init")


if is_available() and not torch._C._c10d_init():
    raise RuntimeError("Failed to initialize torch.distributed")

# Custom runtime errors thrown from the distributed package.
DistError = torch._C._DistError
DistBackendError = torch._C._DistBackendError
DistNetworkError = torch._C._DistNetworkError
DistStoreError = torch._C._DistStoreError

if is_available():
    from torch._C._distributed_c10d import (
        Store, FileStore, TCPStore, ProcessGroup, Backend as _Backend,
        PrefixStore, Reducer, Logger, BuiltinCommHookType, GradBucket,
        Work as _Work, _DEFAULT_FIRST_BUCKET_BYTES, _register_comm_hook,
        _register_builtin_comm_hook, _broadcast_coalesced,
        _compute_bucket_assignment_by_size, _verify_params_across_processes,
        _test_python_store, DebugLevel, get_debug_level, set_debug_level,
        set_debug_level_from_env, _make_nccl_premul_sum,
        _ControlCollectives, _StoreCollectives,
    )

    class _DistributedPdb(pdb.Pdb):
        """
        Supports using PDB from inside a multiprocessing child process.

        Usage:
        _DistributedPdb().set_trace()
        """

        def interaction(self, *args, **kwargs):
            _stdin = sys.stdin
            try:
                # A multiprocessing child usually has stdin redirected away from the
                # terminal, so reopen it for the duration of the pdb session.
                sys.stdin = open("/dev/stdin")
                pdb.Pdb.interaction(self, *args, **kwargs)
            finally:
                sys.stdin = _stdin

    def breakpoint(rank: int = 0):
        """
        Set a breakpoint, but only on a single rank.  All other ranks will wait for you to be
        done with the breakpoint before continuing.

        Args:
            rank (int): Which rank to break on.  Default: ``0``
        """
        if get_rank() == rank:
            pdb = _DistributedPdb()
            pdb.message(
                "\n!!! ATTENTION !!!\n\n"
                f"Type 'up' to get to the frame that called dist.breakpoint(rank={rank})\n"
            )
            pdb.set_trace()
        # The barrier below must dispatch to the real backend, so temporarily drop
        # the meta key from TLS dispatch and disable torch dispatch while waiting.
        meta_in_tls = torch._C._meta_in_tls_dispatch_include()
        guard = torch._C._DisableTorchDispatch()
        torch._C._set_meta_in_tls_dispatch_include(False)
        try:
            barrier()
        finally:
            torch._C._set_meta_in_tls_dispatch_include(meta_in_tls)
            del guard
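
    # Example (illustrative sketch only; ``loader`` and ``train_step`` are
    # hypothetical names): from a script launched with one process per rank,
    # a single rank can be inspected while the other ranks wait in the barrier:
    #
    #     import torch.distributed as dist
    #     dist.init_process_group(backend="gloo")
    #     for step, batch in enumerate(loader):
    #         if step == 10:
    #             dist.breakpoint(rank=0)   # rank 0 gets the pdb prompt
    #         train_step(batch)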




    if sys.platform != "win32":
        from torch._C._distributed_c10d import HashStore, _round_robin_process_groups

    from .distributed_c10d import *

    # Explicitly re-export helpers that other parts of torch import from this module.
    from .distributed_c10d import (
        _all_gather_base, _reduce_scatter_base, _create_process_group_wrapper,
        _rank_not_in_group, _coalescing_manager, _CoalescingManager,
        _get_process_group_name, get_node_local_rank,
    )
    from .rendezvous import (
        rendezvous, _create_store_from_options, register_rendezvous_handler,
    )
    from .remote_device import _remote_device
    from .device_mesh import init_device_mesh, DeviceMesh

    set_debug_level_from_env()

else:
    # Keep ``torch.distributed.ProcessGroup`` importable even when the package was
    # built without distributed support (``USE_DISTRIBUTED=0``).
    class _ProcessGroupStub:
        pass

    sys.modules["torch.distributed"].ProcessGroup = _ProcessGroupStub
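
# Example (illustrative sketch only): typical user-side setup guarded by the
# availability check defined above, assuming a launcher such as torchrun has set
# the usual ``env://`` variables (MASTER_ADDR, MASTER_PORT, RANK, WORLD_SIZE):
#
#     import torch.distributed as dist
#
#     if dist.is_available():
#         dist.init_process_group(backend="gloo", init_method="env://")
#         print(f"rank {dist.get_rank()} / world size {dist.get_world_size()}")
#         dist.barrier()
#         dist.destroy_process_group()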