r"""
This package introduces support for the XPU backend, specifically tailored for
Intel GPU optimization.

This package is lazily initialized, so you can always import it, and use
:func:`is_available()` to determine if your system supports XPU.
"""
import threading
import traceback
from functools import lru_cache
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
import torch._C
from .. import device as _device
from .._utils import _dummy_type, _LazySeedTracker
from ._utils import _get_device_index
from .streams import Event, Stream

_initialized = False
_tls = threading.local()
_initialization_lock = threading.Lock()
_queued_calls: List[
    Tuple[Callable[[], None], List[str]]
] = []  # don't invoke these until initialization occurs
_is_in_bad_fork = getattr(torch._C, "_xpu_isInBadFork", lambda: False)
_device_t = Union[_device, str, int, None]
_lazy_seed_tracker = _LazySeedTracker()
default_generators: Tuple[torch._C.Generator] = ()  # type: ignore[assignment]


def _is_compiled() -> bool:
    r"""Return true if compiled with XPU support."""
    return torch._C._has_xpu


if _is_compiled():
    _XpuDeviceProperties = torch._C._XpuDeviceProperties
    _exchange_device = torch._C._xpu_exchangeDevice
    _maybe_exchange_device = torch._C._xpu_maybeExchangeDevice
else:
    # Define dummy placeholders so the module still imports without XPU support.
    _XpuDeviceProperties = _dummy_type("_XpuDeviceProperties")  # type: ignore[assignment, misc]

    def _exchange_device(device: int) -> int:
        raise NotImplementedError("PyTorch was compiled without XPU support")

    def _maybe_exchange_device(device: int) -> int:
        raise NotImplementedError("PyTorch was compiled without XPU support")


@lru_cache(maxsize=1)
def device_count() -> int:
    r"""Return the number of XPU devices available."""
    if not _is_compiled():
        return 0
    return torch._C._xpu_getDeviceCount()


def is_available() -> bool:
    r"""Return a bool indicating if XPU is currently available."""
    return device_count() > 0


def is_bf16_supported():
    r"""Return a bool indicating if the current XPU device supports dtype bfloat16."""
    return True


def is_initialized():
    r"""Return whether PyTorch's XPU state has been initialized."""
    return _initialized and not _is_in_bad_fork()


def _lazy_call(callable, **kwargs):
    if is_initialized():
        callable()
    else:
        if kwargs.get("seed_all", False):
            _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack())
        elif kwargs.get("seed", False):
            _lazy_seed_tracker.queue_seed(callable, traceback.format_stack())
        else:
            # Don't store the actual traceback to avoid a memory cycle.
            _queued_calls.append((callable, traceback.format_stack()))


def init():
    r"""Initialize PyTorch's XPU state.
    This is a Python API about lazy initialization that avoids initializing
    XPU until the first time it is accessed. Does nothing if the XPU state is
    already initialized.
    """
    _lazy_init()


def _lazy_init():
    global _initialized, _queued_calls
    if is_initialized() or hasattr(_tls, "is_initializing"):
        return
    with _initialization_lock:
        # The check above ran without the lock; double-check whether XPU has
        # already been initialized by another thread in the meantime.
        if is_initialized():
            return
        # Stop promptly upon encountering a bad fork error.
        if _is_in_bad_fork():
            raise RuntimeError(
                "Cannot re-initialize XPU in forked subprocess. To use XPU with "
                "multiprocessing, you must use the 'spawn' start method"
            )
        if not _is_compiled():
            raise AssertionError("Torch not compiled with XPU enabled")
        # This function inits the XPU backend and detects bad fork processing.
        torch._C._xpu_init()
        # Some of the queued calls may reentrantly call _lazy_init(); we need
        # to just return without initializing in that case.
        _tls.is_initializing = True

        for calls in _lazy_seed_tracker.get_calls():
            if calls:
                _queued_calls.append(calls)

        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = (
                        f"XPU call failed lazily at initialization with error: {str(e)}\n\n"
                        f"XPU call was originally invoked at:\n\n{''.join(orig_traceback)}"
                    )
                    raise Exception(msg) from e
        finally:
            delattr(_tls, "is_initializing")
        _initialized = True


class _DeviceGuard:
    def __init__(self, index: int):
        self.idx = index
        self.prev_idx = -1

    def __enter__(self):
        self.prev_idx = torch.xpu._exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)
        return False


class device:
    r"""Context-manager that changes the selected device.

    Args:
        device (torch.device or int or str): device index to select. It's a no-op if
            this argument is a negative integer or ``None``.
    """

    def __init__(self, device: Any):
        self.idx = _get_device_index(device, optional=True)
        self.prev_idx = -1

    def __enter__(self):
        self.prev_idx = torch.xpu._exchange_device(self.idx)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        self.idx = torch.xpu._maybe_exchange_device(self.prev_idx)
        return False

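
# Minimal usage sketch for the `device` context manager above, assuming a
# PyTorch build with XPU support and at least one visible device. The helper
# name `_example_device_context` is illustrative only (it is not part of the
# torch.xpu API) and is never called at import time.
def _example_device_context() -> None:
    if not torch.xpu.is_available():
        return
    torch.xpu.set_device(0)                       # make device 0 current
    x = torch.empty(4, device="xpu")              # allocated on device 0
    with torch.xpu.device(torch.xpu.device_count() - 1):
        # Inside the block the last visible device is current; the previous
        # device index is restored automatically on exit.
        y = torch.empty(4, device="xpu")
    assert x.device.type == "xpu" and y.device.type == "xpu"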

class device_of(device):
    r"""Context-manager that changes the current device to that of given object.

    You can use both tensors and storages as arguments. If a given object is
    not allocated on an XPU, this is a no-op.

    Args:
        obj (Tensor or Storage): object allocated on the selected device.
    """

    def __init__(self, obj):
        idx = obj.get_device() if obj.is_xpu else -1
        super().__init__(idx)


def set_device(device: _device_t) -> None:
    r"""Set the current device.

    Args:
        device (torch.device or int or str): selected device. This function is a
            no-op if this argument is negative.
    """
    _lazy_init()
    device = _get_device_index(device)
    if device >= 0:
        torch._C._xpu_setDevice(device)


def get_device_name(device: Optional[_device_t] = None) -> str:
    r"""Get the name of a device.

    Args:
        device (torch.device or int or str, optional): device for which to
            return the name. This function is a no-op if this argument is a
            negative integer. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).

    Returns:
        str: the name of the device
    """
    return get_device_properties(device).name

def get_device_capability(device: Optional[_device_t] = None) -> Dict[str, Any]:
    r"""Get the xpu capability of a device.

    Args:
        device (torch.device or int or str, optional): device for which to
            return the device capability. This function is a no-op if this
            argument is a negative integer. It uses the current device, given by
            :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).

    Returns:
        Dict[str, Any]: the xpu capability dictionary of the device
    """
    props = get_device_properties(device)
    return {
        prop: getattr(props, prop) for prop in dir(props) if not prop.startswith("__")
    }

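
# Minimal usage sketch for the device-query helpers in this module, assuming a
# PyTorch build with XPU support. `_example_enumerate_devices` is an
# illustrative name only (not part of the torch.xpu API) and is never called
# at import time.
def _example_enumerate_devices() -> None:
    if not torch.xpu.is_available():
        return
    for idx in range(torch.xpu.device_count()):
        name = torch.xpu.get_device_name(idx)
        caps = torch.xpu.get_device_capability(idx)
        # `caps` is a plain dict of whatever fields _XpuDeviceProperties
        # exposes for this build (device name, memory size, and so on).
        print(f"xpu:{idx}: {name} ({len(caps)} reported properties)")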

def get_device_properties(device: Optional[_device_t] = None) -> _XpuDeviceProperties:
    r"""Get the properties of a device.

    Args:
        device (torch.device or int or str): device for which to return the
            properties of the device.

    Returns:
        _XpuDeviceProperties: the properties of the device
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    if device < 0 or device >= device_count():
        raise AssertionError("Invalid device index")
    return _get_device_properties(device)  # type: ignore[name-defined]


def current_device() -> int:
    r"""Return the index of a currently selected device."""
    _lazy_init()
    return torch._C._xpu_getDevice()


def _get_device(device: Union[int, str, torch.device]) -> torch.device:
    r"""Return the torch.device type object from the passed in device.

    Args:
        device (torch.device or int or str): selected device.
    """
    if isinstance(device, str):
        device = torch.device(device)
    elif isinstance(device, int):
        device = torch.device("xpu", device)
    return device

class StreamContext:
    r"""Context-manager that selects a given stream.

    All XPU kernels queued within its context will be enqueued on a selected
    stream.

    Args:
        Stream (Stream): selected stream. This manager is a no-op if it's
            ``None``.
    .. note:: Streams are per-device.
    """

    cur_stream: Optional["torch.xpu.Stream"]

    def __init__(self, stream: Optional["torch.xpu.Stream"]):
        self.stream = stream
        self.idx = _get_device_index(None, True)
        if self.idx is None:
            self.idx = -1

    def __enter__(self):
        cur_stream = self.stream
        if cur_stream is None or self.idx == -1:
            return
        self.src_prev_stream = torch.xpu.current_stream(None)

        # If the stream is not on the current device, set the current stream
        # on that device as well so it can be restored on exit.
        if self.src_prev_stream.device != cur_stream.device:
            with device(cur_stream.device):
                self.dst_prev_stream = torch.xpu.current_stream(cur_stream.device)
        torch.xpu.set_stream(cur_stream)

    def __exit__(self, type: Any, value: Any, traceback: Any):
        cur_stream = self.stream
        if cur_stream is None or self.idx == -1:
            return
        # Reset the stream on the original device and on the destination device.
        if self.src_prev_stream.device != cur_stream.device:
            torch.xpu.set_stream(self.dst_prev_stream)
        torch.xpu.set_stream(self.src_prev_stream)

def stream(stream: Optional["torch.xpu.Stream"]) -> StreamContext:
    r"""Wrap around the Context-manager StreamContext that selects a given stream.

    Arguments:
        stream (Stream): selected stream. This manager is a no-op if it's ``None``.
    """
    return StreamContext(stream)


def _set_stream_by_id(stream_id, device_index, device_type):
    r"""Set the stream specified by the stream id, device index and device type.

    Args: stream_id (int): not visible to the user, used to assign to the specific stream.
          device_index (int): selected device index.
          device_type (int): selected device type.
    """
    torch._C._xpu_setStream(
        stream_id=stream_id,
        device_index=device_index,
        device_type=device_type,
    )


def set_stream(stream: Stream):
    r"""Set the current stream. This is a wrapper API to set the stream.
        Usage of this function is discouraged in favor of the ``stream``
        context manager.

    Args:
        stream (Stream): selected stream. This function is a no-op
            if this argument is ``None``.
    """
    if stream is None:
        return
    _lazy_init()
    _set_stream_by_id(
        stream_id=stream.stream_id,
        device_index=stream.device_index,
        device_type=stream.device_type,
    )

def current_stream(device: Optional[_device_t] = None) -> Stream:
    r"""Return the currently selected :class:`Stream` for a given device.

    Args:
        device (torch.device or int, optional): selected device. Returns
            the currently selected :class:`Stream` for the current device, given
            by :func:`~torch.xpu.current_device`, if :attr:`device` is ``None``
            (default).
    """
    _lazy_init()
    streamdata = torch._C._xpu_getCurrentStream(
        _get_device_index(device, optional=True)
    )
    return Stream(
        stream_id=streamdata[0],
        device_index=streamdata[1],
        device_type=streamdata[2],
    )


def synchronize(device: _device_t = None) -> None:
    r"""Wait for all kernels in all streams on an XPU device to complete.

    Args:
        device (torch.device or int, optional): device for which to synchronize.
            It uses the current device, given by :func:`~torch.xpu.current_device`,
            if :attr:`device` is ``None`` (default).
    """
    _lazy_init()
    device = _get_device_index(device, optional=True)
    return torch._C._xpu_synchronize(device)

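
# Minimal usage sketch for the stream helpers above, assuming a PyTorch build
# with XPU support and at least one visible device. `_example_stream_usage` is
# an illustrative name only (not part of the torch.xpu API) and is never
# called at import time.
def _example_stream_usage() -> None:
    if not torch.xpu.is_available():
        return
    side_stream = Stream()                     # new stream on the current device
    a = torch.randn(1024, device="xpu")
    with torch.xpu.stream(side_stream):
        # Kernels launched inside the block are enqueued on `side_stream`
        # instead of the default stream of the current device.
        b = a * 2
    torch.xpu.synchronize()                    # wait for all streams to finish
    print(b.sum().item())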
def empty_cache() -> None:
    r"""Release all unoccupied cached memory currently held by the caching
    allocator so that those can be used in other XPU applications.

    .. note::
        :func:`~torch.xpu.empty_cache` doesn't increase the amount of XPU
        memory available for PyTorch. However, it may help reduce fragmentation
        of XPU memory in certain cases.
    """
    if is_initialized():
        torch._C._xpu_emptyCache()


def _get_generator(device: torch.device) -> torch._C.Generator:
    r"""Return the XPU Generator object for the given device.

    Args:
        device (torch.device): selected device.
    """
    idx = device.index
    if idx is None:
        idx = current_device()
    return torch.xpu.default_generators[idx]


def _set_rng_state_offset(
    offset: int, device: Union[int, str, torch.device] = "xpu"
) -> None:
    r"""Set the random number generator state offset of the specified GPU.

    Args:
        offset (int): The desired offset
        device (torch.device or int, optional): The device to set the RNG state.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).
    """
    final_device = _get_device(device)

    def cb():
        default_generator = _get_generator(final_device)
        default_generator.set_offset(offset)

    _lazy_call(cb)


def _get_rng_state_offset(device: Union[int, str, torch.device] = "xpu") -> int:
    r"""Return the random number generator state offset of the specified GPU.

    Args:
        device (torch.device or int, optional): The device to return the RNG state offset of.
            Default: ``'xpu'`` (i.e., ``torch.device('xpu')``, the current XPU device).

    .. warning::
        This function eagerly initializes XPU.
    """
    _lazy_init()
    final_device = _get_device(device)
    default_generator = _get_generator(final_device)
    return default_generator.get_offset()


from .random import *  # noqa: F403


__all__ = [
    "Event",
    "Stream",
    "StreamContext",
    "current_device",
    "current_stream",
    "default_generators",
    "device",
    "device_count",
    "device_of",
    "empty_cache",
    "get_device_capability",
    "get_device_name",
    "get_device_properties",
    "get_rng_state",
    "get_rng_state_all",
    "get_stream",
    "init",
    "initial_seed",
    "is_available",
    "is_bf16_supported",
    "is_initialized",
    "manual_seed",
    "manual_seed_all",
    "seed",
    "seed_all",
    "set_device",
    "set_rng_state",
    "set_rng_state_all",
    "set_stream",
    "stream",
    "streams",
    "synchronize",
]