import contextlib
import functools
from typing import Callable, Optional

from typing_extensions import deprecated

import torch
from torch._library.utils import Kernel, RegistrationHandle
 G dd dZeeeddd	Zd
d Zeaeed< e jdd ZG dd dZdS )    N)CallableOptional)
deprecated)KernelRegistrationHandlec                   @   s0   e Zd ZdZedddZeeedddZdS )	AbstractImplHolderz0A holder where one can register an fake impl to.)qualnamec                 C   s   || _ d | _d | _d S N)r   kernellib)selfr    r   N/var/www/html/venv/lib/python3.8/site-packages/torch/_library/abstract_impl.py__init__   s    zAbstractImplHolder.__init__)funcsourcereturnc                    s    j dk	r&td j d j j dtj jdrHtd j dtj jdrjtd j dt|| _  jdkr j	d	d
 }tj
|d _t j } j j|d  fdd}t|S )z}Register an fake impl.

        Returns a RegistrationHandle that one can use to de-register this
        fake impl.
        """
        if self.kernel is not None:
            raise RuntimeError(
                f"register_fake(...): the operator {self.qualname} "
                f"already has a fake impl registered at "
                f"{self.kernel.source}."
            )
        if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
            raise RuntimeError(
                f"register_fake(...): the operator {self.qualname} "
                f"already has a DispatchKey::Meta implementation via a "
                f"pre-existing torch.library or TORCH_LIBRARY registration. "
                f"Please either remove that registration or don't call "
                f"register_fake."
            )
        if torch._C._dispatch_has_kernel_for_dispatch_key(
            self.qualname, "CompositeImplicitAutograd"
        ):
            raise RuntimeError(
                f"register_fake(...): the operator {self.qualname} "
                f"already has an implementation for this device type via a "
                f"pre-existing registration to "
                f"DispatchKey::CompositeImplicitAutograd. "
                f"CompositeImplicitAutograd operators do not need a fake "
                f"impl; instead, the operator will decompose into its "
                f"constituents and those can have fake impls defined on them."
            )

        # Store the kernel in this holder.
        self.kernel = Kernel(func, source)

        # Also register the fake impl to the Meta dispatch key.
        if self.lib is None:
            ns = self.qualname.split("::")[0]
            self.lib = torch.library.Library(ns, "FRAGMENT")
        meta_kernel = construct_meta_kernel(self.qualname, self)
        self.lib.impl(self.qualname, meta_kernel, "Meta")

        def deregister_fake_class():
            if self.lib:
                self.lib._destroy()
                self.lib = None
            self.kernel = None

        return RegistrationHandle(deregister_fake_class)
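

# Illustrative only (operator and library names are hypothetical):
# AbstractImplHolder.register is normally reached through the public
# torch.library API rather than called directly:
#
#     lib = torch.library.Library("mylib", "FRAGMENT")
#     lib.define("mylib::custom_op(Tensor x) -> Tensor")
#
#     @torch.library.register_fake("mylib::custom_op")
#     def _(x):
#         # Must describe output metadata without reading real data.
#         return torch.empty_like(x)
#
# register() returns a RegistrationHandle; calling its destroy() method
# removes the fake impl again.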


def construct_meta_kernel(
    qualname: str, abstract_impl_holder: AbstractImplHolder
) -> Callable:
    assert abstract_impl_holder.kernel is not None

    @functools.wraps(abstract_impl_holder.kernel.func)
    def meta_kernel(*args, **kwargs):
        assert abstract_impl_holder.kernel is not None
        source = abstract_impl_holder.kernel.source

        def error_on_ctx():
            raise RuntimeError(
                f"Attempted to call get_ctx() for the meta implementation "
                f"for {qualname} (implemented at {source}). "
                f"You have presumably called get_ctx() because the operator "
                f"has a data-dependent output shape; if so, there is no "
                f"such meta implementation and this error is the correct "
                f"behavior."
            )

        # While the fake impl runs under the Meta key, get_ctx() must fail
        # loudly: data-dependent shapes cannot be created here.
        with set_ctx_getter(error_on_ctx):
            return abstract_impl_holder.kernel(*args, **kwargs)

    return meta_kernel
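

# Illustrative only: if a fake impl with a data-dependent output shape (one
# that calls get_ctx()) is invoked through the Meta kernel above, error_on_ctx
# fires. Hypothetical repro for an operator registered as above:
#
#     x = torch.empty(3, device="meta")
#     torch.ops.mylib.custom_op(x)  # RuntimeError if the impl calls get_ctx()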


def get_none():
    return None


global_ctx_getter: Callable = get_none


@contextlib.contextmanager
def set_ctx_getter(ctx_getter):
    """Temporarily installs ``ctx_getter`` as the provider behind get_ctx()."""
    global global_ctx_getter
    prev = global_ctx_getter
    try:
        global_ctx_getter = ctx_getter
        yield
    finally:
        global_ctx_getter = prev
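

# Illustrative sketch (the exact public wiring lives elsewhere in
# torch.library): get_ctx() is expected to consult the getter installed
# above, roughly:
#
#     def get_ctx():
#         return global_ctx_getter()
#
# While no fake kernel is running this returns None (via get_none); the meta
# kernel above swaps in error_on_ctx for the duration of the call.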


class AbstractImplCtx:
    """
    Context object for writing fake implementations for custom operators.
    """

    def __init__(self, _fake_mode, _op):
        self._fake_mode = _fake_mode
        self._shape_env = _fake_mode.shape_env
        self._op = _op

    @deprecated(
        "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
        category=FutureWarning,
    )
    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
        return self.new_dynamic_size(min=min, max=max)

    def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
        """Constructs a new symint (symbolic int) representing a data-dependent value.

        This is useful for writing the fake implementation (which is necessary
        for torch.compile) for a CustomOp where an output Tensor has a size
        that depends on the data of the input Tensors.

        Args:
            min (int): A statically known inclusive lower bound for this symint. Default: 0
            max (Optional[int]): A statically known inclusive upper bound for this
                symint. Default: None

        .. warning::

            It is important that the ``min`` and ``max`` (if not None) values are
            set correctly; otherwise, there will be undefined behavior under
            torch.compile. Note that torch.compile specializes on sizes 0 and 1,
            which is why the deprecated ``create_unbacked_symint`` defaults
            ``min`` to 2.

            You must also verify that your implementation on concrete Tensors
            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
            to the symint also respects these constraints.
            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
            implementation that the size follows these bounds.
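
            For example, for a hypothetical operator whose output size is at
            most ``x.numel()``::

                # fake impl
                nnz = ctx.new_dynamic_size(max=x.numel())
                # CPU impl: check the concrete size against the same bound
                assert res.shape[0] <= x.numel()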

        Example::

            >>> # An operator with data-dependent output shape
            >>> lib = torch.library.Library("mymodule", "FRAGMENT")
            >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
            >>>
            >>> @torch.library.register_fake("mymodule::custom_nonzero")
            >>> def _(x):
            >>>     # The number of nonzero elements is data-dependent.
            >>>     # Since we cannot peek at the data in a fake impl,
            >>>     # we use the ctx object to construct a new symint that
            >>>     # represents the data-dependent size.
            >>>     ctx = torch.library.get_ctx()
            >>>     nnz = ctx.new_dynamic_size()
            >>>     shape = [nnz, x.dim()]
            >>>     result = x.new_empty(shape, dtype=torch.int64)
            >>>     return result
            >>>
            >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
            >>> def _(x):
            >>>     import numpy as np
            >>>     x_np = x.numpy()
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     return torch.tensor(res, device=x.device)

        Nzctx.new_dynamic_size(min=z, max=zZ): expected min and max to be statically known ints but got SymInt. This is not supported.r   zc, ...): expected min to be greater than or equal to 0: this API can only create non-negative sizes.r7   )r3   Zallow_dynamic_output_shape_opsr   Z_subclassesZfake_tensorZDynamicOutputShapeExceptionr4   
isinstanceSymInt
ValueErrorr;   ZfxZexperimentalZsymbolic_shapesZ_constrain_range_for_size)r   r8   r9   resultr   r   r   r:      s(    3


  z AbstractImplCtx.new_dynamic_size)r    r!   r"   r#   r   r   FutureWarningr   r=   r;   r:   r   r   r   r   r1   v   s   r1   )
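

# Illustrative sketch (hypothetical wiring, not part of this module): when a
# fake kernel actually runs under FakeTensorMode, the caller installs a real
# ctx getter so that get_ctx() yields an AbstractImplCtx:
#
#     ctx = AbstractImplCtx(fake_mode, op_overload)
#     with set_ctx_getter(lambda: ctx):
#         out = holder.kernel(*args, **kwargs)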