import warnings
from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union

import PIL.Image
import torch
from torch.utils._pytree import tree_flatten, tree_unflatten

from torchvision import transforms as _transforms, tv_tensors
from torchvision.transforms.v2 import functional as F, Transform

from ._utils import _parse_labels_getter, _setup_number_or_seq, _setup_size, get_bounding_boxes, has_any, is_pure_tensor


class Identity(Transform):
    # Pass-through transform: every input is returned unchanged.
    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return inpt


class Lambda(Transform):
    """Apply a user-defined function as a transform.

    This transform does not support torchscript.

    Args:
        lambd (function): Lambda/function to be used for transform.
    """

    _transformed_types = (object,)

    def __init__(self, lambd: Callable[[Any], Any], *types: Type):
        super().__init__()
        self.lambd = lambd
        self.types = types or self._transformed_types

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        if isinstance(inpt, self.types):
            return self.lambd(inpt)
        else:
            return inpt

    def extra_repr(self) -> str:
        extras = []
        name = getattr(self.lambd, "__name__", None)
        if name:
            extras.append(name)
        extras.append(f"types={[type.__name__ for type in self.types]}")
        return ", ".join(extras)

class LinearTransformation(Transform):
    """Transform a tensor image or video with a square transformation matrix and a mean_vector computed offline.

    This transform does not support PIL Image.
    Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
    subtract mean_vector from it which is then followed by computing the dot
    product with the transformation matrix and then reshaping the tensor to its
    original shape.

    Applications:
        whitening transformation: Suppose X is a column vector zero-centered data.
        Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
        perform SVD on this matrix and pass it as transformation_matrix.

    Args:
        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
        mean_vector (Tensor): tensor [D], D = C x H x W
    """

    _v1_transform_cls = _transforms.LinearTransformation

    _transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)

    def __init__(self, transformation_matrix: torch.Tensor, mean_vector: torch.Tensor):
        super().__init__()
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError(
                f"transformation_matrix should be square. Got {tuple(transformation_matrix.size())} rectangular matrix."
            )
        if mean_vector.size(0) != transformation_matrix.size(0):
            raise ValueError(
                f"mean_vector should have the same length {mean_vector.size(0)} as any one of the dimensions "
                f"of the transformation_matrix [{tuple(transformation_matrix.size())}]"
            )
        if transformation_matrix.device != mean_vector.device:
            raise ValueError(
                f"Input tensors should be on the same device. Got {transformation_matrix.device} and {mean_vector.device}"
            )
        if transformation_matrix.dtype != mean_vector.dtype:
            raise ValueError(
                f"Input tensors should have the same dtype. Got {transformation_matrix.dtype} and {mean_vector.dtype}"
            )
        self.transformation_matrix = transformation_matrix
        self.mean_vector = mean_vector

    def _check_inputs(self, sample: Any) -> Any:
        if has_any(sample, PIL.Image.Image):
            raise TypeError(f"{type(self).__name__}() does not support PIL images.")

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        shape = inpt.shape
        n = shape[-3] * shape[-2] * shape[-1]
        if n != self.transformation_matrix.shape[0]:
            raise ValueError(
                "Input tensor and transformation matrix have incompatible shape."
                f"[{shape[-3]} x {shape[-2]} x {shape[-1]}] != {self.transformation_matrix.shape[0]}"
            )
        if inpt.device.type != self.mean_vector.device.type:
            raise ValueError(
                "Input tensor should be on the same device as transformation matrix and mean vector. "
                f"Got {inpt.device} vs {self.mean_vector.device}"
            )

        flat_inpt = inpt.reshape(-1, n) - self.mean_vector
        transformation_matrix = self.transformation_matrix.to(flat_inpt.dtype)
        output = torch.mm(flat_inpt, transformation_matrix)
        output = output.reshape(shape)

        if isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
            output = tv_tensors.wrap(output, like=inpt)
        return output

eedddZeeeef ed	d
dZ  ZS )	Normalizea  Normalize a tensor image or video with mean and standard deviation.

    This transform does not support PIL Image.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],...,std[n])`` for ``n``
    channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``

    .. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.
    """

    _v1_transform_cls = _transforms.Normalize

    def __init__(self, mean: Sequence[float], std: Sequence[float], inplace: bool = False):
        super().__init__()
        self.mean = list(mean)
        self.std = list(std)
        self.inplace = inplace

    def _check_inputs(self, sample: Any) -> Any:
        if has_any(sample, PIL.Image.Image):
            raise TypeError(f"{type(self).__name__}() does not support PIL images.")

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.normalize, inpt, mean=self.mean, std=self.std, inplace=self.inplace)

class GaussianBlur(Transform):
    """Blurs image with randomly chosen Gaussian blur kernel.

    The convolution will be using reflection padding corresponding to the kernel size, to maintain the input shape.

    If the input is a Tensor, it is expected
    to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions.

    Args:
        kernel_size (int or sequence): Size of the Gaussian kernel.
        sigma (float or tuple of float (min, max)): Standard deviation to be used for
            creating kernel to perform blurring. If float, sigma is fixed. If it is tuple
            of float (min, max), sigma is chosen uniformly at random to lie in the
            given range.
    """

    _v1_transform_cls = _transforms.GaussianBlur

    def __init__(
        self, kernel_size: Union[int, Sequence[int]], sigma: Union[int, float, Sequence[float]] = (0.1, 2.0)
    ) -> None:
        super().__init__()
        self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers")
        for ks in self.kernel_size:
            if ks <= 0 or ks % 2 == 0:
                raise ValueError("Kernel size value should be an odd and positive number.")

        self.sigma = _setup_number_or_seq(sigma, "sigma")
        if not 0.0 < self.sigma[0] <= self.sigma[1]:
            raise ValueError(f"sigma values should be positive and of the form (min, max). Got {self.sigma}")

    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
        # Sample one sigma uniformly from [sigma_min, sigma_max] and use it for both axes.
        sigma = torch.empty(1).uniform_(self.sigma[0], self.sigma[1]).item()
        return dict(sigma=[sigma, sigma])

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.gaussian_blur, inpt, self.kernel_size, **params)


class GaussianNoise(Transform):
    """Add gaussian noise to images or videos.

    The input tensor is expected to be in [..., 1 or 3, H, W] format,
    where ... means it can have an arbitrary number of leading dimensions.
    Each image or frame in a batch will be transformed independently i.e. the
    noise added to each image will be different.

    The input tensor is also expected to be of float dtype in ``[0, 1]``.
    This transform does not support PIL images.

    Args:
        mean (float): Mean of the sampled normal distribution. Default is 0.
        sigma (float): Standard deviation of the sampled normal distribution. Default is 0.1.
        clip (bool, optional): Whether to clip the values in ``[0, 1]`` after adding noise. Default is True.
    """

    def __init__(self, mean: float = 0.0, sigma: float = 0.1, clip: bool = True) -> None:
        super().__init__()
        self.mean = mean
        self.sigma = sigma
        self.clip = clip

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.gaussian_noise, inpt, mean=self.mean, sigma=self.sigma, clip=self.clip)

eeje	ee
ef eej f f edd fddZee	eef eddd	Z  ZS )ToDtypea  Converts the input to a specific dtype, optionally scaling the values for images or videos.

    .. note::
        ``ToDtype(dtype, scale=True)`` is the recommended replacement for ``ConvertImageDtype(dtype)``.

    Args:
        dtype (``torch.dtype`` or dict of ``TVTensor`` -> ``torch.dtype``): The dtype to convert to.
            If a ``torch.dtype`` is passed, e.g. ``torch.float32``, only images and videos will be converted
            to that dtype: this is for compatibility with :class:`~torchvision.transforms.v2.ConvertImageDtype`.
            A dict can be passed to specify per-tv_tensor conversions, e.g.
            ``dtype={tv_tensors.Image: torch.float32, tv_tensors.Mask: torch.int64, "others": None}``. The "others"
            key can be used as a catch-all for any other tv_tensor type, and ``None`` means no conversion.
        scale (bool, optional): Whether to scale the values for images or videos. See :ref:`range_and_dtype`.
            Default: ``False``.
    """

    _transformed_types = (torch.Tensor,)

    def __init__(
        self, dtype: Union[torch.dtype, Dict[Union[Type, str], Optional[torch.dtype]]], scale: bool = False
    ) -> None:
        super().__init__()

        if not isinstance(dtype, (dict, torch.dtype)):
            raise ValueError(f"dtype must be a dict or a torch.dtype, got {type(dtype)} instead")

        if (
            isinstance(dtype, dict)
            and torch.Tensor in dtype
            and any(cls in dtype for cls in [tv_tensors.Image, tv_tensors.Video])
        ):
            warnings.warn(
                "Got `dtype` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
                "Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
                "in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
            )
        self.dtype = dtype
        self.scale = scale

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        if isinstance(self.dtype, torch.dtype):
            # A plain torch.dtype only applies to images and videos, for compatibility with ConvertImageDtype.
            if not is_pure_tensor(inpt) and not isinstance(inpt, (tv_tensors.Image, tv_tensors.Video)):
                return inpt
            dtype: Optional[torch.dtype] = self.dtype
        elif type(inpt) in self.dtype:
            dtype = self.dtype[type(inpt)]
        elif "others" in self.dtype:
            dtype = self.dtype["others"]
        else:
            raise ValueError(
                f"No dtype was specified for type {type(inpt)}. "
                "If you only need to convert the dtype of images or videos, you can just pass e.g. dtype=torch.float32. "
                "If you're passing a dict as dtype, you can use \"others\" as a catch-all key, "
                "e.g. dtype={tv_tensors.Mask: torch.int64, \"others\": None}, to pass-through the rest of the inputs."
            )

        supports_scaling = is_pure_tensor(inpt) or isinstance(inpt, (tv_tensors.Image, tv_tensors.Video))
        if dtype is None:
            if self.scale and supports_scaling:
                warnings.warn(
                    "scale was set to True but no dtype was specified for images or videos: no scaling will be done."
                )
            return inpt

        return self._call_kernel(F.to_dtype, inpt, dtype=dtype, scale=self.scale)

eeeef edddZ  ZS )	ConvertImageDtypea  [DEPRECATED] Use ``v2.ToDtype(dtype, scale=True)`` instead.

    Convert input image to the given ``dtype`` and scale the values accordingly.

    .. warning::
        Consider using ``ToDtype(dtype, scale=True)`` instead. See :class:`~torchvision.transforms.v2.ToDtype`.

    This function does not support PIL Image.

    Args:
        dtype (torch.dtype): Desired data type of the output

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """

    _v1_transform_cls = _transforms.ConvertImageDtype

    def __init__(self, dtype: torch.dtype = torch.float32) -> None:
        super().__init__()
        self.dtype = dtype

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        return self._call_kernel(F.to_dtype, inpt, dtype=self.dtype, scale=True)

class SanitizeBoundingBoxes(Transform):
    """Remove degenerate/invalid bounding boxes and their corresponding labels and masks.

    This transform removes bounding boxes and their associated labels/masks that:

    - are below a given ``min_size`` or ``min_area``: by default this also removes degenerate boxes that have e.g. X2 <= X1.
    - have any coordinate outside of their corresponding image. You may want to
      call :class:`~torchvision.transforms.v2.ClampBoundingBoxes` first to avoid undesired removals.

    It can also sanitize other tensors like the "iscrowd" or "area" properties from COCO
    (see ``labels_getter`` parameter).

    It is recommended to call it at the end of a pipeline, before passing the
    input to the models. It is critical to call this transform if
    :class:`~torchvision.transforms.v2.RandomIoUCrop` was called.
    If you want to be extra careful, you may call it after all transforms that
    may modify bounding boxes but once at the end should be enough in most
    cases.

    Args:
        min_size (float, optional): The size below which bounding boxes are removed. Default is 1.
        min_area (float, optional): The area below which bounding boxes are removed. Default is 1.
        labels_getter (callable or str or None, optional): indicates how to identify the labels in the input
            (or anything else that needs to be sanitized along with the bounding boxes).
            By default, this will try to find a "labels" key in the input (case-insensitive), if
            the input is a dict or it is a tuple whose second element is a dict.
            This heuristic should work well with a lot of datasets, including the built-in torchvision datasets.

            It can also be a callable that takes the same input as the transform, and returns either:

            - A single tensor (the labels)
            - A tuple/list of tensors, each of which will be subject to the same sanitization as the bounding boxes.
              This is useful to sanitize multiple tensors like the labels, and the "iscrowd" or "area" properties
              from COCO.

            If ``labels_getter`` is None then only bounding boxes are sanitized.
    """

    def __init__(
        self,
        min_size: float = 1.0,
        min_area: float = 1.0,
        labels_getter: Union[Callable[[Any], Any], str, None] = "default",
    ) -> None:
        super().__init__()

        if min_size < 1:
            raise ValueError(f"min_size must be >= 1, got {min_size}.")
        self.min_size = min_size

        if min_area < 1:
            raise ValueError(f"min_area must be >= 1, got {min_area}.")
        self.min_area = min_area

        self.labels_getter = labels_getter
        self._labels_getter = _parse_labels_getter(labels_getter)

    def forward(self, *inputs: Any) -> Any:
        inputs = inputs if len(inputs) > 1 else inputs[0]

        labels = self._labels_getter(inputs)
        if labels is not None:
            msg = "The labels in the input to forward() must be a tensor or None, got {type} instead."
            if isinstance(labels, torch.Tensor):
                labels = (labels,)
            elif isinstance(labels, (tuple, list)):
                for entry in labels:
                    if not isinstance(entry, torch.Tensor):
                        raise ValueError(msg.format(type=type(entry)))
            else:
                raise ValueError(msg.format(type=type(labels)))

        flat_inputs, spec = tree_flatten(inputs)
        boxes = get_bounding_boxes(flat_inputs)

        if labels is not None:
            for label in labels:
                if boxes.shape[0] != label.shape[0]:
                    raise ValueError(
                        f"Number of boxes (shape={boxes.shape}) and must match the number of labels."
                        f"Found labels with shape={label.shape})."
                    )

        valid = F._misc._get_sanitize_bounding_boxes_mask(
            boxes,
            format=boxes.format,
            canvas_size=boxes.canvas_size,
            min_size=self.min_size,
            min_area=self.min_area,
        )

        params = dict(valid=valid, labels=labels)
        flat_outputs = [self._transform(inpt, params) for inpt in flat_inputs]

        return tree_unflatten(flat_outputs, spec)

    def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
        is_label = params["labels"] is not None and any(inpt is label for label in params["labels"])
        is_bounding_boxes_or_mask = isinstance(inpt, (tv_tensors.BoundingBoxes, tv_tensors.Mask))

        if not (is_label or is_bounding_boxes_or_mask):
            return inpt

        output = inpt[params["valid"]]

        if is_label:
            return output
        else:
            return tv_tensors.wrap(output, like=inpt)
