"""
This file is part of the private API. Please do not use these classes directly, as they will be modified in
future versions without warning. The classes should be accessed only via the transforms argument of Weights.
"""
from typing import Optional, Tuple

import torch
from torch import nn, Tensor

from . import functional as F, InterpolationMode


__all__ = [
    "ObjectDetection",
    "ImageClassification",
    "VideoClassification",
    "SemanticSegmentation",
    "OpticalFlow",
]


class ObjectDetection(nn.Module):
    def forward(self, img: Tensor) -> Tensor:
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        return F.convert_image_dtype(img, torch.float)

    def __repr__(self) -> str:
        return self.__class__.__name__ + "()"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            "The images are rescaled to ``[0.0, 1.0]``."
        )


class ImageClassification(nn.Module):
    def __init__(
        self,
        *,
        crop_size: int,
        resize_size: int = 256,
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias: Optional[bool] = True,
    ) -> None:
        super().__init__()
        self.crop_size = [crop_size]
        self.resize_size = [resize_size]
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img: Tensor) -> Tensor:
        img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
        img = F.center_crop(img, self.crop_size)
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        img = F.normalize(img, mean=self.mean, std=self.std)
        return img

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    crop_size={self.crop_size}"
        format_string += f"\n    resize_size={self.resize_size}"
        format_string += f"\n    mean={self.mean}"
        format_string += f"\n    std={self.std}"
        format_string += f"\n    interpolation={self.interpolation}"
        format_string += "\n)"
        return format_string

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``."
        )


class VideoClassification(nn.Module):
    def __init__(
        self,
        *,
        crop_size: Tuple[int, int],
        resize_size: Tuple[int, int],
        mean: Tuple[float, ...] = (0.43216, 0.394666, 0.37645),
        std: Tuple[float, ...] = (0.22803, 0.22145, 0.216989),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
    ) -> None:
        super().__init__()
        self.crop_size = list(crop_size)
        self.resize_size = list(resize_size)
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation

    def forward(self, vid: Tensor) -> Tensor:
        need_squeeze = False
        if vid.ndim < 5:
            vid = vid.unsqueeze(dim=0)
            need_squeeze = True

        N, T, C, H, W = vid.shape
        vid = vid.view(-1, C, H, W)
        vid = F.resize(vid, self.resize_size, interpolation=self.interpolation, antialias=True)
        vid = F.center_crop(vid, self.crop_size)
        vid = F.convert_image_dtype(vid, torch.float)
        vid = F.normalize(vid, mean=self.mean, std=self.std)
        H, W = self.crop_size
        vid = vid.view(N, T, C, H, W)
        vid = vid.permute(0, 2, 1, 3, 4)  # (N, T, C, H, W) => (N, C, T, H, W)

        if need_squeeze:
            vid = vid.squeeze(dim=0)
        return vid

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    crop_size={self.crop_size}"
        format_string += f"\n    resize_size={self.resize_size}"
        format_string += f"\n    mean={self.mean}"
        format_string += f"\n    std={self.std}"
        format_string += f"\n    interpolation={self.interpolation}"
        format_string += "\n)"
        return format_string

    def describe(self) -> str:
        return (
            "Accepts batched ``(B, T, C, H, W)`` and single ``(T, C, H, W)`` video frame ``torch.Tensor`` objects. "
            f"The frames are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``, "
            f"followed by a central crop of ``crop_size={self.crop_size}``. Finally the values are first rescaled to "
            f"``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and ``std={self.std}``. Finally the output "
            "dimensions are permuted to ``(..., C, T, H, W)`` tensors."
        )


class SemanticSegmentation(nn.Module):
    def __init__(
        self,
        *,
        resize_size: Optional[int],
        mean: Tuple[float, ...] = (0.485, 0.456, 0.406),
        std: Tuple[float, ...] = (0.229, 0.224, 0.225),
        interpolation: InterpolationMode = InterpolationMode.BILINEAR,
        antialias: Optional[bool] = True,
    ) -> None:
        super().__init__()
        self.resize_size = [resize_size] if resize_size is not None else None
        self.mean = list(mean)
        self.std = list(std)
        self.interpolation = interpolation
        self.antialias = antialias

    def forward(self, img: Tensor) -> Tensor:
        if isinstance(self.resize_size, list):
            img = F.resize(img, self.resize_size, interpolation=self.interpolation, antialias=self.antialias)
        if not isinstance(img, Tensor):
            img = F.pil_to_tensor(img)
        img = F.convert_image_dtype(img, torch.float)
        img = F.normalize(img, mean=self.mean, std=self.std)
        return img

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        format_string += f"\n    resize_size={self.resize_size}"
        format_string += f"\n    mean={self.mean}"
        format_string += f"\n    std={self.std}"
        format_string += f"\n    interpolation={self.interpolation}"
        format_string += "\n)"
        return format_string

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            f"The images are resized to ``resize_size={self.resize_size}`` using ``interpolation={self.interpolation}``. "
            f"Finally the values are first rescaled to ``[0.0, 1.0]`` and then normalized using ``mean={self.mean}`` and "
            f"``std={self.std}``."
        )


class OpticalFlow(nn.Module):
    def forward(self, img1: Tensor, img2: Tensor) -> Tuple[Tensor, Tensor]:
        if not isinstance(img1, Tensor):
            img1 = F.pil_to_tensor(img1)
        if not isinstance(img2, Tensor):
            img2 = F.pil_to_tensor(img2)

        img1 = F.convert_image_dtype(img1, torch.float)
        img2 = F.convert_image_dtype(img2, torch.float)

        # map [0, 1] into [-1, 1]
        img1 = F.normalize(img1, mean=0.5, std=0.5)
        img2 = F.normalize(img2, mean=0.5, std=0.5)

        img1 = img1.contiguous()
        img2 = img2.contiguous()

        return img1, img2

    def __repr__(self) -> str:
        return self.__class__.__name__ + "()"

    def describe(self) -> str:
        return (
            "Accepts ``PIL.Image``, batched ``(B, C, H, W)`` and single ``(C, H, W)`` image ``torch.Tensor`` objects. "
            "The images are rescaled to ``[-1.0, 1.0]``."
        )