import bisect
import itertools
import math
import warnings
from typing import (
    cast,
    Dict,
    Generic,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
    Union,
)

from typing_extensions import deprecated

from torch import default_generator, randperm

from ... import Generator, Tensor

__all__ = [
    "Dataset",
    "IterableDataset",
    "TensorDataset",
    "StackDataset",
    "ConcatDataset",
    "ChainDataset",
    "Subset",
    "random_split",
]

T_co = TypeVar("T_co", covariant=True)
T = TypeVar("T")
T_dict = Dict[str, T_co]
T_tuple = Tuple[T_co, ...]
T_stack = TypeVar("T_stack", T_tuple, T_dict)


class Dataset(Generic[T_co]):
    r"""An abstract class representing a :class:`Dataset`.

    All datasets that represent a map from keys to data samples should subclass
    it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
    data sample for a given key. Subclasses could also optionally overwrite
    :meth:`__len__`, which is expected to return the size of the dataset by many
    :class:`~torch.utils.data.Sampler` implementations and the default options
    of :class:`~torch.utils.data.DataLoader`. Subclasses could also
    optionally implement :meth:`__getitems__`, for speedup batched samples
    loading. This method accepts list of indices of samples of batch and returns
    list of samples.

    .. note::
      :class:`~torch.utils.data.DataLoader` by default constructs an index
      sampler that yields integral indices.  To make it work with a map-style
      dataset with non-integral indices/keys, a custom sampler must be provided.
    """

    def __getitem__(self, index) -> T_co:
        raise NotImplementedError("Subclasses of Dataset should implement __getitem__.")

    # def __getitems__(self, indices: List) -> List[T_co]:
    # Intentionally not implemented here, to prevent false positives in the
    # fetcher check in torch.utils.data._utils.fetch._MapDatasetFetcher.

    def __add__(self, other: "Dataset[T_co]") -> "ConcatDataset[T_co]":
        return ConcatDataset([self, other])
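

# Usage sketch (illustrative only; ``SquaresDataset`` is a hypothetical class,
# not part of this module): a map-style dataset only needs ``__getitem__`` and,
# in practice, ``__len__`` so samplers and DataLoader can size it.
def _example_map_style_dataset() -> None:
    class SquaresDataset(Dataset[int]):
        def __init__(self, n: int) -> None:
            self.n = n

        def __len__(self) -> int:
            return self.n

        def __getitem__(self, index: int) -> int:
            return index * index

    ds = SquaresDataset(5)
    assert len(ds) == 5
    assert ds[3] == 9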


class IterableDataset(Dataset[T_co], Iterable[T_co]):
    r"""An iterable Dataset.

    All datasets that represent an iterable of data samples should subclass it.
    This form of dataset is particularly useful when data come from a stream.

    All subclasses should overwrite :meth:`__iter__`, which would return an
    iterator of samples in this dataset.

    When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
    item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
    iterator. When :attr:`num_workers > 0`, each worker process will have a
    different copy of the dataset object, so it is often desired to configure
    each copy independently to avoid having duplicate data returned from the
    workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
    process, returns information about the worker. It can be used in either the
    dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
    :attr:`worker_init_fn` option to modify each copy's behavior.

    Example 1: splitting workload across all workers in :meth:`__iter__`::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
        >>> # xdoctest: +SKIP("Fails on MacOS12")
        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         worker_info = torch.utils.data.get_worker_info()
        ...         if worker_info is None:  # single-process data loading, return the full iterator
        ...             iter_start = self.start
        ...             iter_end = self.end
        ...         else:  # in a worker process
        ...             # split workload
        ...             per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
        ...             worker_id = worker_info.id
        ...             iter_start = self.start + worker_id * per_worker
        ...             iter_end = min(iter_start + per_worker, self.end)
        ...         return iter(range(iter_start, iter_end))
        ...
        >>> # should give the same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [tensor([3]), tensor([4]), tensor([5]), tensor([6])]

        >>> # xdoctest: +REQUIRES(POSIX)
        >>> # Multi-process loading with two worker processes
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> # xdoctest: +IGNORE_WANT("non deterministic")
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [tensor([3]), tensor([5]), tensor([4]), tensor([6])]

        >>> # With even more workers
        >>> # xdoctest: +IGNORE_WANT("non deterministic")
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12)))
        [tensor([3]), tensor([5]), tensor([4]), tensor([6])]

    Example 2: splitting workload across all workers using :attr:`worker_init_fn`::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_DATALOADER)
        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         return iter(range(self.start, self.end))
        ...
        >>> # should give the same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [3, 4, 5, 6]
        >>>
        >>> # Directly doing multi-process loading yields duplicate data
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [3, 3, 4, 4, 5, 5, 6, 6]

        >>> # Define a `worker_init_fn` that configures each dataset copy differently
        >>> def worker_init_fn(worker_id):
        ...     worker_info = torch.utils.data.get_worker_info()
        ...     dataset = worker_info.dataset  # the dataset copy in this worker process
        ...     overall_start = dataset.start
        ...     overall_end = dataset.end
        ...     # configure the dataset to only process the split workload
        ...     per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
        ...     worker_id = worker_info.id
        ...     dataset.start = overall_start + worker_id * per_worker
        ...     dataset.end = min(dataset.start + per_worker, overall_end)
        ...

        >>> # Multi-process loading with the custom `worker_init_fn`
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
        [3, 5, 4, 6]

        >>> # With even more workers
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=12, worker_init_fn=worker_init_fn)))
        [3, 4, 5, 6]
    """

    def __add__(self, other: Dataset[T_co]):
        return ChainDataset([self, other])


class TensorDataset(Dataset[Tuple[Tensor, ...]]):
    r"""Dataset wrapping tensors.

    Each sample will be retrieved by indexing tensors along the first dimension.

    Args:
        *tensors (Tensor): tensors that have the same size of the first dimension.
    """

    tensors: Tuple[Tensor, ...]

    def __init__(self, *tensors: Tensor) -> None:
        assert all(
            tensors[0].size(0) == tensor.size(0) for tensor in tensors
        ), "Size mismatch between tensors"
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)
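

# Usage sketch (illustrative only; the data here is hypothetical): indexing a
# TensorDataset returns a tuple with one element per wrapped tensor, all
# sliced along dimension 0.
def _example_tensor_dataset() -> None:
    import torch

    features = torch.randn(10, 3)
    labels = torch.arange(10)
    ds = TensorDataset(features, labels)
    x, y = ds[2]  # (features[2], labels[2])
    assert x.shape == (3,)
    assert int(y) == 2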


class StackDataset(Dataset[T_stack]):
    r"""Dataset as a stacking of multiple datasets.

    This class is useful to assemble different parts of complex input data, given as datasets.

    Example:
        >>> # xdoctest: +SKIP
        >>> images = ImageDataset()
        >>> texts = TextDataset()
        >>> tuple_stack = StackDataset(images, texts)
        >>> tuple_stack[0] == (images[0], texts[0])
        >>> dict_stack = StackDataset(image=images, text=texts)
        >>> dict_stack[0] == {'image': images[0], 'text': texts[0]}

    Args:
        *args (Dataset): Datasets for stacking returned as tuple.
        **kwargs (Dataset): Datasets for stacking returned as dict.
    """

    datasets: Union[tuple, dict]

    def __init__(self, *args: Dataset[T_co], **kwargs: Dataset[T_co]) -> None:
        if args:
            if kwargs:
                raise ValueError(
                    "Supported either ``tuple``- (via ``args``) or "
                    "``dict``- (via ``kwargs``) like input/output, but both types are given."
                )
            self._length = len(args[0])
            if any(self._length != len(dataset) for dataset in args):
                raise ValueError("Size mismatch between datasets")
            self.datasets = args
        elif kwargs:
            tmp = list(kwargs.values())
            self._length = len(tmp[0])
            if any(self._length != len(dataset) for dataset in tmp):
                raise ValueError("Size mismatch between datasets")
            self.datasets = kwargs
        else:
            raise ValueError("At least one dataset should be passed")

    def __getitem__(self, index):
        if isinstance(self.datasets, dict):
            return {k: dataset[index] for k, dataset in self.datasets.items()}
        return tuple(dataset[index] for dataset in self.datasets)

    def __getitems__(self, indices: list):
        # Batched sampling: forward the whole index list to children that
        # implement ``__getitems__`` and fall back to per-index access for
        # the rest.
        if isinstance(self.datasets, dict):
            dict_batch: List[T_dict] = [{} for _ in indices]
            for k, dataset in self.datasets.items():
                if callable(getattr(dataset, "__getitems__", None)):
                    items = dataset.__getitems__(indices)
                    if len(items) != len(indices):
                        raise ValueError(
                            "Nested dataset's output size mismatch."
                            f" Expected {len(indices)}, got {len(items)}"
                        )
                    for data, d_sample in zip(items, dict_batch):
                        d_sample[k] = data
                else:
                    for idx, d_sample in zip(indices, dict_batch):
                        d_sample[k] = dataset[idx]
            return dict_batch

        # tuple output
        list_batch: List[list] = [[] for _ in indices]
        for dataset in self.datasets:
            if callable(getattr(dataset, "__getitems__", None)):
                items = dataset.__getitems__(indices)
                if len(items) != len(indices):
                    raise ValueError(
                        "Nested dataset's output size mismatch."
                        f" Expected {len(indices)}, got {len(items)}"
                    )
                for data, t_sample in zip(items, list_batch):
                    t_sample.append(data)
            else:
                for idx, t_sample in zip(indices, list_batch):
                    t_sample.append(dataset[idx])
        tuple_batch: List[T_tuple] = [tuple(sample) for sample in list_batch]
        return tuple_batch

    def __len__(self):
        return self._length
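

# Usage sketch (illustrative only; the data here is hypothetical):
# ``__getitems__`` fetches a whole batch in one call.  Children that implement
# it are queried in batch; the rest take the per-index fallback path
# (TensorDataset does not implement it, so the fallback is exercised here).
def _example_stack_dataset_batched() -> None:
    import torch

    images = TensorDataset(torch.randn(4, 2))
    labels = TensorDataset(torch.arange(4))
    ds = StackDataset(image=images, label=labels)
    batch = ds.__getitems__([0, 3])  # two dict samples in a single call
    assert len(batch) == 2
    assert set(batch[0]) == {"image", "label"}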


class ConcatDataset(Dataset[T_co]):
    r"""Dataset as a concatenation of multiple datasets.

    This class is useful to assemble different existing datasets.

    Args:
        datasets (sequence): List of datasets to be concatenated
    """

    datasets: List[Dataset[T_co]]
    cumulative_sizes: List[int]

    @staticmethod
    def cumsum(sequence):
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = list(datasets)
        assert len(self.datasets) > 0, "datasets should not be an empty iterable"
        for d in self.datasets:
            assert not isinstance(
                d, IterableDataset
            ), "ConcatDataset does not support IterableDataset"
        self.cumulative_sizes = self.cumsum(self.datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    "absolute value of index should not exceed dataset length"
                )
            idx = len(self) + idx
        # Locate the child dataset holding ``idx`` via binary search over the
        # running sizes, then localize the index within that child.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    @property
    @deprecated(
        "`cummulative_sizes` attribute is renamed to `cumulative_sizes`",
        category=FutureWarning,
    )
    def cummulative_sizes(self):
        return self.cumulative_sizes
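

# Usage sketch (illustrative only; the data here is hypothetical): a global
# index is routed to the right child dataset through ``cumulative_sizes``.
def _example_concat_dataset() -> None:
    import torch

    a = TensorDataset(torch.arange(3))     # samples 0..2
    b = TensorDataset(torch.arange(3, 5))  # samples 3..4
    ds = ConcatDataset([a, b])
    assert len(ds) == 5
    assert int(ds[3][0]) == 3   # first sample of the second dataset
    assert int(ds[-1][0]) == 4  # negative indices count from the end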
r   a_  Dataset for chaining multiple :class:`IterableDataset` s.

    This class is useful to assemble different existing dataset streams. The
    chaining operation is done on-the-fly, so concatenating large-scale
    datasets with this class will be efficient.

    Args:
        datasets (iterable of IterableDataset): datasets to be chained together
    """

    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super().__init__()
        self.datasets = datasets

    def __iter__(self):
        for d in self.datasets:
            assert isinstance(
                d, IterableDataset
            ), "ChainDataset only supports IterableDataset"
            yield from d

    def __len__(self):
        total = 0
        for d in self.datasets:
            assert isinstance(
                d, IterableDataset
            ), "ChainDataset only supports IterableDataset"
            total += len(d)
        return total
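

# Usage sketch (illustrative only; ``RangeStream`` is a hypothetical class):
# chaining concatenates streams lazily, so nothing is materialized until the
# chain is iterated.
def _example_chain_dataset() -> None:
    class RangeStream(IterableDataset[int]):
        def __init__(self, start: int, end: int) -> None:
            self.start, self.end = start, end

        def __iter__(self):
            return iter(range(self.start, self.end))

        def __len__(self) -> int:
            return self.end - self.start

    chained = ChainDataset([RangeStream(0, 2), RangeStream(2, 4)])
    assert list(chained) == [0, 1, 2, 3]
    assert len(chained) == 4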


class Subset(Dataset[T_co]):
    r"""
    Subset of a dataset at specified indices.

    Args:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """

    dataset: Dataset[T_co]
    indices: Sequence[int]

    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        if isinstance(idx, list):
            return self.dataset[[self.indices[i] for i in idx]]
        return self.dataset[self.indices[idx]]

    def __getitems__(self, indices: List[int]) -> List[T_co]:
        # Forward batched fetches to the parent dataset when it supports them
        # (see torch.utils.data._utils.fetch._MapDatasetFetcher).
        if callable(getattr(self.dataset, "__getitems__", None)):
            return self.dataset.__getitems__([self.indices[idx] for idx in indices])
        else:
            return [self.dataset[self.indices[idx]] for idx in indices]

    def __len__(self):
        return len(self.indices)


def random_split(
    dataset: Dataset[T],
    lengths: Sequence[Union[int, float]],
    generator: Optional[Generator] = default_generator,
) -> List[Subset[T]]:
    r"""
    Randomly split a dataset into non-overlapping new datasets of given lengths.

    If a list of fractions that sum up to 1 is given,
    the lengths will be computed automatically as
    floor(frac * len(dataset)) for each fraction provided.

    After computing the lengths, if there are any remainders, 1 count will be
    distributed in round-robin fashion to the lengths
    until there are no remainders left.

    Optionally fix the generator for reproducible results, e.g.:

    Example:
        >>> # xdoctest: +SKIP
        >>> generator1 = torch.Generator().manual_seed(42)
        >>> generator2 = torch.Generator().manual_seed(42)
        >>> random_split(range(10), [3, 7], generator=generator1)
        >>> random_split(range(30), [0.3, 0.3, 0.4], generator=generator2)

    Args:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths or fractions of splits to be produced
        generator (Generator): Generator used for the random permutation.
    """
    if math.isclose(sum(lengths), 1) and sum(lengths) <= 1:
        # Interpret ``lengths`` as fractions and convert them to integer counts.
        subset_lengths: List[int] = []
        for i, frac in enumerate(lengths):
            if frac < 0 or frac > 1:
                raise ValueError(f"Fraction at index {i} is not between 0 and 1")
            n_items_in_split = int(math.floor(len(dataset) * frac))
            subset_lengths.append(n_items_in_split)
        remainder = len(dataset) - sum(subset_lengths)
        # Add 1 to the lengths in round-robin fashion until the remainder is 0.
        for i in range(remainder):
            idx_to_add_at = i % len(subset_lengths)
            subset_lengths[idx_to_add_at] += 1
        lengths = subset_lengths
        for i, length in enumerate(lengths):
            if length == 0:
                warnings.warn(
                    f"Length of split at index {i} is 0. "
                    f"This might result in an empty dataset."
                )

    if sum(lengths) != len(dataset):
        raise ValueError(
            "Sum of input lengths does not equal the length of the input dataset!"
        )

    indices = randperm(sum(lengths), generator=generator).tolist()
    lengths = cast(Sequence[int], lengths)
    return [
        Subset(dataset, indices[offset - length : offset])
        for offset, length in zip(itertools.accumulate(lengths), lengths)
    ]
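

# Usage sketch (illustrative only; the data here is hypothetical): fractional
# lengths are resolved to integer counts, and a seeded generator makes the
# split reproducible across runs.
def _example_random_split() -> None:
    import torch

    ds = TensorDataset(torch.arange(10))
    g = torch.Generator().manual_seed(0)
    train, val = random_split(ds, [0.8, 0.2], generator=g)
    assert len(train) == 8
    assert len(val) == 2
    assert all(isinstance(s, Subset) for s in (train, val))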