Example #1
    def train_dataloader(self):
        transforms_augment_cpu = []
        transforms_augment = []

        #transforms_augment_cpu.append(rtr.intensity.RandomAddValue(UniformParameter(-0.2, 0.2)))
        #cpu_transforms = Compose(transforms_augment_cpu)

        keys = ('data', 'label')
        # transforms_augment.append(rtr.GaussianNoise(0., 0.05))
        transforms_augment.append(rtr.Rot90(dims=(0, 1, 2), keys=keys))
        transforms_augment.append(rtr.Mirror(dims=(0, 1, 2), keys=keys))
        #transforms_augment.append(ElasticDeformer3d(32, 4, keys=keys,
        #    interp_mode={ 'data': 'linear', 'label': 'nearest' }))
        #transforms_augment.append(rtr.BaseAffine(
        #    scale=UniformParameter(0.95, 1.05),
        #    rotation=UniformParameter(-45, 45), degree=True,
        #    translation=UniformParameter(-0.05, 0.05),
        #    keys=('data', 'label'),
        #    interpolation_mode='nearest'))
        gpu_transforms = Compose(transforms_augment)
        return DataLoader(self.train_dataset,
                          batch_size=self.hparams.batch_size,
                          num_workers=self.hparams.num_loader_workers,
                          shuffle=True,
                          #batch_transforms=cpu_transforms,
                          gpu_transforms=gpu_transforms,
                          pin_memory=True)
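
These snippets appear to come from a PyTorch Lightning-style module that uses the rising library's DataLoader. Below is a minimal sketch of the imports they rely on, assuming rising's public module layout; ElasticDeformer3d in the commented-out lines is a project-specific transform, not part of rising.

import rising.transforms as rtr
from rising.loading import DataLoader
from rising.random import UniformParameter
from rising.transforms import Compose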
Example #2
    def val_dataloader(self):
        gpu_transforms = []
        keys = ('data', 'label')
        gpu_transforms.append(rtr.Rot90(dims=(0, 1, 2), keys=keys))
        gpu_transforms.append(rtr.Mirror(dims=(0, 1, 2), keys=keys))
        gpu_transforms = Compose(gpu_transforms)

        return DataLoader(self.val_dataset,
                          batch_size=2 * self.hparams.batch_size,
                          num_workers=self.hparams.num_loader_workers,
                          shuffle=False,
                          gpu_transforms=gpu_transforms,
                          pin_memory=True)
Example #3
    def train_dataloader(self):
        keys = ('data', 'label')
        transforms_augment = []
        transforms_augment.append(rtr.Rot90(dims=(0, 1, 2), keys=keys))
        transforms_augment.append(rtr.Mirror(dims=(0, 1, 2), keys=keys))
        #transforms_augment.append(ElasticDeformer3d(32, 4, keys=keys,
        #    interp_mode={ 'data': 'linear', 'label': 'nearest' }))
        gpu_transforms = Compose(transforms_augment)
        return DataLoader(self.train_dataset,
                          batch_size=self.hparams.batch_size,
                          num_workers=self.hparams.num_loader_workers,
                          shuffle=True,
                          gpu_transforms=gpu_transforms,
                          pin_memory=True)
Example #4
    def val_dataloader(self):
        gpu_transforms = []
        gpu_transforms.append(rtr.Rot90(dims=(0, 1, 2), keys=('data', 'label')))
        gpu_transforms.append(rtr.Mirror(dims=(0, 1, 2), keys=('data', 'label')))
        gpu_transforms = Compose(gpu_transforms)

        # batch_transforms = []
        # batch_transforms.append(BatchRandomCrop(self.hparams.crop_size, bs=1, dist=0, keys=('data', 'label')))
        # batch_transforms = Compose(batch_transforms)

        return DataLoader(self.val_dataset,
                          batch_size=2 * self.hparams.batch_size,
                          num_workers=self.hparams.num_loader_workers,
                          shuffle=False,
                          # batch_transforms=batch_transforms,
                          gpu_transforms=gpu_transforms,
                          pin_memory=True)
Example #5
File: loader.py  Project: kiminh/rising
    def __init__(self,
                 dataset: Union[Sequence, Dataset],
                 batch_size: int = 1,
                 shuffle: bool = False,
                 batch_transforms: Optional[Callable] = None,
                 gpu_transforms: Optional[Callable] = None,
                 sample_transforms: Optional[Callable] = None,
                 pseudo_batch_dim: bool = False,
                 device: Optional[Union[str, torch.device]] = None,
                 sampler: Optional[Sampler] = None,
                 batch_sampler: Optional[Sampler] = None,
                 num_workers: int = 0,
                 collate_fn: Optional[Callable] = None,
                 pin_memory: bool = False,
                 drop_last: bool = False,
                 timeout: Union[int, float] = 0,
                 worker_init_fn: Optional[Callable] = None,
                 multiprocessing_context=None,
                 auto_convert: bool = True,
                 transform_call: Callable[[Any, Callable],
                                          Any] = default_transform_call):
        """
        Args:
            dataset: dataset from which to load the data
            batch_size: how many samples per batch to load (default: ``1``).
            shuffle: set to ``True`` to have the data reshuffled at every epoch
                (default: ``False``)
            batch_transforms: transforms which can be applied to a whole
                batch. Usually this accepts either mappings or sequences and
                returns the same type containing transformed elements
            gpu_transforms: transforms which can be applied to a whole batch
                (on the GPU). Unlike :attr:`batch_transforms` this is not
                done in multiple processes, but in the main process on the
                GPU, because GPUs are capable of non-blocking and asynchronous
                working. Before executing these transforms all data will be
                moved to :attr:`device`. This copy is done in a non-blocking
                way if :attr:`pin_memory` is set to True.
            sample_transforms: transforms applied to each sample (on CPU).
                These are the first transforms applied to the data, since they
                are applied on sample retrieval from dataset before batching
                occurs.
            pseudo_batch_dim: whether the :attr:`sample_transforms` work on
                batches and thus need a pseudo batch dim of 1 to work
                correctly.
            device: the device to move the data to for gpu_transforms.
                If None: the device will be the current device.
            sampler: defines the strategy to draw samples from
                the dataset. If specified, :attr:`shuffle` must be ``False``.
            batch_sampler: like :attr:`sampler`, but returns a batch of
                indices at a time. Mutually exclusive with :attr:`batch_size`,
                :attr:`shuffle`, :attr:`sampler`, and :attr:`drop_last`.
            num_workers: how many subprocesses to use for data loading.
                ``0`` means that the data will be loaded in the main process.
                (default: ``0``)
            collate_fn: merges a list of samples to form a
                mini-batch of Tensor(s).  Used when using batched loading from a
                map-style dataset.
            pin_memory: If ``True``, the data loader will copy Tensors
                into CUDA pinned memory before returning them.  If your data
                elements are a custom type, or your :attr:`collate_fn` returns a
                batch that is a custom type, see the example below.
            drop_last: set to ``True`` to drop the last incomplete batch,
                if the dataset size is not divisible by the batch size.
                If ``False`` and the size of dataset is not divisible by the batch
                size, then the last batch will be smaller. (default: ``False``)
            timeout: if positive, the timeout value for collecting a batch
                from workers. Should always be non-negative. (default: ``0``)
            worker_init_fn: If not ``None``, this will be called on each
                worker subprocess with the worker id
                (an int in ``[0, num_workers - 1]``) as input, after seeding and
                before data loading. (default: ``None``)
            auto_convert: if set to ``True``, the batches will always be
                transformed to :class:`torch.Tensors`, if possible.
                (default: ``True``)
            transform_call: function which determines how transforms are
                called. By default Mappings and Sequences are unpacked during
                the transform.
        """
        super().__init__(dataset=dataset,
                         batch_size=batch_size,
                         shuffle=shuffle,
                         sampler=sampler,
                         batch_sampler=batch_sampler,
                         num_workers=num_workers,
                         collate_fn=collate_fn,
                         pin_memory=pin_memory,
                         drop_last=drop_last,
                         timeout=timeout,
                         worker_init_fn=worker_init_fn,
                         multiprocessing_context=multiprocessing_context)

        # No CUDA device available: run the GPU transforms on the CPU as part
        # of the batch transforms instead.
        if gpu_transforms is not None and not torch.cuda.is_available():
            if hasattr(gpu_transforms, 'to'):
                gpu_transforms = gpu_transforms.to('cpu')
            transforms = (batch_transforms, gpu_transforms
                          ) if batch_transforms is not None else gpu_transforms
            batch_transforms = Compose(transforms)
            warnings.warn(
                "No CUDA-capable device was found. "
                "Applying GPU-Transforms on CPU instead.", RuntimeWarning)
            gpu_transforms = None

        self.batch_transforms = batch_transforms

        # Move each batch to the target device (non-blocking when pin_memory
        # is set) before applying the GPU transforms.
        if gpu_transforms is not None:
            if device is None:
                device = torch.cuda.current_device()

            to_gpu_trafo = ToDevice(device=device, non_blocking=pin_memory)

            gpu_transforms = Compose(to_gpu_trafo, gpu_transforms)
            gpu_transforms = gpu_transforms.to(device)

        self.sample_transforms = sample_transforms
        self.pseudo_batch_dim = pseudo_batch_dim and sample_transforms is not None
        self.gpu_transforms = gpu_transforms
        self.auto_convert = auto_convert
        self.transform_call = transform_call
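
To make the interplay of the transform arguments concrete, the following is a hedged usage sketch (not part of loader.py): sample_transforms run per sample on the CPU as items are fetched, batch_transforms run on the collated batch in the worker processes, and gpu_transforms run in the main process after the batch has been moved to device. The dataset name is a placeholder and the transforms are borrowed from the snippets above.

import torch
import rising.transforms as rtr
from rising.loading import DataLoader
from rising.random import UniformParameter

# `my_dataset` stands in for any map-style dataset whose samples look like
# {'data': tensor, 'label': tensor}.
loader = DataLoader(
    my_dataset,
    batch_size=4,
    shuffle=True,
    # per-sample CPU transform; pseudo_batch_dim adds a temporary batch axis
    # because rising transforms operate on batched tensors
    sample_transforms=rtr.intensity.RandomAddValue(UniformParameter(-0.2, 0.2)),
    pseudo_batch_dim=True,
    # applied to the collated batch inside the worker processes
    batch_transforms=rtr.GaussianNoise(0., 0.05),
    # applied in the main process once the batch is on `device`
    gpu_transforms=rtr.Compose(
        rtr.Rot90(dims=(0, 1, 2), keys=('data', 'label')),
        rtr.Mirror(dims=(0, 1, 2), keys=('data', 'label'))),
    device='cuda:0' if torch.cuda.is_available() else None,
    num_workers=2,
    pin_memory=True)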