def _create_dataset(self):
    self._dataset = DatasetFactory.get_by_name(self._opt.dataset_mode, self._opt, self._is_for_train)
    self._dataloader = torch.utils.data.DataLoader(
        self._dataset,
        batch_size=self._opt.batch_size,
        shuffle=not self._opt.serial_batches,  # serial_batches disables shuffling
        num_workers=int(self._num_threds),
        drop_last=True)
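These snippets all rely on a DatasetFactory that resolves a dataset name to a torch.utils.data.Dataset instance. The factory itself is not part of the listing; below is a minimal sketch of the assumed interface (the registry mechanics and the constructor signature are guesses, not the original implementation):

class DatasetFactory:
    """Hypothetical registry mapping a dataset-mode string to a Dataset class."""
    _registry = {}

    @classmethod
    def register(cls, name, dataset_cls):
        cls._registry[name] = dataset_cls

    @classmethod
    def get_by_name(cls, name, opt, is_for_train):
        if name not in cls._registry:
            raise ValueError("Dataset [%s] not recognized." % name)
        # Each registered class is assumed to accept (opt, is_for_train).
        return cls._registry[name](opt, is_for_train)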
def _create_datasets(self):
    self.datasets = OrderedDict()
    for i, dataset_name in enumerate(self._opt.dataset_names):
        task = self._opt.tasks[i]
        self.datasets[task] = DatasetFactory.get_by_name(
            dataset_name, self._opt, self.train_mode, self.transform)
    # Running total of dataset lengths, one entry per task.
    self.cumulative_sizes = self.cumsum(list(self.datasets.values()))
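self.cumsum is not shown in this snippet; it presumably behaves like the cumsum static helper on torch.utils.data.ConcatDataset, which turns a list of datasets into a running total of their lengths so a global sample index can be routed to the right per-task dataset. A standalone sketch of that helper (on the class it would be a @staticmethod):

def cumsum(sequence):
    # For datasets with lengths [3, 5, 2] this returns [3, 8, 10]:
    # entry i is the end boundary of dataset i in the concatenated index space.
    out, total = [], 0
    for dataset in sequence:
        total += len(dataset)
        out.append(total)
    return out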
Example #4
def _create_dataset(self):
    self._dataset = DatasetFactory.get_by_name(self._opt.dataset_mode,
                                               self._opt,
                                               self._is_for_train)
    if self._is_for_train:
        self._dataloader = torch.utils.data.DataLoader(
            self._dataset,
            batch_size=self._opt.train_batch_size,
            shuffle=True,  # shuffle only while training
            num_workers=int(self._num_threds),
            drop_last=True)
    else:
        self._dataloader = torch.utils.data.DataLoader(
            self._dataset,
            batch_size=self._opt.test_batch_size,
            shuffle=False,
            num_workers=int(self._num_threds),
            drop_last=True)
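Both branches keep drop_last=True, so any final partial batch is silently discarded and every batch the model sees holds exactly batch_size samples. A self-contained illustration with made-up sizes:

import torch
from torch.utils.data import DataLoader, TensorDataset

# 10 samples with batch_size=4: drop_last=True yields 2 batches, not 3.
data = TensorDataset(torch.arange(10))
loader = DataLoader(data, batch_size=4, shuffle=True, drop_last=True)
assert len(loader) == 2
for (batch,) in loader:
    assert batch.shape[0] == 4  # every batch is full-sized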
Example #5
def _create_dataset(self):
    self._dataset = DatasetFactory.get_by_name(self._opt.dataset_mode,
                                               self._opt, self._mode)

    loader_kwargs = dict(
        batch_size=self._opt.batch_size,
        shuffle=True,  # note: shuffles in every mode, not just training
        num_workers=int(self._num_threds),
        drop_last=True)
    # Use the dataset's own collate_fn when it defines one.
    if hasattr(self._dataset, 'collate_fn'):
        loader_kwargs['collate_fn'] = self._dataset.collate_fn
    self._dataloader = torch.utils.data.DataLoader(self._dataset, **loader_kwargs)
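The collate_fn branch exists for datasets whose samples cannot be stacked by the default collate, e.g. variable-length sequences. A hypothetical stand-in dataset showing why a custom collate_fn is needed (none of these names come from the snippets above):

import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence

class VarLenDataset(Dataset):
    """Stand-in dataset yielding variable-length 1-D tensors."""
    def __len__(self):
        return 32

    def __getitem__(self, idx):
        return torch.randn(idx % 5 + 1)  # lengths 1..5

    @staticmethod
    def collate_fn(batch):
        # The default collate would fail on unequal lengths; pad instead.
        return pad_sequence(batch, batch_first=True)

dataset = VarLenDataset()
loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.collate_fn,
                    shuffle=True, drop_last=True)
batch = next(iter(loader))  # shape: (4, max_len_in_batch)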