def __call__(self, kspace, target, attrs, fname, slice):
        """Convert k-space and target image to tensors and center-crop both.

        Both outputs are cropped to a square of side ``self.resolution``.
        ``attrs``, ``fname`` and ``slice`` are accepted for interface
        compatibility but unused here.
        """
        crop_size = (self.resolution, self.resolution)
        kspace_tensor = transforms.to_tensor(np.array(kspace))
        target_tensor = transforms.to_tensor(np.array(target))
        # complex-valued k-space needs the complex-aware crop; target is real
        kspace_tensor = transforms.complex_center_crop(kspace_tensor, crop_size)
        target_tensor = transforms.center_crop(target_tensor, crop_size)
        return kspace_tensor, target_tensor
Exemplo n.º 2
0
 def _get_cutout_train_transform(self):
     """Build the training pipeline: normalize, cutout augmentation, to-tensor."""
     cfg = self.config
     steps = [
         transforms.normalize(self.mean, self.std),
         transforms.cutout(cfg['cutout_size'],
                           cfg['cutout_prob'],
                           cfg['cutout_inside']),
         transforms.to_tensor(),
     ]
     return torchvision.transforms.Compose(steps)
Exemplo n.º 3
0
 def _get_cutout_train_transform(self):
     """Build the training pipeline: random crop, horizontal flip, normalize,
     cutout augmentation, then to-tensor."""
     cfg = self.config
     steps = [
         torchvision.transforms.RandomCrop(28, padding=4),
         torchvision.transforms.RandomHorizontalFlip(),
         transforms.normalize(self.mean, self.std),
         transforms.cutout(cfg['cutout_size'],
                           cfg['cutout_prob'],
                           cfg['cutout_inside']),
         transforms.to_tensor(),
     ]
     return torchvision.transforms.Compose(steps)
Exemplo n.º 4
0
 def _get_random_erasing_train_transform(self):
     """Build the training pipeline: normalize, random erasing, to-tensor."""
     cfg = self.config
     norm = transforms.normalize(self.mean, self.std)
     eraser = transforms.random_erasing(
         cfg['random_erasing_prob'],
         cfg['random_erasing_area_ratio_range'],
         cfg['random_erasing_min_aspect_ratio'],
         cfg['random_erasing_max_attempt'])
     return torchvision.transforms.Compose([norm, eraser, transforms.to_tensor()])
Exemplo n.º 5
0
 def verify_img_data(img_data, expected_output, mode):
     """Round-trip img_data through ToPILImage and check the first three
     channels against expected_output tensors."""
     if mode is None:
         converter = transforms.ToPILImage()
     else:
         converter = transforms.ToPILImage(mode=mode)
     img = converter(img_data)
     # default should assume RGB
     assert img.mode == ('RGB' if mode is None else mode)
     channels = img.split()
     for idx in range(3):
         got = transforms.to_tensor(channels[idx]).numpy()
         assert np.allclose(expected_output[idx].numpy(), got)
Exemplo n.º 6
0
    def __call__(self, pic):
        """Convert an image to a tensor.

        Args:
            pic (PIL or numpy.ndarray): Image to be converted to tensor

        Returns:
            Tensor: Converted image.
        """
        if not isinstance(pic, np.ndarray):
            # PIL images go through the library converter unchanged.
            return transforms.to_tensor(pic)
        # Mirror TorchVision 0.2.0's ToTensor for ndarrays: reorder the
        # axes HWC -> CHW, then scale byte values into [0, 1].
        chw = pic.transpose((2, 0, 1))
        return torch.from_numpy(chw).float().div(255)
Exemplo n.º 7
0
 def _get_random_erasing_train_transform(self):
     """Build the training pipeline: resize, color jitter, random crop,
     horizontal flip, normalize, random erasing, to-tensor."""
     cfg = self.config
     steps = [
         torchvision.transforms.Resize((32, 32)),
         torchvision.transforms.ColorJitter(0.1, 0.1, 0.1),
         torchvision.transforms.RandomCrop(32, padding=4),
         torchvision.transforms.RandomHorizontalFlip(),
         transforms.normalize(self.mean, self.std),
         transforms.random_erasing(
             cfg['random_erasing_prob'],
             cfg['random_erasing_area_ratio_range'],
             cfg['random_erasing_min_aspect_ratio'],
             cfg['random_erasing_max_attempt']),
         transforms.to_tensor(),
     ]
     return torchvision.transforms.Compose(steps)
 def _get_test_transform(self):
     """Build the evaluation pipeline: normalize then to-tensor (no augmentation)."""
     steps = [
         transforms.normalize(self.mean, self.std),
         transforms.to_tensor(),
     ]
     return torchvision.transforms.Compose(steps)
 def _add_to_tensor(self):
     """Append a to-tensor conversion step to the training transform list."""
     to_tensor_step = transforms.to_tensor()
     self._train_transforms.append(to_tensor_step)