Example #1
def create_ellipses_dataloader(num_images_train=100,
                               num_images_test=30,
                               resolution=(64, 64)):
    """
    Create train and test DataLoaders over the synthetic ellipse dataset.

    :param num_images_train: int: number of ellipses in the train loader
    :param num_images_test: int: number of ellipses in the test loader
    :param resolution: tuple(int, int): image resolution
    :return: tuple of DataLoaders for train and test
    """
    # Convert an (image, mask) pair to a pair of tensors.
    to_tensor_pair = Lambda(lambda pair: (torch.Tensor(pair[0]),
                                          torch.Tensor(pair[1])))
    # Reorder the image from HxWxC to CxHxW; the mask keeps its layout.
    transpose_pair = Lambda(lambda pair: (pair[0].transpose(0, 2).transpose(1, 2),
                                          torch.Tensor(pair[1])))
    e_train = Ellipses(
        num_images=num_images_train,
        image_shape=resolution,
        transform=Compose([RandomD4_for_pair(), to_tensor_pair,
                           transpose_pair]))
    e_test = Ellipses(num_images=num_images_test,
                      image_shape=resolution,
                      transform=Compose([to_tensor_pair, transpose_pair]))
    ellipses_train_loader = DataLoader(e_train, batch_size=2, shuffle=False)
    ellipses_test_loader = DataLoader(e_test, batch_size=2, shuffle=False)
    return ellipses_train_loader, ellipses_test_loader
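A minimal usage sketch (the batch shapes depend on how the project's Ellipses dataset renders its images; the channel count shown here is an assumption):

train_loader, test_loader = create_ellipses_dataloader(num_images_train=8,
                                                       num_images_test=4)
images, masks = next(iter(train_loader))
print(images.shape)  # e.g. torch.Size([2, 3, 64, 64])
print(masks.shape)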
Example #2
    def __getitem__(self, index):
        transforms = list()
        # First convert PIL.Image.Image to numpy.ndarray (HxWxC).
        transforms.append(Lambda(self.__to_numpy))
        # Subtract the per-pixel mean.
        transforms.append(Lambda(self.__per_pixel_subtraction_normalization))

        if self.train:
            if random.random() > 0.5:
                # Flip horizontally with a 50:50 chance.
                transforms.append(Lambda(self.__horizontal_flip))
            # Zero-pad along the H and W dims, then randomly crop.
            transforms.append(Lambda(self.__pad_and_random_crop))

        # Convert numpy.ndarray to torch.Tensor (note that HxWxC -> CxHxW).
        transforms.append(ToTensor())
        transforms = Compose(transforms)

        if self.train:
            image, label = self.cifar10_train[index]
        else:
            image, label = self.cifar10_test[index]
        return transforms(image), label
Example #3
    def get_transforms(kmeans):
        clusters = kmeans.kmeans.n_clusters

        # Colorisation target pipeline: quantize the colour channels with the
        # fitted k-means model, then one-hot encode the cluster indices.
        _transform_colorisation = Compose([
            Resize((32, 32)),
            ToNumpy(),
            ConvertChannel(),
            QuantizeAB(kmeans),
            OneHotEncoding(clusters),
            ToTensor()
        ])
        transform_colorisation = Compose([
            Lambda(lambda batch: torch.stack(
                [_transform_colorisation(im) for im in batch]))
        ])

        # Training input pipeline: normalized grayscale images, stacked per batch.
        _transform_training = Compose([
            Resize((256, 256)),
            Grayscale(),
            ToTensor(),
            Normalize(mean=config.IMAGE_MEAN, std=config.IMAGE_STD)
        ])
        transform_training = Compose([
            Lambda(lambda batch: torch.stack(
                [_transform_training(im) for im in batch]))
        ])

        # Testing pipeline: keep the images as numpy arrays (no stacking).
        _transform_testing = Compose(
            [Resize((256, 256)),
             ToNumpy(), ConvertChannel()])
        transform_testing = Compose(
            [Lambda(lambda batch: [_transform_testing(im) for im in batch])])

        return [transform_training, transform_colorisation, transform_testing]
Example #4
def valid_lr_transform(crop_size, normalize=True):
    return Compose([
        ToPILImage(),
        Resize(crop_size, interpolation=Image.BICUBIC),
        ToTensor(),
        Lambda(imagenet_normalise) if normalize else Lambda(lambda img: img),
    ])
Example #5
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are the same as those of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast,
            saturation and hue in a random order.
        """

        transforms = []
        brightness_factor = random.uniform(max(0, brightness[0]),
                                           brightness[1])
        transforms.append(
            Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        contrast_factor = random.uniform(max(0, contrast[0]), contrast[1])
        transforms.append(
            Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        saturation_factor = random.uniform(max(0, saturation[0]),
                                           saturation[1])
        transforms.append(
            Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        hue_factor = random.uniform(-hue[0], hue[1])
        transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform
Example #6
def get_dataloader(batch_size):
    train_data = KgForestDataset(
        split='train-37479',
        transform=Compose([
            Lambda(lambda x: randomShiftScaleRotate(
                x, u=0.75, shift_limit=6, scale_limit=6, rotate_limit=45)),
            Lambda(lambda x: randomFlip(x)),
            Lambda(lambda x: randomTranspose(x)),
            Lambda(lambda x: toTensor(x)),
            Normalize(mean=mean, std=std)
        ]),
        height=256,
        width=256)
    train_data_loader = DataLoader(batch_size=batch_size,
                                   dataset=train_data,
                                   shuffle=True)

    validation = KgForestDataset(
        split='validation-3000',
        transform=Compose([
            # Lambda(lambda x: randomShiftScaleRotate(x, u=0.75, shift_limit=6, scale_limit=6, rotate_limit=45)),
            # Lambda(lambda x: randomFlip(x)),
            #  Lambda(lambda x: randomTranspose(x)),
            Lambda(lambda x: toTensor(x)),
            Normalize(mean=mean, std=std)
        ]),
        height=256,
        width=256)

    valid_dataloader = DataLoader(dataset=validation,
                                  shuffle=False,
                                  batch_size=batch_size)
    return train_data_loader, valid_dataloader
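Note that wrapping anonymous lambdas in Lambda, as above, produces transforms that cannot be pickled, which breaks DataLoader workers on platforms that spawn subprocesses (e.g. Windows). A sketch of the picklable alternative using top-level named functions (randomShiftScaleRotate and friends are the same helpers the snippet above relies on):

def shift_scale_rotate(x):
    # Fixing the augmentation hyperparameters here keeps the function picklable.
    return randomShiftScaleRotate(x, u=0.75, shift_limit=6, scale_limit=6,
                                  rotate_limit=45)

train_transform = Compose([
    Lambda(shift_scale_rotate),
    Lambda(randomFlip),
    Lambda(randomTranspose),
    Lambda(toTensor),
    Normalize(mean=mean, std=std)
])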
Example #7
def load_image(filename, root="", ensemble=False):
    """Load an image.

    :param filename: path to image
    :param root: can be specified, if filename is a relative path
    :param ensemble: if `True`, perform ten crops and return that
      instead.
    :returns: an image of dimension (1,3,224,224) if `ensemble`
      is `False`, otherwise (10,3,224,224).
    """
    transform_list = []
    if ensemble:
        norm = Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        transform_list.append(Resize((256, 256)))
        transform_list.append(TenCrop(224))
        transform_list.append(
            Lambda(lambda crops: torch.stack(
                [ToTensor()(crop) for crop in crops])))
        transform_list.append(
            Lambda(lambda crops: torch.stack([norm(crop) for crop in crops])))
    else:
        transform_list.append(Resize((224, 224)))
        transform_list.append(ToTensor())
        transform_list.append(Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    transformer = Compose(transform_list)
    filepath = os.path.join(root, filename)  # handles an empty root correctly
    img = Image.open(filepath)
    img = img.convert("RGB")
    img = transformer(img)
    if ensemble:
        return img
    else:
        return img.unsqueeze(0)
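A minimal inference sketch for the ensemble path (assuming a recent torchvision; resnet18 and "cat.jpg" are placeholders): the ten crops are scored in one forward pass and the outputs are averaged over the crop dimension.

import torch
from torchvision.models import resnet18

model = resnet18(weights=None).eval()
crops = load_image("cat.jpg", ensemble=True)  # (10, 3, 224, 224)
with torch.no_grad():
    logits = model(crops)                     # (10, num_classes)
probs = logits.softmax(dim=1).mean(dim=0)     # average over the ten crops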
Example #8
    def get_params(brightness, contrast, saturation):
        """Get a randomized transform to be applied on image.

            Arguments are same as that of __init__.

            Returns:
                Transform which randomly adjusts brightness, contrast and
                saturation in a random order.
            """
        transforms = []

        if brightness is not None:
            brightness_factor = random.gauss(brightness[0], brightness[1])
            transforms.append(
                Lambda(lambda img: adjust_brightness(img, brightness_factor)))

        if contrast is not None:
            contrast_factor = random.gauss(contrast[0], contrast[1])
            transforms.append(
                Lambda(lambda img: adjust_contrast(img, contrast_factor)))

        if saturation is not None:
            saturation_factor = random.gauss(saturation[0], saturation[1])
            transforms.append(
                Lambda(lambda img: adjust_saturation(img, saturation_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform
Example #9
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.
        Arguments are same as that of __init__.
        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        if brightness > 0:
            brightness_factor = random.uniform(max(0, 1 - brightness),
                                               1 + brightness)
            transforms.append(
                Lambda(lambda img: adjust_brightness(img, brightness_factor)))

        if contrast > 0:
            contrast_factor = random.uniform(max(0, 1 - contrast),
                                             1 + contrast)
            transforms.append(
                Lambda(lambda img: adjust_contrast(img, contrast_factor)))

        if saturation > 0:
            saturation_factor = random.uniform(max(0, 1 - saturation),
                                               1 + saturation)
            transforms.append(
                Lambda(lambda img: adjust_saturation(img, saturation_factor)))

        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
            transforms.append(Lambda(lambda img: adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform
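Because the factors are sampled once and baked into the returned Compose, the same transform instance applies identical jitter to every image it is given; this is the standard trick for augmenting all frames of a clip consistently. A usage sketch (assuming frames is a list of PIL images and get_params is accessible as a static method):

jitter = get_params(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
frames = [jitter(frame) for frame in frames]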
Example #10
    def test_transforms_groups_constructor_error(self):
        original_dataset = MNIST('./data/mnist', download=True)
        with self.assertRaises(Exception):
            # Test tuple has only one element
            dataset = AvalancheDataset(original_dataset,
                                       transform_groups=dict(
                                           train=(ToTensor(), None),
                                           eval=(Lambda(lambda t: float(t)))))

        with self.assertRaises(Exception):
            # Test eval group given as a list instead of a tuple
            dataset = AvalancheDataset(
                original_dataset,
                transform_groups=dict(train=(ToTensor(), None),
                                      eval=[None,
                                            Lambda(lambda t: float(t))]))

        with self.assertRaises(Exception):
            # Train is None
            dataset = AvalancheDataset(
                original_dataset,
                transform_groups=dict(train=None,
                                      eval=(None, Lambda(lambda t: float(t)))))

        with self.assertRaises(Exception):
            # transform_groups is not a dictionary
            dataset = AvalancheDataset(original_dataset,
                                       transform_groups='Hello world!')
Example #11
def get_test_data(data_path):
    with open(data_path, 'rb') as f:
        train_test_paths_labels = pickle.load(f)

    test_paths = train_test_paths_labels[2]
    test_labels = train_test_paths_labels[5]
    test_num_each = train_test_paths_labels[8]

    print('test_paths   : {:6d}'.format(len(test_paths)))
    print('test_labels  : {:6d}'.format(len(test_labels)))

    test_labels = np.asarray(test_labels, dtype=np.int64)

    test_transforms = None
    if crop_type == 0:
        test_transforms = transforms.Compose([
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.4310, 0.2971, 0.3126], [0.2405, 0.1863, 0.1935])
        ])
    elif crop_type == 1:
        test_transforms = transforms.Compose([
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.4310, 0.2971, 0.3126], [0.2405, 0.1863, 0.1935])
        ])
    elif crop_type == 5:
        test_transforms = transforms.Compose([
            transforms.FiveCrop(224),
            Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
            Lambda(
                lambda crops: torch.stack(
                    [transforms.Normalize([0.4310, 0.2971, 0.3126], [0.2405, 0.1863, 0.1935])(crop) for crop in crops]))
        ])
    elif crop_type == 10:
        test_transforms = transforms.Compose([
            transforms.TenCrop(224),
            Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
            Lambda(
                lambda crops: torch.stack(
                    [transforms.Normalize([0.4310, 0.2971, 0.3126], [0.2405, 0.1863, 0.1935])(crop) for crop in crops]))
        ])
    elif crop_type == 2:
        test_transforms = transforms.Compose([
            transforms.Resize((250, 250)),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.4310, 0.2971, 0.3126], [0.2405, 0.1863, 0.1935])
        ])
    elif crop_type == 3:
        test_transforms = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.4310, 0.2971, 0.3126], [0.2405, 0.1863, 0.1935])
        ])

    test_dataset = CholecDataset(test_paths, test_labels, test_transforms)
    return test_dataset, test_num_each
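FiveCrop/TenCrop make every dataset item a stack of crops, so a DataLoader batch comes out as (batch, n_crops, C, H, W). A common way to evaluate such a batch (a sketch, assuming any classifier model) is to fold the crop dimension into the batch and average the per-crop outputs afterwards:

def forward_with_crops(model, batch):
    # batch: (B, n_crops, C, H, W)
    b, n_crops, c, h, w = batch.size()
    outputs = model(batch.view(-1, c, h, w))     # (B * n_crops, num_classes)
    return outputs.view(b, n_crops, -1).mean(1)  # (B, num_classes)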
Example #12
    def __init__(self):
        self.transform = Compose([
            Lambda(lambda x: x / 255),        # scale pixel values to [0, 1]
            Normalize(-mean, std),            # subtract the negated mean, divide by std
            Lambda(lambda x: x.clamp(0, 1)),  # clip out-of-range values
            Lambda(lambda x: x.flip(0)),      # reverse the channel order (BGR <-> RGB)
            Lambda(lambda x: x.cpu()),        # ToPILImage needs a CPU tensor
            ToPILImage()
        ])
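Here mean and std are project globals. For reference, the general way to invert a torchvision Normalize(mean, std) on a CxHxW tensor is a second Normalize with transformed statistics (a sketch using the ImageNet values):

import torch
from torchvision.transforms import Normalize

mean = torch.tensor([0.485, 0.456, 0.406])
std = torch.tensor([0.229, 0.224, 0.225])
denormalize = Normalize((-mean / std).tolist(), (1.0 / std).tolist())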
Example #13
    def forward(self, batch):
        # 1. Fetch the input; its feature dimension should equal self.feature_dim
        x = batch['X']

        # 2. Compute the model output; its feature dimension should equal self.output_dim
        k = 1
        dgc_mode = 'hybrid'
        obs_timesteps = 10
        pred_timesteps = 3
        nb_nodes = 208
        # NOTE: Keras-style recurrent wrappers; torch.nn.RNN accepts neither a
        # cell nor return_state/return_sequences, so this pass is schematic.
        encoder = torch.nn.RNN(DGCRNNCell(k, dgc_mode=dgc_mode),
                               return_state=True)
        decoder = torch.nn.RNN(DGCRNNCell(k, dgc_mode=dgc_mode),
                               return_sequences=True,
                               return_state=True)

        unstack_k = Lambda(unstack)
        choice = Scheduled()

        input_obs = torch.randn(size=(obs_timesteps, nb_nodes, 1))
        input_gt = torch.randn(size=(pred_timesteps, nb_nodes,
                                     1))  # (None, T, N, 1)
        encoder_inputs = Lambda(lambda x: torch.squeeze(x, dim=-1))(
            input_obs)  # (None, T, N)

        encoder_outputs, state_h = encoder(encoder_inputs)

        unstacked = unstack_k(input_gt)  # [(None, N, 1) x T] list

        initial = unstacked[0]  # (None, N, 1)

        decoder_inputs = Lambda(lambda x: torch.transpose(x, 1, 2))(
            initial)  # (None, 1, N)
        decoder_outputs_new, state_h_new = decoder(decoder_inputs,
                                                   initial_state=state_h)
        state_h = state_h_new

        # prediction part
        prediction = []
        decoded_results = decoder_outputs_new
        prediction.append(decoded_results)

        if pred_timesteps > 1:
            for i in range(1, pred_timesteps):
                decoder_inputs = choice([prediction[-1],
                                         unstacked[i]])  # (None, 208, 1)
                decoder_inputs = Lambda(lambda x: torch.transpose(x, 1, 2))(
                    decoder_inputs)
                decoder_outputs_new, state_h_new = decoder(
                    decoder_inputs, initial_state=state_h)
                state_h = state_h_new
                decoded_results = decoder_outputs_new
                prediction.append(decoded_results)

        outputs = Lambda(stack)(prediction)

        return outputs
Example #14
    def __init__(self, preserve_color, device):
        self.normalize = Compose([
            Lambda(lambda x: x.flip(0)),      # reverse the channel order (RGB <-> BGR)
            Normalize(mean, std),
            Lambda(lambda x: x * 255),        # rescale to [0, 255]
            Lambda(lambda x: x.unsqueeze(0))  # add a batch dimension
        ])
        self.device = device
        self.preserve_color = preserve_color
Example #15
def get_data(data_path):
    with open(data_path, 'rb') as f:
        train_test_paths_labels = pickle.load(f)
    train_paths = train_test_paths_labels[0]
    val_paths = train_test_paths_labels[1]
    test_paths = train_test_paths_labels[2]
    train_labels = train_test_paths_labels[3]
    val_labels = train_test_paths_labels[4]
    test_labels = train_test_paths_labels[5]
    train_num_each = train_test_paths_labels[6]
    val_num_each = train_test_paths_labels[7]
    test_num_each = train_test_paths_labels[8]

    print('train_paths  : {:6d}'.format(len(train_paths)))
    print('train_labels : {:6d}'.format(len(train_labels)))
    print('valid_paths  : {:6d}'.format(len(val_paths)))
    print('valid_labels : {:6d}'.format(len(val_labels)))
    print('test_paths   : {:6d}'.format(len(test_paths)))
    print('test_labels  : {:6d}'.format(len(test_labels)))

    train_labels = np.asarray(train_labels, dtype=np.int64)
    val_labels = np.asarray(val_labels, dtype=np.int64)
    test_labels = np.asarray(test_labels, dtype=np.int64)

    # train_transforms = transforms.Compose([
    #     transforms.RandomCrop(224),
    #     transforms.ToTensor(),
    #     transforms.Normalize([0.3456, 0.2281, 0.2233], [0.2528, 0.2135, 0.2104])
    # ])
    train_transforms = transforms.Compose([
        transforms.TenCrop(224),
        Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
        Lambda(
            lambda crops: torch.stack(
                [transforms.Normalize([0.3456, 0.2281, 0.2233], [0.2528, 0.2135, 0.2104])(crop) for crop in crops]))
    ])


    val_transforms = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.3456, 0.2281, 0.2233], [0.2528, 0.2135, 0.2104])
    ])

    test_transforms = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.3456, 0.2281, 0.2233], [0.2528, 0.2135, 0.2104])
    ])

    train_dataset = CholecDataset(train_paths, train_labels, train_transforms)
    val_dataset = CholecDataset(val_paths, val_labels, val_transforms)
    test_dataset = CholecDataset(test_paths, test_labels, test_transforms)

    return train_dataset, train_num_each, val_dataset, val_num_each, test_dataset, test_num_each
Example #16
def sr_output_train_transform(convert_luma):
  def transform(pred, target):
    return pred_transform(pred), target_transform(target)

  transforms = [Lambda(lambda img: normalize_range(img, (-1., 1.)))]
  if convert_luma:
    transforms.append(Lambda(lambda img: convert_to_luma(img)))

  pred_transform = Compose(transforms)
  target_transform = Compose(transforms)
  return transform
Example #17
    def __call__(self, img1, img2):
        if self.istrain:
            img1 = F.resize(img1, self.resize, Image.BILINEAR)
            img2 = F.resize(img2, self.resize, Image.BILINEAR)
            # random crop (same window for both images)
            w1, h1 = img1.size
            w2, h2 = img2.size
            w = min(w1, w2)
            h = min(h1, h2)
            th, tw = self.output_size, self.output_size
            i = random.randint(0, h - th)
            j = random.randint(0, w - tw)
            img1 = F.crop(img1, i, j, th, tw)
            img2 = F.crop(img2, i, j, th, tw)
            # random flip (applied jointly)
            if random.random() < 0.5:
                img1 = F.hflip(img1)
                img2 = F.hflip(img2)
            if random.random() < 0.5:
                img1 = F.vflip(img1)
                img2 = F.vflip(img2)
            # color jitter with factors shared by both images
            brightness = 0.4
            saturation = 0.4
            hue = 0.4
            brightness_factor = random.uniform(max(0, 1 - brightness),
                                               1 + brightness)
            saturation_factor = random.uniform(max(0, 1 - saturation),
                                               1 + saturation)
            hue_factor = random.uniform(-hue, hue)
            transforms = []
            transforms.append(
                Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))
            transforms.append(
                Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))
            random.shuffle(transforms)
            transform = Compose(transforms)
            img1 = transform(img1)
            img2 = transform(img2)
        else:
            img1 = F.resize(img1, (self.output_size, self.output_size),
                            Image.BILINEAR)
            img2 = F.resize(img2, (self.output_size, self.output_size),
                            Image.BILINEAR)
        img1 = F.to_tensor(img1)
        img2 = F.to_tensor(img2)
        img1 = F.normalize(img1, self.mean, self.std)
        img2 = F.normalize(img2, self.mean, self.std)
        return img1, img2
Example #18
    def _get_transforms(self, augmentation_config: Optional[CfgNode],
                        dataset_name: str,
                        is_ssl_encoder_module: bool) -> Tuple[Any, Any]:

        # is_ssl_encoder_module will be True for ssl training, False for linear head training
        train_transforms = ImageTransformationPipeline([Lambda(lambda x: x)])  # do nothing
        val_transforms = ImageTransformationPipeline([Lambda(lambda x: x + 1)])  # add 1

        if is_ssl_encoder_module:
            train_transforms = DualViewTransformWrapper(train_transforms)  # type: ignore
            val_transforms = DualViewTransformWrapper(val_transforms)  # type: ignore
        return train_transforms, val_transforms
Example #19
    def test_transform(self):
        train_transform = Lambda(lambda k: 1)
        test_transform = Lambda(lambda k: 0)
        dataset = ActiveLearningDataset(MyDataset(train_transform), make_unlabelled=lambda x: (x[0], -1),
                                        pool_specifics={'transform': test_transform})
        dataset.label(np.arange(10))
        pool = dataset.pool
        assert np.equal([i for i in pool], [(0, -1) for i in np.arange(10, 100)]).all()
        assert np.equal([i for i in dataset], [(1, i) for i in np.arange(10)]).all()

        with pytest.raises(ValueError) as e:
            ActiveLearningDataset(MyDataset(train_transform), pool_specifics={'whatever': 123}).pool
Example #20
    def test_transform(self):
        train_transform = Lambda(lambda k: 1)
        test_transform = Lambda(lambda k: 0)
        dataset = ActiveLearningDataset(MyDataset(train_transform),
                                        test_transform,
                                        make_unlabelled=lambda x: (x[0], -1))
        dataset.label(np.arange(10))
        pool = dataset.pool
        assert np.equal([i for i in pool],
                        [(0, -1) for i in np.arange(10, 100)]).all()
        assert np.equal([i for i in dataset],
                        [(1, i) for i in np.arange(10)]).all()
Example #21
    def __init__(self, config, data_feature):
        # 1. Initialize the parent class
        super().__init__(config, data_feature)

        # 2. Fetch the needed information from data_feature (TrafficStatePointDataset)
        self._scaler = self.data_feature.get('scaler')  # normalization scaler
        self.adj_mx = self.data_feature.get('adj_mx')  # adjacency matrix
        self.num_nodes = self.data_feature.get('num_nodes')  # number of nodes
        self.feature_dim = self.data_feature.get('feature_dim')  # input feature dimension
        self.output_dim = self.data_feature.get('output_dim')  # model output dimension

        # 3. Initialize the logger for necessary output
        self._logger = getLogger()

        # 4. Initialize the device
        self.device = config.get('device', torch.device('cpu'))

        # 5. Build the network
        self.encoder = torch.nn.RNN()
        self.decoder = torch.nn.RNN()
        self.input_obs = torch.randn()
        self.input_gt = torch.randn()
        self.encoder_inputs = Lambda(lambda x: torch.squeeze(x, dim=-1))(
            self.input_obs)
        self.encoder_outputs, state_h = self.encoder(self.encoder_inputs)
        self.unstacked = self.unstack_k(self.input_gt)
        self.initial = self.unstacked[0]  # (None, N, 1)
        self.decoder_inputs = Lambda(lambda x: torch.transpose(x, 1, 2))(
            self.initial)
        self.decoder_outputs_new, self.state_h_new = self.decoder(
            self.decoder_inputs, initial_state=state_h)
        self.state_h = self.state_h_new

        self.prediction = []
        self.decoded_results = self.decoder_outputs_new
        self.prediction.append(self.decoded_results)
        self.pred_timesteps = 3
        self.choice = Scheduled()

        if self.pred_timesteps > 1:
            for i in range(1, self.pred_timesteps):
                self.decoder_inputs = self.choice(
                    [self.prediction[-1], self.unstacked[i]])  # (None, 208, 1)
                self.decoder_inputs = Lambda(
                    lambda x: torch.transpose(x, 1, 2))(
                        self.decoder_inputs)  # (None, 1, 208)
                self.decoder_outputs_new, self.state_h_new = self.decoder(
                    self.decoder_inputs, initial_state=state_h)
                self.state_h = self.state_h_new
                self.decoded_results = self.decoder_outputs_new
                self.prediction.append(self.decoded_results)

        self.outputs = Lambda(stack)(self.prediction)
Example #22
    def __init__(self,
                 root,
                 preload_train=False,
                 preload_test=False,
                 im_size=28):
        transforms = Compose([
            Resize(im_size, Image.LANCZOS),
            ToTensor(),
            Lambda(lambda x: x.unsqueeze(0)),  # used to add batch dimension
        ])
        t_transforms = Lambda(lambda x: torch.tensor(x).unsqueeze(0))
        self.cifar_train = CIFAR10(
            root=root,
            #background=True,
            train=True,
            download=True,
            transform=transforms,
            target_transform=t_transforms,
        )
        self.cifar_test = CIFAR10(
            root=root,
            #background=False,
            train=False,
            download=True,
            transform=transforms,
            target_transform=t_transforms,
        )

        # NOTE: _character_images/_flat_character_images are Omniglot dataset
        # attributes; torchvision's CIFAR10 does not define them.
        # tasks_train contains tasks, i.e. 964 sets of 20 characters
        self.tasks_train = np.arange(len(self.cifar_train._character_images))
        # chars_train contains the characters directly, i.e. 964*20=19280 image-label pairs
        self.chars_train = np.arange(
            len(self.cifar_train._flat_character_images))
        # tasks_test contains tasks, i.e. 659 sets of 20 characters
        self.tasks_test = np.arange(len(self.cifar_test._character_images))
        # chars_test contains the characters directly, i.e. 659*20=13180 image-label pairs
        self.chars_test = np.arange(len(
            self.cifar_test._flat_character_images))

        if preload_train:
            start = time()
            print("Pre-loading CIFAR 10 train...")
            _ = [img for img in self.cifar_train]
            end = time()
            print(f"{end - start:.1f}s :CIFAR 10 train pre-loaded.")

        if preload_test:
            start = time()
            print("Pre-loading CIFAR 10 test...")
            _ = [img for img in self.cifar_test]
            end = time()
            print(f"{end - start:.1f}s : CIFAR 10 test pre-loaded.")
Example #23
def get_basic_input_transform(patch_size, mean, std):
    cv2_scale = (lambda x: cv2.resize(
        x, dsize=(patch_size, patch_size), interpolation=cv2.INTER_LINEAR)
                 if x.shape[0] != patch_size else x.copy())
    np_reshape = lambda x: np.reshape(x, (patch_size, patch_size, 1))
    tforms = [
        Lambda(cv2_scale),
        Lambda(np_reshape),
        ToTensor(),
        Normalize((mean, ), (std, )),
    ]
    tforms = Compose(tforms)
    return tforms
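A usage sketch for the patch pipeline above (mean and std are hypothetical values): a 64x64 uint8 patch is resized to patch_size, reshaped to HxWx1, converted to a tensor, and normalized.

import numpy as np

patch = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
tform = get_basic_input_transform(patch_size=32, mean=0.443, std=0.2)
tensor = tform(patch)  # shape (1, 32, 32)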
Example #24
    def create_encodings(self):
        color_table = self.create_colors()
        colors = []
        images = []
        X = []
        instances = []

        image_transform = Compose([
            Lambda(lambda x: x.detach().cpu()),
            ToPILImage(),
            Resize(45),
            Lambda(lambda x: np.asarray(x))
        ])

        print("Retrieve visualization data...")
        for idx, enc in enumerate(tqdm(self.encoding_dataset)):
            # save metadata
            instances.append({
                "rooms": enc["anchor"]["meta"]["reference"],
                "labels": enc["anchor"]["meta"]["label"],
                "instances": enc["anchor"]["meta"]["reference"] + "_" + enc["anchor"]["meta"]["label"] + "_" + str(enc["anchor"]["meta"]["instance_id"])
            })

            # load image to be visualized
            image = self.encoding_dataset.dataset[idx]["anchor"]["image"]
            image = image_transform(image)
            images.append(image)

            # load color to be visualized
            key = self.get_key(enc, idx)
            colors.append(color_table[key])

            # load encodings to be visualized. Always load encoding regardless of filter because otherwise the t-SNE and PCA would be different!!
            anchor_encodings = enc["anchor"]["encodings"]
            if self.dim == -1:
                all_encs = torch.cat(anchor_encodings, dim=1)
                X.append(all_encs.clone())
            else:
                X.append(anchor_encodings[self.dim].clone())

        assert (len(X) == len(colors))
        assert (len(colors) == len(images))
        assert (len(images) == len(instances))

        images = np.array(images)
        X = torch.cat(X)

        return X, colors, images, instances
Example #25
def input_transform_augment(crop_size):
    return Compose([
        CenterCrop(crop_size),
        Scale(256),                   # deprecated alias of Resize in older torchvision
        ToTensor(),                   # [0, 255] --> [ 0., 1.]
        Lambda(lambda x: 2 * x - 1),  # [0., 1.] --> [-1., 1.]
    ])
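The final Lambda maps [0., 1.] to [-1., 1.]; the mirror Lambda undoes it when turning network output back into a displayable image (a one-line sketch):

from torchvision.transforms import Lambda

to_unit_range = Lambda(lambda x: (x + 1) / 2)  # [-1., 1.] --> [0., 1.]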
Example #26
    def __init__(self, phase, kwargs):
        self.mode        = Mode[kwargs['mode']]
        self.image_size  = kwargs['image_size']
        self.hidden_size = kwargs['hidden_size']

        self.debug_use_dataset  = kwargs['debug_use_dataset']
        self.debug_one_sentence = kwargs['debug_one_sentence']
        self.__use_densenet     = kwargs['__use_densenet']

        self.sent_tokenizer = PunktSentenceTokenizer()
        self.word_tokenizer = TweetTokenizer()

        if phase == Phase.train:
            jitter = [ColorJitter(brightness=0.5, contrast=0.5)]
        else:
            jitter = []

        if self.__use_densenet:
            self.transform = Compose((
                [Lambda(lambda img: img.convert('RGB'))] +
                [Resize((256, 256))] +
                jitter +
                [ToTensor()] +
                [Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]
            ))
        else:
            self.transform = Compose((
                [Resize((256, 256))] +
                jitter +
                [ToTensor()]
            ))
Example #27
    def test_transforms_groups_base_usage(self):
        original_dataset = MNIST('./data/mnist', download=True)
        dataset = AvalancheDataset(
            original_dataset,
            transform_groups=dict(train=(ToTensor(), None),
                                  eval=(None, Lambda(lambda t: float(t)))))

        x, y, _ = dataset[0]
        self.assertIsInstance(x, Tensor)
        self.assertIsInstance(y, int)

        dataset_test = dataset.eval()

        x2, y2, _ = dataset_test[0]
        x3, y3, _ = dataset[0]
        self.assertIsInstance(x2, Image)
        self.assertIsInstance(y2, float)
        self.assertIsInstance(x3, Tensor)
        self.assertIsInstance(y3, int)

        dataset_train = dataset.train()
        dataset.transform = None

        x4, y4, _ = dataset_train[0]
        x5, y5, _ = dataset[0]
        self.assertIsInstance(x4, Tensor)
        self.assertIsInstance(y4, int)
        self.assertIsInstance(x5, Image)
        self.assertIsInstance(y5, int)
Example #28
def preprocess(images):
    '''
    Perform preprocessing on a batch of images (bs, h, w, c) or a single image (h, w, c).
    Flickering is not handled (there is no flickering in Breakout).
    Grayscale is used instead of luminance.
    '''
    size_preprocessed_image = 84
    transformations = Compose([
        Lambda(lambda image: image.permute(2, 0, 1)),
        ToPILImage(),
        Grayscale(),
        Resize((size_preprocessed_image, size_preprocessed_image)),
        ToTensor()
    ])
    if len(images.shape) == 3:
        images = images.unsqueeze(0)
    assert len(images.shape) == 4
    batch_size = images.shape[0]
    preprocessed_images = []
    for i in range(batch_size):
        preprocessed_images.append(transformations(images[i]).squeeze(0))
    preprocessed_images = torch.stack(preprocessed_images).permute(
        1, 2, 0).squeeze()
    if len(preprocessed_images.shape) == 3:
        preprocessed_images, _ = torch.max(preprocessed_images,
                                           dim=2,
                                           keepdim=False)
    return preprocessed_images
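A quick shape check (a sketch; the random tensor stands in for a raw 210x160 RGB Atari frame):

import torch

frame = torch.randint(0, 256, (210, 160, 3), dtype=torch.uint8)
out = preprocess(frame)
print(out.shape)  # torch.Size([84, 84])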
Example #29
    def test_if_applies_transforms(self, input_directory):
        transform = Lambda(lambda x: np.pad(x, (2, 2)))
        dataset = datasets.NiftiFolder.from_dir(input_directory, transform)

        for idx in range(len(dataset)):
            assert np.all(dataset[idx].shape == np.array(INPUT_IMAGE_SHAPE) + 4)
Example #30
def valid_lr_transform(crop_size):
    return Compose([
        ToPILImage(),
        Resize(crop_size, interpolation=Image.BICUBIC),
        ToTensor(),
        Lambda(imagenet_normalise),
    ])
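imagenet_normalise is defined elsewhere in the project; assuming it applies the standard ImageNet statistics, a stock equivalent would be:

from torchvision.transforms import Normalize

imagenet_norm = Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])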