Example #1
    def __getitem__(self, index):
        # The wrapped dataset is assumed to yield (image, label) pairs as tensors,
        # with the image in H x W x C layout so that np.rot90 below rotates spatially.
        x_orig, classifier_target = self.dataset[index]
        x_orig = x_orig.numpy()
        classifier_target = classifier_target.numpy()

        # Four rotated views (0, 90, 180, 270 degrees) for rotation prediction;
        # the extra .copy() calls give contiguous arrays that to_tensor accepts.
        x_tf_0 = np.copy(x_orig)
        x_tf_90 = np.rot90(x_orig.copy(), k=1).copy()
        x_tf_180 = np.rot90(x_orig.copy(), k=2).copy()
        x_tf_270 = np.rot90(x_orig.copy(), k=3).copy()

        # Sample a random translation from the 3 x 3 grid {0, +8, -8} pixels per axis
        # and encode the drawn offsets as class indices for the auxiliary heads.
        possible_translations = list(itertools.product([0, 8, -8], [0, 8, -8]))
        num_possible_translations = len(possible_translations)
        tx, ty = possible_translations[random.randint(
            0, num_possible_translations - 1)]
        tx_target = {0: 0, 8: 1, -8: 2}[tx]
        ty_target = {0: 0, 8: 1, -8: 2}[ty]
        # cv2f is assumed to be an OpenCV-based functional transforms module:
        # affine(img, angle=0, translate=(tx, ty), scale=1, shear=0, ...).
        x_tf_trans = cv2f.affine(np.asarray(x_orig).copy(),
                                 0, (tx, ty),
                                 1,
                                 0,
                                 interpolation=cv2.INTER_CUBIC,
                                 mode=cv2.BORDER_REFLECT_101)

        # Return the five normalized views plus the translation and class targets.
        return \
            normalize(trnF.to_tensor(x_tf_0)), \
            normalize(trnF.to_tensor(x_tf_90)), \
            normalize(trnF.to_tensor(x_tf_180)), \
            normalize(trnF.to_tensor(x_tf_270)), \
            normalize(trnF.to_tensor(x_tf_trans)), \
            torch.tensor(tx_target), \
            torch.tensor(ty_target), \
            torch.tensor(classifier_target)
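A minimal, self-contained sketch of the rotation-view construction used above, independent of the wrapper class; the 32x32 HWC uint8 image is an assumed stand-in chosen only for illustration:

import numpy as np
import torch
import torchvision.transforms.functional as trnF

img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)    # assumed stand-in image
views = [np.ascontiguousarray(np.rot90(img, k=k)) for k in range(4)]  # 0/90/180/270 degrees
batch = torch.stack([trnF.to_tensor(v) for v in views])               # (4, 3, 32, 32)
rot_labels = torch.arange(4)                                          # rotation class per view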
Example #2
    def __call__(self, img):
        """
        Args:
            img (numpy ndarray): Image to be transformed.

        Returns:
            numpy ndarray: Affine transformed image.
        """
        # Sample random affine parameters; get_params expects the image size
        # in (width, height) order, hence (img.shape[1], img.shape[0]).
        ret = self.get_params(self.degrees, self.translate, self.scale,
                              self.shear, (img.shape[1], img.shape[0]))
        # F is assumed to be an OpenCV-backed functional transforms module.
        return F.affine(img,
                        *ret,
                        interpolation=self.interpolation,
                        fillcolor=self.fillcolor)
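The unpacking into F.affine shows that ret is an (angle, translations, scale, shear) tuple. torchvision's RandomAffine.get_params follows the same convention and can stand in to show the sampling step; using it here is an assumption for illustration, since the snippet's own get_params is not shown:

import numpy as np
from torchvision.transforms import RandomAffine

img = np.zeros((224, 224, 3), dtype=np.uint8)            # assumed stand-in image
angle, translations, scale, shear = RandomAffine.get_params(
    degrees=(-15, 15), translate=(0.1, 0.1), scale_ranges=(0.9, 1.1),
    shears=(-5, 5), img_size=(img.shape[1], img.shape[0]))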
Example #3
    def __call__(self, imgs):
        """
        Args:
            imgs (numpy ndarray): Image sequence (time*height*width*channel) to be transformed.

        Returns:
            numpy ndarray: Affine transformed image sequence.
        """
        # Sample one set of affine parameters for the whole clip; get_params expects
        # (width, height), which for a T x H x W x C array is (shape[2], shape[1]).
        ret = self.get_params(self.degrees, self.translate, self.scale,
                              self.shear, (imgs.shape[2], imgs.shape[1]))
        # Apply the same sampled transform to every frame so the clip stays consistent.
        output_imgs = []
        for img in imgs:
            output_imgs.append(
                F.affine(img,
                         *ret,
                         interpolation=self.interpolation,
                         fillcolor=self.fillcolor))
        return np.array(output_imgs)
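The design point here is that the random parameters are drawn once and reused for every frame. A self-contained sketch of the same pattern, with plain OpenCV calls standing in for F.affine (the substitution and all sizes are assumptions for illustration):

import cv2
import numpy as np

seq = np.random.randint(0, 256, size=(8, 64, 64, 3), dtype=np.uint8)  # T x H x W x C stand-in
angle, tx, ty = 10.0, 5.0, -3.0              # one random draw shared by the whole clip
M = cv2.getRotationMatrix2D((32, 32), angle, 1.0)
M[:, 2] += (tx, ty)                          # fold the translation into the affine matrix
out = np.stack([cv2.warpAffine(frame, M, (64, 64),
                               flags=cv2.INTER_LINEAR,
                               borderMode=cv2.BORDER_REFLECT_101) for frame in seq])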
Example #4
    def __getitem__(self, index):
        # Look up the fixed perturbation assigned to this wrapper; pert_configs is
        # assumed to be a module-level list of (tx, ty, rotation) tuples.
        x, _ = self.dataset[index]
        pert = pert_configs[self.pert_number]

        x = np.asarray(resize_and_crop(x))

        # Translate with reflection padding, then rotate by pert[2] * 90 degrees.
        x = cv2f.affine(np.asarray(x),
                        0, (pert[0], pert[1]),
                        1,
                        0,
                        interpolation=cv2.INTER_LINEAR,
                        mode=cv2.BORDER_REFLECT_101)
        x = np.rot90(x, pert[2])

        # The label is the index of each perturbation value within its candidate list.
        return trnF.to_tensor(x.copy()), [
            expanded_params[i].index(pert[i])
            for i in range(len(expanded_params))
        ]
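A self-contained sketch of the index encoding used for the label. The concrete values placed in expanded_params below are assumptions chosen for illustration, with pert_configs built as their Cartesian product, as the snippet implies:

import itertools

expanded_params = [(0, 8, -8), (0, 8, -8), (0, 1, 2, 3)]   # assumed: tx, ty, rotation values
pert_configs = list(itertools.product(*expanded_params))

pert = pert_configs[5]
label = [expanded_params[i].index(pert[i]) for i in range(len(expanded_params))]
print(pert, '->', label)                                    # e.g. (0, 8, 1) -> [0, 1, 1]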
Example #5
    def __getitem__(self, index):
        # Enumerate every (sample, perturbation) pair: the wrapped dataset index is
        # index // num_perts and the perturbation index is index % num_perts.
        x, _ = self.dataset[index // num_perts]
        pert = pert_configs[index % num_perts]

        x = np.asarray(resize_and_crop(x))

        # Random horizontal flip, then translate with reflection padding.
        if np.random.uniform() < 0.5:
            x = x[:, ::-1]
        x = cv2f.affine(np.asarray(x), 0, (pert[0], pert[1]), 1, 0,
                        interpolation=cv2.INTER_LINEAR, mode=cv2.BORDER_REFLECT_101)

        # One label row per rotated copy: the perturbation indices plus the rotation class.
        label = [expanded_params[i].index(pert[i]) for i in range(len(expanded_params))]
        label = np.vstack((label + [0], label + [1], label + [2], label + [3]))

        # Stack the image with its 90/180/270-degree rotations along a new leading
        # axis, giving a (4, C, H, W) array per sample.
        x = trnF.to_tensor(x.copy()).unsqueeze(0).numpy()
        x = np.concatenate((x, np.rot90(x, 1, axes=(2, 3)),
                            np.rot90(x, 2, axes=(2, 3)), np.rot90(x, 3, axes=(2, 3))), 0)

        return torch.FloatTensor(x), label
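Since each sample already contains its four rotated copies, a default DataLoader collation would yield batches shaped (B, 4, C, H, W) with (B, 4, num_factors + 1) labels, which can be flattened before the forward pass. A short sketch under those assumed shapes:

import torch

x = torch.randn(8, 4, 3, 32, 32)            # stand-in for a collated image batch
labels = torch.randint(0, 4, (8, 4, 3))     # stand-in for the collated label batch
x = x.view(-1, 3, 32, 32)                   # (32, 3, 32, 32): fold rotations into the batch dim
labels = labels.reshape(-1, labels.shape[-1])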