Example #1
 def transform(self, img, hflip=False):
     if self.crop is not None:
         if isinstance(self.crop, int):
             img = tfs.CenterCrop(self.crop)(img)
         else:
             assert len(self.crop) == 4, 'Crop size must be an integer for center crop, or a list of 4 integers (y0,x0,h,w)'
             img = tfs.crop(img, *self.crop)
     img = tfs.resize(img, (self.image_size, self.image_size))
     if hflip:
         img = tfs.hflip(img)
     return tfs.to_tensor(img)
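For context, a hedged sketch of the attributes this method reads from self; the class name SketchDataset, the default values, and the commented call site are illustrative assumptions, not part of the source.

    class SketchDataset:
        # Hypothetical owner of the transform() method shown in Example #1
        def __init__(self, image_size=256, crop=None):
            self.image_size = image_size   # final square size passed to tfs.resize
            self.crop = crop               # None, an int (center crop), or a (y0, x0, h, w) tuple

    # With the transform() method added to the class, a call site might look like:
    # ds = SketchDataset(image_size=256, crop=(10, 10, 200, 200))
    # tensor = ds.transform(Image.open('face.png'), hflip=True)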
Example #2
    def get_latent(self, input_image):
        # Scale pixel values from [0, 255] to [-1, 1]
        input_image = (input_image - 127.5) / 127.5
        # Grayscale HxW -> HxWx1 -> 1xHxW, then add a batch axis: (1, 1, H, W)
        input_image = np.expand_dims(input_image, axis=2)
        input_image = input_image.transpose(2, 0, 1)
        input_image = np.expand_dims(input_image, axis=0)
        input_image = input_image.astype('float32')
        # Wrap as a jittor Var and run the encoder to get the latent code
        input_image = transform.to_tensor(jt.array(input_image))
        mus_mouth = self.net_encoder(input_image)
        print('mus_mouth:', mus_mouth.shape)
        return mus_mouth
Example #3
    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return trans.to_tensor(img), target
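A small, hedged usage sketch of the contract above: the transform receives a PIL Image (thanks to Image.fromarray) and its output is fed straight into trans.to_tensor(). CenterCrop is borrowed from jittor.transform as used in Example #1; the dummy array below is a stand-in for self.data[index].

    import numpy as np
    from PIL import Image
    from jittor import transform as trans

    pil_transform = trans.CenterCrop(28)                          # any PIL-level transform
    img = Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))  # stand-in for self.data[index]
    tensor = trans.to_tensor(pil_transform(img))                  # mirrors the image path of __getitem__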
Example #4
 def verify_img_data(img_data, expected_output, mode):
     if mode is None:
         img = transform.ToPILImage()(img_data)
         self.assertEqual(img.mode, 'RGB')  # default should assume RGB
     else:
         img = transform.ToPILImage(mode=mode)(img_data)
         self.assertEqual(img.mode, mode)
     split = img.split()
     for i in range(3):
         self.assertTrue(
             np.allclose(expected_output[:, :, i],
                         transform.to_tensor(split[i])))
Example #5
 def __getitem__(self, index):
     A_path = self.file_paths[index]
     A = Image.open(A_path)
     # Resize the sketch to a fixed 512x512 canvas
     new_w = 512
     new_h = 512
     A = A.resize((new_w, new_h), Image.NEAREST)
     A_tensor = transform.to_tensor(A) * 255.0
     if self.part_sketch != 'bg':
         # Crop the square patch for this facial part; loc_p is (x, y, size)
         loc_p = self.part[self.part_sketch]
         A_tensor = A_tensor[0, loc_p[1]:loc_p[1] + loc_p[2],
                             loc_p[0]:loc_p[0] + loc_p[2]]
     else:
         # Background branch: blank out every non-background part region
         for key_p in self.part.keys():
             if key_p != 'bg':
                 loc_p = self.part[key_p]
                 A_tensor[0, loc_p[1]:loc_p[1] + loc_p[2],
                          loc_p[0]:loc_p[0] + loc_p[2]] = 255
     # Normalize to [-1, 1], add a batch axis, and convert to a jittor Var
     A_tensor = (A_tensor - 127.5) / 127.5
     A_tensor = np.expand_dims(A_tensor, axis=0)
     A_tensor = A_tensor.astype('float32')
     A_tensor = transform.to_tensor(jt.array(A_tensor))
     return A_tensor
Example #6
File: mnist.py Project: zzmcdc/jittor
 def __getitem__(self, index):
     img = Image.fromarray(self.mnist['images'][index]).convert('RGB')
     if self.transform:
         img = self.transform(img)
     return trans.to_tensor(img), self.mnist['labels'][index]
Example #7
 def __call__(self, image, target):
     image = T.to_tensor(image)
     return image, target
Example #8
 def __call__(self, image, target):
     return T.to_tensor(image), target
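The two __call__ variants above convert only the image to a tensor and pass the detection-style target through untouched. A hedged sketch of how such joint transforms are typically chained; the ComposeJoint helper below is illustrative, not from the source.

    class ComposeJoint:
        # Applies each (image, target) transform in sequence
        def __init__(self, transforms):
            self.transforms = transforms

        def __call__(self, image, target):
            for t in self.transforms:
                image, target = t(image, target)
            return image, target

    # e.g. joint = ComposeJoint([ToTensor()]) with ToTensor.__call__ defined as above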
Example #9
 def __call__(self, img):
     from jittor.transform import to_tensor
     return to_tensor(img)
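Here to_tensor is imported lazily inside __call__, so jittor is only loaded when the transform is actually invoked. A minimal sketch of what the surrounding wrapper class might look like; the class name ToJittorTensor and the usage comment are assumptions for illustration.

    class ToJittorTensor:
        # Converts a PIL Image (or numpy array) to a tensor, importing jittor lazily
        def __call__(self, img):
            from jittor.transform import to_tensor
            return to_tensor(img)

    # e.g. tensor = ToJittorTensor()(Image.open('sample.png'))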