def __getitem__(self, index, tp=True):  # return 256*256 image
    """Return a training pair: a model-converted image ('LQ') and its warp target ('GT').

    Args:
        index: dataset index into ``self.real_dir`` (a list of image paths).
        tp: unused in this body — NOTE(review): appears vestigial; confirm callers before removing.

    Returns:
        dict with keys 'LQ' (model output tensor, batch dim squeezed away) and
        'GT' (target image tensor as float).
    """
    # Indices below len(self.cage) are treated as class 'A', the rest as 'B';
    # this tag selects which decoder branch self.model uses.
    cls = 'A' if index < len(self.cage) else 'B'
    # cv2.imread returns uint8 BGR; resize to 256x256 and scale to [0, 1].
    img = cv2.resize(cv2.imread(self.real_dir[index]), (256, 256)) / 255.0
    # Augment, then produce (warped input, aligned target) pair.
    img, target_img = random_warp(
        random_transform(img, **self.random_transform_args))
    # Add a batch dimension, run through the model for class `cls`.
    # NOTE(review): Variable is a no-op wrapper in modern PyTorch (>=0.4).
    img = self.model(
        Variable(torch.unsqueeze(self.toTensor(img).float(), 0)), cls)
    # Drop the batch dimension again so 'LQ' and 'GT' have matching rank.
    img = torch.squeeze(img, 0)
    target_img = self.toTensor(target_img)
    target_img = target_img.float()
    return {'LQ': img, 'GT': target_img}
def get_training_data( images, batch_size ):
    """Sample a random training batch of (warped, target) image pairs.

    Args:
        images: indexable collection of images (numpy arrays of identical shape).
        batch_size: number of pairs to draw (with replacement).

    Returns:
        Tuple ``(warped_images, target_images)`` — numpy arrays of shape
        ``(batch_size,) + per_image_shape``.

    Raises:
        ValueError: if ``batch_size`` is not positive (the original silently
            raised UnboundLocalError on the return when batch_size == 0).
    """
    if batch_size <= 0:
        raise ValueError( "batch_size must be positive" )
    # Sample indices uniformly with replacement from [0, len(images)).
    indices = numpy.random.randint( len(images), size=batch_size )
    for i,index in enumerate(indices):
        image = images[index]
        image = random_transform( image, **random_transform_args )
        warped_img, target_img = random_warp( image )

        # Allocate output buffers once, after the first pair fixes shapes/dtypes.
        if i == 0:
            warped_images = numpy.empty( (batch_size,) + warped_img.shape, warped_img.dtype )
            # Bug fix: target buffer previously used warped_img.dtype.
            target_images = numpy.empty( (batch_size,) + target_img.shape, target_img.dtype )

        warped_images[i] = warped_img
        target_images[i] = target_img

    return warped_images, target_images
def get_training_data(images, batch_size):
    """Sample a random training batch of (warped, target) image pairs.

    Args:
        images: indexable collection of images (numpy arrays of identical shape).
        batch_size: number of pairs to draw (with replacement).

    Returns:
        Tuple ``(warped_images, target_images)`` — numpy arrays of shape
        ``(batch_size,) + per_image_shape``.

    Raises:
        ValueError: if ``batch_size`` is not positive (the original silently
            raised UnboundLocalError on the return when batch_size == 0).
    """
    if batch_size <= 0:
        raise ValueError("batch_size must be positive")
    # Sample indices uniformly with replacement from [0, len(images)).
    indices = numpy.random.randint(len(images), size=batch_size)
    for i, index in enumerate(indices):
        image = images[index]
        image = random_transform(image, **random_transform_args)
        warped_img, target_img = random_warp(image)

        # Allocate output buffers once, after the first pair fixes shapes/dtypes.
        if i == 0:
            warped_images = numpy.empty((batch_size,) + warped_img.shape,
                                        warped_img.dtype)
            # Bug fix: target buffer previously used warped_img.dtype.
            target_images = numpy.empty((batch_size,) + target_img.shape,
                                        target_img.dtype)

        warped_images[i] = warped_img
        target_images[i] = target_img

    return warped_images, target_images
def convert_face(croped_face):
    """Run a cropped face through the trained autoencoder's 'B' decoder.

    Args:
        croped_face: a face image as a numpy array (any size; resized to 256x256).

    Returns:
        The model's converted output tensor for decoder branch 'B'.
    """
    resized_face = cv2.resize(croped_face, (256, 256))
    # Scale uint8 pixel values into [0, 1] before warping.
    normalized_face = resized_face / 255.0
    warped_img, _ = random_warp(normalized_face)
    # Add a leading batch dimension and move to the target device as float.
    batch_warped_img = np.expand_dims(warped_img, axis=0)
    batch_warped_img = toTensor(batch_warped_img)
    batch_warped_img = batch_warped_img.to(device).float()

    # Perf fix: the original rebuilt the Autoencoder and re-read the checkpoint
    # from disk on every call; load once and cache on the function object.
    model = getattr(convert_face, '_model', None)
    if model is None:
        model = Autoencoder().to(device)
        checkpoint = torch.load('./checkpoint/autoencoder.t7')
        model.load_state_dict(checkpoint['state'])
        convert_face._model = model

    converted_face = model(batch_warped_img, 'B')
    return converted_face
def get_training_data(images, batch_size):
    """Sample a random training batch of (warped, target) image pairs.

    Args:
        images: indexable collection of images (numpy arrays of identical shape).
        batch_size: number of pairs to draw (with replacement).

    Returns:
        Tuple ``(warped_images, target_images)`` — numpy arrays of shape
        ``(batch_size,) + per_image_shape``.

    Raises:
        ValueError: if ``batch_size`` is not positive (the original silently
            raised UnboundLocalError on the return when batch_size == 0).
    """
    if batch_size <= 0:
        raise ValueError("batch_size must be positive")
    # Draw random integers in [0, len(images)) — low inclusive, high exclusive.
    indices = numpy.random.randint(len(images), size=batch_size)
    for i, index in enumerate(indices):
        image = images[index]
        # Apply the shared augmentation pipeline to each sampled image.
        image = random_transform(image, **random_transform_args)
        # Produce the (warped input, aligned target) pair for this image.
        warped_img, target_img = random_warp(image)

        # Allocate the two output buffers once, after the first pair fixes
        # per-image shapes and dtypes.
        if i == 0:
            warped_images = numpy.empty((batch_size, ) + warped_img.shape,
                                        warped_img.dtype)
            # Bug fix: target buffer previously used warped_img.dtype.
            target_images = numpy.empty((batch_size, ) + target_img.shape,
                                        target_img.dtype)

        warped_images[i] = warped_img
        target_images[i] = target_img

    return warped_images, target_images