def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Inverting %d images :' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator, y_gt, loss,
                                                     batch_size=1, video=args.video)
        # Get images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(os.path.join(args.outputs, image_name_list[img_id]))
        # Create video
        if args.video:
            print('Create GAN-Inversion video.')
            video = cv2.VideoWriter(
                filename=os.path.join(args.outputs, '%s_inversion.avi' % image_name_list[img_id]),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(frameSize, frameSize))
            print('Save frames.')
            for i, sample in enumerate(history):
                image = generator(sample)
                image_cv2 = convert_array_to_images(image.detach().cpu().numpy())[0][:, :, ::-1]
                video.write(image_cv2)
            video.release()
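# Note: _sigmoid_to_tanh and _tanh_to_sigmoid are imported from elsewhere in
# the repo and are not shown in this file. A minimal sketch of what they are
# assumed to do, i.e. the affine maps between the [0, 1] image range and the
# generator's [-1, 1] tanh range (the names below are hypothetical stand-ins,
# not the repo's definitions):
def _sigmoid_to_tanh_sketch(x):
    # Map a tensor from [0, 1] to [-1, 1].
    return 2.0 * x - 1.0

def _tanh_to_sigmoid_sketch(x):
    # Map a tensor from [-1, 1] back to [0, 1].
    return (x + 1.0) / 2.0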
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)  # generator
    loss = get_loss(args.loss_type, args)
    sr_loss = SR_loss(loss, args.down, args.factor)  # loss used for super-resolution
    # Move to CUDA.
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Super-resolving %d images ' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            # Resize the input so that images of more resolutions are supported.
            image = convert2target(_add_batch_one(load_as_tensor(image)), 'nearest')
            image_tensor_list.append(image)
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator, y_gt, sr_loss,
                                                     batch_size=BATCH_SIZE, video=args.video)
        # Feed the optimized latent estimates back into the generator,
        # then split the batch dimension into single images.
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save results
        for img_id, image in enumerate(images):
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(
                os.path.join(args.outputs, '%s.png' % image_name_list[img_id][:-4]))
        # Create video
        if args.video:
            print('Create GAN-Inversion video.')
            video = cv2.VideoWriter(
                filename=os.path.join(args.outputs, '%s_sr.avi' % image_name_list[img_id][:-4]),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(frameSize, frameSize))
            print('Save frames.')
            for i, sample in enumerate(history):
                # Generate an image from each latent estimate saved in history.
                image = generator(sample)
                image_cv2 = convert_array_to_images(image.detach().cpu().numpy())[0][:, :, ::-1]
                video.write(image_cv2)
            video.release()
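# SR_loss is imported from the repo's loss module and is not shown here. A
# minimal sketch of the idea, assuming it downsamples both images by `factor`
# with the `down` interpolation mode before applying the wrapped loss (the
# class name and signature below are assumptions, not the repo's definition):
import torch
import torch.nn.functional as nnf

class SRLossSketch(torch.nn.Module):
    def __init__(self, base_loss, down='bicubic', factor=8):
        super().__init__()
        self.base_loss = base_loss
        self.down = down
        self.factor = factor

    def forward(self, estimate, target):
        # Compare the downsampled generator output with the downsampled
        # target, so only low-frequency content is constrained and the GAN
        # prior is free to hallucinate high-frequency detail.
        down = lambda x: nnf.interpolate(x, scale_factor=1.0 / self.factor, mode=self.down)
        return self.base_loss(down(estimate), down(target))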
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    out_dir, exp_name = create_experiments_directory(args, args.exp_id)
    print(out_dir)
    print(exp_name)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    generator.cuda()
    if args.target_images.endswith('.png') or args.target_images.endswith('.jpg'):
        image_list = [os.path.abspath(args.target_images)]
    else:
        image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']
    n_blocks = generator.n_blocks
    print('There are %d blocks in this generator.' % n_blocks)  # 19 for PGGAN
    latent_space = generator.PGGAN_LATENT[args.layer]
    print('The latent space is ', latent_space)

    with open(args.matrix_dir, 'rb') as file_in:
        matrix = pkl.load(file_in)
    print('Load matrix successfully.')
    print('Matrix shape ', matrix.shape)
    matrix2 = thrC(matrix, args.alpha)
    predict, _ = post_proC(matrix2, args.n_subs, args.d_subs, args.power)
    print(predict)

    # Relabel the clusters so that cluster indices are sorted by cluster size.
    p_sum = [sum(predict == k) for k in range(1, args.cluster_numbers, 1)]
    p_sum = np.array(p_sum)
    p_sort = np.argsort(p_sum)[::-1]
    print(p_sum)
    predict_new = predict.copy()
    for i in range(1, args.cluster_numbers, 1):
        predict_new[predict == (p_sort[i - 1] + 1)] = i
    predict = predict_new.copy()
    p_sum = [sum(predict == k) for k in range(1, args.cluster_numbers, 1)]
    print(predict)
    print(p_sum)

    # Preview the images.
    gan_type, image_type = args.gan_model.split("_")
    print('The gan type is %s, and the image type is %s' % (gan_type, image_type))
    test_image_dir = os.path.join('./bin/', gan_type, image_type)
    print(test_image_dir)
    files = os.listdir(test_image_dir)
    test_zs = []
    for i in range(len(files)):
        if files[i].endswith('.pkl'):
            with open(os.path.join(test_image_dir, files[i]), 'rb') as file_in:
                test_zs.append(pkl.load(file_in))
    test_zs = torch.from_numpy(np.concatenate(test_zs, axis=0).astype(np.float32)).cuda()
    print('Load all testing zs, shape is ', test_zs.size())
    image_number = 3
    sel_idx = np.random.choice(test_zs.shape[0], size=[image_number], replace=False)
    F = generator([test_zs[sel_idx]], which_block=args.layer, pre_model=True)
    features = F.detach().cpu().numpy()

    # Build one binary channel mask per cluster from the cluster labels.
    predict_masks = []
    for i in range(1, args.cluster_numbers + 1, 1):
        mask = torch.from_numpy((predict == i).astype(np.float32)).cuda()
        predict_masks += [mask.reshape((1, -1, 1, 1))]

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Inverting %d images :' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_only = images[0].split(".")[0].split("/")[-1]
        print(image_name_only)
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        print("image_name_list", image_name_list)
        print("image_tensor_list, [", image_tensor_list[0].size(), "]")
        y_image = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        print('image size is ', y_image.size())

        z_estimate = generator.init_value(batch_size=1, which_layer=0, init=args.init_type,
                                          z_numbers=args.cluster_numbers * args.code_per_cluster)
        base_estimate = generator.init_value(batch_size=1, which_layer=0, init=args.init_type,
                                             z_numbers=1)
        if args.optimization == 'GD':
            z_optimizer = torch.optim.SGD(z_estimate + base_estimate, lr=args.lr)
        elif args.optimization == 'Adam':
            z_optimizer = torch.optim.Adam(z_estimate + base_estimate, lr=args.lr)
        else:
            raise NotImplementedError('We don\'t support this type of optimization.')

        for iter in range(args.iterations):
            for estimate in z_estimate:
                estimate.requires_grad = True
            for estimate in base_estimate:
                estimate.requires_grad = True
            features = generator(
                [z_estimate[0].reshape([args.cluster_numbers * args.code_per_cluster, 512, 1, 1])],
                which_block=args.layer, pre_model=True)
            base_feature = generator([base_estimate[0].reshape([1, 512, 1, 1])],
                                     which_block=args.layer, pre_model=True)
            # Mix the per-code feature maps with their cluster masks.
            for t in range(args.cluster_numbers * args.code_per_cluster):
                if t == 0:
                    f_mix = features[t].view(*((1,) + latent_space)) \
                            * predict_masks[t // args.code_per_cluster]
                else:
                    f_mix = f_mix + features[t].view(*((1,) + latent_space)) \
                            * predict_masks[t // args.code_per_cluster]
            f_mix = f_mix + base_feature
            y_estimate = generator([f_mix], which_block=args.layer, post_model=True)
            y_raw_estimate = generator([base_feature], which_block=args.layer, post_model=True)
            z_optimizer.zero_grad()
            loss = 0.01 * torch.mean(torch.pow(y_estimate - y_image, 2.0)) \
                   + torch.mean(torch.pow(y_raw_estimate - y_image, 2.0))
            loss.backward()
            z_optimizer.step()

            if iter % args.report_value == 0:
                print('Iter %d, layer %d, loss = %.4f.' % (iter, args.layer, float(loss.item())))
            if iter % args.report_image == 0:
                print('Saving the images.')
                y_estimate_pil = Tensor2PIL(
                    torch.clamp(_tanh_to_sigmoid(y_estimate.detach().cpu()), min=0.0, max=1.0))
                y_estimate_pil.save(
                    os.path.join(out_dir, image_name_only + "_estimate_iter%d.png" % iter))
                # Also save the output produced with the added bias (base) code.
                y_estimate_pil = Tensor2PIL(
                    torch.clamp(_tanh_to_sigmoid(y_raw_estimate.detach().cpu()), min=0.0, max=1.0))
                y_estimate_pil.save(
                    os.path.join(out_dir, image_name_only + "_raw_estimate_iter%d.png" % iter))
                # Save all the codes.
                codes = []
                for code_idx in range(args.cluster_numbers * args.code_per_cluster):
                    code_f = generator([z_estimate[0][code_idx]],
                                       which_block=args.layer + 1, pre_model=True)
                    code_y = generator([code_f],
                                       which_block=args.layer + 1, post_model=True).detach().cpu()
                    codes.append(torch.clamp(_tanh_to_sigmoid(code_y), min=0, max=1))
                codes = torch.cat(codes, dim=0).detach().cpu()
                torchvision.utils.save_image(
                    codes,
                    os.path.join(out_dir, image_name_only + '_codes_iter%d.png' % iter),
                    nrow=(args.cluster_numbers * args.code_per_cluster) // 2)
            if iter % args.report_model == 0:
                print('Save the models')
                save_dict = {
                    'z': z_estimate[0].detach().cpu().numpy(),
                    'matrix': matrix,
                    'layer': args.layer,
                    'predict': predict,
                }
                with open(os.path.join(out_dir, 'save_dict_iter_%d_layer_%d.pkl'
                                       % (iter, args.layer)), 'wb') as file_out:
                    pkl.dump(save_dict, file_out)
                print('Save the models OK!')
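# The mixing loop above composes a single feature map from per-code feature
# maps via binary channel masks. A minimal self-contained illustration of that
# step, assuming one mask per code for simplicity (the helper name and shapes
# are assumptions; PGGAN features are NCHW):
import torch

def mix_features(features, masks):
    # features: (N, C, H, W), one feature map per latent code.
    # masks: list of N tensors shaped (1, C, 1, 1) with 0/1 entries that
    # partition the channels between clusters.
    mixed = torch.zeros_like(features[:1])
    for t, mask in enumerate(masks):
        mixed = mixed + features[t:t + 1] * mask
    return mixed  # (1, C, H, W)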
def main(args):
    os.makedirs(args.outputs + '/input', exist_ok=True)
    os.makedirs(args.outputs + '/GT', exist_ok=True)
    os.makedirs(args.outputs + '/mGANoutput', exist_ok=True)
    with open(args.outputs + '/mGANargs.txt', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)
    mask = parsing_mask('code/mganprior/masks/' + args.mask).cuda()
    mask_cpu = parsing_mask('code/mganprior/masks/' + args.mask)
    crop_loss = masked_loss(loss, mask)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    if len(image_list) > 300:
        print('Limiting the image set to 300.')
        image_list = image_list[:300]
    frameSize = MODEL_POOL[args.gan_model]['resolution']
    start_time = time.time()

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Processing %d images :' % (i, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        if args.varmask:
            os.makedirs(args.outputs + '/mask', exist_ok=True)
            mask_cpu = get_var_mask(y_gt.shape[-2:], args.min_p, args.max_p,
                                    args.width_mean, args.width_var)
            mask = mask_cpu.cuda()
            save_image(mask, os.path.join(args.outputs + '/mask/%d%s' % (i, '.png')))
            crop_loss = masked_loss(loss, mask)
        latent_estimates, history = inversion.invert(generator, y_gt, crop_loss,
                                                     batch_size=1, video=args.video)
        # Get images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            y_RGB = Tensor2PIL(image_tensor_list[img_id])
            y_RGB.save(args.outputs + '/GT/%d%s' % (i, image_name_list[img_id][-4:]))
            y_gt_pil = Tensor2PIL(mask_images(image_tensor_list[img_id], mask_cpu))
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(
                os.path.join(args.outputs + '/mGANoutput/%d%s' % (i, image_name_list[img_id][-4:])))
            y_gt_pil.save(
                os.path.join(args.outputs + '/input/%d%s' % (i, image_name_list[img_id][-4:])))
        # Create video
        if args.video:
            print('Create GAN-Inversion video.')
            video = cv2.VideoWriter(
                filename=os.path.join(
                    args.outputs,
                    '%s_inpainting_%s.avi' % (image_name_list[img_id][:-4],
                                              os.path.split(args.mask[:-4])[1])),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(frameSize, frameSize))
            print('Save frames.')
            for i, sample in enumerate(history):
                image = generator(sample)
                image_cv2 = convert_array_to_images(image.detach().cpu().numpy())[0][:, :, ::-1]
                video.write(image_cv2)
            video.release()
    print(f'{(time.time() - start_time) / 60:.2f}', 'minutes taken in total;',
          f'{(time.time() - start_time) / 60 / len(image_list):.2f}', 'per image.')
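# masked_loss is imported from the repo's loss module and is not shown here. A
# minimal sketch, assuming it restricts the wrapped loss to the known
# (unmasked) pixels so the inversion is not penalized inside the hole (the
# class name below is an assumption, not the repo's definition):
import torch

class MaskedLossSketch(torch.nn.Module):
    def __init__(self, base_loss, mask):
        super().__init__()
        self.base_loss = base_loss
        # mask is broadcastable to the image: 1 = known pixel, 0 = hole.
        self.register_buffer('mask', mask)

    def forward(self, estimate, target):
        # Zero out the hole region on both sides before computing the loss.
        return self.base_loss(estimate * self.mask, target * self.mask)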
def run(args):
    os.makedirs(args.outputs, exist_ok=True)  # create the output directory; skip if it exists
    # Generator
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)  # loss function
    generator.cuda()  # PyTorch modules must be moved to the GPU explicitly
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)  # collect the input image paths
    frameSize = MODEL_POOL[args.gan_model]['resolution']  # output resolution of the model

    # Process the images batch by batch.
    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Inverting %d images :' % (i + 1, 1), end='')
        print('%s\n' % tuple(images))
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        # torch.cat concatenates the tensors along dim 0 (all tensors must have
        # the same shape except in the cat dimension); _sigmoid_to_tanh then
        # maps the value range [0, 1] to [-1, 1].
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert: estimate the latent codes that reproduce the target images.
        latent_estimates, history = inversion.invert(generator, y_gt, loss,
                                                     batch_size=BATCH_SIZE, video=args.video)
        # Map [-1, 1] back to [0, 1]; torch.clamp guards against out-of-range values.
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            # Convert from tensor to PIL image and save.
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id])
            y_estimate_pil.save(os.path.join(args.outputs, image_name_list[img_id]))
        # Create video
        if args.video:
            print('Create GAN-Inversion video.')
            video = cv2.VideoWriter(
                filename=os.path.join(args.outputs, '%s_inversion.avi' % image_name_list[img_id]),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(frameSize, frameSize))
            print('Save frames.')
            for i, sample in enumerate(history):
                image = generator(sample)
                image_cv2 = convert_array_to_images(image.detach().cpu().numpy())[0][:, :, ::-1]
                video.write(image_cv2)
            video.release()
def main(args):
    os.makedirs(args.outputs, exist_ok=True)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)
    cor_loss = Color_loss(loss)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    frameSize = MODEL_POOL[args.gan_model]['resolution']

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Processing %d images :' % (i + 1, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator, y_gt, cor_loss,
                                                     batch_size=1, video=args.video)
        # Get images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            up_gray = colorization_images(image_tensor_list[img_id])
            y_gray_pil = Tensor2PIL(up_gray, mode='L')
            y_gray_pil.save(
                os.path.join(args.outputs, '%s-%s.png' % (image_name_list[img_id], 'gray')))
            Y_gt = Tensor2PIL(image_tensor_list[img_id], mode='RGB').convert('YCbCr')
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id], mode='RGB').convert('YCbCr')
            _, Cb, Cr = y_estimate_pil.split()
            Y, _, _ = Y_gt.split()
            # Keep the ground-truth luminance and take the estimated chrominance.
            y_colorization = Image.merge('YCbCr', (Y, Cb, Cr))
            y_colorization.convert('RGB').save(
                os.path.join(args.outputs,
                             '%s-%d.png' % (image_name_list[img_id], math.floor(time.time()))))
        # Create video
        if args.video:
            print('Create GAN-Inversion video.')
            video = cv2.VideoWriter(
                filename=os.path.join(args.outputs, '%s_inversion.avi' % image_name_list[img_id]),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(frameSize, frameSize))
            print('Save frames.')
            for i, sample in enumerate(history):
                image = torch.clamp(_tanh_to_sigmoid(generator(sample)), min=0., max=1.).cpu()
                image_pil = Tensor2PIL(image, mode='RGB').convert('YCbCr')
                _, Cb, Cr = image_pil.split()
                y_colorization = Image.merge('YCbCr', (Y, Cb, Cr)).convert('RGB')
                image_cv2 = cv2.cvtColor(np.asarray(y_colorization), cv2.COLOR_RGB2BGR)
                video.write(image_cv2)
            video.release()
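# Color_loss is imported from the repo's loss module and is not shown here. A
# minimal sketch, assuming it compares only the luminance of the estimate with
# the grayscale target, leaving chrominance free for the GAN prior to fill in
# (the class name and the BT.601 luma weights below are assumptions):
import torch

class ColorLossSketch(torch.nn.Module):
    def __init__(self, base_loss):
        super().__init__()
        self.base_loss = base_loss
        # ITU-R BT.601 luma weights for an (N, 3, H, W) RGB tensor.
        self.register_buffer('luma', torch.tensor([0.299, 0.587, 0.114]).view(1, 3, 1, 1))

    def forward(self, estimate, target):
        # Project both images to single-channel luminance before the loss.
        to_gray = lambda x: (x * self.luma).sum(dim=1, keepdim=True)
        return self.base_loss(to_gray(estimate), to_gray(target))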
def main(args):
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)
    loss.cuda()
    generator.cuda()
    inversion = get_inversion(args.optimization, args)
    os.makedirs(args.outputs, exist_ok=True)
    save_manipulate_dir = os.path.join(args.outputs, args.attribute_name)
    images_list = image_files(args.target_images)

    for i, images in enumerate(images_list):
        print('%d: Processing images ' % (i + 1), end='')
        image_name = os.path.split(images)[1]
        print(image_name)
        image_name_list = []
        image_tensor_list = []
        image_name_list.append(os.path.split(images)[1])
        image_tensor_list.append(_add_batch_one(load_as_tensor(images)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator, y_gt, loss, batch_size=1)
        image_manipulate_dir = os.path.join(save_manipulate_dir, image_name[:-4])
        os.makedirs(image_manipulate_dir, exist_ok=True)
        wp = latent_estimates[0].cpu().detach().numpy()
        mask = latent_estimates[1].cpu().detach().numpy()
        # Visualize results with the given w+ latent vector.
        if args.original:
            print('Save inversion.')
            image = generator(latent_estimates)
            image_cv2 = convert_array_to_images(image.detach().cpu().numpy())
            cv2.imwrite(os.path.join(image_manipulate_dir, 'original_inversion.png'),
                        image_cv2[0][:, :, ::-1])
        boundary, bias = get_boundary(
            os.path.join(BOUNDARY_DIR,
                         'pggan_celebahq_%s_boundary.npy' % args.attribute_name))
        wp_list = get_interpolated_wp(wp, boundary, max_step=args.max_step,
                                      num_frames=args.fps * args.duration)
        # Create a video for attribute manipulation with the given w+ latent vector.
        if args.video:
            print('Create attribute manipulation video.')
            video = cv2.VideoWriter(
                filename=os.path.join(image_manipulate_dir,
                                      '%s_manipulate.avi' % args.attribute_name),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(1024, 1024))
            print('Save frames.')
            for i, sample in enumerate(wp_list):
                image = generator([
                    torch.from_numpy(sample).view((1,) + sample.shape).cuda(),
                    torch.from_numpy(mask).cuda(),
                ])
                image_cv2 = convert_array_to_images(image.detach().cpu().numpy())[0][:, :, ::-1]
                cv2.imwrite(os.path.join(image_manipulate_dir, '%d.png' % i), image_cv2)
                video.write(image_cv2)
            video.release()
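# get_interpolated_wp is imported from elsewhere in the repo. A minimal
# sketch, assuming a linear walk of the inverted w+ code along the boundary
# normal, in the style of InterFaceGAN editing (the function name, signature,
# and the symmetric step schedule below are assumptions):
import numpy as np

def interpolate_wp_sketch(wp, boundary, max_step=3.0, num_frames=60):
    # Move wp from -max_step to +max_step along the boundary direction,
    # producing one w+ code per video frame.
    steps = np.linspace(-max_step, max_step, num_frames)
    return [wp + s * boundary for s in steps]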
def main(args):
    os.makedirs(args.outputs + '/input', exist_ok=True)
    os.makedirs(args.outputs + '/GT', exist_ok=True)
    os.makedirs(args.outputs + '/mGANoutput', exist_ok=True)
    with open(args.outputs + '/mGANargs.txt', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    loss = get_loss(args.loss_type, args)
    cor_loss = Color_loss(loss)
    generator.cuda()
    loss.cuda()
    inversion = get_inversion(args.optimization, args)
    image_list = image_files(args.target_images)
    if len(image_list) > 300:
        print('Limiting the image set to 300.')
        image_list = image_list[:300]
    frameSize = MODEL_POOL[args.gan_model]['resolution']
    start_time = time.time()

    for i, images in enumerate(split_to_batches(image_list, 1)):
        print('%d: Processing %d images :' % (i, 1), end='')
        pt_image_str = '%s\n'
        print(pt_image_str % tuple(images))
        image_name_list = []
        image_tensor_list = []
        for image in images:
            image_name_list.append(os.path.split(image)[1])
            image_tensor_list.append(_add_batch_one(load_as_tensor(image)))
        y_gt = _sigmoid_to_tanh(torch.cat(image_tensor_list, dim=0)).cuda()
        # Invert
        latent_estimates, history = inversion.invert(generator, y_gt, cor_loss,
                                                     batch_size=1, video=args.video)
        # Get images
        y_estimate_list = torch.split(
            torch.clamp(_tanh_to_sigmoid(generator(latent_estimates)),
                        min=0., max=1.).cpu(),
            1, dim=0)
        # Save
        for img_id, image in enumerate(images):
            up_gray = colorization_images(image_tensor_list[img_id])
            y_gray_pil = Tensor2PIL(up_gray, mode='L')
            y_gray_pil.save(args.outputs + '/input/%d%s' % (i, image_name_list[img_id][-4:]))
            y_RGB = Tensor2PIL(image_tensor_list[img_id])
            y_RGB.save(args.outputs + '/GT/%d%s' % (i, image_name_list[img_id][-4:]))
            Y_gt = Tensor2PIL(image_tensor_list[img_id], mode='RGB').convert('YCbCr')
            y_estimate_pil = Tensor2PIL(y_estimate_list[img_id], mode='RGB').convert('YCbCr')
            _, Cb, Cr = y_estimate_pil.split()
            Y, _, _ = Y_gt.split()
            # Keep the ground-truth luminance and take the estimated chrominance.
            y_colorization = Image.merge('YCbCr', (Y, Cb, Cr))
            y_colorization.convert('RGB').save(
                args.outputs + '/mGANoutput/%d%s' % (i, image_name_list[img_id][-4:]))
        # Create video
        if args.video:
            print('Create GAN-Inversion video.')
            video = cv2.VideoWriter(
                filename=os.path.join(args.outputs, '%s_inversion.avi' % image_name_list[img_id]),
                fourcc=cv2.VideoWriter_fourcc(*'MJPG'),
                fps=args.fps,
                frameSize=(frameSize, frameSize))
            print('Save frames.')
            for i, sample in enumerate(history):
                image = torch.clamp(_tanh_to_sigmoid(generator(sample)), min=0., max=1.).cpu()
                image_pil = Tensor2PIL(image, mode='RGB').convert('YCbCr')
                _, Cb, Cr = image_pil.split()
                y_colorization = Image.merge('YCbCr', (Y, Cb, Cr)).convert('RGB')
                image_cv2 = cv2.cvtColor(np.asarray(y_colorization), cv2.COLOR_RGB2BGR)
                video.write(image_cv2)
            video.release()
    print(f'{(time.time() - start_time) / 60:.2f}', 'minutes taken in total;',
          f'{(time.time() - start_time) / 60 / len(image_list):.2f}', 'per image.')