def optimize_latents():
    """Fit a (1, 18, 512) dlatent tensor so the synthesized image matches args.image_path.

    Only the dlatents are optimized (all network weights are frozen); the loss is a
    VGG-feature match against the reference image. The result is written with
    np.save to args.dlatent_path; optionally a progress video and/or the final
    optimized image are saved via a post-synthesis hook.
    """
    print("Optimizing Latents.")
    synthesis_net = StyleGANGenerator(args.model_type).model.synthesis
    latent_optimizer = LatentOptimizer(synthesis_net, args.vgg_layer)

    # Freeze every network weight; gradients flow only into the dlatents.
    for weight in latent_optimizer.parameters():
        weight.requires_grad_(False)

    # Optional hook that snapshots intermediate images for the video / still output.
    if args.video or args.save_optimized_image:
        generated_image_hook = GeneratedImageHook(
            latent_optimizer.post_synthesis_processing, args.save_frequency)

    # Target features: run the (VGG-preprocessed) reference through VGG16 once.
    target = torch.from_numpy(load_images([args.image_path])).cuda()
    target = latent_optimizer.vgg_processing(target)
    reference_features = latent_optimizer.vgg16(target).detach()
    target = target.detach()

    if args.use_latent_finder:
        # Warm-start from a learned image->latent regressor.
        image_to_latent = ImageToLatent().cuda()
        image_to_latent.load_state_dict(torch.load(args.image_to_latent_path))
        image_to_latent.eval()
        latents_to_be_optimized = image_to_latent(target)
        latents_to_be_optimized = (
            latents_to_be_optimized.detach().cuda().requires_grad_(True))
    else:
        # Cold-start from the all-zero latent.
        latents_to_be_optimized = (
            torch.zeros((1, 18, 512)).cuda().requires_grad_(True))

    criterion = LatentLoss()
    optimizer = torch.optim.SGD([latents_to_be_optimized], lr=args.learning_rate)

    progress_bar = tqdm(range(args.iterations))
    for step in progress_bar:
        optimizer.zero_grad()
        generated_image_features = latent_optimizer(latents_to_be_optimized)
        loss = criterion(generated_image_features, reference_features)
        loss.backward()
        loss = loss.item()
        optimizer.step()
        progress_bar.set_description("Step: {}, Loss: {}".format(step, loss))

    optimized_dlatents = latents_to_be_optimized.detach().cpu().numpy()
    np.save(args.dlatent_path, optimized_dlatents)

    if args.video:
        images_to_video(generated_image_hook.get_images(), args.video_path)
    if args.save_optimized_image:
        save_image(generated_image_hook.last_image, args.optimized_image_path)
def Embedding(image_path, image_name):
    """Project one image into dlatent space and save its reconstruction.

    Loads the trained ImageToLatent regressor, predicts dlatents for the image at
    os.path.join(image_path, image_name), synthesizes it back through the
    module-level `synthesizer`, and writes the reconstruction to
    './outputs/<image_name>'.

    Parameters:
        image_path: directory containing the input image.
        image_name: file name of the input image; reused as the output file name.

    NOTE(review): relies on module-level `synthesizer`, `device`, and
    `load_images` — presumably defined elsewhere in this file; verify.
    """
    vgg_processing = VGGProcessing()
    post_processing = PostSynthesisProcessing()

    image_to_latent = ImageToLatent().to(device)
    image_to_latent.load_state_dict(torch.load('./image_to_latent.pt'))
    image_to_latent.eval()

    # Tensor -> uint8 numpy image (first element of the batch).
    post_process = lambda image: post_processing(image).detach().cpu().numpy(
    ).astype(np.uint8)[0]

    reference_image = torch.from_numpy(
        load_images([os.path.join(image_path, image_name)])).to(device)
    reference_image = vgg_processing(reference_image).detach()

    # Fix: pure inference — no_grad avoids building an unused autograd graph.
    with torch.no_grad():
        pred_dlatents = image_to_latent(reference_image)
        pred_images = synthesizer(pred_dlatents)

    pred_images = post_process(pred_images)
    pred_images = np.transpose(pred_images, (1, 2, 0))  # CHW -> HWC for imsave

    # Fix: plt.imsave raises FileNotFoundError if the output directory is missing.
    os.makedirs('./outputs', exist_ok=True)
    plt.imsave('./outputs/' + image_name, pred_images)
    print('Embedding for Image {} is finished.'.format(image_name))
# NOTE(review): this line is a whitespace-mangled paste containing two logical pieces:
#   (1) the tail of an image-normalization nn.Module: mean/std buffers of 0.5 on CPU
#       plus a forward() that divides by 255 and standardizes, mapping uint8-range
#       images to roughly [-1, 1]. Its `class` header and `def __init__` line are not
#       visible in this chunk, so the fragment cannot be safely re-indented here.
#   (2) a flat script that scores 30 batches of 8 real images ('./real/<i+10*j>.jpg')
#       with a pre-built `discriminator` at resolution 128 (step = log2(128) - 2,
#       alpha=1) and prints the mean of the per-batch mean outputs.
# `y = torch.zeros((length))` is allocated and deleted without ever being read —
# apparently dead code; confirm before removing.
# assumes load_images returns an NCHW float array in the 0–255 range — TODO confirm.
self.image_size = 128 self.mean = torch.tensor([0.5,0.5,0.5], device="cpu").view(-1, 1, 1) self.std = torch.tensor([0.5,0.5,0.5], device="cpu").view(-1, 1, 1) def forward(self, image): image = image / torch.tensor(255).float() #image = F.adaptive_avg_pool2d(image, self.image_size) image = (image - self.mean) / self.std #-1~1 return image length=8 normalize=normalize() step = int(math.log(128, 2)) - 2 l=np.zeros((30)) for j in range(30): y=torch.zeros((length)) x=torch.zeros((length,3,128,128)) for i in range(length): #3 for each pictrue #image_path='./sample_32/ref_'+str(i)+'.png' image_path='./real/'+str(i+10*j)+'.jpg' print(image_path) reference_image = load_images([image_path]) reference_image = torch.from_numpy(reference_image) reference_image = normalize(reference_image) #normalize reference_image = reference_image.detach() x[i]=reference_image label=discriminator(x.to(device), step=step, alpha=1) l[j]=label.mean() del(x) del(y) print(l.mean())
# NOTE(review): whitespace-mangled paste of a batch variant of optimize_latents().
# For 83 source images ('./data/<k>.jpg') it runs three independent optimizations per
# image (iterations = 200*iid + 300, i.e. 300/500/700 SGD steps), fitting a (1, 512)
# style latent against VGG16 features of the reference, and stores each result row in
# `total` (shape (83*3, 512)), which is written to args.dlatent_path with np.save.
# Left byte-identical: the collapsed whitespace makes several statement boundaries
# ambiguous (e.g. whether np.save runs as a per-image checkpoint inside the outer loop
# or once at the end), so a re-indented rewrite could silently change observable
# behavior — TODO recover the original file before restructuring.
def optimize_latents(): print("Optimizing Latents.") generator = StyledGenerator(512).to(device) generator.load_state_dict(torch.load(args.path)['generator']) generator.eval() latent_optimizer = LatentOptimizer(generator, args.vgg_layer) mean_style = get_mean_style(generator, device) total = np.zeros((83 * 3, 512)) # Optimize only the dlatents. for param in latent_optimizer.parameters(): param.requires_grad_(False) if args.video or args.save_optimized_image: # Hook, saves an image during optimization to be used to create video. generated_image_hook = GeneratedImageHook( latent_optimizer.post_synthesis_processing, args.save_frequency) for i in range(3 * 83): #3 for each pictrue iid = i % 3 path = int(i / 3) iterations = int(200 * iid + 300) image_path = './data/' + str(path) + '.jpg' print(image_path) reference_image = load_images([image_path]) reference_image = torch.from_numpy(reference_image).to(device) reference_image = latent_optimizer.vgg_processing( reference_image) #normalize reference_features = latent_optimizer.vgg16( reference_image).detach() #vgg reference_image = reference_image.detach() if args.use_latent_finder: image_to_latent = ImageToLatent().cuda() image_to_latent.load_state_dict( torch.load(args.image_to_latent_path)) image_to_latent.eval() latents_to_be_optimized = image_to_latent(reference_image) latents_to_be_optimized = latents_to_be_optimized.detach().cuda( ).requires_grad_(True) else: latents_to_be_optimized = torch.zeros( (1, 512)).cuda().requires_grad_(True) criterion = LatentLoss() optimizer = torch.optim.SGD([latents_to_be_optimized], lr=args.learning_rate) progress_bar = tqdm(range(iterations)) for step in progress_bar: optimizer.zero_grad() generated_image_features = latent_optimizer( latents_to_be_optimized, mean_style, i) #print(latents_to_be_optimized) loss = criterion(generated_image_features, reference_features) loss.backward() loss = loss.item() optimizer.step() progress_bar.set_description("Step: {}, Loss: {}".format( step, 
loss)) optimized_dlatents = latents_to_be_optimized.detach().cpu().numpy() total[i] = optimized_dlatents[0] np.save(args.dlatent_path, total)
# NOTE(review): whitespace-mangled paste of a single-image optimize_latents() variant
# for StyledGenerator: loads the generator checkpoint from args.path, computes
# mean_style, freezes all network weights, and fits a (1, 512) latent against VGG16
# features of args.image_path using RMSprop (weight_decay=0.02). The preprocessed
# reference is also dumped to './reference.png'. The final latents are printed and
# saved with np.save to args.dlatent_path; optional video / final-image output goes
# through GeneratedImageHook. Contains a disabled re-normalization experiment
# (commented-out mean/std perturbation of the latents inside the loop).
# Left byte-identical: the collapsed whitespace makes statement placement ambiguous
# (e.g. whether print(latents_to_be_optimized) runs every step inside the loop or
# once after it), so a re-indented rewrite could silently change observable
# behavior — TODO recover the original file before restructuring.
def optimize_latents(): print("Optimizing Latents.") generator = StyledGenerator(512).to(device) generator.load_state_dict(torch.load(args.path)['generator']) generator.eval() latent_optimizer = LatentOptimizer(generator, args.vgg_layer) mean_style = get_mean_style(generator, device) # Optimize only the dlatents. for param in latent_optimizer.parameters(): param.requires_grad_(False) if args.video or args.save_optimized_image: # Hook, saves an image during optimization to be used to create video. generated_image_hook = GeneratedImageHook( latent_optimizer.post_synthesis_processing, args.save_frequency) reference_image = load_images([args.image_path]) reference_image = torch.from_numpy(reference_image).to(device) reference_image = latent_optimizer.vgg_processing( reference_image) #normalize utils.save_image(reference_image, './reference.png', nrow=1, normalize=True, range=(-1, 1)) reference_features = latent_optimizer.vgg16(reference_image).detach() #vgg reference_image = reference_image.detach() if args.use_latent_finder: image_to_latent = ImageToLatent().cuda() image_to_latent.load_state_dict(torch.load(args.image_to_latent_path)) image_to_latent.eval() latents_to_be_optimized = image_to_latent(reference_image) latents_to_be_optimized = latents_to_be_optimized.detach().cuda( ).requires_grad_(True) else: latents_to_be_optimized = torch.zeros( (1, 512)).cuda().requires_grad_(True) #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std()) criterion = LatentLoss() optimizer = torch.optim.RMSprop([latents_to_be_optimized], lr=args.learning_rate, weight_decay=0.02) #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std()) progress_bar = tqdm(range(args.iterations)) for step in progress_bar: #print(latents_to_be_optimized) optimizer.zero_grad() generated_image_features = latent_optimizer(latents_to_be_optimized, mean_style) loss = criterion(generated_image_features, reference_features) loss.backward() loss = loss.item() optimizer.step() # if 
step==args.iterations: # break # with torch.no_grad(): # latents_to_be_optimized.add_(-latents_to_be_optimized.mean()+3e-2*torch.randn(1).to('cuda')) # latents_to_be_optimized.div_(latents_to_be_optimized.std()+3e-2*torch.randn(1).to('cuda')) #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std()) progress_bar.set_description("Step: {}, Loss: {}".format(step, loss)) print(latents_to_be_optimized) #latents_to_be_optimized=latent_optimizer.normalize(latents_to_be_optimized) #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std()) optimized_dlatents = latents_to_be_optimized.detach().cpu().numpy() np.save(args.dlatent_path, optimized_dlatents) if args.video: images_to_video(generated_image_hook.get_images(), args.video_path) if args.save_optimized_image: save_image(generated_image_hook.last_image, args.optimized_image_path)