WR.counter.update(i)

# W300 sample: image batch plus its normalized landmarks
# (drawn from a separate, synchronized landmark loader).
batch = next(LazyLoader.w300().loader_train_inf)
real_img = batch["data"].cuda()
landmarks = torch.clamp(
    next(LazyLoader.w300_landmarks(args.data_path).loader_train_inf).cuda(),
    max=1)
heatmap_sum = heatmapper.forward(landmarks).sum(1, keepdim=True).detach()

with open(os.path.join(sys.path[0], "../parameters/cycle_loss_2.json")) as f:
    coefs = json.load(f)

# Forward ("tuda") direction: heatmap -> image. "tuda" and "obratno" are
# Russian for "there" and "back", i.e. the two halves of the cycle.
fake, fake_latent = enc_dec.generate(heatmap_sum)
fake_latent_pred = enc_dec.encode_latent(fake)

gan_model_tuda.discriminator_train([real_img], [fake.detach()])
(
    gan_model_tuda.generator_loss([real_img], [fake])
    + l1_loss(fake_latent_pred, fake_latent) * coefs["style"]
).minimize_step(gan_model_tuda.optimizer.opt_min, style_opt)

# Backward ("obratno") direction: image -> heatmap.
hm_pred = hg.forward(real_img)["hm_sum"]
hm_ref = heatmap_sum  # same rendered heatmap as above, no need to recompute

gan_model_obratno.discriminator_train([hm_ref], [hm_pred.detach()])
(gan_model_obratno.generator_loss([hm_ref], [hm_pred]) * coefs["obratno"]) \
    .minimize_step(gan_model_obratno.optimizer.opt_min)

# Cycle heatmap -> image -> landmarks: the predicted measure must match the input.
fake2, _ = enc_dec.generate(heatmap_sum)
(WR.writable("cycle", mes_loss.forward)(
    hg.forward(fake2)["mes"], UniformMeasure2D01(landmarks)) * coefs["hm"]) \
    .minimize_step(gan_model_tuda.optimizer.opt_min,
                   gan_model_obratno.optimizer.opt_min)

# Cycle image -> (heatmap, style) -> image: restoration loss.
latent = enc_dec.encode_latent(g_transforms(image=real_img)["image"])
restored = enc_dec.decode(hg.forward(real_img)["hm_sum"], latent)
(WR.writable("cycle2", psp_loss.forward)(
    real_img, real_img, restored, latent) * coefs["img"]) \
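# ---------------------------------------------------------------------------
# A minimal sketch of the loss-wrapper pattern used above. The chained calls
# `(loss_a + loss_b * w).minimize_step(opt1, opt2)` suggest a small wrapper
# around a scalar tensor; the repo's actual Loss class is not shown here, so
# everything below is an assumption for illustration only.
# ---------------------------------------------------------------------------
import torch


class LossSketch:
    """Hypothetical stand-in for the Loss objects returned by generator_loss."""

    def __init__(self, tensor: torch.Tensor):
        self.to_tensor = tensor

    def __add__(self, other: "LossSketch") -> "LossSketch":
        return LossSketch(self.to_tensor + other.to_tensor)

    def __mul__(self, weight: float) -> "LossSketch":
        return LossSketch(self.to_tensor * weight)

    def minimize_step(self, *optimizers: torch.optim.Optimizer) -> None:
        # Zero all optimizers, backpropagate once, then step each of them,
        # so one scalar can update several parameter groups jointly.
        for opt in optimizers:
            opt.zero_grad()
        self.to_tensor.backward()
        for opt in optimizers:
            opt.step()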
        max=1, min=0)
    heatmap = heatmapper.forward(landmarks).detach()
except Exception as e:
    # Skip malformed samples instead of crashing the training loop.
    print(e)
    print("input data exception")
    continue

# coefs = json.load(open(os.path.join(sys.path[0], "../parameters/cycle_loss.json")))

# Forward ("tuda") direction: heatmap -> image.
fake, fake_latent = enc_dec.generate(heatmap)
fake_latent_pred = enc_dec.encode_latent(fake)

gan_model_tuda.discriminator_train([real_img], [fake.detach()])
(
    gan_model_tuda.generator_loss([real_img], [fake])
    + l1_loss(fake_latent_pred, fake_latent) * args.style
).minimize_step(gan_model_tuda.optimizer.opt_min, style_opt)

# Backward ("obratno") direction: image -> heatmap.
hm_pred = hg.forward(real_img)["hm"]
hm_ref = heatmap  # already rendered and detached above

gan_model_obratno.discriminator_train([hm_ref], [hm_pred.detach()])
(gan_model_obratno.generator_loss([hm_ref], [hm_pred]) * args.obratno) \
    .minimize_step(gan_model_obratno.optimizer.opt_min)

# Cycle heatmap -> image -> landmarks.
fake2, _ = enc_dec.generate(heatmap)
(WR.writable("cycle", mes_loss.forward)(
    hg.forward(fake2)["mes"], UniformMeasure2D01(landmarks)) * args.hm) \
    .minimize_step(gan_model_tuda.optimizer.opt_min,
                   gan_model_obratno.optimizer.opt_min)

# Cycle image -> (heatmap, style) -> image.
latent = enc_dec.encode_latent(cond_img)
restored = enc_dec.decode(hg.forward(real_img)["hm"], latent)
(WR.writable("cycle2", psp_loss.forward)(
    real_img, real_img, restored, latent) * args.img) \
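# ---------------------------------------------------------------------------
# A minimal sketch of what `heatmapper.forward(landmarks)` plausibly does:
# render each normalized keypoint of a (B, N, 2) tensor in [0, 1] as a
# Gaussian blob, giving a (B, N, H, W) tensor. The repo's heatmapper is not
# shown; the resolution and sigma below are hypothetical.
# ---------------------------------------------------------------------------
import torch


class GaussianHeatmapperSketch:
    def __init__(self, size: int = 64, sigma: float = 2.0):
        self.size = size
        self.sigma = sigma

    def forward(self, landmarks: torch.Tensor) -> torch.Tensor:
        # landmarks: (B, N, 2) with (x, y) in [0, 1].
        coords = landmarks * (self.size - 1)  # to pixel coordinates
        grid = torch.arange(self.size, device=landmarks.device).float()
        yy, xx = torch.meshgrid(grid, grid, indexing="ij")
        dx = xx[None, None] - coords[..., 0, None, None]
        dy = yy[None, None] - coords[..., 1, None, None]
        # One Gaussian per keypoint; summing over dim 1 yields the
        # single-channel "hm_sum" used elsewhere in these scripts.
        return torch.exp(-(dx ** 2 + dy ** 2) / (2 * self.sigma ** 2))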
# Unsupervised fine-tuning step: hg's own predictions serve as the
# cycle targets (no ground-truth landmarks are used here).
with open(os.path.join(sys.path[0], "../parameters/cycle_loss_2.json")) as f:
    coefs = json.load(f)

# WR.writable("sup", mes_loss.forward)(hg.forward(real_img)["mes"], UniformMeasure2D01(train_landmarks)).__mul__(coefs["sup"]) \
#     .minimize_step(cont_opt)

with torch.no_grad():
    pred = hg.forward(real_img)
    hm_pred = pred["hm_sum"].detach()
    mes_pred = pred["mes"].detach()

# Forward ("tuda") direction: predicted heatmap -> image.
fake, fake_latent = enc_dec.generate(hm_pred)
fake_latent_pred = enc_dec.encode_latent(fake)

gan_model_tuda.discriminator_train([real_img], [fake.detach()])
(
    gan_model_tuda.generator_loss([real_img], [fake])
    + l1_loss(fake_latent_pred, fake_latent) * coefs["style"]
).minimize_step(gan_model_tuda.optimizer.opt_min, style_opt)

train_content(cont_opt, R_b, R_t, real_img, hg)

# Cycle heatmap -> image -> landmarks, against hg's own prediction.
fake2, _ = enc_dec.generate(hm_pred)
(WR.writable("cycle", mes_loss.forward)(
    hg.forward(fake2)["mes"], mes_pred) * coefs["hm"]) \
    .minimize_step(gan_model_tuda.optimizer.opt_min, cont_opt)

# Cycle image -> (heatmap, style) -> image.
latent = enc_dec.encode_latent(g_transforms(image=real_img)["image"])
restored = enc_dec.decode(hg.forward(real_img)["hm_sum"], latent)
(WR.writable("cycle2", psp_loss.forward)(
    real_img, real_img, restored, latent) * coefs["img"]) \
    .minimize_step(gan_model_tuda.optimizer.opt_min, cont_opt, style_opt)

# requires_grad(discriminator_img, False)
# requires_grad(enc_dec.generator, False)
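# ---------------------------------------------------------------------------
# A minimal sketch of the measure objects compared by mes_loss above.
# UniformMeasure2D01 appears to wrap index-aligned keypoints in [0, 1]^2 as a
# uniform discrete measure; the plain coordinate MSE below is an illustrative
# simplification, not the repo's actual measure loss.
# ---------------------------------------------------------------------------
import torch
import torch.nn.functional as F


class UniformMeasure2D01Sketch:
    """N points in the unit square, each carrying mass 1/N."""

    def __init__(self, coord: torch.Tensor):  # coord: (B, N, 2) in [0, 1]
        self.coord = coord
        self.prob = torch.full(
            coord.shape[:2], 1.0 / coord.shape[1], device=coord.device)


def mes_loss_sketch(pred: UniformMeasure2D01Sketch,
                    ref: UniformMeasure2D01Sketch) -> torch.Tensor:
    # Landmarks share an indexing convention, so a direct MSE is meaningful.
    return F.mse_loss(pred.coord, ref.coord)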
# Pretraining step for the heatmap -> image generator alone.
batch = next(LazyLoader.w300().loader_train_inf)
# Take the image from the same batch as the landmarks; drawing a second
# batch here would pair mismatched images and keypoints.
real_img = batch["data"].cuda()
landmarks = torch.clamp(batch["meta"]["keypts_normalized"].cuda(), max=1)
heatmap_sum = heatmapper.forward(landmarks).sum(1, keepdim=True).detach()

fake, fake_latent = enc_dec.generate(heatmap_sum)
fake_latent_pred = enc_dec.encode_latent(fake)

# Alternate the discriminator's real samples between W300 and CelebA.
real_gan_img = real_img if i % 2 == 0 else next(LazyLoader.celeba().loader).cuda()

gan_model_tuda.discriminator_train([real_gan_img], [fake.detach()])
(
    gan_model_tuda.generator_loss([real_gan_img], [fake])
    + l1_loss(fake_latent_pred, fake_latent)
).minimize_step(gan_model_tuda.optimizer.opt_min, style_opt)

# Reconstruction: re-render the image from its heatmap and style latent.
latent = enc_dec.encode_latent(real_img)
restored = enc_dec.decode(heatmap_sum, latent)
(WR.writable("cycle2", psp_loss.forward)(
    real_img, real_img, restored, latent) * 20) \
    .minimize_step(gan_model_tuda.optimizer.opt_min, style_opt)

image_accumulator.step(i)
# enc_accumulator.step(i)

if i % 10000 == 0 and i > 0:
    torch.save(
        {
            'gi': enc_dec.generator.state_dict(),
            'di': discriminator_img.state_dict(),
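# ---------------------------------------------------------------------------
# A minimal sketch of the accumulator pattern behind image_accumulator.step(i):
# keeping an exponential moving average of generator weights for evaluation,
# as is common in StyleGAN-style training. The repo's class is not shown;
# the decay and warm-up values below are hypothetical.
# ---------------------------------------------------------------------------
import copy

import torch


class AccumulatorSketch:
    def __init__(self, module: torch.nn.Module,
                 decay: float = 0.999, warmup: int = 1000):
        self.ema = copy.deepcopy(module).eval()
        self.module = module
        self.decay = decay
        self.warmup = warmup

    @torch.no_grad()
    def step(self, i: int) -> None:
        if i < self.warmup:
            # During warm-up just track the live weights directly.
            self.ema.load_state_dict(self.module.state_dict())
            return
        for p_ema, p in zip(self.ema.parameters(), self.module.parameters()):
            p_ema.mul_(self.decay).add_(p, alpha=1 - self.decay)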