def test(args):
    """Sample three batches from the two hard-coded list datasets, translate
    them with the CycleGAN generators, and save one image grid.

    Writes ``<args.results_dir>/sample.png`` (rows: A-real, B-fake, B-real,
    A-fake per sampled batch). Generators are restored from a hard-coded
    checkpoint path when available.
    """
    transform = transforms.Compose(
        [transforms.Resize((args.crop_height, args.crop_width)),
         transforms.ToTensor(),
         # transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
         ])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    # a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    # b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    # NOTE(review): dataset list files are hard-coded; consider moving to args.
    a_test_data = ListDataSet('/media/l/新加卷1/city/data/river/train_256_9w.lst', transform=transform)
    b_test_data = ListDataSet('/media/l/新加卷/city/jinan_z3.lst', transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)

    Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',
                     norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',
                     norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    utils.print_networks([Gab, Gba], ['Gab', 'Gba'])

    try:
        ckpt = utils.load_checkpoint('/media/l/新加卷/city/project/cycleGAN-PyTorch/checkpoints/horse2zebra/latest.ckpt')
        Gab.load_state_dict(ckpt['Gab'])
        Gba.load_state_dict(ckpt['Gba'])
    except Exception:  # was a bare except: keep the best-effort load, but do not swallow SystemExit/KeyboardInterrupt
        print(' [*] No checkpoint!')

    res = []
    # Build each iterator ONCE.  The original called ``iter(loader).next()``
    # inside the loop, which (a) uses the Python-2-only ``.next()`` method and
    # (b) re-created the iterator (respawning workers and, with shuffle=True,
    # possibly drawing the same batch) on every pass.
    a_iter = iter(a_test_loader)
    b_iter = iter(b_test_loader)
    Gab.eval()
    Gba.eval()
    for i in range(3):
        """ run """
        a_real_test = next(a_iter)[0]
        b_real_test = next(b_iter)[0]
        a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])
        with torch.no_grad():
            a_fake_test = Gab(b_real_test)
            b_fake_test = Gba(a_real_test)
            a_recon_test = Gab(b_fake_test)   # reconstructions computed as in the
            b_recon_test = Gba(a_fake_test)   # original, though not saved below
        res.append(a_real_test)
        res.append(b_fake_test)
        res.append(b_real_test)
        res.append(a_fake_test)
    # Map the generator output range [-1, 1] back to [0, 1] for saving.
    pic = (torch.cat(res, dim=0).data + 1) / 2.0
    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)
    torchvision.utils.save_image(pic, args.results_dir + '/sample.png', nrow=2)
def test(args):
    """Translate one test batch in each direction and save a comparison grid.

    Writes ``<args.results_dir>/sample.jpg`` with rows
    (A-real, B-fake, A-recon) and (B-real, A-fake, B-recon), ``nrow=3``.
    """
    transform = transforms.Compose([
        transforms.Resize((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = DataLoader(a_test_data, batch_size=args.batch_size, shuffle=True, num_workers=4)
    b_test_loader = DataLoader(b_test_data, batch_size=args.batch_size, shuffle=True, num_workers=4)

    Gab = define_Gen(input_nc=3, output_nc=3, netG=args.gen_net, gpu_ids=args.gpu_ids)
    Gba = define_Gen(input_nc=3, output_nc=3, netG=args.gen_net, gpu_ids=args.gpu_ids)

    ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
    Gab.load_state_dict(ckpt['Gab'])
    Gba.load_state_dict(ckpt['Gba'])

    # ``iter(...).next()`` is Python-2 only and raises AttributeError on
    # Python 3; use the built-in next().
    a_real_test = next(iter(a_test_loader))[0]
    b_real_test = next(iter(b_test_loader))[0]
    a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

    Gab.eval()
    Gba.eval()
    with torch.no_grad():
        a_fake_test = Gab(b_real_test)
        b_fake_test = Gba(a_real_test)
        a_recon_test = Gab(b_fake_test)
        b_recon_test = Gba(a_fake_test)

    # Rescale from [-1, 1] to [0, 1] before saving.
    pic = (torch.cat([
        a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
        b_recon_test
    ], dim=0).data + 1) / 2.0

    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)
    torchvision.utils.save_image(pic, args.results_dir + '/sample.jpg', nrow=3)
def test(args, epoch):
    """Translate one random test batch in each direction, score the results
    with SSIM, and save a comparison grid.

    The output filename embeds *epoch* and the two SSIM-derived scores:
    ``<args.results_path>/sample_<epoch>_<ba>_<ab>.jpg``.
    """
    transform = transforms.Compose(
        [transforms.Resize((args.crop_height, args.crop_width)),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)

    Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net,
                     norm=args.norm, use_dropout=args.use_dropout, gpu_ids=args.gpu_ids,
                     self_attn=args.self_attn, spectral=args.spectral)
    Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net,
                     norm=args.norm, use_dropout=args.use_dropout, gpu_ids=args.gpu_ids,
                     self_attn=args.self_attn, spectral=args.spectral)
    utils.print_networks([Gab, Gba], ['Gab', 'Gba'])

    ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_path))
    Gab.load_state_dict(ckpt['Gab'])
    Gba.load_state_dict(ckpt['Gba'])

    """ run """
    # ``iter(...).next()`` is Python-2 only; also drop the deprecated
    # Variable/requires_grad wrapper — inference below runs under no_grad.
    a_real_test = next(iter(a_test_loader))[0]
    b_real_test = next(iter(b_test_loader))[0]
    a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

    Gab.eval()
    Gba.eval()
    with torch.no_grad():
        a_fake_test = Gab(b_real_test)
        b_fake_test = Gba(a_real_test)
        a_recon_test = Gab(b_fake_test)
        b_recon_test = Gba(a_fake_test)

    # SSIM on grayscale images rescaled from [-1, 1] to [0, 1].
    gray = kornia.color.RgbToGrayscale()
    m = kornia.losses.SSIM(11, 'mean')
    ba_ssim = m(gray((a_real_test + 1) / 2.0), gray((b_fake_test + 1) / 2.0))
    ab_ssim = m(gray((b_real_test + 1) / 2.0), gray((a_fake_test + 1) / 2.0))

    pic = (torch.cat([
        a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
        b_recon_test
    ], dim=0).data + 1) / 2.0
    if not os.path.isdir(args.results_path):
        os.makedirs(args.results_path)
    # kornia's SSIM loss is (1 - ssim)/2, so 1 - 2*loss recovers the SSIM score.
    torchvision.utils.save_image(
        pic, args.results_path + '/sample_' + str(epoch) + '_' +
        str(1 - 2 * round(ba_ssim.item(), 4)) + '_' +
        str(1 - 2 * round(ab_ssim.item(), 4)) + '.jpg',
        nrow=args.batch_size)
def test(args):
    """Translate one test batch in each direction and save a comparison grid
    to ``<args.results_dir>/sample.jpg`` (nrow=3)."""
    transform = transforms.Compose(
        [transforms.Resize((args.crop_height, args.crop_width)),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)

    Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',
                     norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',
                     norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    utils.print_networks([Gab, Gba], ['Gab', 'Gba'])

    try:
        # ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
        ckpt = utils.load_checkpoint('%s' % args.checkpoint)
        Gab.load_state_dict(ckpt['Gab'])
        Gba.load_state_dict(ckpt['Gba'])
    except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
        print(' [*] No checkpoint!')

    """ run """
    # built-in next() instead of the Python-2-only ``iter(...).next()``;
    # no Variable/requires_grad needed for inference under no_grad.
    a_real_test = next(iter(a_test_loader))[0]
    b_real_test = next(iter(b_test_loader))[0]
    a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

    Gab.eval()
    Gba.eval()
    with torch.no_grad():
        a_fake_test = Gab(b_real_test)
        b_fake_test = Gba(a_real_test)
        a_recon_test = Gab(b_fake_test)
        b_recon_test = Gba(a_fake_test)

    # Rescale from [-1, 1] to [0, 1] before saving.
    pic = (torch.cat([
        a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
        b_recon_test
    ], dim=0).data + 1) / 2.0
    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)
    torchvision.utils.save_image(pic, args.results_dir + '/sample.jpg', nrow=3)
def test(args):
    """Translate the first (unshuffled) test batch in each direction and save
    a comparison grid to ``<args.results_dir>/sample.jpg``."""
    utils.cuda_devices([args.gpu_id])
    transform = transforms.Compose(
        [transforms.Resize((args.img_height, args.img_width)),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=False, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=False, num_workers=4)

    Gab = model.Generator()
    Gba = model.Generator()
    utils.cuda([Gab, Gba])

    try:
        ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
        Gab.load_state_dict(ckpt['Gab'])
        Gba.load_state_dict(ckpt['Gba'])
    except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
        print(' [*] No checkpoint!')

    """ run """
    # built-in next() instead of the Python-2-only ``iter(...).next()``;
    # no Variable/requires_grad needed for inference under no_grad.
    a_real_test = next(iter(a_test_loader))[0]
    b_real_test = next(iter(b_test_loader))[0]
    a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

    Gab.eval()
    Gba.eval()
    with torch.no_grad():
        a_fake_test = Gab(b_real_test)
        b_fake_test = Gba(a_real_test)
        a_recon_test = Gab(b_fake_test)
        b_recon_test = Gba(a_fake_test)

    # Rescale from [-1, 1] to [0, 1] before saving.
    pic = (torch.cat([
        a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
        b_recon_test
    ], dim=0).data + 1) / 2.0
    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)
    torchvision.utils.save_image(pic, args.results_dir + '/sample.jpg', nrow=3)
def test(args):
    """Run both generators over every test batch and save one comparison grid
    per batch as ``<args.results_dir>/sample_<i>.jpg``."""
    transform = transforms.Compose([
        transforms.Resize((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = datasets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = datasets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)

    G_AtoB = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, norm=args.norm,
                        use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    G_BtoA = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, norm=args.norm,
                        use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    utils.print_networks([G_AtoB, G_BtoA], ['G_AtoB', 'G_BtoA'])

    try:
        ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
        G_AtoB.load_state_dict(ckpt['G_AtoB'])
        G_BtoA.load_state_dict(ckpt['G_BtoA'])
    except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
        print(' [*] No checkpoint! ')

    # eval() hoisted out of the loop — the mode does not change per batch.
    G_AtoB.eval()
    G_BtoA.eval()
    for i, (a_real_test, b_real_test) in enumerate(zip(a_test_loader, b_test_loader)):
        # ImageFolder yields (image, label); only the image is used.  No
        # Variable/requires_grad needed — inference runs under no_grad.
        a_real_test = a_real_test[0]
        b_real_test = b_real_test[0]
        a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

        with torch.no_grad():
            a_fake_test = G_BtoA(b_real_test)
            b_fake_test = G_AtoB(a_real_test)
            a_recon_test = G_BtoA(b_fake_test)
            b_recon_test = G_AtoB(a_fake_test)

        # Rescale from [-1, 1] to [0, 1] before saving.
        pic = (torch.cat([
            a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
            b_recon_test
        ], dim=0).data + 1) / 2.0

        if not os.path.isdir(args.results_dir):
            os.makedirs(args.results_dir)
        torchvision.utils.save_image(pic, args.results_dir + '/sample_' + str(i) + '.jpg', nrow=3)
checkpoint_dir = './checkpoints/sketch2pokemon' checkpoint_name = 'latest' results_dir = './results' batch_size = 1 ngf = 64 norm = 'instance' no_dropout = False gpu_ids = [0] transform = transforms.Compose([ transforms.Resize((crop_height, crop_width)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) ]) dataset_dirs = utils.get_testdata_link(dataset_dir) a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform) b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform) a_test_loader = torch.utils.data.DataLoader( a_test_data, batch_size=batch_size, shuffle=False, num_workers=4) # set shuffle to false for testing b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=batch_size, shuffle=False, num_workers=4) Gab = define_Gen(input_nc=3, output_nc=3, ngf=ngf,
def test(args):
    """Save a one-batch comparison grid, then run the full test set and write
    per-image results.

    Outputs: ``sample.jpg`` (grid), plus ``inputA/%04d.png`` (real B inputs)
    and ``outputA/%04d.png`` (generated A) under ``args.results_dir``.
    """
    transform = transforms.Compose([
        transforms.Resize((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size, num_workers=4)

    Gab = Generator(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',
                    norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    Gba = Generator(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',
                    norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    utils.print_networks([Gab, Gba], ['Gab', 'Gba'])

    try:
        ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
        Gab.load_state_dict(ckpt['Gab'])
        Gba.load_state_dict(ckpt['Gba'])
    except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
        print(' [*] No checkpoint!')

    """ run """
    Gab.eval()
    Gba.eval()

    # built-in next() instead of the Python-2-only ``iter(...).next()``;
    # no Variable/requires_grad needed for inference under no_grad.
    a_real_test = next(iter(a_test_loader))[0]
    b_real_test = next(iter(b_test_loader))[0]
    a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

    with torch.no_grad():
        a_fake_test = Gab(b_real_test)
        b_fake_test = Gba(a_real_test)
        a_recon_test = Gab(b_fake_test)
        b_recon_test = Gba(a_fake_test)

    pic = (torch.cat([
        a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
        b_recon_test
    ], dim=0).data + 1) / 2.0
    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)
    torchvision.utils.save_image(pic, args.results_dir + '/sample.jpg', nrow=3)

    # create output dirs if they don't exist
    if not os.path.exists(args.results_dir + '/inputA'):
        os.makedirs(args.results_dir + '/inputA')
    if not os.path.exists(args.results_dir + '/outputA'):
        os.makedirs(args.results_dir + '/outputA')
    # BUG FIX: the original tested ``args.results_dir + './outputB'`` (stray
    # "./") but created '/outputB', so the check never matched and makedirs
    # raised FileExistsError on any re-run.
    if not os.path.exists(args.results_dir + '/outputB'):
        os.makedirs(args.results_dir + '/outputB')

    for i, (a_real_test, b_real_test) in enumerate(zip(a_test_loader, b_test_loader)):
        # set model input (ImageFolder yields (image, label))
        a_real_test = a_real_test[0]
        b_real_test = b_real_test[0]
        a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

        with torch.no_grad():
            a_fake_test = Gab(b_real_test)
            b_fake_test = Gba(a_real_test)
            a_recon_test = Gab(b_fake_test)
            b_recon_test = Gba(a_fake_test)

        # Rescale from [-1, 1] to [0, 1].
        a_fake_test = (a_fake_test + 1) / 2.0
        b_real_test = (b_real_test + 1) / 2.0
        a_real_test = (a_real_test + 1) / 2.0
        b_fake_test = (b_fake_test + 1) / 2.0

        # Save image files.  NOTE(review): outputB is created above but never
        # written to — confirm whether b_fake_test should be saved there.
        torchvision.utils.save_image(
            b_real_test, args.results_dir + '/inputA/%04d.png' % (i + 1))
        torchvision.utils.save_image(
            a_fake_test, args.results_dir + '/outputA/%04d.png' % (i + 1))

    print("\n\nCreated Output Directories\n\n")
def train(self, args):
    """Run the full CycleGAN training loop.

    Per batch: a generator step (identity + adversarial + cycle losses) with
    the discriminators frozen, then a discriminator step using an image pool
    of historical fakes.  Per epoch: the latest checkpoint is overwritten, a
    sample grid ``sample_<epoch>.jpg`` is written to ``args.results_dir``,
    and both LR schedulers are stepped.
    """
    # Test input
    transform_test = transforms.Compose([
        transforms.Resize((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform_test)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform_test)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)

    # For transforming the input image
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize((args.load_height, args.load_width)),
        transforms.RandomCrop((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset_dirs = utils.get_traindata_link(args.dataset_dir)

    # Pytorch dataloader
    a_loader = torch.utils.data.DataLoader(dsets.ImageFolder(dataset_dirs['trainA'], transform=transform),
                                           batch_size=args.batch_size, shuffle=True, num_workers=4)
    b_loader = torch.utils.data.DataLoader(dsets.ImageFolder(dataset_dirs['trainB'], transform=transform),
                                           batch_size=args.batch_size, shuffle=True, num_workers=4)

    # Image pools of previously generated fakes, used to stabilize the
    # discriminator updates.
    a_fake_sample = utils.Sample_from_Pool()
    b_fake_sample = utils.Sample_from_Pool()

    for epoch in range(self.start_epoch, args.epochs):
        lr = self.g_optimizer.param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

        for i, (a_real, b_real) in enumerate(zip(a_loader, b_loader)):
            # global step (currently only informational)
            step = epoch * min(len(a_loader), len(b_loader)) + i + 1

            # Generator Computations
            ##################################################
            set_grad([self.Da, self.Db], False)  # freeze discriminators
            self.g_optimizer.zero_grad()

            a_real = Variable(a_real[0])
            b_real = Variable(b_real[0])
            a_real, b_real = utils.cuda([a_real, b_real])

            # Forward pass through generators
            ##################################################
            a_fake = self.Gab(b_real)
            b_fake = self.Gba(a_real)
            a_recon = self.Gab(b_fake)
            b_recon = self.Gba(a_fake)
            a_idt = self.Gab(a_real)
            b_idt = self.Gba(b_real)

            # Identity losses
            ###################################################
            a_idt_loss = self.L1(a_idt, a_real) * args.lamda * args.idt_coef
            b_idt_loss = self.L1(b_idt, b_real) * args.lamda * args.idt_coef

            # Adversarial losses (LSGAN: MSE against an all-ones label)
            ###################################################
            a_fake_dis = self.Da(a_fake)
            b_fake_dis = self.Db(b_fake)
            real_label = utils.cuda(Variable(torch.ones(a_fake_dis.size())))
            a_gen_loss = self.MSE(a_fake_dis, real_label)
            b_gen_loss = self.MSE(b_fake_dis, real_label)

            # Cycle consistency losses
            ###################################################
            a_cycle_loss = self.L1(a_recon, a_real) * args.lamda
            b_cycle_loss = self.L1(b_recon, b_real) * args.lamda

            # Total generators losses
            ###################################################
            gen_loss = a_gen_loss + b_gen_loss + a_cycle_loss + b_cycle_loss + a_idt_loss + b_idt_loss

            # Update generators
            ###################################################
            gen_loss.backward()
            self.g_optimizer.step()

            # Discriminator Computations
            #################################################
            set_grad([self.Da, self.Db], True)
            self.d_optimizer.zero_grad()

            # Sample from history of generated images
            #################################################
            a_fake = Variable(torch.Tensor(a_fake_sample([a_fake.cpu().data.numpy()])[0]))
            b_fake = Variable(torch.Tensor(b_fake_sample([b_fake.cpu().data.numpy()])[0]))
            a_fake, b_fake = utils.cuda([a_fake, b_fake])

            # Forward pass through discriminators
            #################################################
            a_real_dis = self.Da(a_real)
            a_fake_dis = self.Da(a_fake)
            b_real_dis = self.Db(b_real)
            b_fake_dis = self.Db(b_fake)
            real_label = utils.cuda(Variable(torch.ones(a_real_dis.size())))
            fake_label = utils.cuda(Variable(torch.zeros(a_fake_dis.size())))

            # Discriminator losses
            ##################################################
            a_dis_real_loss = self.MSE(a_real_dis, real_label)
            a_dis_fake_loss = self.MSE(a_fake_dis, fake_label)
            b_dis_real_loss = self.MSE(b_real_dis, real_label)
            b_dis_fake_loss = self.MSE(b_fake_dis, fake_label)

            # Total discriminators losses
            a_dis_loss = (a_dis_real_loss + a_dis_fake_loss) * 0.5
            b_dis_loss = (b_dis_real_loss + b_dis_fake_loss) * 0.5

            # Update discriminators
            ##################################################
            a_dis_loss.backward()
            b_dis_loss.backward()
            self.d_optimizer.step()

            print("Epoch: (%3d) (%5d/%5d) | Gen Loss:%.2e | Dis Loss:%.2e" %
                  (epoch, i + 1, min(len(a_loader), len(b_loader)),
                   gen_loss, a_dis_loss + b_dis_loss))

        # Override the latest checkpoint
        #######################################################
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'Da': self.Da.state_dict(),
                'Db': self.Db.state_dict(),
                'Gab': self.Gab.state_dict(),
                'Gba': self.Gba.state_dict(),
                'd_optimizer': self.d_optimizer.state_dict(),
                'g_optimizer': self.g_optimizer.state_dict()
            }, '%s/latest.ckpt' % (args.checkpoint_dir))

        # Save image current :
        #######################################################################
        """ run """
        # built-in next() instead of the Python-2-only ``iter(...).next()``;
        # no Variable/requires_grad needed — sampling runs under no_grad.
        a_real_test = next(iter(a_test_loader))[0]
        b_real_test = next(iter(b_test_loader))[0]
        a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

        self.Gab.eval()
        self.Gba.eval()
        with torch.no_grad():
            a_fake_test = self.Gab(b_real_test)
            b_fake_test = self.Gba(a_real_test)
            a_recon_test = self.Gab(b_fake_test)
            b_recon_test = self.Gba(a_fake_test)

        pic = (torch.cat([
            a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
            b_recon_test
        ], dim=0).data + 1) / 2.0
        if not os.path.isdir(args.results_dir):
            os.makedirs(args.results_dir)
        torchvision.utils.save_image(pic, args.results_dir + '/sample_{}.jpg'.format(epoch), nrow=3)
        # back to training mode for the next epoch
        self.Gab.train()
        self.Gba.train()

        # Update learning rates
        ########################
        self.g_lr_scheduler.step()
        self.d_lr_scheduler.step()
def train(self, args):
    """Run the full CycleGAN training loop with TensorBoard logging.

    Per batch: generator step (identity + adversarial + cycle losses) with
    the discriminators frozen, then a discriminator step using pooled
    historical fakes.  Per epoch: optionally dump test samples, log epoch
    losses and learning rates to ``self.writer``, and overwrite the latest
    checkpoint.
    """
    # For transforming the input image
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.Resize((args.load_height, args.load_width)),
        transforms.RandomCrop((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    test_transform = transforms.Compose([
        transforms.Resize((args.test_crop_height, args.test_crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    dataset_dirs = utils.get_traindata_link(args.dataset_dir)
    testset_dirs = utils.get_testdata_link(args.dataset_dir)

    # Pytorch dataloader
    a_loader = torch.utils.data.DataLoader(dsets.ImageFolder(dataset_dirs['trainA'], transform=transform),
                                           batch_size=args.batch_size, shuffle=True, num_workers=4)
    b_loader = torch.utils.data.DataLoader(dsets.ImageFolder(dataset_dirs['trainB'], transform=transform),
                                           batch_size=args.batch_size, shuffle=True, num_workers=4)
    a_test_loader = torch.utils.data.DataLoader(dsets.ImageFolder(testset_dirs['testA'], transform=test_transform),
                                                batch_size=1, shuffle=False, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(dsets.ImageFolder(testset_dirs['testB'], transform=test_transform),
                                                batch_size=1, shuffle=False, num_workers=4)

    # Image pools of previously generated fakes for the discriminator updates.
    a_fake_sample = utils.Sample_from_Pool()
    b_fake_sample = utils.Sample_from_Pool()

    for epoch in range(self.start_epoch, args.epochs):
        if epoch >= 1:
            print('generating test result...')
            self.save_sample_image(args.test_length, a_test_loader, b_test_loader,
                                   args.results_dir, epoch)

        lr = self.g_optimizer.param_groups[0]['lr']
        print('learning rate = %.7f' % lr)

        running_Gen_loss = 0
        running_Dis_loss = 0

        ##################################################
        # BEGIN TRAINING FOR ONE EPOCH
        ##################################################
        for i, (a_real, b_real) in enumerate(zip(a_loader, b_loader)):
            # global step (currently only informational)
            step = epoch * min(len(a_loader), len(b_loader)) + i + 1

            ##################################################
            # Part 1: Generator Computations
            ##################################################
            set_grad([self.Da, self.Db], False)  # freeze discriminators
            self.g_optimizer.zero_grad()

            a_real = Variable(a_real[0])
            b_real = Variable(b_real[0])
            a_real, b_real = utils.cuda([a_real, b_real])

            # Forward pass through generators
            ##################################################
            a_fake = self.Gab(b_real)
            b_fake = self.Gba(a_real)
            a_recon = self.Gab(b_fake)
            b_recon = self.Gba(a_fake)
            a_idt = self.Gab(a_real)
            b_idt = self.Gba(b_real)

            # Identity losses
            ###################################################
            a_idt_loss = self.identity_criteron(a_idt, a_real) * args.lamda * args.idt_coef
            b_idt_loss = self.identity_criteron(b_idt, b_real) * args.lamda * args.idt_coef
            # a_idt_loss = 0
            # b_idt_loss = 0

            # Adversarial losses (against an all-ones "real" label)
            ###################################################
            a_fake_dis = self.Da(a_fake)
            b_fake_dis = self.Db(b_fake)
            real_label = utils.cuda(Variable(torch.ones(a_fake_dis.size())))
            a_gen_loss = self.adversarial_criteron(a_fake_dis, real_label)
            b_gen_loss = self.adversarial_criteron(b_fake_dis, real_label)

            # Cycle consistency losses
            ###################################################
            a_cycle_loss = self.cycle_consistency_criteron(a_recon, a_real) * args.lamda
            b_cycle_loss = self.cycle_consistency_criteron(b_recon, b_real) * args.lamda

            # Total generators losses
            ###################################################
            gen_loss = a_gen_loss + b_gen_loss + a_cycle_loss + b_cycle_loss + a_idt_loss + b_idt_loss

            # Update generators
            ###################################################
            gen_loss.backward()
            self.g_optimizer.step()

            ##################################################
            # Part 2: Discriminator Computations
            #################################################
            set_grad([self.Da, self.Db], True)
            self.d_optimizer.zero_grad()

            # Sample from history of generated images
            #################################################
            a_fake = Variable(torch.Tensor(a_fake_sample([a_fake.cpu().data.numpy()])[0]))
            b_fake = Variable(torch.Tensor(b_fake_sample([b_fake.cpu().data.numpy()])[0]))
            a_fake, b_fake = utils.cuda([a_fake, b_fake])

            # Forward pass through discriminators
            #################################################
            a_real_dis = self.Da(a_real)
            a_fake_dis = self.Da(a_fake)
            b_real_dis = self.Db(b_real)
            b_fake_dis = self.Db(b_fake)
            real_label = utils.cuda(Variable(torch.ones(a_real_dis.size())))
            fake_label = utils.cuda(Variable(torch.zeros(a_fake_dis.size())))

            # Discriminator losses
            ##################################################
            a_dis_real_loss = self.adversarial_criteron(a_real_dis, real_label)
            a_dis_fake_loss = self.adversarial_criteron(a_fake_dis, fake_label)
            b_dis_real_loss = self.adversarial_criteron(b_real_dis, real_label)
            b_dis_fake_loss = self.adversarial_criteron(b_fake_dis, fake_label)

            # Total discriminators losses
            a_dis_loss = (a_dis_real_loss + a_dis_fake_loss) * 0.5
            b_dis_loss = (b_dis_real_loss + b_dis_fake_loss) * 0.5

            # Update discriminators
            ##################################################
            a_dis_loss.backward()
            b_dis_loss.backward()
            self.d_optimizer.step()

            print("Epoch: (%3d) (%5d/%5d) | Gen Loss:%.2e | Dis Loss:%.2e" %
                  (epoch, i + 1, min(len(a_loader), len(b_loader)),
                   gen_loss, a_dis_loss + b_dis_loss))

            # BUG FIX: accumulate plain Python floats via .item().  The
            # original summed the loss *tensors*, chaining every batch's
            # graph node and keeping device tensors alive for the whole
            # epoch (unbounded memory growth).
            running_Gen_loss += gen_loss.item()
            running_Dis_loss += (a_dis_loss + b_dis_loss).item()

        ##################################################
        # END TRAINING FOR ONE EPOCH
        ##################################################
        self.writer.add_scalar('Gen Loss',
                               running_Gen_loss / min(len(a_loader), len(b_loader)), epoch)
        self.writer.add_scalar('Dis Loss',
                               running_Dis_loss / min(len(a_loader), len(b_loader)), epoch)
        # NOTE(review): scheduler.get_lr() is deprecated in favor of
        # get_last_lr(); kept as-is since their values can differ mid-step.
        self.writer.add_scalar('Gen_LR', self.g_lr_scheduler.get_lr()[0], epoch)
        self.writer.add_scalar('Dis_LR', self.d_lr_scheduler.get_lr()[0], epoch)

        # Override the latest checkpoint
        #######################################################
        utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'Da': self.Da.state_dict(),
                'Db': self.Db.state_dict(),
                'Gab': self.Gab.state_dict(),
                'Gba': self.Gba.state_dict(),
                'd_optimizer': self.d_optimizer.state_dict(),
                'g_optimizer': self.g_optimizer.state_dict()
            }, '%s/latest.ckpt' % (args.checkpoint_dir))

        # Update learning rates
        ########################
        self.g_lr_scheduler.step()
        self.d_lr_scheduler.step()

    self.writer.close()
def gen_samples(args, epoch):
    """Translate the whole test (or sample) set in both directions, saving
    every fake/reconstruction image individually plus per-image SSIM CSVs.

    Outputs under ``args.results_path``: ``b_fake/``, ``a_recon/``,
    ``a_fake/``, ``b_recon/`` image folders and two SSIM CSV files.
    """
    transform = transforms.Compose([
        transforms.Resize((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    if args.specific_samples:
        dataset_dirs = utils.get_sampledata_link(args.dataset_dir)
        a_test_data = dsets.ImageFolder(dataset_dirs['sampleA'], transform=transform)
        b_test_data = dsets.ImageFolder(dataset_dirs['sampleB'], transform=transform)
    else:
        dataset_dirs = utils.get_testdata_link(args.dataset_dir)
        a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
        b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    # shuffle must stay False: filenames are matched to batches by index below.
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=False, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=False, num_workers=4)

    Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net,
                     norm=args.norm, use_dropout=args.use_dropout, gpu_ids=args.gpu_ids,
                     self_attn=args.self_attn, spectral=args.spectral)
    Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net,
                     norm=args.norm, use_dropout=args.use_dropout, gpu_ids=args.gpu_ids,
                     self_attn=args.self_attn, spectral=args.spectral)
    utils.print_networks([Gab, Gba], ['Gab', 'Gba'])

    ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_path))
    Gab.load_state_dict(ckpt['Gab'])
    Gba.load_state_dict(ckpt['Gba'])

    ab_ssims = []
    ba_ssims = []
    a_names = []
    b_names = []

    # Hoisted out of the batch loop: these modules are stateless and were
    # needlessly re-constructed for every batch.
    gray = kornia.color.RgbToGrayscale()
    m = kornia.losses.SSIM(11, 'mean')
    Gab.eval()
    Gba.eval()

    """ run """
    bs = args.batch_size
    for i, (a_real_test, b_real_test) in enumerate(zip(a_test_loader, b_test_loader)):
        # BUG FIX: the original sliced ``samples[i*16 : i*16+16]`` with a
        # hard-coded 16 while the loaders batch by args.batch_size, so
        # filenames were mismatched for any batch size other than 16.
        a_fnames = a_test_loader.dataset.samples[i * bs:i * bs + bs]
        b_fnames = b_test_loader.dataset.samples[i * bs:i * bs + bs]

        a_real_test = Variable(a_real_test[0], requires_grad=True)
        b_real_test = Variable(b_real_test[0], requires_grad=True)
        a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

        with torch.no_grad():
            a_fake_test = Gab(b_real_test)
            b_fake_test = Gba(a_real_test)
            a_recon_test = Gab(b_fake_test)
            b_recon_test = Gba(a_fake_test)

        # Calculate ssim loss per sample (last batch may be smaller than bs).
        b = a_real_test.size(0)
        for j in range(min(args.batch_size, b)):
            a_real = a_real_test[j].unsqueeze(0)
            b_fake = b_fake_test[j].unsqueeze(0)
            a_recon = a_recon_test[j].unsqueeze(0)
            b_real = b_real_test[j].unsqueeze(0)
            a_fake = a_fake_test[j].unsqueeze(0)
            b_recon = b_recon_test[j].unsqueeze(0)

            ba_ssim = m(gray((a_real + 1) / 2.0), gray((b_fake + 1) / 2.0))
            ab_ssim = m(gray((b_real + 1) / 2.0), gray((a_fake + 1) / 2.0))
            ab_ssims.append(ab_ssim.item())
            ba_ssims.append(ba_ssim.item())

            # (dead ``pic = torch.cat(...)`` grid removed: it was rebuilt for
            # every sample and never saved)

            path = args.results_path + '/b_fake/'
            image_path = path + a_fnames[j][0].split('/')[-1]
            if not os.path.isdir(path):
                os.makedirs(path)
            torchvision.utils.save_image((b_fake.data + 1) / 2.0, image_path)
            a_names.append(a_fnames[j][0].split('/')[-1])

            path = args.results_path + '/a_recon/'
            image_path = path + a_fnames[j][0].split('/')[-1]
            if not os.path.isdir(path):
                os.makedirs(path)
            torchvision.utils.save_image((a_recon.data + 1) / 2.0, image_path)

            path = args.results_path + '/a_fake/'
            image_path = path + b_fnames[j][0].split('/')[-1]
            if not os.path.isdir(path):
                os.makedirs(path)
            torchvision.utils.save_image((a_fake.data + 1) / 2.0, image_path)
            b_names.append(b_fnames[j][0].split('/')[-1])

            path = args.results_path + '/b_recon/'
            image_path = path + b_fnames[j][0].split('/')[-1]
            if not os.path.isdir(path):
                os.makedirs(path)
            torchvision.utils.save_image((b_recon.data + 1) / 2.0, image_path)

    df1 = pd.DataFrame(list(zip(a_names, ba_ssims)), columns=['Name', 'SSIM_A_to_B'])
    df2 = pd.DataFrame(list(zip(b_names, ab_ssims)), columns=['Name', 'SSIM_B_to_A'])
    df1.to_csv(args.results_path + '/b_fake/' + 'SSIM_A_to_B.csv')
    df2.to_csv(args.results_path + '/a_fake/' + 'SSIM_B_to_A.csv')
def test(args):
    """Iterate the A test loader once (recycling the B loader if it is
    shorter) and save a comparison grid per batch as
    ``<args.results_dir>/sample-<i>.jpg``."""
    transform = transforms.Compose([
        transforms.Resize((args.crop_height, args.crop_width)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])
    dataset_dirs = utils.get_testdata_link(args.dataset_dir)
    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size,
                                                shuffle=True, num_workers=4)

    Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net,
                     norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG=args.gen_net,
                     norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)
    utils.print_networks([Gab, Gba], ['Gab', 'Gba'])

    try:
        ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
        if len(args.gpu_ids) > 0:
            print(args.gpu_ids)
            Gab.load_state_dict(ckpt['Gab'])
            Gba.load_state_dict(ckpt['Gba'])
        else:
            # Checkpoint was saved from DataParallel models: strip the
            # 'module.' prefix before loading on CPU.
            new_state_dict = {}
            for k, v in ckpt['Gab'].items():
                name = k.replace('module.', '')
                new_state_dict[name] = v
            Gab.load_state_dict(new_state_dict)
            new_state_dict = {}
            for k, v in ckpt['Gba'].items():
                name = k.replace('module.', '')
                new_state_dict[name] = v
            Gba.load_state_dict(new_state_dict)
    except Exception:  # narrowed from a bare except (which also caught KeyboardInterrupt)
        print(' [*] No checkpoint!')

    """ run """
    Gab.eval()
    Gba.eval()
    a_it = iter(a_test_loader)
    b_it = iter(b_test_loader)
    for i in range(len(a_test_loader)):
        # BUG FIX: on exhaustion the original reset the iterator but never
        # re-fetched, so a_real_test kept the previous batch (or was
        # undefined on the first pass).  It also used bare excepts.
        try:
            a_real_test = next(a_it)[0]
        except StopIteration:
            a_it = iter(a_test_loader)
            a_real_test = next(a_it)[0]
        try:
            b_real_test = next(b_it)[0]
        except StopIteration:
            # B exhausted: recycle it so every A batch gets a partner.
            b_it = iter(b_test_loader)
            b_real_test = next(b_it)[0]

        a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])

        with torch.no_grad():
            a_fake_test = Gab(b_real_test)
            b_fake_test = Gba(a_real_test)
            a_recon_test = Gab(b_fake_test)
            b_recon_test = Gba(a_fake_test)

        # Rescale from [-1, 1] to [0, 1] before saving.
        pic = (torch.cat([
            a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test,
            b_recon_test
        ], dim=0).data + 1) / 2.0
        if not os.path.isdir(args.results_dir):
            os.makedirs(args.results_dir)
        path = str.format("{}/sample-{}.jpg", args.results_dir, i)
        torchvision.utils.save_image(pic, path, nrow=3)