# The original fragment begins mid-call; the opening of the DataLoader line is
# truncated, so only the visible arguments are kept (test_set is assumed to be
# defined earlier in the script).
test_loader = DataLoader(dataset=test_set, batch_size=1, shuffle=False)

out_path = "images_set" + str(t_set)
if not os.path.exists(out_path):
    os.makedirs(out_path)
model_path = "epochs_" + str(v_set)

for i in range(1, 41):
    # Checkpoint numbering differs per training set: every 25 epochs for
    # set 5, every 50 for set 14.
    if v_set == 5:
        j = i * 25
    elif v_set == 14:
        j = i * 50
    MODEL_NAME = 'netG_epoch_4_' + str(j) + '.pth'
    model = Generator(UPSCALE_FACTOR).eval()
    if torch.cuda.is_available():
        model = model.cuda()
    model.load_state_dict(torch.load(model_path + "/" + MODEL_NAME))

    # Output one super-resolved image for each saved epoch.
    image = Image.open(image_path)
    image = ToTensor()(image).unsqueeze(0)
    # Guard the device move (the original called .cuda() unconditionally, which
    # crashes on CPU-only machines) and replace the deprecated
    # Variable(volatile=True) with torch.no_grad().
    if torch.cuda.is_available():
        image = image.cuda()
    with torch.no_grad():
        out = model(image)
    out_img = ToPILImage()(out[0].cpu())
    out_img.save(out_path + "/" + 'out' + str(j) + '_' + IMAGE_NAME)

# Evaluation over the full test set; the loop body continues past this excerpt.
with torch.no_grad():
    for image_name, lr_image, hr_restore_img, hr_image in test_loader:
        image_name = image_name[0]
        # lr_image / hr_image can be used directly here: wrapping them in
        # Variable(volatile=True) is no longer needed inside torch.no_grad().
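The test loop above is cut off before any metric is computed. As a minimal, hypothetical sketch of what such a loop typically does with each lr_image/hr_image pair, the helper below scores a super-resolved output against its ground truth; the helper name and the scikit-image dependency are assumptions, not part of the original script, and the tensors are assumed to be CPU tensors shaped (C, H, W) and scaled to [0, 1].

import math

import torch
from skimage.metrics import structural_similarity

def score_pair(sr, hr):
    """Illustrative PSNR/SSIM for one (C, H, W) image pair in [0, 1]."""
    mse = torch.mean((sr - hr) ** 2).item()
    psnr = 10 * math.log10(1.0 / mse) if mse > 0 else float('inf')
    ssim = structural_similarity(
        hr.permute(1, 2, 0).numpy(),  # (C, H, W) -> (H, W, C) for skimage
        sr.permute(1, 2, 0).numpy(),
        channel_axis=2,
        data_range=1.0,
    )
    return psnr, ssim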
if __name__ == '__main__':
    train_set = Train_Dataset(train_data_dir, crop_size=crop_size,
                              upscale_factor=upscale_factor)
    val_set = Val_Dataset(val_data_dir, upscale_factor=upscale_factor)
    train_loader = DataLoader(dataset=train_set, num_workers=4, batch_size=64, shuffle=True)
    val_loader = DataLoader(dataset=val_set, num_workers=4, batch_size=1, shuffle=False)

    G = Generator(upscale_factor)
    D = Discriminator()
    G_criterion = GeneratorLoss()
    # Move the models and the loss to the GPU only when one is available
    # (the original called GeneratorLoss().cuda() unconditionally).
    if torch.cuda.is_available():
        G.cuda()
        D.cuda()
        G_criterion.cuda()

    G_optimizer = optim.Adam(G.parameters())
    D_optimizer = optim.Adam(D.parameters())

    # Per-epoch training statistics, collected for later plotting/export.
    results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': [],
               'psnr': [], 'ssim': []}
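The results dictionary is only declared here. As a hypothetical sketch of how such per-epoch statistics are usually written out once training has filled them (pandas assumed available; the file name is illustrative):

import pandas as pd

# Assumes each list in `results` holds one entry per completed epoch.
frame = pd.DataFrame(data=results, index=range(1, len(results['psnr']) + 1))
frame.to_csv('train_results.csv', index_label='Epoch')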
def train(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    transform = transforms.Compose([crop(args.scale, args.patch_size), augmentation()])
    dataset = mydata(GT_path=args.GT_path, LR_path=args.LR_path,
                     in_memory=args.in_memory, transform=transform)
    loader = DataLoader(dataset, batch_size=args.batch_size,
                        shuffle=True, num_workers=args.num_workers)

    generator = Generator(img_feat=3, n_feats=64, kernel_size=3,
                          num_block=args.res_num, scale=args.scale)
    if args.fine_tuning:
        generator.load_state_dict(torch.load(args.generator_path))
        print("pre-trained model is loaded")
        print("path : %s" % (args.generator_path))
    generator = generator.to(device)
    generator.train()

    l2_loss = nn.MSELoss()
    g_optim = optim.Adam(generator.parameters(), lr=1e-4)
    pre_epoch = 0
    fine_epoch = 0

    #### Pre-train the generator with L2 loss only
    while pre_epoch < args.pre_train_epoch:
        for i, tr_data in enumerate(loader):
            gt = tr_data['GT'].to(device)
            lr = tr_data['LR'].to(device)
            output, _ = generator(lr)
            loss = l2_loss(gt, output)

            g_optim.zero_grad()
            loss.backward()
            g_optim.step()

        pre_epoch += 1
        if pre_epoch % 2 == 0:
            print(pre_epoch)
            print(loss.item())
            print('=========')
        if pre_epoch % 800 == 0:
            # Checkpoint path is hard-coded in the original script.
            torch.save(generator.state_dict(),
                       'C:/Users/jihun/SRGAN-PyTorch/model/pre_trained_model_%03d.pt' % pre_epoch)

    #### Fine-tune with perceptual (VGG) and adversarial loss
    vgg_net = vgg19().to(device)
    vgg_net = vgg_net.eval()

    discriminator = Discriminator(patch_size=args.patch_size * args.scale)
    discriminator = discriminator.to(device)
    discriminator.train()

    d_optim = optim.Adam(discriminator.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.StepLR(g_optim, step_size=2000, gamma=0.1)

    VGG_loss = perceptual_loss(vgg_net)
    cross_ent = nn.BCELoss()
    tv_loss = TVLoss()

    while fine_epoch < args.fine_train_epoch:
        for i, tr_data in enumerate(loader):
            gt = tr_data['GT'].to(device)
            lr = tr_data['LR'].to(device)

            # Size the labels from the actual batch so the last (possibly
            # smaller) batch does not crash BCELoss; the original allocated
            # them once with args.batch_size.
            real_label = torch.ones((gt.size(0), 1)).to(device)
            fake_label = torch.zeros((gt.size(0), 1)).to(device)

            ## Train the discriminator
            output, _ = generator(lr)
            fake_prob = discriminator(output)
            real_prob = discriminator(gt)
            d_loss_real = cross_ent(real_prob, real_label)
            d_loss_fake = cross_ent(fake_prob, fake_label)
            d_loss = d_loss_real + d_loss_fake

            g_optim.zero_grad()
            d_optim.zero_grad()
            d_loss.backward()
            d_optim.step()

            ## Train the generator
            output, _ = generator(lr)
            fake_prob = discriminator(output)

            # Inputs are in [-1, 1]; rescale to [0, 1] before the VGG network.
            _percep_loss, hr_feat, sr_feat = VGG_loss((gt + 1.0) / 2.0,
                                                      (output + 1.0) / 2.0,
                                                      layer=args.feat_layer)

            L2_loss = l2_loss(output, gt)
            percep_loss = args.vgg_rescale_coeff * _percep_loss
            adversarial_loss = args.adv_coeff * cross_ent(fake_prob, real_label)
            # Total variation penalty on the squared VGG feature difference.
            total_variance_loss = args.tv_loss_coeff * tv_loss(
                args.vgg_rescale_coeff * (hr_feat - sr_feat) ** 2)

            g_loss = percep_loss + adversarial_loss + total_variance_loss + L2_loss

            g_optim.zero_grad()
            d_optim.zero_grad()
            g_loss.backward()
            g_optim.step()

        # Step the scheduler after the optimizer updates; the original stepped
        # it at the top of the epoch, which is deprecated and skips the
        # initial learning rate.
        scheduler.step()
        fine_epoch += 1
        if fine_epoch % 2 == 0:
            print(fine_epoch)
            print(g_loss.item())
            print(d_loss.item())
            print('=========')
        if fine_epoch % 500 == 0:
            torch.save(generator.state_dict(),
                       'C:/Users/jihun/SRGAN-PyTorch/model/SRGAN_gene_%03d.pt' % fine_epoch)
            torch.save(discriminator.state_dict(),
                       'C:/Users/jihun/SRGAN-PyTorch/model/SRGAN_discrim_%03d.pt' % fine_epoch)
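train() imports TVLoss and perceptual_loss rather than defining them. For reference, a total-variation penalty of the kind applied above can be sketched as follows; this is an illustrative implementation (hence the Sketch suffix), not the repository's own class:

import torch
import torch.nn as nn

class TVLossSketch(nn.Module):
    """Illustrative total-variation penalty for an (N, C, H, W) tensor:
    mean squared difference between vertically and horizontally
    neighboring elements, which discourages high-frequency noise."""

    def forward(self, x):
        h_var = torch.pow(x[:, :, 1:, :] - x[:, :, :-1, :], 2).mean()
        w_var = torch.pow(x[:, :, :, 1:] - x[:, :, :, :-1], 2).mean()
        return h_var + w_var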
args = parser.parse_args()

lr_shape = (64, 64, 3)
hr_shape = (256, 256, 3)
batch_size = args.batch_size
save_interval = args.save_interval
model_interval = args.model_interval
_lambda = args.lam
epochs = args.epoch

lr_vgg = 2e-4
lr_D = 2e-4
lr_G = 2e-4
beta_1 = 0.5
optimizer = Adam(lr_vgg, beta_1)

gen = Generator(lr_shape)
dis = Discriminator(hr_shape)
# The VGG feature extractors are frozen: they only provide features for the
# content loss and are never trained themselves.
VGG1, VGG2 = VGG(hr_shape)
VGG1.trainable = False
VGG2.trainable = False
VGG1.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
VGG2.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

# -----------------
# loss of generator
#
# perceptual loss = adversarial loss
#                 + content loss
# -----------------
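The comment block states the generator's objective: adversarial loss plus content loss. A hypothetical sketch of how this is commonly wired in Keras (assuming tf.keras and the script's gen, dis, VGG1, lr_shape, and Adam from above): a combined model freezes the discriminator and trains the generator against the discriminator's validity output and the VGG features of the generated image. The names combined and img_lr and the loss_weights are illustrative; the 1e-3 adversarial weight follows the SRGAN paper.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

img_lr = Input(shape=lr_shape)
sr = gen(img_lr)                 # generated high-resolution image
dis.trainable = False            # only the generator is updated through this model
validity = dis(sr)               # adversarial term
sr_features = VGG1(sr)           # content term, measured in VGG feature space

combined = Model(inputs=img_lr, outputs=[validity, sr_features])
combined.compile(loss=['binary_crossentropy', 'mse'],
                 loss_weights=[1e-3, 1.0],
                 optimizer=Adam(lr_G, beta_1))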