def test(args):
    """Run the trained deblurring model on the image paths in ``args.image``
    and save each deblurred output (plus a PSNR log when ground truth exists).

    Args:
        args: parsed CLI namespace; uses train_dir, exp_name, output_dir,
              image (iterable of file paths).
    """
    params = load_paras(args)
    # Rebuild the network exactly as it was configured at training time.
    if params['multi']:
        my_model = model.MultiScaleNet(n_feats=params['n_feats'],
                                       n_resblocks=params['n_resblocks'],
                                       is_skip=params['skip'])
    else:
        my_model = model.SingleScaleNet(n_feats=params['n_feats'],
                                        n_resblocks=params['n_resblocks'],
                                        is_skip=params['skip'])
    my_model.cuda()
    # NOTE: 'model_lastest.pt' (sic) must match the filename used at save time.
    my_model.load_state_dict(
        torch.load(
            os.path.join(args.train_dir, args.exp_name, 'model',
                         'model_lastest.pt')))
    my_model.eval()

    output_dir = os.path.join(args.train_dir, args.exp_name, args.output_dir)
    # Fix: makedirs(exist_ok=True) also creates missing parent directories;
    # the original os.mkdir would raise if an intermediate level was absent.
    os.makedirs(output_dir, exist_ok=True)

    for img_path in args.image:
        with torch.no_grad():
            images = load_images(img_path, params['multi'])
            input_b1 = Variable(images['input_b1'].cuda())
            # Fix: fetch the target defensively *before* calling .cuda().
            # The original did Variable(images['target_s1'].cuda())
            # unconditionally, which would crash when no ground truth is
            # present and made the later `is not None` check dead code.
            target_s1 = images.get('target_s1')
            if target_s1 is not None:
                target_s1 = Variable(target_s1.cuda())
            if params['multi']:
                input_b2 = Variable(images['input_b2'].cuda())
                input_b3 = Variable(images['input_b3'].cuda())
                output_l1, _, _ = my_model((input_b1, input_b2, input_b3))
            else:
                output_l1 = my_model(input_b1)

        output_l1 = tensor_to_rgb(output_l1)
        output_l1 = output_l1.transpose(1, 2, 0)  # CHW -> HWC for PIL
        if target_s1 is not None:
            target_s1 = tensor_to_rgb(target_s1)
            target_s1 = target_s1.transpose(1, 2, 0)
            psnr = compute_psnr(target_s1, output_l1)
            print('Image %s psnr %.2f dB' % (os.path.basename(img_path), psnr))

        out = Image.fromarray(np.uint8(output_l1), mode='RGB')
        output_path = os.path.join(output_dir, os.path.basename(img_path))
        out.save(output_path)
        print('One image saved at ' + output_path)
def test(args):
    """Evaluate the saved model over the whole test dataloader, reporting
    per-image and average PSNR/SSIM/MSSIM; optionally save outputs and logs.

    Args:
        args: parsed CLI namespace; uses save_dir, exp_name, save, padding.
    """
    params = load_params(args)
    # check multi parameter to see whether to use multi model or single
    if params['multi']:
        test_model = model.MultiScaleNet(num_features=params['num_features'],
                                         num_resblocks=params['num_resblocks'],
                                         is_skip=params['skip'])
    else:
        test_model = model.SingleScaleNet(
            num_features=params['num_features'],
            num_resblocks=params['num_resblocks'],
            is_skip=params['skip'])

    # load the saved model
    test_model.load_state_dict(
        torch.load(
            os.path.join(args.save_dir, args.exp_name, 'model',
                         'model_lastest.pt')))
    test_model.cuda()
    test_model.eval()

    testdataloader = get_testdataset(args)

    # Fix: the original opened log_file only when args.save was set but wrote
    # to it unconditionally below, raising NameError for args.save == False.
    log_file = None
    if args.save:
        output_dir = os.path.join(args.save_dir, args.exp_name, 'test_output')
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        log_file = open(os.path.join(output_dir, 'test_logs.txt'), 'w')

    total_psnr, total_ssim, total_mssim, count = 0, 0, 0, 0
    for batch, images in enumerate(testdataloader):
        count += 1
        with torch.no_grad():
            input_b1 = Variable(images['input_b1'].cuda())
            target_s1 = Variable(images['target_s1'].cuda())
            if params['multi']:
                input_b2 = Variable(images['input_b2'].cuda())
                input_b3 = Variable(images['input_b3'].cuda())
                output_l1, _, _ = test_model((input_b1, input_b2, input_b3))
            else:
                output_l1 = test_model(input_b1)

        output_l1 = tensor_to_rgb(output_l1)
        target_s1 = tensor_to_rgb(target_s1)

        # Optionally crop a border of `padding` pixels before scoring.
        p = args.padding
        if p != 0:
            img1 = output_l1[:, p:-p, p:-p].squeeze()
            img2 = target_s1[:, p:-p, p:-p].squeeze()
        else:
            img1 = output_l1.squeeze()
            img2 = target_s1.squeeze()

        # Calculate psnr, ssim, mssim using libraries
        with torch.no_grad():
            mssim = compare_mssim(
                torch.from_numpy(img1[None]).cuda(),
                torch.from_numpy(img2[None]).cuda()).cpu().numpy()
            ssim = compare_ssim(
                torch.from_numpy(img1[None] / 255.0).cuda(),
                torch.from_numpy(img2[None] / 255.0).cuda()).cpu().numpy()
            psnr = compare_psnr(img1, img2)

        total_psnr += psnr
        total_ssim += ssim
        total_mssim += mssim

        if args.save:
            # output of SRCNN
            out = Image.fromarray(np.uint8(output_l1.transpose(1, 2, 0)),
                                  mode='RGB')
            out.save(os.path.join(output_dir, 'DB_{:04d}.png'.format(count)))

        log = 'Image {:04d} - PSNR {:.2f} - SSIM {:.4f} - MSSIM {:.4f}'.format(
            count, psnr, ssim, mssim)
        print(log)
        if log_file is not None:
            log_file.write(log + "\n")

    avg_psnr = total_psnr / (batch + 1)
    avg_ssim = total_ssim / (batch + 1)
    avg_mssim = total_mssim / (batch + 1)
    log = 'Average - PSNR {:.2f} dB - SSIM {:.4f} - MSSIM {:.4f}'.format(
        avg_psnr, avg_ssim, avg_mssim)
    print(log)
    if log_file is not None:
        log_file.write(log + "\n")
        log_file.close()
    if args.save:
        print('{:04d} images saved at {}'.format(count, output_dir))
def train(args):
    """Train the single- or multi-scale deblurring network, logging loss each
    epoch and validating/checkpointing every ``args.period`` epochs.

    Args:
        args: parsed CLI namespace; uses multi, num_features, num_resblocks,
              skip, save_dir, exp_name, finetuning, lr, epochs, period,
              val_data.
    """
    print(args)
    if args.multi:
        net_model = model.MultiScaleNet(num_features=args.num_features,
                                        num_resblocks=args.num_resblocks,
                                        is_skip=args.skip)
    else:
        net_model = model.SingleScaleNet(num_features=args.num_features,
                                         num_resblocks=args.num_resblocks,
                                         is_skip=args.skip)
    net_model = net_model.cuda()

    loss_function = set_loss(args)
    loss_function.cuda()

    last_epoch = 0
    loss_values = []
    save = SaveData(args.save_dir, args.exp_name, args.finetuning)
    save.save_params(args)
    num_params = count_parameters(net_model)
    save.save_log(str(num_params))
    if args.finetuning:
        net_model, last_epoch = save.load_model(net_model)
    start_epoch = last_epoch

    total_loss = 0
    total_time = 0

    # Fix: build the optimizer ONCE. The original re-created optim.Adam at
    # the top of every epoch, discarding Adam's running moment estimates each
    # time; set_lr still adjusts the learning rate per epoch below.
    optimizer = optim.Adam(net_model.parameters(), lr=args.lr)

    # load dataset
    dataloader = get_dataset(args)
    testdataloader = get_testdataset(args)

    for epoch in range(start_epoch, args.epochs):
        start = time.time()
        learning_rate = set_lr(args, epoch, optimizer)
        print("Epoch {}/{}".format(epoch + 1, args.epochs))
        total_loss_ = 0
        loss = 0
        for batch, images in tqdm_notebook(enumerate(dataloader)):
            input_b1 = Variable(images['input_b1'].cuda())
            target_s1 = Variable(images['target_s1'].cuda())
            if args.multi:
                input_b2 = Variable(images['input_b2'].cuda())
                target_s2 = Variable(images['target_s2'].cuda())
                input_b3 = Variable(images['input_b3'].cuda())
                target_s3 = Variable(images['target_s3'].cuda())
                output_l1, output_l2, output_l3 = net_model(
                    (input_b1, input_b2, input_b3))
                # Average the loss over the three pyramid scales.
                loss = (loss_function(output_l1, target_s1) +
                        loss_function(output_l2, target_s2) +
                        loss_function(output_l3, target_s3)) / 3
            else:
                output_l1 = net_model(input_b1)
                loss = loss_function(output_l1, target_s1)

            net_model.zero_grad()
            loss.backward()
            optimizer.step()
            #tqdm._instances.clear()
            total_loss_ += loss.data.cpu().numpy()

        total_loss = total_loss_ / (batch + 1)
        loss_values.append(total_loss)
        save.add_scalar('train/loss', total_loss, epoch)

        end = time.time()
        epoch_time = (end - start)
        total_time = total_time + epoch_time

        if (epoch + 1) % args.period == 0:
            if args.val_data:
                net_model.eval()
                psnr = validation(net_model, testdataloader, args.multi)
                net_model.train()
                log = "Epoch {}/{} \t Learning rate: {:.5f} \t Train total_loss: {:.5f} \t * Val_PSNR: {:.2f} \t Time: {:.4f}\n".format(
                    epoch + 1, args.epochs, learning_rate, total_loss, psnr,
                    total_time)
                print(log)
                save.save_log(log)
                save.add_scalar('valid/psnr', psnr, epoch)
            else:
                # Fix: the original string had "t Time" (missing the backslash
                # of "\t") AND six {} slots with only five .format() args,
                # which raises IndexError; pass epoch_time for the Time slot.
                log = "Epoch {}/{} \t Learning rate: {:.5f} \t Train total_loss: {:.5f} \t Time: {:.4f} \t Total time: {:.4f}\n".format(
                    epoch + 1, args.epochs, learning_rate, total_loss,
                    epoch_time, total_time)
                print(log)
                save.save_log(log)
            save.save_model(net_model, epoch)
            total_time = 0
def test(args):
    """Evaluate the saved model over the test dataloader (PSNR/SSIM/MSSIM),
    optionally writing the deblurred images and a text log to disk.

    Args:
        args: parsed CLI namespace; uses save_dir, exp_name, data_dir,
              n_threads, save, padding.
    """
    params = load_paras(args)
    if params['multi']:
        my_model = model.MultiScaleNet(n_feats=params['n_feats'],
                                       n_resblocks=params['n_resblocks'],
                                       is_skip=params['skip'])
    else:
        my_model = model.SingleScaleNet(n_feats=params['n_feats'],
                                        n_resblocks=params['n_resblocks'],
                                        is_skip=params['skip'])
    my_model.cuda()
    my_model.load_state_dict(
        torch.load(
            os.path.join(args.save_dir, args.exp_name, 'model',
                         'model_lastest.pt')))
    my_model.eval()

    dataloader = get_dataset(args.data_dir,
                             n_threads=args.n_threads,
                             multi=params['multi'])

    # Fix: the original opened log_file only under args.save yet wrote to it
    # unconditionally in the loop, raising NameError when args.save is False.
    log_file = None
    if args.save:
        output_dir = os.path.join(args.save_dir, args.exp_name, 'test_output')
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        log_file = open(
            os.path.join(args.save_dir, args.exp_name, 'test_logs.txt'), 'w')

    total_psnr, total_ssim, total_mssim, cnt = 0, 0, 0, 0
    for batch, images in enumerate(dataloader):
        cnt += 1
        with torch.no_grad():
            input_b1 = Variable(images['input_b1'].cuda())
            target_s1 = Variable(images['target_s1'].cuda())
            if params['multi']:
                input_b2 = Variable(images['input_b2'].cuda())
                input_b3 = Variable(images['input_b3'].cuda())
                output_l1, _, _ = my_model((input_b1, input_b2, input_b3))
            else:
                output_l1 = my_model(input_b1)

        output_l1 = tensor_to_rgb(output_l1)
        target_s1 = tensor_to_rgb(target_s1)

        # Optionally crop a border of `padding` pixels before scoring.
        p = args.padding
        if p != 0:
            img1 = output_l1[:, p:-p, p:-p].squeeze()
            img2 = target_s1[:, p:-p, p:-p].squeeze()
        else:
            img1 = output_l1.squeeze()
            img2 = target_s1.squeeze()

        with torch.no_grad():
            mssim = compare_mssim(
                torch.from_numpy(img1[None]).cuda(),
                torch.from_numpy(img2[None]).cuda()).cpu().numpy()
            ssim = compare_ssim(
                torch.from_numpy(img1[None] / 255.0).cuda(),
                torch.from_numpy(img2[None] / 255.0).cuda()).cpu().numpy()
            psnr = compare_psnr(img1, img2)

        total_psnr += psnr
        total_ssim += ssim
        total_mssim += mssim

        if args.save:
            # output of SRCNN
            out = Image.fromarray(np.uint8(output_l1.transpose(1, 2, 0)),
                                  mode='RGB')
            out.save(os.path.join(output_dir, 'DB_%04d.png' % (cnt)))

        log = 'Image %04d - PSNR %.2f - SSIM %.4f - MSSIM %.4f' % (
            cnt, psnr, ssim, mssim)
        print(log)
        if log_file is not None:
            log_file.write(log + "\n")

    avg_psnr = total_psnr / (batch + 1)
    avg_ssim = total_ssim / (batch + 1)
    avg_mssim = total_mssim / (batch + 1)
    log = 'Average - PSNR %.2f dB - SSIM %.4f - MSSIM %.4f' % (
        avg_psnr, avg_ssim, avg_mssim)
    print(log)
    if log_file is not None:
        log_file.write(log + "\n")
        log_file.close()
    if args.save:
        print('%04d images save at %s' % (cnt, output_dir))
def train(args):
    """Train the deblurring network with MSE loss and a StepLR schedule,
    validating and checkpointing every ``args.period`` epochs.

    Args:
        args: parsed CLI namespace; uses multi, n_feats, n_resblocks, skip,
              lr, lr_step_size, lr_gamma, save_dir, exp_name, finetuning,
              data_dir, val_data_dir, patch_size, batch_size, n_threads,
              epochs, period.
    """
    print(args)
    if args.multi:
        my_model = model.MultiScaleNet(n_feats=args.n_feats,
                                       n_resblocks=args.n_resblocks,
                                       is_skip=args.skip)
    else:
        my_model = model.SingleScaleNet(n_feats=args.n_feats,
                                        n_resblocks=args.n_resblocks,
                                        is_skip=args.skip)
    my_model = my_model.cuda()

    loss_function = nn.MSELoss().cuda()
    optimizer = optim.Adam(my_model.parameters(), lr=args.lr)
    scheduler = lr_scheduler.StepLR(optimizer, args.lr_step_size,
                                    args.lr_gamma)

    # utility for saving models, parameters and logs
    save = SaveData(args.save_dir, args.exp_name, args.finetuning)
    save.save_params(args)
    num_params = count_parameters(my_model)
    save.save_log(str(num_params))

    # load pre-trained model if provided
    last_epoch = -1
    if args.finetuning:
        my_model, last_epoch = save.load_model(my_model)
    start_epoch = last_epoch + 1

    # load dataset
    data_loader = get_dataset(args.data_dir,
                              patch_size=args.patch_size,
                              batch_size=args.batch_size,
                              n_threads=args.n_threads,
                              is_train=True,
                              multi=args.multi)
    if args.val_data_dir:
        valid_data_loader = get_dataset(args.val_data_dir,
                                        n_threads=args.n_threads,
                                        multi=args.multi)

    for epoch in range(start_epoch, args.epochs):
        print("* Epoch {}/{}".format(epoch + 1, args.epochs))
        learning_rate = optimizer.param_groups[0]['lr']
        total_loss = 0
        for batch, images in tqdm(enumerate(data_loader)):
            input_b1 = Variable(images['input_b1'].cuda())
            target_s1 = Variable(images['target_s1'].cuda())
            if args.multi:
                input_b2 = Variable(images['input_b2'].cuda())
                target_s2 = Variable(images['target_s2'].cuda())
                input_b3 = Variable(images['input_b3'].cuda())
                target_s3 = Variable(images['target_s3'].cuda())
                output_l1, output_l2, output_l3 = my_model(
                    (input_b1, input_b2, input_b3))
                # Average the loss over the three pyramid scales.
                loss = (loss_function(output_l1, target_s1) +
                        loss_function(output_l2, target_s2) +
                        loss_function(output_l3, target_s3)) / 3
            else:
                output_l1 = my_model(input_b1)
                loss = loss_function(output_l1, target_s1)

            my_model.zero_grad()
            loss.backward()
            optimizer.step()
            # .item() extracts the scalar without the legacy
            # .data/.cpu()/.numpy() chain.
            total_loss += loss.item()

        # Fix: step the LR scheduler *after* the epoch's optimizer updates.
        # The original called scheduler.step() at the top of the loop, before
        # any optimizer.step(), which PyTorch >= 1.1 documents as incorrect
        # ordering (it skips the first LR window and emits a warning).
        scheduler.step()

        loss = total_loss / (batch + 1)
        save.add_scalar('train/loss', loss, epoch)

        if epoch % args.period == 0:
            if args.val_data_dir:
                my_model.eval()
                psnr = validation(my_model, valid_data_loader, args.multi)
                my_model.train()
                log = "Epoch {}/{} \t Learning rate: {:.5f} \t Train total_loss: {:.5f} \t * Val PSNR: {:.2f}\n".format(
                    epoch + 1, args.epochs, learning_rate, loss, psnr)
                print(log)
                save.save_log(log)
                save.add_scalar('valid/psnr', psnr, epoch)
                save.save_model(my_model, epoch)
            else:
                log = "Epoch {}/{} \t Learning rate: {:.5f} \t Train total_loss: {:.5f}\n".format(
                    epoch + 1, args.epochs, learning_rate, loss)
                print(log)
                save.save_log(log)