def test(model, device):
    """Evaluate ``model.gen_g`` on the held-out test patches and print metrics.

    Iterates over the ``canon`` test patches in batches of ``config.batch_size``,
    runs the generator, and accumulates four quality scores against the DSLR
    ground truth: PSNR, SSIM via skimage, SSIM via the tensor metric, and
    multi-scale SSIM.  Averaged results are printed; nothing is returned.

    Args:
        model: object exposing a ``gen_g`` generator network (phone -> DSLR).
        device: torch device the generator and inputs are moved to.
    """
    test_path = config.dataset_dir + config.phone + '/test_data/patches/canon/'
    # Round the file count down to a multiple of batch_size so every
    # iteration below processes a full batch.
    test_image_num = len([
        name for name in os.listdir(test_path)
        if os.path.isfile(os.path.join(test_path, name))
    ]) // config.batch_size * config.batch_size

    score_psnr, score_ssim_skimage, score_ssim_minstar, score_msssim_minstar = 0.0, 0.0, 0.0, 0.0

    # Inference only: disable autograd so no graph is built and memory stays flat.
    with torch.no_grad():
        for start in range(0, test_image_num, config.batch_size):
            end = min(start + config.batch_size, test_image_num)
            # Actual number of images in this batch.  Equal to
            # config.batch_size today (test_image_num is a multiple of it),
            # but weighting by it keeps the averages correct for any tail batch.
            batch_len = end - start
            test_phone, test_dslr = load_test_data(
                config.phone, config.dataset_dir, start, end,
                config.height * config.width * config.channels)

            # Flat numpy patches -> NCHW float tensors on the target device.
            x = torch.from_numpy(test_phone).float()
            y_real = torch.from_numpy(test_dslr).float()
            x = x.view(-1, config.height, config.width,
                       config.channels).permute(0, 3, 1, 2).to(device)
            y_real = y_real.view(-1, config.height, config.width,
                                 config.channels).permute(0, 3, 1, 2).to(device)
            y_fake = model.gen_g(x)

            # Calculate PSNR & SSIM scores (each weighted by batch size so the
            # final division by test_image_num yields per-image averages).
            score_psnr += psnr(y_fake, y_real) * batch_len

            # skimage operates on HWC numpy arrays.
            y_fake_np = y_fake.detach().cpu().numpy().transpose(0, 2, 3, 1)
            y_real_np = y_real.cpu().numpy().transpose(0, 2, 3, 1)
            temp_ssim, _ = compare_ssim(y_fake_np, y_real_np, multichannel=True,
                                        gaussian_weights=True, full=True)
            score_ssim_skimage += (temp_ssim * batch_len)

            temp_ssim, _ = ssim(y_fake, y_real, kernel_size=11, kernel_sigma=1.5)
            score_ssim_minstar += temp_ssim * batch_len

            score_msssim_minstar += multi_scale_ssim(
                y_fake, y_real, kernel_size=11, kernel_sigma=1.5) * batch_len
            print('PSNR & SSIM scores of {} images are calculated.'.format(end))

    # Convert accumulated (score * count) sums into per-image means.
    score_psnr /= test_image_num
    score_ssim_skimage /= test_image_num
    score_ssim_minstar /= test_image_num
    score_msssim_minstar /= test_image_num
    print(
        'PSNR : {:.4f}, SSIM_skimage : {:.4f}, SSIM_minstar : {:.4f}, SSIM_msssim: {:.4f}'
        .format(score_psnr, score_ssim_skimage, score_ssim_minstar,
                score_msssim_minstar))
# Training patch geometry: 100x100 RGB, flattened length in PATCH_SIZE.
PATCH_WIDTH = 100
PATCH_HEIGHT = 100
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments
# lutils.process_command_args parses sys.argv into hyperparameters and paths;
# w_* are the loss-term weights (content/color/texture/tv/ssim).
phone, batch_size, train_size, learning_rate, num_train_iters, \
w_content, w_color, w_texture, w_tv, w_ssim,\
dped_dir, vgg_dir, eval_step = lutils.process_command_args(sys.argv)

# NOTE(review): seeding is deliberately disabled here; runs are non-deterministic.
#np.random.seed(0)

# loading training and test data
print("Loading test data...")
test_data, test_answ = load_test_data(phone, dped_dir, PATCH_SIZE)
print("Test data was loaded\n")

print("Loading training data...")
# Dataset pairs phone (iphone) inputs with DSLR (canon) targets.
train_data = trainset(dped_dir + '/iphone/', dped_dir + '/canon/')
# Alternative directory layout, kept for reference:
#train_data = trainset(dped_dir + '/train/raw/', dped_dir + '/train/target/')
trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
print("Training data was loaded\n")

TEST_SIZE = test_data.shape[0]
print(TEST_SIZE)
print('-----')
# Number of full evaluation batches (remainder images are dropped).
num_test_batches = int(test_data.shape[0] / batch_size)

# defining system architecture
# Initialize all graph variables, then (for levels below 5) warm-start the
# generator from the previous level's checkpoint.
# NOTE(review): assumes `sess`, `LEVEL`, `restore_iter`, `generator_vars`,
# `dataset_dir`, `DSLR_SCALE`, `train_size` and `batch_size` are defined
# earlier in this file — confirm against the surrounding script.
print("Initializing variables")
sess.run(tf.global_variables_initializer())
# Saver restricted to generator variables; keeps up to 100 checkpoints.
saver = tf.train.Saver(var_list=generator_vars, max_to_keep=100)

if LEVEL < 5:
    # PyNet is trained coarse-to-fine: each level restores the checkpoint
    # produced by the next-coarser level (LEVEL + 1).
    print("Restoring Variables")
    saver.restore(
        sess, "models/pynet_level_" + str(LEVEL + 1) + "_iteration_" +
        str(restore_iter) + ".ckpt")

# Loading training and test data
print("Loading test data...")
test_data, test_answ = load_test_data(dataset_dir, PATCH_WIDTH, PATCH_HEIGHT,
                                      DSLR_SCALE)
print("Test data was loaded\n")

print("Loading training data...")
train_data, train_answ = load_training_batch(dataset_dir, train_size,
                                             PATCH_WIDTH, PATCH_HEIGHT,
                                             DSLR_SCALE)
print("Training data was loaded\n")

TEST_SIZE = test_data.shape[0]
# Number of full evaluation batches (remainder images are dropped).
num_test_batches = int(test_data.shape[0] / batch_size)

# Fixed random subset of test crops reused for visual progress snapshots.
visual_crops_ids = np.random.randint(0, TEST_SIZE, batch_size)
visual_test_crops = test_data[visual_crops_ids, :]
visual_target_crops = test_answ[visual_crops_ids, :]
# Training patch geometry: 360x240 RGB, flattened length in PATCH_SIZE.
PATCH_WIDTH = 360
PATCH_HEIGHT = 240
PATCH_SIZE = PATCH_WIDTH * PATCH_HEIGHT * 3

# processing command arguments
# utils.process_command_args parses sys.argv into hyperparameters and paths.
batch_size, train_size, learning_rate, num_train_iters, \
w_color, dped_dir, eval_step = utils.process_command_args(sys.argv)

# Fixed seed so batch sampling is reproducible across runs.
np.random.seed(0)

# loading training and test data
print("Loading test data...")
test_data, test_answ = load_test_data(PATCH_SIZE)
print("Test data was loaded\n")

print("Loading training data...")
train_data, train_answ = load_batch(dped_dir, train_size, PATCH_SIZE)
print("Training data was loaded\n")

TEST_SIZE = test_data.shape[0]
# Number of full evaluation batches (remainder images are dropped).
num_test_batches = int(test_data.shape[0] / batch_size)

# defining system architecture
# device_count={'GPU': 0} forces CPU-only execution for this session.
with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(
        device_count={'GPU': 0})) as sess:

    # placeholders for training data
# Evaluation graph and per-image scoring loop.
# NOTE(review): assumes `enhanced`, `dslr_`, `phone_`, `TARGET_SIZE`,
# `PATCH_WIDTH`, `PATCH_HEIGHT`, `dataset_dir` and `sess` are defined earlier
# in this file — confirm against the surrounding script.
enhanced_flat = tf.reshape(enhanced, [-1, TARGET_SIZE])
dslr_flat = tf.reshape(dslr_, [-1, TARGET_SIZE])

# MS-SSIM over images in [0, 1]; despite the `loss_` prefix these are
# quality scores (higher is better), not minimized losses.
loss_ms_ssim = tf.reduce_mean(tf.image.ssim_multiscale(enhanced, dslr_, 1.0))
loss_mse = tf.reduce_sum(tf.pow(dslr_flat - enhanced_flat, 2)) / TARGET_SIZE
# PSNR derived from MSE, assuming a peak signal value of 1.0.
loss_psnr = 20 * utils.log10(1.0 / tf.sqrt(loss_mse))

saver = tf.train.Saver()

# Restore your own model from a checkpoint
# saver.restore(sess, "path_to_your_checkpoint"), e.g.:
saver.restore(sess, "models/original/pynet_level_0.ckpt")

print("Loading test data...")
test_data, test_answ = load_test_data(dataset_dir, PATCH_WIDTH, PATCH_HEIGHT,
                                      2.0)
print("Test data was loaded\n")

# Running accumulators, averaged after the loop (continuation not shown here).
loss_ssim_ = 0.0
loss_psnr_ = 0.0

test_size = test_data.shape[0]
for j in range(test_size):

    if j % 100 == 0:
        # Progress indicator every 100 images.
        print(j)

    # One 4-channel (presumably raw Bayer — TODO confirm) phone patch, and the
    # matching DSLR target at 2x spatial resolution in RGB.
    phone_images = np.reshape(test_data[j], [1, PATCH_HEIGHT, PATCH_WIDTH, 4])
    dslr_images = np.reshape(test_answ[j],
                             [1, int(PATCH_HEIGHT * 2), int(PATCH_WIDTH * 2), 3])

    losses = sess.run([loss_psnr, loss_ms_ssim],
                      feed_dict={phone_: phone_images, dslr_: dslr_images})