def LPIPS_cal(dataset_teacher, number_classes, image_dir, output_file, device):
    """Compute the mean pairwise LPIPS distance among generated (fake) images.

    For each class ``i`` in ``range(number_classes)``, images found under
    ``<image_dir><dataset_teacher>/fake_images/<i>`` are compared pairwise
    (consecutive pairs by default; all pairs when ``all_pairs_lpips`` is
    flipped to True) with the AlexNet 'net-lin' LPIPS model. At most 10
    reference images per class are used.

    Args:
        dataset_teacher: Sub-directory name of the teacher dataset
            (concatenated directly onto ``image_dir``, matching callers).
        number_classes: Number of class sub-folders to scan.
        image_dir: Root directory containing ``<dataset_teacher>/fake_images``.
        output_file: Text file the mean LPIPS value is appended to.
        device: Torch device string/object the image tensors are moved to.

    Returns:
        None. The mean/stderr are printed and the mean appended to
        ``output_file``.
    """
    print("Calculate LPIPS value...")
    all_pairs_lpips = False
    # BUGFIX: the original passed an undefined global ``use_gpu`` to the
    # model constructor; derive it from the explicit ``device`` argument.
    use_gpu = 'cuda' in str(device)
    # NOTE(review): upstream LPIPS expects version='0.1' (a string) in some
    # releases; 0.1 is kept as the original passed it — confirm against
    # the local perceptual_models wrapper.
    model = perceptual_models.PerceptualLoss(
        model='net-lin', net='alex', use_gpu=use_gpu, version=0.1)
    dists_among_fake_imgs = []
    for i in range(number_classes):
        # ``image_dir + dataset_teacher`` is plain concatenation on purpose
        # (original behavior: image_dir is expected to end with a separator).
        class_dir = os.path.join(
            image_dir + dataset_teacher, 'fake_images', str(i))
        files = os.listdir(class_dir)
        # Cap at 10 anchor images per class; len-1 so files[ff + 1] is valid.
        num = min(len(files) - 1, 10)
        for ff, file0 in enumerate(files[0:num]):
            # im2tensor yields an RGB tensor scaled to [-1, 1].
            img0 = im2tensor(
                load_image(os.path.join(class_dir, file0))).to(device)
            if all_pairs_lpips:
                files1 = files[ff + 1:]
            else:
                files1 = [files[ff + 1]]
            for file1 in files1[0:num]:
                img1 = im2tensor(
                    load_image(os.path.join(class_dir, file1))).to(device)
                # Perceptual distance between the two fake images.
                dist01 = model.forward(img0, img1)
                dists_among_fake_imgs.append(dist01.item())
    # ROBUSTNESS: guard the empty case (every class had < 2 images), which
    # previously produced NaN from numpy.mean on an empty array.
    if not dists_among_fake_imgs:
        print("No image pairs found; skipping LPIPS statistics.")
        return
    dists = numpy.array(dists_among_fake_imgs)
    avg_dist_among_fake_imgs = numpy.mean(dists)
    # Standard error of the mean, not the raw std.
    stderr_dist_among_fake_imgs = numpy.std(dists) / numpy.sqrt(len(dists))
    print("[LPIPS_mean: %s] [LPIPS_std: %s] " % (
        avg_dist_among_fake_imgs, stderr_dist_among_fake_imgs))
    # ``with`` closes the file; the original's explicit f.close() inside the
    # with-block was redundant and has been removed.
    with open(output_file, 'a') as f:
        f.write(str(avg_dist_among_fake_imgs) + '\n')
# NOTE(review): whitespace-mangled fragment — the original newlines were lost,
# so this single physical line begins mid-expression (the tail of a
# `.save('...'.format(..., fn.split('/')[-1]))` call started outside this
# view) and everything after the first '#' below is dead text as written.
# From the visible tokens it computed per-image PSNR (on the Y channel, after
# a 16-pixel border crop) and LPIPS versus ground truth, then printed the
# running averages and saved a checkpoint when the mean LPIPS improved —
# but the loop boundaries cannot be recovered from this line, so the code is
# left byte-identical. TODO: restore the original line structure before editing.
fn.split('/')[-1])) # PSNR img_gt = (val_H * 255).astype(np.uint8) img_target = ((batch_Out) * 255).astype(np.uint8) CROP_S = 16 if CROP_S > 0: img_gt = img_gt[CROP_S:-CROP_S, CROP_S:-CROP_S] img_target = img_target[CROP_S:-CROP_S, CROP_S:-CROP_S] psnrs.append( PSNR( _rgb2ycbcr(img_gt)[:, :, 0], _rgb2ycbcr(img_target)[:, :, 0], 0)) # LPIPS dist = model_LPIPS.forward( im2tensor(img_target), im2tensor(img_gt)) # RGB image from [0,255] to [-1,1] lpips.append(dist) print('AVG PSNR/LPIPS: Validation: {}/{}'.format( np.mean(np.asarray(psnrs)), np.mean(np.asarray(lpips)))) # Save best model if i % I_SAVE == 0: if np.mean(np.asarray(lpips)) < best_avg_lpips: best_avg_lpips = np.mean(np.asarray(lpips)) SaveCheckpoint(i, best=True)
# NOTE(review): whitespace-mangled fragment — begins mid-call
# (`testset, out_fn))` closes a format/save expression started outside this
# view) and, collapsed onto one line, everything after the first '#' is dead
# text. The visible tokens computed PSNR (4-pixel crop), SSIM
# (skimage.metrics.structural_similarity with gaussian weights, sigma=1.5),
# and LPIPS on cropped tensors, then printed per-testset averages. Loop
# nesting is unrecoverable here, so the code is left byte-identical.
# TODO: restore the original line structure before editing.
testset, out_fn)) # Evaluation CROP_S = 4 # PSNR psnrs.append(PSNR(img_gt_y, batch_out_y, CROP_S)) # SSIM ssims.append( skimage.metrics.structural_similarity( img_gt_y, batch_out_y, data_range=255, multichannel=False, gaussian_weights=True, sigma=1.5, use_sample_covariance=False)) # LPIPS img_gt_t = im2tensor(img_gt) batch_out_t = im2tensor(batch_out) if CROP_S > 0: batch_out_t = batch_out_t[:, :, CROP_S:-CROP_S, CROP_S:-CROP_S] img_gt_t = img_gt_t[:, :, CROP_S:-CROP_S, CROP_S:-CROP_S] lpips.append(model_LPIPS.forward(batch_out_t, img_gt_t)) print('{} AVG PSNR/SSIM/LPIPS: {}/{}/{}'.format( testset, np.mean(np.asarray(psnrs)), np.mean(np.asarray(ssims)), np.mean(np.asarray(lpips))))
# NOTE(review): whitespace-mangled fragment of a validation loop — the
# original newlines were lost; collapsed onto one line, everything after the
# first '#' below is dead text. The visible tokens saved the output image as
# PNG, computed Y-channel PSNR and LPIPS (both after a 4-pixel border crop),
# printed the per-valset averages and logged them to a TensorBoard writer.
# Note `img_gt` is rebound from a uint8 ndarray to a tensor mid-fragment —
# intentional in the original, but fragile. Loop boundaries are not
# recoverable here, so the code is left byte-identical.
# TODO: restore the original line structure before editing.
img_gt = np.around(img_H * 255).astype(np.uint8) img_out = np.around(batch_output * 255).astype(np.uint8) Image.fromarray(img_out).save('{}/result/{}/{}/{}.png'.format( EXP_NAME, str(VERSION), valset_name, fn.split('/')[-1])) # PSNR CROP_S = 4 psnrs.append( PSNR( _rgb2ycbcr(img_gt)[:, :, 0], _rgb2ycbcr(img_out)[:, :, 0], CROP_S)) # LPIPS img_gt = im2tensor(img_gt) # RGB image from [-1,1] img_out = im2tensor(img_out) if CROP_S > 0: img_out = img_out[:, :, CROP_S:-CROP_S, CROP_S:-CROP_S] img_gt = img_gt[:, :, CROP_S:-CROP_S, CROP_S:-CROP_S] dist = model_LPIPS.forward(img_out, img_gt) lpips.append(dist) mean_psnr = np.mean(np.asarray(psnrs)) mean_lpips = np.mean(np.asarray(lpips)) print('AVG PSNR/LPIPS: {}: {}/{}'.format(valset_name, mean_psnr, mean_lpips)) writer.add_scalar(valset_name, mean_psnr, i) writer.add_scalar(valset_name + '_lpips', mean_lpips, i)