def evaluate(model, args):
    """Run the model over a paired test set and report PSNR/SSIM.

    Each restored image is written to ``args.test_output_dir`` as
    ``<prefix>_output.png`` where ``prefix`` is the part of the input
    filename before the first underscore.

    Args:
        model: trained generator, forwarded to ``predict_single``.
        args: namespace providing ``test_input_dir``, ``test_gt_dir``
            and ``test_output_dir``.

    Returns:
        tuple: (mean SSIM, mean PSNR) over the test set.
    """
    # Inputs and ground truths are paired positionally via sorted order.
    input_list = sorted(os.listdir(args.test_input_dir))
    gt_list = sorted(os.listdir(args.test_gt_dir))
    num = len(input_list)
    psnr_list = []
    ssim_list = []
    for i in range(num):
        prefix = input_list[i].split('_')[0]
        print('Processing image: %s' % (input_list[i]))
        img = cv2.imread(opj(args.test_input_dir, input_list[i]))
        gt = cv2.imread(opj(args.test_gt_dir, gt_list[i]))
        img = image_align(img)
        gt = image_align(gt)
        result = predict_single(model, img)
        result = np.array(result, dtype='uint8')
        cur_psnr = calc_psnr(result, gt)
        cur_ssim = calc_ssim(result, gt)
        print('PSNR is %.4f and SSIM is %.4f' % (cur_psnr, cur_ssim))
        psnr_list.append(cur_psnr)
        ssim_list.append(cur_ssim)
        out_name = prefix + "_" + "output.png"
        cv2.imwrite(opj(args.test_output_dir, out_name), result)
    # Means come straight from the metric lists; the separate running sums
    # in the original were redundant state.
    print('In testing dataset, PSNR is %.4f and SSIM is %.4f' %
          (np.mean(psnr_list), np.mean(ssim_list)))
    df = pd.DataFrame(np.array([psnr_list, ssim_list]).T,
                      columns=['psnr', 'ssim'])
    # Removed dead `df.head()` whose return value was discarded.
    print(df.apply(status))
    return np.mean(ssim_list), np.mean(psnr_list)
def validate(model, inputs, labels):
    """One validation step: denoise a RAW batch and score the first sample.

    Args:
        model: network mapping (noisy RAW, variance map) -> denoised RAW.
        inputs: dict with 'noisy_img', 'variance', 'red_gain', 'blue_gain'
            and 'cam2rgb' entries (array-likes convertible to FloatTensor).
        labels: ground-truth RAW batch (array-like).

    Returns:
        tuple: (PSNR, SSIM) of the processed RGB output vs. ground truth
        for the first image of the batch.
    """
    model.eval()
    # torch.autograd.Variable has been a no-op wrapper since PyTorch 0.4;
    # plain tensors suffice and carry the same autograd semantics.
    raw_image_in = torch.FloatTensor(inputs['noisy_img']).cuda()
    raw_image_var = torch.FloatTensor(inputs['variance']).cuda()
    raw_image_gt = torch.FloatTensor(labels).cuda()
    red_gain = torch.FloatTensor(inputs['red_gain']).cuda()
    blue_gain = torch.FloatTensor(inputs['blue_gain']).cuda()
    cam2rgb = torch.FloatTensor(inputs['cam2rgb']).cuda()
    with torch.no_grad():
        raw_image_out = model(raw_image_in, raw_image_var)
        # Process RAW images to RGB
        # NOTE(review): rgb_image_in is computed but never used below;
        # kept in case process.process has side effects — confirm and drop.
        rgb_image_in = process.process(raw_image_in, red_gain, blue_gain,
                                       cam2rgb)
        rgb_image_out = process.process(raw_image_out, red_gain, blue_gain,
                                        cam2rgb)
        rgb_image_gt = process.process(raw_image_gt, red_gain, blue_gain,
                                       cam2rgb)
    # First sample of the batch: CHW float -> HWC uint8 in [0, 255].
    # `.data` is deprecated; `.cpu().numpy()` is the modern equivalent here.
    rgb_image_out = rgb_image_out[0, :, :, :].cpu().numpy().transpose(
        (1, 2, 0))
    rgb_image_out = np.array(rgb_image_out * 255.0, dtype='uint8')
    rgb_image_gt = rgb_image_gt[0, :, :, :].cpu().numpy().transpose(
        (1, 2, 0))
    rgb_image_gt = np.array(rgb_image_gt * 255.0, dtype='uint8')
    cur_psnr = calc_psnr(rgb_image_out, rgb_image_gt)
    cur_ssim = calc_ssim(rgb_image_out, rgb_image_gt)
    return cur_psnr, cur_ssim
def predict(args):
    """Load the trained generator and run it in 'demo' or 'test' mode.

    demo: restore every image in ``args.input_dir`` and save JPEGs to
    ``args.output_dir``.
    test: restore every image, compare against ground truth in
    ``args.gt_dir``, print per-image and mean PSNR/SSIM, pickle the metric
    lists, and print per-column stats via ``status``.
    """
    model = Generator().cuda()
    model.load_state_dict(torch.load(opj(args.model_dir, args.g_weights)))
    if args.mode == 'demo':
        input_list = sorted(os.listdir(args.input_dir))
        num = len(input_list)
        for i in range(num):
            print('Processing image: %s' % (input_list[i]))
            img = cv2.imread(opj(args.input_dir, input_list[i]))
            img = image_align(img)
            result = predict_single(model, img)
            img_name = input_list[i].split('.')[0]
            cv2.imwrite(opj(args.output_dir, img_name + '.jpg'), result)
    elif args.mode == 'test':
        # Inputs and ground truths paired positionally via sorted order.
        input_list = sorted(os.listdir(args.input_dir))
        gt_list = sorted(os.listdir(args.gt_dir))
        num = len(input_list)
        cumulative_psnr = 0
        cumulative_ssim = 0
        psnr_list = []
        ssim_list = []
        for i in range(num):
            print('Processing image: %s' % (input_list[i]))
            img = cv2.imread(opj(args.input_dir, input_list[i]))
            gt = cv2.imread(opj(args.gt_dir, gt_list[i]))
            img = image_align(img)
            gt = image_align(gt)
            result = predict_single(model, img)
            result = np.array(result, dtype='uint8')
            cur_psnr = calc_psnr(result, gt)
            cur_ssim = calc_ssim(result, gt)
            print('PSNR is %.4f and SSIM is %.4f' % (cur_psnr, cur_ssim))
            cumulative_psnr += cur_psnr
            cumulative_ssim += cur_ssim
            psnr_list.append(cur_psnr)
            ssim_list.append(cur_ssim)
        print('In testing dataset, PSNR is %.4f and SSIM is %.4f' %
              (cumulative_psnr / num, cumulative_ssim / num))
        # pickle.dump streams straight to the file object instead of
        # materialising the bytes first with dumps() + write().
        with open('../try/psnr_list', 'wb') as fout:
            pkl.dump(psnr_list, fout)
        with open('../try/ssim_list', 'wb') as fout:
            pkl.dump(ssim_list, fout)
        df = pd.DataFrame(np.array([psnr_list, ssim_list]).T,
                          columns=['psnr', 'ssim'])
        # Removed dead `df.head()` whose return value was discarded.
        print(df.apply(status))
    else:
        print('Mode Invalid!')
def split_result(args):
    """Restore each test image and bucket (input, gt, output) triplets by SSIM.

    Intervals (matching the original if/elif chain):
    interval_1: [0, 0.5)   interval_2: [0.5, 0.82)
    interval_3: [0.82, 0.87)   interval_4: [0.87, 1.0]

    Args:
        args: namespace with ``model_dir``, ``g_weights``, ``input_dir``,
            ``gt_dir`` and ``split_dir``.
    """
    # split the result with ssim (0,0.5);(0.5,0.82);(0.82,0.87);(0.87,1.0)
    split_point = [0.5, 0.82, 0.87]
    model = Generator().cuda()
    model.load_state_dict(torch.load(opj(args.model_dir, args.g_weights)))
    input_list = sorted(os.listdir(args.input_dir))
    gt_list = sorted(os.listdir(args.gt_dir))
    num = len(input_list)
    cumulative_psnr = 0
    cumulative_ssim = 0
    split_dir = args.split_dir
    interval_dirs = [opj(split_dir, 'interval_%d' % k) for k in range(1, 5)]
    for interval_dir in interval_dirs:  # renamed: `dir` shadowed a builtin
        # makedirs(exist_ok=True) also creates missing parents and avoids
        # the exists()+mkdir race of the original.
        os.makedirs(interval_dir, exist_ok=True)
    for i in range(num):
        print('Processing image: %s' % (input_list[i]))
        img = cv2.imread(opj(args.input_dir, input_list[i]))
        gt = cv2.imread(opj(args.gt_dir, gt_list[i]))
        img = image_align(img)
        gt = image_align(gt)
        result = predict_single(model, img)
        result = np.array(result, dtype='uint8')
        cur_psnr = calc_psnr(result, gt)
        cur_ssim = calc_ssim(result, gt)
        print('PSNR is %.4f and SSIM is %.4f' % (cur_psnr, cur_ssim))
        cumulative_psnr += cur_psnr
        cumulative_ssim += cur_ssim
        prefix = input_list[i].split('_')[0]
        # Number of split points at or below cur_ssim == interval index;
        # equivalent to the original if/elif chain.
        bucket = sum(cur_ssim >= p for p in split_point)
        write_interval(interval_dirs[bucket], prefix, img, gt, result)
    print('In testing dataset, PSNR is %.4f and SSIM is %.4f' %
          (cumulative_psnr / num, cumulative_ssim / num))
def get_analysis(img_path, gt_path):
    """Run the generator on one image file and score it against ground truth.

    NOTE(review): relies on module-level globals ``generator``,
    ``times_in_attention`` and ``device`` defined elsewhere in this file —
    confirm they are initialised before this is called.

    Args:
        img_path: path to the degraded input image.
        gt_path: path to the corresponding ground-truth image.

    Returns:
        tuple: (PSNR, SSIM) of the restored image vs. the ground truth.
    """
    img = cv2.imread(img_path)
    gt = cv2.imread(gt_path)
    dsize = (720, 480)  # fixed (width, height) both images are resized to
    img = cv2.resize(img, dsize)
    gt = cv2.resize(gt, dsize)
    img_tensor = prepare_img_to_tensor(img)
    with torch.no_grad():
        # generator returns a sequence of stages; only the final output is scored
        out = generator(img_tensor, times_in_attention, device)[-1]
    # NCHW float tensor -> HWC uint8 image in [0, 255]
    # (`.data` is deprecated; `.cpu().numpy()` is the modern equivalent)
    out = out.cpu().numpy()
    out = out.transpose((0, 2, 3, 1))
    out = out[0, :, :, :] * 255.
    out = np.array(out, dtype='uint8')
    cur_psnr = calc_psnr(out, gt)
    cur_ssim = calc_ssim(out, gt)
    return cur_psnr, cur_ssim
        # NOTE(review): fragment — the enclosing function header and the
        # opening `if args.mode == 'demo':` branch with its for-loop are
        # outside this view; indentation below is reconstructed.
        # demo mode (continued): restore each input image and save as JPEG.
        img = cv2.imread(args.input_dir + input_list[i])
        img = align_to_four(img)
        result = predict(img)
        img_name = input_list[i].split('.')[0]
        cv2.imwrite(args.output_dir + img_name + '.jpg', result)
    elif args.mode == 'test':
        # Paired evaluation: inputs and ground truths matched positionally
        # via sorted directory listings.
        input_list = sorted(os.listdir(args.input_dir))
        gt_list = sorted(os.listdir(args.gt_dir))
        num = len(input_list)
        cumulative_psnr = 0
        cumulative_ssim = 0
        for i in range(num):
            print('Processing image: %s' % (input_list[i]))
            img = cv2.imread(args.input_dir + input_list[i])
            gt = cv2.imread(args.gt_dir + gt_list[i])
            # align both to the model's required size multiple before scoring
            img = align_to_four(img)
            gt = align_to_four(gt)
            result = predict(img)
            result = np.array(result, dtype='uint8')
            cur_psnr = calc_psnr(result, gt)
            cur_ssim = calc_ssim(result, gt)
            print('PSNR is %.4f and SSIM is %.4f' % (cur_psnr, cur_ssim))
            cumulative_psnr += cur_psnr
            cumulative_ssim += cur_ssim
        print('In testing dataset, PSNR is %.4f and SSIM is %.4f' % (cumulative_psnr / num, cumulative_ssim / num))
    else:
        print('Mode Invalid!')
def evaluate(self, epoch, keep_frame=False, keep_batch=False, save_prediction=False, ssim=False):
    """Score predicted future frames on the validation split.

    Args:
        epoch: current epoch; not used inside this method (kept for the
            caller's interface).
        keep_frame: if True, return per-future-frame (means, stds).
        keep_batch: if True, return per-sample scores averaged over frames.
        save_prediction: not used inside this method.
        ssim: if True, score frames with SSIM on grayscale uint8 images;
            otherwise with PSNR derived from per-sample MSE.

    Returns:
        keep_frame -> (list of per-frame means, list of per-frame stds);
        keep_batch -> 1-D array of per-sample scores;
        otherwise  -> scalar mean over all frames and samples.
    """
    with torch.no_grad():
        # one score bucket per predicted (non-context) timestep
        loss = [[] for _ in range(self.opt.sequence_length - self.opt.context_frames)]
        for iter_, (images, haptics, audios, behaviors, vibros) in enumerate(self.dataloader['valid']):
            # zero out modalities disabled in the experiment config
            if not self.opt.use_haptic:
                haptics = torch.zeros_like(haptics).to(self.device)
            if not self.opt.use_behavior:
                behaviors = torch.zeros_like(behaviors).to(self.device)
            if not self.opt.use_audio:
                audios = torch.zeros_like(audios).to(self.device)
            if not self.opt.use_vibro:
                vibros = torch.zeros_like(vibros).to(self.device)
            behaviors = behaviors.unsqueeze(-1).unsqueeze(-1)
            # (batch, time, ...) -> tuple of per-timestep batch tensors
            images = images.permute([1, 0, 2, 3, 4]).unbind(0)
            haptics = haptics.permute([1, 0, 2, 3, 4]).unbind(0)
            audios = audios.permute([1, 0, 2, 3, 4]).unbind(0)
            vibros = vibros.permute([1, 0, 2, 3, 4]).unbind(0)
            gen_images, gen_haptics, gen_audios, gen_vibros = self.net(
                images, haptics, audios, behaviors, vibros, train=False)
            # predictions are offset by one step:
            # gen_images[t - 1] is the prediction for images[t]
            for i, (image, gen_image) in enumerate(
                    zip(images[self.opt.context_frames:],
                        gen_images[self.opt.context_frames - 1:])):
                stats = None
                if ssim:
                    # convert both frames to grayscale uint8 before SSIM
                    image = image.permute([0, 2, 3, 1]).unbind(0)
                    image = [
                        cv2.cvtColor(
                            (im.cpu().numpy() * 255).astype(np.uint8),
                            cv2.COLOR_BGR2GRAY) for im in image
                    ]
                    gen_image = gen_image.permute([0, 2, 3, 1]).unbind(0)
                    gen_image = [
                        cv2.cvtColor(
                            (im.cpu().numpy() * 255).astype(np.uint8),
                            cv2.COLOR_BGR2GRAY) for im in gen_image
                    ]
                    # calc_ssim appears to return a tuple; [0] is the score
                    stats = [
                        calc_ssim(im, gim)[0]
                        for im, gim in zip(image, gen_image)
                    ]
                else:
                    stats = mse_to_psnr(
                        torch.mean((image - gen_image)**2,
                                   dim=[1, 2, 3]).cpu())
                loss[i].extend(stats)
    # NOTE(review): reduction placement reconstructed from collapsed source —
    # assumed to run after the whole validation pass; confirm against the
    # original formatting.
    if keep_frame:
        stds = [np.std(item) for item in loss]
        loss = [np.mean(item) for item in loss]
        return loss, stds
    if keep_batch:
        # drop empty frame buckets, then average across frames per sample
        loss = np.stack([it for it in loss if it])
        loss = np.mean(loss, axis=0)
        return loss
    else:
        loss = np.stack(loss)
        loss = np.mean(loss)
        return loss