def nrmse_3d(y_true, y_pred, norm_type='min-max'):
    """Return the mean NRMSE over the slices of a 3-D array (axes ZXY or ZYX).

    Note: the original authors appear to use min-max normalization.
    """
    z = []
    for i in range(len(y_true)):
        # Compare slice by slice; the original compared the full volumes each iteration.
        z.append(nrmse(y_true[i], y_pred[i], norm_type))
    return np.mean(z)
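# Hedged usage sketch (not part of the original code): exercising nrmse_3d on a
# synthetic (Z, Y, X) volume. It assumes `np` is numpy and that `nrmse` accepts
# the normalization type as a third positional argument (as the call above
# suggests, e.g. the older skimage `compare_nrmse`); in newer scikit-image
# versions that argument is keyword-only.
def _example_nrmse_3d():
    rng = np.random.default_rng(0)
    volume_true = rng.random((8, 64, 64))                                 # synthetic ground truth
    volume_pred = volume_true + 0.05 * rng.standard_normal((8, 64, 64))   # noisy prediction
    return nrmse_3d(volume_true, volume_pred)                             # mean per-slice NRMSE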
def get_scores(img1, img2, method="ssim"):
    if method == "ssim":
        return ssim(img1, img2)
    elif method == "mse":  # the original compared against the list ["mse"], which never matched
        return mse(img1, img2)
    elif method == "nrmse":
        return nrmse(img1, img2)
    return None
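# Hedged example (not in the original): calling get_scores with each supported
# method on small uint8 test images, assuming ssim/mse/nrmse are the
# scikit-image metric functions imported elsewhere in this module.
def _example_get_scores():
    rng = np.random.default_rng(0)
    img_a = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
    noise = rng.integers(-10, 11, size=(64, 64))
    img_b = np.clip(img_a.astype(int) + noise, 0, 255).astype(np.uint8)
    return {m: get_scores(img_a, img_b, method=m) for m in ("ssim", "mse", "nrmse")}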
def compute_image_quality_metrics(ground_truth_images, ground_truth_angles,
                                  generated_images, generated_angles):
    # Order both image stacks by ascending angle so zip() pairs corresponding views.
    ground_truth_images = ground_truth_images[np.argsort(ground_truth_angles[:, 0], 0)]
    generated_images = generated_images[np.argsort(generated_angles[:, 0], 0)]

    loop_mse, loop_nrmse, loop_ssim = [], [], []
    for im_gt, im_gen in zip(ground_truth_images, generated_images):
        loop_mse.append(mse(im_gt, im_gen))
        loop_nrmse.append(nrmse(im_gt, im_gen))
        loop_ssim.append(ssim(im_gt.squeeze(), im_gen.squeeze()))

    return np.array(loop_mse).mean(), np.array(loop_nrmse).mean(), np.array(loop_ssim).mean()
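# Hedged illustration (not in the original) of the angle-based alignment used in
# compute_image_quality_metrics: each stack is sorted by the first column of its
# angle array so that corresponding entries line up. The (N, 1) angle shape and
# the 4x4 dummy images below are assumptions for illustration only.
def _example_angle_alignment():
    rng = np.random.default_rng(0)
    angles_gt = rng.permutation(np.arange(5, dtype=float)).reshape(-1, 1)   # shape (N, 1)
    angles_gen = rng.permutation(np.arange(5, dtype=float)).reshape(-1, 1)
    images_gt = np.stack([np.full((4, 4), a) for a in angles_gt[:, 0]])
    images_gen = np.stack([np.full((4, 4), a) for a in angles_gen[:, 0]])
    images_gt = images_gt[np.argsort(angles_gt[:, 0], 0)]
    images_gen = images_gen[np.argsort(angles_gen[:, 0], 0)]
    # After sorting, corresponding entries share the same angle.
    assert np.allclose(images_gt, images_gen)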
def calculate_nrmse_similarity(pair):
    """Compute the normalized root mean-squared error (NRMSE) between two images.

    :param pair: image pair to compare; the result is stored in ``pair.similarity``
    """
    image1, image2 = __check_files_and_open(pair)
    img1f = img_as_float(image1)
    img2f = img_as_float(image2)
    similarity = nrmse(img1f, img2f)
    pair.similarity = round(similarity, 3)
def plot_one(ax, true, pred, title=None, gray=True):
    """Plot the predicted image and annotate it with NRMSE, SSIM and PSNR.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Axes to plot on.
    true : PIL.Image
        Ground-truth image used for the measures.
    pred : PIL.Image
        Predicted image used for the measures.
    title : str, optional
        Plot title.
    gray : bool
        If True, the images are treated as grayscale; if False, as RGB.
    """
    label = 'NRMSE: {:.3f}; SSIM: {:.3f}; PSNR: {:.3f}'

    # Convert images to numpy arrays.
    if gray:
        arr_t = np.array(true)[:, :, 0]
        arr_p = np.array(pred)[:, :, 0]
        multichannel = False
    else:
        arr_t = np.array(true)
        arr_p = np.array(pred)
        multichannel = True

    assert arr_t.shape == arr_p.shape, \
        f'Shapes of input images must match: true:{arr_t.shape} pred:{arr_p.shape}'

    ax.imshow(pred, cmap='gray', vmin=0, vmax=255)
    ax.set_axis_off()
    if title is not None:
        ax.set_title(title)
    # skimage expects the ground-truth image as the first argument of nrmse/psnr.
    ax.annotate(label.format(nrmse(arr_t, arr_p),
                             ssim(arr_p, arr_t, multichannel=multichannel),
                             psnr(arr_t, arr_p)),
                xy=(0, -.1), xycoords='axes fraction')
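# Hedged usage sketch (not in the original): calling plot_one on a synthetic
# image pair. It assumes PIL and a matplotlib `plt` import are available, and
# that grayscale inputs still carry a channel axis (the function indexes
# [:, :, 0]), so the images below are created in 'RGB' mode and passed with
# gray=True.
def _example_plot_one():
    from PIL import Image
    rng = np.random.default_rng(0)
    arr = rng.integers(0, 256, size=(64, 64, 3), dtype=np.uint8)
    true_img = Image.fromarray(arr, mode='RGB')
    pred_img = Image.fromarray(np.clip(arr.astype(int) + 5, 0, 255).astype(np.uint8), mode='RGB')
    fig, ax = plt.subplots()
    plot_one(ax, true_img, pred_img, title='example', gray=True)
    return fig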
def normalized_root_mean_square_error(window_orig, window_warped, weights=False):
    # Weighted variant left unfinished in the original (note: `weights` is currently unused):
    # rmse = math.sqrt(np.mean(weights * (window_orig - window_warped) ** 2)) / np.mean(window_orig - window_warped)
    return nrmse(window_orig, window_warped)
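# Hedged sketch (not the authors' method): one possible way the unused `weights`
# parameter above could be honored. It computes a weighted RMSE and normalizes
# by the intensity range of the original window; both the weighting scheme and
# the normalization choice are assumptions for illustration only.
def weighted_nrmse_sketch(window_orig, window_warped, weights=None):
    diff = np.asarray(window_orig, dtype=float) - np.asarray(window_warped, dtype=float)
    if weights is None:
        weights = np.ones_like(diff)
    wrmse = np.sqrt(np.average(diff ** 2, weights=weights))   # weighted root mean square error
    value_range = np.ptp(window_orig)                         # min-max normalization term
    return wrmse / value_range if value_range > 0 else wrmse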
def calc_and_save_all_metrics(test_set: LightFieldDataset,
                              output_path: os.path,
                              h5_file_loc: os.path = None,
                              h5_dataset_key: str = None) -> dict:
    with h5py.File(h5_file_loc, 'a') as h5_file:
        output_images = h5_file[h5_dataset_key]
        all_targets_shape = (len(test_set), test_set.num_views_y, test_set.num_channels,
                             test_set.height_y, test_set.width_y)
        assert output_images.shape == all_targets_shape
        # TODO: If the images are in an array we need to reshape them
        # TODO: And again when saving.
        num_images = all_targets_shape[0]
        num_views = all_targets_shape[1]

        ssim_results = np.zeros((num_images, num_views), dtype=np.float32)
        psnr_results = np.zeros((num_images, num_views), dtype=np.float32)
        mse_results = np.zeros((num_images, num_views), dtype=np.float32)
        nrmse_results = np.zeros((num_images, num_views), dtype=np.float32)

        ssim_meter = AverageMeter(name='SSIM', cum=False)
        custom = CustomProgressBar(label='SSIM')
        print("Calculating image metrics.")
        for image_idx in custom.bar(range(num_images)):
            target_lf = test_set.get_only_y(image_idx)
            for view_idx in range(num_views):
                # Move the channel axis to the end for the skimage metrics.
                target_reshape = np.moveaxis(target_lf[view_idx], -3, -1)
                output_reshape = np.moveaxis(output_images[image_idx, view_idx], -3, -1)
                ssim_results[image_idx, view_idx] = ssim(target_reshape, output_reshape,
                                                         multichannel=True)
                psnr_results[image_idx, view_idx] = psnr(target_reshape, output_reshape)
                mse_results[image_idx, view_idx] = mse(target_reshape, output_reshape)
                nrmse_results[image_idx, view_idx] = nrmse(target_reshape, output_reshape)
            # Log errors
            ssim_meter.update(float(np.mean(ssim_results[image_idx])))
            custom.format_custom_text.update_mapping(value=ssim_meter.value())

        metrics = {
            'ssim_avg': float(np.mean(ssim_results)),
            'ssim_std': float(np.std(ssim_results)),
            'psnr_avg': float(np.mean(psnr_results)),
            'psnr_std': float(np.std(psnr_results)),
            'mse_avg': float(np.mean(mse_results)),
            'mse_std': float(np.std(mse_results)),
            'nrmse_avg': float(np.mean(nrmse_results)),
            'nrmse_std': float(np.std(nrmse_results))
        }
        # Also save to a json for easy viewing.
        with open(os.path.join(output_path, "metrics.json"), 'w') as fp:
            json.dump(metrics, fp, indent=4, sort_keys=True)

        output_images.attrs.create('ssim', ssim_results)
        output_images.attrs.create('psnr', psnr_results)
        output_images.attrs.create('mse', mse_results)
        output_images.attrs.create('nrmse', nrmse_results)
        output_images.attrs.create('ssim_avg', metrics['ssim_avg'])
        output_images.attrs.create('ssim_std', metrics['ssim_std'])
        output_images.attrs.create('psnr_avg', metrics['psnr_avg'])
        output_images.attrs.create('psnr_std', metrics['psnr_std'])
        output_images.attrs.create('mse_avg', metrics['mse_avg'])
        output_images.attrs.create('mse_std', metrics['mse_std'])
        output_images.attrs.create('nrmse_avg', metrics['nrmse_avg'])
        output_images.attrs.create('nrmse_std', metrics['nrmse_std'])

    # The signature promises a dict, so return the summary metrics.
    return metrics
def fPredict(test_ref, test_art, dParam, dHyper):
    weights_file = dParam['sOutPath'] + os.sep + '{}.h5'.format(dHyper['bestModel'])
    patchSize = dParam['patchSize']

    vae = createModel(patchSize, dHyper)
    vae.compile(optimizer='adam', loss=None)
    vae.load_weights(weights_file)

    test_ref = np.expand_dims(test_ref, axis=1)
    test_art = np.expand_dims(test_art, axis=1)

    predict_ref, predict_art = vae.predict([test_ref, test_art], dParam['batchSize'][0], verbose=1)

    test_ref = np.squeeze(test_ref, axis=1)
    test_art = np.squeeze(test_art, axis=1)
    predict_art = np.squeeze(predict_art, axis=1)

    if dHyper['unpatch']:
        test_ref = fRigidUnpatchingCorrection2D(dHyper['actualSize'], test_ref, dParam['patchOverlap'])
        test_art = fRigidUnpatchingCorrection2D(dHyper['actualSize'], test_art, dParam['patchOverlap'])
        predict_art = fRigidUnpatchingCorrection2D(dHyper['actualSize'], predict_art, dParam['patchOverlap'], 'average')

        # pre TV processing
        test_art_tv_1 = denoise_tv_chambolle(test_art, weight=1)
        test_art_tv_3 = denoise_tv_chambolle(test_art, weight=3)
        test_art_tv_5 = denoise_tv_chambolle(test_art, weight=5)

        if dHyper['evaluate']:
            label = 'NRMSE: {:.2f}, SSIM: {:.3f}, NMI: {:.3f}'

            def metrics_label(ref, img):
                # NRMSE, SSIM and NMI of img against the reference slice.
                return label.format(
                    nrmse(ref, img),
                    ssim(ref, img, data_range=(img.max() - img.min())),
                    nmi(ref.flatten(), img.flatten()))

            if dParam['lSaveIndividual']:
                fig = plt.figure()
                plt.gray()
                for i in range(len(test_ref)):
                    # The six repeated plotting blocks of the original are folded
                    # into one loop over (image, title, filename prefix).
                    variants = [
                        (test_ref[i], 'reference image', 'reference_'),
                        (test_art[i], 'motion-affected image', 'art_'),
                        (predict_art[i], 'reconstructed image', 'recon_'),
                        (test_art_tv_1[i], 'TV weight 1', 'tv1_'),
                        (test_art_tv_3[i], 'TV weight 3', 'tv3_'),
                        (test_art_tv_5[i], 'TV weight 5', 'tv5_'),
                    ]
                    for img, title, prefix in variants:
                        ax = imshow(img)
                        plt.xticks([])
                        plt.yticks([])
                        ax.set_xlabel(metrics_label(test_ref[i], img))
                        ax.set_title(title)
                        if dParam['lSave']:
                            plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + prefix + str(i) + '.png')
                        else:
                            plt.show()
            else:
                fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(15, 10), sharex=True, sharey=True)
                ax = axes.ravel()
                plt.gray()
                for i in range(len(test_ref)):
                    variants = [
                        (test_ref[i], 'reference image'),
                        (test_art[i], 'motion-affected image'),
                        (predict_art[i], 'reconstructed image'),
                        (test_art_tv_1[i], 'TV weight 1'),
                        (test_art_tv_3[i], 'TV weight 3'),
                        (test_art_tv_5[i], 'TV weight 5'),
                    ]
                    for j, (img, title) in enumerate(variants):
                        ax[j].imshow(img)
                        ax[j].set_xlabel(metrics_label(test_ref[i], img))
                        ax[j].set_title(title)
                    if dParam['lSave']:
                        plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + str(i) + '.png')
                    else:
                        plt.show()
        else:
            plt.figure()
            plt.gray()
            for i in range(predict_art.shape[0]):
                plt.imshow(predict_art[i])
                if dParam['lSave']:
                    plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + str(i) + '.png', dpi=300)
                else:
                    plt.show()
    else:
        nPatch = predict_art.shape[0]
        for i in range(nPatch // 4):
            fig, axes = plt.subplots(nrows=4, ncols=2)
            plt.gray()
            cols_title = ['original_art', 'predicted_art']
            for ax, col in zip(axes[0], cols_title):
                ax.set_title(col)
            for j in range(4):
                axes[j, 0].imshow(test_art[4 * i + j])
                axes[j, 1].imshow(predict_art[4 * i + j])
            if dParam['lSave']:
                plt.savefig(dParam['sOutPath'] + os.sep + 'result' + os.sep + str(i) + '.png')
            else:
                plt.show()
def cnrmse(A, B):
    # Symmetrized NRMSE: geometric mean of the two directed NRMSE values.
    return np.sqrt(nrmse(A, B) * nrmse(B, A))
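# Hedged note (not in the original): assuming a skimage-style nrmse, the metric
# is not symmetric because it normalizes by the first (reference) image, which
# is why cnrmse averages both directions geometrically. A tiny check:
def _example_cnrmse():
    rng = np.random.default_rng(0)
    a = rng.random((32, 32))
    b = a * 2.0 + 0.1
    return nrmse(a, b), nrmse(b, a), cnrmse(a, b)   # the two directed values differ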
def main():
    parser = argparse.ArgumentParser(description='Select the type of reduced dataset.')
    parser.add_argument("-f", "--filename", type=str, required=True,
                        help='Path to the file that contains the dictionary with the info of the reduced dataset.')
    args = vars(parser.parse_args())
    info_filename = args["filename"]

    # test set
    with open(info_filename, "rb") as fp:  # Unpickling
        images_info = pickle.load(fp)

    grouper = itemgetter('parkinglot', 'space')
    images_info = sorted(images_info, key=grouper)
    parkinglots = extractUniqueItemsByKey(images_info, 'parkinglot')

    images_info_by_parkinglot = {}
    for parkinglot in parkinglots:
        image_info_parkinglot = [i for i in images_info if i['parkinglot'] == parkinglot]
        spaces_parkinglot = extractUniqueItemsByKey(image_info_parkinglot, 'space')
        images_info_by_spaces = {}
        for space in spaces_parkinglot:
            images_info_by_spaces[space] = [getNewImageInfo(i) for i in image_info_parkinglot
                                            if i['space'] == space]
        images_info_by_parkinglot[parkinglot] = images_info_by_spaces

    # At this point the dictionary is split by parking lot and, within each lot, by space.
    # For every space, take the first empty example found as the reference image and
    # compare it against all other examples of that space (optionally showing the empty
    # reference in one window and the comparison plus result in another).
    empty_space_filepath = ''
    errors = []
    for parkinglot, images_info_by_spaces in images_info_by_parkinglot.items():
        for space, images_info_of_space in images_info_by_spaces.items():
            error_count_empty = 0
            error_count_occupied = 0
            error_empty = 0
            error_occupied = 0
            empty_space_filepath = ''
            example_list = images_info_of_space

            # Find the first empty example and use it as the reference image.
            for example in tqdm(example_list):
                if example['state'] == '0' and len(empty_space_filepath) == 0:
                    empty_space_filepath = example['filepath']
                    img_empty_space = getGrayscaleImage(empty_space_filepath)
                    break

            for example in tqdm(example_list):
                comparison_space_filepath = example['filepath']
                img_comparison_space = getGrayscaleImage(comparison_space_filepath)
                try:
                    sim = ssim(img_empty_space, img_comparison_space)
                except:
                    # Resize the comparison image when the shapes do not match.
                    height1, width1 = img_empty_space.shape
                    img_comparison_space = cv2.resize(img_comparison_space, (width1, height1))
                    sim = ssim(img_empty_space, img_comparison_space)
                nm = nrmse(img_empty_space, img_comparison_space)
                # m = mse(img_empty_space, img_comparison_space)
                space_comparing_name = 'state: {} sim: {} nrmse: {}'.format(example['state'], sim, nm)

                if sim < 0.4 and example['state'] == '0':
                    error_count_empty += 1
                    error_empty += abs(0.4 - sim)
                if sim >= 0.4 and example['state'] == '1':
                    error_count_occupied += 1
                    error_occupied += abs(sim - 0.4)
                if sim > 0.7:
                    # Very similar to the reference: adopt this image as the new reference.
                    empty_space_filepath = example['filepath']
                    img_empty_space = img_comparison_space

                """
                fig = plt.figure('title')
                plt.suptitle(space_comparing_name)
                # show first image
                ax = fig.add_subplot(1, 2, 1)
                plt.imshow(img_empty_space, cmap=plt.cm.gray)
                plt.axis("off")
                # show the second image
                ax = fig.add_subplot(1, 2, 2)
                plt.imshow(img_comparison_space, cmap=plt.cm.gray)
                plt.axis("off")
                # show the images
                plt.show()
                """

            error_occupied = 0 if error_count_occupied == 0 else (error_occupied / error_count_occupied)
            error_empty = 0 if error_count_empty == 0 else (error_empty / error_count_empty)
            print('Space {} ({} examples): occupied errors {} (mean {}), empty errors {} (mean {})'.format(
                space, len(example_list), error_count_occupied, error_occupied,
                error_count_empty, error_empty))
            errors.append({'parkinglot': parkinglot, 'space': space, 'total': len(example_list),
                           'error_count_occupied': error_count_occupied, 'error_occupied': error_occupied,
                           'error_count_empty': error_count_empty, 'error_empty': error_empty})

    info = {'dataset': info_filename, 'threshold': 0.4, 'comparision_method': 'sim', 'errors': errors}
    dataset_name = ntpath.basename(info_filename).split('.')[0]
    feedback_filename = '{}_{}_{}.json'.format(dataset_name, 0.4, 'sim')
    with open(feedback_filename, 'w') as outfile:
        json.dump(info, outfile)
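# Hedged helper sketch (not in the original): the loop in main() effectively
# labels a parking space by comparing SSIM against a fixed 0.4 threshold
# (>= 0.4 means it looks like the empty reference, < 0.4 means occupied). The
# function name and default threshold below are assumptions for illustration.
def classify_space_by_ssim(img_empty_reference, img_candidate, threshold=0.4):
    if img_candidate.shape != img_empty_reference.shape:
        # Match the reference resolution, as main() does before comparing.
        height, width = img_empty_reference.shape
        img_candidate = cv2.resize(img_candidate, (width, height))
    similarity = ssim(img_empty_reference, img_candidate)
    return '0' if similarity >= threshold else '1'   # '0' = empty, '1' = occupied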