# --- Evaluate fluorescence predictions and write a metric summary ---
# NOTE(review): this chunk begins mid-script; `images`, `gt_masks`, `pr_masks`,
# `args`, `model_folder`, `metrics`, `scores`, and the plotting/metric helpers
# are all defined upstream of this view.
images = np.stack(images)
gt_masks = np.stack(gt_masks)

# Scale back from args.scale (intensities were divided by args.scale upstream
# — presumably the "flu_scale" normalization; TODO confirm) to 8-bit [0, 255].
gt_masks = np.uint8(gt_masks / args.scale * 255)
pr_masks = pr_masks / args.scale * 255
pr_masks = np.uint8(np.clip(pr_masks, 0, 255))

# Save a panel of prediction examples for visual inspection.
plot_fig_file = model_folder + '/pred_examples.png'
nb_images = 10
plot_flu_prediction(plot_fig_file, images, gt_masks, pr_masks, nb_images)
# output_dir = model_folder+'/pred_fl'; generate_folder(output_dir)
# plot_set_prediction(output_dir, images, gt_masks, pr_masks)

# Calculate PSNR (mean plus per-image scores).
mPSNR, psnr_scores = calculate_psnr(gt_masks, pr_masks)
print('PSNR: {:.4f}'.format(mPSNR))

# Calculate Pearson correlation coefficient (mean plus per-image scores).
mPear, pear_scores = calculate_pearsonr(gt_masks, pr_masks)
print('Pearsonr:{:.4f}'.format(mPear))

with open(model_folder + '/metric_summary.txt', 'w+') as f:
    # Loss value: scores[0] holds the evaluation loss, since the loop below
    # pairs metrics with scores[1:].
    # FIX(review): the original wrote metric.__name__/value here *before*
    # the for-loop bound them — a use-before-assignment (NameError) or, at
    # best, stale values from an earlier loop.
    f.write("loss: {:.5}\n".format(scores[0]))
    # Mean value of each evaluation metric.
    for metric, value in zip(metrics, scores[1:]):
        f.write("mean {}: {:.5}\n".format(metric.__name__, value))
    # PSNR and Pearson correlation over fluorescent channels 1 and 2.
    f.write('PSNR: {:.4f}\n'.format(mPSNR))
    f.write('Pearsonr:{:.4f}\n'.format(mPear))
# NOTE(review): this chunk begins mid-conditional — the `if` that owns the
# first two assignments and the `if fl_ch == ...` branch that the `elif`
# below continues are upstream of this view; the indentation here is a
# reconstruction of that missing structure.
        gt_vol2 = extract_vol(gt_masks)
        pr_vol2 = extract_vol(pr_masks)
    else:
        gt_vol2 = gt_masks
        pr_vol2 = pr_masks
elif fl_ch == 'fl12':
    # Both channels requested: split fluorescence channels 1 and 2 from the
    # last axis of the (N, H, W, C) mask stacks.
    gt_vol = gt_masks[:, :, :, 0]
    pr_vol = pr_masks[:, :, :, 0]
    gt_vol2 = gt_masks[:, :, :, 1]
    pr_vol2 = pr_masks[:, :, :, 1]
if fl_ch == 'fl12' or fl_ch == 'fl1':
    # Channel FL1: crop a border of `offset` pixels from each spatial edge,
    # then score MSE / PSNR / Pearson correlation against ground truth.
    pr_vol = pr_vol[:, offset:-offset, offset:-offset]
    gt_vol = gt_vol[:, offset:-offset, offset:-offset]
    mse_score = np.mean(np.square(pr_vol - gt_vol))
    psnr_score = calculate_psnr(pr_vol, gt_vol)
    cor_score = calculate_pearsonr(pr_vol, gt_vol)
    print(pr_vol.shape, gt_vol.shape)
    # Accumulate per-volume scores into the running lists.
    mse_scores.append(mse_score)
    psnr_scores.append(psnr_score)
    cor_scores.append(cor_score)
    print('{}-FL1: psnr {:.4f}, cor {:.4f}, mse {:.4f}\n'.format(
        vol_fn, psnr_score, cor_score, mse_score))
if fl_ch == 'fl12' or fl_ch == 'fl2':
    # Channel FL2: same border crop and metrics as FL1 above.
    pr_vol2 = pr_vol2[:, offset:-offset, offset:-offset]
    gt_vol2 = gt_vol2[:, offset:-offset, offset:-offset]
    mse_score2 = np.mean(np.square(pr_vol2 - gt_vol2))
    psnr_score2 = calculate_psnr(pr_vol2, gt_vol2)
    cor_score2 = calculate_pearsonr(pr_vol2, gt_vol2)
    print(pr_vol2.shape, gt_vol2.shape)
    mse2_scores.append(mse_score2)
# NOTE(review): this chunk begins mid-call — the opening of the first
# plotting call (a sibling of the plot_flu_prediction8 call below) is
# upstream of this view; the first line is its trailing arguments.
                          image_ids, nb_images, rand_seed=2)
ghf.plot_flu_prediction8(model_folder + '/grants_fl_v2', images, gt_masks,
                         pr_masks, cut_folder, fl1_folder, fl2_folder,
                         image_ids, nb_images, rand_seed=2)

# PSNR per fluorescence channel (last axis: index 0 and index 1) and over
# both channels combined.
f1_mPSNR, f1_psnr_scores = calculate_psnr(gt_masks[:, :, :, 0],
                                          pr_masks[:, :, :, 0])
f2_mPSNR, f2_psnr_scores = calculate_psnr(gt_masks[:, :, :, 1],
                                          pr_masks[:, :, :, 1])
mPSNR, f_psnr_scores = calculate_psnr(gt_masks, pr_masks)
print('PSNR: fluo1 {:.4f}, fluo2 {:.4f}, combined {:.4f}'.format(
    f1_mPSNR, f2_mPSNR, mPSNR))

# Pearson correlation coefficient, per channel and combined.
f1_mPear, f1_pear_scores = calculate_pearsonr(gt_masks[:, :, :, 0],
                                              pr_masks[:, :, :, 0])
f2_mPear, f2_pear_scores = calculate_pearsonr(gt_masks[:, :, :, 1],
                                              pr_masks[:, :, :, 1])
f_mPear, f_pear_scores = calculate_pearsonr(gt_masks, pr_masks)
print('Pearsonr: fluo1 {:.4f}, fluo2 {:.4f}, combined {:.4f}'.format(
    f1_mPear, f2_mPear, f_mPear))
# --- Rescale predictions, optionally crop to the image window, plot
# --- examples, and record PSNR ---
# NOTE(review): `gt_masks`, `pr_masks`, `images`, `args`, `scale_factor`,
# `val_dim`, `img_dim`, `model_folder`, and the helpers are defined upstream.

# Undo the training-time intensity scaling.
gt_masks = gt_masks / scale_factor
pr_masks = pr_masks / scale_factor

# For datasets whose validation tiles are padded, crop the central
# img_dim-sized window back out of the val_dim-sized arrays.
if args.dataset in ('bone_marrow', 'colorectal'):
    margin = int((val_dim - img_dim) / 2)
    offset1, offset2 = margin, val_dim - margin
    window = slice(offset1, offset2)
    gt_masks = gt_masks[:, window, window]
    pr_masks = pr_masks[:, window, window]
    images = images[:, window, window]
print('output: {}'.format(pr_masks.shape))

# Save a small panel of example predictions.
plot_fig_file = model_folder + '/pred_examples.png'
nb_images = 4
plot_reg_prediction(plot_fig_file, images, gt_masks, pr_masks, nb_images,
                    rand_seed=6)

# PSNR in the rescaled [0, 1] range (mean plus per-image scores).
mPSNR, psnr_scores = calculate_psnr(gt_masks, pr_masks, max_val=1.0)
print('PSNR: {:.4f}'.format(mPSNR))
with open(model_folder + '/metric_psnr.txt', 'w+') as f:
    # Persist the mean PSNR for later comparison across runs.
    f.write('PSNR: {:.4f}'.format(mPSNR))
# NOTE(review): this chunk begins inside a function defined upstream — the
# `return` below is its final line (mean squared error between two volumes).
    return np.mean(np.square(vol1 - vol2))

# --- Predict over the validation dataloader and compute metrics ---
val_loss = []; gts = []; prs = []
for i in range(len(valid_dataloader)):
    # item[0] is the image batch, item[1] the target batch — TODO confirm
    # against the dataloader implementation.
    img_input = valid_dataloader[i][0]
    prs.append(model.predict(valid_dataloader[i]))
    gts.append(valid_dataloader[i][1])
gts_arr = np.concatenate(gts, axis=0)
prs_arr = np.concatenate(prs, axis=0)

# Metrics in the normalized (training) scale.
cor_score = calculate_pearsonr(gts_arr, prs_arr)
mse_score = calculate_mse(gts_arr, prs_arr)
psnr_score = calculate_psnr(gts_arr, prs_arr)
print('mMSE {:4f}, mCor:{:.4f}, mPSNR {:.4f}'.format(mse_score, cor_score,
                                                     psnr_score))

# Scale back (divide by `scale`, map to [0, 255]) and recompute the metrics.
gts_arr_1 = gts_arr / scale * 255
prs_arr_1 = prs_arr / scale * 255
cor_score_1 = calculate_pearsonr(gts_arr_1, prs_arr_1)
mse_score_1 = calculate_mse(gts_arr_1, prs_arr_1)
psnr_score_1 = calculate_psnr(gts_arr_1, prs_arr_1)

# Per-sample metric lists (one value per prediction in the batch axis).
mse_scores = [calculate_mse(gts_arr_1[i, :], prs_arr_1[i, :])
              for i in range(prs_arr_1.shape[0])]
cor_scores = [calculate_pearsonr(gts_arr_1[i, :], prs_arr_1[i, :])
              for i in range(prs_arr_1.shape[0])]
psnr_scores = [calculate_psnr(gts_arr_1[i, :], prs_arr_1[i, :])
               for i in range(prs_arr_1.shape[0])]
# extract the target prediction # offset = 12 ph_vol = extract_vol(images) if cho == 3: gt_vol = extract_vol(gt_masks) pr_vol = extract_vol(pr_masks) else: gt_vol = gt_masks pr_vol = pr_masks pr_vol = pr_vol[:, offset:-offset, offset:-offset] gt_vol = gt_vol[:, offset:-offset, offset:-offset] ph_vol = ph_vol[:, offset:-offset, offset:-offset] # psnr and pearsonr correlation mse_score = np.mean(np.square(pr_vol - gt_vol)) psnr_score = calculate_psnr(pr_vol, gt_vol) cor_score = calculate_pearsonr(pr_vol, gt_vol) mse_scores.append(mse_score) psnr_scores.append(psnr_score) cor_scores.append(cor_score) # save prediction pred_save = False if pred_save: pr_vol_dir = model_folder + '/pred_vols' generate_folder(pr_vol_dir) np.save(os.path.join(pr_vol_dir, 'Pr_{}.npy'.format(vol_fn)), pr_vol) np.save(os.path.join(pr_vol_dir, 'GT_{}.npy'.format(vol_fn)), gt_vol) print(pr_vol.shape) print('{}: psnr {:.4f}, cor {:.4f}, mse {:.4f}\n'.format(