def _per_view_evaluation(image_id, pred, true, mapped_pred, mapped_true, view,
                         n_classes, results, per_view_results, out_dir, args):
    """
    Evaluate one image's prediction along a single view and record the scores.

    With probability (1 - args.eval_prob) the evaluation is skipped entirely.
    Otherwise dice scores are computed both in the raw view space and in the
    mapped (resampled-back) space; the mean mapped dice (NaNs removed,
    background entry excluded) is written into 'results' and the per-class
    mapped dices into 'per_view_results'. Results are persisted to disk on
    every call via save_all.

    Args:
        image_id:         Identifier used as row/key in the result containers
        pred, true:       Prediction / ground-truth in the raw view space
        mapped_pred, mapped_true: Same, mapped back to the original image space
        view:             The view (axis) being evaluated; stringified as key
        n_classes:        Number of segmentation classes
        results:          pandas DataFrame of per-view mean dices
        per_view_results: dict mapping view-string -> {image_id: dice array}
        out_dir:          Directory that save_all writes results into
        args:             Parsed CLI namespace; only 'eval_prob' is read here
    """
    # Randomly skip evaluation to save time when eval_prob < 1
    if np.random.rand() > args.eval_prob:
        print("Skipping evaluation for view %s... "
              "(eval_prob=%.3f)" % (view, args.eval_prob))
        return

    # Dice in raw view space and in the mapped (original image) space
    raw_dices = evaluate(pred, true, n_classes)
    mapped_dices = evaluate(mapped_pred, mapped_true, n_classes)

    # Mean over non-NaN classes, dropping the first (background) entry
    valid_dices = mapped_dices[~np.isnan(mapped_dices)]
    mean_dice = valid_dices[1:].mean()

    print("View dice scores:   ", raw_dices)
    print("Mapped dice scores: ", mapped_dices)
    print("Mean dice (n=%i): " % (len(mapped_dices) - 1), mean_dice)

    # Store mean and per-class results under this view's key
    results.loc[image_id, str(view)] = mean_dice
    per_view_results[str(view)][image_id] = mapped_dices[1:]

    # Persist the so-far results after every image
    save_all(results, per_view_results, out_dir)
def get_results_dicts(out_dir, views, image_pairs_dict, n_classes, _continue):
    """
    Load existing result containers or initialize fresh ones.

    When '_continue' is truthy, previously saved CSV results are reloaded
    from '<out_dir>/csv'. Otherwise new (empty) pandas containers are created
    and immediately written to disk once to verify the output format.

    Args:
        out_dir:          Output directory holding (or to hold) the CSV files
        views:            Array of view vectors used as result columns/keys
        image_pairs_dict: Mapping of image identifiers to image pairs
        n_classes:        Number of segmentation classes
        _continue:        If truthy, resume from previously saved results

    Returns:
        Tuple of (results DataFrame, per-view detailed results dict)
    """
    from mpunet.logging import init_result_dicts, save_all, load_result_dicts
    if _continue:
        # Resume: reload the result containers saved by a previous run
        return load_result_dicts(csv_dir=os.path.join(out_dir, "csv"),
                                 views=views)
    # Fresh run: build empty containers and save once to check the format
    results, detailed_res = init_result_dicts(views, image_pairs_dict,
                                              n_classes)
    save_all(results, detailed_res, out_dir)
    return results, detailed_res
def _merged_eval(image_id, pred, true, n_classes, results, per_view_results,
                 out_dir):
    """
    Evaluate the fused (merged across views) prediction for one image.

    Computes per-class dice scores with the zero/background class ignored,
    stores them under the "MJ" (majority/merged) key of both result
    containers, and persists everything to disk via save_all.

    Args:
        image_id:         Identifier used as row/key in the result containers
        pred, true:       Merged prediction and ground-truth volumes
        n_classes:        Number of segmentation classes
        results:          pandas DataFrame of mean dices (column "MJ")
        per_view_results: dict with an "MJ" entry mapping image_id -> dices
        out_dir:          Directory that save_all writes results into
    """
    # Per-class dice of the combined prediction, background ignored
    class_dices = evaluate(pred, true, n_classes, ignore_zero=True)
    combined_mean = class_dices[~np.isnan(class_dices)].mean()
    per_view_results["MJ"][image_id] = class_dices

    print("Combined dices: ", class_dices)
    print("Combined mean dice: ", combined_mean)
    results.loc[image_id, "MJ"] = combined_mean

    # Persist the so-far results after every image
    save_all(results, per_view_results, out_dir)
def entry_func(args=None):
    """
    Script entry point: run multi-view predictions (and optionally
    evaluation) for a trained project.

    Parses CLI arguments, validates the project/output folder structure,
    loads hyperparameters and the image dataset, loads the model (and, unless
    sum-fusion is requested, a fusion model), runs predictions over all views,
    and finally saves accumulated evaluation results unless --no_eval is set.

    Args:
        args: Optional list of CLI argument strings; None reads sys.argv.
    """
    # Get command line arguments
    args = get_argparser().parse_args(args)
    assert_args(args)

    # Get most important paths
    project_dir = os.path.abspath(args.project_dir)
    out_dir = os.path.abspath(args.out_dir)

    # Check if valid dir structures
    # NOTE: vars(args)["continue"] is used because 'continue' is a Python
    # keyword and cannot be accessed as args.continue
    validate_folders(project_dir, out_dir,
                     overwrite=args.overwrite,
                     _continue=vars(args)["continue"])
    nii_res_dir = os.path.join(out_dir, "nii_files")
    create_folders(nii_res_dir, create_deep=True)

    # Get settings from YAML file
    hparams = load_hparams(project_dir)

    # Get dataset
    image_pair_loader, image_pair_dict = get_image_pair_loader(args,
                                                               hparams,
                                                               out_dir)

    # Wait for PID to terminate before continuing, if specified
    if args.wait_for:
        await_PIDs(args.wait_for, check_every=120)

    # Set GPU device
    set_gpu_vis(args)

    # Get views
    # The views used during training are stored in views.npz; the 'views'
    # entry is removed from hparams so it is not re-read from the YAML file
    views = np.load("%s/views.npz" % project_dir)["arr_0"]
    del hparams['fit']['views']

    # Prepare result dicts (stay None when evaluation is disabled)
    results, per_view_results = None, None
    if not args.no_eval:
        results, per_view_results = get_results_dicts(out_dir, views,
                                                      image_pair_dict,
                                                      hparams["build"]["n_classes"],
                                                      vars(args)["continue"])

    # Get model and load weights, assign to one or more GPUs
    model, weights_name = get_model(project_dir, hparams['build'])
    fusion_model = None
    if not args.sum_fusion:
        # Learned fusion across views; skipped when simple sum-fusion is used
        fusion_model = get_fusion_model(n_views=len(views),
                                        n_classes=hparams["build"]["n_classes"],
                                        project_dir=project_dir,
                                        weights_name=weights_name)

    run_predictions_and_eval(
        image_pair_loader=image_pair_loader,
        image_pair_dict=image_pair_dict,
        model=model,
        fusion_model=fusion_model,
        views=views,
        hparams=hparams,
        args=args,
        results=results,
        per_view_results=per_view_results,
        out_dir=out_dir,
        nii_res_dir=nii_res_dir
    )
    if not args.no_eval:
        # Write final results
        save_all(results, per_view_results, out_dir)