elif evaluate_all:
    logging.warning('EVALUATING ON ALL TRAINING DATA')
    input_path = sys_config.data_root
    output_path = os.path.join(model_path, 'predictions_alltrain')
else:
    logging.warning('EVALUATING ON VALIDATION SET')
    input_path = sys_config.data_root
    output_path = os.path.join(model_path, 'predictions')

# Output folders for the predicted segmentations and the corresponding input images.
path_pred = os.path.join(output_path, 'prediction')
path_image = os.path.join(output_path, 'image')
utils.makefolder(path_pred)
utils.makefolder(path_image)

# Folders for the ground truth, the difference maps and the evaluation results.
path_gt = os.path.join(output_path, 'ground_truth')
path_diff = os.path.join(output_path, 'difference')
path_eval = os.path.join(output_path, 'eval')
utils.makefolder(path_diff)
utils.makefolder(path_gt)

# Run inference, write the predictions to disk, then compute the ACDC metrics.
init_iteration = score_data(input_path,
                            output_path,
                            model_path,
                            num_classes=args.num_classes,
                            do_postprocessing=True,
                            gt_exists=(not evaluate_test_set),
                            evaluate_all=evaluate_all)

metrics_acdc.main(path_gt, path_pred, path_eval)
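# NOTE: the utils.makefolder calls above are assumed to simply create a folder
# when it does not exist yet. A minimal, standard-library sketch of such a
# helper is given below for reference; the project's actual utils.makefolder
# may have a different signature or return value.
import os

def _makefolder_sketch(folder):
    """Create `folder` if it does not exist yet; return True if it was created."""
    if not os.path.exists(folder):
        os.makedirs(folder)
        return True
    return False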
    ]
    avg_dices = []

    # Evaluate once per random seed and collect the resulting average Dice scores.
    for s in seeds:
        np.random.seed(s)
        init_iteration = score_data(input_path,
                                    output_path,
                                    model_path,
                                    args,
                                    do_postprocessing=True,
                                    gt_exists=(not evaluate_test_set),
                                    evaluate_all=evaluate_all,
                                    random_center_ratio=args.random_center_ratio)
        avg_dice = metrics_acdc.main(path_gt, path_pred, path_eval)
        avg_dices.append(avg_dice)

    # Report the per-seed Dice scores and their mean / standard deviation.
    avg_dices = np.asarray(avg_dices)
    print(avg_dices)
    print("mean: {}\tstd: {}".format(np.mean(avg_dices), np.std(avg_dices)))
    print(np.mean(avg_dices))
    print(np.std(avg_dices))
else:
    init_iteration = score_data(input_path,
                                output_path,
                                model_path,
                                args,
                                do_postprocessing=True,
                                gt_exists=(not evaluate_test_set),
                                evaluate_all=evaluate_all,