Example #1
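Note: these snippets are excerpts from a larger project and omit their imports. The standard-library and third-party modules they rely on are roughly the following; project-specific helpers (parse_args, loadExperimentSettings, get_trainer, Predictions, VolumeEvaluation, etc.) come from the project's own packages and are not reproduced here.

import os
from os import path
from collections import OrderedDict

import numpy as np
import torch
from tqdm import tqdm
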
def main():
    # first we obtain the user arguments, set random seeds, make directories, and store the experiment settings.
    args = parse_args()
    # MC dropout is enabled when we draw more than one sample per volume
    use_mc = args.samples > 1

    os.makedirs(args.output_directory, exist_ok=True)
    model_settings = loadExperimentSettings(
        path.join(args.model_dir, 'settings.yaml'))
    model_file = path.join(args.model_dir,
                           str(model_settings.max_iters) + '.model')
    output_seg_dir = os.path.join(args.output_directory, "pred_labels")
    os.makedirs(output_seg_dir, exist_ok=True)
    # IMPORTANT: ASSUMING LAST LAYER HAS 4 OUTPUT CHANNELS and INPUT LAYER HAS ONE CHANNEL
    n_classes = 4
    n_channels_input = 1

    trainer, pad = get_trainer(model_settings,
                               n_classes,
                               n_channels_input,
                               model_file=model_file)
    trainer.evaluate_with_dropout = use_mc
    print("INFO - arguments")
    print(args)
    testset_generator = get_images_nifti(args.data_dir,
                                         resample=False,
                                         patid=args.patid,
                                         rescale=True)
    pat_id_saved = None

    for sample in tqdm(testset_generator,
                       desc="Generating segmentation volumes"):
        image, spacing, reference = sample['image'], sample['spacing'], sample['reference']
        pat_id, phase_id, frame_id = sample['patient_id'], sample['cardiac_phase'], sample['frame_id']
        num_of_frames = sample['num_of_frames']
        original_spacing = sample['original_spacing']
        shape_changed = False
        if pat_id_saved is None or pat_id != pat_id_saved:
            # initialize 4d segmentation volume
            segmentation4d = np.empty(
                (0, image.shape[0], image.shape[1], image.shape[2]))

        if (pad > 0 or model_settings.network[:4] == "unet"
                or model_settings.network[:3] == "drn"):
            if model_settings.network[:4] == "unet":
                # pad y/x up to a multiple of 2**num_downsamplings (sketched after this example)
                image, shape_changed, yx_padding = fit_unet_image(image, num_downsamplings=4)
            elif model_settings.network[:3] == "drn":
                # print("WARNING - adjust image", image.shape)
                image, shape_changed, yx_padding = fit_unet_image(
                    image, num_downsamplings=3)
            else:
                # image has shape [z, y, x] so pad last two dimensions
                image = np.pad(image, ((0, 0), (pad, pad), (pad, pad)),
                               mode="edge")  # alternative: mode='constant', constant_values=(0, 0)

        image = image[:, None]  # add channel dimension: [z, 1, y, x]
        image = torch.from_numpy(image)
        pat_predictions = Predictions()
        with torch.set_grad_enabled(False):
            for s in range(args.samples):
                output = trainer.predict(image)
                if shape_changed:
                    output = restore_original_size(output, yx_padding)
                    # print("WARNING - restore original size", output["softmax"].shape)
                soft_probs = output['softmax'].detach().numpy()
                pat_predictions(soft_probs,
                                cardiac_phase_tag=phase_id,
                                pred_logits=None)
                # torch.cuda.empty_cache()

        # If mc_dropout is True we compute Bayesian uncertainty maps (stddev), otherwise entropy maps.
        # The method ensures that pred_probs are averaged over samples when we are sampling.
        # 14-8-2019 IMPORTANT: Bayesian maps use the MEAN stddev over classes (previously MAX).
        pred_probs, uncertainties = pat_predictions.get_predictions(
            compute_uncertainty=True, mc_dropout=use_mc, agg_func="mean")
        segmentation = np.argmax(pred_probs, axis=1)
        eval_obj = VolumeEvaluation(pat_id,
                                    segmentation,
                                    reference,
                                    voxel_spacing=spacing,
                                    num_of_classes=n_classes,
                                    mc_dropout=use_mc,
                                    cardiac_phase=phase_id)

        eval_obj.post_processing_only()
        # IMPORTANT: post-processing keeps only the largest connected components of the predictions
        segmentation = eval_obj.pred_labels
        # print(segmentation4d.shape, segmentation.shape, segmentation[None].shape, spacing, original_spacing)
        segmentation4d = np.vstack((segmentation4d, segmentation[None]))
        del output

        if args.save_output and num_of_frames == frame_id + 1:
            do_resample = model_settings.resample or original_spacing[-1] < 1.
            # IMPORTANT: if frame_id is None (e.g. when processing 4D data) then filename is without suffix frame_id
            segmentation4d = np.squeeze(segmentation4d)
            save_segmentations(segmentation4d,
                               pat_id,
                               output_seg_dir,
                               spacing,
                               do_resample,
                               new_spacing=original_spacing,
                               frame_id=None)

        pat_id_saved = pat_id
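
fit_unet_image and restore_original_size are project helpers. Judging from how they are called above, fit_unet_image presumably pads the y/x dimensions up to the next multiple of 2**num_downsamplings so the encoder/decoder feature maps align, and restore_original_size crops the network output back afterwards. A minimal sketch of that idea (the return conventions are assumptions, not the project's actual code):

def fit_unet_image(image, num_downsamplings=4):
    # image has shape [z, y, x]; y and x must be divisible by 2**num_downsamplings
    factor = 2 ** num_downsamplings
    _, y, x = image.shape
    pad_y = (-y) % factor
    pad_x = (-x) % factor
    shape_changed = pad_y > 0 or pad_x > 0
    yx_padding = ((0, pad_y), (0, pad_x))
    if shape_changed:
        image = np.pad(image, ((0, 0), (0, pad_y), (0, pad_x)), mode="edge")
    return image, shape_changed, yx_padding


def restore_original_size(output, yx_padding):
    # crop the softmax volume [z, classes, y, x] back to its pre-padding size
    (_, pad_y), (_, pad_x) = yx_padding
    softmax = output["softmax"]
    y_end = softmax.shape[-2] - pad_y if pad_y else None
    x_end = softmax.shape[-1] - pad_x if pad_x else None
    output["softmax"] = softmax[..., :y_end, :x_end]
    return output
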
Example #2
def main():
    # first we obtain the user arguments, set random seeds, make directories, and store the experiment settings.
    args = parse_args()
    # MC dropout is enabled when we draw more than one sample per volume
    use_mc = args.samples > 1

    os.makedirs(args.output_directory, exist_ok=True)
    experiment_settings = loadExperimentSettings(
        path.join(args.experiment_directory, 'settings.yaml'))
    # if args.dataset is passed (to evaluate on a dataset different from the training one),
    # use it to set the input dir for the images to be segmented
    dta_settings = get_config(dataset=args.dataset if args.dataset is not None
                              else experiment_settings.dataset)

    model_file = path.join(args.experiment_directory,
                           str(args.checkpoint) + '.model')
    output_dirs = make_dirs(args.output_directory, use_mc)
    # we create a trainer
    if experiment_settings.dataset != dta_settings.dataset:
        n_classes = len(
            get_config(experiment_settings.dataset).tissue_structure_labels)
    else:
        n_classes = len(dta_settings.tissue_structure_labels)
    n_channels_input = 1

    trainer, pad = get_trainer(experiment_settings,
                               n_classes,
                               n_channels_input,
                               model_file=model_file)
    trainer.evaluate_with_dropout = use_mc
    print("WARNING - Rescaling intensities is set to {}".format(args.rescale))
    if args.dataset is None:
        # TODO TEMPORARY !!!!
        root_dir = os.path.expanduser("~/data/ACDC_SR/")
        testset_generator = acdc_validation_fold_image4d(
            experiment_settings.fold,
            root_dir=root_dir,  # dta_settings.short_axis_dir,
            file_suffix="4d_acai.nii.gz",
            resample=experiment_settings.resample,
            patid=args.patid,
            rescale=args.rescale)
    else:
        print("INFO - You passed following arguments")
        print(args)
        testset_generator = get_4dimages_nifti(dta_settings.short_axis_dir,
                                               resample=False,
                                               patid=args.patid,
                                               rescale=True)
    pat_id_saved = None

    for sample in tqdm(testset_generator,
                       desc="Generating 4d segmentation volumes"):
        image, spacing, reference = sample['image'], sample['spacing'], sample['reference']
        pat_id, phase_id, frame_id = sample['patient_id'], sample['cardiac_phase'], sample['frame_id']
        num_of_frames = sample['num_of_frames']
        original_spacing = sample['original_spacing']
        shape_changed = False
        if pat_id_saved is None or pat_id != pat_id_saved:
            # initialize 4d segmentation volume
            segmentation4d = np.empty(
                (0, image.shape[0], image.shape[1], image.shape[2]))

        if (pad > 0 or experiment_settings.network[:4] == "unet"
                or experiment_settings.network[:3] == "drn"):
            if experiment_settings.network[:4] == "unet":
                image, shape_changed, yx_padding = fit_unet_image(
                    image, num_downsamplings=4)
            elif experiment_settings.network[:3] == "drn":
                # print("WARNING - adjust image", image.shape)
                image, shape_changed, yx_padding = fit_unet_image(
                    image, num_downsamplings=3)
            else:
                # image has shape [z, y, x] so pad last two dimensions
                image = np.pad(image, ((0, 0), (pad, pad), (pad, pad)),
                               mode="edge")  # alternative: mode='constant', constant_values=(0, 0)

        image = image[:, None]  # add channel dimension: [z, 1, y, x]
        image = torch.from_numpy(image)
        pat_predictions = Predictions()
        with torch.set_grad_enabled(False):
            for s in range(args.samples):
                output = trainer.predict(image)
                if shape_changed:
                    output = restore_original_size(output, yx_padding)
                    # print("WARNING - restore original size", output["softmax"].shape)
                soft_probs = output['softmax'].detach().numpy()
                pat_predictions(soft_probs,
                                cardiac_phase_tag=phase_id,
                                pred_logits=None)
                # torch.cuda.empty_cache()

        # If mc_dropout is True we compute Bayesian uncertainty maps (stddev), otherwise entropy maps
        # (sketched after this example). The method ensures that pred_probs are averaged over samples.
        # 14-8-2019 IMPORTANT: Bayesian maps use the MEAN stddev over classes (previously MAX).
        pred_probs, uncertainties = pat_predictions.get_predictions(
            compute_uncertainty=True, mc_dropout=use_mc, agg_func="mean")
        segmentation = np.argmax(pred_probs, axis=1)
        eval_obj = VolumeEvaluation(pat_id,
                                    segmentation,
                                    reference,
                                    voxel_spacing=spacing,
                                    num_of_classes=n_classes,
                                    mc_dropout=use_mc,
                                    cardiac_phase=phase_id)

        eval_obj.post_processing_only()
        # IMPORTANT: post-processing keeps only the largest connected components of the predictions
        segmentation = eval_obj.pred_labels
        # print(segmentation4d.shape, segmentation.shape, segmentation[None].shape, spacing, original_spacing)
        segmentation4d = np.vstack((segmentation4d, segmentation[None]))
        del output

        if args.save_output and num_of_frames == frame_id + 1:
            do_resample = experiment_settings.resample or original_spacing[-1] < 1.
            # IMPORTANT: if frame_id is None (e.g. when processing 4D data) then filename is without suffix frame_id
            save_segmentations(segmentation4d,
                               pat_id,
                               output_dirs,
                               spacing,
                               do_resample,
                               new_spacing=original_spacing,
                               frame_id=None)

        pat_id_saved = pat_id
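
Predictions.get_predictions is also a project method. Per the comments above, it averages the softmax outputs over MC samples and derives an uncertainty map: the per-voxel entropy for a single sample, or the stddev over samples aggregated (MEAN, previously MAX) over classes when MC dropout is used. A rough sketch under those assumed semantics; the project's Predictions class is the authoritative version:

def get_predictions_sketch(softmax_samples, mc_dropout=False, agg_func="mean", eps=1e-7):
    # softmax_samples: list of arrays, each [z, n_classes, y, x], one per MC sample
    samples = np.stack(softmax_samples)          # [n_samples, z, n_classes, y, x]
    pred_probs = samples.mean(axis=0)            # average over samples
    if mc_dropout:
        # Bayesian map: stddev over samples, aggregated (MEAN, previously MAX) over classes
        stddev = samples.std(axis=0)
        agg = np.mean if agg_func == "mean" else np.max
        uncertainty = agg(stddev, axis=1)        # [z, y, x]
    else:
        # entropy map of the (single) softmax prediction
        uncertainty = -np.sum(pred_probs * np.log2(pred_probs + eps), axis=1)
    return pred_probs, uncertainty
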
Example #3
    def __init__(self,
                 path_detection_dir,
                 patient_ids=None,
                 all_folds_settings=None,
                 correct_all_seg_errors=False,
                 verbose=False,
                 eval_run_id=None):
        """
        -------------------------------------------------------------------------------------
        Example usage:
        patient_ids = None  # ["patient016", "patient017"]
        src_exper_dir = os.path.expanduser("~/expers/acdc/unet_mc_dice")
        all_folds_settings = {'network': 'unet_mc', 'input_channels': 'bmap', }
        # fold0:["patient016","patient017", "patient018", "patient019", "patient020"]
        sim_expert = SimulateExpert(src_exper_dir, all_folds_settings=all_folds_settings,
                                    patient_ids=patient_ids, verbose=True, correct_all_seg_errors=False)
        sim_expert.evaluate_detections(evaluate_baseline=True)
        sim_expert.save()
        -------------------------------------------------------------------------------------
        :param path_detection_dir: absolute path to dir that stores details about detection model training
        :param all_folds_settings: dictionary that must not be None in case path_detection_dir
                    does not point to an experiment dir with a settings.yaml file.
                    In this case we're processing ALL FOLDS and we need to pass the settings:
                    {'input_channels': 'emap' or 'bmap', 'dt_config_id': 'fixed_46_31',
                     'network': 'drn_mc', 'dcnn_mc' or 'unet_mc'}
        :param patient_ids:
        :param correct_all_seg_errors: boolean. IMPORTANT: if set to True we will correct ALL filtered segmentation
                                       errors (t_roi_maps) without taking the region detection heat maps into account.
                                       This is used as a kind of BASELINE.
        :param dt_config_id: extension for directories self.dir_dt_maps and self.dir_detector_labels in order to
                             separate different configurations for error detection
        :param eval_run_id: in case we want to run different evaluations for the same dt_config_id we specify a separate
                            eval_run_id. NOTE: you also need to specify this for the EvaluationHandler when creating the
                            actual heat maps.
        :param verbose:
        """
        self.patient_ids = patient_ids
        self.correct_all_seg_errors = correct_all_seg_errors
        if os.path.isfile(os.path.join(path_detection_dir, 'settings.yaml')):
            self.exper_settings = loadExperimentSettings(
                os.path.join(path_detection_dir, 'settings.yaml'))
            self._settings()
        else:
            # path_detection_dir is not an experiment dir under .../dt_logs. Meaning, we're not processing a
            # specific detection model/fold but simply ALL patient ids we can find in the HEAT_MAP_DIR.
            # This option is useful if we want to evaluate ALL folds in one go.
            # path_detection_dir should be something like: ~/expers/acdc/dcnn_mc_brier/
            # We assume there exists a "heat_maps" directory under this main dir.
            assert all_folds_settings is not None
            self._no_settings(path_detection_dir, all_folds_settings)

        assert self.input_channels in ['bmap', 'emap']
        print("INFO - Working with settings model: {} fold: {} dropout: {} "
              "dt_config: {} input-channels: {}".format(
                  self.model_name, self.fold, self.mc_dropout,
                  self.dt_config_id, self.input_channels))
        self.eval_run_id = eval_run_id
        self.verbose = verbose

        self.num_cardiac_phases = 1
        self.heat_map_dir = None
        self.corr_pred_labels_dir = None
        self.output_dir_results = None
        self.result_dir = None
        self.heat_maps = OrderedDict()
        self.corrected_pred_labels = OrderedDict()
        self.detected_voxel_mask = OrderedDict()
        self.tp_detection_rate = OrderedDict()
        self.patient_frame_array = np.empty(0)

        self._check_dirs()
        self.data_handler = None
        self.heat_map_handler = None

        # keep the evaluation objects per patient
        self.base_fold_eval_obj = None
        self.fold_eval_obj = None
        # important: _check_dirs has to be called first
        self._get_patient_ids()

        print("Warning - Using heat maps: {} ".format(
            not self.correct_all_seg_errors))
Example #4
def main():
    # first we obtain the user arguments, set random seeds, make directories, and store the experiment settings.
    args = parse_args()
    # MC dropout when drawing multiple samples: Bayesian maps (bmap), otherwise entropy maps (emap)
    use_mc = args.samples > 1
    type_of_map = "bmap" if use_mc else "emap"
    res_suffix = "_mc.npz" if use_mc else ".npz"

    os.makedirs(args.output_directory, exist_ok=True)
    experiment_settings = loadExperimentSettings(path.join(args.experiment_directory, 'settings.yaml'))
    dta_settings = get_config("ARVC")

    model_file = path.join(args.experiment_directory, str(args.checkpoint) + '.model')
    output_dirs = make_dirs(args.output_directory, use_mc)
    # we create a trainer
    n_classes = len(dta_settings.tissue_structure_labels)
    n_channels_input = 1
    transfer_learn = False
    # IMPORTANT: enable transfer-learning settings if necessary
    if experiment_settings.dataset != dta_settings.dataset:
        n_classes = len(get_config(experiment_settings.dataset).tissue_structure_labels)
        transfer_learn = True
        print("INFO - transfer learning: trained on nclasses {}".format(n_classes))

    trainer, pad = get_trainer(experiment_settings, n_classes, n_channels_input, model_file=model_file)
    trainer.evaluate_with_dropout = use_mc
    test_results = ARVCTestResult()
    # if patid is not None e.g. "NL256100_1" then we do a single evaluation
    testset_generator = get_test_set_generator(args, experiment_settings, dta_settings, patid=args.patid, all_frames=args.all_frames)
    # we're evaluating patient by patient; one patient can have at most 4 volumes if all tissue
    # structures are annotated in separate phases.
    pat_saved, c_phase_saved, sample_saved, phase_results, result_obj = None, None, None, None, None
    for sample in testset_generator:
        image, reference = sample['image'], sample['reference']
        pat_id, phase_id, frame_id = sample['patient_id'], sample['cardiac_phase'], sample['frame_id']
        spacing, original_spacing, direction = sample['spacing'], sample['original_spacing'], sample['direction']

        if pat_saved != pat_id:
            check_save(pat_saved, sample_saved, result_obj, args, experiment_settings, output_dirs, type_of_map)
        result_obj = prepare_result_obj(pat_saved, sample, result_obj, experiment_settings)
        # get ignore_labels (numpy array of shape [n_classes]. Add batch dim in front with None
        ignore_labels, merge_results, phase_results = prepare_evaluation(sample, pat_saved, c_phase_saved,
                                                                         phase_results)
        if transfer_learn:
            reference, ignore_labels = prepare_transfer_learning(n_classes, reference, dta_settings.cls_translate,
                                                                 ignore_labels)
        if pad > 0:
            # image has shape [z, y, x] so pad last two dimensions
            image = np.pad(image, ((0, 0), (pad, pad), (pad, pad)),
                           mode="edge")  # alternative: mode='constant', constant_values=(0, 0)

        image = image[:, None]  # add channel dimension: [z, 1, y, x]
        image = torch.from_numpy(image)
        pat_predictions = Predictions()
        with torch.set_grad_enabled(False):
            for s in range(args.samples):
                output = trainer.predict(image)
                if ignore_labels is not None:
                    # ignore_labels is a binary vector of size n_classes (target dataset);
                    # get_loss_mask zeroes out the ignored classes (sketched after this example)
                    pred_mask = get_loss_mask(output['softmax'], ignore_labels[None])
                    soft_probs = output['softmax'].detach().numpy() * pred_mask.detach().numpy()
                else:
                    soft_probs = output['softmax'].detach().numpy()
                aleatoric = None if 'aleatoric' not in output else np.squeeze(output['aleatoric'])
                pat_predictions(soft_probs, cardiac_phase_tag=phase_id, pred_logits=None)
                # torch.cuda.empty_cache()

        pred_probs, uncertainties = pat_predictions.get_predictions(compute_uncertainty=True, mc_dropout=use_mc)
        segmentation = np.argmax(pred_probs, axis=1)

        eval_obj = VolumeEvaluation(pat_id, segmentation, reference,
                                    voxel_spacing=spacing, num_of_classes=n_classes,
                                    mc_dropout=use_mc, cardiac_phase=phase_id, ignore_labels=ignore_labels)
        if args.all_frames:
            eval_obj.post_processing_only()
        else:
            eval_obj.fast_evaluate(compute_hd=True)
            phase_results = process_volume_results(test_results, pat_id, phase_id, merge_results,
                                                   phase_results)
        # IMPORTANT: fast_evaluate post-processes the predictions, keeping only the largest connected components
        segmentation = eval_obj.pred_labels

        result_obj = process_volume(result_obj, sample, segmentation, uncertainties, aleatoric=aleatoric)
        if transfer_learn and not args.all_frames:
            print("{}: RV/LV {:.2f} {:.2f}".format(eval_obj.patient_id, eval_obj.dice[1], eval_obj.dice[3]))
        del output
        # remember the patient and phase we just processed; we need this to decide whether to merge the results
        pat_saved, c_phase_saved, sample_saved = pat_id, phase_id, sample

    if not args.all_frames:
        test_results.show_results(transfer_learning=transfer_learn)
    check_save(pat_saved, sample_saved, result_obj, args, experiment_settings, output_dirs, type_of_map)

    if args.save_results:
        fname = path.join(args.output_directory, "results_f" + str(args.fold) + "_{}".format(len(test_results.pat_ids)) +
                          res_suffix)
        test_results.save(filename=fname)
        print("INFO - performance results saved to {}".format(fname))
Example #5
def main():
    # first we obtain the user arguments, set random seeds, make directories, and store the experiment settings.
    args = parse_args()
    # MC dropout when drawing multiple samples: Bayesian maps (bmap), otherwise entropy maps (emap)
    use_mc = args.samples > 1
    type_of_map = "bmap" if use_mc else "emap"
    res_suffix = "_mc.npz" if use_mc else ".npz"

    print("INFO - Evaluating with super resolution = {}".format(
        args.super_resolution))
    os.makedirs(args.output_directory, exist_ok=True)
    experiment_settings = loadExperimentSettings(
        path.join(args.experiment_directory, 'settings.yaml'))
    dta_settings = get_config(experiment_settings.dataset)

    model_file = path.join(args.experiment_directory,
                           str(args.checkpoint) + '.model')
    output_dirs = make_dirs(args.output_directory, use_mc)
    # we create a trainer
    n_classes = len(dta_settings.tissue_structure_labels)
    n_channels_input = 1

    trainer, pad = get_trainer(experiment_settings,
                               n_classes,
                               n_channels_input,
                               model_file=model_file)
    trainer.evaluate_with_dropout = use_mc
    test_results = ACDCTestResult()
    testset_generator = get_test_set_generator(args,
                                               experiment_settings,
                                               dta_settings,
                                               patid=args.patid)

    for sample in testset_generator:
        image, spacing, reference = sample['image'], sample['spacing'], sample['reference']
        pat_id, phase_id, frame_id = sample['patient_id'], sample['cardiac_phase'], sample['frame_id']
        original_spacing = sample['original_spacing']
        shape_changed = False
        # if pat_id == "patient037" and phase_id == "ES":
        #     print("WARNING - Skip {}".format(pat_id))
        #     continue
        if (pad > 0 or experiment_settings.network[:4] == "unet"
                or experiment_settings.network[:3] == "drn"):
            if experiment_settings.network[:4] == "unet":
                image, shape_changed, yx_padding = fit_unet_image(
                    image, num_downsamplings=4)
            elif experiment_settings.network[:3] == "drn":
                # print("WARNING - adjust image", image.shape)
                image, shape_changed, yx_padding = fit_unet_image(
                    image, num_downsamplings=3)
            else:
                # image has shape [z, y, x] so pad last two dimensions
                image = np.pad(image, ((0, 0), (pad, pad), (pad, pad)),
                               mode="edge")  # alternative: mode='constant', constant_values=(0, 0)

        image = image[:, None]  # add channel dimension: [z, 1, y, x]
        image = torch.from_numpy(image)
        pat_predictions = Predictions()
        with torch.set_grad_enabled(False):
            for s in range(args.samples):
                output = trainer.predict(image)
                if shape_changed:
                    output = restore_original_size(output, yx_padding)
                    # print("WARNING - restore original size", output["softmax"].shape)
                soft_probs = output['softmax'].detach().numpy()
                aleatoric = None if 'aleatoric' not in output else np.squeeze(output['aleatoric'])
                aleatoric = None if use_mc else aleatoric  # aleatoric maps are only kept in the non-MC case
                pat_predictions(soft_probs,
                                cardiac_phase_tag=phase_id,
                                pred_logits=None)
                # torch.cuda.empty_cache()

        # If mc_dropout is True we compute Bayesian uncertainty maps (stddev), otherwise entropy maps.
        # The method ensures that pred_probs are averaged over samples when we are sampling.
        # 14-8-2019 IMPORTANT: Bayesian maps use the MEAN stddev over classes (previously MAX).
        pred_probs, uncertainties = pat_predictions.get_predictions(
            compute_uncertainty=True, mc_dropout=use_mc, agg_func="mean")
        segmentation = np.argmax(pred_probs, axis=1)
        eval_obj = VolumeEvaluation(pat_id,
                                    segmentation,
                                    reference,
                                    voxel_spacing=spacing,
                                    num_of_classes=n_classes,
                                    mc_dropout=use_mc,
                                    cardiac_phase=phase_id)

        eval_obj.fast_evaluate(compute_hd=True)
        # IMPORTANT: fast_evaluate post-processes the predictions, keeping only the largest
        # connected components (sketched after this example)
        segmentation = eval_obj.pred_labels
        test_results(eval_obj.dice,
                     hd=eval_obj.hd,
                     cardiac_phase_tag=phase_id,
                     pat_id=pat_id,
                     hd95=eval_obj.hd95,
                     assd=eval_obj.assd)
        eval_obj.show_results()
        if args.save_output:
            # print("INFO - image/reference size ", image.shape, reference.shape)
            do_resample = experiment_settings.resample or original_spacing[-1] < 1.
            save_pat_objects(
                pat_id,
                phase_id,
                segmentation,
                None,
                uncertainties,
                aleatoric,
                type_of_map,
                spacing,
                output_dirs,
                new_spacing=original_spacing,
                do_resample=do_resample,
                pred_probs=pred_probs if args.save_probs else None)
        # Work-around to save predicted probabilities only
        if args.save_probs and not args.save_output:
            do_resample = experiment_settings.resample or original_spacing[-1] < 1.
            save_pred_probs(pat_id,
                            phase_id,
                            pred_probs,
                            spacing,
                            output_dirs,
                            new_spacing=original_spacing,
                            do_resample=do_resample,
                            direction=None,
                            origin=None)

        del output

    test_results.show_results()
    test_results.excel_string()

    if args.save_results:
        fname = path.join(
            args.output_directory,
            "results_f" + str(experiment_settings.fold) +
            "_{}".format(len(test_results.pat_ids)) + res_suffix)
        test_results.save(filename=fname)
        print("INFO - performance results saved to {}".format(fname))