Example #1
# Assumed imports for this snippet (the module aliases follow ivadomed's usual
# convention, but the exact module paths may differ between ivadomed versions):
import os

import nibabel as nib
import numpy as np
import pandas as pd
from loguru import logger  # assumption: any configured logger would do

from ivadomed import postprocessing as imed_postpro
from ivadomed import utils as imed_utils


def run_inference(pred_folder,
                  im_lst,
                  thr_pred,
                  gt_folder,
                  target_suf,
                  param_eval,
                  unc_name=None,
                  thr_unc=None):
    """Evaluate hard segmentations obtained from averaged prediction samples,
    optionally zeroing out voxels whose uncertainty exceeds thr_unc."""
    # init results dataframe (one row of metrics per image)
    df_results = pd.DataFrame()

    # loop across images
    for fname_pref in im_lst:
        if unc_name is not None and thr_unc is not None:
            logger.debug(f"Uncertainty threshold: {thr_unc}")
            # load the uncertainty map of this image
            fname_unc = os.path.join(pred_folder,
                                     fname_pref + unc_name + '.nii.gz')
            im = nib.load(fname_unc)
            # get_data() was removed in nibabel 5.0; use the documented replacement
            data_unc = np.asanyarray(im.dataobj)
            del im

            # stack the Monte Carlo prediction samples of this image
            data_pred_lst = np.array([
                np.asanyarray(nib.load(os.path.join(pred_folder, f)).dataobj)
                for f in os.listdir(pred_folder) if fname_pref + '_pred_' in f
            ])
        else:
            # no uncertainty filtering: load the single prediction file
            data_pred_lst = np.array([
                np.asanyarray(nib.load(os.path.join(pred_folder, f)).dataobj)
                for f in os.listdir(pred_folder) if fname_pref + '_pred.' in f
            ])

        # ground-truth fname (<subject>/anat/ layout)
        fname_gt = os.path.join(gt_folder,
                                fname_pref.split('_')[0], 'anat',
                                fname_pref + target_suf + '.nii.gz')
        nib_gt = nib.load(fname_gt)
        data_gt = np.asanyarray(nib_gt.dataobj)

        # soft prediction: average of the prediction samples
        data_soft = np.mean(data_pred_lst, axis=0)

        if unc_name is not None and thr_unc is not None:
            logger.debug(f"Applying uncertainty threshold {thr_unc}")
            # discard uncertain lesions from data_soft
            data_soft[data_unc > thr_unc] = 0

        # binarize the soft prediction
        data_hard = imed_postpro.threshold_predictions(
            data_soft, thr=thr_pred).astype(np.uint8)

        # compute 3D evaluation metrics against the ground truth
        # (renamed from `eval` to avoid shadowing the builtin)
        eval3d = imed_utils.Evaluation3DMetrics(
            data_pred=data_hard,
            data_gt=data_gt,
            dim_lst=nib_gt.header['pixdim'][1:4],
            params=param_eval)

        results_pred, _ = eval3d.run_eval()

        # save the results of this image
        # (DataFrame.append() was removed in pandas 2.0; concatenate instead)
        results_pred['image_id'] = fname_pref.split('_')[0]
        df_results = pd.concat([df_results, pd.DataFrame([results_pred])],
                               ignore_index=True)

    return df_results
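
A minimal usage sketch for run_inference. All paths, suffixes, and threshold values below are placeholders chosen for illustration (they do not come from the original script); the prediction folder is assumed to contain files named <prefix>_pred_*.nii.gz (Monte Carlo samples) or <prefix>_pred.nii.gz, plus an uncertainty map <prefix><unc_name>.nii.gz when uncertainty filtering is used.

# Hypothetical inputs -- adjust to your own folder layout.
pred_folder = "results/pred_masks"        # contains <prefix>_pred_*.nii.gz samples
gt_folder = "data"                        # dataset root with <subject>/anat/ subfolders
im_lst = ["sub-01_T2w", "sub-02_T2w"]     # image prefixes to evaluate
param_eval = {}                           # parameters forwarded to Evaluation3DMetrics

# Without uncertainty filtering:
df = run_inference(pred_folder, im_lst, thr_pred=0.5,
                   gt_folder=gt_folder, target_suf="_lesion-manual",
                   param_eval=param_eval)

# With uncertainty filtering (suffix and threshold are placeholders):
df_unc = run_inference(pred_folder, im_lst, thr_pred=0.5,
                       gt_folder=gt_folder, target_suf="_lesion-manual",
                       param_eval=param_eval,
                       unc_name="_unc-vox", thr_unc=0.1)
print(df_unc.head())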
Example #2
# In addition to the imports above, this snippet assumes:
#   from copy import deepcopy
#   from ivadomed import metrics as imed_metrics
# count_retained() is a helper assumed to be defined elsewhere in the same script.
def run_experiment(level, unc_name, thr_unc_lst, thr_pred_lst, gt_folder,
                   pred_folder, im_lst, target_suf, param_eval):
    """Sweep uncertainty and prediction thresholds and collect, per image,
    the TPR and FDR (voxel-wise if level == 'vox', lesion-wise otherwise)."""
    # init results: one list per (uncertainty threshold, prediction threshold) pair
    tmp_lst = [[] for _ in range(len(thr_pred_lst))]
    res_init_lst = [deepcopy(tmp_lst) for _ in range(len(thr_unc_lst))]
    res_dct = {
        'tpr': deepcopy(res_init_lst),
        'fdr': deepcopy(res_init_lst),
        'retained_elt': [[] for _ in range(len(thr_unc_lst))]
    }

    # loop across images
    for fname_pref in im_lst:
        # load the uncertainty map of this image
        fname_unc = os.path.join(pred_folder,
                                 fname_pref + unc_name + '.nii.gz')
        im = nib.load(fname_unc)
        # get_data() was removed in nibabel 5.0; use the documented replacement
        data_unc = np.asanyarray(im.dataobj)
        del im

        # stack the Monte Carlo prediction samples of this image
        data_pred_lst = np.array([
            np.asanyarray(nib.load(os.path.join(pred_folder, f)).dataobj)
            for f in os.listdir(pred_folder) if fname_pref + '_pred_' in f
        ])

        # ground-truth fname (<subject>/anat/ layout)
        fname_gt = os.path.join(gt_folder,
                                fname_pref.split('_')[0], 'anat',
                                fname_pref + target_suf + '.nii.gz')
        if os.path.isfile(fname_gt):
            nib_gt = nib.load(fname_gt)
            data_gt = np.asanyarray(nib_gt.dataobj)
            logger.debug(f"Ground-truth sum: {np.sum(data_gt)}")
            # soft prediction: average of the MC samples
            data_soft = np.mean(data_pred_lst, axis=0)

            if np.any(data_soft):
                for i_unc, thr_unc in enumerate(thr_unc_lst):
                    # discard uncertain lesions from data_soft
                    data_soft_thrUnc = deepcopy(data_soft)
                    data_soft_thrUnc[data_unc > thr_unc] = 0
                    # count elements retained after uncertainty thresholding
                    # (np.int was removed in NumPy 1.24; use the builtin int instead)
                    cmpt = count_retained(
                        (data_soft > 0).astype(int),
                        (data_soft_thrUnc > 0).astype(int), level)
                    res_dct['retained_elt'][i_unc].append(cmpt)
                    logger.debug(f"thr_unc={thr_unc} retained={cmpt}")
                    for i_pred, thr_pred in enumerate(thr_pred_lst):
                        # binarize the uncertainty-filtered soft prediction
                        data_hard = imed_postpro.threshold_predictions(deepcopy(data_soft_thrUnc), thr=thr_pred)\
                                                .astype(np.uint8)

                        # renamed from `eval` to avoid shadowing the builtin
                        eval3d = imed_utils.Evaluation3DMetrics(
                            data_pred=data_hard,
                            data_gt=data_gt,
                            dim_lst=nib_gt.header['pixdim'][1:4],
                            params=param_eval)

                        if level == 'vox':
                            # voxel-wise metrics, expressed as percentages
                            tpr = imed_metrics.recall_score(eval3d.data_pred,
                                                            eval3d.data_gt,
                                                            err_value=np.nan)
                            fdr = 100. - imed_metrics.precision_score(
                                eval3d.data_pred, eval3d.data_gt, err_value=np.nan)
                        else:
                            # lesion-wise metrics
                            tpr, _ = eval3d.get_ltpr()
                            fdr = eval3d.get_lfdr()
                        logger.debug(
                            f"thr_pred={thr_pred} "
                            f"soft={np.count_nonzero(data_soft_thrUnc)} "
                            f"hard={np.count_nonzero(data_hard)} tpr={tpr} fdr={fdr}")
                        res_dct['tpr'][i_unc][i_pred].append(tpr / 100.)
                        res_dct['fdr'][i_unc][i_pred].append(fdr / 100.)

    return res_dct
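
A usage sketch for run_experiment under the same assumptions; the threshold grids, suffixes, and paths are again placeholders. The returned dictionary is indexed as res['tpr'][i_unc][i_pred] (a list with one value per image), so aggregating across images gives one TPR/FDR curve per uncertainty threshold.

# Hypothetical threshold grids and inputs.
thr_unc_lst = list(np.linspace(0.01, 0.5, 10))   # uncertainty thresholds to sweep
thr_pred_lst = list(np.linspace(0.1, 0.9, 9))    # prediction thresholds to sweep

res = run_experiment(level="vox",                # 'vox' = voxel-wise; any other value = lesion-wise
                     unc_name="_unc-vox",
                     thr_unc_lst=thr_unc_lst,
                     thr_pred_lst=thr_pred_lst,
                     gt_folder="data",
                     pred_folder="results/pred_masks",
                     im_lst=["sub-01_T2w", "sub-02_T2w"],
                     target_suf="_lesion-manual",
                     param_eval={})

# Mean TPR across images for the first uncertainty threshold, one value per thr_pred:
mean_tpr = [np.nanmean(v) for v in res['tpr'][0]]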