Example #1 (0 votes)
    def run_eval(self):
        """Compute every evaluation metric and collect the results.

        Iterates over the classes, temporarily rebinding ``self.data_pred``
        and ``self.data_gt`` to one class channel at a time so the metric
        helpers operate on a single class.

        Returns:
            dict, ndarray: per-class metrics (volumes, RVD/AVD, Dice,
            recall, precision, specificity, object counts, LTPR/LFDR, MSE,
            plus per-lesion-size LTPR/LFDR entries), and the data volume
            with each object painted a different color.
        """
        results = {}
        full_gt = self.data_gt.copy()
        full_pred = self.data_pred.copy()

        for class_idx in range(self.n_classes):
            cls_suffix = '_class' + str(class_idx)
            # Rebind to the current class channel; the metric helpers below
            # read self.data_pred / self.data_gt implicitly.
            self.data_pred = full_pred[..., class_idx]
            self.data_gt = full_gt[..., class_idx]

            results['vol_pred' + cls_suffix] = self.get_vol(self.data_pred)
            results['vol_gt' + cls_suffix] = self.get_vol(self.data_gt)
            results['rvd' + cls_suffix] = self.get_rvd()
            results['avd' + cls_suffix] = self.get_avd()
            results['dice' + cls_suffix] = imed_metrics.dice_score(
                self.data_gt, self.data_pred)
            results['recall' + cls_suffix] = imed_metrics.recall_score(
                self.data_pred, self.data_gt, err_value=np.nan)
            results['precision' + cls_suffix] = imed_metrics.precision_score(
                self.data_pred, self.data_gt, err_value=np.nan)
            results['specificity' + cls_suffix] = imed_metrics.specificity_score(
                self.data_pred, self.data_gt, err_value=np.nan)
            results['n_pred' + cls_suffix] = self.n_pred[class_idx]
            results['n_gt' + cls_suffix] = self.n_gt[class_idx]
            ltpr_val, _ = self.get_ltpr(class_idx=class_idx)
            results['ltpr' + cls_suffix] = ltpr_val
            results['lfdr' + cls_suffix] = self.get_lfdr(class_idx=class_idx)
            results['mse' + cls_suffix] = imed_metrics.mse(
                self.data_gt, self.data_pred)

            # Size-stratified lesion metrics: each entry pairs a lesion size
            # with whether it was observed in the GT or the prediction.
            for lb_size, origin in zip(self.label_size_lst[class_idx][0],
                                       self.label_size_lst[class_idx][1]):
                size_suffix = self.size_suffix_lst[int(lb_size) - 1]

                if origin == 'gt':
                    ltpr_size, n_size = self.get_ltpr(label_size=lb_size,
                                                      class_idx=class_idx)
                    results['ltpr' + size_suffix + "_class" + str(class_idx)] = ltpr_size
                    results['n' + size_suffix] = n_size
                else:  # origin == 'pred'
                    results['lfdr' + size_suffix + "_class" + str(class_idx)] = \
                        self.get_lfdr(label_size=lb_size, class_idx=class_idx)

        # Single-class output: drop the trailing channel axis of the painting.
        if self.n_classes == 1:
            self.data_painted = np.squeeze(self.data_painted, axis=-1)

        return results, self.data_painted
Example #2 (0 votes)
def test_err_rec(image, image_2):
    """Recall score returns the supplied err_value (here 1) for this input pair."""
    score = imed_metrics.recall_score(image, image_2, err_value=1)
    assert score == 1
def run_experiment(level, unc_name, thr_unc_lst, thr_pred_lst, gt_folder,
                   pred_folder, im_lst, target_suf, param_eval):
    """Sweep uncertainty and prediction thresholds and evaluate TPR / FDR.

    For each image prefix, loads the uncertainty map and the MC prediction
    samples, discards voxels whose uncertainty exceeds each threshold in
    ``thr_unc_lst``, binarizes the resulting soft prediction at each threshold
    in ``thr_pred_lst``, and evaluates against the ground truth.

    Args:
        level (str): 'vox' for voxel-wise recall/precision, otherwise
            lesion-wise LTPR/LFDR.
        unc_name (str): filename suffix of the uncertainty map.
        thr_unc_lst (list): uncertainty thresholds to sweep.
        thr_pred_lst (list): prediction thresholds to sweep.
        gt_folder (str): BIDS-like root containing the ground-truth files.
        pred_folder (str): folder with uncertainty maps and MC samples.
        im_lst (list): image filename prefixes to process.
        target_suf (str): filename suffix of the ground-truth target.
        param_eval (dict): parameters forwarded to Evaluation3DMetrics.

    Returns:
        dict: 'tpr' and 'fdr' as ``[i_unc][i_pred]`` lists of per-image rates
        rescaled to [0, 1], and 'retained_elt' as ``[i_unc]`` lists of the
        retained-element counts returned by ``count_retained``.
    """
    # Init results: one list per (uncertainty threshold, prediction threshold)
    # cell; deepcopy so the nested lists are independent.
    tmp_lst = [[] for _ in range(len(thr_pred_lst))]
    res_init_lst = [deepcopy(tmp_lst) for _ in range(len(thr_unc_lst))]
    res_dct = {
        'tpr': deepcopy(res_init_lst),
        'fdr': deepcopy(res_init_lst),
        'retained_elt': [[] for _ in range(len(thr_unc_lst))]
    }

    # Loop across images
    for fname_pref in im_lst:
        # Uncertainty map
        fname_unc = os.path.join(pred_folder,
                                 fname_pref + unc_name + '.nii.gz')
        im = nib.load(fname_unc)
        # NOTE(review): get_data() is deprecated in nibabel (removed in 5.0);
        # get_fdata() is the replacement but changes the returned dtype —
        # confirm the project's nibabel version before switching.
        data_unc = im.get_data()
        del im

        # Stack of MC samples for this image
        data_pred_lst = np.array([
            nib.load(os.path.join(pred_folder, f)).get_data()
            for f in os.listdir(pred_folder) if fname_pref + '_pred_' in f
        ])

        # Ground-truth fname
        fname_gt = os.path.join(gt_folder,
                                fname_pref.split('_')[0], 'anat',
                                fname_pref + target_suf + '.nii.gz')
        if os.path.isfile(fname_gt):
            nib_gt = nib.load(fname_gt)
            data_gt = nib_gt.get_data()
            logger.debug(np.sum(data_gt))
            # Soft prediction: mean over the MC samples
            data_soft = np.mean(data_pred_lst, axis=0)

            if np.any(data_soft):
                for i_unc, thr_unc in enumerate(thr_unc_lst):
                    # Discard uncertain voxels from data_soft.
                    data_soft_thrUnc = deepcopy(data_soft)
                    data_soft_thrUnc[data_unc > thr_unc] = 0
                    # np.int was removed in NumPy 1.24; it was always an
                    # alias for the builtin int, so use int directly.
                    cmpt = count_retained(
                        (data_soft > 0).astype(int),
                        (data_soft_thrUnc > 0).astype(int), level)
                    res_dct['retained_elt'][i_unc].append(cmpt)
                    logger.debug(f"{thr_unc} {cmpt}")
                    for i_pred, thr_pred in enumerate(thr_pred_lst):
                        data_hard = imed_postpro.threshold_predictions(deepcopy(data_soft_thrUnc), thr=thr_pred)\
                                                .astype(np.uint8)

                        # Renamed from `eval` to avoid shadowing the builtin.
                        evaluator = imed_utils.Evaluation3DMetrics(
                            data_pred=data_hard,
                            data_gt=data_gt,
                            dim_lst=nib_gt.header['pixdim'][1:4],
                            params=param_eval)

                        if level == 'vox':
                            tpr = imed_metrics.recall_score(evaluator.data_pred,
                                                            evaluator.data_gt,
                                                            err_value=np.nan)
                            fdr = 100. - imed_metrics.precision_score(
                                evaluator.data_pred, evaluator.data_gt,
                                err_value=np.nan)
                        else:
                            tpr, _ = evaluator.get_ltpr()
                            fdr = evaluator.get_lfdr()
                        logger.debug(
                            f"{thr_pred} {np.count_nonzero(data_soft_thrUnc)} "
                            f"{np.count_nonzero(data_hard)} {tpr} {fdr}")
                        res_dct['tpr'][i_unc][i_pred].append(tpr / 100.)
                        res_dct['fdr'][i_unc][i_pred].append(fdr / 100.)

    return res_dct