Example 1
    def extract_patch(self, index):
        """Load an image and convert it to a torch tensor."""

        img = utls.imread(self.img_path)
        this_mask = self.sp_labels == self.unique_sp_labels[index]
        i_mask, j_mask = np.where(this_mask)
        w = max(j_mask) - min(j_mask)
        h = max(i_mask) - min(i_mask)
        if (w < h):
            cols_to_add = h - w + 1
            idx_i = np.arange(min(i_mask), max(i_mask) + 1).astype(int)
            idx_j = np.arange(
                min(j_mask) - np.floor(cols_to_add / 2),
                max(j_mask) + np.ceil(cols_to_add / 2)).astype(int)
        elif (w > h):
            rows_to_add = w - h + 1
            idx_i = np.arange(
                min(i_mask) - np.floor(rows_to_add / 2),
                max(i_mask) + np.ceil(rows_to_add / 2)).astype(int)
            idx_j = np.arange(min(j_mask), max(j_mask) + 1).astype(int)
        else:
            idx_i = np.arange(min(i_mask), max(i_mask) + 1)
            idx_j = np.arange(min(j_mask), max(j_mask) + 1)

        patch = resize(
            img.take(idx_i, mode='wrap', axis=0).take(idx_j,
                                                      mode='wrap',
                                                      axis=1),
            (self.patch_size, self.patch_size)).astype(np.float32)

        # Convert to PIL and apply torch transform
        patch = (patch * 255 / np.max(patch)).astype('uint8')
        patch = Image.fromarray(patch)
        patch = self.transform(patch).squeeze()

        return (patch, self.unique_sp_labels[index])
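
In essence, extract_patch grows the superpixel's bounding box into a square by padding the shorter side, then reads the pixels with wrap-around indexing before resizing. A minimal standalone sketch of just that windowing step (square_window and its arguments are illustrative names, not part of the original class):

import numpy as np


def square_window(img, mask):
    """Return a square crop of img covering the True region of mask."""
    i_mask, j_mask = np.where(mask)
    i_min, i_max = i_mask.min(), i_mask.max()
    j_min, j_max = j_mask.min(), j_mask.max()

    # Target side length: the longer of the two bounding-box sides.
    side = max(i_max - i_min, j_max - j_min) + 1

    def expand(lo, hi):
        # Symmetrically extend [lo, hi] until it spans `side` pixels.
        missing = side - (hi - lo + 1)
        return np.arange(lo - missing // 2, hi + 1 + (missing + 1) // 2)

    # mode='wrap' lets the window spill over the image borders, as in the
    # method above.
    return img.take(expand(i_min, i_max), mode='wrap', axis=0).take(
        expand(j_min, j_max), mode='wrap', axis=1)

The resize, normalization, and torch transform steps would then follow exactly as in extract_patch.
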
Example 2
from ksptrack.exps import results_dirs as rd
from PIL import Image, ImageFont, ImageDraw
import glob
"""
Makes plots of the self-learning (KSP) results.
"""

cmap = plt.get_cmap('viridis')


def gray2rgb(im):
    return (color.gray2rgb(im) * 255).astype(np.uint8)


file_out = os.path.join(rd.root_dir, 'plots_results')
placehold = utls.imread(os.path.join(file_out, 'placeholder.png'))

n_sets_per_type = 1

dfs = []
# Self-learning

ims = []
ksp = []
pm = []

#for key in rd.res_dirs_dict_ksp.keys(): # Types
for key in rd.types:  # Types
    ims.append([])
    ksp.append([])
def main(conf, plot_fname='metrics.pdf', logger=None, skip_frames=False):

    logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    metrics_path = os.path.join(conf.dataOutDir, 'metrics.npz')

    if (not os.path.exists(metrics_path)):

        res = np.load(os.path.join(conf.dataOutDir, 'results.npz'))

        list_paths_back = res['list_paths_back']
        list_paths_for = res['list_paths_for']

        my_dataset = ds.DataManager(conf)
        my_dataset.load_labels_if_not_exist()
        #my_dataset.load_pm_fg_from_file()

        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        #l_dataset.make_y_array_true(l_dataset.gt)

        fpr_ksp = [0.]
        tpr_ksp = [0.]
        pr_ksp = [1.]
        rc_ksp = [0.]
        f1_ksp = []

        logger.info('[1/4] Calculating metrics on KSP... ')

        seeds = utls.list_paths_to_seeds(list_paths_for, list_paths_back)
        l_dataset.set_seeds(seeds)
        #l_dataset.make_y_array(l_dataset.seeds)

        ksp_scores = res['ksp_scores_mat']
        fpr, tpr, _ = roc_curve(l_dataset.gt.ravel(), ksp_scores.ravel())
        precision, recall, _ = precision_recall_curve(l_dataset.gt.ravel(),
                                                      ksp_scores.ravel())
        num = precision * recall
        denum = precision + recall
        f1_ = np.nan_to_num(2 * (num) / (denum))
        f1_ksp.append(np.max(f1_))
        fpr_ksp.append(fpr[1])
        tpr_ksp.append(tpr[1])
        pr_ksp.append(precision[1])
        rc_ksp.append(recall[1])

        fpr_ksp.append(1.)
        tpr_ksp.append(1.)
        pr_ksp.append(0.)
        rc_ksp.append(1.)

        logger.info('[2/4] Calculating metrics on PM... ')
        fpr_pm = []
        tpr_pm = []
        pr_pm = []
        rc_pm = []
        f1_pm = []

        seeds = utls.list_paths_to_seeds(list_paths_for, list_paths_back)
        l_dataset.fg_marked = seeds
        l_dataset.calc_pm(l_dataset.fg_marked,
                          save=False,
                          marked_feats=None,
                          all_feats_df=my_dataset.sp_desc_df,
                          in_type='not csv',
                          mode='foreground',
                          bag_n_feats=conf.bag_n_feats,
                          feat_fields=['desc'],
                          n_jobs=conf.bag_jobs)

        pm = l_dataset.get_pm_array()

        fpr, tpr, _ = roc_curve(l_dataset.gt.ravel(), pm.ravel())
        precision, recall, _ = precision_recall_curve(l_dataset.gt.ravel(),
                                                      pm.ravel())
        num = precision * recall
        denum = precision + recall
        f1_ = np.nan_to_num(2 * (num) / (denum))
        f1_pm.append(np.max(f1_))
        fpr_pm.append(fpr)
        tpr_pm.append(tpr)
        pr_pm.append(precision)
        rc_pm.append(recall)

        # Saving metrics
        data = dict()
        data['fpr_pm'] = fpr_pm
        data['tpr_pm'] = tpr_pm
        data['pr_pm'] = pr_pm
        data['rc_pm'] = rc_pm
        data['f1_pm'] = f1_pm

        data['fpr_ksp'] = fpr_ksp
        data['tpr_ksp'] = tpr_ksp
        data['pr_ksp'] = pr_ksp
        data['rc_ksp'] = rc_ksp
        data['f1_ksp'] = f1_ksp

        data['ksp_scores'] = ksp_scores
        data['pm'] = pm
        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)

    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        fpr_pm = metrics['fpr_pm']
        tpr_pm = metrics['tpr_pm']
        pr_pm = metrics['pr_pm']
        rc_pm = metrics['rc_pm']
        f1_pm = metrics['f1_pm']

        fpr_ksp = metrics['fpr_ksp']
        tpr_ksp = metrics['tpr_ksp']
        pr_ksp = metrics['pr_ksp']
        rc_ksp = metrics['rc_ksp']
        f1_ksp = metrics['f1_ksp']

        ksp_scores = metrics['ksp_scores']
        if ('pm' in metrics.keys()):
            pm = metrics['pm']
        else:
            pm = metrics['pm_ksp']

        my_dataset = ds.DataManager(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)

        res = np.load(os.path.join(conf.dataOutDir, 'results.npz'))
        list_paths_back = res['list_paths_back']
        list_paths_for = res['list_paths_for']

    # Plot all iterations of PM
    conf.roc_xlim = [0, 0.4]
    conf.pr_rc_xlim = [0.6, 1.]

    lw = 1

    # PM curves
    auc_ = auc(np.asarray(fpr_pm[-1]).ravel(), np.asarray(tpr_pm[-1]).ravel())
    max_f1 = np.max(f1_pm[-1])

    plt.subplot(121)
    plt.plot(np.asarray(fpr_pm[-1]).ravel(),
             np.asarray(tpr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    auc_ = auc(np.asarray(rc_pm[-1]).ravel(), np.asarray(pr_pm[-1]).ravel())
    plt.subplot(122)
    plt.plot(np.asarray(rc_pm[-1]).ravel(),
             np.asarray(pr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    # Plot KSP
    auc_ = auc(np.asarray(fpr_ksp).ravel(),
               np.asarray(tpr_ksp).ravel(),
               reorder=True)
    max_f1 = np.max(f1_ksp)
    plt.subplot(121)
    plt.plot(np.asarray(fpr_ksp).ravel(),
             np.asarray(tpr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_ksp).ravel(),
               np.asarray(pr_ksp).ravel(),
               reorder=True)
    plt.plot(np.asarray(rc_ksp).ravel(),
             np.asarray(pr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    plt.subplot(121)
    plt.legend()
    plt.xlim(conf.roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.subplot(122)
    plt.legend()
    plt.xlim(conf.pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle(conf.seq_type + ', ' + conf.ds_dir + '\n' + 'T: ' +
                 str(conf.T))
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    fig.savefig(os.path.join(conf.dataOutDir, plot_fname), dpi=200)

    # Make plots
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    elif (skip_frames):
        logger.info('[!!!] Skipping saving of frames')
    else:
        logger.info('Saving KSP, PM...')
        n_iters_ksp = len(list_paths_back)

        if (conf.csvFileType == 'pandas'):
            locs2d = pd.read_csv(
                os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                             conf.csvFileName_fg))
        elif (conf.csvFileType == 'anna'):
            locs2d = utls.readCsv(
                os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                             conf.csvFileName_fg))
        os.mkdir(frame_path)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = csv.draw2DPoint(utls.pandas_to_std_csv(locs2d),
                                     f,
                                     im,
                                     radius=7)

                bar.update(f)
                plt.subplot(221)
                plt.imshow(ksp_scores[..., f])
                plt.title('KSP')
                plt.subplot(222)
                plt.imshow(pm[..., f])
                plt.title('KSP -> PM')
                plt.subplot(223)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f) + ', n_iters_ksp: ' +
                             str(n_iters_ksp))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)

    logger.info('Saving SPs per iterations plot...')
    n_sps = []
    for i in range(len(list_paths_back)):

        seeds = utls.list_paths_to_seeds(list_paths_for,
                                         list_paths_back,
                                         iter_=i)
        n = seeds.shape[0]
        n_sps.append((i + 1, n))

    n_sps.append((len(list_paths_back) + 1, n))
    n_sps = np.asarray(n_sps)

    plt.clf()  # start a fresh figure for the superpixel-count plot
    plt.plot(n_sps[:, 0], n_sps[:, 1], 'bo-')
    plt.plot(n_sps[-1, 0], n_sps[-1, 1], 'ro')
    plt.xlabel('iterations')
    plt.ylabel('num. of superpixels')
    plt.title('num of superpixels vs. iterations. SS threshold: ' +
              str(conf.ss_thr))
    plt.savefig(os.path.join(conf.dataOutDir, 'sps_iters.eps'), dpi=200)

    pr_pm, rc_pm, _ = precision_recall_curve(l_dataset.gt.ravel(), pm.ravel())
    ksp_pm_pix_f1 = np.max(2 * (pr_pm * rc_pm) / (pr_pm + rc_pm))
    ksp_pix_f1 = f1_score(l_dataset.gt.ravel(), ksp_scores.ravel())

    file_out = os.path.join(conf.dataOutDir, 'scores.csv')
    logger.info('Saving to {}'.format(file_out))
    C = pd.Index(["F1"], name="columns")
    I = pd.Index(['KSP', 'KSP/PM'], name="Methods")
    data = np.asarray([ksp_pix_f1, ksp_pm_pix_f1]).reshape(2, 1)
    df = pd.DataFrame(data=data, index=I, columns=C)
    df.to_csv(path_or_buf=file_out)
    dataset = learning_dataset.LearningDataset(conf)
    dataset.load_labels_contours_if_not_exist()
    gt = dataset.gt
    sp_conts = dataset.labelContourMask

    file_ksp = os.path.join(rd.root_dir, rd.res_dirs_dict_ksp[key][dset][0],
                            'metrics.npz')

    print('Loading: ' + file_ksp)
    npzfile = np.load(file_ksp)

    for f in frames_idx:

        # Image
        im = utls.imread(conf.frameFileNames[f])
        ims.append(im)
        locs2d = utls.readCsv(
            os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                         conf.csvFileName_fg))
        im = csv.draw2DPoint(locs2d, f, im, radius=14)
        ims_gaze.append(np.copy(im))

        mi, mj = np.where(sp_conts[..., f])
        im[mi, mj, :] = (255, 255, 255)

        ksp_ = gray2rgb(npzfile['ksp_scores'][..., f])
        ksp.append(ksp_)

widths = [ims[i].shape[1] for i in range(len(ims))]
heights = [ims[i].shape[0] for i in range(len(ims))]
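
The metric block in main() above repeats one pattern: flatten the ground truth and a score map, build ROC and precision-recall curves, and keep the best F1 along the PR curve. A self-contained sketch of that pattern on synthetic data (the arrays here are placeholders, not project data):

import numpy as np
from sklearn.metrics import auc, precision_recall_curve, roc_curve

rng = np.random.RandomState(0)
gt = rng.rand(4, 64, 64) > 0.7                  # binary ground-truth stack
scores = gt * 0.6 + rng.rand(4, 64, 64) * 0.4   # noisy score maps in [0, 1]

precision, recall, _ = precision_recall_curve(gt.ravel(), scores.ravel())
f1 = np.nan_to_num(2 * precision * recall / (precision + recall))

fpr, tpr, _ = roc_curve(gt.ravel(), scores.ravel())

print('max F1: {:.4f}, ROC AUC: {:.4f}'.format(f1.max(), auc(fpr, tpr)))
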
Example 5
    'momentum': 0.9,
    'weight_decay_adam': 0,
    'num_epochs': 150,
    'out_dir': save_dir,
    'cp_fname': cp_fname,
    'bm_fname': bm_fname
}

model = UNetFeatExtr(params)

model.model = ptu.load_checkpoint(os.path.join(save_dir, bm_fname),
                                  model.model, params['cuda'])

fnames = utls.get_images(os.path.join(data_root, dataset_dir, frame_dir))

orig_shape = utls.imread(fnames[0]).shape

in_shape = ptu.comp_unet_input_shape(orig_shape,
                                     model.model.depth,
                                     max_shape=(600, 600))

dataset = Dataset(in_shape, im_paths=fnames, mode='eval', cuda=params['cuda'])

model.model.eval()

idx = [0, 20, 40, 60, 80, 100]
feats = []
ims = []
for i in idx:
    print(i)
    im, prior, truth, im_orig = dataset[i]
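
comp_unet_input_shape is not shown in this snippet; presumably it picks an input size that the network's pooling stages divide evenly, capped at max_shape. The following is only a rough illustration of that idea under this assumption, not the actual implementation:

def unet_friendly_shape(orig_shape, depth, max_shape=(600, 600)):
    # A UNet with `depth` poolings needs spatial dims divisible by 2 ** depth.
    factor = 2 ** depth
    return tuple(
        (min(dim, cap) // factor) * factor  # round down to the nearest multiple
        for dim, cap in zip(orig_shape[:2], max_shape))


# e.g. a 480x854 RGB frame with depth=4 maps to (480, 592) under a (600, 600) cap
print(unet_friendly_shape((480, 854, 3), depth=4))
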
Example 6
path_saves = '/home/laurent.lejeune/medical-labeling/'
save_out_fname = 'brats_matching_1.npz'

# Range (normalized) of volume to compute error
range_comp = np.asarray((0, 1.))
im_ind_my = [70, 28, 20, 44]

my_paths = [sorted(glob.glob(os.path.join(p, '*.png'))) for p in my_root_paths]

# Keep one frame per sequence (the slice picked by im_ind_my), flipped
# left-right, with a trailing channel axis added.
ims_my = [
    np.flip(utls.imread(paths[idx]), 1)[..., np.newaxis]
    for paths, idx in zip(my_paths, im_ind_my)
]

print('Extracting feats on my')
im_my = {path: get_feats(im) for path, im in zip(my_root_paths, ims_my)}
print('Done.')

print('Saving features on my')
np.savez(os.path.join(path_saves, 'hists_my.npz'), **{'ims': im_my})

match_my = dict()

print('Matching on brats')
for si, sd in enumerate(sub_root_dir):
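
One detail about the np.savez call above: because im_my is a plain dict, NumPy stores it as a 0-d object array, so reading it back needs allow_pickle=True and .item(). A short round-trip sketch (the /tmp path is just for illustration):

import numpy as np

feats = {'seq_a': np.zeros(16), 'seq_b': np.ones(16)}   # stand-in for im_my
np.savez('/tmp/hists_my.npz', **{'ims': feats})

loaded = np.load('/tmp/hists_my.npz', allow_pickle=True)
feats_back = loaded['ims'].item()    # unwrap the 0-d object array into a dict
print(sorted(feats_back.keys()))     # ['seq_a', 'seq_b']
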
Example 7
    def X_all_images(self):
        """Load every frame and stack them along a new trailing axis."""
        out = [
            utls.imread(f)[..., np.newaxis] for f in self.conf.frameFileNames
        ]
        return np.concatenate(out, axis=3)
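
X_all_images turns N frames of shape (H, W, C) into a single (H, W, C, N) array by giving each frame a trailing singleton axis and concatenating along it; np.stack(frames, axis=-1) expresses the same thing more compactly. A tiny shape check on dummy frames:

import numpy as np

frames = [np.zeros((120, 160, 3), dtype=np.uint8) for _ in range(5)]

a = np.concatenate([f[..., np.newaxis] for f in frames], axis=3)
b = np.stack(frames, axis=-1)

print(a.shape, b.shape)      # (120, 160, 3, 5) (120, 160, 3, 5)
print(np.array_equal(a, b))  # True
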
Example 8
save_dir_root = os.path.join(rd.root_dir, 'plots_results', 'frames_labels2018')

n_sets_per_type = 1

dfs = []
# Self-learning

ims = []
gts = []

for key in rd.types:  # Types
    dset = np.asarray(rd.best_dict_ksp[key][0:n_sets_per_type])[0][0]
    conf = rd.confs_dict_ksp[key][dset][0]
    save_dir = os.path.join(save_dir_root, key)
    if (not os.path.exists(save_dir)):
        os.makedirs(save_dir)

    dataset = learning_dataset.LearningDataset(conf)
    gts_fname = sorted(
        glob.glob(
            os.path.join(conf.root_path, conf.ds_dir, conf.truth_dir,
                         '*.png')))
    gts = [utls.imread(f) for f in gts_fname]
    imgs = [utls.imread(f) for f in conf.frameFileNames]

    for i, (im, gt) in enumerate(zip(imgs, gts)):

        both_ = np.concatenate((im, gt), axis=1)
        fname = os.path.join(save_dir, 'im_{}.png'.format(i))
        io.imsave(fname, both_)
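
The final np.concatenate((im, gt), axis=1) only works when the frame and the ground-truth PNG have matching dimensionality and channel count; if the masks happen to come back single-channel from imread, they would need to be promoted to three channels first. A small guard sketch under that assumption:

import numpy as np


def side_by_side(im, gt):
    # Promote a 2-D mask to 3 channels so the horizontal concatenation lines up.
    if gt.ndim == 2:
        gt = np.repeat(gt[..., np.newaxis], 3, axis=2)
    return np.concatenate((im, gt), axis=1)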