# Assumed imports for this example: standard/scientific packages below plus the
# project's own `csv` helper module (providing readCsv and draw2DPoint; not the
# standard-library csv).
import glob
import logging
import os

import numpy as np
from skimage import io, segmentation


def main(conf, logger=None):

    # Use the caller-supplied logger if given; otherwise fall back to the module logger.
    if logger is None:
        logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Writing result frames to: ' + conf.dataOutDir)
    logger.info('--------')

    res = np.load(os.path.join(conf.dataOutDir, 'results.npz'))

    frame_dir = os.path.join(conf.dataOutDir, 'results')
    if (not os.path.exists(frame_dir)):
        logger.info('Creating output frame dir: {}'.format(frame_dir))
        os.makedirs(frame_dir)

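    # Scale the KSP superpixel score mask to the 0-255 range for display and load
    # the input frames and ground-truth masks.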
    scores = (res['ksp_scores_mat'].astype('uint8')) * 255
    imgs = [io.imread(f) for f in conf.frameFileNames]
    truth_dir = os.path.join(conf.root_path, conf.ds_dir, conf.truth_dir)
    gts = [
        io.imread(f)
        for f in sorted(glob.glob(os.path.join(truth_dir, '*.png')))
    ]

    locs2d = csv.readCsv(
        os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                     conf.csvFileName_fg))

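    # For each frame: overlay the ground-truth contours (red) and the 2D location
    # points on the image, then save it side-by-side with the KSP score mask.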
    for f in range(scores.shape[-1]):
        logger.info('{}/{}'.format(f + 1, scores.shape[-1]))
        cont_gt = segmentation.find_boundaries(gts[f], mode='thick')
        idx_cont_gt = np.where(cont_gt)

        im = csv.draw2DPoint(locs2d, f, imgs[f], radius=7)

        im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
        score_ = np.repeat(scores[..., f][..., np.newaxis], 3, axis=2)
        im_ = np.concatenate((im, score_), axis=1)

        io.imsave(os.path.join(frame_dir, 'im_{0:04d}.png'.format(f)), im_)
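
# Minimal usage sketch (illustration only, not part of the original example): the
# real configuration object comes from the host project; only the attributes read
# by main() above are set here, and every path/name below is a placeholder.
if __name__ == '__main__':
    from types import SimpleNamespace

    logging.basicConfig(level=logging.INFO)
    conf = SimpleNamespace(
        dataOutDir='/tmp/ksp_out',
        frameFileNames=sorted(glob.glob('/data/my_sequence/frames/*.png')),
        root_path='/data',
        ds_dir='my_sequence',
        truth_dir='ground_truth',
        locs_dir='locs',
        csvFileName_fg='locs_fg.csv',
    )
    main(conf)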
Example #2
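        # This example is a fragment taken from inside nested loops of a results-
        # comparison script: it assumes `confs`, `rd` (a results-directory helper),
        # `key`, `dset`, `gset`, the `ims` list, and `gray2rgb` (skimage.color) are
        # defined or imported by the enclosing code, which is not shown here.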
        dataset = learning_dataset.LearningDataset(confs[0])
        gt = dataset.gt

        f = rd.self_frames_dict[key][dset]

        # Image
        cont_gt = segmentation.find_boundaries(gt[..., f], mode='thick')
        idx_cont_gt = np.where(cont_gt)
        im = utls.imread(confs[0].frameFileNames[f])
        im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)

        locs2d = utls.readCsv(
            os.path.join(confs[0].root_path, confs[0].ds_dir,
                         confs[0].locs_dir, confs[0].csvFileName_fg))
        im = csv.draw2DPoint(locs2d, f, im, radius=7)
        ims[-1].append(im)

        file_ksp = os.path.join(rd.root_dir,
                                rd.res_dirs_dict_ksp[key][dset][gset],
                                'metrics.npz')

        file_pm = sorted(
            glob.glob(
                os.path.join(rd.root_dir,
                             rd.res_dirs_dict_ksp[key][dset][gset],
                             'pm_scores*.npz')))[-1]

        print('Loading (KSP): ' + file_ksp)
        npzfile = np.load(file_ksp)
        ksp_ = gray2rgb(npzfile['ksp_scores'][..., f])
Example #3
# Additional assumed imports for this example: pandas as pd, matplotlib.pyplot as
# plt, progressbar, sklearn.metrics (roc_curve, precision_recall_curve, auc,
# f1_score), plus project-internal helpers (utls, csv, ds.DataManager,
# learning_dataset).
def main(conf, plot_fname='metrics.pdf', logger=None, skip_frames=False):

    # Use the caller-supplied logger if given; otherwise fall back to the module logger.
    if logger is None:
        logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    metrics_path = os.path.join(conf.dataOutDir, 'metrics.npz')

    if (not os.path.exists(metrics_path)):

        res = np.load(os.path.join(conf.dataOutDir, 'results.npz'))

        list_paths_back = res['list_paths_back']
        list_paths_for = res['list_paths_for']

        my_dataset = ds.DataManager(conf)
        my_dataset.load_labels_if_not_exist()
        #my_dataset.load_pm_fg_from_file()

        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        #l_dataset.make_y_array_true(l_dataset.gt)

        fpr_ksp = [0.]
        tpr_ksp = [0.]
        pr_ksp = [1.]
        rc_ksp = [0.]
        f1_ksp = []

        logger.info('[1/4] Calculating metrics on KSP... ')

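        # Convert the forward/backward KSP paths to seed superpixels and score the
        # binary KSP output against the ground truth (ROC and precision/recall).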
        seeds = utls.list_paths_to_seeds(list_paths_for, list_paths_back)
        l_dataset.set_seeds(seeds)
        #l_dataset.make_y_array(l_dataset.seeds)

        ksp_scores = res['ksp_scores_mat']
        fpr, tpr, _ = roc_curve(l_dataset.gt.ravel(), ksp_scores.ravel())
        precision, recall, _ = precision_recall_curve(l_dataset.gt.ravel(),
                                                      ksp_scores.ravel())
        num = precision * recall
        denum = precision + recall
        f1_ = np.nan_to_num(2 * (num) / (denum))
        f1_ksp.append(np.max(f1_))
        fpr_ksp.append(fpr[1])
        tpr_ksp.append(tpr[1])
        pr_ksp.append(precision[1])
        rc_ksp.append(recall[1])

        fpr_ksp.append(1.)
        tpr_ksp.append(1.)
        pr_ksp.append(0.)
        rc_ksp.append(1.)

        logger.info('[2/4] Calculating metrics on PM... ')
        fpr_pm = []
        tpr_pm = []
        pr_pm = []
        rc_pm = []
        f1_pm = []

        seeds = utls.list_paths_to_seeds(list_paths_for, list_paths_back)
        l_dataset.fg_marked = seeds
        l_dataset.calc_pm(l_dataset.fg_marked,
                          save=False,
                          marked_feats=None,
                          all_feats_df=my_dataset.sp_desc_df,
                          in_type='not csv',
                          mode='foreground',
                          bag_n_feats=conf.bag_n_feats,
                          feat_fields=['desc'],
                          n_jobs=conf.bag_jobs)

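        # Build the pixel-wise foreground probability map (PM) from the bagging
        # model and score it against the ground truth.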
        pm = l_dataset.get_pm_array()

        fpr, tpr, _ = roc_curve(l_dataset.gt.ravel(), pm.ravel())
        #fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        precision, recall, _ = precision_recall_curve(l_dataset.gt.ravel(),
                                                      pm.ravel())
        num = precision * recall
        denum = precision + recall
        f1_ = np.nan_to_num(2 * (num) / (denum))
        f1_pm.append(np.max(f1_))
        fpr_pm.append(fpr)
        tpr_pm.append(tpr)
        pr_pm.append(precision)
        rc_pm.append(recall)

        # Saving metrics
        data = dict()
        data['fpr_pm'] = fpr_pm
        data['tpr_pm'] = tpr_pm
        data['pr_pm'] = pr_pm
        data['rc_pm'] = rc_pm
        data['f1_pm'] = f1_pm

        data['fpr_ksp'] = fpr_ksp
        data['tpr_ksp'] = tpr_ksp
        data['pr_ksp'] = pr_ksp
        data['rc_ksp'] = rc_ksp
        data['f1_ksp'] = f1_ksp

        data['ksp_scores'] = ksp_scores
        data['pm'] = pm
        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)

    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        fpr_pm = metrics['fpr_pm']
        tpr_pm = metrics['tpr_pm']
        pr_pm = metrics['pr_pm']
        rc_pm = metrics['rc_pm']
        f1_pm = metrics['f1_pm']

        fpr_ksp = metrics['fpr_ksp']
        tpr_ksp = metrics['tpr_ksp']
        pr_ksp = metrics['pr_ksp']
        rc_ksp = metrics['rc_ksp']
        f1_ksp = metrics['f1_ksp']

        ksp_scores = metrics['ksp_scores']
        if ('pm' in metrics.keys()):
            pm = metrics['pm']
        else:
            pm = metrics['pm_ksp']

        my_dataset = ds.DataManager(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)

        res = np.load(os.path.join(conf.dataOutDir, 'results.npz'))
        list_paths_back = res['list_paths_back']
        list_paths_for = res['list_paths_for']

    # Plot all iterations of PM
    conf.roc_xlim = [0, 0.4]
    conf.pr_rc_xlim = [0.6, 1.]

    lw = 1

    # PM curves
    auc_ = auc(np.asarray(fpr_pm[-1]).ravel(), np.asarray(tpr_pm[-1]).ravel())
    max_f1 = np.max(f1_pm[-1])

    plt.subplot(121)
    plt.plot(np.asarray(fpr_pm[-1]).ravel(),
             np.asarray(tpr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    auc_ = auc(np.asarray(rc_pm[-1]).ravel(), np.asarray(pr_pm[-1]).ravel())
    plt.subplot(122)
    plt.plot(np.asarray(rc_pm[-1]).ravel(),
             np.asarray(pr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    # Plot KSP
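    # NOTE: the `reorder` argument was removed from sklearn.metrics.auc in newer
    # scikit-learn releases; on recent versions, sort the (x, y) pairs by x and
    # drop the argument.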
    auc_ = auc(np.asarray(fpr_ksp).ravel(),
               np.asarray(tpr_ksp).ravel(),
               reorder=True)
    max_f1 = np.max(f1_ksp)
    plt.subplot(121)
    plt.plot(np.asarray(fpr_ksp).ravel(),
             np.asarray(tpr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_ksp).ravel(),
               np.asarray(pr_ksp).ravel(),
               reorder=True)
    plt.plot(np.asarray(rc_ksp).ravel(),
             np.asarray(pr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    plt.subplot(121)
    plt.legend()
    plt.xlim(conf.roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.subplot(122)
    plt.legend()
    plt.xlim(conf.pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle(conf.seq_type + ', ' + conf.ds_dir + '\n' + 'T: ' +
                 str(conf.T))
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    fig.savefig(os.path.join(conf.dataOutDir, plot_fname), dpi=200)

    # Make plots
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    elif (skip_frames):
        logger.info('[!!!] Skipping saving of frames')
    else:
        logger.info('Saving KSP, PM...')
        n_iters_ksp = len(list_paths_back)

        if (conf.csvFileType == 'pandas'):
            locs2d = pd.read_csv(
                os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                             conf.csvFileName_fg))
        elif (conf.csvFileType == 'anna'):
            locs2d = utls.readCsv(
                os.path.join(conf.root_path, conf.ds_dir, conf.locs_dir,
                             conf.csvFileName_fg))
        os.mkdir(frame_path)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = csv.draw2DPoint(utls.pandas_to_std_csv(locs2d),
                                     f,
                                     im,
                                     radius=7)

                bar.update(f)
                plt.clf()  # start each frame's montage on a clean figure
                plt.subplot(221)
                plt.imshow(ksp_scores[..., f])
                plt.title('KSP')
                plt.subplot(222)
                plt.imshow(pm[..., f])
                plt.title('KSP -> PM')
                plt.subplot(223)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f) + ', n_iters_ksp: ' +
                             str(n_iters_ksp))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)

    logger.info('Saving SPs per iterations plot...')
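    # Count the seed superpixels recovered after each KSP iteration.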
    n_sps = []
    for i in range(len(list_paths_back)):

        seeds = utls.list_paths_to_seeds(list_paths_for,
                                         list_paths_back,
                                         iter_=i)
        n = seeds.shape[0]
        n_sps.append((i + 1, n))

    n_sps.append((len(list_paths_back) + 1, n))
    n_sps = np.asarray(n_sps)

    plt.clf()  # start the superpixel-count plot on a fresh figure
    plt.plot(n_sps[:, 0], n_sps[:, 1], 'bo-')
    plt.plot(n_sps[-1, 0], n_sps[-1, 1], 'ro')
    plt.xlabel('iterations')
    plt.ylabel('num. of superpixels')
    plt.title('num of superpixels vs. iterations. SS threshold: ' +
              str(conf.ss_thr))
    plt.savefig(os.path.join(conf.dataOutDir, 'sps_iters.eps'), dpi=200)

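    # Pixel-level F1 scores: best threshold over the PM curve vs. the binary KSP
    # output, written to scores.csv.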
    pr_pm, rc_pm, _ = precision_recall_curve(l_dataset.gt.ravel(), pm.ravel())
    ksp_pm_pix_f1 = np.max(2 * (pr_pm * rc_pm) / (pr_pm + rc_pm))
    ksp_pix_f1 = f1_score(l_dataset.gt.ravel(), ksp_scores.ravel())

    file_out = os.path.join(conf.dataOutDir, 'scores.csv')
    logger.info('Saving to {}'.format(file_out))
    C = pd.Index(["F1"], name="columns")
    I = pd.Index(['KSP', 'KSP/PM'], name="Methods")
    data = np.asarray([ksp_pix_f1, ksp_pm_pix_f1]).reshape(2, 1)
    df = pd.DataFrame(data=data, index=I, columns=C)
    df.to_csv(path_or_buf=file_out)
Example #4
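# Assumed imports for this example: numpy as np, tqdm, os (with os.path.join as
# pjoin), skimage (io, segmentation, draw), plus project-internal helpers (utls,
# csv, DataManager, make_link_agent, LocPriorDataset, colorize).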
def main(cfg):
    locs2d = utls.readCsv(
        os.path.join(cfg.in_path, cfg.locs_dir, cfg.csv_fname))

    # ---------- Descriptors/superpixel costs
    dm = DataManager(cfg.in_path, cfg.precomp_dir)
    dm.calc_superpix(cfg.slic_compactness, cfg.slic_n_sp)

    link_agent, desc_df = make_link_agent(cfg)

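    # Foreground probability map: either the DEC/siamese objectness predictions or
    # a bagging-based PM computed from superpixel descriptors.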
    if (cfg.use_siam_pred):
        print('will use DEC/siam objectness probabilities')
        probas = link_agent.obj_preds
        pm_scores_fg = utls.get_pm_array(link_agent.labels, probas)
    else:
        pm = utls.calc_pm(desc_df,
                          np.array(link_agent.get_all_entrance_sps(desc_df)),
                          cfg.bag_n_feats, cfg.bag_t, cfg.bag_max_depth,
                          cfg.bag_max_samples, cfg.bag_jobs)
        pm_scores_fg = utls.get_pm_array(link_agent.labels, pm)

    dl = LocPriorDataset(cfg.in_path,
                         resize_shape=512,
                         normalization='rescale',
                         csv_fname=cfg.csv_fname)

    cluster_maps = link_agent.make_cluster_maps()

    if (cfg.do_all):
        cfg.fin = np.arange(len(dl))

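    # Build a per-frame montage: annotated image, PM map, thresholded PM, cluster
    # map and entrance probabilities.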
    ims = []
    pbar = tqdm.tqdm(total=len(cfg.fin))
    for fin in cfg.fin:

        loc = locs2d[locs2d['frame'] == fin]
        if (loc.shape[0] > 0):
            i_in, j_in = link_agent.get_i_j(loc.iloc[0])

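            # Entrance probabilities: for every superpixel of this frame, the
            # probability of linking to the superpixel containing the 2D location.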
            entrance_probas = np.zeros(link_agent.labels.shape[1:])
            label_in = link_agent.labels[fin, i_in, j_in]
            for l in np.unique(link_agent.labels[fin]):
                proba = link_agent.get_proba(fin, label_in, fin, l, desc_df)
                entrance_probas[link_agent.labels[fin] == l] = proba

            truth = dl[fin]['label/segmentation'][..., 0]
            truth_ct = segmentation.find_boundaries(truth, mode='thick')
            im1 = dl[fin]['image_unnormal']
            rr, cc = draw.circle_perimeter(i_in,
                                           j_in,
                                           int(cfg.norm_neighbor_in *
                                               im1.shape[1]),
                                           shape=im1.shape)

            im1[truth_ct, ...] = (255, 0, 0)

            im1[rr, cc, 0] = 0
            im1[rr, cc, 1] = 255
            im1[rr, cc, 2] = 0

            im1 = csv.draw2DPoint(locs2d.to_numpy(), fin, im1, radius=7)
            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] > cfg.pm_thr).astype(float)))
            ims_.append(cluster_maps[fin])
            ims_.append(colorize(entrance_probas))
            ims.append(ims_)

        else:
            im1 = dl[fin]['image_unnormal']

            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] > cfg.pm_thr).astype(float)))
            ims_.append(cluster_maps[fin])
            ims_.append(colorize(np.zeros_like(pm_scores_fg[fin])))
            ims.append(ims_)

        pbar.update(1)
    pbar.close()

    if (cfg.do_all):
        print('will save all to {}'.format(cfg.save_path))
        if (not os.path.exists(cfg.save_path)):
            os.makedirs(cfg.save_path)
        pbar = tqdm.tqdm(total=len(ims))
        for i, im in enumerate(ims):
            io.imsave(pjoin(cfg.save_path, 'im_{:04d}.png'.format(i)),
                      np.concatenate(im, axis=1))
            pbar.update(1)
        pbar.close()

    if (cfg.return_dict):
        ims_dicts = []
        for ims_ in ims:
            dict_ = {
                'image': ims_[0],
                'pm': ims_[1],
                'pm_thr': ims_[2],
                'clusters': ims_[3],
                'entrance': ims_[4]
            }
            ims_dicts.append(dict_)
        return ims_dicts

    return np.concatenate([np.concatenate(im, axis=1) for im in ims], axis=0)
Example #5
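# Assumed imports for this example: numpy as np, tqdm, os (with os.path.join as
# pjoin), skimage (io, segmentation, draw, transform), sklearn.metrics (roc_curve,
# precision_recall_curve, auc), plus project-internal helpers (utls, csv,
# SuperpixelExtractor, make_link_agent, LocPriorDataset, colorize).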
def main(cfg):
    locs2d = utls.readCsv(
        os.path.join(cfg.in_path, cfg.locs_dir, cfg.locs_fname))

    # ---------- Descriptors/superpixel costs
    spext = SuperpixelExtractor(cfg.in_path,
                                desc_dir=cfg.precomp_dir,
                                compactness=cfg.slic_compactness,
                                n_segments=cfg.slic_n_sp)
    spext.run()

    link_agent, _ = make_link_agent(cfg)

    probas = link_agent.obj_preds
    pm_scores_fg = utls.get_pm_array(link_agent.labels, probas)

    dl = LocPriorDataset(cfg.in_path,
                         normalization='rescale',
                         locs_fname=cfg.locs_fname,
                         sp_labels_fname='sp_labels.npy')

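    # Optionally score the foreground probability map against the (resized)
    # ground-truth masks: ROC/AUC plus F1 at a 0.5 threshold.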
    scores = dict()
    if cfg.do_scores:
        shape = pm_scores_fg.shape[1:]
        truths = np.array([
            transform.resize(s['label/segmentation'],
                             shape,
                             preserve_range=True).astype(np.uint8) for s in dl
        ])
        fpr, tpr, _ = roc_curve(truths.flatten(), pm_scores_fg.flatten())
        precision, recall, _ = precision_recall_curve(
            truths.flatten(),
            pm_scores_fg.flatten() >= 0.5)
        precision = precision[1]
        recall = recall[1]
        nom = 2 * (precision * recall)
        denom = (precision + recall)
        if denom > 0:
            f1 = nom / denom
        else:
            f1 = 0.

        auc_ = auc(fpr, tpr)
        scores['f1'] = f1
        scores['auc'] = auc_
        scores['fpr'] = fpr
        scores['tpr'] = tpr

    if (cfg.do_all):
        cfg.fin = np.arange(len(dl))

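    # Build a per-frame montage: annotated image, PM map and thresholded PM.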
    ims = []
    pbar = tqdm.tqdm(total=len(cfg.fin))
    for fin in cfg.fin:

        loc = locs2d[locs2d['frame'] == fin]
        if (loc.shape[0] > 0):
            i_in, j_in = link_agent.get_i_j(loc.iloc[0])

            truth = dl[fin]['label/segmentation']
            truth_ct = segmentation.find_boundaries(truth, mode='thick')
            im1 = (255 * dl[fin]['image']).astype(np.uint8)
            rr, cc = draw.circle_perimeter(i_in,
                                           j_in,
                                           int(cfg.norm_neighbor_in *
                                               im1.shape[1]),
                                           shape=im1.shape)
            pos_labels = dl[fin]['annotations']

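            # Outline the positively-annotated superpixels in green and the
            # ground-truth boundary in red.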
            pos_sps = [
                dl[fin]['labels'].squeeze() == l for l in pos_labels['label']
            ]

            pos_ct = [segmentation.find_boundaries(p) for p in pos_sps]

            for p in pos_ct:
                im1[p, ...] = (0, 255, 0)

            im1[truth_ct, ...] = (255, 0, 0)

            im1[rr, cc, 0] = 0
            im1[rr, cc, 1] = 255
            im1[rr, cc, 2] = 0

            im1 = csv.draw2DPoint(locs2d.to_numpy(), fin, im1, radius=7)
            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] >= cfg.pm_thr).astype(float)))
            ims.append(ims_)

        else:

            im1 = (255 * dl[fin]['image']).astype(np.uint8)
            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] >= cfg.pm_thr).astype(float)))
            ims_.append(colorize(np.zeros_like(pm_scores_fg[fin])))
            ims.append(ims_)

        pbar.update(1)
    pbar.close()

    if (cfg.do_all):
        print('will save all to {}'.format(cfg.save_path))
        if (not os.path.exists(cfg.save_path)):
            os.makedirs(cfg.save_path)
        pbar = tqdm.tqdm(total=len(ims))
        for i, im in enumerate(ims):
            io.imsave(pjoin(cfg.save_path, 'im_{:04d}.png'.format(i)),
                      np.concatenate(im, axis=1))
            pbar.update(1)
        pbar.close()

    res = dict()
    ims_dicts = []
    for ims_ in ims:
        dict_ = {
            'image': ims_[0],
            'pm': ims_[1],
            'pm_thr': ims_[2],
        }
        ims_dicts.append(dict_)
    res['images'] = ims_dicts
    res['scores'] = scores
    return res