Exemplo n.º 1
0
def main(conf, plot_fname='metrics.pdf', csv_fname='score.csv', logger=None):
    """Compute, cache and plot seed-classification metrics for the Vilarino baseline.

    Computes (or reloads from a cached ``metrics.npz``) the F1 score between
    predicted seeds and ground-truth seeds, writes the score to ``csv_fname``,
    and renders per-frame comparison figures into a ``vilar_frames`` directory.

    NOTE(review): the ``logger`` and ``plot_fname`` parameters are unused --
    ``logger`` is immediately overwritten below and ``plot_fname`` is never
    referenced.
    """

    logger = logging.getLogger('plot_results_vilar')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    # Compute metrics only once; cached results live in metrics.npz.
    if (not os.path.exists(os.path.join(conf.dataOutDir, 'metrics.npz'))):

        my_dataset = ds.DatasetVilar(conf)
        my_dataset.load_labels_if_not_exist()
        #l_dataset = learning_dataset.Dataset(conf)
        l_dataset = learning_dataset.LearningDataset(conf)

        logger.info('[1/4] Loading predicted frames... ')
        # Stack per-frame predictions into a (H, W, n_frames) volume.
        pred_frames = np.asarray([
            my_dataset.get_pred_frame(f)
            for f in range(len(conf.frameFileNames))
        ]).transpose(1, 2, 0)

        logger.info('[2/4] Extracting seeds... ')
        seeds = utls.make_y_array_true(pred_frames, my_dataset.labels)

        l_dataset.make_y_array_true(l_dataset.gt)
        seeds_true = l_dataset.y_true

        logger.info('[3/4] Calculating metrics... ')
        # Column 2 of the seed arrays holds the binary label (see the
        # positive-seed filtering below).
        f1 = f1_score(seeds_true[:, 2], seeds[:, 2])
        logger.info('f1 score: ' + str(f1))

        logger.info('[4/4] Calculating maps... ')

        # Saving metrics
        data = dict()
        data['f1'] = f1
        data['seeds'] = seeds
        data['seeds_true'] = seeds_true

        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)

    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        f1 = metrics['f1']
        seeds = metrics['seeds']
        seeds_true = metrics['seeds_true']

        my_dataset = ds.DatasetVilar(conf)
        my_dataset.load_labels_if_not_exist()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)

    # Persist the F1 score as a one-cell CSV table.
    csv_out = os.path.join(conf.dataOutDir, csv_fname)
    logger.info('Saving f1 scores to: ' + csv_fname)
    C = pd.Index(["F1"], name="columns")
    data = np.asarray(f1).reshape(1, 1)
    df = pd.DataFrame(data=data, columns=C)
    df.to_csv(path_or_buf=csv_out)

    # Plot all iterations of PM

    # Make plots
    logger.info('Saving frames...')
    gt = l_dataset.gt
    frame_dir = 'vilar_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        os.mkdir(frame_path)
        # Keep only positive seeds (column 2 truthy); columns 0:2 are the
        # seed coordinates (presumably frame index and superpixel label --
        # TODO confirm against make_y_array_true).
        seeds_true = seeds_true[np.where(seeds_true[:, 2])[0], 0:2]
        seeds = seeds[np.where(seeds[:, 2])[0], 0:2]
        scores_true = utls.seeds_to_scores(my_dataset.labels, seeds_true)
        scores = utls.seeds_to_scores(my_dataset.labels, seeds)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                # Overlay ground-truth contour (red) and gaze point on frame.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = gaze.drawGazePoint(conf.myGaze_fg, f, im, radius=7)
                pred_frame = my_dataset.get_pred_frame(f)

                bar.update(f)
                # 3x2 montage: true/vilarino scores, image, pixel prediction.
                plt.subplot(321)
                plt.imshow(scores_true[..., f])
                plt.title('True')
                plt.subplot(322)
                plt.imshow(scores[..., f])
                plt.title('Vilarino')
                plt.subplot(323)
                plt.imshow(im)
                plt.title('image')
                plt.subplot(324)
                plt.imshow(pred_frame)
                plt.title('pixel prediction')
                plt.suptitle('frame: ' + str(f))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)
Exemplo n.º 2
0
def main(confs,
         confunet,
         out_dir=None,
         train=True,
         pred=True,
         score=True,
         resume_model=None):
    """Cross-validated U-Net experiment: train, predict and score per fold.

    For each of 4 folds, trains one U-Net on KSP-derived ("my") masks and
    one on ground-truth ("true") masks using the other folds, predicts on
    the held-out fold, and writes per-fold metrics to ``scores_<fold>.npz``.

    Args:
        confs: per-sequence configuration objects (one per fold).
        confunet: U-Net configuration (image size, epoch count, ...).
        out_dir: output directory; auto-generated from the date when None.
        train, pred, score: toggles for the three pipeline stages.
        resume_model: when not None, training resumes from the latest
            checkpoint found on disk (if any).
    """

    n_points = 2000

    if (out_dir is None):
        now = datetime.datetime.now()
        dateTime = now.strftime("%Y-%m-%d_%H-%M-%S")
        out_dir = os.path.join(
            confs[0].dataOutRoot, 'learning_exps',
            'learning_' + confs[0].seq_type + '_' + dateTime)

    dir_in = [c.dataOutDir for c in confs]

    if (not os.path.exists(out_dir)):
        os.mkdir(out_dir)

    with open(os.path.join(out_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(confunet, outfile, default_flow_style=True)

    datasets = []
    utls.setup_logging(out_dir)
    logger = logging.getLogger('learning_exp_unet')

    logger.info('Starting learning experiment on:')
    logger.info(dir_in)
    logger.info('Gaze file: ' + str(confs[0].csvFileName_fg))
    logger.info('')

    # Build and cache the per-sequence datasets once (datasets.npz).
    if (not os.path.exists(os.path.join(out_dir, 'datasets.npz'))):
        logger.info('Building target vectors')
        for i in range(len(dir_in)):
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated (PyYAML >= 5.1) and unsafe on untrusted files.
            with open(os.path.join(dir_in[i], 'cfg.yml'), 'r') as outfile:
                conf = yaml.load(outfile)

            logger.info('Dataset: ' + str(i + 1) + '/' + str(len(dir_in)))

            dataset = learning_dataset.LearningDataset(conf)

            npz_file = np.load(os.path.join(dir_in[i], 'results.npz'))

            #seeds = np.asarray(utls.get_node_list_tracklets(npz_file['list_ksp'][-1]))
            dataset.scores = npz_file['ksp_scores_mat'].astype(bool)
            datasets.append(dataset)

        if (not os.path.exists(out_dir)):
            os.mkdir(out_dir)

        logger.info('saving datasets to: ' + out_dir)
        np.savez(os.path.join(out_dir, 'datasets.npz'),
                 **{'datasets': datasets})

    dir_my_root = os.path.join(out_dir, 'my')
    dir_true_root = os.path.join(out_dir, 'true')
    n_folds = 4

    if (train):
        from nets import UNetBasic

        logger.info('Loading datasets...')
        datasets = np.load(os.path.join(out_dir, 'datasets.npz'))['datasets']
        fold_ids = np.arange(0, 4)[::-1]

        for i in range(n_folds):

            logger.info('-----------------')
            pred_fold = i
            # Train on every fold except the held-out prediction fold.
            train_folds = np.asarray([
                fold_ids[j] for j in range(n_folds)
                if (fold_ids[j] != pred_fold)
            ])

            logger.info('train_folds: ' + str(train_folds))
            logger.info('pred_folds: ' + str(pred_fold))
            logger.info('-----------------')
            logger.info('Extracting X')
            X = (resize_datasets([
                datasets[train_folds[j]].X_all_images
                for j in range(train_folds.shape[0])
            ], confunet.unet_im_size) * 255).astype(np.uint8)
            logger.info('Extracting y')
            # Tile single-channel masks to 3 channels for the network input.
            # NOTE(review): the masks are already scaled by 255 inside the
            # list; the outer `* 255` then wraps under uint8 unless
            # resize_datasets rescales to [0, 1] -- confirm.
            y = (resize_datasets([
                np.tile(
                    datasets[train_folds[j]].scores[:, :, np.newaxis, :].
                    astype(np.uint8) * 255, (1, 1, 3, 1))
                for j in range(train_folds.shape[0])
            ], confunet.unet_im_size) * 255).astype(np.uint8)

            # Set dirs
            dir_my = os.path.join(dir_my_root, 'fold_' + str(pred_fold))
            dir_my_train = os.path.join(dir_my, 'train')
            logger.info('Writing _my_ train images/gts to disk...')
            ims_my, gts_my = write_frames_train(X, y, dir_my_train, logger)

            logger.info('Training U-Net on my segmentation...')
            unet_my = UNetBasic.UNetBasic(confunet, dir_my_train, ims_my[0])
            if (resume_model is not None):
                model_path_my = get_model_path(dir_my_train,
                                               take_min_loss=False)
                initial_epoch_my = 0

                if (len(model_path_my) == 0):
                    # No checkpoint found: start from scratch.
                    n_epochs_my = confunet.n_epochs
                    model_path_my = None
                    initial_epoch_my = 0
                else:
                    n_epochs_my = confunet.n_epochs
                    # Epoch number parsed from the checkpoint filename;
                    # assumes it sits at characters [6:8] -- TODO confirm.
                    initial_epoch_my = int(
                        os.path.split(model_path_my)[-1][6:8])
            else:
                model_path_my = None
                n_epochs_my = confunet.n_epochs
                initial_epoch_my = 0

            unet_my.train(confunet,
                          ims_my,
                          gts_my,
                          dir_my_train,
                          n_epochs_my,
                          initial_epoch=initial_epoch_my,
                          dir_eval_clbk=dir_my,
                          resume_model=model_path_my)

            logger.info('Extracting y')
            # Same extraction as above but from the true ground truth.
            y = (resize_datasets([
                np.tile(
                    datasets[train_folds[j]].gt[:, :, np.newaxis, :].astype(
                        np.uint8) * 255, (1, 1, 3, 1))
                for j in range(train_folds.shape[0])
            ], confunet.unet_im_size) * 255).astype(np.uint8)

            dir_true = os.path.join(dir_true_root, 'fold_' + str(pred_fold))
            dir_true_train = os.path.join(dir_true, 'train')

            logger.info('Writing _true_ train images/gts to disk...')
            ims_true, gts_true = write_frames_train(X, y, dir_true_train,
                                                    logger)

            logger.info('Training U-Net on true segmentation...')
            unet_true = UNetBasic.UNetBasic(confunet, dir_true_train,
                                            ims_true[0])
            if (resume_model is not None):
                model_path_true = get_model_path(dir_true_train,
                                                 take_min_loss=False)
                initial_epoch_true = 0
                if (len(model_path_true) == 0):
                    n_epochs_true = confunet.n_epochs
                    model_path_true = None
                    initial_epoch_true = 0
                else:
                    n_epochs_true = confunet.n_epochs
                    initial_epoch_true = int(
                        os.path.split(model_path_true)[-1][6:8])
            else:
                model_path_true = None
                n_epochs_true = confunet.n_epochs
                initial_epoch_true = 0

            unet_true.train(confunet,
                            ims_true,
                            gts_true,
                            dir_true_train,
                            n_epochs_true,
                            initial_epoch=initial_epoch_true,
                            dir_eval_clbk=dir_true,
                            resume_model=model_path_true)

        else:
            # NOTE(review): this `else` binds to the `for` loop above
            # (for/else), so it runs after every normal loop completion.
            # The message suggests it was meant as the else-branch of a
            # directory-existence check -- verify intent.
            logger.info('Results directory')
            logger.info(dir_my_root)
            logger.info(dir_true_root)
            logger.info('Exist. Delete and re-compute')

    logger.info('-----------------')
    logger.info('Loading datasets...')
    datasets = np.load(os.path.join(out_dir, 'datasets.npz'))['datasets']
    if (pred):
        from nets import UNetBasic
        fold_ids = np.arange(0, 4)[::-1]

        for i in range(n_folds):

            pred_fold = i
            logger.info('Predicting on fold_' + str(pred_fold))

            dir_my = os.path.join(dir_my_root, 'fold_' + str(pred_fold))
            dir_true = os.path.join(dir_true_root, 'fold_' + str(pred_fold))

            #model_path_my = get_best_model_path(os.path.join(dir_my, 'train'))
            model_path_my = get_model_path(os.path.join(dir_my, 'train'),
                                           take_min_loss=False)

            #model_path_true = get_best_model_path(os.path.join(dir_true,'train'))
            model_path_true = get_model_path(os.path.join(dir_true, 'train'),
                                             take_min_loss=False)

            dir_true_pred = os.path.join(dir_true, 'pred')
            dir_my_pred = os.path.join(dir_my, 'pred')
            dir_true_pred_res = os.path.join(dir_true, 'pred_res')
            dir_my_pred_res = os.path.join(dir_my, 'pred_res')

            logger.info('Will use models:')
            logger.info(model_path_my)
            logger.info(model_path_true)

            logger.info('Extracting y_my/y_true')
            X = datasets[pred_fold].X_all_images
            X = resize_stack(X, confunet.unet_im_size)

            logger.info('Writing _my_ pred images to disk...')
            ims_my = write_frames_pred(X, dir_my_pred, logger)

            logger.info('Writing _true_ pred images to disk...')
            ims_true = write_frames_pred(X, dir_true_pred, logger)

            # Per-channel normalization statistics.
            # NOTE(review): computed on the *prediction* fold, not the
            # training folds prepared below -- confirm this is intended.
            X = X.transpose((3, 0, 1, 2))
            mean = np.mean(X.reshape(-1, 3), axis=0)
            std = np.std(X.reshape(-1, 3), axis=0)

            unet_true = UNetBasic.UNetBasic(confunet, dir_true_pred,
                                            ims_true[0])
            unet_my = UNetBasic.UNetBasic(confunet, dir_my_pred, ims_my[0])

            # Get normalization parameters of training set
            logger.info('Extracting X (train) normalization factors')

            train_folds = np.asarray([
                fold_ids[j] for j in range(n_folds)
                if (fold_ids[j] != pred_fold)
            ])

            im_list_train = [
                datasets[train_folds[j]].conf.frameFileNames
                for j in range(train_folds.shape[0])
            ]
            # Flatten the per-fold file lists into one flat list.
            im_list_train = [
                item for sublist in im_list_train for item in sublist
            ]
            input_y = unet_my.inputDimY
            input_x = unet_my.inputDimX
            n_chans = unet_my.nbrChannels
            # NOTE(review): ims_train is computed but unused -- the mean/std
            # lines that would consume it are commented out below.
            ims_train = UNetImpl.preprocess_imgs(unet_my, im_list_train,
                                                 input_y, input_x, n_chans)

            #mean = np.mean(ims_train.reshape(-1,3), axis = 0)
            #std = np.std(ims_train.reshape(-1,3), axis = 0)
            #ims_train = utls.normalize_imgs(ims_train, mean, std)

            logger.info('Predicting on my segmentation...')
            preds_my = unet_my.eval(confunet, model_path_my, ims_my, mean, std)

            logger.info('Predicting on true segmentation...')
            preds_true = unet_true.eval(confunet, model_path_true, ims_true,
                                        mean, std)

            logger.info('Writing _my_ pred results images to disk...')
            ims_my = write_frames_pred(preds_my,
                                       dir_my_pred_res,
                                       logger,
                                       nchans=1)

            logger.info('Writing _true_ pred results images to disk...')
            ims_true = write_frames_pred(preds_true,
                                         dir_true_pred_res,
                                         logger,
                                         nchans=1)

    if (score):
        for i in range(n_folds):
            score_dict = dict()
            pred_fold = i
            logger.info('Scoring on fold_' + str(pred_fold))

            dir_my = os.path.join(dir_my_root, 'fold_' + str(pred_fold))
            dir_true = os.path.join(dir_true_root, 'fold_' + str(pred_fold))

            dir_true_pred_res = os.path.join(dir_true, 'pred_res', 'img')
            dir_my_pred_res = os.path.join(dir_my, 'pred_res', 'img')

            # Sort filenames so frames line up with the ground truth order.
            fnames_true_pred_res = glob.glob(
                os.path.join(dir_true_pred_res, '*.png'))
            fnames_true_pred_res = sorted(fnames_true_pred_res)

            fnames_my_pred_res = glob.glob(
                os.path.join(dir_my_pred_res, '*.png'))
            fnames_my_pred_res = sorted(fnames_my_pred_res)

            # Re-load the written predictions and rescale to [0, 1].
            my_preds = np.asarray([utls.imread(f) for f in fnames_my_pred_res
                                   ]).transpose(1, 2, 3, 0) / 255
            true_preds = np.asarray(
                [utls.imread(f)
                 for f in fnames_true_pred_res]).transpose(1, 2, 3, 0) / 255
            gts = (resize_datasets([
                np.tile(datasets[pred_fold].gt[:, :, np.newaxis, :],
                        (1, 1, 3, 1))
            ], confunet.unet_im_size) > 0).astype(np.uint8)

            vals_gt = gts.ravel()
            vals_my = my_preds.ravel()
            vals_true = true_preds.ravel()

            logger.info('Calculating metrics on my... ')
            # Return order interpreted as (fpr, tpr, auc, pr, rc, f1, thr)
            # per the key assignments below -- TODO confirm against
            # get_all_scores.
            all_scores = get_all_scores(vals_gt, vals_my, n_points)

            score_dict['conf'] = datasets[pred_fold].conf
            score_dict['fold'] = pred_fold
            score_dict['fpr_my'] = all_scores[0]
            score_dict['tpr_my'] = all_scores[1]
            score_dict['auc_my'] = all_scores[2]
            score_dict['pr_my'] = all_scores[3]
            score_dict['rc_my'] = all_scores[4]
            score_dict['f1_my'] = all_scores[5]
            score_dict['thr_my'] = all_scores[6]

            logger.info('Calculating metrics on true... ')
            all_scores = get_all_scores(vals_gt, vals_true, n_points)

            score_dict['fpr_true'] = all_scores[0]
            score_dict['tpr_true'] = all_scores[1]
            score_dict['auc_true'] = all_scores[2]
            score_dict['pr_true'] = all_scores[3]
            score_dict['rc_true'] = all_scores[4]
            score_dict['f1_true'] = all_scores[5]
            score_dict['thr_true'] = all_scores[6]

            score_dict['my_preds'] = my_preds
            score_dict['true_preds'] = true_preds

            logger.info('Saving results on fold: ' + str(pred_fold))
            file_out = os.path.join(out_dir,
                                    'scores_' + str(pred_fold) + '.npz')
            np.savez(file_out, **score_dict)
Exemplo n.º 3
0
# Aggregate multi-gaze KSP results across result directories; skipped
# entirely when the cached multigaze.npz output already exists.
file_out = os.path.join(out_result_dir, 'multigaze.npz')
if (not os.path.exists(file_out)):
    ims = []
    ksp_means = []
    #for key in rd.out_dirs_dict_ksp.keys():
    for key in rd.types:
        # NOTE(review): f1 is initialized but never filled in this span --
        # presumably populated further down (chunk looks truncated).
        f1 = []
        for dset in range(len(rd.out_dirs_dict_ksp[key])):
            print('Loading: ' + str(rd.out_dirs_dict_ksp[key][dset]))
            path_ = os.path.join(rd.root_dir, 'learning_exps',
                                 rd.out_dirs_dict_ksp[key][dset])

            # Make images/gts/gaze-point
            ims.append([])
            ksp_means.append([])
            l_dataset = ld.LearningDataset(rd.confs_dict_ksp[key][dset][0],
                                           pos_thr=0.5)
            confs = rd.confs_dict_ksp[key][dset]
            gt = l_dataset.gt
            # Mean KSP score volume saved under 'mean_ksp_scores' by the
            # aggregation step that wrote dataset.npz.
            ksp_mean_all = np.load(os.path.join(
                path_, 'dataset.npz'))['mean_ksp_scores']
            for f in rd.all_frames_dict[key][dset]:
                # Overlay the ground-truth contour (red) on the frame...
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(confs[0].frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                # ...then draw the gaze point of every configuration.
                for key_conf in confs.keys():
                    im = gaze.drawGazePoint(confs[key_conf].myGaze_fg,
                                            f,
                                            im,
                                            radius=7)
Exemplo n.º 4
0
def main(confs, out_dir=None):
    """Cross-validated random-forest experiment on superpixel features.

    Builds (or reloads cached) per-sequence datasets, trains one classifier
    per fold on KSP-derived labels ("my") and one on ground-truth labels
    ("true"), computes per-fold and pooled ROC / precision-recall curves,
    saves summary plots, and writes per-frame prediction montages.

    Args:
        confs: per-sequence configuration objects (one per fold).
        out_dir: output directory; auto-generated from the date when None.
    """

    alpha = 0.3          # opacity of the std-dev bands in the plots
    n_points = 2000      # interpolation resolution for averaged curves
    seq_type = confs[0].seq_type

    if (out_dir is None):
        now = datetime.datetime.now()
        dateTime = now.strftime("%Y-%m-%d_%H-%M-%S")
        out_dir = os.path.join(
            confs[0].dataOutRoot, 'learning_exps',
            'learning_' + confs[0].seq_type + '_' + dateTime)

    dir_in = [c.dataOutDir for c in confs]

    if (not os.path.exists(out_dir)):
        os.mkdir(out_dir)

    datasets = []
    utls.setup_logging(out_dir)
    logger = logging.getLogger('learning_exp')

    logger.info('Starting learning experiment on:')
    logger.info(dir_in)
    logger.info('Gaze file: ' + str(confs[0].csvFileName_fg))
    logger.info('')

    # Build and cache the per-sequence datasets once (datasets.npz).
    if (not os.path.exists(os.path.join(out_dir, 'datasets.npz'))):
        logger.info('Building target vectors')
        for i in range(len(dir_in)):
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated (PyYAML >= 5.1) and unsafe on untrusted files.
            with open(os.path.join(dir_in[i], 'cfg.yml'), 'r') as outfile:
                conf = yaml.load(outfile)

            logger.info('Dataset: ' + str(i + 1) + '/' + str(len(dir_in)))
            # NOTE(review): `files` is computed but never used below.
            files = sorted(
                glob.glob(os.path.join(dir_in[i], 'pm_scores_iter*')))[-1]

            #logger.info('Init. learner')
            dataset = learning_dataset.LearningDataset(conf)

            npz_file = np.load(os.path.join(dir_in[i], 'results.npz'))

            # Seeds come from the last KSP iteration.
            seeds = np.asarray(
                utls.get_node_list_tracklets(npz_file['list_ksp'][-1]))
            if (confs[0].use_ss):
                dataset.load_ss_from_file()
                seeds = ss.thr_all_graphs(dataset.g_ss, seeds, conf.ss_thr)
            dataset.set_seeds(seeds)

            dataset.make_y_array(seeds)
            dataset.make_y_array_true(dataset.gt)

            datasets.append(dataset)

        if (not os.path.exists(out_dir)):
            os.mkdir(out_dir)

        logger.info('saving datasets to: ' + out_dir)
        np.savez(os.path.join(out_dir, 'datasets.npz'),
                 **{'datasets': datasets})
    else:
        logger.info('Loading datasets...')
        datasets = np.load(os.path.join(out_dir, 'datasets.npz'))['datasets']

    n_folds = 4
    fold_ids = np.arange(0, 4)[::-1]
    res_list = []

    # NOTE(review): recorded below as 'n_estimators', but the forests
    # actually use n_trees = datasets[0].conf.T -- this value may be stale.
    n_e = 150

    # Train/predict per fold only once; cached results live in results.npz.
    if (not os.path.exists(os.path.join(out_dir, 'results.npz'))):
        for i in range(n_folds):

            logger.info('-----------------')
            pred_fold = i
            # Train on every fold except the held-out prediction fold.
            train_folds = np.asarray([
                fold_ids[j] for j in range(n_folds)
                if (fold_ids[j] != pred_fold)
            ])

            logger.info('train_folds: ' + str(train_folds))
            logger.info('pred_folds: ' + str(pred_fold))
            logger.info('-----------------')

            X_train = utls.concat_arr(
                np.concatenate([
                    datasets[train_folds[j]].X
                    for j in range(train_folds.shape[0])
                ]))
            # Column 2 of y / y_true holds the binary label.
            y_train_my = np.concatenate([
                datasets[train_folds[j]].y[:, 2]
                for j in range(train_folds.shape[0])
            ])
            y_train_true = np.concatenate([
                datasets[train_folds[j]].y_true[:, 2]
                for j in range(train_folds.shape[0])
            ])

            logger.info('Extracting X_test')
            X_test = utls.concat_arr(datasets[pred_fold].X)
            logger.info('Extracting y_test')
            y_test = datasets[pred_fold].y_true[:, 2]

            logger.info('Fitting...')
            bag_n_feats = confs[0].bag_n_feats_rf
            bag_max_depth = confs[0].bag_max_depth_rf
            logger.info('bag_n_feats: ' + str(bag_n_feats))
            logger.info('bag_max_depth: ' + str(bag_max_depth))
            # NOTE(review): bag_max_depth is logged but never passed to the
            # classifiers -- confirm whether max_depth should be set.
            n_trees = datasets[0].conf.T
            clf_my = RandomForestClassifier(max_features=bag_n_feats,
                                            class_weight='balanced',
                                            n_estimators=n_trees)
            clf_true = RandomForestClassifier(max_features=bag_n_feats,
                                              class_weight='balanced',
                                              n_estimators=n_trees)
            clf_my.fit(X_train, y_train_my)
            clf_true.fit(X_train, y_train_true)

            logger.info('Predicting...')
            # Probability of the positive class.
            probas_my = clf_my.predict_proba(X_test)[:, 1]
            probas_true = clf_true.predict_proba(X_test)[:, 1]

            #probas_my = rf.run(X_train,y_train_my,X_test,150)
            #probas_true = rf.run(X_train,y_train_true,X_test,150)

            logger.info('Computing ROC curves on true model')
            fpr_true, tpr_true, thresholds_true = roc_curve(
                y_test, probas_true)

            auc_true = auc(fpr_true, tpr_true)
            logger.info('auc_true: ' + str(auc_true))
            logger.info('Computing ROC curves on my model')
            fpr_my, tpr_my, thresholds_my = roc_curve(y_test,
                                                      probas_my,
                                                      pos_label=1)
            auc_my = auc(fpr_my, tpr_my)
            logger.info('auc_my: ' + str(auc_my))

            logger.info('Computing prec-recall curves on true model')
            precision_true, recall_true, _ = precision_recall_curve(
                y_test, probas_true)
            logger.info('Computing prec-recall curves on my model')
            precision_my, recall_my, _ = precision_recall_curve(
                y_test, probas_my)

            # Collect everything needed for plotting into one record.
            dict_ = dict()
            dict_['train_folds'] = train_folds
            dict_['pred_fold'] = pred_fold
            dict_['n_estimators'] = n_e
            dict_['fpr_true'] = fpr_true
            dict_['tpr_true'] = tpr_true
            dict_['fpr_my'] = fpr_my
            dict_['tpr_my'] = tpr_my
            dict_['auc_true'] = auc_true
            dict_['precision_true'] = precision_true
            dict_['recall_true'] = recall_true
            # NOTE(review): duplicate assignment -- 'auc_true' was already
            # set above with the same value; harmless but redundant.
            dict_['auc_true'] = auc_true
            dict_['auc_my'] = auc_my
            dict_['precision_my'] = precision_my
            dict_['recall_my'] = recall_my
            dict_['probas_my'] = probas_my
            dict_['probas_true'] = probas_true
            dict_['y_test'] = y_test

            res_list.append(dict_)

        file_out = os.path.join(out_dir, 'results.npz')
        logger.info('Saving metrics to ')
        np.savez(file_out, **{'res_list': res_list})
    else:
        logger.info('Loading results...')
        res_list = np.load(os.path.join(out_dir, 'results.npz'))['res_list']

    #Plot folds
    colors = ['blue', 'darkorange', 'seagreen', 'yellow', 'blue']
    lw = 1
    plt.clf()

    # Gather per-fold curves before interpolating onto a shared axis.
    l_fpr_true = []
    l_tpr_true = []
    l_pr_true = []
    l_rc_true = []

    l_fpr_my = []
    l_tpr_my = []
    l_pr_my = []
    l_rc_my = []

    for i in range(len(res_list)):
        fpr_true = res_list[i]['fpr_true']
        tpr_true = res_list[i]['tpr_true']
        #fpr_true, tpr_true = utls.my_interp(fpr_true, tpr_true, n_points)
        l_fpr_true.append(fpr_true)
        l_tpr_true.append(tpr_true)

        fpr_my = res_list[i]['fpr_my']
        tpr_my = res_list[i]['tpr_my']
        #fpr_my, tpr_my = utls.my_interp(fpr_my, tpr_my, n_points)
        l_fpr_my.append(fpr_my)
        l_tpr_my.append(tpr_my)

        pr_true = res_list[i]['precision_true']
        rc_true = res_list[i]['recall_true']
        #rc_true, pr_true = utls.my_interp(rc_true, pr_true, n_points)
        l_rc_true.append(rc_true)
        l_pr_true.append(pr_true)

        pr_my = res_list[i]['precision_my']
        rc_my = res_list[i]['recall_my']
        #rc_my, pr_my = utls.my_interp(rc_my, pr_my, n_points)
        l_rc_my.append(rc_my)
        l_pr_my.append(pr_my)

    # Common abscissa ranges over all folds, used for interpolation below.
    rc_range_my = [
        np.min([np.min(l_rc_my[i]) for i in range(len(l_rc_my))]),
        np.max([np.max(l_rc_my[i]) for i in range(len(l_rc_my))])
    ]

    rc_range_true = [
        np.min([np.min(l_rc_true[i]) for i in range(len(l_rc_true))]),
        np.max([np.max(l_rc_true[i]) for i in range(len(l_rc_true))])
    ]

    rc_range = [
        np.min((rc_range_my[0], rc_range_true[0])),
        np.max((rc_range_my[1], rc_range_true[1]))
    ]

    fpr_range_my = [
        np.min([np.min(l_fpr_my[i]) for i in range(len(l_fpr_my))]),
        np.max([np.max(l_fpr_my[i]) for i in range(len(l_fpr_my))])
    ]

    fpr_range_true = [
        np.min([np.min(l_fpr_true[i]) for i in range(len(l_fpr_true))]),
        np.max([np.max(l_fpr_true[i]) for i in range(len(l_fpr_true))])
    ]

    fpr_range = [
        np.min((fpr_range_my[0], fpr_range_true[0])),
        np.max((fpr_range_my[1], fpr_range_true[1]))
    ]

    # Interpolate each fold's curve onto n_points shared x-values so folds
    # can be averaged; after the transpose, axis 0 selects x vs. y.
    l_fpr_tpr_my_interp = np.asarray([
        utls.my_interp(l_fpr_my[i], l_tpr_my[i], n_points, fpr_range)
        for i in range(len(l_fpr_my))
    ]).transpose(1, 0, 2)
    l_fpr_my = l_fpr_tpr_my_interp[0, ...]
    l_tpr_my = l_fpr_tpr_my_interp[1, ...]

    l_pr_rc_my_interp = np.asarray([
        utls.my_interp(l_rc_my[i], l_pr_my[i], n_points, rc_range)
        for i in range(len(l_rc_my))
    ]).transpose(1, 0, 2)
    l_rc_my = l_pr_rc_my_interp[0, ...]
    l_pr_my = l_pr_rc_my_interp[1, ...]

    l_fpr_tpr_true_interp = np.asarray([
        utls.my_interp(l_fpr_true[i], l_tpr_true[i], n_points, fpr_range)
        for i in range(len(l_fpr_true))
    ]).transpose(1, 0, 2)
    l_fpr_true = l_fpr_tpr_true_interp[0, ...]
    l_tpr_true = l_fpr_tpr_true_interp[1, ...]

    l_pr_rc_true_interp = np.asarray([
        utls.my_interp(l_rc_true[i], l_pr_true[i], n_points, rc_range)
        for i in range(len(l_rc_true))
    ]).transpose(1, 0, 2)
    l_rc_true = l_pr_rc_true_interp[0, ...]
    l_pr_true = l_pr_rc_true_interp[1, ...]

    roc_xlim = [0, 1]
    pr_rc_xlim = [0, 1]
    logger.info('Concatenating results for scoring')
    # Pool all folds together for the aggregate curves.
    all_y_true = np.concatenate([r['y_test'] for r in res_list])
    all_probas_my = np.concatenate([r['probas_my'] for r in res_list])
    all_probas_true = np.concatenate([r['probas_true'] for r in res_list])

    fpr_my_all, tpr_my_all, thresholds_my_all = roc_curve(all_y_true,
                                                          all_probas_my,
                                                          pos_label=1)
    fpr_my_all, tpr_my_all = utls.my_interp(fpr_my_all, tpr_my_all, n_points)

    fpr_true_all, tpr_true_all, thresholds_true_all = roc_curve(
        all_y_true, all_probas_true, pos_label=1)
    fpr_true_all, tpr_true_all = utls.my_interp(fpr_true_all, tpr_true_all,
                                                n_points)
    pr_my_all, rc_my_all, _ = precision_recall_curve(all_y_true, all_probas_my)
    pr_my_all, rc_my_all = utls.my_interp(pr_my_all, rc_my_all, n_points)
    pr_true_all, rc_true_all, _ = precision_recall_curve(
        all_y_true, all_probas_true)
    pr_true_all, rc_true_all = utls.my_interp(pr_true_all, rc_true_all,
                                              n_points)
    auc_my_all = auc(fpr_my_all, tpr_my_all)
    # Best-F1 sweep over probability thresholds (max used in plot labels).
    probas_thr = np.linspace(0, 1, n_points)
    f1_my = [f1_score(all_y_true, all_probas_my > p) for p in probas_thr]
    # NOTE(review): the 'true' sweep uses only 200 thresholds, unlike the
    # n_points used for 'my' above -- confirm the asymmetry is intended.
    probas_thr = np.linspace(0, 1, 200)
    f1_true = [f1_score(all_y_true, all_probas_true > p) for p in probas_thr]
    auc_true_all = auc(fpr_true_all, tpr_true_all)

    # Plotting
    lw = 3
    plt.figure('tpr')
    # Mean ROC across folds with a +/- one-std band for each model.
    plt.plot(l_fpr_true.mean(axis=0),
             l_tpr_true.mean(axis=0),
             '-',
             lw=lw,
             color=colors[0],
             label='all folds (true) (area = %0.4f, max_f1 = %0.4f)' %
             (auc_true_all, np.max(f1_true)))

    plt.fill_between(l_fpr_true.mean(axis=0),
                     l_tpr_true.mean(axis=0) + l_tpr_true.std(axis=0),
                     l_tpr_true.mean(axis=0) - l_tpr_true.std(axis=0),
                     facecolor=colors[0],
                     alpha=alpha)

    plt.plot(l_fpr_my.mean(axis=0),
             l_tpr_my.mean(axis=0),
             '-',
             lw=lw,
             color=colors[1],
             label='all folds (my) (area = %0.4f, max_f1 = %0.4f)' %
             (auc_my_all, np.max(f1_my)))

    plt.fill_between(l_fpr_my.mean(axis=0),
                     l_tpr_my.mean(axis=0) + l_tpr_my.std(axis=0),
                     l_tpr_my.mean(axis=0) - l_tpr_my.std(axis=0),
                     facecolor=colors[1],
                     alpha=alpha)
    plt.legend()
    plt.xlim(roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.suptitle('Sequence: ' + seq_type + '. Gaze: ' +
                 confs[0].csvFileName_fg)
    plt.savefig(os.path.join(out_dir, 'folds_tpr_fpr.pdf'))

    plt.figure('rc')
    # Mean precision-recall across folds with +/- one-std bands.
    plt.plot(l_rc_true.mean(axis=0),
             l_pr_true.mean(axis=0),
             '-',
             lw=lw,
             color=colors[0],
             label='all folds (true)')
    plt.fill_between(l_rc_true.mean(axis=0),
                     l_pr_true.mean(axis=0) + l_pr_true.std(axis=0),
                     l_pr_true.mean(axis=0) - l_pr_true.std(axis=0),
                     facecolor=colors[0],
                     alpha=alpha)
    plt.plot(l_rc_my.mean(axis=0),
             l_pr_my.mean(axis=0),
             '-',
             lw=lw,
             color=colors[1],
             label='all folds (my)')
    plt.fill_between(l_rc_my.mean(axis=0),
                     l_pr_my.mean(axis=0) + l_pr_my.std(axis=0),
                     l_pr_my.mean(axis=0) - l_pr_my.std(axis=0),
                     facecolor=colors[1],
                     alpha=alpha)
    plt.legend()
    plt.xlim(pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle('Sequence: ' + seq_type + '. Gaze: ' +
                 confs[0].csvFileName_fg)
    #plt.figure('rc').set_size_inches(18.5, 10.5)
    plt.savefig(os.path.join(out_dir, 'folds_pr_rc.pdf'))

    # Only frames present in every sequence can be shown side by side.
    min_n_frames = np.min([len(d.conf.frameFileNames) for d in datasets])

    dir_frames = os.path.join(out_dir, 'frames')

    if (not os.path.exists(dir_frames)):
        os.mkdir(dir_frames)
    else:
        logger.info('frames already exist, delete and re-run...')
        #shutil.rmtree(dir_frames)
        #os.mkdir(dir_frames)

    logger.info('Generating prediction frames...')
    #Plot by-frame predictions
    for f in range(min_n_frames):
        my = []
        true = []
        ims = []
        for j in range(len(datasets)):

            # Rows of y whose column 0 equals f belong to frame f.
            y_true = datasets[j].y
            idx_y = np.where(y_true[:, 0] == f)[0]
            y_true = y_true[idx_y]
            probas_true = res_list[j]['probas_true'][idx_y]
            probas_my = res_list[j]['probas_my'][idx_y]

            scores_my = utls.get_scores_from_sps(y_true[:, 0:2],
                                                 datasets[j].get_labels(),
                                                 probas_my)[..., f]
            my.append(scores_my)

            scores_true = utls.get_scores_from_sps(y_true[:, 0:2],
                                                   datasets[j].get_labels(),
                                                   probas_true)[..., f]
            true.append(scores_true)

            # Overlay ground-truth contour (red) and gaze point.
            cont_gt = segmentation.find_boundaries(datasets[j].gt[..., f],
                                                   mode='thick')
            idx_cont_gt = np.where(cont_gt)
            im = utls.imread(datasets[j].conf.frameFileNames[f])
            im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
            im = gaze.drawGazePoint(datasets[j].conf.myGaze_fg,
                                    f,
                                    im,
                                    radius=7)
            ims.append(im)
        # 3 rows (true / my / image) x 4 columns (one per dataset).
        gs = gridspec.GridSpec(3, 4)
        for c in range(4):
            ax = plt.subplot(gs[0, c])
            ax.imshow(true[c])
            plt.title('true')

            ax = plt.subplot(gs[1, c])
            ax.imshow(my[c])
            plt.title('my')

            ax = plt.subplot(gs[2, c])
            ax.imshow(ims[c])
            plt.title('image')

        plt.suptitle('Sequence: ' + seq_type + '. Frame: ' + str(f))
        fig = plt.gcf()
        fig.set_size_inches(18.5, 10.5)
        plt.savefig(os.path.join(dir_frames, 'frame_' + str(f) + '.png'))
def main(out_dir,
         confs,
         plot_fname='metrics',
         metrics_fname='metrics.csv',
         logger=None):
    """Aggregate KSP self-learning results over several runs.

    Loads the per-run ``metrics.npz`` saved in each configuration's
    ``dataOutDir``, computes per-pixel mean/std score volumes across runs,
    stores them in ``out_dir/dataset.npz`` and writes merged per-frame
    preview images into ``out_dir/ksp_pm_frames``.

    Args:
        out_dir: Directory receiving the aggregated arrays and frames.
        confs: Sequence of run configurations. ``confs[0]`` supplies the
            ground truth and frame file names (assumed shared by all runs).
        plot_fname: Base plot name forwarded to ``plot_curves``.
        metrics_fname: CSV name forwarded to ``plot_curves``.
        logger: Ignored; a module logger is always created. Kept for
            interface compatibility.
    """

    logger = logging.getLogger('plot_results_ksp')

    out_dirs = [c.dataOutDir for c in confs]
    logger.info('--------')
    logger.info('Self-learning on: ')
    logger.info(out_dirs)
    logger.info('out_dir: ')
    logger.info(out_dir)
    logger.info('--------')

    # Ground truth is taken from the first run; presumably identical across
    # runs of the same sequence — TODO confirm.
    l_dataset = learning_dataset.LearningDataset(confs[0], pos_thr=0.5)

    plot_curves(out_dir, confs, plot_fname, metrics_fname, logger)

    l_ksp_scores = list()
    l_ksp_ss_scores = list()
    l_ksp_ss_thr_scores = list()

    # Collect the per-run (H x W x n_frames) score volumes.
    for i in range(len(confs)):

        file_ = os.path.join(confs[i].dataOutDir, 'metrics.npz')
        logger.info('Loading ' + file_)
        npzfile = np.load(file_)

        l_ksp_scores.append(npzfile['ksp_scores'])
        l_ksp_ss_scores.append(npzfile['ksp_ss_scores'])
        l_ksp_ss_thr_scores.append(npzfile['ksp_ss_thr_scores'])

    # Per-pixel statistics across runs.
    mean_ksp_scores = np.mean(np.asarray(l_ksp_scores), axis=0)
    mean_ksp_ss_scores = np.mean(np.asarray(l_ksp_ss_scores), axis=0)
    mean_ksp_ss_thr_scores = np.mean(np.asarray(l_ksp_ss_thr_scores), axis=0)

    std_ksp_scores = np.std(np.asarray(l_ksp_scores), axis=0)
    std_ksp_ss_scores = np.std(np.asarray(l_ksp_ss_scores), axis=0)
    std_ksp_ss_thr_scores = np.std(np.asarray(l_ksp_ss_thr_scores), axis=0)

    path_ = os.path.join(out_dir, 'dataset.npz')
    data = dict()
    data['mean_ksp_scores'] = mean_ksp_scores
    data['mean_ksp_ss_scores'] = mean_ksp_ss_scores
    data['mean_ksp_ss_thr_scores'] = mean_ksp_ss_thr_scores
    data['std_ksp_scores'] = std_ksp_scores
    data['std_ksp_ss_scores'] = std_ksp_ss_scores
    data['std_ksp_ss_thr_scores'] = std_ksp_ss_thr_scores

    np.savez(path_, **data)

    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(out_dir, frame_dir)
    if (os.path.exists(frame_path)):
        # Frames are expensive to regenerate; treat the directory as a cache.
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        os.mkdir(frame_path)
        c0 = confs[0]
        # Threshold shown in the subplot titles. The original code read the
        # leaked loop variable `c` after the gaze loop below (i.e. the last
        # conf); make that explicit.
        pm_thr = confs[-1].pm_thr
        with progressbar.ProgressBar(maxval=len(c0.frameFileNames)) as bar:
            for f in range(len(c0.frameFileNames)):
                # Overlay ground-truth contour (red) and every run's gaze
                # point on the raw frame.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(c0.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                for c in confs:
                    im = gaze.drawGazePoint(c.myGaze_fg, f, im, radius=7)

                bar.update(f)
                plt.subplot(241)
                plt.imshow(mean_ksp_scores[..., f])
                plt.title('mean KSP')
                plt.subplot(242)
                plt.imshow(std_ksp_scores[..., f])
                plt.title('std KSP')
                plt.subplot(243)
                plt.imshow(mean_ksp_ss_scores[..., f])
                plt.title('mean KSP+SS')
                plt.subplot(244)
                plt.imshow(std_ksp_ss_scores[..., f])
                plt.title('std KSP+SS')
                plt.subplot(245)
                plt.imshow(mean_ksp_ss_thr_scores[..., f])
                plt.title('mean KSP+SS -> PM -> (thr = %0.2f)' % (pm_thr))
                plt.subplot(246)
                plt.imshow(std_ksp_ss_thr_scores[..., f])
                plt.title('std KSP+SS -> PM -> (thr = %0.2f)' % (pm_thr))
                plt.subplot(247)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)
Exemplo n.º 6
0
def main(conf, logger=None):
    """Compute and plot evaluation metrics for one KSP self-learning run.

    Builds (or reloads from ``metrics.npz``) ROC / precision-recall / F1
    metrics for the KSP, KSP+SS, thresholded and PM-refined variants,
    plots the curves to ``metrics.eps``, writes per-frame merged preview
    images and a superpixel-count-vs-iteration plot.

    Args:
        conf: Run configuration; ``conf.dataOutDir`` must contain
            ``results.npz`` (with ``list_ksp``). ``conf.pm_thr`` is
            overwritten with 0.8.
        logger: Ignored; a module logger is always created. Kept for
            interface compatibility.
    """

    logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    # Threshold applied to the KSP+SS posterior map.
    conf.pm_thr = 0.8

    # metrics.npz acts as a cache: compute everything once, then reload.
    if (not os.path.exists(os.path.join(conf.dataOutDir, 'metrics.npz'))):

        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']
        gt_dir = os.path.join(conf.root_path, conf.ds_dir, conf.truth_dir)
        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        my_dataset.load_ss_from_file()

        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        l_dataset.make_y_array_true(l_dataset.gt)

        logger.info('[1/8] Calculating metrics on KSP+SS PM... ')
        #probas_thr = np.linspace(0,1,20)
        # Seeds of the last KSP iteration, expanded through the SS graphs.
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        new_seeds = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr)
        my_dataset.fg_marked = new_seeds
        l_dataset.set_seeds(new_seeds)
        l_dataset.make_y_array(l_dataset.seeds)
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           feat_fields=['desc'],
                           T=conf.T,
                           bag_max_depth=conf.bag_max_depth,
                           bag_n_feats=conf.bag_n_feats)

        probas_ksp_ss_pm = my_dataset.fg_pm_df['proba'].as_matrix()
        fpr_pm_ss, tpr_pm_ss, _ = roc_curve(l_dataset.y_true[:, 2],
                                            probas_ksp_ss_pm)
        pr_pm_ss, rc_pm_ss, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                       probas_ksp_ss_pm)
        probas_thr = np.unique(probas_ksp_ss_pm)
        f1_pm_ss = [
            f1_score(l_dataset.y_true[:, 2], probas_ksp_ss_pm > p)
            for p in probas_thr
        ]

        # KSP produces hard (binary) labelings, so each iteration gives one
        # operating point; pad with the trivial (0,0)/(1,1) endpoints.
        fpr_ksp = [0.]
        tpr_ksp = [0.]
        pr_ksp = [1.]
        rc_ksp = [0.]
        f1_ksp = []

        logger.info('[2/8] Calculating metrics on KSP... ')
        for i in range(len(list_ksp)):
            logger.info('iter: ' + str(i + 1) + '/' + str(len(list_ksp)))

            seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[i]))
            l_dataset.set_seeds(seeds)
            l_dataset.make_y_array(l_dataset.seeds)

            fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            precision, recall, _ = precision_recall_curve(
                l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            f1_ksp.append(f1_score(l_dataset.y_true[:, 2], l_dataset.y[:, 2]))
            # Index 1 is the operating point of the binary prediction.
            fpr_ksp.append(fpr[1])
            tpr_ksp.append(tpr[1])
            pr_ksp.append(precision[1])
            rc_ksp.append(recall[1])

        fpr_ksp.append(1.)
        tpr_ksp.append(1.)
        pr_ksp.append(0.)
        rc_ksp.append(1.)

        fpr_ksp_ss = [0.]
        tpr_ksp_ss = [0.]
        pr_ksp_ss = [1.]
        rc_ksp_ss = [0.]
        f1_ksp_ss = []

        logger.info('[3/8] Calculating metrics on KSP+SS... ')
        for i in range(len(list_ksp)):
            logger.info('iter: ' + str(i + 1) + '/' + str(len(list_ksp)))

            seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[i]))
            new_seeds = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr)
            l_dataset.set_seeds(new_seeds)
            l_dataset.make_y_array(l_dataset.seeds)

            fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            precision, recall, _ = precision_recall_curve(
                l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            f1_ksp_ss.append(
                f1_score(l_dataset.y_true[:, 2], l_dataset.y[:, 2]))
            fpr_ksp_ss.append(fpr[1])
            tpr_ksp_ss.append(tpr[1])
            pr_ksp_ss.append(precision[1])
            rc_ksp_ss.append(recall[1])

        fpr_ksp_ss.append(1.)
        tpr_ksp_ss.append(1.)
        pr_ksp_ss.append(0.)
        rc_ksp_ss.append(1.)

        #Will append thresholded values to old
        fpr_ksp_ss_thr = list(fpr_ksp_ss)
        tpr_ksp_ss_thr = list(tpr_ksp_ss)
        pr_ksp_ss_thr = list(pr_ksp_ss)
        rc_ksp_ss_thr = list(rc_ksp_ss)
        f1_ksp_ss_thr = list(f1_ksp_ss)
        #probas_ksp_ss_pm = my_dataset.fg_pm_df['proba'].as_matrix()

        logger.info('[4/8] Calculating metrics on KSP+SS thresholded... ')
        y_ksp_ss_thr = probas_ksp_ss_pm > conf.pm_thr

        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2],
                                y_ksp_ss_thr.astype(float))
        precision, recall, _ = precision_recall_curve(
            l_dataset.y_true[:, 2], y_ksp_ss_thr.astype(float))
        f1_ksp_ss_thr.append(
            f1_score(l_dataset.y_true[:, 2], y_ksp_ss_thr.astype(float)))
        fpr_ksp_ss_thr.append(fpr[1])
        tpr_ksp_ss_thr.append(tpr[1])
        rc_ksp_ss_thr.append(recall[1])
        pr_ksp_ss_thr.append(precision[1])

        logger.info('[5/8] Calculating metrics on PM... ')
        #probas_thr = np.linspace(0,1,20)
        fpr_pm = []
        tpr_pm = []
        pr_pm = []
        rc_pm = []
        f1_pm = []
        for i in range(len(list_ksp)):
            logger.info('iter: ' + str(i + 1) + '/' + str(len(list_ksp)))
            seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[i]))
            my_dataset.fg_marked = seeds
            my_dataset.calc_pm(my_dataset.fg_marked,
                               save=False,
                               marked_feats=None,
                               all_feats_df=my_dataset.sp_desc_df,
                               in_type='not csv',
                               mode='foreground',
                               bag_n_feats=conf.max_feats_ratio,
                               feat_fields=['desc'])

            probas = my_dataset.fg_pm_df['proba'].as_matrix()
            fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
            precision, recall, _ = precision_recall_curve(
                l_dataset.y_true[:, 2], probas)
            probas_thr = np.unique(probas)
            f1_pm_ = [
                f1_score(l_dataset.y_true[:, 2], probas > p)
                for p in probas_thr
            ]
            f1_pm.append(f1_pm_)
            fpr_pm.append(fpr)
            tpr_pm.append(tpr)
            pr_pm.append(precision)
            rc_pm.append(recall)

        logger.info('[6/8] Calculating metrics on true ground-truth... ')
        # Upper bound: train the PM on the true positive superpixels.
        seeds_gt = l_dataset.y_true[l_dataset.y_true[:, 2] == 1, :]
        my_dataset.fg_marked = seeds_gt
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])

        probas = my_dataset.fg_pm_df['proba'].as_matrix()
        fpr_gt, tpr_gt, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        pr_gt, rc_gt, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                 probas)
        probas_thr = np.unique(probas)
        f1_gt = [
            f1_score(l_dataset.y_true[:, 2], probas > p) for p in probas_thr
        ]

        #Make PM and KSP frames on SS
        logger.info('[7/8] Making prediction maps of KSP and KSP+SS PM... ')
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        ksp_scores = utls.get_scores_from_sps(seeds, my_dataset.labels)
        new_seeds = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr)
        ksp_ss_scores = utls.get_scores_from_sps(new_seeds, my_dataset.labels)

        my_dataset.fg_marked = np.asarray(
            utls.get_node_list_tracklets(list_ksp[-1]))
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])
        pm_ksp = my_dataset.get_pm_array(mode='foreground')
        my_dataset.fg_marked = new_seeds
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])
        pm_ksp_ss = my_dataset.get_pm_array(mode='foreground')

        #Make PM and KSP frames on SS
        f1_pm_thr = []
        fpr_pm_thr = []
        tpr_pm_thr = []
        rc_pm_thr = []
        pr_pm_thr = []
        logger.info(
            '[8/8] Making prediction maps and metrics of KSP+SS PM thresholded... '
        )
        # Keep only superpixels whose KSP+SS posterior exceeds pm_thr.
        new_seeds_thr_frames = my_dataset.fg_pm_df.loc[y_ksp_ss_thr,
                                                       'frame'].as_matrix()
        new_seeds_thr_labels = my_dataset.fg_pm_df.loc[y_ksp_ss_thr,
                                                       'sp_label'].as_matrix()
        new_seeds_thr = np.concatenate((new_seeds_thr_frames.reshape(
            -1, 1), new_seeds_thr_labels.reshape(-1, 1)),
                                       axis=1)
        ksp_ss_thr_scores = utls.get_scores_from_sps(new_seeds_thr,
                                                     my_dataset.labels)

        my_dataset.fg_marked = new_seeds_thr
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])
        pm_ksp_ss_thr = my_dataset.get_pm_array(mode='foreground')

        probas = my_dataset.fg_pm_df['proba'].as_matrix()
        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        precision, recall, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                      probas)
        probas_thr = np.unique(probas)
        f1_pm_thr_ = [
            f1_score(l_dataset.y_true[:, 2], probas > p) for p in probas_thr
        ]
        f1_pm_thr.append(f1_pm_thr_)
        fpr_pm_thr.append(fpr)
        tpr_pm_thr.append(tpr)
        pr_pm_thr.append(precision)
        rc_pm_thr.append(recall)

        ##Saving metrics
        data = dict()
        data['probas_thr'] = probas_thr
        data['fpr_pm'] = fpr_pm
        data['tpr_pm'] = tpr_pm
        data['pr_pm'] = pr_pm
        data['rc_pm'] = rc_pm
        data['f1_pm'] = f1_pm

        data['fpr_pm_thr'] = fpr_pm_thr
        data['tpr_pm_thr'] = tpr_pm_thr
        data['pr_pm_thr'] = pr_pm_thr
        data['rc_pm_thr'] = rc_pm_thr
        data['f1_pm_thr'] = f1_pm_thr

        data['fpr_ksp'] = fpr_ksp
        data['tpr_ksp'] = tpr_ksp
        data['pr_ksp'] = pr_ksp
        data['rc_ksp'] = rc_ksp
        data['f1_ksp'] = f1_ksp

        data['fpr_ksp_ss'] = fpr_ksp_ss
        data['tpr_ksp_ss'] = tpr_ksp_ss
        data['pr_ksp_ss'] = pr_ksp_ss
        data['rc_ksp_ss'] = rc_ksp_ss
        data['f1_ksp_ss'] = f1_ksp_ss

        data['fpr_ksp_ss_thr'] = fpr_ksp_ss_thr
        data['tpr_ksp_ss_thr'] = tpr_ksp_ss_thr
        data['pr_ksp_ss_thr'] = pr_ksp_ss_thr
        data['rc_ksp_ss_thr'] = rc_ksp_ss_thr
        data['f1_ksp_ss_thr'] = f1_ksp_ss_thr

        data['fpr_pm_ss'] = fpr_pm_ss
        data['tpr_pm_ss'] = tpr_pm_ss
        data['pr_pm_ss'] = pr_pm_ss
        data['rc_pm_ss'] = rc_pm_ss
        data['f1_pm_ss'] = f1_pm_ss

        data['fpr_gt'] = fpr_gt
        data['tpr_gt'] = tpr_gt
        data['pr_gt'] = pr_gt
        data['rc_gt'] = rc_gt
        data['f1_gt'] = f1_gt

        #ksp_ss_thr_scores
        data['seeds'] = seeds
        data['ksp_scores'] = ksp_scores
        data['new_seeds'] = seeds_gt
        data['ksp_ss_scores'] = ksp_ss_scores
        data['ksp_ss_thr_scores'] = ksp_ss_thr_scores
        data['pm_ksp'] = pm_ksp
        data['pm_ksp_ss'] = pm_ksp_ss
        data['pm_ksp_ss_thr'] = pm_ksp_ss_thr
        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)
    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        probas_thr = metrics['probas_thr']
        fpr_pm = metrics['fpr_pm']
        tpr_pm = metrics['tpr_pm']
        pr_pm = metrics['pr_pm']
        rc_pm = metrics['rc_pm']
        f1_pm = metrics['f1_pm']

        fpr_pm_thr = metrics['fpr_pm_thr']
        tpr_pm_thr = metrics['tpr_pm_thr']
        pr_pm_thr = metrics['pr_pm_thr']
        rc_pm_thr = metrics['rc_pm_thr']
        f1_pm_thr = metrics['f1_pm_thr']

        fpr_ksp = metrics['fpr_ksp']
        tpr_ksp = metrics['tpr_ksp']
        pr_ksp = metrics['pr_ksp']
        rc_ksp = metrics['rc_ksp']
        f1_ksp = metrics['f1_ksp']

        fpr_ksp_ss = metrics['fpr_ksp_ss']
        tpr_ksp_ss = metrics['tpr_ksp_ss']
        pr_ksp_ss = metrics['pr_ksp_ss']
        rc_ksp_ss = metrics['rc_ksp_ss']
        f1_ksp_ss = metrics['f1_ksp_ss']

        fpr_ksp_ss_thr = metrics['fpr_ksp_ss_thr']
        tpr_ksp_ss_thr = metrics['tpr_ksp_ss_thr']
        pr_ksp_ss_thr = metrics['pr_ksp_ss_thr']
        rc_ksp_ss_thr = metrics['rc_ksp_ss_thr']
        f1_ksp_ss_thr = metrics['f1_ksp_ss_thr']

        fpr_pm_ss = metrics['fpr_pm_ss']
        tpr_pm_ss = metrics['tpr_pm_ss']
        pr_pm_ss = metrics['pr_pm_ss']
        rc_pm_ss = metrics['rc_pm_ss']
        f1_pm_ss = metrics['f1_pm_ss']

        fpr_gt = metrics['fpr_gt']
        tpr_gt = metrics['tpr_gt']
        pr_gt = metrics['pr_gt']
        rc_gt = metrics['rc_gt']
        f1_gt = metrics['f1_gt']

        seeds = metrics['seeds']
        ksp_scores = metrics['ksp_scores']
        seeds_gt = metrics['new_seeds']
        ksp_ss_scores = metrics['ksp_ss_scores']
        ksp_ss_thr_scores = metrics['ksp_ss_thr_scores']
        pm_ksp = metrics['pm_ksp']
        pm_ksp_ss = metrics['pm_ksp_ss']
        pm_ksp_ss_thr = metrics['pm_ksp_ss_thr']

        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        my_dataset.load_ss_from_file()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']

    #Plot all iterations of PM
    plt.clf()
    conf.roc_xlim = [0, 0.4]
    conf.pr_rc_xlim = [0.6, 1.]

    colors = cycle([
        'brown', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange',
        'slateblue', 'lightpink', 'darkmagenta'
    ])
    lw = 1
    #PM curves
    # Left subplot (121): ROC. Right subplot (122): precision-recall.
    for i, color in zip(range(len(tpr_pm)), colors):
        auc_ = auc(fpr_pm[i], tpr_pm[i])
        max_f1 = np.max(f1_pm[i])

        plt.subplot(121)
        plt.plot(fpr_pm[i],
                 tpr_pm[i],
                 '-',
                 lw=lw,
                 color=color,
                 label='KSP/PM iter. %d (area = %0.4f, max(F1) = %0.4f)' %
                 (i + 1, auc_, max_f1))

        auc_ = auc(rc_pm[i], pr_pm[i])
        plt.subplot(122)
        plt.plot(rc_pm[i],
                 pr_pm[i],
                 '-',
                 lw=lw,
                 color=color,
                 label='KSP/PM iter. %d (area = %0.4f, max(F1) = %0.4f)' %
                 (i + 1, auc_, max_f1))

    #Plot true groundtruth
    #auc_ = auc(fpr_gt, tpr_gt)
    #max_f1 = np.max(f1_gt)
    #plt.subplot(121)
    #plt.plot(fpr_gt, tpr_gt,'r-', lw=lw,
    #            label='GT (area = %0.4f, max(F1) = %0.4f)' % (auc_,max_f1))
    #plt.subplot(122)
    #auc_ = auc(rc_gt, pr_gt)
    #plt.plot(rc_gt, pr_gt,'r-', lw=lw,
    #            label='GT (area = %0.4f, max(F1) = %0.4f)' % (auc_,max_f1))

    #Plot KSP
    auc_ = auc(fpr_ksp, tpr_ksp, reorder=True)
    max_f1 = np.max(f1_ksp)
    plt.subplot(121)
    plt.plot(fpr_ksp,
             tpr_ksp,
             'go--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(rc_ksp, pr_ksp, reorder=True)
    plt.plot(rc_ksp,
             pr_ksp,
             'go--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    #Plot KSP+SS
    auc_ = auc(fpr_ksp_ss, tpr_ksp_ss, reorder=True)
    max_f1 = np.max(f1_ksp_ss)
    plt.subplot(121)
    plt.plot(fpr_ksp_ss,
             tpr_ksp_ss,
             'ro--',
             lw=lw,
             label='KSP+SS (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(rc_ksp_ss, pr_ksp_ss, reorder=True)
    plt.plot(rc_ksp_ss,
             pr_ksp_ss,
             'ro--',
             lw=lw,
             label='KSP+SS (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    #Plot KSP+SS thresholded
    auc_ = auc(fpr_ksp_ss_thr, tpr_ksp_ss_thr, reorder=True)
    max_f1 = np.max(f1_ksp_ss_thr)
    plt.subplot(121)
    plt.plot(np.sort(fpr_ksp_ss_thr),
             np.sort(tpr_ksp_ss_thr),
             'ko--',
             lw=lw,
             label='KSP+SS (thr = %0.2f) (area = %0.4f, max(F1) = %0.4f)' %
             (conf.pm_thr, auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_ksp_ss_thr).ravel(),
               np.asarray(pr_ksp_ss_thr).ravel(),
               reorder=True)
    # BUGFIX: original wrote np.sort(pr_ksp_ss_thr[::-1]) — reversing before
    # an ascending sort is a no-op, so precision was plotted ascending while
    # recall was descending. Sort first, then reverse, to match intent.
    plt.plot(np.sort(rc_ksp_ss_thr)[::-1],
             np.sort(pr_ksp_ss_thr)[::-1],
             'ko--',
             lw=lw,
             label='KSP+SS (thr = %0.2f) (area = %0.4f, max(F1) = %0.4f)' %
             (conf.pm_thr, auc_, max_f1))

    #Plot KSP+SS PM
    auc_ = auc(fpr_pm_ss, tpr_pm_ss)
    max_f1 = np.max(f1_pm_ss)
    plt.subplot(121)
    plt.plot(np.asarray(fpr_pm_ss).ravel(),
             np.asarray(tpr_pm_ss).ravel(),
             'm-',
             lw=lw,
             label='KSP+SS/PM (area = %0.4f, max(F1) = %0.4f)' %
             (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(rc_pm_ss, pr_pm_ss)
    plt.plot(rc_pm_ss,
             pr_pm_ss,
             'm-',
             lw=lw,
             label='KSP+SS/PM (area = %0.4f, max(F1) = %0.4f)' %
             (auc_, max_f1))

    #Plot KSP+SS PM thresholded
    auc_ = auc(np.asarray(fpr_pm_thr).ravel(), np.asarray(tpr_pm_thr).ravel())
    max_f1 = np.max(f1_pm_thr)
    plt.subplot(121)
    plt.plot(
        np.asarray(fpr_pm_thr).ravel(),
        np.asarray(tpr_pm_thr).ravel(),
        'c-',
        lw=lw,
        label='KSP+SS/PM (thr = %0.2f)/PM (area = %0.4f, max(F1) = %0.4f)' %
        (conf.pm_thr, auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_pm_thr).ravel(), np.asarray(pr_pm_thr).ravel())
    plt.plot(np.asarray(rc_pm_thr).ravel(),
             np.asarray(pr_pm_thr).ravel(),
             'c-',
             lw=lw,
             label='KSP+SS/PM (thr = %0.2f) (area = %0.4f, max(F1) = %0.4f)' %
             (conf.pm_thr, auc_, max_f1))

    plt.subplot(121)
    plt.legend()
    plt.xlim(conf.roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.subplot(122)
    plt.legend()
    plt.xlim(conf.pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle(conf.seq_type + ', ' + conf.ds_dir + '\n' + 'T: ' +
                 str(conf.T))
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    fig.savefig(os.path.join(conf.dataOutDir, 'metrics.eps'), dpi=200)

    ###Make plots
    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        # Frames are expensive to regenerate; treat the directory as a cache.
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        n_iters_ksp = len(list_ksp)
        os.mkdir(frame_path)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                # Overlay ground-truth contour (red) and the gaze point.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = gaze.drawGazePoint(conf.myGaze_fg, f, im, radius=7)

                bar.update(f)
                plt.subplot(231)
                plt.imshow(ksp_scores[..., f])
                plt.title('KSP')
                plt.subplot(232)
                plt.imshow(pm_ksp[..., f])
                plt.title('KSP -> PM')
                plt.subplot(233)
                plt.imshow(ksp_ss_scores[..., f])
                plt.title('KSP+SS')
                plt.subplot(234)
                plt.imshow(ksp_ss_thr_scores[..., f])
                plt.title('KSP+SS -> PM -> (thr = %0.2f)' % (conf.pm_thr))
                plt.subplot(235)
                plt.imshow(pm_ksp_ss_thr[..., f])
                plt.title('KSP+SS -> PM -> (thr = %0.2f) -> PM' %
                          (conf.pm_thr))
                plt.subplot(236)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f) + ', n_iters_ksp: ' +
                             str(n_iters_ksp))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)

    logger.info('Saving SPs per iterations plot...')
    # (iteration, number of selected superpixels) per KSP iteration, plus
    # one extra point (red) for the SS-expanded set of the last iteration.
    n_sps = []
    for i in range(len(list_ksp)):
        n = np.asarray(utls.get_node_list_tracklets(list_ksp[i])).shape[0]
        n_sps.append((i + 1, n))

    seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
    n = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr).shape[0]
    n_sps.append((len(list_ksp) + 1, n))
    n_sps = np.asarray(n_sps)

    plt.clf()
    plt.plot(n_sps[:, 0], n_sps[:, 1], 'bo-')
    plt.plot(n_sps[-1, 0], n_sps[-1, 1], 'ro')
    plt.xlabel('iterations')
    plt.ylabel('num. of superpixels')
    plt.title('num of superpixels vs. iterations. SS threshold: ' +
              str(conf.ss_thr))
    plt.savefig(os.path.join(conf.dataOutDir, 'sps_iters.eps'), dpi=200)
Exemplo n.º 7
0
# Finalize and save the F1-vs-coverage plot assembled above (file_out is
# defined earlier in the file).
plt.grid()
plt.xlabel('coverage ratio [%]')
plt.ylabel('F1 score')
print('Saving plot to: ' + file_out)
#plt.show()
plt.savefig(file_out, dpi=200)
plt.clf()

# Accumulators for preview images and coverage values, filled further below.
ims_list = []
covs_list = []
# Sequence selection; keys presumably index rd.confs_dict_ksp — TODO confirm.
type_ = 'Tweezer'
dset = 'Dataset00'
dset_num = 0

# First run configuration of the selected sequence.
conf = rd.confs_dict_ksp[type_][dset_num][0]
dataset = ld.LearningDataset(conf)
dataset.load_labels_contours_if_not_exist()
dataset.load_labels_if_not_exist()
labels = dataset.labels
labels_contours = dataset.labelContourMask
# Overlay palette; color_dict is defined elsewhere in the file.
colors_ = [
    color_dict['red'], color_dict['green'], color_dict['blue'],
    color_dict['magenta'], color_dict['white']
]

# Reference frame index comes from the first row of labels_ref.csv produced
# by the coverage-reference experiment (the 20 entry of the result dirs).
path_ = os.path.join(rd.root_dir, rd.res_dirs_dict_ksp_cov_ref[dset][20])
df_labels_ref = pd.read_csv(os.path.join(path_, 'labels_ref.csv'))
ref_frame = df_labels_ref.as_matrix()[0, 0]
im = utls.imread(conf.frameFileNames[ref_frame])
gt = dataset.gt
# Thick ground-truth boundary mask of the reference frame, for overlay.
cont_gt = segmentation.find_boundaries(gt[..., ref_frame], mode='thick')
Exemplo n.º 8
0
def main(conf, plot_fname='metrics.pdf', logger=None):
    """Evaluate a finished KSP segmentation run.

    Computes ROC / precision-recall / F1 metrics for the raw KSP output and
    for its PM (probability-map) refinement, plots the curves, dumps
    per-frame preview images, plots the superpixel count per KSP iteration,
    and writes pixel-level F1 scores to ``scores.csv``.

    Metrics are computed once and cached in ``conf.dataOutDir/metrics.npz``;
    when that file already exists it is reloaded instead of recomputed.

    Parameters
    ----------
    conf : object
        Run configuration (must provide ``dataOutDir``, ``frameFileNames``,
        ``bag_n_feats``, ``bag_jobs``, ``myGaze_fg``, ``seq_type``,
        ``ds_dir``, ``T`` and ``ss_thr``).
    plot_fname : str
        File name (within ``conf.dataOutDir``) for the ROC/PR figure.
    logger : unused
        Kept for signature compatibility; a module logger is created
        internally.
    """

    logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    metrics_path = os.path.join(conf.dataOutDir, 'metrics.npz')

    if (not os.path.exists(metrics_path)):

        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']
        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()

        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        l_dataset.make_y_array_true(l_dataset.gt)

        # KSP gives a single (binary) operating point; pad the curves with
        # the trivial (0,0)/(1,1) endpoints so AUC is well defined.
        fpr_ksp = [0.]
        tpr_ksp = [0.]
        pr_ksp = [1.]
        rc_ksp = [0.]
        f1_ksp = []

        logger.info('[1/4] Calculating metrics on KSP... ')

        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        l_dataset.set_seeds(seeds)
        l_dataset.make_y_array(l_dataset.seeds)

        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], l_dataset.y[:, 2])
        precision, recall, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                      l_dataset.y[:, 2])
        f1_ksp.append(f1_score(l_dataset.y_true[:, 2], l_dataset.y[:, 2]))
        # Index 1 is the single non-trivial threshold of a binary prediction.
        fpr_ksp.append(fpr[1])
        tpr_ksp.append(tpr[1])
        pr_ksp.append(precision[1])
        rc_ksp.append(recall[1])

        fpr_ksp.append(1.)
        tpr_ksp.append(1.)
        pr_ksp.append(0.)
        rc_ksp.append(1.)

        logger.info('[2/4] Calculating metrics on PM... ')
        fpr_pm = []
        tpr_pm = []
        pr_pm = []
        rc_pm = []
        f1_pm = []

        # Refit the foreground probability model (PM) on the final KSP seeds.
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        my_dataset.fg_marked = seeds
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.bag_n_feats,
                           feat_fields=['desc'],
                           n_jobs=conf.bag_jobs)

        # BUGFIX: as_matrix() was removed in pandas 1.0; .values is the
        # version-portable equivalent.
        probas = my_dataset.fg_pm_df['proba'].values
        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        precision, recall, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                      probas)
        # F1 at every distinct probability threshold (max is reported later).
        probas_thr = np.unique(probas)
        f1_pm_ = [
            f1_score(l_dataset.y_true[:, 2], probas > p) for p in probas_thr
        ]
        f1_pm.append(f1_pm_)
        fpr_pm.append(fpr)
        tpr_pm.append(tpr)
        pr_pm.append(precision)
        rc_pm.append(recall)

        # Make PM and KSP frames on SS
        logger.info('[3/4] Making prediction maps of KSP... ')
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        ksp_scores = utls.get_scores_from_sps(seeds, my_dataset.labels)

        my_dataset.fg_marked = np.asarray(
            utls.get_node_list_tracklets(list_ksp[-1]))
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.bag_n_feats,
                           feat_fields=['desc'],
                           n_jobs=conf.bag_jobs)
        pm_ksp = my_dataset.get_pm_array(mode='foreground')

        # Saving metrics
        data = dict()
        data['probas_thr'] = probas_thr
        data['fpr_pm'] = fpr_pm
        data['tpr_pm'] = tpr_pm
        data['pr_pm'] = pr_pm
        data['rc_pm'] = rc_pm
        data['f1_pm'] = f1_pm

        data['fpr_ksp'] = fpr_ksp
        data['tpr_ksp'] = tpr_ksp
        data['pr_ksp'] = pr_ksp
        data['rc_ksp'] = rc_ksp
        data['f1_ksp'] = f1_ksp

        data['ksp_scores'] = ksp_scores
        # BUGFIX: pm_ksp was computed but never cached, so the reload branch
        # below crashed with KeyError('pm_ksp') on any subsequent run.
        data['pm_ksp'] = pm_ksp
        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)

    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        probas_thr = metrics['probas_thr']
        fpr_pm = metrics['fpr_pm']
        tpr_pm = metrics['tpr_pm']
        pr_pm = metrics['pr_pm']
        rc_pm = metrics['rc_pm']
        f1_pm = metrics['f1_pm']

        fpr_ksp = metrics['fpr_ksp']
        tpr_ksp = metrics['tpr_ksp']
        pr_ksp = metrics['pr_ksp']
        rc_ksp = metrics['rc_ksp']
        f1_ksp = metrics['f1_ksp']

        ksp_scores = metrics['ksp_scores']
        pm_ksp = metrics['pm_ksp']

        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']

    # Plot all iterations of PM
    plt.clf()
    conf.roc_xlim = [0, 0.4]
    conf.pr_rc_xlim = [0.6, 1.]

    lw = 1

    # PM curves
    auc_ = auc(np.asarray(fpr_pm[-1]).ravel(), np.asarray(tpr_pm[-1]).ravel())
    max_f1 = np.max(f1_pm[-1])

    plt.subplot(121)
    plt.plot(np.asarray(fpr_pm[-1]).ravel(),
             np.asarray(tpr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    auc_ = auc(np.asarray(rc_pm[-1]).ravel(), np.asarray(pr_pm[-1]).ravel())
    plt.subplot(122)
    plt.plot(np.asarray(rc_pm[-1]).ravel(),
             np.asarray(pr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    # Plot KSP
    # NOTE(review): sklearn removed the `reorder` argument of auc() in
    # v0.22; this call requires an older scikit-learn. Kept as-is to avoid
    # a silent behavior change — confirm the pinned sklearn version.
    auc_ = auc(np.asarray(fpr_ksp).ravel(),
               np.asarray(tpr_ksp).ravel(),
               reorder=True)
    max_f1 = np.max(f1_ksp)
    plt.subplot(121)
    plt.plot(np.asarray(fpr_ksp).ravel(),
             np.asarray(tpr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_ksp).ravel(),
               np.asarray(pr_ksp).ravel(),
               reorder=True)
    plt.plot(np.asarray(rc_ksp).ravel(),
             np.asarray(pr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    plt.subplot(121)
    plt.legend()
    plt.xlim(conf.roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.subplot(122)
    plt.legend()
    plt.xlim(conf.pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle(conf.seq_type + ', ' + conf.ds_dir + '\n' + 'T: ' +
                 str(conf.T))
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    fig.savefig(os.path.join(conf.dataOutDir, plot_fname), dpi=200)

    # Per-frame previews: KSP scores, PM map, and the image with the
    # ground-truth contour (red) and the 2D gaze point drawn in.
    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        n_iters_ksp = len(list_ksp)
        os.mkdir(frame_path)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = csv.draw2DPoint(utls.pandas_to_std_csv(conf.myGaze_fg),
                                     f,
                                     im,
                                     radius=7)

                bar.update(f)
                plt.subplot(221)
                plt.imshow(ksp_scores[..., f])
                plt.title('KSP')
                plt.subplot(222)
                plt.imshow(pm_ksp[..., f])
                plt.title('KSP -> PM')
                plt.subplot(223)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f) + ', n_iters_ksp: ' +
                             str(n_iters_ksp))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)

    logger.info('Saving SPs per iterations plot...')
    n_sps = []
    for i in range(len(list_ksp)):
        # BUGFIX: was list_ksp[-1], which counted the FINAL iteration's
        # superpixels every time and plotted a constant line.
        n = np.asarray(utls.get_node_list_tracklets(list_ksp[i])).shape[0]
        n_sps.append((i + 1, n))

    # Repeat the last count as an extra point (highlighted in red below).
    n_sps.append((len(list_ksp) + 1, n))
    n_sps = np.asarray(n_sps)

    plt.clf()
    plt.plot(n_sps[:, 0], n_sps[:, 1], 'bo-')
    plt.plot(n_sps[-1, 0], n_sps[-1, 1], 'ro')
    plt.xlabel('iterations')
    plt.ylabel('num. of superpixels')
    plt.title('num of superpixels vs. iterations. SS threshold: ' +
              str(conf.ss_thr))
    plt.savefig(os.path.join(conf.dataOutDir, 'sps_iters.eps'), dpi=200)

    # Pixel-level F1 scores: thresholded-PM best F1 and binary-KSP F1.
    pr_pm, rc_pm, _ = precision_recall_curve(l_dataset.gt.ravel(),
                                             pm_ksp.ravel())
    ksp_pm_pix_f1 = np.max(2 * (pr_pm * rc_pm) / (pr_pm + rc_pm))
    ksp_pix_f1 = f1_score(l_dataset.gt.ravel(), ksp_scores.ravel())
    file_out = os.path.join(conf.dataOutDir, 'scores.csv')

    C = pd.Index(["F1"], name="columns")
    methods_idx = pd.Index(['KSP', 'KSP/PM'], name="Methods")
    data = np.asarray([ksp_pix_f1, ksp_pm_pix_f1]).reshape(2, 1)
    df = pd.DataFrame(data=data, index=methods_idx, columns=C)
    df.to_csv(path_or_buf=file_out)