コード例 #1
0
def main():
    """Launch the annotation GUI on a hard-coded dataset.

    Builds the Qt application, loads frames, ground-truth masks,
    superpixel labels and their contour mask from disk, then opens a
    ``Window`` whose image item reacts to mouse clicks.
    """
    qt_app = QtGui.QApplication(sys.argv)
    qt_app.setApplicationName('Mes jolies images')

    # Dataset locations (hard-coded paths).
    root = '/home/laurent.lejeune/medical-labeling'
    dset = 'Dataset03'
    out_dir = os.path.join(root, dset, 'results')

    # Frame index used as reference when computing coverage.
    ref_idx = 0

    frames_dir = os.path.join(root, dset, 'input-frames')
    truth_dir = os.path.join(root, dset, 'ground_truth-frames')
    clicks_dir = os.path.join('.')

    frame_paths = sorted(glob.glob(os.path.join(frames_dir, '*.png')))
    truth_paths = sorted(glob.glob(os.path.join(truth_dir, '*.png')))

    # Binarize channel 0 of every ground-truth image, keep a singleton
    # third axis, then stack the frames along axis 2.
    masks = [(utls.imread(p)[..., 0] > 0)[..., np.newaxis]
             for p in truth_paths]
    masks = np.concatenate(masks, axis=2)

    # Superpixel contour mask (tweezer sequence).
    contours_subdir = 'precomp_descriptors/'
    contours_stem = 'sp_labels_tsp_contours'
    contours_npz = np.load(
        os.path.join(root, dset, contours_subdir, contours_stem) + '.npz',
        fix_imports=True,
        encoding='bytes')
    contour_mask = contours_npz['labelContourMask']

    # Superpixel label volume.
    sp_labels = np.load(
        os.path.join(root, dset, 'input-frames',
                     'sp_labels.npz'))['sp_labels']

    # Superpixels overlapping the ground truth on the reference frame.
    positive_sps = vutls.get_pos_sps(sp_labels[..., ref_idx],
                                     masks[..., ref_idx])

    win = Window(frames_dir, dset, out_dir, frame_paths, truth_paths,
                 ref_idx, positive_sps, clicks_dir, contour_mask, sp_labels)

    # Route clicks on the image item to the window's handler.
    win.window.getImageItem().mousePressEvent = win.on_click

    # Mouse move coordinates
    # qt_app.installEventFilter(win)

    win.show()
    qt_app.exec_()
コード例 #2
0
ファイル: sp.py プロジェクト: AmmieQi/ksptrack
def getColorDesc(frameFileNames, labels, dense_step=1, normCst=1):
    """Compute a mean-HSV color descriptor per superpixel per frame.

    normCst : pixel values are normalized w.r.t that. Default = 1

    Returns a list with one entry per frame, each a list of
    ``(label, mean_hsv)`` tuples. Note: ``dense_step`` is currently
    unused by the implementation.
    """
    descriptors = []
    for frame_idx in range(len(frameFileNames)):
        per_frame = []
        # Work in HSV space.
        hsv = color.rgb2hsv(utils.imread(frameFileNames[frame_idx]))
        frame_labels = labels[:, :, frame_idx]
        for lbl in np.unique(frame_labels):
            region_mask = frame_labels == lbl
            # Stack the masked pixels channel-wise: (n_channels, n_pixels).
            pixels = np.stack([np.ravel(hsv[:, :, c][region_mask])
                               for c in range(hsv.shape[2])], axis=0)
            # Channel-wise mean, normalized by normCst.
            mean_color = np.mean(pixels, axis=1) / np.tile(normCst, 3)
            per_frame.append((lbl, mean_color))
        descriptors.append(per_frame)

    return descriptors
コード例 #3
0
def run_on_dset(dataset_dir):
    """Extract per-superpixel OverFeat CNN features for every frame of a dataset.

    Each superpixel (optionally widened to its dilated neighborhood) is
    cropped to a roughly square patch, resized to ``patch_size`` and fed
    to OverFeat; the features are saved as ``<frame>.npz`` and
    ``<frame>.mat`` next to the dataset. Frames whose feature file
    already exists are skipped.

    Parameters
    ----------
    dataset_dir : str
        Name of the dataset directory below ``data_root_dir``.
    """

    # read image
    labelMatPath = 'EE/sp_labels_ml.mat'
    data_root_dir = '/home/laurent.lejeune/medical-labeling/'
    frame_extension = '.png'
    frame_dir = 'input-frames'
    frame_prefix = 'frame_'
    frame_digits = 4

    # "wide" mode feeds the superpixel plus its immediate neighbors.
    wide = True
    patch_size = 231  # presumably OverFeat's input size — TODO confirm

    if (wide):
        feat_out_dir = 'EE/overfeat_wide'
    else:
        feat_out_dir = 'EE/overfeat'

    # Superpixel label volume; last axis indexes frames.
    labels = scipy.io.loadmat(
        os.path.join(data_root_dir, dataset_dir, labelMatPath))['labels']

    filenames = utls.makeFrameFileNames(frame_prefix, frame_digits, frame_dir,
                                        data_root_dir, dataset_dir,
                                        frame_extension)

    # initialize overfeat. Note that this takes time, so do it only once if possible
    overfeat_dir = '/home/laurent.lejeune/Documents/OverFeat'
    overfeat_extr = OverfeatFeatureExtractor(
        os.path.join(overfeat_dir, 'data', 'default'),
        os.path.join(overfeat_dir, 'bin', 'linux_64', 'cuda',
                     'overfeatcmd_cuda'))

    out_path = os.path.join(data_root_dir, dataset_dir, feat_out_dir)

    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Structuring element used to dilate a superpixel in "wide" mode.
    selem = square(2)
    for i in range(len(filenames)):
        patches = []
        this_filename = os.path.splitext(os.path.basename(filenames[i]))[0]
        outfile = os.path.join(data_root_dir, dataset_dir, feat_out_dir,
                               this_filename)
        # Skip frames whose features were already computed.
        if (not os.path.isfile(outfile + '.npz')):
            print('frame {}/{}'.format(i + 1, len(filenames)))
            features = []
            image = utls.imread(filenames[i])
            # Drop any alpha channel.
            if (image.shape[2] > 3): image = image[:, :, 0:3]
            for j in np.unique(labels[:, :, i]):
                if (wide):
                    # Dilate the superpixel, then take every superpixel
                    # touched by the dilation as the crop support.
                    this_mask = labels[:, :, i] == j
                    this_mask = binary_dilation(this_mask, selem)
                    this_mask_idx = np.where(this_mask)
                    this_mask_labels = np.unique(labels[this_mask_idx[0],
                                                        this_mask_idx[1], i])
                    this_mask = np.in1d(labels[:, :, i],
                                        this_mask_labels).reshape(
                                            image.shape[0], image.shape[1])
                else:
                    this_mask = labels[:, :, i] == j
                # Pad the shorter side of the bounding box so the crop is
                # (roughly) square before resizing.
                i_mask, j_mask = np.where(this_mask)
                w = max(j_mask) - min(j_mask)
                h = max(i_mask) - min(i_mask)
                if (w < h):
                    cols_to_add = h - w + 1
                    idx_i = np.arange(min(i_mask), max(i_mask) + 1).astype(int)
                    idx_j = np.arange(
                        min(j_mask) - np.floor(cols_to_add / 2),
                        max(j_mask) + np.ceil(cols_to_add / 2)).astype(int)
                elif (w > h):
                    rows_to_add = w - h + 1
                    idx_i = np.arange(
                        min(i_mask) - np.floor(rows_to_add / 2),
                        max(i_mask) + np.ceil(rows_to_add / 2)).astype(int)
                    idx_j = np.arange(min(j_mask), max(j_mask) + 1).astype(int)
                else:
                    idx_i = np.arange(min(i_mask), max(i_mask) + 1)
                    idx_j = np.arange(min(j_mask), max(j_mask) + 1)
                # mode='wrap' keeps indices valid when the padded crop
                # extends past the image border.
                patches.append(
                    resize(
                        image.take(idx_i, mode='wrap',
                                   axis=0).take(idx_j, mode='wrap', axis=1),
                        (patch_size, patch_size)).astype(np.float32))

            X = np.asarray(patches)
            features = overfeat_extr.get_feats_overfeat(
                X, np.unique(labels[:, :, i]), '/tmp')
            data = dict()
            data['features'] = features

            np.savez_compressed(outfile + '.npz', **data)
            scipy.io.savemat(outfile + '.mat', data)
        else:
            print("File: " + outfile + " exists. Delete to recompute...")
コード例 #4
0
# Root of all datasets and the list of sequences to process.
dir_root = '/home/laurent.lejeune/medical-labeling/'
ds_dir = ['Dataset12']

# Relative path (inside each dataset) of the pre-computed U-Net features.
feat_path = 'precomp_descriptors/unet/feat.h5'


for i in range(len(ds_dir)):

    print('Interpolating Unet features on:')
    print(ds_dir[i])

    # Superpixel label volume for the sequence.
    labels = np.load(
        os.path.join(dir_root, ds_dir[i], 'input-frames',
                     'sp_labels.npz'))['sp_labels']

    frameFileNames = utls.makeFrameFileNames('frame_', 4, 'input-frames',
                                             dir_root, ds_dir[i], 'png')

    # Image geometry is taken from the first frame.
    im = utls.imread(frameFileNames[0])
    img_height = im.shape[0]
    img_width = im.shape[1]

    # Read the raw U-Net feature maps from HDF5.
    hdInFile = h5py.File(os.path.join(dir_root, ds_dir[i], feat_path), 'r')
    feats_arr = hdInFile['raw_feat'][...]
    hdInFile.close()

    modelDepth = 4

    # Feature-map geometry (axes 1..3 of the stored array).
    feat_h = feats_arr.shape[1]
    feat_w = feats_arr.shape[2]
    feat_d = feats_arr.shape[3]
コード例 #5
0
def main(confs,
         confunet,
         out_dir=None,
         train=True,
         pred=True,
         score=True,
         resume_model=None):
    """Run a 4-fold U-Net learning experiment on KSP segmentations.

    For every fold, two U-Nets are trained: one on the KSP-produced
    segmentations ("my") and one on the true ground truth ("true").
    Each network then predicts on its held-out fold and both predictions
    are scored against the ground truth.

    Parameters
    ----------
    confs : list
        One configuration per sequence; each must expose ``dataOutDir``,
        ``dataOutRoot``, ``seq_type`` and ``csvFileName_fg``.
    confunet : object
        U-Net configuration (``unet_im_size``, ``n_epochs``, ...);
        also dumped to ``cfg.yml`` in the output directory.
    out_dir : str, optional
        Output directory; when None a time-stamped one is created under
        ``confs[0].dataOutRoot/learning_exps``.
    train, pred, score : bool
        Enable the training / prediction / scoring phases.
    resume_model : optional
        When not None, training resumes from the latest checkpoint in
        the fold's train directory (initial epoch parsed from
        characters 6:8 of the checkpoint file name).
    """

    # Number of points sampled on the score curves.
    n_points = 2000

    # Create a time-stamped output directory when none is given.
    if (out_dir is None):
        now = datetime.datetime.now()
        dateTime = now.strftime("%Y-%m-%d_%H-%M-%S")
        out_dir = os.path.join(
            confs[0].dataOutRoot, 'learning_exps',
            'learning_' + confs[0].seq_type + '_' + dateTime)

    # One KSP results directory per sequence.
    dir_in = [c.dataOutDir for c in confs]

    if (not os.path.exists(out_dir)):
        os.mkdir(out_dir)

    # Keep a copy of the U-Net configuration alongside the results.
    with open(os.path.join(out_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(confunet, outfile, default_flow_style=True)

    datasets = []
    utls.setup_logging(out_dir)
    logger = logging.getLogger('learning_exp_unet')

    logger.info('Starting learning experiment on:')
    logger.info(dir_in)
    logger.info('Gaze file: ' + str(confs[0].csvFileName_fg))
    logger.info('')

    # Build and cache the per-sequence datasets (targets = KSP scores).
    if (not os.path.exists(os.path.join(out_dir, 'datasets.npz'))):
        logger.info('Building target vectors')
        for i in range(len(dir_in)):
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated (PyYAML >= 5.1) and unsafe on untrusted files.
            with open(os.path.join(dir_in[i], 'cfg.yml'), 'r') as outfile:
                conf = yaml.load(outfile)

            logger.info('Dataset: ' + str(i + 1) + '/' + str(len(dir_in)))

            dataset = learning_dataset.LearningDataset(conf)

            npz_file = np.load(os.path.join(dir_in[i], 'results.npz'))

            #seeds = np.asarray(utls.get_node_list_tracklets(npz_file['list_ksp'][-1]))
            # Use the binary KSP segmentation as training targets.
            dataset.scores = npz_file['ksp_scores_mat'].astype(bool)
            datasets.append(dataset)

        if (not os.path.exists(out_dir)):
            os.mkdir(out_dir)

        logger.info('saving datasets to: ' + out_dir)
        np.savez(os.path.join(out_dir, 'datasets.npz'),
                 **{'datasets': datasets})

    dir_my_root = os.path.join(out_dir, 'my')
    dir_true_root = os.path.join(out_dir, 'true')
    n_folds = 4

    if (train):
        from nets import UNetBasic

        logger.info('Loading datasets...')
        datasets = np.load(os.path.join(out_dir, 'datasets.npz'))['datasets']
        fold_ids = np.arange(0, 4)[::-1]

        for i in range(n_folds):

            logger.info('-----------------')
            # Hold out fold i; train on the three remaining folds.
            pred_fold = i
            train_folds = np.asarray([
                fold_ids[j] for j in range(n_folds)
                if (fold_ids[j] != pred_fold)
            ])

            logger.info('train_folds: ' + str(train_folds))
            logger.info('pred_folds: ' + str(pred_fold))
            logger.info('-----------------')
            logger.info('Extracting X')
            X = (resize_datasets([
                datasets[train_folds[j]].X_all_images
                for j in range(train_folds.shape[0])
            ], confunet.unet_im_size) * 255).astype(np.uint8)
            logger.info('Extracting y')
            # Targets from the KSP segmentation, replicated to 3 channels.
            y = (resize_datasets([
                np.tile(
                    datasets[train_folds[j]].scores[:, :, np.newaxis, :].
                    astype(np.uint8) * 255, (1, 1, 3, 1))
                for j in range(train_folds.shape[0])
            ], confunet.unet_im_size) * 255).astype(np.uint8)

            # Set dirs
            dir_my = os.path.join(dir_my_root, 'fold_' + str(pred_fold))
            dir_my_train = os.path.join(dir_my, 'train')
            logger.info('Writing _my_ train images/gts to disk...')
            ims_my, gts_my = write_frames_train(X, y, dir_my_train, logger)

            logger.info('Training U-Net on my segmentation...')
            unet_my = UNetBasic.UNetBasic(confunet, dir_my_train, ims_my[0])
            # Optionally resume from the latest stored checkpoint.
            if (resume_model is not None):
                model_path_my = get_model_path(dir_my_train,
                                               take_min_loss=False)
                initial_epoch_my = 0

                if (len(model_path_my) == 0):
                    n_epochs_my = confunet.n_epochs
                    model_path_my = None
                    initial_epoch_my = 0
                else:
                    n_epochs_my = confunet.n_epochs
                    # Epoch number encoded in the checkpoint file name.
                    initial_epoch_my = int(
                        os.path.split(model_path_my)[-1][6:8])
            else:
                model_path_my = None
                n_epochs_my = confunet.n_epochs
                initial_epoch_my = 0

            unet_my.train(confunet,
                          ims_my,
                          gts_my,
                          dir_my_train,
                          n_epochs_my,
                          initial_epoch=initial_epoch_my,
                          dir_eval_clbk=dir_my,
                          resume_model=model_path_my)

            logger.info('Extracting y')
            # Same targets, but built from the true ground truth.
            y = (resize_datasets([
                np.tile(
                    datasets[train_folds[j]].gt[:, :, np.newaxis, :].astype(
                        np.uint8) * 255, (1, 1, 3, 1))
                for j in range(train_folds.shape[0])
            ], confunet.unet_im_size) * 255).astype(np.uint8)

            dir_true = os.path.join(dir_true_root, 'fold_' + str(pred_fold))
            dir_true_train = os.path.join(dir_true, 'train')

            logger.info('Writing _true_ train images/gts to disk...')
            ims_true, gts_true = write_frames_train(X, y, dir_true_train,
                                                    logger)

            logger.info('Training U-Net on true segmentation...')
            unet_true = UNetBasic.UNetBasic(confunet, dir_true_train,
                                            ims_true[0])
            if (resume_model is not None):
                model_path_true = get_model_path(dir_true_train,
                                                 take_min_loss=False)
                initial_epoch_true = 0
                if (len(model_path_true) == 0):
                    n_epochs_true = confunet.n_epochs
                    model_path_true = None
                    initial_epoch_true = 0
                else:
                    n_epochs_true = confunet.n_epochs
                    initial_epoch_true = int(
                        os.path.split(model_path_true)[-1][6:8])
            else:
                model_path_true = None
                n_epochs_true = confunet.n_epochs
                initial_epoch_true = 0

            unet_true.train(confunet,
                            ims_true,
                            gts_true,
                            dir_true_train,
                            n_epochs_true,
                            initial_epoch=initial_epoch_true,
                            dir_eval_clbk=dir_true,
                            resume_model=model_path_true)

        # NOTE(review): this `else` is attached to the `for` loop above
        # (for/else); with no `break` in the loop it runs after every
        # complete pass, yet the messages read as if it were meant to
        # pair with a directory-existence `if`. Confirm intent.
        else:
            logger.info('Results directory')
            logger.info(dir_my_root)
            logger.info(dir_true_root)
            logger.info('Exist. Delete and re-compute')

    logger.info('-----------------')
    logger.info('Loading datasets...')
    datasets = np.load(os.path.join(out_dir, 'datasets.npz'))['datasets']
    if (pred):
        from nets import UNetBasic
        fold_ids = np.arange(0, 4)[::-1]

        for i in range(n_folds):

            pred_fold = i
            logger.info('Predicting on fold_' + str(pred_fold))

            dir_my = os.path.join(dir_my_root, 'fold_' + str(pred_fold))
            dir_true = os.path.join(dir_true_root, 'fold_' + str(pred_fold))

            #model_path_my = get_best_model_path(os.path.join(dir_my, 'train'))
            model_path_my = get_model_path(os.path.join(dir_my, 'train'),
                                           take_min_loss=False)

            #model_path_true = get_best_model_path(os.path.join(dir_true,'train'))
            model_path_true = get_model_path(os.path.join(dir_true, 'train'),
                                             take_min_loss=False)

            dir_true_pred = os.path.join(dir_true, 'pred')
            dir_my_pred = os.path.join(dir_my, 'pred')
            dir_true_pred_res = os.path.join(dir_true, 'pred_res')
            dir_my_pred_res = os.path.join(dir_my, 'pred_res')

            logger.info('Will use models:')
            logger.info(model_path_my)
            logger.info(model_path_true)

            logger.info('Extracting y_my/y_true')
            X = datasets[pred_fold].X_all_images
            X = resize_stack(X, confunet.unet_im_size)

            logger.info('Writing _my_ pred images to disk...')
            ims_my = write_frames_pred(X, dir_my_pred, logger)

            logger.info('Writing _true_ pred images to disk...')
            ims_true = write_frames_pred(X, dir_true_pred, logger)

            # Per-channel normalization statistics.
            # NOTE(review): mean/std are computed on the *prediction*
            # fold, while the commented-out lines further down compute
            # them on the training set — confirm which is intended.
            X = X.transpose((3, 0, 1, 2))
            mean = np.mean(X.reshape(-1, 3), axis=0)
            std = np.std(X.reshape(-1, 3), axis=0)

            unet_true = UNetBasic.UNetBasic(confunet, dir_true_pred,
                                            ims_true[0])
            unet_my = UNetBasic.UNetBasic(confunet, dir_my_pred, ims_my[0])

            # Get normalization parameters of training set
            logger.info('Extracting X (train) normalization factors')

            train_folds = np.asarray([
                fold_ids[j] for j in range(n_folds)
                if (fold_ids[j] != pred_fold)
            ])

            # Flatten the per-fold frame lists into one list of paths.
            im_list_train = [
                datasets[train_folds[j]].conf.frameFileNames
                for j in range(train_folds.shape[0])
            ]
            im_list_train = [
                item for sublist in im_list_train for item in sublist
            ]
            input_y = unet_my.inputDimY
            input_x = unet_my.inputDimX
            n_chans = unet_my.nbrChannels
            # NOTE(review): ims_train is currently unused — the
            # normalization that consumed it is commented out below.
            ims_train = UNetImpl.preprocess_imgs(unet_my, im_list_train,
                                                 input_y, input_x, n_chans)

            #mean = np.mean(ims_train.reshape(-1,3), axis = 0)
            #std = np.std(ims_train.reshape(-1,3), axis = 0)
            #ims_train = utls.normalize_imgs(ims_train, mean, std)

            logger.info('Predicting on my segmentation...')
            preds_my = unet_my.eval(confunet, model_path_my, ims_my, mean, std)

            logger.info('Predicting on true segmentation...')
            preds_true = unet_true.eval(confunet, model_path_true, ims_true,
                                        mean, std)

            logger.info('Writing _my_ pred results images to disk...')
            ims_my = write_frames_pred(preds_my,
                                       dir_my_pred_res,
                                       logger,
                                       nchans=1)

            logger.info('Writing _true_ pred results images to disk...')
            ims_true = write_frames_pred(preds_true,
                                         dir_true_pred_res,
                                         logger,
                                         nchans=1)

    if (score):
        for i in range(n_folds):
            score_dict = dict()
            pred_fold = i
            logger.info('Scoring on fold_' + str(pred_fold))

            dir_my = os.path.join(dir_my_root, 'fold_' + str(pred_fold))
            dir_true = os.path.join(dir_true_root, 'fold_' + str(pred_fold))

            dir_true_pred_res = os.path.join(dir_true, 'pred_res', 'img')
            dir_my_pred_res = os.path.join(dir_my, 'pred_res', 'img')

            fnames_true_pred_res = glob.glob(
                os.path.join(dir_true_pred_res, '*.png'))
            fnames_true_pred_res = sorted(fnames_true_pred_res)

            fnames_my_pred_res = glob.glob(
                os.path.join(dir_my_pred_res, '*.png'))
            fnames_my_pred_res = sorted(fnames_my_pred_res)

            # Reload predictions and rescale back to [0, 1]; the frame
            # axis is moved to the end by the transpose.
            my_preds = np.asarray([utls.imread(f) for f in fnames_my_pred_res
                                   ]).transpose(1, 2, 3, 0) / 255
            true_preds = np.asarray(
                [utls.imread(f)
                 for f in fnames_true_pred_res]).transpose(1, 2, 3, 0) / 255
            gts = (resize_datasets([
                np.tile(datasets[pred_fold].gt[:, :, np.newaxis, :],
                        (1, 1, 3, 1))
            ], confunet.unet_im_size) > 0).astype(np.uint8)

            # Flatten everything for pixel-wise scoring.
            vals_gt = gts.ravel()
            vals_my = my_preds.ravel()
            vals_true = true_preds.ravel()

            logger.info('Calculating metrics on my... ')
            # Keys below imply get_all_scores returns
            # (fpr, tpr, auc, pr, rc, f1, thr).
            all_scores = get_all_scores(vals_gt, vals_my, n_points)

            score_dict['conf'] = datasets[pred_fold].conf
            score_dict['fold'] = pred_fold
            score_dict['fpr_my'] = all_scores[0]
            score_dict['tpr_my'] = all_scores[1]
            score_dict['auc_my'] = all_scores[2]
            score_dict['pr_my'] = all_scores[3]
            score_dict['rc_my'] = all_scores[4]
            score_dict['f1_my'] = all_scores[5]
            score_dict['thr_my'] = all_scores[6]

            logger.info('Calculating metrics on true... ')
            all_scores = get_all_scores(vals_gt, vals_true, n_points)

            score_dict['fpr_true'] = all_scores[0]
            score_dict['tpr_true'] = all_scores[1]
            score_dict['auc_true'] = all_scores[2]
            score_dict['pr_true'] = all_scores[3]
            score_dict['rc_true'] = all_scores[4]
            score_dict['f1_true'] = all_scores[5]
            score_dict['thr_true'] = all_scores[6]

            score_dict['my_preds'] = my_preds
            score_dict['true_preds'] = true_preds

            logger.info('Saving results on fold: ' + str(pred_fold))
            file_out = os.path.join(out_dir,
                                    'scores_' + str(pred_fold) + '.npz')
            np.savez(file_out, **score_dict)
コード例 #6
0
ファイル: make_fig_learning.py プロジェクト: AmmieQi/ksptrack
    preds_my.append([])
    preds_true.append([])
    path_ = os.path.join(rd.root_dir,
                        'learning_exps',
                        rd.learning_dirs_dict[key])
    print('Loading: ' + path_)
    dset = np.load(os.path.join(path_, 'datasets.npz'))['datasets']
    folds = rd.best_folds_learning[key][0:2]
    for fold in folds:
        conf = dset[fold].conf
        f = rd.self_frames_dict[key][fold]

        cont_gt = segmentation.find_boundaries(
            dset[fold].gt[..., f], mode='thick')
        idx_cont_gt = np.where(cont_gt)
        im = utls.imread(conf.frameFileNames[f])
        im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
        ims[-1].append(im)
        im_shape = im.shape
        path_my = os.path.join(path_,
                               'my',
                               'fold_'+str(fold),
                               'pred_res',
                               'img',
                               'frame'+str(f)+'.png')
        path_true = os.path.join(path_,
                                 'true',
                                 'fold_'+str(fold),
                                 'pred_res',
                                 'img',
                                 'frame'+str(f)+'.png')
コード例 #7
0
ファイル: batch_resize.py プロジェクト: AmmieQi/ksptrack
    print('Deleting ' + out_dir_ims)
    shutil.rmtree(out_dir_ims)

# (Re)create the ground-truth output directory.
if(not os.path.exists(out_dir_gts)):
    os.mkdir(out_dir_gts)
else:
    print('Deleting ' + out_dir_gts)
    shutil.rmtree(out_dir_gts)
    # NOTE(review): the directory is deleted here but not re-created —
    # confirm a later mkdir exists before files are written into it.

# Crop every image / ground-truth pair to the [top_left, bot_right) box.
ims_cropped = []
gts_cropped = []
print('Cropping:')
with progressbar.ProgressBar(maxval=len(ims)) as bar:
    for i in range(len(ims)):
        bar.update(i)
        im = utls.imread(ims[i])
        gt = utls.imread(gts[i])

        new_im = im[top_left[0]:bot_right[0],top_left[1]:bot_right[1],:]
        new_gt = gt[top_left[0]:bot_right[0],top_left[1]:bot_right[1],:]
        ims_cropped.append(new_im)
        gts_cropped.append(new_gt)
ims_resized = []
gts_resized = []
print('Resizing:')
with progressbar.ProgressBar(maxval=len(ims_cropped)) as bar:
    for i in range(len(ims_cropped)):
        bar.update(i)

        new_im = resize(ims_cropped[i],out_size)
コード例 #8
0
# Configuration of the first run for this sequence type / dataset index.
conf = rd.confs_dict_ksp[type_][dset_num][0]
dataset = ld.LearningDataset(conf)
dataset.load_labels_contours_if_not_exist()
dataset.load_labels_if_not_exist()
labels = dataset.labels
labels_contours = dataset.labelContourMask

# Overlay colors used for the plotted contours.
colors_ = [
    color_dict['red'], color_dict['green'], color_dict['blue'],
    color_dict['magenta'], color_dict['white']
]

# Reference frame index is stored in the first cell of labels_ref.csv.
path_ = os.path.join(rd.root_dir, rd.res_dirs_dict_ksp_cov_ref[dset][20])
df_labels_ref = pd.read_csv(os.path.join(path_, 'labels_ref.csv'))
# Fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is
# the supported replacement and returns the same array.
ref_frame = df_labels_ref.to_numpy()[0, 0]
im = utls.imread(conf.frameFileNames[ref_frame])

# Draw the ground-truth boundary of the reference frame in red, then
# rescale the image to [0, 1] floats for plotting.
gt = dataset.gt
cont_gt = segmentation.find_boundaries(gt[..., ref_frame], mode='thick')
idx_cont_gt = np.where(cont_gt)
im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
im = im.astype(float) / 255
ims_list.append(im)

# Collect the coverage value of every available coverage-ratio run.
for c in covs:
    if (rd.res_dirs_dict_ksp_cov_ref[dset][c] is not None):
        path_ = os.path.join(rd.root_dir,
                             rd.res_dirs_dict_ksp_cov_ref[dset][c])

        print('Loading: ' + path_)
        df_coverage = pd.read_csv(os.path.join(path_, 'coverage.csv'))
        covs_list.append(df_coverage.iloc[0][1])
コード例 #9
0
def main(conf, plot_fname='metrics.pdf', csv_fname='score.csv', logger=None):
    """Score and plot the Vilarino pixel-wise predictions.

    Computes (or reloads from ``metrics.npz``) the F1 score of the
    predicted seeds against the true seeds, writes it to ``csv_fname``,
    and saves per-frame comparison figures under ``conf.dataOutDir``.

    Parameters
    ----------
    conf : object
        Run configuration; must expose ``dataOutDir``,
        ``frameFileNames`` and ``myGaze_fg``.
    plot_fname : str
        Currently unused; kept for interface compatibility.
    csv_fname : str
        Name of the CSV file receiving the F1 score.
    logger : logging.Logger, optional
        Logger to use; a module-specific logger is created when None.
    """

    # Fix: the caller-supplied logger used to be unconditionally
    # overwritten; only create a default when none was provided.
    if logger is None:
        logger = logging.getLogger('plot_results_vilar')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    if (not os.path.exists(os.path.join(conf.dataOutDir, 'metrics.npz'))):

        my_dataset = ds.DatasetVilar(conf)
        my_dataset.load_labels_if_not_exist()
        #l_dataset = learning_dataset.Dataset(conf)
        l_dataset = learning_dataset.LearningDataset(conf)

        logger.info('[1/4] Loading predicted frames... ')
        # Stack per-frame predictions; frame index goes to the last axis.
        pred_frames = np.asarray([
            my_dataset.get_pred_frame(f)
            for f in range(len(conf.frameFileNames))
        ]).transpose(1, 2, 0)

        logger.info('[2/4] Extracting seeds... ')
        seeds = utls.make_y_array_true(pred_frames, my_dataset.labels)

        l_dataset.make_y_array_true(l_dataset.gt)
        seeds_true = l_dataset.y_true

        logger.info('[3/4] Calculating metrics... ')
        # Column 2 carries the positive/negative flag of each seed.
        f1 = f1_score(seeds_true[:, 2], seeds[:, 2])
        logger.info('f1 score: ' + str(f1))

        logger.info('[4/4] Calculating maps... ')

        # Saving metrics
        data = dict()
        data['f1'] = f1
        data['seeds'] = seeds
        data['seeds_true'] = seeds_true

        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)

    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        f1 = metrics['f1']
        seeds = metrics['seeds']
        seeds_true = metrics['seeds_true']

        my_dataset = ds.DatasetVilar(conf)
        my_dataset.load_labels_if_not_exist()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)

    # Write the single F1 value as a one-cell CSV table.
    csv_out = os.path.join(conf.dataOutDir, csv_fname)
    logger.info('Saving f1 scores to: ' + csv_fname)
    C = pd.Index(["F1"], name="columns")
    data = np.asarray(f1).reshape(1, 1)
    df = pd.DataFrame(data=data, columns=C)
    df.to_csv(path_or_buf=csv_out)

    # Plot all iterations of PM

    # Make plots
    logger.info('Saving frames...')
    gt = l_dataset.gt
    frame_dir = 'vilar_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        os.mkdir(frame_path)
        # Keep only the (row, col) coordinates of positive seeds.
        seeds_true = seeds_true[np.where(seeds_true[:, 2])[0], 0:2]
        seeds = seeds[np.where(seeds[:, 2])[0], 0:2]
        scores_true = utls.seeds_to_scores(my_dataset.labels, seeds_true)
        scores = utls.seeds_to_scores(my_dataset.labels, seeds)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                # Outline the ground truth in red on the input frame and
                # draw the gaze point.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = gaze.drawGazePoint(conf.myGaze_fg, f, im, radius=7)
                pred_frame = my_dataset.get_pred_frame(f)

                bar.update(f)
                # 2x3 grid: true scores, predicted scores, image, and
                # the raw pixel-wise prediction.
                plt.subplot(321)
                plt.imshow(scores_true[..., f])
                plt.title('True')
                plt.subplot(322)
                plt.imshow(scores[..., f])
                plt.title('Vilarino')
                plt.subplot(323)
                plt.imshow(im)
                plt.title('image')
                plt.subplot(324)
                plt.imshow(pred_frame)
                plt.title('pixel prediction')
                plt.suptitle('frame: ' + str(f))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)
コード例 #10
0
from sklearn.metrics import (f1_score,roc_curve,auc,precision_recall_curve)
import warnings, itertools, _pickle, progressbar, sys, os, datetime, yaml, hashlib
import my_utils as utls
import logging
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import learning_dataset as ld
from skimage import (color, segmentation, util, transform, io)
import gazeCsv as gaze
import results_dirs as rd
import shutil as sh
from PIL import Image, ImageFont, ImageDraw

# Presumably: decimals shown in result tables and datasets kept per
# sequence type — TODO confirm against downstream usage.
n_decs = 2
dsets_per_type = 1

out_result_dir = os.path.join(rd.root_dir, 'plots_results')

# Example frame and the corresponding pre-computed U-Net gaze output.
im_path = '/home/laurent.lejeune/medical-labeling/Dataset00/input-frames/frame_0400.png'
unet_out_path = '/home/laurent.lejeune/medical-labeling/Dataset00/precomp_descriptors/gaze_out_example.npz'

unet_out = np.load(unet_out_path)['out']
# Fix: removed a leftover debugger breakpoint
# (`import pdb; pdb.set_trace()`) that halted every run here.
im = utls.imread(im_path)

#print('Saving to: ' + file_out)
#img.save(file_out)
コード例 #11
0
def main(out_dir,
         confs,
         plot_fname='metrics',
         metrics_fname='metrics.csv',
         logger=None):
    """Aggregate KSP metrics over several runs and save mean/std results.

    Loads ``metrics.npz`` from each configuration's output directory,
    averages the KSP / KSP+SS / thresholded score volumes across runs,
    stores the means and standard deviations in ``<out_dir>/dataset.npz``
    and writes per-frame preview images to ``<out_dir>/ksp_pm_frames``.

    Parameters
    ----------
    out_dir : str
        Directory where aggregated results are written.
    confs : list
        Run configurations (one per gaze/seed set); all are assumed to
        refer to the same sequence.
    plot_fname : str
        Base name of the curve plot, forwarded to ``plot_curves``.
    metrics_fname : str
        Metrics table file name, forwarded to ``plot_curves``.
    logger : logging.Logger, optional
        Created from 'plot_results_ksp' when not provided.
    """

    # Respect a caller-supplied logger; only build a default when absent
    # (the original unconditionally overwrote the parameter).
    if logger is None:
        logger = logging.getLogger('plot_results_ksp')

    out_dirs = [c.dataOutDir for c in confs]
    logger.info('--------')
    logger.info('Self-learning on: ')
    logger.info(out_dirs)
    logger.info('out_dir: ')
    logger.info(out_dir)
    logger.info('--------')

    # Ground-truth comes from the first run (same sequence for all confs).
    l_dataset = learning_dataset.LearningDataset(confs[0], pos_thr=0.5)

    plot_curves(out_dir, confs, plot_fname, metrics_fname, logger)

    # Per-run score volumes, stacked then reduced to mean/std below.
    l_ksp_scores = list()
    l_ksp_ss_scores = list()
    l_ksp_ss_thr_scores = list()

    for i in range(len(confs)):

        file_ = os.path.join(confs[i].dataOutDir, 'metrics.npz')
        logger.info('Loading ' + file_)
        npzfile = np.load(file_)

        l_ksp_scores.append(npzfile['ksp_scores'])
        l_ksp_ss_scores.append(npzfile['ksp_ss_scores'])
        l_ksp_ss_thr_scores.append(npzfile['ksp_ss_thr_scores'])

    # Make plots
    mean_ksp_scores = np.mean(np.asarray(l_ksp_scores), axis=0)
    mean_ksp_ss_scores = np.mean(np.asarray(l_ksp_ss_scores), axis=0)
    mean_ksp_ss_thr_scores = np.mean(np.asarray(l_ksp_ss_thr_scores), axis=0)

    std_ksp_scores = np.std(np.asarray(l_ksp_scores), axis=0)
    std_ksp_ss_scores = np.std(np.asarray(l_ksp_ss_scores), axis=0)
    std_ksp_ss_thr_scores = np.std(np.asarray(l_ksp_ss_thr_scores), axis=0)

    # Persist the aggregated volumes for later plotting.
    path_ = os.path.join(out_dir, 'dataset.npz')
    data = dict()
    data['mean_ksp_scores'] = mean_ksp_scores
    data['mean_ksp_ss_scores'] = mean_ksp_ss_scores
    data['mean_ksp_ss_thr_scores'] = mean_ksp_ss_thr_scores
    data['std_ksp_scores'] = std_ksp_scores
    data['std_ksp_ss_scores'] = std_ksp_ss_scores
    data['std_ksp_ss_thr_scores'] = std_ksp_ss_thr_scores

    np.savez(path_, **data)

    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(out_dir, frame_dir)
    if (os.path.exists(frame_path)):
        # Frames are only generated once; delete the directory to redo them.
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        os.mkdir(frame_path)
        c0 = confs[0]
        with progressbar.ProgressBar(maxval=len(c0.frameFileNames)) as bar:
            for f in range(len(c0.frameFileNames)):
                # Overlay the ground-truth contour in red on the raw frame.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(c0.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                # Draw every run's gaze point on the same frame.
                for c in confs:
                    im = gaze.drawGazePoint(c.myGaze_fg, f, im, radius=7)

                bar.update(f)
                plt.subplot(241)
                plt.imshow(mean_ksp_scores[..., f])
                plt.title('mean KSP')
                plt.subplot(242)
                plt.imshow(std_ksp_scores[..., f])
                plt.title('std KSP')
                plt.subplot(243)
                plt.imshow(mean_ksp_ss_scores[..., f])
                plt.title('mean KSP+SS')
                plt.subplot(244)
                plt.imshow(std_ksp_ss_scores[..., f])
                plt.title('std KSP+SS')
                plt.subplot(245)
                plt.imshow(mean_ksp_ss_thr_scores[..., f])
                # NOTE(review): 'c' is the last conf of the loop above —
                # assumes pm_thr is identical across confs; confirm.
                plt.title('mean KSP+SS -> PM -> (thr = %0.2f)' % (c.pm_thr))
                plt.subplot(246)
                plt.imshow(std_ksp_ss_thr_scores[..., f])
                plt.title('std KSP+SS -> PM -> (thr = %0.2f)' % (c.pm_thr))
                plt.subplot(247)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)
コード例 #12
0
    def __init__(self,
                 frame_dir,
                 dataset_dir,
                 save_dir,
                 frames,
                 gts,
                 ref_frame,
                 pos_sps,
                 dir_clicks,
                 label_contours=None,
                 labels=None,
                 parent=None):
        """Build the annotation window: image view, slider and save button.

        Parameters
        ----------
        frame_dir : directory containing the frame images
        dataset_dir : dataset name/path (kept for saving results)
        save_dir : directory where results are written
        frames : list of frame file names
        gts : ground-truth masks
        ref_frame : index of the reference frame (used for coverage)
        pos_sps : positive superpixels of the reference frame
        dir_clicks : directory where recorded clicks are stored
        label_contours : superpixel contour masks, optional
        labels : superpixel label maps, optional
        parent : Qt parent widget, optional
        """
        super(Window, self).__init__(parent)
        self.setWindowTitle('Mes jolies images')
        # Image view embedded in a PlotItem (provides axes / mouse coords).
        self.window = pg.ImageView(view=pg.PlotItem())
        self.slider_text = vutls.SliderWithText()
        self.label_contours = label_contours
        self.labels = labels

        self.setCentralWidget(self.window)
        # Slider spans the frame indices.
        # NOTE(review): maximum is len(frames) while the last valid index
        # is len(frames) - 1 — confirm the extra position is intended.
        self.slider_text.slider.setMinimum(0)
        self.slider_text.slider.setMaximum(len(frames))
        self.slider_text.slider.setTickInterval(1)
        self.slider_text.slider.setSingleStep(1)

        self.label = QtGui.QLabel(self)
        #self.label.setGeometry(160, 40, 80, 30)

        self.savebutton = QtGui.QPushButton("Save")

        # Keep references to paths and data used by the callbacks below.
        self.frames = frames
        self.ref_frame = ref_frame
        self.frame_dir = frame_dir
        self.dataset_dir = dataset_dir
        self.save_dir = save_dir
        self.gts = gts
        self.pos_sps = pos_sps
        self.dir_clicks = dir_clicks

        # Image displayed at startup (first frame).
        self.curr_img = utls.imread(
            os.path.join(self.frame_dir, self.frames[0]))
        # Click coordinates and clicked superpixel labels, kept separately
        # for the reference frame and the currently displayed frame.
        self.mouse_list_ref = []
        self.mouse_list = []
        self.labels_list_ref = []
        self.labels_list = []

        # add the menubar with the method createMenuBar()
        self.createMenuBar()
        # add the dock widget with the method createDockWidget()
        self.createDockWidget()
        #
        # first set the default value to a
        self.curr_idx = 0
        self.max_idx = len(self.frames)
        self.slider_text.slider.setValue(self.curr_idx)
        self.drawFrame(self.curr_idx)

        self.sliderValueChanged(0)

        # Connect widgets to functions
        self.slider_text.slider.valueChanged.connect(self.sliderValueChanged)
        self.savebutton.clicked.connect(self.savebuttonClick)
コード例 #13
0
ファイル: plot_results_ksp.py プロジェクト: AmmieQi/ksptrack
def main(conf, logger=None):
    """Evaluate one KSP tracking run: compute, cache and plot its metrics.

    Computes ROC / precision-recall / F1 metrics for the KSP, KSP+SS,
    probability-map (PM) and thresholded variants, caching everything to
    ``<dataOutDir>/metrics.npz`` (recomputed only when that file is
    absent). Then writes the curve plot (``metrics.eps``), per-frame
    preview images (``ksp_pm_frames/``) and a superpixel-count-vs-
    iteration plot (``sps_iters.eps``).

    Parameters
    ----------
    conf : run configuration (paths, thresholds, gaze data, ...);
        ``conf.pm_thr`` is set to 0.8 as a side effect.
    logger : logging.Logger, optional
        Created from 'plot_results_ksp' when not provided.
    """

    # Respect a caller-supplied logger; only create a default when absent
    # (the original unconditionally overwrote the parameter).
    if logger is None:
        logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    # Threshold applied to the KSP+SS probability map.
    conf.pm_thr = 0.8

    if (not os.path.exists(os.path.join(conf.dataOutDir, 'metrics.npz'))):

        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']
        gt_dir = os.path.join(conf.root_path, conf.ds_dir, conf.truth_dir)
        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        my_dataset.load_ss_from_file()

        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        l_dataset.make_y_array_true(l_dataset.gt)

        logger.info('[1/8] Calculating metrics on KSP+SS PM... ')
        #probas_thr = np.linspace(0,1,20)
        # Seeds from the last KSP iteration, expanded by self-similarity.
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        new_seeds = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr)
        my_dataset.fg_marked = new_seeds
        l_dataset.set_seeds(new_seeds)
        l_dataset.make_y_array(l_dataset.seeds)
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           feat_fields=['desc'],
                           T=conf.T,
                           bag_max_depth=conf.bag_max_depth,
                           bag_n_feats=conf.bag_n_feats)

        probas_ksp_ss_pm = my_dataset.fg_pm_df['proba'].as_matrix()
        fpr_pm_ss, tpr_pm_ss, _ = roc_curve(l_dataset.y_true[:, 2],
                                            probas_ksp_ss_pm)
        pr_pm_ss, rc_pm_ss, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                       probas_ksp_ss_pm)
        # One F1 value per distinct probability threshold.
        probas_thr = np.unique(probas_ksp_ss_pm)
        f1_pm_ss = [
            f1_score(l_dataset.y_true[:, 2], probas_ksp_ss_pm > p)
            for p in probas_thr
        ]

        # Curve endpoints (0,0)/(1,1) for ROC and (0,1)/(1,0) for PR are
        # added around the per-iteration operating points below.
        fpr_ksp = [0.]
        tpr_ksp = [0.]
        pr_ksp = [1.]
        rc_ksp = [0.]
        f1_ksp = []

        logger.info('[2/8] Calculating metrics on KSP... ')
        for i in range(len(list_ksp)):
            logger.info('iter: ' + str(i + 1) + '/' + str(len(list_ksp)))

            seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[i]))
            l_dataset.set_seeds(seeds)
            l_dataset.make_y_array(l_dataset.seeds)

            # Binary predictions give a 3-point curve; index 1 is the
            # single operating point of this KSP iteration.
            fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            precision, recall, _ = precision_recall_curve(
                l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            f1_ksp.append(f1_score(l_dataset.y_true[:, 2], l_dataset.y[:, 2]))
            fpr_ksp.append(fpr[1])
            tpr_ksp.append(tpr[1])
            pr_ksp.append(precision[1])
            rc_ksp.append(recall[1])

        fpr_ksp.append(1.)
        tpr_ksp.append(1.)
        pr_ksp.append(0.)
        rc_ksp.append(1.)

        fpr_ksp_ss = [0.]
        tpr_ksp_ss = [0.]
        pr_ksp_ss = [1.]
        rc_ksp_ss = [0.]
        f1_ksp_ss = []

        logger.info('[3/8] Calculating metrics on KSP+SS... ')
        for i in range(len(list_ksp)):
            logger.info('iter: ' + str(i + 1) + '/' + str(len(list_ksp)))

            seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[i]))
            new_seeds = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr)
            l_dataset.set_seeds(new_seeds)
            l_dataset.make_y_array(l_dataset.seeds)

            fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            precision, recall, _ = precision_recall_curve(
                l_dataset.y_true[:, 2], l_dataset.y[:, 2])
            f1_ksp_ss.append(
                f1_score(l_dataset.y_true[:, 2], l_dataset.y[:, 2]))
            fpr_ksp_ss.append(fpr[1])
            tpr_ksp_ss.append(tpr[1])
            pr_ksp_ss.append(precision[1])
            rc_ksp_ss.append(recall[1])

        fpr_ksp_ss.append(1.)
        tpr_ksp_ss.append(1.)
        pr_ksp_ss.append(0.)
        rc_ksp_ss.append(1.)

        #Will append thresholded values to old
        fpr_ksp_ss_thr = list(fpr_ksp_ss)
        tpr_ksp_ss_thr = list(tpr_ksp_ss)
        pr_ksp_ss_thr = list(pr_ksp_ss)
        rc_ksp_ss_thr = list(rc_ksp_ss)
        f1_ksp_ss_thr = list(f1_ksp_ss)
        #probas_ksp_ss_pm = my_dataset.fg_pm_df['proba'].as_matrix()

        logger.info('[4/8] Calculating metrics on KSP+SS thresholded... ')
        y_ksp_ss_thr = probas_ksp_ss_pm > conf.pm_thr

        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2],
                                y_ksp_ss_thr.astype(float))
        precision, recall, _ = precision_recall_curve(
            l_dataset.y_true[:, 2], y_ksp_ss_thr.astype(float))
        f1_ksp_ss_thr.append(
            f1_score(l_dataset.y_true[:, 2], y_ksp_ss_thr.astype(float)))
        fpr_ksp_ss_thr.append(fpr[1])
        tpr_ksp_ss_thr.append(tpr[1])
        rc_ksp_ss_thr.append(recall[1])
        pr_ksp_ss_thr.append(precision[1])

        logger.info('[5/8] Calculating metrics on PM... ')
        #probas_thr = np.linspace(0,1,20)
        fpr_pm = []
        tpr_pm = []
        pr_pm = []
        rc_pm = []
        f1_pm = []
        for i in range(len(list_ksp)):
            logger.info('iter: ' + str(i + 1) + '/' + str(len(list_ksp)))
            seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[i]))
            my_dataset.fg_marked = seeds
            # NOTE(review): here bag_n_feats=conf.max_feats_ratio whereas
            # step [1/8] above uses conf.bag_n_feats — confirm intended.
            my_dataset.calc_pm(my_dataset.fg_marked,
                               save=False,
                               marked_feats=None,
                               all_feats_df=my_dataset.sp_desc_df,
                               in_type='not csv',
                               mode='foreground',
                               bag_n_feats=conf.max_feats_ratio,
                               feat_fields=['desc'])

            probas = my_dataset.fg_pm_df['proba'].as_matrix()
            fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
            precision, recall, _ = precision_recall_curve(
                l_dataset.y_true[:, 2], probas)
            probas_thr = np.unique(probas)
            f1_pm_ = [
                f1_score(l_dataset.y_true[:, 2], probas > p)
                for p in probas_thr
            ]
            f1_pm.append(f1_pm_)
            fpr_pm.append(fpr)
            tpr_pm.append(tpr)
            pr_pm.append(precision)
            rc_pm.append(recall)

        logger.info('[6/8] Calculating metrics on true ground-truth... ')
        # Upper bound: train the PM on the true positive superpixels.
        seeds_gt = l_dataset.y_true[l_dataset.y_true[:, 2] == 1, :]
        my_dataset.fg_marked = seeds_gt
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])

        probas = my_dataset.fg_pm_df['proba'].as_matrix()
        fpr_gt, tpr_gt, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        pr_gt, rc_gt, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                 probas)
        probas_thr = np.unique(probas)
        f1_gt = [
            f1_score(l_dataset.y_true[:, 2], probas > p) for p in probas_thr
        ]

        #Make PM and KSP frames on SS
        logger.info('[7/8] Making prediction maps of KSP and KSP+SS PM... ')
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        ksp_scores = utls.get_scores_from_sps(seeds, my_dataset.labels)
        new_seeds = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr)
        ksp_ss_scores = utls.get_scores_from_sps(new_seeds, my_dataset.labels)

        my_dataset.fg_marked = np.asarray(
            utls.get_node_list_tracklets(list_ksp[-1]))
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])
        pm_ksp = my_dataset.get_pm_array(mode='foreground')
        my_dataset.fg_marked = new_seeds
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])
        pm_ksp_ss = my_dataset.get_pm_array(mode='foreground')

        #Make PM and KSP frames on SS
        f1_pm_thr = []
        fpr_pm_thr = []
        tpr_pm_thr = []
        rc_pm_thr = []
        pr_pm_thr = []
        logger.info(
            '[8/8] Making prediction maps and metrics of KSP+SS PM thresholded... '
        )
        # Keep only (frame, sp_label) pairs whose PM proba passed pm_thr.
        new_seeds_thr_frames = my_dataset.fg_pm_df.loc[y_ksp_ss_thr,
                                                       'frame'].as_matrix()
        new_seeds_thr_labels = my_dataset.fg_pm_df.loc[y_ksp_ss_thr,
                                                       'sp_label'].as_matrix()
        new_seeds_thr = np.concatenate((new_seeds_thr_frames.reshape(
            -1, 1), new_seeds_thr_labels.reshape(-1, 1)),
                                       axis=1)
        ksp_ss_thr_scores = utls.get_scores_from_sps(new_seeds_thr,
                                                     my_dataset.labels)

        my_dataset.fg_marked = new_seeds_thr
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.max_feats_ratio,
                           feat_fields=['desc'])
        pm_ksp_ss_thr = my_dataset.get_pm_array(mode='foreground')

        probas = my_dataset.fg_pm_df['proba'].as_matrix()
        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        precision, recall, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                      probas)
        probas_thr = np.unique(probas)
        f1_pm_thr_ = [
            f1_score(l_dataset.y_true[:, 2], probas > p) for p in probas_thr
        ]
        f1_pm_thr.append(f1_pm_thr_)
        fpr_pm_thr.append(fpr)
        tpr_pm_thr.append(tpr)
        pr_pm_thr.append(precision)
        rc_pm_thr.append(recall)

        ##Saving metrics
        data = dict()
        data['probas_thr'] = probas_thr
        data['fpr_pm'] = fpr_pm
        data['tpr_pm'] = tpr_pm
        data['pr_pm'] = pr_pm
        data['rc_pm'] = rc_pm
        data['f1_pm'] = f1_pm

        data['fpr_pm_thr'] = fpr_pm_thr
        data['tpr_pm_thr'] = tpr_pm_thr
        data['pr_pm_thr'] = pr_pm_thr
        data['rc_pm_thr'] = rc_pm_thr
        data['f1_pm_thr'] = f1_pm_thr

        data['fpr_ksp'] = fpr_ksp
        data['tpr_ksp'] = tpr_ksp
        data['pr_ksp'] = pr_ksp
        data['rc_ksp'] = rc_ksp
        data['f1_ksp'] = f1_ksp

        data['fpr_ksp_ss'] = fpr_ksp_ss
        data['tpr_ksp_ss'] = tpr_ksp_ss
        data['pr_ksp_ss'] = pr_ksp_ss
        data['rc_ksp_ss'] = rc_ksp_ss
        data['f1_ksp_ss'] = f1_ksp_ss

        data['fpr_ksp_ss_thr'] = fpr_ksp_ss_thr
        data['tpr_ksp_ss_thr'] = tpr_ksp_ss_thr
        data['pr_ksp_ss_thr'] = pr_ksp_ss_thr
        data['rc_ksp_ss_thr'] = rc_ksp_ss_thr
        data['f1_ksp_ss_thr'] = f1_ksp_ss_thr

        data['fpr_pm_ss'] = fpr_pm_ss
        data['tpr_pm_ss'] = tpr_pm_ss
        data['pr_pm_ss'] = pr_pm_ss
        data['rc_pm_ss'] = rc_pm_ss
        data['f1_pm_ss'] = f1_pm_ss

        data['fpr_gt'] = fpr_gt
        data['tpr_gt'] = tpr_gt
        data['pr_gt'] = pr_gt
        data['rc_gt'] = rc_gt
        data['f1_gt'] = f1_gt

        #ksp_ss_thr_scores
        data['seeds'] = seeds
        data['ksp_scores'] = ksp_scores
        data['new_seeds'] = seeds_gt
        data['ksp_ss_scores'] = ksp_ss_scores
        data['ksp_ss_thr_scores'] = ksp_ss_thr_scores
        data['pm_ksp'] = pm_ksp
        data['pm_ksp_ss'] = pm_ksp_ss
        data['pm_ksp_ss_thr'] = pm_ksp_ss_thr
        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)
    else:
        # Cached results exist: restore everything needed for plotting.
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        probas_thr = metrics['probas_thr']
        fpr_pm = metrics['fpr_pm']
        tpr_pm = metrics['tpr_pm']
        pr_pm = metrics['pr_pm']
        rc_pm = metrics['rc_pm']
        f1_pm = metrics['f1_pm']

        fpr_pm_thr = metrics['fpr_pm_thr']
        tpr_pm_thr = metrics['tpr_pm_thr']
        pr_pm_thr = metrics['pr_pm_thr']
        rc_pm_thr = metrics['rc_pm_thr']
        f1_pm_thr = metrics['f1_pm_thr']

        fpr_ksp = metrics['fpr_ksp']
        tpr_ksp = metrics['tpr_ksp']
        pr_ksp = metrics['pr_ksp']
        rc_ksp = metrics['rc_ksp']
        f1_ksp = metrics['f1_ksp']

        fpr_ksp_ss = metrics['fpr_ksp_ss']
        tpr_ksp_ss = metrics['tpr_ksp_ss']
        pr_ksp_ss = metrics['pr_ksp_ss']
        rc_ksp_ss = metrics['rc_ksp_ss']
        f1_ksp_ss = metrics['f1_ksp_ss']

        fpr_ksp_ss_thr = metrics['fpr_ksp_ss_thr']
        tpr_ksp_ss_thr = metrics['tpr_ksp_ss_thr']
        pr_ksp_ss_thr = metrics['pr_ksp_ss_thr']
        rc_ksp_ss_thr = metrics['rc_ksp_ss_thr']
        f1_ksp_ss_thr = metrics['f1_ksp_ss_thr']

        fpr_pm_ss = metrics['fpr_pm_ss']
        tpr_pm_ss = metrics['tpr_pm_ss']
        pr_pm_ss = metrics['pr_pm_ss']
        rc_pm_ss = metrics['rc_pm_ss']
        f1_pm_ss = metrics['f1_pm_ss']

        fpr_gt = metrics['fpr_gt']
        tpr_gt = metrics['tpr_gt']
        pr_gt = metrics['pr_gt']
        rc_gt = metrics['rc_gt']
        f1_gt = metrics['f1_gt']

        seeds = metrics['seeds']
        ksp_scores = metrics['ksp_scores']
        seeds_gt = metrics['new_seeds']
        ksp_ss_scores = metrics['ksp_ss_scores']
        ksp_ss_thr_scores = metrics['ksp_ss_thr_scores']
        pm_ksp = metrics['pm_ksp']
        pm_ksp_ss = metrics['pm_ksp_ss']
        pm_ksp_ss_thr = metrics['pm_ksp_ss_thr']

        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        my_dataset.load_ss_from_file()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']

    #Plot all iterations of PM
    plt.clf()
    conf.roc_xlim = [0, 0.4]
    conf.pr_rc_xlim = [0.6, 1.]

    colors = cycle([
        'brown', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange',
        'slateblue', 'lightpink', 'darkmagenta'
    ])
    lw = 1
    #PM curves
    for i, color in zip(range(len(tpr_pm)), colors):
        auc_ = auc(fpr_pm[i], tpr_pm[i])
        max_f1 = np.max(f1_pm[i])

        plt.subplot(121)
        plt.plot(fpr_pm[i],
                 tpr_pm[i],
                 '-',
                 lw=lw,
                 color=color,
                 label='KSP/PM iter. %d (area = %0.4f, max(F1) = %0.4f)' %
                 (i + 1, auc_, max_f1))

        auc_ = auc(rc_pm[i], pr_pm[i])
        plt.subplot(122)
        plt.plot(rc_pm[i],
                 pr_pm[i],
                 '-',
                 lw=lw,
                 color=color,
                 label='KSP/PM iter. %d (area = %0.4f, max(F1) = %0.4f)' %
                 (i + 1, auc_, max_f1))

    #Plot true groundtruth
    #auc_ = auc(fpr_gt, tpr_gt)
    #max_f1 = np.max(f1_gt)
    #plt.subplot(121)
    #plt.plot(fpr_gt, tpr_gt,'r-', lw=lw,
    #            label='GT (area = %0.4f, max(F1) = %0.4f)' % (auc_,max_f1))
    #plt.subplot(122)
    #auc_ = auc(rc_gt, pr_gt)
    #plt.plot(rc_gt, pr_gt,'r-', lw=lw,
    #            label='GT (area = %0.4f, max(F1) = %0.4f)' % (auc_,max_f1))

    #Plot KSP
    # reorder=True lets auc() sort non-monotonic operating points
    # (removed in sklearn >= 0.22; this file targets an older version).
    auc_ = auc(fpr_ksp, tpr_ksp, reorder=True)
    max_f1 = np.max(f1_ksp)
    plt.subplot(121)
    plt.plot(fpr_ksp,
             tpr_ksp,
             'go--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(rc_ksp, pr_ksp, reorder=True)
    plt.plot(rc_ksp,
             pr_ksp,
             'go--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    #Plot KSP+SS
    auc_ = auc(fpr_ksp_ss, tpr_ksp_ss, reorder=True)
    max_f1 = np.max(f1_ksp_ss)
    plt.subplot(121)
    plt.plot(fpr_ksp_ss,
             tpr_ksp_ss,
             'ro--',
             lw=lw,
             label='KSP+SS (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(rc_ksp_ss, pr_ksp_ss, reorder=True)
    plt.plot(rc_ksp_ss,
             pr_ksp_ss,
             'ro--',
             lw=lw,
             label='KSP+SS (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    #Plot KSP+SS thresholded
    auc_ = auc(fpr_ksp_ss_thr, tpr_ksp_ss_thr, reorder=True)
    max_f1 = np.max(f1_ksp_ss_thr)
    plt.subplot(121)
    plt.plot(np.sort(fpr_ksp_ss_thr),
             np.sort(tpr_ksp_ss_thr),
             'ko--',
             lw=lw,
             label='KSP+SS (thr = %0.2f) (area = %0.4f, max(F1) = %0.4f)' %
             (conf.pm_thr, auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_ksp_ss_thr).ravel(),
               np.asarray(pr_ksp_ss_thr).ravel(),
               reorder=True)
    # Fix: sort precision then reverse so it descends along ascending
    # recall (was np.sort(pr_ksp_ss_thr[::-1]), which is just ascending).
    plt.plot(np.sort(rc_ksp_ss_thr)[::-1],
             np.sort(pr_ksp_ss_thr)[::-1],
             'ko--',
             lw=lw,
             label='KSP+SS (thr = %0.2f) (area = %0.4f, max(F1) = %0.4f)' %
             (conf.pm_thr, auc_, max_f1))

    #Plot KSP+SS PM
    auc_ = auc(fpr_pm_ss, tpr_pm_ss)
    max_f1 = np.max(f1_pm_ss)
    plt.subplot(121)
    plt.plot(np.asarray(fpr_pm_ss).ravel(),
             np.asarray(tpr_pm_ss).ravel(),
             'm-',
             lw=lw,
             label='KSP+SS/PM (area = %0.4f, max(F1) = %0.4f)' %
             (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(rc_pm_ss, pr_pm_ss)
    plt.plot(rc_pm_ss,
             pr_pm_ss,
             'm-',
             lw=lw,
             label='KSP+SS/PM (area = %0.4f, max(F1) = %0.4f)' %
             (auc_, max_f1))

    #Plot KSP+SS PM thresholded
    auc_ = auc(np.asarray(fpr_pm_thr).ravel(), np.asarray(tpr_pm_thr).ravel())
    max_f1 = np.max(f1_pm_thr)
    plt.subplot(121)
    plt.plot(
        np.asarray(fpr_pm_thr).ravel(),
        np.asarray(tpr_pm_thr).ravel(),
        'c-',
        lw=lw,
        label='KSP+SS/PM (thr = %0.2f)/PM (area = %0.4f, max(F1) = %0.4f)' %
        (conf.pm_thr, auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_pm_thr).ravel(), np.asarray(pr_pm_thr).ravel())
    plt.plot(np.asarray(rc_pm_thr).ravel(),
             np.asarray(pr_pm_thr).ravel(),
             'c-',
             lw=lw,
             label='KSP+SS/PM (thr = %0.2f) (area = %0.4f, max(F1) = %0.4f)' %
             (conf.pm_thr, auc_, max_f1))

    plt.subplot(121)
    plt.legend()
    plt.xlim(conf.roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.subplot(122)
    plt.legend()
    plt.xlim(conf.pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle(conf.seq_type + ', ' + conf.ds_dir + '\n' + 'T: ' +
                 str(conf.T))
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    fig.savefig(os.path.join(conf.dataOutDir, 'metrics.eps'), dpi=200)

    ###Make plots
    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        # Frames are only generated once; delete the directory to redo them.
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        n_iters_ksp = len(list_ksp)
        os.mkdir(frame_path)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                # Overlay the ground-truth contour in red and the gaze point.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = gaze.drawGazePoint(conf.myGaze_fg, f, im, radius=7)

                bar.update(f)
                plt.subplot(231)
                plt.imshow(ksp_scores[..., f])
                plt.title('KSP')
                plt.subplot(232)
                plt.imshow(pm_ksp[..., f])
                plt.title('KSP -> PM')
                plt.subplot(233)
                plt.imshow(ksp_ss_scores[..., f])
                plt.title('KSP+SS')
                plt.subplot(234)
                plt.imshow(ksp_ss_thr_scores[..., f])
                plt.title('KSP+SS -> PM -> (thr = %0.2f)' % (conf.pm_thr))
                plt.subplot(235)
                plt.imshow(pm_ksp_ss_thr[..., f])
                plt.title('KSP+SS -> PM -> (thr = %0.2f) -> PM' %
                          (conf.pm_thr))
                plt.subplot(236)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f) + ', n_iters_ksp: ' +
                             str(n_iters_ksp))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)

    logger.info('Saving SPs per iterations plot...')
    # Number of selected superpixels after each KSP iteration, plus one
    # extra point (in red) after the self-similarity expansion.
    n_sps = []
    for i in range(len(list_ksp)):
        n = np.asarray(utls.get_node_list_tracklets(list_ksp[i])).shape[0]
        n_sps.append((i + 1, n))

    seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
    n = ss.thr_all_graphs(my_dataset.g_ss, seeds, conf.ss_thr).shape[0]
    n_sps.append((len(list_ksp) + 1, n))
    n_sps = np.asarray(n_sps)

    plt.clf()
    plt.plot(n_sps[:, 0], n_sps[:, 1], 'bo-')
    plt.plot(n_sps[-1, 0], n_sps[-1, 1], 'ro')
    plt.xlabel('iterations')
    plt.ylabel('num. of superpixels')
    plt.title('num of superpixels vs. iterations. SS threshold: ' +
              str(conf.ss_thr))
    plt.savefig(os.path.join(conf.dataOutDir, 'sps_iters.eps'), dpi=200)
コード例 #14
0
ファイル: sp.py プロジェクト: AmmieQi/ksptrack
def getMeanSPs(sp, feature2d, frameFileNames, denseStep, sp_num_iterations):
    """
    Computes mean descriptors inside superpixel segmentation for a set of images.

    Parameters
    ----------
    sp : Superpixel object
        OpenCV superpixel segmenter (provides iterate/getLabels/...).
    feature2d : OpenCV object to compute keypoints and descriptors
    frameFileNames : list of strings
        Complete path to set of images
    denseStep : int
        Step size of grid in which keypoints are generated. Automatically
        reduced when a superpixel ends up containing no keypoint.
    sp_num_iterations : int
        Number of iterations for superpixel method

    Returns
    -------
    avgDesc : ndarray
        Average descriptor of each superpixel, per frame
    kps : list of list
        Keypoints (all of them)
    desc : list of list
        Descriptors (all of them)
    labels : list of arrays
        Superpixel labels of each image
    spLabelContourMask : list of arrays
        Superpixel contour mask of each image

    Raises
    ------
    ValueError
        If the keypoint grid step shrinks below 1 without covering every
        superpixel (previously this crashed with an obscure range() error).
    """

    keyFramesIdx = np.arange(0, len(frameFileNames))

    labels = []
    spLabelContourMask = []
    avgDesc = []
    kps = []
    desc = []

    for keyFrame in keyFramesIdx:
        sys.stdout.write("Processing frame %i/%i \n" %
                         (keyFrame, len(keyFramesIdx) - 1))

        img = utils.imread(frameFileNames[keyFrame])
        # Superpixels are computed in HSV color space.
        sp.iterate(cv2.cvtColor(img, cv2.COLOR_RGB2HSV), sp_num_iterations)
        spLabelToCheck = sp.getLabels()

        # Densely sample keypoints on a regular grid; shrink the grid step
        # until every superpixel contains at least one keypoint.
        while True:
            detectedEmpty = False

            kpDense = [
                cv2.KeyPoint(x, y, denseStep)
                for y in range(0, img.shape[0], denseStep)
                for x in range(0, img.shape[1], denseStep)
            ]

            for i in range(sp.getNumberOfSuperpixels()):
                mask = (spLabelToCheck == i)
                thisSPkps, thisSPkpsIdx = maskKeypoints(kpDense, mask)

                if len(thisSPkps) == 0:
                    # A region without keypoints was detected: reduce the
                    # keypoint step size by 1 on this image and re-sample.
                    detectedEmpty = True
                    denseStep -= 1
                    if denseStep < 1:
                        raise ValueError(
                            'keypoint grid step reached 0 without covering '
                            'all superpixels')
                    break

            if not detectedEmpty:
                break

        labels.append(spLabelToCheck)
        spLabelContourMask.append(sp.getLabelContourMask())
        kptmp, destmp = feature2d.compute(img, kpDense)
        kps.append(kptmp)
        desc.append(destmp)

        # Average the descriptors contained in each superpixel.
        thisFrameAvgDesc = []
        for i in range(sp.getNumberOfSuperpixels()):
            mask = (labels[-1] == i)
            thisSPkps, thisSPkpsIdx = maskKeypoints(kps[-1], mask)
            thisSPDesc = desc[-1][thisSPkpsIdx]
            thisFrameAvgDesc.append(np.mean(thisSPDesc, axis=0))

        avgDesc.append(thisFrameAvgDesc)

    return (np.asarray(avgDesc), kps, desc, labels, spLabelContourMask)
コード例 #15
0
ファイル: make_plots_multi.py プロジェクト: AmmieQi/ksptrack
                                 rd.out_dirs_dict_ksp[key][dset])

            # Make images/gts/gaze-point
            ims.append([])
            ksp_means.append([])
            l_dataset = ld.LearningDataset(rd.confs_dict_ksp[key][dset][0],
                                           pos_thr=0.5)
            confs = rd.confs_dict_ksp[key][dset]
            gt = l_dataset.gt
            ksp_mean_all = np.load(os.path.join(
                path_, 'dataset.npz'))['mean_ksp_scores']
            for f in rd.all_frames_dict[key][dset]:
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(confs[0].frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                for key_conf in confs.keys():
                    im = gaze.drawGazePoint(confs[key_conf].myGaze_fg,
                                            f,
                                            im,
                                            radius=7)

                ims[-1].append(im)
                ksp_means[-1].append(ksp_mean_all[..., f])

    data = {'ims': ims, 'ksp_means': ksp_means}
    np.savez(file_out, **data)
else:
    print('Loading: ' + file_out)
    npzfile = np.load(file_out)
コード例 #16
0
ファイル: dataset_vilar.py プロジェクト: AmmieQi/ksptrack
    def calc_training_patches(self, save=False):
        """Extract normalized training patches around and away from gaze points.

        For every frame, one "seen" patch is cut out centered on the gaze
        coordinate, and several "unseen" (negative) patches are cut at the
        centers returned by ``get_centers_negatives``. Each patch is
        zero-mean/unit-std normalized and rescaled by ``conf.scale_factor``.

        Parameters
        ----------
        save : bool
            When True, pickle the seen/unseen patch DataFrames under
            ``conf.dataOutDir/vilar``.

        Returns
        -------
        bool
            Always True.
        """

        ps = self.conf.patch_size
        my_gaze = self.conf.myGaze_fg
        scale_factor = self.conf.scale_factor
        seen_patches = list()
        unseen_patches = list()
        selem = morphology.square(5)

        print('Getting seen patches')

        with progressbar.ProgressBar(maxval=len(self.conf.frameFileNames)) as bar:
            for i in range(len(self.conf.frameFileNames)):
                bar.update(i)

                # Work on a median-filtered grayscale version of the frame.
                img = utls.imread(self.conf.frameFileNames[i])
                img = (color.rgb2gray(img)*255).astype(np.uint8)
                img = median(img, selem=selem)  # 5x5 square structuring element

                # Gaze (normalized) coordinates -> pixel coordinates.
                # NOTE(review): assumes columns 3 and 4 of the gaze array hold
                # x and y -- confirm against the gaze-file format.
                ci_seen, cj_seen = gaze.gazeCoord2Pixel(
                    my_gaze[i, 3], my_gaze[i, 4], img.shape[1], img.shape[0])

                # Centers of the negative ("unseen") patches.
                i_, j_ = self.get_centers_negatives(ci_seen, cj_seen, ps,
                                                    self.conf.overlap_ratio,
                                                    img.shape)

                # Pad by one patch size on every side so a gaze point near the
                # border still yields a full ps x ps patch; the +ps/2..+3*ps/2
                # window is the patch centered on (ci_seen, cj_seen) in the
                # padded image.
                img_padded = pad(img,((ps,ps),),mode='symmetric')
                seen_patch = img_padded[int(ci_seen + ps/2):int(ci_seen + 3*ps/2),int(cj_seen + ps/2):int(cj_seen + 3*ps/2)]

                # Normalize (guarding against a constant patch) and rescale.
                seen_patch_mean = np.mean(seen_patch)
                seen_patch_std = np.std(seen_patch)
                if(seen_patch_std == 0): seen_patch_std = 1
                seen_patch = (seen_patch - seen_patch_mean) / seen_patch_std
                seen_patch = rescale(
                    seen_patch,
                    scale=scale_factor,
                    order=1,
                    mode='reflect',
                    preserve_range=True)
                seen_patches.append((i,
                                     ci_seen,
                                     cj_seen,
                                     seen_patch.ravel()))

                # Negative patches are cut from the *unpadded* image; their
                # centers are assumed to lie far enough from the border.
                for k in range(i_.shape[0]):
                    unseen_patch = img[int(i_[k]- ps / 2):int(i_[k] + ps / 2), int(
                        j_[k]- ps / 2):int(j_[k]+ ps / 2)]
                    unseen_patch_mean = np.mean(unseen_patch)
                    unseen_patch_std = np.std(unseen_patch)
                    if(unseen_patch_std == 0): unseen_patch_std = 1
                    unseen_patch = (unseen_patch - unseen_patch_mean) / unseen_patch_std
                    unseen_patch = rescale(
                        unseen_patch,
                        scale=scale_factor,
                        order=1,
                        mode='reflect',
                        preserve_range=True)
                    unseen_patches.append((i,
                                            i_[k],
                                            j_[k],
                                            unseen_patch.ravel()))
        if(save):
            # Persist both patch sets as pickled DataFrames for later training.
            seen_patches_df = pd.DataFrame(seen_patches,
                                            columns=['frame',
                                                     'c_i',
                                                     'c_j',
                                                     'patch'])
            unseen_patches_df = pd.DataFrame(unseen_patches,
                                            columns=['frame',
                                                     'c_i',
                                                     'c_j',
                                                     'patch'])
            save_path = os.path.join(self.conf.dataOutDir,
                                     'vilar')
            if(not os.path.exists(save_path)):
                os.mkdir(save_path)
            seen_patches_df.to_pickle(os.path.join(save_path,
                                                   'vilar_seen_patches_df.p'))
            unseen_patches_df.to_pickle(
                os.path.join(save_path,
                            'vilar_unseen_patches_df.p'))

        return True
コード例 #17
0
def make_frame(idx,
               ref_idx,
               frames,
               labels,
               label_contours,
               gts,
               labels_clicked,
               mouse_clicks,
               mode='right',
               highlight_idx=None):
    """
    Render one annotated frame for the viewer.

    Overlays, on top of the raw frame: superpixel label contours (white),
    the ground-truth region, the clicked/tracked superpixels, and the
    recorded mouse-click positions, then rotates the result for display.

    Parameters
    ----------
    idx : int
        Frame index to draw when mode == 'right'.
    ref_idx : int
        Reference frame index, drawn when mode != 'right'.
    frames, gts : lists of str
        Paths to frame and ground-truth images.
    labels : ndarray (H, W, n_frames)
        Superpixel label map per frame.
    label_contours : ndarray (n_frames, H, W) or None
        Superpixel contour masks; contour drawing is skipped when None.
    labels_clicked : list of tuples
        Clicked labels; last element of each tuple is the label id.
    mouse_clicks : list of tuples
        Click records; last two elements of each tuple are (x, y).
    mode : str
        'right' draws frame idx, anything else draws ref_idx.
    highlight_idx : unused
        Kept for interface compatibility with existing callers.

    Returns
    -------
    img : ndarray
        Annotated RGB image, rotated 90 degrees and flipped vertically.
    """

    if (mode == 'right'):
        the_idx = idx
    else:
        the_idx = ref_idx

    shape = labels[:, :, 0].shape

    colors_ = [
        color_dict['red'], color_dict['green'], color_dict['blue'],
        color_dict['magenta'], color_dict['white']
    ]

    # mask encodes the overlay classes: 0 = background, 1 = ground truth,
    # 2 = tracked labels, 3 = mouse clicks.
    mask = np.zeros(shape, dtype=np.uint8)

    img = utls.imread(frames[the_idx])
    if (img.shape[2] > 3):  # drop alpha channel
        img = img[:, :, 0:3]

    if (label_contours is not None):
        idx_contours = np.where(label_contours[the_idx, :, :])
        img[idx_contours[0], idx_contours[1], :] = (255, 255, 255)

    # Ground-truth overlay. Computed unconditionally: the original code only
    # defined l_idx inside the label_contours branch, so the mask assignment
    # below raised a NameError whenever label_contours was None.
    l_ = (utls.imread(gts[the_idx]) > 0)[..., 0].astype(np.uint8)
    l_idx = np.where(l_)
    mask[l_idx[0], l_idx[1]] = 1

    # Draw set of tracked labels
    if (mode == 'right'):
        labels_to_draw = [l[-1] for l in labels_clicked if (l[1] == idx)]
    else:
        print('labels_clicked: ' + str(labels_clicked))
        print('idx: ' + str(idx))
        labels_to_draw = [l[-1] for l in labels_clicked if (l[0] == ref_idx)]
        print('labels_to_draw: ' + str(labels_to_draw))

    for this_label in labels_to_draw:
        mask_tmp = labels[:, :, the_idx] == this_label
        l_idx = np.where(mask_tmp.astype(np.uint8))
        mask[l_idx[0], l_idx[1]] = 2

    if (mode == 'right'):
        mouse_clicks_to_draw = [r for r in mouse_clicks if (r[1] == idx)]
    else:
        mouse_clicks_to_draw = [r for r in mouse_clicks if (r[0] == ref_idx)]

    # Draw each recorded click as a filled disk of class 3.
    for click in mouse_clicks_to_draw:
        x = click[-2]
        y = click[-1]
        g_i, g_j = gaze.gazeCoord2Pixel(x, y, mask.shape[1], mask.shape[0])
        rr, cc = circle(g_i, g_j, radius=7)
        mask[rr, cc] = 3

    img = color.label2rgb(mask, img, alpha=.2, bg_label=0, colors=colors_)

    # Rotate/flip to the display orientation used by the viewer.
    img = np.rot90(img)[::-1, :, :]

    return img
コード例 #18
0
ファイル: learning_exp.py プロジェクト: AmmieQi/ksptrack
def main(confs, out_dir=None):

    alpha = 0.3
    n_points = 2000
    seq_type = confs[0].seq_type

    if (out_dir is None):
        now = datetime.datetime.now()
        dateTime = now.strftime("%Y-%m-%d_%H-%M-%S")
        out_dir = os.path.join(
            confs[0].dataOutRoot, 'learning_exps',
            'learning_' + confs[0].seq_type + '_' + dateTime)

    dir_in = [c.dataOutDir for c in confs]

    if (not os.path.exists(out_dir)):
        os.mkdir(out_dir)

    datasets = []
    utls.setup_logging(out_dir)
    logger = logging.getLogger('learning_exp')

    logger.info('Starting learning experiment on:')
    logger.info(dir_in)
    logger.info('Gaze file: ' + str(confs[0].csvFileName_fg))
    logger.info('')

    if (not os.path.exists(os.path.join(out_dir, 'datasets.npz'))):
        logger.info('Building target vectors')
        for i in range(len(dir_in)):
            with open(os.path.join(dir_in[i], 'cfg.yml'), 'r') as outfile:
                conf = yaml.load(outfile)

            logger.info('Dataset: ' + str(i + 1) + '/' + str(len(dir_in)))
            files = sorted(
                glob.glob(os.path.join(dir_in[i], 'pm_scores_iter*')))[-1]

            #logger.info('Init. learner')
            dataset = learning_dataset.LearningDataset(conf)

            npz_file = np.load(os.path.join(dir_in[i], 'results.npz'))

            seeds = np.asarray(
                utls.get_node_list_tracklets(npz_file['list_ksp'][-1]))
            if (confs[0].use_ss):
                dataset.load_ss_from_file()
                seeds = ss.thr_all_graphs(dataset.g_ss, seeds, conf.ss_thr)
            dataset.set_seeds(seeds)

            dataset.make_y_array(seeds)
            dataset.make_y_array_true(dataset.gt)

            datasets.append(dataset)

        if (not os.path.exists(out_dir)):
            os.mkdir(out_dir)

        logger.info('saving datasets to: ' + out_dir)
        np.savez(os.path.join(out_dir, 'datasets.npz'),
                 **{'datasets': datasets})
    else:
        logger.info('Loading datasets...')
        datasets = np.load(os.path.join(out_dir, 'datasets.npz'))['datasets']

    n_folds = 4
    fold_ids = np.arange(0, 4)[::-1]
    res_list = []

    n_e = 150

    if (not os.path.exists(os.path.join(out_dir, 'results.npz'))):
        for i in range(n_folds):

            logger.info('-----------------')
            pred_fold = i
            train_folds = np.asarray([
                fold_ids[j] for j in range(n_folds)
                if (fold_ids[j] != pred_fold)
            ])

            logger.info('train_folds: ' + str(train_folds))
            logger.info('pred_folds: ' + str(pred_fold))
            logger.info('-----------------')

            X_train = utls.concat_arr(
                np.concatenate([
                    datasets[train_folds[j]].X
                    for j in range(train_folds.shape[0])
                ]))
            y_train_my = np.concatenate([
                datasets[train_folds[j]].y[:, 2]
                for j in range(train_folds.shape[0])
            ])
            y_train_true = np.concatenate([
                datasets[train_folds[j]].y_true[:, 2]
                for j in range(train_folds.shape[0])
            ])

            logger.info('Extracting X_test')
            X_test = utls.concat_arr(datasets[pred_fold].X)
            logger.info('Extracting y_test')
            y_test = datasets[pred_fold].y_true[:, 2]

            logger.info('Fitting...')
            bag_n_feats = confs[0].bag_n_feats_rf
            bag_max_depth = confs[0].bag_max_depth_rf
            logger.info('bag_n_feats: ' + str(bag_n_feats))
            logger.info('bag_max_depth: ' + str(bag_max_depth))
            n_trees = datasets[0].conf.T
            clf_my = RandomForestClassifier(max_features=bag_n_feats,
                                            class_weight='balanced',
                                            n_estimators=n_trees)
            clf_true = RandomForestClassifier(max_features=bag_n_feats,
                                              class_weight='balanced',
                                              n_estimators=n_trees)
            clf_my.fit(X_train, y_train_my)
            clf_true.fit(X_train, y_train_true)

            logger.info('Predicting...')
            probas_my = clf_my.predict_proba(X_test)[:, 1]
            probas_true = clf_true.predict_proba(X_test)[:, 1]

            #probas_my = rf.run(X_train,y_train_my,X_test,150)
            #probas_true = rf.run(X_train,y_train_true,X_test,150)

            logger.info('Computing ROC curves on true model')
            fpr_true, tpr_true, thresholds_true = roc_curve(
                y_test, probas_true)

            auc_true = auc(fpr_true, tpr_true)
            logger.info('auc_true: ' + str(auc_true))
            logger.info('Computing ROC curves on my model')
            fpr_my, tpr_my, thresholds_my = roc_curve(y_test,
                                                      probas_my,
                                                      pos_label=1)
            auc_my = auc(fpr_my, tpr_my)
            logger.info('auc_my: ' + str(auc_my))

            logger.info('Computing prec-recall curves on true model')
            precision_true, recall_true, _ = precision_recall_curve(
                y_test, probas_true)
            logger.info('Computing prec-recall curves on my model')
            precision_my, recall_my, _ = precision_recall_curve(
                y_test, probas_my)

            dict_ = dict()
            dict_['train_folds'] = train_folds
            dict_['pred_fold'] = pred_fold
            dict_['n_estimators'] = n_e
            dict_['fpr_true'] = fpr_true
            dict_['tpr_true'] = tpr_true
            dict_['fpr_my'] = fpr_my
            dict_['tpr_my'] = tpr_my
            dict_['auc_true'] = auc_true
            dict_['precision_true'] = precision_true
            dict_['recall_true'] = recall_true
            dict_['auc_true'] = auc_true
            dict_['auc_my'] = auc_my
            dict_['precision_my'] = precision_my
            dict_['recall_my'] = recall_my
            dict_['probas_my'] = probas_my
            dict_['probas_true'] = probas_true
            dict_['y_test'] = y_test

            res_list.append(dict_)

        file_out = os.path.join(out_dir, 'results.npz')
        logger.info('Saving metrics to ')
        np.savez(file_out, **{'res_list': res_list})
    else:
        logger.info('Loading results...')
        res_list = np.load(os.path.join(out_dir, 'results.npz'))['res_list']

    #Plot folds
    colors = ['blue', 'darkorange', 'seagreen', 'yellow', 'blue']
    lw = 1
    plt.clf()

    l_fpr_true = []
    l_tpr_true = []
    l_pr_true = []
    l_rc_true = []

    l_fpr_my = []
    l_tpr_my = []
    l_pr_my = []
    l_rc_my = []

    for i in range(len(res_list)):
        fpr_true = res_list[i]['fpr_true']
        tpr_true = res_list[i]['tpr_true']
        #fpr_true, tpr_true = utls.my_interp(fpr_true, tpr_true, n_points)
        l_fpr_true.append(fpr_true)
        l_tpr_true.append(tpr_true)

        fpr_my = res_list[i]['fpr_my']
        tpr_my = res_list[i]['tpr_my']
        #fpr_my, tpr_my = utls.my_interp(fpr_my, tpr_my, n_points)
        l_fpr_my.append(fpr_my)
        l_tpr_my.append(tpr_my)

        pr_true = res_list[i]['precision_true']
        rc_true = res_list[i]['recall_true']
        #rc_true, pr_true = utls.my_interp(rc_true, pr_true, n_points)
        l_rc_true.append(rc_true)
        l_pr_true.append(pr_true)

        pr_my = res_list[i]['precision_my']
        rc_my = res_list[i]['recall_my']
        #rc_my, pr_my = utls.my_interp(rc_my, pr_my, n_points)
        l_rc_my.append(rc_my)
        l_pr_my.append(pr_my)

    rc_range_my = [
        np.min([np.min(l_rc_my[i]) for i in range(len(l_rc_my))]),
        np.max([np.max(l_rc_my[i]) for i in range(len(l_rc_my))])
    ]

    rc_range_true = [
        np.min([np.min(l_rc_true[i]) for i in range(len(l_rc_true))]),
        np.max([np.max(l_rc_true[i]) for i in range(len(l_rc_true))])
    ]

    rc_range = [
        np.min((rc_range_my[0], rc_range_true[0])),
        np.max((rc_range_my[1], rc_range_true[1]))
    ]

    fpr_range_my = [
        np.min([np.min(l_fpr_my[i]) for i in range(len(l_fpr_my))]),
        np.max([np.max(l_fpr_my[i]) for i in range(len(l_fpr_my))])
    ]

    fpr_range_true = [
        np.min([np.min(l_fpr_true[i]) for i in range(len(l_fpr_true))]),
        np.max([np.max(l_fpr_true[i]) for i in range(len(l_fpr_true))])
    ]

    fpr_range = [
        np.min((fpr_range_my[0], fpr_range_true[0])),
        np.max((fpr_range_my[1], fpr_range_true[1]))
    ]

    l_fpr_tpr_my_interp = np.asarray([
        utls.my_interp(l_fpr_my[i], l_tpr_my[i], n_points, fpr_range)
        for i in range(len(l_fpr_my))
    ]).transpose(1, 0, 2)
    l_fpr_my = l_fpr_tpr_my_interp[0, ...]
    l_tpr_my = l_fpr_tpr_my_interp[1, ...]

    l_pr_rc_my_interp = np.asarray([
        utls.my_interp(l_rc_my[i], l_pr_my[i], n_points, rc_range)
        for i in range(len(l_rc_my))
    ]).transpose(1, 0, 2)
    l_rc_my = l_pr_rc_my_interp[0, ...]
    l_pr_my = l_pr_rc_my_interp[1, ...]

    l_fpr_tpr_true_interp = np.asarray([
        utls.my_interp(l_fpr_true[i], l_tpr_true[i], n_points, fpr_range)
        for i in range(len(l_fpr_true))
    ]).transpose(1, 0, 2)
    l_fpr_true = l_fpr_tpr_true_interp[0, ...]
    l_tpr_true = l_fpr_tpr_true_interp[1, ...]

    l_pr_rc_true_interp = np.asarray([
        utls.my_interp(l_rc_true[i], l_pr_true[i], n_points, rc_range)
        for i in range(len(l_rc_true))
    ]).transpose(1, 0, 2)
    l_rc_true = l_pr_rc_true_interp[0, ...]
    l_pr_true = l_pr_rc_true_interp[1, ...]

    roc_xlim = [0, 1]
    pr_rc_xlim = [0, 1]
    logger.info('Concatenating results for scoring')
    all_y_true = np.concatenate([r['y_test'] for r in res_list])
    all_probas_my = np.concatenate([r['probas_my'] for r in res_list])
    all_probas_true = np.concatenate([r['probas_true'] for r in res_list])

    fpr_my_all, tpr_my_all, thresholds_my_all = roc_curve(all_y_true,
                                                          all_probas_my,
                                                          pos_label=1)
    fpr_my_all, tpr_my_all = utls.my_interp(fpr_my_all, tpr_my_all, n_points)

    fpr_true_all, tpr_true_all, thresholds_true_all = roc_curve(
        all_y_true, all_probas_true, pos_label=1)
    fpr_true_all, tpr_true_all = utls.my_interp(fpr_true_all, tpr_true_all,
                                                n_points)
    pr_my_all, rc_my_all, _ = precision_recall_curve(all_y_true, all_probas_my)
    pr_my_all, rc_my_all = utls.my_interp(pr_my_all, rc_my_all, n_points)
    pr_true_all, rc_true_all, _ = precision_recall_curve(
        all_y_true, all_probas_true)
    pr_true_all, rc_true_all = utls.my_interp(pr_true_all, rc_true_all,
                                              n_points)
    auc_my_all = auc(fpr_my_all, tpr_my_all)
    probas_thr = np.linspace(0, 1, n_points)
    f1_my = [f1_score(all_y_true, all_probas_my > p) for p in probas_thr]
    probas_thr = np.linspace(0, 1, 200)
    f1_true = [f1_score(all_y_true, all_probas_true > p) for p in probas_thr]
    auc_true_all = auc(fpr_true_all, tpr_true_all)

    # Plotting
    lw = 3
    plt.figure('tpr')
    plt.plot(l_fpr_true.mean(axis=0),
             l_tpr_true.mean(axis=0),
             '-',
             lw=lw,
             color=colors[0],
             label='all folds (true) (area = %0.4f, max_f1 = %0.4f)' %
             (auc_true_all, np.max(f1_true)))

    plt.fill_between(l_fpr_true.mean(axis=0),
                     l_tpr_true.mean(axis=0) + l_tpr_true.std(axis=0),
                     l_tpr_true.mean(axis=0) - l_tpr_true.std(axis=0),
                     facecolor=colors[0],
                     alpha=alpha)

    plt.plot(l_fpr_my.mean(axis=0),
             l_tpr_my.mean(axis=0),
             '-',
             lw=lw,
             color=colors[1],
             label='all folds (my) (area = %0.4f, max_f1 = %0.4f)' %
             (auc_my_all, np.max(f1_my)))

    plt.fill_between(l_fpr_my.mean(axis=0),
                     l_tpr_my.mean(axis=0) + l_tpr_my.std(axis=0),
                     l_tpr_my.mean(axis=0) - l_tpr_my.std(axis=0),
                     facecolor=colors[1],
                     alpha=alpha)
    plt.legend()
    plt.xlim(roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.suptitle('Sequence: ' + seq_type + '. Gaze: ' +
                 confs[0].csvFileName_fg)
    plt.savefig(os.path.join(out_dir, 'folds_tpr_fpr.pdf'))

    plt.figure('rc')
    plt.plot(l_rc_true.mean(axis=0),
             l_pr_true.mean(axis=0),
             '-',
             lw=lw,
             color=colors[0],
             label='all folds (true)')
    plt.fill_between(l_rc_true.mean(axis=0),
                     l_pr_true.mean(axis=0) + l_pr_true.std(axis=0),
                     l_pr_true.mean(axis=0) - l_pr_true.std(axis=0),
                     facecolor=colors[0],
                     alpha=alpha)
    plt.plot(l_rc_my.mean(axis=0),
             l_pr_my.mean(axis=0),
             '-',
             lw=lw,
             color=colors[1],
             label='all folds (my)')
    plt.fill_between(l_rc_my.mean(axis=0),
                     l_pr_my.mean(axis=0) + l_pr_my.std(axis=0),
                     l_pr_my.mean(axis=0) - l_pr_my.std(axis=0),
                     facecolor=colors[1],
                     alpha=alpha)
    plt.legend()
    plt.xlim(pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle('Sequence: ' + seq_type + '. Gaze: ' +
                 confs[0].csvFileName_fg)
    #plt.figure('rc').set_size_inches(18.5, 10.5)
    plt.savefig(os.path.join(out_dir, 'folds_pr_rc.pdf'))

    min_n_frames = np.min([len(d.conf.frameFileNames) for d in datasets])

    dir_frames = os.path.join(out_dir, 'frames')

    if (not os.path.exists(dir_frames)):
        os.mkdir(dir_frames)
    else:
        logger.info('frames already exist, delete and re-run...')
        #shutil.rmtree(dir_frames)
        #os.mkdir(dir_frames)

    logger.info('Generating prediction frames...')
    #Plot by-frame predictions
    for f in range(min_n_frames):
        my = []
        true = []
        ims = []
        for j in range(len(datasets)):

            y_true = datasets[j].y
            idx_y = np.where(y_true[:, 0] == f)[0]
            y_true = y_true[idx_y]
            probas_true = res_list[j]['probas_true'][idx_y]
            probas_my = res_list[j]['probas_my'][idx_y]

            scores_my = utls.get_scores_from_sps(y_true[:, 0:2],
                                                 datasets[j].get_labels(),
                                                 probas_my)[..., f]
            my.append(scores_my)

            scores_true = utls.get_scores_from_sps(y_true[:, 0:2],
                                                   datasets[j].get_labels(),
                                                   probas_true)[..., f]
            true.append(scores_true)

            cont_gt = segmentation.find_boundaries(datasets[j].gt[..., f],
                                                   mode='thick')
            idx_cont_gt = np.where(cont_gt)
            im = utls.imread(datasets[j].conf.frameFileNames[f])
            im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
            im = gaze.drawGazePoint(datasets[j].conf.myGaze_fg,
                                    f,
                                    im,
                                    radius=7)
            ims.append(im)
        gs = gridspec.GridSpec(3, 4)
        for c in range(4):
            ax = plt.subplot(gs[0, c])
            ax.imshow(true[c])
            plt.title('true')

            ax = plt.subplot(gs[1, c])
            ax.imshow(my[c])
            plt.title('my')

            ax = plt.subplot(gs[2, c])
            ax.imshow(ims[c])
            plt.title('image')

        plt.suptitle('Sequence: ' + seq_type + '. Frame: ' + str(f))
        fig = plt.gcf()
        fig.set_size_inches(18.5, 10.5)
        plt.savefig(os.path.join(dir_frames, 'frame_' + str(f) + '.png'))
コード例 #19
0
ファイル: vilar.py プロジェクト: AmmieQi/ksptrack
def predict(conf, clf, out_dir=None, n_jobs=1):
    """
    Parallel extraction and prediction of patches on every pixel.

    Preprocesses every frame (grayscale + median filter), then dispatches
    per-frame prediction jobs to a process pool in batches of ~n_jobs
    frames. Each job writes its patches and predictions under
    ``conf.dataOutDir/vilar``.

    Parameters
    ----------
    conf : configuration object
        Provides frame file names, patch size, scale factor and output dir.
    clf : trained classifier
        Forwarded to each prediction job.
    out_dir : str, optional
        Overrides conf.dataOutDir when given.
    n_jobs : int
        Number of parallel worker processes.

    Returns
    -------
    conf : the (possibly updated) configuration object
    """

    # Set logger
    if (out_dir is not None):
        conf.dataOutDir = out_dir

    utls.setup_logging(conf.dataOutDir)

    logger = logging.getLogger('vilar (predict)')

    logger.info('---------------------------')
    logger.info('Predicting on: ' + conf.ds_dir)
    logger.info('type of sequence: ' + conf.seq_type)
    logger.info('gaze filename: ' + conf.csvFileName_fg)
    logger.info('Result dir:')
    logger.info(conf.dataOutDir)
    logger.info('---------------------------')
    n_frames = conf.vilar_n_frames
    if (n_frames == -1):
        n_frames = len(conf.frameFileNames)

    # Split frame indices into batches of ~n_jobs frames each.
    # np.array_split requires an integer number of sections: the original
    # float division (n_frames / n_jobs) raises TypeError on Python 3.
    n_batches = max(1, int(np.ceil(n_frames / n_jobs)))
    batch_idx = np.array_split(np.arange(0, n_frames), n_batches)

    selem = morphology.square(5)
    ims = []

    for i in range(n_frames):
        # Work on a median-filtered grayscale version of each frame.
        img = utls.imread(conf.frameFileNames[i])
        img = (color.rgb2gray(img) * 255).astype(np.uint8)
        img = median(img, selem=selem)
        ims.append(img)

    save_path = os.path.join(conf.dataOutDir, 'vilar')

    if (not os.path.exists(save_path)):
        os.mkdir(save_path)

    for b in range(len(batch_idx)):
        # One job descriptor per frame of this batch.
        stack = list()
        for i in batch_idx[b]:
            job = dict()
            job['clf'] = clf
            job['im'] = ims[i]
            job['shape'] = ims[i].shape
            job['f_num'] = i
            job['ps'] = conf.patch_size
            job['scale_factor'] = conf.scale_factor
            job['save_path_patch'] = os.path.join(
                save_path, 'all_patches_im_' + str(i) + '.npz')
            job['save_path_pred'] = os.path.join(
                save_path, 'pred_im_' + str(i) + '.npz')
            stack.append(job)

        with Pool(processes=n_jobs) as pool:
            pool.map(predict_job, stack)

    return conf
コード例 #20
0
def main(conf, plot_fname='metrics.pdf', logger=None):
    """Compute, cache and plot evaluation metrics of a KSP segmentation run.

    Reads the tracking results (``results.npz``) from ``conf.dataOutDir``,
    computes ROC / precision-recall / F1 metrics for both the raw KSP
    (binary) output and its PM (probability-map) refinement, caches them in
    ``metrics.npz`` (recomputation is skipped when that file already
    exists), then writes:

    - ``plot_fname``: ROC and PR curves of KSP and KSP/PM,
    - ``ksp_pm_frames/f_<n>.png``: per-frame overlay images,
    - ``sps_iters.eps``: selected superpixels vs. KSP iteration,
    - ``scores.csv``: pixel-wise F1 scores of both methods.

    Parameters
    ----------
    conf : object
        Experiment configuration; must provide ``dataOutDir``,
        ``frameFileNames``, ``myGaze_fg``, ``bag_n_feats``, ``bag_jobs``,
        ``seq_type``, ``ds_dir``, ``T`` and ``ss_thr``.
    plot_fname : str
        File name (relative to ``conf.dataOutDir``) of the metrics figure.
    logger : logging.Logger, optional
        Logger to report progress to; a module logger is created when None.

    Returns
    -------
    None
    """

    # Bug fix: the passed-in logger used to be unconditionally overwritten,
    # which made the parameter dead. Only create a default when none given.
    if logger is None:
        logger = logging.getLogger('plot_results_ksp')

    logger.info('--------')
    logger.info('Self-learning on: ' + conf.dataOutDir)
    logger.info('--------')

    metrics_path = os.path.join(conf.dataOutDir, 'metrics.npz')

    if (not os.path.exists(metrics_path)):

        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']
        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()

        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        l_dataset.make_y_array_true(l_dataset.gt)

        # KSP is a hard (binary) segmentation: its ROC / PR "curves" reduce
        # to a single operating point, padded with the trivial endpoints.
        fpr_ksp = [0.]
        tpr_ksp = [0.]
        pr_ksp = [1.]
        rc_ksp = [0.]
        f1_ksp = []

        logger.info('[1/4] Calculating metrics on KSP... ')

        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        l_dataset.set_seeds(seeds)
        l_dataset.make_y_array(l_dataset.seeds)

        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], l_dataset.y[:, 2])
        precision, recall, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                      l_dataset.y[:, 2])
        f1_ksp.append(f1_score(l_dataset.y_true[:, 2], l_dataset.y[:, 2]))
        # Index 1 is the single non-trivial threshold of a binary predictor.
        fpr_ksp.append(fpr[1])
        tpr_ksp.append(tpr[1])
        pr_ksp.append(precision[1])
        rc_ksp.append(recall[1])

        fpr_ksp.append(1.)
        tpr_ksp.append(1.)
        pr_ksp.append(0.)
        rc_ksp.append(1.)

        logger.info('[2/4] Calculating metrics on PM... ')
        fpr_pm = []
        tpr_pm = []
        pr_pm = []
        rc_pm = []
        f1_pm = []

        # Fit the foreground probability model (PM) on the superpixels
        # selected by the last KSP iteration.
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        my_dataset.fg_marked = seeds
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.bag_n_feats,
                           feat_fields=['desc'],
                           n_jobs=conf.bag_jobs)

        # .values replaces the deprecated DataFrame.as_matrix() (removed in
        # pandas 1.0); same ndarray result.
        probas = my_dataset.fg_pm_df['proba'].values
        fpr, tpr, _ = roc_curve(l_dataset.y_true[:, 2], probas)
        precision, recall, _ = precision_recall_curve(l_dataset.y_true[:, 2],
                                                      probas)
        probas_thr = np.unique(probas)
        # F1 at every distinct probability threshold, to report max(F1).
        f1_pm_ = [
            f1_score(l_dataset.y_true[:, 2], probas > p) for p in probas_thr
        ]
        f1_pm.append(f1_pm_)
        fpr_pm.append(fpr)
        tpr_pm.append(tpr)
        pr_pm.append(precision)
        rc_pm.append(recall)

        # Make PM and KSP frames on SS
        logger.info('[3/4] Making prediction maps of KSP... ')
        seeds = np.asarray(utls.get_node_list_tracklets(list_ksp[-1]))
        ksp_scores = utls.get_scores_from_sps(seeds, my_dataset.labels)

        my_dataset.fg_marked = np.asarray(
            utls.get_node_list_tracklets(list_ksp[-1]))
        my_dataset.calc_pm(my_dataset.fg_marked,
                           save=False,
                           marked_feats=None,
                           all_feats_df=my_dataset.sp_desc_df,
                           in_type='not csv',
                           mode='foreground',
                           bag_n_feats=conf.bag_n_feats,
                           feat_fields=['desc'],
                           n_jobs=conf.bag_jobs)
        pm_ksp = my_dataset.get_pm_array(mode='foreground')

        # Saving metrics
        data = dict()
        data['probas_thr'] = probas_thr
        data['fpr_pm'] = fpr_pm
        data['tpr_pm'] = tpr_pm
        data['pr_pm'] = pr_pm
        data['rc_pm'] = rc_pm
        data['f1_pm'] = f1_pm

        data['fpr_ksp'] = fpr_ksp
        data['tpr_ksp'] = tpr_ksp
        data['pr_ksp'] = pr_ksp
        data['rc_ksp'] = rc_ksp
        data['f1_ksp'] = f1_ksp

        data['ksp_scores'] = ksp_scores
        # Bug fix: pm_ksp was computed but never saved, so the cached-load
        # branch below raised KeyError on metrics['pm_ksp'].
        data['pm_ksp'] = pm_ksp
        np.savez(os.path.join(conf.dataOutDir, 'metrics.npz'), **data)

    else:
        logger.info('Loading metrics.npz...')
        metrics = np.load(os.path.join(conf.dataOutDir, 'metrics.npz'))
        probas_thr = metrics['probas_thr']
        fpr_pm = metrics['fpr_pm']
        tpr_pm = metrics['tpr_pm']
        pr_pm = metrics['pr_pm']
        rc_pm = metrics['rc_pm']
        f1_pm = metrics['f1_pm']

        fpr_ksp = metrics['fpr_ksp']
        tpr_ksp = metrics['tpr_ksp']
        pr_ksp = metrics['pr_ksp']
        rc_ksp = metrics['rc_ksp']
        f1_ksp = metrics['f1_ksp']

        ksp_scores = metrics['ksp_scores']
        pm_ksp = metrics['pm_ksp']

        my_dataset = ds.Dataset(conf)
        my_dataset.load_labels_if_not_exist()
        my_dataset.load_pm_fg_from_file()
        l_dataset = learning_dataset.LearningDataset(conf, pos_thr=0.5)
        list_ksp = np.load(os.path.join(conf.dataOutDir,
                                        'results.npz'))['list_ksp']

    # Plot all iterations of PM
    plt.clf()
    conf.roc_xlim = [0, 0.4]
    conf.pr_rc_xlim = [0.6, 1.]

    lw = 1

    # PM curves (subplot 121: ROC, subplot 122: precision-recall).
    auc_ = auc(np.asarray(fpr_pm[-1]).ravel(), np.asarray(tpr_pm[-1]).ravel())
    max_f1 = np.max(f1_pm[-1])

    plt.subplot(121)
    plt.plot(np.asarray(fpr_pm[-1]).ravel(),
             np.asarray(tpr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    auc_ = auc(np.asarray(rc_pm[-1]).ravel(), np.asarray(pr_pm[-1]).ravel())
    plt.subplot(122)
    plt.plot(np.asarray(rc_pm[-1]).ravel(),
             np.asarray(pr_pm[-1]).ravel(),
             'r-',
             lw=lw,
             label='KSP/PM (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    # Plot KSP single-point curves.
    # NOTE(review): auc(..., reorder=True) was removed in scikit-learn 0.22;
    # kept here since this codebase targets the older API.
    auc_ = auc(np.asarray(fpr_ksp).ravel(),
               np.asarray(tpr_ksp).ravel(),
               reorder=True)
    max_f1 = np.max(f1_ksp)
    plt.subplot(121)
    plt.plot(np.asarray(fpr_ksp).ravel(),
             np.asarray(tpr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))
    plt.subplot(122)
    auc_ = auc(np.asarray(rc_ksp).ravel(),
               np.asarray(pr_ksp).ravel(),
               reorder=True)
    plt.plot(np.asarray(rc_ksp).ravel(),
             np.asarray(pr_ksp).ravel(),
             'ro--',
             lw=lw,
             label='KSP (area = %0.4f, max(F1) = %0.4f)' % (auc_, max_f1))

    plt.subplot(121)
    plt.legend()
    plt.xlim(conf.roc_xlim)
    plt.xlabel('fpr')
    plt.ylabel('tpr')
    plt.subplot(122)
    plt.legend()
    plt.xlim(conf.pr_rc_xlim)
    plt.xlabel('recall')
    plt.ylabel('precision')
    plt.suptitle(conf.seq_type + ', ' + conf.ds_dir + '\n' + 'T: ' +
                 str(conf.T))
    fig = plt.gcf()
    fig.set_size_inches(18.5, 10.5)
    fig.savefig(os.path.join(conf.dataOutDir, plot_fname), dpi=200)

    # Make plots
    logger.info('Saving KSP, PM and SS merged frames...')
    gt = l_dataset.gt
    frame_dir = 'ksp_pm_frames'
    frame_path = os.path.join(conf.dataOutDir, frame_dir)
    if (os.path.exists(frame_path)):
        logger.info('[!!!] Frame dir: ' + frame_path +
                    ' exists. Delete to rerun.')
    else:
        n_iters_ksp = len(list_ksp)
        os.mkdir(frame_path)
        with progressbar.ProgressBar(maxval=len(conf.frameFileNames)) as bar:
            for f in range(len(conf.frameFileNames)):
                # Overlay ground-truth contour (red) and gaze point on frame.
                cont_gt = segmentation.find_boundaries(gt[..., f],
                                                       mode='thick')
                idx_cont_gt = np.where(cont_gt)
                im = utls.imread(conf.frameFileNames[f])
                im[idx_cont_gt[0], idx_cont_gt[1], :] = (255, 0, 0)
                im = csv.draw2DPoint(utls.pandas_to_std_csv(conf.myGaze_fg),
                                     f,
                                     im,
                                     radius=7)

                bar.update(f)
                plt.subplot(221)
                plt.imshow(ksp_scores[..., f])
                plt.title('KSP')
                plt.subplot(222)
                plt.imshow(pm_ksp[..., f])
                plt.title('KSP -> PM')
                plt.subplot(223)
                plt.imshow(im)
                plt.title('image')
                plt.suptitle('frame: ' + str(f) + ', n_iters_ksp: ' +
                             str(n_iters_ksp))
                plt.savefig(os.path.join(frame_path, 'f_' + str(f) + '.png'),
                            dpi=200)

    logger.info('Saving SPs per iterations plot...')
    n_sps = []
    for i in range(len(list_ksp)):
        # Bug fix: the loop previously indexed list_ksp[-1] on every
        # iteration, so the "per iteration" curve was a flat line.
        n = np.asarray(utls.get_node_list_tracklets(list_ksp[i])).shape[0]
        n_sps.append((i + 1, n))

    # Repeat the last count so the final (red) marker sits one step beyond.
    n_sps.append((len(list_ksp) + 1, n))
    n_sps = np.asarray(n_sps)

    plt.clf()
    plt.plot(n_sps[:, 0], n_sps[:, 1], 'bo-')
    plt.plot(n_sps[-1, 0], n_sps[-1, 1], 'ro')
    plt.xlabel('iterations')
    plt.ylabel('num. of superpixels')
    plt.title('num of superpixels vs. iterations. SS threshold: ' +
              str(conf.ss_thr))
    plt.savefig(os.path.join(conf.dataOutDir, 'sps_iters.eps'), dpi=200)

    # Pixel-wise scores: PM max-F1 over all thresholds, KSP binary F1.
    pr_pm, rc_pm, _ = precision_recall_curve(l_dataset.gt.ravel(),
                                             pm_ksp.ravel())
    ksp_pm_pix_f1 = np.max(2 * (pr_pm * rc_pm) / (pr_pm + rc_pm))
    ksp_pix_f1 = f1_score(l_dataset.gt.ravel(), ksp_scores.ravel())
    file_out = os.path.join(conf.dataOutDir, 'scores.csv')

    C = pd.Index(["F1"], name="columns")
    I = pd.Index(['KSP', 'KSP/PM'], name="Methods")
    data = np.asarray([ksp_pix_f1, ksp_pm_pix_f1]).reshape(2, 1)
    df = pd.DataFrame(data=data, index=I, columns=C)
    df.to_csv(path_or_buf=file_out)