Example #1
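These snippets are excerpted from a larger module and rely on imports the listing does not show. A plausible preamble follows, assuming the standard library plus NumPy; the repo-internal module paths in the comments are guesses inferred from the identifiers used, not verified against the actual repo layout:

import os
import glob
import json

import numpy as np

# Repo-internal helpers used below; these module paths are assumptions:
# from evaluation.eval_sal import eval_sal
# from evaluation.eval_semseg import eval_semseg
# from evaluation.eval_human_parts import eval_human_parts
# from evaluation.eval_normals import eval_normals
# from utils.mypath import MyPath, PROJECT_ROOT_DIR
# from utils.utils import mkdir_if_missing
# The category-name constants (VOC_CATEGORY_NAMES, NYU_CATEGORY_NAMES,
# PART_CATEGORY_NAMES) are likewise defined elsewhere in the repo.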
def get_val_dataset(p, transforms):
    """ Return the validation dataset """

    db_name = p['val_db_name']
    print('Preparing val loader for db: {}'.format(db_name))

    if db_name == 'PASCALContext':
        from data.pascal_context import PASCALContext
        database = PASCALContext(split=['val'],
                                 transform=transforms,
                                 retname=True,
                                 do_semseg='semseg' in p.TASKS.NAMES,
                                 do_edge='edge' in p.TASKS.NAMES,
                                 do_normals='normals' in p.TASKS.NAMES,
                                 do_sal='sal' in p.TASKS.NAMES,
                                 do_human_parts='human_parts' in p.TASKS.NAMES,
                                 overfit=p['overfit'])

    elif db_name == 'NYUD':
        from data.nyud import NYUD_MT
        database = NYUD_MT(split='val',
                           transform=transforms,
                           do_edge='edge' in p.TASKS.NAMES,
                           do_semseg='semseg' in p.TASKS.NAMES,
                           do_normals='normals' in p.TASKS.NAMES,
                           do_depth='depth' in p.TASKS.NAMES,
                           overfit=p['overfit'])

    else:
        raise NotImplementedError(
            "val_db_name: Choose among PASCALContext and NYUD")

    return database
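A minimal usage sketch, assuming an EasyDict-style config (the function reads p both by key, as in p['val_db_name'], and by attribute, as in p.TASKS.NAMES); the key values shown are illustrative only:

# Hypothetical usage; EasyDict supports both p['key'] and p.attr access.
from easydict import EasyDict

p = EasyDict({'val_db_name': 'NYUD', 'overfit': False})
p.TASKS = EasyDict({'NAMES': ['semseg', 'depth']})

val_db = get_val_dataset(p, transforms=None)
print('Validation samples: {}'.format(len(val_db)))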
Example #2
def eval_sal_predictions(database, save_dir, overfit=False):
    """ Evaluate the saliency estimation maps that are stored in the save dir """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        split = 'val'
        db = PASCALContext(split=split,
                           do_edge=False,
                           do_human_parts=False,
                           do_semseg=False,
                           do_normals=False,
                           do_sal=True,
                           overfit=overfit)

    else:
        raise NotImplementedError

    base_name = database + '_' + 'test' + '_sal'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (saliency)')
    eval_results = eval_sal(db,
                            os.path.join(save_dir, 'sal'),
                            mask_thres=np.linspace(0.2, 0.9, 15))
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print the results
    print('Results for Saliency Estimation')
    print('mIoU: {0:.3f}'.format(100 * eval_results['mIoU']))
    print('maxF: {0:.3f}'.format(100 * eval_results['maxF']))

    return eval_results
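Here, eval_sal is handed 15 binarization thresholds between 0.2 and 0.9 and reports the best F-measure over them. A schematic of that maxF computation for a single image, assuming the conventional beta^2 = 0.3 weighting used in saliency benchmarks (an illustration, not the repo's eval_sal):

def max_f_measure(pred, gt, thresholds, beta2=0.3):
    """Schematic maxF: binarize at each threshold, keep the best F-score.

    pred: float saliency map in [0, 1]; gt: boolean ground-truth mask.
    """
    best_f = 0.0
    for t in thresholds:
        binary = pred > t
        tp = np.logical_and(binary, gt).sum()
        precision = tp / max(binary.sum(), 1)
        recall = tp / max(gt.sum(), 1)
        f = (1 + beta2) * precision * recall / max(beta2 * precision + recall, 1e-8)
        best_f = max(best_f, f)
    return best_f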
Example #3
def eval_semseg_predictions(database, save_dir, overfit=False):
    """ Evaluate the segmentation maps that are stored in the save dir """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        n_classes = 20
        cat_names = VOC_CATEGORY_NAMES
        has_bg = True
        gt_set = 'val'
        db = PASCALContext(split=gt_set,
                           do_edge=False,
                           do_human_parts=False,
                           do_semseg=True,
                           do_normals=False,
                           overfit=overfit)

    elif database == 'NYUD':
        from data.nyud import NYUD_MT
        n_classes = 40
        cat_names = NYU_CATEGORY_NAMES
        has_bg = False
        gt_set = 'val'
        db = NYUD_MT(split=gt_set, do_semseg=True, overfit=overfit)

    else:
        raise NotImplementedError

    base_name = database + '_' + 'test' + '_semseg'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (semseg)')
    eval_results = eval_semseg(db,
                               os.path.join(save_dir, 'semseg'),
                               n_classes=n_classes,
                               has_bg=has_bg)
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print results
    class_IoU = eval_results['jaccards_all_categs']
    mIoU = eval_results['mIoU']

    print('\nSemantic Segmentation mIoU: {0:.4f}\n'.format(100 * mIoU))
    for i in range(len(class_IoU)):
        print('{0:<15s}{1:.4f}'.format(cat_names[i], 100 * class_IoU[i]))

    return eval_results
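The per-class numbers printed above are Jaccard indices (intersection over union). A minimal sketch of how mIoU is typically derived from a confusion matrix; schematic only, not the repo's eval_semseg:

def miou_from_confusion(conf):
    """conf[i, j] = number of pixels with ground truth i predicted as j."""
    tp = np.diag(conf).astype(np.float64)
    union = conf.sum(axis=0) + conf.sum(axis=1) - tp
    per_class_iou = tp / np.maximum(union, 1)  # guard against empty classes
    return per_class_iou, per_class_iou.mean()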
Example #4
def eval_human_parts_predictions(database, save_dir, overfit=False):
    """ Evaluate the human parts predictions that are stored in the save dir """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        gt_set = 'val'
        db = PASCALContext(split=gt_set,
                           do_edge=False,
                           do_human_parts=True,
                           do_semseg=False,
                           do_normals=False,
                           do_sal=False,
                           overfit=overfit)

    else:
        raise NotImplementedError

    base_name = database + '_' + 'test' + '_human_parts'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (human parts)')
    eval_results = eval_human_parts(db, os.path.join(save_dir, 'human_parts'))
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print Results
    class_IoU = eval_results['jaccards_all_categs']
    mIoU = eval_results['mIoU']

    print('\nHuman Parts mIoU: {0:.4f}\n'.format(100 * mIoU))
    for i in range(len(class_IoU)):
        print('{0:<15s}{1:.4f}'.format(PART_CATEGORY_NAMES[i],
                                       100 * class_IoU[i]))

    return eval_results
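A hypothetical call, assuming save_dir already contains the predicted part maps under a human_parts/ subfolder (the function reads predictions from save_dir/human_parts and writes the metrics next to them as PASCALContext_test_human_parts.json):

# Hypothetical paths; adjust to wherever the predictions were saved.
results = eval_human_parts_predictions('PASCALContext',
                                       save_dir='./output',
                                       overfit=False)
print('Human parts mIoU: {0:.4f}'.format(100 * results['mIoU']))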
Example #5
def eval_normals_predictions(database, save_dir, overfit=False):
    """ Evaluate the normals maps that are stored in the save dir """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        gt_set = 'val'
        db = PASCALContext(split=gt_set,
                           do_edge=False,
                           do_human_parts=False,
                           do_semseg=False,
                           do_normals=True,
                           overfit=overfit)
    elif database == 'NYUD':
        from data.nyud import NYUD_MT
        gt_set = 'val'
        db = NYUD_MT(split=gt_set, do_normals=True, overfit=overfit)

    else:
        raise NotImplementedError

    base_name = database + '_' + 'test' + '_normals'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (surface normals)')
    eval_results = eval_normals(db, os.path.join(save_dir, 'normals'))
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print results
    print('Results for Surface Normal Estimation')
    for metric, value in eval_results.items():
        print('{0:<15s}{1:.4f}'.format(metric, value))

    return eval_results
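Surface-normal metrics are usually angular: the per-pixel angle between the predicted and ground-truth unit normals, summarized by its mean and by accuracy-within-threshold rates. A schematic version (not the repo's eval_normals):

def angular_error_stats(pred, gt):
    """Schematic normals metrics; pred and gt are (H, W, 3) normal maps."""
    pred = pred / np.maximum(np.linalg.norm(pred, axis=-1, keepdims=True), 1e-8)
    gt = gt / np.maximum(np.linalg.norm(gt, axis=-1, keepdims=True), 1e-8)
    cos = np.clip((pred * gt).sum(axis=-1), -1.0, 1.0)
    deg = np.degrees(np.arccos(cos))
    return {'mean': deg.mean(),
            'rmse': np.sqrt((deg ** 2).mean()),
            '11.25': 100.0 * (deg < 11.25).mean()}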
Example #6
def eval_edge_predictions(p, database, save_dir):
    """ The edges are evaluated through seism """

    print(
        'Evaluate the edge prediction using seism ... This can take a while ...'
    )

    # DataLoaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        split = 'val'
        db = PASCALContext(split=split,
                           do_edge=True,
                           do_human_parts=False,
                           do_semseg=False,
                           do_normals=False,
                           do_sal=True,
                           overfit=False)

    else:
        raise NotImplementedError

    # First check if all files are there
    files = glob.glob(os.path.join(save_dir, 'edge/*png'))

    assert (len(files) == len(db))

    # rsync the results to the seism root
    print('Rsync the results to the seism root ...')
    exp_name = database + '_' + p['setup'] + '_' + p['model']
    seism_root = MyPath.seism_root()
    result_dir = os.path.join(seism_root,
                              'datasets/%s/%s/' % (database, exp_name))
    mkdir_if_missing(result_dir)
    os.system('rsync -a %s %s' %
              (os.path.join(save_dir, 'edge/*'), result_dir))
    print('Done ...')

    v = list(np.arange(0.01, 1.00, 0.01))
    parameters_location = os.path.join(seism_root,
                                       'parameters/%s.txt' % (exp_name))
    with open(parameters_location, 'w') as f:
        for l in v:
            f.write('%.2f\n' % (l))

    # generate a seism script that we will run.
    print('Generate seism script to perform the evaluation ...')
    seism_base = os.path.join(PROJECT_ROOT_DIR,
                              'evaluation/seism/pr_curves_base.m')
    with open(seism_base) as f:
        seism_file = f.readlines()
    seism_file = [line.strip() for line in seism_file]
    output_file = [seism_file[0]]

    ## Add experiment parameters (TODO)
    # Add the required seism source folders to the MATLAB path
    for subdir in ['scripts', 'misc', 'tests', 'gt_wrappers', 'io',
                   'measures', 'piotr_edges', 'segbench']:
        output_file += [
            "addpath('%s')" % os.path.join(seism_root, 'src', subdir + '/')
        ]
    output_file.extend(seism_file[1:18])

    ## Add method (TODO)
    output_file += [
        'methods(end+1).name = \'%s\'; methods(end).io_func = @read_one_png; methods(end).legend = methods(end).name; methods(end).type = \'contour\';'
        % (exp_name)
    ]
    output_file.extend(seism_file[19:61])

    ## Add path to save output
    output_file += [
        'filename = \'%s\'' %
        (os.path.join(save_dir, database + '_' + 'test' + '_edge.txt'))
    ]
    output_file += seism_file[62:]

    # save the file to the seism dir
    output_file_path = os.path.join(seism_root, exp_name + '.m')
    with open(output_file_path, 'w') as f:
        for line in output_file:
            f.write(line + '\n')

    # go to the seism dir and perform evaluation
    print(
        'Go to seism root dir and run the evaluation ... This takes time ...')
    cwd = os.getcwd()
    os.chdir(seism_root)
    os.system(
        "matlab -nodisplay -nosplash -nodesktop -r \"addpath(\'%s\');%s;exit\""
        % (seism_root, exp_name))
    os.chdir(cwd)

    # write to json
    print('Finished evaluation in seism ... Write results to JSON ...')
    with open(os.path.join(save_dir, database + '_' + 'test' + '_edge.txt'),
              'r') as f:
        seism_result = [line.strip() for line in f.readlines()]

    eval_dict = {}
    for line in seism_result:
        metric, score = line.split(':')
        eval_dict[metric] = float(score)

    with open(os.path.join(save_dir, database + '_' + 'test' + '_edge.json'),
              'w') as f:
        json.dump(eval_dict, f)

    # print
    print('Edge Detection Evaluation')
    for k, v in eval_dict.items():
        print('{0:<10s}{1:.4f}'.format(k, 100 * v))

    # cleanup - Important. Else Matlab will reuse the files.
    print('Cleanup files in seism ...')
    result_rm = os.path.join(seism_root,
                             'results/%s/%s/' % (database, exp_name))
    data_rm = os.path.join(seism_root,
                           'datasets/%s/%s/' % (database, exp_name))
    os.system("rm -rf %s" % (result_rm))
    os.system("rm -rf %s" % (data_rm))
    print('Finished cleanup ...')

    return eval_dict
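A hypothetical invocation, assuming MATLAB and a seism checkout (located via MyPath.seism_root()) are available on the machine and that save_dir contains an edge/ folder of PNG predictions; the 'setup' and 'model' values only affect how the experiment is named inside seism:

# Hypothetical config values; only 'setup' and 'model' are read here.
p = {'setup': 'multi_task', 'model': 'baseline'}
eval_dict = eval_edge_predictions(p,
                                  database='PASCALContext',
                                  save_dir='./output')
for metric, score in eval_dict.items():
    print(metric, score)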