# Example #1
def eval_depth_predictions(database, save_dir, overfit=False):
    """ Evaluate the depth maps that are stored in the save dir

    Args:
        database: dataset identifier; only 'NYUD' is supported.
        save_dir: directory holding a 'depth/' subfolder of predictions;
            the JSON results file is written here as well.
        overfit: forwarded to the dataset (restrict to a small subset).

    Returns:
        dict of metric name -> value, as produced by eval_depth.

    Raises:
        NotImplementedError: for any database other than 'NYUD'.
    """

    # Dataloaders
    if database == 'NYUD':
        from data.nyud import NYUD_MT
        gt_set = 'val'
        db = NYUD_MT(split=gt_set, do_depth=True, overfit=overfit)

    else:
        raise NotImplementedError

    # NOTE: file name says 'test' although the ground-truth split is 'val';
    # kept as-is for backward compatibility with existing result files.
    base_name = database + '_' + 'test' + '_depth'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (depth)')
    eval_results = eval_depth(db, os.path.join(save_dir, 'depth'))
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print results: metric names left-justified in a 15-char column
    print('Results for Depth Estimation')
    for metric in eval_results:
        print('{0:<15s}{1:.4f}'.format(metric, eval_results[metric]))

    return eval_results
# Example #2
def get_val_dataset(p, transforms):
    """ Return the validation dataset

    Args:
        p: config mapping; must provide p['val_db_name'], p['overfit'],
            and p.TASKS.NAMES (the list of active task names).
        transforms: transform pipeline passed to the dataset.

    Returns:
        the constructed validation dataset.

    Raises:
        NotImplementedError: if p['val_db_name'] is not a supported database.
    """

    db_name = p['val_db_name']
    print('Preparing val loader for db: {}'.format(db_name))

    if db_name == 'PASCALContext':
        from data.pascal_context import PASCALContext
        database = PASCALContext(split=['val'],
                                 transform=transforms,
                                 retname=True,
                                 do_semseg='semseg' in p.TASKS.NAMES,
                                 do_edge='edge' in p.TASKS.NAMES,
                                 do_normals='normals' in p.TASKS.NAMES,
                                 do_sal='sal' in p.TASKS.NAMES,
                                 do_human_parts='human_parts' in p.TASKS.NAMES,
                                 overfit=p['overfit'])

    elif db_name == 'NYUD':
        from data.nyud import NYUD_MT
        database = NYUD_MT(split='val',
                           transform=transforms,
                           do_edge='edge' in p.TASKS.NAMES,
                           do_semseg='semseg' in p.TASKS.NAMES,
                           do_normals='normals' in p.TASKS.NAMES,
                           do_depth='depth' in p.TASKS.NAMES,
                           overfit=p['overfit'])

    else:
        # BUG FIX: the original raised `NotImplemented(...)`, which is not an
        # exception class (calling it yields a TypeError). Use the proper
        # NotImplementedError exception instead.
        raise NotImplementedError(
            "test_db_name: Choose among PASCALContext and NYUD")

    return database
def eval_semseg_predictions(database, save_dir, overfit=False):
    """ Evaluate the segmentation maps that are stored in the save dir

    Args:
        database: dataset identifier; 'PASCALContext' or 'NYUD'.
        save_dir: directory holding a 'semseg/' subfolder of predictions;
            the JSON results file is written here as well.
        overfit: forwarded to the dataset (restrict to a small subset).

    Returns:
        dict with per-class IoUs ('jaccards_all_categs') and 'mIoU'.

    Raises:
        NotImplementedError: for any other database.
    """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        n_classes = 20
        cat_names = VOC_CATEGORY_NAMES
        has_bg = True
        gt_set = 'val'
        db = PASCALContext(split=gt_set,
                           do_edge=False,
                           do_human_parts=False,
                           do_semseg=True,
                           do_normals=False,
                           overfit=overfit)

    elif database == 'NYUD':
        from data.nyud import NYUD_MT
        n_classes = 40
        cat_names = NYU_CATEGORY_NAMES
        has_bg = False
        gt_set = 'val'
        db = NYUD_MT(split=gt_set, do_semseg=True, overfit=overfit)

    else:
        raise NotImplementedError

    # NOTE: file name says 'test' although the ground-truth split is 'val';
    # kept as-is for backward compatibility with existing result files.
    base_name = database + '_' + 'test' + '_semseg'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (semseg)')
    eval_results = eval_semseg(db,
                               os.path.join(save_dir, 'semseg'),
                               n_classes=n_classes,
                               has_bg=has_bg)
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print results: class names left-justified in a 15-char column,
    # IoUs reported as percentages
    class_IoU = eval_results['jaccards_all_categs']
    mIoU = eval_results['mIoU']

    print('\nSemantic Segmentation mIoU: {0:.4f}\n'.format(100 * mIoU))
    for i in range(len(class_IoU)):
        print('{0:<15s}{1:.4f}'.format(cat_names[i], 100 * class_IoU[i]))

    return eval_results
# Example #4
def eval_normals_predictions(database, save_dir, overfit=False):
    """ Evaluate the normals maps that are stored in the save dir

    Args:
        database: dataset identifier; 'PASCALContext' or 'NYUD'.
        save_dir: directory holding a 'normals/' subfolder of predictions;
            the JSON results file is written here as well.
        overfit: forwarded to the dataset (restrict to a small subset).

    Returns:
        dict of metric name -> value, as produced by eval_normals.

    Raises:
        NotImplementedError: for any other database.
    """

    # Dataloaders
    if database == 'PASCALContext':
        from data.pascal_context import PASCALContext
        gt_set = 'val'
        db = PASCALContext(split=gt_set,
                           do_edge=False,
                           do_human_parts=False,
                           do_semseg=False,
                           do_normals=True,
                           overfit=overfit)
    elif database == 'NYUD':
        from data.nyud import NYUD_MT
        gt_set = 'val'
        db = NYUD_MT(split=gt_set, do_normals=True, overfit=overfit)

    else:
        raise NotImplementedError

    # NOTE: file name says 'test' although the ground-truth split is 'val';
    # kept as-is for backward compatibility with existing result files.
    base_name = database + '_' + 'test' + '_normals'
    fname = os.path.join(save_dir, base_name + '.json')

    # Eval the model
    print('Evaluate the saved images (surface normals)')
    eval_results = eval_normals(db, os.path.join(save_dir, 'normals'))
    with open(fname, 'w') as f:
        json.dump(eval_results, f)

    # Print results: metric names left-justified in a 15-char column
    print('Results for Surface Normal Estimation')
    for metric in eval_results:
        print('{0:<15s}{1:.4f}'.format(metric, eval_results[metric]))

    return eval_results