Example #1
def plot_re_rect_occlusion(eval_args, eval_dir, scene_ids, all_test_visibs, bins = 10):
    obj_id = eval_args.getint('DATA','obj_id')
    top_n_eval = eval_args.getint('EVALUATION','TOP_N_EVAL')
    top_n = eval_args.getint('METRIC','TOP_N')
    # if top_n_eval < 1:
    #     return
        
    all_angle_errs = []
    for scene_id in scene_ids:

        error_file_path = os.path.join(eval_dir, 'error=re_ntop=%s' % top_n, 'errors_{:02d}.yml'.format(scene_id))
        if not os.path.exists(error_file_path):
            print('WARNING: ' + error_file_path + ' not found')
            continue

        angle_errs_dict = inout.load_yaml(error_file_path)
        all_angle_errs += [list(angle_e['errors'].values())[0] for angle_e in angle_errs_dict]

    if len(all_angle_errs) == 0:
        return
    all_angle_errs = np.array(all_angle_errs)
    # print all_vsd_errs

    fig = plt.figure()
    plt.grid()
    plt.ylabel('rot err [deg]')
    plt.xlabel('visibility [percent]')
    # plt.axis((-0.1, 1.1, -0.1, 1.1))
    # plt.xlim((0.0, 1.0))
    # plt.ylim((0.0, 1.0))
    
    total_views = len(all_angle_errs) // top_n
    angle_errs_rect = np.empty((total_views,))

    for view in range(total_views):
        top_n_errors = all_angle_errs[view*top_n:(view+1)*top_n]
        # rectified error: a rotation error of e and 180-e degrees are treated as equivalent
        angle_errs_rect[view] = np.min([top_n_errors[0], 180 - top_n_errors[0]])

    bounds = np.linspace(0,1,bins+1)
    bin_angle_errs = []
    bin_count = []

    for idx in range(bins):
        bin_idcs = np.where((all_test_visibs>bounds[idx]) & (all_test_visibs<bounds[idx+1]))
        # median_angle_err[idx] = np.median(angle_errs_rect[bin_idcs])
        bin_angle_errs.append(angle_errs_rect[bin_idcs])
        bin_count.append(len(bin_idcs[0]))
    
    middle_bin_vis = bounds[:-1] + (bounds[1]-bounds[0])/2.
    # plt.bar(middle_bin_vis,median_angle_err,0.5/bins)
    plt.boxplot(bin_angle_errs, positions = middle_bin_vis, widths=0.5/bins, sym='+')

    # count_str = 'bin count ' + bins * '%s ' 
    # count_str = count_str % tuple(bin_count)
    plt.title('Visibility vs Median Rectified Rotation Error' + str(bin_count))
    plt.savefig(os.path.join(eval_dir,'figures','R_err_occlusion_{:02d}.png'.format(obj_id)), dpi=300)

    tikz_save(os.path.join(eval_dir,'latex','R_err_occlusion.tex'), figurewidth ='0.45\\textheight', figureheight='0.45\\textheight', show_info=False)
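A minimal, self-contained sketch of the visibility binning and boxplot logic used above, on synthetic data (all names below are illustrative, not from the original module):

import numpy as np
import matplotlib.pyplot as plt

bins = 10
visibs = np.random.uniform(0, 1, 500)   # stand-in for all_test_visibs
errs = np.random.uniform(0, 180, 500)   # stand-in for the rectified rotation errors
bounds = np.linspace(0, 1, bins + 1)

bin_errs = []
for idx in range(bins):
    # collect the errors of all views whose visibility falls into this bin
    in_bin = np.where((visibs > bounds[idx]) & (visibs < bounds[idx + 1]))
    bin_errs.append(errs[in_bin])

middle_bin_vis = bounds[:-1] + (bounds[1] - bounds[0]) / 2.
plt.boxplot(bin_errs, positions=middle_bin_vis, widths=0.5 / bins, sym='+')
plt.show()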
Example #2
def get_gt_scene_crops(scene_id, eval_args, train_args):
    

    dataset_name = eval_args.get('DATA','DATASET')
    cam_type = eval_args.get('DATA','CAM_TYPE')
    icp = eval_args.getboolean('EVALUATION','ICP')

    delta = eval_args.get('METRIC', 'VSD_DELTA')

    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    dataset_path = u.get_dataset_path(workspace_path)

    H = train_args.getint('Dataset','H')

    # LineMOD with 13 objects and LineMOD with 15 objects may hash to the same md5 file!
    # For example, object 7 is the duck in the 13-object set, but the cup in the 15-object set.
    cfg_string = str([scene_id] + eval_args.items('DATA') + eval_args.items('BBOXES') + [H])
    current_config_hash = hashlib.md5(cfg_string.encode('utf-8')).hexdigest()
    # current_config_hash = cfg_string
    current_file_name = os.path.join(dataset_path, current_config_hash + '.npz')
    

    if os.path.exists(current_file_name):
        data = np.load(current_file_name, allow_pickle=True)  # allow_pickle recovers the stored dicts
        test_img_crops = data['test_img_crops'].item()
        test_img_depth_crops = data['test_img_depth_crops'].item()
        bb_scores = data['bb_scores'].item()
        bb_vis = data['visib_gt'].item()
        bbs = data['bbs'].item()
    else:
        # nothing cached yet; force regeneration below
        test_img_crops, test_img_depth_crops = {}, {}

    if len(test_img_crops) == 0 or len(test_img_depth_crops) == 0:
        test_imgs = load_scenes(scene_id, eval_args)
        test_imgs_depth = load_scenes(scene_id, eval_args, depth=True) if icp else None

        data_params = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)

        # only available for primesense; the sixd toolkit can generate it (calc_gt_stats.py)
        visib_gt = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(scene_id, delta))

        bb_gt = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))

        test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis = generate_scene_crops(test_imgs, test_imgs_depth, bb_gt, eval_args, 
                                                                                            train_args, visib_gt=visib_gt)

        np.savez(current_file_name, test_img_crops=test_img_crops, test_img_depth_crops=test_img_depth_crops, bbs=bbs, bb_scores=bb_scores, visib_gt=bb_vis)

        current_cfg_file_name = os.path.join(dataset_path, current_config_hash + '.cfg')
        with open(current_cfg_file_name, 'w') as f:
            f.write(cfg_string)
        print('created new ground truth crops!')
    else:
        print('loaded previously generated ground truth crops!')
        print(len(test_img_crops), len(test_img_depth_crops))



    return (test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis)
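The caching pattern above (hash the relevant config into a file name, reuse the .npz on a hit) in a minimal standalone form; the helper and the compute callback are illustrative, not part of the original code:

import hashlib
import os
import numpy as np

def cached(cfg_string, cache_dir, compute):
    # any change in the configuration string yields a new md5, hence a new cache file
    key = hashlib.md5(cfg_string.encode('utf-8')).hexdigest()
    path = os.path.join(cache_dir, key + '.npz')
    if os.path.exists(path):
        return np.load(path, allow_pickle=True)['result'].item()
    result = compute()
    np.savez(path, result=result)
    return result

crops = cached(str(['scene_1', ('DATA', 'tless')]), '/tmp',
               lambda: {'crops': [1, 2, 3]})  # compute() only runs on a cache miss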
Example #3
def plot_R_err_hist_vis(eval_args, eval_dir, scene_ids, bins=20):
    top_n_eval = eval_args.getint('EVALUATION','TOP_N_EVAL')
    top_n = eval_args.getint('METRIC','TOP_N')
    cam_type = eval_args.get('DATA','cam_type')
    dataset_name = eval_args.get('DATA','dataset')
    obj_id = eval_args.getint('DATA','obj_id')


    # if top_n_eval < 1:
    #     return

    data_params = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)

    angle_errs = []
    for scene_id in scene_ids:
        error_file_path = os.path.join(eval_dir,'error=re_ntop=%s' % top_n,'errors_{:02d}.yml'.format(scene_id))
        if not os.path.exists(error_file_path):
            print('WARNING: ' + error_file_path + ' not found')
            continue
        # angle_errs_dict = inout.load_yaml(error_file_path)
        # angle_errs += [angle_e['errors'].values()[0] for angle_e in angle_errs_dict]
        
        gts = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))
        visib_gts = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(scene_id, 15))
        re_dict = inout.load_yaml(error_file_path)

        for view in range(len(gts)):
            res = re_dict[view*top_n:(view+1)*top_n]
            for gt,visib_gt in zip(gts[view],visib_gts[view]):
                if gt['obj_id'] == obj_id:
                    if visib_gt['visib_fract'] > 0.1:
                        for re_e in res:
                            angle_errs += [list(re_e['errors'].values())[0]]

    if len(angle_errs) == 0:
        return
        
    angle_errs = np.array(angle_errs)

    plot_R_err_hist2(angle_errs, eval_dir, bins=bins)
Example #4
def get_all_scenes_for_obj(eval_args):
    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    dataset_path = u.get_dataset_path(workspace_path)

    dataset_name = eval_args.get('DATA', 'DATASET')
    cam_type = eval_args.get('DATA', 'CAM_TYPE')
    try:
        obj_id = eval_args.getint('DATA', 'OBJ_ID')
    except configparser.NoOptionError:
        # fall back to the first entry of the OBJECTS list
        obj_id = eval(eval_args.get('DATA', 'OBJECTS'))[0]

    cfg_string = str(dataset_name)
    current_config_hash = hashlib.md5(cfg_string.encode('utf-8')).hexdigest()
    current_file_name = os.path.join(dataset_path,
                                     current_config_hash + '.npy')

    if os.path.exists(current_file_name):
        obj_scene_dict = np.load(current_file_name, allow_pickle=True).item()
    else:
        p = dataset_params.get_dataset_params(dataset_name,
                                              model_type='',
                                              train_type='',
                                              test_type=cam_type,
                                              cam_type=cam_type)

        obj_scene_dict = {}
        scene_gts = []

        for scene_id in range(1, p['scene_count'] + 1):
            print(scene_id)

            scene_gts.append(
                inout.load_yaml(p['scene_gt_mpath'].format(scene_id)))

        for obj in range(1, p['obj_count'] + 1):
            eval_scenes = set()
            for scene_i, scene_gt in enumerate(scene_gts):
                for view_gt in scene_gt[0]:
                    if view_gt['obj_id'] == obj:
                        eval_scenes.add(scene_i + 1)
            obj_scene_dict[obj] = list(eval_scenes)
        np.save(current_file_name, obj_scene_dict)

    eval_scenes = obj_scene_dict[obj_id]

    return eval_scenes
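Illustrative usage (file name and returned scene ids hypothetical); the helper only needs the config keys read above:

import configparser

eval_args = configparser.ConfigParser()
eval_args.read('eval.cfg')  # must define DATA/DATASET, DATA/CAM_TYPE and DATA/OBJ_ID
scenes = get_all_scenes_for_obj(eval_args)
print(scenes)  # e.g. [2, 5, 7]: every scene in which OBJ_ID appears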
Example #5
def plot_vsd_occlusion(eval_args, eval_dir, scene_ids, all_test_visibs, bins = 10):

    top_n_eval = eval_args.getint('EVALUATION','TOP_N_EVAL')
    top_n = eval_args.getint('METRIC','TOP_N')
    delta = eval_args.getint('METRIC','VSD_DELTA')
    tau = eval_args.getint('METRIC','VSD_TAU')
    cost = eval_args.get('METRIC','VSD_COST')
    obj_id = eval_args.getint('DATA','obj_id')

    # if top_n_eval < 1:
    #     return

    all_vsd_errs = []
    for scene_id in scene_ids:
        error_file_path = os.path.join(eval_dir,'error=vsd_ntop=%s_delta=%s_tau=%s_cost=%s' % (top_n, delta, tau, cost), 'errors_{:02d}.yml'.format(scene_id))

        if not os.path.exists(error_file_path):
            print('WARNING: ' + error_file_path + ' not found')
            continue

        vsd_dict = inout.load_yaml(error_file_path)
        all_vsd_errs += [list(vsd_e['errors'].values())[0] for vsd_e in vsd_dict]

    if len(all_vsd_errs) == 0:
        return
    all_vsd_errs = np.array(all_vsd_errs)

    # print all_vsd_errs

    fig = plt.figure()
    # ax = plt.axes()
    # ax.set_xlim((0.0,1.0))
    # ax.set_ylim((0.0,1.0))
    ax = plt.gca()
    ax.set_xlim((0.0,1.0))
    plt.grid()
    plt.ylabel('vsd err')
    plt.xlabel('visibility [percent]')
    # plt.xlim((0.0, 1.0))
    # plt.ylim((0.0, 1.0))
    
    total_views = len(all_vsd_errs) // top_n
    vsd_errs = np.empty((total_views,))

    for view in range(total_views):
        top_n_errors = all_vsd_errs[view*top_n:(view+1)*top_n]
        vsd_errs[view] = top_n_errors[0]

    bounds = np.linspace(0,1,bins+1)
    bin_vsd_errs = []
    bin_count = []

    for idx in range(bins):
        bin_idcs = np.where((all_test_visibs>bounds[idx]) & (all_test_visibs<bounds[idx+1]))
        bin_vsd_errs.append(vsd_errs[bin_idcs])
        bin_count.append(len(bin_idcs[0]))
    
    middle_bin_vis = bounds[:-1] + (bounds[1]-bounds[0])/2.
    # plt.bar(middle_bin_vis,mean_vsd_err,0.5/bins)

    plt.boxplot(bin_vsd_errs, positions = middle_bin_vis, widths=0.5/bins, sym='+')


    # count_str = 'bin count ' + bins * '%s ' 
    # count_str = count_str % tuple(bin_count)
    plt.title('Visibility vs Mean VSD Error' + str(bin_count))
    plt.savefig(os.path.join(eval_dir,'figures','vsd_occlusion_{:02d}.png'.format(obj_id)), dpi=300)

    tikz_save(os.path.join(eval_dir,'latex','vsd_occlusion.tex'), figurewidth ='0.45\\textheight', figureheight='0.45\\textheight', show_info=False)
Example #6
def plot_vsd_err_hist(eval_args, eval_dir, scene_ids):
    # NOTE (lxc): this does not work well for multiple instances,
    # e.g. Tejani scene 04 (milk).
    top_n_eval = eval_args.getint('EVALUATION','TOP_N_EVAL')
    top_n = eval_args.getint('METRIC','TOP_N')
    delta = eval_args.getint('METRIC','VSD_DELTA')
    tau = eval_args.getint('METRIC','VSD_TAU')
    cost = eval_args.get('METRIC','VSD_COST')
    cam_type = eval_args.get('DATA','cam_type')
    dataset_name = eval_args.get('DATA','dataset')
    obj_id = eval_args.getint('DATA','obj_id')

    # if top_n_eval < 1:
    #     return

    data_params = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)
    
    vsd_errs = []
    for scene_id in scene_ids:
        error_file_path = os.path.join(eval_dir,'error=vsd_ntop=%s_delta=%s_tau=%s_cost=%s' % (top_n, delta, tau, cost), 'errors_{:02d}.yml'.format(scene_id))

        if not os.path.exists(error_file_path):
            print('WARNING: ' + error_file_path + ' not found')
            continue
        gts = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))
        visib_gts = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(scene_id, 15))
        vsd_dict = inout.load_yaml(error_file_path)
        for view in range(len(gts)):
            vsds = vsd_dict[view*top_n:(view+1)*top_n]
            for gt, visib_gt in zip(gts[view], visib_gts[view]):
                if gt['obj_id'] == obj_id:
                    if visib_gt['visib_fract'] > 0.1:
                        for vsd_e in vsds:
                            vsd_errs += [list(vsd_e['errors'].values())[0]]


    if len(vsd_errs) == 0:
        return
    vsd_errs = np.array(vsd_errs)
    print(len(vsd_errs))

    fig = plt.figure()
    ax = plt.gca()
    ax.set_xlim((0.0,1.0))
    plt.grid()
    plt.xlabel('vsd err')
    plt.ylabel('recall')
    plt.title('VSD Error vs Recall')
    legend=[]
    
    for n in np.unique(np.array([top_n, 1])):
        
        total_views = len(vsd_errs) // top_n
        min_vsd_errs = np.empty((total_views,))

        for view in range(total_views):
            top_n_errors = vsd_errs[view*top_n:(view+1)*top_n]
            if n == 1:
                top_n_errors = top_n_errors[np.newaxis, 0]  # keep only the best (first) hypothesis
            min_vsd_errs[view] = np.min(top_n_errors)

        min_vsd_errs_sorted = np.sort(min_vsd_errs)
        recall = np.float32(np.arange(total_views)+1.)/total_views

        # fill curve
        min_vsd_errs_sorted = np.hstack((min_vsd_errs_sorted, np.array([1.])))
        recall = np.hstack((recall,np.array([1.])))

        AUC_vsd = np.trapz(recall, min_vsd_errs_sorted)
        plt.plot(min_vsd_errs_sorted,recall)
        
        legend += ['top {0} vsd err, AUC = {1:.4f}'.format(n,AUC_vsd)]
    plt.legend(legend)
    plt.savefig(os.path.join(eval_dir,'figures','vsd_err_hist_{:02d}.png'.format(obj_id)), dpi=300)


    tikz_save(os.path.join(eval_dir,'latex','vsd_err_hist.tex'), figurewidth ='0.45\\textheight', figureheight='0.45\\textheight', show_info=False)
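A minimal, self-contained sketch of the error-vs-recall AUC computed above with np.trapz (names are illustrative, not from the original module):

import numpy as np

def recall_auc(errors, max_err=1.0):
    # recall at threshold e = fraction of views whose error is <= e
    errs = np.sort(np.asarray(errors, dtype=np.float64))
    recall = (np.arange(len(errs)) + 1.0) / len(errs)
    # extend the curve to max_err so the area covers the full error range
    errs = np.hstack((errs, [max_err]))
    recall = np.hstack((recall, [1.0]))
    return np.trapz(recall, errs / max_err)  # x normalized to [0, 1]

print(recall_auc([0.05, 0.1, 0.2, 0.6]))  # about 0.83 for this toy input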
Example #7
def plot_R_err_hist(eval_args, eval_dir, scene_ids):
    
    top_n_eval = eval_args.getint('EVALUATION','TOP_N_EVAL')
    top_n = eval_args.getint('METRIC','TOP_N')
    cam_type = eval_args.get('DATA','cam_type')
    dataset_name = eval_args.get('DATA','dataset')
    obj_id = eval_args.getint('DATA','obj_id')


    # if top_n_eval < 1: # why?
    #     return

    data_params = dataset_params.get_dataset_params(dataset_name, model_type='', train_type='', test_type=cam_type, cam_type=cam_type)

    angle_errs = []
    for scene_id in scene_ids:
        error_file_path = os.path.join(eval_dir,'error=re_ntop=%s' % top_n,'errors_{:02d}.yml'.format(scene_id))
        if not os.path.exists(error_file_path):
            print('WARNING: ' + error_file_path + ' not found')
            continue
        # angle_errs_dict = inout.load_yaml(error_file_path)
        # angle_errs += [angle_e['errors'].values()[0] for angle_e in angle_errs_dict]
        
        gts = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))
        visib_gts = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(scene_id, 15))
        re_dict = inout.load_yaml(error_file_path)

        for view in range(len(gts)):
            res = re_dict[view*top_n:(view+1)*top_n]
            for gt,visib_gt in zip(gts[view],visib_gts[view]):
                if gt['obj_id'] == obj_id:
                    if visib_gt['visib_fract'] > 0.1:
                        for re_e in res:
                            angle_errs += [list(re_e['errors'].values())[0]]

    if len(angle_errs) == 0:
        return
        
    angle_errs = np.array(angle_errs)

    fig = plt.figure()
    plt.grid()
    plt.xlabel('angle err [deg]')
    plt.ylabel('recall')
    plt.title('Angle Error vs Recall')
    legend=[]


    for n in np.unique(np.array([top_n, 1])):
        
        total_views = len(angle_errs) // top_n
        min_angle_errs = np.empty((total_views,))
        min_angle_errs_rect = np.empty((total_views,))

        for view in range(total_views):
            top_n_errors = angle_errs[view*top_n:(view+1)*top_n]
            if n == 1:
                top_n_errors = top_n_errors[np.newaxis,0]
            min_angle_errs[view] = np.min(top_n_errors)
            min_angle_errs_rect[view] = np.min(np.hstack((top_n_errors, 180-top_n_errors)))

        min_angle_errs_sorted = np.sort(min_angle_errs)
        min_angle_errs_rect_sorted = np.sort(min_angle_errs_rect)
        recall = (np.arange(total_views)+1.)/total_views

        # fill curve
        min_angle_errs_sorted = np.hstack((min_angle_errs_sorted, np.array([180.])))
        min_angle_errs_rect_sorted = np.hstack((min_angle_errs_rect_sorted, np.array([90.])))
        recall = np.hstack((recall,np.array([1.])))

        AUC_angle = np.trapz(recall,min_angle_errs_sorted/180.)
        AUC_angle_rect = np.trapz(recall,min_angle_errs_rect_sorted/90.)
        
        plt.plot(min_angle_errs_sorted,recall)
        plt.plot(min_angle_errs_rect_sorted,recall)
        
        legend += ['top {0} angle err, AUC = {1:.4f}'.format(n,AUC_angle), 'top {0} rectified angle err, AUC = {1:.4f}'.format(n,AUC_angle_rect)]
    plt.legend(legend)
    plt.savefig(os.path.join(eval_dir,'figures','R_err_hist_{:02d}.png'.format(obj_id)), dpi=300)

    tikz_save(os.path.join(eval_dir,'latex','R_err_hist.tex'), figurewidth ='0.45\\textheight', figureheight='0.45\\textheight', show_info=False)
Example #8
def main():
    '''
    lxc:
    use_euclidean means the similarity between the test embedding and the
    template embeddings is computed using Euclidean distance.
    '''
    #use_euclidean = False
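    # A minimal sketch (hypothetical helper, not part of this pipeline) of the two
    # conventions the docstring refers to, kept commented out on purpose:
    #
    # def embedding_similarity(test_emb, template_embs, use_euclidean=False):
    #     if use_euclidean:
    #         # smaller distance = more similar, so negate to get a score
    #         return -np.linalg.norm(template_embs - test_emb, axis=1)
    #     t = test_emb / np.linalg.norm(test_emb)
    #     T = template_embs / np.linalg.norm(template_embs, axis=1, keepdims=True)
    #     return T.dot(t)  # cosine similarity against every template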

    parser = argparse.ArgumentParser()

    parser.add_argument('experiment_name')
    parser.add_argument('evaluation_name')
    parser.add_argument('--eval_cfg', default='eval.cfg', required=False)
    parser.add_argument('--at_step', default=None, required=False)
    arguments = parser.parse_args()
    full_name = arguments.experiment_name.split('/')
    experiment_name = full_name.pop()
    experiment_group = full_name.pop() if len(full_name) > 0 else ''
    evaluation_name = arguments.evaluation_name
    eval_cfg = arguments.eval_cfg
    at_step = arguments.at_step

    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    train_cfg_file_path = u.get_config_file_path(workspace_path,
                                                 experiment_name,
                                                 experiment_group)
    eval_cfg_file_path = u.get_eval_config_file_path(workspace_path,
                                                     eval_cfg=eval_cfg)

    train_args = configparser.ConfigParser()
    eval_args = configparser.ConfigParser()
    train_args.read(train_cfg_file_path)
    eval_args.read(eval_cfg_file_path)

    #[DATA]
    # target data params
    dataset_name = eval_args.get('DATA', 'DATASET')
    obj_id = eval_args.getint('DATA', 'OBJ_ID')
    scenes = eval(eval_args.get('DATA', 'SCENES'))
    if len(scenes) == 0:
        scenes = eval_utils.get_all_scenes_for_obj(eval_args)
    cam_type = eval_args.get('DATA', 'cam_type')
    model_type = 'reconst' if dataset_name == 'tless' else ''  # model_type set to reconst only for tless.

    data_params = dataset_params.get_dataset_params(dataset_name,
                                                    model_type=model_type,
                                                    train_type='',
                                                    test_type=cam_type,
                                                    cam_type=cam_type)
    target_models_info = inout.load_yaml(
        data_params['models_info_path'])  # lxc

    # source data params, lxc
    source_dataset_name = 'toyotalight'
    # source_dataset_name = train_args.get('DATA','DATASET') # TODO train args no section DATA
    # source_obj_id = train_args.getint('DATA','OBJ_ID') # TODO train args no section DATA
    source_obj_id = int(train_cfg_file_path[-6:-4])  # TODO workaround
    source_data_params = dataset_params.get_dataset_params(source_dataset_name,
                                                           model_type='',
                                                           train_type='',
                                                           test_type='',
                                                           cam_type='')
    # for tless temporarily.
    # source_data_params = dataset_params.get_dataset_params(source_dataset_name, model_type='', train_type='', test_type='kinect', cam_type='kinect')
    source_models_info = inout.load_yaml(
        source_data_params['models_info_path'])
    print("source_models_info_path:", source_data_params['models_info_path'])
    # 'diameter' is not equal to sqrt(size_x^2 + size_y^2 + size_z^2) for
    # hinterstoisser, rutgers, tless, tejani, etc.; for toyotalight they coincide.
    target_models_3Dlength = np.linalg.norm([
        target_models_info[obj_id][key]
        for key in ['size_x', 'size_y', 'size_z']
    ])
    source_models_3Dlength = np.linalg.norm([
        source_models_info[source_obj_id][key]
        for key in ['size_x', 'size_y', 'size_z']
    ])

    target_source_length_ratio = target_models_3Dlength / source_models_3Dlength
    print("target_source_length_ratio:", target_source_length_ratio)
    print("source id {:02d}, target id {:02d}".format(source_obj_id, obj_id))
    print('basepath: ', data_params['base_path'])
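    # The ratio above is later handed to codebook.auto_pose6d, presumably to rescale
    # the estimated translation for the size difference between the source (training)
    # and target (test) models. Toy check with hypothetical extents in mm:
    #   np.linalg.norm([77., 77., 46.]) / np.linalg.norm([38., 39., 45.])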
    #[BBOXES]
    estimate_bbs = eval_args.getboolean('BBOXES', 'ESTIMATE_BBS')
    #[METRIC]
    top_nn = eval_args.getint('METRIC', 'TOP_N')
    #[EVALUATION]
    icp = eval_args.getboolean('EVALUATION', 'ICP')

    evaluation_name = evaluation_name + '_icp' if icp else evaluation_name
    evaluation_name = evaluation_name + '_bbest' if estimate_bbs else evaluation_name

    data = dataset_name + '_' + cam_type if len(cam_type) > 0 else dataset_name

    log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)
    ckpt_dir = u.get_checkpoint_dir(log_dir)
    eval_dir = u.get_eval_dir(log_dir, evaluation_name, data)

    # if eval_args.getboolean('EVALUATION','EVALUATE_ERRORS'):
    #     eval_loc.match_and_eval_performance_scores(eval_args, eval_dir)
    #     exit()

    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)
    shutil.copy2(eval_cfg_file_path, eval_dir)

    print "eval_args: ", eval_args

    codebook, dataset, decoder = factory.build_codebook_from_name(
        experiment_name,
        experiment_group,
        return_dataset=True,
        return_decoder=True)
    dataset.renderer  # touch the property so the renderer gets initialized here
    gpu_options = tf.GPUOptions(allow_growth=True,
                                per_process_gpu_memory_fraction=0.5)
    config = tf.ConfigProto(gpu_options=gpu_options)

    sess = tf.Session(config=config)
    factory.restore_checkpoint(sess,
                               tf.train.Saver(),
                               ckpt_dir,
                               at_step=at_step)

    if estimate_bbs:
        # Object Detection, separate from main
        # sys.path.append('/net/rmc-lx0050/home_local/sund_ma/src/SSD_Tensorflow')
        # from ssd_detector import SSD_detector
        # #TODO: set num_classes, network etc.
        # ssd = SSD_detector(sess, num_classes=31, net_shape=(300,300))
        from rmcssd.bin import detector
        ssd = detector.Detector(eval_args.get('BBOXES', 'CKPT'))

    t_errors = []
    R_errors = []
    all_test_visibs = []

    test_embeddings = []
    for scene_id in scenes:

        test_imgs = eval_utils.load_scenes(scene_id, eval_args)
        test_imgs_depth = eval_utils.load_scenes(
            scene_id, eval_args, depth=True) if icp else None

        if estimate_bbs:
            print(eval_args.get('BBOXES', 'EXTERNAL'))
            if eval_args.get('BBOXES', 'EXTERNAL') == 'False':
                bb_preds = {}
                for i, img in enumerate(test_imgs):
                    print(img.shape)
                    bb_preds[i] = ssd.detectSceneBBs(img,
                                                     min_score=.2,
                                                     nms_threshold=.45)
                # inout.save_yaml(os.path.join(scene_res_dir,'bb_preds.yml'), bb_preds)
                print(bb_preds)
            else:
                bb_preds = inout.load_yaml(
                    os.path.join(eval_args.get('BBOXES', 'EXTERNAL'),
                                 '{:02d}.yml'.format(scene_id)))

            test_img_crops, test_img_depth_crops, bbs, bb_scores, visibilities = eval_utils.generate_scene_crops(
                test_imgs, test_imgs_depth, bb_preds, eval_args, train_args)
        else:
            # test_img_crops: each crop contains some bbox(es) for specified object id.
            test_img_crops, test_img_depth_crops, bbs, bb_scores, visibilities = eval_utils.get_gt_scene_crops(
                scene_id, eval_args, train_args)

        if len(test_img_crops) == 0:
            print('ERROR: object %s not in scene %s' % (obj_id, scene_id))
            exit()

        info = inout.load_info(
            data_params['scene_info_mpath'].format(scene_id))
        Ks_test = [np.array(v['cam_K']).reshape(3, 3) for v in info.values()]

        ######remove
        gts = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))
        visib_gts = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(
            scene_id, 15))
        #######
        W_test, H_test = data_params['test_im_size']

        icp_renderer = icp_utils.SynRenderer(train_args) if icp else None
        noof_scene_views = eval_utils.noof_scene_views(scene_id, eval_args)

        test_embeddings.append([])

        scene_res_dir = os.path.join(
            eval_dir, '{scene_id:02d}'.format(scene_id=scene_id))
        if not os.path.exists(scene_res_dir):
            os.makedirs(scene_res_dir)

        for view in range(noof_scene_views):  # e.g., LINEMOD ape has noof_scene_views = 1236
            try:
                # only a specified object id is selected throughout the whole scene views.
                test_crops, test_crops_depth, test_bbs, test_scores, test_visibs = eval_utils.select_img_crops(
                    test_img_crops[view][obj_id],
                    test_img_depth_crops[view][obj_id] if icp else None,
                    bbs[view][obj_id], bb_scores[view][obj_id],
                    visibilities[view][obj_id], eval_args)
            except Exception:
                print('no detections')
                continue

            print(view)
            preds = {}
            pred_views = []
            all_test_visibs.append(test_visibs[0])
            t_errors_crop = []
            R_errors_crop = []

            for i, (test_crop, test_bb, test_score) in enumerate(
                    zip(test_crops, test_bbs, test_scores)):
                # each test_crop is a ground truth patch
                if train_args.getint('Dataset', 'C') == 1:
                    test_crop = cv2.cvtColor(test_crop,
                                             cv2.COLOR_BGR2GRAY)[:, :, None]
                start = time.time()
                '''modify here to change the pose estimation algorithm. lxc'''

                Rs_est, ts_est = codebook.auto_pose6d(
                    sess,
                    test_crop,
                    test_bb,
                    Ks_test[view].copy(),
                    top_nn,
                    train_args,
                    target_source_length_ratio=target_source_length_ratio)
                ae_time = time.time() - start
                run_time = ae_time + bb_preds[view][0][
                    'det_time'] if estimate_bbs else ae_time

                if eval_args.getboolean('PLOT', 'EMBEDDING_PCA'):
                    test_embeddings[-1].append(
                        codebook.test_embedding(sess,
                                                test_crop,
                                                normalized=True))

                # icp = False if view<350 else True
                #TODO:
                Rs_est_old, ts_est_old = Rs_est.copy(), ts_est.copy()
                for p in xrange(top_nn):
                    if icp:
                        start = time.time()
                        # icp only along tz
                        R_est_refined, t_est_refined = icp_utils.icp_refinement(
                            test_crops_depth[i],
                            icp_renderer,
                            Rs_est[p],
                            ts_est[p],
                            Ks_test[view].copy(), (W_test, H_test),
                            depth_only=True,
                            max_mean_dist_factor=5.0)
                        print(ts_est[p])
                        print(t_est_refined)
                        # x,y update,does not change tz:
                        _, ts_est_refined = codebook.auto_pose6d(
                            sess,
                            test_crop,
                            test_bb,
                            Ks_test[view].copy(),
                            top_nn,
                            train_args,
                            depth_pred=t_est_refined[2])
                        # commented by lxc
                        # _, ts_est_refined, _ = codebook.auto_pose6d(sess, test_crop, test_bb, Ks_test[view].copy(), top_nn, train_args,depth_pred=t_est_refined[2])
                        t_est_refined = ts_est_refined[p]
                        # rotation icp, only accepted if below 20 deg change
                        R_est_refined, _ = icp_utils.icp_refinement(
                            test_crops_depth[i],
                            icp_renderer,
                            R_est_refined,
                            t_est_refined,
                            Ks_test[view].copy(), (W_test, H_test),
                            no_depth=True)
                        print(Rs_est[p])
                        print(R_est_refined)
                        icp_time = time.time() - start
                        Rs_est[p], ts_est[p] = R_est_refined, t_est_refined
                    preds.setdefault('ests', []).append({
                        'score': test_score,
                        'R': Rs_est[p],
                        't': ts_est[p]
                    })
                run_time = run_time + icp_time if icp else run_time

                min_t_err, min_R_err = eval_plots.print_trans_rot_errors(
                    gts[view], obj_id, ts_est, ts_est_old, Rs_est, Rs_est_old)
                t_errors_crop.append(min_t_err)
                R_errors_crop.append(min_R_err)

                if eval_args.getboolean('PLOT', 'RECONSTRUCTION'):
                    eval_plots.plot_reconstruction_test(
                        sess, codebook._encoder, decoder, test_crop)
                    # eval_plots.plot_reconstruction_train(sess, decoder, nearest_train_codes[0])
                if eval_args.getboolean('PLOT',
                                        'NEAREST_NEIGHBORS') and not icp:
                    for R_est, t_est in zip(Rs_est, ts_est):
                        pred_views.append(
                            dataset.render_rot(R_est, downSample=2))
                    eval_plots.show_nearest_rotation(pred_views, test_crop,
                                                     view)
                if eval_args.getboolean('PLOT', 'SCENE_WITH_ESTIMATE'):
                    eval_plots.plot_scene_with_estimate(
                        test_imgs[view].copy(),
                        icp_renderer.renderer if icp else dataset.renderer,
                        Ks_test[view].copy(), Rs_est_old[0], ts_est_old[0],
                        Rs_est[0], ts_est[0], test_bb, test_score, obj_id,
                        gts[view], bb_preds[view] if estimate_bbs else None)

                if cv2.waitKey(1) == 32:
                    cv2.waitKey(0)

            t_errors.append(t_errors_crop[np.argmin(
                np.linalg.norm(np.array(t_errors_crop), axis=1))])
            R_errors.append(R_errors_crop[np.argmin(
                np.linalg.norm(np.array(t_errors_crop), axis=1))])

            # save predictions in sixd format
            res_path = os.path.join(scene_res_dir,
                                    '%04d_%02d.yml' % (view, obj_id))
            inout.save_results_sixd17(res_path, preds, run_time=run_time)

    if not os.path.exists(os.path.join(eval_dir, 'latex')):
        os.makedirs(os.path.join(eval_dir, 'latex'))
    if not os.path.exists(os.path.join(eval_dir, 'figures')):
        os.makedirs(os.path.join(eval_dir, 'figures'))
    '''evaluation code:
    dataset_renderer renders the source object model for evaluation;
    if the target object model is needed for evaluation, build a new renderer.
    '''

    if eval_args.getboolean('EVALUATION', 'COMPUTE_ERRORS'):
        eval_calc_errors.eval_calc_errors(eval_args,
                                          eval_dir,
                                          dataset_renderer=dataset.renderer)
    if eval_args.getboolean('EVALUATION', 'EVALUATE_ERRORS'):
        eval_loc.match_and_eval_performance_scores(eval_args, eval_dir)
    '''plot code'''
    cyclo = train_args.getint('Embedding', 'NUM_CYCLO')
    if eval_args.getboolean('PLOT', 'EMBEDDING_PCA'):
        embedding = sess.run(codebook.embedding_normalized)
        eval_plots.compute_pca_plot_embedding(eval_dir,
                                              embedding[::cyclo],
                                              np.array(test_embeddings[0]),
                                              obj_id=obj_id)
    if eval_args.getboolean('PLOT', 'VIEWSPHERE'):
        eval_plots.plot_viewsphere_for_embedding(
            dataset.viewsphere_for_embedding[::cyclo], eval_dir, obj_id=obj_id)
    if eval_args.getboolean('PLOT', 'CUM_T_ERROR_HIST'):
        eval_plots.plot_t_err_hist(np.array(t_errors), eval_dir, obj_id=obj_id)
        eval_plots.plot_t_err_hist2(np.array(t_errors),
                                    eval_dir,
                                    obj_id=obj_id)
    if eval_args.getboolean('PLOT', 'CUM_R_ERROR_HIST'):
        eval_plots.plot_R_err_hist(eval_args, eval_dir, scenes)
        eval_plots.plot_R_err_hist2(np.array(R_errors),
                                    eval_dir,
                                    obj_id=obj_id)
    if eval_args.getboolean('PLOT', 'CUM_VSD_ERROR_HIST'):
        eval_plots.plot_vsd_err_hist(eval_args, eval_dir, scenes)
    if eval_args.getboolean('PLOT', 'VSD_OCCLUSION'):
        eval_plots.plot_vsd_occlusion(eval_args, eval_dir, scenes,
                                      np.array(all_test_visibs))
    if eval_args.getboolean('PLOT', 'R_ERROR_OCCLUSION'):
        eval_plots.plot_re_rect_occlusion(eval_args, eval_dir, scenes,
                                          np.array(all_test_visibs))
    if eval_args.getboolean('PLOT', 'ANIMATE_EMBEDDING_PCA'):
        eval_plots.animate_embedding_path(test_embeddings[0])
    if eval_args.getboolean('PLOT', 'RECONSTRUCTION_TEST_BATCH'):
        eval_plots.plot_reconstruction_test_batch(sess,
                                                  codebook,
                                                  decoder,
                                                  test_img_crops,
                                                  noof_scene_views,
                                                  obj_id,
                                                  eval_dir=eval_dir)
        # plt.show()

        # calculate 6D pose errors
        # print 'exiting ...'
        # eval_calc_errors.eval_calc_errors(eval_args, eval_dir)
        # calculate 6D pose errors

    report = latex_report.Report(eval_dir, log_dir)
    report.write_configuration(train_cfg_file_path, eval_cfg_file_path)
    report.merge_all_tex_files()
    report.include_all_figures()
    report.save(open_pdf=False)
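Illustrative invocation (script name and paths hypothetical), matching the argparse setup at the top of this main:

# AE_WORKSPACE_PATH must point at the workspace holding the experiment configs
# python ae_eval.py my_group/my_experiment my_evaluation --eval_cfg eval.cfg --at_step 30000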
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('experiment_group')
    parser.add_argument('--eval_name', default='*', required=False)
    args = parser.parse_args()

    experiment_group = args.experiment_group
    eval_name = args.eval_name
    print(eval_name)

    workspace_path = os.environ.get('AE_WORKSPACE_PATH')

    exp_group_path = os.path.join(workspace_path, 'experiments',
                                  experiment_group)
    print(exp_group_path)
    error_score_files = glob.glob(
        os.path.join(exp_group_path, '*/eval', eval_name, '*/error*/scores*'))
    print(error_score_files)
    data_re = []
    data_auc_re = []
    data_auc_rerect = []
    data_te = []
    data_vsd = []
    data_cou = []
    data_add = []
    data_adi = []
    data_proj = []
    data_paper_vsd = {}
    data_paper_auc = {}
    latex_content = []

    for error_score_file in error_score_files:
        split_path = error_score_file.split('/')
        exp_name = split_path[-6]
        eval_name = split_path[-4]
        occl = 'occlusion' if 'occlusion' in error_score_file else ''
        test_data = split_path[-3]
        error_type = split_path[-2].split('_')[0].split('=')[1]
        print(error_type)
        topn = split_path[-2].split('=')[2].split('_')[0]
        error_thres = split_path[-1].split('=')[1].split('_')[0]

        eval_cfg_file_path = os.path.join(workspace_path, 'experiments',
                                          experiment_group, exp_name, 'eval',
                                          eval_name, test_data, '*.cfg')
        eval_cfg_file_pathes = glob.glob(eval_cfg_file_path)
        if len(eval_cfg_file_pathes) == 0:
            continue
        else:
            eval_cfg_file_path = eval_cfg_file_pathes[0]

        eval_args = configparser.ConfigParser()
        eval_args.read(eval_cfg_file_path)
        print(eval_cfg_file_path)
        estimate_bbs = eval_args.getboolean('BBOXES', 'ESTIMATE_BBS')
        try:
            obj_id = eval_args.getint('DATA', 'OBJ_ID')
        except configparser.NoOptionError:
            obj_id = eval(eval_args.get('DATA', 'OBJECTS'))[0]

        scenes = eval_utils.get_all_scenes_for_obj(eval_args)

        data = [item[1] for item in eval_args.items('DATA')]
        scenes_cfg = eval(eval_args.get('DATA', 'SCENES'))
        data[2] = scenes_cfg if len(scenes_cfg) > 0 else scenes

        # print str(data)

        error_score_dict = inout.load_yaml(error_score_file)
        try:
            sixd_recall = error_score_dict['obj_recalls'][obj_id]
        except KeyError:
            continue

        if error_type == 're':
            data_re.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': str(data[3])
            })
            err_file = os.path.join(
                os.path.dirname(os.path.dirname(error_score_file)),
                'latex/R_err_hist.tex')
            try:
                with open(err_file, 'r') as f:
                    for line in f:
                        if re.match('(.*)legend entries(.*)', line):
                            auc_re = float(line.split('=')[2].split('}')[0])
                            auc_rerect = float(
                                line.split('=')[3].split('}')[0])

                data_auc_re.append({
                    'exp_name': exp_name,
                    'eval_name': eval_name,
                    'error_type': 'auc_re',
                    'thres': 'None',
                    'top': topn,
                    'sixd_recall': auc_re,
                    'EST_BBS': estimate_bbs,
                    'eval_data': str(data[:2] + [occl]),
                    'eval_scenes': str(data[2]),
                    'eval_obj': str(data[3])
                })
                data_auc_rerect.append({
                    'exp_name': exp_name,
                    'eval_name': eval_name,
                    'error_type': 'auc_rerect',
                    'thres': 'None',
                    'top': topn,
                    'sixd_recall': auc_rerect,
                    'EST_BBS': estimate_bbs,
                    'eval_data': str(data[:2] + [occl]),
                    'eval_scenes': str(data[2]),
                    'eval_obj': str(data[3])
                })

                if int(data[3]) not in data_paper_auc:
                    data_paper_auc[int(data[3])] = {}
                    data_paper_auc[int(data[3])]['eval_obj'] = int(data[3])
                data_paper_auc[int(
                    data[3])][eval_name + '_' + 'auc_re' + '_' +
                              str(data[1])] = float(auc_re) * 100
                data_paper_auc[int(
                    data[3])][eval_name + '_' + 'auc_rerect' + '_' +
                              str(data[1])] = float(auc_rerect) * 100
            except Exception:
                print(err_file, 'not found')

        elif error_type == 'te':
            data_te.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': str(data[3])
            })
        elif error_type == 'vsd':
            data_vsd.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': int(data[3]) if '[' not in data[3] else eval(data[3])[0]
            })
            if int(data[3]) not in data_paper_vsd:
                data_paper_vsd[int(data[3])] = {}
                data_paper_vsd[int(data[3])]['eval_obj'] = int(data[3])
            data_paper_vsd[int(
                data[3])][eval_name + '_' + error_type + '_' +
                          str(data[1])] = float(sixd_recall) * 100

        elif error_type == 'cou':
            data_cou.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': str(data[3])
            })
        elif error_type == 'add':
            data_add.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': str(data[3])
            })
        elif error_type == 'proj':
            data_proj.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': str(data[3])
            })
        elif error_type == 'adi':
            data_adi.append({
                'exp_name': exp_name,
                'eval_name': eval_name,
                'error_type': error_type,
                'thres': error_thres,
                'top': topn,
                'sixd_recall': sixd_recall,
                'EST_BBS': estimate_bbs,
                'eval_data': str(data[:2] + [occl]),
                'eval_scenes': str(data[2]),
                'eval_obj': str(data[3])
            })
        else:
            print('error not known: ', error_type)

    # the simple per-error-type tables all share one layout; build them in a loop
    def append_table(records):
        if len(records) == 0:
            return
        df = pd.DataFrame(records).sort_values(
            by=['eval_obj', 'eval_name', 'eval_data', 'sixd_recall'])
        latex_content.append('\\begin{adjustbox}{max width=\\textwidth}')
        latex_content.append(df.to_latex(index=False, multirow=True))
        latex_content.append('\\end{adjustbox}')
        latex_content.append('\n')
        latex_content.append('\n')

    for records in (data_re, data_auc_re, data_auc_rerect, data_te,
                    data_cou, data_add, data_proj, data_adi):
        append_table(records)
    if len(data_paper_vsd) > 0:
        df_paper = pd.DataFrame.from_dict(data_paper_vsd, orient='index')
        cols = ['eval_obj'] + [col for col in df_paper if col != 'eval_obj']
        df_paper = df_paper[cols]
        df_paper = df_paper.sort_index(axis=1)
        df_paper.loc['mean'] = df_paper.mean(axis=0)
        # df_paper.loc['mean'][0] = 0

        latex_content.append('\\begin{adjustbox}{max width=\\textwidth}')
        latex_list = df_paper.to_latex(index=False,
                                       multirow=True,
                                       float_format='%.2f').splitlines()
        latex_list.insert(len(latex_list) - 3, '\\midrule')
        latex_new = '\n'.join(latex_list)
        latex_content.append(latex_new)
        latex_content.append('\\end{adjustbox}')
        latex_content.append('\n')
        latex_content.append('\n')
    if len(data_paper_auc) > 0:
        df_paper = pd.DataFrame.from_dict(data_paper_auc, orient='index')
        cols = ['eval_obj'] + [col for col in df_paper if col != 'eval_obj']
        df_paper = df_paper[cols]
        df_paper = df_paper.sort_index(axis=1)
        df_paper.loc['mean'] = df_paper.mean(axis=0)
        # df_paper.loc['mean'][0] = 0

        latex_content.append('\\begin{adjustbox}{max width=\\textwidth}')
        latex_list = df_paper.to_latex(index=False,
                                       multirow=True,
                                       float_format='%.2f').splitlines()
        latex_list.insert(len(latex_list) - 3, '\\midrule')
        latex_new = '\n'.join(latex_list)
        latex_content.append(latex_new)
        latex_content.append('\\end{adjustbox}')
        latex_content.append('\n')
        latex_content.append('\n')
    if len(data_vsd) > 0:
        df_vsd = pd.DataFrame(data_vsd).sort_values(
            by=['eval_obj', 'eval_name', 'eval_data', 'sixd_recall'])
        latex_content.append('\\begin{adjustbox}{max width=\\textwidth}')
        latex_content.append(df_vsd.to_latex(index=False, multirow=True))
        latex_content.append('\\end{adjustbox}')

    latex_content = ''.join(latex_content)

    full_filename = os.path.join(exp_group_path, 'latex', 'report.tex')
    if not os.path.exists(os.path.join(exp_group_path, 'latex')):
        os.makedirs(os.path.join(exp_group_path, 'latex'))

    with open(full_filename, 'w') as f:
        f.write(prolog % (time.ctime(), experiment_group.replace('_', '\_')))
        f.write(latex_content)
        f.write(epilog)

    from subprocess import check_output, Popen
    check_output(['pdflatex', 'report.tex'],
                 cwd=os.path.dirname(full_filename))
    Popen(['okular', 'report.pdf'], cwd=os.path.dirname(full_filename))

    print('finished')
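Illustrative invocation (script name hypothetical); pdflatex and okular must be on PATH, since the report is compiled and opened automatically:

# python compile_eval_latex_report.py my_experiment_group --eval_name my_evaluation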
Example #10
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument('experiment_name')
    parser.add_argument('evaluation_name')
    parser.add_argument('--eval_cfg', default='eval.cfg', required=False)
    parser.add_argument('--at_step', default=None, type=str, required=False)
    parser.add_argument('--model_path', default=None, required=True)
    arguments = parser.parse_args()
    full_name = arguments.experiment_name.split('/')
    experiment_name = full_name.pop()
    experiment_group = full_name.pop() if len(full_name) > 0 else ''
    evaluation_name = arguments.evaluation_name
    eval_cfg = arguments.eval_cfg
    at_step = arguments.at_step
    model_path = arguments.model_path

    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    log_dir = u.get_log_dir(workspace_path, experiment_name, experiment_group)
    train_cfg_file_path = u.get_train_config_exp_file_path(
        log_dir, experiment_name)
    eval_cfg_file_path = u.get_eval_config_file_path(workspace_path,
                                                     eval_cfg=eval_cfg)

    train_args = configparser.ConfigParser(inline_comment_prefixes="#")
    eval_args = configparser.ConfigParser(inline_comment_prefixes="#")
    train_args.read(train_cfg_file_path)
    eval_args.read(eval_cfg_file_path)

    #[DATA]
    dataset_name = eval_args.get('DATA', 'DATASET')
    obj_id = eval_args.getint('DATA', 'OBJ_ID')
    scenes = eval(eval_args.get('DATA', 'SCENES'))
    if len(scenes) == 0:
        scenes = eval_utils.get_all_scenes_for_obj(eval_args)
    cam_type = eval_args.get('DATA', 'cam_type')
    data_params = dataset_params.get_dataset_params(dataset_name,
                                                    model_type='',
                                                    train_type='',
                                                    test_type=cam_type,
                                                    cam_type=cam_type)
    #[BBOXES]
    estimate_bbs = eval_args.getboolean('BBOXES', 'ESTIMATE_BBS')
    gt_masks = eval_args.getboolean('BBOXES', 'gt_masks')
    estimate_masks = eval_args.getboolean('BBOXES', 'estimate_masks')

    #[METRIC]
    top_nn = eval_args.getint('METRIC', 'TOP_N')
    #[EVALUATION]
    icp = eval_args.getboolean('EVALUATION', 'ICP')
    gt_trans = eval_args.getboolean('EVALUATION', 'gt_trans')
    iterative_code_refinement = eval_args.getboolean(
        'EVALUATION', 'iterative_code_refinement')

    H_AE = train_args.getint('Dataset', 'H')
    W_AE = train_args.getint('Dataset', 'W')

    evaluation_name = evaluation_name + '_icp' if icp else evaluation_name
    evaluation_name = evaluation_name + '_bbest' if estimate_bbs else evaluation_name
    evaluation_name = evaluation_name + '_maskest' if estimate_masks else evaluation_name
    evaluation_name = evaluation_name + '_gttrans' if gt_trans else evaluation_name
    evaluation_name = evaluation_name + '_gtmasks' if gt_masks else evaluation_name
    evaluation_name = evaluation_name + '_refined' if iterative_code_refinement else evaluation_name

    data = dataset_name + '_' + cam_type if len(cam_type) > 0 else dataset_name

    if at_step is None:
        checkpoint_file = u.get_checkpoint_basefilename(
            log_dir,
            False,
            latest=train_args.getint('Training', 'NUM_ITER'),
            joint=True)
    else:
        checkpoint_file = u.get_checkpoint_basefilename(log_dir,
                                                        False,
                                                        latest=at_step,
                                                        joint=True)
    print(checkpoint_file)
    eval_dir = u.get_eval_dir(log_dir, evaluation_name, data)

    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)
    shutil.copy2(eval_cfg_file_path, eval_dir)

    codebook, dataset = factory.build_codebook_from_name(experiment_name,
                                                         experiment_group,
                                                         return_dataset=True,
                                                         joint=True)
    dataset._kw['model_path'] = [model_path]
    # prefer 'reconst' if the model path mentions it, otherwise fall back to 'cad'
    dataset._kw['model'] = 'reconst' if 'reconst' in model_path else 'cad'

    gpu_options = tf.GPUOptions(allow_growth=True,
                                per_process_gpu_memory_fraction=0.5)
    config = tf.ConfigProto(gpu_options=gpu_options)

    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_file)

    t_errors = []
    R_errors = []
    all_test_visibs = []

    external_path = eval_args.get('BBOXES', 'EXTERNAL')

    test_embeddings = []
    for scene_id in scenes:

        test_imgs = eval_utils.load_scenes(scene_id, eval_args)
        test_imgs_depth = eval_utils.load_scenes(
            scene_id, eval_args, depth=True) if icp else None

        if estimate_bbs:
            print(external_path)
            if external_path == 'False':
                bb_preds = {}
                for i, img in enumerate(test_imgs):
                    print(img.shape)
                    # NOTE: ssd (an SSD detector instance) must already be constructed;
                    # this main never builds it (compare the detector setup in Example #8)
                    bb_preds[i] = ssd.detectSceneBBs(img,
                                                     min_score=.05,
                                                     nms_threshold=.45)
                print(bb_preds)
            else:
                if estimate_masks:
                    bb_preds = inout.load_yaml(
                        os.path.join(
                            external_path,
                            '{:02d}/mask_rcnn_predict.yml'.format(scene_id)))
                    print(list(bb_preds[0][0].keys()))
                    mask_paths = glob.glob(
                        os.path.join(external_path,
                                     '{:02d}/masks/*.npy'.format(scene_id)))
                    maskrcnn_scene_masks = [np.load(mp) for mp in mask_paths]
                else:
                    maskrcnn_scene_masks = None
                    bb_preds = inout.load_yaml(
                        os.path.join(external_path,
                                     '{:02d}.yml'.format(scene_id)))

            test_img_crops, test_img_depth_crops, bbs, bb_scores, visibilities = eval_utils.generate_scene_crops(
                test_imgs,
                test_imgs_depth,
                bb_preds,
                eval_args, (H_AE, W_AE),
                inst_masks=maskrcnn_scene_masks)
        else:
            test_img_crops, test_img_depth_crops, bbs, bb_scores, visibilities = eval_utils.get_gt_scene_crops(
                scene_id,
                eval_args,
                train_args,
                load_gt_masks=external_path if gt_masks else gt_masks)

        if len(test_img_crops) == 0:
            print('ERROR: object %s not in scene %s' % (obj_id, scene_id))
            exit()

        info = inout.load_info(
            data_params['scene_info_mpath'].format(scene_id))
        Ks_test = [
            np.array(v['cam_K']).reshape(3, 3) for v in list(info.values())
        ]

        # GT poses and visibility statistics of this scene (used by the
        # gt_trans option and by the error printing / plotting below)
        gts = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))
        visib_gts = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(
            scene_id, 15))
        W_test, H_test = data_params['test_im_size']

        icp_renderer = icp_utils.SynRenderer(
            train_args, dataset._kw['model_path'][0]) if icp else None
        noof_scene_views = eval_utils.noof_scene_views(scene_id, eval_args)

        test_embeddings.append([])

        scene_res_dir = os.path.join(
            eval_dir, '{scene_id:02d}'.format(scene_id=scene_id))
        if not os.path.exists(scene_res_dir):
            os.makedirs(scene_res_dir)

        for view in range(noof_scene_views):
            try:
                test_crops, test_crops_depth, test_bbs, test_scores, test_visibs = eval_utils.select_img_crops(
                    test_img_crops[view][obj_id],
                    test_img_depth_crops[view][obj_id] if icp else None,
                    bbs[view][obj_id], bb_scores[view][obj_id],
                    visibilities[view][obj_id], eval_args)
            except:
                print('no detections of object %s in view %s' % (obj_id, view))
                continue

            print(view)
            preds = {}
            pred_views = []
            all_test_visibs.append(test_visibs[0])
            t_errors_crop = []
            R_errors_crop = []

            for i, (test_crop, test_bb, test_score) in enumerate(
                    zip(test_crops, test_bbs, test_scores)):

                start = time.time()
                if train_args.getint('Dataset', 'C') == 1:
                    test_crop = cv2.cvtColor(test_crop,
                                             cv2.COLOR_BGR2GRAY)[:, :, None]
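                # codebook lookup: returns the top_nn rotation and translation
                # estimates for this crop (optionally with iterative refinement)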
                Rs_est, ts_est, _ = codebook.auto_pose6d(
                    sess,
                    test_crop,
                    test_bb,
                    Ks_test[view].copy(),
                    top_nn,
                    train_args,
                    codebook._get_codebook_name(model_path),
                    refine=iterative_code_refinement)
                Rs_est_old, ts_est_old = Rs_est.copy(), ts_est.copy()
                ae_time = time.time() - start

                if eval_args.getboolean('PLOT', 'EMBEDDING_PCA'):
                    test_embeddings[-1].append(
                        codebook.test_embedding(sess,
                                                test_crop,
                                                normalized=True))

                if eval_args.getboolean('EVALUATION', 'gt_trans'):
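                    # ignore the estimated translations and use the ground-truth
                    # translation of the GT instance of obj_id whose annotated
                    # bbox deviates least from its visible bbox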
                    ts_est = np.empty((top_nn, 3))
                    for n in range(top_nn):
                        smallest_diff = np.inf
                        for visib_gt, gt in zip(visib_gts[view], gts[view]):
                            if gt['obj_id'] == obj_id:
                                diff = np.sum(
                                    np.abs(gt['obj_bb'] -
                                           np.array(visib_gt['bbox_obj'])))
                                if diff < smallest_diff:
                                    smallest_diff = diff
                                    gt_obj = gt.copy()
                                    print('gt_trans: using translation of best-matching GT instance')
                        ts_est[n] = np.array(gt_obj['cam_t_m2c']).reshape(-1)

                try:
                    run_time = ae_time + bb_preds[view][0][
                        'det_time'] if estimate_bbs else ae_time
                except:
                    run_time = ae_time

                for p in range(top_nn):
                    if icp:
                        # note: In the CVPR paper a different ICP was used
                        start = time.time()
                        # depth icp
                        R_est_refined, t_est_refined = icp_utils.icp_refinement(
                            test_crops_depth[i],
                            icp_renderer,
                            Rs_est[p],
                            ts_est[p],
                            Ks_test[view].copy(), (W_test, H_test),
                            depth_only=True,
                            max_mean_dist_factor=5.0)
                        print(t_est_refined)

                        # x,y update,does not change tz:
                        _, ts_est_refined, _ = codebook.auto_pose6d(
                            sess,
                            test_crop,
                            test_bb,
                            Ks_test[view].copy(),
                            top_nn,
                            train_args,
                            codebook._get_codebook_name(model_path),
                            depth_pred=t_est_refined[2],
                            refine=iterative_code_refinement)

                        t_est_refined = ts_est_refined[p]

                        # rotation icp, only accepted if below 20 deg change
                        R_est_refined, _ = icp_utils.icp_refinement(
                            test_crops_depth[i],
                            icp_renderer,
                            R_est_refined,
                            t_est_refined,
                            Ks_test[view].copy(), (W_test, H_test),
                            no_depth=True)
                        print(Rs_est[p])
                        print(R_est_refined)

                        icp_time = time.time() - start
                        Rs_est[p], ts_est[p] = R_est_refined, t_est_refined

                    preds.setdefault('ests', []).append({
                        'score': test_score,
                        'R': Rs_est[p],
                        't': ts_est[p]
                    })
                run_time = run_time + icp_time if icp else run_time

                min_t_err, min_R_err = eval_plots.print_trans_rot_errors(
                    gts[view], obj_id, ts_est, ts_est_old, Rs_est, Rs_est_old)
                t_errors_crop.append(min_t_err)
                R_errors_crop.append(min_R_err)

                if eval_args.getboolean('PLOT',
                                        'NEAREST_NEIGHBORS') and not icp:
                    for R_est, t_est in zip(Rs_est, ts_est):
                        pred_views.append(
                            dataset.render_rot(R_est, downSample=2))
                    eval_plots.show_nearest_rotation(pred_views, test_crop,
                                                     view)
                if eval_args.getboolean('PLOT', 'SCENE_WITH_ESTIMATE'):
                    eval_plots.plot_scene_with_estimate(
                        test_imgs[view].copy(),
                        icp_renderer.renderer if icp else dataset.renderer,
                        Ks_test[view].copy(), Rs_est_old[0], ts_est_old[0],
                        Rs_est[0], ts_est[0], test_bb, test_score, obj_id,
                        gts[view], bb_preds[view] if estimate_bbs else None)

                if cv2.waitKey(1) == 32:
                    cv2.waitKey(0)

            # per view, keep the errors of the detection whose translation error
            # has the smallest Euclidean norm
            best_idx = np.argmin(np.linalg.norm(np.array(t_errors_crop), axis=1))
            t_errors.append(t_errors_crop[best_idx])
            R_errors.append(R_errors_crop[best_idx])

            # save predictions in sixd format
            res_path = os.path.join(scene_res_dir,
                                    '%04d_%02d.yml' % (view, obj_id))
            inout.save_results_sixd17(res_path, preds, run_time=run_time)

    if not os.path.exists(os.path.join(eval_dir, 'latex')):
        os.makedirs(os.path.join(eval_dir, 'latex'))
    if not os.path.exists(os.path.join(eval_dir, 'figures')):
        os.makedirs(os.path.join(eval_dir, 'figures'))

    if eval_args.getboolean('EVALUATION', 'COMPUTE_ERRORS'):
        eval_calc_errors.eval_calc_errors(eval_args, eval_dir)
    if eval_args.getboolean('EVALUATION', 'EVALUATE_ERRORS'):
        eval_loc.match_and_eval_performance_scores(eval_args, eval_dir)

    cyclo = train_args.getint('Embedding', 'NUM_CYCLO')
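    # stepping through the codebook by NUM_CYCLO below presumably keeps one
    # embedding per viewpoint (the codebook appears to hold NUM_CYCLO in-plane
    # rotations per view) for the PCA and viewsphere plots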
    if eval_args.getboolean('PLOT', 'EMBEDDING_PCA'):
        embedding = sess.run(codebook.embedding_normalized)
        eval_plots.compute_pca_plot_embedding(eval_dir, embedding[::cyclo],
                                              np.array(test_embeddings[0]))
    if eval_args.getboolean('PLOT', 'VIEWSPHERE'):
        eval_plots.plot_viewsphere_for_embedding(
            dataset.viewsphere_for_embedding[::cyclo], eval_dir)
    if eval_args.getboolean('PLOT', 'CUM_T_ERROR_HIST'):
        eval_plots.plot_t_err_hist(np.array(t_errors), eval_dir)
        eval_plots.plot_t_err_hist2(np.array(t_errors), eval_dir)
    if eval_args.getboolean('PLOT', 'CUM_R_ERROR_HIST'):
        eval_plots.plot_R_err_recall(eval_args, eval_dir, scenes)
        eval_plots.plot_R_err_hist2(np.array(R_errors), eval_dir)
    if eval_args.getboolean('PLOT', 'CUM_VSD_ERROR_HIST'):
        try:
            eval_plots.plot_vsd_err_hist(eval_args, eval_dir, scenes)
        except:
            pass
    if eval_args.getboolean('PLOT', 'VSD_OCCLUSION'):
        try:
            eval_plots.plot_vsd_occlusion(eval_args, eval_dir, scenes,
                                          np.array(all_test_visibs))
        except:
            pass
    if eval_args.getboolean('PLOT', 'R_ERROR_OCCLUSION'):
        try:
            eval_plots.plot_re_rect_occlusion(eval_args, eval_dir, scenes,
                                              np.array(all_test_visibs))
        except:
            pass
    if eval_args.getboolean('PLOT', 'ANIMATE_EMBEDDING_PCA'):
        eval_plots.animate_embedding_path(test_embeddings[0])

    report = latex_report.Report(eval_dir, log_dir)
    report.write_configuration(train_cfg_file_path, eval_cfg_file_path)
    report.merge_all_tex_files()
    report.include_all_figures()
    report.save(open_pdf=True)
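# --- Illustration (not part of the example above) ---------------------------
# A minimal standalone sketch of the per-view aggregation used for t_errors and
# R_errors: among all detections of a single view, keep the error pair whose
# translation error has the smallest Euclidean norm. All values are made up.
import numpy as np

t_errors_crop = np.array([[12.0, -3.0, 40.0],   # translation errors [mm], one row per detection
                          [2.0, 1.0, -5.0],
                          [30.0, 8.0, 15.0]])
R_errors_crop = np.array([25.0, 4.0, 60.0])     # rotation errors [deg], one per detection

best = np.argmin(np.linalg.norm(t_errors_crop, axis=1))
print(t_errors_crop[best], R_errors_crop[best])  # -> [ 2.  1. -5.] 4.0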
Example No. 11
def get_gt_scene_crops(scene_id, eval_args, train_args, load_gt_masks=False):

    dataset_name = eval_args.get('DATA', 'DATASET')
    cam_type = eval_args.get('DATA', 'CAM_TYPE')
    icp = eval_args.getboolean('EVALUATION', 'ICP')

    delta = eval_args.get('METRIC', 'VSD_DELTA')

    workspace_path = os.environ.get('AE_WORKSPACE_PATH')
    dataset_path = u.get_dataset_path(workspace_path)

    H_AE = train_args.getint('Dataset', 'H')
    W_AE = train_args.getint('Dataset', 'W')

    # the generated crops are cached in an .npz file named after the md5 hash
    # of the scene id, the DATA / BBOXES settings and the AE input height
    cfg_string = str([scene_id] + eval_args.items('DATA') +
                     eval_args.items('BBOXES') + [H_AE])
    cfg_string = cfg_string.encode('utf-8')
    current_config_hash = hashlib.md5(cfg_string).hexdigest()

    current_file_name = os.path.join(dataset_path,
                                     current_config_hash + '.npz')

    if os.path.exists(current_file_name):
        data = np.load(current_file_name, allow_pickle=True)  # crops are stored as pickled object arrays
        test_img_crops = data['test_img_crops'].item()
        test_img_depth_crops = data['test_img_depth_crops'].item()
        bb_scores = data['bb_scores'].item()
        bb_vis = data['visib_gt'].item()
        bbs = data['bbs'].item()

    if not os.path.exists(current_file_name) or len(
            test_img_crops) == 0 or len(test_img_depth_crops) == 0:
        test_imgs = load_scenes(scene_id, eval_args)
        test_imgs_depth = load_scenes(scene_id, eval_args,
                                      depth=True) if icp else None

        data_params = dataset_params.get_dataset_params(dataset_name,
                                                        model_type='',
                                                        train_type='',
                                                        test_type=cam_type,
                                                        cam_type=cam_type)

        # GT visibility statistics are only provided for the primesense test
        # data; for other cam types they can be generated with the sixd toolkit
        visib_gt = inout.load_yaml(data_params['scene_gt_stats_mpath'].format(
            scene_id, delta))

        gt = inout.load_gt(data_params['scene_gt_mpath'].format(scene_id))

        gt_inst_masks = None
        if load_gt_masks:
            mask_paths = glob.glob(
                os.path.join(load_gt_masks,
                             '{:02d}/masks/*.npy'.format(scene_id)))
            gt_inst_masks = [np.load(mp) for mp in mask_paths]

        test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis = generate_scene_crops(
            test_imgs,
            test_imgs_depth,
            gt,
            eval_args, (H_AE, W_AE),
            visib_gt=visib_gt,
            inst_masks=gt_inst_masks)

        np.savez(current_file_name,
                 test_img_crops=test_img_crops,
                 test_img_depth_crops=test_img_depth_crops,
                 bbs=bbs,
                 bb_scores=bb_scores,
                 visib_gt=bb_vis)

        current_cfg_file_name = os.path.join(dataset_path,
                                             current_config_hash + '.cfg')
        with open(current_cfg_file_name, 'wb') as f:
            f.write(cfg_string)
        print('created new ground truth crops!')
    else:
        print('loaded previously generated ground truth crops!')
        print((len(test_img_crops), len(test_img_depth_crops)))

    return (test_img_crops, test_img_depth_crops, bbs, bb_scores, bb_vis)
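# --- Illustration (not part of the example above) ---------------------------
# A minimal standalone sketch of the caching scheme used by get_gt_scene_crops:
# the crop cache file is named after the md5 hash of a config string built from
# the scene id, the DATA / BBOXES settings and the AE input height. The values
# below are made up; eval_args.items('DATA') / .items('BBOXES') would normally
# supply the (key, value) pairs.
import hashlib
import os

scene_id = 2
data_items = [('dataset', 'tless'), ('cam_type', 'primesense')]  # stand-in for eval_args.items('DATA')
bbox_items = [('estimate_bbs', 'False')]                         # stand-in for eval_args.items('BBOXES')
H_AE = 128

cfg_string = str([scene_id] + data_items + bbox_items + [H_AE]).encode('utf-8')
cache_file = hashlib.md5(cfg_string).hexdigest() + '.npz'
print(cache_file, os.path.exists(cache_file))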
Example No. 12
def match_and_eval_performance_scores(eval_args, eval_dir):
    # Paths to pose errors (calculated using eval_calc_errors.py)
    #---------------------------------------------------------------------------
    # error_bpath = '/path/to/eval/'
    # error_paths = [
    #     pjoin(error_bpath, 'hodan-iros15_hinterstoisser'),
    #     # pjoin(error_bpath, 'hodan-iros15_tless_primesense'),
    # ]

    #[METHOD]
    method = eval_args.get('METHOD', 'METHOD')

    #[DATA]
    dataset = eval_args.get('DATA', 'DATASET')
    test_type = eval_args.get('DATA', 'CAM_TYPE')

    #[METRIC]
    # Top N pose estimates (with the highest score) to be evaluated for each
    # object in each image
    n_top = eval_args.getint(
        'EVALUATION', 'TOP_N_EVAL'
    )  # 0 = all estimates, -1 = given by the number of GT poses
    n_top_str = eval_args.getint('METRIC', 'TOP_N')
    # Pose error function
    error_types = eval(eval_args.get(
        'METRIC', 'ERROR_TYPE'))  # 'vsd', 'adi', 'add', 'cou', 're', 'te'
    error_thresh = eval(eval_args.get('METRIC', 'ERROR_THRES'))
    error_thresh_fact = eval(eval_args.get('METRIC', 'ERROR_THRESH_FACT'))
    # VSD parameters
    vsd_delta = eval_args.getint('METRIC', 'VSD_DELTA')
    vsd_tau = eval_args.getint('METRIC', 'VSD_TAU')
    vsd_cost = eval_args.get('METRIC', 'VSD_COST')  # 'step', 'tlinear'

    idx_th = 0
    idx_thf = 0
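    # idx_th / idx_thf step through ERROR_THRES / ERROR_THRESH_FACT when those
    # are configured as lists (e.g. the same error type evaluated at several
    # thresholds)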

    for error_type in error_types:

        # Error signature
        error_sign = 'error=' + error_type + '_ntop=' + str(n_top_str)
        if error_type == 'vsd':
            error_sign += '_delta={}_tau={}_cost={}'.format(
                vsd_delta, vsd_tau, vsd_cost)

        error_path = os.path.join(eval_dir, error_sign)
        # error_dir = 'error=vsd_ntop=1_delta=15_tau=20_cost=step'
        # Other paths
        #---------------------------------------------------------------------------
        # Mask of path to the input file with calculated errors
        errors_mpath = pjoin('{error_path}', 'errors_{scene_id:02d}.yml')

        # Mask of path to the output file with established matches and calculated scores
        matches_mpath = pjoin('{error_path}', 'matches_{eval_sign}.yml')
        scores_mpath = pjoin('{error_path}', 'scores_{eval_sign}.yml')

        # Parameters
        #---------------------------------------------------------------------------
        require_all_errors = False  # Whether to raise an error if the pose errors of some scene are missing
        visib_gt_min = 0.1  # Minimum visible surface fraction of valid GT pose
        visib_delta = 15  # [mm]

        # # Threshold of correctness
        # error_thresh = {
        #     'vsd': 0.3,
        #     'cou': 0.5,
        #     'te': 5.0, # [cm]
        #     're': 5.0 # [deg]
        # }

        # # Factor k; threshold of correctness = k * d, where d is the object diameter
        # error_thresh_fact = {
        #     'add': 0.1,
        #     'adi': 0.1
        # }

        # Evaluation
        #---------------------------------------------------------------------------

        # Evaluation signature
        if error_type in ['add', 'adi']:
            if type(error_thresh_fact[error_type]) is list:
                cur_thres_f = error_thresh_fact[error_type][idx_thf]
                idx_thf += 1
            else:
                cur_thres_f = error_thresh_fact[error_type]
            eval_sign = 'thf=' + str(cur_thres_f)
        else:
            if type(error_thresh[error_type]) is list:
                cur_thres = error_thresh[error_type][idx_th]
                idx_th += 1
            else:
                cur_thres = error_thresh[error_type]
            eval_sign = 'th=' + str(cur_thres)
        eval_sign += '_min-visib=' + str(visib_gt_min)

        print('--- Processing: {}, {}, {}'.format(method, dataset, error_type))

        # Load dataset parameters
        dp = get_dataset_params(dataset, test_type=test_type)
        obj_ids = range(1, dp['obj_count'] + 1)
        scene_ids = range(1, dp['scene_count'] + 1)

        # Set threshold of correctness (might be different for each object)
        error_threshs = {}
        if error_type in ['add', 'adi']:
            # Relative to object diameter
            models_info = inout.load_yaml(dp['models_info_path'])
            for obj_id in obj_ids:
                obj_diameter = models_info[obj_id]['diameter']
                error_threshs[obj_id] = cur_thres_f * obj_diameter
        else:
            # The same threshold for all objects
            for obj_id in obj_ids:
                error_threshs[obj_id] = cur_thres

        # Go through the test scenes and match estimated poses to GT poses
        #-----------------------------------------------------------------------
        matches = []  # Stores info about the matching estimate for each GT
        for scene_id in scene_ids:

            # Load GT poses
            gts = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))

            # Load statistics (e.g. visibility fraction) of the GT poses
            gt_stats_path = dp['scene_gt_stats_mpath'].format(
                scene_id, visib_delta)
            gt_stats = inout.load_yaml(gt_stats_path)

            # Load pre-calculated errors of the pose estimates
            scene_errs_path = errors_mpath.format(error_path=error_path,
                                                  scene_id=scene_id)

            if os.path.isfile(scene_errs_path):
                errs = inout.load_errors(scene_errs_path)

                matches += match_poses(gts, gt_stats, errs, scene_id,
                                       visib_gt_min, error_threshs, n_top)

            elif require_all_errors:
                raise IOError(
                    '{} is missing, but errors for all scenes are required'
                    ' (require_all_errors = True).'.format(scene_errs_path))

        # Calculate the performance scores
        #-----------------------------------------------------------------------
        # Split the dataset of Hinterstoisser to the original LINEMOD dataset
        # and the Occlusion dataset by TUD (i.e. the extended GT for scene #2)
        if dataset == 'hinterstoisser':

            print('-- LINEMOD dataset')
            eval_sign_lm = 'linemod_' + eval_sign
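            # in the Hinterstoisser dataset scene i contains object i, so
            # scene_id == obj_id selects the original LINEMOD test annotations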
            matches_lm = [m for m in matches if m['scene_id'] == m['obj_id']]
            scores_lm = calc_scores(scene_ids, obj_ids, matches_lm, n_top)

            # Save scores
            scores_lm_path = scores_mpath.format(error_path=error_path,
                                                 eval_sign=eval_sign_lm)
            inout.save_yaml(scores_lm_path, scores_lm)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign_lm)
            inout.save_yaml(matches_path, matches_lm)

            print('-- Occlusion dataset')
            eval_sign_occ = 'occlusion_' + eval_sign
            matches_occ = [m for m in matches if m['scene_id'] == 2]
            scene_ids_occ = [2]
            obj_ids_occ = [1, 2, 5, 6, 8, 9, 10, 11, 12]
            scores_occ = calc_scores(scene_ids_occ, obj_ids_occ, matches_occ,
                                     n_top)
            # Save scores
            scores_occ_path = scores_mpath.format(error_path=error_path,
                                                  eval_sign=eval_sign_occ)
            inout.save_yaml(scores_occ_path, scores_occ)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign_occ)
            inout.save_yaml(matches_path, matches_occ)
        else:
            scores = calc_scores(scene_ids, obj_ids, matches, n_top)

            # Save scores
            scores_path = scores_mpath.format(error_path=error_path,
                                              eval_sign=eval_sign)
            inout.save_yaml(scores_path, scores)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign)
            inout.save_yaml(matches_path, matches)

    print('Done.')
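# --- Illustration (not part of the example above) ---------------------------
# A minimal standalone sketch of how the correctness thresholds are built:
# ADD/ADI thresholds are a factor of the object diameter, all other error types
# use one fixed threshold for every object. The helper name build_thresholds and
# the diameters / threshold values are made up for this sketch.
def build_thresholds(error_type, obj_ids, models_info, cur_thres=5.0, cur_thres_f=0.1):
    error_threshs = {}
    for obj_id in obj_ids:
        if error_type in ['add', 'adi']:
            # relative to the object diameter
            error_threshs[obj_id] = cur_thres_f * models_info[obj_id]['diameter']
        else:
            # the same threshold for all objects
            error_threshs[obj_id] = cur_thres
    return error_threshs

models_info = {1: {'diameter': 102.1}, 2: {'diameter': 247.5}}   # [mm], illustrative only
print(build_thresholds('add', [1, 2], models_info))  # per-object, diameter-relative thresholds
print(build_thresholds('re', [1, 2], models_info))   # one fixed threshold (here in degrees)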