# --- Visualization settings (reconstructed from a whitespace-mangled paste:
# the original newlines were lost, leaving most statements dead inside one
# giant comment; line structure restored from the embedded comment text).

# Whether to render the RGB image
vis_rgb = True

# Indicates whether to resolve visibility in the rendered RGB image (using
# depth renderings). If True, only the part of object surface, which is not
# occluded by any other modeled object, is visible. If False, RGB renderings
# of individual objects are blended together.
vis_rgb_resolve_visib = True

# Indicates whether to render depth image
vis_depth = True

# If to use the original model color
vis_orig_color = False

# Define new object colors (used if vis_orig_colors == False)
colors = inout.load_yaml('../data/colors.yml')

# Path masks for output images
vis_rgb_mpath = '../output/vis_gt_poses_{}/{:02d}/{:04d}.jpg'
vis_depth_mpath = '../output/vis_gt_poses_{}/{:02d}/{:04d}_depth_diff.jpg'

# Scenes to process; restrict to the requested subset when one is given
scene_ids_curr = range(1, par['scene_count'] + 1)
if scene_ids:
    scene_ids_curr = set(scene_ids_curr).intersection(scene_ids)

for scene_id in scene_ids_curr:
    # Make sure the per-scene output directory exists
    misc.ensure_dir(os.path.dirname(vis_rgb_mpath.format(dataset, scene_id, 0)))

    # Load scene info and gt poses
    scene_info = inout.load_info(par['scene_info_mpath'].format(scene_id))
    scene_gt = inout.load_gt(par['scene_gt_mpath'].format(scene_id))
# --- Test-time rendering setup (reconstructed from a whitespace-mangled
# paste: original newlines were lost; structure restored from the code and
# comment text visible in the mangled line).

poseRefine = linemodLevelup_pybind.poseRefine()

im_size = dp['test_im_size']
shape = (im_size[1], im_size[0])  # (height, width)
print('test img size: {}'.format(shape))

# Frame buffer object, bind here to avoid memory leak, maybe?
window = renderer.app.Window(visible=False)
color_buf = np.zeros((shape[0], shape[1], 4),
                     np.float32).view(renderer.gloo.TextureFloat2D)
depth_buf = np.zeros((shape[0], shape[1]),
                     np.float32).view(renderer.gloo.DepthTexture)
fbo = renderer.gloo.FrameBuffer(color=color_buf, depth=depth_buf)
fbo.activate()

# Whether to consider only the specified subset of images
use_image_subset = True
if use_image_subset:
    im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
else:
    im_ids_sets = None

for scene_id in scene_ids_curr:
    # By default a scene contains only the object with the same id as the scene
    obj_id_in_scene_array = list()
    obj_id_in_scene_array.append(scene_id)

    if dataset == 'doumanoglou' and scene_id == 3:
        obj_id_in_scene_array = [1, 2]

    if dataset == 'hinterstoisser' and scene_id == 2:
        obj_id_in_scene_array = [1, 2, 5, 6, 8, 9, 10, 11, 12]  # for occ dataset

    for obj_id_in_scene in obj_id_in_scene_array:
        # Load scene info and gt poses
        pass  # TODO(review): loop body was truncated in the mangled paste — restore from VCS
# NOTE(review): whitespace-mangled chunk — many logical lines are collapsed
# onto this one physical line, so everything after the first '#' below is dead
# comment text and the leading statements are a syntax error. The fragment
# also starts INSIDE an if-branch whose header (presumably a check that
# par['model_texture_mpath'] is set — TODO confirm) is not visible here, so
# the line structure must be restored from version control, not guessed.
# Intended content (as far as the visible tokens show): optionally load the
# object's texture image, then for each radius load a single fixed GT pose
# from a hard-coded YAML path instead of sampling views (the
# view_sampler.sample_views calls are commented out), and save a
# visualization of that one view.
model_texture_path = par['model_texture_mpath'].format(obj_id) model_texture = inout.load_im(model_texture_path) else: model_texture = None obj_info = {} obj_gt = {} im_id = 0 for radius in radii: # Sample views # TODO modify here # views, views_level = view_sampler.sample_views(min_n_views, radius, # azimuth_range, elev_range) pose = inout.load_yaml( '/home/sun/ClionProjects/pose_estimation/MPPF/data/gt_test/test/gt.yml' ) # views, views_level = view_sampler.sample_views(min_n_views, radius, # azimuth_range, elev_range) views = [] views_level = [] views.append({ 'R': np.array(pose['cam_R_m2c']).reshape(3, 3), 't': np.array(pose['cam_t_m2c']) }) views_level.append(0) print('Sampled views: ' + str(len(views))) view_sampler.save_vis(out_views_vis_mpath.format(str(radius)), views, views_level)
# --- Visualization settings (reconstructed from a whitespace-mangled paste;
# line structure restored from the embedded comment text).

# Whether to render the RGB image
vis_rgb = True

# Indicates whether to resolve visibility in the rendered RGB image (using
# depth renderings). If True, only the part of object surface, which is not
# occluded by any other modeled object, is visible. If False, RGB renderings
# of individual objects are blended together.
vis_rgb_resolve_visib = True

# Indicates whether to render depth image
vis_depth = False

# If to use the original model color
vis_orig_color = False

# Define new object colors (used if vis_orig_colors == False)
colors = inout.load_yaml('../data/colors.yml')

# Path masks for output images
vis_rgb_mpath = '../output/vis_gt_poses_{}/{:02d}/{:04d}.jpg'
vis_depth_mpath = '../output/vis_gt_poses_{}/{:02d}/{:04d}_depth_diff.jpg'

# Whether to consider only the specified subset of images
use_image_subset = True

# Subset of images to be considered
if use_image_subset:
    im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
else:
    im_ids_sets = None

scene_ids_curr = range(1, dp['scene_count'] + 1)
# --- GT-statistics loading (reconstructed from a whitespace-mangled paste;
# line structure restored from the embedded comment text).

# Load dataset parameters
dp = get_dataset_params(dataset)

# Choose path keys for the training (per-object) vs test (per-scene) part
if dataset_part == 'train':
    data_ids = range(1, dp['obj_count'] + 1)
    gt_mpath_key = 'obj_gt_mpath'
    gt_stats_mpath_key = 'obj_gt_stats_mpath'
else:  # 'test'
    data_ids = range(1, dp['scene_count'] + 1)
    gt_mpath_key = 'scene_gt_mpath'
    gt_stats_mpath_key = 'scene_gt_stats_mpath'

# Subset of images to be considered
if dataset_part == 'test' and use_image_subset:
    im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
else:
    im_ids_sets = None

# Load the GT statistics
gt_stats = []
for data_id in data_ids:
    print('Loading GT stats: {}, {}'.format(dataset, data_id))
    gts = inout.load_gt(dp[gt_mpath_key].format(data_id))
    gt_stats_curr = inout.load_yaml(
        dp[gt_stats_mpath_key].format(data_id, delta))

    # Considered subset of images for the current scene
    if im_ids_sets is not None:
        im_ids_curr = im_ids_sets[data_id]
    else:
        pass  # TODO(review): else-branch was truncated in the mangled paste — restore from VCS
# --- Pose-matching setup (reconstructed from a whitespace-mangled paste;
# line structure restored from the embedded comment text). This fragment
# continues a loop over error folders: error_sign/error_path/error_type and
# error_thresh(_fact) are defined earlier in the file.

n_top = int(error_sign.split('_')[1].split('=')[1])
res_sign = os.path.basename(os.path.dirname(error_path)).split('_')
method = res_sign[0]
dataset = res_sign[1]
# NOTE(review): another fragment in this file uses "len(info) > 2" for the
# same parse; with "> 3" a three-part name (method_dataset_testtype) yields
# test_type == '' — confirm which guard is intended.
test_type = res_sign[2] if len(res_sign) > 3 else ''

# Load dataset parameters
dp = get_dataset_params(dataset, test_type=test_type)
obj_ids = range(1, dp['obj_count'] + 1)
scene_ids = range(1, dp['scene_count'] + 1)

# Set threshold of correctness (might be different for each object)
error_threshs = {}
if error_type in ['add', 'adi']:
    # Relative to object diameter
    models_info = inout.load_yaml(dp['models_info_path'])
    for obj_id in obj_ids:
        obj_diameter = models_info[obj_id]['diameter']
        error_threshs[obj_id] = error_thresh_fact[error_type] * obj_diameter
else:
    # The same threshold for all objects
    for obj_id in obj_ids:
        error_threshs[obj_id] = error_thresh[error_type]

# Go through the test scenes and match estimated poses to GT poses
matches = []  # Stores info about the matching estimate for each GT
for scene_id in scene_ids:
    print('Matching: {}, {}, {}, {}, {}'.format(
        error_type, method, dataset, test_type, scene_id))
# --- Visualization settings and Brachmann-format path masks (reconstructed
# from a whitespace-mangled paste; line structure restored from the embedded
# comment text).

# Whether to render the RGB image
vis_rgb = True

# Indicates whether to resolve visibility in the rendered RGB image (using
# depth renderings). If True, only the part of object surface, which is not
# occluded by any other modeled object, is visible. If False, RGB renderings
# of individual objects are blended together.
vis_rgb_resolve_visib = True

# Indicates whether to render depth image
vis_depth = False

# If to use the original model color
vis_orig_color = False

# Define new object colors (used if vis_orig_colors == False)
colors = inout.load_yaml('../data/colors.yml')

# Path masks for output images
training_dataset_path = '/media/sun/Data1/Brachmann/test/{}/training'
test_dataset_path = '/media/sun/Data1/Brachmann/test/{}/test'

# Output path masks
out_rgb_mpath = training_dataset_path + '/{:02d}/rgb_noseg/color_{:05d}.png'
out_depth_mpath = training_dataset_path + '/{:02d}/depth_noseg/depth_{:05d}.png'
out_seg_mpath = training_dataset_path + '/{:02d}/seg/seg_{:05d}.png'
out_obj_mpath = training_dataset_path + '/{:02d}/obj/obj_{:05d}.png'
out_info_path = training_dataset_path + '/{:02d}/info/info_{:05}.txt'
test_rgb_mpath = test_dataset_path + '/{:02d}/rgb_noseg/color_{:05d}.png'
def main():
    """Match pose estimates to GT poses and calculate evaluation scores.

    Reads per-scene pose errors produced by eval_calc_errors.py, matches the
    estimates to GT poses using a per-object correctness threshold, and saves
    the matches and scores as YAML files next to the error files.

    Reconstructed from a whitespace-mangled paste (newlines were lost); line
    structure restored from the embedded comment text.
    """
    # Paths to pose errors (calculated using eval_calc_errors.py)
    # --------------------------------------------------------------------------
    top_level_path = os.path.dirname(os.path.dirname(
        os.path.abspath(__file__)))

    dataset = 'hinterstoisser'
    # dataset = 'tless'
    # dataset = 'tudlight'
    # dataset = 'rutgers'
    # dataset = 'tejani'
    # dataset = 'doumanoglou'
    # dataset = 'toyotalight'

    error_bpath = pjoin(top_level_path, 'eval')
    error_paths = [
        pjoin(error_bpath, 'patch-linemod_' + dataset),
        # pjoin(error_bpath, 'hodan-iros15_tless_primesense'),
    ]

    error_dir = 'error=vsd_ntop=1_delta=15_tau=20_cost=step'
    for i in range(len(error_paths)):
        error_paths[i] = os.path.join(error_paths[i], error_dir)

    # Other paths
    # --------------------------------------------------------------------------
    # Mask of path to the input file with calculated errors
    errors_mpath = pjoin('{error_path}', 'errors_{scene_id:02d}.yml')

    # Mask of path to the output file with established matches and calculated scores
    matches_mpath = pjoin('{error_path}', 'matches_{eval_sign}.yml')
    scores_mpath = pjoin('{error_path}', 'scores_{eval_sign}.yml')

    # Parameters
    # --------------------------------------------------------------------------
    use_image_subset = True  # Whether to use the specified subset of images
    require_all_errors = True  # Whether to break if some errors are missing
    visib_gt_min = 0.1  # Minimum visible surface fraction of valid GT pose
    visib_delta = 15  # [mm]

    # Threshold of correctness
    error_thresh = {
        'vsd': 0.3,
        'cou': 0.5,
        'te': 5.0,  # [cm]
        're': 5.0   # [deg]
    }

    # Factor k; threshold of correctness = k * d, where d is the object diameter
    error_thresh_fact = {'add': 0.1, 'adi': 0.1}

    # Evaluation
    # --------------------------------------------------------------------------
    for error_path in error_paths:
        # Parse info about the errors from the folder names
        error_sign = os.path.basename(error_path)
        error_type = error_sign.split('_')[0].split('=')[1]
        n_top = int(error_sign.split('_')[1].split('=')[1])
        res_sign = os.path.basename(os.path.dirname(error_path)).split('_')
        method = res_sign[0]
        dataset = res_sign[1]
        # NOTE(review): another fragment in this file uses "len(info) > 2";
        # with "> 3" a three-part result name yields test_type == '' — confirm.
        test_type = res_sign[2] if len(res_sign) > 3 else ''

        # Evaluation signature
        if error_type in ['add', 'adi']:
            eval_sign = 'thf=' + str(error_thresh_fact[error_type])
        else:
            eval_sign = 'th=' + str(error_thresh[error_type])
        eval_sign += '_min-visib=' + str(visib_gt_min)

        print('--- Processing: {}, {}, {}'.format(method, dataset, error_type))

        # Load dataset parameters
        dp = get_dataset_params(dataset, test_type=test_type)
        obj_ids = range(1, dp['obj_count'] + 1)
        scene_ids = range(1, dp['scene_count'] + 1)

        # Subset of images to be considered
        if use_image_subset:
            im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
        else:
            im_ids_sets = None

        # Set threshold of correctness (might be different for each object)
        error_threshs = {}
        if error_type in ['add', 'adi']:
            # Relative to object diameter
            models_info = inout.load_yaml(dp['models_info_path'])
            for obj_id in obj_ids:
                obj_diameter = models_info[obj_id]['diameter']
                error_threshs[obj_id] = error_thresh_fact[error_type] *\
                                        obj_diameter
        else:
            # The same threshold for all objects
            for obj_id in obj_ids:
                error_threshs[obj_id] = error_thresh[error_type]

        # Go through the test scenes and match estimated poses to GT poses
        # ----------------------------------------------------------------------
        matches = []  # Stores info about the matching estimate for each GT
        for scene_id in scene_ids:
            # Load GT poses
            gts = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))

            # Load statistics (e.g. visibility fraction) of the GT poses
            gt_stats_path = dp['scene_gt_stats_mpath'].format(
                scene_id, visib_delta)
            gt_stats = inout.load_yaml(gt_stats_path)

            # Keep the GT poses and their stats only for the selected images
            if im_ids_sets is not None:
                im_ids = im_ids_sets[scene_id]
                gts = {im_id: gts[im_id] for im_id in im_ids}
                gt_stats = {im_id: gt_stats[im_id] for im_id in im_ids}

            # Load pre-calculated errors of the pose estimates
            scene_errs_path = errors_mpath.format(error_path=error_path,
                                                  scene_id=scene_id)
            if os.path.isfile(scene_errs_path):
                errs = inout.load_errors(scene_errs_path)
                matches += match_poses(gts, gt_stats, errs, scene_id,
                                       visib_gt_min, error_threshs, n_top)
            elif require_all_errors:
                raise IOError(
                    '{} is missing, but errors for all scenes are required'
                    ' (require_all_results = True).'.format(scene_errs_path))

        # Calculate the performance scores
        # ----------------------------------------------------------------------
        # Split the dataset of Hinterstoisser to the original LINEMOD dataset
        # and the Occlusion dataset by TUD (i.e. the extended GT for scene #2)
        if dataset == 'hinterstoisser':
            print('-- LINEMOD dataset')
            eval_sign_lm = 'linemod_' + eval_sign
            matches_lm = [m for m in matches if m['scene_id'] == m['obj_id']]
            scores_lm = calc_scores(scene_ids, obj_ids, matches_lm, n_top)

            # Save scores
            scores_lm_path = scores_mpath.format(error_path=error_path,
                                                 eval_sign=eval_sign_lm)
            inout.save_yaml(scores_lm_path, scores_lm)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign_lm)
            inout.save_yaml(matches_path, matches_lm)

            print('-- Occlusion dataset')
            eval_sign_occ = 'occlusion_' + eval_sign
            matches_occ = [m for m in matches if m['scene_id'] == 2]
            scene_ids_occ = [2]
            obj_ids_occ = [1, 2, 5, 6, 8, 9, 10, 11, 12]
            scores_occ = calc_scores(scene_ids_occ, obj_ids_occ, matches_occ,
                                     n_top)

            # Save scores
            scores_occ_path = scores_mpath.format(error_path=error_path,
                                                  eval_sign=eval_sign_occ)
            inout.save_yaml(scores_occ_path, scores_occ)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign_occ)
            inout.save_yaml(matches_path, matches_occ)
        else:
            scores = calc_scores(scene_ids, obj_ids, matches, n_top)

            # Save scores
            scores_path = scores_mpath.format(error_path=error_path,
                                              eval_sign=eval_sign)
            inout.save_yaml(scores_path, scores)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign)
            inout.save_yaml(matches_path, matches)

    print('Done.')
# NOTE(review): whitespace-mangled chunk — many statements are collapsed onto
# this one physical line (everything after the first '#' is dead comment text,
# and the leading statements are a syntax error). The fragment also opens
# INSIDE an if-branch: the 'else:' below has no visible 'if' header here
# (presumably a dataset check such as "if dataset == 'tless':" — TODO confirm
# against version control). Intended content, per the visible tokens: choose
# data/model/camera types per dataset, load dataset parameters, optionally
# load the test-image subset, then pick depth/info/gt path keys for the
# 'train' (per-object) vs 'test' (per-scene) dataset part. Restore the
# original line breaks from version control rather than guessing them.
data_type = 'primesense' cam_type = 'primesense' model_type = 'cad' else: data_type = '' model_type = '' cam_type = '' # Load dataset parameters dp = get_dataset_params(dataset, model_type=model_type, train_type=data_type, test_type=data_type, cam_type=cam_type) obj_ids = range(1, dp['obj_count'] + 1) # Subset of images to be considered if data_type == 'test' and use_image_subset: im_ids_sets = inout.load_yaml(dp['test_set_fpath']) else: im_ids_sets = None if dataset_part == 'train': data_ids = range(1, dp['obj_count'] + 1) depth_mpath_key = 'train_depth_mpath' info_mpath_key = 'obj_info_mpath' gt_mpath_key = 'obj_gt_mpath' gt_stats_mpath_key = 'obj_gt_stats_mpath' else: # 'test' data_ids = range(1, dp['scene_count'] + 1) depth_mpath_key = 'test_depth_mpath' info_mpath_key = 'scene_info_mpath' gt_mpath_key = 'scene_gt_mpath'
# --- GT-statistics collection (reconstructed from a whitespace-mangled paste;
# line structure restored from the embedded comment text).

# Choose path keys for the training (per-object) vs test (per-scene) part
if dataset_part == 'train':
    data_ids = range(1, dp['obj_count'] + 1)
    gt_mpath_key = 'obj_gt_mpath'
    gt_stats_mpath_key = 'obj_gt_stats_mpath'
else:  # 'test'
    data_ids = range(1, dp['scene_count'] + 1)
    gt_mpath_key = 'scene_gt_mpath'
    gt_stats_mpath_key = 'scene_gt_stats_mpath'

# Load the GT statistics
gt_stats = []
for data_id in data_ids:
    print('Loading GT stats: {}, {}'.format(dataset, data_id))
    gts = inout.load_gt(dp[gt_mpath_key].format(data_id))
    gt_stats_curr = inout.load_yaml(
        dp[gt_stats_mpath_key].format(data_id, delta))

    # Annotate each per-GT stats dict with its ids and flatten into one list
    for im_id, gt_stats_im in gt_stats_curr.items():
        for gt_id, p in enumerate(gt_stats_im):
            p['data_id'] = data_id
            p['im_id'] = im_id
            p['gt_id'] = gt_id
            p['obj_id'] = gts[im_id][gt_id]['obj_id']
            gt_stats.append(p)

print('GT count: {}'.format(len(gt_stats)))

# Collect the data
px_count_all = [p['px_count_all'] for p in gt_stats]
px_count_valid = [p['px_count_valid'] for p in gt_stats]
px_count_visib = [p['px_count_visib'] for p in gt_stats]
visib_fract = [p['visib_fract'] for p in gt_stats]
# --- Visualization settings and result-folder loop head (reconstructed from
# a whitespace-mangled paste; line structure restored from the embedded
# comment text).

# Whether to render the RGB image
vis_rgb = True

# Indicates whether to resolve visibility in the rendered RGB image (using
# depth renderings). If True, only the part of object surface, which is not
# occluded by any other modeled object, is visible. If False, RGB renderings
# of individual objects are blended together.
vis_rgb_resolve_visib = True

# Indicates whether to render depth image
vis_depth = False

# If to use the original model color
vis_orig_color = False

# Object colors (used if vis_orig_colors == False)
colors = inout.load_yaml('../data/colors.yml')

# At least one of the two renderings must be requested
assert(vis_rgb or vis_depth)

# Visualization
#-------------------------------------------------------------------------------
for result_path in result_paths:
    print('Processing: ' + result_path)

    # Parse method/dataset/test-type from the result folder name
    result_name = os.path.basename(result_path)
    info = result_name.split('_')
    method = info[0]
    dataset = info[1]
    test_type = info[2] if len(info) > 2 else ''

    # Select data type
# --- Per-scene GT-statistics collection (reconstructed from a
# whitespace-mangled paste; line structure restored from the embedded
# comment text).

# dataset = 'tejani'
# dataset = 'doumanoglou'

delta = 15  # Tolerance used in the visibility test [mm]

# Load dataset parameters
dp = get_dataset_params(dataset)
obj_ids = range(1, dp['obj_count'] + 1)
scene_ids = range(1, dp['scene_count'] + 1)

# Load the GT statistics
gt_stats = []
for scene_id in scene_ids:
    print('Loading GT stats: {}, {}'.format(dataset, scene_id))
    gts = inout.load_gt(dp['scene_gt_mpath'].format(scene_id))
    gt_stats_curr = inout.load_yaml(dp['scene_gt_stats_mpath'].format(
        scene_id, delta))

    # Annotate each per-GT stats dict with its ids and flatten into one list
    for im_id, gt_stats_im in gt_stats_curr.items():
        for gt_id, p in enumerate(gt_stats_im):
            p['scene_id'] = scene_id
            p['im_id'] = im_id
            p['gt_id'] = gt_id
            p['obj_id'] = gts[im_id][gt_id]['obj_id']
            gt_stats.append(p)

print('GT count: {}'.format(len(gt_stats)))

# Collect the data
px_count_all = [p['px_count_all'] for p in gt_stats]
px_count_valid = [p['px_count_valid'] for p in gt_stats]
px_count_visib = [p['px_count_visib'] for p in gt_stats]
visib_fract = [p['visib_fract'] for p in gt_stats]
# NOTE(review): whitespace-mangled chunk — many statements are collapsed onto
# this one physical line (everything after the first '#' is dead comment text,
# and the juxtaposed statements are a syntax error). The leading
# print/pass/return are the tail of a function whose 'def' line is outside
# this view, so the original indentation cannot be reconstructed here —
# restore the line breaks from version control. Intended content of the
# module-level part, per the visible tokens: a __main__ driver that reads
# models_info.yml from a hard-coded models folder, builds the list of
# obj_XX.ply filenames with per-model scales (1 / diameter), creates the
# output folder, and calls renderDataModelBatch with 20 workers.
print('{}: Rendering {}. Generated {} images'.format( j, model, num_imgs)) pass return if __name__ == '__main__': from pysixd.inout import load_yaml base_level = 2 print('Rendering Linemod Classes') #model_file = '/home/bokorn/src/generic_pose/generic_pose/training_sets/model_sets/linemod.txt' #with open(model_file, 'r') as f: # linemod_filenames = f.read().split() models_folder = '/media/bokorn/ExtraDrive2/benchmark/linemod6DC/models/' models_info = load_yaml(os.path.join(models_folder, 'models_info.yml')) model_scales = [] linemod_filenames = [] for k, v in models_info.items(): linemod_filenames.append( os.path.join(models_folder, 'obj_{:02d}.ply'.format(k))) model_scales.append(1 / v['diameter']) #linemod_folder = '/ssd0/bokorn/data/renders/linemod' linemod_folder = '/media/bokorn/ExtraDrive2/renders/linemod6DC' os.makedirs(linemod_folder, exist_ok=True) renderDataModelBatch(base_level=base_level, num_workers=20, model_filenames=linemod_filenames, data_folder=linemod_folder, model_scales=model_scales)
# --- GT-statistics loading (duplicate of an earlier fragment in this file;
# reconstructed from a whitespace-mangled paste — line structure restored from
# the embedded comment text).

# Load dataset parameters
dp = get_dataset_params(dataset)

# Choose path keys for the training (per-object) vs test (per-scene) part
if dataset_part == 'train':
    data_ids = range(1, dp['obj_count'] + 1)
    gt_mpath_key = 'obj_gt_mpath'
    gt_stats_mpath_key = 'obj_gt_stats_mpath'
else:  # 'test'
    data_ids = range(1, dp['scene_count'] + 1)
    gt_mpath_key = 'scene_gt_mpath'
    gt_stats_mpath_key = 'scene_gt_stats_mpath'

# Subset of images to be considered
if dataset_part == 'test' and use_image_subset:
    im_ids_sets = inout.load_yaml(dp['test_set_fpath'])
else:
    im_ids_sets = None

# Load the GT statistics
gt_stats = []
for data_id in data_ids:
    print('Loading GT stats: {}, {}'.format(dataset, data_id))
    gts = inout.load_gt(dp[gt_mpath_key].format(data_id))
    gt_stats_curr = inout.load_yaml(
        dp[gt_stats_mpath_key].format(data_id, delta))

    # Considered subset of images for the current scene
    if im_ids_sets is not None:
        im_ids_curr = im_ids_sets[data_id]
    else:
        pass  # TODO(review): else-branch was truncated in the mangled paste — restore from VCS
def main():
    """Match pose estimates to GT poses and calculate evaluation scores.

    Variant of the evaluation driver using ':'-separated signature tokens and
    a fixed error base path. Reads per-scene pose errors produced by
    eval_calc_errors.py, matches the estimates to GT poses using a per-object
    correctness threshold, and saves the matches and scores as YAML files.

    Reconstructed from a whitespace-mangled paste (newlines were lost); line
    structure restored from the embedded comment text.
    """
    # Paths to pose errors (calculated using eval_calc_errors.py)
    # ---------------------------------------------------------------------------
    error_bpath = "/path/to/eval/"
    error_paths = [
        pjoin(error_bpath, "hodan-iros15_hinterstoisser"),
        # pjoin(error_bpath, 'hodan-iros15_tless_primesense'),
    ]

    error_dir = "error:vsd_ntop:1_delta:15_tau:20_cost:step"
    for i in range(len(error_paths)):
        error_paths[i] = os.path.join(error_paths[i], error_dir)

    # Other paths
    # ---------------------------------------------------------------------------
    # Mask of path to the input file with calculated errors
    errors_mpath = pjoin("{error_path}", "errors_{scene_id:02d}.yml")

    # Mask of path to the output file with established matches and calculated scores
    matches_mpath = pjoin("{error_path}", "matches_{eval_sign}.yml")
    scores_mpath = pjoin("{error_path}", "scores_{eval_sign}.yml")

    # Parameters
    # ---------------------------------------------------------------------------
    use_image_subset = True  # Whether to use the specified subset of images
    require_all_errors = True  # Whether to break if some errors are missing
    visib_gt_min = 0.1  # Minimum visible surface fraction of valid GT pose
    visib_delta = 15  # [mm]

    # Threshold of correctness
    error_thresh = {
        "vsd": 0.3,
        "cou": 0.5,
        "te": 5.0,  # [cm]
        "re": 5.0   # [deg]
    }

    # Factor k; threshold of correctness = k * d, where d is the object diameter
    error_thresh_fact = {"add": 0.1, "adi": 0.1}

    # Evaluation
    # ---------------------------------------------------------------------------
    for error_path in error_paths:
        # Parse info about the errors from the folder names
        error_sign = os.path.basename(error_path)
        error_type = error_sign.split("_")[0].split(":")[1]
        n_top = int(error_sign.split("_")[1].split(":")[1])
        res_sign = os.path.basename(os.path.dirname(error_path)).split("_")
        method = res_sign[0]
        dataset = res_sign[1]
        # NOTE(review): another fragment in this file uses "len(info) > 2";
        # with "> 3" a three-part result name yields test_type == "" — confirm.
        test_type = res_sign[2] if len(res_sign) > 3 else ""

        # Evaluation signature
        if error_type in ["add", "adi"]:
            eval_sign = "thf:" + str(error_thresh_fact[error_type])
        else:
            eval_sign = "th:" + str(error_thresh[error_type])
        eval_sign += "_min-visib:" + str(visib_gt_min)

        print("--- Processing: {}, {}, {}".format(method, dataset, error_type))

        # Load dataset parameters
        dp = get_dataset_params(dataset, test_type=test_type)
        obj_ids = range(1, dp["obj_count"] + 1)
        scene_ids = range(1, dp["scene_count"] + 1)

        # Subset of images to be considered
        if use_image_subset:
            im_ids_sets = inout.load_yaml(dp["test_set_fpath"])
        else:
            im_ids_sets = None

        # Set threshold of correctness (might be different for each object)
        error_threshs = {}
        if error_type in ["add", "adi"]:
            # Relative to object diameter
            models_info = inout.load_yaml(dp["models_info_path"])
            for obj_id in obj_ids:
                obj_diameter = models_info[obj_id]["diameter"]
                error_threshs[
                    obj_id] = error_thresh_fact[error_type] * obj_diameter
        else:
            # The same threshold for all objects
            for obj_id in obj_ids:
                error_threshs[obj_id] = error_thresh[error_type]

        # Go through the test scenes and match estimated poses to GT poses
        # -----------------------------------------------------------------------
        matches = []  # Stores info about the matching estimate for each GT
        for scene_id in scene_ids:
            # Load GT poses
            gts = inout.load_gt(dp["scene_gt_mpath"].format(scene_id))

            # Load statistics (e.g. visibility fraction) of the GT poses
            gt_stats_path = dp["scene_gt_stats_mpath"].format(
                scene_id, visib_delta)
            gt_stats = inout.load_yaml(gt_stats_path)

            # Keep the GT poses and their stats only for the selected images
            if im_ids_sets is not None:
                im_ids = im_ids_sets[scene_id]
                gts = {im_id: gts[im_id] for im_id in im_ids}
                gt_stats = {im_id: gt_stats[im_id] for im_id in im_ids}

            # Load pre-calculated errors of the pose estimates
            scene_errs_path = errors_mpath.format(error_path=error_path,
                                                  scene_id=scene_id)
            if os.path.isfile(scene_errs_path):
                errs = inout.load_errors(scene_errs_path)
                matches += match_poses(gts, gt_stats, errs, scene_id,
                                       visib_gt_min, error_threshs, n_top)
            elif require_all_errors:
                raise IOError(
                    "{} is missing, but errors for all scenes are required"
                    " (require_all_results = True).".format(scene_errs_path))

        # Calculate the performance scores
        # -----------------------------------------------------------------------
        # Split the dataset of Hinterstoisser to the original LINEMOD dataset
        # and the Occlusion dataset by TUD (i.e. the extended GT for scene #2)
        if dataset == "hinterstoisser":
            print("-- LINEMOD dataset")
            eval_sign_lm = "linemod_" + eval_sign
            matches_lm = [m for m in matches if m["scene_id"] == m["obj_id"]]
            scores_lm = calc_scores(scene_ids, obj_ids, matches_lm, n_top)

            # Save scores
            scores_lm_path = scores_mpath.format(error_path=error_path,
                                                 eval_sign=eval_sign_lm)
            inout.save_yaml(scores_lm_path, scores_lm)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign_lm)
            inout.save_yaml(matches_path, matches_lm)

            print("-- Occlusion dataset")
            eval_sign_occ = "occlusion_" + eval_sign
            matches_occ = [m for m in matches if m["scene_id"] == 2]
            scene_ids_occ = [2]
            obj_ids_occ = [1, 2, 5, 6, 8, 9, 10, 11, 12]
            scores_occ = calc_scores(scene_ids_occ, obj_ids_occ, matches_occ,
                                     n_top)

            # Save scores
            scores_occ_path = scores_mpath.format(error_path=error_path,
                                                  eval_sign=eval_sign_occ)
            inout.save_yaml(scores_occ_path, scores_occ)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign_occ)
            inout.save_yaml(matches_path, matches_occ)
        else:
            scores = calc_scores(scene_ids, obj_ids, matches, n_top)

            # Save scores
            scores_path = scores_mpath.format(error_path=error_path,
                                              eval_sign=eval_sign)
            inout.save_yaml(scores_path, scores)

            # Save matches
            matches_path = matches_mpath.format(error_path=error_path,
                                                eval_sign=eval_sign)
            inout.save_yaml(matches_path, matches)

    print("Done.")