Example #1
    ests_counter = 0
    time_start = time.time()

    # Parse info about the method and the dataset from the filename.
    result_name = os.path.splitext(os.path.basename(result_filename))[0]
    result_info = result_name.split('_')
    method = str(result_info[0])
    dataset_info = result_info[1].split('-')
    dataset = str(dataset_info[0])
    split = str(dataset_info[1])
    split_type = str(dataset_info[2]) if len(dataset_info) > 2 else None
    split_type_str = ' - ' + split_type if split_type is not None else ''
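    # Illustration (filename assumed, not from the source): a result file named
    # 'my-method_lm-test.csv' would parse to method='my-method', dataset='lm',
    # split='test', split_type=None and split_type_str=''.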

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(p['datasets_path'], dataset,
                                               split, split_type)

    model_type = 'eval'
    dp_model = dataset_params.get_model_params(p['datasets_path'], dataset,
                                               model_type)

    # Load object models.
    models = {}
    if p['error_type'] in ['ad', 'add', 'adi', 'mssd', 'mspd', 'proj']:
        misc.log('Loading object models...')
        for obj_id in dp_model['obj_ids']:
            models[obj_id] = inout.load_ply(
                dp_model['model_tpath'].format(obj_id=obj_id))
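    # Each entry of `models` is the dict returned by inout.load_ply; notably
    # models[obj_id]['pts'] holds the Nx3 vertex array used when computing the
    # model-based pose errors.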

    # Load models info.
    models_info = None

Example #2

from bop_toolkit_lib import config, dataset_params, inout, misc

# PARAMETERS.
################################################################################
p = {
    # See dataset_params.py for options.
    'dataset': 'lm',

    # Dataset split. Options: 'train', 'val', 'test'.
    'dataset_split': 'test',

    # Dataset split type. None = default. See dataset_params.py for options.
    'dataset_split_type': None,

    # Folder containing the BOP datasets.
    'datasets_path': config.datasets_path,
}
################################################################################

# Load dataset parameters.
dp_split = dataset_params.get_split_params(p['datasets_path'], p['dataset'],
                                           p['dataset_split'],
                                           p['dataset_split_type'])
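# dp_split is a plain dict; the fields used below are dp_split['scene_ids'] (ids of
# the scenes in the selected split) and dp_split['scene_gt_tpath'] (a path template
# for the per-scene ground-truth files).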

scene_ids = dp_split['scene_ids']
dists = []
azimuths = []
elevs = []
visib_fracts = []
ims_count = 0
for scene_id in scene_ids:
    misc.log('Processing - dataset: {} ({}, {}), scene: {}'.format(
        p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id))

    # Load GT poses.
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))
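    # scene_gt maps each image id to a list of GT annotations; in the BOP format
    # every annotation provides 'obj_id', 'cam_R_m2c' (3x3 rotation) and
    # 'cam_t_m2c' (3x1 translation in mm).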

Example #3
out_rgb_tpath =\
  os.path.join('{out_path}', '{obj_id:06d}', 'rgb', '{im_id:06d}.png')
out_depth_tpath =\
  os.path.join('{out_path}', '{obj_id:06d}', 'depth', '{im_id:06d}.png')
out_scene_camera_tpath =\
  os.path.join('{out_path}', '{obj_id:06d}', 'scene_camera.json')
out_scene_gt_tpath =\
  os.path.join('{out_path}', '{obj_id:06d}', 'scene_gt.json')
out_views_vis_tpath =\
  os.path.join('{out_path}', '{obj_id:06d}', 'views_radius={radius}.ply')
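
# Illustration (values assumed): the templates are filled with str.format, e.g.
# out_rgb_tpath.format(out_path=out_path, obj_id=1, im_id=0) resolves to
# '<out_path>/000001/rgb/000000.png' thanks to the {obj_id:06d} / {im_id:06d} padding.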
################################################################################

out_path = out_tpath.format(dataset=dataset)
misc.ensure_dir(out_path)

# Load dataset parameters.
dp_split_test = dataset_params.get_split_params(datasets_path, dataset, 'test')
dp_model = dataset_params.get_model_params(datasets_path, dataset, model_type)
dp_camera = dataset_params.get_camera_params(datasets_path, dataset, cam_type)

if not obj_ids:
    obj_ids = dp_model['obj_ids']

# Image size and K for the RGB image (potentially with SSAA).
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp_camera['im_size']]
K_rgb = dp_camera['K'] * ssaa_fact
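# With e.g. ssaa_fact = 4 and an assumed dp_camera['im_size'] of (640, 480), the RGB
# image is rendered at (2560, 1920); the matching intrinsics are obtained by reading
# the element-wise scaled fx, fy, cx, cy out of K_rgb below. Downsampling the
# super-sampled render back to the original size is what provides the anti-aliasing.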

# Intrinsic parameters for RGB rendering.
fx_rgb, fy_rgb, cx_rgb, cy_rgb =\
  K_rgb[0, 0], K_rgb[1, 1], K_rgb[0, 2], K_rgb[1, 2]

# Intrinsic parameters for depth rendering.