Example #1
    model_type = 'eval'
    dp_model = dataset_params.get_model_params(p['datasets_path'], dataset,
                                               model_type)

    # Load object models.
    models = {}
    if p['error_type'] in ['ad', 'add', 'adi', 'mssd', 'mspd', 'proj']:
        misc.log('Loading object models...')
        for obj_id in dp_model['obj_ids']:
            models[obj_id] = inout.load_ply(
                dp_model['model_tpath'].format(obj_id=obj_id))

    # Load models info.
    models_info = None
    if p['error_type'] in ['ad', 'add', 'adi', 'vsd', 'mssd', 'mspd', 'cus']:
        models_info = inout.load_json(dp_model['models_info_path'],
                                      keys_to_int=True)

    # Get sets of symmetry transformations for the object models.
    models_sym = None
    if p['error_type'] in ['mssd', 'mspd']:
        models_sym = {}
        for obj_id in dp_model['obj_ids']:
            models_sym[obj_id] = misc.get_symmetry_transformations(
                models_info[obj_id], p['max_sym_disc_step'])

    # Initialize a renderer.
    ren = None
    if p['error_type'] in ['vsd', 'cus']:
        misc.log('Initializing renderer...')
        width, height = dp_split['im_size']
        ren = renderer.create_renderer(width, height, p['renderer_type'],
                                       mode='depth')
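        # The excerpt ends here. A minimal sketch (assumption, not the original
        # code): the object models also have to be added to the renderer before
        # they can be rendered for the VSD/CUS errors.
        for obj_id in dp_model['obj_ids']:
            ren.add_object(obj_id,
                           dp_model['model_tpath'].format(obj_id=obj_id))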
Example #2
dists = []
azimuths = []
elevs = []
visib_fracts = []
ims_count = 0
for scene_id in scene_ids:
    misc.log('Processing - dataset: {} ({}, {}), scene: {}'.format(
        p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id))

    # Load GT poses.
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))

    # Load info about the GT poses.
    scene_gt_info = inout.load_json(
        dp_split['scene_gt_info_tpath'].format(scene_id=scene_id),
        keys_to_int=True)

    ims_count += len(scene_gt)

    for im_id in scene_gt.keys():
        for gt_id, im_gt in enumerate(scene_gt[im_id]):

            # Object distance.
            dist = np.linalg.norm(im_gt['cam_t_m2c'])
            dists.append(dist)

            # Camera origin in the model coordinate system.
            cam_orig_m = -np.linalg.inv(im_gt['cam_R_m2c']).dot(
                im_gt['cam_t_m2c'])
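            # The excerpt ends here. A minimal sketch (assumption, not the
            # original code) of how the remaining statistics could be filled
            # in: camera azimuth/elevation w.r.t. the model origin and the GT
            # visibility fraction stored in scene_gt_info.
            x, y, z = np.asarray(cam_orig_m).flatten()
            azimuth = np.arctan2(y, x) % (2.0 * np.pi)   # In [0, 2*pi).
            elev = np.arctan2(z, np.hypot(x, y))         # In [-pi/2, pi/2].
            azimuths.append(azimuth)
            elevs.append(elev)
            visib_fracts.append(scene_gt_info[im_id][gt_id]['visib_fract'])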
Example #3
        for error_sign, error_dir_path in error_dir_paths.items():
            recall_dict[error['type']][error_sign] = []
            for correct_th in error['correct_th']:

                # Path to file with calculated scores.
                score_sign = misc.get_score_signature(correct_th,
                                                      p['visib_gt_min'])

                scores_filename = 'scores_{}.json'.format(score_sign)
                scores_path = os.path.join(config.eval_path, result_name,
                                           error_sign, scores_filename)

                # Load the scores.
                misc.log(
                    'Loading calculated scores from: {}'.format(scores_path))
                scores = inout.load_json(scores_path)
                recalls.append(scores['total_recall'])
                recall_dict[error['type']][error_sign].append(
                    scores['total_recall'])

        # Area under the recall surface/curve.
        aur[error['type']] = np.mean(recalls)

    time_total = time.time() - time_start

    # Output final scores and plot recall curves.
    err_types = [e['type'] for e in p['errors']]
    for err_type in err_types:
        misc.log('Average Recall {}: {}'.format(err_type, aur[err_type]))

    if set(['vsd', 'mssd', 'mspd']).issubset(err_types):
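        # The excerpt ends here. A minimal sketch (assumption, not the original
        # code): the overall BOP score is typically reported as the mean of the
        # average recalls of the VSD, MSSD and MSPD errors.
        mean_recall = np.mean([aur['vsd'], aur['mssd'], aur['mspd']])
        misc.log('Average Recall (VSD, MSSD, MSPD): {}'.format(mean_recall))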
Example #4
}
################################################################################

# Load dataset parameters.
dp_split = dataset_params.get_split_params(p['datasets_path'], p['dataset'],
                                           p['dataset_split'],
                                           p['dataset_split_type'])

model_type = 'eval'  # None = default.
dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           model_type)

# Load colors.
colors_path = os.path.join(os.path.dirname(visualization.__file__),
                           'colors.json')
colors = inout.load_json(colors_path)

# Subset of images for which the ground-truth poses will be rendered.
if p['targets_filename'] is not None:
    targets = inout.load_json(
        os.path.join(dp_split['base_path'], p['targets_filename']))
    scene_im_ids = {}
    for target in targets:
        scene_im_ids.setdefault(target['scene_id'], set()).add(target['im_id'])
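    # Resulting structure: {scene_id: {im_id, ...}}, e.g. {1: {3, 51}, 2: {17}}
    # (hypothetical IDs).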
else:
    scene_im_ids = None

# List of considered scenes.
scene_ids_curr = dp_split['scene_ids']
if p['scene_ids']:
    scene_ids_curr = set(scene_ids_curr).intersection(p['scene_ids'])
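# The excerpt ends here. A minimal sketch (assumption, not the original code)
# of how the selections above are typically consumed: iterate over the
# considered scenes and keep only the targeted images of each scene.
for scene_id in scene_ids_curr:
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))
    im_ids = sorted(scene_gt.keys())
    if scene_im_ids is not None:
        im_ids = sorted(scene_im_ids.get(scene_id, set()).intersection(im_ids))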
Example #5
  'vis_path': os.path.join(config.output_path, 'vis_est_poses'),
  
  # Path templates for output images.
  'vis_rgb_tpath': os.path.join(
    '{vis_path}', '{result_name}', '{scene_id:06d}', '{vis_name}.jpg'),
  'vis_depth_diff_tpath': os.path.join(
    '{vis_path}', '{result_name}', '{scene_id:06d}',
    '{vis_name}_depth_diff.jpg'),
}
################################################################################


# Load colors.
colors_path = os.path.join(
  os.path.dirname(visualization.__file__), 'colors.json')
colors = inout.load_json(colors_path)

for result_fname in p['result_filenames']:
  misc.log('Processing: ' + result_fname)

  # Parse info about the method and the dataset from the filename.
  result_name = os.path.splitext(os.path.basename(result_fname))[0]
  result_info = result_name.split('_')
  method = result_info[0]
  dataset_info = result_info[1].split('-')
  dataset = dataset_info[0]
  split = dataset_info[1]
  split_type = dataset_info[2] if len(dataset_info) > 2 else None
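  # The filename is thus expected to follow the pattern
  # <method>_<dataset>-<split>[-<split_type>].<ext>,
  # e.g. 'mymethod_lmo-test.csv' (hypothetical name).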

  # Load dataset parameters.
  dp_split = dataset_params.get_split_params(
    p['datasets_path'], dataset, split, split_type)
Example #6
    score_sign = misc.get_score_signature(p['correct_th'][err_type],
                                          p['visib_gt_min'])

    misc.log('Calculating score - error: {}, method: {}, dataset: {}.'.format(
        err_type, method, dataset))

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(p['datasets_path'], dataset,
                                               split, split_type)

    model_type = 'eval'
    dp_model = dataset_params.get_model_params(p['datasets_path'], dataset,
                                               model_type)

    # Load info about the object models.
    models_info = inout.load_json(dp_model['models_info_path'],
                                  keys_to_int=True)

    # Load the estimation targets to consider.
    targets = inout.load_json(
        os.path.join(dp_split['base_path'], p['targets_filename']))
    scene_im_ids = {}

    # Organize the targets by scene, image and object.
    misc.log('Organizing estimation targets...')
    targets_org = {}
    for target in targets:
        targets_org.setdefault(target['scene_id'],
                               {}).setdefault(target['im_id'],
                                              {})[target['obj_id']] = target
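    # Resulting structure: targets_org[scene_id][im_id][obj_id] -> target.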

    # Go through the test scenes and match estimated poses to GT poses.
Example #7
model_type = None  # None = default (assumption; not shown in the excerpt).
dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           model_type)
dp_camera = dataset_params.get_camera_params(p['datasets_path'], p['dataset'])

K = dp_camera['K']
fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
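# (K is the standard pinhole intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].)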

# Create a renderer.
width, height = dp_camera['im_size']
ren = renderer.create_renderer(width,
                               height,
                               p['renderer_type'],
                               mode='rgb',
                               shading='flat')

# Load meta info about the models (including symmetries).
models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True)

for obj_id in dp_model['obj_ids']:

    # Load object model.
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    ren.add_object(obj_id, model_path)

    poses = misc.get_symmetry_transformations(models_info[obj_id],
                                              p['max_sym_disc_step'])

    for pose_id, pose in enumerate(poses):

        for view_id, view in enumerate(p['views']):
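            # The excerpt ends here. A minimal sketch (assumption, not the
            # original code), further assuming each view is a dict with a
            # rotation 'R' and a translation 't': compose the symmetry
            # transformation with the view pose and render the object with the
            # camera intrinsics loaded above.
            R = view['R'].dot(pose['R'])
            t = view['R'].dot(pose['t']) + view['t']
            rgb = ren.render_object(obj_id, R, t, fx, fy, cx, cy)['rgb']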