'vis_rgb_tpath':
    os.path.join('{vis_path}', '{result_name}', '{scene_id:06d}',
                 '{vis_name}.jpg'),
    'vis_depth_diff_tpath':
    os.path.join('{vis_path}', '{result_name}', '{scene_id:06d}',
                 '{vis_name}_depth_diff.jpg'),
}
################################################################################

# Load colors.
colors_path = os.path.join(os.path.dirname(visualization.__file__),
                           'colors.json')
colors = inout.load_json(colors_path)

for result_fname in p['result_filenames']:
    misc.log('Processing: ' + result_fname)

    # Parse info about the method and the dataset from the filename.
    result_name = os.path.splitext(os.path.basename(result_fname))[0]
    result_info = result_name.split('_')
    method = result_info[0]
    dataset_info = result_info[1].split('-')
    dataset = dataset_info[0]
    split = dataset_info[1]
    split_type = dataset_info[2] if len(dataset_info) > 2 else None
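    # E.g. a file named 'epos_lmo-test.csv' (an illustrative name following
    # the BOP convention '<method>_<dataset>-<split>[-<split_type>]') parses
    # to method='epos', dataset='lmo', split='test' and split_type=None.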

    # Load dataset parameters.
    dp_split = dataset_params.get_split_params(p['datasets_path'], dataset,
                                               split, split_type)

    model_type = 'eval'
Example #2
for err_type in p['correct_th']:
  p['correct_th'][err_type] = list(
    map(float, args.__dict__['correct_th_' + err_type].split(',')))
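# E.g. passing --correct_th_vsd=0.3 on the command line (assuming 'vsd' is one
# of the error types keyed in p['correct_th']) yields
# p['correct_th']['vsd'] = [0.3].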

p['normalized_by_diameter'] = args.normalized_by_diameter.split(',')
p['normalized_by_im_width'] = args.normalized_by_im_width.split(',')
p['visib_gt_min'] = float(args.visib_gt_min)
p['error_dir_paths'] = args.error_dir_paths.split(',')
p['eval_path'] = str(args.eval_path)
p['datasets_path'] = str(args.datasets_path)
p['targets_filename'] = str(args.targets_filename)
p['error_tpath'] = str(args.error_tpath)
p['out_matches_tpath'] = str(args.out_matches_tpath)
p['out_scores_tpath'] = str(args.out_scores_tpath)

misc.log('-----------')
misc.log('Parameters:')
for k, v in p.items():
  misc.log('- {}: {}'.format(k, v))
misc.log('-----------')

# Calculation of the performance scores.
# ------------------------------------------------------------------------------
for error_dir_path in p['error_dir_paths']:
  misc.log('Processing: {}'.format(error_dir_path))

  time_start = time.time()

  # Parse info about the errors from the folder name.
  error_sign = os.path.basename(error_dir_path)
  err_type = str(error_sign.split('_')[0].split('=')[1])
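  # The folder name is assumed to encode the error type, e.g. a folder named
  # 'error=vsd_ntop=1' (an illustrative signature) yields err_type = 'vsd'.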
Example #3
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--visib_gt_min', default=p['visib_gt_min'])
parser.add_argument('--result_filenames',
                    default=','.join(p['result_filenames']),
                    help='Comma-separated names of files with results.')
args = parser.parse_args()

p['visib_gt_min'] = float(args.visib_gt_min)
p['result_filenames'] = args.result_filenames.split(',')

# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:

    misc.log('===========')
    misc.log('SHOWING: {}'.format(result_filename))
    misc.log('===========')

    time_start = time.time()
    aur = {}

    recall_dict = {e['type']: {} for e in p['errors']}

    for error in p['errors']:

        # Name of the result and the dataset.
        result_name = os.path.splitext(os.path.basename(result_filename))[0]
        dataset = str(result_name.split('_')[1].split('-')[0])

        # Paths (rel. to config.eval_path) to folders with calculated pose errors.
Example #4
p['error_type'] = str(args.error_type)
p['vsd_deltas'] = {
    str(e.split(':')[0]): float(e.split(':')[1])
    for e in args.vsd_deltas.split(',')
}
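# E.g. --vsd_deltas='lm:15,tless:15' (illustrative values) is parsed into
# {'lm': 15.0, 'tless': 15.0}.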
p['vsd_taus'] = list(map(float, args.vsd_taus.split(',')))
p['vsd_normalized_by_diameter'] = bool(args.vsd_normalized_by_diameter)
p['max_sym_disc_step'] = float(args.max_sym_disc_step)
p['skip_missing'] = bool(args.skip_missing)
p['renderer_type'] = str(args.renderer_type)
p['result_filenames'] = args.result_filenames.split(',')
p['datasets_path'] = str(args.datasets_path)
p['targets_filename'] = str(args.targets_filename)
p['out_errors_tpath'] = str(args.out_errors_tpath)

misc.log('-----------')
misc.log('Parameters:')
for k, v in p.items():
    misc.log('- {}: {}'.format(k, v))
misc.log('-----------')

# Error calculation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:
    misc.log('Processing: {}'.format(result_filename))

    ests_counter = 0
    time_start = time.time()

    # Parse info about the method and the dataset from the filename.
    result_name = os.path.splitext(os.path.basename(result_filename))[0]
Example #5
################################################################################
p = {
    # Names of files with results for which to calculate the errors (assumed to be
    # stored in folder config.eval_path). See docs/bop_challenge_2019.md for a
    # description of the format. Example results can be found at:
    # http://ptak.felk.cvut.cz/6DB/public/bop_sample_results/bop_challenge_2019/
    'result_filenames': [
        '/path/to/csv/with/results',
    ],
}
################################################################################

# Command line arguments.
# ------------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--result_filenames',
                    default=','.join(p['result_filenames']),
                    help='Comma-separated names of files with results.')
args = parser.parse_args()

p['result_filenames'] = args.result_filenames.split(',')

if __name__ == '__main__':

    for result_filename in p['result_filenames']:
        result_path = os.path.join(config.results_path, result_filename)
        check_passed, check_msg = inout.check_bop_results(result_path,
                                                          version='bop19')

        misc.log('Check msg: {}'.format(check_msg))
Example #6
    segmentation_id = 1

    coco_scene_output = {
        "info": INFO,
        "licenses": [],
        "categories": CATEGORIES,
        "images": [],
        "annotations": []
    }

    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))
    coco_gt_path = dp_split['scene_gt_coco_tpath'].format(scene_id=scene_id)

    misc.log('Calculating COCO annotations - dataset: {} ({}, {}), scene: {}'.
             format(p['dataset'], p['dataset_split'], p['dataset_split_type'],
                    scene_id))

    for scene_view, inst_list in scene_gt.items():
        im_id = int(scene_view)
        mask_paths = os.path.join(dp_split['base_path'], complete_split,
                                  '{:06d}/mask_visib'.format(scene_id))
        img_path = dp_split['rgb_tpath'].format(scene_id=scene_id, im_id=im_id)
        relative_img_path = os.path.relpath(img_path,
                                            os.path.dirname(coco_gt_path))
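        # The image path is stored relative to the COCO GT file so the
        # annotations remain valid if the dataset folder is moved.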
        image_info = pycoco_utils.create_image_info(image_id,
                                                    relative_img_path,
                                                    dp_split['im_size'])
        coco_scene_output["images"].append(image_info)

        for idx, inst in enumerate(inst_list):
Example #7
    model_texture = inout.load_im(model_texture_path)
  else:
    model_texture = None
  model_uv_texture = None

  scene_camera = {}
  scene_gt = {}
  im_id = 0
  for radius in radii:
    # Sample viewpoints.
    view_sampler_mode = 'hinterstoisser'  # 'hinterstoisser' or 'fibonacci'.
    views, views_level = view_sampler.sample_views(
      min_n_views, radius, dp_split_test['azimuth_range'],
      dp_split_test['elev_range'], view_sampler_mode)

    misc.log('Sampled views: ' + str(len(views)))
    # out_views_vis_path = out_views_vis_tpath.format(
    #   out_path=out_path, obj_id=obj_id, radius=radius)
    # view_sampler.save_vis(out_views_vis_path, views, views_level)

    # Render the object model from all views.
    for view_id, view in enumerate(views):  # Use views[:30] when debugging.
      if view_id % 10 == 0:
        misc.log('Rendering - obj: {}, radius: {}, view: {}/{}'.format(
          obj_id, radius, view_id, len(views)))

      # Rendering.
      rgb = ren_rgb.render_object(
        obj_id, view['R'], view['t'], fx_rgb, fy_rgb, cx_rgb, cy_rgb)['rgb']
      # depth = ren_depth.render_object(
      #   obj_id, view['R'], view['t'], fx_d, fy_d, cx_d, cy_d)['depth']
Example #8
def calc_localization_scores(scene_ids,
                             obj_ids,
                             matches,
                             n_top,
                             do_print=True):
    """Calculates performance scores for the 6D object localization task.

  References:
  Hodan et al., BOP: Benchmark for 6D Object Pose Estimation, ECCV'18.
  Hodan et al., On Evaluation of 6D Object Pose Estimation, ECCVW'16.

  :param scene_ids: ID's of considered scenes.
  :param obj_ids: ID's of considered objects.
  :param matches: Info about matching pose estimates to ground-truth poses
    (see pose_matching.py for details).
  :param n_top: Number of top pose estimates to consider per test target.
  :param do_print: Whether to print the scores to the standard output.
  :return: Dictionary with the evaluation scores.
  """
    # Count the number of visible object instances in each image.
    insts = {
        i: {j: defaultdict(int)
            for j in scene_ids}
        for i in obj_ids
    }
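    # insts[obj_id][scene_id][im_id] counts the valid (sufficiently visible)
    # GT instances of the object in the image.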
    for m in matches:
        if m['valid']:
            insts[m['obj_id']][m['scene_id']][m['im_id']] += 1

    # Count the number of targets = object instances to be found.
    # For SiSo, there is either zero or one target in each image - there is just
    # one even if there are more instances of the object of interest.
    tars = 0  # Total number of targets.
    obj_tars = {i: 0 for i in obj_ids}  # Targets per object.
    scene_tars = {i: 0 for i in scene_ids}  # Targets per scene.
    for obj_id, obj_insts in insts.items():
        for scene_id, scene_insts in obj_insts.items():

            # Count the number of targets for the current object in the current scene.
            if n_top > 0:
                count = sum(np.minimum(n_top, list(scene_insts.values())))
            else:
                count = sum(list(scene_insts.values()))

            tars += count
            obj_tars[obj_id] += count
            scene_tars[scene_id] += count

    # Count the number of true positives.
    tps = 0  # Total number of true positives.
    obj_tps = {i: 0 for i in obj_ids}  # True positives per object.
    scene_tps = {i: 0 for i in scene_ids}  # True positives per scene.
    for m in matches:
        if m['valid'] and m['est_id'] != -1:
            tps += 1
            obj_tps[m['obj_id']] += 1
            scene_tps[m['scene_id']] += 1

    # Total recall.
    recall = calc_recall(tps, tars)

    # Recall per object.
    obj_recalls = {}
    for i in obj_ids:
        obj_recalls[i] = calc_recall(obj_tps[i], obj_tars[i])
    mean_obj_recall = float(np.mean(list(obj_recalls.values())).squeeze())

    # Recall per scene.
    scene_recalls = {}
    for i in scene_ids:
        scene_recalls[i] = float(calc_recall(scene_tps[i], scene_tars[i]))
    mean_scene_recall = float(np.mean(list(scene_recalls.values())).squeeze())

    # Final scores.
    scores = {
        'recall': float(recall),
        'obj_recalls': obj_recalls,
        'mean_obj_recall': float(mean_obj_recall),
        'scene_recalls': scene_recalls,
        'mean_scene_recall': float(mean_scene_recall),
        'gt_count': len(matches),
        'targets_count': int(tars),
        'tp_count': int(tps),
    }

    if do_print:
        obj_recalls_str = ', '.join([
            '{}: {:.3f}'.format(i, s)
            for i, s in scores['obj_recalls'].items()
        ])

        scene_recalls_str = ', '.join([
            '{}: {:.3f}'.format(i, s)
            for i, s in scores['scene_recalls'].items()
        ])

        misc.log('')
        misc.log('GT count:           {:d}'.format(scores['gt_count']))
        misc.log('Target count:       {:d}'.format(scores['targets_count']))
        misc.log('TP count:           {:d}'.format(scores['tp_count']))
        misc.log('Recall:             {:.4f}'.format(scores['recall']))
        misc.log('Mean object recall: {:.4f}'.format(
            scores['mean_obj_recall']))
        misc.log('Mean scene recall:  {:.4f}'.format(
            scores['mean_scene_recall']))
        misc.log('Object recalls:\n{}'.format(obj_recalls_str))
        misc.log('Scene recalls:\n{}'.format(scene_recalls_str))
        misc.log('')

    return scores
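

# A minimal usage sketch (not part of the original module). Each element of
# 'matches' is assumed to be a dict with keys 'scene_id', 'im_id', 'obj_id',
# 'est_id' and 'valid' (see pose_matching.py), where est_id == -1 marks a
# ground-truth instance with no matched estimate:
toy_matches = [
    {'scene_id': 1, 'im_id': 0, 'obj_id': 5, 'est_id': 0, 'valid': True},
    {'scene_id': 1, 'im_id': 1, 'obj_id': 5, 'est_id': -1, 'valid': True},
]
# One of the two targets is a true positive, so scores['recall'] == 0.5.
scores = calc_localization_scores(
    scene_ids=[1], obj_ids=[5], matches=toy_matches, n_top=1)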
Example #9
def load_ply(path):
    """Loads a 3D mesh model from a PLY file.

  :param path: Path to a PLY file.
  :return: The loaded model given by a dictionary with items:
   - 'pts' (nx3 ndarray)
   - 'normals' (nx3 ndarray), optional
   - 'colors' (nx3 ndarray), optional
   - 'faces' (mx3 ndarray), optional
   - 'texture_uv' (nx2 ndarray), optional
   - 'texture_uv_face' (mx6 ndarray), optional
   - 'texture_file' (string), optional
  """
    f = open(path, 'rb')

    # Only triangular faces are supported.
    face_n_corners = 3

    n_pts = 0
    n_faces = 0
    pt_props = []
    face_props = []
    is_binary = False
    header_vertex_section = False
    header_face_section = False
    texture_file = None

    # Read the header.
    while True:

        # Strip the newline character(s).
        line = f.readline().decode('utf8').rstrip('\n').rstrip('\r')

        if line.startswith('comment TextureFile'):
            texture_file = line.split()[-1]
        elif line.startswith('element vertex'):
            n_pts = int(line.split()[-1])
            header_vertex_section = True
            header_face_section = False
        elif line.startswith('element face'):
            n_faces = int(line.split()[-1])
            header_vertex_section = False
            header_face_section = True
        elif line.startswith('element'):  # Some other element.
            header_vertex_section = False
            header_face_section = False
        elif line.startswith('property') and header_vertex_section:
            # (name of the property, data type)
            pt_props.append((line.split()[-1], line.split()[-2]))
        elif line.startswith('property list') and header_face_section:
            elems = line.split()
            if elems[-1] == 'vertex_indices' or elems[-1] == 'vertex_index':
                # (name of the property, data type)
                face_props.append(('n_corners', elems[2]))
                for i in range(face_n_corners):
                    face_props.append(('ind_' + str(i), elems[3]))
            elif elems[-1] == 'texcoord':
                # (name of the property, data type)
                face_props.append(('texcoord', elems[2]))
                for i in range(face_n_corners * 2):
                    face_props.append(('texcoord_ind_' + str(i), elems[3]))
            else:
                misc.log('Warning: Unsupported face property: ' + elems[-1])
        elif line.startswith('format'):
            if 'binary' in line:
                is_binary = True
        elif line.startswith('end_header'):
            break

    # Prepare data structures.
    model = {}
    if texture_file is not None:
        model['texture_file'] = texture_file
    model['pts'] = np.zeros((n_pts, 3), np.float64)
    if n_faces > 0:
        model['faces'] = np.zeros((n_faces, face_n_corners), np.float64)

    pt_props_names = [p[0] for p in pt_props]
    face_props_names = [p[0] for p in face_props]

    is_normal = False
    if {'nx', 'ny', 'nz'}.issubset(set(pt_props_names)):
        is_normal = True
        model['normals'] = np.zeros((n_pts, 3), np.float64)

    is_color = False
    if {'red', 'green', 'blue'}.issubset(set(pt_props_names)):
        is_color = True
        model['colors'] = np.zeros((n_pts, 3), np.float64)

    is_texture_pt = False
    if {'texture_u', 'texture_v'}.issubset(set(pt_props_names)):
        is_texture_pt = True
        model['texture_uv'] = np.zeros((n_pts, 2), np.float64)

    is_texture_face = False
    if {'texcoord'}.issubset(set(face_props_names)):
        is_texture_face = True
        model['texture_uv_face'] = np.zeros((n_faces, 6), np.float64)

    # Formats for the binary case.
    formats = {
        'float': ('f', 4),
        'double': ('d', 8),
        'int': ('i', 4),
        'uchar': ('B', 1)
    }
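    # Each entry maps a PLY scalar type to its struct format character and
    # size in bytes, e.g. a binary 'float' property is read as 4 bytes and
    # decoded with struct.unpack('f', ...).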

    # Load vertices.
    for pt_id in range(n_pts):
        prop_vals = {}
        load_props = [
            'x', 'y', 'z', 'nx', 'ny', 'nz', 'red', 'green', 'blue',
            'texture_u', 'texture_v'
        ]
        if is_binary:
            for prop in pt_props:
                fmt = formats[prop[1]]
                read_data = f.read(fmt[1])
                val = struct.unpack(fmt[0], read_data)[0]
                if prop[0] in load_props:
                    prop_vals[prop[0]] = val
        else:
            elems = f.readline().decode('utf8').rstrip('\n').rstrip(
                '\r').split()
            for prop_id, prop in enumerate(pt_props):
                if prop[0] in load_props:
                    prop_vals[prop[0]] = elems[prop_id]

        model['pts'][pt_id, 0] = float(prop_vals['x'])
        model['pts'][pt_id, 1] = float(prop_vals['y'])
        model['pts'][pt_id, 2] = float(prop_vals['z'])

        if is_normal:
            model['normals'][pt_id, 0] = float(prop_vals['nx'])
            model['normals'][pt_id, 1] = float(prop_vals['ny'])
            model['normals'][pt_id, 2] = float(prop_vals['nz'])

        if is_color:
            model['colors'][pt_id, 0] = float(prop_vals['red'])
            model['colors'][pt_id, 1] = float(prop_vals['green'])
            model['colors'][pt_id, 2] = float(prop_vals['blue'])

        if is_texture_pt:
            model['texture_uv'][pt_id, 0] = float(prop_vals['texture_u'])
            model['texture_uv'][pt_id, 1] = float(prop_vals['texture_v'])

    # Load faces.
    for face_id in range(n_faces):
        prop_vals = {}
        if is_binary:
            for prop in face_props:
                fmt = formats[prop[1]]
                val = struct.unpack(fmt[0], f.read(fmt[1]))[0]
                if prop[0] == 'n_corners':
                    if val != face_n_corners:
                        raise ValueError(
                            'Only triangular faces are supported.')
                elif prop[0] == 'texcoord':
                    if val != face_n_corners * 2:
                        raise ValueError(
                            'Wrong number of UV face coordinates.')
                else:
                    prop_vals[prop[0]] = val
        else:
            elems = f.readline().decode('utf8').rstrip('\n').rstrip(
                '\r').split()
            for prop_id, prop in enumerate(face_props):
                if prop[0] == 'n_corners':
                    if int(elems[prop_id]) != face_n_corners:
                        raise ValueError(
                            'Only triangular faces are supported.')
                elif prop[0] == 'texcoord':
                    if int(elems[prop_id]) != face_n_corners * 2:
                        raise ValueError(
                            'Wrong number of UV face coordinates.')
                else:
                    prop_vals[prop[0]] = elems[prop_id]

        model['faces'][face_id, 0] = int(prop_vals['ind_0'])
        model['faces'][face_id, 1] = int(prop_vals['ind_1'])
        model['faces'][face_id, 2] = int(prop_vals['ind_2'])

        if is_texture_face:
            for i in range(6):
                model['texture_uv_face'][face_id, i] = float(
                    prop_vals['texcoord_ind_{}'.format(i)])

    f.close()

    return model
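

# A minimal usage sketch (assuming 'obj_000001.ply' is a path to an existing
# PLY file; the name is illustrative):
if __name__ == '__main__':
    model = load_ply('obj_000001.ply')
    print('Loaded {} vertices.'.format(model['pts'].shape[0]))
    if 'faces' in model:
        print('Loaded {} triangular faces.'.format(model['faces'].shape[0]))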
Example #10
    # Type of input object models.
    'model_type': None,

    # Folder containing the BOP datasets.
    'datasets_path': config.datasets_path,
}
################################################################################

# Load dataset parameters.
dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           p['model_type'])

models_info = {}
for obj_id in dp_model['obj_ids']:
    misc.log('Processing model of object {}...'.format(obj_id))

    model = inout.load_ply(dp_model['model_tpath'].format(obj_id=obj_id))

    # Calculate 3D bounding box.
    ref_pt = list(map(float, model['pts'].min(axis=0).flatten()))
    size = list(map(float, (model['pts'].max(axis=0) - ref_pt).flatten()))

    # Calculate the diameter.
    diameter = misc.calc_pts_diameter(model['pts'])

    models_info[obj_id] = {
        'min_x': ref_pt[0],
        'min_y': ref_pt[1],
        'min_z': ref_pt[2],
        'size_x': size[0],
Example #11
            np.array([0, 0, 0]),
            np.array([0, 0, 0])
        ]

        if views_level:
            max_level = max(1, max(views_level))
            intens = (255 * views_level[view_id]) / float(max_level)
        else:
            intens = 255 * view_id / float(len(views))
        colors += [[intens, intens, intens], [255, 0, 0], [0, 255, 0],
                   [0, 0, 255]]

    inout.save_ply2(path,
                    pts=np.array(pts),
                    pts_normals=np.array(normals),
                    pts_colors=np.array(colors))


if __name__ == '__main__':

    # Example of sampling views from a view sphere.
    views, views_level = sample_views(min_n_views=25,
                                      radius=1,
                                      azimuth_range=(0, 2 * math.pi),
                                      elev_range=(-0.5 * math.pi,
                                                  0.5 * math.pi),
                                      mode='fibonacci')
    misc.log('Sampled views: ' + str(len(views)))
    out_views_vis_path = 'view_sphere.ply'
    save_vis(out_views_vis_path, views)
Example #12
    'datasets_path': config.datasets_path,
}
################################################################################

# Load dataset parameters.
dp_split = dataset_params.get_split_params(p['datasets_path'], p['dataset'],
                                           p['dataset_split'])

scene_ids = dp_split['scene_ids']
dists = []
azimuths = []
elevs = []
visib_fracts = []
ims_count = 0
for scene_id in scene_ids:
    misc.log('Processing - dataset: {} {}, scene: {}'.format(
        p['dataset'], p['dataset_split'], scene_id))

    # Load GT poses.
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))

    # Load info about the GT poses.
    scene_gt_info = inout.load_json(
        dp_split['scene_gt_info_tpath'].format(scene_id=scene_id),
        keys_to_int=True)

    ims_count += len(scene_gt)

    for im_id in scene_gt.keys():
        for gt_id, im_gt in enumerate(scene_gt[im_id]):
Example #13
                    default=','.join(p['result_filenames']),
                    help='Comma-separated names of files with results.')
parser.add_argument('--targets_filename', default=p['targets_filename'])
args = parser.parse_args()

p['visib_gt_min'] = float(args.visib_gt_min)
p['max_sym_disc_step'] = float(args.max_sym_disc_step)
p['renderer_type'] = str(args.renderer_type)
p['result_filenames'] = args.result_filenames.split(',')
p['targets_filename'] = str(args.targets_filename)

# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:

    misc.log('===========')
    misc.log('EVALUATING: {}'.format(result_filename))
    misc.log('===========')

    time_start = time.time()
    aur = {}

    for error in p['errors']:

        # Calculate error of the pose estimates.
        calc_errors_cmd = [
            'python',
            os.path.join('scripts', 'eval_calc_errors.py'),
            '--n_top={}'.format(error['n_top']),
            '--error_type={}'.format(error['type']),
            '--result_filenames={}'.format(result_filename),
Example #14
# Create a renderer.
width, height = dp_camera['im_size']
ren = renderer.create_renderer(width,
                               height,
                               p['renderer_type'],
                               mode='rgb',
                               shading='flat')

# Load meta info about the models (including symmetries).
models_info = inout.load_json(dp_model['models_info_path'], keys_to_int=True)

for obj_id in dp_model['obj_ids']:

    # Load object model.
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    ren.add_object(obj_id, model_path)

    poses = misc.get_symmetry_transformations(models_info[obj_id],
                                              p['max_sym_disc_step'])

    for pose_id, pose in enumerate(poses):

        for view_id, view in enumerate(p['views']):
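            # Compose the symmetry transformation 'pose' with the view pose:
            # x_cam = view.R * (pose.R * x + pose.t) + view.t.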

            R = view['R'].dot(pose['R'])
            t = view['R'].dot(pose['t']) + view['t']

            vis_rgb = ren.render_object(obj_id, R, t, fx, fy, cx, cy)['rgb']
Example #15
    scene_id=scene_id)
  scene_camera = inout.load_scene_camera(scene_camera_path)

  # Create folders for the output masks (if they do not exist yet).
  mask_dir_path = os.path.dirname(
    dp_split['mask_tpath'].format(
      scene_id=scene_id, im_id=0, gt_id=0))
  misc.ensure_dir(mask_dir_path)

  mask_visib_dir_path = os.path.dirname(
    dp_split['mask_visib_tpath'].format(
      scene_id=scene_id, im_id=0, gt_id=0))
  misc.ensure_dir(mask_visib_dir_path)

  # Initialize a renderer.
  misc.log('Initializing renderer...')
  width, height = dp_split['im_size']
  ren = renderer.create_renderer(
    width, height, renderer_type=p['renderer_type'], mode='depth')

  # Add object models.
  for obj_id in dp_model['obj_ids']:
    ren.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))

  im_ids = sorted(scene_gt.keys())
  for im_id in im_ids:

    if im_id % 100 == 0:
      misc.log(
        'Calculating masks - dataset: {} ({}, {}), scene: {}, im: {}'.format(
          p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id,
Example #16
            '{}: {:.3f}'.format(i, s)
            for i, s in scores['scene_recalls'].items()
        ])

        misc.log('')
        misc.log('GT count:           {:d}'.format(scores['gt_count']))
        misc.log('Target count:       {:d}'.format(scores['targets_count']))
        misc.log('TP count:           {:d}'.format(scores['tp_count']))
        misc.log('Recall:             {:.4f}'.format(scores['recall']))
        misc.log('Mean object recall: {:.4f}'.format(
            scores['mean_obj_recall']))
        misc.log('Mean scene recall:  {:.4f}'.format(
            scores['mean_scene_recall']))
        misc.log('Object recalls:\n{}'.format(obj_recalls_str))
        misc.log('Scene recalls:\n{}'.format(scene_recalls_str))
        misc.log('')

    return scores


if __name__ == '__main__':

    # AP test.
    tp = np.array([False, True, True, False, True, False])
    fp = np.logical_not(tp)
    tp_c = np.cumsum(tp).astype(np.float64)
    fp_c = np.cumsum(fp).astype(np.float64)
    rec = tp_c / tp.size
    pre = tp_c / (fp_c + tp_c)
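    # For this toy input, rec = [0, 1/6, 2/6, 2/6, 3/6, 3/6] and
    # pre = [0, 1/2, 2/3, 1/2, 3/5, 1/2]; calc_ap (defined elsewhere in this
    # module) evaluates the area under this precision-recall curve.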
    misc.log('Average Precision: ' + str(calc_ap(rec, pre)))
Example #17
if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']):
    renderer_modalities.append('depth')
renderer_mode = '+'.join(renderer_modalities)

# Create a renderer.
width, height = dp_split['im_size']
ren = renderer.create_renderer(width,
                               height,
                               p['renderer_type'],
                               mode=renderer_mode,
                               shading='flat')

# Load object models.
models = {}
for obj_id in dp_model['obj_ids']:
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    model_color = None
    if not p['vis_orig_color']:
        model_color = tuple(colors[(obj_id - 1) % len(colors)])
    ren.add_object(obj_id, model_path, surf_color=model_color)

for scene_id in scene_ids_curr:

    # Load scene info and ground-truth poses.
    scene_camera = inout.load_scene_camera(
        dp_split['scene_camera_tpath'].format(scene_id=scene_id))
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))

    # List of considered images.
parser.add_argument('--targets_filename', default=p['targets_filename'])
parser.add_argument('--datasets_path', default=p['datasets_path'])
args = parser.parse_args()

p['renderer_type'] = str(args.renderer_type)
p['result_filenames'] = args.result_filenames.split(',')
p['results_path'] = str(args.results_path)
p['eval_path'] = str(args.eval_path)
p['targets_filename'] = str(args.targets_filename)
p['datasets_path'] = str(args.datasets_path)

# Evaluation.
# ------------------------------------------------------------------------------
for result_filename in p['result_filenames']:

    misc.log('===========')
    misc.log('EVALUATING: {}'.format(result_filename))
    misc.log('===========')

    time_start = time.time()

    # Volume under recall surface (VSD) / area under recall curve (MSSD, MSPD).
    average_recalls = {}

    # Name of the result and the dataset.
    result_name = os.path.splitext(os.path.basename(result_filename))[0]
    dataset = str(result_name.split('_')[1].split('-')[0])

    # Calculate the average estimation time per image.
    ests = inout.load_bop_results(os.path.join(p['results_path'],
                                               result_filename),
Example #18
    'meshlab_script_path':
    os.path.join(os.path.dirname(os.path.realpath(__file__)),
                 'meshlab_scripts', r'remesh_for_eval_cell=0.25.mlx'),
}
################################################################################

# Load dataset parameters.
dp_model_in = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                              p['model_in_type'])

dp_model_out = dataset_params.get_model_params(p['datasets_path'],
                                               p['dataset'],
                                               p['model_out_type'])

# Attributes to save for the output models.
attrs_to_save = []

# Process models of all objects in the selected dataset.
for obj_id in dp_model_in['obj_ids']:
    misc.log('\n\n\nProcessing model of object {}...\n'.format(obj_id))

    model_in_path = dp_model_in['model_tpath'].format(obj_id=obj_id)
    model_out_path = dp_model_out['model_tpath'].format(obj_id=obj_id)

    misc.ensure_dir(os.path.dirname(model_out_path))

    misc.run_meshlab_script(p['meshlab_server_path'], p['meshlab_script_path'],
                            model_in_path, model_out_path, attrs_to_save)

misc.log('Done.')