Example no. 1
0
    # NOTE(review): fragment begins mid-function; `scene_camera_path`,
    # `dp_split`, `scene_id`, `p`, `dp_model` and `scene_gt` are bound
    # outside the visible scope — confirm against the full script.

    # Load per-image camera parameters for the current scene.
    scene_camera = inout.load_scene_camera(scene_camera_path)

    # Create folders for the output masks (if they do not exist yet).
    # The path template is instantiated with dummy im/gt ids only to
    # recover the directory component.
    mask_dir_path = os.path.dirname(dp_split['mask_tpath'].format(
        scene_id=scene_id, im_id=0, gt_id=0))
    misc.ensure_dir(mask_dir_path)

    mask_visib_dir_path = os.path.dirname(dp_split['mask_visib_tpath'].format(
        scene_id=scene_id, im_id=0, gt_id=0))
    misc.ensure_dir(mask_visib_dir_path)

    # Initialize a renderer.
    misc.log('Initializing renderer...')
    width, height = dp_split['im_size']
    # Depth-only rendering mode is requested here; presumably masks are
    # derived from rendered depth — the derivation is outside this view.
    ren = renderer.create_renderer(width,
                                   height,
                                   renderer_type=p['renderer_type'],
                                   mode='depth')

    # Add object models.
    for obj_id in dp_model['obj_ids']:
        ren.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))

    # Process images in deterministic (sorted) order.
    im_ids = sorted(scene_gt.keys())
    for im_id in im_ids:

        # Log progress every 100 images.
        if im_id % 100 == 0:
            misc.log(
                'Calculating masks - dataset: {} ({}, {}), scene: {}, im: {}'.
                format(p['dataset'], p['dataset_split'],
                       p['dataset_split_type'], scene_id, im_id))
        # NOTE(review): the loop body continues past the end of this fragment.
Example no. 2
0
# NOTE(review): fragment begins mid-script; `p`, `scene_ids_curr`,
# `dp_split`, `dp_model`, `colors`, `misc` and `renderer` are defined
# outside this view.

# Restrict processing to the requested scene ids, if any were given.
if p['scene_ids']:
    scene_ids_curr = set(scene_ids_curr).intersection(p['scene_ids'])

# Rendering mode.
# Depth is needed either for depth-difference visualization or to
# resolve visibility when rendering RGB.
renderer_modalities = []
if p['vis_rgb']:
    renderer_modalities.append('rgb')
if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']):
    renderer_modalities.append('depth')
renderer_mode = '+'.join(renderer_modalities)

# Create a renderer.
width, height = dp_split['im_size']
ren = renderer.create_renderer(width,
                               height,
                               p['renderer_type'],
                               mode=renderer_mode,
                               shading='flat')

# Load object models.
models = {}
for obj_id in dp_model['obj_ids']:
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    # Use a distinct surface color per object unless the original model
    # color was requested; colors cycle when there are more objects than
    # entries in `colors`.
    model_color = None
    if not p['vis_orig_color']:
        model_color = tuple(colors[(obj_id - 1) % len(colors)])
    ren.add_object(obj_id, model_path, surf_color=model_color)

# NOTE(review): the body of this loop is cut off at the end of the fragment.
for scene_id in scene_ids_curr:
Example no. 3
0
# NOTE(review): fragment begins mid-script; `ssaa_fact`, `dp_camera`,
# `renderer_type`, `shading`, `ambient_weight`, `obj_ids`, `dp_model`
# and `renderer` are defined outside this view.

# RGB is rendered at a resolution scaled by `ssaa_fact` (super-sampling
# anti-aliasing); the intrinsic matrix is scaled by the same factor.
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp_camera['im_size']]
K_rgb = dp_camera['K'] * ssaa_fact

# Intrinsic parameters for RGB rendering.
fx_rgb, fy_rgb, cx_rgb, cy_rgb =\
  K_rgb[0, 0], K_rgb[1, 1], K_rgb[0, 2], K_rgb[1, 2]

# Intrinsic parameters for depth rendering (original, un-scaled camera).
K = dp_camera['K']
fx_d, fy_d, cx_d, cy_d = K[0, 0], K[1, 1], K[0, 2], K[1, 2]

# Create the RGB renderer.
width_rgb, height_rgb = im_size_rgb[0], im_size_rgb[1]
ren_rgb = renderer.create_renderer(width_rgb,
                                   height_rgb,
                                   renderer_type,
                                   mode='rgb',
                                   shading=shading)
ren_rgb.set_light_ambient_weight(ambient_weight)

# Add object models to the RGB renderer.
for obj_id in obj_ids:
    ren_rgb.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))

# Create the depth renderer at the original image size.
# NOTE(review): the stray comma after `height_depth,` is valid Python
# (tuple target list) but looks like a typo — worth cleaning upstream.
width_depth, height_depth, = dp_camera['im_size'][0], dp_camera['im_size'][1]
ren_depth = renderer.create_renderer(width_depth,
                                     height_depth,
                                     renderer_type,
                                     mode='depth')
Example no. 4
0
  # NOTE(review): fragment begins mid-function; `p`, `dataset`,
  # `dp_split`, `colors`, `result_fname`, `dataset_params`, `config`,
  # `inout`, `misc` and `renderer` are bound outside the visible scope.

  # Pose estimates are visualized against the evaluation models.
  model_type = 'eval'
  dp_model = dataset_params.get_model_params(
    p['datasets_path'], dataset, model_type)

  # Rendering mode.
  # Depth is needed either for depth-difference visualization or to
  # resolve visibility when rendering RGB.
  renderer_modalities = []
  if p['vis_rgb']:
    renderer_modalities.append('rgb')
  if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']):
    renderer_modalities.append('depth')
  renderer_mode = '+'.join(renderer_modalities)

  # Create a renderer.
  width, height = dp_split['im_size']
  ren = renderer.create_renderer(
    width, height, p['renderer_type'], mode=renderer_mode)

  # Load object models.
  models = {}
  for obj_id in dp_model['obj_ids']:
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    # Distinct color per object unless the original model color was
    # requested; colors cycle modulo the palette length.
    model_color = None
    if not p['vis_orig_color']:
      model_color = tuple(colors[(obj_id - 1) % len(colors)])
    ren.add_object(obj_id, model_path, surf_color=model_color)

  # Load pose estimates.
  misc.log('Loading pose estimates...')
  ests = inout.load_bop_results(
    os.path.join(config.results_path, result_fname))
# NOTE(review): this run of top-level statements appears to be a second,
# separately pasted snippet (indentation differs from the lines above);
# `p`, `dp_split`, `dataset_params`, `inout`, `misc` and `renderer` are
# defined outside this view.

# Use CAD models for T-LESS; the dataset-default model type otherwise.
model_type = None
if p['dataset'] == 'tless':
    model_type = 'cad'
dp_model = dataset_params.get_model_params(p['datasets_path'], p['dataset'],
                                           model_type)

# Initialize a renderer.
misc.log('Initializing renderer...')

# The renderer has a larger canvas for generation of masks of truncated objects.
# The canvas is 3x the image size, with the image centered: one image
# width/height of margin on each side (hence the cx/cy offsets).
im_width, im_height = dp_split['im_size']
ren_width, ren_height = 3 * im_width, 3 * im_height
ren_cx_offset, ren_cy_offset = im_width, im_height
ren = renderer.create_renderer(ren_width,
                               ren_height,
                               p['renderer_type'],
                               mode='depth')

# Add all object models to the renderer.
for obj_id in dp_model['obj_ids']:
    model_fpath = dp_model['model_tpath'].format(obj_id=obj_id)
    ren.add_object(obj_id, model_fpath)

# Process every scene present in the dataset split.
scene_ids = dataset_params.get_present_scene_ids(dp_split)
for scene_id in scene_ids:

    # Load scene info and ground-truth poses.
    scene_camera = inout.load_scene_camera(
        dp_split['scene_camera_tpath'].format(scene_id=scene_id))
    scene_gt = inout.load_scene_gt(
        dp_split['scene_gt_tpath'].format(scene_id=scene_id))
    # NOTE(review): the loop body continues past the end of this fragment.