Example #1
    # parser.add_argument("depth_image_filename", type=str, help="Sequence of the dataset")
    args = parser.parse_args()
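    # Hard-coded inputs for this example: dataset and mask paths, the scene to
    # process and a flag that toggles model rendering.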
    args.ho3d_path = '/home/tpatten/v4rtemp/datasets/HandTracking/HO3D_v2'
    args.mask_dir = '/home/tpatten/Data/Hands/HO3D_V2/HO3D_v2_segmentations_rendered/'
    args.scene = 'ABF10'
    args.model_render = False

    ren = None
    if args.model_render:
        width = 640
        height = 480
        renderer_modalities = ['rgb', 'depth']
        renderer_mode = '+'.join(renderer_modalities)
        ren = renderer.create_renderer(width, height, 'python', mode=renderer_mode, shading='flat')
        add_objects_to_renderer(ren)

    frame_ids = sorted(os.listdir(os.path.join(args.ho3d_path, data_split, args.scene, 'rgb')))
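    # Skip the first 500 frames and process the rest of the sequence.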
    for i in range(500, len(frame_ids)):
        # Get the id
        frame_id = frame_ids[i].split('.')[0]

        # Load the image
        depth, mask_obj, mask_hand, mask_scene = load_data(args.ho3d_path, args.mask_dir, args.scene, frame_id)

        # Load the object pose
        obj_id, object_mesh, object_pose, camK = load_object_and_pose(args.ho3d_path, args.scene, frame_id)

        if args.model_render:
            rendered_depth = render_depth(ren, obj_id, object_pose, camK)
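
add_objects_to_renderer and render_depth are helper functions defined elsewhere in the script. A minimal sketch of what they might look like, assuming the bop_toolkit_lib renderer API (add_object / render_object); obj_ids and model_tpath are hypothetical stand-ins for however the script tracks its models:

def add_objects_to_renderer(ren):
    # Hypothetical helper: obj_ids and model_tpath are stand-ins, not part of
    # the snippet above.
    for obj_id in obj_ids:
        ren.add_object(obj_id, model_tpath.format(obj_id=obj_id))

def render_depth(ren, obj_id, object_pose, camK):
    # Split the 4x4 object pose into rotation and translation and render.
    fx, fy, cx, cy = camK[0, 0], camK[1, 1], camK[0, 2], camK[1, 2]
    R = object_pose[:3, :3]
    t = object_pose[:3, 3].reshape((3, 1))
    rendering = ren.render_object(obj_id, R, t, fx, fy, cx, cy)
    return rendering['depth']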
Example #2
  models_info = inout.load_json(
    dp_model['models_info_path'], keys_to_int=True)

  # Get sets of symmetry transformations for the object models.
  models_sym = None
  if p['error_type'] in ['mssd', 'mspd']:
    models_sym = {}
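    # Each set is a list of {'R': 3x3, 't': 3x1} transformations, with
    # continuous symmetries discretized by 'max_sym_disc_step'.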
    for obj_id in dp_model['obj_ids']:
      models_sym[obj_id] = misc.get_symmetry_transformations(
        models_info[obj_id], p['max_sym_disc_step'])

  # Initialize a renderer.
  ren = None
  if p['error_type'] in ['vsd', 'cus']:
    misc.log('Initializing renderer...')
    width, height = dp_split['im_size']
    ren = renderer.create_renderer(
      width, height, p['renderer_type'], mode='depth')
    for obj_id in dp_model['obj_ids']:
      ren.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))

  # Load the estimation targets.
  targets = inout.load_json(
    os.path.join(dp_split['base_path'], p['targets_filename']))

  # Organize the targets by scene, image and object.
  misc.log('Organizing estimation targets...')
  targets_org = {}
  for target in targets:
    targets_org.setdefault(target['scene_id'], {}).setdefault(
      target['im_id'], {})[target['obj_id']] = target

  # Load pose estimates.
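
The nested setdefault calls above make each target addressable as targets_org[scene_id][im_id][obj_id]. A minimal standalone sketch with made-up entries in the BOP targets format:

targets = [
  {'scene_id': 1, 'im_id': 3, 'obj_id': 5, 'inst_count': 1},
  {'scene_id': 1, 'im_id': 3, 'obj_id': 6, 'inst_count': 1},
]
targets_org = {}
for target in targets:
  targets_org.setdefault(target['scene_id'], {}).setdefault(
    target['im_id'], {})[target['obj_id']] = target

# targets_org[1][3][5] == {'scene_id': 1, 'im_id': 3, 'obj_id': 5, 'inst_count': 1}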
Example #3
# Image size and K for the RGB image (potentially with SSAA).
im_size_rgb = [int(round(x * float(ssaa_fact))) for x in dp_camera['im_size']]
K_rgb = dp_camera['K'] * ssaa_fact

# Intrinsic parameters for RGB rendering.
fx_rgb, fy_rgb, cx_rgb, cy_rgb =\
  K_rgb[0, 0], K_rgb[1, 1], K_rgb[0, 2], K_rgb[1, 2]

# Intrinsic parameters for depth rendering.
K = dp_camera['K']
fx_d, fy_d, cx_d, cy_d = K[0, 0], K[1, 1], K[0, 2], K[1, 2]

# Create RGB and depth renderers (two are needed because the RGB image has a
# higher resolution when SSAA is used).
width_rgb, height_rgb = im_size_rgb[0], im_size_rgb[1]
ren_rgb = renderer.create_renderer(
  width_rgb, height_rgb, renderer_type, mode='rgb', shading=shading)
ren_rgb.set_light_ambient_weight(ambient_weight)

width_depth, height_depth = dp_camera['im_size'][0], dp_camera['im_size'][1]
ren_depth = renderer.create_renderer(
  width_depth, height_depth, renderer_type, mode='depth')

# Render training images for all object models.
for obj_id in obj_ids:

  # Add the current object model to the renderer.
  ren_rgb.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))
  ren_depth.add_object(obj_id, dp_model['model_tpath'].format(obj_id=obj_id))

  # Prepare output folders.
  misc.ensure_dir(os.path.dirname(out_rgb_tpath.format(
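
When SSAA is used, the RGB image is rendered at im_size_rgb and must be downsampled back to the native dp_camera['im_size'] before it is saved. A minimal sketch of that step, assuming OpenCV; rgb stands for the (height_rgb, width_rgb, 3) array produced by ren_rgb:

import cv2

# Downsample the SSAA rendering back to the native camera resolution.
# cv2.resize expects (width, height); INTER_AREA averages the extra samples.
rgb_native = cv2.resize(
  rgb, (width_depth, height_depth), interpolation=cv2.INTER_AREA)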
Example #4
if p['scene_ids']:
    scene_ids_curr = set(scene_ids_curr).intersection(p['scene_ids'])

# Rendering mode.
renderer_modalities = []
if p['vis_rgb']:
    renderer_modalities.append('rgb')
if p['vis_depth_diff'] or (p['vis_rgb'] and p['vis_rgb_resolve_visib']):
    renderer_modalities.append('depth')
renderer_mode = '+'.join(renderer_modalities)

# Create a renderer.
width, height = dp_split['im_size']
ren = renderer.create_renderer(width,
                               height,
                               p['renderer_type'],
                               mode=renderer_mode,
                               shading='flat')

# Load object models.
models = {}
for obj_id in dp_model['obj_ids']:
    misc.log('Loading 3D model of object {}...'.format(obj_id))
    model_path = dp_model['model_tpath'].format(obj_id=obj_id)
    model_color = None
    if not p['vis_orig_color']:
        model_color = tuple(colors[(obj_id - 1) % len(colors)])
    ren.add_object(obj_id, model_path, surf_color=model_color)

scene_ids = dataset_params.get_present_scene_ids(dp_split)
for scene_id in scene_ids:
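
The colors palette referenced above is defined earlier in the script and is not part of the snippet. A minimal sketch of the per-object coloring and a single render call through the renderer created above, with a hypothetical palette and placeholder pose and intrinsics:

import numpy as np

# Hypothetical palette; the actual script loads its own color table.
colors = [(0.9, 0.1, 0.1), (0.1, 0.9, 0.1), (0.1, 0.1, 0.9)]

# Same 1-based indexing as above.
obj_id = 5
model_color = tuple(colors[(obj_id - 1) % len(colors)])

# Placeholder ground-truth pose and intrinsics.
R = np.eye(3)
t = np.array([[0.0], [0.0], [500.0]])  # Object 0.5 m in front of the camera.
K = np.array([[572.4, 0.0, 325.3],
              [0.0, 573.6, 242.0],
              [0.0, 0.0, 1.0]])

fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
rendering = ren.render_object(obj_id, R, t, fx, fy, cx, cy)
rgb_vis = rendering['rgb']  # Present because 'rgb' is in renderer_mode.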