scene_gt[im_id_out] = []
for pose in poses:
    R_m2c = pose['R']
    t_m2c = pose['t']
    t_m2c *= 1000.0  # Convert to [mm]

    # Transform the GT pose (to compensate for the transformation
    # of the object models)
    t_m2c = t_m2c + R_m2c.dot(t_model)

    # Get the 2D bounding box of the object model at the ground-truth pose
    obj_bb = misc.calc_pose_2d_bbox(model, par.cam['im_size'],
                                    par.cam['K'], R_m2c, t_m2c)

    # Visualisation
    if False:
        ren_rgb = renderer.render(model, par.cam['im_size'], par.cam['K'],
                                  R_m2c, t_m2c, mode='rgb')
        vis_rgb = 0.4 * rgb.astype(np.float32) + 0.6 * ren_rgb.astype(np.float32)
        vis_rgb = vis_rgb.astype(np.uint8)
        vis_rgb = misc.draw_rect(vis_rgb, obj_bb)
        plt.imshow(vis_rgb)
        plt.show()

    scene_gt[im_id_out].append({
        'obj_id': obj_id,
        'cam_R_m2c': R_m2c.flatten().tolist(),
        'cam_t_m2c': t_m2c.flatten().tolist(),
        'obj_bb': [int(x) for x in obj_bb]
    })
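# Why the translation is updated above: if the saved object models were
# shifted by -t_model (X_new = X_old - t_model), then the same camera-space
# point satisfies R*X_old + t = R*X_new + (t + R*t_model), so only the
# translation of the GT pose changes. A minimal standalone check of this
# identity (all values below are hypothetical, not from the dataset):
import numpy as np

R = np.eye(3)                               # any rotation matrix works here
t = np.array([[10.0], [20.0], [500.0]])     # original translation [mm]
t_model = np.array([[1.0], [-2.0], [3.0]])  # hypothetical model shift

X_old = np.array([[5.0], [6.0], [7.0]])     # a vertex in the old model frame
X_new = X_old - t_model                     # the same vertex after the shift

assert np.allclose(R.dot(X_old) + t,
                   R.dot(X_new) + (t + R.dot(t_model)))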
                                 elev_range)
print('Sampled views: ' + str(len(views)))
view_sampler.save_vis(out_views_vis_mpath.format(str(radius)),
                      views, views_level)

# Render the object model from all the sampled views
for view_id, view in enumerate(views):
    if view_id % 10 == 0:
        print('obj,radius,view: ' + str(obj_id) + ',' +
              str(radius) + ',' + str(view_id))

    # Render the depth image
    depth = renderer.render(model, par.cam['im_size'], par.cam['K'],
                            view['R'], view['t'], clip_near, clip_far,
                            mode='depth')

    # Convert the depth image to the same units as used for the test images
    depth /= par.cam['depth_scale']

    # Render the RGB image
    rgb = renderer.render(model, im_size_rgb, K_rgb, view['R'], view['t'],
                          clip_near, clip_far,
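# Why the division above is needed: in this format, the saved depth images
# are defined so that stored_value * depth_scale gives depth in [mm], while
# the renderer outputs [mm] directly, hence the division by depth_scale
# before saving. A minimal standalone sketch of the round trip (the
# depth_scale value below is hypothetical and dataset-dependent):
import numpy as np

depth_scale = 0.1                      # hypothetical: [mm] per stored unit
depth_mm = np.array([[123.4, 567.8]])  # rendered depth in [mm]

stored = np.round(depth_mm / depth_scale).astype(np.uint16)
recovered_mm = stored.astype(np.float32) * depth_scale

assert np.allclose(recovered_mm, depth_mm, atol=depth_scale)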
if pose['R'].size != 0 and pose['t'].size != 0:
    # Transform the GT pose
    R_m2c = pose['R'].dot(R_conv)
    t_m2c = pose['t'] * 1000  # Convert from [m] to [mm]

    # Get the 2D bounding box of the object model at the ground-truth pose
    obj_bb = misc.calc_pose_2d_bbox(model, par.cam.im_size, par.cam.K,
                                    R_m2c, t_m2c)

    # Visualisation
    if False:
        rgb = inout.read_im(rgb_mpath.format(im_id, im_id))
        ren_rgb = renderer.render(model, par.cam.im_size, par.cam.K,
                                  R_m2c, t_m2c, mode='rgb')
        vis_rgb = 0.4 * rgb.astype(np.float32) + 0.6 * ren_rgb.astype(np.float32)
        vis_rgb = vis_rgb.astype(np.uint8)
        vis_rgb = misc.draw_rect(vis_rgb, obj_bb)
        plt.imshow(vis_rgb)
        plt.show()

    scene_gt.setdefault(im_id, []).append({
        'obj_id': obj_id,
        'cam_R_m2c': R_m2c.flatten().tolist(),
        'cam_t_m2c':
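# Why R_conv is composed with the GT rotation above: the source annotations
# use a different model/camera axis convention, and multiplying by a fixed
# rotation expresses the pose in the target convention. A minimal standalone
# sketch (the 180-degree rotation about the x-axis below is only a
# hypothetical example of such a convention fix-up):
import numpy as np

R_conv = np.array([[1.0,  0.0,  0.0],
                   [0.0, -1.0,  0.0],
                   [0.0,  0.0, -1.0]])

R_src = np.eye(3)          # a GT rotation in the source convention
R_m2c = R_src.dot(R_conv)  # the same pose in the target convention
assert np.allclose(R_m2c.dot(R_m2c.T), np.eye(3))  # still orthonormal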
# np.float is a removed NumPy alias; use an explicit dtype instead
ren_depth = np.zeros(depth.shape, np.float64)

gt_ids_curr = range(len(scene_gt[im_id]))
if gt_ids:
    gt_ids_curr = set(gt_ids_curr).intersection(gt_ids)
for gt_id in gt_ids_curr:
    gt = scene_gt[im_id][gt_id]
    model = models[gt['obj_id']]
    K = scene_info[im_id]['cam_K']
    R = gt['cam_R_m2c']
    t = gt['cam_t_m2c']

    # Rendering
    if vis_rgb:
        m_rgb = renderer.render(model, im_size, K, R, t, mode='rgb')
    if vis_depth or (vis_rgb and vis_rgb_resolve_visib):
        m_depth = renderer.render(model, im_size, K, R, t, mode='depth')

        # Get a mask of the surface parts that are closer than the
        # surfaces rendered so far
        visible_mask = np.logical_or(ren_depth == 0, m_depth < ren_depth)
        mask = np.logical_and(m_depth != 0, visible_mask)
        ren_depth[mask] = m_depth[mask].astype(ren_depth.dtype)
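# The update above is a software z-buffer: zeros mean "no surface yet", and
# a newly rendered depth wins only where it lies in front of everything
# rendered so far. A minimal standalone sketch of the same logic on
# hypothetical 1x3 depth maps:
import numpy as np

ren_depth = np.array([[0.0, 300.0, 200.0]])  # accumulated depth buffer
m_depth = np.array([[250.0, 0.0, 150.0]])    # depth map of the next object

visible_mask = np.logical_or(ren_depth == 0, m_depth < ren_depth)
mask = np.logical_and(m_depth != 0, visible_mask)
ren_depth[mask] = m_depth[mask]

# Pixel 0 was empty, pixel 1 has no new surface, pixel 2 is overtaken
assert np.array_equal(ren_depth, np.array([[250.0, 300.0, 150.0]]))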