                      im_size_rgb, K_rgb, view['R'], view['t'],
                      clip_near, clip_far, texture=model_texture,
                      ambient_weight=ambient_weight, shading=shading,
                      mode='rgb')
rgb = cv2.resize(rgb, par.cam['im_size'], interpolation=cv2.INTER_AREA)

# Save the rendered images
inout.write_im(out_rgb_mpath.format(obj_id, im_id), rgb)
inout.write_depth(out_depth_mpath.format(obj_id, im_id), depth)

# Get 2D bounding box of the object model at the ground truth pose
ys, xs = np.nonzero(depth > 0)
obj_bb = misc.calc_2d_bbox(xs, ys, par.cam['im_size'])

obj_info[im_id] = {
    'cam_K': par.cam['K'].flatten().tolist(),
    'view_level': int(views_level[view_id]),
    # 'sphere_radius': float(radius)
}

obj_gt[im_id] = [{
    'cam_R_m2c': view['R'].flatten().tolist(),
    'cam_t_m2c': view['t'].flatten().tolist(),
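# --- Illustration (not part of the toolkit) ----------------------------------
# misc.calc_2d_bbox is defined elsewhere; the stand-alone sketch below shows
# what such a helper can compute, assuming it returns an axis-aligned
# (x, y, width, height) box over the given pixel coordinates, clipped to the
# image. The function name and return convention here are assumptions.
import numpy as np

def calc_2d_bbox_sketch(xs, ys, im_size):
    im_w, im_h = im_size
    x1, x2 = max(int(xs.min()), 0), min(int(xs.max()), im_w - 1)
    y1, y2 = max(int(ys.min()), 0), min(int(ys.max()), im_h - 1)
    return [x1, y1, x2 - x1 + 1, y2 - y1 + 1]

# Example: a depth map with a single non-zero pixel at (x=10, y=5).
# depth_demo = np.zeros((480, 640)); depth_demo[5, 10] = 1.0
# ys_d, xs_d = np.nonzero(depth_demo > 0)
# calc_2d_bbox_sketch(xs_d, ys_d, (640, 480))  # -> [10, 5, 1, 1]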
# 1) Translate the bounding box center to the origin
t_model = bbox_cens[obj_id - 1, :].reshape((3, 1))

im_id_out = 0
for im_id in im_ids:
    # if im_id % 10 == 0:
    print('scene,view: ' + str(scene_id) + ',' + str(im_id))

    # Load the RGB and depth image
    rgb = inout.read_im(rgb_in_mpath.format(scene_id, im_id))
    depth = inout.read_depth(depth_in_mpath.format(scene_id, im_id))
    depth *= 10.0  # Convert depth map to [100um]

    # Save the RGB and depth image
    inout.write_im(rgb_out_mpath.format(scene_id, im_id_out), rgb)
    inout.write_depth(depth_out_mpath.format(scene_id, im_id_out), depth)

    scene_info[im_id_out] = {
        'cam_K': par.cam['K'].flatten().tolist()
    }

    # Process the GT poses
    poses = load_tejani_poses(pose_mpath.format(scene_id, im_id))
    scene_gt[im_id_out] = []
    for pose in poses:
        R_m2c = pose['R']
        t_m2c = pose['t']
        t_m2c *= 1000.0  # Convert to [mm]
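# --- Illustration (not part of the conversion script) -------------------------
# The excerpt ends before t_model is applied. If the object model was shifted
# by -t_model so that its bounding-box center sits at the origin (step 1 above),
# the ground-truth pose stays consistent when the shift is folded into the
# translation: R*(X - c) + (t + R*c) = R*X + t. The helper below is a sketch of
# that adjustment, not necessarily the exact code used in the script.
import numpy as np

def fold_model_shift_into_pose(R_m2c, t_m2c, t_model):
    # t_model: (3, 1) bounding-box center of the original model.
    return t_m2c + R_m2c.dot(t_model)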
    mask = np.logical_and(m_depth != 0, visible_mask)
    ren_depth[mask] = m_depth[mask].astype(ren_depth.dtype)

    # Combine the RGB renderings
    if vis_rgb:
        if vis_rgb_resolve_visib:
            ren_rgb[mask] = m_rgb[mask].astype(ren_rgb.dtype)
        else:
            ren_rgb += m_rgb.astype(ren_rgb.dtype)

# Save RGB visualization
if vis_rgb:
    vis_im_rgb = 0.4 * rgb.astype(np.float64) + 0.6 * ren_rgb
    vis_im_rgb[vis_im_rgb > 255] = 255
    inout.write_im(vis_rgb_mpath.format(scene_id, im_id),
                   vis_im_rgb.astype(np.uint8))

# Save image of depth differences
if vis_depth:
    # Calculate the depth difference at pixels where both depth maps are valid
    valid_mask = (depth > 0) * (ren_depth > 0)
    depth_diff = valid_mask * (depth - ren_depth.astype(np.float64))

    plt.matshow(depth_diff)
    plt.axis('off')
    plt.title('measured - GT depth [mm]')
    plt.colorbar()
    plt.savefig(vis_depth_mpath.format(scene_id, im_id), pad_inches=0,
                bbox_inches='tight')
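# --- Illustration (not part of the visualization script) ----------------------
# visible_mask, used above when combining the per-object depth renderings, is
# computed earlier in the script. A minimal z-buffer-style sketch, assuming the
# current object counts as visible wherever the combined depth buffer is still
# empty or the object lies closer to the camera than what was rendered so far:
import numpy as np

def estimate_visible_mask(m_depth, ren_depth):
    # m_depth: depth rendering of the current object; ren_depth: combined depth
    # of the objects processed so far (0 where nothing has been rendered yet).
    return np.logical_or(ren_depth == 0, m_depth < ren_depth)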