def generate_images(self, salient_edge_set, n_samples=1):
    """Generate depth image, normal image, and binary edge mask tuples.

    Parameters
    ----------
    salient_edge_set : SalientEdgeSet
        A salient edge set to generate images of.
    n_samples : int
        The number of samples to generate.

    Returns
    -------
    depth_ims : (n,) list of perception.DepthImage
        Randomly-rendered depth images of object.
    normal_ims : (n,) list of perception.PointCloudImage
        Normals for the given image
    edge_masks : (n,) list of perception.BinaryImage
        Masks for pixels on the salient edges of the object.
    """
    # Stable poses of the mesh, with probabilities normalized to sum to 1.
    obj_mesh = salient_edge_set.mesh
    pose_mats, pose_probs = obj_mesh.compute_stable_poses()
    pose_probs = pose_probs / sum(pose_probs)

    # A single scene object, re-posed once per sample.
    scene = Scene()
    scene_obj = SceneObject(obj_mesh, RigidTransform(from_frame='obj', to_frame='world'))
    scene.add_object('object', scene_obj)

    samples = []
    for _ in range(n_samples):
        # Draw a stable pose at random and apply it to the object.
        chosen = np.random.choice(np.arange(len(pose_probs)), p=pose_probs)
        mat = pose_mats[chosen]
        T_obj_world = RigidTransform(mat[:3, :3], mat[:3, 3],
                                     from_frame='obj', to_frame='world')
        scene_obj.T_obj_world = T_obj_world

        # Render a depth image from a random camera over the worksurface.
        ws_cfg = self._config['worksurface_rv_config']
        rv = UniformPlanarWorksurfaceImageRandomVariable(
            'object', scene, [RenderMode.DEPTH], frame='camera', config=ws_cfg)
        render_sample = rv.sample()
        depth_im = render_sample.renders[RenderMode.DEPTH]

        # Recover the sampled camera's intrinsics and the object's pose
        # in the camera frame.
        cam = render_sample.camera
        intrinsics = CameraIntrinsics(frame='camera', fx=cam.focal, fy=cam.focal,
                                      cx=cam.cx, cy=cam.cy, skew=0.0,
                                      height=ws_cfg['im_height'],
                                      width=ws_cfg['im_width'])
        T_obj_camera = cam.T_camera_world.inverse().dot(T_obj_world)

        edge_mask = self._compute_edge_mask(salient_edge_set, depth_im,
                                            intrinsics, T_obj_camera)
        normal_im = intrinsics.deproject_to_image(depth_im).normal_cloud_im()
        samples.append((depth_im, normal_im, edge_mask))

    depth_ims = [s[0] for s in samples]
    normal_ims = [s[1] for s in samples]
    edge_masks = [s[2] for s in samples]
    return depth_ims, normal_ims, edge_masks
def fine_grid_search(pc, indices, model, shadow, splits):
    """Fine grid search for the best shadow placement over the worksurface.

    Like the coarse search, but each shadow-sized cell is subdivided
    `splits` times per axis, so adjacent candidate cells overlap.
    Returns the (row, col) of the best-scoring cell and the scene used.
    """
    length, width, height = shadow.extents
    split_size = max(length, width)
    step_size = split_size / splits

    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]          # unused here; kept for parity with sibling searches
    plane_normal = model[0:3]   # unused here; kept for parity with sibling searches

    # Inverted binary mask of the support plane as seen by the camera.
    plane_data = get_plane_data(pc, indices)
    plane_cloud = PointCloud(plane_data.T, pc.frame)
    plane_cloud = cp.inverse().apply(plane_cloud)
    plane_depth = ci.project_to_image(plane_cloud)
    plane_mask = plane_depth.to_binary()
    plane_mask = plane_mask.inverse()

    # Scene holding only the shadow volume.
    scene = Scene()
    scene.camera = VirtualCamera(ci, cp)
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    numx = (int(np.round((maxes[0]-mins[0])/split_size)) - 1) * splits + 1
    numy = (int(np.round((maxes[1]-mins[1])/split_size)) - 1) * splits + 1
    scores = np.zeros((numx, numy))
    for row in range(numx):
        x = mins[0] + row*step_size
        for col in range(numy):
            y = mins[1] + col*step_size
            # Score each candidate transform; the pose is restored after each.
            for tow in transforms(pc, pc_data, shadow, x, y, x+split_size, y+split_size, 8, orig_tow):
                shadow_obj.T_obj_world = tow
                scores[row][col] = under_shadow(scene, plane_mask)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))

    # Visualize the winning cell (cyan) against the rest of the cloud (magenta).
    vis3d.figure()
    x = mins[0] + best[0]*step_size
    y = mins[1] + best[1]*step_size
    in_cell = np.where((x < pc_data[:,0]) & (pc_data[:,0] < x+split_size) & (y < pc_data[:,1]) & (pc_data[:,1] < y+split_size))[0]
    vis3d.points(pc_data[in_cell], color=(0,1,1))
    vis3d.points(pc_data[np.setdiff1d(np.arange(len(pc_data)), in_cell)], color=(1,0,1))
    vis3d.show()
    return best, scene
def sample_scene_objs(self):
    """Yield (model_obj, grip_obj, model_name) triples for each sampled model.

    The grip object wraps only the model's top surface; both share the
    sampled material and an identity pose (meshes arrive pre-transformed).
    """
    identity_pose = RigidTransform(
        rotation=np.eye(3),
        translation=np.array([0.0, 0.0, 0.0]),
        from_frame='obj',
        to_frame='world')
    for mesh, model_name in self.sample_models_set():
        material = self.sample_model_material()
        full_obj = SceneObject(mesh, identity_pose, material)
        top_obj = SceneObject(get_top_surface(mesh), identity_pose, material)
        yield full_obj, top_obj, model_name
def do_stuff(pc, indices, model, rotated_shadow, img_file):
    """Debug visualization: shadow render mask, plane mask, and their overlap.

    Shows three figures in sequence; `model` and `img_file` are currently unused.
    """
    scene = Scene()
    scene.camera = VirtualCamera(ci, cp)

    # Render the shadow and show its binary mask. (Works.)
    shadow_obj = SceneObject(rotated_shadow)
    scene.add_object('shadow', shadow_obj)
    shadow_depth = scene.wrapped_render([RenderMode.DEPTH])[0]
    shadow_mask = shadow_depth.to_binary()
    vis2d.figure()
    vis2d.imshow(shadow_mask)
    vis2d.show()

    # Project the plane points and show their binary mask. (Doesn't work yet.)
    plane_points = pc.data.T[indices]
    plane_cloud = PointCloud(plane_points.T, pc.frame)
    plane_depth = ci.project_to_image(plane_cloud)
    plane_mask = plane_depth.to_binary()
    vis2d.figure()
    vis2d.imshow(plane_mask)
    vis2d.show()

    # Intersect the two masks and show the result. (Works.)
    overlap = plane_mask.mask_binary(shadow_mask)
    vis2d.figure()
    vis2d.imshow(overlap)
    vis2d.show()
def sample_ground_obj(self):
    """Build the ground plane as a SceneObject placed at the world origin."""
    box_mesh = trimesh.creation.box(self.GROUND_BOUND)
    origin_pose = RigidTransform(
        rotation=np.eye(3),
        translation=np.array([0.0, 0.0, 0.0]),
        from_frame='obj',
        to_frame='world')
    material = self.sample_ground_material()
    return SceneObject(box_mesh, origin_pose, material)
def plot3d(points, color=(0.5, 0.5, 0.5), tube_radius=0.005, n_components=30, name=None):
    """Plot a 3d curve through a set of points using tubes.

    Parameters
    ----------
    points : (n,3) float
        A series of 3D points that define a curve in space.
    color : (3,) float
        The color of the tube.
    tube_radius : float
        Radius of tube representing curve.
    n_components : int
        The number of edges in each polygon representing the tube.
    name : str
        A name for the object to be added.
    """
    curve = np.asanyarray(points)
    material = MaterialProperties(color=np.array(color),
                                  k_a=0.5, k_d=0.3, k_s=0.0, alpha=10.0,
                                  smooth=True)

    # Cross-section: a regular n_components-gon of radius tube_radius,
    # built by repeatedly rotating one spoke.
    spoke = np.array([0, 1]) * tube_radius
    theta = np.pi * 2.0 / n_components
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])
    ring = []
    for _ in range(n_components):
        ring.append(spoke)
        spoke = np.dot(rotation, spoke)
    cross_section = Polygon(ring)

    # Sweep the cross-section along the curve to form the tube mesh.
    tube_mesh = trimesh.creation.sweep_polygon(cross_section, curve)
    scene_obj = SceneObject(tube_mesh, material=material)
    if name is None:
        name = str(uuid.uuid4())
    Visualizer3D._scene.add_object(name, scene_obj)
def create_scene(camera, workspace_objects):
    """Build a Scene containing a virtual camera (and its geometry, if any).

    `workspace_objects` is accepted for interface compatibility but not
    used in this block.
    """
    scene = Scene()
    scene.camera = VirtualCamera(camera.intrinsics, camera.pose)

    # Grey matte material for the camera body geometry.
    cam_material = MaterialProperties(
        color=np.array([0.3,0.3,0.3]),
        k_a=0.5, k_d=0.3, k_s=0.0, alpha=10.0)
    if camera.geometry is not None:
        cam_obj = SceneObject(camera.geometry, camera.pose.copy(), cam_material)
        scene.add_object(camera.name, cam_obj)
    return scene
def get_sim_point_cloud(scene, grasp_obj):
    """Render a simulated, box-masked point cloud of the graspable object.

    Parameters
    ----------
    scene : Scene
        Scene to place the graspable object into (any previous 'obj' is removed).
    grasp_obj : graspable object
        Provides `mesh`, `T_obj_world`, `key`, and optionally `material_properties`.

    Returns
    -------
    PointCloud
        Simulated point cloud masked to the workspace box, for ICP matching.
    """
    # Remove the previous graspable object, if one is present.
    scene_objs = scene.objects.copy()
    if 'obj' in scene_objs:
        scene.remove_object('obj')

    # BUG FIX: the original did `mp = hasattr(grasp_obj, 'material_properties')`,
    # which is a bool — when the attribute existed, `True` was passed to
    # SceneObject instead of the actual material. Fetch the attribute itself.
    mp = getattr(grasp_obj, 'material_properties', None)
    if mp is None:
        # Fall back to a random-colored matte material.
        mp = MaterialProperties(
            color=np.random.uniform(0.0, 1.0, size=3),
            k_a=0.5, k_d=0.3, k_s=0.0, alpha=10.0)
    so = SceneObject(grasp_obj.mesh, grasp_obj.T_obj_world.copy(), mp)
    scene.add_object(grasp_obj.key, so)

    # Create simulated pointcloud for ICP matching.
    # NOTE(review): `depth_scene`, `phoxi_tf`, `phoxi`, and `mask_box` are
    # module globals; rendering from `depth_scene` rather than the `scene`
    # parameter looks suspicious — confirm intended.
    wrapped_depth = depth_scene.wrapped_render([RenderMode.DEPTH])
    sim_point_cloud = phoxi_tf * phoxi.intrinsics.deproject(wrapped_depth[0])
    sim_point_cloud_masked, _ = sim_point_cloud.box_mask(mask_box)
    return sim_point_cloud_masked
def mesh(mesh, T_mesh_world=RigidTransform(from_frame='obj', to_frame='world'), style='surface', smooth=False, color=(0.5, 0.5, 0.5), name=None):
    """Visualize a 3D triangular mesh.

    Parameters
    ----------
    mesh : trimesh.Trimesh
        The mesh to visualize.
    T_mesh_world : autolab_core.RigidTransform
        The pose of the mesh, specified as a transformation from mesh
        frame to world frame.
    style : str
        Triangular mesh style, either 'surface' or 'wireframe'.
    smooth : bool
        If true, the mesh is smoothed before rendering.
    color : 3-tuple
        Color tuple.
    name : str
        A name for the object to be added.
    """
    if not isinstance(mesh, trimesh.Trimesh):
        raise ValueError('Must provide a trimesh.Trimesh object')

    material = MaterialProperties(color=np.array(color),
                                  k_a=0.5, k_d=0.3, k_s=0.1, alpha=10.0,
                                  smooth=smooth,
                                  wireframe=(style == 'wireframe'))
    scene_obj = SceneObject(mesh, T_mesh_world, material)
    # Fall back to a random UUID when no explicit name is given.
    key = name if name is not None else str(uuid.uuid4())
    Visualizer3D._scene.add_object(key, scene_obj)
def load_3d_model(model_path):
    """Load a mesh from disk and wrap it in a single-object Scene.

    Returns the populated scene and the raw mesh.
    """
    scene = Scene()

    # Load the mesh and place it at the world origin.
    model_mesh = trimesh.load_mesh(model_path)
    origin_pose = RigidTransform(
        rotation=np.eye(3),
        translation=np.array([0.0, 0.0, 0.0]),
        from_frame='obj',
        to_frame='world')

    # Flat white, fully ambient/diffuse material.
    white_material = MaterialProperties(
        color=np.array([1.0, 1.0, 1.0]),
        k_a=1.0, k_d=1.0, k_s=0.0, alpha=1.0,
        smooth=False, wireframe=False)

    scene.add_object('pawn', SceneObject(model_mesh, origin_pose, white_material))
    return scene, model_mesh
k_s=0.2, alpha=10.0, smooth=False, wireframe=False)
# Alternative bar material, kept commented out for reference:
#bar_material = MaterialProperties(
#    color = 7.0*np.array([0.1, 0.1, 0.1]),
#    k_a = 0.5,
#    k_d = 0.3,
#    k_s = 0.1,
#    alpha = 10.0,
#    smooth=False
#)
# Bar reuses the pawn's material.
bar_material = pawn_material
# Create SceneObjects for each object
pawn_obj = SceneObject(pawn_mesh, pawn_pose, pawn_material)
bar_obj = SceneObject(bar_mesh, bar_pose, bar_material)
# Instanced object: the pawn mesh drawn at both poses with per-instance colors.
pawn_inst_obj = InstancedSceneObject(pawn_mesh, [pawn_pose, bar_pose],
                                     colors=np.array([[0, 0, 1], [0, 1, 0]]),
                                     material=pawn_material)
# Add the SceneObjects to the scene
# NOTE(review): `pawn_obj` is built but never added — the instanced object is
# registered under 'pawn' instead; confirm intended.
scene.add_object('pawn', pawn_inst_obj)
scene.add_object('bar', bar_obj)
#====================================
# Add lighting to the scene
#====================================
# Create an ambient light
ambient = AmbientLight(color=np.array([1.0, 1.0, 1.0]), strength=1.0)
# read workspace bounds workspace_box = Box(np.array(workspace_config['min_pt']), np.array(workspace_config['max_pt']), frame='world') # read workspace objects workspace_objects = {} for obj_key, obj_config in workspace_config['objects'].iteritems(): mesh_filename = obj_config['mesh_filename'] pose_filename = obj_config['pose_filename'] obj_mesh = trimesh.load_mesh(mesh_filename) obj_pose = RigidTransform.load(pose_filename) obj_mat_props = MaterialProperties(smooth=True, wireframe=False) scene_obj = SceneObject(obj_mesh, obj_pose, obj_mat_props) workspace_objects[obj_key] = scene_obj # setup each sensor datasets = {} sensors = {} sensor_poses = {} camera_intrs = {} workspace_ims = {} for sensor_name, sensor_config in sensor_configs.iteritems(): # read params sensor_type = sensor_config['type'] sensor_frame = sensor_config['frame'] # read camera calib tf_filename = '%s_to_world.tf' %(sensor_frame)
def generate_examples(self, salient_edge_set_filename, n_samples=1):
    """Generate RegistrationExamples for evaluating the algorithm.

    Parameters
    ----------
    salient_edge_set_filename : str
        A file containing the salient edge set to generate images of.
    n_samples : int
        The number of samples to generate.

    Returns
    -------
    list of RegistrationExample
        A list of RegistrationExamples.
    """
    # Load the edge set and compute stable poses of its mesh (probabilities
    # normalized to sum to 1).
    salient_edge_set = SalientEdgeSet.load(salient_edge_set_filename)
    obj_mesh = salient_edge_set.mesh
    pose_mats, pose_probs = obj_mesh.compute_stable_poses()
    pose_probs = pose_probs / sum(pose_probs)

    # One scene object, re-posed once per sample.
    scene = Scene()
    scene_obj = SceneObject(obj_mesh, RigidTransform(from_frame='obj', to_frame='world'))
    scene.add_object('object', scene_obj)

    examples = []
    for _ in range(n_samples):
        # Place the object in a randomly-drawn stable pose.
        chosen = np.random.choice(np.arange(len(pose_probs)), p=pose_probs)
        mat = pose_mats[chosen]
        T_obj_world = RigidTransform(mat[:3, :3], mat[:3, 3],
                                     from_frame='obj', to_frame='world')
        scene_obj.T_obj_world = T_obj_world

        # Render a depth image from a random camera above the worksurface.
        ws_cfg = self._config['worksurface_rv_config']
        rv = UniformPlanarWorksurfaceImageRandomVariable(
            'object', scene, [RenderMode.DEPTH], frame='camera', config=ws_cfg)
        render_sample = rv.sample()
        depth_im = render_sample.renders[RenderMode.DEPTH]

        # Recover intrinsics and the object's pose in the camera frame.
        cam = render_sample.camera
        intrinsics = CameraIntrinsics(frame='camera', fx=cam.focal, fy=cam.focal,
                                      cx=cam.cx, cy=cam.cy, skew=0.0,
                                      height=ws_cfg['im_height'],
                                      width=ws_cfg['im_width'])
        T_obj_camera = cam.T_camera_world.inverse().dot(T_obj_world)

        examples.append(RegistrationExample(salient_edge_set_filename,
                                            depth_im, intrinsics, T_obj_camera))
    return examples
k_d = 1.0,
    k_s = 1.0,
    alpha = 10.0,
    smooth=False
)
# Dark blue, smooth-shaded material for the sphere.
sphere_material = MaterialProperties(
    color = np.array([0.1, 0.1, 0.5]),
    k_a = 0.3,
    k_d = 1.0,
    k_s = 1.0,
    alpha = 10.0,
    smooth=True
)

# Create SceneObjects for each object
cube_obj = SceneObject(cube_mesh, cube_pose, cube_material)
sphere_obj = SceneObject(sphere_mesh, sphere_pose, sphere_material)

# Add the SceneObjects to the scene
scene.add_object('cube', cube_obj)
scene.add_object('sphere', sphere_obj)

#====================================
# Add lighting to the scene
#====================================

# Create an ambient light (white, full strength)
ambient = AmbientLight(
    color=np.array([1.0, 1.0, 1.0]),
    strength=1.0
)
# Identity pose: render the mesh exactly where it sits in its own frame.
default_pose = RigidTransform(rotation=np.eye(3),
                              translation=np.array([0.0, 0.0, 0.0]),
                              from_frame='obj',
                              to_frame='world')
# Light blue material (RGB 66,134,244 scaled to [0,1]).
obj_material_properties = MaterialProperties(
    color=np.array([66, 134, 244]) / 255.,
    # color = 5.0*np.array([0.1, 0.1, 0.1]),
    k_a=0.3,
    k_d=0.5,
    k_s=0.2,
    alpha=10.0,
    smooth=False,
    wireframe=False)
obj = SceneObject(mesh, default_pose, obj_material_properties)
scene.add_object('to_render', obj)
print("ADDED OBJECT SUCCESSFULLY")
# Optional table surface, kept commented out for reference:
# table_obj_properties = MaterialProperties(
#     color = np.array([0, 0, 0]),
# )
# wrap the table as a SceneObject
# table_mesh = trimesh.load(PLANE_MESH)
# T_table_world = RigidTransform.load(PLANE_POSE)
# table = SceneObject(table_mesh, T_table_world, table_obj_properties)
# scene.add_object('table', table)

# add light
ambient = AmbientLight(color=np.array([1.0, 1.0, 1.0]), strength=1.0)
def fast_grid_search(pc, indices, model, shadow):
    """Coarse grid search for the best shadow placement on the worksurface.

    Sweeps a shadow-sized cell over the XY extent of the point cloud,
    scoring candidate shadow transforms in each cell, then visualizes the
    best-scoring cell.

    Parameters
    ----------
    pc : PointCloud
        Scene point cloud.
    indices : array-like
        Indices of the support-plane points within `pc`.
    model : (4,) array-like
        Plane model coefficients; only read here (first three are the normal).
    shadow : mesh
        Shadow volume whose footprint defines the cell size.
    """
    length, width, height = shadow.extents
    split_size = max(length, width)

    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]          # unused; kept for parity with sibling searches
    plane_normal = model[0:3]   # unused; kept for parity with sibling searches

    # Inverted binary mask of the support plane as seen by the camera
    # (ci/cp are module-level intrinsics and camera pose).
    plane_data = get_plane_data(pc, indices)
    plane_pc = PointCloud(plane_data.T, pc.frame)
    plane_pc = cp.inverse().apply(plane_pc)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()
    bi = bi.inverse()

    # Scene holding only the shadow volume.
    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    # Grid dimensions, hoisted: previously recomputed inline for np.zeros and
    # again for each range() call. (An unused debug render and large blocks of
    # commented-out visualization code were also removed.)
    numx = int(np.round((maxes[0]-mins[0])/split_size))
    numy = int(np.round((maxes[1]-mins[1])/split_size))
    scores = np.zeros((numx, numy))
    for i in range(numx):
        x = mins[0] + i*split_size
        for j in range(numy):
            y = mins[1] + j*split_size
            # NOTE(review): each candidate transform overwrites scores[i][j],
            # so only the last transform's score survives — confirm intended
            # (a max-accumulate may have been meant).
            for tow in transforms(pc, pc_data, shadow, x, y, x+split_size, y+split_size, 8, orig_tow):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))

    # Visualize the best cell (cyan) against the rest of the cloud (magenta).
    vis3d.figure()
    x = mins[0] + best[0]*split_size
    y = mins[1] + best[1]*split_size
    cell_indices = np.where((x < pc_data[:,0]) & (pc_data[:,0] < x+split_size) & (y < pc_data[:,1]) & (pc_data[:,1] < y+split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0,1,1))
    vis3d.points(rest, color=(1,0,1))
    vis3d.show()
def fast_grid_search(pc, indices, model, shadow, img_file):
    """Coarse grid search for the best shadow placement on the worksurface.

    Variant of fast_grid_search that takes an `img_file` argument (currently
    unused in the body) and calls `under_shadow`/`transforms` with different
    signatures than the 4-argument sibling.

    Parameters
    ----------
    pc : PointCloud
        Scene point cloud.
    indices : array-like
        Indices of the support-plane points within `pc`.
    model : (4,) array-like
        Plane model coefficients; first three components are the normal.
    shadow : mesh
        Shadow volume whose footprint defines the cell size.
    img_file : str
        Unused here — presumably for saving the visualization; verify against callers.
    """
    length, width, height = shadow.extents
    # Cell side length = the shadow's larger horizontal extent.
    split_size = max(length, width)
    pc_data, ind = get_pc_data(pc, indices)
    maxes = np.max(pc_data, axis=0)
    mins = np.min(pc_data, axis=0)
    bin_base = mins[2]          # unused in this variant
    plane_normal = model[0:3]   # unused in this variant

    # Debug: project the full cloud and display it (blocks until closed).
    di_temp = ci.project_to_image(pc)
    vis2d.figure()
    vis2d.imshow(di_temp)
    vis2d.show()

    # Binary mask of the support-plane points in the image (note: unlike the
    # 4-arg sibling, no camera-frame transform and no mask inversion here).
    plane_data = pc.data.T[indices]
    #all_indices = np.where([(plane_data[::,2] > 0.795) & (plane_data[::,2] < 0.862)])
    #all_indices = np.where((plane_data[::,1] < 0.16) & (plane_data[::,1] > -0.24) & (plane_data[::,0] > -0.3) & (plane_data[::,0] < 0.24))[0]
    #plane_data = plane_data[all_indices]
    plane_pc = PointCloud(plane_data.T, pc.frame)
    di = ci.project_to_image(plane_pc)
    bi = di.to_binary()

    # Scene holding only the shadow volume.
    scene = Scene()
    camera = VirtualCamera(ci, cp)
    scene.camera = camera
    # Get shadow depth img.
    shadow_obj = SceneObject(shadow)
    scene.add_object('shadow', shadow_obj)
    orig_tow = shadow_obj.T_obj_world

    # Sweep a shadow-sized cell over the cloud's XY bounding box.
    scores = np.zeros((int(np.round((maxes[0] - mins[0]) / split_size)),
                       int(np.round((maxes[1] - mins[1]) / split_size))))
    for i in range(int(np.round((maxes[0] - mins[0]) / split_size))):
        x = mins[0] + i * split_size
        for j in range(int(np.round((maxes[1] - mins[1]) / split_size))):
            y = mins[1] + j * split_size
            # NOTE(review): each candidate transform overwrites scores[i][j];
            # only the last transform's score is kept — confirm intended.
            for tow in transforms(pc, pc_data, shadow, x, y, x + split_size, y + split_size, 8):
                shadow_obj.T_obj_world = tow
                scores[i][j] = under_shadow(pc, pc_data, indices, model, shadow,
                                            x, x + split_size, y, y + split_size,
                                            scene, bi)
                shadow_obj.T_obj_world = orig_tow

    print("\nScores: \n" + str(scores))
    best = best_cell(scores)
    print("\nBest Cell: " + str(best) + ", with score = " + str(scores[best[0]][best[1]]))

    #-------
    # Visualize best placement: best cell in cyan, remaining points in magenta.
    vis3d.figure()
    x = mins[0] + best[0] * split_size
    y = mins[1] + best[1] * split_size
    cell_indices = np.where((x < pc_data[:, 0]) & (pc_data[:, 0] < x + split_size) & (y < pc_data[:, 1]) & (pc_data[:, 1] < y + split_size))[0]
    points = pc_data[cell_indices]
    rest = pc_data[np.setdiff1d(np.arange(len(pc_data)), cell_indices)]
    vis3d.points(points, color=(0, 1, 1))
    vis3d.points(rest, color=(1, 0, 1))
    vis3d.show()