def DepthCameraDemoSystem():
    """Assemble and return a Drake diagram containing a mustard bottle, an
    RGB-D camera watching it, and a depth-image-to-point-cloud converter.

    The diagram exports three output ports: "color_image", "depth_image",
    and "point_cloud".
    """
    builder = DiagramBuilder()

    # Physics engine + scene graph.
    plant, scene_graph = AddMultibodyPlantSceneGraph(builder)

    # Load the single object the camera will look at.
    Parser(plant, scene_graph).AddModelFromFile(
        FindResourceOrThrow(
            "drake/manipulation/models/ycb/sdf/006_mustard_bottle.sdf"))

    # Register a VTK render engine under a name the camera refers back to.
    renderer_name = "my_renderer"
    scene_graph.AddRenderer(
        renderer_name, MakeRenderEngineVtk(RenderEngineVtkParams()))

    plant.Finalize()

    # Optional meshcat visualization (disabled by default).
    use_meshcat = False
    if use_meshcat:
        meshcat = builder.AddSystem(MeshcatVisualizer(scene_graph))
        builder.Connect(scene_graph.get_pose_bundle_output_port(),
                        meshcat.get_input_port(0))

    # Camera pose in the world frame, plus the depth-render properties.
    X_PB = RigidTransform(RollPitchYaw(-0.2, 0.2, 0), [-.1, -0.1, -.5])
    depth_properties = DepthCameraProperties(
        width=640, height=480, fov_y=np.pi / 4.0,
        renderer_name=renderer_name, z_near=0.1, z_far=10.0)
    camera = builder.AddSystem(
        RgbdSensor(parent_id=scene_graph.world_frame_id(), X_PB=X_PB,
                   properties=depth_properties, show_window=False))
    camera.set_name("rgbd_sensor")
    builder.Connect(scene_graph.get_query_output_port(),
                    camera.query_object_input_port())

    # Expose the raw camera images on the diagram boundary.
    builder.ExportOutput(camera.color_image_output_port(), "color_image")
    builder.ExportOutput(camera.depth_image_32F_output_port(), "depth_image")

    # Convert depth (+ color) images into an XYZ/RGB point cloud.
    to_point_cloud = builder.AddSystem(
        DepthImageToPointCloud(camera_info=camera.depth_camera_info(),
                               fields=BaseField.kXYZs | BaseField.kRGBs))
    builder.Connect(camera.depth_image_32F_output_port(),
                    to_point_cloud.depth_image_input_port())
    builder.Connect(camera.color_image_output_port(),
                    to_point_cloud.color_image_input_port())

    # Expose the point cloud as well.
    builder.ExportOutput(to_point_cloud.point_cloud_output_port(),
                         "point_cloud")

    diagram = builder.Build()
    diagram.set_name("depth_camera_demo_system")
    return diagram
def _add_renderer(self):
    """Register a VTK renderer on the scene graph and attach a fixed,
    world-mounted RGB-D sensor that renders through it."""
    renderer_name = "renderer"
    self.scenegraph.AddRenderer(
        renderer_name, MakeRenderEngineVtk(RenderEngineVtkParams()))

    # Intrinsics and clipping shared by the color and depth render passes.
    core = RenderCameraCore(
        renderer_name,
        CameraInfo(width=640, height=480, fov_y=np.pi / 4),
        ClippingRange(0.01, 10.0),
        RigidTransform())
    depth_camera = DepthRenderCamera(core, DepthRange(0.01, 10.0))

    # Anchor the sensor to the world body's frame.
    world_frame_id = self.plant.GetBodyFrameIdOrThrow(
        self.plant.world_body().index())
    X_WB = xyz_rpy_deg([4, 0, 0], [-90, 0, 90])
    sensor = RgbdSensor(world_frame_id, X_PB=X_WB, depth_camera=depth_camera)
    self.builder.AddSystem(sensor)
    self.builder.Connect(self.scenegraph.get_query_output_port(),
                         sensor.query_object_input_port())
def add_sensor(self, camera_name, sensor_config):
    """Adds an RGB-D sensor to the diagram.

    :param camera_name: key under which the new sensor is stored in
        ``self._rgbd_sensors``.
    :type camera_name: str
    :param sensor_config: dict with keys ``width``, ``height``, ``fov_y``,
        ``z_near``, ``z_far``, ``pos`` and ``quat`` describing the camera
        intrinsics and its camera-to-world pose.
    :type sensor_config: dict
    :return: None
    :rtype: NoneType
    """
    builder = self.builder
    sg = self.sg

    # Render properties taken straight from the config dict.
    depth_camera_properties = DepthCameraProperties(
        width=sensor_config['width'],
        height=sensor_config['height'],
        fov_y=sensor_config['fov_y'],
        renderer_name=self._renderer_name,
        z_near=sensor_config['z_near'],
        z_far=sensor_config['z_far'])

    # Mount the camera on the world frame.
    parent_frame_id = sg.world_frame_id()

    # Camera-to-world transform; the camera frame uses the
    # right-down-forward convention.
    quat_eigen = Quaternion(sensor_config['quat'])
    pos = np.array(sensor_config['pos'])
    T_W_camera = RigidTransform(quaternion=quat_eigen, p=pos)

    # Add the camera system and wire it to the scene graph's query port.
    camera = builder.AddSystem(
        RgbdSensor(parent_frame_id, T_W_camera, depth_camera_properties,
                   show_window=False))
    builder.Connect(sg.get_query_output_port(),
                    camera.query_object_input_port())
    self._rgbd_sensors[camera_name] = camera
def add_rgbd_sensor(self, camera_name, sensor_config):
    """Adds an RGB-D camera to the diagram, lazily creating the VTK
    renderer the first time a camera is added."""
    builder = self._builder

    # Create the render engine once; later cameras reuse it by name.
    if self._renderer_name is None:
        self._renderer_name = "vtk_renderer"
        self._sg.AddRenderer(self._renderer_name,
                             MakeRenderEngineVtk(RenderEngineVtkParams()))

    cfg = sensor_config
    width, height = cfg['width'], cfg['height']
    fov_y = cfg['fov_y']
    z_near, z_far = cfg['z_near'], cfg['z_far']

    # Camera pose in world; the camera frame follows the
    # right-down-forward convention.
    X_W_camera = transform_from_dict(cfg)

    # Shared intrinsics/clipping for the color and depth render passes.
    core = RenderCameraCore(self._renderer_name,
                            CameraInfo(width, height, fov_y),
                            ClippingRange(z_near, z_far),
                            RigidTransform())
    # Second argument is show_window=False (no debug display window).
    color_camera = ColorRenderCamera(core, False)
    depth_camera = DepthRenderCamera(color_camera.core(),
                                     DepthRange(z_near, z_far))

    # Add the camera system and wire it to the scene graph's query port.
    camera = builder.AddSystem(
        RgbdSensor(parent_id=self._sg.world_frame_id(),
                   X_PB=X_W_camera,
                   color_camera=color_camera,
                   depth_camera=depth_camera))
    builder.Connect(self._sg.get_query_output_port(),
                    camera.query_object_input_port())
    self._rgbd_sensors[camera_name] = camera
def test_render_engine_api(self):
    """Exercises the Python RenderEngine subclass API: registers a dummy
    engine with a SceneGraph, pulls images through an RgbdSensor, and
    checks that the engine's Do* hooks were invoked."""
    class DummyRenderEngine(mut.render.RenderEngine):
        """Mirror of C++ DummyRenderEngine."""
        # See comment below about `rgbd_sensor_test.cc`.
        # Tracks the engine instance most recently touched by any hook;
        # needed because SceneGraph clones engines internally.
        latest_instance = None

        def __init__(self, render_label=None):
            mut.render.RenderEngine.__init__(self)
            # N.B. We do not hide these because this is a test class.
            # Normally, you would want to hide this.
            self.force_accept = False
            self.registered_geometries = set()
            self.updated_ids = {}
            self.include_group_name = "in_test"
            self.X_WC = RigidTransform_[float]()
            # Per-image-type render counters and last-seen camera specs.
            self.color_count = 0
            self.depth_count = 0
            self.label_count = 0
            self.color_camera = None
            self.depth_camera = None
            self.label_camera = None

        def UpdateViewpoint(self, X_WC):
            DummyRenderEngine.latest_instance = self
            self.X_WC = X_WC

        def ImplementGeometry(self, shape, user_data):
            DummyRenderEngine.latest_instance = self

        def DoRegisterVisual(self, id, shape, properties, X_WG):
            DummyRenderEngine.latest_instance = self
            mut.GetRenderLabelOrThrow(properties)
            # Accept geometry only when forced or when the properties carry
            # the designated include group.
            if self.force_accept or properties.HasGroup(
                self.include_group_name
            ):
                self.registered_geometries.add(id)
                return True
            return False

        def DoUpdateVisualPose(self, id, X_WG):
            DummyRenderEngine.latest_instance = self
            self.updated_ids[id] = X_WG

        def DoRemoveGeometry(self, id):
            DummyRenderEngine.latest_instance = self
            self.registered_geometries.remove(id)

        def DoClone(self):
            # Field-by-field shallow copy into a fresh engine instance.
            DummyRenderEngine.latest_instance = self
            new = DummyRenderEngine()
            new.force_accept = copy.copy(self.force_accept)
            new.registered_geometries = copy.copy(
                self.registered_geometries)
            new.updated_ids = copy.copy(self.updated_ids)
            new.include_group_name = copy.copy(self.include_group_name)
            new.X_WC = copy.copy(self.X_WC)
            new.color_count = copy.copy(self.color_count)
            new.depth_count = copy.copy(self.depth_count)
            new.label_count = copy.copy(self.label_count)
            new.color_camera = copy.copy(self.color_camera)
            new.depth_camera = copy.copy(self.depth_camera)
            new.label_camera = copy.copy(self.label_camera)
            return new

        def DoRenderColorImage(self, camera, color_image_out):
            DummyRenderEngine.latest_instance = self
            self.color_count += 1
            self.color_camera = camera

        def DoRenderDepthImage(self, camera, depth_image_out):
            DummyRenderEngine.latest_instance = self
            self.depth_count += 1
            self.depth_camera = camera

        def DoRenderLabelImage(self, camera, label_image_out):
            DummyRenderEngine.latest_instance = self
            self.label_count += 1
            self.label_camera = camera

    engine = DummyRenderEngine()
    self.assertIsInstance(engine, mut.render.RenderEngine)
    self.assertIsInstance(engine.Clone(), DummyRenderEngine)

    # Test implementation of C++ interface by using RgbdSensor.
    renderer_name = "renderer"
    builder = DiagramBuilder()
    scene_graph = builder.AddSystem(mut.SceneGraph())
    # N.B. This passes ownership.
    scene_graph.AddRenderer(renderer_name, engine)
    sensor = builder.AddSystem(RgbdSensor(
        parent_id=scene_graph.world_frame_id(),
        X_PB=RigidTransform(),
        depth_camera=mut.render.DepthRenderCamera(
            mut.render.RenderCameraCore(
                renderer_name, CameraInfo(640, 480, np.pi/4),
                mut.render.ClippingRange(0.1, 5.0), RigidTransform()),
            mut.render.DepthRange(0.1, 5.0))))
    builder.Connect(
        scene_graph.get_query_output_port(),
        sensor.query_object_input_port(),
    )
    diagram = builder.Build()
    diagram_context = diagram.CreateDefaultContext()
    sensor_context = sensor.GetMyContextFromRoot(diagram_context)
    image = sensor.color_image_output_port().Eval(sensor_context)
    # N.B. Because there's context cloning going on under the hood, we
    # won't be interacting with our originally registered instance.
    # See `rgbd_sensor_test.cc` as well.
    current_engine = DummyRenderEngine.latest_instance
    self.assertIsNot(current_engine, engine)
    self.assertIsInstance(image, ImageRgba8U)
    self.assertEqual(current_engine.color_count, 1)

    image = sensor.depth_image_32F_output_port().Eval(sensor_context)
    self.assertIsInstance(image, ImageDepth32F)
    self.assertEqual(current_engine.depth_count, 1)

    # Both depth ports route through DoRenderDepthImage, so the counter
    # advances to 2 after evaluating the 16U port as well.
    image = sensor.depth_image_16U_output_port().Eval(sensor_context)
    self.assertIsInstance(image, ImageDepth16U)
    self.assertEqual(current_engine.depth_count, 2)

    image = sensor.label_image_output_port().Eval(sensor_context)
    self.assertIsInstance(image, ImageLabel16I)
    self.assertEqual(current_engine.label_count, 1)
# Register a VTK render engine on the scene graph under a name the camera
# can refer back to.
renderer_name = "renderer"
scene_graph.AddRenderer(renderer_name,
                        MakeRenderEngineVtk(RenderEngineVtkParams()))

# Now a camera with color and depth (values chosen arbitrarily)
# So here we leverage the renderer attached to the scene graph to power a
# camera for our robot
depth_camera = DepthRenderCamera(
    RenderCameraCore(renderer_name,
                     CameraInfo(width=640, height=480, fov_y=np.pi / 4),
                     ClippingRange(0.01, 10.0),
                     RigidTransform()),
    DepthRange(0.01, 10.0))

# Mount the sensor on the world body's frame at pose X_WB and wire it to
# the scene graph's geometry query port.
world_id = plant.GetBodyFrameIdOrThrow(plant.world_body().index())
X_WB = xyz_rpy_deg([4, 0, 0], [-90, 0, 90])
sensor = RgbdSensor(world_id, X_PB=X_WB, depth_camera=depth_camera)
builder.AddSystem(sensor)
builder.Connect(scene_graph.get_query_output_port(),
                sensor.query_object_input_port())

# So here we are adding the drake visualizer into the mix.
# IMPORTANT: Need to actually run "drake visualizer" from the build.
DrakeVisualizer.AddToBuilder(builder, scene_graph)

# If we want meshcat instead, can run this line
# Or can swap out the "new" and instead connect to an existing
# meshcat-server instance
# meshcat_vis = ConnectMeshcatVisualizer(builder, scene_graph, zmq_url="new", open_browser=False)

# Finalize the plant (no more model elements) and build the diagram.
plant.Finalize()
diagram = builder.Build()
def create_camera(builder, world_id, X_WB, depth_camera, scene_graph):
    """Construct an RgbdSensor mounted on frame ``world_id`` at pose
    ``X_WB``, add it to ``builder``, connect it to ``scene_graph``'s query
    output port, and return the new sensor system."""
    rgbd = RgbdSensor(world_id, X_PB=X_WB, depth_camera=depth_camera)
    builder.AddSystem(rgbd)
    builder.Connect(scene_graph.get_query_output_port(),
                    rgbd.query_object_input_port())
    return rgbd
z_far=10.) world_id = plant.GetBodyFrameIdOrThrow(plant.world_body().index()) frame_id = None if camera.reference_object is not None: # get the reference object's body indexes body_indices = plant.GetBodyIndices( plant.GetModelInstanceByName(camera.reference_object)) for idx in body_indices: body = plant.get_body(idx) if body.name() == camera.reference_object_frame: # we found the frame we want frame_id = plant.GetBodyFrameIdOrThrow(idx) if frame_id is None: frame_id = world_id camera_instance = RgbdSensor(frame_id, X_PB=camera.pose, color_properties=color_prop, depth_properties=depth_prop) camera_instance.set_name(camera.name) builder.AddSystem(camera_instance) builder.Connect(scene_graph.get_query_output_port(), camera_instance.query_object_input_port()) #add to the dictionary of cameras, with no image associated for now camera_images_rgb[camera.name] = np.zeros([480, 640, 4]) camera_images_depth[camera.name] = np.zeros([480, 640, 1]) color_info = camera_instance.color_camera_info() depth_info = camera_instance.depth_camera_info() cloud_generator_instance = DepthImageToPointCloud(depth_info) builder.AddSystem(cloud_generator_instance) builder.Connect(camera_instance.depth_image_32F_output_port(), cloud_generator_instance.depth_image_input_port())