def test_rgbd_sensor(self):
    def check_ports(system):
        self.assertIsInstance(system.query_object_input_port(), InputPort)
        self.assertIsInstance(system.color_image_output_port(), OutputPort)
        self.assertIsInstance(system.depth_image_32F_output_port(), OutputPort)
        self.assertIsInstance(system.depth_image_16U_output_port(), OutputPort)
        self.assertIsInstance(system.label_image_output_port(), OutputPort)

    # Use HDTV size.
    width = 1280
    height = 720

    properties = DepthCameraProperties(width=width, height=height,
                                       fov_y=np.pi / 6,
                                       renderer_name="renderer",
                                       z_near=0.1, z_far=5.5)

    # Put it at the origin.
    X_WB = RigidTransform()
    # This id would fail if we tried to render; no such id exists.
    parent_id = FrameId.get_new_id()
    camera_poses = mut.RgbdSensor.CameraPoses()
    sensor = mut.RgbdSensor(parent_id=parent_id, X_PB=X_WB,
                            properties=properties,
                            camera_poses=camera_poses)

    def check_info(camera_info):
        self.assertIsInstance(camera_info, mut.CameraInfo)
        self.assertEqual(camera_info.width(), width)
        self.assertEqual(camera_info.height(), height)

    check_info(sensor.color_camera_info())
    check_info(sensor.depth_camera_info())
    self.assertIsInstance(sensor.X_BC(), RigidTransform)
    self.assertIsInstance(sensor.X_BD(), RigidTransform)
    self.assertEqual(sensor.parent_frame_id(), parent_id)
    check_ports(sensor)

    # Test the discrete camera.
    period = mut.RgbdSensorDiscrete.kDefaultPeriod
    discrete = mut.RgbdSensorDiscrete(sensor=sensor, period=period,
                                      render_label_image=True)
    self.assertTrue(discrete.sensor() is sensor)
    self.assertEqual(discrete.period(), period)
    check_ports(discrete)

    # Check that we can access the state as images.
    context = discrete.CreateDefaultContext()
    values = context.get_abstract_state()
    self.assertIsInstance(values.get_value(0), Value[mut.ImageRgba8U])
    self.assertIsInstance(values.get_value(1), Value[mut.ImageDepth32F])
    self.assertIsInstance(values.get_value(2), Value[mut.ImageDepth16U])
    self.assertIsInstance(values.get_value(3), Value[mut.ImageLabel16I])
def DepthCameraDemoSystem():
    builder = DiagramBuilder()

    # Create the physics engine + scene graph.
    plant, scene_graph = AddMultibodyPlantSceneGraph(builder)
    # Add a single object into it.
    Parser(plant, scene_graph).AddModelFromFile(
        FindResourceOrThrow(
            "drake/manipulation/models/ycb/sdf/006_mustard_bottle.sdf"))
    # Add a rendering engine.
    renderer = "my_renderer"
    scene_graph.AddRenderer(renderer,
                            MakeRenderEngineVtk(RenderEngineVtkParams()))
    plant.Finalize()

    # Add a visualizer just to help us see the object.
    use_meshcat = False
    if use_meshcat:
        meshcat = builder.AddSystem(MeshcatVisualizer(scene_graph))
        builder.Connect(scene_graph.get_pose_bundle_output_port(),
                        meshcat.get_input_port(0))

    # Add a camera to the environment.
    pose = RigidTransform(RollPitchYaw(-0.2, 0.2, 0), [-.1, -0.1, -.5])
    properties = DepthCameraProperties(width=640, height=480,
                                       fov_y=np.pi / 4.0,
                                       renderer_name=renderer,
                                       z_near=0.1, z_far=10.0)
    camera = builder.AddSystem(
        RgbdSensor(parent_id=scene_graph.world_frame_id(), X_PB=pose,
                   properties=properties, show_window=False))
    camera.set_name("rgbd_sensor")
    builder.Connect(scene_graph.get_query_output_port(),
                    camera.query_object_input_port())

    # Export the camera outputs.
    builder.ExportOutput(camera.color_image_output_port(), "color_image")
    builder.ExportOutput(camera.depth_image_32F_output_port(), "depth_image")

    # Add a system to convert the camera output into a point cloud.
    to_point_cloud = builder.AddSystem(
        DepthImageToPointCloud(camera_info=camera.depth_camera_info(),
                               fields=BaseField.kXYZs | BaseField.kRGBs))
    builder.Connect(camera.depth_image_32F_output_port(),
                    to_point_cloud.depth_image_input_port())
    builder.Connect(camera.color_image_output_port(),
                    to_point_cloud.color_image_input_port())

    # Export the point cloud output.
    builder.ExportOutput(to_point_cloud.point_cloud_output_port(),
                         "point_cloud")

    diagram = builder.Build()
    diagram.set_name("depth_camera_demo_system")
    return diagram
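# A minimal usage sketch (not from the original source): build the demo
# diagram above and evaluate its exported ports once at the default context.
# The port names "color_image" and "point_cloud" match the ExportOutput
# calls in DepthCameraDemoSystem().
def render_demo_once():
    diagram = DepthCameraDemoSystem()
    context = diagram.CreateDefaultContext()
    color = diagram.GetOutputPort("color_image").Eval(context)
    cloud = diagram.GetOutputPort("point_cloud").Eval(context)
    print(f"color image: {color.width()}x{color.height()}")
    print(f"point cloud: {cloud.size()} points")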
def construct_single_deprecated(parent_id, X_PB):
    with catch_drake_warnings(expected_count=3):
        depth_properties = DepthCameraProperties(
            width=width, height=height, fov_y=np.pi/6,
            renderer_name="renderer", z_near=0.1, z_far=5.5)
        return mut.RgbdSensor(
            parent_id=parent_id, X_PB=X_PB,
            properties=depth_properties,
            camera_poses=mut.RgbdSensor.CameraPoses(),
            show_window=False)
def test_rgbd_sensor_registration_deprecated(self):
    X_PC = RigidTransform(p=[1, 2, 3])
    station = ManipulationStation(time_step=0.001)
    station.SetupManipulationClassStation()
    plant = station.get_multibody_plant()
    with catch_drake_warnings(expected_count=2):
        properties = DepthCameraProperties(10, 10, np.pi / 4, "renderer",
                                           0.01, 5.0)
        station.RegisterRgbdSensor("deprecated", plant.world_frame(),
                                   X_PC, properties)
    station.Finalize()
    camera_poses = station.GetStaticCameraPosesInWorld()
    # The three default cameras plus the one just added.
    self.assertEqual(len(camera_poses), 4)
    self.assertTrue("deprecated" in camera_poses)
def construct_deprecated(parent_id, X_PB):
    # Expect one deprecation warning each for CameraProperties,
    # DepthCameraProperties, CameraPoses, and the deprecated RgbdSensor
    # constructor overload.
    with catch_drake_warnings(expected_count=4):
        color_properties = CameraProperties(
            width=width, height=height, fov_y=np.pi/6,
            renderer_name="renderer")
        depth_properties = DepthCameraProperties(
            width=width, height=height, fov_y=np.pi/6,
            renderer_name="renderer", z_near=0.1, z_far=5.5)
        camera_poses = mut.RgbdSensor.CameraPoses(
            X_BC=RigidTransform(), X_BD=RigidTransform())
        return mut.RgbdSensor(parent_id=parent_id, X_PB=X_PB,
                              color_properties=color_properties,
                              depth_properties=depth_properties,
                              camera_poses=camera_poses,
                              show_window=False)
def add_sensor(self, camera_name, sensor_config):
    """
    Adds an RgbdSensor to the diagram and connects it to the scene graph.

    :param camera_name: name under which to store the sensor
    :type camera_name: str
    :param sensor_config: camera parameters with keys 'width', 'height',
        'fov_y', 'z_near', 'z_far', 'pos' (xyz position in the world frame)
        and 'quat' (world-from-camera quaternion)
    :type sensor_config: dict
    :return: None
    :rtype: None
    """
    builder = self.builder
    sg = self.sg

    width = sensor_config['width']
    height = sensor_config['height']
    fov_y = sensor_config['fov_y']
    z_near = sensor_config['z_near']
    z_far = sensor_config['z_far']
    renderer_name = self._renderer_name

    depth_camera_properties = DepthCameraProperties(
        width=width, height=height, fov_y=fov_y,
        renderer_name=renderer_name, z_near=z_near, z_far=z_far)
    parent_frame_id = sg.world_frame_id()

    # The camera frame follows the right-down-forward convention.
    pos = np.array(sensor_config['pos'])
    quat_eigen = Quaternion(sensor_config['quat'])

    # Camera-to-world transform.
    T_W_camera = RigidTransform(quaternion=quat_eigen, p=pos)

    # Add the camera system and wire it to the scene graph query port.
    camera = builder.AddSystem(
        RgbdSensor(parent_frame_id, T_W_camera, depth_camera_properties,
                   show_window=False))
    builder.Connect(sg.get_query_output_port(),
                    camera.query_object_input_port())
    self._rgbd_sensors[camera_name] = camera
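# A hypothetical usage sketch (the config values below are illustrative, not
# from the original source): the dict mirrors the keys that add_sensor()
# reads, with 'quat' in the (w, x, y, z) order expected by pydrake's
# Quaternion.
example_sensor_config = {
    'width': 640,
    'height': 480,
    'fov_y': np.pi / 4,
    'z_near': 0.1,
    'z_far': 5.0,
    'pos': [0.0, 0.0, 1.5],        # camera origin in the world frame, meters
    'quat': [1.0, 0.0, 0.0, 0.0],  # identity world-from-camera rotation
}
# self.add_sensor('overhead_camera', example_sensor_config)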
class Camera:
    def __init__(self, name, pose):
        self.name = name
        self.pose = xyz_rpy_deg(pose[0:3], pose[3:])
        self.reference_object_frame = "plane_base"
        self.reference_object = "plane"


camera = Camera('camera', [-0.79, 0., 0.6, 220.0, 0.0, -90.0])
camera_images_rgb = {}
camera_images_depth = {}

color_prop = CameraProperties(width=640, height=480, fov_y=np.pi / 4,
                              renderer_name="renderer")
depth_prop = DepthCameraProperties(width=640, height=480, fov_y=np.pi / 4,
                                   renderer_name="renderer",
                                   z_near=0.01, z_far=10.)

world_id = plant.GetBodyFrameIdOrThrow(plant.world_body().index())
frame_id = None
if camera.reference_object is not None:
    # Get the body indices of the reference object's model instance.
    body_indices = plant.GetBodyIndices(
        plant.GetModelInstanceByName(camera.reference_object))
    for idx in body_indices:
        body = plant.get_body(idx)
        if body.name() == camera.reference_object_frame:
            # Found the body whose frame the camera should attach to.
            frame_id = plant.GetBodyFrameIdOrThrow(idx)
if frame_id is None:
    frame_id = world_id
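# A possible continuation (a sketch, not part of the original snippet):
# attach an RgbdSensor to the frame_id resolved above.  `builder` and
# `scene_graph` are assumed to come from the surrounding diagram setup;
# `depth_prop` and `camera.pose` are the values defined in the snippet.
def attach_rgbd_sensor(builder, scene_graph, frame_id, X_PB, depth_prop):
    sensor = builder.AddSystem(
        RgbdSensor(parent_id=frame_id, X_PB=X_PB,
                   properties=depth_prop, show_window=False))
    builder.Connect(scene_graph.get_query_output_port(),
                    sensor.query_object_input_port())
    return sensor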