def build_image_request(image_source_name, quality_percent=75, image_format=None):
    """Construct an ImageRequest protobuf for a single image source.

    When image_format is left as None, the robot picks an appropriate format
    on its own (for example JPEG for visual cameras, RAW for depth cameras);
    pass an explicit format to override that choice.

    Args:
        image_source_name (string): Name of the image source to query.
        quality_percent (int): Requested image quality in [0, 100] (percent-value).
        image_format (image_pb2.Image.Format): Explicit data format such as
            JPEG, RAW, or RLE, or None to let the robot decide.

    Returns:
        The populated ImageRequest protobuf message.
    """
    request = image_pb2.ImageRequest(
        image_source_name=image_source_name,
        quality_percent=quality_percent,
        image_format=image_format,
    )
    return request
def _test_camera_service(use_background_capture_thread, logger=None):
    """Exercise the listing and image-retrieval paths of CameraBaseImageServicer.

    Builds six fake image sources (two healthy, four that fail in different
    ways), then verifies ListImageSources plus the GetImage status reported
    for good sources, unknown sources, and broken capture/decode functions.

    Args:
        use_background_capture_thread (bool): Forwarded to the service under test.
        logger: Optional logger forwarded to the service under test.
    """
    robot = MockRobot()
    rows, cols, cam_gain = 10, 21, 25
    src_name = "source1"
    src_name2 = "source_cap_error"
    src_name3 = "source_decode_error"
    src_name4 = "source_capture_malformed"
    src_name5 = "source_decode_malformed"
    src_name6 = "source2"
    image_sources = [
        VisualImageSource(src_name, FakeCamera(capture_fake, decode_fake),
                          rows=rows, cols=cols, gain=cam_gain),
        VisualImageSource(src_name2, FakeCamera(capture_with_error, decode_fake),
                          rows=rows, cols=cols),
        VisualImageSource(src_name3, FakeCamera(capture_fake, decode_with_error),
                          rows=rows, cols=cols),
        VisualImageSource(src_name4, FakeCamera(capture_return_onething, decode_fake),
                          rows=rows, cols=cols),
        VisualImageSource(src_name5, FakeCamera(capture_fake, decode_less_args),
                          rows=rows, cols=cols),
        VisualImageSource(src_name6, FakeCamera(capture_fake, decode_fake),
                          rows=rows, cols=cols, gain=cam_gain),
    ]
    camera_service = CameraBaseImageServicer(
        robot, "camera-service", image_sources,
        use_background_capture_thread=use_background_capture_thread, logger=logger)

    # Every configured source must show up in the listing.
    list_resp = camera_service.ListImageSources(image_pb2.ListImageSourcesRequest(), None)
    assert list_resp.header.error.code == header_pb2.CommonError.CODE_OK
    assert len(list_resp.image_sources) == 6
    listed_names = {src.name for src in list_resp.image_sources}
    assert {src_name, src_name2, src_name3, src_name4, src_name5, src_name6} <= listed_names

    # Request a known image source and make sure the response is as expected.
    req = image_pb2.GetImageRequest()
    req.image_requests.extend(
        [image_pb2.ImageRequest(image_source_name=src_name, quality_percent=10)])
    resp = camera_service.GetImage(req, None)
    assert resp.header.error.code == header_pb2.CommonError.CODE_OK
    assert len(resp.image_responses) == 1
    img_resp = resp.image_responses[0]
    assert img_resp.source.name == src_name
    assert img_resp.source.rows == rows
    assert img_resp.source.cols == cols
    assert img_resp.shot.capture_params.gain == cam_gain
    assert img_resp.shot.image.rows == 15  # Output of decode_fake
    assert img_resp.shot.image.cols == cols
    # Timestamps should have been converted into the robot's clock.
    assert abs(img_resp.shot.acquisition_time.seconds - 10) < 1e-3
    assert abs(img_resp.shot.acquisition_time.nanos - 20) < 1e-3

    # Request multiple image sources and make sure the response is complete.
    req = image_pb2.GetImageRequest()
    req.image_requests.extend([
        image_pb2.ImageRequest(image_source_name=src_name, quality_percent=10),
        image_pb2.ImageRequest(image_source_name=src_name6),
    ])
    resp = camera_service.GetImage(req, None)
    assert resp.header.error.code == header_pb2.CommonError.CODE_OK
    assert len(resp.image_responses) == 2
    returned_names = {r.source.name for r in resp.image_responses}
    assert {src_name, src_name6} <= returned_names

    def _single_image_response(source_name):
        # Issue a GetImage request for one source and return its sole response.
        one_req = image_pb2.GetImageRequest()
        one_req.image_requests.extend(
            [image_pb2.ImageRequest(image_source_name=source_name, quality_percent=10)])
        one_resp = camera_service.GetImage(one_req, None)
        assert one_resp.header.error.code == header_pb2.CommonError.CODE_OK
        assert len(one_resp.image_responses) == 1
        return one_resp.image_responses[0]

    # Request an image source that does not exist.
    unknown_resp = _single_image_response("unknown")
    assert unknown_resp.status == image_pb2.ImageResponse.STATUS_UNKNOWN_CAMERA

    # Request an image from a source with a bad capture function.
    capture_err_resp = _single_image_response(src_name2)
    assert capture_err_resp.status == image_pb2.ImageResponse.STATUS_IMAGE_DATA_ERROR

    # Request an image from a source with a decode error.
    decode_err_resp = _single_image_response(src_name3)
    print(decode_err_resp)
    assert decode_err_resp.status == image_pb2.ImageResponse.STATUS_UNSUPPORTED_IMAGE_FORMAT_REQUESTED

    # Request an image with a malformed capture function (should raise an error so developer can fix).
    malformed_resp = _single_image_response(src_name4)
    assert malformed_resp.status == image_pb2.ImageResponse.STATUS_IMAGE_DATA_ERROR
def start_spot_ros_interface(self):
    """Run the main ROS interface loop for Spot.

    Initializes the ROS node, advertises the command services and the
    state/image/occupancy-grid publishers, optionally powers the robot on,
    then loops until ROS shutdown publishing kinematic state, tf frames,
    camera images, and the terrain occupancy grid. The body lease is
    returned on exit via the finally clause.
    """
    # ROS Node initialization
    rospy.init_node('spot_ros_interface_py')
    rate = rospy.Rate(200)  # Update at 200 Hz

    # Each service will handle a specific command to Spot instance
    rospy.Service("self_right_cmd", spot_ros_srvs.srv.Stand, self.self_right_cmd_srv)
    rospy.Service("stand_cmd", spot_ros_srvs.srv.Stand, self.stand_cmd_srv)
    rospy.Service("trajectory_cmd", spot_ros_srvs.srv.Trajectory, self.trajectory_cmd_srv)
    rospy.Service("velocity_cmd", spot_ros_srvs.srv.Velocity, self.velocity_cmd_srv)

    # Single image publisher will publish all images from all Spot cameras
    kinematic_state_pub = rospy.Publisher(
        "kinematic_state", spot_ros_msgs.msg.KinematicState, queue_size=20)
    robot_state_pub = rospy.Publisher(
        "robot_state", spot_ros_msgs.msg.RobotState, queue_size=20)
    occupancy_grid_pub = rospy.Publisher(
        "occupancy_grid", visualization_msgs.msg.Marker, queue_size=20)

    # Publish tf2 from visual odometry frame to Spot's base link
    spot_tf_broadcaster = tf2_ros.TransformBroadcaster()
    # Publish static tf2 from Spot's base link to front-left camera
    spot_tf_static_broadcaster = tf2_ros.StaticTransformBroadcaster()

    image_only_pub = rospy.Publisher(
        "spot_image", sensor_msgs.msg.Image, queue_size=20)
    camera_info_pub = rospy.Publisher(
        "spot_cam_info", sensor_msgs.msg.CameraInfo, queue_size=20)

    # TODO: Publish depth images
    # depth_image_pub = rospy.Publisher(
    #     "depth_image", sensor_msgs.msg.Image, queue_size=20)

    # For RViz 3rd person POV visualization
    if self.third_person_view:
        joint_state_pub = rospy.Publisher(
            "joint_state_from_spot", sensor_msgs.msg.JointState, queue_size=20)

    try:
        # Keep the lease and estop alive for the entire publishing loop.
        with bosdyn.client.lease.LeaseKeepAlive(self.lease_client), bosdyn.client.estop.EstopKeepAlive(
                self.estop_endpoint):
            rospy.loginfo("Acquired lease")
            if self.motors_on:
                rospy.loginfo("Powering on robot... This may take a several seconds.")
                self.robot.power_on(timeout_sec=20)
                assert self.robot.is_powered_on(), "Robot power on failed."
                rospy.loginfo("Robot powered on.")
            else:
                rospy.loginfo("Not powering on robot, continuing")

            while not rospy.is_shutdown():
                ''' Publish Robot State'''
                kinematic_state, robot_state = self.get_robot_state()
                kinematic_state_pub.publish(kinematic_state)
                robot_state_pub.publish(robot_state)

                # Publish tf2 from the fixed vision_odometry_frame to the Spot's base_link
                t = geometry_msgs.msg.TransformStamped()
                t.header.stamp = rospy.Time.now()
                t.header.frame_id = "vision_odometry_frame"
                t.child_frame_id = "base_link"
                t.transform.translation.x = kinematic_state.vision_tform_body.translation.x
                t.transform.translation.y = kinematic_state.vision_tform_body.translation.y
                t.transform.translation.z = kinematic_state.vision_tform_body.translation.z
                t.transform.rotation.x = kinematic_state.vision_tform_body.rotation.x
                t.transform.rotation.y = kinematic_state.vision_tform_body.rotation.y
                t.transform.rotation.z = kinematic_state.vision_tform_body.rotation.z
                t.transform.rotation.w = kinematic_state.vision_tform_body.rotation.w
                spot_tf_broadcaster.sendTransform(t)

                if self.third_person_view:
                    joint_state_pub.publish(kinematic_state.joint_states)

                ''' Publish Images'''
                # NOTE(review): the [2:3] slice requests only a single camera
                # source from the list — confirm this is intentional and not a
                # leftover from debugging.
                img_reqs = [image_pb2.ImageRequest(image_source_name=source, image_format=image_pb2.Image.FORMAT_RAW)
                            for source in self.image_source_names[2:3]]
                image_list = self.image_client.get_image(img_reqs)

                for img in image_list:
                    if img.status == image_pb2.ImageResponse.STATUS_OK:
                        header = std_msgs.msg.Header()
                        # Reuse the body-transform stamp so image and tf agree.
                        header.stamp = t.header.stamp
                        header.frame_id = img.source.name

                        # 16-bit pixels for depth images, 8-bit otherwise.
                        if img.shot.image.pixel_format == image_pb2.Image.PIXEL_FORMAT_DEPTH_U16:
                            dtype = np.uint16
                        else:
                            dtype = np.uint8

                        if img.shot.image.format == image_pb2.Image.FORMAT_RAW:
                            # NOTE(review): np.fromstring is deprecated in favor
                            # of np.frombuffer — flag for follow-up.
                            image = np.fromstring(img.shot.image.data, dtype=dtype)
                            image = image.reshape(img.shot.image.rows, img.shot.image.cols)

                        # Make Image component of ImageCapture
                        i = sensor_msgs.msg.Image()
                        i.header = header
                        i.width = img.shot.image.cols
                        i.height = img.shot.image.rows
                        # RAW captures use the reshaped numpy buffer; anything
                        # else passes the wire bytes straight through.
                        i.data = img.shot.image.data if img.shot.image.format != image_pb2.Image.FORMAT_RAW else image.tobytes()
                        i.step = img.shot.image.cols
                        # NOTE(review): encoding is hard-coded to 'mono8' even
                        # when dtype is uint16 (depth) — confirm downstream
                        # consumers expect this.
                        i.encoding = 'mono8'

                        # CameraInfo
                        cam_info = sensor_msgs.msg.CameraInfo()
                        cam_info.header = i.header
                        cam_info.width = i.width
                        cam_info.height = i.height
                        cam_info.distortion_model = "plumb_bob"
                        # Zero distortion coefficients (assumes rectified images).
                        cam_info.D = [0.0,0.0,0.0,0.0]
                        f = img.source.pinhole.intrinsics.focal_length
                        c = img.source.pinhole.intrinsics.principal_point
                        # Standard 3x3 pinhole intrinsics matrix, row-major.
                        cam_info.K = \
                            [f.x, 0, c.x, \
                            0, f.y, c.y, \
                            0, 0, 1]

                        # Transform from base_link to camera for current img
                        body_tform_cam = get_a_tform_b(img.shot.transforms_snapshot,
                                                       BODY_FRAME_NAME,
                                                       img.shot.frame_name_image_sensor)

                        # Generate camera to body Transform
                        body_tform_cam_tf = geometry_msgs.msg.Transform()
                        body_tform_cam_tf.translation.x = body_tform_cam.position.x
                        body_tform_cam_tf.translation.y = body_tform_cam.position.y
                        body_tform_cam_tf.translation.z = body_tform_cam.position.z
                        body_tform_cam_tf.rotation.x = body_tform_cam.rotation.x
                        body_tform_cam_tf.rotation.y = body_tform_cam.rotation.y
                        body_tform_cam_tf.rotation.z = body_tform_cam.rotation.z
                        body_tform_cam_tf.rotation.w = body_tform_cam.rotation.w

                        camera_transform_stamped = geometry_msgs.msg.TransformStamped()
                        camera_transform_stamped.header.stamp = header.stamp
                        camera_transform_stamped.header.frame_id = "base_link"
                        camera_transform_stamped.transform = body_tform_cam_tf
                        camera_transform_stamped.child_frame_id = img.source.name

                        # Publish body to camera static tf
                        spot_tf_static_broadcaster.sendTransform(camera_transform_stamped)

                        # Publish current image and camera info
                        image_only_pub.publish(i)
                        camera_info_pub.publish(cam_info)

                ''' Publish occupancy grid'''
                # Skip the grid query entirely when nobody is subscribed.
                if occupancy_grid_pub.get_num_connections() > 0:
                    local_grid_proto = self.grid_client.get_local_grids(['terrain'])
                    markers = get_terrain_markers(local_grid_proto)
                    occupancy_grid_pub.publish(markers)

                rospy.logdebug("Looping...")
                rate.sleep()
    finally:
        # If we successfully acquired a lease, return it.
        self.lease_client.return_lease(self.lease)