def test_relative_transform(self):
    """Check that composing pose_1 with a relative rotation about z yields pose_2."""
    pose_1 = common.sd_pose(np.array([0.307, 0, 0.59, 0.924, -0.382, 0, 0]))
    pose_2 = common.sd_pose(np.array([0.307, 0, 0.59, -0.708, 0.706, 0, 0]))
    # 45 deg rotation about the z-axis, given as (x, y, z, qx, qy, qz, qw)
    rel_trans = common.sd_pose(np.array([0, 0, 0, 0., 0., 0.383, 0.924]))
    trans = np.dot(pose_1, rel_trans)
    ros_pose_2 = common.to_ros_pose(pose_2)
    self.assertTrue(common.all_close(common.to_ros_pose(trans), ros_pose_2, 0.01))
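# A quick numeric cross-check of the values used in the test above: a minimal,
# self-contained sketch, not part of the tested API. Composing pose_1's
# orientation with the 45 deg z rotation should reproduce pose_2's orientation
# up to quaternion sign. tf.transformations uses (x, y, z, w) quaternion order.
from tf import transformations

q_1 = [0.924, -0.382, 0., 0.]   # pose_1 orientation
q_rel = [0., 0., 0.383, 0.924]  # 45 deg about the z-axis
q_2 = transformations.quaternion_multiply(q_1, q_rel)
# q_2 ~ [0.707, -0.707, 0, 0], i.e. pose_2's [-0.708, 0.706, 0, 0] negated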
def decode_to_pose_array_msg(self, ref_frame, ref_frame_id=None, scaling_factor=1.0):
    """Decode the bytes in the streaming data to a pose array message.

    :param ref_frame: str Reference frame name of the generated pose array message.
    :param ref_frame_id: None/int If not None, all poses will be shifted subject to
                         the frame with this ID. This frame should belong to the human.
    :param scaling_factor: float Scale the positions of the poses if ref_frame_id
                           is not None. Its value equals the robot-to-human body
                           dimension ratio.
    """
    pose_array_msg = GeometryMsg.PoseArray()
    pose_array_msg.header.stamp = rospy.Time.now()
    pose_array_msg.header.frame_id = ref_frame
    for i in range(self.item_num):
        item = self.payload[i * self._item_size:(i + 1) * self._item_size]
        pose_msg = self._type_02_decode_to_pose_msg(item)
        if pose_msg is None:
            return None
        pose_array_msg.poses.append(pose_msg)

    if ref_frame_id is not None and ref_frame_id < len(pose_array_msg.poses):
        relative_pose_array_msg = GeometryMsg.PoseArray()
        relative_pose_array_msg.header.stamp = pose_array_msg.header.stamp
        relative_pose_array_msg.header.frame_id = ref_frame
        reference_pose = pose_array_msg.poses[ref_frame_id]
        for p in pose_array_msg.poses:
            std_relative_pose = common.get_transform_same_origin(reference_pose, p)
            # Apply the documented scaling to the translation, assuming the
            # standard pose is a 4x4 homogeneous transform (cf. common.sd_pose)
            std_relative_pose[:3, 3] *= scaling_factor
            relative_pose = common.to_ros_pose(std_relative_pose)
            relative_pose_array_msg.poses.append(relative_pose)
        return relative_pose_array_msg
    return pose_array_msg
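# A minimal sketch of what the relative shift above is assumed to compute:
# with both standard poses given as 4x4 homogeneous transforms in the same
# origin frame, the pose of p relative to the reference is inv(T_ref) . T_p.
# This stand-in is hypothetical; the actual common.get_transform_same_origin
# takes the ROS poses directly and may differ in detail.
import numpy as np

def get_transform_same_origin_sketch(t_ref, t_p):
    """Express the 4x4 transform t_p in the frame of the 4x4 transform t_ref."""
    return np.dot(np.linalg.inv(t_ref), t_p)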
def test_sd_pose(self):
    """Initial pose of the franka panda_arm group:

    position:
        x: 0.307019570052
        y: -5.22132961561e-12
        z: 0.590269558277
    orientation:
        x: 0.923955699469
        y: -0.382499497279
        z: 1.3249325839e-12
        w: 3.20041176635e-12
    """
    ros_pose = GeometryMsg.Pose()
    ros_pose.position.x = 0.307019570052
    ros_pose.position.y = 0
    ros_pose.position.z = 0.590269558277
    ros_pose.orientation.x = 0.923955699469
    ros_pose.orientation.y = -0.382499497279
    ros_pose.orientation.z = 0
    ros_pose.orientation.w = 0
    pose_mat = common.sd_pose(ros_pose)
    re_pose = common.to_ros_pose(pose_mat)
    self.assertTrue(common.all_close(ros_pose, re_pose, 0.001))
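# A minimal sketch of the conversion being round-tripped, assuming
# common.sd_pose builds a 4x4 homogeneous matrix from a ROS pose (consistent
# with the np.dot usage in test_relative_transform). The helper name is
# hypothetical; tf.transformations uses (x, y, z, w) quaternion order.
import numpy as np
from tf import transformations

def sd_pose_sketch(pose):
    """Hypothetical stand-in: ROS pose -> 4x4 homogeneous transform."""
    mat = transformations.quaternion_matrix([pose.orientation.x, pose.orientation.y,
                                             pose.orientation.z, pose.orientation.w])
    mat[:3, 3] = [pose.position.x, pose.position.y, pose.position.z]
    return mat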
def sense_manipulation_poses(self, device_names, algorithm_id, data_types=None):
    """Sense the manipulation poses, i.e., the poses that the robot's
    end-effector should move to for performing manipulation. By default,
    these poses are wrt the sensor's frame.

    :param device_names: list[str] Names of the devices.
    :param algorithm_id: int ID of the algorithm.
    :param data_types: None/list[int] Type IDs of the data. If not given, the
                       method will guess them based on the device names.
    """
    for name in device_names:
        assert name in self.device_names, 'Device name {} is not registered'.format(name)
    assert algorithm_id in self.algorithm_ids

    if data_types is None:
        data_types = []
        for name in device_names:
            if 'rgb' in name or 'RGB' in name or 'color' in name:
                data_types.append(0)  # 0: RGB image
            elif 'depth' in name:
                data_types.append(1)  # 1: depth image
            else:
                raise NotImplementedError('Cannot guess the data type of {}'.format(name))

    # Get sensory data from the devices via the service provided by the robot
    # TODO add more modalities of data other than image
    ok, data = self._get_image_data_client(device_names)
    if not ok:
        return False, None, None

    # Send sensory data to the algorithm and get the poses
    ok, sd_poses = self._post_data(self.algorithm_ports[algorithm_id], data, data_types)
    if ok:
        return True, common.to_ros_poses(sd_poses), common.to_ros_pose(sd_poses[0])
    return False, None, None
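# A hypothetical usage sketch; the scheduler instance, device name, and the
# downstream motion call are assumptions, not part of the source:
#
#     ok, all_poses, first_pose = scheduler.sense_manipulation_poses(
#         ['front_rgb_camera'], algorithm_id=0)  # type 0 (RGB) inferred from name
#     if ok:
#         move_to_pose(first_pose)  # hypothetical motion call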
@staticmethod
def _type_02_decode_to_pose_msg(item):
    """Decode a type 02 stream item to a ROS pose message.

    :param item: str 32 bytes for one segment: uint32 segment ID followed
                 by 7 floats (x, y, z, qw, qx, qy, qz).
    """
    # segment_id = common.byte_to_uint32(item[:4])
    if len(item) != 32:
        # FIXME some of the received data only has length=2
        return None
    x = common.byte_to_float(item[4:8])
    y = common.byte_to_float(item[8:12])
    z = common.byte_to_float(item[12:16])
    qw = common.byte_to_float(item[16:20])
    qx = common.byte_to_float(item[20:24])
    qy = common.byte_to_float(item[24:28])
    qz = common.byte_to_float(item[28:32])
    # No need to convert the pose from the MVN frame (x forward, y up, z right)
    # to the ROS frame, since type 02 data is already Z-up, see:
    # https://www.xsens.com/hubfs/Downloads/Manuals/MVN_real-time_network_streaming_protocol_specification.pdf
    return common.to_ros_pose(np.array([x, y, z, qx, qy, qz, qw]))
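# A minimal sketch of the 32-byte item layout decoded above: uint32 segment ID
# plus 7 floats (x, y, z, qw, qx, qy, qz). Big-endian packing is an assumption
# about common.byte_to_float (MVN streams network byte order).
import struct

identity_item = struct.pack('>I7f', 0, 0., 0., 0., 1., 0., 0., 0.)
assert len(identity_item) == 32
# _type_02_decode_to_pose_msg(identity_item) should give a pose at the origin
# with the identity orientation (qw = 1)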