Code Example #1
File: offline_tf.py Project: LongStart/pyproj
from tf2_ros import Buffer


class OfflineTfBuffer():
    def __init__(self, transforms):
        self.t_front = transforms[0].header.stamp
        self.t_back = transforms[-1].header.stamp
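        # Size the cache to span the whole time range of the supplied
        # transforms so that none of them are evicted.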
        self.buff = Buffer(cache_time=(self.t_back - self.t_front),
                           debug=False)
        for transform in transforms:
            self.buff.set_transform(transform, '/auth')

    def LookupTransform(self, target_frame, source_frame, times):
        return [
            self.buff.lookup_transform(target_frame, source_frame, t)
            for t in times
        ]

    def AvailableTimeRange(self, target_frame, source_frame, times):
        # Return (first, one-past-last) indices of `times` covered by the
        # buffered range, or an empty tuple when there is no overlap.
        if times[0] > self.t_back or times[-1] < self.t_front:
            return ()
        idx_front = 0
        idx_back = len(times) - 1
        while times[idx_front] < self.t_front:
            idx_front += 1
        while times[idx_back] > self.t_back:
            idx_back -= 1
        return (idx_front, idx_back + 1)

    def CanTransform(self, target_frame, source_frame, times):
        return self.buff.can_transform(
            target_frame, source_frame, times[0]) and self.buff.can_transform(
                target_frame, source_frame, times[-1])
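
A minimal usage sketch (assumptions: `transforms` is a time-ordered list of geometry_msgs/TransformStamped, e.g. read from a bag file; `query_times` is a sorted list of rospy.Time; the frame names are placeholders):

buff = OfflineTfBuffer(transforms)
rng = buff.AvailableTimeRange('map', 'base_link', query_times)
if rng:
    lo, hi = rng
    if buff.CanTransform('map', 'base_link', query_times[lo:hi]):
        stamped = buff.LookupTransform('map', 'base_link', query_times[lo:hi])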
Code Example #2
import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge
from dynamic_reconfigure.server import Server
from geometry_msgs.msg import TransformStamped
from image_geometry import PinholeCameraModel
from sensor_msgs.msg import CameraInfo, Image
from tf import transformations
from tf2_ros import Buffer, TransformBroadcaster, TransformListener
from tqdm import tqdm

# Project-specific imports (LandmarkMethodBase, FaceEncodingTracker,
# SequentialTracker, SubjectListBridge, Stabilizer, gaze_tools,
# MSG_SubjectImagesList, ModelSizeConfig) come from the surrounding
# package and are omitted here.


class LandmarkMethodROS(LandmarkMethodBase):
    def __init__(self, img_proc=None):
        super(LandmarkMethodROS,
              self).__init__(device_id_facedetection=rospy.get_param(
                  "~device_id_facedetection", default="cuda:0"))
        self.subject_tracker = FaceEncodingTracker() if rospy.get_param(
            "~use_face_encoding_tracker",
            default=True) else SequentialTracker()
        self.bridge = CvBridge()
        self.__subject_bridge = SubjectListBridge()

        self.camera_frame = rospy.get_param("~camera_frame", "kinect2_link")
        self.ros_tf_frame = rospy.get_param("~ros_tf_frame",
                                            "kinect2_ros_frame")

        self.tf2_broadcaster = TransformBroadcaster()
        self.tf2_buffer = Buffer()
        self.tf2_listener = TransformListener(self.tf2_buffer)
        self.tf_prefix = rospy.get_param("~tf_prefix", default="gaze")
        self.visualise_headpose = rospy.get_param("~visualise_headpose",
                                                  default=True)

        self.pose_stabilizers = {}  # Introduce scalar stabilizers for pose.

        try:
            if img_proc is None:
                tqdm.write("Wait for camera message")
                cam_info = rospy.wait_for_message("/camera_info",
                                                  CameraInfo,
                                                  timeout=None)
                self.img_proc = PinholeCameraModel()
                # noinspection PyTypeChecker
                self.img_proc.fromCameraInfo(cam_info)
            else:
                self.img_proc = img_proc

            if np.array_equal(
                    self.img_proc.intrinsicMatrix().A,
                    np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])):
                raise Exception(
                    'Camera matrix is a zero matrix. Did you calibrate '
                    'the camera and link the yaml file in the launch file?'
                )
            tqdm.write("Camera message received")
        except rospy.ROSException:
            raise Exception("Could not get camera info")

        # publisher for the images of all tracked subjects
        self.subject_pub = rospy.Publisher("/subjects/images",
                                           MSG_SubjectImagesList,
                                           queue_size=3)
        # publisher for the subjects' face images, used for visualisation
        self.subject_faces_pub = rospy.Publisher("/subjects/faces",
                                                 Image,
                                                 queue_size=3)

        Server(ModelSizeConfig, self._dyn_reconfig_callback)

        # register the subscriber last so that no callback fires before initialisation is complete
        self.color_sub = rospy.Subscriber("/image",
                                          Image,
                                          self.process_image,
                                          buff_size=2**24,
                                          queue_size=3)

    def _dyn_reconfig_callback(self, config, _):
        self.model_points /= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.model_size_rescale = config["model_size"]
        self.interpupillary_distance = config["interpupillary_distance"]
        self.model_points *= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.head_pitch = config["head_pitch"]
        return config

    def process_image(self, color_msg):
        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

        self.update_subject_tracker(color_img)

        if not self.subject_tracker.get_tracked_elements():
            tqdm.write("\033[2K\033[1;31mNo face found\033[0m", end="\r")
            return

        self.subject_tracker.update_eye_images(self.eye_image_size)

        final_head_pose_images = []
        tracked_elements = self.subject_tracker.get_tracked_elements()
        for subject_id, subject in tracked_elements.items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, head_rpy, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            if success:
                # Publish all the data
                self.publish_pose(timestamp, translation_vector, head_rpy,
                                  subject_id)

                if self.visualise_headpose:
                    # pitch roll yaw
                    trans_msg = self.tf2_buffer.lookup_transform(
                        self.ros_tf_frame, self.tf_prefix +
                        "/head_pose_estimated" + str(subject_id), timestamp)
                    rotation = trans_msg.transform.rotation
                    euler_angles_head = list(
                        transformations.euler_from_quaternion(
                            [rotation.x, rotation.y, rotation.z, rotation.w]))
                    euler_angles_head = gaze_tools.limit_yaw(euler_angles_head)

                    face_image_resized = cv2.resize(
                        subject.face_color,
                        dsize=(224, 224),
                        interpolation=cv2.INTER_CUBIC)

                    final_head_pose_images.append(
                        LandmarkMethodROS.visualize_headpose_result(
                            face_image_resized,
                            gaze_tools.get_phi_theta_from_euler(
                                euler_angles_head)))

        if self.subject_tracker.get_tracked_elements():
            self.publish_subject_list(
                timestamp, self.subject_tracker.get_tracked_elements())

        if len(final_head_pose_images) > 0:
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                np.hstack(final_head_pose_images), "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)

    def get_head_pose(self, landmarks, subject_id):
        """
        We are given a set of 2D points in the form of landmarks. The basic idea is that we assume a generic 3D head
        model, and try to fit the 2D points to the 3D model based on the Levenberg-Marquardt optimization. We can use
        OpenCV's implementation of solvePnPRansac for this.
        This method is inspired by http://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
        We are planning to replace this with our own head pose estimator.
        :param landmarks: Landmark positions to be used to determine head pose
        :param subject_id: ID of the subject that corresponds to the given landmarks
        :return: success - whether the pose was successfully extracted
                 head_rpy - roll/pitch/yaw rotation that, together with the translation vector, brings points from
                 the model coordinate system to the camera coordinate system
                 translation_vector - see head_rpy
        """

        camera_matrix = self.img_proc.intrinsicMatrix()
        dist_coeffs = self.img_proc.distortionCoeffs()

        try:
            success, rodrigues_rotation, translation_vector, _ = cv2.solvePnPRansac(
                self.model_points,
                landmarks.reshape(len(self.model_points), 1, 2),
                cameraMatrix=camera_matrix,
                distCoeffs=dist_coeffs,
                flags=cv2.SOLVEPNP_DLS)

        except cv2.error as e:
            tqdm.write(
                '\033[2K\033[1;31mCould not estimate head pose: {}\033[0m'.
                format(e),
                end="\r")
            return False, None, None

        if not success:
            tqdm.write(
                '\033[2K\033[1;31msolvePnPRansac was unsuccessful\033[0m',
                end="\r")
            return False, None, None

        # this is a generic point stabiliser; the underlying representation doesn't matter
        rotation_vector, translation_vector = self.apply_kalman_filter_head_pose(
            subject_id, rodrigues_rotation, translation_vector / 1000.0)

        rotation_vector[0] += self.head_pitch

        _rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
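        # Apply a fixed axis permutation (assumed to map the camera optical
        # axes to the tf frame convention used for publishing), then extract
        # roll/pitch/yaw via a homogeneous matrix.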
        _rotation_matrix = np.matmul(
            _rotation_matrix, np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))
        _m = np.zeros((4, 4))
        _m[:3, :3] = _rotation_matrix
        _m[3, 3] = 1
        _rpy_rotation = np.array(transformations.euler_from_matrix(_m))

        return success, _rpy_rotation, translation_vector

    def apply_kalman_filter_head_pose(self, subject_id,
                                      rotation_vector_unstable,
                                      translation_vector_unstable):
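        # Feed each of the six pose components (3 rotation + 3 translation)
        # through its own scalar Kalman stabilizer, then split the result
        # back into rotation and translation vectors.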
        stable_pose = []
        pose_np = np.array(
            (rotation_vector_unstable, translation_vector_unstable)).flatten()
        for value, ps_stb in zip(pose_np, self.pose_stabilizers[subject_id]):
            ps_stb.update([value])
            stable_pose.append(ps_stb.state[0])
        stable_pose = np.reshape(stable_pose, (-1, 3))
        rotation_vector = stable_pose[0]
        translation_vector = stable_pose[1]
        return rotation_vector, translation_vector

    def publish_subject_list(self, timestamp, subjects):
        assert (subjects is not None)

        subject_list_message = self.__subject_bridge.images_to_msg(
            subjects, timestamp)

        self.subject_pub.publish(subject_list_message)

    def publish_pose(self, timestamp, nose_center_3d_tf, head_rpy, subject_id):
        t = TransformStamped()
        t.header.frame_id = self.camera_frame
        t.header.stamp = timestamp
        t.child_frame_id = self.tf_prefix + "/head_pose_estimated" + str(
            subject_id)
        t.transform.translation.x = nose_center_3d_tf[0]
        t.transform.translation.y = nose_center_3d_tf[1]
        t.transform.translation.z = nose_center_3d_tf[2]

        rotation = transformations.quaternion_from_euler(*head_rpy)
        t.transform.rotation.x = rotation[0]
        t.transform.rotation.y = rotation[1]
        t.transform.rotation.z = rotation[2]
        t.transform.rotation.w = rotation[3]

        try:
            self.tf2_broadcaster.sendTransform([t])
        except rospy.ROSException as exc:
            if str(exc) == "publish() to a closed topic":
                pass
            else:
                raise exc

        self.tf2_buffer.set_transform(t, 'extract_landmarks')

    def update_subject_tracker(self, color_img):
        faceboxes = self.get_face_bb(color_img)
        if len(faceboxes) == 0:
            self.subject_tracker.clear_elements()
            return

        tracked_subjects = self.get_subjects_from_faceboxes(
            color_img, faceboxes)

        # track the new faceboxes according to the previous ones
        self.subject_tracker.track(tracked_subjects)
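
For completeness, a minimal entry point for running this class as a node could look like the sketch below (the node name is an assumption; the /image and /camera_info topics are expected to be remapped in the launch file):

if __name__ == '__main__':
    rospy.init_node('landmark_extractor')
    landmark_node = LandmarkMethodROS()
    rospy.spin()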
Code Example #3
import rospy
from geometry_msgs.msg import Point, Pose, Quaternion, TransformStamped
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from tf2_ros import Buffer, StaticTransformBroadcaster, TransformListener

# ArmTCPTransformHandlerException is a project-specific exception type and
# is omitted here.


class ArmTCPTransformHandler:
    """
    This class uses a TransformListener to handle transforms related to the TCP.
    """
    def __init__(self):
        self.__tf_buffer = Buffer()
        self.__tf_listener = TransformListener(self.__tf_buffer)
        self.__static_broadcaster = StaticTransformBroadcaster()

    def ee_link_to_tcp_pose_target(self, pose, ee_link):
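        # Trick: re-label the current ee_link -> TCP transform as
        # ee_link_target -> tcp_target, inject it together with the desired
        # base_link -> ee_link_target pose into the local buffer, then look
        # up base_link -> tcp_target to get the TCP pose that corresponds to
        # placing ee_link at `pose`.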
        try:
            transform_tcp_to_ee_link = self.__tf_buffer.lookup_transform(
                ee_link, "TCP", rospy.Time(0))
            transform_tcp_to_ee_link.header.frame_id = "ee_link_target"
            transform_tcp_to_ee_link.child_frame_id = "tcp_target"

            stamp = transform_tcp_to_ee_link.header.stamp
            transform_world_to_tcp_target = self.transform_from_pose(
                pose, "base_link", "ee_link_target", stamp)

            self.__tf_buffer.set_transform(transform_world_to_tcp_target,
                                           "default_authority")
            self.__tf_buffer.set_transform(transform_tcp_to_ee_link,
                                           "default_authority")

            ee_link_target_transform = self.__tf_buffer.lookup_transform(
                "base_link", "tcp_target", stamp)

            return self.pose_from_transform(ee_link_target_transform.transform)
        except ArmTCPTransformHandlerException:
            self.set_empty_tcp_to_ee_link_transform(ee_link)
            return pose

    def tcp_to_ee_link_pose_target(self, pose, ee_link):
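        # Mirror of ee_link_to_tcp_pose_target: given a desired TCP pose,
        # chain the re-labelled inverse transform to obtain the matching
        # ee_link pose.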
        try:
            transform_tcp_to_ee_link = self.__tf_buffer.lookup_transform(
                "TCP", ee_link, rospy.Time(0))
            transform_tcp_to_ee_link.header.frame_id = "tcp_target"
            transform_tcp_to_ee_link.child_frame_id = "ee_link_target"

            stamp = transform_tcp_to_ee_link.header.stamp
            transform_world_to_tcp_target = self.transform_from_pose(
                pose, "base_link", "tcp_target", stamp)

            self.__tf_buffer.set_transform(transform_world_to_tcp_target,
                                           "default_authority")
            self.__tf_buffer.set_transform(transform_tcp_to_ee_link,
                                           "default_authority")

            ee_link_target_transform = self.__tf_buffer.lookup_transform(
                "base_link", "ee_link_target", stamp)

            return self.pose_from_transform(ee_link_target_transform.transform)
        except ArmTCPTransformHandlerException:
            self.set_empty_tcp_to_ee_link_transform(ee_link)
            return pose

    def tcp_to_ee_link_position_target(self, position, ee_link):
        pose = Pose(position, Quaternion(0, 0, 0, 1))
        return self.tcp_to_ee_link_pose_target(pose, ee_link).position

    def tcp_to_ee_link_quaternion_target(self, quaternion, ee_link):
        pose = Pose(Point(0, 0, 0), quaternion)
        return self.tcp_to_ee_link_pose_target(pose, ee_link).orientation

    def tcp_to_ee_link_rpy_target(self, roll, pitch, yaw, ee_link):
        qx, qy, qz, qw = quaternion_from_euler(roll, pitch, yaw)
        pose = Pose(Point(0, 0, 0), Quaternion(qx, qy, qz, qw))
        pose = self.tcp_to_ee_link_pose_target(pose, ee_link)
        new_roll, new_pitch, new_yaw = euler_from_quaternion([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        return new_roll, new_pitch, new_yaw

    def set_empty_tcp_to_ee_link_transform(self, ee_link):
        ee_link_to_tcp_transform = self.transform_from_pose(
            Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1)), ee_link, "TCP")
        self.__static_broadcaster.sendTransform(ee_link_to_tcp_transform)
        return ee_link_to_tcp_transform

    def lookup_transform(self, target_frame, source_frame, stamp=None):
        if stamp:
            return self.__tf_buffer.lookup_transform(target_frame,
                                                     source_frame, stamp)

        return self.__tf_buffer.lookup_transform(target_frame, source_frame,
                                                 rospy.Time(0))

    def display_target_pose(self, pose, name="target_pose"):
        t = self.transform_from_pose(pose, "base_link", name)
        self.__static_broadcaster.sendTransform(t)

    @staticmethod
    def transform_from_pose(pose, parent_frame, child_frame, stamp=None):
        t = TransformStamped()
        t.transform.translation.x = pose.position.x
        t.transform.translation.y = pose.position.y
        t.transform.translation.z = pose.position.z

        t.transform.rotation = pose.orientation

        t.header.frame_id = parent_frame
        t.child_frame_id = child_frame

        if stamp:
            t.header.stamp = stamp

        return t

    @staticmethod
    def pose_from_transform(transform):
        pose = Pose()
        pose.position.x = transform.translation.x
        pose.position.y = transform.translation.y
        pose.position.z = transform.translation.z
        pose.orientation = transform.rotation
        return pose
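
A minimal usage sketch (the pose values and the 'ee_link' frame name are placeholders; a ROS node is assumed to be initialised and the TF tree to contain ee_link and TCP):

handler = ArmTCPTransformHandler()
target = Pose(Point(0.3, 0.0, 0.4), Quaternion(0.0, 0.0, 0.0, 1.0))
ee_pose = handler.tcp_to_ee_link_pose_target(target, 'ee_link')
handler.display_target_pose(ee_pose, name='ee_link_target_pose')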
Code Example #4
import copy

import rospy
from geometry_msgs.msg import Quaternion, TransformStamped
from std_srvs.srv import SetBool, Trigger
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from tf2_ros import Buffer, StaticTransformBroadcaster, TransformListener

# TCP (message), SetTCP (service) and CommandStatus are project-specific
# types and are omitted here.


class ToolTransformHandler:
    """
    This class uses a tfBuffer to handle transforms related to the tools.
    """
    def __init__(self):
        self.__tf_buffer = Buffer()
        self.__tf_listener = TransformListener(self.__tf_buffer)
        self.__static_broadcaster = StaticTransformBroadcaster()

        self.__tool_transform = self.empty_transform()
        self.__tcp_transform = self.empty_transform()
        self.__enable_tcp = False

        # Publisher
        self.__tcp_publisher = rospy.Publisher('~tcp',
                                               TCP,
                                               queue_size=10,
                                               latch=True)
        rospy.Timer(rospy.Duration.from_sec(0.5), self.__send_tcp_transform)

        # Services
        rospy.Service('~set_tcp', SetTCP, self.__callback_set_tcp)
        rospy.Service('~reset_tcp', Trigger, self.__callback_reset_tcp)
        rospy.Service('~enable_tcp', SetBool, self.__callback_enable_tcp)

    def __callback_set_tcp(self, req):
        self.__enable_tcp = True

        t = self.empty_transform()
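        # A default-constructed (all-zero) orientation means the caller
        # supplied roll/pitch/yaw instead of a quaternion.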
        if req.orientation == Quaternion():
            t.transform.rotation = Quaternion(*quaternion_from_euler(
                req.rpy.roll, req.rpy.pitch, req.rpy.yaw))
        else:
            t.transform.rotation = req.orientation
        t.transform.translation.x = req.position.x
        t.transform.translation.y = req.position.y
        t.transform.translation.z = req.position.z

        self.set_tcp(t)
        return CommandStatus.SUCCESS, "Success"

    def __callback_reset_tcp(self, _):
        self.set_tcp(copy.deepcopy(self.__tool_transform))
        return CommandStatus.SUCCESS, "Success"

    def __callback_enable_tcp(self, req):
        self.__enable_tcp = req.value
        self.__send_tcp_transform(None)
        self.__publish_tcp_transform()
        return CommandStatus.SUCCESS, "Success"

    def set_tool(self, tool_transform):
        """
        Updates the transform object_base -> tool_link_target in local tfBuffer
        :param grip:

        """
        self.__tool_transform = tool_transform
        return self.set_tcp(copy.deepcopy(self.__tool_transform))

    def set_tcp(self, tcp_transform):
        """
        Updates the transform object_base -> tool_link_target in local tfBuffer
        :param grip:

        """
        self.__tcp_transform = tcp_transform
        self.__tf_buffer.set_transform(self.__tcp_transform,
                                       "default_authority")
        self.__send_tcp_transform(None)
        self.__publish_tcp_transform()
        return True

    def __publish_tcp_transform(self):
        msg = TCP()
        msg.enabled = self.__enable_tcp

        if self.__enable_tcp:
            msg.position.x = self.__tcp_transform.transform.translation.x
            msg.position.y = self.__tcp_transform.transform.translation.y
            msg.position.z = self.__tcp_transform.transform.translation.z
            msg.orientation = self.__tcp_transform.transform.rotation
            msg.rpy.roll, msg.rpy.pitch, msg.rpy.yaw = euler_from_quaternion([
                msg.orientation.x, msg.orientation.y, msg.orientation.z,
                msg.orientation.w
            ])
        else:
            msg.orientation = Quaternion(0, 0, 0, 1)

        self.__tcp_publisher.publish(msg)

    def __send_tcp_transform(self, _):
        transform = (self.__tcp_transform
                     if self.__enable_tcp else self.empty_transform())
        self.__static_broadcaster.sendTransform(transform)

    @staticmethod
    def transform_from_dict(dict_):
        t = TransformStamped()
        t.transform.translation.x = dict_["translation"][0]
        t.transform.translation.y = dict_["translation"][1]
        t.transform.translation.z = dict_["translation"][2]

        t.transform.rotation.x = dict_["quaternion"][0]
        t.transform.rotation.y = dict_["quaternion"][1]
        t.transform.rotation.z = dict_["quaternion"][2]
        t.transform.rotation.w = dict_["quaternion"][3]

        t.header.frame_id = "tool_link"
        t.child_frame_id = "TCP"

        return t

    @staticmethod
    def empty_transform():
        t = TransformStamped()
        t.transform.rotation = Quaternion(0, 0, 0, 1)
        t.header.frame_id = "tool_link"
        t.child_frame_id = "TCP"
        return t
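
A minimal usage sketch (the node name and the 9 cm TCP offset are placeholder values):

if __name__ == '__main__':
    rospy.init_node('tool_transform_handler')
    handler = ToolTransformHandler()
    handler.set_tool(ToolTransformHandler.transform_from_dict(
        {"translation": [0.0, 0.0, 0.09], "quaternion": [0.0, 0.0, 0.0, 1.0]}))
    rospy.spin()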