    def process_image(self, color_msg):
        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

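        # Update the subject (face) tracker with the new frame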
        self.update_subject_tracker(color_img)

        if not self.subject_tracker.get_tracked_elements():
            tqdm.write("\033[2K\033[1;31mNo face found\033[0m", end="\r")
            return

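        # Extract eye image patches at the configured size for each tracked subject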
        self.subject_tracker.update_eye_images(self.eye_image_size)

        final_head_pose_images = []
        for subject_id, subject in self.subject_tracker.get_tracked_elements(
        ).items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
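            # First-time subjects: create six scalar stabilisers, one per head-pose component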
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, head_rpy, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            if success:
                # Publish all the data
                self.publish_pose(timestamp, translation_vector, head_rpy,
                                  subject_id)

                if self.visualise_headpose:
                    roll_pitch_yaw = gaze_tools.limit_yaw(head_rpy)
                    face_image_resized = cv2.resize(
                        subject.face_color,
                        dsize=(224, 224),
                        interpolation=cv2.INTER_CUBIC)

                    final_head_pose_images.append(
                        LandmarkMethodROS.visualize_headpose_result(
                            face_image_resized,
                            gaze_tools.get_phi_theta_from_euler(
                                roll_pitch_yaw)))

        if self.subject_tracker.get_tracked_elements():
            self.publish_subject_list(
                timestamp, self.subject_tracker.get_tracked_elements())

        if len(final_head_pose_images) > 0:
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                np.hstack(final_head_pose_images), "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)
Example #2
    def process_image(self, color_msg):
        tqdm.write('Time now: ' + str(rospy.Time.now().to_sec()) +
                   ' message color: ' + str(color_msg.header.stamp.to_sec()) +
                   ' diff: ' + str(rospy.Time.now().to_sec() -
                                   color_msg.header.stamp.to_sec()))

        start_time = time.time()

        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

        self.detect_landmarks(color_img, timestamp)  # update self.subjects

        tqdm.write('Elapsed after detecting transformed_landmarks: ' +
                   str(time.time() - start_time))

        if not self.subjects:
            tqdm.write("No face found")
            return

        self.get_eye_image()  # update self.subjects

        final_head_pose_images = None
        for subject_id, subject in self.subjects.items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
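            # First-time subjects: seed default rotation/translation vectors and create six pose stabilisers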
            if subject_id not in self.last_rvec:
                self.last_rvec[subject_id] = np.array([[0.01891013],
                                                       [0.08560084],
                                                       [-3.14392813]])
            if subject_id not in self.last_tvec:
                self.last_tvec[subject_id] = np.array([[-14.97821226],
                                                       [-10.62040383],
                                                       [-2053.03596872]])
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, rotation_vector, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            # Retrieve the head translation; the full pose is published below on success
            translation_headpose_tf = self.get_head_translation(
                timestamp, subject_id)

            if success:
                if translation_headpose_tf is not None:
                    euler_angles_head = self.publish_pose(
                        timestamp, translation_headpose_tf, rotation_vector,
                        subject_id)

                    if euler_angles_head is not None:
                        head_pose_image = self.visualize_headpose_result(
                            subject.face_color,
                            gaze_tools.get_phi_theta_from_euler(
                                euler_angles_head))
                        head_pose_image_resized = cv2.resize(
                            head_pose_image,
                            dsize=(224, 224),
                            interpolation=cv2.INTER_CUBIC)

                        if final_head_pose_images is None:
                            final_head_pose_images = head_pose_image_resized
                        else:
                            final_head_pose_images = np.concatenate(
                                (final_head_pose_images,
                                 head_pose_image_resized),
                                axis=1)
            else:
                tqdm.write("Could not get head pose properly")

        if final_head_pose_images is not None:
            self.publish_subject_list(timestamp, self.subjects)
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                final_head_pose_images, "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)

        tqdm.write('Elapsed total: ' + str(time.time() - start_time) + '\n\n')