def detections(self, face_data, rotation=None):
        """Shared assertion driver for the face/emotion test fixtures.

        Runs a fresh FaceDetector and EmotionClassifier over every
        ``(frame, width)`` pair in ``face_data`` and delegates the checks to
        ``self.face_assertions`` / ``self.emotion_assertions``.

        rotation: "cw" expects a negative detected face angle, "ccw" a
        positive one (magnitude ``self._face_rotate_angle``); any other
        value expects an upright face (angle 0).
        """
        face_detector = FaceDetector(enable_tracking=False)

        # for test image set this needs to be zero, in normal use it doesn't matter because of filtering
        face_detector._FACE_DETECTOR_PYRAMID_LAYERS = 0

        emotion_classifier = EmotionClassifier(apply_mean_filter=False)

        # Map the fixture's rotation direction to the sign of the angle the
        # detector should report.
        if rotation == "cw":
            expected_rotation_angle = -self._face_rotate_angle
        elif rotation == "ccw":
            expected_rotation_angle = self._face_rotate_angle
        else:
            expected_rotation_angle = 0

        for i, (frame, width) in enumerate(face_data):
            face = face_detector(frame)
            self.face_assertions(
                face=face,
                expected_rotation_angle=expected_rotation_angle,
                width=width)
            emotion = emotion_classifier(face)
            # NOTE(review): `emotion_data` is not defined in this method or
            # its parameters -- presumably a module-level fixture or it
            # should be `self._emotion_data`; verify against the rest of
            # the file.
            self.emotion_assertions(emotion=emotion,
                                    expected_emotion_data=emotion_data[i],
                                    frame=frame)
    def test_no_face(self):
        """A blank frame must yield a 'not found' face and a null emotion,
        and neither robot view may have anything drawn onto it."""
        detector = FaceDetector(enable_tracking=False)

        # for test image set this needs to be zero, in normal use it doesn't matter because of filtering
        detector._FACE_DETECTOR_PYRAMID_LAYERS = 0

        classifier = EmotionClassifier(apply_mean_filter=False)
        blank_frame = self._blank_cv_frame.copy()

        face = detector(blank_frame)

        self.assertFalse(face.found)
        # Every geometric attribute must be None when no face was found.
        for attribute in (
            "center",
            "angle",
            "features",
            "rectangle",
            "mouth_dimensions",
            "left_eye_dimensions",
            "right_eye_dimensions",
            "left_eye_center",
            "right_eye_center",
            "nose_bottom",
            "pupil_distance",
        ):
            self.assertIsNone(getattr(face, attribute))

        self.assertEqual(face.robot_view.shape[0], self._height)
        self.assertEqual(face.robot_view.shape[1], self._width)

        # check nothing has been drawn to robot view
        self.assertTrue((face.robot_view == blank_frame).all())

        emotion = classifier(face)

        self.assertIsNone(emotion.type)
        self.assertEqual(emotion.confidence, 0.0)
        self.assertIsInstance(emotion.robot_view, np.ndarray)

        # check nothing has been drawn to robot view
        self.assertTrue((emotion.robot_view == blank_frame).all())
    def test_calculated_face_data_68_landmark(self):
        """Verify the geometry derived from the default 68-landmark
        predictor against known values for the first fixture image."""
        detector = FaceDetector(enable_tracking=False)
        # for test image set this needs to be zero, in normal use it doesn't matter because of filtering
        detector._FACE_DETECTOR_PYRAMID_LAYERS = 0

        image = self._face_image_data[0][0]

        face = detector(image)

        def assert_pair(actual_pair, expected_pair, delta=3):
            # Compare an (x, y) or (w, h) pair component-wise.
            for actual, expected in zip(actual_pair, expected_pair):
                self.assertAlmostEqual(actual, expected, delta=delta)

        self.assertAlmostEqual(face.angle, -0.5, delta=0.1)
        self.assertAlmostEqual(face.pupil_distance, 112.0, delta=3)

        assert_pair(face.left_eye_center, (371, 197))
        assert_pair(face.right_eye_center, (259, 195))
        assert_pair(face.left_eye_dimensions, (42.2, 18.1))
        assert_pair(face.right_eye_dimensions, (44.2, 18.3))
        assert_pair(face.mouth_center, (312, 311))
        assert_pair(face.mouth_dimensions, (88.0, 24.1))
        assert_pair(face.nose_bottom, (314, 272))
    def test_calculated_face_data_5_landmark(self):
        """With the 5-landmark predictor only eye/nose geometry is
        available: eye heights are None and mouth queries raise."""
        detector = FaceDetector(
            dlib_landmark_predictor_filename=
            "shape_predictor_5_face_landmarks.dat",
            enable_tracking=False,
        )
        # for test image set this needs to be zero, in normal use it doesn't matter because of filtering
        detector._FACE_DETECTOR_PYRAMID_LAYERS = 0

        image = self._face_image_data[0][0]

        face = detector(image)

        self.assertAlmostEqual(face.angle, -1.1, delta=1.0)
        self.assertAlmostEqual(face.pupil_distance, 108.0, delta=3)

        left_eye_x, left_eye_y = face.left_eye_center
        self.assertAlmostEqual(left_eye_x, 371, delta=3)
        self.assertAlmostEqual(left_eye_y, 197, delta=3)

        right_eye_x, right_eye_y = face.right_eye_center
        self.assertAlmostEqual(right_eye_x, 259, delta=3)
        self.assertAlmostEqual(right_eye_y, 195, delta=3)

        # Eye widths are measurable, but heights cannot be derived from
        # five landmarks.
        left_eye_w, left_eye_h = face.left_eye_dimensions
        self.assertAlmostEqual(left_eye_w, 42.2, delta=3)
        self.assertIsNone(left_eye_h)

        right_eye_w, right_eye_h = face.right_eye_dimensions
        self.assertAlmostEqual(right_eye_w, 44.2, delta=3)
        self.assertIsNone(right_eye_h)

        # Mouth geometry is unavailable with this predictor.
        self.assertRaises(ValueError, lambda: face.mouth_center)
        self.assertRaises(ValueError, lambda: face.mouth_dimensions)

        nose_x, nose_y = face.nose_bottom
        self.assertAlmostEqual(nose_x, 314, delta=3)
        self.assertAlmostEqual(nose_y, 276, delta=3)
# Example 5
from signal import pause

import cv2

from pitop import Camera
from pitop.processing.algorithms.faces import FaceDetector


def find_faces(frame):
    """Camera frame callback: run the module-level face detector on
    ``frame``, show the annotated view, and print face metrics (or a
    not-found message)."""
    detection = face_detector(frame)

    cv2.imshow("Faces", detection.robot_view)
    cv2.waitKey(1)

    if not detection.found:
        print("Cannot find face!")
        return

    print(f"Face angle: {detection.angle} \n"
          f"Face center: {detection.center} \n"
          f"Face rectangle: {detection.rectangle} \n")


# Set up the camera; the image is flipped vertically so faces appear
# upright in the detector's input.
camera = Camera(resolution=(640, 480), flip_top_bottom=True)
# Module-level detector shared with the find_faces callback.
face_detector = FaceDetector()

# Invoke find_faces for every captured frame.
camera.on_frame = find_faces

# Block the main thread so the camera callback keeps running.
pause()