class UnitTestMtcnnFaceDetector(tf.test.TestCase):
    """Unit tests for MtcnnFaceDetector.

    Covers end-to-end detection and cropping, construction with an external
    or internal TF session, and the individual P-Net / R-Net / O-Net stages
    (including NMS on P-Net proposals).

    NOTE(review): this file appeared to contain web-scraping artifacts
    ("Example #N" marker lines) and a duplicated, mis-indented copy of
    setUp/test_should_initialize_with_internal_session/
    test_should_work_with_two_instances at the end; both made the module
    unparseable and have been removed. No test logic was changed.
    """

    def setUp(self):
        """Build a fresh detector on a clean graph and load the one-face test image."""
        # Reset the default graph so each test starts from a clean TF state.
        tf.reset_default_graph()
        with self.test_session() as sess:
            self.detector = MtcnnFaceDetector(sess)
        self.image = TestUtils.get_image_and_bbox("one_face.jpg")["image"]

    def tearDown(self):
        # No per-test cleanup needed; graph reset happens in setUp.
        pass

    def test_should_detect_one_face(self):
        """The reference image contains exactly one face."""
        bboxes, face_landmarks, scores, dnorm_scores = self.detector.detect(self.image)
        self.assertEqual(len(bboxes), 1)

    def test_should_not_detect_a_face(self):
        """An all-zero image of the same shape must yield no detections."""
        zero_image = np.zeros(self.image.shape)
        bboxes, face_landmarks, scores, dnorm_scores = self.detector.detect(zero_image)
        self.assertEqual(len(bboxes), 0)

    def test_should_crop_a_face(self):
        """crop_faces returns one crop per detected box, for both raw and square crops."""
        bboxes, face_landmarks, scores, dnorm_scores = self.detector.detect(self.image)
        raw_faces = self.detector.crop_faces(self.image, bboxes, False)
        square_faces = self.detector.crop_faces(self.image, bboxes, True)

        self.assertEqual(len(bboxes), len(raw_faces))
        self.assertEqual(len(bboxes), len(square_faces))

    def test_should_initialize_with_internal_session(self):
        """The detector can create its own session when none is supplied."""
        _ = MtcnnFaceDetector()

    def test_should_work_with_two_instances(self):
        """Two detectors with internal sessions can coexist."""
        _ = MtcnnFaceDetector()
        _ = MtcnnFaceDetector()

    def test_nms_from_pnet(self):
        """NMS (Union mode, 0.5 IoU) collapses the P-Net proposals to one box."""
        total_boxes = np.empty((0, 9))
        with self.test_session() as sess:
            detector = MtcnnFaceDetector(sess)
        # Run P-Net at the smallest scale of the pyramid only.
        scale = SCALES_ASSET[-1]
        hs = int(np.ceil(self.image.shape[0] * scale))
        ws = int(np.ceil(self.image.shape[1] * scale))
        im = cv2.resize(self.image, (ws, hs), interpolation=cv2.INTER_AREA)
        boxes = _run_Pnet_one_scale(detector.pnet, im, scale, 0.6)
        total_boxes = np.append(total_boxes, boxes, axis=0)
        filtered_boxes = nms(total_boxes.copy(), 0.5, 'Union')

        self.assertEqual(filtered_boxes.shape[0], 1)

    def test_run_nets(self):
        """The three cascade stages produce batches of the expected shapes."""
        with self.test_session() as sess:
            detector = MtcnnFaceDetector(sess)

        # Stage 1: P-Net proposals resized to 24x24 for R-Net.
        boxes_Pnet, proposals_batch = _run_Pnet(detector.pnet, self.image, SCALES_ASSET, 0.6)
        self.assertEqual(proposals_batch.shape, (24, 24, 3, 43))

        # Stage 2: R-Net refinement, crops resized to 48x48 for O-Net.
        boxes_RNet, refined_proposals_batch = _run_Rnet(detector.rnet, boxes_Pnet, self.image, proposals_batch, 0.7)
        self.assertEqual(refined_proposals_batch.shape, (48, 48, 3, 3))

        # Stage 3: O-Net outputs landmarks (5 points -> 10 coords) and a feature vector.
        boxes_ONet, landmarks, features = _run_Onet(detector.onet, boxes_RNet, refined_proposals_batch, 0.9)
        self.assertEqual(landmarks.shape, (10, 1))
        self.assertEqual(features.shape, (1, 256))

    def test_run_pnet_one_scale(self):
        """P-Net at the smallest scale yields 5 proposals matching a golden box."""
        scale = SCALES_ASSET[-1]
        hs = int(np.ceil(self.image.shape[0] * scale))
        ws = int(np.ceil(self.image.shape[1] * scale))
        print("Scale h: ", hs)
        im = cv2.resize(self.image, (ws, hs), interpolation=cv2.INTER_AREA)

        with self.test_session() as sess:
            detector = MtcnnFaceDetector(sess)

        boxes = _run_Pnet_one_scale(detector.pnet, im, scale, 0.6)

        # Each proposal row has 9 values: 4 box coords, score, 4 regression offsets.
        self.assertEqual(boxes.shape[0], 5)
        self.assertEqual(boxes.shape[1], 9)

        # Compare the first proposal against the stored golden values.
        self.assertArrayNear(boxes[0, :], PNET_BOX_0, 0.000001)