def __init__(self, pil_image, scale):
    self.pil_image = pil_image
    self.cv_scale = scale
    # Downscale the PIL image to a numpy array and run the dlib face detector on it
    self.numpy_image = shrunken_numpy_image(pil_image, scale)
    self.raw_face_locations = api._raw_face_locations(self.numpy_image)
    # Lazily computed caches for the largest detected face
    self._largest_face_encoding = None
    self._largest_face_location = None
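The constructor relies on a shrunken_numpy_image helper that is not part of this listing. A minimal sketch, assuming scale is a fractional downscaling factor applied to both dimensions, could look like this:

import numpy as np

def shrunken_numpy_image(pil_image, scale):
    # Assumed helper (not shown in the source): downscale the PIL image by the
    # fractional factor `scale` and return it as an RGB numpy array for dlib.
    width, height = pil_image.size
    small = pil_image.resize((max(1, int(width * scale)), max(1, int(height * scale))))
    return np.array(small.convert("RGB"))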
    def test_raw_face_locations_32bit_image(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
        detected_faces = api._raw_face_locations(img)

        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0].top(), 290)
        self.assertEqual(detected_faces[0].bottom(), 558)
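The 32-bit test feeds an RGBA PNG through the detector; load_image_file flattens it to three channels before dlib ever sees it. Roughly equivalent loader logic, shown only to illustrate what happens to the alpha channel:

from PIL import Image
import numpy as np

def load_rgb(path):
    # Drop any alpha channel so the detector always receives a 3-channel RGB array
    return np.array(Image.open(path).convert("RGB"))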
    def test_cnn_raw_face_locations_32bit_image(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png'))
        detected_faces = api._raw_face_locations(img, model="cnn")

        self.assertEqual(len(detected_faces), 1)
        self.assertAlmostEqual(detected_faces[0].rect.top(), 259, delta=25)
        self.assertAlmostEqual(detected_faces[0].rect.bottom(), 552, delta=25)
    def test_raw_face_locations(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
        detected_faces = api._raw_face_locations(img)

        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0].top(), 142)
        self.assertEqual(detected_faces[0].bottom(), 409)
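These tests call the private helper api._raw_face_locations directly and inspect raw dlib rectangles. Application code normally goes through the public wrapper, which returns plain (top, right, bottom, left) tuples; a quick sketch, with an illustrative image path:

import face_recognition

image = face_recognition.load_image_file("obama.jpg")
# Public wrapper around _raw_face_locations; yields (top, right, bottom, left) tuples
for top, right, bottom, left in face_recognition.face_locations(image, model="hog"):
    print(top, right, bottom, left)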
Example #8
File: facerec1.py  Project: nerdogan/nenra
    def test_raw_face_locations_32bit_image(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "32bit.png"))
        detected_faces = api._raw_face_locations(img)

        assert len(detected_faces) == 1
        assert detected_faces[0].top() == 290
        assert detected_faces[0].bottom() == 558
Example #9
File: facerec1.py  Project: nerdogan/nenra
    def test_raw_face_locations(self):
        img = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg"))
        detected_faces = api._raw_face_locations(img)

        assert len(detected_faces) == 1
        assert detected_faces[0].top() == 142
        assert detected_faces[0].bottom() == 409
Example #10
def run(cap, models, gamma, predictor):
    # try:
        # while(True):
        ret, unknown_image = cap.read()
        unknown_image = adjust_gamma(unknown_image, gamma)

        # Look for rectangles (phones, picture frames, etc.)
        # If a face sits inside such a rectangle, treat it as a prop/spoof
        # log.info(find_phone(unknown_image))

        # known_image = face_recognition.load_image_file("aaiCMWZqifg.jpg")
        # known_image = imutils.resize(known_image, width=500)
        # biden_encoding = face_encodings(known_image)[0]

        # unknown_image = face_recognition.load_image_file("71lOOv_lN5A.jpg")
        # unknown_image = imutils.resize(unknown_image, width=500)
        _face_locations = _raw_face_locations(unknown_image)
        unknown_encodings = face_encodings(unknown_image, face_locations=_face_locations, model=predictor)
        for unknown_encoding in unknown_encodings:
            for model_name, model in models.items():
                results = face_recognition.compare_faces(model, unknown_encoding)
                # file_name = str(uuid.uuid1()) + str(results) + '.jpg'
                # cv2.imwrite(file_name, unknown_image)
                log.info(model_name)
                log.info(results)
                log.info(np.mean(results))
                if np.mean(results) > 0.7:
                    log.info(model_name)
                    yield model_name
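One possible way to drive this generator, assuming models maps a label to a list of known encodings and the reference photo, gamma value, and label are all illustrative:

import cv2
import face_recognition

known = face_recognition.load_image_file("obama.jpg")          # illustrative reference photo
models = {"obama": face_recognition.face_encodings(known)}     # label -> list of known encodings
cap = cv2.VideoCapture(0)                                      # first attached webcam

for name in run(cap, models, gamma=1.5, predictor="large"):
    print("Recognized:", name)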
    def test_cnn_raw_face_locations(self):
        img = api.load_image_file(
            os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
        detected_faces = api._raw_face_locations(img, model="cnn")

        self.assertEqual(len(detected_faces), 1)
        self.assertAlmostEqual(detected_faces[0].rect.top(), 144, delta=25)
        self.assertAlmostEqual(detected_faces[0].rect.bottom(), 389, delta=25)
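Note the .rect accessor above: the CNN detector returns dlib mmod_rectangle objects that wrap the bounding box (plus a confidence score), while the HOG detector returns plain dlib.rectangle objects. A small helper along these lines can hide the difference:

def face_rect(detection):
    # mmod_rectangle (CNN) carries the box in .rect; dlib.rectangle (HOG) is the box itself
    return detection.rect if hasattr(detection, "rect") else detection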
Example #12
    def test_raw_face_locations_32bit_image(self):
        img = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "32bit.png"))
        detected_faces = api._raw_face_locations(img)

        assert len(detected_faces) == 1
        assert detected_faces[0].top() == 290
        assert detected_faces[0].bottom() == 558
Example #13
    def test_raw_face_locations(self):
        img = api.load_image_file(
            os.path.join(os.path.dirname(__file__), "test_images",
                         "obama.jpg"))
        detected_faces = api._raw_face_locations(img)

        assert len(detected_faces) == 1
        assert detected_faces[0].top() == 142
        assert detected_faces[0].bottom() == 409
Example #14
def _raw_face_landmarks(face_image, face_locations=None, model="large"):
    # Detect faces only when no locations were supplied; using `or` here would also
    # re-run detection on an empty list, so test against None explicitly.
    if face_locations is None:
        face_locations = _raw_face_locations(face_image)
    # "small" selects the 5-point landmark model, "large" the full 68-point model
    pose_predictor = {'small': pose_predictor_5_point, 'large': pose_predictor_68_point}[model]

    return [pose_predictor(face_image, face_location) for face_location in face_locations]
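A quick way to exercise this helper end to end; the image path is illustrative, and each result is a dlib full_object_detection:

import face_recognition.api as api

image = api.load_image_file("obama.jpg")
for detection in api._raw_face_landmarks(image, model="large"):
    # .parts() yields the fitted landmark points as dlib points with .x/.y
    points = [(p.x, p.y) for p in detection.parts()]
    print(len(points), "landmark points")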