Example no. 1
0
class OpenFaceRecognition(object):
    """Face recognition pipeline: detect faces with an OpenCV DNN detector,
    embed them with an OpenFace-style features estimator, and compare two
    embeddings with a configurable distance metric.

    NOTE(review): ``OpenCVDNNDetector``, ``FacialFeaturesEstimator`` and
    ``euclidean`` are project-level names that must be in scope at import time.
    """

    def __init__(self,
                 detector_model_filename,
                 detector_weights_filename,
                 detector_config_filename,
                 frontalize=False,
                 metric_distance=euclidean,
                 face_3d_model_filename=None,
                 embedding_model_file=None):
        """Build the face detector and the facial-features estimator.

        BUG FIX: the original body referenced ``face_3d_model_filename`` and
        ``embedding_model_file`` without ever defining them, so constructing
        the class always raised ``NameError``. They are now explicit keyword
        parameters, appended AFTER the original ones so existing positional
        callers keep working.
        """
        # 300 is the detector's fixed input size (matches the other variant
        # of this class in the file).
        self.face_detector = OpenCVDNNDetector(detector_model_filename,
                                               detector_weights_filename,
                                               detector_config_filename, 300)

        self.detector_model_filename = detector_model_filename
        self.detector_weights_filename = detector_weights_filename
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, embedding_model_file)
        self.frontalize = frontalize
        self.metric_distance = metric_distance

    def extract(self, rgb_image):
        """Return the embedding features of the first detected face.

        Returns None when no face is detected (the original returned ``[]``,
        an ambiguous sentinel that later crashed ``predict`` with an
        ``AttributeError``).
        """
        face_list = self.face_detector.detect(rgb_image)
        if not face_list:
            print("no image found for extraction")
            return None
        self.facial_features_estimator.estimate(rgb_image, face_list,
                                                self.frontalize)
        name = self.facial_features_estimator.name
        return face_list[0].features[name]

    def predict(self, rgb_image_1, rgb_image_2):
        """Return ``1 - distance`` between the two images' face embeddings.

        Raises ValueError when a face cannot be extracted from either image,
        instead of the original's opaque ``AttributeError`` on the failure
        sentinel.
        """
        feature1 = self.extract(rgb_image_1)
        feature2 = self.extract(rgb_image_2)
        if feature1 is None or feature2 is None:
            raise ValueError("could not extract a face from both input images")
        return (1 -
                self.metric_distance(feature1.to_array(), feature2.to_array()))
Example no. 2
0
class OpenFaceRecognition(object):
    """Face recognition pipeline with landmark estimation and optional
    frontalization: detect faces, estimate landmarks, embed the first face,
    and compare two embeddings with a configurable distance metric.

    NOTE(review): ``OpenCVDNNDetector``, ``FacialLandmarksEstimator``,
    ``FaceAlignementEstimator``, ``FacialFeaturesEstimator`` and ``euclidean``
    are project-level names that must be in scope at import time.
    """

    def __init__(self,
                 detector_model_filename,
                 detector_weights_filename,
                 detector_config_filename,
                 face_3d_model_filename,
                 embedding_model_file,
                 shape_predictor_config_filename,
                 frontalize=False,
                 metric_distance=euclidean):
        """Build the detector, landmark, alignment and embedding estimators.

        frontalize: when True, faces are frontalized before embedding.
        metric_distance: callable(a, b) -> float distance between embeddings.
        """
        # 300 is the detector's fixed input size.
        self.face_detector = OpenCVDNNDetector(detector_model_filename,
                                               detector_weights_filename,
                                               detector_config_filename, 300)

        self.detector_model_filename = detector_model_filename
        self.detector_weights_filename = detector_weights_filename
        self.facial_landmarks_estimator = FacialLandmarksEstimator(
            shape_predictor_config_filename)
        self.face_alignement_estimator = FaceAlignementEstimator()
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, embedding_model_file, frontalize)
        self.frontalize = frontalize
        self.metric_distance = metric_distance

    def extract(self, rgb_image):
        """Return the embedding features of the first detected face,
        or None when no face is detected."""
        face_list = self.face_detector.detect(rgb_image)
        if not face_list:
            print("no image found for extraction")
            return None
        # Landmarks are estimated first — presumably the frontalization step
        # inside the features estimator relies on them (TODO confirm).
        self.facial_landmarks_estimator.estimate(rgb_image, face_list)
        self.facial_features_estimator.estimate(rgb_image, face_list,
                                                self.frontalize)
        name = self.facial_features_estimator.name
        return face_list[0].features[name]

    def predict(self, rgb_image_1, rgb_image_2):
        """Return ``1 - distance`` between the two images' face embeddings.

        BUG FIX: the original dereferenced ``None.to_array()`` when ``extract``
        found no face, crashing with an opaque ``AttributeError``; this now
        raises an explicit ValueError instead.
        """
        feature1 = self.extract(rgb_image_1)
        feature2 = self.extract(rgb_image_2)
        if feature1 is None or feature2 is None:
            raise ValueError("could not extract a face from both input images")
        return (1 -
                self.metric_distance(feature1.to_array(), feature2.to_array()))
# (removed stray duplicated paste fragment: "detector_config_filename, 300)")

    # Fragment of a demo/CLI entry point: the enclosing function or
    # ``__main__`` header is above this chunk, and the capture loop
    # continues past its end.

    # Ensure the snapshot output directory exists.
    try:
        os.makedirs(snapshot_directory)
    except OSError as e:
        # makedirs raises when the directory already exists (pre-exist_ok
        # idiom); only escalate when the path genuinely isn't a directory.
        if not os.path.isdir(snapshot_directory):
            raise RuntimeError("{}".format(e))
    snapshot_index = 0

    # Webcam loop: draw the first detected face on each frame and save a
    # snapshot of the clean frame when the space bar (key code 32) is pressed.
    capture = cv2.VideoCapture(0)
    while True:
        ok, frame = capture.read()
        # NOTE(review): frame.copy() runs BEFORE the ``ok`` check — if the
        # read fails, ``frame`` is None and this raises AttributeError.
        viz_frame = frame.copy()
        if ok:
            # OpenCV captures BGR; the detector presumably expects RGB.
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            face_list = face_detector.detect(rgb_image)
            if len(face_list) > 0:
                # _,a,score  = knn.predict(model.extract(rgb_image).to_array())
                # face_list[0].confidence = score[0]
                # face_list[0].label += " " + a
                color = (0, 230, 0)
                # if a != "alexandre":
                #     color = (251,0,0)
                # Draw the first detected face's box/label on the BGR frame.
                face_list[0].draw(frame, color)
            k = cv2.waitKey(1) & 0xFF
            if k == 32 and len(face_list) > 0:
                print("Save image " + str(snapshot_index) + ".jpg !")
                # Save the un-annotated copy taken before draw() mutated frame.
                cv2.imwrite(snapshot_directory + str(snapshot_index) + ".jpg",
                            viz_frame)
                snapshot_index += 1
                # Inverted (255 - frame) flash as visual save feedback.
                cv2.imshow("Snapshot recorder", (255 - frame))