Example #1
    def __init__(self,
                 detector_model_filename,
                 detector_weights_filename,
                 detector_config_filename,
                 face_3d_model_filename,
                 embedding_model_file,
                 frontalize=False,
                 metric_distance=euclidean):
        self.face_detector = OpenCVDNNDetector(detector_model_filename,
                                               detector_weights_filename,
                                               detector_config_filename, 300)

        self.detector_model_filename = detector_model_filename
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, embedding_model_file)
        self.detector_weights_filename = detector_weights_filename
        self.frontalize = frontalize
        self.metric_distance = metric_distance
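
The `euclidean` default in the constructor above is not imported anywhere in the snippet; it presumably comes from `scipy.spatial.distance`. A minimal sketch of that assumption, using made-up vectors rather than real face embeddings:

from scipy.spatial.distance import euclidean  # assumed source of the default metric

a = [0.10, 0.20, 0.30]   # toy embedding-like vectors, not real model output
b = [0.10, 0.20, 0.25]
print(euclidean(a, b))   # small distance (0.05) for similar vectors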
Example #2
class OpenFaceRecognition(object):
    def __init__(self,
                 detector_model_filename,
                 detector_weights_filename,
                 detector_config_filename,
                 face_3d_model_filename,
                 embedding_model_file,
                 frontalize=False,
                 metric_distance=euclidean):
        self.face_detector = OpenCVDNNDetector(detector_model_filename,
                                               detector_weights_filename,
                                               detector_config_filename, 300)

        self.detector_model_filename = detector_model_filename
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, embedding_model_file)
        self.detector_weights_filename = detector_weights_filename
        self.frontalize = frontalize
        self.metric_distance = metric_distance

    def extract(self, rgb_image):
        face_list = self.face_detector.detect(rgb_image)
        if len(face_list) == 0:
            print("no image found for extraction")
            return []
        else:
            self.facial_features_estimator.estimate(rgb_image, face_list,
                                                    self.frontalize)
            name = self.facial_features_estimator.name
            return face_list[0].features[name]

    def predict(self, rgb_image_1, rgb_image_2):
        feature1 = self.extract(rgb_image_1)
        feature2 = self.extract(rgb_image_2)
        if not feature1 or not feature2:
            # no face detected in at least one of the two images
            return None
        return (1 -
                self.metric_distance(feature1.to_array(), feature2.to_array()))
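
A minimal usage sketch for the class above, assuming `OpenCVDNNDetector` and `FacialFeaturesEstimator` are importable from the host project; the detector paths mirror the ones used in the script further down the page, while the 3D face model and embedding model paths are placeholders:

import cv2

recognition = OpenFaceRecognition(
    "../models/detection/opencv_face_detector_uint8.pb",   # detector_model_filename
    "../models/detection/opencv_face_detector.pbtxt",      # detector_weights_filename
    "../config/detection/face_config.yaml",                # detector_config_filename
    "../models/features/face_3d_model.npy",                # face_3d_model_filename (placeholder)
    "../models/features/nn4.small2.v1.t7")                 # embedding_model_file (placeholder)

image_a = cv2.cvtColor(cv2.imread("person_a.png"), cv2.COLOR_BGR2RGB)
image_b = cv2.cvtColor(cv2.imread("person_b.png"), cv2.COLOR_BGR2RGB)

# predict() returns 1 - distance between the two face embeddings,
# so larger values mean more similar faces
print(recognition.predict(image_a, image_b))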
Example #3
    def __init__(self,
                 detector_model_filename,
                 detector_weights_filename,
                 detector_config_filename,
                 face_3d_model_filename,
                 embedding_model_file,
                 shape_predictor_config_filename,
                 frontalize=False,
                 metric_distance=euclidean):
        self.face_detector = OpenCVDNNDetector(detector_model_filename,
                                               detector_weights_filename,
                                               detector_config_filename, 300)

        self.detector_model_filename = detector_model_filename
        self.facial_landmarks_estimator = FacialLandmarksEstimator(
            shape_predictor_config_filename)
        self.face_alignement_estimator = FaceAlignementEstimator()
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, embedding_model_file, frontalize)
        #self.input_shape = input_shape
        self.detector_weights_filename = detector_weights_filename
        self.frontalize = frontalize
        self.metric_distance = metric_distance
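
Because `metric_distance` is a parameter with a `euclidean` default rather than a hard-coded call, a different distance can be swapped in without touching the rest of the class. A small sketch, assuming the constructor above belongs to the same `OpenFaceRecognition` class shown in the other examples and using `scipy.spatial.distance.cosine`; every file path is a placeholder:

from scipy.spatial.distance import cosine

recognition = OpenFaceRecognition(
    "../models/detection/opencv_face_detector_uint8.pb",
    "../models/detection/opencv_face_detector.pbtxt",
    "../config/detection/face_config.yaml",
    "../models/features/face_3d_model.npy",
    "../models/features/nn4.small2.v1.t7",
    "../models/features/shape_predictor_68_face_landmarks.dat",
    metric_distance=cosine)
# predict() then returns 1 - cosine distance, i.e. the cosine similarity
# of the two embeddings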
Example #4
class OpenFaceRecognition(object):
    def __init__(self,
                 detector_model_filename,
                 detector_weights_filename,
                 detector_config_filename,
                 face_3d_model_filename,
                 embedding_model_file,
                 shape_predictor_config_filename,
                 frontalize=False,
                 metric_distance=euclidean):
        self.face_detector = OpenCVDNNDetector(detector_model_filename,
                                               detector_weights_filename,
                                               detector_config_filename, 300)

        self.detector_model_filename = detector_model_filename
        self.facial_landmarks_estimator = FacialLandmarksEstimator(
            shape_predictor_config_filename)
        self.face_alignement_estimator = FaceAlignementEstimator()
        self.facial_features_estimator = FacialFeaturesEstimator(
            face_3d_model_filename, embedding_model_file, frontalize)
        #self.input_shape = input_shape
        self.detector_weights_filename = detector_weights_filename
        self.frontalize = frontalize
        self.metric_distance = metric_distance

    def extract(self, rgb_image):
        face_list = self.face_detector.detect(rgb_image)
        if len(face_list) == 0:
            print("no image found for extraction")
            return None
        else:
            # cv2.imshow('image',rgb_image)
            # cv2.waitKey(0)

            self.facial_landmarks_estimator.estimate(rgb_image, face_list)
            self.facial_features_estimator.estimate(rgb_image, face_list,
                                                    self.frontalize)
            name = self.facial_features_estimator.name

            # rgb_image2 = self.face_alignement_estimator.align(rgb_image,face_list[0])
            # cv2.imshow('image',rgb_image2)

            # cv2.waitKey(0)
            return face_list[0].features[name]

    def predict(self, rgb_image_1, rgb_image_2):
        feature1 = self.extract(rgb_image_1)
        feature2 = self.extract(rgb_image_2)
        if feature1 is None or feature2 is None:
            # no face detected in at least one of the two images
            return None
        return (1 -
                self.metric_distance(feature1.to_array(), feature2.to_array()))

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--label",
                        type=str,
                        help='The label used to name the data directory')
    parser.add_argument(
        "-d",
        "--data_dir",
        type=str,
        default="/tmp/snapshots/",
        help="The root data directory (default '/tmp/snapshots/')")
    args = parser.parse_args()
    snapshot_directory = args.data_dir + args.label + "/"
    detector_model = "../models/detection/opencv_face_detector_uint8.pb"
    detector_model_txt = "../models/detection/opencv_face_detector.pbtxt"
    detector_config_filename = "../config/detection/face_config.yaml"
    # model = OpenFaceRecognition(detector_model,detector_model_txt,detector_config_filename)

    face_detector = OpenCVDNNDetector(detector_model, detector_model_txt,
                                      detector_config_filename, 300)

    try:
        os.makedirs(snapshot_directory)
    except OSError as e:
        if not os.path.isdir(snapshot_directory):
            raise RuntimeError("{}".format(e))
    snapshot_index = 0

    capture = cv2.VideoCapture(0)
    while True:
        ok, frame = capture.read()
        if ok:
            viz_frame = frame.copy()
            rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            face_list = face_detector.detect(rgb_image)