def deepFaceAnalysis(retina_model, deepface_models, image_path):
    """Detect faces in *image_path* with RetinaFace and analyze each with DeepFace.

    Parameters
    ----------
    retina_model : RetinaFace detection model passed to ``extract_faces``.
    deepface_models : pre-built DeepFace models for the analysis actions.
    image_path : path of the image to analyze.

    Returns
    -------
    tuple
        ``(face_count, ages, genders, races, emotions)`` where each mapping
        is keyed ``"face_1"``, ``"face_2"``, ...  When no face is found,
        returns ``(0, "None", "None", "None", "None")`` — the string
        sentinels are kept for backward compatibility with callers.
    """
    age_list = {}
    gender_list = {}
    race_list = {}
    emotion_list = {}

    # Crop every face found in the image.
    faces = RetinaFace.extract_faces(image_path, model=retina_model)

    # Guard clause: nothing detected.
    if len(faces) == 0:
        return 0, "None", "None", "None", "None"

    # With several faces, drop the background ones first.
    # NOTE(review): assumes removeBackgroundFaces mutates `faces` in place — confirm.
    if len(faces) > 1:
        removeBackgroundFaces(faces)

    for i, face in enumerate(faces, start=1):
        # BUG FIX: this key variable was named `dict`, shadowing the builtin.
        key = f"face_{i}"
        face_feature = DeepFace.analyze(
            face,
            actions=['age', 'gender', 'race', 'emotion'],
            models=deepface_models,
            enforce_detection=False)
        age_list[key] = face_feature["age"]
        gender_list[key] = face_feature["gender"]
        race_list[key] = face_feature["dominant_race"]
        emotion_list[key] = face_feature["dominant_emotion"]

    return len(faces), age_list, gender_list, race_list, emotion_list
# Example #2
def detect_face(face_detector, img, align = True):
    """Return the first RetinaFace-extracted face from *img* (BGR), or None.

    Also returns the whole-image region ``[0, 0, height, width]``.
    """
    # Whole-image fallback region.
    region = [0, 0, img.shape[0], img.shape[1]]

    # retinaface expects RGB but OpenCV read BGR
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    extracted = RetinaFace.extract_faces(rgb, model = face_detector, align = align)

    detected = None
    if extracted:
        # Reverse the channel axis to hand the caller a BGR crop again.
        detected = extracted[0][:, :, ::-1]

    return detected, region
def detect_face(img,
                detector_backend='opencv',
                grayscale=False,
                enforce_detection=True):
    """Detect the first face in *img* using the selected backend.

    Parameters
    ----------
    img : BGR image (as read by OpenCV).
    detector_backend : one of 'opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface'.
    grayscale : unused here; kept for interface compatibility with callers.
    enforce_detection : when True, raise ValueError if no face is found;
        when False, return the whole image instead.

    Returns
    -------
    (face, region) : the cropped face and its [x, y, w, h] region, or the
        input image with the full-image region on fallback.

    Raises
    ------
    ValueError : no face found while ``enforce_detection`` is True, or an
        unsupported backend was requested.
    """
    # NOTE(review): region is [0, 0, height, width], matching the original.
    img_region = [0, 0, img.shape[0], img.shape[1]]

    # Same message for every backend's no-face path.
    no_face_msg = "Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False."

    # if functions.preprocess_face is called directly, then the face_detector
    # global variable might not have been initialized.
    if "face_detector" not in globals():
        initialize_detector(detector_backend=detector_backend)

    if detector_backend == 'opencv':

        faces = []
        try:
            faces = face_detector.detectMultiScale(img, 1.3, 5)
        except Exception:  # was a bare except; detection stays best-effort
            pass

        if len(faces) > 0:
            x, y, w, h = faces[0]  # focus on the 1st face found in the image
            detected_face = img[int(y):int(y + h), int(x):int(x + w)]
            return detected_face, [x, y, w, h]

        # no face detected
        if not enforce_detection:
            return img, img_region
        raise ValueError(no_face_msg)

    elif detector_backend == 'ssd':

        ssd_labels = [
            "img_id", "is_face", "confidence", "left", "top", "right", "bottom"
        ]

        target_size = (300, 300)

        base_img = img.copy()  # we will restore base_img to img later

        original_size = img.shape

        img = cv2.resize(img, target_size)

        aspect_ratio_x = original_size[1] / target_size[1]
        aspect_ratio_y = original_size[0] / target_size[0]

        imageBlob = cv2.dnn.blobFromImage(image=img)

        face_detector.setInput(imageBlob)
        detections = face_detector.forward()

        detections_df = pd.DataFrame(detections[0][0], columns=ssd_labels)

        # 0: background, 1: face; keep confident detections only.
        detections_df = detections_df[detections_df['is_face'] == 1]
        detections_df = detections_df[detections_df['confidence'] >= 0.90]

        # Coordinates are normalized to [0, 1]; scale to the resized image
        # (was a hard-coded 300, now tied to target_size).
        detections_df['left'] = (detections_df['left'] * target_size[0]).astype(int)
        detections_df['right'] = (detections_df['right'] * target_size[0]).astype(int)
        detections_df['top'] = (detections_df['top'] * target_size[1]).astype(int)
        detections_df['bottom'] = (detections_df['bottom'] * target_size[1]).astype(int)

        if detections_df.shape[0] > 0:

            # TODO: sort detections_df by confidence

            # get the first face in the image
            instance = detections_df.iloc[0]

            left = instance["left"]
            right = instance["right"]
            bottom = instance["bottom"]
            top = instance["top"]

            # Map the resized-image coordinates back onto the original image.
            detected_face = base_img[
                int(top * aspect_ratio_y):int(bottom * aspect_ratio_y),
                int(left * aspect_ratio_x):int(right * aspect_ratio_x)]

            return detected_face, [
                int(left * aspect_ratio_x),
                int(top * aspect_ratio_y),
                int(right * aspect_ratio_x) - int(left * aspect_ratio_x),
                int(bottom * aspect_ratio_y) - int(top * aspect_ratio_y)
            ]

        # no face detected
        if not enforce_detection:
            return base_img.copy(), img_region
        raise ValueError(no_face_msg)

    elif detector_backend == 'dlib':

        detections = face_detector(img, 1)

        if len(detections) > 0:
            # BUG FIX: the original looped over all detections but returned
            # on the first iteration — use the first detection directly.
            d = detections[0]
            left = d.left()
            right = d.right()
            top = d.top()
            bottom = d.bottom()

            detected_face = img[top:bottom, left:right]
            return detected_face, [left, top, right - left, bottom - top]

        # no face detected
        if not enforce_detection:
            return img, img_region
        raise ValueError(no_face_msg)

    elif detector_backend == 'mtcnn':

        # mtcnn expects RGB but OpenCV read BGR
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        detections = face_detector.detect_faces(img_rgb)

        if len(detections) > 0:
            x, y, w, h = detections[0]["box"]
            detected_face = img[int(y):int(y + h), int(x):int(x + w)]
            return detected_face, [x, y, w, h]

        # no face detected
        if not enforce_detection:
            return img, img_region
        raise ValueError(no_face_msg)

    elif detector_backend == 'retinaface':

        # retinaface expects RGB but OpenCV read BGR
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        from retinaface import RetinaFace
        faces = RetinaFace.extract_faces(img_rgb, align=True)

        if len(faces) > 0:
            return faces[0], img_region

        # no face detected
        if not enforce_detection:
            return img, img_region
        raise ValueError(no_face_msg)

    else:
        # BUG FIX: 'retinaface' is implemented above but was missing from
        # the list of valid backends in this error message.
        detectors = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface']
        raise ValueError(
            f"Valid backends are {detectors} but you passed {detector_backend}")