Example #1
def detect_face(face_detector, img, align=True):
    # Detect faces in a BGR image with a prebuilt RetinaFace model and return a
    # list of (detected_face, [x, y, w, h]) tuples, optionally aligned by the eyes.

    from retinaface import RetinaFace
    from retinaface.commons import postprocess

    #---------------------------------

    resp = []

    # The BGR2RGB conversion will be done in the preprocessing step of retinaface.
    # img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #retinaface expects RGB but OpenCV read BGR
    """
    face = None
    img_region = [0, 0, img.shape[0], img.shape[1]] #Really?

    faces = RetinaFace.extract_faces(img_rgb, model = face_detector, align = align)

    if len(faces) > 0:
        face = faces[0][:, :, ::-1]

    return face, img_region
    """

    #--------------------------

    obj = RetinaFace.detect_faces(img, model=face_detector, threshold=0.9)

    if type(obj) == dict:
        for key in obj:
            identity = obj[key]
            facial_area = identity["facial_area"]

            y = facial_area[1]
            h = facial_area[3] - y
            x = facial_area[0]
            w = facial_area[2] - x
            img_region = [x, y, w, h]

            #detected_face = img[int(y):int(y+h), int(x):int(x+w)] #opencv
            detected_face = img[facial_area[1]:facial_area[3],
                                facial_area[0]:facial_area[2]]

            if align:
                landmarks = identity["landmarks"]
                left_eye = landmarks["left_eye"]
                right_eye = landmarks["right_eye"]
                nose = landmarks["nose"]
                #mouth_right = landmarks["mouth_right"]
                #mouth_left = landmarks["mouth_left"]

                detected_face = postprocess.alignment_procedure(
                    detected_face, right_eye, left_eye, nose)

            resp.append((detected_face, img_region))

    return resp
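
A minimal usage sketch for the wrapper above (the image path is a placeholder; RetinaFace.build_model() is the retina-face package's model loader):

import cv2
from retinaface import RetinaFace

face_detector = RetinaFace.build_model()      # prebuilt detector passed into the wrapper
img = cv2.imread("dataset/img3.jpg")          # BGR image, as detect_face() expects
for detected_face, (x, y, w, h) in detect_face(face_detector, img, align=True):
    print("face at", (x, y, w, h), "crop shape:", detected_face.shape)
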
Example #2

import cv2
import numpy as np
from retinaface import RetinaFace

import depthEstimation  # project-specific module assumed to provide generateDepthMap()


def extractForegroundFaces(model, image_path):
    # Estimate a depth map for the image, detect faces with RetinaFace, and
    # return the mean depth value inside each detected facial area.
    image = depthEstimation.generateDepthMap(model=model,
                                             image_path=image_path)
    faces = RetinaFace.detect_faces(image_path)

    facial_depth = []
    for key in faces:
        facial_area = faces[key]['facial_area']

        # build a binary mask covering the detected facial area
        mask = np.zeros(image.shape, dtype=np.uint8)
        pts = np.array([[[facial_area[0], facial_area[1]],
                         [facial_area[0], facial_area[3]],
                         [facial_area[2], facial_area[3]],
                         [facial_area[2], facial_area[1]]]],
                       dtype=np.int32)
        cv2.fillPoly(mask, pts, 255)

        # average the depth values inside the face polygon
        values = image[np.where(mask == 255)]
        facial_depth.append(values.mean())
    return facial_depth
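
A hypothetical usage of the helper above; loadModel() is an assumed loader on the project's depthEstimation module, and whether the foreground face has the smallest or the largest mean value depends on whether the map encodes distance or disparity:

model = depthEstimation.loadModel()          # hypothetical loader for the depth model
depths = extractForegroundFaces(model, "group_photo.jpg")
foreground = int(np.argmin(depths))          # use np.argmax() for disparity-style maps
print("foreground face index:", foreground, "mean depth per face:", depths)
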
Example #3
#! /usr/bin/python3
from retinaface import RetinaFace

resp = RetinaFace.detect_faces("img/gena1.jpg")
print(resp)
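
For reference, RetinaFace.detect_faces() returns a dictionary keyed by face, so the printed output looks roughly like the sketch below (scores and coordinates are illustrative):

# {
#     "face_1": {
#         "score": 0.99,
#         "facial_area": [155, 81, 434, 443],        # [x1, y1, x2, y2]
#         "landmarks": {
#             "right_eye": [257.8, 209.6],
#             "left_eye": [374.9, 251.8],
#             "nose": [303.5, 299.9],
#             "mouth_right": [228.4, 338.7],
#             "mouth_left": [320.2, 374.6],
#         },
#     },
#     ...
# }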
Example #4
from retinaface import RetinaFace
import matplotlib.pyplot as plt
import cv2

img_path = "dataset/img3.jpg"
img = cv2.imread(img_path)

resp = RetinaFace.detect_faces(img_path, threshold=0.5)
print(resp)

for key in resp:
    identity = resp[key]

    #---------------------

    landmarks = identity["landmarks"]
    diameter = 1
    # cv2.circle expects integer pixel coordinates; RetinaFace landmarks are floats
    cv2.circle(img, tuple(int(v) for v in landmarks["left_eye"]), diameter, (0, 0, 255), -1)
    cv2.circle(img, tuple(int(v) for v in landmarks["right_eye"]), diameter, (0, 0, 255), -1)
    cv2.circle(img, tuple(int(v) for v in landmarks["nose"]), diameter, (0, 0, 255), -1)
    cv2.circle(img, tuple(int(v) for v in landmarks["mouth_left"]), diameter, (0, 0, 255), -1)
    cv2.circle(img, tuple(int(v) for v in landmarks["mouth_right"]), diameter, (0, 0, 255), -1)

    facial_area = identity["facial_area"]
    cv2.rectangle(img, (facial_area[2], facial_area[3]),
                  (facial_area[0], facial_area[1]), (255, 255, 255), 1)
    #facial_img = img[facial_area[1]: facial_area[3], facial_area[0]: facial_area[2]]
    #plt.imshow(facial_img[:, :, ::-1])

plt.imshow(img[:, :, ::-1])  # BGR to RGB for matplotlib
plt.axis('off')
plt.show()