import cv2
import dlib

# Helper functions (is_grayscale, get_first_face_location) and the exception
# types (SettingNotSupportedError, ExperimentFailure) are defined elsewhere in
# the benchmark module.


def opencv_cnn(image, **kwargs):
    if is_grayscale(image):
        raise SettingNotSupportedError("Grayscale not supported for " +
                                       opencv_cnn.__name__)

    model_file = "models/face_detection/opencv_face_detector_uint8.pb"
    config_file = "models/face_detection/opencv_face_detector.pbtxt"
    detector = cv2.dnn.readNetFromTensorflow(model_file, config_file)

    # The network runs on a fixed 300x300 input, so detections must be scaled
    # back to the original image dimensions.
    scale_x = image.shape[1] / 300
    scale_y = image.shape[0] / 300

    def convert_rect(rect):
        # rect[3:7] holds the normalized box corners (x1, y1, x2, y2)
        left, top, right, bottom = rect[3:7] * 300
        # benchmark rectangle convention: [left, right, bottom, top]
        return [
            int(left * scale_x),
            int(right * scale_x),
            int(bottom * scale_y),
            int(top * scale_y)
        ]

    def run():
        # The resize and blob conversion are timed as part of the benchmark
        # because they are extra work that only this model requires.
        image_resized = cv2.resize(image, (300, 300))
        image_scaled = cv2.dnn.blobFromImage(image_resized, 1.0, (300, 300),
                                             (104.0, 177.0, 123.0))

        detector.setInput(image_scaled)
        # Keep only detections above a confidence threshold (0.5 is a common
        # default); the raw output has a fixed number of rows, most of which
        # are low-confidence noise, so an unfiltered list is never empty.
        result = [r for r in detector.forward()[0, 0] if r[2] > 0.5]
        if len(result) == 0:
            raise ExperimentFailure("No faces found by " +
                                    opencv_cnn.__name__)

        return [convert_rect(r) for r in result]

    return run
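

# A minimal sketch of how the setup/run split above might be timed, assuming a
# timeit-based harness and a hypothetical sample image path ("sample.jpg");
# the actual benchmark driver is not part of these examples.
def example_benchmark_opencv_cnn():
    import timeit
    image = cv2.cvtColor(cv2.imread("sample.jpg"), cv2.COLOR_BGR2RGB)
    run = opencv_cnn(image)  # model loading happens here, untimed
    seconds = timeit.timeit(run, number=10) / 10  # only run() is timed
    print(f"opencv_cnn: {seconds:.4f} s per call")
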
def dlib_5point(image, **kwargs):
    if is_grayscale(image):
        raise SettingNotSupportedError("Grayscale not supported for " +
                                       dlib_5point.__name__)

    landmark_model = dlib.shape_predictor(
        "models/face_cropping/shape_predictor_5_face_landmarks.dat")

    face = get_first_face_location(image)

    def run():
        shape = landmark_model(image, face)
        return dlib.get_face_chip(image, shape)

    return run
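

# get_first_face_location is defined elsewhere in the benchmark module. A
# plausible sketch using dlib's HOG-based frontal face detector (an
# assumption, not the original helper):
def example_get_first_face_location(image):
    detector = dlib.get_frontal_face_detector()
    faces = detector(image)  # returns dlib.rectangles, best detections first
    if len(faces) == 0:
        raise ExperimentFailure("No faces found")
    return faces[0]  # first detected face as a dlib.rectangle
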
def dlib_resnet(image, **kwargs):
    if is_grayscale(image):
        raise SettingNotSupportedError("Grayscale not supported for " +
                                       dlib_resnet.__name__)

    landmark_model = dlib.shape_predictor(
        "models/face_cropping/shape_predictor_5_face_landmarks.dat")
    feature_model = dlib.face_recognition_model_v1(
        "models/feature_extraction/dlib_face_recognition_resnet_model_v1.dat")

    face = get_first_face_location(image)
    shape = landmark_model(image, face)
    crop = dlib.get_face_chip(image, shape)

    def run():
        return feature_model.compute_face_descriptor(crop)

    return run
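

# compute_face_descriptor returns a 128-dimensional vector. Per dlib's
# documentation, two faces are usually compared by Euclidean distance, with
# ~0.6 as the typical same-person threshold:
def example_same_person(descriptor_a, descriptor_b, threshold=0.6):
    import numpy as np
    distance = np.linalg.norm(np.array(descriptor_a) - np.array(descriptor_b))
    return distance < threshold  # True if the two descriptors likely match
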
def opencv_haar(image, **kwargs):
    detector = cv2.CascadeClassifier(
        'models/face_detection/haarcascade_frontalface_default.xml')

    # Not timed as part of the benchmark: the main code would already supply
    # the image in the correct color order.
    if not is_grayscale(image):
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    def convert_rect(rect):
        x, y, w, h = rect
        return [x, x + w, y + h, y]  # [left, right, bottom, top]

    def run():
        result = [convert_rect(r) for r in detector.detectMultiScale(image)]
        if len(result) == 0:
            raise ExperimentFailure("No faces found by " +
                                    opencv_haar.__name__)
        return result

    return run
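

# is_grayscale is another helper from the surrounding module. A minimal sketch
# (an assumption, not the original code) that treats single-channel images as
# grayscale:
def example_is_grayscale(image):
    return image.ndim == 2 or image.shape[2] == 1
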
def openface(image, **kwargs):
    if is_grayscale(image):
        raise SettingNotSupportedError("Grayscale not supported for " +
                                       openface.__name__)

    landmark_model = dlib.shape_predictor(
        "models/face_cropping/shape_predictor_5_face_landmarks.dat")
    feature_model = cv2.dnn.readNetFromTorch(
        "models/feature_extraction/nn4.small2.v1.t7")

    face = get_first_face_location(image)
    shape = landmark_model(image, face)
    crop = dlib.get_face_chip(image, shape)

    def run():
        crop_blob = cv2.dnn.blobFromImage(crop,
                                          1.0 / 255, (96, 96), (0, 0, 0),
                                          swapRB=True,
                                          crop=False)
        feature_model.setInput(crop_blob)
        return feature_model.forward()[0]

    return run
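

# The nn4.small2.v1 model also produces a 128-dimensional embedding, but
# OpenFace embeddings are L2-normalized, so a dot product doubles as cosine
# similarity. A minimal comparison sketch:
def example_openface_similarity(embedding_a, embedding_b):
    import numpy as np
    return float(np.dot(embedding_a, embedding_b))  # near 1.0 for similar faces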