Code example #1
def run(in_='media/test_1.mp4', out='media/test_1_out.mp4'):
    # Detect faces in the extracted frames, then build the annotated output video.
    # face_detector and liveStream are assumed to be imported elsewhere in the project.
    face_detector('dataset/live_stream_images')
    liveStream('dataset/live_stream_images_out', in_, out)
Code example #2
from configs.app_face_detection_config import THRESHOLD, \
    PRETRAINED_MODEL_RES10, PRETRAINED_MODEL_OPENCV, PRETRAINED_MODEL_RETAIL_0044
from configs.app_age_prediction_config import PRETRAINED_MODEL as PRETRAINED_MODEL_AGE
from face_detection import face_detector
from age_prediction import age_predictor

import cv2

if __name__ == "__main__":

    output_path = "./output/out.jpg"
    image = cv2.imread(filename="./img/amanda_bynes.jpg")

    image_marked, face_bboxes, t_elapsed = face_detector(
        image=image,
        pretrained_model=PRETRAINED_MODEL_RETAIL_0044,
        threshold=THRESHOLD,
        output_path=output_path)
    print("face bboxes: ", face_bboxes)
    print("elapsed time: ", t_elapsed)

    image_marked, ages, t_elapsed = age_predictor(
        image=image_marked,
        pretrained_model=PRETRAINED_MODEL_AGE,
        bboxes=face_bboxes,
        output_path=output_path)

    print("ages: ", ages)
    print("elapsed time: ", t_elapsed)

    cv2.imshow("age", image_marked)
    # Keep the window open until a key is pressed, then clean up.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Code example #3
# Use the GUI to run the specific case of searching from the database,
# or run each of the following steps from the command line.

# Face detection
from face_detection import face_detector
face_detector('dataset/Test_data_full')

# Example: matching faces
from face_match_v3 import match_faces
mn, tn, ml, aa = match_faces('dataset/missing_images_out',
                             'dataset/Test_data_full',
                             'matched/final_output_top3')
Code example #4
def run():
    # Detect faces in the extracted frames, then stream the annotated results.
    # face_detector and liveStream are assumed to be imported elsewhere in the project.
    face_detector('dataset/live_stream_images')
    liveStream('dataset/live_stream_images_out')
Code example #5
import argparse

import cv2

import face_detection
# facial_landmark_detector is provided by another module in the same project.

ap = argparse.ArgumentParser()
default_model = "../models/res10_300x300_ssd_iter_140000_fp16.caffemodel"
ap.add_argument("-p",
                "--prototxt",
                default="../models/deploy.prototxt",
                help="path to the Caffe deploy prototxt")
ap.add_argument("-m",
                "--model",
                default=default_model,
                help="path to the pre-trained Caffe face detection model")
ap.add_argument("-s",
                "--shape-predictor",
                default="../models/shape_predictor_68_face_landmarks.dat",
                help="path to the dlib facial landmark predictor")
args = vars(ap.parse_args())

# initialize the detectors
detector = face_detection.face_detector(args["prototxt"], args["model"])
predictor = facial_landmark_detector(args["shape_predictor"])

cap = cv2.VideoCapture(0)

# Read frames from the webcam until Esc is pressed.
while cv2.waitKey(1) != 27:
    ret, frame = cap.read()
    if not ret:
        break
    (boxes, confidences) = detector.detect(frame)
    shapes = predictor.predict(frame, boxes)

    frame = detector.draw(frame, boxes, confidences)
    frame = predictor.draw(frame, shapes)
    cv2.imshow("Camera", frame)

cap.release()
cv2.destroyAllWindows()
Code example #6
File: main.py  Project: kiochan/facede
def main():
    # Create a detector, register a listener callback, and start detection.
    # face_detector comes from the project's own module, and `f` is a callback
    # defined elsewhere in the project.
    face = face_detector()
    face.set_listener(f)
    face.detect()
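
The call to set_listener above registers a callback `f` that the excerpt does not define. A minimal hypothetical sketch of such a callback is shown below; the argument it receives is an assumption made for illustration, not the documented facede API.

def f(faces):
    # Hypothetical listener: assumes the detector passes the detected face
    # regions to the callback. Replace with the project's real callback.
    print("detected {} face(s)".format(len(faces)))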