def identify_faces(image, feature_type, classifier_name):
    """ Function detects and classifies faces in an image """

    model_file = cfg.ssd_model
    prototxt_file = cfg.prototxt_file

    if np.shape(image)[0] >= np.shape(image)[1]:
        # individual photo config
        merge_overlap = 0.1
        aspect_ratio_bounds = (0.4, 2)
        min_confidence = 0.6
        step_size = 1000
        window_size = (2000, 2000)
    else:
        # group photo config
        merge_overlap = 0.2
        aspect_ratio_bounds = (0.5, 1.7)
        min_confidence = 0.5
        ws = int(round(np.shape(image)[0] / 6))
        step_size = int(round(ws / 2))
        window_size = (ws, ws)

    face_bboxes = detect_faces(image, model_file, prototxt_file,
                               min_confidence, aspect_ratio_bounds,
                               merge_overlap, step_size, window_size)

    results = []
    for face_bbox in face_bboxes:
        x1, y1, x2, y2 = face_bbox
        face_img = image[y1:y2, x1:x2]

        # skip degenerate bboxes (any dimension of size zero)
        if any(dim == 0 for dim in np.shape(face_img)):
            continue
        x = int(round(np.mean([x1, x2])))
        y = int(round(np.mean([y1, y2])))
        face_id = classify_face(face_img,
                                method=classifier_name,
                                feature_type=feature_type)
        # map unknowns to the configured sentinel, otherwise use the numeric ID
        if 'unknown' in face_id:
            face_id = cfg.unknown_face_return_value
        else:
            face_id = int(face_id)
        results.append([face_id, x, y])

    # if no faces were detected, or every bbox was degenerate, return an empty array
    if not results:
        return np.array([[]])

    return np.array(results, dtype=np.int64)
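For reference, a minimal call sketch (the feature and classifier names below are placeholders, and cfg, detect_faces, and classify_face are assumed to come from the surrounding module):

import cv2

image = cv2.imread('group_photo.jpg')  # hypothetical input path
result_mat = identify_faces(image, feature_type='hog', classifier_name='svm')
if result_mat.size:
    for face_id, x, y in result_mat:
        print("face {} centered at ({}, {})".format(face_id, x, y))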
def recognize(img_vec, tags, mean, eig_vec):
    face_classifier_name = 'haarcascade_frontalface_default.xml'
    face_classifier = cv2.CascadeClassifier(face_classifier_name)

    # Capture video from the default camera
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces_vec = detect_faces(gray, face_classifier)
        cv2.imshow("frame", gray)

        key = cv2.waitKey(1)
        if key & 0xFF == ord(' '):
            if len(faces_vec) == 0:
                print("No faces detected. Try again.")
            else:
                # Weights of the dataset
                pca_data = img_vec - mean
                data_weights = np.transpose(
                    np.dot(eig_vec, np.transpose(pca_data)))

                for img_idx, face in enumerate(faces_vec, start=1):
                    # Weights of the test image
                    pca_data = face - mean
                    test_weights = []
                    for i in range(len(eig_vec)):
                        test_weights.append(
                            np.dot(eig_vec[i], np.transpose(pca_data)))

                    # Euclidean distance between weight vectors
                    dist = []
                    for idx in range(len(data_weights)):
                        diff = np.linalg.norm(test_weights - data_weights[idx])
                        dist.append(diff)
                    recognized_tag = tags[int(np.argmin(dist))]

                    print("Recognized person #{}: {}".format(
                        img_idx, recognized_tag))
        if key & 0xFF == 27:  # ESC quits
            break

    cap.release()
    cv2.destroyAllWindows()
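The per-face matching above is a nearest-neighbour search in eigenface weight space; the same comparison can be written as one vectorized step (a sketch, assuming data_weights is an (n_samples, n_components) array):

import numpy as np

def match_tag(test_weights, data_weights, tags):
    # distance from the test weights to every training sample in one call
    dists = np.linalg.norm(data_weights - np.asarray(test_weights), axis=1)
    return tags[int(np.argmin(dists))]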
Example 3
def correct_eyes_face(image, underline):
    img0 = image.copy()
    img = image
    # Faces
    faces = detect_faces(img, underline)
    for (x, y, w, h) in faces:
        face_area = img[y:y + h, x:x + w]
        face_area0 = img0[y:y + h, x:x + w]
        # Eyes in the face
        eyes = detect_eyes(face_area0, underline)
        correct_eyes(face_area, eyes)
        # Underline borders (drawn per face, so each face uses its own eyes)
        if underline:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(face_area, (ex, ey), (ex + ew, ey + eh),
                              (0, 255, 0), 2)
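A possible invocation (detect_faces, detect_eyes, and correct_eyes are helpers from the surrounding module; the file names are hypothetical):

import cv2

img = cv2.imread('portrait.jpg')
correct_eyes_face(img, underline=True)  # edits img in place; draws boxes when underline is set
cv2.imwrite('portrait_corrected.jpg', img)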
Example 4
def get_similar_faces(img, max_results, query_dets):

    if query_dets == " ":
        # 1 - Detect face in query image (dlib or Retina face)
        try:
            detector = init_detector()
            dets = detect_faces(img, detector)
    except Exception:
            raise DetectorNotFoundError()
    else:
        dets = convert_bbox_string(query_dets)

    if len(dets) == 0:
        raise NoFaceInImageError()

    # 2 - Align face
    try:
        fa = init_aligner()
        faces = align_faces_sim_search(img, dets, fa)
        face = faces[0]
    except Exception:
        raise AlignerNotFoundError()

    # 3 - Extract feature vector for query face
    try:
        face_embedding = embed_face(face)
    except Exception:
        raise ModelNotFoundError()

    # 4 - similarity search on faiss index
    try:
        index = load_index(PATH_INDEX_FILE, MODE)
        lbs_index = load_labels(PATH_LABELS_FILE)
    except Exception:
        raise IndexNotFoundError()

    result_lst = similarity_search(index, lbs_index, [face_embedding],
                                   max_results)

    return result_lst
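The final step delegates to similarity_search; with a FAISS index, that call typically reduces to index.search. A sketch under that assumption (the label mapping and return shape are illustrative):

import numpy as np

def similarity_search(index, labels, embeddings, max_results):
    # FAISS expects a float32 matrix of shape (n_queries, dim)
    queries = np.asarray(embeddings, dtype='float32')
    distances, indices = index.search(queries, max_results)
    # map row indices back to labels, skipping the -1 padding FAISS
    # returns when fewer than max_results neighbours exist
    return [(labels[i], float(d))
            for i, d in zip(indices[0], distances[0]) if i != -1]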
Example 5
def add_people():
    print("Parsing data...")
    parser = add_parser()
    args = parse_args(parser)

    print("Reading data...")
    if not os.path.isfile(args.new_data_file):
        copyfile(args.old_data_file, args.new_data_file)

    data = CsvData("image_data")
    data.read(args.new_data_file)

    print("Adding images to the dataset...")
    face_classifier_name = 'haarcascade_frontalface_default.xml'
    face_classifier = cv2.CascadeClassifier(face_classifier_name)
    tag_prev = ''
    skip_tag = False
    for root, dirs, files in os.walk(args.new_db_path):
        for fname in files:
            tag = os.path.basename(root)
            if tag != tag_prev:
                tag_prev = tag
                skip_tag = tag in data.x
                if skip_tag:
                    print("{} already in the database. "
                          "Omitting...".format(tag))
                else:
                    print("Adding {}...".format(tag))
            if skip_tag:
                continue
            img = cv2.imread(os.path.join(root, fname), 0)
            img = detect_faces(img, face_classifier, detect_one=True)
            img_vec = np.reshape(img, -1)

            # Save tag and image vector to a csv file
            df = pd.DataFrame({'tag': [tag], 'face': [img_vec.tolist()]})
            with open(args.new_data_file, 'a') as f:
                df.to_csv(f, header=False, index=False)

    print("Done.")
Example 6
def analyze_video(queue, video_file, settings):
    """
    Finds faces' locations and encodings in the desired frames, and finds unique faces
    :param queue: Multiprocessing queue
    :param video_file: Path to input video file
    :param settings: Settings dictionary
    """
    # get video
    video = cv2.VideoCapture(video_file)  # input VideoCapture object
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))  # number of frames in input video
    # initialize models
    net = models.detection_dnn_model()  # detection model
    face_encoder = models.face_encoder()  # face recognition encoding model
    pose_predictor = models.pose_predictor()  # pose predictor model for finding face landmarks
    # get settings
    track_period = settings["track_period"]  # track period from settings
    resamples = settings["resamples"]  # number of resamples from settings
    tolerance = settings["tolerance"]  # face matching tolerance from settings
    # initialize lists
    face_locations = []  # list of face locations for each frame
    face_encodings = []  # list of face encodings for each frame
    unique_frames = []  # list of frame images including unique faces
    unique_face_locations = []  # list of unique faces' locations
    unique_encodings = []  # list of unique faces' encodings
    for i in range(frame_count):
        ret, img = video.read()  # ret indicates if frame was read correctly, img is last read frame
        if not ret:  # stop early if the frame could not be read
            break
        if i % track_period == 0:  # frame selected for detection
            rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # frame in rgb format
            face_locations.append(detection.detect_faces(img, net))
            face_encodings.append(recognition.encode_faces(rgb, face_locations[-1], pose_predictor, face_encoder, resamples))
            new_indices, new_encodings = recognition.exclude_faces(face_encodings[-1], np.array(unique_encodings), tolerance)  # indices and encodings of new faces found
            for k, new_idx in enumerate(new_indices):  # for each new face found
                unique_frames.append(rgb)
                unique_face_locations.append(face_locations[-1][new_idx])
                unique_encodings.append(new_encodings[k])
    send_data(queue, [face_locations, face_encodings, unique_frames, unique_face_locations, unique_encodings])
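Because results are reported through the queue, the function is intended to run as a worker process. A launch sketch (the settings values and file name are illustrative; send_data is assumed to put one message on the queue):

import multiprocessing as mp

if __name__ == '__main__':
    queue = mp.Queue()
    settings = {"track_period": 15, "resamples": 1, "tolerance": 0.6}
    worker = mp.Process(target=analyze_video,
                        args=(queue, 'input.mp4', settings))
    worker.start()
    results = queue.get()  # blocks until the worker calls send_data
    worker.join()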
Example 7
    print("Start Reading...")
    while True:
        _, img = video.read()
        img = cv2.transpose(img)
        img = cv2.flip(img, 1)

        height, width, channel = img.shape
        # matrix = cv2.getRotationMatrix2D((width / 2, height / 2), 270, 1)
        # frame = cv2.warpAffine(frame, matrix, (width, height))

        tic = time.time()
        resize_img = cv2.resize(img, (0, 0), fx=resize_rate, fy=resize_rate)

        if resize_img.ndim == 2:
            resize_img = facenet.to_rgb(resize_img)
        resize_img = resize_img[:, :, 0:3]

        bounding_boxes = detection.detect_faces(resize_img, img.shape)

        if bounding_boxes.shape[0] > 0:
            match_names, p = recognition.recognize_faces(img, bounding_boxes)
        else:
            bounding_boxes = match_names = p = []
        toc = time.time() - tic

        img = utils.mosaic(img, bounding_boxes, match_names, 6)
        img = utils.draw_box(img, bounding_boxes, match_names, p)

        cv2.imshow("origin", img)
        if cv2.waitKey(1) & 0xFF == 27:  # ESC quits; waitKey(0) would stall on every frame
            break
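As an aside, the transpose-then-flip pair above is the usual OpenCV idiom for a 90-degree clockwise rotation; recent OpenCV versions (3.2+) expose it directly:

import cv2

rotated = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)  # equivalent to transpose + flip(img, 1)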