Example #1
def landmark(video, model, tracking, output):
    """Facial features detection"""

    # face generator
    frame_width, frame_height = video.frame_size
    faceGenerator = getFaceGenerator(tracking,
                                     frame_width, frame_height,
                                     double=False)
    faceGenerator.send(None)

    face = Face(landmarks=model)

    with open(output, 'w') as foutput:

        for timestamp, rgb in video:

            # get all detected faces at this time
            T, faces = faceGenerator.send(timestamp)
            # note that T might differ slightly from timestamp
            # due to different steps in frame iteration

            for identifier, boundingBox, _ in faces:

                landmarks = face._get_landmarks(rgb, boundingBox)

                foutput.write('{t:.3f} {identifier:d}'.format(
                    t=T, identifier=identifier))
                for x, y in landmarks:
                    foutput.write(' {x:.5f} {y:.5f}'.format(x=x / frame_width,
                                                            y=y / frame_height))
                foutput.write('\n')

            foutput.flush()
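
This example relies on Python's send-based generator protocol: getFaceGenerator must be primed with send(None) before it can accept timestamps, and each subsequent send(timestamp) returns the stored timestamp T together with the faces tracked at that instant. A minimal, self-contained sketch of that protocol (frame_sync and its data are hypothetical stand-ins, not the actual getFaceGenerator):

def frame_sync(frames):
    """Toy stand-in for getFaceGenerator: steps through pre-computed
    (T, faces) results, echoing back the stored timestamp T, which may
    differ slightly from the timestamp sent in."""
    timestamp = yield               # primed with send(None)
    for T, faces in frames:
        timestamp = yield T, faces  # receive the next requested timestamp

gen = frame_sync([(0.040, ['face-0']), (0.080, ['face-1'])])
gen.send(None)                      # prime: advance to the first yield
print(gen.send(0.041))              # (0.04, ['face-0'])
print(gen.send(0.081))              # (0.08, ['face-1'])
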
Example #2
def features(video, model, shape, output):
    """Openface FaceNet feature extraction"""

    face = Face(size=96, openface=model)

    # shape generator
    frame_width, frame_height = video.frame_size
    landmarkGenerator = getLandmarkGenerator(shape, frame_width, frame_height)
    landmarkGenerator.send(None)

    with open(output, 'w') as foutput:

        for timestamp, rgb in video:

            T, shapes = landmarkGenerator.send(timestamp)

            for identifier, landmarks in shapes:
                normalized_rgb = face._get_normalized(rgb, landmarks)
                normalized_bgr = cv2.cvtColor(normalized_rgb,
                                              cv2.COLOR_RGB2BGR)
                openface = face._get_openface(normalized_bgr)

                foutput.write('{t:.3f} {identifier:d}'.format(
                    t=T, identifier=identifier))
                for x in openface:
                    foutput.write(' {x:.5f}'.format(x=x))
                foutput.write('\n')

            foutput.flush()
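
One detail worth noting in this example: OpenCV routines conventionally expect BGR channel order, while the video pipeline yields RGB frames, so the frame is converted before being handed to the OpenFace network. The conversion is a pure channel swap, which a NumPy slice reproduces exactly. A small sketch (the 96x96 frame size mirrors the Face(size=96, ...) setting above; the random frame is just placeholder data):

import numpy as np
import cv2

rgb = np.random.randint(0, 256, (96, 96, 3), dtype=np.uint8)

bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)  # swap the R and B channels
assert np.array_equal(bgr, rgb[..., ::-1])  # identical to reversing the last axis
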
Example #3
def extract(video, landmark_model, embedding_model, tracking, landmark_output,
            embedding_output):
    """Facial features detection"""

    # face generator
    frame_width, frame_height = video.frame_size
    faceGenerator = getFaceGenerator(tracking,
                                     frame_width,
                                     frame_height,
                                     double=False)
    faceGenerator.send(None)

    face = Face(landmarks=landmark_model, embedding=embedding_model)

    with open(landmark_output, 'w') as flandmark, \
         open(embedding_output, 'w') as fembedding:

        for timestamp, rgb in video:

            # get all detected faces at this time
            T, faces = faceGenerator.send(timestamp)
            # note that T might differ slightly from timestamp
            # due to different steps in frame iteration

            for identifier, bounding_box, _ in faces:

                landmarks = face.get_landmarks(rgb, bounding_box)
                embedding = face.get_embedding(rgb, landmarks)

                flandmark.write('{t:.3f} {identifier:d}'.format(
                    t=T, identifier=identifier))
                for p in landmarks.parts():
                    x, y = p.x, p.y
                    flandmark.write(' {x:.5f} {y:.5f}'.format(
                        x=x / frame_width, y=y / frame_height))
                flandmark.write('\n')

                fembedding.write('{t:.3f} {identifier:d}'.format(
                    t=T, identifier=identifier))
                for x in embedding:
                    fembedding.write(' {x:.5f}'.format(x=x))
                fembedding.write('\n')

            flandmark.flush()
            fembedding.flush()
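
Both files written by extract share the space-separated layout used in every example here: a timestamp, an integer track identifier, then a flat run of floats (normalized x/y pairs for landmarks, raw components for embeddings). A hypothetical reader for that format, shown as a sketch rather than an official API:

def read_records(path):
    """Parse lines of the form 't identifier v1 v2 ...' as written
    by flandmark/fembedding above."""
    with open(path) as f:
        for line in f:
            tokens = line.split()
            yield float(tokens[0]), int(tokens[1]), [float(v) for v in tokens[2:]]

# Re-pair normalized landmark coordinates, assuming a file 'landmarks.txt':
# for t, identifier, values in read_records('landmarks.txt'):
#     points = list(zip(values[0::2], values[1::2]))
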
Example #4
def extract(video, landmark_model, embedding_model, tracking, landmark_output, embedding_output):
    """Facial features detection"""

    # face generator
    frame_width, frame_height = video.frame_size
    faceGenerator = getFaceGenerator(tracking,
                                     frame_width, frame_height,
                                     double=False)
    faceGenerator.send(None)

    face = Face(landmarks=landmark_model,
                embedding=embedding_model)

    with open(landmark_output, 'w') as flandmark, \
         open(embedding_output, 'w') as fembedding:

        for timestamp, rgb in video:

            # get all detected faces at this time
            T, faces = faceGenerator.send(timestamp)
            # note that T might differ slightly from timestamp
            # due to different steps in frame iteration

            for identifier, bounding_box, _ in faces:

                landmarks = face.get_landmarks(rgb, bounding_box)
                embedding = face.get_embedding(rgb, landmarks)

                flandmark.write('{t:.3f} {identifier:d}'.format(
                    t=T, identifier=identifier))
                for p in landmarks.parts():
                    x, y = p.x, p.y
                flandmark.write(' {x:.5f} {y:.5f}'.format(
                    x=x / frame_width, y=y / frame_height))
                flandmark.write('\n')

                fembedding.write('{t:.3f} {identifier:d}'.format(
                    t=T, identifier=identifier))
                for x in embedding:
                    fembedding.write(' {x:.5f}'.format(x=x))
                fembedding.write('\n')

            flandmark.flush()
            fembedding.flush()
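
All four examples store landmark coordinates normalized by the frame size (x / frame_width, y / frame_height), which keeps the output independent of video resolution. Mapping them back to pixels only needs the original dimensions; a tiny illustration with hypothetical frame dimensions:

def to_pixels(norm_points, width, height):
    """Map normalized (x, y) pairs back to pixel coordinates."""
    return [(x * width, y * height) for x, y in norm_points]

print(to_pixels([(0.5, 0.25)], 640, 360))  # [(320.0, 90.0)]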