Example #1
def annotate(pose,
             face_box_model='mtcnn',
             au_model='rf',
             face_id_model='deepface',
             every=1,
             output_path=None,
             save_results=True,
             shot_detection=True,
             extract_aus=True,
             extract_face_id=True,
             num_workers=None):

    ########## Run pose estimation ##########

    pose_data = estimate_pose(pose, num_workers=num_workers)

    ########## Run shot detection ##########

    # Split tracks based on shot detection
    if shot_detection:
        tqdm.write("Detecting shots...")
        shots = utils.get_shots(pose.vid_path)
        # Here, shots is a list of tuples (each tuple contains the in and out frames of each shot)
        pose_data, pose.splitcount = utils.split_tracks(pose_data, shots)
        pose.shots = shots

    # Add pose data to the pose object
    pose.pose_data = pose_data
    pose.n_tracks = len(pose_data)

    ########## Run face detection + face feature extraction ##########

    if extract_aus:
        detector = Detector(face_model=face_box_model, au_model=au_model)
        tqdm.write("Extracting facial expressions...")
        pose.face_data = detector.detect_video(pose.vid_path,
                                               skip_frames=every)

    ########## Extract face identity encodings ##########

    if extract_aus and extract_face_id:
        add_face_id(pose)

    ########## Saving results ##########

    if output_path is None:
        output_path = os.getcwd()

    if save_results:
        out_dir = os.path.join(output_path, pose.vid_name)
        os.makedirs(out_dir, exist_ok=True)
        if extract_aus:
            pose.face_data.to_csv(os.path.join(out_dir, 'psypose_faces.csv'))
        joblib.dump(pose.pose_data,
                    os.path.join(out_dir, 'psypose_bodies.pkl'))

    print('Finished annotation for file:', pose.vid_name)
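
For orientation, the sketch below shows how annotate() might be invoked. The stand-in pose object, file names, and argument values are assumptions for illustration only; in practice the pose object would be constructed by the surrounding library and would already expose vid_path and vid_name as used above.

# Hypothetical usage sketch (all names below are assumptions, not part of the example above).
from types import SimpleNamespace

# Any object exposing vid_path / vid_name the way annotate() expects can stand in here.
pose = SimpleNamespace(vid_path="movie.mp4", vid_name="movie")

annotate(pose,
         every=2,                  # run face/AU extraction on every 2nd frame
         output_path="results",    # hypothetical output directory
         extract_face_id=False)    # skip identity encodings for a faster run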
Example #2
def test_simultaneous():
    # Test processing everything:
    detector04 = Detector(
        face_model="RetinaFace",
        emotion_model="fer",
        landmark_model="PFLD",
        au_model="jaanet",
    )
    files = detector04.process_frame(img01, 0)
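
Most of the test examples in this listing rely on module-level fixtures (img01, h, w, and the get_test_data_path() helper) that are defined elsewhere in the test suite rather than in the snippets themselves. A plausible reconstruction of that setup, given purely as an assumption, is:

import os
import cv2

# Assumed fixture setup (not part of the original listing): load the shared test image
# and record its height/width for the bounding-box and landmark assertions.
inputFname = os.path.join(get_test_data_path(), "input.jpg")
img01 = cv2.imread(inputFname)
h, w, _ = img01.shape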
Example #3
def test_pfld():
    detector03 = Detector(face_model="RetinaFace",
                          emotion_model=None,
                          landmark_model="PFLD")
    bboxes = detector03.detect_faces(img01)
    landmarks = detector03.detect_landmarks(img01, bboxes)
    assert landmarks[0].shape == (68, 2)
    assert (np.any(landmarks[0][:, 0] > 0) and np.any(landmarks[0][:, 0] < w)
            and np.any(landmarks[0][:, 1] > 0)
            and np.any(landmarks[0][:, 1] < h))
Example #4
def test_mobilenet():
    detector02 = Detector(face_model="RetinaFace",
                          emotion_model=None,
                          landmark_model="MobileNet")
    bboxes = detector02.detect_faces(img01)
    landmarks = detector02.detect_landmarks(img01, bboxes)
    assert landmarks[0].shape == (68, 2)
    assert (np.any(landmarks[0][:, 0] > 0) and np.any(landmarks[0][:, 0] < w)
            and np.any(landmarks[0][:, 1] > 0)
            and np.any(landmarks[0][:, 1] < h))
Example #5
def test_multiface():
    inputFname2 = os.path.join(get_test_data_path(),
                               "tim-mossholder-hOF1bWoet_Q-unsplash.jpg")
    img02 = cv2.imread(inputFname2)
    detector = Detector(
        face_model="RetinaFace",
        emotion_model="fer",
        landmark_model="PFLD",
        au_model="jaanet",
    )
    files = detector.process_frame(img02, 0)
    assert files.shape[0] == 5
Example #6
def test_jaanet():
    # AU Detection Case:
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="jaanet",
    )
    bboxes = detector1.detect_faces(img01)
    lands = detector1.detect_landmarks(img01, bboxes)
    aus = detector1.detect_aus(img01, lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 12
Example #7
def test_pnp():
    # Test that facepose can be estimated properly using landmarks + pnp algorithm
    detector = Detector(face_model="RetinaFace",
                        landmark_model="MobileFaceNet",
                        facepose_model="PnP")
    bboxes = detector.detect_faces(frame=img01)
    lms = detector.detect_landmarks(frame=img01, detected_faces=bboxes)
    poses = detector.detect_facepose(frame=img01, landmarks=lms)
    pose_to_test = poses[0][0]  # first image and first face
    pitch, roll, yaw = pose_to_test.reshape(-1)
    assert -10 < pitch < 10
    assert -5 < roll < 5
    assert -10 < yaw < 10
Example #8
def test_drml():
    # AU Detection Case 2:
    inputFname = os.path.join(get_test_data_path(), "sampler0000.jpg")
    img01 = cv2.imread(inputFname)
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="drml",
    )
    bboxes = detector1.detect_faces(img01)
    lands = detector1.detect_landmarks(img01, bboxes)
    aus = detector1.detect_aus(img01, lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 12
Example #9
def test_detect_image():
    # Test detect image
    detector = Detector(n_jobs=1)
    inputFname = os.path.join(get_test_data_path(), "input.jpg")
    out = detector.detect_image(inputFname=inputFname)
    assert type(out) == Fex
    assert len(out) == 1
    assert out.happiness.values[0] > 0

    outputFname = os.path.join(get_test_data_path(), "output.csv")
    out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
    assert out
    assert os.path.exists(outputFname)
    out = pd.read_csv(outputFname)
    assert out.happiness.values[0] > 0
Example #10
def test_mtcnn():
    detector03 = Detector(
        face_model="MTCNN",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector03.detect_faces(img01)
    bbox_x = out[0][0]
    assert bbox_x is not None
    bbox_width = out[0][1]
    bbox_y = out[0][2]
    bbox_height = out[0][3]
    assert len(out[0]) == 5
    assert 180 < bbox_x < 200
Example #11
def test_retinaface():
    detector02 = Detector(
        face_model="RetinaFace",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector02.detect_faces(img01)
    bbox_x = out[0][0]
    assert bbox_x is not None
    bbox_width = out[0][1]
    bbox_y = out[0][2]
    bbox_height = out[0][3]
    assert len(out[0]) == 5
    assert 180 < bbox_x < 200
Example #12
def test_faceboxes():
    # Face Detector Test Case:
    detector01 = Detector(
        face_model="FaceBoxes",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector01.detect_faces(img01)
    bbox_x = out[0][0]
    assert bbox_x is not None
    bbox_width = out[0][1]
    bbox_y = out[0][2]
    bbox_height = out[0][3]
    assert len(out[0]) == 5
    assert 180 < bbox_x < 200
Example #13
def test_detect_video_parallel():
    # Test detect video
    detector = Detector(n_jobs=2)
    inputFname = os.path.join(get_test_data_path(), "input.mp4")
    out = detector.detect_video(inputFname=inputFname,
                                skip_frames=20,
                                verbose=True)
    assert len(out) == 4

    outputFname = os.path.join(get_test_data_path(), "output.csv")
    out = detector.detect_video(inputFname=inputFname,
                                outputFname=outputFname,
                                skip_frames=10)
    assert out
    assert os.path.exists(outputFname)
    out = pd.read_csv(outputFname)
    assert out.happiness.values.max() > 0
Example #14
def test_img2pose():
    # Test that both face detection and facepose estimation work
    detector = Detector(face_model="img2pose", facepose_model="img2pose")
    # Face detection
    faces = detector.detect_faces(img01)[0]
    bbox_x = faces[0][0]
    assert bbox_x is not None
    assert len(faces[0]) == 5
    assert 180 < bbox_x < 200

    # Pose estimation
    poses = detector.detect_facepose(img01)[0]
    pose_to_test = poses[0][0]  # first image and first face
    pitch, roll, yaw = pose_to_test.reshape(-1)
    assert -10 < pitch < 10
    assert -5 < roll < 5
    assert -10 < yaw < 10
Example #15
def test_multiface():
    # Test multiple faces
    inputFname2 = os.path.join(get_test_data_path(),
                               "tim-mossholder-hOF1bWoet_Q-unsplash.jpg")
    img01 = read_pictures([inputFname])
    _, h, w, _ = img01.shape

    img02 = cv2.imread(inputFname2)
    # @tiankang: seems to be a problem with fer
    detector = Detector(
        face_model="RetinaFace",
        emotion_model="fer",
        landmark_model="PFLD",
        au_model="jaanet",
    )
    files, _ = detector.process_frame(img02, 0)
    assert files.shape[0] == 5
Example #16
def test_mtcnn():
    detector03 = Detector(
        face_model="MTCNN",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector03.detect_faces(img01)
    bbox_left = out[0][0]
    assert bbox_left is not None
    bbox_right = out[0][1]
    bbox_top = out[0][2]
    bbox_bottom = out[0][3]
    assert len(out[0]) == 5
    assert (bbox_left > 0 and bbox_right > 0 and bbox_top > 0
            and bbox_bottom > 0 and bbox_left < bbox_right
            and bbox_top < bbox_bottom and bbox_left < w and bbox_right < w
            and bbox_top < h and bbox_bottom < h)
Example #17
def test_faceboxes():
    # Face Detector Test Case:
    detector01 = Detector(
        face_model="FaceBoxes",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector01.detect_faces(img01)
    bbox_left = out[0][0]
    assert bbox_left is not None
    bbox_right = out[0][1]
    bbox_top = out[0][2]
    bbox_bottom = out[0][3]
    assert len(out[0]) == 5
    assert (bbox_left > 0 and bbox_right > 0 and bbox_top > 0
            and bbox_bottom > 0 and bbox_left < bbox_right
            and bbox_top < bbox_bottom and bbox_left < w and bbox_right < w
            and bbox_top < h and bbox_bottom < h)
Example #18
def test_detector():
    detector = Detector(n_jobs=1)
    assert detector['n_jobs'] == 1
    assert type(detector) == Detector

    # Test detect image
    inputFname = os.path.join(get_test_data_path(), "input.jpg")
    out = detector.detect_image(inputFname=inputFname)
    assert type(out) == Fex
    assert len(out) == 1
    assert out.happiness.values[0] > 0

    outputFname = os.path.join(get_test_data_path(), "output.csv")
    out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
    assert out
    assert os.path.exists(outputFname)
    out = pd.read_csv(outputFname)
    assert out.happiness.values[0] > 0

    # Test detect video
    inputFname = os.path.join(get_test_data_path(), "input.mp4")
    out = detector.detect_video(inputFname=inputFname)
    assert len(out) == 72

    outputFname = os.path.join(get_test_data_path(), "output.csv")
    out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
    assert out
    assert os.path.exists(outputFname)
    out = pd.read_csv(outputFname)
    assert out.happiness.values.max() > 0
Example #19
def test_rf():
    # AU Detection Case:
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="RF",
    )
    bboxes = detector1.detect_faces(img01)
    lands = detector1.detect_landmarks(img01, bboxes)
    convex_hull, new_lands = detector1.extract_face(
        frame=img01,
        detected_faces=[bboxes[0:4]],
        landmarks=lands,
        size_output=112)
    hogs = detector1.extract_hog(frame=convex_hull, visualize=False)
    aus = detector1.detect_aus(frame=hogs, landmarks=new_lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 20
Example #20
def test_svm():
    # AU Detection Case:
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="svm",
    )
    detected_faces = detector1.detect_faces(img01)
    landmarks = detector1.detect_landmarks(img01, detected_faces)
    hogs, new_lands = detector1._batch_hog(frames=img01,
                                           detected_faces=detected_faces,
                                           landmarks=landmarks)
    aus = detector1.detect_aus(frame=hogs, landmarks=new_lands)

    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 20
Example #21
def test_img2pose_mismatch():
    # Check that the `Detector` properly handles the case where the user selects img2pose as the
    # face pose estimator but a different model as the face detector. The detector should warn the
    # user that img2pose must serve as both face detector and pose estimator, and force the face
    # model to `img2pose`.
    detector = Detector(face_model="RetinaFace", facepose_model="img2pose-c")
    assert detector.info["face_model"] == "img2pose-c"
Example #22
def test_emotionsvm():
    inputFname = os.path.join(get_test_data_path(), "input.jpg")
    detector1 = Detector(emotion_model="svm")
    out = detector1.detect_image(inputFname)
    assert out.emotions()["happiness"].values > 0.5
Example #23
def test_detector():
    detector = Detector(n_jobs=1)
    assert detector["n_jobs"] == 1
    assert type(detector) == Detector
Example #24
def test_detect_video():
    # Test detect video
    detector = Detector(n_jobs=1)
    inputFname = os.path.join(get_test_data_path(), "input.mp4")
    out = detector.detect_video(inputFname=inputFname, skip_frames=60)
    assert len(out) == 2
Example #25
def test_emotionrf():
    # The emotion RF model is not very good
    inputFname = os.path.join(get_test_data_path(), "input.jpg")
    detector1 = Detector(emotion_model="rf")
    out = detector1.detect_image(inputFname)
    assert out.emotions()["happiness"].values > 0.0
Example #26
def test_wrongmodelname():
    with pytest.raises(KeyError):
        detector1 = Detector(emotion_model="badmodelname")
Example #27
def test_resmasknet():
    inputFname = os.path.join(get_test_data_path(), "sampler0000.jpg")
    detector1 = Detector(emotion_model="resmasknet")
    out = detector1.detect_image(inputFname)
    assert out.emotions()["neutral"].values > 0.5
Example #28
def test_nofile():
    with pytest.raises(FileNotFoundError):
        inputFname = os.path.join(get_test_data_path(), "nosuchfile.jpg")
        detector1 = Detector(emotion_model="svm")
        out = detector1.detect_image(inputFname)