import os

import cv2
import numpy as np

from feat.detector import Detector
from feat.utils import get_test_data_path

# Shared fixtures used throughout these tests: a single-face sample image from the
# package's test data, plus its height and width. The filename here is an assumption;
# substitute whichever single-face image ships with your copy of the test data.
inputFname = os.path.join(get_test_data_path(), "input.jpg")
img01 = cv2.imread(inputFname)
h, w, _ = img01.shape


def test_mobilenet():
    # Landmark detection with the MobileNet model should return the standard
    # 68-point (x, y) layout, with points falling inside the image bounds.
    detector02 = Detector(
        face_model="RetinaFace", emotion_model=None, landmark_model="MobileNet"
    )
    bboxes = detector02.detect_faces(img01)
    landmarks = detector02.detect_landmarks(img01, bboxes)
    assert landmarks[0].shape == (68, 2)
    assert (
        np.any(landmarks[0][:, 0] > 0)
        and np.any(landmarks[0][:, 0] < w)
        and np.any(landmarks[0][:, 1] > 0)
        and np.any(landmarks[0][:, 1] < h)
    )

def test_pfld():
    detector03 = Detector(
        face_model="RetinaFace", emotion_model=None, landmark_model="PFLD"
    )
    bboxes = detector03.detect_faces(img01)
    landmarks = detector03.detect_landmarks(img01, bboxes)
    assert landmarks[0].shape == (68, 2)
    assert (
        np.any(landmarks[0][:, 0] > 0)
        and np.any(landmarks[0][:, 0] < w)
        and np.any(landmarks[0][:, 1] > 0)
        and np.any(landmarks[0][:, 1] < h)
    )

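# The two tests above differ only in the landmark model name. A minimal sketch of
# how they could be collapsed with pytest.mark.parametrize, assuming the same
# img01 fixture; this is an editorial alternative, not part of the original suite.
import pytest


@pytest.mark.parametrize("landmark_model", ["MobileNet", "PFLD", "MobileFaceNet"])
def _landmark_contract_sketch(landmark_model):
    detector = Detector(
        face_model="RetinaFace", emotion_model=None, landmark_model=landmark_model
    )
    bboxes = detector.detect_faces(img01)
    landmarks = detector.detect_landmarks(img01, bboxes)
    assert landmarks[0].shape == (68, 2)
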
def test_jaanet():
    # AU Detection Case:
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="jaanet",
    )
    bboxes = detector1.detect_faces(img01)
    lands = detector1.detect_landmarks(img01, bboxes)
    aus = detector1.detect_aus(img01, lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 12

def test_pnp():
    # Test that facepose can be estimated properly using landmarks + the PnP algorithm
    detector = Detector(
        face_model="RetinaFace", landmark_model="MobileFaceNet", facepose_model="PnP"
    )
    bboxes = detector.detect_faces(frame=img01)
    lms = detector.detect_landmarks(frame=img01, detected_faces=bboxes)
    poses = detector.detect_facepose(frame=img01, landmarks=lms)
    pose_to_test = poses[0][0]  # first image and first face
    pitch, roll, yaw = pose_to_test.reshape(-1)
    assert -10 < pitch < 10
    assert -5 < roll < 5
    assert -10 < yaw < 10

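# For context, a minimal sketch of the idea behind a PnP facepose model: solve the
# Perspective-n-Point problem between a generic 3D head model and six of the
# detected 2D landmarks, then read Euler angles off the recovered rotation. The 3D
# model points and camera intrinsics below are illustrative assumptions, not
# py-feat's own values.
def _pnp_pose_sketch(landmarks_2d, img_w, img_h):
    # Six canonical 3D points (nose tip, chin, eye corners, mouth corners) from a
    # generic head model; units are arbitrary.
    model_3d = np.array(
        [
            (0.0, 0.0, 0.0),  # nose tip
            (0.0, -330.0, -65.0),  # chin
            (-225.0, 170.0, -135.0),  # left eye, outer corner
            (225.0, 170.0, -135.0),  # right eye, outer corner
            (-150.0, -150.0, -125.0),  # left mouth corner
            (150.0, -150.0, -125.0),  # right mouth corner
        ]
    )
    # Matching 2D points from the 68-landmark layout (30 = nose tip, 8 = chin,
    # 36/45 = outer eye corners, 48/54 = mouth corners).
    image_2d = landmarks_2d[[30, 8, 36, 45, 48, 54], :].astype("double")
    # Approximate pinhole camera: focal length ~ image width, principal point at
    # the image center, no lens distortion.
    camera = np.array(
        [[img_w, 0, img_w / 2], [0, img_w, img_h / 2], [0, 0, 1]], dtype="double"
    )
    ok, rvec, _ = cv2.solvePnP(model_3d, image_2d, camera, np.zeros((4, 1)))
    assert ok
    rotation, _ = cv2.Rodrigues(rvec)
    # RQDecomp3x3 returns Euler angles in degrees; the pitch/yaw/roll ordering
    # depends on the decomposition convention.
    return cv2.RQDecomp3x3(rotation)[0]
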
def test_drml():
    # AU Detection Case 2: this test loads its own sample image, shadowing the
    # module-level img01 fixture locally.
    inputFname = os.path.join(get_test_data_path(), "sampler0000.jpg")
    img01 = cv2.imread(inputFname)
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="drml",
    )
    bboxes = detector1.detect_faces(img01)
    lands = detector1.detect_landmarks(img01, bboxes)
    aus = detector1.detect_aus(img01, lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 12

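# A small sketch of how a 12-AU output row could be labeled. The AU list below is
# the BP4D occurrence set that JAANet/DRML-style models are typically trained on;
# treat it as an assumption and check the model documentation before relying on it.
AU12_NAMES = ["AU01", "AU02", "AU04", "AU06", "AU07", "AU10",
              "AU12", "AU14", "AU15", "AU17", "AU23", "AU24"]


def _label_aus_sketch(au_row):
    # Pair each predicted value with its AU name, e.g. {"AU01": 0.12, ...}.
    return dict(zip(AU12_NAMES, np.asarray(au_row).reshape(-1)))
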
def test_retinaface():
    detector02 = Detector(
        face_model="RetinaFace",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector02.detect_faces(img01)
    # Each detected face is [x1, y1, x2, y2, confidence].
    bbox_x1 = out[0][0]
    assert bbox_x1 is not None
    assert len(out[0]) == 5
    # The face in the sample image starts roughly 180-200 px from the left edge.
    assert 180 < bbox_x1 < 200

def test_img2pose():
    # Test that both face detection and facepose estimation work
    detector = Detector(face_model="img2pose", facepose_model="img2pose")

    # Face detection
    faces = detector.detect_faces(img01)[0]
    bbox_x = faces[0][0]
    assert bbox_x is not None
    assert len(faces[0]) == 5
    assert 180 < bbox_x < 200

    # Pose estimation
    poses = detector.detect_facepose(img01)[0]
    pose_to_test = poses[0][0]  # first image and first face
    pitch, roll, yaw = pose_to_test.reshape(-1)
    assert -10 < pitch < 10
    assert -5 < roll < 5
    assert -10 < yaw < 10

def test_svm():
    # AU Detection Case: the SVM model consumes HOG features rather than raw
    # frames, so the HOG extraction step runs explicitly here; it predicts 20 AUs.
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="svm",
    )
    detected_faces = detector1.detect_faces(img01)
    landmarks = detector1.detect_landmarks(img01, detected_faces)
    hogs, new_lands = detector1._batch_hog(
        frames=img01, detected_faces=detected_faces, landmarks=landmarks
    )
    aus = detector1.detect_aus(frame=hogs, landmarks=new_lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 20

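# For context, a minimal sketch of HOG feature extraction of the kind the SVM/RF AU
# models build on, using scikit-image. The resize target and HOG parameters are
# illustrative assumptions, not the values py-feat uses internally.
def _hog_features_sketch(face_crop_bgr):
    from skimage.feature import hog

    gray = cv2.cvtColor(face_crop_bgr, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (112, 112))
    # Histogram-of-oriented-gradients descriptor: gradient orientation histograms
    # pooled over small cells, normalized over blocks, flattened to one vector.
    return hog(
        gray,
        orientations=8,
        pixels_per_cell=(8, 8),
        cells_per_block=(2, 2),
        feature_vector=True,
    )
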
def test_mtcnn():
    detector03 = Detector(
        face_model="MTCNN",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector03.detect_faces(img01)
    # Each detected face is [x1, y1, x2, y2, confidence].
    bbox_left = out[0][0]
    assert bbox_left is not None
    bbox_top = out[0][1]
    bbox_right = out[0][2]
    bbox_bottom = out[0][3]
    assert len(out[0]) == 5
    # The box should be well-formed and lie inside the image, with the face
    # starting roughly 180-200 px from the left edge in the sample image.
    assert 180 < bbox_left < 200
    assert (
        bbox_left > 0
        and bbox_top > 0
        and bbox_right > 0
        and bbox_bottom > 0
        and bbox_left < bbox_right
        and bbox_top < bbox_bottom
        and bbox_left < w
        and bbox_right < w
        and bbox_top < h
        and bbox_bottom < h
    )

def test_rf():
    # AU Detection Case:
    detector1 = Detector(
        face_model="RetinaFace",
        emotion_model=None,
        landmark_model="MobileFaceNet",
        au_model="RF",
    )
    bboxes = detector1.detect_faces(img01)
    lands = detector1.detect_landmarks(img01, bboxes)
    convex_hull, new_lands = detector1.extract_face(
        frame=img01, detected_faces=[bboxes[0:4]], landmarks=lands, size_output=112
    )
    hogs = detector1.extract_hog(frame=convex_hull, visualize=False)
    aus = detector1.detect_aus(frame=hogs, landmarks=new_lands)
    assert np.sum(np.isnan(aus)) == 0
    assert aus.shape[-1] == 20

def test_faceboxes():
    # Face Detector Test Case:
    detector01 = Detector(
        face_model="FaceBoxes",
        landmark_model=None,
        au_model=None,
        emotion_model=None,
        n_jobs=1,
    )
    out = detector01.detect_faces(img01)
    # Each detected face is [x1, y1, x2, y2, confidence].
    bbox_left = out[0][0]
    assert bbox_left is not None
    bbox_top = out[0][1]
    bbox_right = out[0][2]
    bbox_bottom = out[0][3]
    assert len(out[0]) == 5
    # The box should be well-formed and lie inside the image, with the face
    # starting roughly 180-200 px from the left edge in the sample image.
    assert 180 < bbox_left < 200
    assert (
        bbox_left > 0
        and bbox_top > 0
        and bbox_right > 0
        and bbox_bottom > 0
        and bbox_left < bbox_right
        and bbox_top < bbox_bottom
        and bbox_left < w
        and bbox_right < w
        and bbox_top < h
        and bbox_bottom < h
    )

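# Finally, a sketch of how the pieces above compose in normal use: detect_image runs
# the full pipeline on an image file and returns the results as a Fex dataframe with
# one row per detected face. The exact output columns depend on the models loaded;
# treat the model choices below as illustrative.
def _end_to_end_sketch():
    detector = Detector(
        face_model="RetinaFace",
        landmark_model="MobileFaceNet",
        au_model="svm",
        emotion_model="resmasknet",
    )
    return detector.detect_image(inputFname)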