Example #1
0
def test_multiface():
    """The detector should locate all five faces in the multi-face image."""
    multiFacePath = os.path.join(
        get_test_data_path(), "tim-mossholder-hOF1bWoet_Q-unsplash.jpg"
    )
    singleFace = read_pictures([inputFname])
    _, h, w, _ = singleFace.shape

    multiFaceImg = cv2.imread(multiFacePath)
    # NOTE(@tiankang): the "fer" emotion model appears problematic here
    detector = Detector(
        emotion_model="fer",
        face_model="RetinaFace",
        landmark_model="PFLD",
        au_model="jaanet",
    )
    detections, _ = detector.process_frame(multiFaceImg, 0)
    assert detections.shape[0] == 5
Example #2
0
Gaze vectors are length 4 (lefteye_x, lefteye_y, righteye_x, righteye_y) where the y orientation is positive for looking upwards.

from feat.plotting import plot_face
from feat.utils import load_h5
import numpy as np
import matplotlib.pyplot as plt

# A neutral face: all 20 action-unit intensities at zero
au = np.zeros(20)

# Gaze vector layout: (lefteye_x, lefteye_y, righteye_x, righteye_y);
# positive y means the eyes look upward
gaze = [-1, 5, 1, 5]

# Draw the neutral face with the upward gaze overlaid
plot_face(model=None, au=au, gaze=gaze, color='k', linewidth=1, linestyle='-')

## Call plot method on Fex instances
The `plot_aus` method can be called on OpenFace, FACET, and Affectiva `Fex` instances.

OpenFace

from feat.plotting import plot_face
import numpy as np
import matplotlib.pyplot as plt
from feat.utils import load_h5, get_resource_path, read_openface
from feat.tests.utils import get_test_data_path
from os.path import join

# Load the bundled OpenFace output, then plot the AUs of frame 12 with a
# muscle heatmap overlay and no gaze vector drawn.
openface_csv = join(get_test_data_path(), 'OpenFace_Test.csv')
openface = read_openface(openface_csv)
openface.plot_aus(12, muscles={'all': "heatmap"}, gaze=None)
# coding: utf-8

# # Loading data from other detectors
# *written by Jin Hyun Cheong*
#
# While Py-FEAT provides its own set of detectors, you can still use Py-FEAT
# with features extracted by other tools. Files produced by OpenFace, FACET
# iMotions, and the Affectiva JavaScript SDK are currently supported. Please
# open an Issue if you would like to see support for other model outputs.
#
# ## Loading OpenFace data

# In[1]:

import glob, os
from feat.tests.utils import get_test_data_path
from feat.utils import read_openface

csv_path = os.path.join(get_test_data_path(), "OpenFace_Test.csv")
detections = read_openface(csv_path)
print(type(detections))
display(detections.head())

# Loading a file through `read_openface` yields a full `Fex` instance, so
# every `Fex` method is available — for example, grab the facial landmark
# columns with `landmark()` or the action units with `aus()`.

# In[2]:

detections.landmark().head()

# In[3]:

detections.aus().head()

# ## Loading FACET iMotions data
Example #4
0
def test_nofile():
    """detect_image must raise FileNotFoundError for a nonexistent path."""
    with pytest.raises(FileNotFoundError):
        missing_path = os.path.join(get_test_data_path(), "nosuchfile.jpg")
        svm_detector = Detector(emotion_model="svm")
        svm_detector.detect_image(missing_path)
Example #5
0
def test_emotionrf():
    """Random-forest emotion model: weak, so only require happiness > 0."""
    image_path = os.path.join(get_test_data_path(), "input.jpg")
    rf_detector = Detector(emotion_model="rf")
    result = rf_detector.detect_image(image_path)
    assert result.emotions()["happiness"].values > 0.0
Example #6
0
def test_emotionsvm():
    """SVM emotion model should confidently detect happiness (> 0.5)."""
    image_path = os.path.join(get_test_data_path(), "input.jpg")
    svm_detector = Detector(emotion_model="svm")
    result = svm_detector.detect_image(image_path)
    assert result.emotions()["happiness"].values > 0.5
Example #7
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `feat` package."""

from feat.detector import Detector
from feat.data import Fex
from feat.utils import get_resource_path
from feat.tests.utils import get_test_data_path
import pandas as pd
import feat
import os
import cv2
import numpy as np
import pytest

# Shared fixtures loaded once at import time: a single-face sample image and
# its dimensions. These module-level names are reused by the tests below, so
# they must keep these exact identifiers.
inputFname = os.path.join(get_test_data_path(), "input.jpg")
img01 = cv2.imread(inputFname)
h, w, _ = img01.shape


def test_detector():
    """Constructor kwargs should be retrievable by item access, and the
    constructed object should be a Detector instance."""
    detector = Detector(n_jobs=1)
    assert detector["n_jobs"] == 1
    # isinstance is the idiomatic type check; type(x) == T would also
    # reject subclasses, which is not the intent here.
    assert isinstance(detector, Detector)


def test_faceboxes():
    # Face Detector Test Case:
    detector01 = Detector(
        face_model="FaceBoxes",
        landmark_model=None,