Code example #1
import matplotlib.pyplot as plt
from os.path import join
from feat import Detector
from feat.tests.utils import get_test_data_path


def test_plot_detections():
    test_data_dir = get_test_data_path()
    test_image = join(test_data_dir, "input.jpg")
    detector = Detector()
    image_prediction = detector.detect_image(test_image)

    # Default plot; the second axis is expected to span (0.0, 1.1) on x.
    axes = image_prediction.plot_detections()
    assert axes[1].get_xlim() == (0.0, 1.1)
    plt.close()

    # Same plot with the facial-muscle overlay enabled.
    axes = image_prediction.plot_detections(muscle=True)
    assert axes[1].get_xlim() == (0.0, 1.1)
    plt.close()

    # Plotting should still work when the original input file is gone.
    image_prediction2 = image_prediction.copy()
    image_prediction2["input"] = "NO_SUCH_FILE_EXISTS"
    axes = image_prediction2.plot_detections()
    assert axes[1].get_xlim() == (0.0, 1.1)
    plt.close()
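To run just this test from a py-feat checkout, a standard pytest invocation works (assuming pytest is installed; it collects the test by name from the current directory):

pytest -k test_plot_detections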
Code example #2
File: analysis.py  Project: cosanlab/py-feat
import glob
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_context("talk")

clip_attrs = pd.read_csv("clip_attrs.csv")
videos = np.sort(glob.glob("*.mp4"))
print(videos)


# Process each video using our detector. 

# In[ ]:


from feat import Detector
detector = Detector(au_model="rf", emotion_model="resmasknet")
for video in videos:
    detector.detect_video(video, outputFname=video.replace(".mp4", ".csv"))


# In[9]:


from feat.utils import read_feat
import pandas as pd

for ix, video in enumerate(videos):
    outputF = video.replace(".mp4", ".csv")
    if ix == 0:
        fex = read_feat(outputF)
    else:
        # Append each subsequent video's results to the running frame.
        fex = fex.append(read_feat(outputF))
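Note that `DataFrame.append` was removed in pandas 2.0. A minimal alternative sketch of the same concatenation with `pd.concat`, assuming `read_feat` returns a pandas-compatible Fex frame:

import glob
import numpy as np
import pandas as pd
from feat.utils import read_feat

# Read each per-video CSV and concatenate into a single frame.
videos = np.sort(glob.glob("*.mp4"))
fex = pd.concat(
    [read_feat(video.replace(".mp4", ".csv")) for video in videos],
    ignore_index=True,
)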
Code example #3
File: detector.py  Project: cosanlab/py-feat
# !pip install -q py-feat

# ## Detecting facial expressions from images.
#
# First, load the detector class. You can specify which models you want to use.

# In[2]:

from feat import Detector
face_model = "retinaface"
landmark_model = "mobilenet"
au_model = "rf"
emotion_model = "resmasknet"
detector = Detector(face_model=face_model,
                    landmark_model=landmark_model,
                    au_model=au_model,
                    emotion_model=emotion_model)

# Find the file you want to process. In our case, we'll use our test image `input.jpg`.

# In[3]:

# Find the file you want to process.
from feat.tests.utils import get_test_data_path
import os
test_data_dir = get_test_data_path()
test_image = os.path.join(test_data_dir, "input.jpg")

# Here is what our test image looks like.

# In[4]:
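# The body of this cell is cut off in this excerpt; the same cell appears
# in full in code example #8 below and is reproduced here:

from PIL import Image
import matplotlib.pyplot as plt
f, ax = plt.subplots()
im = Image.open(test_image)
ax.imshow(im)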
Code example #4
File: trainAUvisModel.py  Project: cosanlab/py-feat
    # Compute HOG descriptors for the resized face crop; visualize=True
    # additionally returns the rendered HOG image.
    fd, hog_image = hog(resized_face_np,
                        orientations=8,
                        pixels_per_cell=(8, 8),
                        cells_per_block=(2, 2),
                        visualize=True,
                        multichannel=True)

    return fd, hog_image, points


# Replace the paths so that they point to your local dataset directory.

# In[ ]:

detector = Detector(face_model="retinaface", landmark_model="mobilenet")
# Update these paths to point at your downloaded copy of the dataset.
EmotioNet_images = np.sort(glob.glob("/Storage/Data/EmotioNet/imgs/*.jpg"))
labels = pd.read_csv(
    "/Storage/Data/EmotioNet/labels/EmotioNet_FACS_aws_2020_24600.csv")
labels = labels.dropna(axis=0)
# Normalize the AU column names by stripping quotes and whitespace.
for col in labels.columns:
    if "AU" in col:
        kwargs = {
            col.replace("'", "").replace('"', "").replace(" ", ""):
            labels[[col]]
        }
        labels = labels.assign(**kwargs)
        labels = labels.drop(columns=col)
# Reduce each URL to the bare file name.
labels = labels.assign(
    URL=labels.URL.apply(lambda x: x.split("/")[-1].replace("'", "")))
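Given the file name (`trainAUvisModel.py`) and the `au_model = "rf"` used elsewhere in these examples, the HOG descriptors presumably feed a random-forest AU classifier. A hypothetical sketch of that step with scikit-learn; `extract_hog` stands in for the helper excerpted above, and the feature/label assembly is an assumption, not project code:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# Hypothetical assembly: one HOG descriptor (fd) per image, with the
# cleaned AU columns as a multi-label target matrix.
X = np.stack([extract_hog(img)[0] for img in EmotioNet_images])
y = labels[[c for c in labels.columns if c.startswith("AU")]].values

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X_train, y_train)
print("held-out score:", clf.score(X_test, y_test))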
Code example #5
File: detector.py  Project: fdovila/feat
## How to use the Feat Detector class.

*Written by Jin Hyun Cheong*

Here is an example of how to use the `Detector` class to detect faces, facial landmarks, Action Units, and emotions from face images or videos.

## Detecting facial expressions from images. 

First, load the detector class. You can specify which models you want to use.

from feat import Detector
face_model = "retinaface"
landmark_model = "mobilenet"
au_model = "jaanet"
emotion_model = "fer"
detector = Detector(face_model=face_model,
                    landmark_model=landmark_model,
                    au_model=au_model,
                    emotion_model=emotion_model)

Find the file you want to process. In our case, we'll use our test image `input.jpg`. 

# Find the file you want to process.
from feat.tests.utils import get_test_data_path
import os
test_data_dir = get_test_data_path()
test_image = os.path.join(test_data_dir, "input.jpg")

Here is what our test image looks like.

from PIL import Image
import matplotlib.pyplot as plt
f, ax = plt.subplots()
im = Image.open(test_image)
ax.imshow(im)
Code example #6
# Installation example

*Written by Jin Hyun Cheong*

Open the current notebook in [Google Colab](http://colab.research.google.com/) and run the cell below to install Py-Feat.

# Install Py-Feat from PyPI.
!pip install py-feat

# Check Fex class installation.
from feat import Fex
fex = Fex()

# Check Detector class installation.
from feat import Detector
detector = Detector()
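As an extra sanity check, printing the installed version shows which release pip resolved. This assumes the package exposes a `__version__` attribute, as most PyPI packages do:

# Confirm the installed release (assumes feat defines __version__).
import feat
print(feat.__version__)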
Code example #7
#!/usr/bin/env python3
from feat import Detector

face_model = "retinaface"
landmark_model = "mobilenet"
au_model = "rf"
emotion_model = "resmasknet"
detector = Detector(face_model=face_model,
                    landmark_model=landmark_model,
                    au_model=au_model,
                    emotion_model=emotion_model)
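With the detector configured, the natural next step (shown in full in code example #8) is to run it on the bundled test image:

from feat.tests.utils import get_test_data_path
import os

# Locate the packaged test image and run detection on it.
test_image = os.path.join(get_test_data_path(), "input.jpg")
image_prediction = detector.detect_image(test_image)
print(image_prediction.head())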
Code example #8
#!/usr/bin/env python3
from feat import Detector
face_model = "retinaface"
landmark_model = "mobilenet"
au_model = "rf"
emotion_model = "resmasknet"
detector = Detector(face_model=face_model,
                    landmark_model=landmark_model,
                    au_model=au_model,
                    emotion_model=emotion_model)

from feat.tests.utils import get_test_data_path
import os
test_data_dir = get_test_data_path()
test_image = os.path.join(test_data_dir, "input.jpg")

from PIL import Image
import matplotlib.pyplot as plt
f, ax = plt.subplots()
im = Image.open(test_image)
ax.imshow(im)

image_prediction = detector.detect_image(test_image)
# Show results
image_prediction
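The returned frame can also be rendered back onto the image; code example #1 exercises exactly this call:

# Draw the detections (landmarks plus model outputs) over the input image.
axes = image_prediction.plot_detections()
plt.show()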
Code example #9
File: detector.py  Project: timedcy/py-feat
Here is an example of how to use the `Detector` class to detect faces, facial landmarks, Action Units, and emotions from face images or videos.

Let's start by installing Py-Feat if you have not already done so, or if you are using this from Google Colab.

!pip install -q py-feat

## Detecting facial expressions from images. 

First, load the detector class. You can specify which models you want to use.

from feat import Detector
face_model = "retinaface"
landmark_model = "mobilenet"
au_model = "rf"
emotion_model = "resmasknet"
detector = Detector(face_model=face_model,
                    landmark_model=landmark_model,
                    au_model=au_model,
                    emotion_model=emotion_model)

Find the file you want to process. In our case, we'll use our test image `input.jpg`. 

# Find the file you want to process.
from feat.tests.utils import get_test_data_path
import os
test_data_dir = get_test_data_path()
test_image = os.path.join(test_data_dir, "input.jpg")

Here is what our test image looks like.

from PIL import Image
import matplotlib.pyplot as plt
f, ax = plt.subplots()
im = Image.open(test_image)
ax.imshow(im)