# Example no. 1
# Use presentation-sized ("talk") styling for all seaborn/matplotlib figures.
sns.set_context("talk")

# Clip metadata plus the list of videos to process, in sorted order.
clip_attrs = pd.read_csv("clip_attrs.csv")
mp4_files = glob.glob("*.mp4")
videos = np.sort(mp4_files)
print(videos)


# Process each video using our detector. 

# In[ ]:


from feat import Detector

# Random-forest AU detector paired with ResMaskNet for emotion classification.
detector = Detector(au_model="rf", emotion_model="resmasknet")

# Run detection on every clip; each result is written to a CSV next to its video.
for video_path in videos:
    csv_path = video_path.replace(".mp4", ".csv")
    detector.detect_video(video_path, outputFname=csv_path)


# In[9]:


from feat.utils import read_feat
import pandas as pd

# Read every per-video CSV produced by the detector and combine them into a
# single Fex frame. Collecting the pieces in a list and concatenating once
# avoids the quadratic cost of growing the frame with pd.concat inside the
# loop, and removes the ix == 0 special case.
fex_parts = [read_feat(video.replace(".mp4", ".csv")) for video in videos]
fex = pd.concat(fex_parts)

# Drop rows where detection produced missing values (e.g. no face found).
fex = fex.dropna()
# Example no. 2
import os, glob

from feat.tests.utils import get_test_data_path
from IPython.display import Video

# Locate the demo clip bundled with py-feat's test data.
test_data_dir = get_test_data_path()
test_video = os.path.join(test_data_dir, "WolfgangLanger_Pexels.mp4")

# Render the clip inline so the reader can see what will be analyzed.
Video(test_video, embed=True)

# Let's predict facial expressions from the video using the `detect_video()` method.

# In[22]:

# The clip is 24 fps, so skipping 24 frames analyzes roughly one frame per second.
SKIP_FRAMES = 24
video_prediction = detector.detect_video(test_video, skip_frames=SKIP_FRAMES)
# Preview the first rows of the per-frame predictions.
video_prediction.head()

# You can also plot the detection results from a video. The frames are not extracted from the video (that will result in thousands of images) so the visualization only shows the detected face without the underlying image.
#
# The video has 24 fps, and the actress shows sadness around 0:02 and happiness around 0:14.

# In[23]:

# Frame 48 = 2 s into the 24 fps clip (the sad expression mentioned above).
video_prediction.loc[[48]].plot_detections()

# In[24]:


# Frame 17 * 24 = 408, i.e. ~17 s in; pose=True also draws the head-pose overlay.
# NOTE(review): the prose above says happiness appears at 0:14 — confirm whether
# frame 408 (0:17) is the intended timestamp.
video_prediction.loc[[17 * 24]].plot_detections(pose=True)

# We can also leverage existing pandas plotting functions to show how emotions unfold over time. We can clearly see how her emotions change from sadness to happiness.