Example #1
def add_patient_dir_scores(all_dict, patient_dir):
    if 'all_dict.txt' in os.listdir(patient_dir):
        emotion_dict = AUScorer.make_frame_emotions(
            AUScorer.convert_dict_to_int(
                json.load(open(os.path.join(patient_dir, 'all_dict.txt')))))
    else:
        emotion_dict = AUScorer.AUScorer(patient_dir).emotions
    all_dict[patient_dir.replace('_cropped', '')] = emotion_dict
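
A minimal usage sketch for the function above. The flat directory layout, the parent-folder walk, and the availability of os/json/AUScorer in the example's module are assumptions; only add_patient_dir_scores itself and the '_cropped' / 'all_dict.txt' conventions come from the snippet.

# Hypothetical driver: collect emotion dictionaries for every cropped patient directory.
import os

all_dict = {}

for entry in os.listdir('.'):
    if os.path.isdir(entry) and entry.endswith('_cropped'):  # assumed layout
        add_patient_dir_scores(all_dict, entry)

# all_dict now maps each directory name (with '_cropped' stripped) to its emotion dictionary.
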
Example #2
def prevalence_score(emotionDict):
    """
    Calculate a prevalence score for the max emotion in a emotion dictionary

    :param emotionDict: Dictionary mapping one of the basic emotions to its corresponding score (calculated by AUScorer)
    :return: Score calculated using both value of highest value emotion as well as how prevalent that emotion is
    """
    reverse_emotions = AUScorer.reverse_emotions(emotionDict)

    if reverse_emotions:
        max_value = max(reverse_emotions.keys())
        # if len(reverse_emotions[max_value]) > 1:
        #    score = 0
        # else:
        score = ((max_value**2) / np.sum(
            [x * len(reverse_emotions[x]) for x in reverse_emotions.keys()]))
    else:
        score = 0

    return score
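
A small worked example of the score. The input shape (emotion -> score) and the grouping performed by AUScorer.reverse_emotions are assumptions inferred from how prevalence_score uses them.

# Hypothetical input: three emotions, 'Happy' scoring highest.
emotion_dict = {'Happy': 3.0, 'Sad': 1.0, 'Angry': 1.0}
# reverse_emotions is assumed to group emotions by score:
#   {3.0: ['Happy'], 1.0: ['Sad', 'Angry']}
# so max_value = 3.0 and the score is
#   max_value ** 2 / (3.0 * 1 + 1.0 * 2) = 9.0 / 5.0 = 1.8
print(prevalence_score(emotion_dict))  # ~1.8 under these assumptions
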
def scores_and_duration_dict(all_dict_q, duration_dict_q, dir):
    """Creates all_dict and duration_dict for the given dir and adds them to each queue.
    Queues must be threadsafe.

    :param all_dict_q: Queue for all_dict
    :type all_dict_q: Queue
    :param duration_dict_q: Queue for duration_dict
    :type duration_dict_q: Queue
    :param dir: Directory
    :type dir: str
    """

    if 'cropped' in dir:
        all_dicts = {}
        duration_dict = {}
        remove_crop = dir.replace('_cropped', '')
        dir_num = remove_crop[-4:]  # the last four characters encode the video number
        patient_name = remove_crop.replace('_' + dir_num, '')

        if patient_name not in all_dicts:
            all_dicts[patient_name] = {}

        if patient_name not in duration_dict:
            duration_dict[patient_name] = {}
        all_dict_file = os.path.join(dir, 'all_dict.txt')

        if os.path.exists(all_dict_file):
            all_dicts[patient_name][int(dir_num)] = json.load(
                open(all_dict_file))
        else:
            all_dicts[patient_name][int(dir_num)] = AUScorer.AUScorer(
                dir).emotions
        duration_dict[patient_name][int(dir_num)] = int(
            VidCropper.duration(SecondRunOpenFace.get_vid_from_dir(dir)) * 30)
        all_dict_q.put(all_dicts)
        duration_dict_q.put(duration_dict)
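
A sketch of driving scores_and_duration_dict over many directories and draining the queues afterwards. The Manager-based queues, the multiprocessing pool, and the '*_cropped' glob are assumptions; the function only requires that both queues be thread-safe.

# Hypothetical driver using standard-library multiprocessing.
import glob
from multiprocessing import Manager, Pool

manager = Manager()
all_dict_q = manager.Queue()
duration_dict_q = manager.Queue()

cropped_dirs = glob.glob('*_cropped')  # assumed naming convention

with Pool() as pool:
    pool.starmap(scores_and_duration_dict,
                 [(all_dict_q, duration_dict_q, d) for d in cropped_dirs])

# Merge the partial per-patient dictionaries produced by each worker.
all_dicts = {}
while not all_dict_q.empty():
    for patient, vids in all_dict_q.get().items():
        all_dicts.setdefault(patient, {}).update(vids)
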
Example #4
    def __init__(self, parent, id, scorer):
        """
        Default constructor.

        :param parent: wx.Frame object.
        :param id: ID
        """
        # self.log = log
        wx.Panel.__init__(
            self, parent, id, style=wx.TAB_TRAVERSAL | wx.CLIP_CHILDREN)
        # Create some controls
        try:
            self.mc = wx.media.MediaCtrl(self, style=wx.SIMPLE_BORDER)
        except NotImplementedError:
            # MediaCtrl is unavailable on this platform/build; clean up and re-raise.
            self.Destroy()
            raise
        self.parent = parent
        loadButton = wx.Button(self, -1, "Load File")
        self.Bind(wx.EVT_BUTTON, self.onLoadFile, loadButton)

        playButton = wx.Button(self, -1, "Play")
        self.Bind(wx.EVT_BUTTON, self.onPlay, playButton)

        pauseButton = wx.Button(self, -1, "Pause")
        self.Bind(wx.EVT_BUTTON, self.onPause, pauseButton)

        stopButton = wx.Button(self, -1, "Stop")
        self.Bind(wx.EVT_BUTTON, self.onStop, stopButton)

        slider = wx.Slider(self, -1, 0, 0, 0, size=wx.Size(300, -1))
        self.slider = slider
        self.Bind(wx.EVT_SLIDER, self.onSeek, slider)

        self.scorer = scorer

        self.st_file = wx.StaticText(
            self, -1, ".mid .mp3 .wav .au .avi .mpg", size=(200, -1))
        self.st_size = wx.StaticText(self, -1, size=(100, -1))
        self.st_len = wx.StaticText(self, -1, size=(100, -1))
        self.st_pos = wx.StaticText(self, -1, size=(100, -1))

        self.emotion_list = AUScorer.emotion_list()
        full_au_list = self.emotion_list + ['Best Score', '', 'Prominence', '']
        self.emotion_texts = [
            wx.StaticText(
                self, wx.NewId(), "{0}".format(emotion), size=(100, -1))
            for emotion in full_au_list
        ]
        self.blank_gauges = [
            wx.Gauge(self, wx.NewId(), size=(100, -1))
            for emotion in full_au_list
        ]
        # The 'Best Score' row and the blank row after it display text values rather than gauges.
        self.blank_gauges[len(self.emotion_list)] = wx.StaticText(
            self, wx.NewId(), size=(100, -1))
        self.blank_gauges[len(self.emotion_list) + 1] = wx.StaticText(
            self, wx.NewId(), size=(100, -1))

        # setup the button/label layout using a sizer
        sizer = wx.GridBagSizer(6, 6)
        sizer.Add(loadButton, (1, 1))
        sizer.Add(playButton, (2, 1))
        sizer.Add(pauseButton, (3, 1))
        sizer.Add(stopButton, (4, 1))
        sizer.Add(self.st_file, (1, 2))
        sizer.Add(self.st_size, (2, 2))
        sizer.Add(self.st_len, (3, 2))
        sizer.Add(self.st_pos, (4, 2))

        for index, emotionText in enumerate(self.emotion_texts):
            sizer.Add(emotionText, (index + 1, 5))

        for index, blank_gauge in enumerate(self.blank_gauges):
            sizer.Add(blank_gauge, (index + 1, 6))

        sizer.Add(self.mc, (6, 1), span=(4, 4))  # for .avi .mpg video files
        self.SetSizer(sizer)
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.onTimer)
        self.timer.Start(100)
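
A sketch of embedding the panel above in a frame. The class name VideoPanel is hypothetical (the real name is not shown in the snippet), and the AUScorer directory argument is an assumption; the wx.App/wx.Frame scaffolding is standard wxPython.

# Hypothetical launcher for the media panel defined above.
import wx

if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, -1, 'Emotion Video Player', size=(800, 600))
    scorer = AUScorer.AUScorer('patient_dir_0001_cropped')  # assumed directory
    panel = VideoPanel(frame, -1, scorer)  # hypothetical class name
    frame.Show()
    app.MainLoop()
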
Example #5
    def __init__(self,
                 parent,
                 frame_id,
                 name,
                 curr_directory,
                 path_to_csv=None):
        """
        Default constructor, creates a new AUGui

        :param parent: Inherited from wx.Frame
        :param frame_id: Unused, inherited from wx.Frame
        :param name: Unused, inherited from wx.Frame
        :param curr_directory: Directory with image files, AU files, etc.
        :param path_to_csv: Path to a CSV file with landmarks,
            in the form generated by FaceMapper
        """
        self.tmp_dir = 'tmp_video'
        self.path_to_csv = path_to_csv
        self.prominent_images = None
        self.skipping_index = 1
        self.fps_frac = 30  # Frames per second of video
        wx.Frame.__init__(self, parent, frame_id, name)
        os.chdir(curr_directory)
        self.images, self.imageIndex = make_images()

        self.image_map = None
        self.annotated_map = None
        self.all_shown = True

        if self.path_to_csv:
            self.image_map = csv_emotion_reader(path_to_csv)
            self.annotated_map = {
                self.images[index * self.fps_frac]: emotion
                for index, emotion in enumerate(
                    sorted(self.image_map.values()))
                if (index * self.fps_frac) < len(self.images)
            }  # Relies on image map only having one item per image

        self.AU_threshold = 0
        self.scorer = AUScorer.AUScorer(curr_directory)

        n_c = NavCanvas.NavCanvas(self, Debug=0, BackgroundColor="BLACK")
        self.Canvas = n_c.Canvas

        self.curr_emotions = []
        self.AU_choices = self.make_full_au_choices()
        self.AU_box = wx.BoxSizer(wx.VERTICAL)
        self.AU_List = wx.ListBox(
            self,
            wx.NewId(),
            style=wx.LC_REPORT | wx.SUNKEN_BORDER,
            name='List of Emotions',
            choices=self.curr_emotions)
        self.AU_box.Add(self.AU_List, 3, wx.EXPAND)

        if self.path_to_csv:
            self.annotation_box = wx.TextCtrl(
                self,
                wx.NewId(),
                value='N/A',
                style=wx.TE_READONLY | wx.TE_MULTILINE)
            self.AU_box.Add(self.annotation_box, 1, wx.EXPAND)
        self.pic_box = wx.ListBox(
            self,
            wx.NewId(),
            style=wx.LC_REPORT | wx.SUNKEN_BORDER,
            name='Pictures',
            choices=self.AU_choices)

        box = wx.BoxSizer(wx.HORIZONTAL)
        box.Add(self.pic_box, 1, wx.EXPAND)
        box.Add(n_c, 3, wx.EXPAND)
        box.Add(self.AU_box, 1, wx.EXPAND)

        botBox = wx.BoxSizer(wx.HORIZONTAL)
        self.order = 'Index'
        self.order_button = wx.Button(
            self, wx.NewId(), label='Order By Prominence')
        show_landmarksButton = wx.Button(
            self, wx.NewId(), label='Show/Hide Landmarks')
        self.au_text = wx.TextCtrl(
            self,
            wx.NewId(),
            value='N/A',
            style=wx.VSCROLL | wx.TE_READONLY | wx.TE_MULTILINE)

        show_vidButton = wx.Button(self, wx.NewId(), label='Show Video')

        botBox.Add(self.order_button, 1, wx.EXPAND)
        botBox.Add(show_landmarksButton, 1, wx.EXPAND)

        if self.path_to_csv:
            self.show_annotations_button = wx.Button(
                self, wx.NewId(), label='Show Annotated Frames')
            botBox.Add(self.show_annotations_button, 1, wx.EXPAND)
            self.Bind(
                wx.EVT_BUTTON,
                self.show_hide_annotations,
                id=self.show_annotations_button.GetId())
        botBox.Add(show_vidButton, 1, wx.EXPAND)
        botBox.Add(self.au_text, 4, wx.EXPAND)

        self.allBox = wx.BoxSizer(wx.VERTICAL)
        self.allBox.Add(box, 4, wx.EXPAND)
        self.allBox.Add(botBox, 1, wx.EXPAND)

        # -- Make Bindings --
        self.Bind(wx.EVT_LISTBOX, self.click_on_pic, id=self.pic_box.GetId())
        self.Bind(
            wx.EVT_LISTBOX, self.click_on_emotion, id=self.AU_List.GetId())
        self.Bind(
            wx.EVT_BUTTON, self.evt_reorder_pics, id=self.order_button.GetId())
        self.Bind(
            wx.EVT_BUTTON,
            self.show_landmarks,
            id=show_landmarksButton.GetId())
        self.Bind(wx.EVT_BUTTON, self.show_video, id=show_vidButton.GetId())

        self.SetSizer(self.allBox)
        self.Layout()
        self.bind_to_canvas()

        # Landmark stuff
        self.landmarks_exist = False
        self.landmarks_shown = False
        marked_pics_dir = os.path.join(curr_directory, 'labeled_frames/')

        if os.path.exists(marked_pics_dir):
            self.landmark_images = OpenFaceScorer.OpenFaceScorer.find_im_files(
                marked_pics_dir)

            if self.landmark_images:
                self.landmarks_exist = True

        self.update_all()
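
A sketch of launching this frame. The class and module name AUGui are assumptions (suggested by the docstring and by the AUGui.prevalence_score call later in these examples); the directory argument is hypothetical.

# Hypothetical entry point for the AU GUI frame whose constructor is shown above.
import wx

if __name__ == '__main__':
    app = wx.App(False)
    gui = AUGui(None, wx.ID_ANY, 'AU GUI',
                curr_directory='patient_dir_0001_cropped',  # assumed
                path_to_csv=None)  # optional FaceMapper landmark CSV
    gui.Show()
    app.MainLoop()
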
def make_scatter_data(patient):
    """Create scatter plot for the given patient.

    :param patient: patient directory containing plot_dict.txt
    :type patient: str
    """
    patient_scores_dir = patient
    plot_dict_file = os.path.join(patient_scores_dir, 'plot_dict.txt')

    if os.path.exists(patient_scores_dir):
        try:
            temp_plot_dict = json.load(open(plot_dict_file)) if os.path.exists(
                plot_dict_file) else None
        except (ValueError, OSError):
            # A missing or unparsable plot_dict.txt is treated as "no cached data".
            temp_plot_dict = None
    else:
        temp_plot_dict = None

    #plot_dict = make_scatter_plot_dict(scores_dict[patient])
    plot_dict = scores_dict[patient]

    if not temp_plot_dict or (plot_dict != temp_plot_dict):
        emotions = AUScorer.emotion_list()
        emotions.append('Neutral')
        temp_data = {emotion: [] for emotion in emotions}

        for vid in sorted(plot_dict.keys()):
            for frame in sorted(plot_dict[vid].keys()):
                for emotion in emotions:
                    if emotion in plot_dict[vid][frame][0]:
                        temp_data[emotion].append(plot_dict[vid][frame][1])
                    else:
                        temp_data[emotion].append(0)

        # for index, emotion in enumerate(emotions):
        #     new_row = []
        #     for vid in sorted(plot_dict[emotion].keys()):
        #         for frame in sorted(plot_dict[emotion][vid].keys()):
        #             new_row.append(plot_dict[emotion][vid][frame])
        #     z = (list(new_row))
        #     temp_data[emotion] = z

        data = []

        sorted_emotions = sorted(x for x in emotions if x != 'Neutral')

        for emotion in sorted_emotions:
            # Offset by a tiny positive value so the log-scaled heat map never sees zero.
            data.append([x + 1e-32 for x in temp_data[emotion]])

        neutral_data = [1e-32 for _ in range(len(data[0]))]

        emotion_dict = {
            'Angry': .4,
            'Sad': .3,
            'Happy': .3,
            'Disgust': .3,
            'Fear': .5,
            'Surprise': .5
        }

        for row_index, datum in enumerate(data):
            # Rows in `data` were appended in sorted-emotion order above.
            emotion = sorted_emotions[row_index]

            for frame_index, val in enumerate(datum):
                # Scores below the per-emotion threshold are zeroed out and counted as Neutral.
                if val < emotion_dict[emotion]:
                    datum[frame_index] = 1e-32
                    neutral_data[frame_index] = max(val, neutral_data[frame_index])

        data.append(neutral_data)

        # real_data = np.ndarray((len(temp_data),))
        # for index, vals in enumerate(data):
        #     real_data[index] = vals

        if all(data):
            # contains = False
            data = np.asarray(data, dtype=np.float64)
            # for _ in data:
            #     for index, val in enumerate(_):
            #         if val < 1.5:
            #             _[index] = 0
            #         else:
            #             contains = True
            # if contains:

            if not os.path.exists(patient):
                os.mkdir(patient)

            # cict = {
            #     'red': ((0.0, 0.0, 0.0),
            #             (1.0, 0.0, 0.0)),
            #
            #     'green': ((0.0, 0.0, 0.0),
            #               (1.0, 0.0, 0.0)),
            #
            #     'blue': ((0.0, 0.0, 0.0),
            #                  (.25, 0.0, 1.0),
            #                  (1.0, 1.0, 1.0))
            # }
            # test_colormap = matplotlib.colors.ListedColormap('test', cict)

            ax = sns.heatmap(
                data,
                cbar_kws={'ticks': LogLocator()},
                yticklabels=sorted_emotions + ['Neutral'],  # match the row order of `data`
                xticklabels=False,
                cmap='BuGn',
                norm=matplotlib.colors.LogNorm())
            # ax.set_clim(vmin=1.5, vmax=5)
            ax.set_title(patient)
            fig = ax.get_figure()
            fig.savefig(os.path.join(patient, 'day_scores.png'))
            # print(patient)
            plt.close()
            json.dump(plot_dict, open(plot_dict_file, 'w'))
    if not os.path.exists(scores_file):
        print('making scores file')
        scores_dict = {}

        for patient in all_dicts:
            scores_dict[patient] = {}
            currPatientDict = all_dicts[patient]

            for vid in currPatientDict:
                scores_dict[patient][vid] = {}

                for frame in currPatientDict[vid]:
                    emotionDict = currPatientDict[vid][frame]

                    if emotionDict:
                        reverse_emotions = AUScorer.reverse_emotions(
                            emotionDict)
                        max_value = max(reverse_emotions.keys())
                        max_emotions = reverse_emotions[max_value]
                        prevalence_score = AUGui.prevalence_score(emotionDict)
                        scores_dict[patient][vid][frame] = [
                            max_emotions, prevalence_score
                        ]
        json.dump(scores_dict, open(scores_file, 'w'))
    else:
        scores_dict = json.load(open(scores_file))
    print('making scatters...')
    bar = progressbar.ProgressBar(
        redirect_stdout=True, max_value=len(scores_dict.keys()))

    for i, _ in enumerate(
            Pool().imap(
Example #8
import numpy as np

import matplotlib

matplotlib.use('Agg')
import matplotlib.pyplot as plt

from collections import defaultdict
from progressbar import ProgressBar
from pathos.multiprocessing import ProcessingPool as Pool
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC

import AUScorer  # project scoring module (import form assumed; needed for emotion_list below)

all_emotions = AUScorer.emotion_list()
all_emotions.extend(['Neutral', 'Sleeping'])


def use_classifier(classifier, au_train, au_test, target_train, target_test):
    """
    Fit the classifier on the training AUs and predict emotions

    :param classifier: Classifier
    :param au_train:  Training action units
    :param au_test:   Testing action units
    :param target_train: Training emotions
    :param target_test: Testing emotions
    :returns: Testing emotions, predicted probabilities
    """