def process_videos(subject, type):
    """Run one feature-extraction pipeline over every video of a subject.

    Finds all ``data/videos/<subject>/*.avi`` files and, depending on
    ``type``, shells out to the matching extraction script, writing its
    time-series output under ``time_series/<subject>/``.

    Parameters
    ----------
    subject : str
        Subject identifier; selects the input video folder and names the
        output folders.
    type : str
        Pipeline selector: "eye" (eyetracking), "e" (facial emotions),
        "openface", "c" (colorfulness), "facial" (facial features),
        "dlib_smiles", or "smiles" (R-based smile detector).
        NOTE: the name shadows the builtin ``type``; kept unchanged for
        backward compatibility with existing callers.

    Side effects
    ------------
    Creates output directories, spawns external processes via
    ``os.system``, and (for "smiles") writes ``.pkl`` files and removes
    the intermediate ``.csv`` produced by the R script.
    """
    print("\t" + subject, 15 * '-', '\n')
    out_dir_openface = "time_series/" + subject + "/openface_ts/"
    out_dir_eyetracking = "time_series/" + subject + "/eyetracking_ts/"
    out_dir_facial = "time_series/" + subject + "/facial_features_ts/"
    out_dir_emotions = "time_series/" + subject + "/emotions_ts/"
    out_dir_smiles = "time_series/" + subject + "/smiles_ts/"
    # NOTE(review): dlib smiles write into the SAME directory as "smiles";
    # identical basenames would overwrite each other — confirm this is intended.
    out_dir_dlibSmiles = "time_series/" + subject + "/smiles_ts/"
    # BUGFIX: out_dir_colors was used below (type == "c") but never defined,
    # which raised NameError. Define and create it like the other outputs.
    out_dir_colors = "time_series/" + subject + "/colors_ts/"

    for out_dir in [
            out_dir_openface, out_dir_eyetracking, out_dir_facial,
            out_dir_emotions, out_dir_smiles, out_dir_dlibSmiles,
            out_dir_colors
    ]:
        os.makedirs(out_dir, exist_ok=True)

    videos = glob.glob("data/videos/" + subject + "/*.avi")
    videos.sort()

    # Build the time index of the BOLD signal: first sample at 0.6025 s,
    # then one sample every 1.205 s (50 samples total).
    physio_index = [0.6025]
    for i in range(1, 50):
        physio_index.append(1.205 + physio_index[i - 1])

    for video in videos:
        if type == "eye":
            os.system("python3 src/generate_ts/eyetracking.py " + video + " " +
                      out_dir_eyetracking + " --save")

        elif type == 'e':
            os.system("python3 src/generate_ts/facial_emotions.py " + video +
                      " " + out_dir_emotions)

        elif type == 'openface':
            os.system("python3 src/generate_ts/openface_features.py " + video +
                      " " + out_dir_openface)

        # BUGFIX: was `if`, which broke the dispatch into two chains; all
        # branches are mutually exclusive, so keep a single elif chain.
        elif type == "c":
            os.system("python3 src/generate_ts/colorfulness.py " + video +
                      " " + out_dir_colors)

        elif type == "facial":
            os.system("python3 src/generate_ts/facial_features.py " + video +
                      " " + out_dir_facial)

        elif type == "dlib_smiles":
            os.system("python3 src/generate_ts/dlib_smiles.py " + video + " " +
                      out_dir_dlibSmiles)

        elif type == "smiles":
            try:
                base = video.split('/')[-1].split('.')[0]
                pkl_path = out_dir_smiles + base + ".pkl"
                csv_path = out_dir_smiles + base + ".csv"

                if os.path.exists(pkl_path):
                    print("File already processed !")
                    continue
                os.system("Rscript src/generate_ts/generateSmiles.R " + base +
                          " " + out_dir_smiles)

                if os.path.exists(csv_path):
                    csv_data = pd.read_csv(csv_path,
                                           sep=';').loc[:, ["time", "value"]]
                    # Map ordinal smile labels S0..S4 to integers 0..4.
                    replace_dict = {
                        "value": {
                            "S0": 0,
                            "S1": 1,
                            "S2": 2,
                            "S3": 3,
                            "S4": 4
                        }
                    }
                    csv_data.replace(replace_dict, inplace=True)
                    # Resample onto the BOLD index, keeping the maximum
                    # smile level within each bin.
                    csv_data = pd.DataFrame(resampling.resample_ts(
                        csv_data.values, physio_index, mode="max"),
                                            columns=["Time (s)", "Smile_I"])

                else:
                    # R script produced no CSV (no smiles detected):
                    # emit an all-zero series on the BOLD index.
                    # NOTE(review): column is "Smile" here but "Smile_I"
                    # above — likely unintended; confirm downstream readers.
                    csv_data = pd.DataFrame([[t, 0] for t in physio_index],
                                            columns=["Time (s)", "Smile"])

                csv_data.to_pickle(pkl_path)
                # Remove the intermediate CSV (was `os.system("rm ...")`);
                # guard so the no-CSV branch stays a silent no-op.
                if os.path.exists(csv_path):
                    os.remove(csv_path)

            except Exception as e:
                # Best-effort per-video processing: log and move on.
                print(e)
                continue
# Esempio n. 2 (0) — scraped example separator, not executable code
        else:
            time_series.append([current_time, 0])

        current_time += 1.0 / fps

        if args.show:
            cv2.rectangle(bgr_image, (x, y), ((x + w), (y + h)), (255, 0, 0),
                          2)
            for (sx, sy, sw, sh) in smiles:
                cv2.rectangle(color_face, (sx, sy), ((sx + sw), (sy + sh)),
                              (0, 0, 255), 2)
            cv2.imshow('Video', bgr_image)

        if cv2.waitKey(100) & 0xFF == ord('q'):
            break

    # Index according to fRMI recording step
    index = [1.205]
    i = 1
    while (1.205 + index[i - 1] < current_time):
        index.append(1.205 + index[i - 1])
        i += 1

    time_series = resampling.resample_ts(np.array(time_series),
                                         index,
                                         mode="mode")

    df = pd.DataFrame(time_series, columns=["Time (s)", "Smiles_I"])
    df.to_pickle(out_file + '.pkl')
# Esempio n. 3 (0) — scraped example separator, not executable code
    # First part of the variables
    df1 = pd.DataFrame()
    df1["Time (s)"] = video_index
    df1["AUs_mouth_I"] = openface_data.loc[:, [
        " AU10_r", " AU12_r", " AU14_r", " AU15_r", " AU17_r", " AU20_r",
        " AU23_r", " AU25_r", " AU26_r"
    ]].sum(axis=1)
    df1["AU_eyes_I"] = openface_data.loc[:, [
        " AU01_r", " AU02_r", " AU04_r", " AU05_r", " AU06_r", " AU07_r",
        " AU09_r"
    ]].sum(axis=1)
    df1["AU_all_I"] = df1["AUs_mouth_I"] + df1["AU_eyes_I"]

    # resampling
    output_time_series = pd.DataFrame(resampling.resample_ts(df1.values,
                                                             physio_index,
                                                             mode="mean"),
                                      columns=df1.columns)

    # direct gaze
    direct_gaze_brut = openface_data.loc[:, [
        " timestamp", " gaze_angle_x", " gaze_angle_y"
    ]].values
    direct_gaze = moving_average(direct_gaze_brut, 30)
    # Re-add the first 29 observations lost by moving average
    for i in range(29):
        direct_gaze = np.insert(direct_gaze, i, direct_gaze_brut[i], axis=0)

    direct_gaze[:, 1] = direct_gaze[:, 1] - np.mean(direct_gaze[:, 1])
    direct_gaze[:, 2] = direct_gaze[:, 2] - np.mean(direct_gaze[:, 2])
# Esempio n. 4 (0) — scraped example separator, not executable code
    for i in range(1, frames_nb):
        video_index.append(1.0 / fps + video_index[i - 1])

    # eliminates nan from coordinates (due to blinks)
    x = eye_tracking_data[:, 0:3]
    #print (x)
    #exit (1)
    indice_nans = []
    for i in range(len(x)):
        if np.isnan(x[i]).any():
            indice_nans.append(i)
    x = np.delete(x, indice_nans, 0)
    #print (x)
    #exit (1)
    # resample gaze coordinates to the video frequency
    gaze_coordiantes = resampling.resample_ts(x, video_index, mode="mean")

    # extract time index and 2D landmarks columns from openface data
    cols = [" timestamp", " confidence"]
    for i in range(68):
        cols = cols + [" x_%d" % i, " y_%d" % i]

    openface_data = openface_data[cols].values

    # start loop over video stream
    nb_frames = 0
    face_time_series = []
    current_time = 0
    lds = []

    while True:
# Esempio n. 5 (0) — scraped example separator, not executable code
                                     ["Time (s)", "saccades"]].values.astype(
                                         float)
    openface_data = pd.read_pickle(openface_file)

    # read DISPLAY_COORDS from metadata
    display_coords = eye_tracking_data.display_coords
    eye_tracking_data = eye_tracking_data.values.astype(float)

    # Construct the index of the video stream
    video_index = [1.0 / fps]
    for i in range(1, frames_nb):
        video_index.append(1.0 / fps + video_index[i - 1])

    # resample gaze coordinates to the video frequency
    gaze_coordiantes = resampling.resample_ts(eye_tracking_data[:, 0:3],
                                              video_index,
                                              mode="mean")

    # extract time index and 2D landmarks columns from openface data
    cols = [" timestamp", " success"]
    for i in range(68):
        cols = cols + [" x_%d" % i, " y_%d" % i]

    openface_data = openface_data[cols].values

    # start loop over video stream
    nb_frames = 0
    face_time_series = []
    current_time = 0
    lds = []