Example #1
    def handle_frames(self, camera_frame_files: dict, output_directory: str, display: bool = False):
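        """Iterate frames in order: parse each camera's subject data, compute
        intragroup distance and per-subject metrics, then write one output
        file per frame.
        """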
        for frame_idx in sorted(camera_frame_files):
            # print('=== FRAME %s ===' % frame_idx)
            self.current_frame = frame_idx
            frame_camera_dict = camera_frame_files[frame_idx]
            is_valid_frame = None
            for camera, frame_file in frame_camera_dict.items():
                with open(frame_file) as f:
                    data = json.load(f)
                data = self.frame_data_transform(data)
                is_valid_frame = self.camera_frame_parse_subjects(camera, data)
                if not is_valid_frame:
                    if self.verbose:
                        log('INFO', 'Not enough poses detected. Skipping camera frame')
                    continue

            if is_valid_frame:
                group_data = self.metric_intragroup_distance(self.subjects)
                self.intragroup_distance = group_data
                for _, subject in self.subjects.items():
                    if not self.has_required_cameras(subject):
                        log('WARN', 'Subject (%s) does not have data from required cameras. ' % subject.id +
                            'Not enough information to process frame (%s)' % frame_idx)
                    else:
                        self.process_subject_individual_metrics(subject,
                                                                group_data)

            self.metric_overlap(self.subjects)
            # writing every frame
            self.save_output(output_directory, is_valid_frame)
Example #2
    def __init__(self,
                 group_id: str,
                 prettify: bool = False,
                 verbose: bool = False):
        self.group_id = group_id
        self.clean_group_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        if not Path(self.clean_group_dir).is_dir():
            log(
                'ERROR',
                'This step requires the output of the openface data cleaning step')
        os.makedirs(self.clean_group_dir, exist_ok=True)

        self.output_group_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)

        self.experiment = Experiment(group_id)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

        self.subjects = dict()
        self.is_valid_frame = None

        self.prettify = prettify
        self.verbose = verbose
Example #3
    def camera_frame_parse_subjects(self, camera, frame_data):
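        """Update each tracked subject's pose from this camera frame, falling
        back to the subject's previous pose on the same camera when the current
        frame has none. Returns False when the frame cannot be used.
        """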
        frame_subjects = frame_data['subjects']
        frame_subject_pose = frame_subjects['pose']
        frame_idx = frame_data['frame']
        self.current_frame = frame_idx
        for subject_id, subject in self.subjects.items():
            if subject_id in frame_subject_pose:
                subject_data = frame_subject_pose[subject_id]
                subject._update_pose(
                    camera, subject_data, self.verbose)
            elif camera not in subject.previous_pose:
                # Subject has no pose nor previous pose.
                # Must skip camera frame as it is impossible to reuse keypoints
                # from other camera due to lack of 3D alignment. Future Consideration.
                if self.verbose:
                    log('INFO', 'Subject has no previous pose. ' +
                        '(frame: %s, camera: %s, subject: %s)' %
                        (frame_idx, camera, subject_id))
                return False
            else:
                if self.verbose:
                    log('INFO', 'Subject (%s) has no pose in this frame (%s - %s), but previous pose on this camera can be used' %
                        (subject_id, frame_idx, camera))

                if subject.previous_pose[camera]:
                    subject._update_pose(
                        camera, subject.previous_pose[camera], self.verbose)
                else:
                    return False
                
        return True
Example #4
    def save_output(self, output_directory, frame_validity):
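        """Write the current frame's group and subject data to a JSON file in
        output_directory, optionally pretty-printed.
        """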
        frame = self.current_frame
        output_frame_file = output_directory / \
            ("frame_%.12d_processed.json" % (frame))
        os.makedirs(output_directory, exist_ok=True)
        if not output_directory.is_dir():
            log('ERROR', 'Directory does not exist')

        obj = {
            "frame": self.current_frame,
            "is_enhanced_data_valid": frame_validity,
            "group": self.to_json(),
            "subjects": [subject.to_json() for subject in self.subjects.values()],
        }

        with open(output_frame_file, 'w') as f:
            if self.prettify:
                json.dump(obj, f, indent=2)
            else:
                json.dump(obj, f)
Example #5
    def __init__(self,
                 group_id: str,
                 prettify: bool = False,
                 verbose: bool = False):
        self.group_id = group_id
        self.clean_group_dir = OPENPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        if not Path(self.clean_group_dir).is_dir():
            log(
                'ERROR',
                'This step requires the output of the openpose data cleaning step')
        os.makedirs(self.clean_group_dir, exist_ok=True)

        self.output_group_dir = OPENPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)

        self.experiment = Experiment(group_id)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

        self.current_frame = -1
        self.n_subjects = -1
        self.subjects = {
            subject_id: OpenposeSubject(subject_id, verbose)
            for subject_id in range(1, 5)
        }
        self.intragroup_distance = dict()
        self.prettify = prettify
        self.verbose = verbose
Example #6
    def process(self, tasks_directories: dict, specific_frame: int = None, display: bool = False):
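        """Process task videos: spawn one Worker thread per camera video and
        save the resulting cumulative energy heatmap metrics per task.
        """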

        if specific_frame is not None:
            log('WARN', 'Impossible to calculate energy in a single specific frame. Skipping video processing step')
        else:
            for task in tasks_directories:
                output_directory = self.output_group_dir / task.name
                os.makedirs(output_directory, exist_ok=True)
                if not output_directory.is_dir():
                    log('ERROR', 'Directory does not exist')

                task_video_files = [
                    x for x in task.iterdir() if x.suffix in VALID_VIDEO_TYPES]
                video_caps = {
                    int(re.search(r'(?<=Videopc)(\d)(?=\d{12})', x.name).group(0)):
                        cv2.VideoCapture(str(x))
                    for x in task_video_files
                }

                thread_list = list()
                for cap_cam, video_cap in video_caps.items():
                    thread = Worker(cap_cam, cap_cam, video_cap, output_directory,
                                    verbose=self.verbose, prettify=self.prettify, display=display)
                    thread.start()
                    thread_list.append(thread)

                for thread in thread_list:
                    thread.join()

                for thread in thread_list:
                    metrics = {'energy_htmp': thread.cumulative_energy_frame}
                    self.save_output(metrics, task.name, thread.camera)
Example #7
 def n_subjects(self, value):
     if value in (4, -1):
         self.__n_subjects = value
     else:
         log(
             'ERROR',
             'Invalid number of subjects. Keeping previous frame\'s subject pose'
         )
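Example #7 and the setters in Example #8 are bare setter bodies; in the source they are presumably paired with @property getters over name-mangled backing fields. A minimal sketch of the assumed pattern (the class name FrameState and the log stub are placeholders, not from the source):

def log(level, message):
    # Stand-in for the project's log helper (assumed signature).
    print('[%s] %s' % (level, message))

class FrameState:
    def __init__(self):
        self.__n_subjects = -1  # backing field behind the property

    @property
    def n_subjects(self):
        return self.__n_subjects

    @n_subjects.setter
    def n_subjects(self, value):
        # Mirrors the validation in Example #7: only 4 or -1 are accepted.
        if value in (4, -1):
            self.__n_subjects = value
        else:
            log('ERROR', 'Invalid number of subjects. Keeping previous value')

state = FrameState()
state.n_subjects = 4   # accepted
state.n_subjects = 3   # rejected; logs an error and keeps 4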
Example #8
 def current_frame(self, value):
     if value >= self.current_frame or value <= 0:
         self.__current_frame = value
     else:
         log(
             'ERROR',
             'Analyzing previous frame. Frame processing should be ordered.'
         )
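Example #9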
 def subjects(self, value):
     self.__subjects = value
     try:
         assert len(value) == 4
         self.frame_data_validity = True
     except AssertionError:
         total = {1, 2, 3, 4}
         found = {sub.quadrant for sub in value}
         if self.verbose:
             log("WARN", "Invalid number of subjects. Found %s out of 4 required in frame %s of camera %s. Missing: %s" % (
                 len(value), self.frame, self.camera, total.difference(found)))
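Example #10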
    def process_frames(self,
                       task_frame_df: pd.DataFrame,
                       output_directory: str,
                       prettify: bool = False,
                       verbose: bool = False,
                       display: bool = False):
        """Process each frame. Filter Skeleton parts detected and parse Subjects

        Args:
            camera_frame_files (dict): Camera frame files
            output_directory (str): Output directory path
            prettify (bool, optional): Pretty JSON print. Defaults to False.
            verbose (bool, optional): Verbose. Defaults to False.
            display (bool, optional): Display visualization. Defaults to False.
        """

        frames = list(task_frame_df['frame'].unique())
        for frame in frames:
            df = task_frame_df.loc[task_frame_df.frame == frame]
            df = df.loc[df.success == 1]

            frame_cameras_count = df.groupby('camera').size()
            for _cam, _cam_count in frame_cameras_count.items():
                if verbose:
                    if _cam_count < self.experiment._n_subjects:
                        log(
                            "WARN", "Camera %s only has %s subjects faces" %
                            (_cam, _cam_count))

                output_frame_directory = output_directory / _cam
                output_frame_file = output_frame_directory / \
                    ("%s_%.12d_clean.json" % (_cam, frame))
                os.makedirs(output_frame_directory, exist_ok=True)
                openface_frame = ExperimentCameraFrame(
                    _cam,
                    int(frame),
                    df[['confidence', 'face_id'] +
                       self.columns_2d_facial_lmks + self.columns_2d_eye_lmks +
                       self.columns_gaze[6:8]],
                    OPENFACE_KEY,
                    verbose=verbose,
                    display=display)

                self.save_data(df,
                               openface_frame,
                               output_frame_file,
                               prettify=prettify)
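Example #11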
def main(video_files: list, verbose: bool = False):
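    """Synchronize the three camera videos of a group: align frames by
    timestamp, let the user set task markers interactively, and cut the
    synced task segments to disk.
    """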
    group_id = list(filter(None, video_files[0].split('/')))[-1]
    experiment = Experiment(group_id)

    # Assumption: the first argument is the group directory to scan for videos
    directory = video_files[0]
    video_files = [
        directory + filename
        for filename in filter_files(fetch_files_from_directory([directory]),
                                     valid_types=VALID_VIDEO_TYPES)
    ]

    video_files.sort()
    timestamp_files = [
        splitext(f)[0][::-1].replace('Video'[::-1], 'Timestamp'[::-1], 1)[::-1]
        + ".txt" for f in video_files
    ]

    if len(video_files) != 3 or len(timestamp_files) != 3:
        log(
            'ERROR',
            'Specify exactly 3 video files (and corresponding timestamps - Optional: Default is searching for same file name)'
        )
        exit()

    out_dir_base = '%s/%s' % (DATASET_SYNC, str(Path(
        video_files[0]).parent).split('/')[-1])
    out_dirs = list()

    for n in range(experiment._n_tasks):
        out_dirs.append('%s/%s_%s/' % (out_dir_base, 'task', n + 1))

    if verbose:
        print('Saving to: ', out_dirs)

    try:
        for _dir in out_dirs:
            os.makedirs(_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    cap_list = list()
    for i in range(len(video_files)):
        _id = str(i + 1)
        vid = CameraVideo(
            "VID" + _id, video_files[i], CAM_ROI['pc' + _id],
            PERSON_IDENTIFICATION_GRID['pc' + _id], timestamp_files[i],
            out_dirs,
            splitext(video_files[i])[0].split('/')[-1] + "_sync.avi")
        cap_list.append(vid)

    if not all(vid.cap.isOpened() for vid in cap_list):
        log('ERROR', 'Error opening video stream or file')
        exit()

    marker_validator = {ord(str(i)): False for i in range(1, 9)}

    precision_step = 10

    while (all(vid.cap.isOpened() for vid in cap_list)):

        alignment, align_by = timestamp_align(cap_list)
        to_align_list = cap_list if alignment is True else alignment

        for vid in to_align_list:
            # Read frame
            vid.ret, vid.frame = vid.cap.read()
            vid.current_frame_idx += 1
            # print(vid.current_frame_idx)
            # Update current_timestamp
            file_ts = vid.timestamps.readline()
            vid.current_timestamp = int(file_ts) if file_ts != '' else -1

        key = cv2.waitKey(25) & 0xff

        # TODO: While paused, allow to set markers
        if key == 0x20:  # Pause Video
            while cv2.waitKey(-1) & 0xFF != 0x20:  # Resume Video
                pass

        if key == ord('d'):  # Skip
            for vid in cap_list:
                vid.current_frame_idx += FRAME_SKIP
                vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        if key == ord('a'):  # Jump Back
            for vid in cap_list:
                vid.current_frame_idx -= FRAME_SKIP
                if vid.current_frame_idx < vid.init_synced_frame:
                    vid.current_frame_idx = vid.init_synced_frame

                vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        if key == ord('f'):  # Jump back further
            for vid in cap_list:
                vid.current_frame_idx -= FRAME_SKIP * 10
                if vid.current_frame_idx < vid.init_synced_frame:
                    vid.current_frame_idx = vid.init_synced_frame

                vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        # if key == ord('s'): # Set sync point
        #     print("SET SYNC POINT")

        # Per-camera fine alignment: u/i adjust cam 1, j/k cam 2, n/m cam 3
        precision_keys = {ord('u'): (0, -1), ord('i'): (0, 1),
                          ord('j'): (1, -1), ord('k'): (1, 1),
                          ord('n'): (2, -1), ord('m'): (2, 1)}
        if key in precision_keys:
            cam_idx, direction = precision_keys[key]
            vid = cap_list[cam_idx]
            vid.current_frame_idx += direction * precision_step
            vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        if ord('1') <= key <= ord('8'):
            print("Marker %s set" % chr(key))
            marker_validator[key] = True
            for vid in cap_list:
                vid.markers[key] = vid.current_frame_idx

        if key == ord('t'):
            print("Task Separator set")
            for vid in cap_list:
                vid.task_separator = vid.current_frame_idx

        if all([vid.ret for vid in cap_list]):
            if alignment is True and align_by is not None:  # Videos are in sync
                for vid in cap_list:
                    if vid.init_synced_frame == 0:
                        vid.init_synced_frame = vid.current_frame_idx

                    roi = vid.roi
                    grid = vid.grid
                    grid_horizontal_axis = grid['horizontal']
                    grid_vertical_axis = grid['vertical']
                    cv2.rectangle(vid.frame, (roi['xmin'], roi['ymin']),
                                  (roi['xmax'], roi['ymax']), (0, 255, 0), 2)
                    cv2.line(vid.frame, (grid_horizontal_axis['x0'],
                                         grid_horizontal_axis['y']),
                             (grid_horizontal_axis['x1'],
                              grid_horizontal_axis['y']), (0, 0, 255), 1)
                    cv2.line(
                        vid.frame,
                        (grid_vertical_axis['x'], grid_vertical_axis['y0']),
                        (grid_vertical_axis['x'], grid_vertical_axis['y1']),
                        (0, 0, 255), 1)
                    cv2.imshow(vid.title, vid.frame)

            if key == ord('g'):  # Save
                if verbose:
                    print("Start writting phase")

                for vid in cap_list:
                    valid_markers = [
                        key_code for key_code, is_set in marker_validator.items()
                        if is_set
                    ]
                    if len(valid_markers) % 2 != 0:
                        log(
                            'ERROR',
                            'Odd number of markers. The number of markers must be even.'
                        )
                        break

                    # Markers are consumed pairwise: each consecutive marker
                    # pair delimits one task segment.
                    for i in range(0, len(vid.markers), 2):
                        task_markers = list(vid.markers.keys())[i:i + 2]

                        if verbose:
                            print("Vid %s Saving Task" % (vid.title))
                        cut_from_until(vid, vid.markers[task_markers[0]],
                                       vid.markers[task_markers[1]])

                break

            if key == ord('q'):
                break

        else:
            break

    for vid in cap_list:
        vid.cap.release()
        vid.writer_task1.release()
        vid.writer_task2.release()

    cv2.destroyAllWindows()
Example #12
    def aggregate(self, prettify: bool = False):
        """Aggregate framework clean output data
        Follow openpose frame list as openpose always outputs a file per frame

        Args:
            prettify (bool, optional): Pretty JSON print. Defaults to False.
        """
        openpose_data = self.openpose_data
        densepose_data = self.densepose_data
        openface_data = self.openface_data
        video_data = self.video_data

        if not openpose_data:
            log('ERROR', 'Nothing to Aggregate. Use -op -of and -dp to include openpose, openface and densepose data. The use of Openpose data is mandatory.')

        if openface_data:
            cleaned_openface = openface_data['cleaned']
            processed_openface = openface_data['processed']

        if densepose_data:
            cleaned_densepose = densepose_data['cleaned']
            processed_densepose = densepose_data['processed']

        cleaned_openpose = openpose_data['cleaned']
        processed_openpose = openpose_data['processed']
        tasks = cleaned_openpose.keys()

        if self.specific_task is not None:
            tasks = [task for task in tasks if str(self.specific_task) in task]

        for task in tasks:
            self.reset_files = True
            output_frame_directory = self.group_directory / \
                FEATURE_AGGREGATE_DIR / task
            makedirs(output_frame_directory, exist_ok=True)

            processed_openpose_files = processed_openpose[task]

            if self.specific_frame is not None:
                processed_openpose_files = {
                    self.specific_frame: processed_openpose_files[self.specific_frame]}

            for frame_idx in processed_openpose_files:
                output_frame_file = output_frame_directory / \
                    ("%.12d" % frame_idx + '.json')
                aggregate_frame = AggregateFrame(frame_idx)

                # OPENPOSE
                if self.verbose:
                    print("Cleaned OpenPose")
                self.framework_being_processed = OPENPOSE_KEY
                for camera in cleaned_openpose[task]:
                    cleaned_openpose_files = cleaned_openpose[task][camera]
                    openpose_clean_frame_data = json.load(
                        open(cleaned_openpose_files[frame_idx], 'r'))
                    aggregate_frame = self.read_frame_data(aggregate_frame,
                                                           openpose_clean_frame_data,
                                                           camera=camera,
                                                           frame_data_type='raw')

                if self.verbose:
                    print("Processed Openpose")
                openpose_processed_frame_data = json.load(
                    open(processed_openpose_files[frame_idx], 'r'))

                aggregate_frame = self.read_frame_data(aggregate_frame,
                                                       openpose_processed_frame_data,
                                                       frame_data_type='processed')

                # OPENFACE
                if openface_data:
                    if self.verbose:
                        print("Cleaned OpenFace")
                    self.framework_being_processed = OPENFACE_KEY
                    cleaned_task_openface = cleaned_openface[task]
                    for camera in cleaned_task_openface:
                        cleaned_openface_files = cleaned_task_openface[camera]
                        if frame_idx in cleaned_openface_files:
                            openface_clean_frame_data = json.load(
                                open(cleaned_openface_files[frame_idx], 'r'))
                            aggregate_frame = self.read_frame_data(aggregate_frame,
                                                                   openface_clean_frame_data,
                                                                   camera=camera,
                                                                   frame_data_type='raw')

                    if self.verbose:
                        print("Processed Openface")

                    processed_task_openface = processed_openface[task]
                    if frame_idx in processed_task_openface:
                        processed_task_frame = processed_task_openface[frame_idx]
                        for camera, frame_file in processed_task_frame.items():
                            openface_processed_frame_data = json.load(
                                open(frame_file, 'r'))
                            aggregate_frame = self.read_frame_data(aggregate_frame,
                                                                   openface_processed_frame_data,
                                                                   camera=camera,
                                                                   frame_data_type='processed')

                # DENSEPOSE
                if densepose_data:
                    if self.verbose:
                        print("Cleaned Densepose")
                    
                    self.framework_being_processed = DENSEPOSE_KEY
                    for camera in cleaned_densepose[task]:
                        cleaned_densepose_files = cleaned_densepose[task][camera]
                        densepose_clean_frame_data = json.load(
                            open(cleaned_densepose_files[frame_idx], 'r'))
                        aggregate_frame = self.read_frame_data(aggregate_frame,
                                                               densepose_clean_frame_data,
                                                               camera=camera,
                                                               frame_data_type='raw')

                    if self.verbose:
                        print("Processed Densepose")
                    
                    densepose_processed_frame_data = json.load(
                        open(processed_densepose[task][frame_idx], 'r'))

                    aggregate_frame = self.read_frame_data(aggregate_frame,
                                                           densepose_processed_frame_data,
                                                           frame_data_type='processed')

                # VIDEO
                if video_data:
                    self.framework_being_processed = OPENCV_KEY
                    processed_video_data = video_data['processed']
                    if task in processed_video_data:
                        processed_video_data_task = processed_video_data[task]
                        if frame_idx in processed_video_data_task:
                            processed_video_data_frame = processed_video_data_task[frame_idx]
                            for camera, frame_file in processed_video_data_frame.items():
                                video_data_processed_frame_data = json.load(
                                    open(frame_file, 'r'))
                                aggregate_frame = self.read_frame_data(aggregate_frame,
                                                                       video_data_processed_frame_data,
                                                                       camera=camera,
                                                                       frame_data_type='processed')

                self.plot_generator(aggregate_frame, output_frame_directory)

                with open(output_frame_file, 'w') as f:
                    if prettify:
                        json.dump(aggregate_frame.to_json(), f, indent=2)
                    else:
                        json.dump(aggregate_frame.to_json(), f)

            if video_data:
                video_data_heatmaps = video_data['heatmap']
                if task in video_data_heatmaps:
                    video_data_heatmaps_task = video_data_heatmaps[task]
                    for file_name in video_data_heatmaps_task:
                        shutil.copy(file_name, output_frame_directory)
Example #13
    def read_frame_data(self, agg_frame: AggregateFrame, frame_data: dict, camera: str = None, frame_data_type: str = None):
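        """Merge a single frame's data from the framework currently being
        processed into the aggregate frame, keeping raw and processed payloads
        (and per-camera values) separate.
        """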
        agg_frame_subjects = agg_frame.subjects

        if 'is_raw_data_valid' in frame_data:
            if agg_frame.is_raw_data_valid is not False:
                agg_frame.is_raw_data_valid = frame_data['is_raw_data_valid']
            frame_data_type = 'raw'

        if agg_frame.is_processed_data_valid is None and 'is_enhanced_data_valid' in frame_data:
            agg_frame.is_processed_data_valid = frame_data['is_enhanced_data_valid']
            frame_data_type = 'processed'

        if 'group' in frame_data:
            updated_value = frame_data['group']
            framework_being_processed = self.framework_being_processed.lower()

            if camera:
                if framework_being_processed not in agg_frame.group:
                    agg_frame.group[framework_being_processed] = dict()

                for key, value in updated_value.items():
                    if key not in agg_frame.group[framework_being_processed]:
                        agg_frame.group[framework_being_processed][key] = dict()

                    agg_frame.group[framework_being_processed][key].update({
                        camera: value
                    })
            else:
                agg_frame.group.update({
                    framework_being_processed: updated_value
                })

        if 'subjects' in frame_data:
            frame_subjects = frame_data['subjects']
            for subject in frame_subjects:

                # ID
                if 'id' not in subject:
                    log('ERROR', 'Invalid Subject. Cannot parse subject ID.')
                subject_id = subject['id']
                agg_subject = agg_frame_subjects[subject_id] if subject_id in agg_frame_subjects \
                    else AggregateSubject(subject_id)

                # FACE
                if 'face' in subject:
                    subject_face_data = subject['face']
                    if frame_data_type == 'raw':
                        agg_subject.clean_face_data, _ = self.parse_data(
                            agg_subject.clean_face_data, subject_face_data, camera=camera)

                    if frame_data_type == 'processed':
                        processed_face_data, processed_face_metrics = self.parse_data(
                            agg_subject.processed_face_data, subject_face_data, camera=camera)

                        agg_subject.processed_face_data = processed_face_data
                        if processed_face_metrics:
                            agg_subject.metrics.update(processed_face_metrics)

                # POSE
                if 'pose' in subject:
                    subject_pose_data = subject['pose']

                    if frame_data_type == 'raw':
                        agg_subject.clean_pose_data, _ = self.parse_data(
                            agg_subject.clean_pose_data, subject_pose_data, camera)

                    if frame_data_type == 'processed':
                        processed_pose_data, processed_pose_metrics = self.parse_data(
                            agg_subject.processed_pose_data, subject_pose_data)

                        agg_subject.processed_pose_data = processed_pose_data
                        if processed_pose_metrics:
                            agg_subject.metrics.update(processed_pose_metrics)

                agg_frame.subjects[subject_id] = agg_subject

        return agg_frame
Example #14
    def handle_frames(self,
                      camera_frame_files: dict,
                      output_directory: str,
                      display: bool = False):
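        """Iterate clean OpenFace frames per camera: derive emotion, head
        movement and eye gaze per subject, then write the processed frame.
        """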

        for frame_idx in sorted(camera_frame_files):
            # print('=== FRAME %s ===' % frame_idx)
            self.current_frame = frame_idx
            frame_camera_dict = camera_frame_files[frame_idx]
            for camera, frame_file in frame_camera_dict.items():
                output_path_dir = output_directory / camera
                output_path = output_path_dir / \
                    ("%s_%.12d_processed.json" % (camera, frame_idx))
                os.makedirs(output_path_dir, exist_ok=True)
                with open(frame_file) as f:
                    data = json.load(f)
                # TODO: Replace by condition if needed
                self.is_valid_frame = data['is_raw_data_valid']

                frame_subjects = dict()

                for subject in data['subjects']:
                    subject_id = subject['id']
                    # print("== SUBJECT %s ==" % subject_id)
                    if subject_id in self.subjects:
                        openface_subject = self.subjects[subject_id]
                    else:
                        openface_subject = OpenfaceSubject(subject['id'])
                        self.subjects[subject_id] = openface_subject

                    # Parse data
                    subject_openface_data = subject['face']['openface']

                    # EMOTION IDENTIFICATION
                    openface_subject.face = subject_openface_data
                    openface_subject.emotion = self.identify_emotion(
                        subject_openface_data['AUs'])

                    # HEAD MOVEMENT DIRECTION
                    head_rotation_data = subject_openface_data['head']
                    head_buffer = openface_subject.data_buffer['head']
                    if len(head_buffer) >= FRAME_THRESHOLD:
                        head_buffer.pop(0)

                    head_buffer.append(head_rotation_data)
                    if len(head_buffer) == FRAME_THRESHOLD:
                        openface_subject.head_rotation = self.identify_head_movement(
                            head_buffer)

                    # EYE MOVEMENT DIRECTION
                    eye_gaze_data = subject_openface_data['gaze']
                    eye_buffer = openface_subject.data_buffer['eye']
                    if len(eye_buffer) >= FRAME_THRESHOLD:
                        eye_buffer.pop(0)

                    eye_buffer.append(eye_gaze_data)
                    if len(eye_buffer) == FRAME_THRESHOLD:
                        openface_subject.eye_gaze = self.identify_eye_gaze_movement(
                            eye_buffer)

                    # Update subject - Not needed?
                    frame_subjects[subject_id] = openface_subject
            write = self.save_output(output_path, frame_subjects)
            if not write:
                log('ERROR',
                    'Could not save frame %s to %s' % (frame_idx, output_path))
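The pop(0)/append pairs in Example #14 maintain a fixed-length sliding window of recent head and gaze samples. A minimal equivalent sketch using collections.deque (the FRAME_THRESHOLD value below is assumed for illustration; the real value comes from the project's configuration):

from collections import deque

FRAME_THRESHOLD = 5  # assumed window size for this sketch

# A deque with maxlen drops the oldest sample automatically on append,
# replacing the manual pop(0) bookkeeping.
head_buffer = deque(maxlen=FRAME_THRESHOLD)

for sample in range(10):
    head_buffer.append(sample)
    if len(head_buffer) == FRAME_THRESHOLD:
        # Window is full; this is where a movement classifier such as
        # identify_head_movement would run on the buffered samples.
        window = list(head_buffer)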