Example #1
    def __init__(self,
                 group_id: str,
                 prettify: bool = False,
                 verbose: bool = False):
        self.group_id = group_id
        self.clean_group_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        if not Path(self.clean_group_dir).is_dir():
            log(
                'ERROR',
                'This step requires the output of openface data cleaning step')
        os.makedirs(self.clean_group_dir, exist_ok=True)

        self.output_group_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)

        self.experiment = Experiment(group_id)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

        self.subjects = dict()
        self.is_valid_frame = None

        self.prettify = prettify
        self.verbose = verbose
Example #2
    def __init__(self,
                 group_id: str,
                 prettify: bool = False,
                 verbose: bool = False):
        self.group_id = group_id
        self.clean_group_dir = OPENPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        if not Path(self.clean_group_dir).is_dir():
            log(
                'ERROR',
                'This step requires the output of openpose data cleaning step')
        os.makedirs(self.clean_group_dir, exist_ok=True)

        self.output_group_dir = OPENPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)

        self.experiment = Experiment(group_id)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

        self.current_frame = -1
        self.n_subjects = -1
        self.subjects = {
            subject_id: OpenposeSubject(subject_id, verbose)
            for subject_id in range(1, 5)
        }
        self.intragroup_distance = dict()
        self.prettify = prettify
        self.verbose = verbose
Example #3
 def __init__(self, group_id):
     self.experiment = Experiment(group_id)
     self.group_id = group_id
     self.base_output_dir = DENSEPOSE_OUTPUT_DIR / \
         group_id / (group_id + '_clean')
     os.makedirs(self.base_output_dir, exist_ok=True)
     with open(self.base_output_dir / (self.group_id + '.json'), 'w') as f:
         json.dump(self.experiment.to_json(), f)
Example #4
    def __init__(self, group_id: str, prettify: bool = False, verbose: bool = False):
        self.group_id = group_id
        self.prettify = prettify
        self.verbose = verbose

        self.experiment = Experiment(group_id)
        self.output_group_dir = VIDEO_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)
Example #5
    def get_experiment_data(self, group_directory, dir_type: str = 'clean'):
        """Get experiment information

        Args:
            group_directory (Path): Dataset Group directory path
            dir_type (str, optional): Output directory suffix to read from
                ('clean' or 'processed'). Defaults to 'clean'.

        Returns:
            dict: Group basic information: ID and TYPE
        """
        # TODO: Change if processed group info changes. So far nothing is added
        # to the experiment JSON file in the processing step
        clean_dir = [x for x in group_directory.iterdir()
                     if x.is_dir() and dir_type in x.name][0]

        experiment_file = [x for x in clean_dir.iterdir()
                           if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES][0]

        with open(experiment_file, 'r') as f:
            experiment_content = json.load(f)
        experiment_data = experiment_content['experiment']

        experiment = Experiment(experiment_data['id'])
        return experiment.to_json()
def main(video_files: list, verbose: bool = False):
    # NOTE: this snippet referenced an undefined `directory`; we assume the
    # first argument is the group directory (ending in '/') and derive the
    # video file list from it.
    directory = video_files[0]
    group_id = list(filter(None, video_files[0].split('/')))[-1]
    experiment = Experiment(group_id)

    video_files = [
        directory + filename
        for filename in filter_files(fetch_files_from_directory([directory]),
                                     valid_types=VALID_VIDEO_TYPES)
    ]

    video_files.sort()
    # Derive timestamp file names from the video file names by replacing the
    # last occurrence of 'Video' with 'Timestamp' (the strings are reversed so
    # that str.replace targets the last occurrence rather than the first)
    timestamp_files = [
        splitext(f)[0][::-1].replace('Video'[::-1], 'Timestamp'[::-1], 1)[::-1]
        + ".txt" for f in video_files
    ]

    if len(video_files) != 3 or len(timestamp_files) != 3:
        log(
            'ERROR',
            'Specify only 3 video files (and corresponding timestamps - Optional: Default is searching for same file name)'
        )
        exit()

    out_dir_base = '%s/%s' % (DATASET_SYNC, str(Path(
        video_files[0]).parent).split('/')[-1])
    out_dirs = list()

    for n in range(experiment._n_tasks):
        out_dirs.append('%s/%s_%s/' % (out_dir_base, 'task', n + 1))

    if verbose:
        print('Saving to: ', out_dirs)

    try:
        for _dir in out_dirs:
            os.makedirs(_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    cap_list = list()
    for i in range(len(video_files)):
        _id = str(i + 1)
        vid = CameraVideo(
            "VID" + _id, video_files[i], CAM_ROI['pc' + _id],
            PERSON_IDENTIFICATION_GRID['pc' + _id], timestamp_files[i],
            out_dirs,
            splitext(video_files[i])[0].split('/')[-1] + "_sync.avi")
        cap_list.append(vid)

    if not all(vid.cap.isOpened() for vid in cap_list):
        log('ERROR', 'Error opening video stream or file')
        exit()

    # One validity flag per marker; keys are the ASCII codes of keys '1'..'8'
    marker_validator = {ord(str(i)): False for i in range(1, 9)}

    precision_step = 10

    while all(vid.cap.isOpened() for vid in cap_list):

        # timestamp_align is assumed to return either True (all videos in
        # sync; advance them all) or the subset of videos that lag behind
        alignment, align_by = timestamp_align(cap_list)
        to_align_list = cap_list if alignment is True else alignment

        for vid in to_align_list:
            # Read frame
            vid.ret, vid.frame = vid.cap.read()
            vid.current_frame_idx += 1
            # print(vid.current_frame_idx)
            # Update current_timestamp
            file_ts = vid.timestamps.readline()
            vid.current_timestamp = int(file_ts) if file_ts != '' else -1

        key = cv2.waitKey(25) & 0xff

        # TODO: While paused, allow to set markers
        if key == 0x20:  # Pause Video
            while cv2.waitKey(-1) & 0xFF != 0x20:  # Resume Video
                pass

        if key == ord('d'):  # Skip
            for vid in cap_list:
                vid.current_frame_idx += FRAME_SKIP
                vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        if key == ord('a'):  # Jump Back
            for vid in cap_list:
                vid.current_frame_idx -= FRAME_SKIP
                if vid.current_frame_idx < vid.init_synced_frame:
                    vid.current_frame_idx = vid.init_synced_frame

                vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        if key == ord('f'):  # Jump Back (fast, 10x skip)
            for vid in cap_list:
                vid.current_frame_idx -= FRAME_SKIP * 10
                if vid.current_frame_idx < vid.init_synced_frame:
                    vid.current_frame_idx = vid.init_synced_frame

                vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        # if key == ord('s'): # Set sync point
        #     print("SET SYNC POINT")

        # Per-video fine alignment: u/i nudge video 1, j/k video 2, n/m video 3
        # backward/forward by precision_step frames
        precision_keys = {ord('u'): (0, -1), ord('i'): (0, 1),
                          ord('j'): (1, -1), ord('k'): (1, 1),
                          ord('n'): (2, -1), ord('m'): (2, 1)}
        if key in precision_keys:
            vid_idx, direction = precision_keys[key]
            vid = cap_list[vid_idx]
            vid.current_frame_idx += direction * precision_step
            vid.cap.set(cv2.CAP_PROP_POS_FRAMES, vid.current_frame_idx)

        if ord('1') <= key <= ord('8'):
            print("Marker %s set" % chr(key))
            marker_validator[key] = True
            for vid in cap_list:
                vid.markers[key] = vid.current_frame_idx

        if key == ord('t'):
            print("Task Separator set")
            for vid in cap_list:
                vid.task_separator = vid.current_frame_idx

        if all(vid.ret for vid in cap_list):
            if alignment is True and align_by is not None:  # Videos are in sync
                for vid in cap_list:
                    if vid.init_synced_frame == 0:
                        vid.init_synced_frame = vid.current_frame_idx

                    roi = vid.roi
                    grid = vid.grid
                    grid_horizontal_axis = grid['horizontal']
                    grid_vertical_axis = grid['vertical']
                    cv2.rectangle(vid.frame, (roi['xmin'], roi['ymin']),
                                  (roi['xmax'], roi['ymax']), (0, 255, 0), 2)
                    cv2.line(vid.frame, (grid_horizontal_axis['x0'],
                                         grid_horizontal_axis['y']),
                             (grid_horizontal_axis['x1'],
                              grid_horizontal_axis['y']), (0, 0, 255), 1)
                    cv2.line(
                        vid.frame,
                        (grid_vertical_axis['x'], grid_vertical_axis['y0']),
                        (grid_vertical_axis['x'], grid_vertical_axis['y1']),
                        (0, 0, 255), 1)
                    cv2.imshow(vid.title, vid.frame)

            if key == ord('g'):  # Save
                if verbose:
                    print("Start writting phase")

                for vid in cap_list:
                    valid_markers = [
                        marker for marker in marker_validator.items()
                        if marker[1]
                    ]
                    if len(valid_markers) % 2 != 0:
                        log(
                            'ERROR',
                            'Odd number of markers. Number of markers should be an even number.'
                        )
                        break

                    # Markers are paired in insertion order: one (start, end) pair per task
                    for i in range(0, len(vid.markers), 2):
                        task_markers = list(vid.markers.keys())[i:i + 2]

                        if verbose:
                            print("Vid %s Saving Task" % vid.title)
                        cut_from_until(vid, vid.markers[task_markers[0]],
                                       vid.markers[task_markers[1]])

                break

            if key == ord('q'):
                break

        else:
            break

    for vid in cap_list:
        vid.cap.release()
        vid.writer_task1.release()
        vid.writer_task2.release()

    cv2.destroyAllWindows()
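
# Hypothetical invocation sketch (the path is illustrative; per the note at
# the top of main(), the first list entry is taken as the group directory):
# if __name__ == '__main__':
#     main(['dataset/group_01/'], verbose=True)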
Example #7
class DenseposeClean(object):
    def __init__(self, group_id):
        self.experiment = Experiment(group_id)
        self.group_id = group_id
        self.base_output_dir = DENSEPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        os.makedirs(self.base_output_dir, exist_ok=True)
        with open(self.base_output_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

    def process_frames(self,
                       camera_frame_files: dict,
                       output_directory: str,
                       prettify: bool = False,
                       verbose: bool = False,
                       display: bool = False):
        """Process each frame. Filter Skeleton parts detected and parse Subjects

        Args:
            camera_frame_files (dict): Camera frame files
            output_directory (str): Output directory path
            prettify (bool, optional): Pretty JSON print. Defaults to False.
            verbose (bool, optional): Verbose. Defaults to False.
            display (bool, optional): Display visualization. Defaults to False.
        """

        for camera in camera_frame_files:
            for frame_file in camera_frame_files[camera]:
                frame_idx = re.search(r'(?<=_)(\d{12})(?=\.)',
                                      frame_file.name).group(0)
                output_frame_directory = output_directory / camera
                output_frame_file = output_frame_directory / \
                    ("%s_%s_clean.json" % (camera, frame_idx))
                os.makedirs(output_frame_directory, exist_ok=True)

                with open(frame_file) as json_data:
                    data = json.load(json_data)
                metadata = data['meta']
                del data['meta']
                file_people_df = pd.json_normalize(list(data.values()))
                experiment_frame = ExperimentCameraFrame(camera,
                                                         int(frame_idx),
                                                         file_people_df,
                                                         DENSEPOSE_KEY,
                                                         verbose=verbose,
                                                         display=display,
                                                         metadata=metadata)

                with open(output_frame_file, 'w') as out_file:
                    json.dump(experiment_frame.to_json(), out_file,
                              indent=2 if prettify else None)

    def clean(self,
              tasks_directories: dict,
              specific_frame: int = None,
              prettify: bool = False,
              verbose: bool = False,
              display: bool = False):
        """Densepose feature data cleansing and filtering

        Args:
            tasks_directories (dict): Experiment Group Tasks directory
            specific_frame (int, optional): Specify frame. Defaults to None.
            prettify (bool, optional): Pretty JSON print. Defaults to False.
            verbose (bool, optional): Verbose. Defaults to False.
            display (bool, optional): Enable visualization. Defaults to False.
        """

        for task in tasks_directories:
            camera_files = dict()
            task_directory = DENSEPOSE_OUTPUT_DIR / self.group_id / task.name
            densepose_camera_directories = [
                x for x in task_directory.iterdir() if x.is_dir()
            ]

            # load camera files
            for camera_id in densepose_camera_directories:
                densepose_camera_raw_files = [
                    x for x in camera_id.iterdir()
                    if x.suffix in VALID_OUTPUT_FILE_TYPES
                ]
                densepose_camera_raw_files.sort()

                if specific_frame is not None:
                    camera_files[camera_id.name] = [
                        densepose_camera_raw_files[specific_frame]
                    ]
                else:
                    camera_files[camera_id.name] = densepose_camera_raw_files

            num_frames = 1
            if specific_frame is None:
                num_frames = min(len(files) for files in camera_files.values())
                for camera in camera_files:
                    camera_files[camera] = camera_files[camera][:num_frames]

            output_directory = self.base_output_dir / task.name
            self.process_frames(camera_files,
                                output_directory,
                                prettify=prettify,
                                verbose=verbose,
                                display=display)
class OpenfaceClean(object):
    """Openface feature data cleansing and filtering

        WARNING: Might have inaccuracies as this could
        not be extensively tested due to low
        data quality

        See Also:
            TODO: Ref thesis
    """

    columns_basic = [
        'frame',  # Frame number
        'face_id',  # Face id - No guarantee this is consistent across frames in case of FaceLandmarkVidMulti
        'timestamp',  # Timer of video being processed in seconds
        'success',  # Is the track successful - Is there a face in the frame or do we think we tracked it well
        'confidence'  # How confident is the tracker in current landmark detection estimation
    ]

    columns_gaze = [
        'gaze_0_x',
        'gaze_0_y',
        'gaze_0_z',  # Left eye
        'gaze_1_x',
        'gaze_1_y',
        'gaze_1_z',  # Right eye
        'gaze_angle_x',
        'gaze_angle_y'  # Gaze angles in radians
    ]

    columns_2d_eye_lmks_x = [('eye_lmk_x_%s' % lmk_idx)
                             for lmk_idx in range(NUM_EYE_LANDMARKS)]

    columns_2d_eye_lmks_y = [('eye_lmk_y_%s' % lmk_idx)
                             for lmk_idx in range(NUM_EYE_LANDMARKS)]

    columns_2d_eye_lmks = columns_2d_eye_lmks_x + \
        columns_2d_eye_lmks_y  # 2D Eye Landmarks (x,y) coordinates

    # Relative Location to Camera
    columns_head_loc = ['pose_Tx', 'pose_Ty', 'pose_Tz']
    columns_head_rot = ['pose_Rx', 'pose_Ry',
                        'pose_Rz']  # Rotation pitch, yaw, roll

    columns_2d_facial_lmks_x = [('x_%s' % lmk_idx)
                                for lmk_idx in range(NUM_FACE_LANDMARKS)]

    columns_2d_facial_lmks_y = [('y_%s' % lmk_idx)
                                for lmk_idx in range(NUM_FACE_LANDMARKS)]

    columns_2d_facial_lmks = columns_2d_facial_lmks_x + \
        columns_2d_facial_lmks_y  # 2D Face Landmarks (x,y) coordinates

    # Rigid face shape (location, scale, rotation)
    columns_rigid_shape = [
        'p_scale',  # Face scale
        'p_rx',
        'p_ry',
        'p_rz',  # Rotation
        'p_tx',
        'p_ty'  # Translation
    ]

    columns_aus_classification = [
        'AU01_c',  # Inner Brow Raiser
        'AU02_c',  # Outer Brow Raiser
        'AU04_c',  # Brow Lowerer
        'AU05_c',  # Upper Lid Raiser
        'AU06_c',  # Cheek Raiser
        'AU07_c',  # Lid Tightener
        'AU09_c',  # Nose Wrinkler
        'AU10_c',  # Upper Lip Raiser
        'AU12_c',  # Lip Corner Puller
        'AU14_c',  # Dimpler
        'AU15_c',  # Lip Corner Depressor
        'AU17_c',  # Chin Raiser
        'AU20_c',  # Lip stretcher
        'AU23_c',  # Lip Tightener
        'AU25_c',  # Lips part
        'AU26_c',  # Jaw Drop
        'AU28_c',  # Lip suck
        'AU45_c'  # Blink
    ]

    columns_aus_intensity = [
        'AU01_r',  # Inner Brow Raiser
        'AU02_r',  # Outer Brow Raiser
        'AU04_r',  # Brow Lowerer
        'AU05_r',  # Upper Lid Raiser
        'AU06_r',  # Cheek Raiser
        'AU07_r',  # Lid Tightener
        'AU09_r',  # Nose Wrinkler
        'AU10_r',  # Upper Lip Raiser
        'AU12_r',  # Lip Corner Puller
        'AU14_r',  # Dimpler
        'AU15_r',  # Lip Corner Depressor
        'AU17_r',  # Chin Raiser
        'AU20_r',  # Lip stretcher
        'AU23_r',  # Lip Tightener
        'AU25_r',  # Lips part
        'AU26_r',  # Jaw Drop
        'AU45_r'  # Blink
    ]

    def __init__(self, group_id):
        self.experiment = Experiment(group_id)
        self.group_id = group_id
        self.base_output_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        os.makedirs(self.base_output_dir, exist_ok=True)
        with open(self.base_output_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

    def scaling_min_max(self,
                        df_entry: pd.DataFrame,
                        axis: str,
                        camera: str,
                        a: int = QUADRANT_MIN,
                        b: int = QUADRANT_MAX):
        """Min-Max Scaling step. Needed in openface, as openface does not normalize output data

        Args:
            df_entry (pd.DataFrame):  Dataframe to normalize
            axis (str): Axis (X or Y)
            camera (str): Which camera
            a (int, optional): Minimum value. Defaults to QUADRANT_MIN.
            b (int, optional): Maximum value. Defaults to QUADRANT_MAX.

        Returns:
            pd.DataFrame: Entry min-max scaled to the [a, b] range
        """
        resolution_min, resolution_max = 0, VIDEO_RESOLUTION[camera][axis]
        normalized_entry = a + (df_entry - resolution_min) * (b-a) \
            / (resolution_max-resolution_min)

        return normalized_entry
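
    # Worked example of the scaling above (assuming VIDEO_RESOLUTION[camera]['x']
    # is 1920 and the defaults a=-1, b=1): a pixel at x=960 maps to
    # -1 + 960 * 2 / 1920 == 0.0, i.e. the centre of the frame.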

    def process_frames(self,
                       task_frame_df: pd.DataFrame,
                       output_directory: str,
                       prettify: bool = False,
                       verbose: bool = False,
                       display: bool = False):
        """Process each frame. Filter Skeleton parts detected and parse Subjects

        Args:
            camera_frame_files (dict): Camera frame files
            output_directory (str): Output directory path
            prettify (bool, optional): Pretty JSON print. Defaults to False.
            verbose (bool, optional): Verbose. Defaults to False.
            display (bool, optional): Display visualization. Defaults to False.
        """

        frames = list(task_frame_df['frame'].unique())
        for frame in frames:
            df = task_frame_df.loc[task_frame_df.frame == frame]
            df = df.loc[df.success == 1]

            frame_cameras_count = df.groupby('camera').size()
            for frame_camera in frame_cameras_count.items():
                _cam = frame_camera[0]
                _cam_count = frame_camera[1]
                if verbose:
                    if _cam_count < self.experiment._n_subjects:
                        log(
                            "WARN", "Camera %s only has %s subjects faces" %
                            (_cam, _cam_count))

                output_frame_directory = output_directory / _cam
                output_frame_file = output_frame_directory / \
                    ("%s_%.12d_clean.json" % (_cam, frame))
                os.makedirs(output_frame_directory, exist_ok=True)
                openface_frame = ExperimentCameraFrame(
                    _cam,
                    int(frame),
                    df[['confidence', 'face_id'] +
                       self.columns_2d_facial_lmks + self.columns_2d_eye_lmks +
                       self.columns_gaze[6:8]],
                    OPENFACE_KEY,
                    verbose=verbose,
                    display=display)

                self.save_data(df,
                               openface_frame,
                               output_frame_file,
                               prettify=prettify)

    def save_data(self,
                  openface_data: pd.DataFrame,
                  frame_data: ExperimentCameraFrame,
                  path,
                  prettify: bool = False):

        frame = frame_data.frame
        is_valid_data = frame_data.frame_data_validity
        subjects = list()
        for subject in frame_data.subjects:
            of_subject_data = openface_data[openface_data['face_id'] ==
                                            subject.framework_given_id]

            subject_face = subject.face['openface']
            subject_face['AUs'] = {
                k: of_subject_data.get(k).values[0]
                for k in of_subject_data[self.columns_aus_intensity]
            }
            subject_face['head'] = list(
                of_subject_data[self.columns_head_rot].values[0])
            subject_face['gaze'] = list(
                of_subject_data[self.columns_gaze[6:8]].values[0])

            sub_obj = {
                "id": subject.quadrant,
                "face": {
                    'openface': subject_face
                }
            }

            subjects.append(sub_obj)

        frame_obj = {
            'frame': frame,
            'is_raw_data_valid': is_valid_data,
            'subjects': subjects
        }

        with open(path, 'w') as out_file:
            json.dump(frame_obj, out_file, indent=2 if prettify else None)

        return frame_obj

    def clean(self,
              tasks_directories: dict,
              specific_frame: int = None,
              prettify: bool = False,
              verbose: bool = False,
              display: bool = False):
        """ Openface feature data cleansing and filtering

        Args:
            tasks_directories (dict): Experiment Group Tasks directory
            specific_frame (int, optional): Specify frame. Defaults to None.
            prettify (bool, optional): Pretty JSON print. Defaults to False.
            verbose (bool, optional): Verbose. Defaults to False.
            display (bool, optional): Enable visualization. Defaults to False.
        """

        for task in tasks_directories:
            task_directory = OPENFACE_OUTPUT_DIR / self.group_id / task.name
            openface_camera_directories = [
                x for x in task_directory.iterdir() if x.is_dir()
            ]

            camera_files = pd.DataFrame()
            for openface_camera_dir in openface_camera_directories:
                openface_output_files = [
                    x for x in openface_camera_dir.iterdir()
                    if not x.is_dir() and x.suffix in VALID_OUTPUT_FILE_TYPES
                ]
                for openface_output_file in openface_output_files:
                    camera_id = re.search(r'(?<=Video)(pc\d{1})(?=\d{14})',
                                          openface_output_file.name).group(0)
                    openface_file_df = pd.read_csv(openface_output_file)

                    openface_file_df = openface_file_df.rename(
                        columns=lambda x: x.strip())
                    tmp_df = openface_file_df
                    if specific_frame is not None:
                        tmp_df = openface_file_df.loc[openface_file_df.frame ==
                                                      specific_frame]
                    tmp_df = tmp_df.assign(camera=camera_id)

                    # Scaling
                    to_normalize_cols_x = self.columns_2d_facial_lmks_x + self.columns_2d_eye_lmks_x
                    to_normalize_cols_y = self.columns_2d_facial_lmks_y + self.columns_2d_eye_lmks_y

                    to_normalize_df_x = tmp_df[to_normalize_cols_x]
                    to_normalize_df_y = tmp_df[to_normalize_cols_y]

                    to_normalize_df_x = self.scaling_min_max(to_normalize_df_x,
                                                             axis='x',
                                                             camera=camera_id)
                    to_normalize_df_y = self.scaling_min_max(to_normalize_df_y,
                                                             axis='y',
                                                             camera=camera_id)

                    tmp_df.drop(self.columns_2d_facial_lmks,
                                axis=1,
                                inplace=True)
                    tmp_df.drop(self.columns_2d_eye_lmks, axis=1, inplace=True)
                    tmp_df = tmp_df.join(to_normalize_df_x)
                    tmp_df = tmp_df.join(to_normalize_df_y)

                    camera_files = pd.concat([camera_files, tmp_df],
                                             ignore_index=True)

            output_directory = self.base_output_dir / task.name
            self.process_frames(camera_files,
                                output_directory,
                                prettify=prettify,
                                verbose=verbose,
                                display=display)
Example #9
class DenseposeProcess(object):
    """ Densepose pose related metrics

        Subject-wise:
        * Horizontal/Vertical Expansion - Occupied Area
        * Number of Interactions with table center objects
        * Body Direction Vector
        * Hand / Head / Body energy (heatmap)

        Group-wise:
        * Intragroup Distance
        * Group Energy
    """

    def __init__(self, group_id: str, prettify: bool = False, verbose: bool = False):
        self.group_id = group_id
        self.clean_group_dir = DENSEPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        if not Path(self.clean_group_dir).is_dir():
            log('ERROR', 'This step requires the output of densepose data cleaning step')
        os.makedirs(self.clean_group_dir, exist_ok=True)

        self.output_group_dir = DENSEPOSE_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)

        self.experiment = Experiment(group_id)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

        self.current_frame = -1
        self.n_subjects = -1
        self.subjects = {subject_id: DenseposeSubject(
            subject_id, verbose) for subject_id in range(1, 5)}
        self.intragroup_distance = dict()
        self.prettify = prettify
        self.verbose = verbose

    @property
    def current_frame(self):
        try:
            return self.__current_frame
        except AttributeError:
            self.__current_frame = -1
            return self.__current_frame

    @current_frame.setter
    def current_frame(self, value):
        if value >= self.current_frame or value <= 0:
            self.__current_frame = value
        else:
            log('ERROR', 'Analyzing previous frame. Frame processing should be ordered.')

    @property
    def n_subjects(self):
        try:
            return self.__n_subjects
        except AttributeError:
            self.__n_subjects = -1
            return self.__n_subjects

    @n_subjects.setter
    def n_subjects(self, value):
        if value == 4 or value == -1:
            self.__n_subjects = value
        else:
            log('ERROR', 'Invalid number of subjects. Keeping previous frame\'s subject pose')

    def to_json(self):
        group_metrics = {
            "intragroup_distance": self.intragroup_distance,
        }
        return group_metrics

    def has_required_cameras(self, subject):
        subject_cameras = set(subject.current_pose.keys())
        requirements = {'x': False,
                        'y': False,
                        'z': False}

        for subject_axis, required_cameras in SUBJECT_AXES[subject.id].items():
            if set(required_cameras).intersection(subject_cameras):
                requirements[subject_axis] = True

        return all(requirements.values())

    def frame_data_transform(self, frame_data):
        # transform subjects list to dict
        frame_idx = frame_data['frame']
        frame_subjects_pose = dict()
        for subject in frame_data['subjects']:
            subject_id = subject['id']
            frame_subjects_pose[subject_id] = subject['pose']['densepose']

        data = {'frame': frame_idx,
                'subjects': {'pose': frame_subjects_pose}}
        return data
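
    # Example of the reshape above: {'frame': 12, 'subjects': [{'id': 1,
    # 'pose': {'densepose': {...}}}, ...]} becomes
    # {'frame': 12, 'subjects': {'pose': {1: {...}, ...}}}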

    def camera_frame_parse_subjects(self, camera, frame_data):
        frame_subjects = frame_data['subjects']
        frame_subject_pose = frame_subjects['pose']
        frame_idx = frame_data['frame']
        self.current_frame = frame_idx
        for subject_id, subject in self.subjects.items():
            if subject_id in frame_subject_pose:
                subject_data = frame_subject_pose[subject_id]
                subject._update_pose(
                    camera, subject_data, self.verbose)
            elif camera not in subject.previous_pose:
                # Subject has no pose nor previous pose.
                # Must skip camera frame as it is impossible to reuse keypoints
                # from other camera due to lack of 3D alignment. Future Consideration.
                if self.verbose:
                    log('INFO', 'Subject has no previous pose.' +
                        '(frame: %s, camera: %s, subject: %s)' %
                        (frame_idx, camera, subject))
                return False
            else:
                if self.verbose:
                    log('INFO', 'Subject (%s) has no pose in this frame (%s - %s), but previous pose on this camera can be used' %
                        (subject_id, frame_idx, camera))

                if subject.previous_pose[camera]:
                    subject._update_pose(
                        camera, subject.previous_pose[camera], self.verbose)
                else:
                    return False
                
        return True

    def process_subject_individual_metrics(self, subject, group_data):
        subject.expansiveness = subject.metric_expansiveness()
        subject.energy = subject.metric_keypoint_energy()
        # subject.body_direction = subject.metric_body_direction() # SEE COMMENTS ON METHOD ABOVE
        subject.center_interaction = subject.metric_center_interaction(group_data,
                                                                       subject.expansiveness)

    def metric_overlap(self, subjects: dict):
        """Calculate overlap between subjects.
        Only considering overlap on side-by-side subjects.
        Subjects in front of each other can have an almost coincident posture/expansiveness.

        Args:
            subjects (dict): subjects dict
        """

        # Reset overlap values
        for subject_id, subject in self.subjects.items():
            subject.overlap = dict()

        for camera in CAMERAS:
            camera_expansiveness = dict()
            for subject_id, subject in subjects.items():
                if camera in subject.expansiveness:
                    vertices = polygon_vertices_from_2_points(subject.expansiveness[camera]['x'],
                                                              subject.expansiveness[camera]['y'])
                    camera_expansiveness[subject_id] = vertices

            overlap_permutations = list(combinations(
                camera_expansiveness.keys(), 2))

            for perm in overlap_permutations:
                s1, s2 = perm[0], perm[1]
                subject1, subject2 = self.subjects[s1], self.subjects[s2]
                vertices1, vertices2 = camera_expansiveness[s1], camera_expansiveness[s2]
                polygon1, polygon2 = shapely.Polygon(
                    vertices1), shapely.Polygon(vertices2)
                intersection = polygon1.intersection(polygon2)
                if intersection and intersection.geom_type == 'Polygon':
                    polygon_area = float(abs(intersection.area))

                    for subject in [subject1, subject2]:
                        if camera in subject.overlap:
                            subject.overlap[camera]['area'] += polygon_area
                        else:
                            overlap_dict = {'polygon': intersection.exterior.coords[:],
                                            'area': polygon_area}
                            subject.overlap[camera] = overlap_dict
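
    # Sketch of the intersection computed above (shapely 2.x top-level API,
    # matching the shapely.Polygon calls used here): two overlapping axis-
    # aligned squares intersect in a 1x1 polygon, e.g.
    # shapely.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]).intersection(
    #     shapely.Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])).area == 1.0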

    def metric_intragroup_distance(self, subjects: dict):
        subjects_distance = dict()
        neck_keypoint = None
        for subject_id, subject in subjects.items():
            subject_pose = subject.current_pose
            for camera, keypoints in subject_pose.items():
                if camera not in subjects_distance:
                    subjects_distance[camera] = dict()
                
                neck_keypoint = tuple(
                    keypoints[str(DENSEPOSE_KEYPOINT_MAP['neck'])])
                subjects_distance[camera][subject_id] = neck_keypoint[:2]

        intragroup_distance = dict()
        for camera, camera_subjects in subjects_distance.items():
            points = list(camera_subjects.values())
            points.append(points[0])  # close the polygon ring
            polygon = Polygon(*points)
            polygon_area = None
            try:
                polygon_area = float(abs(polygon.area))
            except AttributeError:
                print(self.current_frame, camera, polygon, polygon_area)
            # Average over the unique vertices only; including the closing
            # duplicate would bias the centroid towards the first point
            unique_points = points[:-1]
            centroid = (sum(point[0] for point in unique_points) / len(unique_points),
                        sum(point[1] for point in unique_points) / len(unique_points))
            polygon_center = [float(centroid[0]), float(centroid[1])]
            intragroup_distance[camera] = {'polygon': points,
                                           'area': polygon_area,
                                           'center': polygon_center}
        return intragroup_distance

    def save_output(self, output_directory, frame_validity):
        frame = self.current_frame
        output_frame_file = output_directory / \
            ("frame_%.12d_processed.json" % (frame))
        os.makedirs(output_directory, exist_ok=True)
        if not output_directory.is_dir():
            log('ERROR', 'Directory does not exist')

        obj = {
            "frame": self.current_frame,
            "is_enhanced_data_valid": frame_validity,
            "group": self.to_json(),
            "subjects": [subject.to_json() for subject_id, subject in self.subjects.items()],
        }

        with open(output_frame_file, 'w') as out_file:
            json.dump(obj, out_file, indent=2 if self.prettify else None)

    def handle_frames(self, camera_frame_files: dict, output_directory: str, display: bool = False):
        for frame_idx in sorted(camera_frame_files):
            # print('=== FRAME %s ===' % frame_idx)
            self.current_frame = frame_idx
            frame_camera_dict = camera_frame_files[frame_idx]
            is_valid_frame = None
            for camera, frame_file in frame_camera_dict.items():
                with open(frame_file) as f:
                    data = json.load(f)
                data = self.frame_data_transform(data)
                is_valid_frame = self.camera_frame_parse_subjects(camera, data)
                if not is_valid_frame:
                    if self.verbose:
                        log('INFO', 'Not enough poses detected. Skipping camera frame')
                    continue

            if is_valid_frame:
                group_data = self.metric_intragroup_distance(self.subjects)
                self.intragroup_distance = group_data
                for _, subject in self.subjects.items():
                    if not self.has_required_cameras(subject):
                        log('WARN', 'Subject (%s) does not have data from required cameras. ' % subject.id +
                            'Not enough information to process frame (%s)' % frame_idx)
                    else:
                        self.process_subject_individual_metrics(subject,
                                                                group_data)

            self.metric_overlap(self.subjects)
            # writing every frame
            self.save_output(output_directory, is_valid_frame)

    def process(self, tasks_directories: dict, specific_frame: int = None, display: bool = False):
        clean_task_directory = self.clean_group_dir
        clean_tasks_directories = list()
        for task in tasks_directories:
            clean_tasks_directories += ([x for x in clean_task_directory.iterdir()
                                         if x.is_dir() and task.name in x.name])

        for task in clean_tasks_directories:
            clean_camera_directories = [x for x in task.iterdir()
                                        if x.is_dir()]
            clean_camera_directories.sort()
            camera_files = dict()
            for camera_id in clean_camera_directories:
                for frame_file in camera_id.iterdir():
                    frame_idx = int(re.search(r'(?<=_)(\d{12})(?=_)',
                                              frame_file.name).group(0))
                    if frame_idx not in camera_files:
                        camera_files[frame_idx] = dict()
                    camera_files[frame_idx][camera_id.name] = frame_file

            if specific_frame is not None:
                camera_files = {specific_frame: camera_files[specific_frame]}

            output_directory = self.output_group_dir / task.name
            self.handle_frames(camera_files, output_directory, display=display)
Example #10
class OpenfaceProcess(object):
    def __init__(self,
                 group_id: str,
                 prettify: bool = False,
                 verbose: bool = False):
        self.group_id = group_id
        self.clean_group_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_clean')
        if not Path(self.clean_group_dir).is_dir():
            log(
                'ERROR',
                'This step requires the output of openface data cleaning step')
        os.makedirs(self.clean_group_dir, exist_ok=True)

        self.output_group_dir = OPENFACE_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)

        self.experiment = Experiment(group_id)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

        self.subjects = dict()
        self.is_valid_frame = None

        self.prettify = prettify
        self.verbose = verbose

    columns_basic = [
        # 'frame',       # Frame number
        'face_id',  # Face id - No guarantee this is consistent across frames in case of FaceLandmarkVidMulti
        'timestamp',  # Timer of video being processed in seconds
        'success',  # Is the track successful - Is there a face in the frame or do we think we tracked it well
        'confidence'  # How confident is the tracker in current landmark detection estimation
    ]

    columns_gaze = [
        'gaze_0_x',
        'gaze_0_y',
        'gaze_0_z',  # Left eye
        'gaze_1_x',
        'gaze_1_y',
        'gaze_1_z',  # Right eye
        'gaze_angle_x',
        'gaze_angle_y'  # Gaze angles in radians
    ]

    columns_2d_eye_lmks = [('eye_lmk_x_%s' % lmk_idx) for lmk_idx in range(NUM_EYE_LANDMARKS)] + \
        [('eye_lmk_y_%s' % lmk_idx) for lmk_idx in range(
            NUM_EYE_LANDMARKS)]  # 2D Eye Landmarks (x,y) coordinates

    columns_3d_eye_lmks = [('eye_lmk_X_%s' % lmk_idx) for lmk_idx in range(
        NUM_EYE_LANDMARKS)] + \
        [('eye_lmk_Y_%s' % lmk_idx) for lmk_idx in range(
            NUM_EYE_LANDMARKS)] + \
        [('eye_lmk_Z_%s' % lmk_idx) for lmk_idx in range(
            NUM_EYE_LANDMARKS)]  # 3D Eye Landmarks (X,Y,Z) coordinates

    # Relative Location to Camera
    columns_head_loc = ['pose_Tx', 'pose_Ty', 'pose_Tz']
    columns_head_rot = ['pose_Rx', 'pose_Ry',
                        'pose_Rz']  # Rotation pitch, yaw, roll

    columns_facial_lmks = [('X_%s' % lmk_idx) for lmk_idx in range(NUM_FACE_LANDMARKS)] + \
        [('Y_%s' % lmk_idx) for lmk_idx in range(NUM_FACE_LANDMARKS)] + \
        [('Z_%s' % lmk_idx) for lmk_idx in range(
            NUM_FACE_LANDMARKS)]  # 3D Face Landmarks (X,Y,Z) coordinate

    # Rigid face shape (location, scale, rotation)
    columns_rigid_shape = [
        'p_scale',  # Face scale
        'p_rx',
        'p_ry',
        'p_rz',  # Rotation
        'p_tx',
        'p_ty'  # Translation
    ]

    columns_aus_intensity = [
        'AU01_r',  # Inner Brow Raiser
        'AU02_r',  # Outer Brow Raiser
        'AU04_r',  # Brow Lowerer
        'AU05_r',  # Upper Lid Raiser
        'AU06_r',  # Cheek Raiser
        'AU07_r',  # Lid Tightener
        'AU09_r',  # Nose Wrinkler
        'AU10_r',  # Upper Lip Raiser
        'AU12_r',  # Lip Corner Puller
        'AU14_r',  # Dimpler
        'AU15_r',  # Lip Corner Depressor
        'AU17_r',  # Chin Raiser
        'AU20_r',  # Lip stretcher
        'AU23_r',  # Lip Tightener
        'AU25_r',  # Lips part
        'AU26_r',  # Jaw Drop
        'AU45_r'  # Blink
    ]

    columns_features = [
        'emotion', 'head_movement_pitch', 'head_movement_yaw',
        'head_movement_roll', 'eye_movement_x', 'eye_movement_y'
    ]

    columns_relevant = columns_basic + \
        columns_facial_lmks + \
        columns_aus_intensity + \
        columns_head_rot + \
        columns_gaze

    columns_output = columns_relevant + columns_features

    def calculate_au_vector_coef(self, au_intensity_list: list,
                                 decrease_factor: float):
        weight = 1
        au_coef = 0
        for val in au_intensity_list:
            au_coef += val * weight
            weight -= decrease_factor
        return au_coef
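
    # Worked example: calculate_au_vector_coef([3.0, 2.0], 0.5) returns
    # 3.0 * 1.0 + 2.0 * 0.5 == 4.0 (weights decay by decrease_factor per AU)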

    def identify_emotion(self, action_units_data: dict):
        """Emotion Identification based on relation matrix using \
            Discriminative Power method as Velusamy et al. used \
            in "A METHOD TO INFER EMOTIONS FROM FACIAL ACTION UNITS"

        Args:
            action_units_data (dict): Vector of Action Unit intensities

        See Also:
            "A METHOD TO INFER EMOTIONS FROM FACIAL ACTION UNITS":
            https://ieeexplore.ieee.org/document/5946910/

        To Do:
            predict_emotion(aus_vector: pd.Series) using a ML classifier

        Returns:
            str: identified emotion
        """

        aus_vector = dict()
        for au_col in self.columns_aus_intensity:
            if au_col in action_units_data:
                aus_vector[au_col] = action_units_data[au_col]

        emotion_vector_coefs = dict()
        for emotion_name, emotion_aus_vector in EMOTIONS_ENCONDING.items():
            decrease_factor = 1 / len(emotion_aus_vector)
            emotion_aus = list()
            for au in emotion_aus_vector:
                emotion_aus.append(aus_vector[au + "_r"])

            emotion_vector_coefs[emotion_name] = self.calculate_au_vector_coef(
                emotion_aus, decrease_factor)

        emotion_vector_coefs['NEUTRAL'] = 1
        emotion_pred = max(emotion_vector_coefs, key=emotion_vector_coefs.get)

        return emotion_pred
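
    # Illustrative coefficient (EMOTIONS_ENCONDING values assumed here): if
    # EMOTIONS_ENCONDING['HAPPY'] == ['AU06', 'AU12'], then decrease_factor is
    # 0.5 and the coefficient is AU06_r * 1.0 + AU12_r * 0.5; the emotion with
    # the largest coefficient (or NEUTRAL at 1) is returned.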

    def is_sequence_increasing(self, seq: list):
        return all(earlier <= later for earlier, later in zip(seq, seq[1:]))

    def is_sequence_decreasing(self, seq: list):
        return all(earlier >= later for earlier, later in zip(seq, seq[1:]))
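
    # e.g. is_sequence_increasing([1, 1, 2]) -> True (non-strict) and
    # is_sequence_decreasing([3, 2, 2]) -> True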

    def identify_head_movement(self, data_buffer: list):

        orientation_labels = {
            'x': ['UP', 'DOWN'],
            'y': ['LEFT', 'RIGHT'],
            'z': ['CW', 'CCW']
        }

        x_vector = list()
        y_vector = list()
        z_vector = list()
        for entry in data_buffer:
            x_vector.append(degrees(entry[0]))
            y_vector.append(degrees(entry[1]))
            z_vector.append(degrees(entry[2]))

        vectors = {'x': x_vector, 'y': y_vector, 'z': z_vector}
        orientation = {'x': None, 'y': None, 'z': None}

        for axis, vector in vectors.items():
            if np.var(vector) >= HEAD_MOV_VARIANCE_THRESHOLD:
                if self.is_sequence_increasing(vector):
                    orientation[axis] = orientation_labels[axis][0]
                elif self.is_sequence_decreasing(vector):
                    orientation[axis] = orientation_labels[axis][1]
                else:
                    orientation[axis] = 'CENTER'
            else:
                orientation[axis] = 'CENTER'

        return orientation
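
    # e.g. a buffer whose pitch values (entry[0]) rise monotonically with
    # variance above HEAD_MOV_VARIANCE_THRESHOLD yields orientation['x'] == 'UP'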

    def identify_eye_gaze_movement(self, data_buffer: list):
        """If a person is looking left-right, this results in a change of gaze_angle_x (from positive to negative) \
        If a person is looking up-down, this results in a change of gaze_angle_y (from negative to positive) \
        If a person is looking straight ahead, both angles will be close to 0 (within measurement error)

        See Also:
            Check 'Gaze related' section in OpenFace Documentation
            https://github.com/TadasBaltrusaitis/OpenFace/wiki/Output-Format

        Args:
            data_buffer (list): Buffered (gaze_angle_x, gaze_angle_y) entries in radians

        Returns:
            dict: Gaze orientation label per axis
        """
        orientation_labels = {
            'x': ['RIGHT', 'CENTER', 'LEFT'],
            'y': ['UP', 'CENTER', 'DOWN'],
        }

        x_vector = list()
        y_vector = list()
        for entry in data_buffer:
            x_vector.append(degrees(entry[0]))
            y_vector.append(degrees(entry[1]))

        vectors = {'x': x_vector, 'y': y_vector}
        orientation = {'x': None, 'y': None}

        for axis, vector in vectors.items():
            vector_mean = np.mean(vector)
            vector_std = np.std(vector)

            if (vector_mean - vector_std <= 0 <= vector_mean + vector_std):
                orientation[axis] = orientation_labels[axis][1]
            elif vector_mean > 0:
                orientation[axis] = orientation_labels[axis][2]
            elif vector_mean < 0:
                orientation[axis] = orientation_labels[axis][0]
            else:
                orientation[axis] = 'MEASUREMENT ERROR'

        return orientation
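
    # e.g. a buffer whose mean gaze_angle_x lies within one standard deviation
    # of zero yields orientation['x'] == 'CENTER'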

    def save_output(self, output_path, frame_subjects):

        obj = {
            "frame": self.current_frame,
            "is_enhanced_data_valid": self.is_valid_frame,
            "subjects":
            [subject.to_json() for subject in frame_subjects.values()]
        }

        with open(output_path, 'w') as out_file:
            json.dump(obj, out_file, indent=2 if self.prettify else None)

        return obj

    def handle_frames(self,
                      camera_frame_files: dict,
                      output_directory: str,
                      display: bool = False):

        for frame_idx in sorted(camera_frame_files):
            # print('=== FRAME %s ===' % frame_idx)
            self.current_frame = frame_idx
            frame_camera_dict = camera_frame_files[frame_idx]
            for camera, frame_file in frame_camera_dict.items():
                output_path_dir = output_directory / camera
                output_path = output_path_dir / \
                    ("%s_%.12d_processed.json" % (camera, frame_idx))
                os.makedirs(output_path_dir, exist_ok=True)
                with open(frame_file) as f:
                    data = json.load(f)
                # TODO: Replace by condition if needed
                self.is_valid_frame = data['is_raw_data_valid']

                frame_subjects = dict()

                for subject in data['subjects']:
                    subject_id = subject['id']
                    # print("== SUBJECT %s ==" % subject_id)
                    if subject_id in self.subjects:
                        openface_subject = self.subjects[subject_id]
                    else:
                        openface_subject = OpenfaceSubject(subject['id'])
                        self.subjects[subject_id] = openface_subject

                    # Parse data
                    subject_openface_data = subject['face']['openface']

                    # EMOTION IDENTIFICATION
                    openface_subject.face = subject_openface_data
                    openface_subject.emotion = self.identify_emotion(
                        subject_openface_data['AUs'])

                    # HEAD MOVEMENT DIRECTION
                    head_rotation_data = subject_openface_data['head']
                    if len(openface_subject.data_buffer['head']
                           ) >= FRAME_THRESHOLD:
                        openface_subject.data_buffer['head'].pop(0)

                    openface_subject.data_buffer['head'].append(
                        head_rotation_data)
                    if len(openface_subject.data_buffer['head']
                           ) == FRAME_THRESHOLD:
                        openface_subject.head_rotation = self.identify_head_movement(
                            openface_subject.data_buffer['head'])

                    # EYE MOVEMENT DIRECTION
                    eye_gaze_data = subject_openface_data['gaze']
                    if len(openface_subject.data_buffer['eye']
                           ) >= FRAME_THRESHOLD:
                        openface_subject.data_buffer['eye'].pop(0)

                    openface_subject.data_buffer['eye'].append(eye_gaze_data)
                    if len(openface_subject.data_buffer['eye']
                           ) == FRAME_THRESHOLD:
                        openface_subject.eye_gaze = self.identify_eye_gaze_movement(
                            openface_subject.data_buffer['eye'])

                    # Update subject - Not needed?
                    frame_subjects[subject_id] = openface_subject

                # Save one processed file per camera (output_path is camera-specific)
                write = self.save_output(output_path, frame_subjects)
                if not write:
                    log('ERROR',
                        'Could not save frame %s to %s' % (frame_idx, output_path))

    def process(self,
                tasks_directories: dict,
                specific_frame: int = None,
                display: bool = False):
        clean_task_directory = self.clean_group_dir
        clean_tasks_directories = list()
        for task in tasks_directories:
            clean_tasks_directories += ([
                x for x in clean_task_directory.iterdir()
                if x.is_dir() and task.name in x.name
            ])

        for task in clean_tasks_directories:
            clean_camera_directories = [
                x for x in task.iterdir() if x.is_dir()
            ]
            clean_camera_directories.sort()
            camera_files = dict()
            for camera_id in clean_camera_directories:
                for frame_file in camera_id.iterdir():
                    frame_idx = int(
                        re.search(r'(?<=_)(\d{12})(?=_)',
                                  frame_file.name).group(0))
                    if frame_idx not in camera_files:
                        camera_files[frame_idx] = dict()
                    camera_files[frame_idx][camera_id.name] = frame_file

            if specific_frame is not None:
                camera_files = {specific_frame: camera_files[specific_frame]}

            output_directory = self.output_group_dir / task.name
            self.handle_frames(camera_files, output_directory, display=display)
Example #11
class VideoProcess(object):

    def __init__(self, group_id: str, prettify: bool = False, verbose: bool = False):
        self.group_id = group_id
        self.prettify = prettify
        self.verbose = verbose

        self.experiment = Experiment(group_id)
        self.output_group_dir = VIDEO_OUTPUT_DIR / \
            group_id / (group_id + '_processed')
        os.makedirs(self.output_group_dir, exist_ok=True)
        with open(self.output_group_dir / (self.group_id + '.json'), 'w') as f:
            json.dump(self.experiment.to_json(), f)

    def save_output(self, metrics, task, camera):

        energy_heatmap_path = self.output_group_dir / task / \
            ('htmp_energy_pc%s.png' % camera)
        energy_heatmap = metrics['energy_htmp']

        masked_data = np.ma.masked_where(energy_heatmap == 0, energy_heatmap)

        scaled_heatmap = masked_data.astype('float64')
        scaled_heatmap *= 255.0/scaled_heatmap.max()
        scaled_heatmap = scaled_heatmap.astype(int)

        cmap = plt.get_cmap("Reds")
        cmap.set_under('k', alpha=0)

        if self.verbose:
            plt.imshow(scaled_heatmap, cmap=cmap)
            plt.show()

        plt.imsave(str(energy_heatmap_path), scaled_heatmap, cmap=cmap)

    def process(self, tasks_directories: dict, specific_frame: int = None, display: bool = False):

        if specific_frame is not None:
            log('WARN', 'Impossible to calculate energy in a single specific frame. Skipping video processing step')
        else:
            for task in tasks_directories:
                output_directory = self.output_group_dir / task.name
                os.makedirs(output_directory, exist_ok=True)
                if not output_directory.is_dir():
                    log('ERROR', 'Directory does not exist')

                task_video_files = [
                    x for x in task.iterdir() if x.suffix in VALID_VIDEO_TYPES]
                video_caps = {
                    int(re.search(r'(?<=Videopc)(\d)(?=\d{12})', x.name).group(0)):
                        cv2.VideoCapture(str(x))
                    for x in task_video_files
                }

                thread_list = list()
                for cap_cam, video_cap in video_caps.items():
                    thread = Worker(cap_cam, cap_cam, video_cap, output_directory,
                                    verbose=self.verbose, prettify=self.prettify, display=display)
                    thread.start()
                    thread_list.append(thread)

                for thread in thread_list:
                    thread.join()

                metrics = dict()
                for thread in thread_list:
                    metrics['energy_htmp'] = thread.cumulative_energy_frame
                    self.save_output(metrics, task.name, thread.camera)
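
# Minimal usage sketch (group id and task directory list are illustrative;
# process() expects the task directories produced by the earlier pipeline steps):
# processor = VideoProcess('group_01', prettify=True)
# processor.process(tasks_directories=task_dirs)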