Exemplo n.º 1
0
 def recent_events(self, events):
     """Log every annotation whose timestamp falls inside the current frame."""
     frame = events.get("frame")
     if not frame:
         return
     self.last_frame_ts = frame.timestamp
     # Only do the (logged) lookup when the frame actually changed.
     if frame.index == self.last_frame_index:
         return
     self.last_frame_index = frame.index
     window = pm.enclosing_window(self.g_pool.timestamps, frame.index)
     for annotation in self.annotations.by_ts_window(window):
         logger.info(
             "{} annotation @ {}".format(annotation["label"], annotation["timestamp"])
         )
Exemplo n.º 2
0
    def map_section(self, section, all_world_timestamps, all_gaze_events, camera_model):
        """Map gaze and fixation events onto the surface for each frame of *section*.

        Returns one list of mapped events per frame; frames without a detected
        surface location yield an empty list. Returns [] when the location
        cache is not available yet.
        """
        try:
            location_cache = self.location_cache[section]
        except TypeError:
            # Cache not built yet (self.location_cache is None).
            return []

        section_gaze_on_surf = []
        for offset, location in enumerate(location_cache):
            absolute_idx = section.start + offset
            if not (location and location.detected):
                section_gaze_on_surf.append([])
                continue
            window = player_methods.enclosing_window(all_world_timestamps, absolute_idx)
            gaze_in_window = all_gaze_events.by_ts_window(window)
            mapped = self.map_gaze_and_fixation_events(
                gaze_in_window, camera_model, trans_matrix=location.img_to_surf_trans
            )
            section_gaze_on_surf.append(mapped)
        return section_gaze_on_surf
Exemplo n.º 3
0
 def recent_events(self, events):
     """Attach the gaze positions belonging to the current frame to *events*."""
     if "frame" not in events:
         return
     frame_idx = events["frame"].index
     ts_window = pm.enclosing_window(self.g_pool.timestamps, frame_idx)
     events["gaze"] = self.g_pool.gaze_positions.by_ts_window(ts_window)
Exemplo n.º 4
0
 def recent_events(self, events):
     """Look up pupil positions for the current frame and publish them via *events*."""
     if "frame" not in events:
         return
     index = events["frame"].index
     events["pupil"] = self.g_pool.pupil_positions.by_ts_window(
         pm.enclosing_window(self.g_pool.timestamps, index)
     )
Exemplo n.º 5
0
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation for the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.

    Runs as a generator: yields (status_message, current_frame) tuples so the
    launching process can report job length and progress.

    Args:
        rec_dir: Recording directory containing the world video and metadata.
        user_dir: Directory that may contain user runtime plugins (in "plugins").
        min_data_confidence: Confidence threshold stored on the simulated g_pool.
        start_frame: First frame to export; None is treated as 0.
        end_frame: Export stops before this frame (Python slice semantics).
        plugin_initializers: Initializers passed through to Plugin_List.
        out_file_path: Destination video path; overwritten if it already exists.
        pre_computed_eye_data: Dict with "pupil", "gaze" and "fixations" entries,
            each holding msgpack-serialized items deserialized below.

    Raises:
        FileNotFoundError: If no world video with a supported extension exists.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # we are not importing manual gaze correction. In Player corrections have already been applied.
    # in batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, init_playback_source
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_eye_video_overlay import Vis_Eye_Video_Overlay
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    # Per-process logger name so concurrent export jobs are distinguishable.
    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = pm.load_meta_info(rec_dir)

        # Minimal stand-in for Player's global container; plugins read from it.
        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        # Locate the world video by extension; fail loudly if none exists.
        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(f for f in glob(os.path.join(rec_dir, "world.*"))
                              if os.path.splitext(f)[1] in valid_ext)
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = init_playback_source(g_pool, source_path=video_path, timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_loc=audio_path,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        # Deserialize the msgpack blobs handed over by the parent process.
        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        # Main export loop: one iteration per exported frame.
        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        # NOTE(review): assumes AV_Writer.close accepts timestamp_export_format;
        # "all" presumably writes every timestamp format — confirm against av_writer.
        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        # Raised when the launching process closes the generator early.
        logger.warning("Video export with pid {} was canceled.".format(
            os.getpid()))
Exemplo n.º 6
0
    def recent_events(self, events):
        """Poll background fixation detection and visualize fixations for the current frame.

        Drains newly detected fixations from the background task, updates the
        menu-icon progress indicator, and publishes the results once the task
        completes. For the current frame, looks up fixations overlapping its
        timestamp window, exposes them via events['fixations'], optionally
        draws them onto the frame image, and rebuilds the detail text panel.
        """
        if self.bg_task:
            # Each fetched result is (serialized_dict, start_ts, stop_ts).
            for progress, fixation_result in self.bg_task.fetch():
                self.status = progress
                if fixation_result:
                    serialized, start_ts, stop_ts = fixation_result
                    self.fixation_data.append(
                        fm.Serialized_Dict(msgpack_bytes=serialized))
                    self.fixation_start_ts.append(start_ts)
                    self.fixation_stop_ts.append(stop_ts)

                if self.fixation_data:
                    # Progress = fraction of the recording's time span covered
                    # by the newest fixation's stop timestamp.
                    current_ts = self.fixation_stop_ts[-1]
                    progress = ((current_ts - self.g_pool.timestamps[0]) /
                                (self.g_pool.timestamps[-1] -
                                 self.g_pool.timestamps[0]))
                    self.menu_icon.indicator_stop = progress
            if self.bg_task.completed:
                self.status = "{} fixations detected".format(
                    len(self.fixation_data))
                self.correlate_and_publish()
                self.bg_task = None
                self.menu_icon.indicator_stop = 0.

        frame = events.get('frame')
        if not frame:
            return

        self.last_frame_idx = frame.index
        frame_window = pm.enclosing_window(self.g_pool.timestamps, frame.index)
        fixations = self.g_pool.fixations.by_ts_window(frame_window)
        events['fixations'] = fixations
        if self.show_fixations:
            # Draw a translucent circle plus the fixation id at each fixation
            # (norm_pos y is flipped to convert to image coordinates).
            for f in fixations:
                x = int(f['norm_pos'][0] * frame.width)
                y = int((1. - f['norm_pos'][1]) * frame.height)
                pm.transparent_circle(frame.img, (x, y),
                                      radius=25.,
                                      color=(0., 1., 1., 1.),
                                      thickness=3)
                cv2.putText(frame.img, '{}'.format(f['id']), (x + 30, y),
                            cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 150, 100))

        # Rebuild the detail panel only when the displayed frame changed.
        if self.current_fixation_details and self.prev_index != frame.index:
            info = ''
            for f in fixations:
                info += 'Current fixation, {} of {}\n'.format(
                    f['id'], len(self.g_pool.fixations))
                info += '    Confidence: {:.2f}\n'.format(f['confidence'])
                info += '    Duration: {:.2f} milliseconds\n'.format(
                    f['duration'])
                info += '    Dispersion: {:.3f} degrees\n'.format(
                    f['dispersion'])
                info += '    Frame range: {}-{}\n'.format(
                    f['start_frame_index'] + 1, f['end_frame_index'] + 1)
                info += '    2d gaze pos: x={:.3f}, y={:.3f}\n'.format(
                    *f['norm_pos'])
                if 'gaze_point_3d' in f:
                    info += '    3d gaze pos: x={:.3f}, y={:.3f}, z={:.3f}\n'.format(
                        *f['gaze_point_3d'])
                else:
                    info += '    3d gaze pos: N/A\n'
                # Ids appear to be 1-based: id > 1 implies a predecessor exists.
                if f['id'] > 1:
                    prev_f = self.g_pool.fixations[f['id'] - 2]
                    # NOTE(review): this ADDS the previous fixation's duration
                    # (ms -> s) to the timestamp gap; "time since prev. fixation
                    # end" would subtract it — confirm the intended formula.
                    time_lapsed = f['timestamp'] - prev_f[
                        'timestamp'] + prev_f['duration'] / 1000
                    info += '    Time since prev. fixation: {:.2f} seconds\n'.format(
                        time_lapsed)
                else:
                    info += '    Time since prev. fixation: N/A\n'

                if f['id'] < len(self.g_pool.fixations):
                    next_f = self.g_pool.fixations[f['id']]
                    # NOTE(review): same additive formula as above — verify.
                    time_lapsed = next_f['timestamp'] - f[
                        'timestamp'] + f['duration'] / 1000
                    info += '    Time to next fixation: {:.2f} seconds\n'.format(
                        time_lapsed)
                else:
                    info += '    Time to next fixation: N/A\n'

            self.current_fixation_details.text = info
            self.prev_index = frame.index
Exemplo n.º 7
0
 def get_current_frame_window(self):
     """Return the timestamp window enclosing the currently displayed frame."""
     return pm.enclosing_window(self.g_pool.timestamps, self.get_current_frame_index())
Exemplo n.º 8
0
def export(rec_dir, user_dir, min_data_confidence, start_frame=None, end_frame=None,
           plugin_initializers=(), out_file_path=None, pre_computed=None):
    """
    Simulate playback of a recording and export the visualized world video.

    Runs as a generator: yields (status_message, current_frame) tuples so the
    launching process can report job length and progress. If the export
    crashes, the exception object itself is yielded.

    Args:
        rec_dir: Recording directory containing the world video and metadata.
        user_dir: Directory that may contain user runtime plugins (in "plugins").
        min_data_confidence: Confidence threshold stored on the simulated g_pool.
        start_frame: First frame to export; None means start of the recording.
        end_frame: Export stops before this frame (Python slice semantics).
        plugin_initializers: Initializers passed through to Plugin_List.
        out_file_path: Destination video path; defaults to world_viz.mp4 inside
            rec_dir, and is overwritten if it already exists.
        pre_computed: Dict with "pupil", "gaze" and "fixations" entries, each
            holding msgpack-serialized data deserialized below. Defaults to {}.
    """
    # Guard against the shared-mutable-default pitfall: this function mutates
    # the dicts inside pre_computed, so a literal `{}` default would leak
    # state across calls.
    if pre_computed is None:
        pre_computed = {}

    # Per-process logger name so concurrent export jobs are distinguishable.
    PID = str(os.getpid())
    logger = logging.getLogger(__name__+' with pid: '+PID)
    start_status = 'Starting video export with pid: {}'.format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        pm.update_recording_to_recent(rec_dir)

        vis_plugins = sorted([Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points,
                              Vis_Watermark, Vis_Scan_Path, Vis_Eye_Video_Overlay],
                             key=lambda x: x.__name__)
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(import_runtime_plugins(os.path.join(user_dir, 'plugins')), key=lambda x: x.__name__)

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = pm.load_meta_info(rec_dir)

        # Minimal stand-in for Player's global container; plugins read from it.
        g_pool = Global_Container()
        g_pool.app = 'exporter'
        g_pool.min_data_confidence = min_data_confidence

        # Locate the world video; raise a clear error instead of an opaque
        # IndexError when none exists.
        valid_ext = ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg', '.fake')
        try:
            video_path = next(f for f in glob(os.path.join(rec_dir, "world.*"))
                              if os.path.splitext(f)[1] in valid_ext)
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = init_playback_source(g_pool, source_path=video_path, timing=None)

        timestamps = cap.timestamps

        # Out file path verification, we do this before but if one uses a separate tool, this will kick in.
        if out_file_path is None:
            out_file_path = os.path.join(rec_dir, "world_viz.mp4")
        else:
            file_name = os.path.basename(out_file_path)
            dir_name = os.path.dirname(out_file_path)
            if not dir_name:
                dir_name = rec_dir
            if not file_name:
                file_name = 'world_viz.mp4'
            out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
        logger.debug(exp_info.format(start_frame, start_frame + frames_to_export, frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        # Deserialize the msgpack blobs handed over by the parent process.
        for initializers in pre_computed.values():
            initializers['data'] = [fm.Serialized_Dict(msgpack_bytes=serialized)
                                    for serialized in initializers['data']]

        g_pool.pupil_positions = pm.Bisector(**pre_computed["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        # Main export loop: one iteration per exported frame.
        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {'frame': frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events['gaze'] = g_pool.gaze_positions.by_ts_window(frame_window)
            events['pupil'] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n['_notify_time_'] < time():
                    del n['_notify_time_']
                    del g_pool.delayed_notifications[n['subject']]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield 'Exporting with pid {}'.format(PID), current_frame

        writer.close()
        writer = None

        duration = time()-start_time
        effective_fps = float(current_frame)/duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(result.format(current_frame, out_file_path, duration, effective_fps))
        yield 'Export done. This took {:.0f} seconds.'.format(duration), current_frame

    except GeneratorExit:
        # Raised when the launching process closes the generator early.
        logger.warning('Video export with pid {} was canceled.'.format(os.getpid()))
    except Exception as e:
        from time import sleep
        import traceback
        trace = traceback.format_exc()
        logger.error('Process Export (pid: {}) crashed with trace:\n{}'.format(os.getpid(), trace))
        # Hand the exception to the launching process, then give it a moment
        # to collect it before this generator process exits.
        yield e
        sleep(1.0)
Exemplo n.º 9
0
 def find_markers_in_frame(index):
     """Return all detected markers whose timestamps fall inside frame *index*."""
     return markers_bisector.by_ts_window(pm.enclosing_window(timestamps, index))
Exemplo n.º 10
0
 def recent_events(self, events):
     """Publish the gaze positions that belong to the current frame."""
     if 'frame' not in events:
         return
     current_index = events['frame'].index
     time_window = pm.enclosing_window(self.g_pool.timestamps, current_index)
     events['gaze'] = self.g_pool.gaze_positions.by_ts_window(time_window)
Exemplo n.º 11
0
 def find_markers_in_frame(index):
     """Fetch the markers recorded during the frame at *index*."""
     ts_window = pm.enclosing_window(timestamps, index)
     markers = markers_bisector.by_ts_window(ts_window)
     return markers
Exemplo n.º 12
0
    def recent_events(self, events):
        """Collect background fixation-detection results and render fixations.

        Drains newly detected fixations from the background task, updates the
        menu-icon progress indicator, and publishes results once the task
        completes. For the current frame, looks up fixations in its enclosing
        timestamp window, exposes them via events["fixations"], optionally
        draws them onto the frame image, and refreshes the detail text panel.
        """
        if self.bg_task:
            # Each fetched result is (serialized_dict, start_ts, stop_ts).
            for progress, fixation_result in self.bg_task.fetch():
                self.status = progress
                if fixation_result:
                    serialized, start_ts, stop_ts = fixation_result
                    self.fixation_data.append(
                        fm.Serialized_Dict(msgpack_bytes=serialized)
                    )
                    self.fixation_start_ts.append(start_ts)
                    self.fixation_stop_ts.append(stop_ts)

                if self.fixation_data:
                    # Progress = fraction of the recording's time span covered
                    # by the newest fixation's stop timestamp.
                    current_ts = self.fixation_stop_ts[-1]
                    progress = (current_ts - self.g_pool.timestamps[0]) / (
                        self.g_pool.timestamps[-1] - self.g_pool.timestamps[0]
                    )
                    self.menu_icon.indicator_stop = progress
            if self.bg_task.completed:
                self.status = "{} fixations detected".format(len(self.fixation_data))
                self.correlate_and_publish()
                self.bg_task = None
                self.menu_icon.indicator_stop = 0.0

        frame = events.get("frame")
        if not frame:
            return

        self.last_frame_idx = frame.index
        frame_window = pm.enclosing_window(self.g_pool.timestamps, frame.index)
        fixations = self.g_pool.fixations.by_ts_window(frame_window)
        events["fixations"] = fixations
        if self.show_fixations:
            # Draw a translucent circle plus the fixation id at each fixation
            # (norm_pos y is flipped to convert to image coordinates).
            for f in fixations:
                x = int(f["norm_pos"][0] * frame.width)
                y = int((1.0 - f["norm_pos"][1]) * frame.height)
                pm.transparent_circle(
                    frame.img,
                    (x, y),
                    radius=25.0,
                    color=(0.0, 1.0, 1.0, 1.0),
                    thickness=3,
                )
                cv2.putText(
                    frame.img,
                    "{}".format(f["id"]),
                    (x + 30, y),
                    cv2.FONT_HERSHEY_DUPLEX,
                    0.8,
                    (255, 150, 100),
                )

        # Rebuild the detail panel only when the displayed frame changed.
        if self.current_fixation_details and self.prev_index != frame.index:
            info = ""
            for f in fixations:
                info += "Current fixation, {} of {}\n".format(
                    f["id"], len(self.g_pool.fixations)
                )
                info += "    Confidence: {:.2f}\n".format(f["confidence"])
                info += "    Duration: {:.2f} milliseconds\n".format(f["duration"])
                info += "    Dispersion: {:.3f} degrees\n".format(f["dispersion"])
                info += "    Frame range: {}-{}\n".format(
                    f["start_frame_index"] + 1, f["end_frame_index"] + 1
                )
                info += "    2d gaze pos: x={:.3f}, y={:.3f}\n".format(*f["norm_pos"])
                if "gaze_point_3d" in f:
                    info += "    3d gaze pos: x={:.3f}, y={:.3f}, z={:.3f}\n".format(
                        *f["gaze_point_3d"]
                    )
                else:
                    info += "    3d gaze pos: N/A\n"
                # Ids appear to be 1-based: id > 1 implies a predecessor exists.
                if f["id"] > 1:
                    prev_f = self.g_pool.fixations[f["id"] - 2]
                    # NOTE(review): this ADDS the previous fixation's duration
                    # (ms -> s) to the timestamp gap; "time since prev. fixation
                    # end" would subtract it — confirm the intended formula.
                    time_lapsed = (
                        f["timestamp"] - prev_f["timestamp"] + prev_f["duration"] / 1000
                    )
                    info += "    Time since prev. fixation: {:.2f} seconds\n".format(
                        time_lapsed
                    )
                else:
                    info += "    Time since prev. fixation: N/A\n"

                if f["id"] < len(self.g_pool.fixations):
                    next_f = self.g_pool.fixations[f["id"]]
                    # NOTE(review): same additive formula as above — verify.
                    time_lapsed = (
                        next_f["timestamp"] - f["timestamp"] + f["duration"] / 1000
                    )
                    info += "    Time to next fixation: {:.2f} seconds\n".format(
                        time_lapsed
                    )
                else:
                    info += "    Time to next fixation: N/A\n"

            self.current_fixation_details.text = info
            self.prev_index = frame.index
Exemplo n.º 13
0
 def segments_in_frame(self, frame) -> t.Iterable[Classified_Segment]:
     """Return the classified segments that overlap *frame*'s timestamp window."""
     return self.segments_in_timestamp_window(
         pm.enclosing_window(self._timestamps, frame.index)
     )
Exemplo n.º 14
0
 def get_current_frame_window(self):
     """Compute the enclosing timestamp window for the current frame."""
     current_index = self.get_current_frame_index()
     window = pm.enclosing_window(self.g_pool.timestamps, current_index)
     return window
Exemplo n.º 15
0
    def export_data(self, export_range, export_dir):
        """Export the recording in iMotions format: scene video plus gaze.tlv.

        Creates an "iMotions_<recording start date>" directory inside
        *export_dir*, schedules a scene-video export job, copies info.csv,
        and writes a tab-separated gaze.tlv with one row per gaze datum in
        the exported frame range. Only 3d gaze data is supported; rows
        lacking 3d fields are skipped (with a single error log).

        Args:
            export_range: (start, stop) world-frame index range (stop exclusive).
            export_dir: Directory the iMotions folder is created in.
        """
        user_warned_3d_only = False

        rec_start = self._get_recording_start_date()
        im_dir = os.path.join(export_dir, "iMotions_{}".format(rec_start))

        try:
            # Re-encode the world video as "scene" for iMotions.
            self.add_export_job(
                export_range,
                im_dir,
                plugin_name="iMotions",
                input_name="world",
                output_name="scene",
                process_frame=_process_frame,
                export_timestamps=False,
            )
        except FileNotFoundError:
            # Recording has no world video; the gaze export still proceeds.
            logger.info(
                "'world' video not found. Export continues with gaze data.")

        info_src = os.path.join(self.g_pool.rec_dir, "info.csv")
        info_dest = os.path.join(im_dir, "iMotions_info.csv")
        copy2(info_src, info_dest)  # copy info.csv file

        # Tab-separated values; newline="" lets the csv module control line endings.
        with open(os.path.join(im_dir, "gaze.tlv"),
                  "w",
                  encoding="utf-8",
                  newline="") as csvfile:
            csv_writer = csv.writer(csvfile, delimiter="\t")

            csv_writer.writerow((
                "GazeTimeStamp",
                "MediaTimeStamp",
                "MediaFrameIndex",
                "Gaze3dX",
                "Gaze3dY",
                "Gaze3dZ",
                "Gaze2dX",
                "Gaze2dY",
                "PupilDiaLeft",
                "PupilDiaRight",
                "Confidence",
            ))

            # One row per gaze datum falling inside each exported frame's window.
            for media_idx in range(*export_range):
                media_timestamp = self.g_pool.timestamps[media_idx]
                media_window = pm.enclosing_window(self.g_pool.timestamps,
                                                   media_idx)
                for g in self.g_pool.gaze_positions.by_ts_window(media_window):
                    try:
                        # Pupil diameters keyed by eye id; id 1 is written to
                        # PupilDiaLeft and id 0 to PupilDiaRight below —
                        # presumably eye id 1 = left; confirm convention.
                        pupil_dia = {}
                        for p in g["base_data"]:
                            pupil_dia[p["id"]] = p["diameter_3d"]

                        # Undistort the 2d gaze point: pixel -> 3d ray ->
                        # reproject without distortion.
                        pixel_pos = denormalize(g["norm_pos"],
                                                self.g_pool.capture.frame_size,
                                                flip_y=True)
                        undistorted3d = self.g_pool.capture.intrinsics.unprojectPoints(
                            pixel_pos)
                        undistorted2d = self.g_pool.capture.intrinsics.projectPoints(
                            undistorted3d, use_distortion=False)

                        data = (
                            g["timestamp"],
                            media_timestamp,
                            media_idx - export_range[0],  # index relative to export start
                            *g["gaze_point_3d"],  # Gaze3dX/Y/Z
                            *undistorted2d.flat,  # Gaze2dX/Y
                            pupil_dia.get(1, 0.0),  # PupilDiaLeft
                            pupil_dia.get(0, 0.0),  # PupilDiaRight
                            g["confidence"],
                        )  # Confidence
                    except KeyError:
                        # 2d-only gaze data lacks keys like gaze_point_3d /
                        # diameter_3d; warn once and skip such rows.
                        if not user_warned_3d_only:
                            logger.error(
                                "Currently, the iMotions export only supports 3d gaze data"
                            )
                            user_warned_3d_only = True
                        continue
                    csv_writer.writerow(data)
Exemplo n.º 16
0
 def fixations_on_srf_by_frame_idx(self, frame_index, m_from_screen):
     """Map the fixations occurring during *frame_index* onto the surface."""
     window = pm.enclosing_window(self.g_pool.timestamps, frame_index)
     fixations = self.g_pool.fixations.by_ts_window(window)
     return self.map_data_to_surface(fixations, m_from_screen)
Exemplo n.º 17
0
    def recent_events(self, events):
        """Gather background fixation-detection results and draw fixations.

        Drains newly detected fixations from the background task, updates the
        menu-icon progress indicator, and publishes once the task completes.
        For the current frame, looks up fixations in its enclosing timestamp
        window, exposes them via events["fixations"], optionally overlays
        them on the frame image, and refreshes the detail text panel.
        """
        if self.bg_task:
            # Each fetched result is (serialized_dict, start_ts, stop_ts).
            for progress, fixation_result in self.bg_task.fetch():
                self.status = progress
                if fixation_result:
                    serialized, start_ts, stop_ts = fixation_result
                    self.fixation_data.append(
                        fm.Serialized_Dict(msgpack_bytes=serialized))
                    self.fixation_start_ts.append(start_ts)
                    self.fixation_stop_ts.append(stop_ts)

                if self.fixation_data:
                    # Progress = fraction of the recording's time span covered
                    # by the newest fixation's stop timestamp.
                    current_ts = self.fixation_stop_ts[-1]
                    progress = (current_ts - self.g_pool.timestamps[0]) / (
                        self.g_pool.timestamps[-1] - self.g_pool.timestamps[0])
                    self.menu_icon.indicator_stop = progress
            if self.bg_task.completed:
                self.status = "{} fixations detected".format(
                    len(self.fixation_data))
                self.correlate_and_publish()
                self.bg_task = None
                self.menu_icon.indicator_stop = 0.0

        frame = events.get("frame")
        if not frame:
            return

        self.last_frame_idx = frame.index
        frame_window = pm.enclosing_window(self.g_pool.timestamps, frame.index)
        fixations = self.g_pool.fixations.by_ts_window(frame_window)
        events["fixations"] = fixations
        if self.show_fixations:
            # Draw a translucent circle plus the fixation id at each fixation
            # (norm_pos y is flipped to convert to image coordinates).
            for f in fixations:
                x = int(f["norm_pos"][0] * frame.width)
                y = int((1.0 - f["norm_pos"][1]) * frame.height)
                pm.transparent_circle(
                    frame.img,
                    (x, y),
                    radius=25.0,
                    color=(0.0, 1.0, 1.0, 1.0),
                    thickness=3,
                )
                cv2.putText(
                    frame.img,
                    "{}".format(f["id"]),
                    (x + 30, y),
                    cv2.FONT_HERSHEY_DUPLEX,
                    0.8,
                    (255, 150, 100),
                )

        # Rebuild the detail panel only when the displayed frame changed.
        if self.current_fixation_details and self.prev_index != frame.index:
            info = ""
            for f in fixations:
                info += "Current fixation, {} of {}\n".format(
                    f["id"], len(self.g_pool.fixations))
                info += "    Confidence: {:.2f}\n".format(f["confidence"])
                info += "    Duration: {:.2f} milliseconds\n".format(
                    f["duration"])
                info += "    Dispersion: {:.3f} degrees\n".format(
                    f["dispersion"])
                info += "    Frame range: {}-{}\n".format(
                    f["start_frame_index"] + 1, f["end_frame_index"] + 1)
                info += "    2d gaze pos: x={:.3f}, y={:.3f}\n".format(
                    *f["norm_pos"])
                if "gaze_point_3d" in f:
                    info += "    3d gaze pos: x={:.3f}, y={:.3f}, z={:.3f}\n".format(
                        *f["gaze_point_3d"])
                else:
                    info += "    3d gaze pos: N/A\n"
                # Ids appear to be 1-based: id > 1 implies a predecessor exists.
                if f["id"] > 1:
                    prev_f = self.g_pool.fixations[f["id"] - 2]
                    # NOTE(review): this ADDS the previous fixation's duration
                    # (ms -> s) to the timestamp gap; "time since prev. fixation
                    # end" would subtract it — confirm the intended formula.
                    time_lapsed = (f["timestamp"] - prev_f["timestamp"] +
                                   prev_f["duration"] / 1000)
                    info += "    Time since prev. fixation: {:.2f} seconds\n".format(
                        time_lapsed)
                else:
                    info += "    Time since prev. fixation: N/A\n"

                if f["id"] < len(self.g_pool.fixations):
                    next_f = self.g_pool.fixations[f["id"]]
                    # NOTE(review): same additive formula as above — verify.
                    time_lapsed = (next_f["timestamp"] - f["timestamp"] +
                                   f["duration"] / 1000)
                    info += "    Time to next fixation: {:.2f} seconds\n".format(
                        time_lapsed)
                else:
                    info += "    Time to next fixation: N/A\n"

            self.current_fixation_details.text = info
            self.prev_index = frame.index
Exemplo n.º 18
0
 def recent_events(self, events):
     """Attach the pupil positions for the current frame to *events*."""
     try:
         frame_index = events["frame"].index
     except KeyError:
         # No frame in this event batch; nothing to do.
         return
     window = pm.enclosing_window(self.g_pool.timestamps, frame_index)
     events["pupil"] = self.g_pool.pupil_positions.by_ts_window(window)
Exemplo n.º 19
0
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation for the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.

    This is a generator: it yields ``(status_message, progress)`` tuples so the
    launching process can display progress, and it reacts to ``GeneratorExit``
    (i.e. the consumer closing the generator) as a cancellation signal.

    Parameters (types inferred from usage — confirm against callers):
        rec_dir: recording directory containing the world video and metadata.
        user_dir: user directory; runtime plugins are loaded from its
            ``plugins`` subdirectory.
        min_data_confidence: confidence threshold exposed to plugins via g_pool.
        start_frame, end_frame: trim marks, interpreted like Python list-slice
            bounds (either may be None).
        plugin_initializers: serialized plugin configuration passed to
            ``Plugin_List``.
        out_file_path: destination video path; an existing file is overwritten.
        pre_computed_eye_data: dict of serialized (msgpack) pupil/gaze/fixation
            datums plus their timestamps, deserialized here.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # we are not importing manual gaze correction. In Player corrections have already been applied.
    # in batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector
    from eye_movement import Offline_Eye_Movement_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from video_overlay.plugins import Video_Overlay, Eye_Overlay
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    # Include the pid in the logger name so concurrent export processes can be
    # told apart in the log output.
    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        # Sort by class name so the available-plugin list has a stable order.
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Eye_Overlay,
                Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector, Offline_Eye_Movement_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        # Minimal stand-in for Player's global container so plugins run as if
        # inside the Player app.
        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        # Locate the world video by extension; ".fake" covers recordings
        # without an actual world camera stream.
        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(
                f
                for f in glob(os.path.join(rec_dir, "world.*"))
                if os.path.splitext(f)[1] in valid_ext
            )
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = File_Source(g_pool, source_path=video_path, fill_gaps=True, timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        # Normalize after the slice test: slicing handles None natively, but
        # the seek/progress arithmetic below needs a concrete integer.
        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(
                start_frame, start_frame + frames_to_export, frames_to_export
            )
        )

        # setup of writer
        writer = AV_Writer(
            out_file_path, fps=cap.frame_rate, audio_dir=rec_dir, use_timestamps=True
        )

        cap.seek_to_frame(start_frame)

        start_time = time()

        # Populate g_pool with everything plugins expect from Player.
        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        # The eye data arrives msgpack-serialized (it crossed a process
        # boundary); rehydrate each datum into a lazy Serialized_Dict.
        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.pupil_positions_by_id = (
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_0"]),
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_1"]),
        )
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            # iterate over a copy since we delete from the dict while iterating.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        # Flush the writer; "all" presumably also exports the timestamp
        # files alongside the video — confirm against AV_Writer.
        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration, effective_fps)
        )
        yield "Export done. This took {:.0f} seconds.".format(duration), current_frame

    except GeneratorExit:
        # The consumer closed the generator: treat as a cancel request.
        logger.warning("Video export with pid {} was canceled.".format(os.getpid()))