def serialized_dict(datum):
    if type(datum) is dict:
        return fm.Serialized_Dict(python_dict=datum)
    elif type(datum) is bytes:
        return fm.Serialized_Dict(msgpack_bytes=datum)
    else:
        raise ValueError("Unsupported gaze datum type: {}.".format(type(datum)))
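Both branches return the same read-only view over the datum. A minimal usage sketch (assuming Pupil's file_methods module is importable as fm; the datum fields are illustrative):

import file_methods as fm

datum = {"topic": "gaze.2d", "timestamp": 120.5, "confidence": 0.9}
from_dict = serialized_dict(datum)                  # wraps a python dict
from_bytes = serialized_dict(from_dict.serialized)  # wraps msgpack bytes
assert from_dict["timestamp"] == from_bytes["timestamp"]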
Example #2
def _map_gaze(
    gazer_class_name,
    gazer_params,
    fake_gpool,
    pupil_pos_in_mapping_range,
    manual_correction_x,
    manual_correction_y,
    shared_memory,
):
    fake_gpool.import_runtime_plugins()
    gazers_by_name = registered_gazer_classes_by_class_name()
    gazer_cls = gazers_by_name[gazer_class_name]
    gazer = gazer_cls(fake_gpool, params=gazer_params)

    first_ts = pupil_pos_in_mapping_range[0]["timestamp"]
    last_ts = pupil_pos_in_mapping_range[-1]["timestamp"]
    ts_span = last_ts - first_ts

    for gaze_datum in gazer.map_pupil_to_gaze(pupil_pos_in_mapping_range):
        _apply_manual_correction(gaze_datum, manual_correction_x,
                                 manual_correction_y)

        curr_ts = gaze_datum["timestamp"]
        shared_memory.progress = (curr_ts - first_ts) / ts_span

        result = (curr_ts, fm.Serialized_Dict(gaze_datum))
        yield [result]
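The _apply_manual_correction helper called above is not part of this listing. A plausible sketch, assuming each gaze datum carries a normalized norm_pos pair that the manual offsets shift in place:

def _apply_manual_correction(gaze_datum, manual_correction_x, manual_correction_y):
    # norm_pos may be stored as an immutable tuple; copy it to a list to mutate
    gaze_norm_pos = list(gaze_datum["norm_pos"])
    gaze_norm_pos[0] += manual_correction_x
    gaze_norm_pos[1] += manual_correction_y
    gaze_datum["norm_pos"] = gaze_norm_pos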
Example #3
def _map_gaze(
    gazer_class_name,
    gazer_params,
    fake_gpool,
    pupil_pos_in_mapping_range,
    manual_correction_x,
    manual_correction_y,
    shared_memory,
):
    fake_gpool.import_runtime_plugins()
    gazers_by_name = gazer_classes_by_class_name(registered_gazer_classes())
    gazer_cls = gazers_by_name[gazer_class_name]
    gazer = gazer_cls(fake_gpool, params=gazer_params)

    first_ts = pupil_pos_in_mapping_range[0]["timestamp"]
    last_ts = pupil_pos_in_mapping_range[-1]["timestamp"]
    ts_span = last_ts - first_ts
    curr_ts = first_ts

    for gaze_datum in gazer.map_pupil_to_gaze(pupil_pos_in_mapping_range):
        _apply_manual_correction(gaze_datum, manual_correction_x,
                                 manual_correction_y)

        # gazer.map_pupil_to_gaze does not yield gaze with monotonic timestamps.
        # Binocular pupil matches are delayed internally. To avoid non-monotonic
        # progress updates, we use the largest timestamp that has been returned up to
        # the current point in time.
        curr_ts = max(curr_ts, gaze_datum["timestamp"])
        shared_memory.progress = (curr_ts - first_ts) / ts_span

        result = (curr_ts, fm.Serialized_Dict(gaze_datum))
        yield [result]
Example #4
    def recent_events(self, events):
        super().recent_events(events)
        while self.data_sub.new_data:
            topic = self.data_sub.recv_topic()
            remaining_frames = self.data_sub.recv_remaining_frames()
            if topic.startswith("pupil."):
                # pupil data only has one remaining frame
                payload_serialized = next(remaining_frames)
                pupil_datum = fm.Serialized_Dict(msgpack_bytes=payload_serialized)
                assert pm.PupilTopic.match(topic, eye_id=pupil_datum["id"])
                timestamp = pupil_datum["timestamp"]
                self._pupil_data_store.append(topic, pupil_datum, timestamp)
            else:
                payload = self.data_sub.deserialize_payload(*remaining_frames)
                if payload["subject"] == "file_source.video_finished":
                    for eye_id in (0, 1):
                        if self.eye_video_loc[eye_id] == payload["source_path"]:
                            logger.debug("eye {} process complete".format(eye_id))
                            self.eye_frame_idx[eye_id] = self.eye_frame_num[eye_id]
                            self.detection_status[eye_id] = "complete"
                            self.stop_eye_process(eye_id)
                            break
                    if self.eye_video_loc == [None, None]:
                        data = self._pupil_data_store.as_pupil_data_bisector()
                        self.publish_new(pupil_data_bisector=data)
                if payload["subject"] == "file_source.current_frame_index":
                    for eye_id in (0, 1):
                        if self.eye_video_loc[eye_id] == payload["source_path"]:
                            self.eye_frame_idx[eye_id] = payload["index"]

        self.menu_icon.indicator_stop = self.detection_progress
Example #5
def _map_gaze(
    calibration_result,
    fake_gpool,
    pupil_pos_in_mapping_range,
    manual_correction_x,
    manual_correction_y,
    shared_memory,
):
    gaze_mapping_plugins_by_name = {
        p.__name__: p
        for p in gaze_mapping_plugins
    }
    gaze_mapper_cls = gaze_mapping_plugins_by_name[
        calibration_result.mapping_plugin_name]
    gaze_mapper = gaze_mapper_cls(fake_gpool, **calibration_result.mapper_args)

    for idx_incoming, pupil_pos in enumerate(pupil_pos_in_mapping_range):
        mapped_gaze = gaze_mapper.on_pupil_datum(pupil_pos)

        output_gaze = []
        for gaze_datum in mapped_gaze:
            _apply_manual_correction(gaze_datum, manual_correction_x,
                                     manual_correction_y)
            output_gaze.append(
                (gaze_datum["timestamp"], fm.Serialized_Dict(gaze_datum)))

        shared_memory.progress = (idx_incoming +
                                  1) / len(pupil_pos_in_mapping_range)

        if output_gaze:
            yield output_gaze
Example #6
    def recent_events(self, events):
        super().recent_events(events)
        while self.data_sub.new_data:
            topic = self.data_sub.recv_topic()
            remaining_frames = self.data_sub.recv_remaining_frames()
            if topic.startswith("pupil."):
                # pupil data only has one remaining frame
                payload_serialized = next(remaining_frames)
                pupil_datum = fm.Serialized_Dict(
                    msgpack_bytes=payload_serialized)
                assert int(topic[-1]) == pupil_datum["id"]
                self.pupil_positions[pupil_datum["timestamp"]] = pupil_datum
                self.id_topics[pupil_datum["timestamp"]] = topic
            else:
                payload = self.data_sub.deserialize_payload(*remaining_frames)
                if payload["subject"] == "file_source.video_finished":
                    for eyeid in (0, 1):
                        if self.eye_video_loc[eyeid] == payload["source_path"]:
                            logger.debug(
                                "eye {} process complete".format(eyeid))
                            self.detection_status[eyeid] = "complete"
                            self.stop_eye_process(eyeid)
                            break
                    if self.eye_video_loc == [None, None]:
                        self.correlate_publish()
        total = sum(self.eye_frame_num)
        self.menu_icon.indicator_stop = (len(self.pupil_positions) /
                                         total if total else 0.0)
Example #7
        def blink_finished(idx):
            nonlocal blink

            # get tmp pupil idx
            start_idx = blink["__start_response_index__"]
            del blink["__start_response_index__"]

            blink["end_timestamp"] = self.timestamps[idx]
            blink["timestamp"] = (blink["end_timestamp"] +
                                  blink["start_timestamp"]) / 2
            blink["duration"] = blink["end_timestamp"] - blink["start_timestamp"]
            blink["base_data"] = pupil_data[start_idx:idx].tolist()
            blink["filter_response"] = self.filter_response[
                start_idx:idx].tolist()
            # blink confidence is the mean of the absolute filter response
            # during the blink event, clamped at 1.
            blink["confidence"] = min(
                float(np.abs(blink["filter_response"]).mean()), 1.0)

            # correlate world indices
            ts_start, ts_end = blink["start_timestamp"], blink["end_timestamp"]

            idx_start, idx_end = np.searchsorted(self.g_pool.timestamps,
                                                 [ts_start, ts_end])
            # fix `list index out of range` error
            idx_end = min(idx_end, len(self.g_pool.timestamps) - 1)
            blink["start_frame_index"] = int(idx_start)
            blink["end_frame_index"] = int(idx_end)
            blink["index"] = int((idx_start + idx_end) // 2)

            blink_data.append(fm.Serialized_Dict(python_dict=blink))
            blink_start_ts.append(ts_start)
            blink_stop_ts.append(ts_end)
Example #8
    def recent_events(self, events):
        super().recent_events(events)

        if self.process_pipe and self.process_pipe.new_data:
            topic, msg = self.process_pipe.recv()
            if topic == 'progress':
                recent = msg.get('data', [])
                progress, data = zip(*recent)
                self.circle_marker_positions.extend([d for d in data if d])
                self.detection_progress = progress[-1]
            elif topic == 'finished':
                self.detection_progress = 100.
                self.process_pipe = None
                for s in self.sections:
                    self.calibrate_section(s)
            elif topic == 'exception':
                logger.warning('Calibration marker detection raised exception:\n{}'.format(msg['reason']))
                self.process_pipe = None
                self.detection_progress = 0.
                logger.info('Marker detection was interrupted')
                logger.debug('Reason: {}'.format(msg.get('reason', 'n/a')))
            self.menu_icon.indicator_stop = self.detection_progress / 100.

        for sec in self.sections:
            if 'bg_task' in sec:
                for progress, gaze_data in sec["bg_task"].fetch():
                    for timestamp, serialized in gaze_data:
                        gaze_datum = fm.Serialized_Dict(msgpack_bytes=serialized)
                        sec['gaze'].append(gaze_datum)
                        sec['gaze_ts'].append(timestamp)
                    sec['status'] = progress
                if sec["bg_task"].completed:
                    self.correlate_and_publish()
                    del sec['bg_task']
Example #9
def offline_detection(
    source_path,
    timestamps,
    frame_index_range,
    frame_index_to_num_markers,
    shared_memory,
):
    batch_size = 30
    frame_start, frame_end = frame_index_range
    frame_indices = sorted(
        set(range(frame_start, frame_end + 1)) -
        set(frame_index_to_num_markers.keys()))
    if not frame_indices:
        return

    frame_count = frame_end - frame_start + 1
    shared_memory.progress = (frame_indices[0] - frame_start + 1) / frame_count
    yield None

    src = video_capture.File_Source(SimpleNamespace(),
                                    source_path,
                                    timing=None)

    queue = []
    for frame_index in frame_indices:
        shared_memory.progress = (frame_index - frame_start + 1) / frame_count
        timestamp = timestamps[frame_index]
        src.seek_to_frame(frame_index)
        frame = src.get_frame()

        detections = _detect(frame)
        if detections:
            serialized_dicts = [
                fm.Serialized_Dict(detection) for detection in detections
            ]
            queue.append(
                (timestamp, serialized_dicts, frame_index, len(detections)))
        else:
            queue.append((timestamp, [fm.Serialized_Dict({})], frame_index, 0))

        if len(queue) >= batch_size:
            data = queue[:batch_size]
            del queue[:batch_size]
            yield data

    yield queue
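The queue/batch_size idiom above recurs across these generators. The same pattern as a standalone sketch (batch_iter is a hypothetical helper, not part of the Pupil code base):

def batch_iter(iterable, batch_size):
    # Accumulate items and yield them in lists of at most batch_size.
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) >= batch_size:
            yield batch
            batch = []
    if batch:  # flush the remainder, mirroring the trailing `yield queue`
        yield batch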
Example #10
    def fire_annotation(self, annotation_label):
        if self.last_frame_ts is None:
            return
        ts = self.last_frame_ts
        logger.info("{} annotation @ {}".format(annotation_label, ts))
        new_annotation = create_annotation(annotation_label, ts)
        new_annotation["added_in_player"] = True
        self.annotations.insert(new_annotation["timestamp"],
                                fm.Serialized_Dict(python_dict=new_annotation))
Example #11
def offline_localization(
    timestamps,
    frame_index_range,
    markers_bisector,
    frame_index_to_num_markers,
    marker_id_to_extrinsics,
    camera_intrinsics,
    shared_memory,
):
    batch_size = 300

    def find_markers_in_frame(index):
        window = pm.enclosing_window(timestamps, index)
        return markers_bisector.by_ts_window(window)

    camera_extrinsics_prv = None
    not_localized_count = 0

    frame_start, frame_end = frame_index_range
    frame_count = frame_end - frame_start + 1
    frame_indices = sorted(
        set(range(frame_start, frame_end + 1))
        & set(frame_index_to_num_markers.keys()))

    queue = []
    for frame_index in frame_indices:
        shared_memory.progress = (frame_index - frame_start + 1) / frame_count
        if frame_index_to_num_markers[frame_index]:
            markers_in_frame = find_markers_in_frame(frame_index)
            camera_extrinsics = solvepnp.calculate(
                camera_intrinsics,
                markers_in_frame,
                marker_id_to_extrinsics,
                camera_extrinsics_prv=camera_extrinsics_prv,
                min_n_markers_per_frame=1,
            )
            if camera_extrinsics is not None:
                camera_extrinsics_prv = camera_extrinsics
                not_localized_count = 0

                timestamp = timestamps[frame_index]
                pose_data = get_pose_data(camera_extrinsics, timestamp)
                serialized_dict = fm.Serialized_Dict(pose_data)
                queue.append((timestamp, serialized_dict))

                if len(queue) >= batch_size:
                    data = queue[:batch_size]
                    del queue[:batch_size]
                    yield data

                continue

        not_localized_count += 1
        # After several consecutive frames without a localization, drop the
        # previous camera extrinsics so a stale pose no longer seeds solvePnP.
        if not_localized_count >= 5:
            camera_extrinsics_prv = None

    yield queue
Example #12
    def from_tuple(segment_tuple: tuple) -> "Classified_Segment":
        k = Classified_Segment._private_schema_keys
        v = segment_tuple
        assert len(k) == len(v)
        segment_dict = dict(zip(k, v))
        segment_dict["segment_data"] = [
            fm.Serialized_Dict(msgpack_bytes=datum)
            for datum in segment_dict["segment_data"]
        ]
        return Classified_Segment.from_dict(segment_dict)
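from_tuple assumes the tuple fields follow Classified_Segment._private_schema_keys in order, with segment_data stored as raw msgpack bytes. A hedged sketch of the inverse (to_tuple is hypothetical; it leans on the .serialized accessor shown in Example #22):

    def to_tuple(self) -> tuple:
        # Mirror of from_tuple: emit values in _private_schema_keys order,
        # packing each segment datum back into msgpack bytes.
        segment_dict = dict(self._python_dict)
        segment_dict["segment_data"] = [
            datum.serialized for datum in segment_dict["segment_data"]
        ]
        return tuple(segment_dict[k]
                     for k in Classified_Segment._private_schema_keys)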
Example #13
    def fire_annotation(self, annotation_label):
        if self.last_frame_ts is None:
            return
        if self.last_frame_index < 0:
            return
        ts = self.last_frame_ts
        annotation_desc = self._annotation_description(
            label=annotation_label, world_index=self.last_frame_index)
        logger.info(annotation_desc)
        new_annotation = create_annotation(annotation_label, ts)
        new_annotation["added_in_player"] = True
        self.annotations.insert(new_annotation["timestamp"],
                                fm.Serialized_Dict(python_dict=new_annotation))
Example #14
    def fire_annotation(self, annotation_label):
        t = self.last_frame_ts
        logger.info('"{}"@{}'.format(annotation_label, t))
        # you may add more fields to this dictionary if you want.
        annotation_new = {
            'subject': 'annotation',
            'topic': 'notify.annotation',
            'label': annotation_label,
            'timestamp': t,
            'duration': 0.0,
            'added_in_player': True
        }
        self.annotations.insert(annotation_new['timestamp'],
                                fm.Serialized_Dict(python_dict=annotation_new))
Example #15
    def fire_annotation(self, annotation_label):
        t = self.last_frame_ts
        logger.info('"{}"@{}'.format(annotation_label, t))
        # you may add more fields to this dictionary if you want.
        annotation_new = {
            "subject": "annotation",
            "topic": "notify.annotation",
            "label": annotation_label,
            "timestamp": t,
            "duration": 0.0,
            "added_in_player": True,
        }
        self.annotations.insert(annotation_new["timestamp"],
                                fm.Serialized_Dict(python_dict=annotation_new))
Example #16
def offline_detection(
    source_path,
    all_timestamps,
    frame_index_range,
    calculated_frame_indices,
    shared_memory,
):
    batch_size = 30
    frame_start, frame_end = frame_index_range
    frame_indices = sorted(
        set(range(frame_start, frame_end + 1)) - calculated_frame_indices)
    if not frame_indices:
        return

    frame_count = frame_end - frame_start + 1
    shared_memory.progress = (frame_indices[0] - frame_start + 1) / frame_count
    yield None

    src = video_capture.File_Source(SimpleNamespace(),
                                    source_path,
                                    fill_gaps=False,
                                    timing=None)
    timestamps_no_gaps = src.timestamps
    uncalculated_timestamps = all_timestamps[frame_indices]
    seek_poses = np.searchsorted(timestamps_no_gaps, uncalculated_timestamps)

    queue = []
    for frame_index, timestamp, target_frame_idx in zip(
            frame_indices, uncalculated_timestamps, seek_poses):
        detections = []
        if timestamp in timestamps_no_gaps:
            if target_frame_idx != src.target_frame_idx:
                src.seek_to_frame(
                    target_frame_idx)  # only seek frame if necessary
            frame = src.get_frame()
            detections = _detect(frame)

        serialized_dicts = [fm.Serialized_Dict(d) for d in detections]
        queue.append((timestamp, serialized_dicts, frame_index))

        if len(queue) >= batch_size:
            shared_memory.progress = (frame_index - frame_start +
                                      1) / frame_count

            data = queue[:batch_size]
            del queue[:batch_size]
            yield data

    yield queue
Example #17
    def recent_events(self, events):
        super().recent_events(events)

        if self.process_pipe and self.process_pipe.new_data:
            topic, msg = self.process_pipe.recv()
            if topic == "progress":
                recent = msg.get("data", [])
                progress, data = zip(*recent)
                self.circle_marker_positions.extend([d for d in data if d])
                self.detection_progress = progress[-1]
            elif topic == "finished":
                self.detection_progress = 100.0
                self.process_pipe = None
                for s in self.sections:
                    self.calibrate_section(s)
            elif topic == "exception":
                logger.warning(
                    "Calibration marker detection raised exception:\n{}".
                    format(msg["reason"]))
                self.process_pipe = None
                self.detection_progress = 0.0
                logger.info("Marker detection was interrupted")
                logger.debug("Reason: {}".format(msg.get("reason", "n/a")))
            self.menu_icon.indicator_stop = self.detection_progress / 100.0

        for sec in self.sections:
            if "bg_task" in sec:
                for progress, gaze_data in sec["bg_task"].fetch():
                    for timestamp, serialized in gaze_data:
                        gaze_datum = fm.Serialized_Dict(
                            msgpack_bytes=serialized)
                        sec["gaze"].append(gaze_datum)
                        sec["gaze_ts"].append(timestamp)
                    sec["status"] = progress
                if sec["bg_task"].completed:
                    self.correlate_and_publish()
                    del sec["bg_task"]
Example #18
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation of the world video and saves a given time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import MPEG_Audio_Writer

    # We are not importing manual gaze correction. In Player, corrections have
    # already been applied.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from video_overlay.plugins import Video_Overlay, Eye_Overlay
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(f"{__name__} with pid: {PID}")
    start_status = f"Starting video export with pid: {PID}"
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Eye_Overlay,
                Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        recording = PupilRecording(rec_dir)
        meta_info = recording.meta_info

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.process = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        videos = recording.files().core().world().videos()
        if not videos:
            raise FileNotFoundError("No world video found")

        source_path = videos[0].resolve()
        cap = File_Source(g_pool,
                          source_path=source_path,
                          fill_gaps=True,
                          timing=None)
        if not cap.initialised:
            warn = "Trying to export zero-duration world video."
            logger.warning(warn)
            yield warn, 0.0
            return

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, so we can test them as such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and
        # give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        logger.debug(
            f"Will export from frame {start_frame} to frame "
            f"{start_frame + frames_to_export}. This means I will export "
            f"{frames_to_export} frames.")

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.PupilDataBisector.from_init_dict(
            pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        try:
            # setup of writer
            writer = MPEG_Audio_Writer(
                out_file_path,
                start_time_synced=trimmed_timestamps[0],
                audio_dir=rec_dir,
            )

            while frames_to_export > current_frame:
                try:
                    frame = cap.get_frame()
                except EndofVideoError:
                    break

                events = {"frame": frame}
                # new positions and events
                frame_window = pm.enclosing_window(g_pool.timestamps,
                                                   frame.index)
                events["gaze"] = g_pool.gaze_positions.by_ts_window(
                    frame_window)
                events["pupil"] = g_pool.pupil_positions.by_ts_window(
                    frame_window)

                # publish delayed notifications when their time has come.
                for n in list(g_pool.delayed_notifications.values()):
                    if n["_notify_time_"] < time():
                        del n["_notify_time_"]
                        del g_pool.delayed_notifications[n["subject"]]
                        g_pool.notifications.append(n)

                # notify each plugin if there are new notifications:
                while g_pool.notifications:
                    n = g_pool.notifications.pop(0)
                    for p in g_pool.plugins:
                        p.on_notify(n)

                # allow each Plugin to do its work.
                for p in g_pool.plugins:
                    p.recent_events(events)

                writer.write_video_frame(frame)
                current_frame += 1
                yield "Exporting with pid {}".format(PID), current_frame
        except GeneratorExit:
            logger.warning(f"Video export with pid {PID} was canceled.")
            writer.close(timestamp_export_format=None,
                         closed_suffix=".canceled")
            return

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        logger.info(
            f"Export done: Exported {current_frame} frames to {out_file_path}. "
            f"This took {duration} seconds. "
            f"Exporter ran at {effective_fps} frames per second.")
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        logger.warning(f"Video export with pid {PID} was canceled.")
Example #19
    def recent_events(self, events):
        if self.bg_task:
            for progress, fixation_result in self.bg_task.fetch():
                self.status = progress
                if fixation_result:
                    serialized, start_ts, stop_ts = fixation_result
                    self.fixation_data.append(
                        fm.Serialized_Dict(msgpack_bytes=serialized))
                    self.fixation_start_ts.append(start_ts)
                    self.fixation_stop_ts.append(stop_ts)

                if self.fixation_data:
                    current_ts = self.fixation_stop_ts[-1]
                    progress = (current_ts - self.g_pool.timestamps[0]) / (
                        self.g_pool.timestamps[-1] - self.g_pool.timestamps[0])
                    self.menu_icon.indicator_stop = progress
            if self.bg_task.completed:
                self.status = "{} fixations detected".format(
                    len(self.fixation_data))
                self.correlate_and_publish()
                self.bg_task = None
                self.menu_icon.indicator_stop = 0.0

        frame = events.get("frame")
        if not frame:
            return

        self.last_frame_idx = frame.index
        frame_window = pm.enclosing_window(self.g_pool.timestamps, frame.index)
        fixations = self.g_pool.fixations.by_ts_window(frame_window)
        events["fixations"] = fixations
        if self.show_fixations:
            for f in fixations:
                x = int(f["norm_pos"][0] * frame.width)
                y = int((1.0 - f["norm_pos"][1]) * frame.height)
                pm.transparent_circle(
                    frame.img,
                    (x, y),
                    radius=25.0,
                    color=(0.0, 1.0, 1.0, 1.0),
                    thickness=3,
                )
                cv2.putText(
                    frame.img,
                    "{}".format(f["id"]),
                    (x + 30, y),
                    cv2.FONT_HERSHEY_DUPLEX,
                    0.8,
                    (255, 150, 100),
                )

        if self.current_fixation_details and self.prev_index != frame.index:
            info = ""
            for f in fixations:
                info += "Current fixation, {} of {}\n".format(
                    f["id"], len(self.g_pool.fixations))
                info += "    Confidence: {:.2f}\n".format(f["confidence"])
                info += "    Duration: {:.2f} milliseconds\n".format(
                    f["duration"])
                info += "    Dispersion: {:.3f} degrees\n".format(
                    f["dispersion"])
                info += "    Frame range: {}-{}\n".format(
                    f["start_frame_index"] + 1, f["end_frame_index"] + 1)
                info += "    2d gaze pos: x={:.3f}, y={:.3f}\n".format(
                    *f["norm_pos"])
                if "gaze_point_3d" in f:
                    info += "    3d gaze pos: x={:.3f}, y={:.3f}, z={:.3f}\n".format(
                        *f["gaze_point_3d"])
                else:
                    info += "    3d gaze pos: N/A\n"
                if f["id"] > 1:
                    prev_f = self.g_pool.fixations[f["id"] - 2]
                    time_lapsed = (f["timestamp"] - prev_f["timestamp"] +
                                   prev_f["duration"] / 1000)
                    info += "    Time since prev. fixation: {:.2f} seconds\n".format(
                        time_lapsed)
                else:
                    info += "    Time since prev. fixation: N/A\n"

                if f["id"] < len(self.g_pool.fixations):
                    next_f = self.g_pool.fixations[f["id"]]
                    time_lapsed = (next_f["timestamp"] - f["timestamp"] +
                                   f["duration"] / 1000)
                    info += "    Time to next fixation: {:.2f} seconds\n".format(
                        time_lapsed)
                else:
                    info += "    Time to next fixation: N/A\n"

            self.current_fixation_details.text = info
            self.prev_index = frame.index
Example #20
def detect_fixations(capture, gaze_data, max_dispersion, min_duration,
                     max_duration, min_data_confidence):
    yield "Detecting fixations...", ()
    gaze_data = [
        fm.Serialized_Dict(msgpack_bytes=serialized)
        for serialized in gaze_data
    ]
    if not gaze_data:
        logger.warning("No data available to find fixations")
        return "Fixation detection complete", ()

    use_pupil = "gaze_normal_3d" in gaze_data[
        0] or "gaze_normals_3d" in gaze_data[0]
    logger.info("Starting fixation detection using {} data...".format(
        "3d" if use_pupil else "2d"))
    fixation_result = Fixation_Result_Factory()

    Q = deque()
    enum = deque(gaze_data)
    while enum:
        # check if Q contains enough data
        if len(Q) < 2 or Q[-1]["timestamp"] - Q[0]["timestamp"] < min_duration:
            datum = enum.popleft()
            Q.append(datum)
            continue

        # min duration reached, check for fixation
        dispersion, origin, base_data = gaze_dispersion(capture,
                                                        Q,
                                                        use_pupil=use_pupil)
        if dispersion > max_dispersion:
            # not a fixation, move forward
            Q.popleft()
            continue

        left_idx = len(Q)

        # minimal fixation found. collect maximal data
        # to perform binary search for fixation end
        while enum:
            datum = enum[0]
            if datum["timestamp"] > Q[0]["timestamp"] + max_duration:
                break  # maximum data found
            Q.append(enum.popleft())

        # check for fixation with maximum duration
        dispersion, origin, base_data = gaze_dispersion(capture,
                                                        Q,
                                                        use_pupil=use_pupil)
        if dispersion <= max_dispersion:
            fixation = fixation_result.from_data(dispersion, origin, base_data,
                                                 capture.timestamps)
            yield "Detecting fixations...", fixation
            Q.clear()  # discard old Q
            continue

        slicable = list(Q)  # deque does not support slicing
        right_idx = len(Q)

        # binary search
        while left_idx + 1 < right_idx:
            middle_idx = (left_idx + right_idx) // 2 + 1
            dispersion, origin, base_data = gaze_dispersion(
                capture, slicable[:middle_idx], use_pupil=use_pupil)

            if dispersion <= max_dispersion:
                left_idx = middle_idx - 1
            else:
                right_idx = middle_idx - 1

        middle_idx = (left_idx + right_idx) // 2
        dispersion_result = gaze_dispersion(capture,
                                            slicable[:middle_idx],
                                            use_pupil=use_pupil)
        fixation = fixation_result.from_data(*dispersion_result,
                                             capture.timestamps)
        yield "Detecting fixations...", fixation
        Q.clear()  # clear queue
        enum.extendleft(slicable[middle_idx:])  # note: extendleft reverses its argument's order

    yield "Fixation detection complete", ()
Example #21
def export(
        rec_dir,
        user_dir,
        min_data_confidence,
        start_frame=None,
        end_frame=None,
        plugin_initializers=(),
        out_file_path=None,
        pre_computed=None,  # avoid a shared mutable default argument
):

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        pre_computed = pre_computed if pre_computed is not None else {}
        pm.update_recording_to_recent(rec_dir)

        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = Global_Container()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        video_path = [
            f for f in glob(os.path.join(rec_dir, "world.*"))
            if os.path.splitext(f)[1] in valid_ext
        ][0]
        cap = init_playback_source(g_pool, source_path=video_path, timing=None)

        timestamps = cap.timestamps

        # Out file path verification. We do this beforehand, but this kicks in
        # if the exporter is used as a separate tool.
        if out_file_path is None:
            out_file_path = os.path.join(rec_dir, "world_viz.mp4")
        else:
            file_name = os.path.basename(out_file_path)
            dir_name = os.path.dirname(out_file_path)
            if not dir_name:
                dir_name = rec_dir
            if not file_name:
                file_name = "world_viz.mp4"
            out_file_path = os.path.expanduser(
                os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, so we can test them as such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_loc=audio_path,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close()
        writer = None

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        print("Video export with pid {} was canceled.".format(os.getpid()))
    except Exception as e:
        from time import sleep
        import traceback

        trace = traceback.format_exc()
        print("Process Export (pid: {}) crashed with trace:\n{}".format(
            os.getpid(), trace))
        yield e
        sleep(1.0)
Example #22
    def to_msgpack(self) -> utils.MsgPack_Serialized_Segment:
        serialized_dict = fm.Serialized_Dict(python_dict=self._python_dict)
        return serialized_dict.serialized
Example #23
    def to_serialized_dict(self) -> fm.Serialized_Dict:
        return fm.Serialized_Dict(python_dict=self._python_dict)
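Examples #22 and #23 are two halves of the same round trip. A minimal sketch (assuming file_methods is importable as fm; the payload is illustrative):

import file_methods as fm

original = fm.Serialized_Dict(python_dict={"topic": "fixation", "id": 1})
packed = original.serialized  # msgpack bytes, as returned by to_msgpack()
restored = fm.Serialized_Dict(msgpack_bytes=packed)
assert restored["id"] == original["id"]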
Example #24
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation of the world video and saves a given time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # We are not importing manual gaze correction. In Player, corrections have
    # already been applied. In the batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_eye_video_overlay import Vis_Eye_Video_Overlay
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(f for f in glob(os.path.join(rec_dir, "world.*"))
                              if os.path.splitext(f)[1] in valid_ext)
        except StopIteration:
            raise FileNotFoundError("No Video world found")
        cap = File_Source(g_pool,
                          source_path=video_path,
                          fill_gaps=True,
                          timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, so we can test them as such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_dir=rec_dir,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        logger.warning("Video export with pid {} was canceled.".format(
            os.getpid()))
Example #25
    def __init__(self, msgpack_bytes: utils.MsgPack_Serialized_Segment):
        serialized_dict = fm.Serialized_Dict(msgpack_bytes=msgpack_bytes)
        super().__init__(serialized_dict=serialized_dict)
Example #26
    def recent_events(self, events):
        if self.bg_task:
            for progress, fixation_result in self.bg_task.fetch():
                self.status = progress
                if fixation_result:
                    serialized, start_ts, stop_ts = fixation_result
                    self.fixation_data.append(fm.Serialized_Dict(msgpack_bytes=serialized))
                    self.fixation_start_ts.append(start_ts)
                    self.fixation_stop_ts.append(stop_ts)

                if self.fixation_data:
                    current_ts = self.fixation_stop_ts[-1]
                    progress = ((current_ts - self.g_pool.timestamps[0]) /
                                (self.g_pool.timestamps[-1] - self.g_pool.timestamps[0]))
                    self.menu_icon.indicator_stop = progress
            if self.bg_task.completed:
                self.status = "{} fixations detected".format(len(self.fixation_data))
                self.correlate_and_publish()
                self.bg_task = None
                self.menu_icon.indicator_stop = 0.

        frame = events.get('frame')
        if not frame:
            return

        self.last_frame_idx = frame.index
        frame_window = pm.enclosing_window(self.g_pool.timestamps, frame.index)
        fixations = self.g_pool.fixations.by_ts_window(frame_window)
        events['fixations'] = fixations
        if self.show_fixations:
            for f in fixations:
                x = int(f['norm_pos'][0] * frame.width)
                y = int((1. - f['norm_pos'][1]) * frame.height)
                pm.transparent_circle(frame.img, (x, y), radius=25., color=(0., 1., 1., 1.), thickness=3)
                cv2.putText(frame.img, '{}'.format(f['id']), (x + 30, y),
                            cv2.FONT_HERSHEY_DUPLEX, 0.8, (255, 150, 100))

        if self.current_fixation_details and self.prev_index != frame.index:
            info = ''
            for f in fixations:
                info += 'Current fixation, {} of {}\n'.format(f['id'], len(self.g_pool.fixations))
                info += '    Confidence: {:.2f}\n'.format(f['confidence'])
                info += '    Duration: {:.2f} milliseconds\n'.format(f['duration'])
                info += '    Dispersion: {:.3f} degrees\n'.format(f['dispersion'])
                info += '    Frame range: {}-{}\n'.format(f['start_frame_index'] + 1, f['end_frame_index'] + 1)
                info += '    2d gaze pos: x={:.3f}, y={:.3f}\n'.format(*f['norm_pos'])
                if 'gaze_point_3d' in f:
                    info += '    3d gaze pos: x={:.3f}, y={:.3f}, z={:.3f}\n'.format(*f['gaze_point_3d'])
                else:
                    info += '    3d gaze pos: N/A\n'
                if f['id'] > 1:
                    prev_f = self.g_pool.fixations[f['id'] - 2]
                    time_lapsed = f['timestamp'] - prev_f['timestamp'] + prev_f['duration'] / 1000
                    info += '    Time since prev. fixation: {:.2f} seconds\n'.format(time_lapsed)
                else:
                    info += '    Time since prev. fixation: N/A\n'

                if f['id'] < len(self.g_pool.fixations):
                    next_f = self.g_pool.fixations[f['id']]
                    time_lapsed = next_f['timestamp'] - f['timestamp'] + f['duration'] / 1000
                    info += '    Time to next fixation: {:.2f} seconds\n'.format(time_lapsed)
                else:
                    info += '    Time to next fixation: N/A\n'

            self.current_fixation_details.text = info
            self.prev_index = frame.index
Example #27
def detect_fixations(capture, gaze_data, max_dispersion, min_duration,
                     max_duration, min_data_confidence):
    yield "Detecting fixations...", ()
    gaze_data = (fm.Serialized_Dict(msgpack_bytes=serialized)
                 for serialized in gaze_data)
    gaze_data = [
        datum for datum in gaze_data
        if datum["confidence"] > min_data_confidence
    ]
    if not gaze_data:
        logger.warning("No data available to find fixations")
        return "Fixation detection failed", ()

    method = (FixationDetectionMethod.GAZE_3D
              if can_use_3d_gaze_mapping(gaze_data) else
              FixationDetectionMethod.GAZE_2D)
    logger.info(f"Starting fixation detection using {method.value} data...")
    fixation_result = Fixation_Result_Factory()

    working_queue = deque()
    remaining_gaze = deque(gaze_data)

    while remaining_gaze:
        # check if working_queue contains enough data
        if (len(working_queue) < 2
                or (working_queue[-1]["timestamp"] -
                    working_queue[0]["timestamp"]) < min_duration):
            datum = remaining_gaze.popleft()
            working_queue.append(datum)
            continue

        # min duration reached, check for fixation
        dispersion = gaze_dispersion(capture, working_queue, method)
        if dispersion > max_dispersion:
            # not a fixation, move forward
            working_queue.popleft()
            continue

        left_idx = len(working_queue)

        # minimal fixation found. collect maximal data
        # to perform binary search for fixation end
        while remaining_gaze:
            datum = remaining_gaze[0]
            if datum["timestamp"] > working_queue[0][
                    "timestamp"] + max_duration:
                break  # maximum data found
            working_queue.append(remaining_gaze.popleft())

        # check for fixation with maximum duration
        dispersion = gaze_dispersion(capture, working_queue, method)
        if dispersion <= max_dispersion:
            fixation = fixation_result.from_data(dispersion, method,
                                                 working_queue,
                                                 capture.timestamps)
            yield "Detecting fixations...", fixation
            working_queue.clear()  # discard old Q
            continue

        slicable = list(working_queue)  # deque does not support slicing
        right_idx = len(working_queue)

        # binary search
        while left_idx < right_idx - 1:
            middle_idx = (left_idx + right_idx) // 2
            dispersion = gaze_dispersion(
                capture,
                slicable[:middle_idx + 1],
                method,
            )
            if dispersion <= max_dispersion:
                left_idx = middle_idx
            else:
                right_idx = middle_idx

        # left_idx-1 is last valid base datum
        final_base_data = slicable[:left_idx]
        to_be_placed_back = slicable[left_idx:]
        dispersion_result = gaze_dispersion(capture, final_base_data, method)

        fixation = fixation_result.from_data(dispersion_result, method,
                                             final_base_data,
                                             capture.timestamps)
        yield "Detecting fixations...", fixation
        working_queue.clear()  # clear queue
        remaining_gaze.extendleft(reversed(to_be_placed_back))

    yield "Fixation detection complete", ()