Example #1
0
    def export_annotations(self, export_range, export_dir):
        """Write all annotations inside `export_range` to annotations.csv in `export_dir`."""
        if not self.annotations:
            logger.warning(
                'No annotations in this recording nothing to export')
            return

        window = pm.exact_window(self.g_pool.timestamps, export_range)
        section = self.annotations.init_dict_for_window(window)
        world_indices = pm.find_closest(self.g_pool.timestamps,
                                        section['data_ts'])
        csv_keys = self.parse_csv_keys(section['data'])

        out_path = os.path.join(export_dir, 'annotations.csv')
        with open(out_path, 'w', encoding='utf-8', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(csv_keys)
            for world_idx, datum in zip(world_indices, section['data']):
                # First column is the closest world frame index; remaining
                # columns hold the annotation's value for each csv key.
                row = [world_idx]
                row.extend(datum.get(key, '') for key in csv_keys[1:])
                writer.writerow(row)
            logger.info("Created 'annotations.csv' file.")
Example #2
0
    def csv_export_write(
        self,
        positions_bisector,
        timestamps,
        export_window,
        export_dir,
        min_confidence_threshold=0.0,
    ):
        """Export positions inside `export_window` as a CSV file in `export_dir`.

        Data points with confidence below `min_confidence_threshold` are skipped.
        Filename and column layout come from the class-level csv_export_* hooks.
        """
        cls = type(self)
        export_file = cls.csv_export_filename()
        export_path = os.path.join(export_dir, export_file)

        section = positions_bisector.init_dict_for_window(export_window)
        # Closest world frame index for every datum in the window.
        world_indices = pm.find_closest(timestamps, section["data_ts"])

        with open(export_path, "w", encoding="utf-8", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=cls.csv_export_labels())
            writer.writeheader()
            for datum, world_idx in zip(section["data"], world_indices):
                if datum["confidence"] < min_confidence_threshold:
                    continue
                row = cls.dict_export(raw_value=datum, world_index=world_idx)
                writer.writerow(row)

        logger.info(f"Created '{export_file}' file.")
def _csv_exported_gaze_data(
    gaze_positions, destination_folder, export_range, timestamps, capture
):
    """Build the iMotions gaze CSV header and data rows for `export_range`.

    Returns a `(header, rows)` tuple. Raises
    `_iMotionsExporterNo3DGazeDataError` when a gaze datum lacks the 3d
    fields required by the export format.
    """
    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(timestamps, (export_start, export_stop - 1))
    gaze_section = gaze_positions.init_dict_for_window(export_window)

    # find closest world idx for each gaze datum
    gaze_world_idc = pm.find_closest(timestamps, gaze_section["data_ts"])

    csv_header = (
        "GazeTimeStamp",
        "MediaTimeStamp",
        "MediaFrameIndex",
        "Gaze3dX",
        "Gaze3dY",
        "Gaze3dZ",
        "Gaze2dX",
        "Gaze2dY",
        "PupilDiaLeft",
        "PupilDiaRight",
        "Confidence",
    )

    csv_rows = []
    for gaze_pos, media_idx in zip(gaze_section["data"], gaze_world_idc):
        media_timestamp = timestamps[media_idx]
        try:
            # Per-eye 3d pupil diameters, keyed by eye id (0 = right, per usage below).
            pupil_dia = {p["id"]: p["diameter_3d"] for p in gaze_pos["base_data"]}

            pixel_pos = denormalize(
                gaze_pos["norm_pos"], capture.frame_size, flip_y=True
            )
            undistorted3d = capture.intrinsics.unprojectPoints(pixel_pos)
            undistorted2d = capture.intrinsics.projectPoints(
                undistorted3d, use_distortion=False
            )

            row = (
                gaze_pos["timestamp"],
                media_timestamp,
                media_idx - export_start,
                *gaze_pos["gaze_point_3d"],  # Gaze3dX/Y/Z
                *undistorted2d.flat,  # Gaze2dX/Y
                pupil_dia.get(1, 0.0),  # PupilDiaLeft
                pupil_dia.get(0, 0.0),  # PupilDiaRight
                gaze_pos["confidence"],  # Confidence
            )
        except KeyError:
            # Missing 3d fields: datum came from a 2d-only pipeline.
            raise _iMotionsExporterNo3DGazeDataError()

        csv_rows.append(row)

    return csv_header, csv_rows
Example #4
0
 def _pupil_getter():
     """Return the pupil datum closest in time to the current frame, or None."""
     try:
         positions = self.g_pool.pupil_positions_by_id[eye_id]
         idx = pm.find_closest(positions.data_ts, self.current_frame_ts)
         return positions.data[idx]
     except (IndexError, ValueError):
         # No usable pupil datum for this eye / timestamp.
         return None
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    """Export the frames of `input_file` inside `export_range` to `output_file`.

    Generator that yields `(status_message, progress_percent)` tuples so the
    caller can display export progress. Every input frame is transformed via
    `process_frame(input_source, input_frame)` before being written.
    """
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    # Map the export window (world time) onto this video's own frame indices.
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )

    #  NOTE: Start time of the export recording will be synced with world recording
    #  export! This means that if the recording to export started later than the world
    #  video, the first frame of the exported recording will not be at timestamp 0 in
    #  the recording, but later. Some video players (e.g. VLC on windows) might display
    #  the video weirdly in this case, but we rather want syncronization between the
    #  exported video!
    start_time = export_window[0]
    writer = MPEG_Writer(output_file, start_time)

    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        # Emit a progress update roughly twice per second of video.
        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Example #6
0
 def _pupil_getter():
     """Return the pupil datum closest to the current frame timestamp.

     Returns None when no pupil data is available for this eye.
     """
     try:
         pupil_data = self.g_pool.pupil_positions_by_id[eye_id]
         closest_pupil_idx = pm.find_closest(
             pupil_data.data_ts, self.current_frame_ts
         )
         current_datum = pupil_data.data[closest_pupil_idx]
         return current_datum
     except (IndexError, ValueError):
         # No usable pupil datum for this eye / timestamp.
         return None
Example #7
0
        def _pupil_getter():
            """Return the 2d and 3d pupil data closest to the current frame.

            Returns a `(datum_2d, datum_3d)` tuple; either element is None
            when no data of that detection method is available.
            """
            try:
                pupil_data = self.g_pool.pupil_positions[eye_id, "2d"]
                if pupil_data:
                    closest_pupil_idx = pm.find_closest(
                        pupil_data.data_ts, self.current_frame_ts)
                    current_datum_2d = pupil_data.data[closest_pupil_idx]
                else:
                    current_datum_2d = None

                pupil_data = self.g_pool.pupil_positions[eye_id, "3d"]

                if pupil_data:
                    closest_pupil_idx = pm.find_closest(
                        pupil_data.data_ts, self.current_frame_ts)
                    current_datum_3d = pupil_data.data[closest_pupil_idx]
                else:
                    current_datum_3d = None
                return current_datum_2d, current_datum_3d
            except (IndexError, ValueError):
                # NOTE(review): the error path returns a bare None rather than
                # a (None, None) tuple — callers that unpack the result would
                # raise TypeError here; confirm this is intended.
                return None
Example #8
0
def _find_and_load_densified_worn_data(
    timestamps_200hz, timestamps_realtime_paths: T.List[Path]
):
    """Densify realtime-recorded worn/confidence data onto the 200hz timeline.

    Returns None when no realtime timestamp files are provided.
    """
    if not timestamps_realtime_paths:
        return None
    # Load and densify confidence data when 200hz gaze is available, but only
    # non-200hz confidence is available
    conf_data, ts_realtime = _find_and_load_realtime_recorded_worn_data(
        timestamps_realtime_paths
    )
    # For every 200hz timestamp pick the closest realtime sample.
    closest_indices = pm.find_closest(ts_realtime, timestamps_200hz)
    return conf_data[closest_indices]
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    """Export the frames of `input_file` inside `export_range` to `output_file`.

    Generator that yields `(status_message, progress_percent)` tuples so the
    caller can display export progress. Every input frame is transformed via
    `process_frame(input_source, input_frame)` before being written.
    """
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    # Map the export window (world time) onto this video's own frame indices.
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )
    writer = AV_Writer(
        output_file, fps=input_source.frame_rate, audio_dir=None, use_timestamps=True
    )
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        # Emit a progress update roughly twice per second of video.
        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    """Export the frames of `input_file` inside `export_range` to `output_file`.

    Generator that yields `(status_message, progress_percent)` tuples so the
    caller can display export progress. Every input frame is transformed via
    `process_frame(input_source, input_frame)` before being written.
    """
    yield "Export video", 0.0
    input_source = File_Source(EmptyGPool(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps,
                                    (export_start, export_stop - 1))
    # Map the export window (world time) onto this video's own frame indices.
    (export_from_index,
     export_to_index) = pm.find_closest(input_source.timestamps, export_window)
    writer = AV_Writer(output_file,
                       fps=input_source.frame_rate,
                       audio_dir=None,
                       use_timestamps=True)
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        # Emit a progress update roughly twice per second of video.
        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index)
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Example #11
0
    def create_segment(self, gaze_data, gaze_time, use_pupil, nslr_segment,
                       nslr_segment_class,
                       world_timestamps) -> t.Optional[Classified_Segment]:
        """Build a Classified_Segment from an NSLR segment.

        Returns None when the segment contains no data or fails validation.
        """
        segment_id = self._get_id_postfix_increment()

        i_start, i_end = nslr_segment.i
        segment_data = list(gaze_data[i_start:i_end])
        segment_time = list(gaze_time[i_start:i_end])
        if not segment_data:
            return None

        segment_class = Segment_Class.from_nslr_class(nslr_segment_class)
        topic = utils.EYE_MOVEMENT_TOPIC_PREFIX + segment_class.value

        # Segment spans the closed interval [t_0, t_1].
        start_frame_timestamp = segment_time[0]
        end_frame_timestamp = segment_time[-1]

        if len(world_timestamps) > 1:
            frame_indices = pm.find_closest(
                world_timestamps, [start_frame_timestamp, end_frame_timestamp]
            )
            start_frame_index = int(frame_indices[0])
            end_frame_index = int(frame_indices[1])
        else:
            # Not enough world frames to map segment times onto frame indices.
            start_frame_index = None
            end_frame_index = None

        segment = Classified_Segment.from_attrs(
            id=segment_id,
            topic=topic,
            use_pupil=use_pupil,
            segment_data=segment_data,
            segment_time=segment_time,
            segment_class=segment_class,
            start_frame_index=start_frame_index,
            end_frame_index=end_frame_index,
            start_frame_timestamp=start_frame_timestamp,
            end_frame_timestamp=end_frame_timestamp,
        )

        try:
            segment.validate()
        except AssertionError:
            # Discard segments that do not satisfy the class invariants.
            return None

        return segment
Example #12
0
    def cache_pupil_timeline_data(self, key):
        """Cache downsampled per-eye pupil data for timeline display.

        Stores in ``self.cache[key]`` a dict with per-eye lists of
        (timestamp, value) pairs ("right" = eye 0, "left" = eye 1), the world
        time range as "xlim" and the value range as "ylim".
        """
        world_start_stop_ts = [
            self.g_pool.timestamps[0], self.g_pool.timestamps[-1]
        ]
        if not self.g_pool.pupil_positions:
            self.cache[key] = {
                "left": [],
                "right": [],
                "xlim": world_start_stop_ts,
                "ylim": [0, 1],
            }
        else:
            ts_data_pairs_right_left = [], []
            for eye_id in (0, 1):
                pupil_positions = self.g_pool.pupil_positions_by_id[eye_id]
                if pupil_positions:
                    t0, t1 = (
                        pupil_positions.timestamps[0],
                        pupil_positions.timestamps[-1],
                    )
                    # Downsample to a fixed number of samples for the timeline.
                    timestamps_target = np.linspace(t0,
                                                    t1,
                                                    NUMBER_SAMPLES_TIMELINE,
                                                    dtype=np.float32)

                    data_indices = pm.find_closest(pupil_positions.timestamps,
                                                   timestamps_target)
                    # Drop duplicate indices produced by the closest-match lookup.
                    data_indices = np.unique(data_indices)
                    for idx in data_indices:
                        ts_data_pair = (
                            pupil_positions.timestamps[idx],
                            pupil_positions[idx][key],
                        )
                        ts_data_pairs_right_left[eye_id].append(ts_data_pair)

            # max_val must not be 0, else gl will crash
            all_pupil_data_chained = chain.from_iterable(
                ts_data_pairs_right_left)
            # default=0 prevents a ValueError from max() when neither eye
            # contributed data; `or 1` then keeps the limit nonzero.
            max_val = max(
                (pd[1] for pd in all_pupil_data_chained), default=0) or 1

            self.cache[key] = {
                "right": ts_data_pairs_right_left[0],
                "left": ts_data_pairs_right_left[1],
                "xlim": world_start_stop_ts,
                "ylim": [0, max_val],
            }
Example #13
0
    def export_annotations(self, export_window, export_dir):
        """Write annotations inside `export_window` to annotations.csv in `export_dir`."""
        section = self.annotations.init_dict_for_window(export_window)
        world_indices = pm.find_closest(
            self.g_pool.timestamps, section["data_ts"]
        )
        csv_keys = self.parse_csv_keys(section["data"])

        target = os.path.join(export_dir, "annotations.csv")
        with open(target, "w", encoding="utf-8", newline="") as csv_file:
            writer = csv.writer(csv_file)
            writer.writerow(csv_keys)
            for world_idx, annotation in zip(world_indices, section["data"]):
                # World frame index first, then one column per annotation key.
                row = [world_idx] + [annotation.get(k, "") for k in csv_keys[1:]]
                writer.writerow(row)
            logger.info("Created 'annotations.csv' file.")
Example #14
0
    def export_annotations(self, export_window, export_dir):
        """Write annotations inside `export_window` to annotations.csv in `export_dir`."""
        annotation_section = self.annotations.init_dict_for_window(
            export_window)
        # Closest world frame index for each annotation.
        annotation_idc = pm.find_closest(self.g_pool.timestamps,
                                         annotation_section["data_ts"])
        csv_keys = self.parse_csv_keys(annotation_section["data"])

        with open(
                os.path.join(export_dir, "annotations.csv"),
                "w",
                encoding="utf-8",
                newline="",
        ) as csv_file:
            csv_writer = csv.writer(csv_file)
            csv_writer.writerow(csv_keys)
            for annotation, idx in zip(annotation_section["data"],
                                       annotation_idc):
                # World frame index first, then one column per annotation key.
                csv_row = [idx]
                csv_row.extend((annotation.get(k, "") for k in csv_keys[1:]))
                csv_writer.writerow(csv_row)
            logger.info("Created 'annotations.csv' file.")
Example #15
0
    def csv_export_write(self, imu_bisector, timestamps, export_window, export_dir):
        """Export IMU data inside `export_window` as a CSV file in `export_dir`.

        Filename and column layout come from the class-level csv_export_* hooks.
        """
        cls = type(self)
        export_file = cls.csv_export_filename()
        export_path = os.path.join(export_dir, export_file)

        section = imu_bisector.init_dict_for_window(export_window)
        # Closest world frame index for every IMU datum in the window.
        world_indices = pm.find_closest(timestamps, section["data_ts"])

        with open(export_path, "w", encoding="utf-8", newline="") as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=cls.csv_export_labels())
            writer.writeheader()

            for raw, world_ts, world_idx in zip(
                section["data"], section["data_ts"], world_indices
            ):
                row = cls.dict_export(
                    raw_value=raw, world_ts=world_ts, world_index=world_idx
                )
                writer.writerow(row)

        logger.info(f"Created '{export_file}' file.")
Example #16
0
    def export_data(self, export_window, export_dir):
        """Export pupil and gaze positions inside `export_window` as CSV files.

        Depending on the `should_export_*` flags, writes pupil_positions.csv,
        gaze_positions.csv and/or pupil_gaze_positions_info.txt into
        `export_dir`.
        """
        if self.should_export_pupil_positions:
            with open(
                os.path.join(export_dir, "pupil_positions.csv"),
                "w",
                encoding="utf-8",
                newline="",
            ) as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=",")

                csv_writer.writerow(
                    (
                        "pupil_timestamp",
                        "world_index",
                        "eye_id",
                        "confidence",
                        "norm_pos_x",
                        "norm_pos_y",
                        "diameter",
                        "method",
                        "ellipse_center_x",
                        "ellipse_center_y",
                        "ellipse_axis_a",
                        "ellipse_axis_b",
                        "ellipse_angle",
                        "diameter_3d",
                        "model_confidence",
                        "model_id",
                        "sphere_center_x",
                        "sphere_center_y",
                        "sphere_center_z",
                        "sphere_radius",
                        "circle_3d_center_x",
                        "circle_3d_center_y",
                        "circle_3d_center_z",
                        "circle_3d_normal_x",
                        "circle_3d_normal_y",
                        "circle_3d_normal_z",
                        "circle_3d_radius",
                        "theta",
                        "phi",
                        "projected_sphere_center_x",
                        "projected_sphere_center_y",
                        "projected_sphere_axis_a",
                        "projected_sphere_axis_b",
                        "projected_sphere_angle",
                    )
                )

                pupil_section = self.g_pool.pupil_positions.init_dict_for_window(
                    export_window
                )
                # Closest world frame index for each pupil datum.
                pupil_world_idc = pm.find_closest(
                    self.g_pool.timestamps, pupil_section["data_ts"]
                )
                for p, idx in zip(pupil_section["data"], pupil_world_idc):
                    data_2d = [
                        "{}".format(
                            p["timestamp"]
                        ),  # use str to be consistent with csv lib.
                        idx,
                        p["id"],
                        p["confidence"],
                        p["norm_pos"][0],
                        p["norm_pos"][1],
                        p["diameter"],
                        p["method"],
                    ]
                    # Ellipse and 3d fields may be missing (e.g. 2d-only
                    # detection); pad with None in that case.
                    try:
                        ellipse_data = [
                            p["ellipse"]["center"][0],
                            p["ellipse"]["center"][1],
                            p["ellipse"]["axes"][0],
                            p["ellipse"]["axes"][1],
                            p["ellipse"]["angle"],
                        ]
                    except KeyError:
                        ellipse_data = [None] * 5
                    try:
                        data_3d = [
                            p["diameter_3d"],
                            p["model_confidence"],
                            p["model_id"],
                            p["sphere"]["center"][0],
                            p["sphere"]["center"][1],
                            p["sphere"]["center"][2],
                            p["sphere"]["radius"],
                            p["circle_3d"]["center"][0],
                            p["circle_3d"]["center"][1],
                            p["circle_3d"]["center"][2],
                            p["circle_3d"]["normal"][0],
                            p["circle_3d"]["normal"][1],
                            p["circle_3d"]["normal"][2],
                            p["circle_3d"]["radius"],
                            p["theta"],
                            p["phi"],
                            p["projected_sphere"]["center"][0],
                            p["projected_sphere"]["center"][1],
                            p["projected_sphere"]["axes"][0],
                            p["projected_sphere"]["axes"][1],
                            p["projected_sphere"]["angle"],
                        ]
                    except KeyError:
                        data_3d = [None] * 21
                    row = data_2d + ellipse_data + data_3d
                    csv_writer.writerow(row)
                logger.info("Created 'pupil_positions.csv' file.")

        if self.should_export_gaze_positions:
            with open(
                os.path.join(export_dir, "gaze_positions.csv"),
                "w",
                encoding="utf-8",
                newline="",
            ) as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=",")
                csv_writer.writerow(
                    (
                        "gaze_timestamp",
                        "world_index",
                        "confidence",
                        "norm_pos_x",
                        "norm_pos_y",
                        "base_data",
                        "gaze_point_3d_x",
                        "gaze_point_3d_y",
                        "gaze_point_3d_z",
                        "eye_center0_3d_x",
                        "eye_center0_3d_y",
                        "eye_center0_3d_z",
                        "gaze_normal0_x",
                        "gaze_normal0_y",
                        "gaze_normal0_z",
                        "eye_center1_3d_x",
                        "eye_center1_3d_y",
                        "eye_center1_3d_z",
                        "gaze_normal1_x",
                        "gaze_normal1_y",
                        "gaze_normal1_z",
                    )
                )

                gaze_section = self.g_pool.gaze_positions.init_dict_for_window(
                    export_window
                )
                # Closest world frame index for each gaze datum.
                gaze_world_idc = pm.find_closest(
                    self.g_pool.timestamps, gaze_section["data_ts"]
                )

                for g, idx in zip(gaze_section["data"], gaze_world_idc):
                    data = [
                        "{}".format(g["timestamp"]),
                        idx,
                        g["confidence"],
                        g["norm_pos"][0],
                        g["norm_pos"][1],
                        " ".join(
                            [
                                "{}-{}".format(b["timestamp"], b["id"])
                                for b in g["base_data"]
                            ]
                        ),
                    ]  # use str on timestamp to be consistent with csv lib.

                    # add 3d data if available
                    if g.get("gaze_point_3d", None) is not None:
                        data_3d = [
                            g["gaze_point_3d"][0],
                            g["gaze_point_3d"][1],
                            g["gaze_point_3d"][2],
                        ]

                        # binocular
                        if g.get("eye_centers_3d", None) is not None:
                            data_3d += g["eye_centers_3d"].get(0, [None, None, None])
                            data_3d += g["gaze_normals_3d"].get(0, [None, None, None])
                            data_3d += g["eye_centers_3d"].get(1, [None, None, None])
                            data_3d += g["gaze_normals_3d"].get(1, [None, None, None])
                        # monocular
                        elif g.get("eye_center_3d", None) is not None:
                            data_3d += g["eye_center_3d"]
                            data_3d += g["gaze_normal_3d"]
                            data_3d += [None] * 6
                    else:
                        data_3d = [None] * 15
                    data += data_3d
                    csv_writer.writerow(data)
                logger.info("Created 'gaze_positions.csv' file.")
        if self.should_export_field_info:
            # The class docstring documents the exported CSV fields.
            with open(
                os.path.join(export_dir, "pupil_gaze_positions_info.txt"),
                "w",
                encoding="utf-8",
                newline="",
            ) as info_file:
                info_file.write(self.__doc__)
Example #17
0
 def closest_frame_to_ts(self, ts):
     """Return the frame whose timestamp is closest to `ts`."""
     idx = pm.find_closest(self.source.timestamps, ts)
     return self.frame_for_idx(idx)
Example #18
0
def _write_gaze_data(gaze_positions, destination_folder, export_range,
                     timestamps, capture):
    """Write gaze data to "<destination_folder>/gaze.tlv" (tab-separated).

    One row is written per gaze datum inside ``export_range``, combining the
    gaze timestamp with the closest world-frame timestamp/index, the 3d gaze
    point, the undistorted 2d pixel position, per-eye pupil diameters and
    confidence. Datums lacking 3d fields raise ``KeyError`` internally and
    are skipped; the "3d only" error is logged at most once per process via
    the module-level ``user_warned_3d_only`` flag.
    """
    global user_warned_3d_only
    with open(os.path.join(destination_folder, "gaze.tlv"),
              "w",
              encoding="utf-8",
              newline="") as csv_file:
        csv_writer = csv.writer(csv_file, delimiter="\t")

        csv_writer.writerow((
            "GazeTimeStamp",
            "MediaTimeStamp",
            "MediaFrameIndex",
            "Gaze3dX",
            "Gaze3dY",
            "Gaze3dZ",
            "Gaze2dX",
            "Gaze2dY",
            "PupilDiaLeft",
            "PupilDiaRight",
            "Confidence",
        ))

        export_start, export_stop = export_range  # export_stop is exclusive
        # export_stop is exclusive, so export_stop - 1 is passed as the last
        # included index -- presumably exact_window's bounds are inclusive.
        export_window = pm.exact_window(timestamps,
                                        (export_start, export_stop - 1))
        gaze_section = gaze_positions.init_dict_for_window(export_window)

        # find closest world idx for each gaze datum
        gaze_world_idc = pm.find_closest(timestamps, gaze_section["data_ts"])

        for gaze_pos, media_idx in zip(gaze_section["data"], gaze_world_idc):
            media_timestamp = timestamps[media_idx]
            try:
                # Collect 3d pupil diameters keyed by eye id; 2d-only pupil
                # data lacks "diameter_3d" and aborts this datum via KeyError.
                pupil_dia = {}
                for p in gaze_pos["base_data"]:
                    pupil_dia[p["id"]] = p["diameter_3d"]

                # Map normalized gaze to pixel coordinates, unproject to 3d,
                # then re-project without distortion for an undistorted 2d
                # gaze position.
                pixel_pos = denormalize(gaze_pos["norm_pos"],
                                        capture.frame_size,
                                        flip_y=True)
                undistorted3d = capture.intrinsics.unprojectPoints(pixel_pos)
                undistorted2d = capture.intrinsics.projectPoints(
                    undistorted3d, use_distortion=False)

                data = (
                    gaze_pos["timestamp"],
                    media_timestamp,
                    media_idx - export_range[0],  # frame index relative to export start
                    *gaze_pos["gaze_point_3d"],  # Gaze3dX/Y/Z
                    *undistorted2d.flat,  # Gaze2dX/Y
                    pupil_dia.get(1, 0.0),  # PupilDiaLeft (eye id 1 -- assumed left; TODO confirm)
                    pupil_dia.get(0, 0.0),  # PupilDiaRight (eye id 0 -- assumed right; TODO confirm)
                    gaze_pos["confidence"],  # Confidence
                )
            except KeyError:
                # Datum without 3d fields: warn once, then skip silently.
                if not user_warned_3d_only:
                    logger.error(
                        "Currently, the iMotions export only supports 3d gaze data"
                    )
                    user_warned_3d_only = True
                continue
            csv_writer.writerow(data)
    def export_data(self, export_range, export_dir):
        """Export pupil and gaze data for ``export_range`` into ``export_dir``.

        Writes three files:
          - 'pupil_positions.csv': one row per pupil datum (2d fields plus
            ellipse and, when available, 3d eye-model fields).
          - 'gaze_positions.csv': one row per gaze datum (2d fields plus,
            when available, 3d fields).
          - 'pupil_gaze_positions_info.txt': this plugin's docstring, which
            describes the exported fields.

        The 'index' column of both CSVs holds the index of the world frame
        whose timestamp is closest to the datum's timestamp.
        """
        export_window = pm.exact_window(self.g_pool.timestamps, export_range)
        with open(os.path.join(export_dir, 'pupil_positions.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            csv_writer.writerow(
                ('timestamp', 'index', 'id', 'confidence', 'norm_pos_x',
                 'norm_pos_y', 'diameter', 'method', 'ellipse_center_x',
                 'ellipse_center_y', 'ellipse_axis_a', 'ellipse_axis_b',
                 'ellipse_angle', 'diameter_3d', 'model_confidence',
                 'model_id', 'sphere_center_x', 'sphere_center_y',
                 'sphere_center_z', 'sphere_radius', 'circle_3d_center_x',
                 'circle_3d_center_y', 'circle_3d_center_z',
                 'circle_3d_normal_x', 'circle_3d_normal_y',
                 'circle_3d_normal_z', 'circle_3d_radius', 'theta', 'phi',
                 'projected_sphere_center_x', 'projected_sphere_center_y',
                 'projected_sphere_axis_a', 'projected_sphere_axis_b',
                 'projected_sphere_angle'))

            pupil_section = self.g_pool.pupil_positions.init_dict_for_window(
                export_window)
            # Closest world frame index for every pupil datum.
            pupil_world_idc = pm.find_closest(self.g_pool.timestamps,
                                              pupil_section['data_ts'])
            for p, idx in zip(pupil_section['data'], pupil_world_idc):
                data_2d = [
                    '{}'.format(p['timestamp']
                                ),  # use str to be consitant with csv lib.
                    idx,
                    p['id'],
                    p['confidence'],
                    p['norm_pos'][0],
                    p['norm_pos'][1],
                    p['diameter'],
                    p['method']
                ]
                try:
                    ellipse_data = [
                        p['ellipse']['center'][0], p['ellipse']['center'][1],
                        p['ellipse']['axes'][0], p['ellipse']['axes'][1],
                        p['ellipse']['angle']
                    ]
                except KeyError:
                    # Datum without an ellipse fit: pad the 5 ellipse columns.
                    ellipse_data = [None] * 5
                try:
                    data_3d = [
                        p['diameter_3d'], p['model_confidence'], p['model_id'],
                        p['sphere']['center'][0], p['sphere']['center'][1],
                        p['sphere']['center'][2], p['sphere']['radius'],
                        p['circle_3d']['center'][0],
                        p['circle_3d']['center'][1],
                        p['circle_3d']['center'][2],
                        p['circle_3d']['normal'][0],
                        p['circle_3d']['normal'][1],
                        p['circle_3d']['normal'][2], p['circle_3d']['radius'],
                        p['theta'], p['phi'],
                        p['projected_sphere']['center'][0],
                        p['projected_sphere']['center'][1],
                        p['projected_sphere']['axes'][0],
                        p['projected_sphere']['axes'][1],
                        p['projected_sphere']['angle']
                    ]
                except KeyError:
                    # 2d-only datum: pad the 21 eye-model columns.
                    data_3d = [None] * 21
                row = data_2d + ellipse_data + data_3d
                csv_writer.writerow(row)
            logger.info("Created 'pupil_positions.csv' file.")

        with open(os.path.join(export_dir, 'gaze_positions.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')
            csv_writer.writerow(
                ("timestamp", "index", "confidence", "norm_pos_x",
                 "norm_pos_y", "base_data", "gaze_point_3d_x",
                 "gaze_point_3d_y", "gaze_point_3d_z", "eye_center0_3d_x",
                 "eye_center0_3d_y", "eye_center0_3d_z", "gaze_normal0_x",
                 "gaze_normal0_y", "gaze_normal0_z", "eye_center1_3d_x",
                 "eye_center1_3d_y", "eye_center1_3d_z", "gaze_normal1_x",
                 "gaze_normal1_y", "gaze_normal1_z"))

            gaze_section = self.g_pool.gaze_positions.init_dict_for_window(
                export_window)
            # Closest world frame index for every gaze datum.
            gaze_world_idc = pm.find_closest(self.g_pool.timestamps,
                                             gaze_section['data_ts'])

            for g, idx in zip(gaze_section['data'], gaze_world_idc):
                data = [
                    '{}'.format(g["timestamp"]), idx, g["confidence"],
                    g["norm_pos"][0], g["norm_pos"][1], " ".join([
                        '{}-{}'.format(b['timestamp'], b['id'])
                        for b in g['base_data']
                    ])
                ]  # use str on timestamp to be consitant with csv lib.

                # add 3d data if avaiblable
                if g.get('gaze_point_3d', None) is not None:
                    data_3d = [
                        g['gaze_point_3d'][0], g['gaze_point_3d'][1],
                        g['gaze_point_3d'][2]
                    ]

                    # binocular
                    if g.get('eye_centers_3d', None) is not None:
                        data_3d += g['eye_centers_3d'].get(
                            0, [None, None, None])
                        data_3d += g['gaze_normals_3d'].get(
                            0, [None, None, None])
                        data_3d += g['eye_centers_3d'].get(
                            1, [None, None, None])
                        data_3d += g['gaze_normals_3d'].get(
                            1, [None, None, None])
                    # monocular
                    elif g.get('eye_center_3d', None) is not None:
                        data_3d += g['eye_center_3d']
                        data_3d += g['gaze_normal_3d']
                        data_3d += [None] * 6
                    # neither per-eye layout present: pad so the row always
                    # matches the header width (previously left 12 short)
                    else:
                        data_3d += [None] * 12
                else:
                    data_3d = [None] * 15
                data += data_3d
                csv_writer.writerow(data)
            logger.info("Created 'gaze_positions.csv' file.")

        with open(os.path.join(export_dir, 'pupil_gaze_positions_info.txt'),
                  'w',
                  encoding='utf-8',
                  newline='') as info_file:
            info_file.write(self.__doc__)
Example #20
0
    def cache_pupil_timeline_data(
        self,
        key: str,
        detector_tag: str,
        ylim=None,
        fallback_detector_tag: T.Optional[str] = None,
    ):
        """Downsample the pupil-data field ``key`` for timeline display and
        store the result in ``self.cache[key]``.

        The cached dict maps "right" to eye-id-0 data and "left" to eye-id-1
        data, each a list of (timestamp, value) pairs, plus "xlim" (the full
        world time range) and "ylim". ``detector_tag`` selects which
        detector's pupil data is used; ``fallback_detector_tag`` is tried
        when the primary tag yields no data. When ``ylim`` is not given it
        is derived from the data via Tukey's fences to suppress outliers.
        """
        world_start_stop_ts = [self.g_pool.timestamps[0], self.g_pool.timestamps[-1]]
        if not self.g_pool.pupil_positions:
            # No pupil data at all: cache empty series with a default y-range.
            self.cache[key] = {
                "left": [],
                "right": [],
                "xlim": world_start_stop_ts,
                "ylim": [0, 1],
            }
        else:
            # Index 0 collects eye id 0 ("right"), index 1 eye id 1 ("left").
            ts_data_pairs_right_left = [], []
            for eye_id in (0, 1):
                pupil_positions = self.g_pool.pupil_positions[eye_id, detector_tag]
                if not pupil_positions and fallback_detector_tag is not None:
                    pupil_positions = self.g_pool.pupil_positions[
                        eye_id, fallback_detector_tag
                    ]
                if pupil_positions:
                    # Downsample onto at most NUMBER_SAMPLES_TIMELINE evenly
                    # spaced target timestamps across the data's time span.
                    t0, t1 = (
                        pupil_positions.timestamps[0],
                        pupil_positions.timestamps[-1],
                    )
                    timestamps_target = np.linspace(
                        t0, t1, NUMBER_SAMPLES_TIMELINE, dtype=np.float32
                    )

                    data_indeces = pm.find_closest(
                        pupil_positions.timestamps, timestamps_target
                    )
                    # Drop duplicate indices so each datum appears only once.
                    data_indeces = np.unique(data_indeces)
                    for idx in data_indeces:
                        ts_data_pair = (
                            pupil_positions.timestamps[idx],
                            pupil_positions[idx][key],
                        )
                        ts_data_pairs_right_left[eye_id].append(ts_data_pair)

            if ylim is None:
                # max_val must not be 0, else gl will crash
                all_pupil_data_chained = chain.from_iterable(ts_data_pairs_right_left)
                try:
                    # Outlier removal based on:
                    # https://en.wikipedia.org/wiki/Outlier#Tukey's_fences
                    min_val, max_val = np.quantile(
                        [pd[1] for pd in all_pupil_data_chained], [0.25, 0.75]
                    )
                    iqr = max_val - min_val
                    min_val -= 1.5 * iqr
                    max_val += 1.5 * iqr
                    ylim = min_val, max_val
                except IndexError:  # no pupil data available
                    ylim = 0.0, 1.0

            self.cache[key] = {
                "right": ts_data_pairs_right_left[0],
                "left": ts_data_pairs_right_left[1],
                "xlim": world_start_stop_ts,
                "ylim": ylim,
            }
Example #21
0
def _write_gaze_data(
    gaze_positions, destination_folder, export_range, timestamps, capture
):
    """Export gaze data as tab-separated values to "gaze.tlv" in
    *destination_folder*, one row per gaze datum in *export_range*."""
    global user_warned_3d_only
    out_path = os.path.join(destination_folder, "gaze.tlv")
    with open(out_path, "w", encoding="utf-8", newline="") as tlv_file:
        writer = csv.writer(tlv_file, delimiter="\t")

        header = (
            "GazeTimeStamp",
            "MediaTimeStamp",
            "MediaFrameIndex",
            "Gaze3dX",
            "Gaze3dY",
            "Gaze3dZ",
            "Gaze2dX",
            "Gaze2dY",
            "PupilDiaLeft",
            "PupilDiaRight",
            "Confidence",
        )
        writer.writerow(header)

        start, stop = export_range  # stop is exclusive
        window = pm.exact_window(timestamps, (start, stop - 1))
        section = gaze_positions.init_dict_for_window(window)

        # Closest world frame index for every gaze datum.
        world_indices = pm.find_closest(timestamps, section["data_ts"])

        for datum, frame_idx in zip(section["data"], world_indices):
            frame_ts = timestamps[frame_idx]
            try:
                # 3d pupil diameter per eye id; missing "diameter_3d" (2d-only
                # pupil data) raises KeyError and skips this datum below.
                diameters = {p["id"]: p["diameter_3d"] for p in datum["base_data"]}

                pixels = denormalize(
                    datum["norm_pos"], capture.frame_size, flip_y=True
                )
                unprojected = capture.intrinsics.unprojectPoints(pixels)
                gaze_2d = capture.intrinsics.projectPoints(
                    unprojected, use_distortion=False
                )

                row = (
                    datum["timestamp"],
                    frame_ts,
                    frame_idx - export_range[0],
                    *datum["gaze_point_3d"],  # Gaze3dX/Y/Z
                    *gaze_2d.flat,  # Gaze2dX/Y
                    diameters.get(1, 0.0),  # PupilDiaLeft
                    diameters.get(0, 0.0),  # PupilDiaRight
                    datum["confidence"],  # Confidence
                )
            except KeyError:
                # 2d-only data cannot be exported; warn once, then skip.
                if not user_warned_3d_only:
                    logger.error(
                        "Currently, the iMotions export only supports 3d gaze data"
                    )
                    user_warned_3d_only = True
                continue
            writer.writerow(row)
Example #22
0
def pi_gaze_items(root_dir):
    """Yield (raw_gaze_xy, timestamp, confidence) triples for every Pupil
    Invisible gaze stream found below *root_dir*.

    The 200hz variants of the gaze/worn files are preferred when both the
    raw gaze data and its timestamps exist at 200hz; otherwise the
    native-rate files are used. Confidence falls back to 1.0 per sample when
    no usable "worn" data is found.
    """

    def find_timestamps_200hz_path(timestamps_path):
        # "gaze_200hz_timestamps.npy" next to the native-rate timestamps file.
        path = timestamps_path.with_name("gaze_200hz_timestamps.npy")
        return path if path.is_file() else None

    def find_raw_path(timestamps_path):
        # "gaze psN.raw" corresponding to "gaze psN_timestamps.npy".
        name = timestamps_path.name.replace("_timestamps", "")
        path = timestamps_path.with_name(name).with_suffix(".raw")
        assert path.is_file(), f"The file does not exist at path: {path}"
        return path

    def find_raw_200hz_path(timestamps_path):
        path = timestamps_path.with_name("gaze_200hz.raw")
        return path if path.is_file() else None

    def find_worn_path(timestamps_path):
        # "worn psN.raw" corresponding to "gaze psN_timestamps.npy".
        name = timestamps_path.name
        name = name.replace("gaze", "worn")
        name = name.replace("_timestamps", "")
        path = timestamps_path.with_name(name).with_suffix(".raw")
        return path if path.is_file() else None

    def find_worn_200hz_path(timestamps_path):
        path = timestamps_path.with_name("worn_200hz.raw")
        return path if path.is_file() else None

    def load_timestamps_data(path):
        return np.load(str(path))

    def load_raw_data(path):
        # Flat little-endian float32 pairs -> (N, 2) array of gaze positions.
        raw_data = np.fromfile(str(path), "<f4")
        raw_data_dtype = raw_data.dtype
        raw_data.shape = (-1, 2)
        return np.asarray(raw_data, dtype=raw_data_dtype)

    def load_worn_data(path):
        if not (path and path.exists()):
            return None
        # Bytes in [0, 255] -> confidences in [0.0, 1.0].
        confidences = np.fromfile(str(path), "<u1") / 255.0
        return np.clip(confidences, 0.0, 1.0)

    # This pattern will match any filename that:
    # - starts with "gaze ps"
    # - is followed by one or more digits
    # - ends with "_timestamps.npy" (dot escaped so it matches literally)
    gaze_timestamp_paths = matched_files_by_name_pattern(
        pl.Path(root_dir), r"^gaze ps[0-9]+_timestamps\.npy$")

    for timestamps_path in gaze_timestamp_paths:
        # Resolve every candidate companion file once per stream instead of
        # re-scanning the directory at each use site.
        raw_200hz_path = find_raw_200hz_path(timestamps_path)
        timestamps_200hz_path = find_timestamps_200hz_path(timestamps_path)
        worn_200hz_path = find_worn_200hz_path(timestamps_path)
        worn_path = find_worn_path(timestamps_path)

        # Use 200hz data only if both gaze data and timestamps are available at 200hz
        is_200hz_data_available = (
            raw_200hz_path is not None and timestamps_200hz_path is not None
        )

        if is_200hz_data_available:
            raw_data = load_raw_data(raw_200hz_path)
            timestamps = load_timestamps_data(timestamps_200hz_path)
            # Map each 200hz timestamp to the closest native-rate timestamp;
            # used below to densify native-rate confidence data.
            native_rate_ts = load_timestamps_data(timestamps_path)
            densification_idc = pm.find_closest(native_rate_ts, timestamps)
        else:
            raw_data = load_raw_data(find_raw_path(timestamps_path))
            timestamps = load_timestamps_data(timestamps_path)
            densification_idc = np.asarray(range(len(raw_data)))

        # Load confidence data when both 200hz gaze and 200hz confidence data is available
        if is_200hz_data_available and worn_200hz_path is not None:
            conf_data = load_worn_data(worn_200hz_path)
        # Load and densify confidence data when 200hz gaze is available, but only non-200hz confidence is available
        elif is_200hz_data_available and worn_path is not None:
            conf_data = load_worn_data(worn_path)
            conf_data = conf_data[densification_idc]
        # Load confidence data when both non-200hz gaze and non-200hz confidence is available
        elif not is_200hz_data_available and worn_path is not None:
            conf_data = load_worn_data(worn_path)
        # Otherwise, don't load confidence data
        else:
            conf_data = None

        if len(raw_data) != len(timestamps):
            # Truncate both arrays to the shorter length so zip stays aligned.
            logger.warning(
                f"There is a mismatch between the number of raw data ({len(raw_data)}) "
                f"and the number of timestamps ({len(timestamps)})!")
            size = min(len(raw_data), len(timestamps))
            raw_data = raw_data[:size]
            timestamps = timestamps[:size]

        if conf_data is not None and len(conf_data) != len(timestamps):
            logger.warning(
                f"There is a mismatch between the number of confidence data ({len(conf_data)}) "
                f"and the number of timestamps ({len(timestamps)})! Not using confidence data."
            )

            conf_data = None

        if conf_data is None:
            # Fall back to full confidence for every sample.
            conf_data = (1.0 for _ in range(len(raw_data)))

        yield from zip(raw_data, timestamps, conf_data)
Example #23
0
    def export_data(self, export_range, export_dir):
        """Export pupil and gaze data for ``export_range`` into ``export_dir``.

        Depending on the ``should_export_*`` flags, writes:
          - "pupil_positions.csv": one row per pupil datum (2d fields plus
            ellipse and, when available, 3d eye-model fields).
          - "gaze_positions.csv": one row per gaze datum (2d fields plus,
            when available, 3d fields).
          - "pupil_gaze_positions_info.txt": this plugin's docstring, which
            describes the exported fields.

        The "index" column of both CSVs holds the index of the world frame
        whose timestamp is closest to the datum's timestamp.
        """
        export_window = pm.exact_window(self.g_pool.timestamps, export_range)
        if self.should_export_pupil_positions:
            with open(
                os.path.join(export_dir, "pupil_positions.csv"),
                "w",
                encoding="utf-8",
                newline="",
            ) as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=",")

                csv_writer.writerow(
                    (
                        "timestamp",
                        "index",
                        "id",
                        "confidence",
                        "norm_pos_x",
                        "norm_pos_y",
                        "diameter",
                        "method",
                        "ellipse_center_x",
                        "ellipse_center_y",
                        "ellipse_axis_a",
                        "ellipse_axis_b",
                        "ellipse_angle",
                        "diameter_3d",
                        "model_confidence",
                        "model_id",
                        "sphere_center_x",
                        "sphere_center_y",
                        "sphere_center_z",
                        "sphere_radius",
                        "circle_3d_center_x",
                        "circle_3d_center_y",
                        "circle_3d_center_z",
                        "circle_3d_normal_x",
                        "circle_3d_normal_y",
                        "circle_3d_normal_z",
                        "circle_3d_radius",
                        "theta",
                        "phi",
                        "projected_sphere_center_x",
                        "projected_sphere_center_y",
                        "projected_sphere_axis_a",
                        "projected_sphere_axis_b",
                        "projected_sphere_angle",
                    )
                )

                pupil_section = self.g_pool.pupil_positions.init_dict_for_window(
                    export_window
                )
                # Closest world frame index for every pupil datum.
                pupil_world_idc = pm.find_closest(
                    self.g_pool.timestamps, pupil_section["data_ts"]
                )
                for p, idx in zip(pupil_section["data"], pupil_world_idc):
                    data_2d = [
                        "{}".format(
                            p["timestamp"]
                        ),  # use str to be consitant with csv lib.
                        idx,
                        p["id"],
                        p["confidence"],
                        p["norm_pos"][0],
                        p["norm_pos"][1],
                        p["diameter"],
                        p["method"],
                    ]
                    try:
                        ellipse_data = [
                            p["ellipse"]["center"][0],
                            p["ellipse"]["center"][1],
                            p["ellipse"]["axes"][0],
                            p["ellipse"]["axes"][1],
                            p["ellipse"]["angle"],
                        ]
                    except KeyError:
                        # Datum without an ellipse fit: pad the 5 ellipse columns.
                        ellipse_data = [None] * 5
                    try:
                        data_3d = [
                            p["diameter_3d"],
                            p["model_confidence"],
                            p["model_id"],
                            p["sphere"]["center"][0],
                            p["sphere"]["center"][1],
                            p["sphere"]["center"][2],
                            p["sphere"]["radius"],
                            p["circle_3d"]["center"][0],
                            p["circle_3d"]["center"][1],
                            p["circle_3d"]["center"][2],
                            p["circle_3d"]["normal"][0],
                            p["circle_3d"]["normal"][1],
                            p["circle_3d"]["normal"][2],
                            p["circle_3d"]["radius"],
                            p["theta"],
                            p["phi"],
                            p["projected_sphere"]["center"][0],
                            p["projected_sphere"]["center"][1],
                            p["projected_sphere"]["axes"][0],
                            p["projected_sphere"]["axes"][1],
                            p["projected_sphere"]["angle"],
                        ]
                    except KeyError:
                        # 2d-only datum: pad the 21 eye-model columns.
                        data_3d = [None] * 21
                    row = data_2d + ellipse_data + data_3d
                    csv_writer.writerow(row)
                logger.info("Created 'pupil_positions.csv' file.")

        if self.should_export_gaze_positions:
            with open(
                os.path.join(export_dir, "gaze_positions.csv"),
                "w",
                encoding="utf-8",
                newline="",
            ) as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=",")
                csv_writer.writerow(
                    (
                        "timestamp",
                        "index",
                        "confidence",
                        "norm_pos_x",
                        "norm_pos_y",
                        "base_data",
                        "gaze_point_3d_x",
                        "gaze_point_3d_y",
                        "gaze_point_3d_z",
                        "eye_center0_3d_x",
                        "eye_center0_3d_y",
                        "eye_center0_3d_z",
                        "gaze_normal0_x",
                        "gaze_normal0_y",
                        "gaze_normal0_z",
                        "eye_center1_3d_x",
                        "eye_center1_3d_y",
                        "eye_center1_3d_z",
                        "gaze_normal1_x",
                        "gaze_normal1_y",
                        "gaze_normal1_z",
                    )
                )

                gaze_section = self.g_pool.gaze_positions.init_dict_for_window(
                    export_window
                )
                # Closest world frame index for every gaze datum.
                gaze_world_idc = pm.find_closest(
                    self.g_pool.timestamps, gaze_section["data_ts"]
                )

                for g, idx in zip(gaze_section["data"], gaze_world_idc):
                    data = [
                        "{}".format(g["timestamp"]),
                        idx,
                        g["confidence"],
                        g["norm_pos"][0],
                        g["norm_pos"][1],
                        " ".join(
                            [
                                "{}-{}".format(b["timestamp"], b["id"])
                                for b in g["base_data"]
                            ]
                        ),
                    ]  # use str on timestamp to be consitant with csv lib.

                    # add 3d data if avaiblable
                    if g.get("gaze_point_3d", None) is not None:
                        data_3d = [
                            g["gaze_point_3d"][0],
                            g["gaze_point_3d"][1],
                            g["gaze_point_3d"][2],
                        ]

                        # binocular
                        if g.get("eye_centers_3d", None) is not None:
                            data_3d += g["eye_centers_3d"].get(0, [None, None, None])
                            data_3d += g["gaze_normals_3d"].get(0, [None, None, None])
                            data_3d += g["eye_centers_3d"].get(1, [None, None, None])
                            data_3d += g["gaze_normals_3d"].get(1, [None, None, None])
                        # monocular
                        elif g.get("eye_center_3d", None) is not None:
                            data_3d += g["eye_center_3d"]
                            data_3d += g["gaze_normal_3d"]
                            data_3d += [None] * 6
                        # neither per-eye layout present: pad so the row always
                        # matches the header width (previously left 12 short)
                        else:
                            data_3d += [None] * 12
                    else:
                        data_3d = [None] * 15
                    data += data_3d
                    csv_writer.writerow(data)
                logger.info("Created 'gaze_positions.csv' file.")
        if self.should_export_field_info:
            with open(
                os.path.join(export_dir, "pupil_gaze_positions_info.txt"),
                "w",
                encoding="utf-8",
                newline="",
            ) as info_file:
                info_file.write(self.__doc__)
Example #24
0
 def ts_idx_from_playback_time(self, playback_time):
     """Return the index of the world timestamp closest to *playback_time*."""
     world_timestamps = self.g_pool.timestamps
     return pm.find_closest(world_timestamps, playback_time)
Example #25
0
def export_processed_h264(
    world_timestamps,
    unprocessed_video_loc,
    target_video_loc,
    export_range,
    process_frame,
    export_timestamps,
):
    """Re-encode the world video for ``export_range``, applying
    ``process_frame`` to each frame, and write the result to
    ``target_video_loc``.

    Generator: yields (status message, progress in percent) tuples so the
    caller can display progress. ``process_frame(capture, frame)`` must
    return an image compatible with the capture-sized bgr24 frame buffer.
    When ``export_timestamps`` is true, the exported frames' timestamps are
    written alongside the video via ``write_timestamps``.
    """
    yield "Converting video", 0.1
    capture = File_Source(Empty(), unprocessed_video_loc)
    if not capture.initialised:
        yield "Converting scene video failed", 0.0
        return

    # Map the export range onto the source video's own frame indices.
    export_window = pm.exact_window(world_timestamps, export_range)
    (export_from_index,
     export_to_index) = pm.find_closest(capture.timestamps, export_window)

    update_rate = 10  # emit a progress update every 10 frames
    start_time = None
    time_base = Fraction(1, 65535)

    target_container = av.open(target_video_loc, "w")
    video_stream = target_container.add_stream("mpeg4", 1 / time_base)
    video_stream.bit_rate = 150e6
    video_stream.bit_rate_tolerance = video_stream.bit_rate / 20
    video_stream.thread_count = max(1, mp.cpu_count() - 1)
    video_stream.width, video_stream.height = capture.frame_size

    # Single reusable frame buffer; pts are set per frame in the loop below.
    av_frame = av.VideoFrame(*capture.frame_size, "bgr24")
    av_frame.time_base = time_base

    capture.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    timestamps = []
    while True:
        try:
            frame = capture.get_frame()
        except EndofVideoError:
            break

        if frame.index > export_to_index:
            break

        # pts are measured relative to the first exported frame.
        if start_time is None:
            start_time = frame.timestamp

        undistorted_img = process_frame(capture, frame)
        av_frame.planes[0].update(undistorted_img)
        av_frame.pts = int((frame.timestamp - start_time) / time_base)

        if export_timestamps:
            timestamps.append(frame.timestamp)

        packet = video_stream.encode(av_frame)
        if packet:
            target_container.mux(packet)

        if capture.current_frame_idx >= next_update_idx:
            # Scale conversion progress into the 10%-100% range (the first
            # 10% was consumed by the initial yield above).
            progress = ((capture.current_frame_idx - export_from_index) /
                        (export_to_index - export_from_index)) * 0.9 + 0.1
            yield "Converting video", progress * 100.0
            next_update_idx += update_rate

    while True:  # flush encoder
        packet = video_stream.encode()
        if packet:
            target_container.mux(packet)
        else:
            break

    if export_timestamps:
        write_timestamps(target_video_loc, timestamps)

    target_container.close()
    capture.cleanup()
    yield "Converting video completed", 1.0 * 100.0
Example #26
0
 def ts_idx_from_playback_time(self, playback_time):
     """Return the index of the world timestamp closest to *playback_time*."""
     # NOTE(review): the query is wrapped in a one-element list here, so this
     # presumably returns a one-element index array rather than a scalar --
     # confirm against pm.find_closest and this method's callers.
     return pm.find_closest(self.g_pool.timestamps, [playback_time])