Example #1
def update_recording_v086_to_v087(rec_dir):
    logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    def _clamp_norm_point(pos):
        """realisitic numbers for norm pos should be in this range.
            Grossly bigger or smaller numbers are results bad exrapolation
            and can cause overflow erorr when denormalized and cast as int32.
        """
        return min(100, max(-100, pos[0])), min(100, max(-100, pos[1]))

    for g in pupil_data["gaze_positions"]:
        if "topic" not in g:
            # we missed this in one gaze mapper
            g["topic"] = "gaze"
        g["norm_pos"] = _clamp_norm_point(g["norm_pos"])

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, "r", encoding="utf-8") as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info["Capture Software Version"] = "v0.8.7"

    with open(meta_info_path, "w", newline="") as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #2
def update_recording_v03_to_v074(rec_dir):
    logger.info("Updating recording from v0.3x format to v0.7.4 format")
    pupilgaze_array = np.load(os.path.join(rec_dir, "gaze_positions.npy"))
    gaze_list = []
    pupil_list = []

    for datum in pupilgaze_array:
        gaze_x, gaze_y, pupil_x, pupil_y, ts, confidence = datum
        # some bogus size and confidence as we did not save it back then
        pupil_list.append(
            {
                "timestamp": ts,
                "confidence": confidence,
                "id": 0,
                "norm_pos": [pupil_x, pupil_y],
                "diameter": 50,
                "method": "2d python",
            }
        )
        gaze_list.append(
            {"timestamp": ts, "confidence": confidence, "norm_pos": [gaze_x, gaze_y], "base": [pupil_list[-1]]}
        )

    pupil_data = {"pupil_positions": pupil_list, "gaze_positions": gaze_list}
    try:
        save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass

    ts_path = os.path.join(rec_dir, "world_timestamps.npy")
    ts_path_old = os.path.join(rec_dir, "timestamps.npy")
    if not os.path.isfile(ts_path) and os.path.isfile(ts_path_old):
        os.rename(ts_path_old, ts_path)
Example #3
 def cleanup(self):
     """called when the plugin gets terminated.
     This happens either voluntarily or forced.
     If you have a GUI or glfw window, destroy it here.
     """
     self.deinit_gui()
     save_object(self.annotations_list,os.path.join(self.g_pool.rec_dir, "annotations"))
Example #4
def update_recording_v086_to_v087(rec_dir):
    logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    def _clamp_norm_point(pos):
        '''Realistic numbers for norm pos should be in this range.
            Grossly bigger or smaller numbers are the result of bad extrapolation
            and can cause an overflow error when denormalized and cast to int32.
        '''
        return min(100.,max(-100.,pos[0])),min(100.,max(-100.,pos[1]))

    for g in pupil_data.get('gaze_positions', []):
        if 'topic' not in g:
            # we missed this in one gaze mapper
            g['topic'] = 'gaze'
        g['norm_pos'] = _clamp_norm_point(g['norm_pos'])

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path,'r',encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.7'

    update_meta_info(rec_dir, meta_info)
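Example #4 hands the final write off to update_meta_info(rec_dir, meta_info), a helper that is not included in this listing. A minimal sketch of what it presumably does, inferred from the inline info.csv write shown in Example #1 (the actual Pupil implementation may differ):

import os
import csv_utils

def update_meta_info(rec_dir, meta_info):
    # Hypothetical reconstruction: write the key/value meta info back to info.csv,
    # mirroring the explicit write step in Example #1.
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, "w", newline="") as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)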
Example #5
def update_recording_0v4_to_current(rec_dir):
    logger.info("Updatig recording from v0.4x format to current version")
    gaze_array = np.load(os.path.join(rec_dir, "gaze_positions.npy"))
    pupil_array = np.load(os.path.join(rec_dir, "pupil_positions.npy"))
    gaze_list = []
    pupil_list = []

    for datum in pupil_array:
        ts, confidence, id, x, y, diameter = datum[:6]
        pupil_list.append(
            {"timestamp": ts, "confidence": confidence, "id": id, "norm_pos": [x, y], "diameter": diameter}
        )

    pupil_by_ts = dict([(p["timestamp"], p) for p in pupil_list])

    for datum in gaze_array:
        ts, confidence, x, y, = datum
        gaze_list.append(
            {"timestamp": ts, "confidence": confidence, "norm_pos": [x, y], "base": [pupil_by_ts.get(ts, None)]}
        )

    pupil_data = {"pupil_positions": pupil_list, "gaze_positions": gaze_list}
    try:
        save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #6
def save_intrinsics(directory, cam_name, resolution, intrinsics):
    """
    Saves camera intrinsics calibration to a file. For each unique camera name we maintain a single file containing all calibrations associated with this camera name.
    :param directory: Directory to which the intrinsics file will be written
    :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'
    :param resolution: Camera resolution given as a tuple. This needs to match the resolution the calibration has been computed with.
    :param intrinsics: The camera intrinsics dictionary.
    :return:
    """
    # Try to load previous camera calibrations
    save_path = os.path.join(
        directory, "{}.intrinsics".format(cam_name.replace(" ", "_"))
    )
    try:
        calib_dict = load_object(save_path, allow_legacy=False)
    except:
        calib_dict = {}

    calib_dict["version"] = __version__
    calib_dict[str(resolution)] = intrinsics

    save_object(calib_dict, save_path)
    logger.info(
        "Calibration for camera {} at resolution {} saved to {}".format(
            cam_name, resolution, save_path
        )
    )
Example #7
 def calculate(self):
     self.calculated = True
     rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(np.array(self.obj_points), np.array(self.img_points),self.g_pool.capture.frame_size)
     logger.info("Calibrated Camera, RMS:%s"%rms)
     camera_calibration = {'camera_matrix':camera_matrix,'dist_coefs':dist_coefs,'camera_name':self.g_pool.capture.name,'resolution':self.g_pool.capture.frame_size}
     save_object(camera_calibration,os.path.join(self.g_pool.user_dir,"camera_calibration"))
     logger.info("Calibration saved to user folder")
Example #8
  def write_test_values_py():
    global test_file_Folder
    test_file_Folder += 'py/'
    # remove the folder and recreate it empty before writing new test files to it
    if os.path.exists(test_file_Folder):
        shutil.rmtree(test_file_Folder)
    os.makedirs( os.path.expanduser(test_file_Folder))

    # Iterate every frame
    frameNumber = 0
    while True:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
            frameNumber += 1
        except CameraCaptureError:
            print "Capture from Camera Failed. Stopping."
            break
        except EndofVideoFileError:
            print "Video File is done."
            break
        # send to detector
        result = detector_py.detect(frame,user_roi=u_r,visualize=False)

        #save test values
        save_object( result, test_file_Folder + 'result_frame_py{}'.format(frameNumber))

        print "Frame {}".format(frameNumber)

    print "Finished writing test files py."
Example #9
def update_recording_bytes_to_unicode(rec_dir):
    logger.info("Updating recording from bytes to unicode.")

    def convert(data):
        if isinstance(data, bytes):
            return data.decode()
        elif isinstance(data, str) or isinstance(data, np.ndarray):
            return data
        elif isinstance(data, collections.Mapping):
            return dict(map(convert, data.items()))
        elif isinstance(data, collections.Iterable):
            return type(data)(map(convert, data))
        else:
            return data

    for file in os.listdir(rec_dir):
        if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'):
            continue
        rec_file = os.path.join(rec_dir, file)
        try:
            rec_object = load_object(rec_file)
            converted_object = convert(rec_object)
            if converted_object != rec_object:
                logger.info('Converted `{}` from bytes to unicode'.format(file))
                save_object(converted_object, rec_file)
        except (UnpicklingError, IsADirectoryError):
            continue

    # manually convert k v dicts.
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    with open(meta_info_path, 'w', newline='') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
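Note that the convert helper above relies on collections.Mapping and collections.Iterable; since Python 3.3 these ABCs live in collections.abc, and from Python 3.10 they can no longer be imported from collections directly. A small self-contained sketch of the same recursion (minus the numpy special case) written against collections.abc, with a usage example; this is an illustration, not the Pupil source:

from collections import abc

def convert(data):
    # recursively decode bytes to str while preserving container types
    if isinstance(data, bytes):
        return data.decode()
    elif isinstance(data, str):
        return data
    elif isinstance(data, abc.Mapping):
        return dict(map(convert, data.items()))
    elif isinstance(data, abc.Iterable):
        return type(data)(map(convert, data))
    else:
        return data

# nested containers of bytes become str
assert convert({b"topic": [b"gaze", 0.5]}) == {"topic": ["gaze", 0.5]}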
Example #10
    def start_processing(self):
        data_path = '/Developments/NCLUni/pupil_crowd4Jul16/recordings/2016_07_29/003Original/CrowdOldMethod'#self.rec_path
        # Set user_dir to data_path so all related plugins save to the same folder as the recordings
        self.g_pool.user_dir = data_path
        # Manage plugins
        plugin_by_index = [Recorder]+calibration_plugins+gaze_mapping_plugins
        name_by_index = [p.__name__ for p in plugin_by_index]
        plugin_by_name = dict(zip(name_by_index,plugin_by_index))

        self.g_pool.user_settings_path = os.path.join(data_path[:data_path.index('recordings')], 'capture_settings')

        ''' Step 1: when possible detect all pupil positions '''
        # pupil_list = self.get_pupil_list(crowd_all=True)
        pupil_list = self.get_pupil_list_from_csv(data_path)
        # pupil_list = []
        if pupil_list:
            # create the events variable that will be sent to plugins
            events = {'pupil_positions':pupil_list,'gaze_positions':[]}
            # get world settings
            user_settings_world_path = os.path.join(self.g_pool.user_settings_path,'user_settings_world')
            user_settings_world = Persistent_Dict(user_settings_world_path)
            default_plugins = [('Recorder',{})]
            simple_gaze_mapper = [('Simple_Gaze_Mapper',{})]
            manual_calibration_plugin = [('Manual_Marker_Calibration',{})]
            self.g_pool.plugins = Plugin_List(self.g_pool,plugin_by_name,user_settings_world.get('loaded_plugins',default_plugins)+manual_calibration_plugin)
            # self.g_pool.plugins.add(simple_gaze_mapper)
            self.g_pool.pupil_confidence_threshold = user_settings_world.get('pupil_confidence_threshold',.6)
            self.g_pool.detection_mapping_mode = user_settings_world.get('detection_mapping_mode','2d')

            ''' Step 2: before calculating gaze positions we process the calibration data.
            For calibration we need pupil_list (in the events variable) and ref_list - ref_list contains all frames with a detected marker.
            Using the manual_marker_calibration plugin, call plugin.update to pass pupil_list and world frames for marker detection.
            However, pupil_list is fully detected by this point, so we do the following:
            First iteration: send events with the full pupil_list and the first world frame to manual_marker_calibration plugin.update.
            Following iterations: send an empty [] pupil_list with the next world frame to manual_marker_calibration plugin.update.
            '''
            # self.calibrate(events, data_path, user_settings_world, crowd_all=True)
            self.calibrate_from_csv(pupil_list, data_path)
            # self.calibrate_from_user_calibration_data_file()

            ''' Step 3: calculate gaze positions
            pass events to the gaze mapper plugin without the world frame
            '''
            for p in self.g_pool.plugins:
                if 'Simple_Gaze_Mapper' in p.class_name:
                    p.update(None,events)
                    break
            save_object(events,os.path.join(data_path, "pupil_data"))
            # timestamps_path = os.path.join(data_path, "world_timestamps.npy")
            # timestamps = np.load(timestamps_path)
            # pupil_positions_by_frame = None
            # pupil_positions_by_frame = correlate_data(pupil_list,timestamps)
        else:
            logger.warning("No eye data found")

        # Again, remove all plugins except recorder
        for p in self.g_pool.plugins:
            if p.class_name != 'Recorder':
                p.alive = False
Example #11
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, '*.time')
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name, time_ext = os.path.splitext(time_file_name)

        potential_locs = [os.path.join(rec_dir, time_name+ext) for ext in ('.mjpeg', '.mp4','.m4a')]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            video_loc = existing_locs[0]

        if time_name in ('Pupil Cam1 ID0', 'Pupil Cam1 ID1'):
            time_name = 'eye'+time_name[-1]  # rename eye files
        elif time_name in ('Pupil Cam1 ID2', 'Logitech Webcam C930e'):
            cam_calib_loc = os.path.join(rec_dir, 'camera_calibration')
            try:
                camera_calibration = load_object(cam_calib_loc)
            except:
                # no camera calibration found
                video = av.open(video_loc, 'r')
                frame_size = video.streams.video[0].format.width, video.streams.video[0].format.height
                del video
                try:
                    camera_calibration = pre_recorded_calibrations[time_name][frame_size]
                except KeyError:

                    camera_calibration = idealized_camera_calibration(frame_size)
                    logger.warning('Camera calibration not found. Will assume idealized camera.')
                save_object(camera_calibration, cam_calib_loc)

            time_name = 'world'  # assume world file
        elif time_name.startswith('audio_'):
            time_name = 'audio'

        timestamps = np.fromfile(time_loc, dtype='>f8')
        timestamp_loc = os.path.join(rec_dir, '{}_timestamps.npy'.format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == 'audio':
            video_dst = os.path.join(rec_dir, time_name) + '.mp4'
            logger.info('Renaming "{}" to "{}"'.format(os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)
        else:
            video_dst = os.path.join(rec_dir, time_name) + os.path.splitext(video_loc)[1]
            logger.info('Renaming "{}" to "{}"'.format(os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)

    pupil_data_loc = os.path.join(rec_dir, 'pupil_data')
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        save_object({'pupil_positions': [],
                     'gaze_positions': [],
                     'notifications': []}, pupil_data_loc)
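Example #11 falls back to idealized_camera_calibration(frame_size) when neither a saved nor a pre-recorded calibration is available. That helper is not part of this listing; below is a rough sketch of what an idealized (distortion-free pinhole) calibration could look like. The focal-length guess is an assumption, and only the key names follow the camera_calibration dicts used elsewhere in these examples:

import numpy as np

def idealized_camera_calibration(frame_size):
    # hypothetical sketch: zero distortion, principal point at the image center,
    # and an arbitrary focal-length guess (not necessarily the value Pupil uses)
    width, height = frame_size
    focal_length = float(width)
    camera_matrix = np.array([
        [focal_length, 0.0, width / 2.0],
        [0.0, focal_length, height / 2.0],
        [0.0, 0.0, 1.0],
    ])
    return {
        "camera_matrix": camera_matrix,
        "dist_coefs": np.zeros((1, 5)),
        "camera_name": "unknown",
        "resolution": frame_size,
    }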
Example #12
 def _save_to_file(self):
     file_path = self._plmodel_file_path
     dict_representation = {
         "version": self.version,
         "data": self.plmodel,
         "recording_uuid": self._recording_uuid_current,
     }
     os.makedirs(os.path.dirname(file_path), exist_ok=True)
     fm.save_object(dict_representation, file_path)
Example #13
def update_recording_v05_to_v074(rec_dir):
    logger.info("Updating recording from v0.5x/v0.6x/v0.7x format to v0.7.4 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    save_object(pupil_data,os.path.join(rec_dir, "pupil_data_old"))
    for p in pupil_data['pupil_positions']:
        p['method'] = '2d python'
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #14
 def calculate(self):
     self.calculated = True
     self.count = 10
     rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(np.array(self.obj_points), np.array(self.img_points),self.g_pool.capture.frame_size,None,None)
     logger.info("Calibrated Camera, RMS:{}".format(rms))
     camera_calibration = {'camera_matrix':camera_matrix,'dist_coefs':dist_coefs,'camera_name':self.g_pool.capture.name,'resolution':self.g_pool.capture.frame_size}
     save_object(camera_calibration,os.path.join(self.g_pool.user_dir,"camera_calibration"))
     logger.info("Calibration saved to user folder")
     self.camera_intrinsics = camera_matrix,dist_coefs,self.g_pool.capture.frame_size
     self.show_undistortion_switch.read_only=False
Example #15
    def cleanup(self):
        self.stop_eye_process(0)
        self.stop_eye_process(1)
        # close sockets before context is terminated
        self.data_sub = None
        self.deinit_gui()

        session_data = {}
        session_data["detection_method"]= self.detection_method
        session_data['pupil_positions'] = self.pupil_positions
        session_data['detection_progress'] = self.detection_progress
        session_data['detection_status'] = self.detection_status
        save_object(session_data,os.path.join(self.data_dir,'offline_pupil_data'))
Example #16
    def stop(self):
        # explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info("No surface_definitions data found. You may want this if you do marker tracking.")

        camera_calibration = load_camera_calibration(self.g_pool)
        if camera_calibration is not None:
            save_object(camera_calibration, os.path.join(self.rec_path, "camera_calibration"))
        else:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a', newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, {
                    'Duration Time': self.get_rec_time_str(),
                    'World Camera Frames': self.frame_count,
                    'World Camera Resolution': str(self.g_pool.capture.frame_size[0])+"x"+str(self.g_pool.capture.frame_size[1]),
                    'Capture Software Version': self.g_pool.version,
                    'Data Format Version': self.g_pool.version,
                    'System Info': get_system_info()
                }, append=True)
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w', newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")

        self.close_info_menu()

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all({'subject': 'recording.stopped', 'rec_path': self.rec_path})
Example #17
def update_recording_v091_to_v093(rec_dir):
    logger.info("Updating recording from v0.9.1 format to v0.9.3 format")
    meta_info_path = os.path.join(rec_dir,"info.csv")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    for g in pupil_data.get('gaze_positions', []):
        # fixing recordings made with bug https://github.com/pupil-labs/pupil/issues/598
        g['norm_pos'] = float(g['norm_pos'][0]), float(g['norm_pos'][1])

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.9.3'
    update_meta_info(rec_dir, meta_info)
Example #18
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d['topic'] = topic

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path,'r',encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.6'

    update_meta_info(rec_dir, meta_info)
Example #19
def update_recording_v082_to_v083(rec_dir):
    logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    for d in pupil_data['gaze_positions']:
        if 'base' in d:
            d['base_data'] = d.pop('base')

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path,'r',encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.3'

    update_meta_info(rec_dir, meta_info)
Example #20
    def save_offline_data(self):
        topic_data_ts = (
            (self.id_topics[ts], datum, ts)
            for ts, datum in self.pupil_positions.items()
        )
        with fm.PLData_Writer(self.data_dir, "offline_pupil") as writer:
            for topic, datum, timestamp in topic_data_ts:
                writer.append_serialized(timestamp, topic, datum.serialized)

        session_data = {}
        session_data["detection_method"] = self.detection_method
        session_data["detection_status"] = self.detection_status
        session_data["version"] = self.session_data_version
        cache_path = os.path.join(self.data_dir, "offline_pupil.meta")
        fm.save_object(session_data, cache_path)
        logger.info("Cached detected pupil data to {}".format(cache_path))
Example #21
def update_recording_v082_to_v083(rec_dir):
    logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for d in pupil_data["gaze_positions"]:
        if "base" in d:
            d["base_data"] = d.pop("base")

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, "r", encoding="utf-8") as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info["Capture Software Version"] = "v0.8.3"

    with open(meta_info_path, "w", newline="") as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #22
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d["topic"] = topic

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, "r", encoding="utf-8") as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info["Capture Software Version"] = "v0.8.6"

    with open(meta_info_path, "w", newline="") as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #23
def update_recording_0v3_to_current(rec_dir):
    logger.info("Updatig recording from v0.3x format to current version")
    pupilgaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
    gaze_list = []
    pupil_list = []

    for datum in pupilgaze_array:
        gaze_x,gaze_y,pupil_x,pupil_y,ts,confidence = datum
        #some bogus size and confidence as we did not save it back then
        pupil_list.append({'timestamp':ts,'confidence':confidence,'id':0,'norm_pos':[pupil_x,pupil_y],'diameter':50})
        gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[gaze_x,gaze_y],'base':[pupil_list[-1]]})

    pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #24
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d['topic'] = topic

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path) as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Capture Software Version'] = 'v0.8.6'

    with open(meta_info_path,'w') as csvfile:
        csv_utils.write_key_value_file(csvfile,meta_info)
Example #25
def update_recording_v093_to_v094(rec_dir):
    logger.info("Updating recording from v0.9.3 to v0.9.4.")

    for file in os.listdir(rec_dir):
        if file.startswith(".") or os.path.splitext(file)[1] in (".mp4", ".avi"):
            continue
        rec_file = os.path.join(rec_dir, file)

        try:
            rec_object = fm.load_object(rec_file, allow_legacy=False)
            fm.save_object(rec_object, rec_file)
        except Exception:
            try:
                rec_object = fm.load_object(rec_file, allow_legacy=True)
                fm.save_object(rec_object, rec_file)
                logger.info("Converted `{}` from pickle to msgpack".format(file))
            except Exception:
                logger.warning("did not convert {}".format(rec_file))

    _update_info_version_to("v0.9.4", rec_dir)
Example #26
    def cleanup(self):
        if self.process_pipe:
            self.process_pipe.send(topic='terminate',payload={})
            self.process_pipe.socket.close()
            self.process_pipe = None
        for sec in self.sections:
            if sec['bg_task']:
                sec['bg_task'].cancel()
            sec['bg_task'] = None
            sec["gaze_positions"] = []

        session_data = {}
        session_data['sections'] = self.sections
        session_data['version'] = self.session_data_version
        session_data['manual_ref_positions'] = self.manual_ref_positions
        if self.detection_progress == 100.0:
            session_data['circle_marker_positions'] = self.circle_marker_positions
        else:
            session_data['circle_marker_positions'] = []
        save_object(session_data, os.path.join(self.result_dir, 'offline_calibration_gaze'))
Example #27
def update_recording_v086_to_v087(rec_dir):
    logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
    pupil_data = fm.load_object(os.path.join(rec_dir, "pupil_data"))

    def _clamp_norm_point(pos):
        """realisitic numbers for norm pos should be in this range.
            Grossly bigger or smaller numbers are results bad exrapolation
            and can cause overflow erorr when denormalized and cast as int32.
        """
        return min(100.0, max(-100.0, pos[0])), min(100.0, max(-100.0, pos[1]))

    for g in pupil_data.get("gaze_positions", []):
        if "topic" not in g:
            # we missed this in one gaze mapper
            g["topic"] = "gaze"
        g["norm_pos"] = _clamp_norm_point(g["norm_pos"])

    fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    _update_info_version_to("v0.8.7", rec_dir)
Example #28
    def cleanup(self):
        if self.process_pipe:
            self.process_pipe.send(topic='terminate',payload={})
            self.process_pipe.socket.close()
            self.process_pipe = None
        for sec in self.sections:
            if sec['bg_task']:
                sec['bg_task'].cancel()
            sec['bg_task'] = None
            sec["gaze_positions"] = []

        session_data = {}
        session_data['sections'] = self.sections
        session_data['version'] = self.session_data_version
        session_data['manual_ref_positions'] = self.manual_ref_positions
        if self.detection_progress == 100.0:
            session_data['circle_marker_positions'] = self.circle_marker_positions
        else:
            session_data['circle_marker_positions'] = []
        save_object(session_data, os.path.join(self.result_dir, 'offline_calibration_gaze'))
        self.deinit_gui()
Example #29
def save_intrinsics(directory, cam_name, resolution, intrinsics):
    """
    Saves camera intrinsics calibration to a file. For each unique camera name we maintain a single file containing all calibrations associated with this camera name.
    :param directory: Directory to which the intrinsics file will be written
    :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'
    :param resolution: Camera resolution given as a tuple. This needs to match the resolution the calibration has been computed with.
    :param intrinsics: The camera intrinsics dictionary.
    :return:
    """
    # Try to load previous camera calibrations
    save_path = os.path.join(directory, '{}.intrinsics'.format(cam_name.replace(" ", "_")))
    try:
        calib_dict = load_object(save_path, allow_legacy=False)
    except:
        calib_dict = {}

    calib_dict['version'] = __version__
    calib_dict[str(resolution)] = intrinsics

    save_object(calib_dict, save_path)
    logger.info("Calibration for camera {} at resolution {} saved to {}".format(cam_name, resolution, save_path))
Example #30
    def calculate(self):
        self.count = 10
        rms, camera_matrix, dist_coefs, rot_vectors, trans_vectors = cv2.calibrateCamera(np.array(self.obj_points), np.array(self.img_points), self.g_pool.capture.frame_size,None,None)
        logger.info("Calibrated Camera, RMS:%s"%rms)

        tot_error = 0
        for i in range(len(self.obj_points)):
            img_points2, _ = cv2.projectPoints(self.obj_points[i], rot_vectors[i], trans_vectors[i], camera_matrix, dist_coefs)
            current_error = cv2.norm(self.img_points[i],img_points2, cv2.NORM_L2)/len(img_points2)
            tot_error += current_error
        error = tot_error/len(self.obj_points)
        logger.info("Error:%s"%error)
        print(error, rms)

        camera_calibration = {'camera_matrix':camera_matrix,'dist_coefs':dist_coefs,'camera_name':self.g_pool.capture.name,'resolution':self.g_pool.capture.frame_size,'error':error}
        save_object(camera_calibration,os.path.join(self.g_pool.user_dir,"camera_calibration"))
        logger.info("Calibration saved to user folder")
        self.camera_intrinsics = camera_matrix,dist_coefs,self.g_pool.capture.frame_size,error
        self.show_undistortion_switch.read_only=False
        self.calibrate_img = None
        self.calculated = True
Example #31
def update_recording_0v3_to_current(rec_dir):
    logger.info("Updatig recording from v0.3x format to current version")
    pupilgaze_array = np.load(os.path.join(rec_dir, "gaze_positions.npy"))
    gaze_list = []
    pupil_list = []

    for datum in pupilgaze_array:
        gaze_x, gaze_y, pupil_x, pupil_y, ts, confidence = datum
        # some bogus size and confidence as we did not save it back then
        pupil_list.append(
            {"timestamp": ts, "confidence": confidence, "id": 0, "norm_pos": [pupil_x, pupil_y], "diameter": 50}
        )
        gaze_list.append(
            {"timestamp": ts, "confidence": confidence, "norm_pos": [gaze_x, gaze_y], "base": [pupil_list[-1]]}
        )

    pupil_data = {"pupil_positions": pupil_list, "gaze_positions": gaze_list}
    try:
        save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #32
def update_recording_v073_to_v074(rec_dir):
    logger.info("Updating recording from v0.7x format to v0.7.4 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    modified = False
    for p in pupil_data['pupil_positions']:
        if p['method'] == "3D c++":
            p['method'] = "3d c++"
            try:
                p['projected_sphere'] = p.pop('projectedSphere')
            except:
                p['projected_sphere'] = {'center':(0,0),'angle':0,'axes':(0,0)}
            p['model_confidence'] = p.pop('modelConfidence')
            p['model_id'] = p.pop('modelID')
            p['circle_3d'] = p.pop('circle3D')
            p['diameter_3d'] = p.pop('diameter_3D')
            modified = True
    if modified:
        save_object(load_object(os.path.join(rec_dir, "pupil_data")),os.path.join(rec_dir, "pupil_data_old"))
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #33
def update_recording_bytes_to_unicode(rec_dir):
    logger.info("Updating recording from bytes to unicode.")

    # update to python 3
    meta_info_path = os.path.join(rec_dir, "info.csv")

    def convert(data):
        if isinstance(data, bytes):
            return data.decode()
        elif isinstance(data, str) or isinstance(data, np.ndarray):
            return data
        elif isinstance(data, collections.Mapping):
            return dict(map(convert, data.items()))
        elif isinstance(data, collections.Iterable):
            return type(data)(map(convert, data))
        else:
            return data

    for file in os.listdir(rec_dir):
        rec_file = os.path.join(rec_dir, file)
        try:
            rec_object = load_object(rec_file)
            converted_object = convert(rec_object)
            if converted_object != rec_object:
                logger.info(
                    'Converted `{}` from bytes to unicode'.format(file))
                save_object(converted_object, rec_file)
        except (ValueError, IsADirectoryError):
            continue
        # except TypeError:
        #     logger.error('TypeError when parsing `{}`'.format(file))
        #     continue

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Capture Software Version'] = 'v0.8.8'

    with open(meta_info_path, 'w', newline='') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #34
def finish_calibration(g_pool, pupil_list, ref_list):
    method, result = select_method_and_perform_calibration(g_pool, pupil_list, ref_list)

    # Start mapper / announce error
    g_pool.active_calibration_plugin.notify_all(result)
    if result["subject"] == "calibration.failed":
        return

    ts = g_pool.get_timestamp()

    # Announce success
    g_pool.active_calibration_plugin.notify_all(
        {
            "subject": "calibration.successful",
            "method": method,
            "timestamp": ts,
            "record": True,
        }
    )

    # Announce calibration data
    user_calibration_data = {
        "timestamp": ts,
        "pupil_list": pupil_list,
        "ref_list": ref_list,
        "calibration_method": method,
        "mapper_name": result["name"],
        "mapper_args": result["args"],
    }
    fm.save_object(
        user_calibration_data, os.path.join(g_pool.user_dir, "user_calibration_data")
    )
    g_pool.active_calibration_plugin.notify_all(
        {
            "subject": "calibration.calibration_data",
            "record": True,
            **user_calibration_data,
        }
    )
Example #35
def finish_calibration(g_pool, pupil_list, ref_list):
    method, result = select_calibration_method(g_pool, pupil_list, ref_list)
    g_pool.active_calibration_plugin.notify_all(result)
    if result['subject'] != 'calibration.failed':
        ts = g_pool.get_timestamp()
        g_pool.active_calibration_plugin.notify_all({'subject': 'calibration.successful',
                                                     'method': method,
                                                     'timestamp': ts,
                                                     'record': True})

        g_pool.active_calibration_plugin.notify_all({'subject': 'calibration.calibration_data',
                                                     'timestamp': ts,
                                                     'pupil_list': pupil_list,
                                                     'ref_list': ref_list,
                                                     'calibration_method': method,
                                                     'record': True})

        # this is only used by show calibration. TODO: rewrite show calibration.
        user_calibration_data = {'timestamp': ts, 'pupil_list': pupil_list,
                                 'ref_list': ref_list, 'calibration_method': method}

        save_object(user_calibration_data, os.path.join(g_pool.user_dir, "user_calibration_data"))
Example #36
def update_recording_v03_to_v074(rec_dir):
    logger.info("Updating recording from v0.3x format to v0.7.4 format")
    pupilgaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
    gaze_list = []
    pupil_list = []

    for datum in pupilgaze_array:
        gaze_x,gaze_y,pupil_x,pupil_y,ts,confidence = datum
        #some bogus size and confidence as we did not save it back then
        pupil_list.append({'timestamp':ts,'confidence':confidence,'id':0,'norm_pos':[pupil_x,pupil_y],'diameter':50,'method':'2d python'})
        gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[gaze_x,gaze_y],'base':[pupil_list[-1]]})

    pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass

    ts_path     = os.path.join(rec_dir,"world_timestamps.npy")
    ts_path_old = os.path.join(rec_dir,"timestamps.npy")
    if not os.path.isfile(ts_path) and os.path.isfile(ts_path_old):
        os.rename(ts_path_old, ts_path)
Example #37
def update_recording_0v4_to_current(rec_dir):
    logger.info("Updatig recording from v0.4x format to current version")
    gaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
    pupil_array = np.load(os.path.join(rec_dir,'pupil_positions.npy'))
    gaze_list = []
    pupil_list = []

    for datum in pupil_array:
        ts, confidence, id, x, y, diameter = datum[:6]
        pupil_list.append({'timestamp':ts,'confidence':confidence,'id':id,'norm_pos':[x,y],'diameter':diameter})

    pupil_by_ts = dict([(p['timestamp'],p) for p in pupil_list])

    for datum in gaze_array:
        ts,confidence,x,y, = datum
        gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[x,y],'base':[pupil_by_ts.get(ts,None)]})

    pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #38
def update_recording_v04_to_v074(rec_dir):
    logger.info("Updating recording from v0.4x format to v0.7.4 format")
    gaze_array = np.load(os.path.join(rec_dir, "gaze_positions.npy"))
    pupil_array = np.load(os.path.join(rec_dir, "pupil_positions.npy"))
    gaze_list = []
    pupil_list = []

    for datum in pupil_array:
        ts, confidence, id, x, y, diameter = datum[:6]
        pupil_list.append(
            {
                "timestamp": ts,
                "confidence": confidence,
                "id": id,
                "norm_pos": [x, y],
                "diameter": diameter,
                "method": "2d python",
                "ellipse": {"angle": 0.0, "center": [0.0, 0.0], "axes": [0.0, 0.0]},
            }
        )

    pupil_by_ts = dict([(p["timestamp"], p) for p in pupil_list])

    for datum in gaze_array:
        ts, confidence, x, y, = datum
        gaze_list.append(
            {
                "timestamp": ts,
                "confidence": confidence,
                "norm_pos": [x, y],
                "base": [pupil_by_ts.get(ts, None)],
            }
        )

    pupil_data = {"pupil_positions": pupil_list, "gaze_positions": gaze_list}
    try:
        fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #39
def update_recording_0v4_to_current(rec_dir):
    logger.info("Updatig recording from v0.4x format to current version")
    gaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
    pupil_array = np.load(os.path.join(rec_dir,'pupil_positions.npy'))
    gaze_list = []
    pupil_list = []

    for datum in pupil_array:
        ts, confidence, id, x, y, diameter = datum[:6]
        pupil_list.append({'timestamp':ts,'confidence':confidence,'id':id,'norm_pos':[x,y],'diameter':diameter})

    pupil_by_ts = dict([(p['timestamp'],p) for p in pupil_list])

    for datum in gaze_array:
        ts,confidence,x,y, = datum
        gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[x,y],'base':[pupil_by_ts.get(ts,None)]})

    pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #40
def update_recording_v04_to_v074(rec_dir):
    logger.info("Updating recording from v0.4x format to v0.7.4 format")
    gaze_array = np.load(os.path.join(rec_dir, 'gaze_positions.npy'))
    pupil_array = np.load(os.path.join(rec_dir, 'pupil_positions.npy'))
    gaze_list = []
    pupil_list = []

    for datum in pupil_array:
        ts, confidence, id, x, y, diameter = datum[:6]
        pupil_list.append({
            'timestamp': ts,
            'confidence': confidence,
            'id': id,
            'norm_pos': [x, y],
            'diameter': diameter,
            'method': '2d python',
            'ellipse': {
                'angle': 0.0,
                'center': [0.0, 0.0],
                'axes': [0.0, 0.0]
            }
        })

    pupil_by_ts = dict([(p['timestamp'], p) for p in pupil_list])

    for datum in gaze_array:
        ts, confidence, x, y, = datum
        gaze_list.append({
            'timestamp': ts,
            'confidence': confidence,
            'norm_pos': [x, y],
            'base': [pupil_by_ts.get(ts, None)]
        })

    pupil_data = {'pupil_positions': pupil_list, 'gaze_positions': gaze_list}
    try:
        fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #41
def update_recording_v073_to_v074(rec_dir):
    logger.info("Updating recording from v0.7x format to v0.7.4 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    modified = False
    for p in pupil_data['pupil_positions']:
        if p['method'] == "3D c++":
            p['method'] = "3d c++"
            try:
                p['projected_sphere'] = p.pop('projectedSphere')
            except:
                p['projected_sphere'] = {'center':(0,0),'angle':0,'axes':(0,0)}
            p['model_confidence'] = p.pop('modelConfidence')
            p['model_id'] = p.pop('modelID')
            p['circle_3d'] = p.pop('circle3D')
            p['diameter_3d'] = p.pop('diameter_3D')
            modified = True
    if modified:
        save_object(load_object(os.path.join(rec_dir, "pupil_data")),os.path.join(rec_dir, "pupil_data_old"))
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #42
def update_recording_v03_to_v074(rec_dir):
    logger.info("Updating recording from v0.3x format to v0.7.4 format")
    pupilgaze_array = np.load(os.path.join(rec_dir,'gaze_positions.npy'))
    gaze_list = []
    pupil_list = []

    for datum in pupilgaze_array:
        gaze_x,gaze_y,pupil_x,pupil_y,ts,confidence = datum
        #some bogus size and confidence as we did not save it back then
        pupil_list.append({'timestamp':ts,'confidence':confidence,'id':0,'norm_pos':[pupil_x,pupil_y],'diameter':50,'method':'2d python'})
        gaze_list.append({'timestamp':ts,'confidence':confidence,'norm_pos':[gaze_x,gaze_y],'base':[pupil_list[-1]]})

    pupil_data = {'pupil_positions':pupil_list,'gaze_positions':gaze_list}
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass

    ts_path     = os.path.join(rec_dir,"world_timestamps.npy")
    ts_path_old = os.path.join(rec_dir,"timestamps.npy")
    if not os.path.isfile(ts_path) and os.path.isfile(ts_path_old):
        os.rename(ts_path_old, ts_path)
Example #43
def update_recording_v03_to_v074(rec_dir):
    logger.info("Updating recording from v0.3x format to v0.7.4 format")
    pupilgaze_array = np.load(os.path.join(rec_dir, "gaze_positions.npy"))
    gaze_list = []
    pupil_list = []

    for datum in pupilgaze_array:
        gaze_x, gaze_y, pupil_x, pupil_y, ts, confidence = datum
        # some bogus size and confidence as we did not save it back then
        pupil_list.append(
            {
                "timestamp": ts,
                "confidence": confidence,
                "id": 0,
                "norm_pos": [pupil_x, pupil_y],
                "diameter": 50,
                "method": "2d python",
            }
        )
        gaze_list.append(
            {
                "timestamp": ts,
                "confidence": confidence,
                "norm_pos": [gaze_x, gaze_y],
                "base": [pupil_list[-1]],
            }
        )

    pupil_data = {"pupil_positions": pupil_list, "gaze_positions": gaze_list}
    try:
        fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass

    ts_path = os.path.join(rec_dir, "world_timestamps.npy")
    ts_path_old = os.path.join(rec_dir, "timestamps.npy")
    if not os.path.isfile(ts_path) and os.path.isfile(ts_path_old):
        os.rename(ts_path_old, ts_path)
Example #44
def check_for_worldless_recording(rec_dir):
    logger.info("Checking for world-less recording")
    valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg")
    existing_videos = [
        f for f in glob.glob(os.path.join(rec_dir, "world.*"))
        if os.path.splitext(f)[1] in valid_ext
    ]

    if not existing_videos:
        min_ts = np.inf
        max_ts = -np.inf
        for f in glob.glob(os.path.join(rec_dir, "eye*_timestamps.npy")):
            try:
                eye_ts = np.load(f)
                assert len(eye_ts.shape) == 1
                assert eye_ts.shape[0] > 1
                min_ts = min(min_ts, eye_ts[0])
                max_ts = max(max_ts, eye_ts[-1])
            except (FileNotFoundError, AssertionError):
                pass

        error_msg = "Could not generate world timestamps from eye timestamps. This is an invalid recording."
        assert -np.inf < min_ts < max_ts < np.inf, error_msg

        logger.warning(
            "No world video found. Constructing an artificial replacement.")

        frame_rate = 30
        timestamps = np.arange(min_ts, max_ts, 1 / frame_rate)
        np.save(os.path.join(rec_dir, "world_timestamps"), timestamps)
        fm.save_object(
            {
                "frame_rate": frame_rate,
                "frame_size": (1280, 720),
                "version": 0
            },
            os.path.join(rec_dir, "world.fake"),
        )
Example #45
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    match_pattern = "*.time"
    rename_set = RenameSet(rec_dir, match_pattern)
    rename_set.load_intrinsics()
    rename_set.rename("Pupil Cam([0-3]) ID0", "eye0")
    rename_set.rename("Pupil Cam([0-3]) ID1", "eye1")
    rename_set.rename("Pupil Cam([0-2]) ID2", "world")
    # Rewrite .time file to .npy file
    rewrite_time = RenameSet(rec_dir, match_pattern, ["time"])
    rewrite_time.rewrite_time("_timestamps.npy")
    pupil_data_loc = os.path.join(rec_dir, "pupil_data")
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        fm.save_object(
            {
                "pupil_positions": [],
                "gaze_positions": [],
                "notifications": []
            },
            pupil_data_loc,
        )
Example #46
def finish_calibration(g_pool, pupil_list, ref_list):
    method, result = select_calibration_method(g_pool, pupil_list, ref_list)
    g_pool.active_calibration_plugin.notify_all(result)
    if result["subject"] != "calibration.failed":
        ts = g_pool.get_timestamp()
        g_pool.active_calibration_plugin.notify_all(
            {
                "subject": "calibration.successful",
                "method": method,
                "timestamp": ts,
                "record": True,
            }
        )

        g_pool.active_calibration_plugin.notify_all(
            {
                "subject": "calibration.calibration_data",
                "timestamp": ts,
                "pupil_list": pupil_list,
                "ref_list": ref_list,
                "calibration_method": method,
                "record": True,
            }
        )

        # this is only used by show calibration. TODO: rewrite show calibration.
        user_calibration_data = {
            "timestamp": ts,
            "pupil_list": pupil_list,
            "ref_list": ref_list,
            "calibration_method": method,
        }

        save_object(
            user_calibration_data,
            os.path.join(g_pool.user_dir, "user_calibration_data"),
        )
Example #47
def update_recording_v093_to_v094(rec_dir):
    logger.info("Updating recording from v0.9.3 to v0.9.4.")
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for file in os.listdir(rec_dir):
        if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'):
            continue
        rec_file = os.path.join(rec_dir, file)

        try:
            rec_object = load_object(rec_file,allow_legacy=False)
            save_object(rec_object, rec_file)
        except:
            try:
                rec_object = load_object(rec_file,allow_legacy=True)
                save_object(rec_object, rec_file)
                logger.info('Converted `{}` from pickle to msgpack'.format(file))
            except:
                logger.warning("did not convert {}".format(rec_file))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.9.4'
    update_meta_info(rec_dir, meta_info)
Example #48
def update_recording_v093_to_v094(rec_dir):
    logger.info("Updating recording from v0.9.3 to v0.9.4.")
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for file in os.listdir(rec_dir):
        if file.startswith(".") or os.path.splitext(file)[1] in (".mp4", ".avi"):
            continue
        rec_file = os.path.join(rec_dir, file)

        try:
            rec_object = fm.load_object(rec_file, allow_legacy=False)
            fm.save_object(rec_object, rec_file)
        except:
            try:
                rec_object = fm.load_object(rec_file, allow_legacy=True)
                fm.save_object(rec_object, rec_file)
                logger.info("Converted `{}` from pickle to msgpack".format(file))
            except:
                logger.warning("did not convert {}".format(rec_file))

    with open(meta_info_path, "r", encoding="utf-8") as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info["Data Format Version"] = "v0.9.4"
    update_meta_info(rec_dir, meta_info)
Example #49
def rectify_gaze_data(path, K, D, rect_camera_matrix):

    #if not os.path.exists(path + 'pupil_data_original'):
    #    data = load_object(path + 'pupil_data')
    #    save_object(data, path + 'pupil_data_original')
    #else:
    #    data = load_object(path + 'pupil_data_original')

    data = load_object(path + 'pupil_data')

    if not 'gaze_positions' in data:
        print("no gaze_positions", data.keys())
        return

    gazes = data['gaze_positions']
    for g in gazes:
        gaze = denormalize(g['norm_pos'], 1280, 720)
        gaze = np.float32(gaze).reshape(-1, 1, 2)
        gaze = cv2.fisheye.undistortPoints(gaze, K, D,
                                           P=rect_camera_matrix).reshape(2)
        gaze = normalize(gaze, 1280, 720)
        g['norm_pos'] = gaze

    save_object(data, path + 'pupil_data_corrected')
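Example #49 also depends on denormalize and normalize helpers that are not shown. A minimal sketch under the assumption that norm_pos is expressed in [0, 1] image coordinates scaled by width and height; Pupil's own helpers additionally support a y-flip, which is omitted here:

def denormalize(pos, width, height):
    # map normalized [0, 1] coordinates to pixel coordinates (assumed convention)
    return pos[0] * width, pos[1] * height

def normalize(pos, width, height):
    # map pixel coordinates back to the normalized [0, 1] range
    return pos[0] / width, pos[1] / height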
Example #50
def update_recording_bytes_to_unicode(rec_dir):
    logger.info("Updating recording from bytes to unicode.")

    def convert(data):
        if isinstance(data, bytes):
            return data.decode()
        elif isinstance(data, str) or isinstance(data, np.ndarray):
            return data
        elif isinstance(data, collections.Mapping):
            return dict(map(convert, data.items()))
        elif isinstance(data, collections.Iterable):
            return type(data)(map(convert, data))
        else:
            return data

    for file in os.listdir(rec_dir):
        if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4',
                                                                 '.avi'):
            continue
        rec_file = os.path.join(rec_dir, file)
        try:
            rec_object = fm.load_object(rec_file)
            converted_object = convert(rec_object)
            if converted_object != rec_object:
                logger.info(
                    'Converted `{}` from bytes to unicode'.format(file))
                fm.save_object(converted_object, rec_file)
        except (fm.UnpicklingError, IsADirectoryError):
            continue

    # manually convert k v dicts.
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    with open(meta_info_path, 'w', newline='') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #51
    def stop(self):
        # explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        camera_calibration = load_camera_calibration(self.g_pool)
        if camera_calibration is not None:
            save_object(camera_calibration,
                        os.path.join(self.rec_path, "camera_calibration"))
        else:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a', newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, {
                    'Duration Time':
                    self.get_rec_time_str(),
                    'World Camera Frames':
                    self.frame_count,
                    'World Camera Resolution':
                    str(self.g_pool.capture.frame_size[0]) + "x" +
                    str(self.g_pool.capture.frame_size[1]),
                    'Capture Software Version':
                    self.g_pool.version,
                    'Data Format Version':
                    self.g_pool.version,
                    'System Info':
                    get_system_info()
                },
                                               append=True)
        except Exception:
            logger.exception(
                "Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"),
                      'w',
                      newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception(
                "Could not save userdata. Please report this bug!")

        self.close_info_menu()

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all({
            'subject': 'recording.stopped',
            'rec_path': self.rec_path
        })
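
The metadata block in stop() uses csv_utils.write_key_value_file to persist key-value pairs into info.csv. A hedged usage sketch of the same helper follows; the path and extra keys are made up for illustration.

import csv_utils  # the project's key-value CSV helper used in stop() above

extra_meta = {"Export Software Version": "v1.0", "Notes": "pilot session"}
# append=True mirrors the call in stop(): rows are added to an existing info.csv
with open("recordings/2016_01_01/000/info.csv", "a", newline="") as csvfile:
    csv_utils.write_key_value_file(csvfile, extra_meta, append=True)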
Example #52
0
    def save_surface_statsics_to_file(self, export_range, export_dir):
        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visisbility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv

        """
        metrics_dir = os.path.join(export_dir, 'surfaces')
        section = export_range
        in_mark = export_range.start
        out_mark = export_range.stop
        logger.info("exporting metrics to {}".format(metrics_dir))
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning(
                    "Could not make metrics dir {}".format(metrics_dir))
                return

        with open(os.path.join(metrics_dir, 'surface_visibility.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count', frame_count))
            csv_writer.writerow([])  # blank spacer row
            csv_writer.writerow(('surface_name', 'visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning(
                        "The surface is not cached. Please wait for the cacher to collect data."
                    )
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow((s.name, visible_count))
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir, 'surface_gaze_distribution.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # gaze distribution report
            gaze_in_section = list(
                chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(
                ('total_gaze_point_count', len(gaze_in_section)))
            csv_writer.writerow([])  # blank spacer row
            csv_writer.writerow(('surface_name', 'gaze_count'))

            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set(
                    [gp['base_data']['timestamp'] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow((s.name, len(gaze_on_srf)))

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf)))
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir, 'surface_events.csv'),
                  'w',
                  encoding='utf-8',
                  newline='') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter=',')

            # surface events report
            csv_writer.writerow(('frame_number', 'timestamp', 'surface_name',
                                 'surface_uid', 'event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
                    events.append({
                        'frame_id': enter_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'enter'
                    })
                    events.append({
                        'frame_id': exit_frame_id,
                        'srf_name': s.name,
                        'srf_uid': s.uid,
                        'event': 'exit'
                    })

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow(
                    (e['frame_id'], self.g_pool.timestamps[e['frame_id']],
                     e['srf_name'], e['srf_uid'], e['event']))
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_' + s.name.replace('/', '') + '_' + s.uid

            # save surface_positions as pickle file
            save_object(
                s.cache.to_list(),
                os.path.join(metrics_dir, 'srf_positions' + surface_name))

            # save surface_positions as csv
            with open(os.path.join(metrics_dir,
                                   'srf_positions' + surface_name + '.csv'),
                      'w',
                      encoding='utf-8',
                      newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen',
                                     'm_from_screen', 'detected_markers'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow(
                                (idx, ts, ref_srf_data['m_to_screen'],
                                 ref_srf_data['m_from_screen'],
                                 ref_srf_data['detected_markers']))

            # save gaze on srf as csv.
            with open(os.path.join(
                    metrics_dir,
                    'gaze_positions_on_surface' + surface_name + '.csv'),
                      'w',
                      encoding='utf-8',
                      newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(
                    ('world_timestamp', 'world_frame_idx', 'gaze_timestamp',
                     'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
                for idx, ts, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)),
                        self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                csv_writer.writerow(
                                    (ts, idx, gp['base_data']['timestamp'],
                                     gp['norm_pos'][0], gp['norm_pos'][1],
                                     gp['norm_pos'][0] *
                                     s.real_world_size['x'],
                                     gp['norm_pos'][1] *
                                     s.real_world_size['y'], gp['on_srf']))

            # save fixation on srf as csv.
            with open(os.path.join(
                    metrics_dir,
                    'fixations_on_surface' + surface_name + '.csv'),
                      'w',
                      encoding='utf-8',
                      newline='') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter=',')
                csv_writer.writerow(
                    ('id', 'start_timestamp', 'duration', 'start_frame',
                     'end_frame', 'norm_pos_x', 'norm_pos_y', 'x_scaled',
                     'y_scaled', 'on_srf'))
                fixations_on_surface = []
                for idx, ref_srf_data in zip(
                        range(len(self.g_pool.timestamps)), s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(
                                    idx, ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)

                removed_duplicates = dict([
                    (f['base_data']['id'], f) for f in fixations_on_surface
                ]).values()
                for f_on_s in removed_duplicates:
                    f = f_on_s['base_data']
                    f_x, f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow(
                        (f['id'], f['timestamp'], f['duration'],
                         f['start_frame_index'], f['end_frame_index'], f_x,
                         f_y, f_x * s.real_world_size['x'],
                         f_y * s.real_world_size['y'], f_on_srf))

            logger.info(
                "Saved surface position, gaze, and fixation data for '{}' with uid: '{}'"
                .format(s.name, s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(
                    os.path.join(metrics_dir,
                                 'heatmap' + surface_name + '.png'), s.heatmap)

        logger.info("Done exporting reference surface data.")
Example #53
0
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, "*.time")
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name = os.path.splitext(time_file_name)[0]

        potential_locs = [
            os.path.join(rec_dir, time_name + ext) for ext in (".mjpeg", ".mp4", ".m4a")
        ]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            media_loc = existing_locs[0]

        if time_name in (
            "Pupil Cam1 ID0",
            "Pupil Cam1 ID1",
            "Pupil Cam2 ID0",
            "Pupil Cam2 ID1",
        ):
            time_name = "eye" + time_name[-1]  # rename eye files
        elif time_name in ("Pupil Cam1 ID2", "Logitech Webcam C930e"):
            video = av.open(media_loc, "r")
            frame_size = (
                video.streams.video[0].format.width,
                video.streams.video[0].format.height,
            )
            del video
            intrinsics = load_intrinsics(rec_dir, time_name, frame_size)
            intrinsics.save(rec_dir, "world")

            time_name = "world"  # assume world file
        elif time_name.startswith("audio_"):
            time_name = "audio"

        timestamps = np.fromfile(time_loc, dtype=">f8")
        timestamp_loc = os.path.join(rec_dir, "{}_timestamps.npy".format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == "audio":
            media_dst = os.path.join(rec_dir, time_name) + ".mp4"
        else:
            media_dst = (
                os.path.join(rec_dir, time_name) + os.path.splitext(media_loc)[1]
            )
        logger.info(
            'Renaming "{}" to "{}"'.format(
                os.path.split(media_loc)[1], os.path.split(media_dst)[1]
            )
        )
        try:
            os.rename(media_loc, media_dst)
        except FileExistsError:
            # Only happens on Windows. Behavior on Unix is to overwrite the existing file.
            # To mirror this behaviour we need to delete the old file and try renaming the new one again.
            os.remove(media_dst)
            os.rename(media_loc, media_dst)

    pupil_data_loc = os.path.join(rec_dir, "pupil_data")
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        fm.save_object(
            {"pupil_positions": [], "gaze_positions": [], "notifications": []},
            pupil_data_loc,
        )
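
The core of this conversion is the timestamp step: Pupil Mobile writes timestamps as big-endian float64 into "<stream name>.time" files, which are read with numpy and re-saved in .npy format. A minimal sketch of just that step (paths are hypothetical):

import numpy as np

timestamps = np.fromfile("recording/Pupil Cam1 ID2.time", dtype=">f8")
np.save("recording/world_timestamps.npy", timestamps)
print("converted {} timestamps".format(len(timestamps)))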
Example #54
0
    def _save_data_to_file(self, filepath, data):
        dict_representation = {"version": self._item_class.version, "data": data}
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        fm.save_object(dict_representation, filepath)
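
This helper wraps the payload in a small {"version", "data"} container before saving. A hedged round-trip sketch follows; it assumes the fm alias refers to the project's file_methods module and uses a made-up file name.

import file_methods as fm  # assumed module behind the `fm` alias used above

payload = {"version": 1, "data": [1, 2, 3]}
fm.save_object(payload, "example_versioned_file")
loaded = fm.load_object("example_versioned_file")
assert loaded == payload  # save_object/load_object round-trip the container unchanged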
Example #55
0
    def cleanup(self):
        session_data = {'dx': self.x_offset, 'dy': self.y_offset, 'version': 0}
        save_object(session_data,
                    os.path.join(self.result_dir, 'manual_gaze_correction'))
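
The offsets written here can later be loaded and applied to normalized gaze positions. A hypothetical sketch (load_object is assumed to be the counterpart of the save_object call above, from the project's file_methods module; the directory and gaze values are made up):

import os
from file_methods import load_object  # assumed counterpart of save_object above

session_data = load_object(os.path.join("offline_data", "manual_gaze_correction"))
dx, dy = session_data["dx"], session_data["dy"]
x, y = 0.48, 0.52  # example normalized gaze position
print("corrected gaze:", (x + dx, y + dy))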
Example #56
0
    def stop(self):
        # explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        if self.record_eye:
            for alive, pipe in zip(self.g_pool.eyes_are_alive, self.g_pool.eye_pipes):
                if alive.value:
                    pipe.send(('Rec_Stop', None))

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path, ts)

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info("No surface_definitions data found. You may want this if you do marker tracking.")
        try:
            copy2(os.path.join(self.g_pool.user_dir, "user_calibration_data"),
                  os.path.join(self.rec_path, "user_calibration_data"))
        except:
            logger.warning("No user calibration data found. Please calibrate first.")

        camera_calibration = load_camera_calibration(self.g_pool)
        if camera_calibration is not None:
            save_object(camera_calibration,
                        os.path.join(self.rec_path, "camera_calibration"))
        else:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a') as f:
                f.write("Duration Time\t" + self.get_rec_time_str() + "\n")
                f.write("World Camera Frames\t" + str(self.frame_count) + "\n")
                f.write("World Camera Resolution\t" + str(self.g_pool.capture.frame_size[0]) +
                        "x" + str(self.g_pool.capture.frame_size[1]) + "\n")
                f.write("Capture Software Version\t%s\n" % self.g_pool.version)
                f.write("System Info\t%s" % get_system_info())
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w') as f:
                for name, val in self.user_info.items():  # iteritems() exists only in Python 2
                    f.write("%s\t%s\n" % (name, val))
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")


        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all({'subject': 'rec_stopped',
                         'rec_path': self.rec_path,
                         'network_propagate': True})
Example #57
0
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, '*.time')
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name = os.path.splitext(time_file_name)[0]

        potential_locs = [
            os.path.join(rec_dir, time_name + ext)
            for ext in ('.mjpeg', '.mp4', '.m4a')
        ]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            video_loc = existing_locs[0]

        if time_name in ('Pupil Cam1 ID0', 'Pupil Cam1 ID1', 'Pupil Cam2 ID0',
                         'Pupil Cam2 ID1'):
            time_name = 'eye' + time_name[-1]  # rename eye files
        elif time_name in ('Pupil Cam1 ID2', 'Logitech Webcam C930e'):
            video = av.open(video_loc, 'r')
            frame_size = (video.streams.video[0].format.width,
                          video.streams.video[0].format.height)
            del video
            intrinsics = load_intrinsics(rec_dir, time_name, frame_size)
            intrinsics.save(rec_dir, 'world')

            time_name = 'world'  # assume world file
        elif time_name.startswith('audio_'):
            time_name = 'audio'

        timestamps = np.fromfile(time_loc, dtype='>f8')
        timestamp_loc = os.path.join(rec_dir,
                                     '{}_timestamps.npy'.format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == 'audio':
            video_dst = os.path.join(rec_dir, time_name) + '.mp4'
        else:
            video_dst = os.path.join(
                rec_dir, time_name) + os.path.splitext(video_loc)[1]
        logger.info('Renaming "{}" to "{}"'.format(
            os.path.split(video_loc)[1],
            os.path.split(video_dst)[1]))
        os.rename(video_loc, video_dst)

    pupil_data_loc = os.path.join(rec_dir, 'pupil_data')
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        fm.save_object(
            {
                'pupil_positions': [],
                'gaze_positions': [],
                'notifications': []
            }, pupil_data_loc)
Example #58
0
def finish_calibration(g_pool, pupil_list, ref_list):

    if not (pupil_list and ref_list):
        logger.error(not_enough_data_error_msg)
        g_pool.active_calibration_plugin.notify_all({
            'subject':
            'calibration.failed',
            'reason':
            not_enough_data_error_msg,
            'timestamp':
            g_pool.get_timestamp(),
            'record':
            True
        })
        return

    camera_intrinsics = load_camera_calibration(g_pool)

    # match eye data and check whether it is binocular and/or monocular
    pupil0 = [p for p in pupil_list if p['id'] == 0]
    pupil1 = [p for p in pupil_list if p['id'] == 1]

    #TODO unify this and don't do both
    matched_binocular_data = calibrate.closest_matches_binocular(
        ref_list, pupil_list)
    matched_pupil0_data = calibrate.closest_matches_monocular(ref_list, pupil0)
    matched_pupil1_data = calibrate.closest_matches_monocular(ref_list, pupil1)

    if len(matched_pupil0_data) > len(matched_pupil1_data):
        matched_monocular_data = matched_pupil0_data
    else:
        matched_monocular_data = matched_pupil1_data

    logger.info('Collected %s monocular calibration data points.' %
                len(matched_monocular_data))
    logger.info('Collected %s binocular calibration data points.' %
                len(matched_binocular_data))

    mode = g_pool.detection_mapping_mode

    if mode == '3d' and not camera_intrinsics:
        mode = '2d'
        logger.warning(
            "Please calibrate your world camera using 'camera intrinsics estimation' for 3d gaze mapping."
        )

    if mode == '3d':
        hardcoded_translation0 = np.array([20, 15, -20])
        hardcoded_translation1 = np.array([-40, 15, -20])
        if matched_binocular_data:
            method = 'binocular 3d model'

            # TODO: model the world as a cv2 pinhole camera with distortion and focal length in ceres.
            # right now we solve using a few permutations of K
            smallest_residual = 1000
            scales = list(np.linspace(0.7, 1.4, 20))
            K = camera_intrinsics["camera_matrix"]

            for s in scales:
                scale = np.ones(K.shape)
                scale[0, 0] *= s
                scale[1, 1] *= s
                camera_intrinsics["camera_matrix"] = K * scale

                ref_dir, gaze0_dir, gaze1_dir = calibrate.preprocess_3d_data(
                    matched_binocular_data,
                    camera_intrinsics=camera_intrinsics)

                if len(ref_dir) < 1 or len(gaze0_dir) < 1 or len(
                        gaze1_dir) < 1:
                    logger.error(not_enough_data_error_msg)
                    g_pool.active_calibration_plugin.notify_all({
                        'subject':
                        'calibration.failed',
                        'reason':
                        not_enough_data_error_msg,
                        'timestamp':
                        g_pool.get_timestamp(),
                        'record':
                        True
                    })
                    return

                sphere_pos0 = pupil0[-1]['sphere']['center']
                sphere_pos1 = pupil1[-1]['sphere']['center']

                initial_R0, initial_t0 = find_rigid_transform(
                    np.array(gaze0_dir) * 500,
                    np.array(ref_dir) * 500)
                initial_rotation0 = math_helper.quaternion_from_rotation_matrix(
                    initial_R0)
                initial_translation0 = np.array(initial_t0).reshape(3)

                initial_R1, initial_t1 = find_rigid_transform(
                    np.array(gaze1_dir) * 500,
                    np.array(ref_dir) * 500)
                initial_rotation1 = math_helper.quaternion_from_rotation_matrix(
                    initial_R1)
                initial_translation1 = np.array(initial_t1).reshape(3)

                eye0 = {
                    "observations": gaze0_dir,
                    "translation": hardcoded_translation0,
                    "rotation": initial_rotation0,
                    'fix': ['translation']
                }
                eye1 = {
                    "observations": gaze1_dir,
                    "translation": hardcoded_translation1,
                    "rotation": initial_rotation1,
                    'fix': ['translation']
                }
                world = {
                    "observations": ref_dir,
                    "translation": (0, 0, 0),
                    "rotation": (1, 0, 0, 0),
                    'fix': ['translation', 'rotation']
                }
                initial_observers = [eye0, eye1, world]
                initial_points = np.array(ref_dir) * 500

                success, residual, observers, points = bundle_adjust_calibration(
                    initial_observers, initial_points, fix_points=False)

                if residual <= smallest_residual:
                    smallest_residual = residual
                    scales[-1] = s

            if not success:
                g_pool.active_calibration_plugin.notify_all({
                    'subject':
                    'calibration.failed',
                    'reason':
                    solver_failed_to_converge_error_msg,
                    'timestamp':
                    g_pool.get_timestamp(),
                    'record':
                    True
                })
                logger.error("Calibration solver faild to converge.")
                return

            eye0, eye1, world = observers

            t_world0 = np.array(eye0['translation'])
            R_world0 = math_helper.quaternion_rotation_matrix(
                np.array(eye0['rotation']))
            t_world1 = np.array(eye1['translation'])
            R_world1 = math_helper.quaternion_rotation_matrix(
                np.array(eye1['rotation']))

            def toWorld0(p):
                return np.dot(R_world0, p) + t_world0

            def toWorld1(p):
                return np.dot(R_world1, p) + t_world1

            points_a = []  #world coords
            points_b = []  #eye0 coords
            points_c = []  #eye1 coords
            for a, b, c, point in zip(world['observations'],
                                      eye0['observations'],
                                      eye1['observations'], points):
                line_a = np.array([0, 0, 0]), np.array(a)  #observation as line
                line_b = toWorld0(np.array([0, 0, 0])), toWorld0(
                    b)  #eye0 observation line in world coords
                line_c = toWorld1(np.array([0, 0, 0])), toWorld1(
                    c)  #eye1 observation line in world coords
                close_point_a, _ = math_helper.nearest_linepoint_to_point(
                    point, line_a)
                close_point_b, _ = math_helper.nearest_linepoint_to_point(
                    point, line_b)
                close_point_c, _ = math_helper.nearest_linepoint_to_point(
                    point, line_c)
                points_a.append(close_point_a)
                points_b.append(close_point_b)
                points_c.append(close_point_c)

            # We need to take the sphere position into account:
            # orientation and translation refer to the sphere center,
            # but we want them to refer to the eye camera center.
            # Since the actual translation is in world coordinates, the sphere
            # translation needs to be expressed in world coordinates as well.
            sphere_translation = np.array(sphere_pos0)
            sphere_translation_world = np.dot(R_world0, sphere_translation)
            camera_translation = t_world0 - sphere_translation_world
            eye_camera_to_world_matrix0 = np.eye(4)
            eye_camera_to_world_matrix0[:3, :3] = R_world0
            eye_camera_to_world_matrix0[:3, 3:4] = np.reshape(
                camera_translation, (3, 1))

            sphere_translation = np.array(sphere_pos1)
            sphere_translation_world = np.dot(R_world1, sphere_translation)
            camera_translation = t_world1 - sphere_translation_world
            eye_camera_to_world_matrix1 = np.eye(4)
            eye_camera_to_world_matrix1[:3, :3] = R_world1
            eye_camera_to_world_matrix1[:3, 3:4] = np.reshape(
                camera_translation, (3, 1))

            g_pool.plugins.add(Binocular_Vector_Gaze_Mapper,
                               args={
                                   'eye_camera_to_world_matrix0':
                                   eye_camera_to_world_matrix0,
                                   'eye_camera_to_world_matrix1':
                                   eye_camera_to_world_matrix1,
                                   'camera_intrinsics': camera_intrinsics,
                                   'cal_points_3d': points,
                                   'cal_ref_points_3d': points_a,
                                   'cal_gaze_points0_3d': points_b,
                                   'cal_gaze_points1_3d': points_c
                               })

        elif matched_monocular_data:
            method = 'monocular 3d model'

            # TODO: model the world as a cv2 pinhole camera with distortion and focal length in ceres.
            # right now we solve using a few permutations of K
            smallest_residual = 1000
            scales = list(np.linspace(0.7, 1.4, 20))
            K = camera_intrinsics["camera_matrix"]
            for s in scales:
                scale = np.ones(K.shape)
                scale[0, 0] *= s
                scale[1, 1] *= s
                camera_intrinsics["camera_matrix"] = K * scale
                ref_dir, gaze_dir, _ = calibrate.preprocess_3d_data(
                    matched_monocular_data,
                    camera_intrinsics=camera_intrinsics)
                # save_object((ref_dir,gaze_dir),os.path.join(g_pool.user_dir, "testdata"))
                if len(ref_dir) < 1 or len(gaze_dir) < 1:
                    g_pool.active_calibration_plugin.notify_all({
                        'subject':
                        'calibration.failed',
                        'reason':
                        not_enough_data_error_msg,
                        'timestamp':
                        g_pool.get_timestamp(),
                        'record':
                        True
                    })
                    logger.error(not_enough_data_error_msg + " Using:" +
                                 method)
                    return

                ### monocular calibration strategy: minimize the reprojection error by moving the world camera.
                # we fix the eye points and work in the eye coord system.
                initial_R, initial_t = find_rigid_transform(
                    np.array(ref_dir) * 500,
                    np.array(gaze_dir) * 500)
                initial_rotation = math_helper.quaternion_from_rotation_matrix(
                    initial_R)
                initial_translation = np.array(initial_t).reshape(3)
                # this problem is scale invariant, so we scale to some sensible value.

                if matched_monocular_data[0]['pupil']['id'] == 0:
                    hardcoded_translation = hardcoded_translation0
                else:
                    hardcoded_translation = hardcoded_translation1

                eye = {
                    "observations": gaze_dir,
                    "translation": (0, 0, 0),
                    "rotation": (1, 0, 0, 0),
                    'fix': ['translation', 'rotation']
                }
                world = {
                    "observations": ref_dir,
                    "translation": np.dot(initial_R, -hardcoded_translation),
                    "rotation": initial_rotation,
                    'fix': ['translation']
                }
                initial_observers = [eye, world]
                initial_points = np.array(gaze_dir) * 500

                success, residual, observers, points_in_eye = bundle_adjust_calibration(
                    initial_observers, initial_points, fix_points=True)
                if residual <= smallest_residual:
                    smallest_residual = residual
                    scales[-1] = s

            eye, world = observers

            if not success:
                logger.error("Calibration solver faild to converge.")
                g_pool.active_calibration_plugin.notify_all({
                    'subject':
                    'calibration.failed',
                    'reason':
                    solver_failed_to_converge_error_msg,
                    'timestamp':
                    g_pool.get_timestamp(),
                    'record':
                    True
                })
                return

            #pose of the world in eye coords.
            rotation = np.array(world['rotation'])
            t_world = np.array(world['translation'])
            R_world = math_helper.quaternion_rotation_matrix(rotation)

            # inverse is pose of eye in world coords
            R_eye = R_world.T
            t_eye = np.dot(R_eye, -t_world)

            def toWorld(p):
                return np.dot(R_eye, p) + np.array(t_eye)

            points_in_world = [toWorld(p) for p in points_in_eye]

            points_a = []  #world coords
            points_b = []  #cam2 coords
            for a, b, point in zip(world['observations'], eye['observations'],
                                   points_in_world):

                line_a = np.array([0, 0, 0]), np.array(a)  #observation as line
                line_b = toWorld(np.array([0, 0, 0])), toWorld(
                    b)  #cam2 observation line in cam1 coords
                close_point_a, _ = math_helper.nearest_linepoint_to_point(
                    point, line_a)
                close_point_b, _ = math_helper.nearest_linepoint_to_point(
                    point, line_b)
                # print np.linalg.norm(point-close_point_a),np.linalg.norm(point-close_point_b)

                points_a.append(close_point_a)
                points_b.append(close_point_b)

            # We need to take the sphere position into account:
            # orientation and translation refer to the sphere center,
            # but we want them to refer to the eye camera center.
            # Since the actual translation is in world coordinates, the sphere
            # translation needs to be expressed in world coordinates as well.
            sphere_translation = np.array(
                matched_monocular_data[-1]['pupil']['sphere']['center'])
            sphere_translation_world = np.dot(R_eye, sphere_translation)
            camera_translation = t_eye - sphere_translation_world
            eye_camera_to_world_matrix = np.eye(4)
            eye_camera_to_world_matrix[:3, :3] = R_eye
            eye_camera_to_world_matrix[:3, 3:4] = np.reshape(
                camera_translation, (3, 1))

            g_pool.plugins.add(Vector_Gaze_Mapper,
                               args={
                                   'eye_camera_to_world_matrix':
                                   eye_camera_to_world_matrix,
                                   'camera_intrinsics': camera_intrinsics,
                                   'cal_points_3d': points_in_world,
                                   'cal_ref_points_3d': points_a,
                                   'cal_gaze_points_3d': points_b,
                                   'gaze_distance': 500
                               })

        else:
            logger.error(not_enough_data_error_msg)
            g_pool.active_calibration_plugin.notify_all({
                'subject':
                'calibration.failed',
                'reason':
                not_enough_data_error_msg,
                'timestamp':
                g_pool.get_timestamp(),
                'record':
                True
            })
            return

    elif mode == '2d':
        if matched_binocular_data:
            method = 'binocular polynomial regression'
            cal_pt_cloud_binocular = calibrate.preprocess_2d_data_binocular(
                matched_binocular_data)
            cal_pt_cloud0 = calibrate.preprocess_2d_data_monocular(
                matched_pupil0_data)
            cal_pt_cloud1 = calibrate.preprocess_2d_data_monocular(
                matched_pupil1_data)

            map_fn, inliers, params = calibrate.calibrate_2d_polynomial(
                cal_pt_cloud_binocular,
                g_pool.capture.frame_size,
                binocular=True)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({
                    'subject':
                    'calibration.failed',
                    'reason':
                    solver_failed_to_converge_error_msg,
                    'timestamp':
                    g_pool.get_timestamp(),
                    'record':
                    True
                })
                return

            map_fn, inliers, params_eye0 = calibrate.calibrate_2d_polynomial(
                cal_pt_cloud0, g_pool.capture.frame_size, binocular=False)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({
                    'subject':
                    'calibration.failed',
                    'reason':
                    solver_failed_to_converge_error_msg,
                    'timestamp':
                    g_pool.get_timestamp(),
                    'record':
                    True
                })
                return

            map_fn, inliers, params_eye1 = calibrate.calibrate_2d_polynomial(
                cal_pt_cloud1, g_pool.capture.frame_size, binocular=False)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({
                    'subject':
                    'calibration.failed',
                    'reason':
                    solver_failed_to_converge_error_msg,
                    'timestamp':
                    g_pool.get_timestamp(),
                    'record':
                    True
                })
                return

            g_pool.plugins.add(Binocular_Gaze_Mapper,
                               args={
                                   'params': params,
                                   'params_eye0': params_eye0,
                                   'params_eye1': params_eye1
                               })

        elif matched_monocular_data:
            method = 'monocular polynomial regression'
            cal_pt_cloud = calibrate.preprocess_2d_data_monocular(
                matched_monocular_data)
            map_fn, inliers, params = calibrate.calibrate_2d_polynomial(
                cal_pt_cloud, g_pool.capture.frame_size, binocular=False)
            if not inliers.any():
                g_pool.active_calibration_plugin.notify_all({
                    'subject':
                    'calibration.failed',
                    'reason':
                    solver_failed_to_converge_error_msg,
                    'timestamp':
                    g_pool.get_timestamp(),
                    'record':
                    True
                })
                return

            g_pool.plugins.add(Monocular_Gaze_Mapper, args={'params': params})
        else:
            logger.error(not_enough_data_error_msg)
            g_pool.active_calibration_plugin.notify_all({
                'subject':
                'calibration.failed',
                'reason':
                not_enough_data_error_msg,
                'timestamp':
                g_pool.get_timestamp(),
                'record':
                True
            })
            return

    user_calibration_data = {
        'pupil_list': pupil_list,
        'ref_list': ref_list,
        'calibration_method': method
    }
    save_object(user_calibration_data,
                os.path.join(g_pool.user_dir, "user_calibration_data"))
    g_pool.active_calibration_plugin.notify_all({
        'subject':
        'calibration.successful',
        'method':
        method,
        'timestamp':
        g_pool.get_timestamp(),
        'record':
        True
    })
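
Both 3d gaze mappers above are parameterized by a 4x4 eye_camera_to_world_matrix that combines the optimized rotation with the sphere-corrected translation. The hedged sketch below shows how such a matrix maps a point from eye-camera to world coordinates; the rotation and translation values are made up for the example.

import numpy as np

R = np.eye(3)                      # example rotation (identity)
t = np.array([20.0, 15.0, -20.0])  # example translation in mm

eye_camera_to_world_matrix = np.eye(4)
eye_camera_to_world_matrix[:3, :3] = R
eye_camera_to_world_matrix[:3, 3] = t

p_eye = np.array([0.0, 0.0, 500.0, 1.0])      # homogeneous point 500 mm in front of the eye camera
p_world = eye_camera_to_world_matrix @ p_eye  # rotate, then translate
print(p_world[:3])                            # -> [ 20.  15. 480.]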
Example #59
0
    def cleanup(self):
        session_data = {"dx": self.x_offset, "dy": self.y_offset, "version": 0}
        fm.save_object(session_data,
                       os.path.join(self.result_dir, "manual_gaze_correction"))
Example #60
0
    def stop(self, network_propagate=True):
        #explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                try:
                    tx.send((None, None))
                except:
                    logger.warning(
                        "Could not stop eye-recording. Please report this bug!"
                    )

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        gaze_list_path = os.path.join(self.rec_path, "gaze_positions.npy")
        np.save(gaze_list_path, np.asarray(self.gaze_pos_list))

        pupil_list_path = os.path.join(self.rec_path, "pupil_positions.npy")
        np.save(pupil_list_path, np.asarray(self.pupil_pos_list))

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path, ts)

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        try:
            copy2(os.path.join(self.g_pool.user_dir, "cal_pt_cloud.npy"),
                  os.path.join(self.rec_path, "cal_pt_cloud.npy"))
        except:
            logger.warning(
                "No calibration data found. Please calibrate first.")

        try:
            copy2(os.path.join(self.g_pool.user_dir, "camera_calibration"),
                  os.path.join(self.rec_path, "camera_calibration"))
        except:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a') as f:
                f.write("Duration Time\t" + self.get_rec_time_str() + "\n")
                if self.g_pool.binocular:
                    f.write("Eye Mode\tbinocular\n")
                else:
                    f.write("Eye Mode\tmonocular\n")
                f.write("Duration Time\t" + self.get_rec_time_str() + "\n")
                f.write("World Camera Frames\t" + str(self.frame_count) + "\n")
                f.write("World Camera Resolution\t" +
                        str(self.g_pool.capture.frame_size[0]) + "x" +
                        str(self.g_pool.capture.frame_size[1]) + "\n")
                f.write("Capture Software Version\t%s\n" % self.g_pool.version)
                if platform.system() == "Windows":
                    username = os.environ["USERNAME"]
                    sysname, nodename, release, version, machine, _ = platform.uname(
                    )
                else:
                    username = getpass.getuser()
                    try:
                        sysname, nodename, release, version, machine = os.uname(
                        )
                    except:
                        sysname, nodename, release, version, machine = sys.platform, None, None, None, None
                f.write("User\t" + username + "\n")
                f.write("Platform\t" + sysname + "\n")
                f.write("Machine\t" + nodename + "\n")
                f.write("Release\t" + release + "\n")
                f.write("Version\t" + version + "\n")
        except Exception:
            logger.exception(
                "Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w') as f:
                for name, val in self.user_info.items():  # iteritems() exists only in Python 2
                    f.write("%s\t%s\n" % (name, val))
        except Exception:
            logger.exception(
                "Could not save userdata. Please report this bug!")

        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        self.notify_all({
            'name': 'rec_stopped',
            'rec_path': self.rec_path,
            'network_propagate': network_propagate
        })
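
Older recordings like the one written by this stop() method store their metadata as tab-separated key-value lines. The hedged sketch below reads such a file back into a dict; the path is hypothetical, and csv_utils.read_key_value_file (used in other examples above) is the project's own helper for the same job.

meta_info = {}
with open("recording/info.csv", encoding="utf-8") as f:
    for line in f:
        if "\t" in line:
            key, value = line.rstrip("\n").split("\t", 1)
            meta_info[key] = value
print(meta_info.get("Duration Time"))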