Example #1
    def __init__(self,g_pool,annotations=None):
        if annotations:
            super().__init__(g_pool,annotations)
        else:
            super().__init__(g_pool)

        from player_methods import correlate_data

        self.frame_count = len(self.g_pool.timestamps)

        #display layout
        self.padding = 20. # in screen pixels
        self.window_size = 0,0


        # first we try to load annotations previously saved with Pupil Player
        try:
            annotations_list = load_object(os.path.join(self.g_pool.rec_dir, "annotations"))
        except IOError as e:
            #if that fails we assume this is the first time this recording is played and we load annotations from pupil_data
            try:
                notifications_list = load_object(os.path.join(self.g_pool.rec_dir, "pupil_data"))['notifications']
                annotations_list = [n for n in notifications_list if n['subject']=='annotation']
            except (KeyError,IOError) as e:
                annotations_list = []
                logger.debug('No annotations found in pupil_data file.')
            else:
                logger.debug('loaded {} annotations from pupil_data file'.format(len(annotations_list)))
        else:
            logger.debug('loaded {} annotations from annotations file'.format(len(annotations_list)))

        self.annotations_by_frame = correlate_data(annotations_list, self.g_pool.timestamps)
        self.annotations_list = annotations_list
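For orientation, here is a minimal self-contained sketch of the frame-bucketing idea that correlate_data performs above. It is not the real player_methods.correlate_data; it only assumes that each datum carries a 'timestamp' key and that the world timestamps are sorted.

# Illustrative stand-in for the frame bucketing above (not player_methods.correlate_data).
import bisect

def bucket_by_frame(data, world_timestamps):
    """Assign each timestamped datum to the world frame with the nearest timestamp."""
    frames = [[] for _ in world_timestamps]
    for datum in data:
        idx = bisect.bisect_left(world_timestamps, datum['timestamp'])
        if idx > 0 and (idx == len(world_timestamps) or
                        abs(world_timestamps[idx - 1] - datum['timestamp']) <=
                        abs(world_timestamps[idx] - datum['timestamp'])):
            idx -= 1  # the previous frame is at least as close
        frames[idx].append(datum)
    return frames

world_ts = [0.0, 0.033, 0.066, 0.1]
annotations = [{'subject': 'annotation', 'timestamp': 0.034},
               {'subject': 'annotation', 'timestamp': 0.09}]
print(bucket_by_frame(annotations, world_ts))  # frame 1 gets 0.034, frame 3 gets 0.09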
Example #2
    def stop(self):
        logger.info("Stopping Touchup")
        self.smooth_pos = 0.,0.
        self.sample_site = -2,-2
        self.counter = 0
        self.active = False
        self.button.status_text = ''


        offset_pt_clound = calibrate.preprocess_2d_data_monocular(calibrate.closest_matches_monocular(self.ref_list,self.gaze_list) )
        if len(offset_pt_clound)<3:
            logger.error('Did not sample enough data for touchup. Please retry.')
            return

        # Calculate the offset from gaze to target
        offset_pt_clound = np.array(offset_pt_clound)
        offset =  offset_pt_clound[:,:2]-offset_pt_clound[:,2:]
        mean_offset  = np.mean(offset,axis=0)

        user_calibration = load_object(os.path.join(self.g_pool.user_dir, "user_calibration_data"))

        self.pupil_list = user_calibration['pupil_list']
        self.ref_list = user_calibration['ref_list']
        calibration_method = user_calibration['calibration_method']

        if '3d' in calibration_method:
            logger.error('adjust calibration is not supported for 3d calibration.')
            return

        for r in self.ref_list:
            r['norm_pos'] = [ r['norm_pos'][0]-mean_offset[0],r['norm_pos'][1]-mean_offset[1] ]


        finish_calibration(self.g_pool,self.pupil_list,self.ref_list)
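A small numeric sketch of the offset correction above, with made-up points. The column layout (gaze_x, gaze_y, ref_x, ref_y) is an assumption inferred from the slicing in the snippet.

import numpy as np

# three hypothetical samples: (gaze_x, gaze_y, ref_x, ref_y) in normalized coordinates
pt_cloud = np.array([
    [0.52, 0.48, 0.50, 0.50],
    [0.31, 0.72, 0.30, 0.75],
    [0.81, 0.19, 0.78, 0.20],
])
offset = pt_cloud[:, :2] - pt_cloud[:, 2:]       # per-sample gaze minus reference
mean_offset = offset.mean(axis=0)                # systematic bias, here approx. [0.02, -0.02]
corrected_refs = pt_cloud[:, 2:] - mean_offset   # shift refs, as the plugin shifts ref_list
print(mean_offset)
print(corrected_refs)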
Example #3
def load_camera_calibration(g_pool):
    if g_pool.app == 'capture':
        try:
            camera_calibration = load_object(os.path.join(g_pool.user_dir,'camera_calibration'))
        except:
            camera_calibration = None
        else:
            same_name = camera_calibration['camera_name'] == g_pool.capture.name
            same_resolution =  camera_calibration['resolution'] == g_pool.capture.frame_size
            if not (same_name and same_resolution):
                logger.warning('Loaded camera calibration but camera name and/or resolution has changed.')
                camera_calibration = None
            else:
                logger.info("Loaded user calibrated calibration for %s@%s."%(g_pool.capture.name,g_pool.capture.frame_size))

        if not camera_calibration:
            logger.debug("Trying to load pre recorded calibration.")
            try:
                camera_calibration = pre_recorded_calibrations[g_pool.capture.name][g_pool.capture.frame_size]
            except KeyError:
                logger.info("Pre recorded calibration for %s@%s not found."%(g_pool.capture.name,g_pool.capture.frame_size))
            else:
                logger.info("Loaded pre recorded calibration for %s@%s."%(g_pool.capture.name,g_pool.capture.frame_size))


        if not camera_calibration:
            logger.warning("Camera calibration not found please run Camera_Intrinsics_Estimation or Chessboard_Calibration to calibrate camera.")


        return camera_calibration

    else:
        raise NotImplementedError()
Example #4

def compare_test_py():

    global test_file_Folder
    test_file_Folder += 'py/'
    # Iterate every frame
    frameNumber = 0
    while True:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
            frameNumber += 1
        except CameraCaptureError:
            print "Capture from Camera Failed. Stopping."
            break
        except EndofVideoFileError:
            print "Video File is done."
            break
        # send to detector
        result = detector_py.detect(frame,user_roi=u_r,visualize=False)

        #load corresponding test files
        reference_result = load_object( test_file_Folder + 'result_frame_py{}'.format(frameNumber))

        compare_dict(reference_result, result )

        print "Frame {}".format(frameNumber)

    print "Finished compare test py."
Example #5
    def __init__(self, g_pool,manual_ref_edit_mode=False):
        super().__init__(g_pool)
        self.manual_ref_edit_mode = manual_ref_edit_mode
        self.menu = None
        self.process_pipe = None


        self.result_dir = os.path.join(g_pool.rec_dir, 'offline_data')
        os.makedirs(self.result_dir, exist_ok=True)
        try:
            session_data = load_object(os.path.join(self.result_dir, 'offline_calibration_gaze'))
            if session_data['version'] != self.session_data_version:
                logger.warning("Session data from old version. Will not use this.")
                assert False
        except Exception as e:
            map_range = [0, len(self.g_pool.timestamps)]
            calib_range = [len(self.g_pool.timestamps)//10, len(self.g_pool.timestamps)//2]
            session_data = {}
            session_data['sections'] = [make_section_dict(calib_range, map_range), ]
            session_data['circle_marker_positions'] = []
            session_data['manual_ref_positions'] = []
        self.sections = session_data['sections']
        self.circle_marker_positions = session_data['circle_marker_positions']
        self.manual_ref_positions = session_data['manual_ref_positions']
        if self.circle_marker_positions:
            self.detection_progress = 100.0
            for s in self.sections:
                self.calibrate_section(s)
            self.correlate_and_publish()
        else:
            self.detection_progress = 0.0
            self.start_detection_task()
Example #6
    def __init__(
        self,
        g_pool,
        source_path=None,
        frame_size=None,
        frame_rate=None,
        name="Fake Source",
        *args,
        **kwargs
    ):
        super().__init__(g_pool, *args, **kwargs)
        if self.timing == "external":
            self.recent_events = self.recent_events_external_timing
        else:
            self.recent_events = self.recent_events_own_timing

        if source_path:
            meta = load_object(source_path)
            frame_size = meta["frame_size"]
            frame_rate = meta["frame_rate"]
            self.timestamps = np.load(
                os.path.splitext(source_path)[0] + "_timestamps.npy"
            )
        else:
            self.timestamps = None

        self.fps = frame_rate
        self._name = name
        self.make_img(tuple(frame_size))
        self.source_path = source_path
        self.current_frame_idx = 0
        self.target_frame_idx = 0
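A hedged sketch of how a source file for this fake source could be produced. The key names ('frame_size', 'frame_rate') and the '_timestamps.npy' companion path come from the snippet above; the paths, values, and the file_methods import location are assumptions.

import os
import numpy as np
from file_methods import save_object  # assumed import location for save_object

source_path = os.path.join("/tmp", "fake_source.meta")  # hypothetical path
save_object({"frame_size": (1280, 720), "frame_rate": 30}, source_path)

# 90 fake timestamps at 30 fps, stored where the fake source expects them
timestamps = np.arange(90) / 30.0
np.save(os.path.splitext(source_path)[0] + "_timestamps.npy", timestamps)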
Example #7
def update_recording_v086_to_v087(rec_dir):
    logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    def _clamp_norm_point(pos):
        """realisitic numbers for norm pos should be in this range.
            Grossly bigger or smaller numbers are results bad exrapolation
            and can cause overflow erorr when denormalized and cast as int32.
        """
        return min(100, max(-100, pos[0])), min(100, max(-100, pos[1]))

    for g in pupil_data["gaze_positions"]:
        if "topic" not in g:
            # we missed this in one gaze mapper
            g["topic"] = "gaze"
        g["norm_pos"] = _clamp_norm_point(g["norm_pos"])

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, "r", encoding="utf-8") as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info["Capture Software Version"] = "v0.8.7"

    with open(meta_info_path, "w", newline="") as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
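A quick standalone check of the clamping helper above, with made-up values.

def _clamp_norm_point(pos):
    return min(100, max(-100, pos[0])), min(100, max(-100, pos[1]))

print(_clamp_norm_point((0.42, 0.58)))    # in range -> unchanged: (0.42, 0.58)
print(_clamp_norm_point((1e9, -3.2e7)))   # wild extrapolation -> clamped to (100, -100)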
Example #8
        def set_pupil_settings(new_settings):
            g_pool.pupil_settings = new_settings

            if not new_settings == "default":
                try:
                    pupil_settings_new = load_object(os.path.join(g_pool.user_dir,'pupil_settings_' + new_settings))
                except:
                    logger.error("Settings don't exist")
                pupil_settings = g_pool.pupil_detector.get_settings()

                controls = cap.capture.controls
                controls_dict = dict([(c.display_name,c) for c in controls])
                try:
                    cap.frame_rate = pupil_settings_new['frame_rate']
                    cap.frame_size = pupil_settings_new['frame_size']
                except:
                    logger.info("no frame rate and frame size in camera settings")
                for key in controls_dict:
                    try:
                        controls_dict[key].value = pupil_settings_new[key]
                    except:
                        logger.info("no key with the name '%s' in camera settings" %key)
                for key in pupil_settings.keys():
                    try:
                        pupil_settings[key] = pupil_settings_new[key]
                    except:
                        logger.info("no key with the name '%s' in pupil settings" %key)
Example #9
def update_recording_bytes_to_unicode(rec_dir):
    logger.info("Updating recording from bytes to unicode.")

    def convert(data):
        if isinstance(data, bytes):
            return data.decode()
        elif isinstance(data, str) or isinstance(data, np.ndarray):
            return data
        # the ABCs live in collections.abc (the collections.* aliases were removed in Python 3.10)
        elif isinstance(data, collections.abc.Mapping):
            return dict(map(convert, data.items()))
        elif isinstance(data, collections.abc.Iterable):
            return type(data)(map(convert, data))
        else:
            return data

    for file in os.listdir(rec_dir):
        if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'):
            continue
        rec_file = os.path.join(rec_dir, file)
        try:
            rec_object = load_object(rec_file)
            converted_object = convert(rec_object)
            if converted_object != rec_object:
                logger.info('Converted `{}` from bytes to unicode'.format(file))
                save_object(converted_object, rec_file)
        except (UnpicklingError, IsADirectoryError):
            continue

    # manually convert k v dicts.
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    with open(meta_info_path, 'w', newline='') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
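A self-contained sketch of the recursive bytes-to-str conversion above; the sample dictionary is made up, and collections.abc is imported explicitly so the snippet runs on its own.

import collections.abc
import numpy as np

def convert(data):
    if isinstance(data, bytes):
        return data.decode()
    elif isinstance(data, str) or isinstance(data, np.ndarray):
        return data
    elif isinstance(data, collections.abc.Mapping):
        return dict(map(convert, data.items()))
    elif isinstance(data, collections.abc.Iterable):
        return type(data)(map(convert, data))
    else:
        return data

legacy = {b'topic': b'gaze', b'norm_pos': [b'0.5', 0.5], 'already_str': b'ok'}
print(convert(legacy))  # {'topic': 'gaze', 'norm_pos': ['0.5', 0.5], 'already_str': 'ok'}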
Example #10
    def __init__(self, g_pool):
        super().__init__(g_pool)
        zmq_ctx = zmq.Context()
        self.data_sub = zmq_tools.Msg_Receiver(zmq_ctx, g_pool.ipc_sub_url, topics=('pupil',))

        self.data_dir = os.path.join(g_pool.rec_dir, 'offline_data')
        os.makedirs(self.data_dir, exist_ok=True)
        try:
            session_data = load_object(os.path.join(self.data_dir, 'offline_pupil_data'))
            # only reuse cached results that were written with the current data version
            assert session_data.get('version') == self.session_data_version
        except:
            session_data = {}
            session_data["detection_method"]='3d'
            session_data['pupil_positions'] = []
            session_data['detection_progress'] = [0.,0.]
            session_data['detection_status'] = ["unknown","unknown"]
        self.detection_method = session_data["detection_method"]
        self.pupil_positions = session_data['pupil_positions']
        self.eye_processes = [None, None]
        self.detection_progress = session_data['detection_progress']
        self.detection_status = session_data['detection_status']

        self.menu = None

        # start processes
        if self.detection_progress[0] < 100:
            self.start_eye_process(0)
        if self.detection_progress[1] < 100:
            self.start_eye_process(1)

        # either we did not start them or they failed to start (mono setup etc)
        # either way we are done and can publish
        if self.eye_processes == [None, None]:
            self.correlate_publish()
Example #11
def update_recording_v086_to_v087(rec_dir):
    logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    def _clamp_norm_point(pos):
        '''Realistic numbers for norm pos should be in this range.
            Grossly bigger or smaller numbers are the result of bad extrapolation
            and can cause an overflow error when denormalized and cast as int32.
        '''
        return min(100.,max(-100.,pos[0])),min(100.,max(-100.,pos[1]))

    for g in pupil_data.get('gaze_positions', []):
        if 'topic' not in g:
            # we missed this in one gaze mapper
            g['topic'] = 'gaze'
        g['norm_pos'] = _clamp_norm_point(g['norm_pos'])

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path,'r',encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.7'

    update_meta_info(rec_dir, meta_info)
Example #12
def save_intrinsics(directory, cam_name, resolution, intrinsics):
    """
    Saves camera intrinsics calibration to a file. For each unique camera name we maintain a single file containing all calibrations associated with this camera name.
    :param directory: Directory to which the intrinsics file will be written
    :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'
    :param resolution: Camera resolution given as a tuple. This needs to match the resolution the calibration has been computed with.
    :param intrinsics: The camera intrinsics dictionary.
    :return:
    """
    # Try to load previous camera calibrations
    save_path = os.path.join(
        directory, "{}.intrinsics".format(cam_name.replace(" ", "_"))
    )
    try:
        calib_dict = load_object(save_path, allow_legacy=False)
    except:
        calib_dict = {}

    calib_dict["version"] = __version__
    calib_dict[str(resolution)] = intrinsics

    save_object(calib_dict, save_path)
    logger.info(
        "Calibration for camera {} at resolution {} saved to {}".format(
            cam_name, resolution, save_path
        )
    )
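A hedged usage sketch for save_intrinsics as defined above. The directory and every calibration value below are made up for illustration; the key names mirror those read back by load_intrinsics.

example_intrinsics = {
    "camera_matrix": [[1000.0, 0.0, 640.0],
                      [0.0, 1000.0, 360.0],
                      [0.0, 0.0, 1.0]],
    "dist_coefs": [0.0, 0.0, 0.0, 0.0, 0.0],
    "cam_type": "radial",
}
save_intrinsics("/tmp/pupil_user_dir", "Pupil Cam 1 ID2", (1280, 720), example_intrinsics)
# writes/updates /tmp/pupil_user_dir/Pupil_Cam_1_ID2.intrinsics with a
# "(1280, 720)" entry plus a "version" field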
Example #13
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, '*.time')
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name, time_ext = os.path.splitext(time_file_name)

        potential_locs = [os.path.join(rec_dir, time_name+ext) for ext in ('.mjpeg', '.mp4','.m4a')]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            video_loc = existing_locs[0]

        if time_name in ('Pupil Cam1 ID0', 'Pupil Cam1 ID1'):
            time_name = 'eye'+time_name[-1]  # rename eye files
        elif time_name in ('Pupil Cam1 ID2', 'Logitech Webcam C930e'):
            cam_calib_loc = os.path.join(rec_dir, 'camera_calibration')
            try:
                camera_calibration = load_object(cam_calib_loc)
            except:
                # no camera calibration found
                video = av.open(video_loc, 'r')
                frame_size = video.streams.video[0].format.width, video.streams.video[0].format.height
                del video
                try:
                    camera_calibration = pre_recorded_calibrations[time_name][frame_size]
                except KeyError:

                    camera_calibration = idealized_camera_calibration(frame_size)
                    logger.warning('Camera calibration not found. Will assume idealized camera.')
                save_object(camera_calibration, cam_calib_loc)

            time_name = 'world'  # assume world file
        elif time_name.startswith('audio_'):
            time_name = 'audio'

        timestamps = np.fromfile(time_loc, dtype='>f8')
        timestamp_loc = os.path.join(rec_dir, '{}_timestamps.npy'.format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == 'audio':
            video_dst = os.path.join(rec_dir, time_name) + '.mp4'
            logger.info('Renaming "{}" to "{}"'.format(os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)
        else:
            video_dst = os.path.join(rec_dir, time_name) + os.path.splitext(video_loc)[1]
            logger.info('Renaming "{}" to "{}"'.format(os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)

    pupil_data_loc = os.path.join(rec_dir, 'pupil_data')
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        save_object({'pupil_positions': [],
                     'gaze_positions': [],
                     'notifications': []}, pupil_data_loc)
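A tiny sketch of the '>f8' timestamp round trip used above: Pupil Mobile .time files hold big-endian float64 timestamps, which are re-saved as .npy. Paths and values are hypothetical.

import numpy as np

ts = np.array([1000.10, 1000.15, 1000.20], dtype='>f8')
ts.tofile('/tmp/world.time')                     # simulate a recorded .time file

loaded = np.fromfile('/tmp/world.time', dtype='>f8')
np.save('/tmp/world_timestamps.npy', loaded)     # what the updater writes
print(np.load('/tmp/world_timestamps.npy'))      # [1000.1  1000.15 1000.2 ]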
Example #14
    def __init__(self, g_pool, mode="Show markers and frames", min_marker_perimeter=40):
        super(Marker_Detector, self).__init__(g_pool)
        self.order = 0.2

        # all markers that are detected in the most recent frame
        self.markers = []

        # load camera intrinsics

        try:
            camera_calibration = load_object(os.path.join(self.g_pool.user_dir, "camera_calibration"))
        except:
            self.camera_intrinsics = None
        else:
            same_name = camera_calibration["camera_name"] == self.g_pool.capture.name
            same_resolution = camera_calibration["resolution"] == self.g_pool.capture.frame_size
            if same_name and same_resolution:
                logger.info("Loaded camera calibration. 3D marker tracking enabled.")
                K = camera_calibration["camera_matrix"]
                dist_coefs = camera_calibration["dist_coefs"]
                resolution = camera_calibration["resolution"]
                self.camera_intrinsics = K, dist_coefs, resolution
            else:
                logger.info(
                    "Loaded camera calibration but camera name and/or resolution has changed. Please re-calibrate."
                )
                self.camera_intrinsics = None

        # all registered surfaces
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.user_dir, "surface_definitions"))
        self.surfaces = [
            Reference_Surface(saved_definition=d)
            for d in self.surface_definitions.get("realtime_square_marker_surfaces", [])
            if isinstance(d, dict)
        ]

        # edit surfaces
        self.edit_surfaces = []

        # plugin state
        self.mode = mode
        self.running = True

        self.robust_detection = 1
        self.aperture = 11
        self.min_marker_perimeter = min_marker_perimeter
        self.locate_3d = False

        # debug vars
        self.draw_markers = 0
        self.show_surface_idx = 0

        self.img_shape = None

        self.menu = None
        self.button = None
        self.add_button = None
Example #15
def update_recording_v05_to_v074(rec_dir):
    logger.info("Updating recording from v0.5x/v0.6x/v0.7x format to v0.7.4 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    save_object(pupil_data,os.path.join(rec_dir, "pupil_data_old"))
    for p in pupil_data['pupil_positions']:
        p['method'] = '2d python'
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #16

def load_camera_calibration(g_pool):
    if g_pool.app != 'player':
        try:
            camera_calibration = load_object(os.path.join(g_pool.user_dir,'camera_calibration'),allow_legacy=False)
            camera_calibration['camera_name']
        except (KeyError,ValueError):
            camera_calibration = None
            logger.warning('Invalid or Deprecated camera calibration found. Please recalibrate camera.')
        except:
            camera_calibration = None
        else:
            same_name = camera_calibration['camera_name'] == g_pool.capture.name
            same_resolution = tuple(camera_calibration['resolution']) == g_pool.capture.frame_size
            if not (same_name and same_resolution):
                logger.warning('Loaded camera calibration but camera name and/or resolution has changed.')
                camera_calibration = None
            else:
                logger.info("Loaded user calibrated calibration for {}@{}.".format(g_pool.capture.name,g_pool.capture.frame_size))

        if not camera_calibration:
            logger.debug("Trying to load pre recorded calibration.")
            try:
                camera_calibration = pre_recorded_calibrations[g_pool.capture.name][g_pool.capture.frame_size]
            except KeyError:
                logger.info("Pre recorded calibration for {}@{} not found.".format(g_pool.capture.name,g_pool.capture.frame_size))
            else:
                logger.info("Loaded pre recorded calibration for {}@{}.".format(g_pool.capture.name,g_pool.capture.frame_size))


        if not camera_calibration:
            camera_calibration = idealized_camera_calibration(g_pool.capture.frame_size)
            logger.warning("Camera calibration not found. Will assume idealized camera. Please calibrate your cameras. Using camera 'Camera_Intrinsics_Estimation'.")

    else:
        try:
            camera_calibration = load_object(os.path.join(g_pool.rec_dir,'camera_calibration'))
        except:
            camera_calibration = idealized_camera_calibration(g_pool.capture.frame_size)
            logger.warning("Camera calibration not found. Will assume idealized camera. Please calibrate your cameras before your next recording.")
        else:
            logger.info("Loaded Camera calibration from file.")
    return camera_calibration
Example #17

    def __init__(self,g_pool):
        super(Show_Calibration, self).__init__(g_pool)

        self.menu=None

        logger.error("This will be implemented as part of gaze mapper soon.")
        self.alive= False
        return


        width,height = self.g_pool.capture.frame_size

        if g_pool.app == 'capture':
            cal_pt_path =  os.path.join(g_pool.user_dir,"user_calibration_data")
        else:
            cal_pt_path =  os.path.join(g_pool.rec_dir,"user_calibration_data")

        try:
            user_calibration_data = load_object(cal_pt_path)
        except:
            logger.warning("Please calibrate first")
            self.close()
            return

        if self.g_pool.binocular:

            fn_input_eye1 = cal_pt_cloud[:,2:4].transpose()
            cal_pt_cloud[:,0:2] =  np.array(map_fn(fn_input_eye0, fn_input_eye1)).transpose()
            cal_pt_cloud[:,2:4] = cal_pt_cloud[:,4:6]
        else:
            fn_input = cal_pt_cloud[:,0:2].transpose()
            cal_pt_cloud[:,0:2] =  np.array(map_fn(fn_input)).transpose()

        ref_pts = cal_pt_cloud[inlier_map][:,np.newaxis,2:4]
        ref_pts = np.array(ref_pts,dtype=np.float32)

        logger.debug("calibration ref_pts %s"%ref_pts)
        if len(ref_pts)== 0:
            logger.warning("Calibration is bad. Please re-calibrate")
            self.close()
            return

        self.calib_bounds =  cv2.convexHull(ref_pts)
        # create a list [[px1,py1],[wx1,wy1],[px2,py2],[wx2,wy2]...] of outliers and inliers for gl_lines
        self.outliers = np.concatenate((cal_pt_cloud[~inlier_map][:,0:2],cal_pt_cloud[~inlier_map][:,2:4])).reshape(-1,2)
        self.inliers = np.concatenate((cal_pt_cloud[inlier_map][:,0:2],cal_pt_cloud[inlier_map][:,2:4]),axis=1).reshape(-1,2)


        self.inlier_ratio = cal_pt_cloud[inlier_map].shape[0]/float(cal_pt_cloud.shape[0])
        self.inlier_count = cal_pt_cloud[inlier_map].shape[0]
        # hull = cv2.approxPolyDP(self.calib_bounds, 0.001,closed=True)
        full_screen_area = 1.
        logger.debug("calibration bounds %s"%self.calib_bounds)
        self.calib_area_ratio = cv2.contourArea(self.calib_bounds)/full_screen_area
Example #18
    def _load_data_from_file(self, filepath):
        try:
            dict_representation = fm.load_object(filepath)
        except FileNotFoundError:
            return None
        if dict_representation.get("version", None) != self._item_class.version:
            logger.warning(
                "Data in {} is in old file format. Will not load these!".format(
                    filepath
                )
            )
            return None
        return dict_representation.get("data", None)
Example #19
def update_recording_v073_to_v074(rec_dir):
    logger.info("Updating recording from v0.7x format to v0.7.4 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    modified = False
    for p in pupil_data["pupil_positions"]:
        if p["method"] == "3D c++":
            p["method"] = "3d c++"
            try:
                p["projected_sphere"] = p.pop("projectedSphere")
            except:
                p["projected_sphere"] = {"center": (0, 0), "angle": 0, "axes": (0, 0)}
            p["model_confidence"] = p.pop("modelConfidence")
            p["model_id"] = p.pop("modelID")
            p["circle_3d"] = p.pop("circle3D")
            p["diameter_3d"] = p.pop("diameter_3D")
            modified = True
    if modified:
        save_object(load_object(os.path.join(rec_dir, "pupil_data")), os.path.join(rec_dir, "pupil_data_old"))
    try:
        save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #20
def update_recording_v073_to_v074(rec_dir):
    logger.info("Updating recording from v0.7x format to v0.7.4 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    modified = False
    for p in pupil_data['pupil_positions']:
        if p['method'] == "3D c++":
            p['method'] = "3d c++"
            try:
                p['projected_sphere'] = p.pop('projectedSphere')
            except:
                p['projected_sphere'] = {'center':(0,0),'angle':0,'axes':(0,0)}
            p['model_confidence'] = p.pop('modelConfidence')
            p['model_id'] = p.pop('modelID')
            p['circle_3d'] = p.pop('circle3D')
            p['diameter_3d'] = p.pop('diameter_3D')
            modified = True
    if modified:
        save_object(load_object(os.path.join(rec_dir, "pupil_data")),os.path.join(rec_dir, "pupil_data_old"))
    try:
        save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))
    except IOError:
        pass
Example #21
    def calibrate_from_user_calibration_data_file(self):
        user_calibration = load_object(os.path.join(self.g_pool.user_dir, "user_calibration_data"))

        self.pupil_list = user_calibration['pupil_list']
        self.ref_list = user_calibration['ref_list']
        calibration_method = user_calibration['calibration_method']

        if '3d' in calibration_method:
            logger.error('adjust calibration is not supported for 3d calibration.')
            return


        finish_calibration.finish_calibration(self.g_pool,self.pupil_list,self.ref_list)
Example #22
def load_intrinsics(directory, cam_name, resolution):
    """
    Loads a pre-recorded intrinsics calibration for the given camera and resolution. If no pre-recorded calibration is available we fall back on default values.
    :param directory: The directory in which to look for the intrinsics file
    :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'
    :param resolution: Camera resolution given as a tuple.
    :return: Camera Model Object
    """
    file_path = os.path.join(
        directory, "{}.intrinsics".format(cam_name.replace(" ", "_"))
    )
    try:
        calib_dict = load_object(file_path, allow_legacy=False)

        if calib_dict["version"] < __version__:
            logger.warning("Deprecated camera calibration found.")
            logger.info(
                "Please recalibrate using the Camera Intrinsics Estimation calibration."
            )
            os.rename(
                file_path, "{}.deprecated.v{}".format(file_path, calib_dict["version"])
            )

        intrinsics = calib_dict[str(resolution)]
        logger.info("Previously recorded calibration found and loaded!")
    except Exception:
        logger.info(
            "No user calibration found for camera {} at resolution {}".format(
                cam_name, resolution
            )
        )

        if (
            cam_name in pre_recorded_calibrations
            and str(resolution) in pre_recorded_calibrations[cam_name]
        ):
            logger.info("Loading pre-recorded calibration")
            intrinsics = pre_recorded_calibrations[cam_name][str(resolution)]
        else:
            logger.info("No pre-recorded calibration available")
            logger.warning("Loading dummy calibration")
            return Dummy_Camera(resolution, cam_name)

    if intrinsics["cam_type"] == "fisheye":
        return Fisheye_Dist_Camera(
            intrinsics["camera_matrix"], intrinsics["dist_coefs"], resolution, cam_name
        )
    else:
        return Radial_Dist_Camera(
            intrinsics["camera_matrix"], intrinsics["dist_coefs"], resolution, cam_name
        )
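A hedged usage sketch for load_intrinsics as defined above; the directory is hypothetical and the camera name and resolution follow the docstring.

camera = load_intrinsics("/tmp/pupil_user_dir", "Pupil Cam 1 ID2", (1280, 720))
# returns a Fisheye_Dist_Camera or Radial_Dist_Camera when a saved or
# pre-recorded calibration is found, otherwise a Dummy_Camera fallback
print(type(camera).__name__)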
Example #23

def load_camera_calibration(g_pool):
    if g_pool.app == 'capture':
        try:
            camera_calibration = load_object(os.path.join(g_pool.user_dir,'camera_calibration'))
        except:
            camera_calibration = None
        else:
            same_name = camera_calibration['camera_name'] == g_pool.capture.name
            same_resolution =  camera_calibration['resolution'] == g_pool.capture.frame_size
            if not (same_name and same_resolution):
                logger.warning('Loaded camera calibration but camera name and/or resolution has changed.')
                camera_calibration = None
            else:
                logger.info("Loaded user calibrated calibration for %s@%s."%(g_pool.capture.name,g_pool.capture.frame_size))

        if not camera_calibration:
            logger.debug("Trying to load pre recorded calibration.")
            try:
                camera_calibration = pre_recorded_calibrations[g_pool.capture.name][g_pool.capture.frame_size]
            except KeyError:
                logger.info("Pre recorded calibration for %s@%s not found."%(g_pool.capture.name,g_pool.capture.frame_size))
            else:
                logger.info("Loaded pre recorded calibration for %s@%s."%(g_pool.capture.name,g_pool.capture.frame_size))


        if not camera_calibration:
            camera_calibration = idealized_camera_calibration(g_pool.capture.frame_size)
            logger.warning("Camera calibration not found. Will assume idealized camera. Please calibrate your cameras. Using camera 'Camera_Intrinsics_Estimation'.")

    else:
        try:
            camera_calibration = load_object(os.path.join(g_pool.rec_dir,'camera_calibration'))
        except:
            camera_calibration = idealized_camera_calibration(g_pool.capture.frame_size)
            logger.warning("Camera calibration not found. Will assume idealized camera. Please calibrate your cameras before your next recording.")
        else:
            logger.info("Loaded Camera calibration from file.")
    return camera_calibration
Example #24
def update_recording_v091_to_v093(rec_dir):
    logger.info("Updating recording from v0.9.1 format to v0.9.3 format")
    meta_info_path = os.path.join(rec_dir,"info.csv")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    for g in pupil_data.get('gaze_positions', []):
        # fixing recordings made with bug https://github.com/pupil-labs/pupil/issues/598
        g['norm_pos'] = float(g['norm_pos'][0]), float(g['norm_pos'][1])

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.9.3'
    update_meta_info(rec_dir, meta_info)
Example #25

    def __init__(self,g_pool,fullscreen = False):
        super(Camera_Intrinsics_Estimation, self).__init__(g_pool)
        self.collect_new = False
        self.calculated = False
        self.obj_grid = _gen_pattern_grid((4, 11))
        self.img_points = []
        self.obj_points = []
        self.count = 10
        self.display_grid = _make_grid()

        self._window = None

        self.menu = None
        self.button = None
        self.clicks_to_close = 5
        self.window_should_close = False
        self.fullscreen = fullscreen
        self.monitor_idx = 0


        self.glfont = fontstash.Context()
        self.glfont.add_font('opensans',get_opensans_font_path())
        self.glfont.set_size(32)
        self.glfont.set_color_float((0.2,0.5,0.9,1.0))
        self.glfont.set_align_string(v_align='center')



        self.undist_img = None
        self.show_undistortion = False
        self.show_undistortion_switch = None


        try:
            camera_calibration = load_object(os.path.join(self.g_pool.user_dir,'camera_calibration'))
        except:
            self.camera_intrinsics = None
        else:
            logger.info('Loaded camera calibration. Click show undistortion to verify.')
            logger.info('Hint: Lines in the real world should be straight in the image.')
            same_name = camera_calibration['camera_name'] == self.g_pool.capture.name
            same_resolution =  camera_calibration['resolution'] == self.g_pool.capture.frame_size
            if not (same_name and same_resolution):
                logger.warning('Loaded camera calibration but camera name and/or resolution has changed. Please re-calibrate.')

            K = camera_calibration['camera_matrix']
            dist_coefs = camera_calibration['dist_coefs']
            resolution = camera_calibration['resolution']
            self.camera_intrinsics = K,dist_coefs,resolution
Example #26
def update_recording_v093_to_v094(rec_dir):
    logger.info("Updating recording from v0.9.3 to v0.9.4.")
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for file in os.listdir(rec_dir):
        if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'):
            continue
        rec_file = os.path.join(rec_dir, file)

        try:
            rec_object = load_object(rec_file,allow_legacy=False)
            save_object(rec_object, rec_file)
        except:
            try:
                rec_object = load_object(rec_file,allow_legacy=True)
                save_object(rec_object, rec_file)
                logger.info('Converted `{}` from pickle to msgpack'.format(file))
            except:
                logger.warning("did not convert {}".format(rec_file))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.9.4'
    update_meta_info(rec_dir, meta_info)
Example #27
    def __init__(self, g_pool):
        super().__init__(g_pool)
        zmq_ctx = zmq.Context()
        self.data_sub = zmq_tools.Msg_Receiver(
            zmq_ctx,
            g_pool.ipc_sub_url,
            topics=("pupil", "notify.file_source.video_finished"),
            hwm=100_000,
        )

        self.data_dir = os.path.join(g_pool.rec_dir, "offline_data")
        os.makedirs(self.data_dir, exist_ok=True)
        try:
            session_meta_data = fm.load_object(
                os.path.join(self.data_dir, self.session_data_name + ".meta"))
            assert session_meta_data.get(
                "version") == self.session_data_version
        except (AssertionError, FileNotFoundError):
            session_meta_data = {}
            session_meta_data["detection_method"] = "3d"
            session_meta_data["detection_status"] = ["unknown", "unknown"]
        self.detection_method = session_meta_data["detection_method"]
        self.detection_status = session_meta_data["detection_status"]

        pupil = fm.load_pldata_file(self.data_dir, self.session_data_name)
        ts_data_zip = zip(pupil.timestamps, pupil.data)
        ts_topic_zip = zip(pupil.timestamps, pupil.topics)
        self.pupil_positions = collections.OrderedDict(ts_data_zip)
        self.id_topics = collections.OrderedDict(ts_topic_zip)

        self.eye_video_loc = [None, None]
        self.eye_frame_num = [0, 0]
        for topic in self.id_topics.values():
            eye_id = int(topic[-1])
            self.eye_frame_num[eye_id] += 1

        self.pause_switch = None
        self.detection_paused = False

        # start processes
        for eye_id in range(2):
            if self.detection_status[eye_id] != "complete":
                self.start_eye_process(eye_id)

        # either we did not start them or they failed to start (mono setup etc)
        # either way we are done and can publish
        if self.eye_video_loc == [None, None]:
            self.correlate_publish()
Example #28
    def __init__(self, g_pool):
        super().__init__(g_pool)
        zmq_ctx = zmq.Context()
        self.data_sub = zmq_tools.Msg_Receiver(
            zmq_ctx,
            g_pool.ipc_sub_url,
            topics=("pupil", "notify.file_source.video_finished"),
            hwm=100_000,
        )

        self.data_dir = os.path.join(g_pool.rec_dir, "offline_data")
        os.makedirs(self.data_dir, exist_ok=True)
        try:
            session_meta_data = fm.load_object(
                os.path.join(self.data_dir, self.session_data_name + ".meta")
            )
            assert session_meta_data.get("version") == self.session_data_version
        except (AssertionError, FileNotFoundError):
            session_meta_data = {}
            session_meta_data["detection_method"] = "3d"
            session_meta_data["detection_status"] = ["unknown", "unknown"]
        self.detection_method = session_meta_data["detection_method"]
        self.detection_status = session_meta_data["detection_status"]

        pupil = fm.load_pldata_file(self.data_dir, self.session_data_name)
        ts_data_zip = zip(pupil.timestamps, pupil.data)
        ts_topic_zip = zip(pupil.timestamps, pupil.topics)
        self.pupil_positions = collections.OrderedDict(ts_data_zip)
        self.id_topics = collections.OrderedDict(ts_topic_zip)

        self.eye_video_loc = [None, None]
        self.eye_frame_num = [0, 0]
        for topic in self.id_topics.values():
            eye_id = int(topic[-1])
            self.eye_frame_num[eye_id] += 1

        self.pause_switch = None
        self.detection_paused = False

        # start processes
        for eye_id in range(2):
            if self.detection_status[eye_id] != "complete":
                self.start_eye_process(eye_id)

        # either we did not start them or they failed to start (mono setup etc)
        # either way we are done and can publish
        if self.eye_video_loc == [None, None]:
            self.correlate_publish()
Example #29

def load_intrinsics(directory, cam_name, resolution):
    """
    Loads a pre-recorded intrinsics calibration for the given camera and resolution. If no pre-recorded calibration is available we fall back on default values.
    :param directory: The directory in which to look for the intrinsics file
    :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'
    :param resolution: Camera resolution given as a tuple.
    :return: Camera Model Object
    """
    file_path = os.path.join(
        directory, '{}.intrinsics'.format(cam_name.replace(" ", "_")))
    try:
        calib_dict = load_object(file_path, allow_legacy=False)

        if calib_dict['version'] < __version__:
            logger.warning('Deprecated camera calibration found.')
            logger.info(
                'Please recalibrate using the Camera Intrinsics Estimation calibration.'
            )
            os.rename(
                file_path, '{}.deprecated.v{}'.format(file_path,
                                                      calib_dict['version']))

        intrinsics = calib_dict[str(resolution)]
        logger.info("Previously recorded calibration found and loaded!")
    except Exception as e:
        logger.info(
            "No user calibration found for camera {} at resolution {}".format(
                cam_name, resolution))

        if cam_name in pre_recorded_calibrations and str(
                resolution) in pre_recorded_calibrations[cam_name]:
            logger.info("Loading pre-recorded calibration")
            intrinsics = pre_recorded_calibrations[cam_name][str(resolution)]
        else:
            logger.info("No pre-recorded calibration available")
            logger.warning("Loading dummy calibration")
            intrinsics = {'cam_type': 'dummy'}

    if intrinsics['cam_type'] == 'dummy':
        return Dummy_Camera(resolution, cam_name)
    elif intrinsics['cam_type'] == 'fisheye':
        return Fisheye_Dist_Camera(intrinsics['camera_matrix'],
                                   intrinsics['dist_coefs'], resolution,
                                   cam_name)
    elif intrinsics['cam_type'] == 'radial':
        return Radial_Dist_Camera(intrinsics['camera_matrix'],
                                  intrinsics['dist_coefs'], resolution,
                                  cam_name)
Example #30
def update_recording_v082_to_v083(rec_dir):
    logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    for d in pupil_data['gaze_positions']:
        if 'base' in d:
            d['base_data'] = d.pop('base')

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path,'r',encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.3'

    update_meta_info(rec_dir, meta_info)
Example #31
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = fm.load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d['topic'] = topic

    fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.6'

    update_meta_info(rec_dir, meta_info)
Example #32
def update_recording_v082_to_v083(rec_dir):
    logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
    pupil_data = fm.load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for d in pupil_data['gaze_positions']:
        if 'base' in d:
            d['base_data'] = d.pop('base')

    fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.3'

    update_meta_info(rec_dir, meta_info)
Example #33
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d['topic'] = topic

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path,'r',encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Data Format Version'] = 'v0.8.6'

    update_meta_info(rec_dir, meta_info)
Example #34
def update_recording_v082_to_v083(rec_dir):
    logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
    pupil_data = fm.load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for d in pupil_data["gaze_positions"]:
        if "base" in d:
            d["base_data"] = d.pop("base")

    fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, "r", encoding="utf-8") as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info["Data Format Version"] = "v0.8.3"

    update_meta_info(rec_dir, meta_info)
Example #35
def update_recording_v082_to_v083(rec_dir):
    logger.info("Updating recording from v0.8.2 format to v0.8.3 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for d in pupil_data['gaze_positions']:
        if 'base' in d:
            d['base_data'] = d.pop('base')

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path) as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Capture Software Version'] = 'v0.8.3'

    with open(meta_info_path, 'w') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #36
    def __init__(self, g_pool, source_path=None, frame_size=None,
                 frame_rate=None, name='Fake Source', *args, **kwargs):
        super().__init__(g_pool, *args, **kwargs)
        if source_path:
            meta = load_object(source_path)
            frame_size = meta['frame_size']
            frame_rate = meta['frame_rate']
            self.timestamps = np.load(os.path.splitext(source_path)[0] + '_timestamps.npy')
        else:
            self.timestamps = None

        self.fps = frame_rate
        self._name = name
        self.make_img(tuple(frame_size))
        self.source_path = source_path
        self.current_frame_idx = 0
        self.target_frame_idx = 0
Example #37
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir,"info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d['topic'] = topic

    save_object(pupil_data,os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path) as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Capture Software Version'] = 'v0.8.6'

    with open(meta_info_path,'w') as csvfile:
        csv_utils.write_key_value_file(csvfile,meta_info)
Example #38
def update_recording_v083_to_v086(rec_dir):
    logger.info("Updating recording from v0.8.3 format to v0.8.6 format")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))
    meta_info_path = os.path.join(rec_dir, "info.csv")

    for topic in pupil_data.keys():
        for d in pupil_data[topic]:
            d['topic'] = topic

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path) as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Capture Software Version'] = 'v0.8.6'

    with open(meta_info_path, 'w') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Example #39
    def __init__(self, g_pool):
        super().__init__(g_pool)

        self.data_dir = os.path.join(g_pool.rec_dir, 'offline_data')
        os.makedirs(self.data_dir, exist_ok=True)
        try:
            session_data = load_object(
                os.path.join(self.data_dir, 'offline_pupil_data'))
            # only reuse cached results that were written with the current data version
            assert session_data.get('version') == self.session_data_version
        except:
            session_data = {}
            session_data["detection_method"] = '3d'
            session_data['pupil_positions'] = []
            session_data['detection_progress'] = [0., 0.]
            session_data['detection_status'] = ["unknown", "unknown"]
        self.detection_method = session_data["detection_method"]
        self.pupil_positions = session_data['pupil_positions']
        self.eye_processes = [None, None]
        self.detection_progress = session_data['detection_progress']
        self.detection_status = session_data['detection_status']

        # Pupil Offline Detection
        self.eyes_are_alive = Value(c_bool, 0), Value(c_bool, 0)

        logger.debug('Starting eye process communication channel...')
        self.ipc_pub_url, self.ipc_sub_url, self.ipc_push_url = self.initialize_ipc()
        sleep(0.2)
        self.data_sub = zmq_tools.Msg_Receiver(self.zmq_ctx,
                                               self.ipc_sub_url,
                                               topics=('', ))
        self.eye_control = zmq_tools.Msg_Dispatcher(self.zmq_ctx,
                                                    self.ipc_push_url)

        self.menu = None

        #start processes
        if self.detection_progress[0] < 100:
            self.start_eye_process(0)
        if self.detection_progress[1] < 100:
            self.start_eye_process(1)

        #either we did not start them or they failed to start (mono setup etc)
        #either way we are done and can publish
        if self.eye_processes == [None, None]:
            self.correlate_publish()
Example #40

    def _load_from_file(self):
        file_path = self._plmodel_file_path
        try:
            dict_representation = fm.load_object(file_path)
        except FileNotFoundError:
            return None, None
        if dict_representation.get("version", None) != self.version:
            logger.warning(
                "Data in {} is in old file format. Will not load these!".format(
                    file_path
                )
            )
            return None, None
        return (
            dict_representation.get("recording_uuid", None),
            dict_representation.get("data", None),
        )
Example #41

def load_camera_calibration(g_pool):
    if g_pool.app == 'capture':
        try:
            camera_calibration = load_object(
                os.path.join(g_pool.user_dir, 'camera_calibration'))
        except:
            camera_calibration = None
        else:
            same_name = camera_calibration[
                'camera_name'] == g_pool.capture.name
            same_resolution = camera_calibration[
                'resolution'] == g_pool.capture.frame_size
            if not (same_name and same_resolution):
                logger.warning(
                    'Loaded camera calibration but camera name and/or resolution has changed.'
                )
                camera_calibration = None
            else:
                logger.info("Loaded user calibrated calibration for %s@%s." %
                            (g_pool.capture.name, g_pool.capture.frame_size))

        if not camera_calibration:
            logger.debug("Trying to load pre recorded calibration.")
            try:
                camera_calibration = pre_recorded_calibrations[
                    g_pool.capture.name][g_pool.capture.frame_size]
            except KeyError:
                logger.info("Pre recorded calibration for %s@%s not found." %
                            (g_pool.capture.name, g_pool.capture.frame_size))
            else:
                logger.info("Loaded pre recorded calibration for %s@%s." %
                            (g_pool.capture.name, g_pool.capture.frame_size))

        if not camera_calibration:
            logger.warning(
                "Camera calibration not found please run Camera_Intrinsics_Estimation to calibrate camera."
            )

        return camera_calibration

    else:
        raise NotImplementedError()
Example #42
    def stop(self):
        logger.info("Stopping Touchup")
        self.smooth_pos = 0., 0.
        self.sample_site = -2, -2
        self.counter = 0
        self.active = False
        self.button.status_text = ''

        offset_pt_clound = calibrate.preprocess_2d_data_monocular(
            calibrate.closest_matches_monocular(self.ref_list, self.gaze_list))
        if len(offset_pt_clound) < 3:
            logger.error(
                'Did not sample enough data for touchup. Please retry.')
            return

        # Calculate the offset from gaze to target
        offset_pt_clound = np.array(offset_pt_clound)
        offset = offset_pt_clound[:, :2] - offset_pt_clound[:, 2:]
        mean_offset = np.mean(offset, axis=0)

        user_calibration = load_object(
            os.path.join(self.g_pool.user_dir, "user_calibration_data"))

        self.pupil_list = user_calibration['pupil_list']
        self.ref_list = user_calibration['ref_list']
        calibration_method = user_calibration['calibration_method']

        if '3d' in calibration_method:
            logger.error(
                'adjust calibration is not supported for 3d calibration.')
            return

        for r in self.ref_list:
            r['norm_pos'] = [
                r['norm_pos'][0] - mean_offset[0],
                r['norm_pos'][1] - mean_offset[1]
            ]

        finish_calibration(self.g_pool,
                           self.pupil_list,
                           self.ref_list,
                           force='2d')
Example #43
def update_recording_v086_to_v087(rec_dir):
    logger.info("Updating recording from v0.8.6 format to v0.8.7 format")
    pupil_data = fm.load_object(os.path.join(rec_dir, "pupil_data"))

    def _clamp_norm_point(pos):
        """realisitic numbers for norm pos should be in this range.
            Grossly bigger or smaller numbers are results bad exrapolation
            and can cause overflow erorr when denormalized and cast as int32.
        """
        return min(100.0, max(-100.0, pos[0])), min(100.0, max(-100.0, pos[1]))

    for g in pupil_data.get("gaze_positions", []):
        if "topic" not in g:
            # we missed this in one gaze mapper
            g["topic"] = "gaze"
        g["norm_pos"] = _clamp_norm_point(g["norm_pos"])

    fm.save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    _update_info_version_to("v0.8.7", rec_dir)
Example #44
def save_intrinsics(directory, cam_name, resolution, intrinsics):
    """
    Saves camera intrinsics calibration to a file. For each unique camera name we maintain a single file containing all calibrations associated with this camera name.
    :param directory: Directory to which the intrinsics file will be written
    :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'
    :param resolution: Camera resolution given as a tuple. This needs to match the resolution the calibration has been computed with.
    :param intrinsics: The camera intrinsics dictionary.
    :return:
    """
    # Try to load previous camera calibrations
    save_path = os.path.join(directory, '{}.intrinsics'.format(cam_name.replace(" ", "_")))
    try:
        calib_dict = load_object(save_path, allow_legacy=False)
    except:
        calib_dict = {}

    calib_dict['version'] = __version__
    calib_dict[str(resolution)] = intrinsics

    save_object(calib_dict, save_path)
    logger.info("Calibration for camera {} at resolution {} saved to {}".format(cam_name, resolution, save_path))
Example #45
    def __init__(self, g_pool):
        super().__init__(g_pool)
        self._detection_paused = False

        zmq_ctx = zmq.Context()
        self.data_sub = zmq_tools.Msg_Receiver(
            zmq_ctx,
            g_pool.ipc_sub_url,
            topics=("pupil", "notify.file_source"),
            hwm=100_000,
        )

        self.data_dir = os.path.join(g_pool.rec_dir, "offline_data")
        os.makedirs(self.data_dir, exist_ok=True)
        try:
            session_meta_data = fm.load_object(
                os.path.join(self.data_dir, self.session_data_name + ".meta")
            )
            assert session_meta_data.get("version") == self.session_data_version
        except (AssertionError, FileNotFoundError):
            session_meta_data = {}
            session_meta_data["detection_status"] = ["unknown", "unknown"]

        self.detection_status = session_meta_data["detection_status"]

        self._pupil_data_store = pm.PupilDataCollector()
        pupil_data_from_cache = pm.PupilDataBisector.load_from_file(
            self.data_dir, self.session_data_name
        )
        self.publish_existing(pupil_data_from_cache)

        # Start offline pupil detection if not complete yet:
        self.eye_video_loc = [None, None]
        self.eye_frame_num = [0, 0]
        self.eye_frame_idx = [-1, -1]

        # start processes
        for eye_id in range(2):
            if self.detection_status[eye_id] != "complete":
                self.start_eye_process(eye_id)
Beispiel #46
0
    def __init__(self, g_pool, manual_ref_edit_mode=False):
        super().__init__(g_pool)
        self.timeline_line_height = 16
        self.manual_ref_edit_mode = manual_ref_edit_mode
        self.menu = None
        self.process_pipe = None

        self.result_dir = os.path.join(g_pool.rec_dir, 'offline_data')
        os.makedirs(self.result_dir, exist_ok=True)
        try:
            session_data = load_object(
                os.path.join(self.result_dir, 'offline_calibration_gaze'))
            if session_data['version'] != self.session_data_version:
                logger.warning(
                    "Session data from old version. Will not use this.")
                assert False
        except Exception as e:
            map_range = [0, len(self.g_pool.timestamps)]
            calib_range = [
                len(self.g_pool.timestamps) // 10,
                len(self.g_pool.timestamps) // 2
            ]
            session_data = {}
            session_data['sections'] = [
                make_section_dict(calib_range, map_range),
            ]
            session_data['circle_marker_positions'] = []
            session_data['manual_ref_positions'] = []
        self.sections = session_data['sections']
        self.circle_marker_positions = session_data['circle_marker_positions']
        self.manual_ref_positions = session_data['manual_ref_positions']
        if self.circle_marker_positions:
            self.detection_progress = 100.0
            for s in self.sections:
                self.calibrate_section(s)
            self.correlate_and_publish()
        else:
            self.detection_progress = 0.0
            self.start_detection_task()
Beispiel #47
0
    def __init__(self, g_pool):
        super().__init__(g_pool)
        zmq_ctx = zmq.Context()
        self.data_sub = zmq_tools.Msg_Receiver(
            zmq_ctx,
            g_pool.ipc_sub_url,
            topics=('pupil', 'notify.file_source.video_finished'))

        self.data_dir = os.path.join(g_pool.rec_dir, 'offline_data')
        os.makedirs(self.data_dir, exist_ok=True)
        try:
            session_data = load_object(
                os.path.join(self.data_dir, 'offline_pupil_data'))
            assert session_data.get('version') == self.session_data_version
        except Exception:
            session_data = {}
            session_data["detection_method"] = '3d'
            session_data['pupil_positions'] = []
            session_data['detection_progress'] = [0., 0.]
            session_data['detection_status'] = ["unknown", "unknown"]
        self.detection_method = session_data["detection_method"]
        self.pupil_positions = session_data['pupil_positions']
        self.eye_processes = [None, None]
        self.detection_progress = session_data['detection_progress']
        self.detection_status = session_data['detection_status']

        self.pause_switch = None
        self.detection_paused = False

        # start processes
        if self.detection_progress[0] < 100:
            self.start_eye_process(0)
        if self.detection_progress[1] < 100:
            self.start_eye_process(1)

        # either we did not start them or they failed to start (mono setup etc)
        # either way we are done and can publish
        if self.eye_processes == [None, None]:
            self.correlate_publish()
Beispiel #48
0
def update_recording_bytes_to_unicode(rec_dir):
    logger.info("Updating recording from bytes to unicode.")

    # update to python 3
    meta_info_path = os.path.join(rec_dir, "info.csv")

    def convert(data):
        if isinstance(data, bytes):
            return data.decode()
        elif isinstance(data, str) or isinstance(data, np.ndarray):
            return data
        elif isinstance(data, collections.Mapping):
            return dict(map(convert, data.items()))
        elif isinstance(data, collections.Iterable):
            return type(data)(map(convert, data))
        else:
            return data

    for file in os.listdir(rec_dir):
        rec_file = os.path.join(rec_dir, file)
        try:
            rec_object = load_object(rec_file)
            converted_object = convert(rec_object)
            if converted_object != rec_object:
                logger.info(
                    'Converted `{}` from bytes to unicode'.format(file))
                save_object(converted_object, rec_file)
        except (ValueError, IsADirectoryError):
            continue
        # except TypeError:
        #     logger.error('TypeError when parsing `{}`'.format(file))
        #     continue

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
        meta_info['Capture Software Version'] = 'v0.8.8'

    with open(meta_info_path, 'w', newline='') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
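
A standalone sketch of the bytes-to-unicode recursion above; it is written against collections.abc (required on Python 3.10+), drops the np.ndarray pass-through to stay self-contained, and uses made-up sample data:

import collections.abc as cabc

# Simplified standalone copy of convert(); made-up sample data.
def convert(data):
    if isinstance(data, bytes):
        return data.decode()
    elif isinstance(data, str):
        return data
    elif isinstance(data, cabc.Mapping):
        return dict(map(convert, data.items()))
    elif isinstance(data, cabc.Iterable):
        return type(data)(map(convert, data))
    return data

assert convert({b'topic': [b'gaze', 'ok', 1.0]}) == {'topic': ['gaze', 'ok', 1.0]}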
Beispiel #49
0
    def from_file(
        directory: str, cam_name: str, resolution: T.Tuple[int]
    ) -> "Camera_Model":
        """
        Loads recorded intrinsics for the given camera and resolution. If no recorded
        intrinsics are available we fall back to default values. If no default values
        are available, we use dummy intrinsics.
        :param directory: The directory in which to look for the intrinsics file.
        :param cam_name: Name of the camera, e.g. 'Pupil Cam 1 ID2'.
        :param resolution: Camera resolution.
        """
        file_path = os.path.join(
            directory, "{}.intrinsics".format(cam_name.replace(" ", "_"))
        )
        try:
            intrinsics_dict = load_object(file_path, allow_legacy=False)

            if intrinsics_dict["version"] < __version__:
                logger.warning("Deprecated camera intrinsics found.")
                logger.info(
                    "Please recalculate the camera intrinsics using the Camera"
                    " Intrinsics Estimation."
                )
                os.rename(
                    file_path,
                    "{}.deprecated.v{}".format(file_path, intrinsics_dict["version"]),
                )

            intrinsics = intrinsics_dict[str(resolution)]
            logger.info("Loading previously recorded intrinsics...")
            return Camera_Model._from_raw_intrinsics(cam_name, resolution, intrinsics)
        except Exception:
            logger.debug(
                f"No recorded intrinsics found for camera {cam_name} at resolution"
                f" {resolution}"
            )
            return Camera_Model.from_default(cam_name, resolution)
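
A usage sketch for the loader above; the directory, camera name, and resolution are placeholders:

# Placeholder arguments; falls back to default (or dummy) intrinsics when no
# matching '<cam_name>.intrinsics' file exists in the directory.
cam = Camera_Model.from_file('/path/to/recording', 'Pupil Cam 1 ID2', (1280, 720))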
Beispiel #50
0
def rectify_gaze_data(path, K, D, rect_camera_matrix):

    #if not os.path.exists(path + 'pupil_data_original'):
    #    data = load_object(path + 'pupil_data')
    #    save_object(data, path + 'pupil_data_original')
    #else:
    #    data = load_object(path + 'pupil_data_original')

    data = load_object(path + 'pupil_data')

    if 'gaze_positions' not in data:
        print("no gaze_positions", data.keys())
        return

    gazes = data['gaze_positions']
    for g in gazes:
        gaze = denormalize(g['norm_pos'], 1280, 720)
        gaze = np.float32(gaze).reshape(-1, 1, 2)
        gaze = cv2.fisheye.undistortPoints(gaze, K, D,
                                           P=rect_camera_matrix).reshape(2)
        gaze = normalize(gaze, 1280, 720)
        g['norm_pos'] = gaze

    save_object(data, path + 'pupil_data_corrected')
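
The snippet assumes normalize/denormalize helpers that convert between normalized and pixel coordinates; a minimal sketch matching the call signatures above (the real helpers may additionally flip the y axis):

# Minimal stand-ins for denormalize(pos, width, height) / normalize(pos, width, height)
# as called above; the project's actual helpers may also flip y.
def denormalize(norm_pos, width, height):
    return norm_pos[0] * width, norm_pos[1] * height

def normalize(pixel_pos, width, height):
    return pixel_pos[0] / width, pixel_pos[1] / height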
Beispiel #51
0
    def update_offline_calibrations_to_latest_version(cls, rec_dir):
        calib_dir = cls._calibration_directory_from_recording(rec_dir)

        if not calib_dir.exists():
            return

        if not calib_dir.is_dir():
            return  # TODO: Raise exception - "calibrations" must be a directory

        for calib_path in sorted(calib_dir.glob("[!.]*.plcal")):
            calib_dict = fm.load_object(calib_path)
            version = calib_dict.get("version", None)
            data = calib_dict.get("data", None)

            if version == Calibration.version:
                # Calibration already at current version
                continue  # No-op

            elif version > Calibration.version:
                # Calibration at newer version than current model
                continue  # No-op

            elif data is not None:
                try:
                    if version == CalibrationV1.version == 1:
                        cls.__update_and_save_calibration_v1_as_latest_version(
                            rec_dir, data
                        )
                        continue  # Success
                except Exception as err:
                    logger.warning(str(err))

            # Failed to update
            logger.warning(
                f'Unable to update calibration "{calib_path.name}" from version {version} to the current version {Calibration.version}'
            )
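
Judging from the loop above, each .plcal file deserializes to a dict with at least "version" and "data" keys; a minimal sketch of that shape (contents are placeholders):

# Minimal shape the update loop expects (placeholder contents).
example_plcal_dict = {
    'version': 1,  # compared against Calibration.version
    'data': {},    # payload handed to the per-version updater
}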
Beispiel #52
0
def update_recording_bytes_to_unicode(rec_dir):
    logger.info("Updating recording from bytes to unicode.")

    def convert(data):
        if isinstance(data, bytes):
            return data.decode()
        elif isinstance(data, str) or isinstance(data, np.ndarray):
            return data
        elif isinstance(data, collections.Mapping):
            return dict(map(convert, data.items()))
        elif isinstance(data, collections.Iterable):
            return type(data)(map(convert, data))
        else:
            return data

    for file in os.listdir(rec_dir):
        if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4',
                                                                 '.avi'):
            continue
        rec_file = os.path.join(rec_dir, file)
        try:
            rec_object = fm.load_object(rec_file)
            converted_object = convert(rec_object)
            if converted_object != rec_object:
                logger.info(
                    'Converted `{}` from bytes to unicode'.format(file))
                fm.save_object(converted_object, rec_file)
        except (fm.UnpicklingError, IsADirectoryError):
            continue

    # manually convert k v dicts.
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    with open(meta_info_path, 'w', newline='') as csvfile:
        csv_utils.write_key_value_file(csvfile, meta_info)
Beispiel #53
0
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=[],
           out_file_path=None):

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    #parse info.csv file
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path) as info:
        meta_info = dict(
            ((line.strip().split('\t')) for line in info.readlines()))

    video_path = glob(os.path.join(rec_dir, "world.*"))[0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    rec_version = read_rec_version(meta_info)
    if rec_version >= VersionFormat('0.5'):
        pass
    elif rec_version >= VersionFormat('0.4'):
        update_recording_0v4_to_current(rec_dir)
    elif rec_version >= VersionFormat('0.3'):
        update_recording_0v3_to_current(rec_dir)
        timestamps_path = os.path.join(rec_dir, "timestamps.npy")
    else:
        logger.error("This recording is too old. Sorry.")
        return

    timestamps = np.load(timestamps_path)

    cap = File_Capture(video_path, timestamps=timestamps_path)

    # Out file path verification. We do this beforehand, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification
    # Make sure the trim marks (start frame, end frame) make sense: we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:
        start_frame = 0

    # These two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    #setup of writer
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.capture = cap
    g.rec_dir = rec_dir
    g.user_dir = user_dir
    g.rec_version = rec_version
    g.timestamps = timestamps

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']

    g.pupil_positions_by_frame = correlate_data(pupil_list, g.timestamps)
    g.gaze_positions_by_frame = correlate_data(gaze_list, g.timestamps)
    g.fixations_by_frame = [[] for x in g.timestamps
                            ]  #populated by the fixation detector plugin

    #add plugins
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            #explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['gaze_positions'] = g.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g.pupil_positions_by_frame[frame.index]

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
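
The trim-mark check above leans on Python slice semantics; a small worked example with made-up timestamps:

# Made-up timestamps; trim marks behave exactly like list slice bounds.
timestamps = [0.0, 0.1, 0.2, 0.3, 0.4]
assert len(timestamps[1:4]) == 3         # frames 1, 2 and 3 would be exported
assert len(timestamps[4:2]) == 0         # inverted trim marks export nothing
assert len(timestamps[None:None]) == 5   # start/end of None mean "whole recording"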
Beispiel #54
0
#cd /home/gazetracker/pupil/pupil_v0.9.14-7_linux_x64/pupil/pupil_src/shared_modules
from file_methods import save_object, load_object
import os
import numpy

##check dictionary
pupil_data = load_object(
    os.path.join("/home/gazetracker/recordings/2018_02_17/006_95%",
                 "pupil_data"))
#print(pupil_data.keys())
#dict_keys(['gaze_positions', 'fixations', 'notifications', 'pupil_positions'])
gaze = pupil_data['gaze_positions']
#print(gaze[0].keys())
#dict_keys(['norm_pos', 'gaze_point_3d', 'topic', 'base_data', 'timestamp', 'gaze_normals_3d', 'eye_centers_3d', 'confidence'])

##extract & save gaze_pose from dictionary
norm = []
time = []
for g in gaze:
    time.append(g['timestamp'])
    norm.append(g['norm_pos'])
numpy.save("/home/gazetracker/recordings/2018_02_17/006_95%/norm_pos.npy",
           norm)
numpy.save("/home/gazetracker/recordings/2018_02_17/006_95%/pos_timestamp.npy",
           time)

##load gaze_pose
norm_pos = numpy.load(
    "/home/gazetracker/recordings/2018_02_17/006_95%/norm_pos.npy")  #(6812, 2)
Beispiel #55
0
def check_for_worldless_recording(rec_dir):
    logger.info("Checking for world-less recording")
    valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg")

    world_video_exists = any(
        (
            os.path.splitext(f)[1] in valid_ext
            for f in glob.glob(os.path.join(rec_dir, "world.*"))
        )
    )

    if not world_video_exists:
        fake_world_version = 1
        fake_world_path = os.path.join(rec_dir, "world.fake")
        if os.path.exists(fake_world_path):
            fake_world = fm.load_object(fake_world_path)
            if fake_world["version"] == fake_world_version:
                return

        logger.warning("No world video found. Constructing an artificial replacement.")
        eye_ts_files = glob.glob(os.path.join(rec_dir, "eye*_timestamps.npy"))

        min_ts = np.inf
        max_ts = -np.inf
        for f in eye_ts_files:
            try:
                eye_ts = np.load(f)
                assert len(eye_ts.shape) == 1
                assert eye_ts.shape[0] > 1
                min_ts = min(min_ts, eye_ts[0])
                max_ts = max(max_ts, eye_ts[-1])
            except AssertionError:
                logger.debug(
                    (
                        "Ignoring {} since it does not conform with the expected format"
                        " (one-dimensional list with at least two entries)".format(f)
                    )
                )
        assert -np.inf < min_ts < max_ts < np.inf, (
            "This recording is invalid because it does not contain any valid eye timestamp"
            " files from which artificial world timestamps could be generated."
        )

        frame_rate = 30
        timestamps = np.arange(min_ts, max_ts, 1 / frame_rate)
        np.save(os.path.join(rec_dir, "world_timestamps.npy"), timestamps)
        fm.save_object(
            {
                "frame_rate": frame_rate,
                "frame_size": (1280, 720),
                "version": fake_world_version,
            },
            os.path.join(rec_dir, "world.fake"),
        )
        lookup_entry = np.dtype(
            [
                ("container_idx", "<i8"),
                ("container_frame_idx", "<i8"),
                ("timestamp", "<f8"),
            ]
        )
        lookup = np.empty(timestamps.size, dtype=lookup_entry).view(np.recarray)
        lookup.timestamp = timestamps
        lookup.container_idx = -1
        np.save(os.path.join(rec_dir, "world_lookup.npy"), lookup)
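
A quick check of the artificial-timestamp construction above, with a made-up one-second eye timestamp range:

import numpy as np

# Made-up range: one second of recording at 30 fps yields ~30 fake world timestamps.
min_ts, max_ts, frame_rate = 100.0, 101.0, 30
timestamps = np.arange(min_ts, max_ts, 1 / frame_rate)
assert timestamps[0] == min_ts
assert abs(len(timestamps) - frame_rate * (max_ts - min_ts)) <= 1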
Beispiel #56
0
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           min_data_confidence,
           start_frame=None,
           end_frame=None,
           plugin_initializers=(),
           out_file_path=None):

    vis_plugins = sorted([
        Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points, Vis_Watermark,
        Scan_Path
    ],
                         key=lambda x: x.__name__)
    analysis_plugins = sorted([
        Manual_Gaze_Correction, Eye_Video_Overlay,
        Pupil_Angle_3D_Fixation_Detector, Gaze_Position_2D_Fixation_Detector
    ],
                              key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(
        os.path.join(user_dir, 'plugins')),
                          key=lambda x: x.__name__)
    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    index_by_name = dict(zip(name_by_index, range(len(name_by_index))))
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [
        f for f in glob(os.path.join(rec_dir, "world.*"))
        if f[-3:] in ('mp4', 'mkv', 'avi')
    ][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    meta_info = load_meta_info(rec_dir)
    rec_version = read_rec_version(meta_info)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification. We do this beforehand, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    # Trim mark verification
    # Make sure the trim marks (start frame, end frame) make sense: we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:
        start_frame = 0

    # These two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    #setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.rec_version = rec_version
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']
    g_pool.pupil_positions_by_frame = correlate_data(pupil_list,
                                                     g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(gaze_list,
                                                    g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps
                                 ]  #populated by the fixation detector plugin

    #add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            #explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[
            frame.index]

        # publish delayed notifications when their time has come.
        for n in list(g_pool.delayed_notifications.values()):
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifications:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
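
A standalone sketch of the delayed-notification hand-off in the loop above (the notification itself is made up); copying the values with list() is what makes the in-loop deletion safe:

from time import time

# Made-up delayed notification whose publish time has already passed.
delayed_notifications = {
    'recording.should_stop': {'subject': 'recording.should_stop',
                              '_notify_time_': time() - 1.0},
}
notifications = []
for n in list(delayed_notifications.values()):  # copy: entries are deleted while iterating
    if n['_notify_time_'] < time():
        del n['_notify_time_']
        del delayed_notifications[n['subject']]
        notifications.append(n)
assert notifications == [{'subject': 'recording.should_stop'}]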
Beispiel #57
0
    def start(self):
        self.data = {
            'pupil_positions': [],
            'gaze_positions': [],
            'notifications': []
        }
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug(
                "Created new recordings session dir {}".format(session))

        except FileExistsError:
            logger.debug(
                "Recordings session dir {} already exists, using it.".format(
                    session))

        if self.rec_path is None:
            # set up self incrementing folder within session folder
            counter = 0
            while True:
                self.rec_path = os.path.join(session,
                                             "{:03d}/".format(counter))
                try:
                    os.mkdir(self.rec_path)
                    logger.debug("Created new recording dir {}".format(
                        self.rec_path))
                    break
                except FileExistsError:
                    logger.debug(
                        "We don't want to overwrite data, incrementing counter & trying to make new data folder"
                    )
                    counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w', newline='') as csvfile:
            csv_utils.write_key_value_file(
                csvfile, {
                    'Recording Name': self.session_name,
                    'Start Date': strftime("%d.%m.%Y",
                                           localtime(self.start_time)),
                    'Start Time': strftime("%H:%M:%S",
                                           localtime(self.start_time))
                })

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, 'h264_buffer'):
            self.writer = H264Writer(self.video_path,
                                     self.g_pool.capture.frame_size[0],
                                     self.g_pool.capture.frame_size[1],
                                     int(self.g_pool.capture.frame_rate))
        else:
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir,
                                       "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {
                'subject': 'calibration.calibration_data',
                'record': True
            }
            notification.update(cal_data)
            self.data['notifications'].append(notification)
        except:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            'subject': 'recording.started',
            'rec_path': self.rec_path,
            'session_name': self.session_name,
            'record_eye': self.record_eye,
            'compression': self.raw_jpeg
        })
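
A hedged sketch of the tab-separated key/value format used for info.csv; it only mirrors how the export example parses the file with line.strip().split('\t') and may differ from the real csv_utils helpers:

# Simplified stand-ins for csv_utils; the real helpers may handle quoting and appending differently.
def write_key_value_file(csvfile, data):
    for key, value in data.items():
        csvfile.write('{}\t{}\n'.format(key, value))

def read_key_value_file(csvfile):
    return dict(line.strip().split('\t', 1) for line in csvfile if line.strip())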
Beispiel #58
0
    def start(self):
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()

        if isinstance(self.g_pool.capture, NDSI_Source):
            # If the user did not enable TimeSync, the timestamps will be way off and
            # the recording code will crash. We check the difference between the last
            # frame's time and the start_time_synced and if this does not match, we stop
            # the recording and show a warning instead.
            TIMESTAMP_ERROR_THRESHOLD = 5.0
            frame = self.g_pool.capture._recent_frame
            if frame is None:
                logger.error(
                    "Your connection does not seem to be stable enough for "
                    "recording Pupil Mobile via WiFi. We recommend recording "
                    "on the phone.")
                return
            if abs(frame.timestamp -
                   start_time_synced) > TIMESTAMP_ERROR_THRESHOLD:
                logger.error(
                    "Pupil Mobile stream is not in sync. Aborting recording."
                    " Enable the Time Sync plugin and try again.")
                return

        session = os.path.join(self.rec_root_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug(
                "Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".
                format(session))
            return

        self.pldata_writers = {}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        recording_uuid = uuid.uuid4()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(
                    self.rec_path))
                break
            except FileExistsError:
                logger.debug(
                    "We don't want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info = RecordingInfoFile.create_empty_file(self.rec_path)
        self.meta_info.recording_software_name = (
            RecordingInfoFile.RECORDING_SOFTWARE_NAME_PUPIL_CAPTURE)
        self.meta_info.recording_software_version = str(self.g_pool.version)
        self.meta_info.recording_name = self.session_name
        self.meta_info.start_time_synced_s = start_time_synced
        self.meta_info.start_time_system_s = self.start_time
        self.meta_info.recording_uuid = recording_uuid
        self.meta_info.system_info = get_system_info()

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path, start_time_synced)
        elif hasattr(self.g_pool.capture._recent_frame, "h264_buffer"):
            self.writer = H264Writer(
                self.video_path,
                self.g_pool.capture.frame_size[0],
                self.g_pool.capture.frame_size[1],
                int(self.g_pool.capture.frame_rate),
            )
        else:
            self.writer = MPEG_Writer(self.video_path, start_time_synced)

        calibration_data_notification_classes = [
            CalibrationSetupNotification,
            CalibrationResultNotification,
        ]
        writer = PLData_Writer(self.rec_path, "notify")

        for note_class in calibration_data_notification_classes:
            try:
                file_path = os.path.join(self.g_pool.user_dir,
                                         note_class.file_name())
                note = note_class.from_dict(load_object(file_path))
                note_dict = note.as_dict()

                note_dict["topic"] = "notify." + note_dict["subject"]
                writer.append(note_dict)
            except FileNotFoundError:
                continue

        self.pldata_writers["notify"] = writer

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            "subject": "recording.started",
            "rec_path": self.rec_path,
            "session_name": self.session_name,
            "record_eye": self.record_eye,
            "compression": self.raw_jpeg,
            "start_time_synced": float(start_time_synced),
        })
Beispiel #59
0
        for p in points:
            cam1_points.append(p)
            noise = 0  #randomize point in eye space
            pr = p  #+ np.array( (uniform(-noise,+noise),uniform(-noise,+noise),uniform(-noise,+noise))  )
            p2 = toEye(pr)  # to cam2 coordinate system
            # print p,p2,toWorld(p2)
            # p2 *= 1.2,1.0,1.0
            cam2_points.append(p2)

        cam1_observation = [p / np.linalg.norm(p) for p in cam1_points]
        cam2_observation = [p / np.linalg.norm(p) for p in cam2_points]

    else:
        #load real data:
        import file_methods
        cam1_observation, cam2_observation = file_methods.load_object(
            'testdata')

    # initial guess through rigid transform

    initial_R, initial_t = find_rigid_transform(np.array(cam2_observation),
                                                np.array(cam1_observation))
    initial_rotation_quaternion = math_helper.quaternion_from_rotation_matrix(
        initial_R)
    initial_translation = np.array(initial_t).reshape(3)
    if synth_data:
        initial_translation *= np.linalg.norm(cam2_center) / np.linalg.norm(
            initial_translation)

    # setup bundle adjustment

    o1 = {
Beispiel #60
0
def player(rec_dir, ipc_pub_url, ipc_sub_url, ipc_push_url, user_dir,
           app_version):
    # general imports
    from time import sleep
    import logging
    import errno
    from glob import glob
    from time import time
    # networking
    import zmq
    import zmq_tools

    import numpy as np

    # zmq ipc setup
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=('notify', ))

    # log setup
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.INFO)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    try:

        # imports
        from file_methods import Persistent_Dict, load_object

        # display
        import glfw
        # check versions for our own dependencies as they are fast-changing
        from pyglui import __version__ as pyglui_version

        from pyglui import ui, cygl
        from pyglui.cygl.utils import Named_Texture, RGBA
        import gl_utils
        # capture
        from video_capture import File_Source, EndofVideoFileError

        # helpers/utils
        from version_utils import VersionFormat
        from methods import normalize, denormalize, delta_t, get_system_info
        from player_methods import correlate_data, is_pupil_rec_dir, load_meta_info

        # Plug-ins
        from plugin import Plugin, Plugin_List, import_runtime_plugins
        from plugin_manager import Plugin_Manager
        from vis_circle import Vis_Circle
        from vis_cross import Vis_Cross
        from vis_polyline import Vis_Polyline
        from vis_light_points import Vis_Light_Points
        from vis_watermark import Vis_Watermark
        from vis_fixation import Vis_Fixation
        from vis_scan_path import Vis_Scan_Path
        from vis_eye_video_overlay import Vis_Eye_Video_Overlay
        from seek_control import Seek_Control
        from video_export_launcher import Video_Export_Launcher
        from offline_surface_tracker import Offline_Surface_Tracker
        # from marker_auto_trim_marks import Marker_Auto_Trim_Marks
        from fixation_detector import Offline_Fixation_Detector
        from batch_exporter import Batch_Exporter, Batch_Export
        from log_display import Log_Display
        from annotations import Annotation_Player
        from raw_data_exporter import Raw_Data_Exporter
        from log_history import Log_History
        from pupil_producers import Pupil_From_Recording, Offline_Pupil_Detection
        from gaze_producers import Gaze_From_Recording, Offline_Calibration
        from system_graphs import System_Graphs
        from system_timelines import System_Timelines
        from blink_detection import Offline_Blink_Detection

        assert VersionFormat(pyglui_version) >= VersionFormat(
            '1.17'), 'pyglui out of date, please upgrade to newest version'

        runtime_plugins = import_runtime_plugins(
            os.path.join(user_dir, 'plugins'))
        system_plugins = [
            Log_Display, Seek_Control, Plugin_Manager, System_Graphs,
            Batch_Export, System_Timelines
        ]
        user_plugins = [
            Vis_Circle, Vis_Fixation, Vis_Polyline, Vis_Light_Points,
            Vis_Cross, Vis_Watermark, Vis_Eye_Video_Overlay, Vis_Scan_Path,
            Offline_Fixation_Detector, Offline_Blink_Detection, Batch_Exporter,
            Video_Export_Launcher, Offline_Surface_Tracker, Raw_Data_Exporter,
            Annotation_Player, Log_History, Pupil_From_Recording,
            Offline_Pupil_Detection, Gaze_From_Recording, Offline_Calibration
        ] + runtime_plugins

        plugins = system_plugins + user_plugins

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal hdpi_factor

            hdpi_factor = float(
                glfw.glfwGetFramebufferSize(window)[0] /
                glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            g_pool.camera_render_size = w - int(
                icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(*window_size)
            g_pool.gui.collect_menus()
            for p in g_pool.plugins:
                p.on_window_resize(window, *g_pool.camera_render_size)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x, y = x * hdpi_factor, y * hdpi_factor
            g_pool.gui.update_mouse(x, y)
            pos = x, y
            pos = normalize(pos, g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, g_pool.capture.frame_size)
            for p in g_pool.plugins:
                p.on_pos(pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            for x in range(count):
                new_rec_dir = paths[x].decode('utf-8')
                if is_pupil_rec_dir(new_rec_dir):
                    logger.debug(
                        "Starting new session with '{}'".format(new_rec_dir))
                    ipc_pub.notify({
                        "subject": "player_drop_process.should_start",
                        "rec_dir": new_rec_dir
                    })
                    glfw.glfwSetWindowShouldClose(window, True)
                else:
                    logger.error("'{}' is not a valid pupil recording".format(
                        new_rec_dir))

        tick = delta_t()

        def get_dt():
            return next(tick)

        video_path = [
            f for f in glob(os.path.join(rec_dir, "world.*"))
            if os.path.splitext(f)[1] in ('.mp4', '.mkv', '.avi', '.h264',
                                          '.mjpeg')
        ][0]
        pupil_data_path = os.path.join(rec_dir, "pupil_data")

        meta_info = load_meta_info(rec_dir)

        # log info about Pupil Platform and Platform in player.log
        logger.info('Application Version: {}'.format(app_version))
        logger.info('System Info: {}'.format(get_system_info()))

        icon_bar_width = 50
        window_size = None
        hdpi_factor = 1.0

        # create container for globally scoped vars
        g_pool = Global_Container()
        g_pool.app = 'player'
        g_pool.zmq_ctx = zmq_ctx
        g_pool.ipc_pub = ipc_pub
        g_pool.ipc_pub_url = ipc_pub_url
        g_pool.ipc_sub_url = ipc_sub_url
        g_pool.ipc_push_url = ipc_push_url
        g_pool.plugin_by_name = {p.__name__: p for p in plugins}
        g_pool.camera_render_size = None

        # sets itself to g_pool.capture
        File_Source(g_pool, video_path)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(user_dir, "user_settings_player"))
        if VersionFormat(session_settings.get("version",
                                              '0.0')) != app_version:
            logger.info(
                "Session settings are from a different version of this app. I will not use them."
            )
            session_settings.clear()

        g_pool.capture.playback_speed = session_settings.get(
            'playback_speed', 1.)

        width, height = session_settings.get('window_size',
                                             g_pool.capture.frame_size)
        window_pos = session_settings.get('window_position',
                                          window_position_default)
        glfw.glfwInit()
        main_window = glfw.glfwCreateWindow(
            width, height, "Pupil Player: " + meta_info["Recording Name"] +
            " - " + rec_dir.split(os.path.sep)[-1], None, None)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()
        g_pool.main_window = main_window

        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            window_size = (
                g_pool.camera_render_size[0] +
                int(icon_bar_width * g_pool.gui_user_scale * hdpi_factor),
                glfw.glfwGetFramebufferSize(main_window)[1])
            logger.warning(icon_bar_width * g_pool.gui_user_scale *
                           hdpi_factor)
            glfw.glfwSetWindowSize(main_window, *window_size)

        # load pupil_positions, gaze_positions
        g_pool.pupil_data = load_object(pupil_data_path)
        g_pool.binocular = meta_info.get('Eye Mode',
                                         'monocular') == 'binocular'
        g_pool.version = app_version
        g_pool.timestamps = g_pool.capture.timestamps
        g_pool.get_timestamp = lambda: 0.
        g_pool.new_seek = True
        g_pool.user_dir = user_dir
        g_pool.rec_dir = rec_dir
        g_pool.meta_info = meta_info
        g_pool.min_data_confidence = session_settings.get(
            'min_data_confidence', 0.6)

        g_pool.pupil_positions = []
        g_pool.gaze_positions = []
        g_pool.fixations = []

        g_pool.notifications_by_frame = correlate_data(
            g_pool.pupil_data['notifications'], g_pool.timestamps)
        g_pool.pupil_positions_by_frame = [[] for x in g_pool.timestamps
                                           ]  # populated by producer
        g_pool.gaze_positions_by_frame = [[] for x in g_pool.timestamps
                                          ]  # populated by producer
        g_pool.fixations_by_frame = [
            [] for x in g_pool.timestamps
        ]  # populated by the fixation detector plugin

        def set_data_confidence(new_confidence):
            g_pool.min_data_confidence = new_confidence
            notification = {'subject': 'min_data_confidence_changed'}
            notification['_notify_time_'] = time() + .8
            g_pool.ipc_pub.notify(notification)

        def open_plugin(plugin):
            if plugin == "Select to load":
                return
            g_pool.plugins.add(plugin)

        def purge_plugins():
            for p in g_pool.plugins:
                if p.__class__ in user_plugins:
                    p.alive = False
            g_pool.plugins.clean()

        def do_export(_):
            export_range = g_pool.seek_control.trim_left, g_pool.seek_control.trim_right
            export_dir = os.path.join(g_pool.rec_dir, 'exports',
                                      '{}-{}'.format(*export_range))
            try:
                os.makedirs(export_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create export dir")
                    raise e
                else:
                    overwrite_warning = "Previous export for range [{}-{}] already exists - overwriting."
                    logger.warning(overwrite_warning.format(*export_range))
            else:
                logger.info('Created export dir at "{}"'.format(export_dir))

            notification = {
                'subject': 'should_export',
                'range': export_range,
                'export_dir': export_dir
            }
            g_pool.ipc_pub.notify(notification)

        def reset_restart():
            logger.warning("Resetting all settings and restarting Player.")
            glfw.glfwSetWindowShouldClose(main_window, True)
            ipc_pub.notify({'subject': 'clear_settings_process.should_start'})
            ipc_pub.notify({
                'subject': 'player_process.should_start',
                'rec_dir': rec_dir,
                'delay': 2.
            })

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
        g_pool.menubar = ui.Scrolling_Menu("Settings",
                                           pos=(-500, 0),
                                           size=(-icon_bar_width, 0),
                                           header_pos='left')
        g_pool.iconbar = ui.Scrolling_Menu("Icons",
                                           pos=(-icon_bar_width, 0),
                                           size=(0, 0),
                                           header_pos='hidden')
        g_pool.timelines = ui.Container((0, 0), (0, 0), (0, 0))
        g_pool.timelines.horizontal_constraint = g_pool.menubar
        g_pool.user_timelines = ui.Timeline_Menu('User Timelines',
                                                 pos=(0., -150.),
                                                 size=(0., 0.),
                                                 header_pos='headline')
        g_pool.user_timelines.color = RGBA(a=0.)
        g_pool.user_timelines.collapsed = True
        # add container that constrains itself to the seekbar height
        vert_constr = ui.Container((0, 0), (0, -50.), (0, 0))
        vert_constr.append(g_pool.user_timelines)
        g_pool.timelines.append(vert_constr)

        general_settings = ui.Growing_Menu('General', header_pos='headline')
        general_settings.append(
            ui.Button(
                'Reset window size', lambda: glfw.glfwSetWindowSize(
                    main_window, g_pool.capture.frame_size[0], g_pool.capture.
                    frame_size[1])))
        general_settings.append(
            ui.Selector('gui_user_scale',
                        g_pool,
                        setter=set_scale,
                        selection=[.8, .9, 1., 1.1, 1.2] +
                        list(np.arange(1.5, 5.1, .5)),
                        label='Interface Size'))
        general_settings.append(
            ui.Info_Text('Player Version: {}'.format(g_pool.version)))
        general_settings.append(
            ui.Info_Text('Capture Version: {}'.format(
                meta_info['Capture Software Version'])))
        general_settings.append(
            ui.Info_Text('Data Format Version: {}'.format(
                meta_info['Data Format Version'])))
        general_settings.append(
            ui.Slider('min_data_confidence',
                      g_pool,
                      setter=set_data_confidence,
                      step=.05,
                      min=0.0,
                      max=1.0,
                      label='Confidence threshold'))
        general_settings.append(
            ui.Button('Restart with default settings', reset_restart))

        g_pool.menubar.append(general_settings)
        icon = ui.Icon('collapsed',
                       general_settings,
                       label=chr(0xe8b8),
                       on_val=False,
                       off_val=True,
                       setter=toggle_general_settings,
                       label_font='pupil_icons')
        icon.tooltip = 'General Settings'
        g_pool.iconbar.append(icon)

        user_plugin_separator = ui.Separator()
        user_plugin_separator.order = 0.35
        g_pool.iconbar.append(user_plugin_separator)

        g_pool.quickbar = ui.Stretching_Menu('Quick Bar', (0, 100),
                                             (100, -100))
        g_pool.export_button = ui.Thumb('export',
                                        label=chr(0xe2c5),
                                        getter=lambda: False,
                                        setter=do_export,
                                        hotkey='e',
                                        label_font='pupil_icons')
        g_pool.quickbar.extend([g_pool.export_button])
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.timelines)
        g_pool.gui.append(g_pool.iconbar)
        g_pool.gui.append(g_pool.quickbar)

        # we always load these plugins
        default_plugins = [('Plugin_Manager', {}), ('Seek_Control', {}),
                           ('Log_Display', {}), ('Raw_Data_Exporter', {}),
                           ('Vis_Polyline', {}), ('Vis_Circle', {}),
                           ('System_Graphs', {}), ('System_Timelines', {}),
                           ('Video_Export_Launcher', {}),
                           ('Pupil_From_Recording', {}),
                           ('Gaze_From_Recording', {})]
        g_pool.plugins = Plugin_List(
            g_pool, session_settings.get('loaded_plugins', default_plugins))

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        toggle_general_settings(True)

        g_pool.gui.configuration = session_settings.get('ui_config', {})

        # gl_state settings
        gl_utils.basic_gl_setup()
        g_pool.image_tex = Named_Texture()

        # trigger on_resize
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        def handle_notifications(n):
            subject = n['subject']
            if subject == 'start_plugin':
                g_pool.plugins.add(g_pool.plugin_by_name[n['name']],
                                   args=n.get('args', {}))
            elif subject.startswith('meta.should_doc'):
                ipc_pub.notify({
                    'subject': 'meta.doc',
                    'actor': g_pool.app,
                    'doc': player.__doc__
                })
                for p in g_pool.plugins:
                    if (p.on_notify.__doc__
                            and p.__class__.on_notify != Plugin.on_notify):
                        ipc_pub.notify({
                            'subject': 'meta.doc',
                            'actor': p.class_name,
                            'doc': p.on_notify.__doc__
                        })

        while not glfw.glfwWindowShouldClose(main_window):

            # fetch newest notifications
            new_notifications = []
            while notify_sub.new_data:
                t, n = notify_sub.recv()
                new_notifications.append(n)

            # notify each plugin if there are new notifications:
            for n in new_notifications:
                handle_notifications(n)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # grab new frame
            if g_pool.capture.play or g_pool.new_seek:
                g_pool.new_seek = False
                try:
                    new_frame = g_pool.capture.get_frame()
                except EndofVideoFileError:
                    # end of video logic: pause at last frame.
                    g_pool.capture.play = False
                    logger.warning("end of video")

            frame = new_frame.copy()
            events = {}
            events['frame'] = frame
            # report time between now and the last loop iteration
            events['dt'] = get_dt()

            # pupil and gaze positions are added by their respective producer plugins
            events['pupil_positions'] = []
            events['gaze_positions'] = []

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            # check if a plugin needs to be destroyed
            g_pool.plugins.clean()

            glfw.glfwMakeContextCurrent(main_window)
            # render visual feedback from loaded plugins
            if gl_utils.is_window_visible(main_window):

                gl_utils.glViewport(0, 0, *g_pool.camera_render_size)
                g_pool.capture._recent_frame = frame
                g_pool.capture.gl_display()
                for p in g_pool.plugins:
                    p.gl_display()

                gl_utils.glViewport(0, 0, *window_size)

                try:
                    clipboard = glfw.glfwGetClipboardString(
                        main_window).decode()
                except AttributeError:  # clipboard is None, might happen on startup
                    clipboard = ''
                g_pool.gui.update_clipboard(clipboard)
                user_input = g_pool.gui.update()
                if user_input.clipboard and user_input.clipboard != clipboard:
                    # only write to clipboard if content changed
                    glfw.glfwSetClipboardString(main_window,
                                                user_input.clipboard.encode())

                for b in user_input.buttons:
                    button, action, mods = b
                    x, y = glfw.glfwGetCursorPos(main_window)
                    pos = x * hdpi_factor, y * hdpi_factor
                    pos = normalize(pos, g_pool.camera_render_size)
                    pos = denormalize(pos, g_pool.capture.frame_size)
                    for p in g_pool.plugins:
                        p.on_click(pos, button, action)

                for key, scancode, action, mods in user_input.keys:
                    for p in g_pool.plugins:
                        p.on_key(key, scancode, action, mods)

                for char_ in user_input.chars:
                    for p in g_pool.plugins:
                        p.on_char(char_)

                glfw.glfwSwapBuffers(main_window)

            # present frames at appropriate speed
            g_pool.capture.wait(frame)
            glfw.glfwPollEvents()

        session_settings['playback_speed'] = g_pool.capture.playback_speed
        session_settings['loaded_plugins'] = g_pool.plugins.get_initializers()
        session_settings['min_data_confidence'] = g_pool.min_data_confidence
        session_settings['gui_scale'] = g_pool.gui_user_scale
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(
            main_window)
        session_settings['version'] = str(g_pool.version)
        session_settings.close()

        # de-init all running plugins
        for p in g_pool.plugins:
            p.alive = False
        g_pool.plugins.clean()

        g_pool.capture.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)

    except:
        import traceback
        trace = traceback.format_exc()
        logger.error('Process Player crashed with trace:\n{}'.format(trace))
    finally:
        logger.info("Process shutting down.")
        ipc_pub.notify({'subject': 'player_process.stopped'})
        sleep(1.0)
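
correlate_data is used throughout these examples to bucket timestamped pupil/gaze/notification dicts by world frame; a rough standalone sketch of that contract (the real player_methods.correlate_data uses a slightly different assignment rule and handles edge cases differently):

import bisect

# Rough sketch only: one bucket per world frame; each datum goes to the first frame
# whose timestamp is not earlier than the datum's (clamped to the last frame).
def correlate_data_sketch(data, timestamps):
    data_by_frame = [[] for _ in timestamps]
    for datum in sorted(data, key=lambda d: d['timestamp']):
        idx = bisect.bisect_left(timestamps, datum['timestamp'])
        idx = min(idx, len(timestamps) - 1)
        data_by_frame[idx].append(datum)
    return data_by_frame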