def _try_patch_world_instrinsics_file(rec_dir: str, videos: T.Sequence[Path]) -> None:
    """Tries to create a reasonable world.intrinsics file from a set of videos."""
    if not videos:
        return

    # Make sure the default value always correlates to the frame size of BrokenStream
    frame_size = (1280, 720)
    # TODO: Due to the naming conventions for multipart-recordings, we can't
    # easily lookup 'any' video name in the pre_recorded_calibrations, since it
    # might be a multipart recording. Therefore we need to compute a hint here
    # for the lookup. This could be improved.
    camera_hint = ""
    for video in videos:
        try:
            container = av.open(str(video))
        except av.AVError:
            continue

        for camera in cm.pre_recorded_calibrations:
            if camera in video.name:
                camera_hint = camera
                break
        frame_size = (
            container.streams.video[0].format.width,
            container.streams.video[0].format.height,
        )
        break
    intrinsics = cm.load_intrinsics(rec_dir, camera_hint, frame_size)
    intrinsics.save(rec_dir, "world")
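# A minimal usage sketch of the frame-size probing pattern above, assuming a
# hypothetical recording folder with "world*.mp4" parts; the default size and
# the PyAV calls mirror the snippet, everything else is illustrative.
from pathlib import Path

import av


def probe_frame_size(videos, default=(1280, 720)):
    """Return (width, height) of the first openable video, else `default`."""
    for video in videos:
        try:
            container = av.open(str(video))
        except av.AVError:
            continue
        fmt = container.streams.video[0].format
        return fmt.width, fmt.height
    return default


# size = probe_frame_size(sorted(Path("recording").glob("world*.mp4")))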
def __init__(
    self,
    g_pool,
    source_path=None,
    loop=False,
    buffered_decoding=False,
    fill_gaps=False,
    *args,
    **kwargs,
):
    super().__init__(g_pool, *args, **kwargs)
    if self.timing == "external":
        self.recent_events = self.recent_events_external_timing
    else:
        self.recent_events = self.recent_events_own_timing

    # minimal attribute set
    self.source_path = str(source_path)
    self.loop = loop
    self.fill_gaps = fill_gaps

    rec, set_name = self.get_rec_set_name(self.source_path)
    self._init_videoset()
    self.timestamps = self.videoset.lookup.timestamp

    if len(self.timestamps) > 1:
        self._frame_rate = (self.timestamps[-1] - self.timestamps[0]) / len(
            self.timestamps
        )
    else:
        # TODO: where does the fallback framerate of 1/20 come from?
        self._frame_rate = 20

    self.buffering = buffered_decoding
    # Load video split for first frame
    self.reset_video()
    self._intrinsics = load_intrinsics(rec, set_name, self.frame_size)
def __init__(self, g_pool, frame_size, frame_rate, name=None,
             preferred_names=(), uid=None, uvc_controls={}):
    import platform

    super().__init__(g_pool)
    self.uvc_capture = None
    self._restart_in = 3
    assert name or preferred_names or uid
    if platform.system() == 'Windows':
        self.verify_drivers()
    self.devices = uvc.Device_List()
    devices_by_name = {dev['name']: dev for dev in self.devices}

    # if uid is supplied we init with that
    if uid:
        try:
            self.uvc_capture = uvc.Capture(uid)
        except uvc.OpenError:
            logger.warning("No available camera found that matched {}".format(preferred_names))
        except uvc.InitError:
            logger.error("Camera failed to initialize.")
        except uvc.DeviceNotFoundError:
            logger.warning("No camera found that matched {}".format(preferred_names))

    # otherwise we use name or preferred_names
    else:
        if name:
            preferred_names = (name,)
        else:
            pass
        assert preferred_names
        # try to init by name
        for name in preferred_names:
            for d_name in devices_by_name.keys():
                if name in d_name:
                    uid_for_name = devices_by_name[d_name]['uid']
                    try:
                        self.uvc_capture = uvc.Capture(uid_for_name)
                    except uvc.OpenError:
                        logger.info("{} matches {} but is already in use or blocked.".format(uid_for_name, name))
                    except uvc.InitError:
                        logger.error("Camera failed to initialize.")
                    else:
                        break

    # check if we were successful
    if not self.uvc_capture:
        logger.error("Init failed. Capture is started in ghost mode. No images will be supplied.")
        self.name_backup = preferred_names
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)
    else:
        self.configure_capture(frame_size, frame_rate, uvc_controls)
        self.name_backup = (self.name,)
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
def frame_size(self, new_size):
    # closest match for size
    sizes = [abs(r[0] - new_size[0]) for r in self.uvc_capture.frame_sizes]
    best_size_idx = sizes.index(min(sizes))
    size = self.uvc_capture.frame_sizes[best_size_idx]
    if tuple(size) != tuple(new_size):
        logger.warning("{} resolution capture mode not available. Selected {}.".format(new_size, size))
    self.uvc_capture.frame_size = size
    self.frame_size_backup = size

    self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)
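# Standalone sketch of the "closest match for size" selection used above: pick
# the supported mode whose width is nearest to the requested width. The
# candidate list below is made up for illustration.
def closest_frame_size(candidates, requested):
    """Pick the candidate size with the smallest width difference."""
    return min(candidates, key=lambda size: abs(size[0] - requested[0]))


candidates = [(320, 240), (640, 480), (1280, 720), (1920, 1080)]
assert closest_frame_size(candidates, (1000, 700)) == (1280, 720)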
def _load_intrinsics(file_name, name):
    try:
        video = av.open(file_name, "r")
    except av.AVError:
        frame_size = (480, 360)
    else:
        frame_size = (
            video.streams.video[0].format.width,
            video.streams.video[0].format.height,
        )
        del video
    intrinsics = load_intrinsics(self.rec_dir, name, frame_size)
    intrinsics.save(self.rec_dir, "world")
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, '*.time')
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name, time_ext = os.path.splitext(time_file_name)

        potential_locs = [os.path.join(rec_dir, time_name + ext) for ext in ('.mjpeg', '.mp4', '.m4a')]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            video_loc = existing_locs[0]

        if time_name in ('Pupil Cam1 ID0', 'Pupil Cam1 ID1', 'Pupil Cam2 ID0', 'Pupil Cam2 ID1'):
            time_name = 'eye' + time_name[-1]  # rename eye files
        elif time_name in ('Pupil Cam1 ID2', 'Logitech Webcam C930e'):
            video = av.open(video_loc, 'r')
            frame_size = video.streams.video[0].format.width, video.streams.video[0].format.height
            del video
            intrinsics = load_intrinsics(rec_dir, time_name, frame_size)
            intrinsics.save(rec_dir, 'world')

            time_name = 'world'  # assume world file
        elif time_name.startswith('audio_'):
            time_name = 'audio'

        timestamps = np.fromfile(time_loc, dtype='>f8')
        timestamp_loc = os.path.join(rec_dir, '{}_timestamps.npy'.format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == 'audio':
            video_dst = os.path.join(rec_dir, time_name) + '.mp4'
            logger.info('Renaming "{}" to "{}"'.format(os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)
        else:
            video_dst = os.path.join(rec_dir, time_name) + os.path.splitext(video_loc)[1]
            logger.info('Renaming "{}" to "{}"'.format(os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
            os.rename(video_loc, video_dst)

    pupil_data_loc = os.path.join(rec_dir, 'pupil_data')
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        save_object({'pupil_positions': [], 'gaze_positions': [], 'notifications': []}, pupil_data_loc)
def fake_gpool_surface(folder=None):
    if not folder:
        raise ValueError('we need the folder else we cannot load timestamps and surfaces etc.')

    surface_dir = os.path.join(folder, '../', 'surface')
    if not os.path.exists(surface_dir):
        os.makedirs(surface_dir)

    fake_gpool = gen_fakepool()
    fake_gpool.surfaces = []
    fake_gpool.rec_dir = surface_dir
    fake_gpool.timestamps = np.load(os.path.join(folder, 'world_timestamps.npy'))
    fake_gpool.capture.source_path = os.path.join(folder, 'world.mp4')
    fake_gpool.capture.intrinsics = load_intrinsics('', 'Pupil Cam1 ID2', (1280, 720))
    fake_gpool.seek_control = global_container()
    fake_gpool.seek_control.trim_left = 0
    fake_gpool.seek_control.trim_right = 0
    fake_gpool.timeline = global_container()
    return fake_gpool
def intrinsics(self):
    if self._intrinsics is None or self._intrinsics.resolution != self.frame_size:
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)

        if type(self._intrinsics) is Dummy_Camera:
            logger.info("Was dummy camera")
            saved_fp = os.path.join(self.g_pool.user_dir, "picoflexx_intrinsics")
            if os.path.exists(saved_fp):
                with open(saved_fp, "rb") as f:
                    K, D, self.frame_size, self.name = pickle.load(f)
                self._intrinsics = Radial_Dist_Camera(K, D, self.frame_size, self.name)
                logger.info("Loaded from saved")
            else:
                logger.info("No saved intrinsics?")

    return self._intrinsics
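# Minimal sketch of the pickled-intrinsics fallback above: persist a
# (K, D, frame_size, name) tuple and restore it later. Only the tuple layout
# and the "picoflexx_intrinsics" file name mirror the snippet; the matrix
# values and the temp-dir location are made up.
import os
import pickle
import tempfile

K = [[460.0, 0.0, 320.0], [0.0, 460.0, 240.0], [0.0, 0.0, 1.0]]
D = [0.1, -0.05, 0.0, 0.0, 0.0]
frame_size, name = (640, 480), "picoflexx"

saved_fp = os.path.join(tempfile.gettempdir(), "picoflexx_intrinsics")
with open(saved_fp, "wb") as f:
    pickle.dump((K, D, frame_size, name), f)
with open(saved_fp, "rb") as f:
    K2, D2, size2, name2 = pickle.load(f)
assert size2 == (640, 480)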
def __init__(
    self,
    g_pool,
    source_path=None,
    loop=False,
    buffered_decoding=False,
    fill_gaps=False,
    *args,
    **kwargs,
):
    super().__init__(g_pool, *args, **kwargs)
    if self.timing == "external":
        self.recent_events = self.recent_events_external_timing
    else:
        self.recent_events = self.recent_events_own_timing

    # minimal attribute set
    self._initialised = True
    self.source_path = source_path
    self.loop = loop
    self.fill_gaps = fill_gaps

    assert self.check_source_path(source_path)
    rec, set_name = self.get_rec_set_name(self.source_path)
    self.videoset = VideoSet(rec, set_name, self.fill_gaps)
    # Load or build lookup table
    self.videoset.load_or_build_lookup()
    self.timestamps = self.videoset.lookup.timestamp

    self.current_container_index = self.videoset.lookup.container_idx[0]
    self.target_frame_idx = 0
    self.current_frame_idx = 0
    self.buffering = buffered_decoding

    # First video file is valid
    if self.videoset.containers[self.current_container_index]:
        self.setup_video(self.current_container_index)  # load first split
    else:
        self.video_stream = BrokenStream()
        self.next_frame = self.video_stream.next_frame()
        self.pts_rate = 48000
        self.shape = (720, 1280, 3)

    self.average_rate = (self.timestamps[-1] - self.timestamps[0]) / len(
        self.timestamps
    )
    self._intrinsics = load_intrinsics(rec, set_name, self.frame_size)
def frame_size(self, new_size):
    # closest match for size
    sizes = [
        abs(r[0] - new_size[0]) + abs(r[1] - new_size[1])
        for r in self.uvc_capture.frame_sizes
    ]
    best_size_idx = sizes.index(min(sizes))
    size = self.uvc_capture.frame_sizes[best_size_idx]
    if tuple(size) != tuple(new_size):
        logger.warning(
            "{} resolution capture mode not available. Selected {}.".format(new_size, size)
        )
    self.uvc_capture.frame_size = size
    self.frame_size_backup = size

    self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)

    if self.should_check_stripes:
        self.stripe_detector = Check_Frame_Stripes()
def frame_size(self, new_size):
    # closest match for size
    sizes = [
        abs(r[0] - new_size[0]) + abs(r[1] - new_size[1])
        for r in self.uvc_capture.frame_sizes
    ]
    best_size_idx = sizes.index(min(sizes))
    size = self.uvc_capture.frame_sizes[best_size_idx]
    if tuple(size) != tuple(new_size):
        logger.warning(
            "{} resolution capture mode not available. Selected {}.".format(
                new_size, size
            )
        )
    self.uvc_capture.frame_size = size
    self.frame_size_backup = size

    self._intrinsics = load_intrinsics(
        self.g_pool.user_dir, self.name, self.frame_size
    )

    if self.check_stripes and ("Pupil Cam2" in self.uvc_capture.name):
        self.checkframestripes = Check_Frame_Stripes()
def _initialize_device(
    self,
    device_id,
    color_frame_size,
    color_fps,
    depth_frame_size,
    depth_fps,
    device_options=(),
):
    self.stop_pipeline()
    self.last_color_frame_ts = None
    self.last_depth_frame_ts = None
    self._recent_frame = None
    self._recent_depth_frame = None

    if device_id is None:
        device_id = self.device_id

    if device_id is None:  # FIXME these two if blocks look ugly.
        return

    # use default streams to filter modes by rs_stream and rs_format
    self._available_modes = self._enumerate_formats(device_id)
    logger.debug(
        "device_id: {} self._available_modes: {}".format(
            device_id, str(self._available_modes)
        )
    )

    if (
        color_frame_size is not None
        and depth_frame_size is not None
        and color_fps is not None
        and depth_fps is not None
    ):
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        logger.debug(
            "Initialize with Color {}@{}\tDepth {}@{}".format(
                color_frame_size, color_fps, depth_frame_size, depth_fps
            )
        )

        # make sure the frame rates are compatible with the given frame sizes
        color_fps = self._get_valid_frame_rate(
            rs.stream.color, color_frame_size, color_fps
        )
        depth_fps = self._get_valid_frame_rate(
            rs.stream.depth, depth_frame_size, depth_fps
        )

        self.frame_size_backup = color_frame_size
        self.depth_frame_size_backup = depth_frame_size
        self.frame_rate_backup = color_fps
        self.depth_frame_rate_backup = depth_fps

        config = self._prep_configuration(
            color_frame_size, color_fps, depth_frame_size, depth_fps
        )
    else:
        config = self._get_default_config()
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

    try:
        self.pipeline_profile = self.pipeline.start(config)
    except RuntimeError as re:
        logger.error("Cannot start pipeline! " + str(re))
        self.pipeline_profile = None
    else:
        self.stream_profiles = {
            s.stream_type(): s.as_video_stream_profile()
            for s in self.pipeline_profile.get_streams()
        }
        logger.debug("Pipeline started for device " + device_id)
        logger.debug("Stream profiles: " + str(self.stream_profiles))

        self._intrinsics = load_intrinsics(
            self.g_pool.user_dir, self.name, self.frame_size
        )

    self.update_menu()
    self._needs_restart = False
def __init__(
    self,
    g_pool,
    frame_size,
    frame_rate,
    name=None,
    preferred_names=(),
    uid=None,
    uvc_controls={},
    check_stripes=True,
    exposure_mode="manual",
):
    super().__init__(g_pool)
    self.ts_offset = -0.1
    print("UVC_Source")
    # self.uvc_capture = None
    self.video_capture = None
    self._restart_in = 3
    assert name or preferred_names or uid

    self.devices = Device_List()
    devices_by_name = {dev["name"]: dev for dev in self.devices}

    # if uid is supplied we init with that
    if uid:
        self.video_capture = VideoCaptureWrapper(uid)
    # otherwise we use name or preferred_names
    else:
        if name:
            preferred_names = (name,)
        else:
            pass
        assert preferred_names

        # try to init by name
        for name in preferred_names:
            for d_name in devices_by_name.keys():
                if name in d_name:
                    uid_for_name = devices_by_name[d_name]["uid"]
                    # open the desired device
                    self.video_capture = VideoCaptureWrapper(uid_for_name)

    # the stripe detector will be initialized accordingly in configure_capture()
    self.enable_stripe_checks = check_stripes
    self.exposure_mode = exposure_mode
    self.stripe_detector = None
    self.preferred_exposure_time = None

    # check if we were successful
    if not self.video_capture.isOpened():
        logger.error(
            "Init failed. Capture is started in ghost mode. No images will be supplied."
        )
        self.name_backup = preferred_names
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        self.exposure_time_backup = None
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)
    else:
        self.configure_capture(frame_size, frame_rate, uvc_controls)
        self.name_backup = (self.name,)
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        controls_dict = dict([(c.display_name, c) for c in self.video_capture.controls])
        try:
            self.exposure_time_backup = controls_dict["Absolute Exposure Time"].value
        except KeyError:
            self.exposure_time_backup = None
    self.backup_uvc_controls = {}
def __init__(
    self,
    g_pool,
    frame_size,
    frame_rate,
    name=None,
    preferred_names=(),
    uid=None,
    uvc_controls={},
    check_stripes=True,
    exposure_mode="manual",
    *args,
    **kwargs,
):
    super().__init__(g_pool, *args, **kwargs)
    self.uvc_capture = None
    self._last_ts = None
    self._restart_in = 3
    assert name or preferred_names or uid
    if platform.system() == "Windows":
        self.verify_drivers()
    self.devices = uvc.Device_List()
    devices_by_name = {dev["name"]: dev for dev in self.devices}

    # if uid is supplied we init with that
    if uid:
        try:
            self.uvc_capture = uvc.Capture(uid)
        except uvc.OpenError:
            logger.warning(
                "No available camera found that matched {}".format(preferred_names)
            )
        except uvc.InitError:
            logger.error("Camera failed to initialize.")
        except uvc.DeviceNotFoundError:
            logger.warning(
                "No camera found that matched {}".format(preferred_names)
            )
    # otherwise we use name or preferred_names
    else:
        if name:
            preferred_names = (name,)
        else:
            pass
        assert preferred_names

        # try to init by name
        for name in preferred_names:
            for d_name in devices_by_name.keys():
                if name in d_name:
                    uid_for_name = devices_by_name[d_name]["uid"]
                    try:
                        self.uvc_capture = uvc.Capture(uid_for_name)
                        break
                    except uvc.OpenError:
                        logger.info(
                            f"{uid_for_name} matches {name} but is already in use "
                            "or blocked."
                        )
                    except uvc.InitError:
                        logger.error("Camera failed to initialize.")
            if self.uvc_capture:
                break

    # the stripe detector will be initialized accordingly in configure_capture()
    self.enable_stripe_checks = check_stripes
    self.exposure_mode = exposure_mode
    self.stripe_detector = None
    self.preferred_exposure_time = None

    # check if we were successful
    if not self.uvc_capture:
        logger.error("Could not connect to device! No images will be supplied.")
        self.name_backup = preferred_names
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        self.exposure_time_backup = None
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)
    else:
        self.configure_capture(frame_size, frame_rate, uvc_controls)
        self.name_backup = (self.name,)
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        controls_dict = dict([(c.display_name, c) for c in self.uvc_capture.controls])
        try:
            self.exposure_time_backup = controls_dict["Absolute Exposure Time"].value
        except KeyError:
            self.exposure_time_backup = None
    self.backup_uvc_controls = {}
def _initialize_device(
    self,
    device_id,
    color_frame_size,
    color_fps,
    depth_frame_size,
    depth_fps,
    device_options=(),
):
    devices = tuple(self.service.get_devices())
    color_frame_size = tuple(color_frame_size)
    depth_frame_size = tuple(depth_frame_size)

    self.streams = [ColorStream(), DepthStream(), PointStream()]
    self.last_color_frame_ts = None
    self.last_depth_frame_ts = None
    self._recent_frame = None
    self._recent_depth_frame = None

    if not devices:
        if not self._needs_restart:
            logger.error("Camera failed to initialize. No cameras connected.")
        self.device = None
        self.update_menu()
        return

    if self.device is not None:
        self.device.stop()  # only call Device.stop() if its context

    if device_id >= len(devices):
        logger.error(
            "Camera with id {} not found. Initializing default camera.".format(device_id)
        )
        device_id = 0

    # use default streams to filter modes by rs_stream and rs_format
    self._available_modes = self._enumerate_formats(device_id)

    # make sure that given frame sizes and rates are available
    color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
    if color_frame_size not in color_modes:
        # automatically select highest resolution
        color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

    if color_fps not in color_modes[color_frame_size]:
        # automatically select highest frame rate
        color_fps = color_modes[color_frame_size][0]

    depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
    if self.align_streams:
        depth_frame_size = color_frame_size
    else:
        if depth_frame_size not in depth_modes:
            # automatically select highest resolution
            depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

    if depth_fps not in depth_modes[depth_frame_size]:
        # automatically select highest frame rate
        depth_fps = depth_modes[depth_frame_size][0]

    colorstream = ColorStream(
        width=color_frame_size[0],
        height=color_frame_size[1],
        fps=color_fps,
        color_format="yuv",
        preset=self.stream_preset,
    )
    depthstream = DepthStream(
        width=depth_frame_size[0],
        height=depth_frame_size[1],
        fps=depth_fps,
        preset=self.stream_preset,
    )
    pointstream = PointStream(
        width=depth_frame_size[0], height=depth_frame_size[1], fps=depth_fps
    )

    self.streams = [colorstream, depthstream, pointstream]
    if self.align_streams:
        dacstream = DACStream(
            width=depth_frame_size[0], height=depth_frame_size[1], fps=depth_fps
        )
        dacstream.name = "depth"  # rename data accessor
        self.streams.append(dacstream)

    # update with correctly initialized streams
    # always initializes color + depth, adds rectified/aligned versions as necessary
    self.device = self.service.Device(device_id, streams=self.streams)

    self.controls = Realsense_Controls(self.device, device_options)
    self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)

    self.update_menu()
    self._needs_restart = False
import numpy as np

import calibrate
import camera_models

the_grid = np.arange(7 * 5).reshape(5, 7)[::-1]
#print(the_grid)
#use_targets = [
#    9, 10, 11,
#    16, 17, 18,
#    23, 24, 25,
#    ]
use_targets = [15, 16, 17, 18, 19, 8, 9, 10, 11, 12, 1, 2, 3, 4, 6]
#threshold = np.deg2rad(2.0)
threshold = 35
screen_size = 1920, 1080
quality_threshold = 0.5

camera = camera_models.load_intrinsics(".", "Pupil Cam1 ID2", screen_size)


def load_session(pupil_log, time_delta=0):
    #info = {k: v for k, v in (r.split(',',1) for r in open(info))}
    #time_delta = float(info["Start Time (System)"]) - float(info["Start Time (Synced)"])
    calib_topics = [
        "notify.calibration.calibration_data",
        "notify.accuracy_test.data"
    ]
    lines = iter(pupil_log)
    session_start = "notify.calibration.started"

    calib_times = []
    movement_times = []
def convert_pupil_mobile_recording_to_v094(rec_dir):
    logger.info("Converting Pupil Mobile recording to v0.9.4 format")
    # convert time files and rename corresponding videos
    time_pattern = os.path.join(rec_dir, "*.time")
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name = os.path.splitext(time_file_name)[0]

        potential_locs = [
            os.path.join(rec_dir, time_name + ext) for ext in (".mjpeg", ".mp4", ".m4a")
        ]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            media_loc = existing_locs[0]

        if time_name in (
            "Pupil Cam1 ID0",
            "Pupil Cam1 ID1",
            "Pupil Cam2 ID0",
            "Pupil Cam2 ID1",
        ):
            time_name = "eye" + time_name[-1]  # rename eye files
        elif time_name in ("Pupil Cam1 ID2", "Logitech Webcam C930e"):
            video = av.open(media_loc, "r")
            frame_size = (
                video.streams.video[0].format.width,
                video.streams.video[0].format.height,
            )
            del video
            intrinsics = load_intrinsics(rec_dir, time_name, frame_size)
            intrinsics.save(rec_dir, "world")

            time_name = "world"  # assume world file
        elif time_name.startswith("audio_"):
            time_name = "audio"

        timestamps = np.fromfile(time_loc, dtype=">f8")
        timestamp_loc = os.path.join(rec_dir, "{}_timestamps.npy".format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        if time_name == "audio":
            media_dst = os.path.join(rec_dir, time_name) + ".mp4"
        else:
            media_dst = (
                os.path.join(rec_dir, time_name) + os.path.splitext(media_loc)[1]
            )
        logger.info(
            'Renaming "{}" to "{}"'.format(
                os.path.split(media_loc)[1], os.path.split(media_dst)[1]
            )
        )
        try:
            os.rename(media_loc, media_dst)
        except FileExistsError:
            # Only happens on Windows. Behavior on Unix is to overwrite the existing file.
            # To mirror this behaviour we need to delete the old file and try renaming the new one again.
            os.remove(media_dst)
            os.rename(media_loc, media_dst)

    pupil_data_loc = os.path.join(rec_dir, "pupil_data")
    if not os.path.exists(pupil_data_loc):
        logger.info('Creating "pupil_data"')
        fm.save_object(
            {"pupil_positions": [], "gaze_positions": [], "notifications": []},
            pupil_data_loc,
        )
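# Minimal sketch of the ".time" -> "_timestamps.npy" conversion performed in
# the loop above, assuming the same big-endian float64 layout of Pupil Mobile
# time files; the file names in the commented call are hypothetical.
import numpy as np


def convert_time_file(time_path, npy_path):
    timestamps = np.fromfile(time_path, dtype=">f8")  # big-endian float64
    np.save(npy_path, timestamps)
    return timestamps


# convert_time_file("Pupil Cam1 ID2.time", "world_timestamps.npy")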
def intrinsics(self):
    if self._intrinsics is None or self._intrinsics.resolution != self.frame_size:
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)
    return self._intrinsics
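# Standalone sketch of the lazy-reload pattern used by the `intrinsics`
# property above: keep the cached object only while its resolution matches the
# current frame size, otherwise reload. `FakeIntrinsics` and the in-place
# constructor call are stand-ins, not the real camera_models loader.
class FakeIntrinsics:
    def __init__(self, resolution):
        self.resolution = resolution


class Source:
    def __init__(self, frame_size):
        self.frame_size = frame_size
        self._intrinsics = None

    @property
    def intrinsics(self):
        if self._intrinsics is None or self._intrinsics.resolution != self.frame_size:
            self._intrinsics = FakeIntrinsics(self.frame_size)  # reload stand-in
        return self._intrinsics


src = Source((1280, 720))
first = src.intrinsics
src.frame_size = (640, 480)  # a resolution change invalidates the cache
assert src.intrinsics is not first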
def __init__(self, g_pool, source_path=None, timed_playback=False, loop=False, playback_speed=1.):
    super().__init__(g_pool)

    # minimal attribute set
    self._initialised = True
    self.playback_speed = playback_speed
    self.source_path = source_path
    self.timestamps = None
    self.timed_playback = timed_playback
    self.loop = loop

    if not source_path or not os.path.isfile(source_path):
        logger.error('Init failed. Source file could not be found at `%s`' % source_path)
        self._initialised = False
        return

    self.container = av.open(str(source_path))

    try:
        # looking for the first videostream
        self.video_stream = next(s for s in self.container.streams if s.type == "video")
        logger.debug("loaded videostream: %s" % self.video_stream)
        self.video_stream.thread_count = cpu_count()
    except StopIteration:
        self.video_stream = None
        logger.error("No videostream found in media container")

    try:
        # looking for the first audiostream
        self.audio_stream = next(s for s in self.container.streams if s.type == 'audio')
        logger.debug("loaded audiostream: %s" % self.audio_stream)
    except StopIteration:
        self.audio_stream = None
        logger.debug("No audiostream found in media container")

    if not self.video_stream and not self.audio_stream:
        logger.error('Init failed. Could not find any video or audio stream in the given source file.')
        self._initialised = False
        return

    self.display_time = 0.
    self.target_frame_idx = 0
    self.current_frame_idx = 0

    # we will use below for av playback
    # self.selected_streams = [s for s in (self.video_stream,self.audio_stream) if s]
    # self.av_packet_iterator = self.container.demux(self.selected_streams)

    if float(self.video_stream.average_rate) % 1 != 0.0:
        logger.error('Videofile pts are not evenly spaced, pts to index conversion may fail and be inconsistent.')

    # load/generate timestamps.
    timestamps_path, ext = os.path.splitext(source_path)
    timestamps_path += '_timestamps.npy'
    try:
        self.timestamps = np.load(timestamps_path)
    except IOError:
        logger.warning(
            "did not find timestamps file, making timestamps up based on fps and frame count. "
            "Frame count and timestamps are not accurate!"
        )
        frame_rate = float(self.video_stream.average_rate)
        self.timestamps = [
            i / frame_rate
            for i in range(int(self.container.duration / av.time_base * frame_rate) + 100)
        ]  # we are adding some slack.
    else:
        logger.debug("Auto loaded %s timestamps from %s" % (len(self.timestamps), timestamps_path))
        assert isinstance(self.timestamps[0], float), \
            'Timestamps need to be instances of python float, got {}'.format(type(self.timestamps[0]))

    # set the pts rate to convert pts to frame index. We use videos with pts written like indices.
    self.next_frame = self._next_frame()
    f0, f1 = next(self.next_frame), next(self.next_frame)
    self.pts_rate = f1.pts
    self.seek_to_frame(0)
    self.average_rate = (self.timestamps[-1] - self.timestamps[0]) / len(self.timestamps)

    loc, name = os.path.split(os.path.splitext(source_path)[0])
    self._intrinsics = load_intrinsics(loc, name, self.frame_size)
    self.play = True
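# Sketch of the pts -> frame-index conversion implied by `self.pts_rate`
# above: with evenly spaced pts, the pts of the second frame is the per-frame
# step, so index = pts // pts_rate. The pts values below are made up.
def pts_to_index(pts, pts_rate):
    return int(pts // pts_rate)


pts_rate = 3000  # e.g. a 90k time base at 30 fps
assert pts_to_index(0, pts_rate) == 0
assert pts_to_index(9000, pts_rate) == 3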
def __init__(
    self,
    g_pool,
    frame_size,
    frame_rate,
    name=None,
    preferred_names=(),
    uid=None,
    uvc_controls={},
    check_stripes=True,
    exposure_mode="manual",
):
    import platform

    super().__init__(g_pool)
    self.uvc_capture = None
    self._restart_in = 3
    assert name or preferred_names or uid
    if platform.system() == "Windows":
        self.verify_drivers()
    self.devices = uvc.Device_List()
    devices_by_name = {dev["name"]: dev for dev in self.devices}

    # if uid is supplied we init with that
    if uid:
        try:
            self.uvc_capture = uvc.Capture(uid)
        except uvc.OpenError:
            logger.warning(
                "No available camera found that matched {}".format(preferred_names)
            )
        except uvc.InitError:
            logger.error("Camera failed to initialize.")
        except uvc.DeviceNotFoundError:
            logger.warning(
                "No camera found that matched {}".format(preferred_names)
            )
    # otherwise we use name or preferred_names
    else:
        if name:
            preferred_names = (name,)
        else:
            pass
        assert preferred_names

        # try to init by name
        for name in preferred_names:
            for d_name in devices_by_name.keys():
                if name in d_name:
                    uid_for_name = devices_by_name[d_name]["uid"]
                    try:
                        self.uvc_capture = uvc.Capture(uid_for_name)
                    except uvc.OpenError:
                        logger.info(
                            "{} matches {} but is already in use or blocked.".format(
                                uid_for_name, name
                            )
                        )
                    except uvc.InitError:
                        logger.error("Camera failed to initialize.")
                    else:
                        break

    # checkframestripes will be initialized accordingly in configure_capture()
    self.check_stripes = check_stripes
    self.exposure_mode = exposure_mode
    self.checkframestripes = None
    self.preferred_exposure_time = None

    # check if we were successful
    if not self.uvc_capture:
        logger.error(
            "Init failed. Capture is started in ghost mode. No images will be supplied."
        )
        self.name_backup = preferred_names
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        self.exposure_time_backup = None
        self._intrinsics = load_intrinsics(
            self.g_pool.user_dir, self.name, self.frame_size
        )
    else:
        self.configure_capture(frame_size, frame_rate, uvc_controls)
        self.name_backup = (self.name,)
        self.frame_size_backup = frame_size
        self.frame_rate_backup = frame_rate
        controls_dict = dict(
            [(c.display_name, c) for c in self.uvc_capture.controls]
        )
        try:
            self.exposure_time_backup = controls_dict["Absolute Exposure Time"].value
        except KeyError:
            self.exposure_time_backup = None
    self.backup_uvc_controls = {}
def make_update():
    surface_definitions_path = os.path.join(rec_dir, "surface_definitions")
    if not os.path.exists(surface_definitions_path):
        return

    surface_definitions_dict = fm.Persistent_Dict(surface_definitions_path)
    surface_definitions_backup_path = os.path.join(
        rec_dir, "surface_definitions_deprecated"
    )
    os.rename(surface_definitions_path, surface_definitions_backup_path)

    intrinsics_path = os.path.join(rec_dir, "world.intrinsics")
    if not os.path.exists(intrinsics_path):
        logger.warning(
            "Loading surface definitions failed: The data format of the "
            "surface definitions in this recording "
            "is too old and is no longer supported!"
        )
        return

    valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg")
    existing_videos = [
        f
        for f in glob.glob(os.path.join(rec_dir, "world.*"))
        if os.path.splitext(f)[1] in valid_ext
    ]
    if not existing_videos:
        return

    world_video_path = existing_videos[0]
    world_video = av.open(world_video_path)
    f = world_video.streams.video[0].format
    resolution = f.width, f.height

    intrinsics = cm.load_intrinsics(rec_dir, "world", resolution)

    DEPRECATED_SQUARE_MARKER_KEY = "realtime_square_marker_surfaces"
    if DEPRECATED_SQUARE_MARKER_KEY not in surface_definitions_dict:
        return
    surfaces_definitions_old = surface_definitions_dict[DEPRECATED_SQUARE_MARKER_KEY]

    surfaces_definitions_new = []
    for surface_def_old in surfaces_definitions_old:
        surface_def_new = {}
        surface_def_new["deprecated"] = True
        surface_def_new["name"] = surface_def_old["name"]
        surface_def_new["real_world_size"] = surface_def_old["real_world_size"]
        surface_def_new["build_up_status"] = 1.0

        reg_markers = []
        registered_markers_dist = []
        for id, verts in surface_def_old["markers"].items():
            reg_marker_dist = {"id": id, "verts_uv": verts}
            registered_markers_dist.append(reg_marker_dist)

            verts_undist = undistort_vertices(verts, intrinsics)
            reg_marker = {"id": id, "verts_uv": verts_undist}
            reg_markers.append(reg_marker)
        surface_def_new["registered_markers_dist"] = registered_markers_dist
        surface_def_new["reg_markers"] = reg_markers

        surfaces_definitions_new.append(surface_def_new)

    surface_definitions_dict_new = fm.Persistent_Dict(surface_definitions_path)
    surface_definitions_dict_new["surfaces"] = surfaces_definitions_new
    surface_definitions_dict_new.save()
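# Sketch of the old -> new surface-definition mapping above, with the lens
# undistortion step left out (it needs the recording's camera intrinsics);
# the marker id and vertices below are made up.
old_surface = {
    "name": "screen",
    "real_world_size": {"x": 1.0, "y": 1.0},
    "markers": {7: [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]},
}
new_surface = {
    "deprecated": True,
    "name": old_surface["name"],
    "real_world_size": old_surface["real_world_size"],
    "build_up_status": 1.0,
    "registered_markers_dist": [
        {"id": marker_id, "verts_uv": verts}
        for marker_id, verts in old_surface["markers"].items()
    ],
}
assert new_surface["registered_markers_dist"][0]["id"] == 7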
def __init__(self, g_pool, source_path=None, loop=False, buffered_decoding=False, *args, **kwargs):
    super().__init__(g_pool, *args, **kwargs)
    if self.timing == "external":
        self.recent_events = self.recent_events_external_timing
    else:
        self.recent_events = self.recent_events_own_timing

    # minimal attribute set
    self._initialised = True
    self.source_path = source_path
    self.timestamps = None
    self.loop = loop
    self.buffering = buffered_decoding

    if not source_path or not os.path.isfile(source_path):
        logger.error("Init failed. Source file could not be found at `%s`" % source_path)
        self._initialised = False
        return

    print(source_path)
    if source_path.endswith("world.mp4"):
        # IP address of the world camera stream
        source_path = "http://192.168.8.104:9898"
    else:
        # IP address of the eye camera stream
        source_path = "http://192.168.8.104:9899"

    self.container = av.open(str(source_path))

    try:
        # looking for the first videostream
        self.video_stream = next(s for s in self.container.streams if s.type == "video")
        logger.debug("loaded videostream: %s" % self.video_stream)
        self.video_stream.thread_count = cpu_count()
    except StopIteration:
        self.video_stream = None
        logger.error("No videostream found in media container")

    try:
        # looking for the first audiostream
        self.audio_stream = next(s for s in self.container.streams if s.type == "audio")
        logger.debug("loaded audiostream: %s" % self.audio_stream)
    except StopIteration:
        self.audio_stream = None
        logger.debug("No audiostream found in media container")

    if not self.video_stream and not self.audio_stream:
        logger.error("Init failed. Could not find any video or audio stream in the given source file.")
        self._initialised = False
        return

    self.target_frame_idx = 0
    self.current_frame_idx = 0

    # we will use below for av playback
    # self.selected_streams = [s for s in (self.video_stream,self.audio_stream) if s]
    # self.av_packet_iterator = self.container.demux(self.selected_streams)

    avg_rate = self.video_stream.average_rate
    if avg_rate is None:
        avg_rate = Fraction(0, 1)
    if float(avg_rate) % 1 != 0.0:
        logger.error("Videofile pts are not evenly spaced, pts to index conversion may fail and be inconsistent.")
    print("AVG RATE", avg_rate)
    print("Duration: ", self.container.duration)

    frame_rate = 90.0
    timestamps = [
        i / frame_rate
        for i in range(int(self.container.duration / av.time_base * frame_rate) + 100)
    ]
    print("Timestamps, ", timestamps)

    # load/generate timestamps.
    # timestamps_path, ext = os.path.splitext(source_path)
    # timestamps_path += "_timestamps.npy"
    # print("Loading timestamps")
    # try:
    #     self.timestamps = np.load(timestamps_path)
    #     print("Loaded the timestamps..")
    # except IOError:
    #     logger.warning(
    #         "did not find timestamps file, making timestamps up based on fps and frame count. Frame count and timestamps are not accurate!"
    #     )
    #     frame_rate = float(avg_rate)
    #     self.timestamps = [
    #         i / frame_rate
    #         for i in range(
    #             int(self.container.duration / av.time_base * frame_rate) + 100
    #         )
    #     ]  # we are adding some slack.
    # else:
    #     logger.debug(
    #         "Auto loaded %s timestamps from %s"
    #         % (len(self.timestamps), timestamps_path)
    #     )
    #     assert isinstance(
    #         self.timestamps[0], float
    #     ), "Timestamps need to be instances of python float, got {}".format(
    #         type(self.timestamps[0])
    #     )
    self.timestamps = timestamps
    # set the pts rate to convert pts to frame index. We use videos with pts written like indices.
    if self.buffering:
        self.buffered_decoder = self.container.get_buffered_decoder(
            self.video_stream, dec_batch=50, dec_buffer_size=200
        )
        self.next_frame = self.buffered_decoder.get_frame()
    else:
        self.next_frame = self._next_frame()
    print(self.next_frame)

    f0, f1 = next(self.next_frame), next(self.next_frame)
    self.pts_rate = f1.pts
    # self.seek_to_frame(0)
    # self.average_rate = (self.timestamps[-1] - self.timestamps[0]) / len(
    #     self.timestamps
    # )

    loc, name = os.path.split(os.path.splitext(source_path)[0])
    self._intrinsics = load_intrinsics(loc, name, self.frame_size)