def update_recording_v074_to_v082(rec_dir): meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path, "r", encoding="utf-8") as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info["Capture Software Version"] = "v0.8.2" with open(meta_info_path, "w", newline="") as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def _get_recording_start_date(source_folder):
    csv_loc = os.path.join(source_folder, "info.csv")
    with open(csv_loc, "r", encoding="utf-8") as csv_file:
        rec_info = csv_utils.read_key_value_file(csv_file)
        date = rec_info["Start Date"].replace(".", "_").replace(":", "_")
        time = rec_info["Start Time"].replace(":", "_")
    return "{}_{}".format(date, time)
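# Illustrative only (hypothetical info.csv values): with "Start Date" of
# "14.05.2018" and "Start Time" of "09:30:00", the helper above returns
# "14_05_2018_09_30_00", suitable for use in a folder name.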
def update_recording_bytes_to_unicode(rec_dir): logger.info("Updating recording from bytes to unicode.") def convert(data): if isinstance(data, bytes): return data.decode() elif isinstance(data, str) or isinstance(data, np.ndarray): return data elif isinstance(data, collections.Mapping): return dict(map(convert, data.items())) elif isinstance(data, collections.Iterable): return type(data)(map(convert, data)) else: return data for file in os.listdir(rec_dir): if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'): continue rec_file = os.path.join(rec_dir, file) try: rec_object = load_object(rec_file) converted_object = convert(rec_object) if converted_object != rec_object: logger.info('Converted `{}` from bytes to unicode'.format(file)) save_object(converted_object, rec_file) except (UnpicklingError, IsADirectoryError): continue # manually convert k v dicts. meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) with open(meta_info_path, 'w', newline='') as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v074_to_v082(rec_dir): meta_info_path = os.path.join(rec_dir,"info.csv") with open(meta_info_path) as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Capture Software Version'] = 'v0.8.2' with open(meta_info_path,'w') as csvfile: csv_utils.write_key_value_file(csvfile,meta_info)
def update_recording_v086_to_v087(rec_dir): logger.info("Updating recording from v0.8.6 format to v0.8.7 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir,"info.csv") def _clamp_norm_point(pos): '''realisitic numbers for norm pos should be in this range. Grossly bigger or smaller numbers are results bad exrapolation and can cause overflow erorr when denormalized and cast as int32. ''' return min(100.,max(-100.,pos[0])),min(100.,max(-100.,pos[1])) for g in pupil_data.get('gaze_positions', []): if 'topic' not in g: # we missed this in one gaze mapper g['topic'] = 'gaze' g['norm_pos'] = _clamp_norm_point(g['norm_pos']) save_object(pupil_data,os.path.join(rec_dir, "pupil_data")) with open(meta_info_path,'r',encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.7' update_meta_info(rec_dir, meta_info)
def update_recording_v086_to_v087(rec_dir): logger.info("Updating recording from v0.8.6 format to v0.8.7 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir, "info.csv") def _clamp_norm_point(pos): """realisitic numbers for norm pos should be in this range. Grossly bigger or smaller numbers are results bad exrapolation and can cause overflow erorr when denormalized and cast as int32. """ return min(100, max(-100, pos[0])), min(100, max(-100, pos[1])) for g in pupil_data["gaze_positions"]: if "topic" not in g: # we missed this in one gaze mapper g["topic"] = "gaze" g["norm_pos"] = _clamp_norm_point(g["norm_pos"]) save_object(pupil_data, os.path.join(rec_dir, "pupil_data")) with open(meta_info_path, "r", encoding="utf-8") as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info["Capture Software Version"] = "v0.8.7" with open(meta_info_path, "w", newline="") as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v087_to_v091(rec_dir):
    logger.info("Updating recording from v0.8.7 format to v0.9.1 format")
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    meta_info['Data Format Version'] = 'v0.9.1'
    update_meta_info(rec_dir, meta_info)
def update_recording_v091_to_v093(rec_dir):
    logger.info("Updating recording from v0.9.1 format to v0.9.3 format")
    meta_info_path = os.path.join(rec_dir, "info.csv")
    pupil_data = load_object(os.path.join(rec_dir, "pupil_data"))

    for g in pupil_data.get('gaze_positions', []):
        # fixing recordings made with bug https://github.com/pupil-labs/pupil/issues/598
        g['norm_pos'] = float(g['norm_pos'][0]), float(g['norm_pos'][1])

    save_object(pupil_data, os.path.join(rec_dir, "pupil_data"))

    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    meta_info['Data Format Version'] = 'v0.9.3'
    update_meta_info(rec_dir, meta_info)
def update_recording_v083_to_v086(rec_dir): logger.info("Updating recording from v0.8.3 format to v0.8.6 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir, "info.csv") for topic in pupil_data.keys(): for d in pupil_data[topic]: d['topic'] = topic save_object(pupil_data, os.path.join(rec_dir, "pupil_data")) with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.6' update_meta_info(rec_dir, meta_info)
def update_recording_v083_to_v086(rec_dir): logger.info("Updating recording from v0.8.3 format to v0.8.6 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir,"info.csv") for topic in pupil_data.keys(): for d in pupil_data[topic]: d['topic'] = topic save_object(pupil_data,os.path.join(rec_dir, "pupil_data")) with open(meta_info_path,'r',encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.6' update_meta_info(rec_dir, meta_info)
def update_recording_v082_to_v083(rec_dir): logger.info("Updating recording from v0.8.2 format to v0.8.3 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir,"info.csv") for d in pupil_data['gaze_positions']: if 'base' in d: d['base_data'] = d.pop('base') save_object(pupil_data,os.path.join(rec_dir, "pupil_data")) with open(meta_info_path,'r',encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.3' update_meta_info(rec_dir, meta_info)
def update_recording_v082_to_v083(rec_dir): logger.info("Updating recording from v0.8.2 format to v0.8.3 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir, "info.csv") for d in pupil_data['gaze_positions']: if 'base' in d: d['base_data'] = d.pop('base') save_object(pupil_data, os.path.join(rec_dir, "pupil_data")) with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.3' update_meta_info(rec_dir, meta_info)
def update_recording_v082_to_v083(rec_dir): logger.info("Updating recording from v0.8.2 format to v0.8.3 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir, "info.csv") for d in pupil_data["gaze_positions"]: if "base" in d: d["base_data"] = d.pop("base") save_object(pupil_data, os.path.join(rec_dir, "pupil_data")) with open(meta_info_path, "r", encoding="utf-8") as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info["Capture Software Version"] = "v0.8.3" with open(meta_info_path, "w", newline="") as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v083_to_v086(rec_dir): logger.info("Updating recording from v0.8.3 format to v0.8.6 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir, "info.csv") for topic in pupil_data.keys(): for d in pupil_data[topic]: d['topic'] = topic save_object(pupil_data, os.path.join(rec_dir, "pupil_data")) with open(meta_info_path) as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Capture Software Version'] = 'v0.8.6' with open(meta_info_path, 'w') as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v083_to_v086(rec_dir): logger.info("Updating recording from v0.8.3 format to v0.8.6 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir, "info.csv") for topic in pupil_data.keys(): for d in pupil_data[topic]: d["topic"] = topic save_object(pupil_data, os.path.join(rec_dir, "pupil_data")) with open(meta_info_path, "r", encoding="utf-8") as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info["Capture Software Version"] = "v0.8.6" with open(meta_info_path, "w", newline="") as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v083_to_v086(rec_dir): logger.info("Updating recording from v0.8.3 format to v0.8.6 format") pupil_data = load_object(os.path.join(rec_dir, "pupil_data")) meta_info_path = os.path.join(rec_dir,"info.csv") for topic in pupil_data.keys(): for d in pupil_data[topic]: d['topic'] = topic save_object(pupil_data,os.path.join(rec_dir, "pupil_data")) with open(meta_info_path) as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Capture Software Version'] = 'v0.8.6' with open(meta_info_path,'w') as csvfile: csv_utils.write_key_value_file(csvfile,meta_info)
def update_recording_v13_v14(rec_dir):
    logger.info("Updating recording from v1.3 to v1.4")
    valid_ext = ('.mp4', '.mkv', '.avi', '.h264', '.mjpeg')
    existing_videos = [
        f
        for f in glob.glob(os.path.join(rec_dir, 'world.*'))
        if os.path.splitext(f)[1] in valid_ext
    ]

    if not existing_videos:
        min_ts = np.inf
        max_ts = -np.inf
        for f in glob.glob(os.path.join(rec_dir, "eye*_timestamps.npy")):
            try:
                eye_ts = np.load(f)
                assert len(eye_ts.shape) == 1
                assert eye_ts.shape[0] > 1
                min_ts = min(min_ts, eye_ts[0])
                max_ts = max(max_ts, eye_ts[-1])
            except (FileNotFoundError, AssertionError):
                pass

        error_msg = 'Could not generate world timestamps from eye timestamps. This is an invalid recording.'
        assert -np.inf < min_ts < max_ts < np.inf, error_msg

        logger.warning('No world video found. Constructing an artificial replacement.')
        frame_rate = 30
        timestamps = np.arange(min_ts, max_ts, 1 / frame_rate)
        np.save(os.path.join(rec_dir, 'world_timestamps'), timestamps)
        save_object(
            {'frame_rate': frame_rate, 'frame_size': (1280, 720), 'version': 0},
            os.path.join(rec_dir, 'world.fake'),
        )

    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    meta_info['Data Format Version'] = 'v1.4'
    update_meta_info(rec_dir, meta_info)
def update_recording_v0915_v13(rec_dir):
    logger.info("Updating recording from v0.9.15 to v1.3")

    # Look for unconverted Pupil Cam2 videos
    time_pattern = os.path.join(rec_dir, '*.time')
    for time_loc in glob.glob(time_pattern):
        time_file_name = os.path.split(time_loc)[1]
        time_name = os.path.splitext(time_file_name)[0]

        potential_locs = [
            os.path.join(rec_dir, time_name + ext)
            for ext in ('.mjpeg', '.mp4', '.m4a')
        ]
        existing_locs = [loc for loc in potential_locs if os.path.exists(loc)]
        if not existing_locs:
            continue
        else:
            video_loc = existing_locs[0]

        if time_name in ('Pupil Cam2 ID0', 'Pupil Cam2 ID1'):
            time_name = 'eye' + time_name[-1]  # rename eye files
        else:
            continue

        timestamps = np.fromfile(time_loc, dtype='>f8')
        timestamp_loc = os.path.join(rec_dir, '{}_timestamps.npy'.format(time_name))
        logger.info('Creating "{}"'.format(os.path.split(timestamp_loc)[1]))
        np.save(timestamp_loc, timestamps)

        video_dst = os.path.join(rec_dir, time_name) + os.path.splitext(video_loc)[1]
        logger.info('Renaming "{}" to "{}"'.format(
            os.path.split(video_loc)[1], os.path.split(video_dst)[1]))
        os.rename(video_loc, video_dst)

    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path, 'r', encoding='utf-8') as csvfile:
        meta_info = csv_utils.read_key_value_file(csvfile)
    meta_info['Data Format Version'] = 'v1.3'
    update_meta_info(rec_dir, meta_info)
def update_recording_bytes_to_unicode(rec_dir): logger.info("Updating recording from bytes to unicode.") # update to python 3 meta_info_path = os.path.join(rec_dir, "info.csv") def convert(data): if isinstance(data, bytes): return data.decode() elif isinstance(data, str) or isinstance(data, np.ndarray): return data elif isinstance(data, collections.Mapping): return dict(map(convert, data.items())) elif isinstance(data, collections.Iterable): return type(data)(map(convert, data)) else: return data for file in os.listdir(rec_dir): rec_file = os.path.join(rec_dir, file) try: rec_object = load_object(rec_file) converted_object = convert(rec_object) if converted_object != rec_object: logger.info( 'Converted `{}` from bytes to unicode'.format(file)) save_object(rec_object, rec_file) except (ValueError, IsADirectoryError): continue # except TypeError: # logger.error('TypeError when parsing `{}`'.format(file)) # continue with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Capture Software Version'] = 'v0.8.8' with open(meta_info_path, 'w', newline='') as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v093_to_v094(rec_dir): logger.info("Updating recording from v0.9.3 to v0.9.4.") meta_info_path = os.path.join(rec_dir, "info.csv") for file in os.listdir(rec_dir): if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'): continue rec_file = os.path.join(rec_dir, file) try: rec_object = load_object(rec_file,allow_legacy=False) save_object(rec_object, rec_file) except: try: rec_object = load_object(rec_file,allow_legacy=True) save_object(rec_object, rec_file) logger.info('Converted `{}` from pickle to msgpack'.format(file)) except: logger.warning("did not convert {}".format(rec_file)) with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.9.4' update_meta_info(rec_dir, meta_info)
def update_recording_bytes_to_unicode(rec_dir): logger.info("Updating recording from bytes to unicode.") def convert(data): if isinstance(data, bytes): return data.decode() elif isinstance(data, str) or isinstance(data, np.ndarray): return data elif isinstance(data, collections.Mapping): return dict(map(convert, data.items())) elif isinstance(data, collections.Iterable): return type(data)(map(convert, data)) else: return data for file in os.listdir(rec_dir): if file.startswith('.') or os.path.splitext(file)[1] in ('.mp4', '.avi'): continue rec_file = os.path.join(rec_dir, file) try: rec_object = fm.load_object(rec_file) converted_object = convert(rec_object) if converted_object != rec_object: logger.info( 'Converted `{}` from bytes to unicode'.format(file)) fm.save_object(converted_object, rec_file) except (fm.UnpicklingError, IsADirectoryError): continue # manually convert k v dicts. meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) with open(meta_info_path, 'w', newline='') as csvfile: csv_utils.write_key_value_file(csvfile, meta_info)
def update_recording_v093_to_v094(rec_dir): logger.info("Updating recording from v0.9.3 to v0.9.4.") meta_info_path = os.path.join(rec_dir, "info.csv") for file in os.listdir(rec_dir): if file.startswith(".") or os.path.splitext(file)[1] in (".mp4", ".avi"): continue rec_file = os.path.join(rec_dir, file) try: rec_object = fm.load_object(rec_file, allow_legacy=False) fm.save_object(rec_object, rec_file) except: try: rec_object = fm.load_object(rec_file, allow_legacy=True) fm.save_object(rec_object, rec_file) logger.info("Converted `{}` from pickle to msgpack".format(file)) except: logger.warning("did not convert {}".format(rec_file)) with open(meta_info_path, "r", encoding="utf-8") as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info["Data Format Version"] = "v0.9.4" update_meta_info(rec_dir, meta_info)
def load_meta_info(rec_dir): meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) return meta_info
def read_info(info_path):
    with open(info_path, 'r') as info_fh:
        info = read_key_value_file(info_fh)
    synced = float(info["Start Time (Synced)"])
    system = float(info["Start Time (System)"])
    return synced, system
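# A minimal sketch of one common use of the two values, assuming the usual
# convention that "Start Time (System)" is Unix epoch time and "Start Time
# (Synced)" is recording-clock time, so their difference maps recording
# timestamps to wall-clock time. `synced_to_system` is a hypothetical helper.
def synced_to_system(timestamp, info_path):
    synced, system = read_info(info_path)
    return timestamp + (system - synced)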
def update_recording_v074_to_v082(rec_dir): meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path, "r", encoding="utf-8") as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info["Data Format Version"] = "v0.8.2" update_meta_info(rec_dir, meta_info)
def update_recording_v074_to_v082(rec_dir): meta_info_path = os.path.join(rec_dir,"info.csv") with open(meta_info_path,'r',encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.2' update_meta_info(rec_dir, meta_info)
def load_meta_info(rec_dir): meta_info_path = os.path.join(rec_dir,"info.csv") with open(meta_info_path,'r',encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) return meta_info
def load(self, rec_dir):
    def normalize_extension(ext: str) -> str:
        if ext.startswith("."):
            ext = ext[1:]
        return ext

    def is_video_file(file_path):
        if not os.path.isfile(file_path):
            return False
        _, ext = os.path.splitext(file_path)
        ext = normalize_extension(ext)
        valid_video_extensions = map(normalize_extension, VALID_VIDEO_EXTENSIONS)
        if ext not in valid_video_extensions:
            return False
        return True

    if not os.path.exists(rec_dir):
        raise InvalidRecordingException(
            reason=f"Target at path does not exist: {rec_dir}", recovery="")

    if not os.path.isdir(rec_dir):
        if is_video_file(rec_dir):
            raise InvalidRecordingException(
                reason="The provided path is a video, not a recording directory",
                recovery="Please provide a recording directory")
        else:
            raise InvalidRecordingException(
                reason=f"Target at path is not a directory: {rec_dir}",
                recovery="")

    info_path = os.path.join(rec_dir, "info.csv")
    if not os.path.exists(info_path):
        raise InvalidRecordingException(
            reason="There is no info.csv in the target directory", recovery="")
    if not os.path.isfile(info_path):
        raise InvalidRecordingException(
            reason=f"Target info.csv is not a file: {info_path}", recovery="")

    with open(info_path, "r", encoding="utf-8") as csvfile:
        try:
            meta_info = csv_utils.read_key_value_file(csvfile)
        except Exception as e:
            raise InvalidRecordingException(
                reason=f"Failed reading info.csv: {e}", recovery="")

    info_mandatory_keys = ["Recording Name"]
    for key in info_mandatory_keys:
        try:
            meta_info[key]
        except KeyError:
            raise InvalidRecordingException(
                reason=f'Target info.csv does not have "{key}"', recovery="")

    all_file_paths = glob.iglob(os.path.join(rec_dir, "*"))
    # TODO: Should this validation be "are there any video files" or are there specific video files?
    if not any(is_video_file(path) for path in all_file_paths):
        raise InvalidRecordingException(
            reason="Target directory does not contain any video files",
            recovery="")

    # TODO: Are there any other validations missing?

    # All validations passed
    self._rec_dir = rec_dir
    self._meta_info = meta_info
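# A minimal usage sketch for the validating loader above, assuming a
# `Recording` class that exposes `load` and an `InvalidRecordingException`
# that keeps its `reason` and `recovery` arguments as attributes (both names
# are assumptions about the surrounding code):
recording = Recording()
try:
    recording.load("/path/to/recording")
except InvalidRecordingException as err:
    logger.error("{} {}".format(err.reason, err.recovery).strip())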
def update_recording_v094_to_v0913(rec_dir, retry_on_averror=True): try: logger.info("Updating recording from v0.9.4 to v0.9.13") meta_info_path = os.path.join(rec_dir, "info.csv") wav_file_loc = os.path.join(rec_dir, 'audio.wav') aac_file_loc = os.path.join(rec_dir, 'audio.mp4') audio_ts_loc = os.path.join(rec_dir, 'audio_timestamps.npy') backup_ts_loc = os.path.join(rec_dir, 'audio_timestamps_old.npy') if os.path.exists(wav_file_loc) and os.path.exists(audio_ts_loc): in_container = av.open(wav_file_loc) in_stream = in_container.streams.audio[0] in_frame_size = 0 in_frame_num = 0 out_container = av.open(aac_file_loc, 'w') out_stream = out_container.add_stream('aac') for in_packet in in_container.demux(): for audio_frame in in_packet.decode(): if not in_frame_size: in_frame_size = audio_frame.samples in_frame_num += 1 out_packet = out_stream.encode(audio_frame) if out_packet is not None: out_container.mux(out_packet) # flush encoder out_packet = out_stream.encode(None) while out_packet is not None: out_container.mux(out_packet) out_packet = out_stream.encode(None) out_frame_size = out_stream.frame_size out_frame_num = out_stream.frames out_frame_rate = out_stream.rate in_frame_rate = in_stream.rate out_container.close() old_ts = np.load(audio_ts_loc) np.save(backup_ts_loc, old_ts) if len(old_ts) != in_frame_num: in_frame_size /= len(old_ts) / in_frame_num logger.debug( 'Provided audio frame size is inconsistent with amount of timestamps. Correcting frame size to {}' .format(in_frame_size)) old_ts_idx = np.arange( 0, len(old_ts) * in_frame_size, in_frame_size) * out_frame_rate / in_frame_rate new_ts_idx = np.arange(0, out_frame_num * out_frame_size, out_frame_size) interpolate = interp1d(old_ts_idx, old_ts, bounds_error=False, fill_value='extrapolate') new_ts = interpolate(new_ts_idx) # raise RuntimeError np.save(audio_ts_loc, new_ts) with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.9.13' update_meta_info(rec_dir, meta_info) except av.AVError as averr: # Try to catch `libav.aac : Input contains (near) NaN/+-Inf` errors # Unfortunately, the above error is only logged not raised. Instead # `averr`, an `Invalid Argument` error with error number 22, is raised. if retry_on_averror and averr.errno == 22: # unfortunately logger.error('Encountered AVError. Retrying to update recording.') out_container.close() # Only retry once: update_recording_v094_to_v0913(rec_dir, retry_on_averror=False) else: raise # re-raise exception
def update_recording_v074_to_v082(rec_dir): meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.8.2' update_meta_info(rec_dir, meta_info)
def __init__(self, g_pool, **kwargs):
    super().__init__(g_pool, **kwargs)
    self.order = 0.001  # Ensure we're after FileSource but before anything else
    self.menu = None

    self.recording_replay = RoyaleReplayDevice()
    self._current_recording_idx = None

    # Abort if the plugin is enabled in an unexpected app (Capture)
    if self.g_pool.app not in self.expected_app:
        self.gl_display = self._abort
        logger.error("Expected app {!r} instead of {!r}!".format(
            self.expected_app, self.g_pool.app))
        return

    fn_picoflexx_info = os.path.join(self.g_pool.rec_dir, "info_picoflexx.csv")
    fn_old_info = os.path.join(self.g_pool.rec_dir, "info.old_style.csv")
    fn_info = os.path.join(self.g_pool.rec_dir, "info.csv")
    if not os.path.exists(fn_picoflexx_info):
        # Migrate from sharing info.csv with the Pupil recorder to using our own csv file
        if os.path.exists(fn_old_info):
            shutil.copy(fn_old_info, fn_picoflexx_info)
            logger.info("Copying {!r} to {!r}".format(fn_old_info, fn_picoflexx_info))
        elif os.path.exists(fn_info):
            shutil.copy(fn_info, fn_picoflexx_info)
            logger.info("Copying {!r} to {!r}".format(fn_info, fn_picoflexx_info))
        else:
            self.gl_display = self._abort
            logger.error("Failed to migrate Picoflexx recording info!")
            return

    with open(fn_picoflexx_info, 'r') as f:
        meta_info = csv_utils.read_key_value_file(f)
    self.offset = float(meta_info.get('Royale Timestamp Offset', 0))

    cloud_path = os.path.join(self.g_pool.rec_dir, 'pointcloud.rrf')
    if not os.path.exists(cloud_path):
        self.recent_events = self._abort
        logger.error("There is no pointcloud in this recording.")
        return

    self.rrf_entries = list(self._rrf_container_times(0, cloud_path))

    # enumerate all pointclouds in the recording
    p_idx = 1
    while True:
        fn = os.path.join(self.g_pool.rec_dir, 'pointcloud_{}.rrf'.format(p_idx))
        if not os.path.exists(fn):
            break
        self.rrf_entries += self._rrf_container_times(p_idx, fn)
        p_idx += 1

    self.rrf_timestamps = list(map(lambda x: x[2], self.rrf_entries))
    logger.info("{} pointclouds present".format(p_idx))

    self.load_recording_container(0)
def read_info_csv_file(rec_dir: str) -> dict: """Read `info.csv` file from recording.""" file_path = os.path.join(rec_dir, "info.csv") with open(file_path, "r") as file: return csv_utils.read_key_value_file(file)
def read_meta_info_v081(rec_dir): meta_info_path = os.path.join(rec_dir,"info.csv") with open(meta_info_path) as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) return meta_info
def read_info_csv_file(rec_dir: str) -> dict: file_path = os.path.join(rec_dir, "info.csv") with open(file_path, "r") as file: return csv_utils.read_key_value_file(file)
def _load_recording_uuid_from_info_csv(self):
    info_csv_path = os.path.join(self.g_pool.rec_dir, "info.csv")
    with open(info_csv_path, "r", encoding="utf-8") as csv_file:
        recording_info = csv_utils.read_key_value_file(csv_file)
        return recording_info["Recording UUID"]
def read_meta_info_v081(rec_dir): meta_info_path = os.path.join(rec_dir, "info.csv") with open(meta_info_path) as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) return meta_info
def update_recording_v094_to_v0913(rec_dir, retry_on_averror=True): try: logger.info("Updating recording from v0.9.4 to v0.9.13") meta_info_path = os.path.join(rec_dir, "info.csv") wav_file_loc = os.path.join(rec_dir, 'audio.wav') aac_file_loc = os.path.join(rec_dir, 'audio.mp4') audio_ts_loc = os.path.join(rec_dir, 'audio_timestamps.npy') backup_ts_loc = os.path.join(rec_dir, 'audio_timestamps_old.npy') if os.path.exists(wav_file_loc) and os.path.exists(audio_ts_loc): in_container = av.open(wav_file_loc) in_stream = in_container.streams.audio[0] in_frame_size = 0 in_frame_num = 0 out_container = av.open(aac_file_loc, 'w') out_stream = out_container.add_stream('aac') for in_packet in in_container.demux(): for audio_frame in in_packet.decode(): if not in_frame_size: in_frame_size = audio_frame.samples in_frame_num += 1 out_packet = out_stream.encode(audio_frame) if out_packet is not None: out_container.mux(out_packet) # flush encoder out_packet = out_stream.encode(None) while out_packet is not None: out_container.mux(out_packet) out_packet = out_stream.encode(None) out_frame_size = out_stream.frame_size out_frame_num = out_stream.frames out_frame_rate = out_stream.rate in_frame_rate = in_stream.rate out_container.close() old_ts = np.load(audio_ts_loc) np.save(backup_ts_loc, old_ts) if len(old_ts) != in_frame_num: in_frame_size /= len(old_ts) / in_frame_num logger.debug('Provided audio frame size is inconsistent with amount of timestamps. Correcting frame size to {}'.format(in_frame_size)) old_ts_idx = np.arange(0, len(old_ts) * in_frame_size, in_frame_size) * out_frame_rate / in_frame_rate new_ts_idx = np.arange(0, out_frame_num * out_frame_size, out_frame_size) interpolate = interp1d(old_ts_idx, old_ts, bounds_error=False, fill_value='extrapolate') new_ts = interpolate(new_ts_idx) # raise RuntimeError np.save(audio_ts_loc, new_ts) with open(meta_info_path, 'r', encoding='utf-8') as csvfile: meta_info = csv_utils.read_key_value_file(csvfile) meta_info['Data Format Version'] = 'v0.9.13' update_meta_info(rec_dir, meta_info) except av.AVError as averr: # Try to catch `libav.aac : Input contains (near) NaN/+-Inf` errors # Unfortunately, the above error is only logged not raised. Instead # `averr`, an `Invalid Argument` error with error number 22, is raised. if retry_on_averror and averr.errno == 22: # unfortunately logger.error('Encountered AVError. Retrying to update recording.') out_container.close() # Only retry once: update_recording_v094_to_v0913(rec_dir, retry_on_averror=False) else: raise # re-raise exception