def get_recordings(self):
    """Return a list of all Recordings from Asterisk."""
    self._api.call('recordings', http_method='GET')
    # Temporary stub until the API method is implemented; the real code
    # would be `[Recording(x) for x in result]`.
    return [Recording(self._api), Recording(self._api)]
def __init__(self, home_dir, channel_count, trial_length, *, fs=30000,
             dat_name='100_CHs.dat', resp_channel='100_ADC1.continuous',
             trig_chan='100_ADC6.continuous', conversion_factor=0.195,
             sniff_basis=False, sniff_locked=False):
    """
    Unit recording - inherits from the Threshold_Recording object, the main object for a lot
    of different experiment types. Holds information both on the recording variables, and on
    the units isolated from the recording.

    Args:
        home_dir (str): Home directory of the recording
        channel_count (int): Number of channels present in the recording
        trial_length (float): Length of the trials presented
        fs (int, optional): Sampling rate of the experiment. Defaults to 30000.
        dat_name (str, optional): Name of the dat file associated with the experiment.
            Defaults to '100_CHs.dat'.
        resp_channel (str, optional): Continuous file holding the respiration signal.
            Defaults to '100_ADC1.continuous'.
        trig_chan (str, optional): Continuous file holding the trial trigger signal.
            Defaults to '100_ADC6.continuous'.
        conversion_factor (float, optional): The conversion between bits and uV.
            Defaults to 0.195.
        sniff_basis (bool, optional): Is the recording in sniff basis. Defaults to False.
        sniff_locked (bool, optional): Is the recording sniff locked (?) - origin unknown;
            NOTE(review): currently unused in this constructor. Defaults to False.
    """
    Recording.__init__(self, home_dir, channel_count, fs=fs, resp_channel=resp_channel,
                       dat_name=dat_name, conversion_factor=conversion_factor)
    # Probe geometry files as saved by spike sorting (channel_map.npy /
    # channel_positions.npy expected in home_dir).
    self.channel_map = np.load(os.path.join(home_dir, 'channel_map.npy'))
    self.channel_positions = np.load(os.path.join(home_dir, 'channel_positions.npy'))
    self.sniff_basis = sniff_basis
    self.trial_length = trial_length
    self.trig_chan = trig_chan
    self.trial_names = []
    # Order matters below: respiration peaks are found first, then trial
    # boundaries, the respiration trace, and finally the sorted clusters.
    self.resp_peaks = self.find_respiration_peaks()
    self.trial_starts, self.trial_ends = self.find_trial_starts()
    self.resp_trace = self.find_respiration_trace()
    self.clusters = self.find_clusters()
def playback():
    """Replay a recorded drive on the car, time-aligned to the recording.

    Reads dynamics samples from a Recording, sleeps so each sample is
    applied at the same offset from playback start as it was from record
    start, and corrects the recorded steering by the heading error
    accumulated during playback.

    Fixes vs. original: Python-3 ``print()``, ``is None`` identity test,
    and the local previously named ``str`` (shadowed the builtin) is now
    ``steer``.
    """
    record_start_time = None
    playback_start_time = datetime.datetime.now()
    car = Car()
    car.set_rc_mode()
    start_heading = car.dynamics.heading
    recording = Recording()
    while recording.read():
        dyn = recording.current()
        if record_start_time is None:
            # First sample: anchor the recording's timeline and heading.
            record_start_time = dyn.datetime
            record_start_heading = dyn.heading
        t_now = datetime.datetime.now()
        # Wait until this sample's offset-from-start matches real time.
        t_wait = (dyn.datetime - record_start_time) - (t_now - playback_start_time)
        if t_wait.total_seconds() > 0:
            time.sleep(t_wait.total_seconds())
        # Adjust steering based on heading error: how far we actually turned
        # versus how far the recording says we should have turned by now.
        actual_turn = degrees_diff(start_heading, car.dynamics.heading)
        expected_turn = degrees_diff(record_start_heading, dyn.heading)
        original_steer_angle = car.angle_for_steering(dyn.str)
        steer_angle = original_steer_angle + degrees_diff(actual_turn, expected_turn)
        steer = car.steering_for_angle(steer_angle)
        car.set_esc_and_str(dyn.esc, steer)
    car.set_manual_mode()
    print('all done')
def print_dupes():
    """Print every duplicated key with its count and one line per recording."""
    for key, data in _find_dupes().items():
        if len(data) <= 1:
            continue
        print(key + " = " + str(len(data)))
        for item in data:
            rec = Recording(item)
            line = "\t" + str(rec.object_id) + " | " + rec.get_description()
            print(line + " - " + rec.get_dur())
def tick(self, button_pressed):
    """Advance recorder state for one poll of the button.

    While pressed: lazily open a Recording and stream frames into it.
    On release: finalize the in-flight Recording and notify the callback.
    """
    if button_pressed:
        if not self._current_recording:
            started = Recording(self._create_recording_file_name(),
                                self.audio, self.device_index)
            self._current_recording = started
            self._recordings.append(started)
        self._current_recording.write_frames()
    elif self._current_recording:
        self._current_recording.complete()
        self.completion_callback(self._current_recording)
        self._current_recording = None
class DirectDrive:
    """ROS wrapper for a single directly-driven servo joint.

    Publishes position commands to ``<name>_controller/command`` and can
    log time/angle pairs via a Recording.
    """

    def __init__(self, name, servoRange):
        # JointAngle is centred on zero: [-servoRange/2, +servoRange/2].
        self.name = name
        self.servoRange = servoRange
        self.initPublishers()
        self.initVariables()
        self.angle = JointAngle(name, 1, -servoRange / 2, servoRange / 2, False)
        self.recording = Recording()

    def initPublishers(self):
        # Command topic follows the "<joint>_controller/command" convention.
        self.pub = rospy.Publisher(self.name + "_controller/command", Float64, queue_size=5)

    def initVariables(self):
        # velocity: True when driven by setDesiredVelocity rather than setDesired.
        self.velocity = False
        # noCommandYet: no servoTo/servoWith call received so far.
        self.noCommandYet = True

    def servoTo(self, dAngle):
        """Position mode: command the joint to the desired angle."""
        self.velocity = False
        self.angle.setDesired(dAngle)
        self.noCommandYet = False
        self.doUpdate()

    def servoWith(self, dVelocity):
        """Velocity mode: command the joint to move at the desired rate."""
        self.velocity = True
        self.angle.setDesiredVelocity(dVelocity)
        self.noCommandYet = False
        self.doUpdate()

    def publishCommand(self):
        """Publish the current desired angle to the controller topic."""
        dAngle = self.angle.getDesired()
        self.pub.publish(dAngle)

    def doUpdate(self):
        """Integrate velocity (if any) and publish the resulting command."""
        if self.velocity:
            self.angle.doVelocityIncrement()
        if self.noCommandYet:
            # NOTE(review): self.encoderAngle is never assigned in this class —
            # presumably set externally (encoder callback?); confirm before
            # relying on this branch.
            self.angle.setDesired(self.encoderAngle)
        self.publishCommand()

    def getJointAngle(self):
        """Return the latest encoder-reported joint angle."""
        return self.angle.getEncoder()

    def prepareRecording(self, fileNameBase):
        """Open a per-joint CSV recording file with time/angle columns."""
        fileName = fileNameBase + "_" + self.name + ".csv"
        self.recording.prepare(fileName, ["time", "angle"])

    def recordLine(self, delta):
        """Append one (elapsed-time, angle) sample to the recording."""
        angle = self.getJointAngle()
        self.recording.add([delta, angle])
def analyse_recording(file, args, start_frame=0, end_frame=0):
    """Analyse one video file end-to-end and append the result to `recordings`.

    When the (whole-file) pass detects multiple recordings spliced into one
    video, it recurses once per part and deliberately raises
    ``Exception('video is split')`` to abort the whole-file pass — that
    exception is caught below like any other error.
    """
    global recordings
    recording = Recording(file, start_frame, end_frame)
    try:
        recording.open_video()
        logging.debug("Analysing Video ...")
        analyse_video(recording)
        get_dimension(recording)
        # Split detection only runs on the top-level call (no sub-range given).
        if not args.skip_split and start_frame == 0 and end_frame == 0:
            parts = check_for_splits(recording)
            if parts:
                recording.processing_finished()
                logging.info(
                    "Video contains %s recordings, splitting into different files",
                    str(len(parts)))
                for part in parts:
                    logging.debug("Video Part from frame %s to %s",
                                  str(int(part[0])), str(int(part[1])))
                    analyse_recording(file, args, part[0], part[1])
                # Control-flow signal, not a real failure: stop processing the
                # whole file since each part was handled above.
                raise Exception('video is split')
        get_start_frame(recording)
        get_end_frame(recording)
        calculate_duration(recording)
        extract_modelname(recording, args.model)
        render_video(recording, args.output, args.dry_run,
                     quality=args.x265_quality, audio_bitrate=args.audio_bitrate)
        get_recording_size(recording)
    except Exception as e:
        # Any failure (including the deliberate split signal) is stored on the
        # recording instead of propagating.
        recording.error = str(e)
        logging.debug(e, exc_info=True)
    print_recording_stats(recording)
    recording.processing_finished()
    # Append unless this was the whole-file pass of a split video: the `or`
    # only excludes the case has_errors() AND error == 'video is split'.
    if not recording.has_errors() or not recording.error == 'video is split':
        recordings.append(recording)
def makeDecision(self, config, save=True):
    """
    Function making the decision upon the predicted probas from analyze method.
    Each file is dealt with as a Recording object, created, loaded, decisions
    are made, and saved again. See Recording.makeDecision for more info.
    """
    if self._verbatim > 0:
        print(
            '\n\n *** DATASET ANALYSIS: MAKING DECISION ON PREDICTIONS ***'
        )
    t_global_start = time.time()
    for file_path in self.files:
        recording = Recording(file_path, config, verbatim=self._verbatim)
        recording.load(config)
        t0 = time.time()
        recording.makeDecision(config)
        t1 = time.time()
        if save:
            recording.save(config)
        if self._verbatim > 1:
            print(
                '\tRecording has been re-analyzed: decisions on predictions have been made',
                file_path, t1 - t0)
    t_global_end = time.time()
    if self._verbatim > 0:
        print(
            'Dataset has been re-analyzed: decisions on predictions have been made',
            t_global_end - t_global_start)
    return
def load(self): treestr = self.level*TAB + self.name + '/' # print string to tree hierarchy and screen self.writetree(treestr + '\n') print(treestr) # collect recording names: 1st char of each name must be a digit, that's all: rnames = [ name for name in os.listdir(self.path) if os.path.isdir(os.path.join(self.path, name)) and name[0].isdigit() ] rnames.sort() # alphabetical order dt = 0 # calculate total track duration by summing durations of all recordings for rname in rnames: path = os.path.join(self.path, rname) recording = Recording(path, track=self) recording.load() self.r[recording.id] = recording self.__setattr__('r' + str(recording.id), recording) # add shortcut attrib dt += recording.dt self.rnames = rnames # easy way to print out all recording names self.dt = dt self.dtsec = self.dt / 1e6 self.dtmin = self.dtsec / 60 self.dthour = self.dtmin / 60 # create a TrackSort with TrackNeurons: self.sort = TrackSort(self) self.sort.load() # one way of calculating self.trange: #tranges = np.asarray([ n.trange for n in self.alln.values() ]) #self.trange = min(tranges[:, 0]), max(tranges[:, 1]) # better way of calculating self.trange: rids = sorted(self.r.keys()) # all recording ids in self r0 = self.r[rids[0]] r1 = self.r[rids[-1]] assert r0.datetime == self.datetime self.trange = r0.td+r0.trange[0], r1.td+r1.trange[1] self.calc_meanrates() # pttype better be the same for all member recordings: pttype = self.r[rids[0]].pttype # init to pttype of first recording for rid in rids[1:]: r = self.r[rid] # if recording doesn't have a pttype, it's probably from an old .spk file, # so don't bother doing this test: if hasattr(r, 'pttype') and pttype != r.pttype: raise ValueError("inconsistent polytrode types %r and %r in track %s" % (pttype, r.pttype, self.id))
def on_validate(self):
    """Insert a new Recording for the selected counter, then close the dialog.

    Bug fix: the raw line-edit text must be validated *before* int()
    conversion. The original did ``value_counter = int(self.lEdit.text())``
    first, so the ``value_counter == ""`` guard could never be true and an
    empty field raised ValueError instead of closing cleanly.
    """
    idx_counter = self.comboCounter.currentIndex()
    raw_value = self.lEdit.text()
    # Index 0 is the "no counter selected" placeholder.
    if idx_counter == 0 or raw_value == "":
        self.close()
        return
    value_counter = int(raw_value)
    allcounters = Counter.findAll()
    # Combo index is offset by one because of the placeholder entry.
    counter = allcounters[idx_counter - 1]
    r = Recording(counter.id, self.current_date, value_counter)
    r.insert()
    self.closed.emit()
    self.close()
def on_cellChanged(self, i, j, counter_id):
    """Persist an edited Value cell back to its Recording row, then rebuild the tabs."""
    table = self.tables["%i" % counter_id]
    # Column 0 holds the recording's database id; column 3 holds the value.
    record = Recording.findById(int(table.item(i, 0).text()))
    record.value = int(table.item(i, 3).text())
    record.update()
    self.setupTabs()
def delete(id_list, args):
    """Delete the recordings whose object_ids are in id_list.

    Prints what would be deleted and only actually deletes when the user
    passed --yes. Recordings currently in the 'recording' state are never
    matched.
    """
    # TODO: add a confirmation (sans --yyyyassss)
    total = len(id_list)
    if total == 0:
        print(f"Nothing to delete, exiting...")
        return
    elif total == 1:
        print(f"Deleting {total} recording")
    else:
        print(f"Deleting {total} recordings")
    print("-" * 50)
    # Load all the recs
    path = built_ins['db']['recordings']
    rec_db = TinyDB(path)
    shows = Query()
    # shortcut for later
    shows_qry = shows.data
    recs = []
    # Re-count: only ids that actually resolve to a deletable recording.
    total = 0
    for obj_id in id_list:
        # Never delete something that is still actively recording.
        obj = rec_db.get((shows_qry.object_id == int(obj_id))
                         & (shows_qry.video_details.state != 'recording'))
        if not obj:
            print(f'ERROR: Unable to find recording with '
                  f'object_id == "{obj_id}", skipping...')
            continue
        total += 1
        recs.append({
            'doc_id': obj.doc_id,
            'obj_id': obj_id,
            'rec': Recording(obj['data'])
        })
    # TODO: don't "total" like this
    if total <= 0:
        print(f"No recordings found; {len(id_list)} requested.")
    elif total == 1:
        print(f"Deleting {total} recording...")
    else:
        print(f"Deleting {total} recordings...")
    if total > 0:
        for rec in recs:
            rec = rec['rec']
            print(f"  - {rec.get_actual_dur()} | {rec.get_description()} ")
        print("-" * 50)
        if not args.yes:
            # Dry run: require an explicit --yes to actually delete.
            print()
            print('\tAdd the "--yes" flag to actually delete things...')
            print()
        else:
            for rec in recs:
                _delete(rec, rec_db)
    print("\nFINISHED")
def __init__(self, name, servoRange):
    """Set up a directly-driven joint: publishers, state flags, the
    JointAngle (centred on zero, range [-servoRange/2, +servoRange/2])
    and an empty Recording for logging."""
    self.name = name
    self.servoRange = servoRange
    # initPublishers reads self.name, so it must run after the assignment above.
    self.initPublishers()
    self.initVariables()
    self.angle = JointAngle(name, 1, -servoRange / 2, servoRange / 2, False)
    self.recording = Recording()
def startPlaying(self):
    """Switch to playback mode: spawn a Recording sprite at our centre,
    show the 'play' image, and rewind the event cursor."""
    self.isPlaying = True
    self.isRecording = False
    self.isSaved = False
    playback = Recording(self.rect.center)
    playback.onGround = True
    self.recording = playback
    self.image = Recorder.images['play']
    self.index = 0
def get_recording(self, object_id):
    """Return Recording specified by object_id."""
    self._api.call('recordings', http_method='GET', object_id=object_id)
    # Temporary stub until the API method is implemented; the real code
    # would be `Recording(result)`.
    return Recording(self._api)
def build_feature_data(self, condition, wav_path='./EmoDB/wav/', laut_path='./EmoDB/lablaut/'):
    """Build the EmoDB feature DataFrame for one experimental condition.

    condition 1: one feature vector per recording.
    condition 2/3: one feature vector per tagged phoneme interval.

    Returns a DataFrame with feature vectors plus speaker/emotion metadata
    derived from the recording ID.

    Fixes vs. original: ``DataFrame.append`` is deprecated and removed in
    pandas 2.0 — the frame is now built directly from the row list (same
    result); a dead ``pass`` was dropped.
    """
    assert condition in [1, 2, 3], 'unknown condition {}'.format(condition)
    data_available = sorted([
        s.split('.')[0] for s in os.listdir(wav_path) if s.endswith('.wav')
    ])
    row_list = []
    for ID in tqdm(data_available):
        rec = Recording(ID)
        # A tag file with <= 1 entry carries no usable phoneme segmentation.
        if len(rec.df_tags) <= 1:
            print('Excluded {} (empty tag-file)'.format(ID))
            continue
        if condition == 1:
            row_list.append({
                'ID': ID,
                'feature_vec': rec.get_features(per_phoneme=False)
            })
        if condition in [2, 3]:
            for i, row in rec.df_tags.iterrows():
                row_list.append({
                    'phoneme': row.phoneme,
                    'ID': ID,
                    'feature_vec': rec.get_features(row.t_start, row.t_stop)
                })
    df = pd.DataFrame(row_list)
    # First two ID characters encode the speaker; map to stored metadata.
    df['sex'] = df.ID.apply(lambda x: self.speaker_info[x[:2]][0])
    df['speaker_id'] = df.ID.apply(lambda x: x[:2])
    df['age'] = df.ID.apply(lambda x: self.speaker_info[x[:2]][1])
    df['emotion_name'] = df.ID.apply(
        lambda x: self.emotion_from_ID(x, num=False))
    df['emotion_label'] = df.ID.apply(lambda x: self.emotion_from_ID(x))
    return df
def recording_add(self, name, channel, priority, start, stop, **info):
    """Add a new recording to the schedule.

    If an equal recording already exists: revive it when it was DELETED
    (status becomes CONFLICT until rescheduling resolves it), otherwise
    raise AttributeError because it is already scheduled.

    Returns the scheduled Recording.
    """
    log.info("recording.add: %s", name)
    r = Recording(name, channel, priority, start, stop, info=info)
    if r in self.recordings:
        # Equality is by recording identity, not object identity — fetch the
        # stored instance.
        r = self.recordings[self.recordings.index(r)]
        if r.status == DELETED:
            r.status = CONFLICT
            r.favorite = False
            # update schedule, this will also send an update to all
            # clients registered.
            self.reschedule()
            return r
        raise AttributeError("Already scheduled")
    self.recordings.append(r)
    self.reschedule()
    return r
def recording_add(self, name, channel, priority, start, stop, **info):
    """Schedule a new recording; revive a matching DELETED one instead of
    duplicating it, and raise AttributeError if it is already active."""
    log.info('recording.add: %s', name)
    new_rec = Recording(name, channel, priority, start, stop, info=info)
    if new_rec in self.recordings:
        existing = self.recordings[self.recordings.index(new_rec)]
        if existing.status != DELETED:
            raise AttributeError('Already scheduled')
        # Bring the previously deleted entry back to life.
        existing.status = CONFLICT
        existing.favorite = False
        # Rescheduling also notifies all registered clients.
        self.reschedule()
        return existing
    self.recordings.append(new_rec)
    self.reschedule()
    return new_rec
def print_incomplete(args):
    """Print shows whose combined recorded duration is below a percentage of
    the proper (scheduled) duration.

    args.incomplete is the threshold percent (-1 means 100); SH-prefixed
    keys are skipped. With args.id_list, only a deduplicated list of
    object_ids is printed.
    """
    # weird way I made it work...
    percent = args.incomplete
    if percent == -1:
        percent = 100
    else:
        # clamp to [0, 100]
        percent = min(percent, 100)
        percent = max(percent, 0)
    percent = percent / 100
    dupes = _find_dupes()
    proper_dur = 0
    matched = 0
    total_recs = 0
    id_set = []
    for key, data in dupes.items():
        if key.startswith('SH'):
            continue
        if len(data) > 0:
            sum_actual_dur = 0
            recs = []
            for item in data:
                rec = Recording(item)
                actual_dur = rec.video_details['duration']
                # NOTE(review): proper_dur keeps the value from the last item
                # of this group — assumes all dupes share the same scheduled
                # duration.
                proper_dur = rec.airing_details['duration']
                sum_actual_dur += actual_dur
                # only keep recordings that are themselves short
                if proper_dur > actual_dur:
                    recs.append(rec)
            if (proper_dur * percent) > sum_actual_dur:
                matched += 1
                total_recs += len(recs)
                header = None
                for x in recs:
                    if args.id_list:
                        if x.object_id not in id_set:
                            id_set.append(x.object_id)
                    else:
                        if not header:
                            header = x.get_description() + \
                                " - " + x.episode['tms_id']
                            print(header)
                        print("\t" + str(x.object_id) + " | " +
                              x.get_description() + " - " + x.get_dur())
                if not args.id_list:
                    sum_txt = str(timedelta(seconds=sum_actual_dur))
                    total_txt = str(timedelta(seconds=proper_dur))
                    pct = str(round(sum_actual_dur / proper_dur * 100, 2))
                    print(f"\n\t{sum_txt} / {total_txt} ({pct}%)")
                    print()
    if args.id_list:
        print(id_set)
    else:
        print(f"Total incomplete shows less than {percent*100}% - {matched} "
              f"({total_recs} items)")
def send_voiceLocationTranslationFromCommand(p, rec_command, userInfo=False):
    """Handle a '/rec_<id>' command: look up the recording and send it to the user.

    Fix vs. original: ``long()`` is Python-2 only; ``int()`` handles
    arbitrarily large ids on both Python 2 and 3, so the conversion is now
    version-portable.
    """
    # Strip the '/rec_' prefix to get the numeric id.
    digits = rec_command[5:]
    if utility.hasOnlyDigits(digits):
        rec_id = int(digits)
        rec = Recording.get_by_id(rec_id)
        if rec is None:
            send_message(p.chat_id, 'No recording found!')
        else:
            send_voiceLocationTranslation(p, rec, userInfo=userInfo)
    else:
        send_message(p.chat_id, FROWNING_FACE + "Input non valido.")
def getAllRecordings(p):
    """Send the user one '/rec_<id> - <name> - <date>' line per recording."""
    lines = []
    qry = Recording.query(Recording.chat_id > 0)
    for r in qry:
        name = person.getPersonByChatId(r.chat_id).name
        lines.append('/rec_' + str(r.key.id()) + ' - ' + name + ' - ' + str(
            r.date_time.date()) + '\n')
    recordings = ''.join(lines)
    send_message(
        p.chat_id,
        "ULTIME REGISTRAZIONI:\n\n" + recordings +
        "\nPremi su uno dei link sopra per ascoltare la registrazione corrispondente.",
        kb=[[BOTTONE_INDIETRO]])
def display(self, config, onlineDisplay=False, saveDisplay=True,
            forChecking=False, labelEncoder=None):
    """
    Function displaying all files of the dataset.
    Each file is dealt with as a Recording object, created and displayed.
    See Recording.analyze for more info.
    labelEncoder is only needed if forChecking.
    forChecking creates a file per observation and sort them in class by
    class folders that the expert can then review.
    Check README for more information.
    """
    if self._verbatim > 0:
        print('\n\n *** DATASET DISPLAY ***')
    t_global_start = time.time()
    for file_path in self.files:
        recording = Recording(file_path, config, verbatim=self._verbatim)
        recording.load(config)
        t0 = time.time()
        recording.display(config,
                          onlineDisplay=onlineDisplay,
                          saveDisplay=saveDisplay,
                          forChecking=forChecking,
                          labelEncoder=labelEncoder)
        t1 = time.time()
        if self._verbatim > 1:
            print('\tRecording has been loaded and displayed', file_path,
                  t1 - t0)
    t_global_end = time.time()
    if self._verbatim > 0:
        print('Dataset has been displayed', t_global_end - t_global_start)
    return
def analyze(self, analyzer, config, save=True):
    """
    Function analyzing all files of the dataset.
    Each file is dealt with as a Recording object, created, analyzed and
    saved. See Recording.analyze for more info.
    """
    if self._verbatim > 0:
        print('\n\n *** DATASET ANALYSIS ***')
    t_global_start = time.time()
    for file_path in self.files:
        recording = Recording(file_path, config, verbatim=self._verbatim)
        t0 = time.time()
        recording.analyze(analyzer, config)
        t1 = time.time()
        if self._verbatim > 1:
            print('\tRecording has been analyzed', file_path, t1 - t0)
        if save:
            recording.save(config)
            if self._verbatim > 1:
                print('\tRecording has been saved')
    t_global_end = time.time()
    if self._verbatim > 0:
        print('Dataset has been analyzed', t_global_end - t_global_start)
    return
def _parseRec(self, data):
    """Split the raw data dict into per-recording Recording objects.

    Keeps only non-metadata keys whose first '_'-separated part ends with
    this object's id, and wraps each in a Recording.

    Bug fix: the original called ``recordingsKeys.remove(key)`` while
    iterating over ``recordingsKeys``, which skips the element following
    each removal — so some non-matching keys survived the filter. The
    filter is now a comprehension into a new list.
    """
    metadataDicts = ['Stims', 'vars', 'Packages', 'Logs', 'nm_folder0']
    recordingsKeys = [
        key for key in data
        if key not in metadataDicts and key.split('_')[0][-1] == self.id
    ]
    recordings = OrderedDict()
    for key in recordingsKeys:
        recordings[key] = Recording({key: data[key]}, self)
    return recordings
class AudioRecorder:
    """Push-to-record audio capture built on PyAudio.

    Poll ``tick(button_pressed)`` repeatedly: frames are written while the
    button is held, and the finished Recording is handed to
    ``completion_callback`` on release. Usable as a context manager, which
    terminates the PyAudio session on exit.
    """

    def __init__(self, recording_device_name):
        self.audio = pyaudio.PyAudio()
        self.device_index = self._get_recording_device_index(recording_device_name)
        # Recording currently in progress, or None.
        self._current_recording = None
        # All recordings started so far (also used to number the files).
        self._recordings = []
        # Temp dir holding the .wav files; not cleaned up here.
        self._audio_files_temp_dir = tempfile.mkdtemp()
        # No-op by default; callers install their own handler.
        self.completion_callback = lambda *args: None

    def __enter__(self):
        return self

    def _get_recording_device_index(self, recording_device_name):
        """Return the PyAudio index of the first device whose name contains
        recording_device_name; raise RuntimeError when none matches."""
        for i in range(self.audio.get_device_count()):
            if recording_device_name in self.audio.get_device_info_by_index(i).get('name'):
                return i
        raise RuntimeError('Unable to find device named: ' + recording_device_name)

    def _create_recording_file_name(self):
        # Files are numbered by how many recordings have been started.
        return os.path.join(self._audio_files_temp_dir,
                            'input-{}.wav'.format(len(self._recordings)))

    def tick(self, button_pressed):
        """Advance one poll: stream frames while pressed; finalize and fire
        the completion callback on release."""
        if button_pressed:
            if not self._current_recording:
                self._current_recording = Recording(self._create_recording_file_name(),
                                                    self.audio, self.device_index)
                self._recordings.append(self._current_recording)
            self._current_recording.write_frames()
        else:
            if self._current_recording:
                self._current_recording.complete()
                self.completion_callback(self._current_recording)
                self._current_recording = None

    def __exit__(self, _, __, ___):
        self.audio.terminate()
def getRecentRecordings(p):
    """Send the user the 8 most recent approved recordings, one
    '/rec_<id> - <name> - <date>' line each (Python 2: names are encoded
    to UTF-8 bytes before concatenation)."""
    recordings = ''
    # Newest first, approved only, capped at 8.
    qry = Recording.query(
        Recording.approved == recording.REC_APPROVED_STATE_TRUE).order(
            -Recording.date_time).fetch(8)
    for r in qry:
        name = person.getPersonByChatId(r.chat_id).name.encode('utf-8')
        recordings += '/rec_' + str(r.key.id()) + ' - ' + name + ' - ' + str(
            r.date_time.date()) + '\n'
    send_message(
        p.chat_id,
        "ULTIME REGISTRAZIONI:\n\n" + recordings +
        "\nPremi su uno dei link sopra per ascoltare la registrazione corrispondente.",
        kb=[[BOTTONE_INDIETRO]],
        markdown=False)
def compute_errors(self):
    """Compute trajectory errors between desired and actual paths from the
    recorded rosbag, keeping only in-window, strictly increasing timestamps."""
    self._bag = Recording.get_instance()
    assert self._bag is not None, 'Recording has not been created'
    assert self._bag.is_init, 'Topics have not been sorted from the rosbag'
    t_start = self._bag.start_time
    t_end = self._bag.end_time
    self._errors = list()
    for p_act in self._bag.actual.points:
        # Skip samples outside the recorded time window.
        if not (t_start <= p_act.t <= t_end):
            continue
        # Skip out-of-order or duplicate timestamps.
        if self._errors and p_act.t <= self._errors[-1].t:
            continue
        p_des = self._bag.desired.interpolate(p_act.t)
        self._errors.append(TrajectoryError(p_des, p_act))
def goToState91(p, **kwargs):
    """Moderation state 91: review the next pending recording.

    Without input: show the oldest in-progress recording and an
    approve/disapprove keyboard. With input: apply the chosen action,
    notify the recording's author, and repeat or go back.
    """
    input = kwargs['input'] if 'input' in kwargs.keys() else None
    giveInstruction = input is None
    if giveInstruction:
        # Fetch one recording still awaiting moderation.
        rec = Recording.query(Recording.approved == recording.REC_APPROVED_STATE_IN_PROGRESS).get()
        if rec:
            # Remember which recording this moderator is reviewing.
            p.setLast_recording_file_id(rec.file_id)
            send_voiceLocationTranslation(p, rec, userInfo=True)
            kb = [[BOTTONE_APPROVA, BOTTONE_DISAPPROVA], [BOTTONE_INDIETRO]]
            send_message(p.chat_id, "Approvi questa registrazione?", kb)
        else:
            kb = [[BOTTONE_INDIETRO]]
            send_message(p.chat_id, "Non c'è nessuna registrazione da approvare", kb)
    else:
        if input == '':
            send_message(p.chat_id, "Input non valido.")
        elif input == BOTTONE_APPROVA:
            rec = recording.getRecording(p.last_recording_file_id)
            # Tell the author their recording was approved.
            send_message(rec.chat_id, USER_MSG.format('', str(rec.key.id())),
                         markdown=False)
            send_message(p.chat_id, "Registrazione approvata!")
            rec.approve(recording.REC_APPROVED_STATE_TRUE)
            recording.appendRecordingInGeoJsonStructure(rec)
            sleep(2)
            # Loop back to review the next pending recording.
            repeatState(p)
        elif input == BOTTONE_DISAPPROVA:
            rec = recording.getRecording(p.last_recording_file_id)
            # Tell the author their recording was NOT approved.
            send_message(rec.chat_id, USER_MSG.format(' NON ', str(rec.key.id())),
                         markdown=False)
            send_message(
                p.chat_id, "Registrazione NON approvata! "
                "Se vuoi mandare maggiori info scrivi /sendText {0} text".
                format(str(rec.chat_id)))
            rec.approve(recording.REC_APPROVED_STATE_FALSE)
            sleep(2)
            repeatState(p)
        elif input == BOTTONE_INDIETRO:
            redirectToState(p, 9)
        else:
            send_message(
                p.chat_id,
                FROWNING_FACE + " Scusa, non capisco quello che hai detto.")
def getLastContibutors(daysAgo):
    """Return (count, names, rec-commands) for approved recordings from the
    last daysAgo days, skipping non-user chat ids (<= 0).

    NOTE(review): the function name misspells "Contributors"; kept as-is
    because callers use this exact name.
    """
    dateThreshold = time_util.get_time_days_ago(daysAgo)
    names = set()  # de-duplicated contributor names
    recsCommands = []
    count = 0
    recs = Recording.query(
        Recording.date_time > dateThreshold,
        Recording.approved == recording.REC_APPROVED_STATE_TRUE).fetch()
    for r in recs:
        # Skip system/group entries (non-positive chat ids).
        if r.chat_id <= 0:
            continue
        name = person.getPersonByChatId(r.chat_id).name
        names.add(name)
        recsCommands.append(r.getRecCommand())
        count += 1
    # Python 2: encode names to UTF-8 bytes before joining.
    namesString = ', '.join([x.encode('utf-8') for x in names])
    recCommandsString = '\n'.join(['🎙 {}'.format(x) for x in recsCommands])
    return count, namesString, recCommandsString
def compute_errors(self):
    """Compute trajectory errors from the parsed rosbag (parser-based API),
    skipping the work when an error set already exists."""
    self._bag = Recording.get_instance()
    assert self._bag is not None, 'Recording has not been created'
    # assert self._bag.is_init, 'Topics have not been sorted from the rosbag'
    if self._bag.parsers['error'].error is not None:
        return
    traj = self._bag.parsers['trajectory']
    t_start = traj.start_time
    t_end = traj.end_time
    self._errors = list()
    for p_act in traj.odometry.points:
        # Keep only in-window samples.
        if not (t_start <= p_act.t <= t_end):
            continue
        # Keep timestamps strictly increasing.
        if self._errors and p_act.t <= self._errors[-1].t:
            continue
        p_des = traj.reference.interpolate(p_act.t)
        self._errors.append(TrajectoryError(p_des, p_act))
def check_favorite(fav, recordings): """ Check the given favorite against the db and add recordings """ # Note: we can't use keyword searching here because it won't match # some favorite titles when they have short names. if fav.substring: # unable to do that right now listing = kaa.epg.search(keywords=fav.name) else: # 'like' search listing = kaa.epg.search(title=kaa.epg.QExpr('like', fav.name)) now = time.time() for p in listing: if not fav.match(p.title, p.channel.name, p.start_timestamp): continue if p.stop_timestamp < now: # do not add old stuff continue # we found a new recording. rec = Recording(p.title, p.channel.name, fav.priority, p.start_timestamp, p.stop_timestamp, info={ "episode": p.episode, "subtitle": p.subtitle, "description": p.description }) if rec in recordings: # This does not only avoid adding recordings twice, it # also prevents from added a deleted favorite as active # again. continue fav.update_recording(rec) recordings.append(rec) log.info('added\n%s', rec) signals['changed'].emit(rec) if fav.once: favorites.remove(fav) break
def view(args):
    """Print recordings from the db: object_id list, full raw docs, or the
    default per-recording summary, depending on the flags in args."""
    print()
    rec_db = TinyDB(built_ins['db']['recordings'])
    id_set = []
    cnt = 0
    for item in rec_db.all():
        cnt += 1
        if args.id_list:
            obj_id = item['data']['object_id']
            if obj_id not in id_set:
                id_set.append(obj_id)
            continue
        if args.full:
            pprint.pprint(item)
        else:
            Recording(item['data']).print()
    if args.id_list:
        print(id_set)
    else:
        print(f'Total recordings found: {cnt}')
def record(self):
    """Take one measurement: compute per-flow-meter volume since the last
    record, bundle it into a Recording, and dispatch it to all recorders.
    Skipped when less than the configured minimum interval has elapsed."""
    logger.debug("recording")
    now_s = time.time()
    # First ever call: just establish the baseline state and return.
    if self.last_record_time_s < 0:
        self.set_last_record_state(now_s)
        return
    elapsed_s = now_s - self.last_record_time_s
    if elapsed_s < config.min_recording_duration_s:
        return
    # One FlowMeterRecording per meter: volume = new interrupts since the
    # last record, scaled by the per-interrupt volume.
    meter_recordings = []
    for meter in self.flow_meters:
        prev_interrupts = self.last_record_flow_meters_num_interrupts[meter.id]
        new_interrupts = meter.num_interrupts - prev_interrupts
        meter_recordings.append(
            FlowMeterRecording(meter, new_interrupts * meter.volume_per_interrupt_ml))
    recording = Recording(now_s, elapsed_s, meter_recordings)
    # Update the baseline before dispatching.
    self.set_last_record_state(now_s)
    for recorder in self.recorders:
        recorder.record(recording)
def process_recording(recording_id, args):
    """
    Generate features for the given recording.
    Returns a Recording holding one Segment per analyzable chunk.
    """
    # CSLU data is already plain .wav, so no decode step is needed.
    decoded_path = get_data_file(recording_id, 'speech', args.cslu_dir)
    if args.gain_level is not None:
        decoded_path = normalize_call_file(decoded_path, args.gain_level)
    segment_paths = split_call_file(decoded_path, args.segment_length,
                                    args.drop_short_segments)
    segments = []
    for path in segment_paths:
        feats = extract_audio_features(path, args)
        # Segments whose feature extraction failed are dropped.
        if feats is None:
            continue
        segments.append(Segment(path, feats))
    return Recording(recording_id, segments)
def process_recording(recording_id, args):
    """
    Generate features for the given recording.
    Returns a Recording holding one Segment per analyzable chunk.
    """
    # OGI data must first be decoded from the raw call format to .wav.
    wav_path = get_data_file(recording_id, 'calls', args.ogi_dir)
    decoded_path = decode_call_file(wav_path)
    if args.gain_level is not None:
        decoded_path = normalize_call_file(decoded_path, args.gain_level)
    segment_paths = split_call_file(decoded_path, args.segment_length,
                                    args.drop_short_segments)
    segments = []
    for path in segment_paths:
        feats = extract_audio_features(path, args)
        # Segments whose feature extraction failed are dropped.
        if feats is None:
            continue
        segments.append(Segment(path, feats))
    return Recording(recording_id, segments)
def setupTabs(self):
    """Rebuild the tab widget: one tab per counter, each holding an editable
    recordings table (sorted by date, newest first) and a plot of the data."""
    #self.tabs.currentChanged.disconnect(self.currentTabChanged)
    while(self.tabs.count() > 0):
        self.tabs.removeTab(0)
    self.tables = {}
    allcounters = Counter.findAll()
    for c in allcounters:
        tab = QWidget(self.tabs)
        hl = QHBoxLayout()
        tab.setLayout(hl)
        recordings = Recording.findByIdCounter(c.id)
        tw = QTableWidget(len(recordings), 5)
        # Keyed by counter id so cell-change handlers can find their table.
        self.tables["%i"%c.id] = tw
        column_names = ("Id","Counter", "Date", "Value", "Remove ?")
        tw.setHorizontalHeaderLabels(column_names)
        # Fill the table with the recordings
        for i, r in enumerate(recordings):
            # The id of the recording in the table of recordings
            item = QTableWidgetItem("{}".format(r.id))
            item.setFlags(QtCore.Qt.NoItemFlags)
            tw.setItem (i, 0, item)
            # The id of the associated counter
            item = QTableWidgetItem("{}".format(r.idcounter))
            item.setFlags(QtCore.Qt.NoItemFlags)
            tw.setItem (i, 1, item)
            # The date when the recording has been made
            item = QTableWidgetItem(r.date.strftime("%Y-%m-%d %H:%M:%S"))
            item.setFlags(QtCore.Qt.NoItemFlags)
            tw.setItem (i, 2, item)
            # The value can be edited
            item = QTableWidgetItem("{}".format(r.value))
            item.setFlags(QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled)
            tw.setItem (i, 3, item)
            but = QPushButton("Remove")
            # partial() binds this row's counter/recording ids to the handler.
            but.clicked.connect(partial(self.on_removeClicked, counter_id=c.id,
                                        recording_id=r.id))
            #item = QTableWidgetItem(but)
            #tw.setItem(i, 4, but)
            #tw.setIndexWidget()
            tw.setCellWidget(i, 4, but)
        # Connected after filling so programmatic setItem calls above don't
        # trigger the handler.
        tw.cellChanged.connect(partial(self.on_cellChanged, counter_id=c.id))
        # We allow sorting and we sort by decreasing date
        # to get the most recent recordings at the top
        tw.setSortingEnabled(True)
        tw.sortByColumn(2, QtCore.Qt.DescendingOrder)
        # Ajust the width of the columns to better see the contents
        tw.resizeColumnsToContents()
        tw.setFixedWidth(600)
        hl.addWidget(tw)
        #### Plot the data
        canvas = self.plot_data(recordings)
        hl.addWidget(canvas)
        self.tabs.addTab(tab,str(c.id) + "-" + c.name)
class Recorder(pygame.sprite.Sprite):
    """In-game tape-recorder sprite: records up to 900 frames of player
    input (jump/left/right) and replays them through a Recording ghost.

    State is tracked with the isRecording / isSaved / isPlaying / isIdle
    flags; `events` is a deque of [jump, left, right] triples."""

    # Class-level image cache, loaded once on first instantiation.
    images = {}

    def __init__(self, recorderformat):
        pygame.sprite.Sprite.__init__(self)
        path = os.path.join('assets', 'images')
        if not Recorder.images:
            Recorder.images['idle'] = pygame.image.load(os.path.join(path,'recorderidle2.png')).convert_alpha()
            Recorder.images['play'] = pygame.image.load(os.path.join(path,'recorderplaying2.png')).convert_alpha()
            Recorder.images['recording'] = pygame.image.load(os.path.join(path,'recorderrecording2.png')).convert_alpha()
            Recorder.images['saved'] = pygame.image.load(os.path.join(path,'recordersaved2.png')).convert_alpha()
        self.image = Recorder.images['idle']
        self.rect = self.image.get_rect()
        # Anchor the sprite to the format's bottom edge; remember the spawn
        # position so reset() can restore it.
        self.initialpos = self.pos = self.rect.midbottom = recorderformat.rect.midbottom
        self.events = deque()
        self.recording = None
        self.isRecording = False
        self.isSaved = False
        self.isPlaying = False
        self.isIdle = False

    def update(self, offset):
        """Scroll the sprite by the camera offset."""
        self.pos = [a+b for a,b in zip(self.pos, offset)]
        self.rect.midbottom = self.pos

    def startRecording(self):
        """Enter recording mode with a fresh, empty event tape."""
        self.isRecording = True
        self.isSaved = False
        self.isIdle = False
        self.isPlaying = False
        self.maximum = 0
        self.events = deque()
        self.image = Recorder.images['recording']
        self.backup = deque()
        self.index , self.maximum = [0,0]

    def record(self, jump, left, right):
        """Append one frame of input to the tape.

        Each entry is [jump, left, right]; recording auto-stops after 900
        frames.
        """
        self.events.append([jump, left, right])
        self.maximum += 1
        if self.maximum >= 900:
            self.stopRecording()

    def stopRecording(self):
        """Leave recording mode, keeping the tape as 'saved'."""
        self.isRecording = False
        self.isSaved = True
        self.isPlaying = False
        self.recording = None
        self.image = Recorder.images['saved']

    def startPlaying(self):
        """Enter playback mode: spawn a Recording ghost at our centre."""
        self.isRecording = False
        self.isSaved = False
        self.isPlaying = True
        self.recording = Recording(self.rect.center)
        self.recording.onGround = True
        self.image = Recorder.images['play']
        self.index = 0

    def play(self):
        """Feed the next recorded frame to the ghost; each consumed frame is
        re-appended so the tape survives playback. Stops at the tape's end."""
        if self.index < self.maximum:
            jump, left, right = self.events.popleft()
            if jump:
                self.recording.jump()
            if left:
                self.recording.leftPress()
            elif right:
                self.recording.rightPress()
            self.events.append([jump,left,right])
            self.index += 1
        else:
            self.stopPlaying()

    def stopPlaying(self):
        """Leave playback mode and rotate any unplayed frames to the tape's
        end so it is back in original order for the next playback."""
        self.isRecording = False
        self.isSaved = True
        self.isPlaying = False
        self.image = Recorder.images['saved']
        for i in range(self.index, self.maximum):
            self.events.append(self.events.popleft())

    def reset(self):
        """Snap the sprite back to its spawn position."""
        self.pos = self.initialpos
        self.rect.midbottom = self.pos
# --- Demo / smoke-test script for the Counter & Recording ORM helpers ---

# Create and persist a second counter.
gaz = Counter("Gaz")
gaz.insert()
print(gaz)
print("")

#### List all the counters
print("List all : ")
allcounters = Counter.findAll()
for c in allcounters:
    print(str(c))

#### Put some recordings
# NOTE(review): `elec` is expected to be defined earlier in the script.
r = Recording(elec.id, datetime.datetime.strptime("2015-10-01", "%Y-%m-%d"), 1000)
r.insert()
r = Recording(elec.id, datetime.datetime.strptime("2015-10-05", "%Y-%m-%d"), 1500)
r.insert()
r = Recording(gaz.id, datetime.datetime.strptime("2015-10-01", "%Y-%m-%d"), 100)
r.insert()
r = Recording(gaz.id, datetime.datetime.strptime("2015-10-10", "%Y-%m-%d"), 200)
r.insert()

#### List all the recordings
print("List all : ")
s = Recording.findAll()
for r in s:
    print(str(r))
async def process_metadata(self, item):
    """Handle one metadata item from the audio pipeline.

    `item` is a dict with at least "type", "code", "name" and "value" keys.
    "ssnc"-type items carry session control codes (metadata start/end,
    progress, playback state, volume) -- these look like shairport-sync
    conventions, TODO confirm.  "core"-type items are accumulated into
    self.current_metadata.
    """
    if item["type"] == "ssnc":
        if item["code"] == "mdst":  # beginning of metadata
            ### Start with fresh metadata, does not matter if we already had some
            self.current_metadata = Metadata(self.aiohttp_session, app)
            ### Add the mdst timing info as the first item in our metadata list
            self.current_metadata.set_field(item["name"], item["value"])
            #self.sync_rtsp(item["value"])
        elif item["code"] == "mden":  # end of metadata
            ### Add the mden as the last item to our metadata list
            self.current_metadata.set_field(item["name"], item["value"])
            persistent_id = self.current_metadata.get_field("persistentid")
            log.debug("Ended metadata {} for persistent_id {}".format(
                item["value"], persistent_id))
            # Only do a lookup if nothing is queued yet, or the queued item
            # is for a different persistent id than the one just ended.
            if self.next_item is None or self.next_item.recording.get_persistent_id() != persistent_id:
                if self.next_item is None:
                    log.debug("No next item queued, looking up")
                else:
                    log.debug(
                        "Next item id changed from {} to {}, looking up".format(
                            self.next_item.recording.get_persistent_id(),
                            persistent_id))
                ### Set up our recording object
                recording = Recording(
                    self.current_metadata.get_field("persistentid"))
                ### Get the recording ID, if possible. Load the recording info
                ### from either Acousticbrainz or just the metadata we were sent
                try:
                    recordingId = await self.current_metadata.get_recordingId()
                    ab_info = await self.acoustic_brainz.lookup_recording(
                        recordingId)
                    recording.load_acousticbrainz(ab_info)
                except (Metadata.RecordingLookupError, TypeError, FailedRequest):
                    # Lookup failed: fall back to the raw metadata we received.
                    recordingId = 0
                    recording.load_metadata(self.current_metadata)
                ### Enqueue the item, to start at the frame specified in the mden message
                await self.queue_item(recording)
            self.current_metadata = None
        elif item["code"] == "prgr":  # progress info
            await self.handle_progress(item)
        elif item["code"] == "pbeg":  # start playing
            pass
        elif item["code"] == "prsm":  # resume playing
            pass
        elif item["code"] == "pend":  # stop playing
            self.playback_state = PlaybackState.STOPPED
            if (self.current_item):
                self.current_item.cancel()
            if (self.next_item):
                self.next_item.cancel()
            pass
        elif item["code"] == "pfls":  # flush (pause?)
            pass
        elif item["code"] == "pvol":  # volume
            # NOTE(review): calls module-level handle_volume(), unlike
            # self.handle_progress() above -- confirm this is intentional.
            await handle_volume(item)
    elif item["type"] == "core":
        # NOTE(review): assumes a "core" item never arrives before "mdst";
        # self.current_metadata would be None here otherwise -- confirm.
        self.current_metadata.set_field(item["name"], item["value"])
class Controller(object):
    """
    Class for the tvserver.

    Owns the list of scheduled recordings and favorites, persists them to
    an XML data file, and drives (re)scheduling via kaa timers/coroutines.
    NOTE: this is Python 2 code (``except Exception, e`` syntax, list-returning
    ``filter``, ``cmp``-based sort).
    """
    def __init__(self, datafile):
        epg.init()
        # simple busy flag used by the coroutines below to serialize work
        self.locked = False
        self.datafile = datafile
        # load the recordings file
        self.load_schedule()
        # connect to recorder signals
        device.signals['start-recording'].connect(self._recorder_start)
        device.signals['stop-recording'].connect(self._recorder_stop)
        device.signals['changed'].connect(self.reschedule)
        # start by checking the recordings/favorites
        self.check_favorites_and_reschedule()
        # add schedule timer for SCHEDULE_TIMER / 3 seconds
        kaa.Timer(self.check_favorites_and_reschedule).start(SCHEDULE_TIMER / 3)

    @kaa.timed(0.1, kaa.OneShotTimer, policy=kaa.POLICY_ONCE)
    def print_schedule(self):
        """
        Print current schedule (for debug only).

        On the first call every recording is logged; afterwards only
        recordings from the last 24 hours are, to keep log noise down.
        """
        if self.locked:
            # system busy, call again later
            self.print_schedule()
            return False
        if hasattr(self, 'only_print_current'):
            # print only latest recordings
            all = False
        else:
            # print all recordings in the list
            all = True
            # mark that all are printed once
            self.only_print_current = True
        # print only from the last 24 hours
        maxtime = int(time.time()) - 60 * 60 * 24
        info = 'recordings:\n'
        for r in self.recordings:
            if all or r.stop > maxtime:
                info += '%s\n' % r
        log.info(info)
        info = 'favorites:\n'
        for f in self.favorites:
            info += '%s\n' % f
        log.info(info)
        return True

    @kaa.coroutine()
    def reschedule(self):
        """
        Reschedule all recordings: drop week-old entries, let the scheduler
        assign devices, persist, and schedule the imminent ones.
        """
        if self.locked:
            # system busy, call again later
            kaa.OneShotTimer(self.reschedule).start(0.1)
            yield False
        self.locked = True
        # get current time (UTC)
        ctime = int(time.time())
        # remove old recordings (older than one week)
        self.recordings = filter(lambda r: r.start > ctime - 60*60*24*7, self.recordings)
        # run the scheduler to attach devices to recordings
        yield scheduler.schedule(self.recordings)
        # sort by start time
        self.recordings.sort(lambda l, o: cmp(l.start,o.start))
        # save schedule
        self.save_schedule()
        self.print_schedule()
        # Schedule recordings on recorder for the next SCHEDULE_TIMER seconds.
        log.info('schedule recordings')
        for r in self.recordings:
            if r.start < ctime + SCHEDULE_TIMER and r.status == SCHEDULED:
                r.schedule()
        self.locked = False
        yield True

    @kaa.coroutine()
    def check_favorites_and_reschedule(self):
        """
        Update recordings based on favorites and epg, then reschedule.
        """
        if self.locked:
            # system busy, call again later
            kaa.OneShotTimer(self.check_favorites_and_reschedule).start(0.1)
            yield False
        self.locked = True
        yield epg.check(self.recordings, self.favorites)
        self.locked = False
        self.reschedule()
        yield True

    #
    # load / save schedule file with recordings and favorites
    #

    def load_schedule(self):
        """
        Load the schedule file (XML) into self.recordings / self.favorites.
        A missing file leaves both lists empty; a corrupt file is fatal.
        """
        self.recordings = []
        self.favorites = []
        if not os.path.isfile(self.datafile):
            return
        try:
            xml = kaa.xmlutils.create(self.datafile, root='schedule')
        except Exception, e:
            log.exception('tvserver.load: %s corrupt:' % self.datafile)
            sys.exit(1)
        for child in xml:
            if child.nodename == 'recording':
                try:
                    r = Recording(node=child)
                except Exception, e:
                    # skip entries that fail to parse
                    log.exception('tvserver.load_recording')
                    continue
                if r.status == RECORDING:
                    log.warning('recording in status \'recording\'')
                    # Oops, we are in 'recording' status and this was saved.
                    # That means we are stopped while recording, set status to
                    # missed
                    r.status = MISSED
                if r.status == SCHEDULED:
                    # everything is a conflict for now
                    r.status = CONFLICT
                self.recordings.append(r)
            if child.nodename == 'favorite':
                try:
                    f = Favorite(node=child)
                except Exception, e:
                    # skip entries that fail to parse
                    log.exception('tvserver.load_favorite:')
                    continue
                self.favorites.append(f)
def load(self):
    """Load this track: discover child recordings, load them, build the
    track-level sort, apply per-neuron RF/spike type annotations from
    optional sidecar files, and compute per-recording time ranges relative
    to track start.

    Bails out early (after setting duration attributes) if the track has no
    recordings, a recording is missing its sort, or any sort comes from an
    old impoverished .spk file.
    """
    treestr = self.level*TAB + self.name + '/'
    # print string to tree hierarchy and screen
    self.writetree(treestr + '\n')
    print(treestr)
    # collect recording names: 1st char of each name must be a digit, that's all:
    rnames = [name for name in os.listdir(self.path)
              if os.path.isdir(os.path.join(self.path, name)) and name[0].isdigit()]
    rnames.sort()  # alphabetical order
    dt = 0  # total track duration, summed over all recordings
    # does this track have any missing sorts, or rely on old impoverished .spk files?
    missingsort, spksort = False, False
    for rname in rnames:
        path = os.path.join(self.path, rname)
        recording = Recording(path, track=self)
        recording.load()
        if recording.sort is None:  # identity test, was `== None`
            missingsort = True
        elif isinstance(recording.sort.header, core.SPKHeader):
            spksort = True
        self.r[recording.id] = recording
        setattr(self, 'r' + str(recording.id), recording)  # add shortcut attrib
        dt += recording.dt
    self.rnames = rnames  # easy way to print out all recording names
    self.dt = dt
    self.dtsec = self.dt / 1e6  # dt is presumably in microseconds -- TODO confirm
    self.dtmin = self.dtsec / 60
    self.dthour = self.dtmin / 60
    if not rnames:
        return  # no recordings in this track, nothing else to do
    if missingsort or spksort:
        return  # skip all below due to missing .ptcs or use of impoverished .spk files
    # create a TrackSort with TrackNeurons:
    self.sort = TrackSort(self)
    self.sort.load()
    # load RF type and spike type for each cell from optional sidecar files:
    self._apply_celltype_file('.rftype', ['simple', 'complex', 'LGN', None],
                              'rftype')
    self._apply_celltype_file('.spiketype',
                              ['fast', 'slow', 'fastasym', 'slowasym'],
                              'spiketype')
    self._calc_tranges()
    self.calc_meanrates()
    self._check_pttypes()

def _apply_celltype_file(self, ext, valid, attrname):
    """Read an optional `self.absname + ext` file holding one big
    {nid: label} dict and set `label` as attribute `attrname` on each
    corresponding neuron in self.alln. A missing file is silently ignored;
    a label outside `valid` raises AssertionError.
    """
    fname = os.path.join(self.path, self.absname + ext)
    try:
        with open(fname, 'r') as f:
            contents = f.read()
    except IOError:  # no sidecar file for this track
        return
    # NOTE(review): eval() of file contents -- acceptable only for trusted
    # local data files; never point this at untrusted input.
    labels = eval(contents)
    for nid, label in labels.items():
        assert label in valid
        setattr(self.alln[nid], attrname, label)

def _calc_tranges(self):
    """Compute self.tranges (one row per recording: start and stop times, us,
    relative to track start) and the overall self.trange.
    """
    rids = sorted(self.r.keys())  # all recording ids in self
    r0 = self.r[rids[0]]
    assert r0.datetime == self.datetime  # first recording defines track start
    tranges = []
    for rid in rids:
        rec = self.r[rid]
        # rec.td is time delta (us) between start of track and start of recording
        tranges.append((rec.td + rec.trange[0], rec.td + rec.trange[1]))
    self.tranges = np.array(tranges)  # each row is a recording trange
    self.trange = self.tranges[0, 0], self.tranges[-1, 1]

def _check_pttypes(self):
    """Raise ValueError if member recordings disagree on polytrode type."""
    rids = sorted(self.r.keys())
    pttype = self.r[rids[0]].pttype  # init to pttype of first recording
    for rid in rids[1:]:
        r = self.r[rid]
        # if recording doesn't have a pttype, it's probably from an old .spk
        # file, so don't bother doing this test:
        if hasattr(r, 'pttype') and pttype != r.pttype:
            raise ValueError("inconsistent polytrode types %r and %r in track %s"
                             % (pttype, r.pttype, self.id))
def open_recording(self, path):
    """Open the recording at `path` inside the IPython workspace,
    binding it to a variable named r<id>.
    """
    recording = Recording(path)  # instantiated only to parse its id
    varname = 'r%s' % recording.id
    code = ('%s = Recording(%r)\n'
            '%s.load()' % (varname, path, varname))
    self.ipw.execute(code)
def on_removeClicked(self, counter_id, recording_id):
    """Delete the recording identified by `recording_id`, then rebuild the
    tab UI to reflect the removal. `counter_id` is unused here.
    """
    recording = Recording.findById(recording_id)
    recording.remove()
    self.setupTabs()