def record_to_text():
    """Record audio, transcribe it with the Google Speech-to-Text API, and
    return the transcript as a JSON response (Flask view)."""
    record()  # presumably records into blocking.wav — TODO confirm
    print("transcribing the audio file...")
    # call asr api to turn the blocking.wav to text
    # The name of the audio file to transcribe
    file_name = 'blocking.wav'
    # Loads the audio into memory
    with io.open(file_name, 'rb') as audio_file:
        content = audio_file.read()
    audio = types.RecognitionAudio(content=content)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=44100,
        language_code='en-US')
    # Detects speech in the audio file
    response = speech_to_text_client.recognize(config, audio)
    transcript = ''
    for result in response.results:
        # each result carries ranked alternatives; take the most likely one
        transcript += result.alternatives[0].transcript
        print('Transcript: {}'.format(result.alternatives[0].transcript))
    print("transcript is:")
    print(transcript)
    data = {"response": transcript}
    return jsonify(**data)
def azure_verification(userId):
    """Record user audio and send a verification request for *userId*.

    Plays spoken feedback with the server's answer, removes the temporary
    audio files, and returns the parsed server response (or the error string
    on failure).
    """
    try:
        verification_filename = 'azure_verification.wav'
        response_filename = 'ttsfiles/verification_response.mp3'
        audio_length = 15
        path = '/azure_verify'
        params = {
            'filename': verification_filename,
            'userId': userId
        }
        # countdown gives the user time to get ready before recording starts
        os.system(countdown_cmd)
        record.record(audio_length, verification_filename)
        verification_response = file_request(path, verification_filename, params)
        verification_response = json.loads(verification_response)
        responseCode = verification_response.get('responseCode')
        # provide audio feedback to user
        if responseCode == 'SUCC':  # fixed: was `responseCode = 'SUCC'` (syntax error)
            confidence = verification_response.get('confidence')
            st = "successfully verified user with confidence: {0}".format(confidence)
        else:
            # fall back to the server-provided error message
            st = verification_response.get('message')
        tts(st, response_filename)
        os.system('mpg321 {}'.format(response_filename))
        # delete audio files so stale recordings do not affect future requests
        os.remove(verification_filename)
        os.remove(response_filename)
        return verification_response
    except Exception as e:
        print(str(e))
        return str(e)
def AmbientRecorder():
    """Record a short ambient-noise sample and plot its waveform in the Tk
    window, replacing any previously shown plot."""
    global lastcanvas
    record.record(AMBIENT_FILENAME, 1, RATE)
    # wavfile.read returns (rate, samples); keep only the samples
    data = wavfile.read(AMBIENT_FILENAME)[1]
    t = arange(len(data)) * 1.0 / RATE  # time axis in seconds
    f = Figure(figsize=(4, 3), dpi=100)
    a = f.add_subplot(111)
    a.plot(t, data)
    a.set_title('Ambient recording')
    a.set_xlabel('Time')
    a.set_ylabel('Amplitude')
    #canvas.delete(Tk.ALL)
    canvas = FigureCanvasTkAgg(f, master=root)
    canvas.show()
    # swap out the previously displayed waveform widget, if any
    if lastcanvas != None:
        lastcanvas.pack_forget()
    lastcanvas = canvas.get_tk_widget()
    lastcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    #canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
    label.config(text="Please check the waveform for any unwanted noises. Record again?")
def voiceit_enrollment(uid, phrases):
    """
    takes userId and phrases, records user saying the phrase and sends
    request to server with audio file and userId
    before each recording, the program will do a countdown using text to speech
    after receiving server response, it converts it to audio to provide
    feedback to user
    """
    try:
        iterations = 3
        audio_length = 5
        enrollment_filename = 'enrollment.wav'
        response_filename = 'ttsfiles/voiceit_enrollment_response.mp3'
        print("supported phrases: \n", pretty_list(phrases))
        count = 1
        path = '/voiceit_enrollment'
        filename = enrollment_filename
        method = 'POST'
        # 3 enrollments required for this API, keep trying until 3 successful
        # enrollments are made
        while count <= iterations:
            # takes the audio file passed to it as argument and plays it for
            # voice interaction
            os.system(countdown_cmd)
            record.record(audio_length, enrollment_filename)
            phrase = file_to_text(enrollment_filename)
            params = {
                'phrase': phrase,
                'filename': filename,
                'userId': uid
            }
            # send request to local server (middleware)
            enrollment_response = file_request("/voiceit_enrollment", filename, params=params)
            enrollment_response = json.loads(enrollment_response)
            enrollment_response_code = enrollment_response.get('responseCode')
            # if enrollment attempt is successful, provide user feedback and
            # increment count
            if enrollment_response_code == 'SUCC':
                # fixed: report the attempt that just succeeded BEFORE
                # advancing the counter (previously off by one)
                text = "attempt {0} of enrollment successful".format(count)
                count += 1
                tts(text, response_filename)
            else:
                # fixed: was `enrolmment_response` (NameError)
                tts(enrollment_response['message'], response_filename)
            enrollment_cmd = 'mpg321 {}'.format(response_filename)
            os.system(enrollment_cmd)
            # delete audio files created every time
            # if not explicitly deleted, they can cause problems for future requests
            os.remove(enrollment_filename)
            os.remove(response_filename)
        os.system("mpg321 ttsfiles/EnrollmentSuccess.mp3")
        return enrollment_response
    except Exception as e:
        print(str(e))
        return str(e)
def analysis(canvas=None, device=0):
    """Record (or load) a voice sample, extract pitch/MFCC features, score
    them against pre-trained GMMs, and render the result as a radar chart
    saved to result.png/result.gif."""
    if not args.filename:
        filename = 'file.wav'
        record(filename, 5, device)
    else:
        filename = args.filename
    # convert to raw PCM, then extract pitch and mel-cepstrum with external tools
    w2r(filename, 'file.raw')
    ext_pitch('file.raw', 'file.pitch')
    ext_mcep('file.raw', 'file.mcep')
    mfcc = mcep2vec('file.mcep')
    pitch = pitch2vec('file.pitch')
    # keep only voiced frames (pitch > 0)
    mfcc = mfcc[np.where(pitch > 0)]
    pitch = pitch[np.where(pitch > 0)]
    features = mfcc[:, [1, 5, 10, 15]]  # hand-picked cepstral coefficients
    scores = np.zeros(6)
    sampave = np.average(allsamples)
    # score the features against each pre-trained GMM
    for i, gmmname in enumerate(gmmnames):
        gmm = joblib.load(gmmname)
        score = np.exp(gmm.score(features))
        scores[i] = score
    if args.cheaptrick:
        # rescale scores so their mean matches the reference-sample mean
        scoreave = np.average(scores)
        scores = scores * sampave / scoreave
    for i, score in enumerate(scores):
        # rank each score against the sorted reference samples and map the
        # rank onto the radar chart's 0-5 scale
        samples = np.sort(allsamples[:, i])
        dist = np.array([abs(sample - score) for sample in samples])
        scores[i] = np.where(dist == np.min(dist))[0][0] * 5 / 30
    fig = plt.figure(figsize=(6, 6))
    titles = ['CUTE', 'YOUNG', 'COOL', 'MASCULINE', 'BOYISH', 'DANDY']
    labels = [[1, 2, 3, 4, 5, 6]] + [[]] * 5
    radar = Radar(fig, titles, labels)
    radar.plot(np.ndarray.tolist(scores), "-", lw=2, color="black", alpha=1)
    plt.savefig('result.png')
    img = Image.open('result.png')
    img.save('result.gif')
    if __name__ != '__main__':
        # running inside the GUI: point the user at the generated image
        canvas.delete('all')
        canvas.create_text(310, 310, anchor='center', justify='center',
                           text='結果を見る!', font=(None, 40))
        canvas.create_text(310, 30, anchor='n', justify='center',
                           text='↑\n↑\n↑\n↑\n', font=(None, 40))
def voiceit_verification(groupId):
    """Identify the speaker within *groupId* by voice and then verify them,
    giving spoken feedback after each stage. Returns the error string if an
    exception occurs."""
    audio_length = 5
    try:
        tts('started verification process', 'startedVerification')
        # NOTE(review): no '.mp3' suffix here, unlike every other mpg321 call
        # in this file — confirm the filename tts() actually writes
        os.system('mpg321 ttsfiles/startedVerification')
        os.system("mpg321 ttsfiles/countdown.mp3")
        record.record(audio_length, "verify.wav")
        phrase = file_to_text("verify.wav")
        identification_response = voiceit.identify(groupId, phrase, 'verify.wav')
        if identification_response['responseCode'] == 'SUCC':
            print(identification_response)
            uid = identification_response['userId']
            db_response = model.get_user(uid)
            # use the user's name when the DB row includes one
            if len(db_response) > 1:
                name = db_response[1]
                s = "First step completed, {0} was successfully identified. Please wait for the complete verification".format(
                    name)
            else:
                s = "First step completed, user was successfully identified. Please wait for the complete verification"
            tts(s, 'response')
            os.system('mpg321 ttsfiles/response.mp3')
            verification_response = voiceit.verify(uid, phrase, 'verify.wav')
            if verification_response['responseCode'] == 'SUCC':
                # fixed: was `verification_responses` (NameError)
                print(verification_response)
                confidence = verification_response['confidence']
                st = "successfully verified user with confidence: {0}".format(
                    confidence)
                tts(st, 'response')
            else:
                # strip the internal 36-char group id out of the error message
                msg = verification_response['message']
                if 'grp_' in msg:
                    position = msg.index("grp_")
                    gid = msg[position:position + 36]
                    st = msg.replace(gid, "")
                else:
                    st = msg
                tts(st, 'response')
            os.system('mpg321 ttsfiles/response.mp3')
        else:
            # identification failed: speak the (sanitized) error message
            msg = identification_response['message']
            if 'grp_' in msg:
                position = msg.index("grp_")
                gid = msg[position:position + 36]
                st = msg.replace(gid, "")
            else:
                st = msg
            tts(st, 'response')
            os.system('mpg321 ttsfiles/response.mp3')
    except Exception as e:
        return str(e)
def recordAll(score, args, splitRecord = [], counter = 1):
    """Recursively record performances phrase-by-phrase over a flat score,
    accumulating the phrase-boundary offsets in *splitRecord* and saving
    them to a JSON split file once the whole score is covered.

    NOTE(review): `splitRecord=[]` is a mutable default argument shared
    across top-level calls — confirm callers always pass a fresh list.
    """
    #score needs to be flat
    #splitRecord is used to split the original score into pieces
    #because it is possible that the splitRecord is generated by structural
    #analysis in the future, so we seperate the score splitting code after all
    #recording are finished
    #record perfs and generate split file
    if len(score) == 0:
        # base case: every phrase is recorded — persist the split offsets
        #splitRecFilename= args.outputDir
        #splitRecFilename+= settings.getScoreName(args.scoreFilename)+'.split.json'
        splitRecFilename = settings.getSplitRecFilename(args.scoreFilename, args.outputDir)
        with open(splitRecFilename, 'w') as f:
            simplejson.dump(splitRecord, f, indent = 3)
        print('[INFO] splitting record saved to ' + splitRecFilename)
        return
    else:
        try:
            print("[INFO] Now playing score."),
            print("You can start to record anytime by pressing Ctrl+c")
            record.playStream(score); #provide stop button
        except KeyboardInterrupt:
            # Ctrl+c is the user's "start recording now" signal
            pass
        print("[INFO] =====Now recording phrase no." + str(counter) + "=====")
        #recLogFilename = settings.getRecLogFilename(args.scoreFilename +'.'+ str(counter))
        #recLogFilename = args.outputDir + settings.getScoreName(args.scoreFilename)
        #recLogFilename += '.'+ str(counter) + '.log'
        recLogFilename = settings.getRecLogFilename(counter, args.scoreFilename, args.outputDir)
        settings.printDebug(recLogFilename)
        record.record(score, recLogFilename)
        perf = musicGenerator.generatePerf(score, recLogFilename)
        #outFilename= args.outputDir + settings.getScoreName(args.scoreFilename)
        #outFilename+= '.'+ str(counter) + '.perf' + settings.defaultOutputFormat
        outFilename = settings.getOutFilename(counter, args.scoreFilename, args.outputDir)
        musicGenerator.savePerf2File(perf, outFilename)
        # the part of the score covered by this take...
        scoreHead= score[:len(perf)]
        #if settings.DEBUG:
        #    settings.printDebug('')
        #    scoreHead.show('text')
        # ...and the remainder left for the next recursive call
        scoreTail= score[len(perf):]
        #if settings.DEBUG:
        #settings.printDebug('')
        #scoreTail.show('text')
        scoreHeadOffsets = [n.offset for n in scoreHead]
        splitRecord.append(scoreHeadOffsets)
        settings.printDebug(splitRecord)
        #splitRecord.extend(scoreHead)
        #capture mouse
        #pos = pygame.mouse.get_pos()
        #print("mouse position: "+pos)
        recordAll(scoreTail, args, splitRecord, counter+1)
def enroll():
    """Create a new identification profile, record a 30-second enrollment
    clip, submit it, and return the new profile id."""
    profile = req_handler.parse_results(req_handler.create_profile())
    uid = profile['identificationProfileId']
    # capture the enrollment audio for this profile
    record.record(30, "enroll.wav")
    response = req_handler.enroll_user("enroll.wav", uid)
    # poll the enrollment operation once and log its status
    operation = req_handler.get_operation(get_oid(response))
    print(operation)
    return uid
def check_date(streng, idd):
    """Append a record for chat *idd*; if the date stored on the log's first
    line is 30+ "month-days" older than today, wipe the log and restamp it
    with today's date first.

    Returns a Russian status message describing what happened.
    """
    import datetime
    import record as rec
    # first line of "<idd>.txt" holds the last reset date as YYYY-MM-DD
    # (fixed: file handles were previously left open / opened twice)
    with open(F"{idd}.txt") as file:
        stored = file.readline()
    _, month, day = (int(part) for part in stored.split("-"))
    date_sum = month * 30 + day  # crude month*30+day ordinal (year discarded)
    today = datetime.date.today()
    date_today_sum = today.month * 30 + today.day
    # NOTE(review): because the year is discarded, a gap spanning a year
    # boundary yields a negative diff and the log is never cleared — confirm
    # whether that is intended.
    diff = date_today_sum - date_sum
    if diff >= 30:
        # wipe the file and restamp it with today's date
        with open(F"{idd}.txt", "w") as file:
            file.write(str(today))
        rec.record(streng, idd)
        return "Запись за прошлые 30 дней очищена!"
    else:
        rec.record(streng, idd)
        return "Запись произведена успешно!"
def recordAll(score, args, counter=1):
    """Recursively record performances phrase-by-phrase over a flat score,
    saving each take's performance to a numbered output file."""
    # score needs to be flat
    if len(score) == 0:
        return
    else:
        print("[INFO] Now recording phrase no." + str(counter))
        record.playStream(score)  # provide stop button
        recLogFilename = settings.getRecLogFilename(args.scoreFilename + str(counter))
        record.record(recLogFilename)
        perf = musicGenerator.generatePerf(score, recLogFilename)
        # fixed: was `arg.outputFilename` (NameError)
        musicGenerator.savePerf2File(perf, args.outputFilename + str(counter))
        # recurse on the part of the score not yet covered by this take
        scoreTail = score[len(perf):]
        recordAll(scoreTail, args, counter + 1)
def get_letter():
    """Record the player's speech, transcribe it via cloud ASR, and return
    the guessed letter(s) with any "letter " prefix stripped."""
    record.record()
    storing.upload_blob("hangman-audio-files", "letters.wav", "letters.wav")
    speech = speechToText.sample_recognize('gs://hangman-audio-files/letters.wav')
    if 'letter' in speech:
        # drop the 7-character "letter " prefix, then remove the first
        # remaining space (idiomatic replacement for the char-by-char loop)
        speech = speech[7:].replace(' ', '', 1)
    return speech
def main():
    """Record speech, transcribe it, and stitch a sign-language video for
    each spoken word from a local clip library; plays the final video and
    returns its filename."""
    file_name = "temp.wav"
    # start with a clean scratch directory for the per-word clips
    shutil.rmtree("data/")
    os.mkdir("data")
    record.record(file_name)
    # importing presumably runs the transcription side effects — TODO confirm
    import speech_to_text as stt
    #stt.system("/home/skysarthak/Documents/Project/speech_to_text.py")
    os.chdir("/home/skysarthak/Documents/Project (copy)/")
    with open('spoken.txt', 'r') as myfile:
        spoken_text = myfile.read()
    list_words = [a.lower().strip("!,.?") for a in spoken_text.split()]
    with open('all_words.txt', 'r') as myfile:
        words = myfile.read()
    lib = [a.lower().strip("!,.?") for a in words.split()]
    # read from local dataset
    for i, word in enumerate(list_words):
        if word in lib:
            # a whole-word clip exists in the library
            f = mp.VideoFileClip("letters/" + word + ".mp4")
            f.write_videofile("data/%d.mp4" % i)
        else:
            # no clip for the word: spell it letter by letter at 2x speed
            letters = list(list_words[i])
            c = []
            for l in letters:
                if l.isalpha():
                    c.append(
                        mp.VideoFileClip("letters/%s.mp4" % l).resize(
                            height=320, width=240).speedx(factor=2))
            f = mp.concatenate_videoclips(c, method="compose")
            f.write_videofile("data/%d.mp4" % i)
        f.close()
    # join the per-word clips into the final video
    clips = []
    for j in range(len(list_words)):
        clips.append(
            mp.VideoFileClip("data/%d.mp4" % j).resize(height=320, width=240))
    final_clip = mp.concatenate_videoclips(clips, method="compose")
    final_clip.write_videofile("final_clip.mp4")
    time.sleep(1)
    os.system("xdg-open final_clip.mp4")
    return "final_clip.mp4"
def main():
    """Record speech, transcribe and translate it, then build a sign-language
    video by fetching per-word clips from handspeak.com (spelling out words
    for which no clip exists). Returns the transcript."""
    file_name = "temp.wav"
    shutil.rmtree("data/")
    os.mkdir("data")
    record.record(file_name)
    spoken_text = voice2text.retrieve_transcript("temp.wav")
    print(spoken_text)
    spoken_text = translate(spoken_text)
    list_words = [a.lower().strip("!,.?") for a in spoken_text.split()]
    # handspeak URL layout: single letters under "<letter>/<letter>-abc.mp4",
    # words under "<first-letter>/<word>.mp4"; "bye" is a special case
    modified_urls = [
        i + "/" + i + "-abc.mp4" if len(i) == 1 else i[0] + "/" + i + ".mp4"
        if i != "bye" else "bye-wave.mp4" for i in list_words
    ]
    # Read URLs from handspeak.com
    for i, url in enumerate(modified_urls):
        r = requests.get("https://handspeak.com/word/" + url)
        print("https://handspeak.com/word/" + url)
        # an HTML body means the clip does not exist (error page, not video)
        if r.text[:15] == "<!DOCTYPE html>":
            # spell the word letter-by-letter from the local library at 2x
            letters = list(list_words[i])
            c = []
            for l in letters:
                if l.isalpha():
                    c.append(
                        mp.VideoFileClip("letters/%s-abc.mp4" % l).resize(
                            height=320, width=240).speedx(factor=2))
            f = mp.concatenate_videoclips(c, method="compose")
            f.write_videofile("data/%d.mp4" % i)
        else:
            # stream the downloaded clip straight to disk
            f = open("data/%d.mp4" % i, 'wb')
            for chunk in r.iter_content(chunk_size=255):
                if chunk:
                    f.write(chunk)
            f.close()
    # join the per-word clips into the final video
    clips = []
    for j in range(len(modified_urls)):
        clips.append(
            mp.VideoFileClip("data/%d.mp4" % j).resize(height=320, width=240))
    final_clip = mp.concatenate_videoclips(clips, method="compose")
    final_clip.write_videofile("public/final_clip.mp4")
    time.sleep(1)
    return spoken_text
def result_screen(result, sound, process_time, level):
    """Show the end-of-game screen (result, elapsed time, historical stats)
    and wait for the player to quit or restart."""
    # save this game's outcome
    record.record(level, result, process_time)
    # fetch the stats for this level: best time, play count, win rate
    # NOTE: local name `time` shadows the time module from here on
    time, times, odds = record.get_record(level)
    # build the result window
    bg_size = width, height = 450, 700
    r_screen = pygame.display.set_mode(bg_size)
    r_screen.fill((237, 237, 237))
    r_font1 = pygame.font.Font('material/benmoyouyuan.ttf', 67)
    r_font2 = pygame.font.Font('material/benmoyouyuan.ttf', 50)
    r_font3 = pygame.font.Font('material/benmoyouyuan.ttf', 30)
    r_font4 = pygame.font.Font('material/benmoyouyuan.ttf', 20)
    # outline of the "continue game" button
    pygame.draw.rect(r_screen, (0, 0, 0), [100, 450, 250, 100], 5)
    process_time = str(process_time)
    r_text1 = r_font1.render(result, True, (0, 0, 0))
    r_text2 = r_font2.render("继续游戏", True, (0, 0, 0))
    r_text3 = r_font3.render('游戏时间:' + process_time + '秒', True, (0, 0, 0))
    r_text4 = r_font4.render('游戏记录:' + time + '秒', True, (0, 0, 0))
    r_text5 = r_font4.render('游戏次数:' + times + '次', True, (0, 0, 0))
    r_text6 = r_font4.render('游戏胜率:' + odds, True, (0, 0, 0))
    r_screen.blit(r_text1, (90, 100))
    r_screen.blit(r_text2, (120, 470))
    r_screen.blit(r_text3, (120, 200))
    r_screen.blit(r_text4, (120, 250))
    r_screen.blit(r_text5, (120, 300))
    r_screen.blit(r_text6, (120, 350))
    pygame.display.set_caption('游戏结束')
    pygame.display.flip()
    # event loop: quit on window close; restart when the button area is clicked
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == MOUSEBUTTONDOWN:
                if event.button == 1:
                    if 100 < event.pos[0] < 350 and 450 < event.pos[1] < 550:
                        pygame.display.quit()
                        choose._interface(sound)
def simulation(tests, su):
    """Run *tests* independent evolutionary simulations, pick the best one,
    and save a report. Returns False if any simulation fails, else True."""
    global plotFitnessLog
    global log
    log = []
    simList = []
    bestIndividual = {}
    # seed both RNGs for reproducibility
    rd.seed(a = su.seed)
    np.random.seed(seed = su.seed)
    fitnessSum = 0
    for i in range(tests):
        log.append("\n --------- Simulation #{} ---------\n\n".format(i + 1))
        sim = {} #Auxiliar dictionary for simulations
        sim['id'] = i + 1 #Simulation number
        sim['last'], sim['champion'], sim['history'], sim['average'] = evolve(su) #Last generation achieved, simulation's best individual, progression of best individual
        #sim['last'] == False means that there was some problem in the evolution caused by a invalid function (1/0, for example) or bad domain ([-5, -1] for log(x1), for example)
        if(sim['last'] == False):
            return False
        fitnessSum += float(str(sim['champion'][-1]))
        simList.append(sim)
    #Ordering the simulations by fitness
    simByChampion = sorted(simList, key = lambda sim: float(sim['champion'][-1]), reverse = True if(su.task == 'max') else False)
    bestIndividual = simByChampion[0]
    #Verifying simulation with best population average
    for i in range(1, tests):
        if(tests >= i + 1):
            # among tied champions, prefer the run with the better average
            if su.task == "max":
                if(bestIndividual['champion'][-1] == simByChampion[i]['champion'][-1] and bestIndividual['average'] < simByChampion[i]['average']):
                    bestIndividual = simByChampion[i]
            else:
                if(bestIndividual['champion'][-1] == simByChampion[i]['champion'][-1] and bestIndividual['average'] > simByChampion[i]['average']):
                    bestIndividual = simByChampion[i]
    rec.record(bestIndividual, log, su) #Saving simulation report
    aux = []
    for entry in simList:
        aux.append(entry['history']) #Saving individual progression for graph plotting
    plotFitnessLog.append(aux)
    return True
def flow(self, conn):
    """Main audio-chat loop: capture this user's audio, transmit it over
    *conn*, then receive and play back the peer's reply. Runs forever."""
    while True:
        # capture this user's audio and push it to the peer
        record.record(self.username)
        send.send(conn, self.username)
        # block until the opposing side's clip arrives, then play it
        opp = send.recv(conn)
        play(opp)
def __init__(self, src="uhd", dst="uhd", in_rate=2e6, out_rate=2e6, extra=None):
    """Top block emulating an RFID tag: decodes the reader's signal and
    answers with manchester-encoded bits, transmitted via active load
    modulation (USRP) or written to a file."""
    super(tag_emulate, self).__init__()
    uhd = dst == "uhd"
    if uhd:
        dst = None
    # bit source driven by the tag logic; idles at 0 between responses
    self._bin_src = binary_src.binary_src(out_rate, encode="manchester", idle_bit=0)
    parser = Parser(extra)
    self._tag = parser.get_tag(self._bin_src.set_bits)
    # Do not record here
    self._dec = decoder.decoder(src=src, dst=None, reader=True, tag=False, samp_rate=in_rate, emulator=self._tag)
    self.connect(self._dec)
    self._mult = multiplier.multiplier(samp_rate=out_rate)
    self.connect(self._bin_src, self._mult)
    if uhd:
        # active load modulation
        self._real = blocks.complex_to_real(1)
        self._thres = blocks.threshold_ff(0.02, 0.1, 0)
        self._r2c = blocks.float_to_complex(1)
        self._sink = usrp_sink.usrp_sink(out_rate)
        self.connect(self._mult, self._real, self._thres, self._r2c, self._sink)
    elif dst:
        # record the emulated reply to a file instead of transmitting
        self._sink = record.record(dst, out_rate)
        self.connect(self._mult, self._sink)
    else:
        # no destination: discard the output
        self._sink = blocks.null_sink(gr.sizeof_gr_complex)
        self.connect(self._mult, self._sink)
def __init__(self, L, M, N, R, M_grid, pm, pc, pop_size, max_gen, file_path_name):
    """
    Initialize the algorithm.

    :param L: scene side length, km
    :param M: total number of base stations
    :param N: number of stations to select
    :param R: base-station radius
    :param M_grid: number of grid cells
    :param pm: mutation probability
    :param pc: crossover probability
    :param pop_size: population size
    :param max_gen: maximum number of generations
    :param file_path_name: path of the file where run records are written
    :return:
    """
    self.L = L
    self.M = M
    self.N = N
    self.R = R
    self.M_grid = M_grid
    self.pm = pm
    self.pc = pc
    self.pop_size = pop_size
    self.max_gen = max_gen
    self.scence = scence(L, M, N, R, M_grid)
    # persist the scenario parameters alongside the run record
    para_dict = {'L':L, 'M':M, 'N':N, 'R':R, 'M_grid':M_grid, 'pm':pm, 'pc':pc, 'pop_size':pop_size, 'max_gen':max_gen}
    self.record = record(file_path_name)
    self.record.write_scence_para(para_dict)
    self.pop = []  # current population
    self.bests = [0] * max_gen  # best fitness found per generation
    self.g_best = 0  # global best fitness
def run_game():
    """ initialize """
    # set up pygame, the window, and the game objects, then run the loop
    pygame.init()
    settings = Settings()
    screen = pygame.display.set_mode(
        (settings.screen_width, settings.screen_height))
    event = pygame.event.poll()  # NOTE(review): result unused — confirm needed
    # add music
    pygame.mixer.music.load("music.mp3")
    pygame.mixer.music.play(3, 0)
    pygame.mixer.music.set_volume(0.3)
    pygame.display.set_caption("alien_invasion_game")
    play_button = Button(settings, screen, "Play")
    states = States(settings)
    records = record(settings, screen, states)
    background_color = (0, 0, 0)
    player = Player(settings, screen)
    bullets = Group()
    aliens = Group()
    gf.fleet(settings, screen, player, aliens)
    # main loop: handle input, then update entities while the game is active,
    # and redraw every frame
    while True:
        gf.incident(settings, screen, states, records, play_button, player,
                    aliens, bullets)
        if states.game_active:
            player.update()
            gf.more_bullets(settings, screen, states, records, player, aliens,
                            bullets)
            gf.new_aliens(settings, screen, states, records, player, aliens,
                          bullets)
        gf.new_screen(settings, screen, states, records, player, aliens,
                      bullets, play_button)
def __init__(self, L, M, N, R, M_grid, pm, pc, pop_size, max_gen, file_path_name, Dth, k1, w1, w2, maxdis, GA_step, VF_step, seed_number, alpha1, alpha2, alpha3):
    '''
    Initialize the virtual-force-guided genetic algorithm.

    :param L: scene side length, km
    :param M: total number of base stations
    :param N: number of stations to select
    :param R: base-station radius
    :param M_grid: number of grid cells
    :param pm: mutation probability
    :param pc: crossover probability
    :param pop_size: population size
    :param max_gen: maximum number of generations
    :param Dth: distance at which two points exert neither attraction nor repulsion
    :param k1: sensing-distance coefficient
    :param w1: attraction coefficient
    :param w2: repulsion coefficient
    :param maxdis: maximum distance moved in a single step
    :param GA_step: apply virtual-force guidance every GA_step generations
    :param VF_step: discretize every VF_step virtual-force steps
    '''
    self.L = L
    self.M = M
    self.N = N
    self.R = R
    self.M_grid = M_grid
    self.pm = pm
    self.pc = pc
    self.pop_size = pop_size
    self.max_gen = max_gen
    self.scence = scence(L, M, N, R, M_grid)
    self.Dth = Dth
    self.k1 = k1
    self.w1 = w1
    self.w2 = w2
    self.maxdis = maxdis
    self.GA_step = GA_step
    self.VF_step = VF_step
    self.Rs = k1 * R  # sensing radius derived from the base-station radius
    self.alpha1 = alpha1
    self.alpha2 = alpha2
    self.alpha3 = alpha3
    # spatial index over all candidate sites for fast neighbor queries
    self.kdtree = kdtree(self.scence.sum_sites)
    # persist the scenario parameters alongside the run record
    para_dict = {'L':L, 'M':M, 'N':N, 'R':R, 'M_grid':M_grid, 'pm':pm, 'pc':pc, 'pop_size':pop_size, 'max_gen':max_gen, 'Dth':Dth, 'k1':k1, 'w1':w1, 'w2':w2, 'maxdis':maxdis, 'GA_step':GA_step, 'VF_step':VF_step, 'seed_number':seed_number, 'alpha1':alpha1, 'alpha2':alpha2, 'alpha3':alpha3}
    self.record = record(file_path_name)
    self.record.write_scence_para(para_dict)
    self.pop = []  # current population
    self.bests = [0] * max_gen  # best fitness found per generation
    self.g_best = 0  # global best fitness
def main():
    """Record a short clip to temp.wav, transcribe it, and return the
    transcript string."""
    file_name = "temp.wav"
    # shutil.rmtree("data/")
    # os.mkdir("data")
    record.record(file_name)
    spoken_text = voice2text.retrieve_transcript("temp.wav")
    # removed: an unused list_words comprehension computed here had no effect
    return spoken_text
def __init__(self, src="uhd", dst=None, repeat=False, reader=True, tag=True, samp_rate=2e6, emulator=None):
    """Hierarchical GNU Radio block that decodes RFID traffic either live
    from a USRP ("uhd") or from a wav-file capture, optionally recording the
    raw samples to *dst*."""
    gr.hier_block2.__init__(
        self, "decoder",
        gr.io_signature(0, 0, 0),  # Input signature
        gr.io_signature(0, 0, 0))  # Output signature
    if src == "uhd":
        self._src = usrp_src.usrp_src(samp_rate=samp_rate, dst=dst)
        hi_val = 1.1
    else:
        # file playback path: wav -> complex -> magnitude squared
        self._wav = blocks.wavfile_source(src, repeat)
        self._r2c = blocks.float_to_complex(1)
        self._src = blocks.complex_to_mag_squared(1)
        self.connect(self._wav, self._r2c, self._src)
        hi_val = 1.09  # may need to be set to 1.05 depending on antenna setup
    self._back = background.background(reader, tag, emulator)
    self._trans = transition_sink.transition_sink(samp_rate,
                                                  self._back.append,
                                                  hi_val=hi_val)
    self.connect(self._src, self._trans)
    # record raw samples only when decoding live and dst is a real file path
    if dst and dst != "uhd" and src == "uhd":
        self._rec = record.record(dst, samp_rate)
        self.connect(self._src, self._rec)
def transcribe():
    """Record a short clip, transcribe it with the gcloud CLI, and print the
    top transcript."""
    # capture the audio into a temporary wav file
    fname = 'tmp.wav'
    record.record(fname)
    # shell out to gcloud for recognition; the JSON result lands in result.json
    cmd = "gcloud ml speech recognize {} --language-code='en-US' > result.json".format(fname)
    os.system(cmd)
    # pull the highest-ranked transcription alternative out of the result
    with open('result.json') as handle:
        result = json.load(handle)
    print(result['results'][0]['alternatives'][0]['transcript'])
def generate_sample(
    limit,
    midiout,
    note,
    velocity,
    midi_channel,
    threshold,
    print_progress=False,
    audio_interface_name=None,
    sample_rate=SAMPLE_RATE,
):
    """Play one MIDI note and record the resulting audio sample, including
    the release tail after note-off."""
    # silence anything still sounding before the new note starts
    all_notes_off(midiout, midi_channel)

    status = CHANNEL_OFFSET + midi_channel

    def _note_on():
        midiout.send_message([status, note, velocity])

    def _note_off():
        midiout.send_message([status, note, 0])
        return True  # Get the release after keyup

    return record(
        limit=limit,
        after_start=_note_on,
        on_time_up=_note_off,
        threshold=threshold,
        print_progress=print_progress,
        audio_interface_name=audio_interface_name,
        sample_rate=sample_rate,
    )
def main():
    """Record a clip at the configured location and upload it together with
    its direction-of-arrival estimate."""
    wav_path, data_path = filename('location', 'lure_index')
    # record returns the direction-of-arrival for the captured audio
    doa = record(wav_path, data_path)  # change filename to FILENAME
    upload_recording(wav_path, doa)
def __init__(self, src="uhd", dst="uhd", in_rate=2e6, out_rate=2e6, extra=None):
    """Top block emulating an RFID reader: decodes tag traffic and transmits
    miller-encoded commands (starting with a repeated REQA)."""
    super(reader_emulate, self).__init__()
    uhd = dst == "uhd"
    if uhd:
        dst = None
    # bit source driven by the reader logic; idles at 1 between commands
    self._bin_src = binary_src.binary_src(out_rate, encode="miller", idle_bit=1, repeat=[0, 1, 1, 0, 0, 1, 0])  # repeat REQA
    parser = Parser(extra)
    self._reader = parser.get_reader(self._bin_src.set_bits)
    # Do not record this
    self._dec = decoder.decoder(src=src, dst=None, reader=False, tag=True, samp_rate=in_rate, emulator=self._reader)
    self.connect(self._dec)
    self._mult = multiplier.multiplier(samp_rate=out_rate)
    self.connect(self._bin_src, self._mult)
    # pick the output sink: USRP transmit, file record, or discard
    if uhd:
        self._sink = usrp_sink.usrp_sink(out_rate)
    elif dst:
        self._sink = record.record(dst, out_rate)
    else:
        self._sink = blocks.null_sink(gr.sizeof_gr_complex)
    self.connect(self._mult, self._sink)
def test_empty_args(self):
    """A function called with no arguments should record zero argument
    entries."""
    with record.record():
        def foo():
            pass
        # call happens while recording is active
        foo()
    args = record.get_all_args()
    self.assertEqual(0, len(args.all()))
def put(self, line):
    """Parse a raw input line and, when it is valid for this repository's
    type ("zip" or "date"), push its transaction amount under the
    (CMTE_ID, zip-or-date) key.

    Returns True when the line was accepted, False otherwise.
    """
    line = structured(line)
    # deduplicated: both branches previously repeated identical store logic
    if self._type_ == "zip" and line.isvalidzip:
        key = (line.CMTE_ID, line.ZIP_CODE)
    elif self._type_ == "date" and line.isvaliddate:
        key = (line.CMTE_ID, line.TRANSACTION_DT)
    else:
        return False
    self._recent_ = key
    # lazily create the per-key record on first sight of this key
    if key not in self._database_:
        self._database_[key] = record()
    self._database_[key].push(line.TRANSACTION_AMT)
    return True
def detectBPM():
    """Worker loop: repeatedly record audio, estimate its tempo, and publish
    the latest BPM on the current thread object. Never returns."""
    t = threading.current_thread()
    while 1:
        wav = record()
        bpm = detect_bpm(wav)
        print("bpm: " + str(bpm))
        # expose the latest tempo to other threads via the thread object
        t.bpm = int(bpm)
        time.sleep(0.01)
def main(filename):
    """Load a JSON test config from *filename*, let the user pick tests via
    menu(), run the performance/hardware/stress suites, and record the
    combined results. Returns None in all cases."""
    json_data = {}
    hardware = []
    performance = []
    stress = []
    result = []
    print('\033[1;32m \t*********************************** \033[0m\n'
          '\033[1;32m \t start parse config file \033[0m\n'
          '\033[1;32m \t*********************************** \033[0m')
    try:
        with open(filename, "r") as fp:
            json_data = json.load(fp)
    # fixed: malformed JSON raises ValueError (JSONDecodeError), which was
    # previously uncaught and crashed instead of printing the failure banner
    except (OSError, ValueError):
        print('\033[1;31m \t*********************************** \033[0m\n'
              '\033[1;31m \t parse config file failed \033[0m\n'
              '\033[1;31m \t*********************************** \033[0m\n')
        return
    print('\033[1;32m \t***********************************\033[0m\n'
          '\033[1;32m \t parse config file successful\033[0m\n'
          '\033[1;32m \t***********************************\033[0m\n')
    # interactive test selection; menu() returns False when the user exits
    json_data = menu(json_data)
    if json_data == False:
        print('\033[1;31m exit test\033[0m\n')
        return
    # split the config into the three suite lists
    for key in json_data:
        if key == 'Stress_Test':
            stress = json_data[key]
        if key == 'Performance_Test':
            performance = json_data[key]
        if key == 'Hardware_Test':
            hardware = json_data[key]
    print('\033[1;32mtests is:')
    for key in json_data:
        print('{}:'.format(key))
        for item in json_data[key]:
            print('\t{}'.format(item['type']))
    print('\033[0m')
    print('\033[1;32m \t***********************************\033[0m\n'
          '\033[1;32m \t start test \033[0m\n'
          '\033[1;32m \t***********************************\033[0m\n')
    # run each suite and collect the results
    result.extend(test(performance, 'performance'))
    result.extend(test(hardware, 'hardware'))
    result.extend(test(stress, 'stress'))
    record(result)
    print(result)
    return
def voiceit_enrollment(uid, phrases):
    """Record the user saying a supported phrase and submit it for VoiceIt
    enrollment, repeating until three attempts succeed. Spoken feedback is
    played after every attempt; returns the last server response."""
    try:
        iterations = 3
        audio_length = 5
        enrollment_filename = 'enrollment.wav'
        response_filename = 'voiceit_enrollment_response'
        print("supported phrases: \n", pretty_list(phrases))
        count = 1
        path = '/voiceit_enrollment'
        filename = enrollment_filename
        method = 'POST'
        # 3 enrollments required for this API, keep trying until 3 successful
        # enrollments are made
        while count <= iterations:
            os.system("mpg321 ttsfiles/countdown.mp3")
            record.record(audio_length, enrollment_filename)
            phrase = file_to_text(enrollment_filename)
            params = {
                'phrase': phrase,
                'filename': filename,
                'userId': uid
            }
            # send request to local server (middleware)
            enrollment_response = file_request("/voiceit_enrollment", filename, params=params)
            enrollment_response_code = enrollment_response.get('responseCode')
            # if enrollment attempt is successful, provide user feedback and
            # increment count
            if enrollment_response_code == 'SUCC':
                # fixed: report the attempt that just succeeded BEFORE
                # advancing the counter (previously off by one)
                text = "attempt {0} of enrollment successful".format(count)
                count += 1
                tts(text, response_filename)
            else:
                # fixed: was `r['message']` (NameError — `r` was undefined)
                tts(enrollment_response['message'], response_filename)
            enrollment_cmd = 'mpg321 ttsfiles/{}.mp3'.format(response_filename)
            os.system(enrollment_cmd)
        os.system("mpg321 ttsfiles/EnrollmentSuccess.mp3")
        return enrollment_response
    except Exception as e:
        print(str(e))
        return str(e)
def _record_and_annotate(self, code, ret_first=True):
    """Parse *code*, execute it while type recording is active, annotate the
    AST with the recorded types, and return the first statement (or the whole
    module when ret_first is False)."""
    ast_ = ast.parse(code)
    with record.record():
        # NOTE: exec of arbitrary code — acceptable here only because the
        # inputs are test-authored snippets, never untrusted data
        exec(compile(ast_, filename=__file__, mode='exec'))
    ast_type_annotator.annotate_ast(ast_, __file__)
    if ret_first:
        return ast_.body[0]
    else:
        return ast_
def start_work(function):
    """Acquire a dataset (fresh recording or reload from disk), train the
    models with the spinner shown, then run prediction."""
    if function == FUNCTION_NEW:
        # capture a fresh dataset interactively
        dataset = record.record('rawdata', set_image)
    elif function == FUNCTION_READ:
        # reuse the previously saved dataset
        dataset = record.readdata('rawdata')
    rawx1, rawx2, y = dataset
    # show the busy spinner only while training runs
    ui_queue.put(('spinner', True))
    models = record.train(rawx1, rawx2, y, set_status)
    ui_queue.put(('spinner', False))
    scalers, classifiers, scores = models
    record.predict(scalers, classifiers, scores, set_status, set_image)
def azure_verification_enrollment(userId, phrases):
    """ given an user Id, record the audio file and send it to the server along with the Id """
    try:
        enrollment_filename = 'verification_enrollment.wav'
        response_filename = 'ttsfiles/verification_response.mp3'
        iterations = 3
        audio_length = 15
        path = '/azure_verification_enrollment'
        params = {
            'userId': userId,
            'filename': enrollment_filename
        }
        # fixed: was `pretty_list(phrase)` (NameError — parameter is `phrases`)
        print("phrases: ", pretty_list(phrases))
        count = 1
        # 3 registrations needed, keep trying until 3 successfull attemps have been made
        while count <= iterations:
            os.system(countdown_cmd)
            record.record(audio_length, enrollment_filename)
            enrollment_response = file_request(path, enrollment_filename, params)
            responseCode = enrollment_response.get('responseCode')
            # provide audio feedback to the user
            if responseCode == 'SUCC':
                st = "attempt {} of azure verification enrollment successfull".format(count)
                tts(st, response_filename)
                count += 1
            else:
                tts("error trying to enroll user for verification ", response_filename)
            speech_cmd = 'mpg321 {}'.format(response_filename)
            os.system(speech_cmd)
            # delete the per-attempt audio files so they cannot interfere
            # with the next request
            os.remove(enrollment_filename)
            os.remove(response_filename)
        os.system("mpg321 ttsfiles/EnrollmentSuccess.mp3")
        return enrollment_response
    except Exception as e:
        print(str(e))
        return str(e)
def test_return_empty_list(self):
    """A recorded function returning [] should produce exactly one return
    entry whose type is the builtin list."""
    with record.record():
        def foo():
            return []
        # call happens while recording is active
        foo()
    returns = record.get_all_returns()
    self.assertEqual(1, len(returns.all()))
    self.assertEqual(
        '{}.list'.format(record.BUILTINS_NAME), returns[0].type_name)
def findUser(user, group, conn, base, search):
    """Recursively collect the CNs of all users in an LDAP group, descending
    into nested groups; appends to *user* and returns it."""
    ret = conn.search(base, search, search_scope=ldap3.SUBTREE, attributes=['member'])
    entry = conn.entries[0]
    members = json.loads(entry.entry_to_json())['attributes']['member']
    # print(members)
    for member in members:
        # print(member)
        # DN containing "User" → a direct user member
        if "User" in member:
            r = record(member)
            print(r.CN)
            user.append(r.CN)
        # DN containing "Groups" → a nested group: recurse into it
        if "Groups" in member:
            print("Recurssion")
            r = record(member)
            search = '(&(objectCategory=GROUP)(cn=%s))' % r.CN
            user = findUser(user, r.CN, conn, base, search)
            # print(users, r.CN, conn, base, search)
    return user
def leftClick():
    """Handle the record button: capture speech, update the collected trip
    fields, then either ask for the missing fields or thank the user when
    the booking is confirmed."""
    global confirm
    global infos
    if confirm:
        # previous interaction finished — start a fresh form
        confirm = False
        infos = [-1 for _ in range(5)]
    msgcontent = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '\n '
    # slot-name -> infos index
    dict1 = {'OriginCity': 0, 'DestinationCity': 1, 'DepartureTimeMonth': 2,
             'DepartureTimeDate': 3, 'DepartureTimeYear': 4}
    # infos index -> human-readable prompt text
    dict2 = {0: 'departure city', 1: 'destination city', 2: 'departure month',
             3: 'departure date'}
    ques = []
    # call understanding function
    mystring = record.record()
    myunderstand = understand.understand(mystring)
    print(myunderstand)
    print(infos)
    for phrase in myunderstand:
        if len(phrase) == 1:
            if phrase[0] == 'True':
                confirm = True
        if len(phrase) == 3:
            infos[dict1[phrase[1]]] = phrase[2]
    for i, info in enumerate(infos):
        # not departure year missing
        if info == -1 and i != 4:
            ques.append(dict2[i])
        # departure year missing: default it instead of asking
        if info == -1 and i == 4:
            infos[i] = 2016
    # there is missing information
    if ques and not confirm:
        question = "Please tell us your "
        if len(ques) == 1:
            question += ques[0]
        elif len(ques) == 2:
            question += (ques[0] + ' and ' + ques[1])
        else:
            for i in range(len(ques) - 2):
                question += (ques[i] + ', ')
            # fixed: the last item was previously appended twice; join the
            # final TWO items as "... X and Y"
            question += (ques[len(ques) - 2] + " and " + ques[len(ques) - 1])
        # print(question)
        text_msglist.insert(END, msgcontent, 'green')
        text_msg.insert(END, question, 'green')
        text_msglist.insert(END, text_msg.get('0.0', END))
        text_msg.delete('0.0', END)
        os.system('speechtask1 ' + question)
    else:
        text_msglist.insert(END, msgcontent, 'green')
        text_msg.insert(END, "Thank you for using our system!", 'green')
        text_msglist.insert(END, text_msg.get('0.0', END))
        text_msg.delete('0.0', END)
        os.system("speechtask1 Thank you for using our system!")
def record_call(args):
    """Capture a signal (advanced or simple mode) and persist it to disk."""
    # Choose the recording strategy based on the CLI flags.
    if args.advanced:
        captured = record.record_advanced(args.duration, args.iterations, args.channels)
    else:
        captured = record.record(args.channels)
    # Save signal to file and report where it went.
    saved_path = file.save_signal(captured, args.name)
    print('Saved signal to: ' + saved_path)
def recordAll(score, args, counter=1):
    """Record a flat *score* phrase by phrase, recursing on the unplayed tail.

    Each pass plays the remaining score (interruptible with Ctrl+c), records
    one phrase to a log file, generates and saves its performance, then
    recurses on whatever the performance did not cover.
    """
    if len(score) == 0:
        return
    try:
        print("[INFO] Now playing score. You can start to record anytime by pressing Ctrl+c")
        record.playStream(score)
    except KeyboardInterrupt:
        # Ctrl+c is the documented way to stop playback and start recording.
        pass
    print("[INFO] =====Now recording phrase no." + str(counter) + "=====")
    base_name = args.outputDir + settings.getScoreName(args.scoreFilename)
    log_path = base_name + '.' + str(counter) + '.log'
    record.record(score, log_path)
    perf = musicGenerator.generatePerf(score, log_path)
    perf_path = base_name + '.' + str(counter) + '.perf' + settings.defaultOutputFormat
    musicGenerator.savePerf2File(perf, perf_path)
    # Whatever the performance covered is done; recurse on the remainder.
    recordAll(score[len(perf):], args, counter + 1)
def test_sample_function_has_proper_function(self):
    """The single recorded return is typed str and owned by _sample_function."""
    with record.record():
        self._sample_function()
    recorded = record.get_all_returns()
    self.assertEqual(1, len(recorded.all()))
    first = recorded[0]
    self.assertEqual('{}.str'.format(record.BUILTINS_NAME), first.type_name)
    self.assertEqual('_sample_function', first.function.name)
def start_work(function):
    """Acquire raw data (fresh recording or from disk), train, then predict.

    NOTE(review): if *function* is neither FUNCTION_NEW nor FUNCTION_READ the
    raw-data names are never bound and the train step raises NameError —
    presumably callers only ever pass those two values; confirm.
    """
    if function == FUNCTION_NEW:
        raw_a, raw_b, labels = record.record('rawdata', set_image)
    elif function == FUNCTION_READ:
        raw_a, raw_b, labels = record.readdata('rawdata')
    # Show the busy spinner while the slow training step runs.
    ui_queue.put(('spinner', True))
    scalers, classifiers, scores = record.train(raw_a, raw_b, labels, set_status)
    ui_queue.put(('spinner', False))
    record.predict(scalers, classifiers, scores, set_status, set_image)
def record(self):
    """Capture audio with the app's settings, classify it, and show the word."""
    # The module-level ``record`` is still reachable here: this method only
    # shadows the name on the class, not in the module namespace.
    captured = record.record(App.CHANNELS, App.FORMAT, App.RATE, App.SECONDS,
                             update_text_callback=self.set_text)
    self.set_text("Processing")
    recognized = self.process(captured)
    self.set_text("You said: {:s}".format(recognized))
def test_single_arg(self):
    """A one-argument call records the arg's name, owner, and type."""
    with record.record():
        def foo(bar):
            pass
        foo('a')
    recorded = record.get_all_args()
    self.assertEqual(1, len(recorded.all()))
    only = recorded[0]
    self.assertEqual('foo', only.function.name)
    self.assertEqual('bar', only.arg_name)
    self.assertEqual('{}.str'.format(record.BUILTINS_NAME), only.type_name)
def test_return_obj_instance(self):
    """Returning an instance records its fully-qualified class name."""
    with record.record():
        class Foobar(object):
            pass

        def foo():
            return Foobar()
        foo()
    recorded = record.get_all_returns()
    # Two returns recorded: one with no type, one carrying the instance type.
    self.assertEqual(2, len(recorded.all()))
    self.assertEqual(None, recorded[0].type_name)
    self.assertEqual('record_test.Foobar', recorded[1].type_name)
def test_return_obj_class(self):
    """Returning a class object itself records the builtin ``type``."""
    with record.record():
        class Foobar(object):
            pass

        def foo():
            return Foobar
        foo()
    recorded = record.get_all_returns()
    # Two returns recorded: one with no type, one typed builtins.type.
    self.assertEqual(2, len(recorded.all()))
    self.assertEqual(None, recorded[0].type_name)
    self.assertEqual('{}.type'.format(record.BUILTINS_NAME),
                     recorded[1].type_name)
def test_sample_function_has_proper_function(self):
    """Both recorded args (self and the string) belong to _sample_function."""
    with record.record():
        self._sample_function('foobar')
    recorded = record.get_all_args().all()
    self.assertEqual(2, len(recorded))
    expected_types = {
        '{}.str'.format(record.BUILTINS_NAME),
        'record_test.ArgsRecordTest'}
    self.assertEqual(expected_types,
                     {entry.type_name for entry in recorded})
    for entry in recorded:
        self.assertEqual('_sample_function', entry.function.name)
def test_multiple_args(self):
    """Each positional arg is recorded with its own name and type, in order."""
    with record.record():
        def foo(bar, baz):
            pass
        foo('a', 2)
    recorded = record.get_all_args()
    self.assertEqual(2, len(recorded.all()))
    # Table-driven: (arg_name, type_name) per recorded position.
    expected = [
        ('bar', '{}.str'.format(record.BUILTINS_NAME)),
        ('baz', '{}.int'.format(record.BUILTINS_NAME)),
    ]
    for idx, (arg_name, type_name) in enumerate(expected):
        entry = recorded[idx]
        self.assertEqual('foo', entry.function.name)
        self.assertEqual(arg_name, entry.arg_name)
        self.assertEqual(type_name, entry.type_name)
def __init__(self, src="uhd", dst=None, repeat=False, reader=True, tag=True, samp_rate=2e6, emulator=None):
    """Build the decoder hierarchical block.

    src: "uhd" for a live USRP source; otherwise a path to a WAV capture.
    dst: when src is "uhd" and dst is a plain filename, the source samples
         are also routed into a record.record sink.
    repeat: loop the WAV file (file-source path only).
    reader, tag, emulator: forwarded to the background worker.
    samp_rate: sample rate in Hz (default 2e6).
    """
    gr.hier_block2.__init__(self, "decoder",
        gr.io_signature(0, 0, 0), # Input signature
        gr.io_signature(0, 0, 0)) # Output signature
    if src == "uhd":
        # Live capture straight off the USRP.
        self._src = usrp_src.usrp_src(samp_rate=samp_rate, dst=dst)
        hi_val = 1.1
    else:
        # Offline path: WAV file -> complex -> magnitude-squared power signal.
        self._wav = blocks.wavfile_source(src, repeat)
        self._r2c = blocks.float_to_complex(1)
        self._src = blocks.complex_to_mag_squared(1)
        self.connect(self._wav, self._r2c, self._src)
        hi_val = 1.09 # may need to be set to 1.05 depending on antenna setup
    self._back = background.background(reader, tag, emulator)
    self._trans = transition_sink.transition_sink(samp_rate, self._back.append, hi_val=hi_val)
    # NOTE(review): self._connect (underscore) is used here and below while
    # self.connect is used above — confirm _connect is a real helper and not
    # a typo for the gr.hier_block2 connect method.
    self._connect(self._src, self._trans)
    if dst and dst != "uhd" and src == "uhd":
        self._rec = record.record(dst, samp_rate)
        self._connect(self._src, self._rec)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 01 22:43:57 2014

@author: Juda
@email: [email protected]
"""
# BUG FIX: socket.timeout is caught in the game loop below, but ``socket``
# was never imported in this module — the except clause itself would raise
# NameError the first time a receive timed out.
import socket

import network
import record
import chess

# Wire up logging, the AI connections, and the board.
log = record.record()
server = network.server(log)
board = chess.chess(log)
log.logging("Game Begin", 'SHOWALL')

now_player = 1
player_limit = [3, 3]
steps = 0
while True:
    steps += 1
    # Alternate between player 0 and player 1 each turn.
    now_player = 1 - now_player
    server.send(server.AI[now_player], 'action')
    log.logging("Send to player %d [name: %s] a signal: ACTION" % (now_player, server.AIname[now_player]))
    try:
        message = server.recieve(server.AI[now_player])
    except socket.timeout:
        log.logging("Recieve message form player %d [name: %s]: TIME EXCEEDED LIMIT" % (now_player, server.AIname[now_player]), 'SHOWALL')
def _record_annotate_and_output(self, code):
    """Run *code* under the recorder, annotate its AST, and return the types."""
    tree = ast.parse(code)
    with record.record():
        # Execute the snippet so the recorder observes its calls/returns.
        exec(compile(tree, filename=__file__, mode='exec'))
    ast_type_annotator.annotate_ast(tree, __file__)
    return display_api.get_types(tree)
def edit(name):
    """(Re)record the sound for *name* and refresh its cached Sound object."""
    global filepaths, sounds
    wav_path = workingdir + "s/" + name + ".wav"
    # Capture a fresh take over the existing file.
    # NOTE(review): ``time`` here is presumably a module-level duration, not
    # the stdlib module — confirm at the definition site.
    record(wav_path, time)
    filepaths[name] = wav_path
    sounds[name] = pygame.mixer.Sound(wav_path)
u,v,press = fluidsolve.fluidsolve(a,b,c,d,f1,f2,u,v,uft,vft,w1,w2,pft,f.time,press) #output the data # Move boundary at the local fluid velocity b1, b2 = mv_b.move_boundary(b1,b2,f.Q,u,v) #print b1, b2 # now we need to output the data, right now we are only making a vorticity movie # this needs to be changed to output full data of u,v,umag,vort, and forces #print u, v if f.time>ptime: rec.record(u,v,vort,press,f1,f2,b1,b2,k) #make a vorticity plot plt.figure() vort = vrt.vorticity(u,v,vort) cmaxx = np.max(vort) cmxx = np.max(cmaxx) plt.pcolor(xcoord, ycoord, vort,cmap = cm.hot, vmax=cmxx/16.0, vmin=-cmxx/16.0) plt.colorbar() plt.plot(b1,b2, 'yo') plt.plot(b1t, b2t, 'go') ptime = f.graphtime + ptime #this makes 'print time' increase so taht this if statement doesn't run every time step save = str('%03d' % k) + '.png' plt.savefig(save, dpi = 100, bbox_inches='tight') print 'wrote file', save
def run(self):
    """Capture one fixed-length recording to RECORDING_FILENAME.

    NOTE(review): signature suggests a threading.Thread.run override — the
    class header is not visible here; confirm.
    """
    # Blocks for RECORDING_DURATION seconds at sample rate RATE.
    record.record(RECORDING_FILENAME,RECORDING_DURATION,RATE)
# print 'attr, entro_old, entro_set, dist, entro_new : ', attr, entro_old, entro_set, dist try: entro_new = sum(map(operator.mul, dist, [entropy_of_set(x) for x in data_sets])) except: print [entropy_of_set(x) for x in data_sets] # print 'dist : ', dist entro_delta = entro_old - entro_new # print 'entro_delta : ', entro_delta return entro_delta/entro_set # 读入数据 data_lines = open("../res/hypo.data.my").readlines() test_datas = [] for line in data_lines: line = line.strip().split('.|') test_datas.append(record(line[0].split(','), 1.0, line[1])) # 数据预处理,将数据类型转化为字符类型 for x in clist: for r in test_datas: if r.data[x] != '?': r.data[x] = float(r.data[x]) else: r.data[x] = -1 # 构造跟节点,构造集合S,S初始化包含根节点 # 当根节点非空时,执行操作 # 取出Sets中的一个节点R,如果 # R为单一属性的记录 # -->得到叶子节点 # 否则