def gravar(self):
    """Record audio to a temporary wav file, then move it to a user-chosen path.

    Opens a save-file dialog; if the user cancels (empty path is returned),
    the recording is left at 'output.wav' and ``self.path`` is not touched
    (the original crashed on rename to '').
    """
    # No need to pre-create the file just to read back its name — the original
    # open('output.wav', 'w') also truncated any existing recording.
    path = 'output.wav'
    recorder.record(path)
    chosen = filedialog.asksaveasfilename(defaultextension='wav')
    if chosen:  # asksaveasfilename returns '' when the dialog is cancelled
        os.rename(path, chosen)
        self.path = chosen
def userInput():
    """Obtain an ``sr.AudioFile`` either from an existing wav/mp4 file or a new recording.

    Sets the module-level ``rawFile`` and returns it. Unrecognized yes/no
    answers re-prompt recursively; non-numeric durations re-prompt in a loop
    (the original crashed on ``int(duration)``).
    """
    global rawFile
    recordChoice = input("\nDo you have a file you would like translated? ").lower()
    if recordChoice in ("y", "yes"):
        fileName = input("What is the name of the file you would like translated? ")
        # renamed from `type`, which shadowed the builtin
        fmt = input("What format is the file in? (wav or mp4) ").lower()
        if fmt in ("wav", ".wav"):
            rawFile = sr.AudioFile(fileName)
        elif fmt in ("mp4", ".mp4"):
            # Extract the audio track to a wav file speech_recognition can read.
            clip = mp.VideoFileClip(fileName)
            clip.audio.write_audiofile("audio.wav")
            rawFile = sr.AudioFile("audio.wav")
        else:
            print("Apologies, the program only supports .wav and .mp4\n")
            terminate()
    elif recordChoice in ("n", "no"):
        import recorder
        # Keep asking until the user enters a whole number of seconds.
        while True:
            duration = input("How many seconds will you take to record your audio clip? ")
            try:
                recorder.RECORD_SECONDS = int(duration)
                break
            except ValueError:
                print("Please enter a whole number of seconds.")
        print("\nPrepare to record a " + duration + " second audio clip.")
        time.sleep(2)
        print("\nStarting in ... 5")
        time.sleep(1)
        for i in range(4, 0, -1):
            print("\t\t" + str(i))
            time.sleep(1)
        recorder.record()
        rawFile = sr.AudioFile(recorder.WAVE_OUTPUT_FILENAME)
    else:
        rawFile = userInput()
    return rawFile
def start_listening(self):
    """Continuously record and analyse audio while the global RUNNING flag is set.

    Transient IOErrors from the audio device are swallowed and the loop
    retries on the next iteration.
    """
    counter_instance = 0
    while RUNNING:
        try:
            record(DURATION)
            self.analyse(counter_instance)
            time.sleep(0.08)
        except IOError:  # original bound the exception to an unused name `err`
            continue
def xlonly(tok):
    """Play a greeting via Baidu TTS, record a command, and start Baidu STT.

    tok: Baidu TTS access token.
    """
    # Greeting text: "Dad, Mom, hello — I'm Xiaolan, the personal assistant
    # your children sent over." / "What can I do for you?"
    saytext1 = '老爸,老妈,你们好啊,我是你们儿女派来的私人助理,小蓝'
    saytext3 = '有什么吩咐吗?'
    # The TTS API expects UTF-8 encoded bytes.
    saytext2 = saytext1.encode('utf-8', 'strict')
    saytext4 = saytext3.encode('utf-8', 'strict')
    bt = baidu_tts()
    bt.tts(saytext2, tok)
    speaker.speak()
    bt.tts(saytext4, tok)
    speaker.speak()
    speaker.ding()  # audible cue: recording starts
    recorder.record()
    speaker.dong()  # audible cue: recording finished
    # NOTE(review): this passes the global `token`, not the `tok` parameter —
    # confirm that is intentional. The trailing '{' argument also looks like a
    # truncated literal; verify against baidu_stt's signature.
    bs = baidu_stt(1, token, 2, '{')
def play_game(self, opp_type, opp_level):
    """Run the main chess loop against a friend or a computer opponent.

    opp_type:  'computer' or anything else (human opponent).
    opp_level: 'easy' / 'medium' / 'hard'; unknown levels fall back to easy.
    """
    self.DialogFrame.opponent_type = opp_type
    if opp_type == 'computer':
        # Map difficulty labels to engine strength; default to the weakest.
        strength = {'easy': 1, 'medium': 5, 'hard': 10}.get(opp_level, 1)
        self.DialogFrame.computer_engine = Computer(strength)
    self.DialogFrame.board_obj = chess.Board()
    write_png(self.DialogFrame.board_obj)
    while not self.DialogFrame.board_obj.is_checkmate():
        if self.DialogFrame.misunderstood_times < 2:
            # Normal path: take a spoken move and parse it.
            intent, text, slots = self.NLU.parse(record())
        else:
            # Fallback after repeated misunderstandings: accept typed input.
            utterance = input("Please type your move or request:")
            intent, text, slots = self.NLU.parse_text(utterance)
            self.DialogFrame.misunderstood_times = 0
        # Updates the dialog frame and generates the NLG response.
        print(self.execute(intent, text, slots))
def get_audio(self, word):
    """Prompt the user about recording; record `word` on 'r', else return the flag."""
    choice = self.ui.prompt_audio()
    if choice != 'r':
        return choice
    return recorder.record(word)
def main():
    """Record speech, transcribe via MS STT, then map the text to a LUIS command."""
    # start recording
    r.record()
    # request to MS STT
    data = h.handler()
    if 'DisplayText' not in data.keys():
        # Recognition failed — show the raw payload for debugging.
        print("I don't understand you.")
        print("Recived data: ", data)
        return
    result = data['DisplayText']
    print("You say:", result)
    print("Wait for LUIS...")
    command = l.get_command(result)
    print("Command:", command)
def main(arguments):
    """Create or select a drumset, record a beat, revise it, and save the result.

    arguments: docopt-style dict with keys 'new' and '<name>'.
    Returns 0 on user abort, None on success.
    """
    with open('settings.json') as jfile:
        settings = json.load(jfile)
    name = 'drumsets/' + arguments['<name>']
    if arguments['new']:
        if os.path.isdir(name):
            choice = input(
                "Are you sure you want to overwrite {0}?(y/N): ".format(name))
            if choice != 'y':
                return 0
            shutil.rmtree(name)
        os.mkdir(name)
        drumtools.setUpDrumset(name, settings["record_device"])
    # Select drumset
    drumset = drumtools.Drumset(name)
    choice = input("Drumset selected, are YOU ready to make a BEAT?(Y/n): ")
    if choice == 'n':
        return 0
    # record the drums. May need to import microphone
    amp_arr, sr = record(settings["record_device"])
    print("Great job! now lets make this beat rock more...")
    # Analyze and modify the track
    new_amp_arr = modify(amp_arr, sr, drumset)
    # Save the drums temporarily, then preview the revision.
    librosa.output.write_wav('.temp.wav', new_amp_arr, sr)
    input("Press enter to hear the revised beat")  # pause; result intentionally unused
    play('.temp.wav', settings["playback_device"])
    choice = input("Save the beat?(Y/n): ")
    if choice == 'n':
        return 0
    name = None
    while name is None:
        name = input("Name the beat: ") + ".wav"
        if os.path.exists(name):
            choice = input(
                "Are you sure you want to overwrite {0}?(y/N): ".format(name))
            # Prompt advertises No as the default; the original overwrote on
            # anything except an explicit n/N. Require an explicit 'y' instead,
            # matching the first overwrite prompt above.
            if choice != 'y':
                name = None
    librosa.output.write_wav(name, new_amp_arr, sr)
    os.remove('.temp.wav')
def rec():
    """Poll the recorder for notes and display them until recording stops.

    Sets the module-level ``recording`` flag; another handler is expected to
    clear it to end this loop.
    """
    # The original did `globals["recording"] = True`, which subscripts the
    # builtin globals() *function* and raises TypeError at runtime.
    global recording
    recording = True
    while recording:
        note = recorder.record()
        print(note)
        # Pass a callable to after(); the original called showFqIn(note)
        # immediately and scheduled its return value instead of the function.
        root.after(1, lambda n=note: showFqIn(n))
def do_GET(self):
    """Serve an MJPEG camera stream, a viewer page, or trigger a recording.

    Routes by URL suffix (Python 2 BaseHTTPServer handler):
      *.mjpg        -> endless multipart JPEG stream of pedestrian-annotated frames
      *.html        -> simple HTML page embedding the stream
      *?genvideo=1  -> acknowledge in HTML, then record 15 seconds of video
    """
    if self.path.endswith('.mjpg'):
        self.send_response(200)
        self.send_header('Content-type','multipart/x-mixed-replace; boundary=--jpgboundary')
        self.end_headers()
        while True:
            try:
                rc,img = capture.read()
                if not rc:
                    continue  # frame grab failed; try again
                img = detectPedestrians(img)
                # OpenCV delivers BGR; PIL expects RGB
                imgRGB=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
                jpg = Image.fromarray(imgRGB)
                tmpFile = StringIO.StringIO()  # Python 2 in-memory buffer
                jpg.save(tmpFile,'JPEG')
                # Emit one multipart chunk: boundary, headers, then the JPEG.
                self.wfile.write("--jpgboundary")
                self.send_header('Content-type','image/jpeg')
                self.send_header('Content-length',str(tmpFile.len))
                self.end_headers()
                jpg.save(self.wfile,'JPEG')
                time.sleep(0.05)  # throttle to roughly 20 fps
            except KeyboardInterrupt:
                return
        return  # unreachable: the while loop only exits via return
    if self.path.endswith('.html'):
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        # NOTE(review): the stream URL is hard-coded to one public IP.
        self.wfile.write('<html><head></head><body>')
        self.wfile.write('<img src="http://172.113.170.244:8080/cam.mjpg"/>')
        self.wfile.write('</body></html>')
        return
    if self.path.endswith('?genvideo=1'):
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        self.wfile.write('<html><head></head><body>')
        self.wfile.write('Generating video...')
        self.wfile.write('</body></html>')
        # Recording happens after the response body is sent.
        recorder.record(15);
        return
def solvePuzzle(tiles, boardBlank, startTime):
    """Backtracking tiling solver: fill boardBlank using (rotations of) tiles.

    Scans the board cell by cell; for each empty cell it tries candidate tiles
    and rotations via putRotTile, recording every placement so dead ends can
    be undone with recorder.recallHistory. Returns a list of successful board
    histories, or None when no solution exists.

    NOTE(review): reconstructed from whitespace-mangled source — the nesting
    of `i = position[0]` and the completion check is a best-effort reading.
    """
    successHistory = []
    # Solver state: working board, remaining candidate tiles, undo history,
    # and the tile/rotation cursors.
    board, tilesC, history, count, successCount, countTile, countRot = helper._init_(
        boardBlank, tiles)
    ref = np.sum(boardBlank)  # fill mass of the blank board, used by putRotTile
    numOnes = boardBlank.shape[0] * boardBlank.shape[1]
    i = 0
    while i < board.shape[0]:
        j = 0
        while j < board.shape[1]:
            position = [i, j]
            if board[i, j] == 0:
                # Keep trying tiles/rotations until this cell gets covered.
                while board[i, j] == 0:
                    if countTile < len(tilesC):
                        tile = tilesC[countTile]
                        board, old_board, countTile, countRot, n, tile = putRotTile(
                            tile, board, position, countTile, countRot, tilesC, ref)
                    else:
                        # Every candidate exhausted: backtrack, or give up.
                        if not history:
                            if len(successHistory) > 0:
                                return successHistory
                            else:
                                print("No solution!!!")
                                return None
                        board, tile, tilesC, countTile, i, j, countRot, n = recorder.recallHistory(
                            history)
                        position = [i, j]
                # Record the successful placement so it can be undone later.
                # NOTE(review): `old_board` may be unbound if the first branch
                # never ran for this cell — confirm putRotTile always precedes.
                history = recorder.record(history, old_board, tile, tilesC,
                                          countTile, i, j, countRot, n)
                tilesC.pop(countTile)
                countTile = 0
                countRot = 0
                count += 1
                j += 1
            else:
                j += 1
            i = position[0]  # backtracking may have moved the row cursor
        i += 1
        if np.sum(board) == numOnes:
            # Board fully covered: log the solution; in demo mode stop once
            # more than one success has been collected.
            board, tile, tilesC, countTile, i, j, countRot, n, successCount, successHistory = recorder.success(
                board, count, successCount, history, startTime, successHistory)
            if demoFlag:
                if successCount > 1:
                    return successHistory
    return successHistory
import AutoTune
import recorder
import player

# Record five seconds of audio to file.wav, then play it back.
recorder.record(5, "file.wav")
player.play("file.wav")
engine.runAndWait() engine.say('one to ten') engine.runAndWait() def end(): engine.say('Patient data is saved') engine.runAndWait() file.close() #call introduction method to start the app intro() # get the name while p_name == False: name() rec.record() text, c_rate = google.recognize() if int(round(float(c_rate)))*10 > 9: engine.say('You said') print "You said " + text engine.say(text) engine.runAndWait() yes = False while yes == False: engine.say('Yes or No') engine.runAndWait() rec.record() yn_text, c_rate = google.recognize() if yn_text == "yes" or yn_text == "YES" or yn_text == "Yes": p_name = True yes = True
strides=2, padding="same", name="multiout_b1"))(b0) b2 = K.layers.Flatten(name="multiout_b2")(b1) b3 = K.layers.Dense(10, name="multiout_b3")(b2) b4 = K.layers.Activation("softmax", name="multiout_b4")(b3) return x, [x, b0, b1, b2, b3, b4, a0, a1, a2, a3, a4, a5, a6] x, y = multiout_test() record( loss_fn_str="mse", file_name="multiple_output_model.info", input_shape=(3, 2, 3, 5), label_shape=(3, 10), optimizer=opt.SGD(learning_rate=0.1), iteration=10, inputs=x, outputs=y, multi_out=[5, 12], # debug=["name", "summary", "output", "initial_weights"], ) ## please generate all test cases since golden data format can change anytime fc_sigmoid = [ K.Input(shape=(3, 3)), K.layers.Dense(5), K.layers.Activation("sigmoid"), K.layers.Dense(10), K.layers.Activation("softmax"), ]
def recordWord(word):
    """Record the given word for two seconds and save it as '<word>.wav'.

    Does nothing when a recording for the word already exists.
    """
    if exists(word):
        return
    samples = rd.record('Đọc: "%s" ...' % word, time=2)
    rd.save(samples, '%s.wav' % word)
    print('--------------------------------------')
from time import sleep
import recorder
import player
import prepare

# Record two seconds of audio, run it through the prepare step, then play it
# back. (The original pre-initialized `recording = []` and `samples = 0` only
# to overwrite both immediately — those dead assignments are removed.)
(recording, samples) = recorder.record(2)
recording = prepare.prepare(recording)
player.replay(b''.join(recording))
norm=numpy.loadtxt('airplane_model') #timp=len(Ys)/float(samplerate) #t=linspace(0,timp,len(Ys)) k = arange(freqrange) T = 1 frq = k/T # two sides frequency range Ys = Ys / sum(Ys) Ys = Ys[1:] norm = norm[1:] norm = norm / sum(norm) #plot(arange(25000), abs(norm)) #print sum(norm), sum(Ys) airplane_ness = sum((Ys-norm)**2) amplitude = sum(abs(y))/samples #print sum(abs(y))/samples, airplane_ness print(amplitude, airplane_ness) upload(amplitude,airplane_ness) if amplitude > amplitude_threshold and airplane_ness < airplane_ness_threshold: print("PROBABLY AN AIRPLANE") #plot(arange(24999), abs(Ys-norm)*1000) #show() while True: #print "RECORD" recorder.record() #print "CHECK" do_check()
with open(WORKDIR_PATH+'localization/data/ceres_input/markers.txt', 'w') as f: for i in range(len(ps)): f.write('{0} {1} {2} {3} {4}\n'.format(ps_undistort[i][0], ps_undistort[i][1], Pws[i][0], Pws[i][1], Pws[i][2])) with open(WORKDIR_PATH+'localization/data/ceres_input/initial.txt', 'w') as f: if len(ans)==5: # x, y, z, l1, l2 f.write('{0}\n{1} {2} {3} {4} {5}\n'.format(0, ans[0][0], ans[1][0], ans[2][0], ans[3][0], ans[4][0])) elif len(ans)==6: # x, y, z, yaw, l1, l2 f.write('{0}\n{1} {2} {3} {4} {5} {6}\n'.format(1, ans[0][0], ans[1][0], ans[2][0], ans[3][0], ans[4][0], ans[5][0])) if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument('--rosbag', help='rosbag', type=str) parser.add_argument('--config', help='config', type=str, default=WORKDIR_PATH+'localization/cfg/config.json') parser.add_argument('--exec_ceres', action='store_true', help='execute nonlinear optimization with ceres in c++') args = parser.parse_args() with open('../cfg/config.json') as f: config = json.load(f) record(args.rosbag, config['t_at']*2) R, T = localize_from_rosbag(args.rosbag, config) print('Translation: {}'.format(T)) # print('Rotation: {}'.format(R)) print('Yaw: {} [deg]'.format(np.rad2deg(np.arcsin(R[0][1]))[0])) if args.exec_ceres: subprocess.call(WORKDIR_PATH+'localization/src/ceres_optim/build/localize')
def start_script(server):
    """Drive a scripted Q&A session: play question videos, capture answers, branch.

    The script is a dict of question nodes keyed 'question_N'. Each node has
    'text' and 'video', plus either 'answer' (keyword -> next-question key,
    with an 'any' fallback) or 'next'. Nodes with neither end the session.
    Python 2 code (print statements, list-returning dict.keys()).
    """
    script = utils.load_script()
    question_key = "question_1"
    if question_key not in script:
        raise Exception("Script has to start with question_1")
    while True:
        time.sleep(0.5)
        current_question = script[question_key]
        # Joanna Question UI
        print current_question["text"]
        server.send_message('out', current_question["text"])
        # Video File
        video_path = os.path.join("/assets/videos", current_question["video"])
        server.send_video(video_path)
        # Sleep for length of file
        time.sleep(get_length(video_path))
        if "answer" in current_question:
            answers = current_question["answer"]
            if DEMO_MODE:
                content = current_question["demo_answer"]
                time.sleep(3)
            else:
                data = record()
                response = recognize(data, RATE)
                # Nothing recognized: replay the same question.
                if len(response.results) == 0:
                    continue
                result = response.results[0]
                if not result.alternatives:
                    continue
                content = result.alternatives[0].transcript
            # Patient answer
            server.send_message('in', content)
            # NOTE(review): assumes every answer map contains an 'any' key —
            # remove() raises ValueError otherwise.
            key_list = answers.keys()
            key_list.remove("any")
            if not key_list:
                update_form(server, current_question)
                question_key = answers["any"]
            else:
                # Pick the first keyword found in the transcript; otherwise
                # fall back to the 'any' branch.
                found_answer = False
                for answer in key_list:
                    if answer in content:
                        print "Found " + answer + " in " + content
                        question_key = answers[answer]
                        found_answer = True
                        update_form(server, current_question)
                        break
                if not found_answer:
                    update_form(server, current_question)
                    question_key = answers["any"]
        elif "next" in current_question:
            print 'found next'
            question_key = current_question["next"]
            print question_key
            update_form(server, current_question)
        else:
            update_form(server, current_question)
            # End the questionaire
            break
def setUpDrumset(name, record_device, drums=('bass', 'snare')):
    """Record and fingerprint each drum, writing per-drum JSON property files.

    For every drum name, records a sample from `record_device`, detects beat
    frames, computes per-beat features (avg frequency, dissipation,
    hills-and-valleys, curvature), and stores their means/stdevs plus the
    chosen audio file name to '<name>/<drum>.json'.
    (Default changed from a mutable list to a tuple; it is only iterated.)
    """
    for drum in drums:
        # json properties
        properties = {}
        input("Press enter to record the {0}...".format(drum))
        countdown(3)
        # record the drums. May need to import microphone
        arr_amp, sr = record(record_device)
        beat_frames = utils.getBeatFrames(amp_array=arr_amp,
                                          sample_rate=sr,
                                          lookahead=256,
                                          significance=7.0)
        # Per-beat feature lists (512-sample hop windows around each beat).
        freq_list = []
        diss_list = []
        hv_list = []
        curve_list = []
        for frame in beat_frames:
            freq_list.append(
                recognize.avgFreq(arr_amp[frame * 512:(frame + 2) * 512 - 1]))
            diss_list.append(
                recognize.dissipation(arr_amp[frame * 512:frame * 512 + 511]))
            hv_list.append(
                recognize.hillsAndValleys(arr_amp[frame * 512:frame * 512 + 511]))
            curve_list.append(
                recognize.curvature(arr_amp[frame * 512:frame * 512 + 511]))
        # Spread and central tendency of each feature. (The original also
        # computed `beats = librosa.frames_to_time(...)` but never used it.)
        properties["stdevs"] = {
            "freq": statistics.stdev(freq_list),
            "diss": statistics.stdev(diss_list),
            "hv": statistics.stdev(hv_list),
            "curve": statistics.stdev(curve_list)
        }
        properties["means"] = {
            "freq": statistics.mean(freq_list),
            "diss": statistics.mean(diss_list),
            "hv": statistics.mean(hv_list),
            "curve": statistics.mean(curve_list)
        }
        properties["drum_name"] = drum
        # get sound desired
        properties["audio_file"] = input("Audio file name:")
        # The original used `is ""` — an identity check against a literal,
        # which is unreliable. Compare by value.
        if properties["audio_file"] == "":
            properties["audio_file"] = drum + '.wav'
        with open(name + '/' + drum + '.json', 'w') as outfile:
            json.dump(properties, outfile)
def recordWord(word, chExist=True):
    """Record the word for two seconds and save it to '<folder><word>.wav'.

    chExist: when True (default), skip words that already have a recording.
    """
    if chExist and exists(word):
        return
    samples = rd.record('Đọc: "%s" ...' % word, time=2)
    rd.save(samples, folder + '%s.wav' % word)
    print('--------------------------------------')
from recorder import record
from speak import speak_text
from corona_stats import tell_corona_stats
from play_music_online import playmusic
from play_music_offline import playmusic_offline
from chatbotency import activate_chatbotency

f = 0
print('Loading...')
text = record()
# Check for None BEFORE splitting — the original called text.split() first,
# which raises AttributeError when recognition returns None.
if text is not None and text.split() == ['ok', 'nova']:  # Novice Operational Virtual Assistant
    speak_text()
    f = 1
while f:
    text = record()
    if text is None:
        text = "Speak Properly"
        speak_text(text)
        continue
    elif 'corona' in text:
        tell_corona_stats()
    # The original `['play', 'song'] in text.split()` tested whether the list
    # itself was an element of the word list — never true. Test that both
    # words are present instead.
    elif {'play', 'song'} <= set(text.split()):
        speak_text("Online or Offline?")
        text = record()
# --- fetching other action --- if planner_action and exercise_started: actual_act, is_movement_done = movement_planner.get_action (i-exercise_start_id) exercise_started = not is_movement_done if is_movement_done and not multiple_planner_run: exercise_done = True # --- executing action --- start = time.time() if move_motors: exec_action(actual_act, foot_phases) all_times[2].append(time.time()-start) # --- recording stuff --- recorder.record (actual_act, foot_phases, move_motors) # --- logging --- all_times[3].append(time.time()-last_time) last_time = time.time() # --- advancing foot phases if advance_foot_phases: foot_phases += 2*np.pi*foot_f0/30 # --- waiting the right amount of time --- while time.time()-last_loop < 1/30: pass last_loop = time.time()
def locateInArray(list1, list2):
    """Return the first index in list1 where list2 occurs as a contiguous
    sub-list, or -1 when it does not occur. (Python 2: uses xrange.)

    Scans list1 for list2[0]; on a hit, compares element by element. A partial
    match that runs past the end of list1 raises IndexError and is treated as
    "not found" — no later full match could fit at the tail anyway.
    """
    x = 0
    y = 0
    for x in xrange(len(list1)):
        if list1[x] == list2[0]:
            counter = 0
            for y in xrange(len(list2)):
                try:
                    if list1[x + y] != list2[y]:
                        break
                    else:
                        counter += 1
                except IndexError:
                    return -1
            if counter == len(list2):
                return x
    return -1


if __name__ == "__main__":
    # Continuously record fixed-duration clips and run voice-activity
    # detection on each one.
    counter_instance = 0
    while True:
        record(DURATION)
        print "Calling moattar"
        notify_or_not, AVERAGE_INTENSITY_OF_RUNS = VAD.moattar_homayounpour(
            OUTPUT_FILE, 0, counter_instance)
        counter_instance += 1
        print "Speaking: ", notify_or_not
def menu(debug=False):
    """Voice-driven start menu: choose friend vs. computer opponent (and level).

    Returns (opp_type, opp_level). When debug is True the spoken confirmation
    step is skipped.
    NOTE(review): reconstructed from whitespace-mangled source; the exact
    nesting of the confirmation block is a best-effort reading — verify.
    """
    # Phonetic alphabet shown to the user for dictating board squares.
    phonetic_code = '''A as in Alpha
B as in Bravo
C as in Charlie
D as in Delta
E as in Extra
F as in Foot
G as in Golf or Gamma
H as in Hotel'''
    #initialize NLU for menu
    NLU = NLUDefault()
    opp_level = ''
    #blank recording
    blank = {'_text': '', 'entities': {}}
    startover = True
    hide_menu = False
    while startover:
        if not hide_menu:
            print('Welcome! My name is ChessBuddy! How should we play chess today?')
            print('Play a game against a...')
            print('1-friend')
            print('2-computer')
        #listen for input
        game_type = record()
        game_type, opp_type, slots = NLU.parse(game_type)
        if not debug:
            #confirm opponent type
            try:
                print('I heard: "{}". Is that correct?'.format(opp_type))
            except:
                print("I didn't recognize that, can you repeat that?")
                hide_menu = True
                continue
        #listen for input
        resp = record()
        intent, text, slots = NLU.parse(resp)
        ready = False
        if intent == 'confirm':
            if opp_type == 'friend':
                print('Before we get started, this will help me understand the moves you input.')
                print('For each square (A-G) use the following words')
                print(phonetic_code)
                print('After the word, say the number (1-8)')
                print('For example say your move like the following:')
                print('alpha six OR knight bravo three OR king extra five')
                print('Okay, when you are ready, type any key and hit enter')
                ready = input()
                print("Great, Let's get started!")
                print('-------------------------------')
                print('White moves first')
                return opp_type, opp_level
            elif opp_type == 'computer':
                # NOTE(review): the source contained a garbled character in
                # this string; rendered here as an embedded newline.
                print('What level computer? \n(easy, medium, or hard)')
                resp = record()
                intent, opp_level, slots = NLU.parse(resp)
                print('Before we get started, this will help me understand the moves you input.')
                print('For each square (A-G) use the following words')
                print(phonetic_code)
                print('After the word, say the number (1-8)')
                print('For example say your move like the following:')
                print('alpha six OR knight bravo three OR king extra five')
                print('Okay, when you are ready, type any key and hit enter')
                ready = input()
                print("Great, Let's get started!")
                print('-------------------------------')
                print('White moves first')
                return opp_type, opp_level
        else:
            print("I heard no, so let's start over")
            print('-------------------------------')
from recorder import record
from numpy import savetxt as save
from numpy import array
import os
import csv

if not os.path.isfile('test.txt'):
    # First run: record audio and cache the raw samples to test.txt.
    data = record('Start speaking ...')
    # reshape() returns a NEW array; the original discarded the result,
    # so the saved data was never actually reshaped.
    data = data.reshape(44100)
    save('test.txt', data, delimiter=' ')
else:
    # Subsequent runs: reload cached samples (with-block closes the file,
    # which the original leaked).
    with open('test.txt', 'r') as fh:
        data = array([float(row[0]) for row in csv.reader(fh, delimiter=' ')])
print(data)

import python_speech_features as psf

# 13-coefficient MFCCs at the 44.1 kHz sample rate used for recording.
mfcc = psf.mfcc(data, 44100, nfft=1103)
save('testMfcc.txt', mfcc, delimiter=' ')
print(mfcc.shape)

import visualize as vz

vz.plot(mfcc)
vz.show()
from recorder import record
import os

# Dispatch a single spoken command by prefix.
result = record()
if result.startswith('microphone'):
    # NOTE(review): `sr` is not imported in this chunk — presumably
    # speech_recognition, imported elsewhere in the file; confirm.
    print(sr.Microphone.list_microphone_names())
elif result.startswith('website'):
    os.system('code /Users/marcpartensky/programs/website')
elif result.startswith('dossier'):
    print(os.listdir())
elif result.startswith('sauvegarde'):
    # Commit & push using the rest of the utterance as the commit message.
    # SECURITY: `message` comes from speech and is interpolated into a shell
    # command unquoted — shell injection is possible. Consider
    # subprocess.run([...], shell=False) with a list argument.
    message = result.replace('sauvegarde', '', 1).strip()
    os.system(f'git add -A; git commit -m "{message}"; git push')
elif result.startswith('dis'):
    # Same injection concern applies to the `say` command below.
    message = result.replace('dis', '', 1).strip()
    os.system(f'say {message}')
else:
    # Unrecognized command: just echo the transcript.
    print(result)
def getSong(username, token, sp):
    """Record a clip and return the identified song id.

    username/token/sp are accepted for interface compatibility with callers;
    they are not used by this implementation.
    """
    record()  # capture audio from the microphone
    return identify()