def event_loop():
    wit = Wit(wit_token())
    my_mic = Mic(DEFAULT_DICT, DEFAULT_LANG, DEFAULT_DICT, DEFAULT_LANG)

    while True:
        # listen for activation hotword
        try:
            threshold, text = my_mic.passiveListen(PERSONA)
        except Exception:
            continue

        # detected hotword
        if threshold:
            audio_file = activeListenFile(threshold)
            if audio_file:
                data = None
                try:
                    # retrieve wit intent
                    data = wit.post_speech(open(audio_file))
                    # send to handler service
                    raise NotImplementedError('no handler code yet')
                except Exception as e:
                    print("Exception in audio_file handling:")
                    print(str(e))
                if data:
                    print("Data:")
                    pprint(data)
def __init__(self, PERSONA, mic, mpdwrapper):
    self._logger = logging.getLogger(__name__)
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = mpdwrapper

    # index spotify playlists into new dictionary and language models
    original = ["STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS",
                "LOUDER", "SOFTER", "LOWER", "HIGHER", "VOLUME",
                "PLAYLIST"] + self.music.get_soup_playlist()
    pronounced = g2p.translateWords(original)
    zipped = zip(original, pronounced)
    lines = ["%s %s" % (x, y) for x, y in zipped]

    with open("dictionary_spotify.dic", "w") as f:
        f.write("\n".join(lines) + "\n")

    with open("sentences_spotify.txt", "w") as f:
        f.write("\n".join(original) + "\n")
        f.write("<s> \n </s> \n")

    # make language model
    os.system("text2idngram -vocab sentences_spotify.txt < " +
              "sentences_spotify.txt -idngram spotify.idngram")
    os.system("idngram2lm -idngram spotify.idngram -vocab " +
              "sentences_spotify.txt -arpa languagemodel_spotify.lm")

    # create a new mic with the new music models
    self.mic = Mic(
        mic.speaker, mic.passive_stt_engine,
        stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                            dictd_music="dictionary_spotify.dic"))
def setUp(self): self.jasper_clip = "../static/audio/alfred.wav" self.time_clip = "../static/audio/time.wav" from mic import Mic self.m = Mic("languagemodel.lm", "dictionary.dic", "languagemodel_persona.lm", "dictionary_persona.dic")
def __init__(self, PERSONA, mic):
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = Music()

    # index spotify playlists into new dictionary and language models
    original = self.music.get_soup_playlist() + [
        "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
        "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
    ]
    pronounced = g2p.translateWords(original)
    zipped = zip(original, pronounced)
    lines = ["%s %s" % (x, y) for x, y in zipped]

    with open("dictionary_spotify.dic", "w") as f:
        f.write("\n".join(lines) + "\n")

    with open("sentences_spotify.txt", "w") as f:
        f.write("\n".join(original) + "\n")
        f.write("<s> \n </s> \n")

    # make language model
    os.system(
        "text2idngram -vocab sentences_spotify.txt < sentences_spotify.txt -idngram spotify.idngram"
    )
    os.system(
        "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm"
    )

    # create a new mic with the new music models
    self.mic = Mic("languagemodel.lm", "dictionary.dic",
                   "languagemodel_persona.lm", "dictionary_persona.dic",
                   "languagemodel_spotify.lm", "dictionary_spotify.dic")
class Jane(object):
    def __init__(self, options={}):
        self._logger = logging.getLogger(__name__)

        # Read config
        config_file = options['config']
        self._logger.debug("Trying to read config file: '%s'", config_file)
        try:
            with open(config_file, "r") as f:
                self.config = yaml.safe_load(f)
        except OSError:
            self._logger.error("Can't open config file: '%s'", config_file)
            raise

        self._logger.info('config loaded')
        self._logger.info(self.config)

        try:
            stt_engine_slug = self.config['stt_engine']
        except KeyError:
            stt_engine_slug = 'sphinx'
            self._logger.warning("stt_engine not specified in profile, "
                                 "defaulting to '%s'", stt_engine_slug)
        stt_engine_class = stt.get_engine_by_slug(stt_engine_slug)

        try:
            slug = self.config['stt_passive_engine']
            stt_passive_engine_class = stt.get_engine_by_slug(slug)
        except KeyError:
            stt_passive_engine_class = stt_engine_class

        try:
            tts_engine_slug = self.config['tts_engine']
        except KeyError:
            tts_engine_slug = tts.get_default_engine_slug()
            self._logger.warning("tts_engine not specified in profile, "
                                 "defaulting to '%s'", tts_engine_slug)
        tts_engine_class = tts.get_engine_by_slug(tts_engine_slug)
        self._logger.info('LOADED TTS %s', tts_engine_class)

        # Initialize Mic
        if 'text' in options:
            self.input = TextInput(tts_engine_class.get_instance())
        else:
            self.input = Mic(tts_engine_class.get_instance(),
                             stt_passive_engine_class.get_passive_instance(),
                             stt_engine_class.get_active_instance())

    def run(self):
        print('Start')
        if 'first_name' in self.config:
            salutation = ("How can I be of service, %s?"
                          % self.config["first_name"])
        else:
            salutation = "How can I be of service?"
        print('Say hello')
        self.input.say(salutation)

        conversation = Conversation("JANE", self.input, self.config)
        conversation.handleForever()
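# Hedged usage sketch for the Jane class above; the 'profile.yml' path is an
# assumption (borrowed from the BibleReader snippet later in this section),
# not something the class itself mandates.
if __name__ == '__main__':
    jane = Jane(options={'config': 'profile.yml'})
    jane.run()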
def __init__(self):
    self.mic = Mic()
    self.speaker = Speaker()
    self.client = Client()
    self.modem = Modem()

    self.selectedMod = 'AM'
    self.modulations = {
        'AM': self.modem.modAm,
        'AMSC': self.modem.modAmsc,
        'FM': self.modem.modFm
    }
    self.demodulations = {
        'AM': self.modem.demodAm,
        'AMSC': self.modem.demodAmsc,
        'FM': self.modem.demodFm
    }

    self.fig, self.axes = plt.subplots(4, 1, figsize=[6, 8])
    plt.subplots_adjust(top=0.7)
    for i in range(4):
        self.axes[i].axes.xaxis.set_visible(False)
        self.axes[i].axes.yaxis.set_visible(False)

    self.input_ip = TextBox(plt.axes([0.1, 0.05, 0.4, 0.05]), '',
                            initial='192.168.0.20')
    self.btn_connect = Button(plt.axes([0.5, 0.05, 0.2, 0.05]), 'Connect')
    self.btn_disconnect = Button(plt.axes([0.7, 0.05, 0.2, 0.05]),
                                 'Disconnect')
    self.btn_am = Button(plt.axes([0.1, 0.94, 0.2, 0.05]), 'AM')
    self.btn_amsc = Button(plt.axes([0.3, 0.94, 0.2, 0.05]), 'AMSC')
    self.btn_fm = Button(plt.axes([0.5, 0.94, 0.2, 0.05]), 'FM')
    self.sld_cutoff = Slider(plt.axes([0.1, 0.91, 0.7, 0.02]), 'Cutoff',
                             1., 2000., valinit=1000, valstep=1.)
    self.sld_order = Slider(plt.axes([0.1, 0.87, 0.7, 0.02]), 'Order',
                            2, 50, valinit=5, valstep=1)
    self.sld_fm_carrier = Slider(plt.axes([0.1, 0.83, 0.7, 0.02]), 'FM Freq',
                                 3000., 20000., valinit=10000., valstep=100.)
    self.sld_fm_devsiat = Slider(plt.axes([0.1, 0.79, 0.7, 0.02]), 'FM Desv',
                                 300., 4000., valinit=1000., valstep=10.)
    self.sld_am_carrier = Slider(plt.axes([0.1, 0.75, 0.7, 0.02]), 'AM Freq',
                                 3000., 20000., valinit=3000., valstep=100.)

    self.btn_am.on_clicked(self.selectAM)
    self.btn_amsc.on_clicked(self.selectAMSC)
    self.btn_fm.on_clicked(self.selectFM)
    self.btn_connect.on_clicked(self.connect)
    self.btn_disconnect.on_clicked(self.disconnect)
    self.sld_cutoff.on_changed(self.changeCutoff)
    self.sld_order.on_changed(self.changeOrder)
    self.sld_fm_carrier.on_changed(self.changeFmCarrier)
    self.sld_fm_devsiat.on_changed(self.changeFmDevsiat)
    self.sld_am_carrier.on_changed(self.changeAmCarrier)

    plt.show()
def setUp(self): self.jasper_clip = "../static/audio/jasper.wav" self.time_clip = "../static/audio/time.wav" from mic import Mic self.m = Mic("languagemodel.lm", "dictionary.dic", "languagemodel_persona.lm", "dictionary_persona.dic")
def initMic(self):
    if self.mic_active:
        return

    micThread = QThread()
    micObj = Mic()
    micObj.moveToThread(micThread)

    self.stopMic.clicked.connect(micObj.stream.stop_stream)
    micThread.started.connect(micObj.listen)
    micObj.update.connect(self.vis.updateVis)
    micObj.finished.connect(self.vis.radiiReset)
    micObj.finished.connect(micThread.quit)
    micObj.finished.connect(self.micOff)

    micThread.start()
    self.mic_active = True
    # keep references so the Qt objects aren't garbage-collected mid-run
    self.threads.append((micObj, micThread))
def __init__(self, PERSONA, mic):
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = Music()

    # index spotify playlists into new dictionary and language models
    original = self.music.get_soup_playlist() + [
        "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
        "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
    ]
    pronounced = g2p.translateWords(original)
    zipped = zip(original, pronounced)
    lines = ["%s %s" % (x, y) for x, y in zipped]

    with open("dictionary_spotify.dic", "w") as f:
        f.write("\n".join(lines) + "\n")

    with open("sentences_spotify.txt", "w") as f:
        f.write("\n".join(original) + "\n")
        f.write("<s> \n </s> \n")

    # make language model
    os.system(
        "text2idngram -vocab sentences_spotify.txt < sentences_spotify.txt -idngram spotify.idngram")
    os.system(
        "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm")

    # create a new mic with the new music models
    self.mic = Mic(
        speaker.newSpeaker(),
        stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                            dictd_music="dictionary_spotify.dic"),
        stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                            dictd_music="dictionary_spotify.dic"))
def __init__(self, options={}):
    self._logger = logging.getLogger(__name__)

    # Read config
    config_file = options['config']
    self._logger.debug("Trying to read config file: '%s'", config_file)
    try:
        with open(config_file, "r") as f:
            self.config = yaml.safe_load(f)
    except OSError:
        self._logger.error("Can't open config file: '%s'", config_file)
        raise

    self._logger.info('config loaded')
    self._logger.info(self.config)

    try:
        stt_engine_slug = self.config['stt_engine']
    except KeyError:
        stt_engine_slug = 'sphinx'
        self._logger.warning("stt_engine not specified in profile, "
                             "defaulting to '%s'", stt_engine_slug)
    stt_engine_class = stt.get_engine_by_slug(stt_engine_slug)

    try:
        slug = self.config['stt_passive_engine']
        stt_passive_engine_class = stt.get_engine_by_slug(slug)
    except KeyError:
        stt_passive_engine_class = stt_engine_class

    try:
        tts_engine_slug = self.config['tts_engine']
    except KeyError:
        tts_engine_slug = tts.get_default_engine_slug()
        self._logger.warning("tts_engine not specified in profile, "
                             "defaulting to '%s'", tts_engine_slug)
    tts_engine_class = tts.get_engine_by_slug(tts_engine_slug)
    self._logger.info('LOADED TTS %s', tts_engine_class)

    # Initialize Mic
    if 'text' in options:
        self.input = TextInput(tts_engine_class.get_instance())
    else:
        self.input = Mic(tts_engine_class.get_instance(),
                         stt_passive_engine_class.get_passive_instance(),
                         stt_engine_class.get_active_instance())
def __init__(self, PERSONA, mic, mpdwrapper):
    self._logger = logging.getLogger(__name__)
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = mpdwrapper

    # index spotify playlists into new dictionary and language models
    phrases = ["STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS",
               "LOUDER", "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"]
    phrases.extend(self.music.get_soup_playlist())
    music_stt_engine = mic.active_stt_engine.get_instance('music', phrases)

    self.mic = Mic(mic.speaker,
                   mic.passive_stt_engine,
                   music_stt_engine)
def __init__(self, options={}):
    self._logger = logging.getLogger(__name__)

    # Read config
    config_file = options['config']
    self._logger.debug("Trying to read config file: '%s'", config_file)
    try:
        with open(config_file, "r") as f:
            self.config = yaml.safe_load(f)
    except OSError:
        self._logger.error("Can't open config file: '%s'", config_file)
        raise

    self._logger.info('config loaded')
    self._logger.info(self.config)

    try:
        stt_engine_slug = self.config['stt_engine']
    except KeyError:
        stt_engine_slug = 'sphinx'
        self._logger.warning("stt_engine not specified in profile, "
                             "defaulting to '%s'", stt_engine_slug)
    stt_engine_class = stt.get_engine_by_slug(stt_engine_slug)

    try:
        slug = self.config['stt_passive_engine']
        stt_passive_engine_class = stt.get_engine_by_slug(slug)
    except KeyError:
        stt_passive_engine_class = stt_engine_class

    try:
        tts_engine_slug = self.config['tts_engine']
    except KeyError:
        tts_engine_slug = tts.get_default_engine_slug()
        self._logger.warning("tts_engine not specified in profile, "
                             "defaulting to '%s'", tts_engine_slug)
    tts_engine_class = tts.get_engine_by_slug(tts_engine_slug)
    self._logger.info('LOADED TTS %s', tts_engine_class)

    # Initialize Mic
    if 'text' in options:
        self.input = TextInput(tts_engine_class.get_instance())
    else:
        self.input = Mic(tts_engine_class.get_instance(),
                         stt_passive_engine_class.get_passive_instance(),
                         stt_engine_class.get_active_instance())
class TestMic(unittest.TestCase):

    def setUp(self):
        self.jasper_clip = "../static/audio/jasper.wav"
        self.time_clip = "../static/audio/time.wav"

        from mic import Mic
        self.m = Mic("languagemodel.lm", "dictionary.dic",
                     "languagemodel_persona.lm", "dictionary_persona.dic")

    def testTranscribeJasper(self):
        """Does Jasper recognize his name (i.e., passive listen)?"""
        transcription = self.m.transcribe(self.jasper_clip, PERSONA_ONLY=True)
        self.assertTrue("JASPER" in transcription)

    def testTranscribe(self):
        """Does Jasper recognize 'time' (i.e., active listen)?"""
        transcription = self.m.transcribe(self.time_clip)
        self.assertTrue("TIME" in transcription)
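# Standard unittest entry point (an assumed addition; the original snippet
# does not show how TestMic is launched).
if __name__ == '__main__':
    unittest.main()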
class Text(object):
    def __init__(self):
        self.stt_engine = STT()
        self.tts_engine = TTS()
        self.mic = Mic(self.tts_engine, self.stt_engine, self.stt_engine)
        self.selection = Selection(self.tts_engine)

    def handle(self):
        while True:
            threshold, translate = self.mic.passiveListen("JARVIS")
            if not translate or not threshold:
                continue

            input = self.mic.activeListen(threshold)
            print(input)
            if input:
                string = self.selection.select(input)
            else:
                self.tts_engine.say("Pardon?")
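# Minimal driver for the Text pipeline above (assumes STT, TTS, Mic and
# Selection are importable in this module's scope, as the class itself does).
if __name__ == '__main__':
    Text().handle()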
def __init__(self, PERSONA, mic, lang):
    self.persona = PERSONA
    self.lang = lang
    self.bookName = ""
    self.chapNum = ""
    self.client = mpd.MPDClient()
    self.client.timeout = None
    self.client.idletimeout = None
    self.client.connect("localhost", 6600)

    dictionary = bible_lists.dictList[lang]
    self.mic = Mic(mic.speaker,
                   "languagemodel_bible.lm", dictionary[0],
                   "languagemodel_persona.lm", "dictionary_persona.dic",
                   lmd_music="languagemodel_playback.lm",
                   dictd_music=dictionary[1],
                   lmd_num="languagemodel_num.lm",
                   dictd_num=dictionary[2])
def __init__(self, PERSONA, mic):
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = Music()

    # index spotify playlists into new dictionary and language models
    words = self.music.get_soup_playlist() + [
        "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
        "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
    ]
    text = "\n".join(["<s> %s </s>" % word for word in words])

    # make language model
    vocabcompiler.compile_text(text, languagemodel_spotify)

    # create a new mic with the new music models
    self.mic = Mic(
        speaker.newSpeaker(),
        stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                            dictd_music=dictionary_spotify),
        stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                            dictd_music=dictionary_spotify))
def __init__(self, PERSONA, mic, mpdwrapper):
    self._logger = logging.getLogger(__name__)
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = mpdwrapper

    # index spotify playlists into new dictionary and language models
    phrases = [
        "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
        "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
    ]
    phrases.extend(self.music.get_soup_playlist())

    vocabulary_music = vocabcompiler.PocketsphinxVocabulary(
        name='music', path=jasperpath.config('vocabularies'))
    vocabulary_music.compile(phrases)

    # create a new mic with the new music models
    config = stt.PocketSphinxSTT.get_config()
    self.mic = Mic(
        mic.speaker, mic.passive_stt_engine,
        stt.PocketSphinxSTT(vocabulary_music=vocabulary_music, **config))
def __init__(self, PERSONA, mic):
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = Music()

    # index spotify playlists into new dictionary and language models
    words = self.music.get_soup_playlist() + [
        "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
        "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
    ]
    text = "\n".join(["<s> %s </s>" % word for word in words])

    # make language model
    vocabcompiler.compile_text(text, languagemodel_spotify)

    # create a new mic with the new music models
    self.mic = Mic(
        speaker.newSpeaker(),
        stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                            dictd_music=dictionary_spotify),
        stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                            dictd_music=dictionary_spotify))
def __init__(self, PERSONA, mic, mpdwrapper):
    self._logger = logging.getLogger(__name__)
    self.persona = PERSONA
    # self.mic - we're actually going to ignore the mic they passed in
    self.music = mpdwrapper

    # index spotify playlists into new dictionary and language models
    phrases = ["STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS",
               "LOUDER", "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"]
    phrases.extend(self.music.get_soup_playlist())

    vocabulary_music = vocabcompiler.PocketsphinxVocabulary(
        name='music', path=jasperpath.config('vocabularies'))
    vocabulary_music.compile(phrases)

    # create a new mic with the new music models
    config = stt.PocketSphinxSTT.get_config()
    self.mic = Mic(mic.speaker, mic.passive_stt_engine,
                   stt.PocketSphinxSTT(vocabulary_music=vocabulary_music,
                                       **config))
class FrequencyStream(object):
    """Frequency stream."""

    def __init__(self):
        """Construct FrequencyStream object."""
        self.mic = Mic('Blue Snowball')

    def __enter__(self):
        """Open and return frequency stream."""
        self.mic.open()
        return self

    def __exit__(self, type, value, traceback):
        """Close stream."""
        self.mic.close()

    def fft(self, data, jump):
        """Return data in frequency domain."""
        # cut up data, starting at the first frame
        start = 0
        # go until interval reaches final frame
        while start + 8192 <= len(data):
            # get fft of interval
            freq = np.absolute(np.fft.rfft(data[start:start + 8192]))
            # send out fft
            yield freq
            # move to next interval
            start += jump

    def read(self, jump=1024, frames=None):
        """Read a number of frames of data into the stream."""
        # read all frames
        self.mic.read(frames)
        # iterate through buffers
        for buff in self.fft(self.mic.stream.channel_1, jump):
            yield buff
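# A minimal usage sketch for FrequencyStream (assumption: the Mic used above
# accepts a device name and exposes buffered samples on stream.channel_1).
with FrequencyStream() as stream:
    for spectrum in stream.read(jump=1024, frames=100):
        # each yielded array is the rfft magnitude of an 8192-sample window
        print(spectrum.shape)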
class MusicMode:

    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        original = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        pronounced = g2p.translateWords(original)
        zipped = zip(original, pronounced)
        lines = ["%s %s" % (x, y) for x, y in zipped]

        with open("dictionary_spotify.dic", "w") as f:
            f.write("\n".join(lines) + "\n")

        with open("sentences_spotify.txt", "w") as f:
            f.write("\n".join(original) + "\n")
            f.write("<s> \n </s> \n")

        # make language model
        os.system(
            "text2idngram -vocab sentences_spotify.txt < sentences_spotify.txt -idngram spotify.idngram")
        os.system(
            "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm")

        # create a new mic with the new music models
        self.mic = Mic(
            speaker.newSpeaker(),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                                dictd_music="dictionary_spotify.dic"),
            stt.PocketSphinxSTT(lmd_music="languagemodel_spotify.lm",
                                dictd_music="dictionary_spotify.dic"))

    def delegateInput(self, input):
        command = input.upper()

        # check if input is meant to start the music module
        if "PLAYLIST" in command:
            command = command.replace("PLAYLIST", "")
        elif "STOP" in command:
            self.mic.say("Stopping music")
            self.music.stop()
            return
        elif "PLAY" in command:
            self.mic.say("Playing %s" % self.music.current_song())
            self.music.play()
            return
        elif "PAUSE" in command:
            self.mic.say("Pausing music")
            # not pause because would need a way to keep track of pause/play
            # state
            self.music.stop()
            return
        elif any(ext in command for ext in ["LOUDER", "HIGHER"]):
            self.mic.say("Louder")
            self.music.volume(interval=10)
            self.music.play()
            return
        elif any(ext in command for ext in ["SOFTER", "LOWER"]):
            self.mic.say("Softer")
            self.music.volume(interval=-10)
            self.music.play()
            return
        elif "NEXT" in command:
            self.mic.say("Next song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.next()
            self.mic.say("Playing %s" % self.music.current_song())
            return
        elif "PREVIOUS" in command:
            self.mic.say("Previous song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.previous()
            self.mic.say("Playing %s" % self.music.current_song())
            return

        # SONG SELECTION... requires long-loading dictionary and language model
        # songs = self.music.fuzzy_songs(query = command.replace("PLAY", ""))
        # if songs:
        #     self.mic.say("Found songs")
        #     self.music.play(songs = songs)
        #     print "SONG RESULTS"
        #     print "============"
        #     for song in songs:
        #         print "Song: %s Artist: %s" % (song.title, song.artist)
        #     self.mic.say("Playing %s" % self.music.current_song())
        # else:
        #     self.mic.say("No songs found. Resuming current song.")
        #     self.music.play()

        # PLAYLIST SELECTION
        playlists = self.music.fuzzy_playlists(query=command)
        if playlists:
            self.mic.say("Loading playlist %s" % playlists[0])
            self.music.play(playlist_name=playlists[0])
            self.mic.say("Playing %s" % self.music.current_song())
        else:
            self.mic.say("No playlists found. Resuming current song.")
            self.music.play()

        return

    def handleForever(self):
        self.music.play()
        self.mic.say("Playing %s" % self.music.current_song())

        while True:
            try:
                threshold, transcribed = self.mic.passiveListen(self.persona)
            except Exception:
                continue

            if threshold:
                self.music.pause()
                input = self.mic.activeListen(MUSIC=True)
                # guard against empty transcriptions before calling .lower()
                if input:
                    if "close" in input.lower():
                        self.mic.say("Closing Spotify")
                        return
                    self.delegateInput(input)
                else:
                    self.mic.say("Pardon?")
                self.music.play()
class App():

    def __init__(self):
        self.mic = Mic()
        self.speaker = Speaker()
        self.client = Client()
        self.modem = Modem()

        self.selectedMod = 'AM'
        self.modulations = {
            'AM': self.modem.modAm,
            'AMSC': self.modem.modAmsc,
            'FM': self.modem.modFm
        }
        self.demodulations = {
            'AM': self.modem.demodAm,
            'AMSC': self.modem.demodAmsc,
            'FM': self.modem.demodFm
        }

        self.fig, self.axes = plt.subplots(4, 1, figsize=[6, 8])
        plt.subplots_adjust(top=0.7)
        for i in range(4):
            self.axes[i].axes.xaxis.set_visible(False)
            self.axes[i].axes.yaxis.set_visible(False)

        self.input_ip = TextBox(plt.axes([0.1, 0.05, 0.4, 0.05]), '',
                                initial='192.168.0.20')
        self.btn_connect = Button(plt.axes([0.5, 0.05, 0.2, 0.05]), 'Connect')
        self.btn_disconnect = Button(plt.axes([0.7, 0.05, 0.2, 0.05]),
                                     'Disconnect')
        self.btn_am = Button(plt.axes([0.1, 0.94, 0.2, 0.05]), 'AM')
        self.btn_amsc = Button(plt.axes([0.3, 0.94, 0.2, 0.05]), 'AMSC')
        self.btn_fm = Button(plt.axes([0.5, 0.94, 0.2, 0.05]), 'FM')
        self.sld_cutoff = Slider(plt.axes([0.1, 0.91, 0.7, 0.02]), 'Cutoff',
                                 1., 2000., valinit=1000, valstep=1.)
        self.sld_order = Slider(plt.axes([0.1, 0.87, 0.7, 0.02]), 'Order',
                                2, 50, valinit=5, valstep=1)
        self.sld_fm_carrier = Slider(plt.axes([0.1, 0.83, 0.7, 0.02]),
                                     'FM Freq', 3000., 20000.,
                                     valinit=10000., valstep=100.)
        self.sld_fm_devsiat = Slider(plt.axes([0.1, 0.79, 0.7, 0.02]),
                                     'FM Desv', 300., 4000.,
                                     valinit=1000., valstep=10.)
        self.sld_am_carrier = Slider(plt.axes([0.1, 0.75, 0.7, 0.02]),
                                     'AM Freq', 3000., 20000.,
                                     valinit=3000., valstep=100.)

        self.btn_am.on_clicked(self.selectAM)
        self.btn_amsc.on_clicked(self.selectAMSC)
        self.btn_fm.on_clicked(self.selectFM)
        self.btn_connect.on_clicked(self.connect)
        self.btn_disconnect.on_clicked(self.disconnect)
        self.sld_cutoff.on_changed(self.changeCutoff)
        self.sld_order.on_changed(self.changeOrder)
        self.sld_fm_carrier.on_changed(self.changeFmCarrier)
        self.sld_fm_devsiat.on_changed(self.changeFmDevsiat)
        self.sld_am_carrier.on_changed(self.changeAmCarrier)

        plt.show()

    def selectAM(self, evt):
        self.selectedMod = 'AM'

    def selectAMSC(self, evt):
        self.selectedMod = 'AMSC'

    def selectFM(self, evt):
        self.selectedMod = 'FM'

    def changeCutoff(self, val):
        self.modem.cutoff = val
        self.modem.normal_cutoff = self.modem.cutoff / self.modem.nyq

    def changeOrder(self, val):
        self.modem.order = val

    def changeFmCarrier(self, val):
        self.modem.fm_carrier_freq = val

    def changeFmDevsiat(self, val):
        self.modem.fm_desviation = val

    def changeAmCarrier(self, val):
        self.modem.am_carrier_freq = val

    def connect(self, event):
        self.client = Client()
        self.client.connect(self.input_ip.text)
        Thread(target=self.sendData).start()
        Thread(target=self.playSound).start()
        Thread(target=self.updatePlot).start()

    def disconnect(self, event):
        self.client.disconnect()

    def sendData(self):
        time.sleep(0.01)
        while self.client.isConnected():
            time.sleep(0.01)
            entrada = self.mic.read()
            mod = self.modulations[self.selectedMod](entrada)
            self.client.send(mod)

    def playSound(self):
        time.sleep(0.01)
        while self.client.isConnected():
            time.sleep(0.01)
            data = self.client.getBuffer().get_data().flatten()
            if not len(data):
                continue
            mod = data[-2205:]
            demod = self.demodulations[self.selectedMod](mod)
            self.speaker.play(demod)

    def updatePlot(self):
        time.sleep(0.01)
        while self.client.isConnected():
            time.sleep(0.01)
            frame = self.client.getBuffer().get_data().flatten()
            # np.fromstring is deprecated; frombuffer is the equivalent call
            data = np.frombuffer(frame, dtype=np.int16)
            if not len(data):
                continue
            self.axes[0].cla()
            self.axes[0].plot(data, color="black")
            self.axes[1].cla()
            self.axes[1].specgram(data, Fs=44100)
            data = self.demodulations[self.selectedMod](frame)
            data = np.frombuffer(data, dtype=np.int16)
            self.axes[2].cla()
            self.axes[2].plot(data, color="black")
            self.axes[3].cla()
            self.axes[3].specgram(data, Fs=44100)
            plt.draw()
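# Hedged runner for the App class above; the constructor wires the widgets
# and then blocks inside plt.show(), so constructing it starts the GUI.
if __name__ == '__main__':
    App()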
class BibleReader:

    def __init__(self, PERSONA, mic, lang):
        self.persona = PERSONA
        self.lang = lang
        self.bookName = ""
        self.chapNum = ""
        self.client = mpd.MPDClient()
        self.client.timeout = None
        self.client.idletimeout = None
        self.client.connect("localhost", 6600)

        dictionary = bible_lists.dictList[lang]
        self.mic = Mic(mic.speaker,
                       "languagemodel_bible.lm", dictionary[0],
                       "languagemodel_persona.lm", "dictionary_persona.dic",
                       lmd_music="languagemodel_playback.lm",
                       dictd_music=dictionary[1],
                       lmd_num="languagemodel_num.lm",
                       dictd_num=dictionary[2])

    # def say(self, word, lang):
    #     filename = "audio/" + lang + "/" + word + ".wav"
    #     os.system("aplay -D hw:1,0 " + filename)

    def lookupBible(self, lang):
        badInput = True
        while badInput:
            badInput = False
            self.mic.speak("book", self.lang)
            book = self.mic.activeListen()
            self.mic.speak("chapter", self.lang)
            chap = self.mic.activeListen(NUMBER=True)
            if book == "" or chap == "":
                badInput = True
                self.mic.speak("pardon", self.lang)
            else:
                book, chap, audio = bible_search.bible_query(book, chap, lang)
                if audio == "":
                    badInput = True
                    self.mic.speak("repeat", self.lang)
                else:
                    self.mic.say("Opening " + book + " " + chap)
                    self.mic.speak("confirm", self.lang)
                    input = self.mic.activeListen(MUSIC=True)
                    if "CANCEL" in input:
                        badInput = True
                        self.mic.speak("cancel", self.lang)
                    else:
                        return book, chap, audio

    def nextBook(self, book):
        return bible_lists.nextList[book]

    def handleForever(self):
        self.mic.speak("opening", self.lang)
        try:
            self.client.clear()
        except mpd.ConnectionError:
            self.client.disconnect()
            self.client.connect("localhost", 6600)
            self.client.clear()
        self.client.add("file:///home/pi/jasper/client/BibleReader/bible.mp3")
        self.client.play()
        isPlaying = True

        while True:
            inputFlag = False
            finishedFlag = False
            try:
                i, o, e = select.select([sys.stdin], [], [], 0)
                for s in i:
                    if s == sys.stdin:
                        input = sys.stdin.read(1)
                        inputFlag = True
                # if not inputFlag:
                #     threshold, transcribed = self.mic.passiveListen(self.persona)
                threshold = False
                stat = self.client.status()
                if 'songid' not in stat:
                    finishedFlag = True
            except Exception:
                continue

            if inputFlag or threshold:
                inputFlag = False
                try:
                    self.client.pause(1)
                except mpd.ConnectionError:
                    self.client.disconnect()
                    self.client.connect("localhost", 6600)
                    self.client.pause(1)

                input = self.mic.activeListen(MUSIC=True)
                if "CLOSE BIBLE" in input:
                    self.mic.speak("closing", self.lang)
                    self.client.stop()
                    self.client.close()
                    self.client.disconnect()
                    return
                elif "STOP" in input:
                    self.mic.speak("stop", self.lang)
                    self.client.stop()
                    isPlaying = False
                elif "PAUSE" in input:
                    self.mic.speak("pause", self.lang)
                    isPlaying = False
                elif "CONTINUE" in input:
                    self.mic.speak("continuing", self.lang)
                    self.client.pause(0)
                    isPlaying = True
                elif "OPEN" in input:
                    self.bookName, self.chapNum, audio = self.lookupBible(self.lang)
                    self.mic.speak("opening", self.lang)
                    # choose another book
                    try:
                        self.client.clear()
                    except mpd.ConnectionError:
                        self.client.disconnect()
                        self.client.connect("localhost", 6600)
                        self.client.clear()
                    bible_search.audio_download(audio)
                    self.client.add("file:///home/pi/jasper/client/BibleReader/bible.mp3")
                    self.client.play()
                else:
                    self.mic.speak("pardon", self.lang)

                if isPlaying:
                    self.client.play()

            if finishedFlag:
                finishedFlag = False
                self.mic.speak("nextchap", self.lang)
                input = self.mic.activeListen(MUSIC=True)
                if "CONTINUE" in input:
                    nextChap = str(int(self.chapNum) + 1)
                    self.bookName, self.chapNum, audio = bible_search.bible_query(
                        self.bookName, nextChap, self.lang)
                    if audio == "":
                        # go to next book
                        self.bookName = self.nextBook(self.bookName)
                        nextChap = "1"
                        self.bookName, self.chapNum, audio = bible_search.bible_query(
                            self.bookName, nextChap, self.lang)
                    self.mic.speak("opening", self.lang)
                    # choose another book
                    try:
                        self.client.clear()
                    except mpd.ConnectionError:
                        self.client.disconnect()
                        self.client.connect("localhost", 6600)
                        self.client.clear()
                    bible_search.audio_download(audio)
                    self.client.add("file:///home/pi/jasper/client/BibleReader/bible.mp3")
                    self.client.play()
                else:
                    self.mic.speak("closing", self.lang)
                    try:
                        self.client.close()
                        self.client.disconnect()
                    except mpd.ConnectionError:
                        self.client.disconnect()
                    return
class MusicMode(object):

    def __init__(self, PERSONA, mic, mpdwrapper):
        self._logger = logging.getLogger(__name__)
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = mpdwrapper

        # index spotify playlists into new dictionary and language models
        phrases = ["STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS",
                   "LOUDER", "SOFTER", "LOWER", "HIGHER", "VOLUME",
                   "PLAYLIST"]
        phrases.extend(self.music.get_soup_playlist())
        music_stt_engine = mic.active_stt_engine.get_instance('music',
                                                              phrases)

        self.mic = Mic(mic.speaker,
                       mic.passive_stt_engine,
                       music_stt_engine)

    def delegateInput(self, input):
        command = input.upper()

        # check if input is meant to start the music module
        if "PLAYLIST" in command:
            command = command.replace("PLAYLIST", "")
        elif "STOP" in command:
            self.mic.say("Stopping music")
            self.music.stop()
            return
        elif "PLAY" in command:
            self.mic.say("Playing %s" % self.music.current_song())
            self.music.play()
            return
        elif "PAUSE" in command:
            self.mic.say("Pausing music")
            # not pause because would need a way to keep track of pause/play
            # state
            self.music.stop()
            return
        elif any(ext in command for ext in ["LOUDER", "HIGHER"]):
            self.mic.say("Louder")
            self.music.volume(interval=10)
            self.music.play()
            return
        elif any(ext in command for ext in ["SOFTER", "LOWER"]):
            self.mic.say("Softer")
            self.music.volume(interval=-10)
            self.music.play()
            return
        elif "NEXT" in command:
            self.mic.say("Next song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.next()
            self.mic.say("Playing %s" % self.music.current_song())
            return
        elif "PREVIOUS" in command:
            self.mic.say("Previous song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.previous()
            self.mic.say("Playing %s" % self.music.current_song())
            return

        # SONG SELECTION... requires long-loading dictionary and language model
        # songs = self.music.fuzzy_songs(query = command.replace("PLAY", ""))
        # if songs:
        #     self.mic.say("Found songs")
        #     self.music.play(songs = songs)
        #     print "SONG RESULTS"
        #     print "============"
        #     for song in songs:
        #         print "Song: %s Artist: %s" % (song.title, song.artist)
        #     self.mic.say("Playing %s" % self.music.current_song())
        # else:
        #     self.mic.say("No songs found. Resuming current song.")
        #     self.music.play()

        # PLAYLIST SELECTION
        playlists = self.music.fuzzy_playlists(query=command)
        if playlists:
            self.mic.say("Loading playlist %s" % playlists[0])
            self.music.play(playlist_name=playlists[0])
            self.mic.say("Playing %s" % self.music.current_song())
        else:
            self.mic.say("No playlists found. Resuming current song.")
            self.music.play()

        return

    def handleForever(self):
        self.music.play()
        self.mic.say("Playing %s" % self.music.current_song())

        while True:
            threshold, transcribed = self.mic.passiveListen(self.persona)
            if not transcribed or not threshold:
                self._logger.info("Nothing has been said or transcribed.")
                continue

            self.music.pause()
            input = self.mic.activeListen(MUSIC=True)
            if input:
                if "close" in input.lower():
                    self.mic.say("Closing Spotify")
                    return
                self.delegateInput(input)
            else:
                self.mic.say("Pardon?")
            self.music.play()
# yaml config file cfg = yaml.safe_load(open("./config.yaml")) # init path collector path_coll = PathCollector(cfg) # -- # mic # create classifier classifier = Classifier(path_coll=path_coll, verbose=cfg['classifier']['verbose']) # create mic instance mic = Mic(classifier=classifier, feature_params=cfg['feature_params'], mic_params=cfg['mic_params'], is_audio_record=cfg['game']['capture_enabled']) # -- # game setup # init pygame pygame.init() # init display screen = pygame.display.set_mode(cfg['game']['screen_size']) # init screen capturer screen_capturer = ScreenCapturer(screen, cfg['game']['screen_size'], cfg['game']['fps'], capture_path=cfg['game']['capture_path'], enabled=cfg['game']['capture_enabled']) # text
import re
import sys
import time
import select

import yaml

import bible_search
import bible_lists

# project-local modules (assumed import paths)
import speaker
from mic import Mic

WORDS = ["READ", "BIBLE"]

with open("config.txt") as config:
    lang = config.read().strip()

profile = yaml.safe_load(open("profile.yml", "r"))

if "INDONESIAN" in lang:
    mic = Mic(speaker.newSpeaker(),
              "languagemodel_command.lm", "dictionary_commandindo.dic",
              "languagemodel_persona.lm", "dictionary_persona.dic")
else:
    mic = Mic(speaker.newSpeaker(),
              "languagemodel_command.lm", "dictionary_command.dic",
              "languagemodel_persona.lm", "dictionary_persona.dic")


def isValid(text):
    """
    Returns True if the input is related to the new testament

    Arguments:
    text -- user-input, typically transcribed speech
    """
    return bool(re.search(r'\b(read|bible)\b', text, re.IGNORECASE))


class BibleReader:
import yaml
import pygame

from levels import LevelMoveWalls
from game_logic import GameLogic
# project-local modules (assumed import paths)
from classifier import Classifier
from mic import Mic

# yaml config file
cfg = yaml.safe_load(open("../config.yaml"))

# --
# mic

# create classifier
classifier = Classifier(cfg_classifier=cfg['classifier'], root_path='../')

# create mic instance
mic = Mic(classifier=classifier,
          mic_params=cfg['mic_params'],
          is_audio_record=False)

# init pygame
pygame.init()

# init display
screen = pygame.display.set_mode(cfg['game']['screen_size'])

# --
# level

# level setup
level = LevelMoveWalls(screen, cfg['game']['screen_size'], mic)
from mic import Mic, NoParsableSpeech
from butler import Alfred

if __name__ == '__main__':
    print("my name is alfred")
    alfred = Alfred()
    mic = Mic()
    counter = 0
    while True:
        try:
            counter += 1
            # TODO try to have it as
            #   text = mic.record()
            # where it records your voice, and returns plain text
            sample_width, sound_data = mic.record()
            mic.write_to_file(sample_width, sound_data)
            text = mic.get_text_from_google()
            # sample queries:
            #   |Alfred| |lights on|
            #   |Alfred lights on|
            print("|| " + text + " ||")
            print("|| " + str(counter) + " ||")
            if not alfred.is_summoned(text):
                continue
            # alfred.says("okay, hold on")
            alfred.respond_to(text)
        except NoParsableSpeech as e:
            # handler body truncated in the original; skip unparsed audio
            continue
def __init__(self):
    self.stt_engine = STT()
    self.tts_engine = TTS()
    self.mic = Mic(self.tts_engine, self.stt_engine, self.stt_engine)
    self.selection = Selection(self.tts_engine)
""" kws game main """ # yaml config file cfg = yaml.safe_load(open("./config.yaml")) # -- # mic # create classifier classifier = Classifier(cfg_classifier=cfg['classifier']) # create mic instance mic = Mic(classifier=classifier, mic_params=cfg['mic_params'], is_audio_record=cfg['game']['capture_enabled']) # -- # game setup # init pygame pygame.init() # init display screen = pygame.display.set_mode(cfg['game']['screen_size']) # init screen capturer screen_capturer = ScreenCapturer(screen, cfg['game']) # text
class MusicMode(object):

    def __init__(self, PERSONA, mic, mpdwrapper):
        self._logger = logging.getLogger(__name__)
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = mpdwrapper

        # index spotify playlists into new dictionary and language models
        phrases = ["STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS",
                   "LOUDER", "SOFTER", "LOWER", "HIGHER", "VOLUME",
                   "PLAYLIST"]
        phrases.extend(self.music.get_soup_playlist())
        music_stt_engine = mic.active_stt_engine.get_instance('music',
                                                              phrases)

        self.mic = Mic(mic.speaker,
                       mic.passive_stt_engine,
                       music_stt_engine)

    def delegateInput(self, input):
        command = input.upper()

        # check if input is meant to start the music module
        if "PLAYLIST" in command:
            command = command.replace("PLAYLIST", "")
        elif "STOP" in command:
            self.mic.say("Stopping music")
            self.music.stop()
            return
        elif "PLAY" in command:
            self.mic.say("Playing %s" % self.music.current_song())
            self.music.play()
            return
        elif "PAUSE" in command:
            self.mic.say("Pausing music")
            # not pause because would need a way to keep track of pause/play
            # state
            self.music.stop()
            return
        elif any(ext in command for ext in ["LOUDER", "HIGHER"]):
            self.mic.say("Louder")
            self.music.volume(interval=10)
            self.music.play()
            return
        elif any(ext in command for ext in ["SOFTER", "LOWER"]):
            self.mic.say("Softer")
            self.music.volume(interval=-10)
            self.music.play()
            return
        elif "NEXT" in command:
            self.mic.say("Next song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.next()
            self.mic.say("Playing %s" % self.music.current_song())
            return
        elif "PREVIOUS" in command:
            self.mic.say("Previous song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.previous()
            self.mic.say("Playing %s" % self.music.current_song())
            return

        # SONG SELECTION... requires long-loading dictionary and language model
        # songs = self.music.fuzzy_songs(query = command.replace("PLAY", ""))
        # if songs:
        #     self.mic.say("Found songs")
        #     self.music.play(songs = songs)
        #     print("SONG RESULTS")
        #     print("============")
        #     for song in songs:
        #         print("Song: %s Artist: %s" % (song.title, song.artist))
        #     self.mic.say("Playing %s" % self.music.current_song())
        # else:
        #     self.mic.say("No songs found. Resuming current song.")
        #     self.music.play()

        # PLAYLIST SELECTION
        playlists = self.music.fuzzy_playlists(query=command)
        if playlists:
            self.mic.say("Loading playlist %s" % playlists[0])
            self.music.play(playlist_name=playlists[0])
            self.mic.say("Playing %s" % self.music.current_song())
        else:
            self.mic.say("No playlists found. Resuming current song.")
            self.music.play()

        return

    def handleForever(self):
        self.music.play()
        self.mic.say("Playing %s" % self.music.current_song())

        while True:
            threshold, transcribed = self.mic.passiveListen(self.persona)
            if not transcribed or not threshold:
                self._logger.info("Nothing has been said or transcribed.")
                continue

            self.music.pause()
            input = self.mic.activeListen(MUSIC=True)
            if input:
                if "close" in input.lower():
                    self.mic.say("Closing Spotify")
                    return
                self.delegateInput(input)
            else:
                self.mic.say("Pardon?")
            self.music.play()
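# Hedged usage sketch: hand MusicMode an existing conversation Mic and an MPD
# wrapper, then let it loop. The "JASPER" persona and the mic/mpdwrapper
# variables are assumptions drawn from the surrounding snippets.
music_mode = MusicMode("JASPER", mic, mpdwrapper)
music_mode.handleForever()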
class Jane(object):
    def __init__(self, options={}):
        self._logger = logging.getLogger(__name__)

        # Read config
        config_file = options['config']
        self._logger.debug("Trying to read config file: '%s'", config_file)
        try:
            with open(config_file, "r") as f:
                self.config = yaml.safe_load(f)
        except OSError:
            self._logger.error("Can't open config file: '%s'", config_file)
            raise

        self._logger.info('config loaded')
        self._logger.info(self.config)

        try:
            stt_engine_slug = self.config['stt_engine']
        except KeyError:
            stt_engine_slug = 'sphinx'
            self._logger.warning("stt_engine not specified in profile, "
                                 "defaulting to '%s'", stt_engine_slug)
        stt_engine_class = stt.get_engine_by_slug(stt_engine_slug)

        try:
            slug = self.config['stt_passive_engine']
            stt_passive_engine_class = stt.get_engine_by_slug(slug)
        except KeyError:
            stt_passive_engine_class = stt_engine_class

        try:
            tts_engine_slug = self.config['tts_engine']
        except KeyError:
            tts_engine_slug = tts.get_default_engine_slug()
            self._logger.warning("tts_engine not specified in profile, "
                                 "defaulting to '%s'", tts_engine_slug)
        tts_engine_class = tts.get_engine_by_slug(tts_engine_slug)
        self._logger.info('LOADED TTS %s', tts_engine_class)

        # Initialize Mic
        if 'text' in options:
            self.input = TextInput(tts_engine_class.get_instance())
        else:
            self.input = Mic(tts_engine_class.get_instance(),
                             stt_passive_engine_class.get_passive_instance(),
                             stt_engine_class.get_active_instance())

    def run(self):
        print('Start')
        if 'first_name' in self.config:
            salutation = ("How can I be of service, %s?"
                          % self.config["first_name"])
        else:
            salutation = "How can I be of service?"
        print('Say hello')
        self.input.say(salutation)

        conversation = Conversation("JANE", self.input, self.config)
        conversation.handleForever()
from mic import Mic print("active listening") mic = Mic("languagemodel.lm", "dictionary.dic", "languagemodel_persona.lm", "dictionary_persona.dic") mic.say("How can I be of service?") b = mic.activeListen() #a = mic.googleTranslate() #print(a)
def __init__(self): """Construct FrequencyStream object.""" self.mic = Mic('Blue Snowball')
# -*- encoding: utf-8 -*-
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

from mic import Mic

RATE = 8000
CHUNK = 2048
BIT_DEPTH = 16
AXIS_COLOR = "#dddddd"
MAXY = 2 ** (BIT_DEPTH - 1) - 1
MAXX = round(1000 * CHUNK / RATE)

chunks = 0
mic = Mic()
mic.run(rate=RATE, chunk=CHUNK, sampling_bit_depth=BIT_DEPTH)

fig, (ax_wave, ax_pds) = plt.subplots(2, figsize=(12, 8))
fig.patch.set_facecolor("black")
matplotlib.rc("axes", edgecolor="white")


def init():
    """Initializes the plots and texts used by Matplotlib Animation."""
    global waveline, psdline, chunk_count, energy
    for ax in ax_wave, ax_pds:
        ax.set_facecolor("black")
        ax.spines["bottom"].set_color(AXIS_COLOR)
        ax.spines["left"].set_color(AXIS_COLOR)
        ax.xaxis.label.set_color(AXIS_COLOR)
        ax.tick_params(axis="both", colors=AXIS_COLOR)
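# The snippet above is truncated before the animation is wired up. A hedged
# sketch of the usual FuncAnimation hookup follows; update() and its body are
# assumptions, not part of the original source.
def update(frame):
    """Placeholder frame callback; a real one would pull a chunk from the
    mic and refresh the waveform and PSD lines."""
    return []


# keep a reference to the animation so it isn't garbage-collected
anim = FuncAnimation(fig, update, init_func=init,
                     interval=1000 * CHUNK // RATE)
plt.show()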
class MusicMode:

    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        original = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        pronounced = g2p.translateWords(original)
        zipped = zip(original, pronounced)
        lines = ["%s %s" % (x, y) for x, y in zipped]

        with open("dictionary_spotify.dic", "w") as f:
            f.write("\n".join(lines) + "\n")

        with open("sentences_spotify.txt", "w") as f:
            f.write("\n".join(original) + "\n")
            f.write("<s> \n </s> \n")

        # make language model
        os.system(
            "text2idngram -vocab sentences_spotify.txt < sentences_spotify.txt -idngram spotify.idngram"
        )
        os.system(
            "idngram2lm -idngram spotify.idngram -vocab sentences_spotify.txt -arpa languagemodel_spotify.lm"
        )

        # create a new mic with the new music models
        self.mic = Mic("languagemodel.lm", "dictionary.dic",
                       "languagemodel_persona.lm", "dictionary_persona.dic",
                       "languagemodel_spotify.lm", "dictionary_spotify.dic")

    def delegateInput(self, input):
        command = input.upper()

        # check if input is meant to start the music module
        if "PLAYLIST" in command:
            command = command.replace("PLAYLIST", "")
        elif "STOP" in command:
            self.mic.say("Stopping music")
            self.music.stop()
            return
        elif "PLAY" in command:
            self.mic.say("Playing %s" % self.music.current_song())
            self.music.play()
            return
        elif "PAUSE" in command:
            self.mic.say("Pausing music")
            # not pause because would need a way to keep track of pause/play
            # state
            self.music.stop()
            return
        elif any(ext in command for ext in ["LOUDER", "HIGHER"]):
            self.mic.say("Louder")
            self.music.volume(interval=10)
            self.music.play()
            return
        elif any(ext in command for ext in ["SOFTER", "LOWER"]):
            self.mic.say("Softer")
            self.music.volume(interval=-10)
            self.music.play()
            return
        elif "NEXT" in command:
            self.mic.say("Next song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.next()
            self.mic.say("Playing %s" % self.music.current_song())
            return
        elif "PREVIOUS" in command:
            self.mic.say("Previous song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.previous()
            self.mic.say("Playing %s" % self.music.current_song())
            return

        # SONG SELECTION... requires long-loading dictionary and language model
        # songs = self.music.fuzzy_songs(query = command.replace("PLAY", ""))
        # if songs:
        #     self.mic.say("Found songs")
        #     self.music.play(songs = songs)
        #     print "SONG RESULTS"
        #     print "============"
        #     for song in songs:
        #         print "Song: %s Artist: %s" % (song.title, song.artist)
        #     self.mic.say("Playing %s" % self.music.current_song())
        # else:
        #     self.mic.say("No songs found. Resuming current song.")
        #     self.music.play()

        # PLAYLIST SELECTION
        playlists = self.music.fuzzy_playlists(query=command)
        if playlists:
            self.mic.say("Loading playlist %s" % playlists[0])
            self.music.play(playlist_name=playlists[0])
            self.mic.say("Playing %s" % self.music.current_song())
        else:
            self.mic.say("No playlists found. Resuming current song.")
            self.music.play()

        return

    def handleForever(self):
        self.music.play()
        self.mic.say("Playing %s" % self.music.current_song())

        while True:
            try:
                threshold, transcribed = self.mic.passiveListen(self.persona)
            except Exception:
                continue

            if threshold:
                self.music.pause()
                input = self.mic.activeListen(MUSIC=True)
                # guard against empty transcriptions before calling .lower()
                if input:
                    if "close" in input.lower():
                        self.mic.say("Closing Spotify")
                        return
                    self.delegateInput(input)
                else:
                    self.mic.say("Pardon?")
                self.music.play()
from mic import Mic
from speaker import Speaker
from modem import Modem

mic = Mic()
speaker = Speaker()
modem = Modem()

while True:
    entrada = mic.read()

    # modulado = modem.modAm(entrada)
    # demodulado = modem.demodAm(modulado)

    modulado = modem.modAmsc(entrada)
    demodulado = modem.demodAmsc(modulado)

    # modulado = modem.modFm(entrada)
    # demodulado = modem.demodFm(modulado)

    speaker.play(demodulado)
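# The App class earlier in this section selects modulators through name-keyed
# dicts; a hedged standalone version of that pattern for this loopback script:
schemes = {'AM': (modem.modAm, modem.demodAm),
           'AMSC': (modem.modAmsc, modem.demodAmsc),
           'FM': (modem.modFm, modem.demodFm)}
mod_fn, demod_fn = schemes['AMSC']  # switch schemes by changing the key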
# yaml config file cfg = yaml.safe_load(open("../config.yaml")) # init path collector path_coll = PathCollector(cfg, root_path='.') # -- # mic # create classifier classifier = Classifier(path_coll=path_coll, verbose=True) # create mic instance mic = Mic(classifier=classifier, feature_params=cfg['feature_params'], mic_params=cfg['mic_params'], is_audio_record=True) # -- # game setup # init pygame pygame.init() # init display screen = pygame.display.set_mode(cfg['game']['screen_size']) # level creation levels = [LevelMic(screen, cfg['game']['screen_size'], mic)] # choose level
class MusicMode:

    def __init__(self, PERSONA, mic):
        self.persona = PERSONA
        # self.mic - we're actually going to ignore the mic they passed in
        self.music = Music()

        # index spotify playlists into new dictionary and language models
        words = self.music.get_soup_playlist() + [
            "STOP", "CLOSE", "PLAY", "PAUSE", "NEXT", "PREVIOUS", "LOUDER",
            "SOFTER", "LOWER", "HIGHER", "VOLUME", "PLAYLIST"
        ]
        text = "\n".join(["<s> %s </s>" % word for word in words])

        # make language model
        vocabcompiler.compile_text(text, languagemodel_spotify)

        # create a new mic with the new music models
        self.mic = Mic(
            speaker.newSpeaker(),
            stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                                dictd_music=dictionary_spotify),
            stt.PocketSphinxSTT(lmd_music=languagemodel_spotify,
                                dictd_music=dictionary_spotify))

    def delegateInput(self, input):
        command = input.upper()

        # check if input is meant to start the music module
        if "PLAYLIST" in command:
            command = command.replace("PLAYLIST", "")
        elif "STOP" in command:
            self.mic.say("Stopping music")
            self.music.stop()
            return
        elif "PLAY" in command:
            self.mic.say("Playing %s" % self.music.current_song())
            self.music.play()
            return
        elif "PAUSE" in command:
            self.mic.say("Pausing music")
            # not pause because would need a way to keep track of pause/play
            # state
            self.music.stop()
            return
        elif any(ext in command for ext in ["LOUDER", "HIGHER"]):
            self.mic.say("Louder")
            self.music.volume(interval=10)
            self.music.play()
            return
        elif any(ext in command for ext in ["SOFTER", "LOWER"]):
            self.mic.say("Softer")
            self.music.volume(interval=-10)
            self.music.play()
            return
        elif "NEXT" in command:
            self.mic.say("Next song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.next()
            self.mic.say("Playing %s" % self.music.current_song())
            return
        elif "PREVIOUS" in command:
            self.mic.say("Previous song")
            self.music.play()  # backwards necessary to get mopidy to work
            self.music.previous()
            self.mic.say("Playing %s" % self.music.current_song())
            return

        # SONG SELECTION... requires long-loading dictionary and language model
        # songs = self.music.fuzzy_songs(query = command.replace("PLAY", ""))
        # if songs:
        #     self.mic.say("Found songs")
        #     self.music.play(songs = songs)
        #     print "SONG RESULTS"
        #     print "============"
        #     for song in songs:
        #         print "Song: %s Artist: %s" % (song.title, song.artist)
        #     self.mic.say("Playing %s" % self.music.current_song())
        # else:
        #     self.mic.say("No songs found. Resuming current song.")
        #     self.music.play()

        # PLAYLIST SELECTION
        playlists = self.music.fuzzy_playlists(query=command)
        if playlists:
            self.mic.say("Loading playlist %s" % playlists[0])
            self.music.play(playlist_name=playlists[0])
            self.mic.say("Playing %s" % self.music.current_song())
        else:
            self.mic.say("No playlists found. Resuming current song.")
            self.music.play()

        return

    def handleForever(self):
        self.music.play()
        self.mic.say("Playing %s" % self.music.current_song())

        while True:
            try:
                threshold, transcribed = self.mic.passiveListen(self.persona)
            except Exception:
                continue

            if threshold:
                self.music.pause()
                input = self.mic.activeListen(MUSIC=True)
                # guard against empty transcriptions before calling .lower()
                if input:
                    if "close" in input.lower():
                        self.mic.say("Closing Spotify")
                        return
                    self.delegateInput(input)
                else:
                    self.mic.say("Pardon?")
                self.music.play()