def handle_speak(event):
    """Handle a "speak" bus message by sending the utterance to TTS.

    Args:
        event: Message whose data carries 'utterance' (text to speak) and
            optionally 'expect_response' (bool) to trigger listening after
            the speech finishes.
    """
    utterance = event.data['utterance']
    expect_response = event.data.get('expect_response', False)

    # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
    # for 30 seconds fairly often, so we don't want to break on periods
    # (decreasing the chance of encountering the block). But we will
    # keep the split for non-Picroft installs since it give user feedback
    # faster on longer phrases.
    #
    # TODO: Remove or make an option? This is really a hack, anyway,
    # so we likely will want to get rid of this when not running on Mimic
    if not config.get('enclosure', {}).get('platform') == "picroft":
        # Split on sentence boundaries (". " / "? ") while avoiding
        # abbreviations such as "e.g." and "Dr.".
        chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                          utterance)
        for chunk in chunks:
            try:
                mute_and_speak(chunk)
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                logger.error('Error in mute_and_speak', exc_info=True)
    else:
        mute_and_speak(utterance)

    if expect_response:
        # NOTE(review): sibling versions signal 'startListening' here —
        # confirm 'buttonPress' is the intended trigger in this build.
        create_signal('buttonPress')
def on_message(self, message):
    """Handle an incoming WebChat websocket message.

    A literal "mic_on" payload triggers local listening; any other payload
    is forwarded to the speech pipeline.  A "|SILENT" marker indicates the
    utterance should also be answered over the chat channel.
    """
    # json.dumps wraps the raw text in quotes, hence the '"mic_on"'
    # comparison below includes the quote characters.
    utterance = json.dumps(message)
    print("*****Utterance : ", utterance)
    if utterance:
        if utterance == '"mic_on"':
            create_signal('startListening')
        else:
            if "|SILENT" in utterance:
                # Keep only the text before the "|SILENT" marker.
                utterance = utterance.split("|")
                utterance = utterance[0]
                data = {
                    "lang": lang,
                    "session": "",
                    "utterances": [utterance],
                    "client": "WebChat"
                }
                # Silent requests get a chat response in addition to the
                # normal recognizer handling.
                ws.emit(Message('chat_response', data))
                ws.emit(Message('recognizer_loop:utterance', data))
            else:
                data = {
                    "lang": lang,
                    "session": "",
                    "utterances": [utterance]
                }
                ws.emit(Message('recognizer_loop:utterance', data))
    # NOTE(review): a worker thread is spawned for every message —
    # confirm self.newThread is safe to run concurrently.
    t = Thread(target=self.newThread)
    t.start()
def execute(self, sentence, ident=None):
    """ Convert sentence to speech, preprocessing out unsupported ssml

        The method caches results if possible using the hash of the
        sentence.

        Args:
            sentence: Sentence to be spoken
            ident: Id reference to current interaction
    """
    sentence = self.validate_ssml(sentence)

    create_signal("isSpeaking")
    # Replace words with their phonetic spelling (case-insensitive lookup).
    if self.phonetic_spelling:
        for word in re.findall(r"[\w']+", sentence):
            if word.lower() in self.spellings:
                sentence = sentence.replace(word,
                                            self.spellings[word.lower()])

    # md5 here is only a cache key, not a security measure.
    key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
    wav_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                            key + '.' + self.audio_ext)

    if os.path.exists(wav_file):
        LOG.debug("TTS cache hit")
        phonemes = self.load_phonemes(key)
    else:
        wav_file, phonemes = self.get_tts(sentence, wav_file)
        if phonemes:
            self.save_phonemes(key, phonemes)
    # NOTE(review): phonemes may be None here; confirm visime() tolerates
    # None (a sibling version guards with `if phonemes else None`).
    vis = self.visime(phonemes)
    self.queue.put((self.audio_ext, wav_file, vis, ident))
def execute(self, sentence, ident=None, listen=False, play_error_sound=False):
    """Queue a sentence for speech synthesis.

    Results are cached where possible using the hash of the sentence.

    Arguments:
        sentence: (str) Sentence to be spoken
        ident: (str) Id reference to current interaction
        listen: (bool) True if listen should be triggered at the end of
                the utterance.
        play_error_sound: This utterance is an error. Depending on
                          configuration, mycroft could play a sound instead
    """
    sentence = self.validate_ssml(sentence)
    create_signal("isSpeaking")

    # Error case: play the configured error sound instead of synthesizing.
    if play_error_sound:
        self.queue.put(('wav', self.audio_file_error, None, ident, None))
        return

    try:
        self._execute(sentence, ident, listen)
    except Exception:
        # Terminate the audio sequence with an empty entry so playback
        # consumers are not left waiting, then propagate the failure.
        self.queue.put(EMPTY_PLAYBACK_QUEUE_TUPLE)
        raise
def execute(self, sentence, ident=None):
    """ Convert sentence to speech.

        The method caches results if possible using the hash of the
        sentence.

        Args:
            sentence: Sentence to be spoken
            ident: Id reference to current interaction
    """
    create_signal("isSpeaking")
    if self.phonetic_spelling:
        # NOTE(review): this lookup is case-sensitive, unlike sibling
        # implementations that use word.lower() — confirm intent.
        for word in re.findall(r"[\w']+", sentence):
            if word in self.spellings:
                sentence = sentence.replace(word, self.spellings[word])

    # md5 here is only a cache key, not a security measure.
    key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
    wav_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                            key + '.' + self.type)

    if os.path.exists(wav_file):
        LOG.debug("TTS cache hit")
        phonemes = self.load_phonemes(key)
    else:
        wav_file, phonemes = self.get_tts(sentence, wav_file)
        if phonemes:
            self.save_phonemes(key, phonemes)

    self.queue.put((self.type, wav_file, self.visime(phonemes), ident))
def handle_start_skipping(self, message):
    """Ask the user to confirm wake-word skipping and arm the yes/no intents."""
    self.speak("Should I start skipping wake words?", True)
    # Enable the confirmation intents so the next yes/no is captured.
    for intent_name in ('ConfirmYes', 'ConfirmNo'):
        self.enable_intent(intent_name)
    # Raise the signals that mark the pending confirmation state.
    for signal_name in ('StartSkippingWW', 'WaitingToConfirm'):
        create_signal(signal_name)
def process(self, data):
    """Dispatch a raw serial-bus string from the enclosure firmware.

    ``data`` is a line read from the Arduino; substring matching maps it
    to messagebus events and local system actions (some via shell).
    """
    # Forward the raw message to the bus so other components can react.
    self.client.emit(Message(data))

    if "mycroft.stop" in data:
        create_signal('buttonPress')
        self.client.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.client.emit(
            Message("IncreaseVolumeIntent", metadata={'play_sound': True}))

    if "volume.down" in data:
        self.client.emit(
            Message("DecreaseVolumeIntent", metadata={'play_sound': True}))

    if "system.test.begin" in data:
        self.client.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.client.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.client.emit(
            Message("speak",
                    metadata={'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav")
        time.sleep(3.5)  # Pause between tests so it's not so fast

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        self.client.emit(
            Message("enclosure.eyes.timedspin", metadata={'length': 12000}))
        self.client.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)

    if "unit.reboot" in data:
        self.client.emit(Message("enclosure.eyes.spin"))
        self.client.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.setwifi" in data:
        self.client.emit(Message("mycroft.wifi.start"))

    if "unit.factory-reset" in data:
        # Wipe the device identity, then reboot so the unit re-pairs.
        subprocess.call('rm ~/.mycroft/identity/identity.json', shell=True)
        self.client.emit(Message("enclosure.eyes.spin"))
        self.client.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
def test_wait_while_speaking(self):
    """wait_while_speaking() blocks until the isSpeaking signal clears."""
    # Check that test terminates
    create_signal('isSpeaking')
    # Start a helper thread that blocks in wait_while_speaking().
    Thread(target=wait_while_speaking_thread).start()
    sleep(2)
    # Signal still set: the helper must not have finished yet.
    self.assertFalse(done_waiting)
    # Consuming the signal should release the waiting thread.
    check_for_signal('isSpeaking')
    sleep(2)
    self.assertTrue(done_waiting)
def test_check_signal(self):
    """check_for_signal() detects and consumes a created signal."""
    # Start from a clean IPC directory.
    if exists('/tmp/mycroft'):
        rmtree('/tmp/mycroft')
    # check that signal is not found if file does not exist
    self.assertFalse(check_for_signal('test_signal'))

    # Check that the signal is found when created
    create_signal('test_signal')
    self.assertTrue(check_for_signal('test_signal'))
    # Check that the signal is removed after use
    self.assertFalse(isfile('/tmp/mycroft/ipc/signal/test_signal'))
def on_message(self, message):
    """Relay an incoming websocket message to the recognizer loop.

    A literal "mic_on" payload starts listening; anything else is sent
    on as an utterance.
    """
    utterance = json.dumps(message)
    print("*****Utterance : ", utterance)
    if utterance:
        if utterance == '"mic_on"':
            create_signal('startListening')
        else:
            lang = 'en-us'
            payload = {
                "lang": lang,
                "session": "",
                "utterances": [utterance],
            }
            ws.emit(Message('recognizer_loop:utterance', payload))
    worker = Thread(target=self.newThread)
    worker.start()
def on_message(self, message):
    """Forward a raw client utterance to the recognizer loop.

    A literal '"mic_on"' payload triggers local listening; anything else
    is wrapped into a recognizer_loop:utterance message with routing
    context identifying this peer.
    """
    utterance = message.strip()
    LOG.info("Utterance : " + utterance)
    if utterance:
        if utterance == '"mic_on"':
            create_signal('startListening')
        else:
            # NOTE(review): `lang` and `platform` come from an enclosing
            # scope — confirm they are defined where this handler runs.
            data = {"utterances": [utterance], "lang": lang}
            context = {
                "source": self.peer,
                "destinatary": "skills",
                "client_name": platform,
                "peer": self.peer
            }
            self.emitter.emit(
                Message("recognizer_loop:utterance", data, context))
def execute(self, sentence, ident=None, listen=False):
    """Convert sentence to speech, preprocessing out unsupported ssml

    The method caches results if possible using the hash of the
    sentence.

    Arguments:
        sentence: Sentence to be spoken
        ident: Id reference to current interaction
        listen: True if listen should be triggered at the end of the
                utterance.
    """
    sentence = self.validate_ssml(sentence)

    create_signal("isSpeaking")
    # Substitute phonetic spellings (case-insensitive lookup).
    if self.phonetic_spelling:
        for word in re.findall(r"[\w']+", sentence):
            if word.lower() in self.spellings:
                sentence = sentence.replace(word,
                                            self.spellings[word.lower()])

    chunks = self._preprocess_sentence(sentence)
    # Apply the listen flag to the last chunk, set the rest to False
    chunks = [(chunks[i], listen if i == len(chunks) - 1 else False)
              for i in range(len(chunks))]

    for sentence, l in chunks:
        # md5 here is only a cache key, not a security measure.
        key = str(
            hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
        wav_file = os.path.join(
            mycroft.util.get_cache_directory("tts/" + self.tts_name),
            key + '.' + self.audio_ext)

        if os.path.exists(wav_file):
            LOG.debug("TTS cache hit")
            phonemes = self.load_phonemes(key)
        else:
            wav_file, phonemes = self.get_tts(sentence, wav_file)
            if phonemes:
                self.save_phonemes(key, phonemes)

        # Guard: viseme() is only meaningful when phonemes are available.
        vis = self.viseme(phonemes) if phonemes else None
        self.queue.put((self.audio_ext, wav_file, vis, ident, l))
def handle_speak(event):
    """ Handle "speak" message: speak the utterance, splitting it into
    sentence chunks unless running on Picroft, and stop early if a stop
    request or button press arrives mid-utterance.
    """
    config = ConfigurationManager.get()
    ConfigurationManager.init(ws)
    global _last_stop_signal

    # Mild abuse of the signal system to allow other processes to detect
    # when TTS is happening. See mycroft.util.is_speaking()
    create_signal("isSpeaking")

    utterance = event.data['utterance']
    if event.data.get('expect_response', False):
        # Begin listening as soon as this utterance finishes playing.
        ws.once('recognizer_loop:audio_output_end', _trigger_expect_response)

    # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
    # for 30 seconds fairly often, so we don't want to break on periods
    # (decreasing the chance of encountering the block). But we will
    # keep the split for non-Picroft installs since it give user feedback
    # faster on longer phrases.
    #
    # TODO: Remove or make an option? This is really a hack, anyway,
    # so we likely will want to get rid of this when not running on Mimic
    if not config.get('enclosure', {}).get('platform') == "picroft":
        start = time.time()
        chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                          utterance)
        for chunk in chunks:
            try:
                mute_and_speak(chunk)
            except KeyboardInterrupt:
                raise
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit and other
                # BaseExceptions are no longer swallowed.
                logger.error('Error in mute_and_speak', exc_info=True)
            # Abort remaining chunks if a stop was requested meanwhile.
            if _last_stop_signal > start or check_for_signal('buttonPress'):
                break
    else:
        mute_and_speak(utterance)

    # This check will clear the "signal"
    check_for_signal("isSpeaking")
def handle_speak(event):
    """Handle a "speak" message: synthesize the utterance and optionally
    start listening afterwards when 'expect_response' is set.
    """
    global _last_stop_signal

    # Mild abuse of the signal system to allow other processes to detect
    # when TTS is happening. See mycroft.util.is_speaking()
    create_signal("isSpeaking")

    utterance = event.data['utterance']
    expect_response = event.data.get('expect_response', False)

    # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
    # for 30 seconds fairly often, so we don't want to break on periods
    # (decreasing the chance of encountering the block). But we will
    # keep the split for non-Picroft installs since it give user feedback
    # faster on longer phrases.
    #
    # TODO: Remove or make an option? This is really a hack, anyway,
    # so we likely will want to get rid of this when not running on Mimic
    if not config.get('enclosure', {}).get('platform') == "picroft":
        start = time.time()
        chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                          utterance)
        for chunk in chunks:
            try:
                mute_and_speak(chunk)
            except KeyboardInterrupt:
                raise
            except Exception:
                # Was a bare `except:`; narrowed so SystemExit and other
                # BaseExceptions are no longer swallowed.
                logger.error('Error in mute_and_speak', exc_info=True)
            # Abort remaining chunks if a stop was requested meanwhile.
            if _last_stop_signal > start or check_for_signal('buttonPress'):
                break
    else:
        mute_and_speak(utterance)

    # This check will clear the "signal"
    check_for_signal("isSpeaking")
    if expect_response:
        create_signal('startListening')
def execute(self, sentence, ident=None, listen=False):
    """Convert sentence to speech, preprocessing out unsupported ssml

    The method caches results if possible using the hash of the
    sentence.

    Arguments:
        sentence: Sentence to be spoken
        ident: Id reference to current interaction
        listen: True if listen should be triggered at the end of the
                utterance.
    """
    cleaned = self.validate_ssml(sentence)
    create_signal("isSpeaking")
    try:
        self._execute(cleaned, ident, listen)
    except Exception:
        # Terminate the playback queue with an empty entry so consumers
        # are not left waiting, then propagate the failure to the caller.
        self.queue.put(EMPTY_PLAYBACK_QUEUE_TUPLE)
        raise
def execute(self, sentence, ident=None):
    """request and play mimic2 wav audio

    Args:
        sentence (str): sentence to synthesize from mimic2
        ident (optional): Defaults to None.
    """
    create_signal("isSpeaking")
    sentence = self._normalized_numbers(sentence)
    # Split long sentences so the remote service can handle them.
    chunks = sentence_chunker(sentence, self.chunk_size)
    for idx, req in enumerate(self._requests(chunks)):
        # NOTE(review): req.result() blocks and has no timeout/error
        # handling here — a failed request raises out of this loop.
        results = req.result().json()
        audio = base64.b64decode(results['audio_base64'])
        vis = self.visime(results['visimes'])
        # md5 here is only a cache key, not a security measure.
        key = str(
            hashlib.md5(chunks[idx].encode('utf-8', 'ignore')).hexdigest())
        wav_file = os.path.join(get_cache_directory("tts"),
                                key + '.' + self.audio_ext)
        with open(wav_file, 'wb') as f:
            f.write(audio)
        self.queue.put((self.audio_ext, wav_file, vis, ident))
def execute(self, sentence, ident=None):
    """ Convert sentence to speech, preprocessing out unsupported ssml

        The method caches results if possible using the hash of the
        sentence.

        Args:
            sentence: Sentence to be spoken
            ident: Id reference to current interaction
    """
    sentence = self.validate_ssml(sentence)

    create_signal("isSpeaking")
    # Substitute phonetic spellings (case-insensitive lookup).
    if self.phonetic_spelling:
        for word in re.findall(r"[\w']+", sentence):
            if word.lower() in self.spellings:
                sentence = sentence.replace(word,
                                            self.spellings[word.lower()])

    chunks = self._preprocess_sentence(sentence)
    for sentence in chunks:
        # md5 here is only a cache key, not a security measure.
        key = str(hashlib.md5(
            sentence.encode('utf-8', 'ignore')).hexdigest())
        wav_file = os.path.join(
            mycroft.util.get_cache_directory("tts/" + self.tts_name),
            key + '.' + self.audio_ext)

        if os.path.exists(wav_file):
            LOG.debug("TTS cache hit")
            phonemes = self.load_phonemes(key)
        else:
            wav_file, phonemes = self.get_tts(sentence, wav_file)
            if phonemes:
                self.save_phonemes(key, phonemes)
        # NOTE(review): phonemes may be None here; confirm viseme(None)
        # is safe (a sibling version guards with `if phonemes else None`).
        vis = self.viseme(phonemes)
        self.queue.put((self.audio_ext, wav_file, vis, ident))
def execute(self, sentence, ident=None):
    """request and play mimic2 wav audio

    Args:
        sentence (str): sentence to synthesize from mimic2
        ident (optional): Defaults to None.
    """
    create_signal("isSpeaking")
    sentence = self._normalized_numbers(sentence)

    # Use the phonetic_spelling mechanism from the TTS base class
    if self.phonetic_spelling:
        for word in re.findall(r"[\w']+", sentence):
            if word.lower() in self.spellings:
                sentence = sentence.replace(word,
                                            self.spellings[word.lower()])

    # Split long sentences so the remote service can handle them.
    chunks = sentence_chunker(sentence, self.chunk_size)
    try:
        for idx, req in enumerate(self._requests(chunks)):
            results = req.result().json()
            audio = base64.b64decode(results['audio_base64'])
            vis = self.visime(results['visimes'])
            # md5 here is only a cache key, not a security measure.
            key = str(hashlib.md5(
                chunks[idx].encode('utf-8', 'ignore')).hexdigest())
            wav_file = os.path.join(
                get_cache_directory("tts"),
                key + '.' + self.audio_ext
            )

            with open(wav_file, 'wb') as f:
                f.write(audio)
            self.queue.put((self.audio_ext, wav_file, vis, ident))
    except (ReadTimeout, ConnectionError, ConnectTimeout, HTTPError):
        # Signal the caller to fall back to the local mimic TTS backend.
        raise RemoteTTSTimeoutException(
            "Mimic 2 remote server request timedout. falling back to mimic"
        )
def execute(self, sentence):
    """ Convert sentence to speech.

        The method caches results if possible using the hash of the
        sentence.

        Args:
            sentence: Sentence to be spoken
    """
    create_signal("isSpeaking")
    # md5 here is only a cache key, not a security measure.
    key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
    wav_file = os.path.join(mycroft.util.get_cache_directory("tts"),
                            key + '.' + self.type)

    if os.path.exists(wav_file):
        LOG.debug("TTS cache hit")
        phonemes = self.load_phonemes(key)
    else:
        wav_file, phonemes = self.get_tts(sentence, wav_file)
        if phonemes:
            self.save_phonemes(key, phonemes)
    # NOTE(review): phonemes may be None on a fresh synthesis; confirm
    # visime() tolerates None.
    self.queue.put((self.type, wav_file, self.visime(phonemes)))
def handle_confirm_yes(self, message):
    """Apply the pending wake-word skip/unskip choice after a 'yes'.

    check_for_signal(name, 0) both tests and consumes the signal, so the
    elif branch only fires when the first signal was absent.
    """
    if check_for_signal("StartSkippingWW", 0):
        # Persist the skip state and restart the recognizer loop.
        create_signal('skip_wake_word')
        create_signal('restartedFromSkill')
        self.speak("o k. Starting to skip wake words.", False)
        # self.emitter.emit(Message('configuration.updated'))
        # self.emitter.emit(Message('recognizer_loop:reload'))
        self.emitter.emit(Message('recognizer_loop:restart'))
    elif check_for_signal("StopSkippingWW", 0):
        # Consume the skip flag to return to wake-word mode.
        check_for_signal('skip_wake_word', 0)
        create_signal('restartedFromSkill')
        self.speak("o k. Stopping the skipping of wake words.", False)
        # self.emitter.emit(Message('configuration.updated'))
        # self.emitter.emit(Message('recognizer_loop:reload'))
        self.emitter.emit(Message('recognizer_loop:restart'))
    # The confirmation is over either way; disarm the yes/no intents.
    self.disable_intent('ConfirmYes')
    self.disable_intent('ConfirmNo')
def process(self, data):
    """Dispatch a serial-bus command string from the Mark 1 enclosure.

    Substring matching maps the raw Arduino line to messagebus events
    and local system actions (volume, mic test, shutdown, reset, ssh).
    """
    # Forward the raw data to the messagebus for other listeners.
    self.ws.emit(Message(data))

    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.ws.emit(Message("enclosure.started"))

    if "mycroft.stop" in data:
        create_signal('buttonPress')
        self.ws.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.ws.emit(
            Message("VolumeSkill:IncreaseVolumeIntent",
                    {'play_sound': True}))

    if "volume.down" in data:
        self.ws.emit(
            Message("VolumeSkill:DecreaseVolumeIntent",
                    {'play_sound': True}))

    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        self.ws.emit(
            Message("enclosure.eyes.timedspin",
                    {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)

    if "unit.reboot" in data:
        self.ws.emit(
            Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))

    if "unit.factory-reset" in data:
        # Wipe identity, reset wifi/ssh, announce, then reboot.
        self.ws.emit(
            Message("enclosure.eyes.spin"))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(Message("mycroft.wifi.reset"))
        self.ws.emit(Message("mycroft.disable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        time.sleep(5)
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("mycroft.enable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))

    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("mycroft.disable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))
def process(self, data):
    """Dispatch a serial-bus command string from the enclosure firmware."""
    # Forward the raw data to the messagebus for other listeners.
    self.ws.emit(Message(data))

    if "Command: system.version" in data:
        self.ws.emit(Message("enclosure.start"))

    if "mycroft.stop" in data:
        create_signal('buttonPress')  # FIXME - Must use WS instead
        self.ws.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.ws.emit(
            Message("IncreaseVolumeIntent", {'play_sound': True}))

    if "volume.down" in data:
        self.ws.emit(
            Message("DecreaseVolumeIntent", {'play_sound': True}))

    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        self.ws.emit(
            Message("enclosure.eyes.timedspin",
                    {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)

    if "unit.reboot" in data:
        self.ws.emit(
            Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))

    if "unit.factory-reset" in data:
        # Wipe the device identity, then reboot so the unit re-pairs.
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(
            Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
def test_is_speaking(self):
    """is_speaking() mirrors the presence of the 'isSpeaking' signal."""
    create_signal('isSpeaking')
    self.assertTrue(mycroft.audio.is_speaking())
    # Check that the signal hasn't been removed
    self.assertTrue(check_for_signal('isSpeaking'))
    # check_for_signal() consumed the signal, so speaking has now ended.
    self.assertFalse(mycroft.audio.is_speaking())
def test_create_signal(self):
    """create_signal() drops a file in the ipc signal directory."""
    create_signal('test_signal')
    # NOTE(review): hard-coded /tmp path is not portable; consider
    # tempfile.gettempdir() if the signal implementation supports it.
    self.assertTrue(isfile('/tmp/mycroft/ipc/signal/test_signal'))
def test_create_signal(self):
    """Verify create_signal() drops a file in the ipc signal directory."""
    create_signal('test_signal')
    expected = join(gettempdir(), 'mycroft/ipc/signal/test_signal')
    self.assertTrue(isfile(expected))
def process(self, data):
    """Dispatch a serial-bus command string from the Mark 1 enclosure.

    Only forwards the raw line to the bus when it is not a stop command;
    pairing state gates the stop-button behavior.
    """
    # TODO: Look into removing this emit altogether.
    # We need to check if any other serial bus messages
    # are handled by other parts of the code
    if "mycroft.stop" not in data:
        self.ws.emit(Message(data))

    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.ws.emit(Message("enclosure.started"))

    if "mycroft.stop" in data:
        # Only treat the button as a stop once the unit has been paired.
        if has_been_paired():
            create_signal('buttonPress')
            self.ws.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.ws.emit(
            Message("VolumeSkill:IncreaseVolumeIntent",
                    {'play_sound': True}))

    if "volume.down" in data:
        self.ws.emit(
            Message("VolumeSkill:DecreaseVolumeIntent",
                    {'play_sound': True}))

    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        self.ws.emit(
            Message("enclosure.eyes.timedspin",
                    {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)

    if "unit.reboot" in data:
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))

    if "unit.factory-reset" in data:
        # Wipe identity, reset wifi/ssh, announce, then reboot.
        self.ws.emit(Message("enclosure.eyes.spin"))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(Message("mycroft.wifi.reset"))
        self.ws.emit(Message("mycroft.disable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        wait_while_speaking()
        self.ws.emit(Message("enclosure.mouth.reset"))
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("mycroft.enable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))

    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("mycroft.disable.ssh"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))
def process(self, data):
    """Dispatch a serial-bus command string (teddy-button enclosure variant).

    Adds teddy.start/stop/mute record-button handling on top of the
    standard Mark 1 command set.
    """
    # Forward the raw data to the messagebus for other listeners.
    self.ws.emit(Message(data))

    if "Command: system.version" in data:
        self.ws.emit(Message("enclosure.start"))

    if "teddy.start" in data:
        LOG.info("Got record start button")
        #self.ws.emit(Message("manual.record.start"))
        create_signal(
            'startRecordingButton')  # FIXME - Must use WS instead

    if "teddy.stop" in data:
        LOG.info("Got record stop button")
        #self.ws.emit(Message("manual.record.stop"))
        create_signal('stopRecordingButton')  # FIXME - Must use WS instead

    if "teddy.mute" in data:
        #self.ws.emit(Message("manual.mute"))
        #create_signal('muteButton')  # FIXME - Must use WS instead
        self.ws.emit(Message('recognizer_loop:mute'))

    if "mycroft.stop" in data:
        create_signal('buttonPress')  # FIXME - Must use WS instead
        self.ws.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.ws.emit(Message("IncreaseVolumeIntent", {'play_sound': True}))

    if "volume.down" in data:
        self.ws.emit(Message("DecreaseVolumeIntent", {'play_sound': True}))

    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(
            Message("speak", {'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        self.ws.emit(Message("enclosure.eyes.timedspin", {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl poweroff -i', shell=True)

    if "unit.reboot" in data:
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)

    if "unit.setwifi" in data:
        self.ws.emit(Message("mycroft.wifi.start"))

    if "unit.factory-reset" in data:
        # Wipe the device identity, then reboot so the unit re-pairs.
        subprocess.call('rm ~/.mycroft/identity/identity2.json',
                        shell=True)
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        subprocess.call('systemctl reboot -i', shell=True)
def process(self, data):
    """Dispatch a serial-bus command string from the enclosure board.

    This variant delegates power actions to system.* bus messages instead
    of calling systemctl directly, and supports opt-in learning toggles.
    """
    # TODO: Look into removing this emit altogether.
    # We need to check if any other serial bus messages
    # are handled by other parts of the code
    if "mycroft.stop" not in data:
        self.ws.emit(Message(data))

    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.ws.emit(Message("enclosure.started"))

    if "mycroft.stop" in data:
        # Only treat the button as a stop once the unit has been paired.
        if has_been_paired():
            create_signal('buttonPress')
            self.ws.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.ws.emit(Message("mycroft.volume.increase",
                             {'play_sound': True}))

    if "volume.down" in data:
        self.ws.emit(Message("mycroft.volume.decrease",
                             {'play_sound': True}))

    if "system.test.begin" in data:
        self.ws.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.ws.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.ws.emit(Message("speak", {
            'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        # Eyes to soft gray on shutdown
        self.ws.emit(Message("enclosure.eyes.color",
                             {'r': 70, 'g': 65, 'b': 69}))
        self.ws.emit(
            Message("enclosure.eyes.timedspin",
                    {'length': 12000}))
        self.ws.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.ws.emit(Message("system.shutdown"))

    if "unit.reboot" in data:
        # Eyes to soft gray on reboot
        self.ws.emit(Message("enclosure.eyes.color",
                             {'r': 70, 'g': 65, 'b': 69}))
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.ws.emit(Message("system.reboot"))

    if "unit.setwifi" in data:
        self.ws.emit(Message("system.wifi.setup", {'lang': self.lang}))

    if "unit.factory-reset" in data:
        # Announce, wipe identity, reset wifi/ssh, then reboot.
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.ws.emit(Message("system.wifi.reset"))
        self.ws.emit(Message("system.ssh.disable"))
        wait_while_speaking()
        self.ws.emit(Message("enclosure.mouth.reset"))
        self.ws.emit(Message("enclosure.eyes.spin"))
        self.ws.emit(Message("enclosure.mouth.reset"))
        time.sleep(5)  # give the system time to process all messages
        self.ws.emit(Message("system.reboot"))

    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("system.ssh.enable"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))

    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.ws.emit(Message("system.ssh.disable"))
        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))

    if "unit.enable-learning" in data or "unit.disable-learning" in data:
        # Toggle the opt_in flag in the user configuration.
        enable = 'enable' in data
        word = 'enabled' if enable else 'disabled'

        LOG.info("Setting opt_in to: " + word)
        new_config = {'opt_in': enable}
        user_config = LocalConf(USER_CONFIG)
        user_config.merge(new_config)
        user_config.store()

        self.ws.emit(Message("speak", {
            'utterance': mycroft.dialog.get("learning " + word)}))
def begin_audio(self):
    """Helper function for child classes to call in execute().

    Announces the start of audio output on the messagebus and raises
    the 'isSpeaking' signal so other processes can detect TTS activity.
    """
    self.ws.emit(Message("recognizer_loop:audio_output_start"))
    create_signal("isSpeaking")
def _start_listener(message):
    """ Force Mycroft to start listening (as if 'Hey Mycroft' was spoken)

    Args:
        message: Bus message that triggered this handler (unused).
    """
    create_signal('startListening')
def _trigger_expect_response(message):
    """ Makes mycroft start listening on 'recognizer_loop:audio_output_end'

    Args:
        message: Bus message that triggered this handler (unused).
    """
    create_signal('startListening')
def process(self, data):
    """Dispatch a serial-bus command string from the enclosure board.

    Uses self.bus and system.* messages for power actions; the enclosure
    visual feedback has been commented out in this variant.
    """
    # TODO: Look into removing this emit altogether.
    # We need to check if any other serial bus messages
    # are handled by other parts of the code
    if "mycroft.stop" not in data:
        self.bus.emit(Message(data))

    if "Command: system.version" in data:
        # This happens in response to the "system.version" message
        # sent during the construction of Enclosure()
        self.bus.emit(Message("enclosure.started"))

    if "mycroft.stop" in data:
        # Only treat the button as a stop once the unit has been paired.
        if has_been_paired():
            create_signal('buttonPress')
            self.bus.emit(Message("mycroft.stop"))

    if "volume.up" in data:
        self.bus.emit(Message("mycroft.volume.increase",
                              {'play_sound': True}))

    if "volume.down" in data:
        self.bus.emit(Message("mycroft.volume.decrease",
                              {'play_sound': True}))

    if "system.test.begin" in data:
        self.bus.emit(Message('recognizer_loop:sleep'))

    if "system.test.end" in data:
        self.bus.emit(Message('recognizer_loop:wake_up'))

    if "mic.test" in data:
        # Record a short sample at reduced volume, then play it back.
        mixer = Mixer()
        prev_vol = mixer.getvolume()[0]
        mixer.setvolume(35)
        self.bus.emit(Message("speak", {
            'utterance': "I am testing one two three"}))

        time.sleep(0.5)  # Prevents recording the loud button press
        record("/tmp/test.wav", 3.0)
        mixer.setvolume(prev_vol)
        play_wav("/tmp/test.wav").communicate()

        # Test audio muting on arduino
        subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

    if "unit.shutdown" in data:
        # Eyes to soft gray on shutdown
        # self.bus.emit(Message("enclosure.eyes.color",
        #                       {'r': 70, 'g': 65, 'b': 69}))
        # self.bus.emit(
        #     Message("enclosure.eyes.timedspin",
        #             {'length': 12000}))
        # self.bus.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.bus.emit(Message("system.shutdown"))

    if "unit.reboot" in data:
        # Eyes to soft gray on reboot
        # self.bus.emit(Message("enclosure.eyes.color",
        #                       {'r': 70, 'g': 65, 'b': 69}))
        # self.bus.emit(Message("enclosure.eyes.spin"))
        # self.bus.emit(Message("enclosure.mouth.reset"))
        time.sleep(0.5)  # give the system time to pass the message
        self.bus.emit(Message("system.reboot"))

    if "unit.setwifi" in data:
        self.bus.emit(Message("system.wifi.setup", {'lang': self.lang}))

    if "unit.factory-reset" in data:
        # Announce, wipe identity, reset wifi/ssh, then reboot.
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("reset to factory defaults")}))
        subprocess.call(
            'rm ~/.mycroft/identity/identity2.json', shell=True)
        self.bus.emit(Message("system.wifi.reset"))
        self.bus.emit(Message("system.ssh.disable"))
        wait_while_speaking()
        # self.bus.emit(Message("enclosure.mouth.reset"))
        # self.bus.emit(Message("enclosure.eyes.spin"))
        # self.bus.emit(Message("enclosure.mouth.reset"))
        time.sleep(5)  # give the system time to process all messages
        self.bus.emit(Message("system.reboot"))

    if "unit.enable-ssh" in data:
        # This is handled by the wifi client
        self.bus.emit(Message("system.ssh.enable"))
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh enabled")}))

    if "unit.disable-ssh" in data:
        # This is handled by the wifi client
        self.bus.emit(Message("system.ssh.disable"))
        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("ssh disabled")}))

    if "unit.enable-learning" in data or "unit.disable-learning" in data:
        # Toggle the opt_in flag in the user configuration.
        enable = 'enable' in data
        word = 'enabled' if enable else 'disabled'

        LOG.info("Setting opt_in to: " + word)
        new_config = {'opt_in': enable}
        user_config = LocalConf(USER_CONFIG)
        user_config.merge(new_config)
        user_config.store()

        self.bus.emit(Message("speak", {
            'utterance': mycroft.dialog.get("learning " + word)}))