def in_started(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in started state.

    Dummy recognizer: performs no actual recognition.  Echoes the request
    text back inside an empty intent and replies with IntentRecognized.
    """
    if isinstance(message, RecognizeIntent):
        intent = empty_intent()
        intent["text"] = message.text
        # Set raw_text too, consistent with the other recognizers in this file
        intent["raw_text"] = message.text
        intent["speech_confidence"] = message.confidence
        # Forward the caller's handle flag so downstream intent handling
        # honors the original request (the other recognizers do this).
        self.send(
            message.receiver or sender,
            IntentRecognized(intent, handle=message.handle),
        )
def in_started(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in started state.

    Runs an external command with the request text on stdin and parses its
    stdout as a JSON intent.  Falls back to an empty intent on any failure.
    """
    if not isinstance(message, RecognizeIntent):
        return

    try:
        self._logger.debug(self.command)

        # Text -> STDIN -> STDOUT -> JSON
        result = subprocess.run(
            self.command,
            check=True,
            input=message.text.encode(),
            stdout=subprocess.PIPE,
        )
        intent = json.loads(result.stdout.decode())
    except Exception:
        self._logger.exception("in_started")
        intent = empty_intent()
        intent["text"] = message.text
        intent["raw_text"] = message.text

    intent["speech_confidence"] = message.confidence
    self.send(
        message.receiver or sender,
        IntentRecognized(intent, handle=message.handle),
    )
def in_started(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in started state.

    POSTs the request text to Home Assistant's conversation API.  Any
    speech in the response is forwarded to the TTS system; the reply to
    the requester is always an empty intent (HTTP errors propagate).
    """
    if not isinstance(message, RecognizeIntent):
        return

    # Build the request for Home Assistant's conversation endpoint
    url = urljoin(self.hass_config["url"], "api/conversation/process")
    request_args = hass_request_kwargs(self.hass_config, self.pem_file)
    request_args["json"] = {"text": message.text}

    if self.pem_file is not None:
        request_args["verify"] = self.pem_file

    # POST to /api/conversation/process
    response = requests.post(url, **request_args)
    response.raise_for_status()
    result = response.json()

    if self.handle_speech:
        # Extract any spoken response and forward it to the TTS system
        speech = pydash.get(result, "speech.plain.speech", "")
        if speech:
            self._logger.debug("Handling speech")
            self.send(sender, SpeakSentence(speech))

    # Return empty intent since conversation doesn't give it to us
    intent = empty_intent()
    intent["text"] = message.text
    intent["raw_text"] = message.text
    intent["speech_confidence"] = message.confidence
    self.send(message.receiver or sender, IntentRecognized(intent))
def in_started(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in started state.

    Delegates to self.recognize(); on any error, replies with an empty
    intent containing the original text.
    """
    if not isinstance(message, RecognizeIntent):
        return

    try:
        intent = self.recognize(message.text)
    except Exception:
        self._logger.exception("in_started")
        intent = empty_intent()
        intent["text"] = message.text

    intent["speech_confidence"] = message.confidence
    receiver = message.receiver or sender
    self.send(receiver, IntentRecognized(intent, handle=message.handle))
def in_started(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in started state.

    Delegates to self.recognize(), normalizing a missing intent name to an
    empty string.  On any error, replies with an empty intent containing
    the original text.
    """
    if isinstance(message, RecognizeIntent):
        try:
            intent = self.recognize(message.text)

            # Normalize: a None intent name becomes ""
            intent["intent"]["name"] = intent["intent"]["name"] or ""

            # Use the actor's named logger, consistent with the rest of
            # this file (was module-level logging.debug)
            self._logger.debug(repr(intent))
        except Exception:
            self._logger.exception("in_started")
            intent = empty_intent()
            intent["text"] = message.text
            intent["raw_text"] = message.text

        # Report confidence like the other recognizers in this file
        intent["speech_confidence"] = message.confidence
        self.send(
            message.receiver or sender,
            IntentRecognized(intent, handle=message.handle),
        )
def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in loaded state.

    Tokenizes the request text, recognizes it against the loaded intent
    graph, and replies with IntentRecognized.  Any failure — including no
    recognition at all — falls back to an empty intent.
    """
    if isinstance(message, RecognizeIntent):
        try:
            self.load_graph()

            # Assume lower case, white-space separated tokens
            tokens = re.split(r"\s+", message.text)

            if self.profile.get("intent.fsticuffs.ignore_unknown_words", True):
                # Drop tokens outside the known-words set
                tokens = [w for w in tokens if w in self.words]

            recognitions = recognize(
                tokens,
                self.graph,
                fuzzy=self.fuzzy,
                stop_words=self.stop_words,
                extra_converters=self.converters,
            )

            # Explicit raise instead of assert: asserts are stripped under
            # python -O, which would change the failure mode here.
            if not recognitions:
                raise RuntimeError("No intent recognized")

            # Use first (best) recognition, converted to a JSON-style dict
            intent = recognitions[0].asdict()
        except Exception:
            self._logger.exception("in_loaded")
            intent = empty_intent()
            intent["text"] = message.text
            intent["raw_text"] = message.text

        intent["speech_confidence"] = message.confidence
        self.send(
            message.receiver or sender,
            IntentRecognized(intent, handle=message.handle),
        )
def handle_any(self, message: Any, sender: RhasspyActor) -> None:
    """Handle messages in any state.

    Central dispatcher: routes each incoming message type to the
    appropriate child actor (recorder, decoder, recognizer, handler,
    TTS, trainer, etc.).  Replies are generally directed back to the
    original requester via receiver=sender.  Unrecognized messages fall
    through to handle_forward.
    """
    if isinstance(message, ListenForCommand):
        # Force voice command: remember who gets the final intent, then
        # transition this actor to the awake state.
        self.handle = message.handle
        self.intent_receiver = message.receiver or sender
        self.listen_timeout_sec = message.timeout
        self.listen_entities = message.entities
        self.transition("awake")
    elif isinstance(message, GetVoiceCommand):
        # Record voice command, but don't do anything with it
        self.send(
            self.command,
            ListenForCommand(message.receiver or sender, timeout=message.timeout),
        )
    elif isinstance(message, TranscribeWav):
        # speech -> text (forwarded to the speech decoder actor)
        self.send(
            self.decoder,
            TranscribeWav(message.wav_data, sender, handle=message.handle),
        )
    elif isinstance(message, RecognizeIntent):
        # text -> intent (forwarded to the intent recognizer actor)
        self.send(
            self.recognizer,
            RecognizeIntent(
                message.text,
                confidence=message.confidence,
                receiver=sender,
                handle=message.handle,
            ),
        )
    elif isinstance(message, HandleIntent):
        # intent -> action (forwarded to the intent handler actor)
        self.send(self.handler, HandleIntent(message.intent, sender))

        # Forward to MQTT (hermes), if MQTT is configured
        if self.mqtt is not None:
            self.send(self.mqtt, IntentRecognized(message.intent))
    elif isinstance(message, GetWordPhonemes):
        # eSpeak -> CMU (word pronunciation lookup)
        self.send(
            self.word_pronouncer, GetWordPhonemes(message.word, receiver=sender)
        )
    elif isinstance(message, SpeakWord):
        # eSpeak -> WAV (speak a single word)
        self.send(self.word_pronouncer, SpeakWord(message.word, receiver=sender))
    elif isinstance(message, GetWordPronunciations):
        # word -> [CMU] (up to n pronunciations per word)
        self.send(
            self.word_pronouncer,
            GetWordPronunciations(message.words, n=message.n, receiver=sender),
        )
    elif isinstance(message, SpeakSentence):
        # text -> speech (forwarded to the TTS actor)
        self.send(
            self.speech,
            SpeakSentence(
                message.sentence,
                receiver=sender,
                play=message.play,
                voice=message.voice,
                language=message.language,
            ),
        )
    elif isinstance(message, TrainProfile):
        # Training: stop wake-word listening first, remember who to
        # notify when training completes, then enter the training state.
        self.reload_actors_after_training = message.reload_actors
        self.send(self.wake, StopListeningForWakeWord())
        self.training_receiver = message.receiver or sender
        self.transition("training_sentences")
        # self.send(self.sentence_generator, GenerateSentences())
    elif isinstance(message, StartRecordingToBuffer):
        # Record WAV (pass through to the recorder actor)
        self.send(self.recorder, message)
    elif isinstance(message, StopRecordingToBuffer):
        # Stop recording WAV; recorded buffer goes to the requester
        self.send(
            self.recorder,
            StopRecordingToBuffer(message.buffer_name, message.receiver or sender),
        )
    elif isinstance(message, StateTransition):
        # Track state of every actor
        self.handle_transition(message, sender)
    elif isinstance(message, GetActorStates):
        # Report the tracked per-actor states back to the requester
        self.send(sender, self.actor_states)
    elif isinstance(message, WakeupMessage):
        # Timer tick: nothing to do in this state
        pass
    elif isinstance(message, WavPlayed):
        # Playback-finished notification: nothing to do in this state
        pass
    elif isinstance(message, GetProblems):
        # Report problems from child actors
        self.send(sender, Problems(self.problems))
    else:
        self.handle_forward(message, sender)