def handle_utterance(self, message):
    """ Messagebus handler for the recognizer_loop:utterance message

    Tries converse (active skills) first; only when no skill converses
    is the utterance run through the intent parser.  Emits either the
    matched intent message or an 'intent_failure' for the fallback
    system, then reports metrics.

    Args:
        message (Message): bus message carrying 'utterances' and 'lang'
    """
    try:
        # Get language of the utterance
        lang = message.data.get('lang', "en-us")
        utterances = message.data.get('utterances', '')

        stopwatch = Stopwatch()
        with stopwatch:
            # Parse the sentence: give active skills a chance to converse
            converse = self.parse_converse(utterances, lang)
            if not converse:
                # no skill wants to handle utterance - run intent parsing
                intent = self.parse_utterances(utterances, lang)

        if converse:
            # Report that converse handled the intent and return
            # (note: 'intent' is only defined on the non-converse path,
            # which is why this returns before send_metrics below)
            ident = message.context['ident'] if message.context else None
            report_timing(ident, 'intent_service', stopwatch,
                          {'intent_type': 'converse'})
            return
        elif intent:
            # Send the message on to the intent handler
            reply = message.reply(intent.get('intent_type'), intent)
        else:
            # or if no match send sentence to fallback system
            reply = message.reply('intent_failure',
                                  {'utterance': utterances[0],
                                   'lang': lang})
        self.emitter.emit(reply)
        self.send_metrics(intent, message.context, stopwatch)
    except Exception as e:
        LOG.exception(e)
def handle_converse_request(self, message):
    """ Check if the targeted skill id can handle conversation

    If supported, the conversation is invoked and the result is emitted
    as a "skill.converse.response" message.  A response with skill_id 0
    and result False signals failure.

    Args:
        message (Message): bus message carrying "skill_id", "utterances"
                           and "lang" in its data
    """
    skill_id = int(message.data["skill_id"])
    utterances = message.data["utterances"]
    lang = message.data["lang"]

    # loop through skills list and call converse for skill with skill_id
    for skill in self.loaded_skills:
        if self.loaded_skills[skill]["id"] == skill_id:
            try:
                instance = self.loaded_skills[skill]["instance"]
            except KeyError:
                # Narrowed from BaseException: the lookup can only raise
                # KeyError, and BaseException also swallowed
                # KeyboardInterrupt/SystemExit
                LOG.error("converse requested but skill not loaded")
                self.ws.emit(Message("skill.converse.response", {
                    "skill_id": 0, "result": False}))
                return
            try:
                result = instance.converse(utterances, lang)
                self.ws.emit(Message("skill.converse.response", {
                    "skill_id": skill_id, "result": result}))
                return
            except Exception:
                # A misbehaving skill must not kill the handler; fall
                # through so the failure response below is sent
                LOG.exception(
                    "Error in converse method for skill " + str(skill_id))

    # No matching skill (or its converse raised) - report failure
    self.ws.emit(Message("skill.converse.response",
                         {"skill_id": 0, "result": False}))
def on_gui_show_page(self, message):
    """Bus handler: pull up a page template in the connected GUI(s)."""
    try:
        page_data = _get_page_data(message)
        # Hand the request off to the GUI renderer(s):
        # show(namespace, page, index)
        self.show(page_data[1], page_data[0], page_data[2])
    except Exception as err:
        LOG.exception(repr(err))
def on_gui_delete_namespace(self, message):
    """Bus handler for removing a GUI namespace."""
    try:
        # The namespace to drop is the message's sender
        self.remove_namespace(message.data['__from'])
    except Exception as err:
        LOG.exception(repr(err))
def _poll_skill_settings(self):
    """ If identifier exists for this skill poll to backend to request
        settings and store it if it changes
        TODO: implement as websocket

    (Removed an outdated ``Args: hashed_meta`` entry - this method
    takes no arguments.)
    """
    try:
        # NOTE(review): attribute name "_complete_intialization" is
        # misspelled throughout; kept as-is since it is shared state
        if not self._complete_intialization:
            self.initialize_remote_settings()
            if not self._complete_intialization:
                return  # unable to do remote sync
        else:
            original = hash(str(self))
            self.update_remote()
            # Call callback for updated settings
            if self.changed_callback and hash(str(self)) != original:
                self.changed_callback()
    except Exception as e:
        LOG.error(e)
        LOG.exception("")

    # this is used in core so do not delete!
    if self.is_alive:
        # continues to poll settings every 60 seconds
        t = Timer(60, self._poll_skill_settings)
        t.daemon = True
        t.start()
def handle_utterance(self, message):
    """Handle an utterance: converse with active skills first, then run
    the Adapt intent parser over each utterance variant.

    Args:
        message (Message): bus message with 'utterances' and 'lang' data
    """
    # Get language of the utterance
    lang = message.data.get('lang', None)
    if not lang:
        lang = "en-us"

    utterances = message.data.get('utterances', '')

    # check for conversation time-out: drop skills inactive for more
    # than converse_timeout minutes
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <=
                          self.converse_timeout * 60]

    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return

    # no skill wants to handle utterance
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # No intent matched this variant - expected control flow, so
            # don't log it (fixes the Python-2-only
            # "except StopIteration, e" syntax and matches the sibling
            # parse_utterances implementation)
            continue
def handler(message):
    """Run registered fallback handlers in ascending priority order.

    Emits handler start/complete messages on the bus, logs handler
    failures, and reports the time spent handling the fallback.
    """
    # indicate fallback handling start
    ws.emit(Message("mycroft.skill.handler.start",
                    data={'handler': "fallback"}))

    stopwatch = Stopwatch()
    handler_name = None
    with stopwatch:
        for _, handler in sorted(cls.fallback_handlers.items(),
                                 key=operator.itemgetter(0)):
            try:
                if handler(message):
                    #  indicate completion
                    handler_name = get_handler_name(handler)
                    ws.emit(Message(
                        'mycroft.skill.handler.complete',
                        data={'handler': "fallback",
                              "fallback_handler": handler_name}))
                    break
            except Exception:
                LOG.exception('Exception in fallback.')
        else:  # No fallback could handle the utterance
            ws.emit(Message('complete_intent_failure'))
            warning = "No fallback could handle intent."
            LOG.warning(warning)
            # indicate completion with exception
            ws.emit(Message('mycroft.skill.handler.complete',
                            data={'handler': "fallback",
                                  'exception': warning}))

    # Send timing metric; .get() avoids a KeyError when a context exists
    # without an 'ident' entry (matches the ranged fallback handler)
    if message.context and message.context.get('ident'):
        ident = message.context['ident']
        report_timing(ident, 'fallback_handler', stopwatch,
                      {'handler': handler_name})
def main():
    """Entry point for the audio service: connect to the messagebus,
    initialize speech and audio backends, and run until interrupted.
    """
    global ws
    global config
    ws = WebsocketClient()
    Configuration.init(ws)
    config = Configuration.get()
    speech.init(ws)

    # Setup control of pulse audio
    setup_pulseaudio_handlers(config.get('Audio').get('pulseaudio'))

    def echo(message):
        """Best-effort debug logging of audio service bus traffic."""
        try:
            _message = json.loads(message)
            if 'mycroft.audio.service' not in _message.get('type'):
                return
            message = json.dumps(_message)
        except Exception:
            # Narrowed from a bare "except:"; malformed messages are
            # still logged raw below
            pass
        LOG.debug(message)

    LOG.info("Starting Audio Services")  # typo fix: was "Staring"
    ws.on('message', echo)
    ws.once('open', load_services_callback)
    try:
        ws.run_forever()
    except KeyboardInterrupt as e:
        # Fixed Python-2-only "except KeyboardInterrupt, e" syntax
        LOG.exception(e)
        speech.shutdown()
        sys.exit()
def main():
    """Entry point for the voice service: wire recognizer loop and bus
    handlers, start the connection thread and run the listener loop.
    """
    global ws
    global loop
    global config
    lock = PIDLock("voice")
    ws = WebsocketClient()
    config = Configuration.get()
    Configuration.init(ws)
    loop = RecognizerLoop()

    # Recognizer loop events
    loop.on('recognizer_loop:utterance', handle_utterance)
    loop.on('speak', handle_speak)
    loop.on('recognizer_loop:record_begin', handle_record_begin)
    loop.on('recognizer_loop:wakeword', handle_wakeword)
    loop.on('recognizer_loop:record_end', handle_record_end)
    loop.on('recognizer_loop:no_internet', handle_no_internet)

    # Messagebus events
    ws.on('open', handle_open)
    ws.on('complete_intent_failure', handle_complete_intent_failure)
    ws.on('recognizer_loop:sleep', handle_sleep)
    ws.on('recognizer_loop:wake_up', handle_wake_up)
    ws.on('mycroft.mic.mute', handle_mic_mute)
    ws.on('mycroft.mic.unmute', handle_mic_unmute)
    ws.on("mycroft.paired", handle_paired)
    ws.on('recognizer_loop:audio_output_start', handle_audio_start)
    ws.on('recognizer_loop:audio_output_end', handle_audio_end)
    ws.on('mycroft.stop', handle_stop)

    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()

    try:
        loop.run()
    except KeyboardInterrupt as e:
        # Fixed Python-2-only "except KeyboardInterrupt, e" syntax
        LOG.exception(e)
        sys.exit()
def on_gui_delete_page(self, message):
    """ Bus handler for removing pages. """
    try:
        # Parse inside the try so a malformed message is logged rather
        # than raising out of the handler (matches on_gui_show_page)
        page, namespace, _ = _get_page_data(message)
        self.remove_pages(namespace, page)
    except Exception as e:
        LOG.exception(repr(e))
def simple_cli():
    """Minimal command line interface: read lines from stdin, send them
    to Mycroft as utterances, and print spoken responses until Ctrl+C.
    """
    global bus
    global bSimple
    bSimple = True
    bus = WebsocketClient()  # Mycroft messagebus connection
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()
    bus.on('speak', handle_speak)
    try:
        while True:
            # Sleep for a while so all the output that results
            # from the previous command finishes before we print.
            time.sleep(1.5)
            print("Input (Ctrl+C to quit):")
            line = sys.stdin.readline()
            bus.emit(Message("recognizer_loop:utterance",
                             {'utterances': [line.strip()]}))
    except KeyboardInterrupt:
        # User hit Ctrl+C to quit.  A second identical except clause in
        # the original was unreachable dead code and called the
        # nonexistent Thread.exit(); it has been removed.
        print("")
def _poll_skill_settings(self):
    """Poll the backend for settings updates, then reschedule itself.

    TODO: implement as websocket
    """
    snapshot = hash(str(self))
    try:
        if is_paired():
            if self._complete_intialization:
                self.update_remote()
            else:
                self.initialize_remote_settings()
    except Exception as err:
        LOG.exception('Failed to fetch skill settings: {}'.format(repr(err)))
    finally:
        # Notify listeners when the settings content actually changed
        if (self._complete_intialization and self.changed_callback and
                hash(str(self)) != snapshot):
            self.changed_callback()

    if self._poll_timer:
        self._poll_timer.cancel()

    if not self._is_alive:
        return

    # Schedule the next poll one minute out
    self._poll_timer = Timer(60, self._poll_skill_settings)
    self._poll_timer.daemon = True
    self._poll_timer.start()
def parse_utterances(self, utterances, lang):
    """ Parse the utterance using adapt to find a matching intent.

    (Typo fix in docstring: was "utteracne".)

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            LOG.exception(e)
            continue

    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        # update active skills: skill id is the prefix of the intent type
        skill_id = int(best_intent['intent_type'].split(":")[0])
        self.add_active_skill(skill_id)
        return best_intent
def send_skill_list(self, message=None):
    """ Send list of loaded skills.

    Args:
        message: (unused) bus message that triggered the request
    """
    try:
        # list() keeps the payload JSON-serializable on Python 3, where
        # dict.keys() returns a non-serializable view object
        self.ws.emit(Message('mycroft.skills.list',
                             data={'skills':
                                   list(self.loaded_skills.keys())}))
    except Exception as e:
        LOG.exception(e)
def on_gui_set_value(self, message):
    """Forward non-reserved key/value pairs from the bus to the GUI."""
    data = message.data
    namespace = data.get("__from", "")

    # Pass these values on to the GUI renderers
    for key, value in data.items():
        if key in RESERVED_KEYS:
            continue
        try:
            self.set(namespace, key, value)
        except Exception as err:
            LOG.exception(repr(err))
def load_skill(skill_descriptor, emitter, skill_id, BLACKLISTED_SKILLS=None):
    """ load skill from skill descriptor.

    Args:
        skill_descriptor: descriptor of skill to load
        emitter:          messagebus emitter
        skill_id:         id number for skill
        BLACKLISTED_SKILLS: optional list of skill names to refuse

    Returns:
        MycroftSkill: the loaded skill or None on failure
    """
    BLACKLISTED_SKILLS = BLACKLISTED_SKILLS or []
    path = skill_descriptor["path"]
    name = basename(path)
    LOG.info("ATTEMPTING TO LOAD SKILL: {} with ID {}".format(name, skill_id))
    if name in BLACKLISTED_SKILLS:
        LOG.info("SKILL IS BLACKLISTED " + name)
        return None
    main_file = join(path, MainModule + '.py')
    try:
        # Load the skill's main module under a dot-free module name
        with open(main_file, 'rb') as fp:
            skill_module = imp.load_module(name.replace('.', '_'), fp,
                                           main_file, ('.py', 'rb',
                                                       imp.PY_SOURCE))
        if (hasattr(skill_module, 'create_skill') and
                callable(skill_module.create_skill)):
            # v2 skills framework
            skill = skill_module.create_skill()
            skill.settings.allow_overwrite = True
            skill.settings.load_skill_settings_from_file()
            skill.bind(emitter)
            skill.skill_id = skill_id
            skill.load_data_files(path)
            # Set up intent handlers
            skill.initialize()
            skill._register_decorated()
            LOG.info("Loaded " + name)

            # The very first time a skill is run, speak the intro
            first_run = skill.settings.get("__mycroft_skill_firstrun", True)
            if first_run:
                LOG.info("First run of " + name)
                # Persist the flag so the intro is only spoken once ever
                skill.settings["__mycroft_skill_firstrun"] = False
                skill.settings.store()
                intro = skill.get_intro_message()
                if intro:
                    skill.speak(intro)
            return skill
        else:
            LOG.warning("Module {} does not appear to be skill".format(name))
    except Exception:
        LOG.exception("Failed to load skill: " + name)
    return None
def load_priority(self):
    """Load the priority skills, installing any that are missing locally.

    Skills that cannot be found in the msm listing are skipped with a
    warning instead of raising (previously an uncaught KeyError).
    """
    skills = {skill.name: skill for skill in self.msm.list()}
    for skill_name in PRIORITY_SKILLS:
        skill = skills.get(skill_name)
        if skill is None:
            # Unknown priority skill - skip rather than crash startup
            LOG.warning('Priority skill {} not found'.format(skill_name))
            continue
        if not skill.is_local:
            try:
                skill.install()
            except Exception:
                LOG.exception('Downloading priority skill:' + skill.name)
                # Couldn't install and it's still not available locally
                if not skill.is_local:
                    continue
        self._load_or_reload_skill(skill.path)
def _stop(self):
    """Halt and close the microphone input stream, then release audio."""
    try:
        stream = self.stream
        if not stream.is_stopped():
            stream.stop_stream()
        stream.close()
    except Exception:
        # Shutting down regardless, so just log and carry on
        LOG.exception('Failed to stop mic input stream')

    self.stream = None
    self.audio.terminate()
def stop(self):
    """ Tell the manager to shutdown """
    self._stop_event.set()

    # Do a clean shutdown of all skills
    for name, skill_info in self.loaded_skills.items():
        instance = skill_info.get('instance')
        if not instance:
            continue
        try:
            instance._shutdown()
        except Exception:
            LOG.exception('Shutting down skill: ' + name)
def handle_utterance(self, message):
    """Handle an utterance: converse with active skills first, then fall
    back to Adapt intent parsing, emitting either the matched intent
    message or an 'intent_failure' message.

    Args:
        message (Message): bus message with 'utterances' and 'lang' data
    """
    # Get language of the utterance
    lang = message.data.get('lang', None)
    if not lang:
        lang = "en-us"

    utterances = message.data.get('utterances', '')

    # check for conversation time-out
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <=
                          self.converse_timeout * 60]

    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return

    # no skill wants to handle utterance
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            # Fixed invalid "except e:" which raised NameError at
            # exception time instead of logging the real failure
            LOG.exception(e)
            continue

    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        reply = message.reply(
            best_intent.get('intent_type'), best_intent)
        self.emitter.emit(reply)
        # update active skills
        skill_id = int(best_intent['intent_type'].split(":")[0])
        self.add_active_skill(skill_id)
    else:
        self.emitter.emit(Message("intent_failure", {
            "utterance": utterances[0],
            "lang": lang
        }))
def _reload_modified_skills(self):
    """Handle reload of recently changed skill(s)"""
    for skill_dir in self._get_skill_directories():
        try:
            skill_loader = self.skill_loaders.get(skill_dir)
            if skill_loader is not None and skill_loader.reload_needed():
                # If reload succeed add settingsmeta to upload queue
                if skill_loader.reload():
                    self.upload_queue.put(skill_loader)
        except Exception:
            # Spelling fix in the log message: was "occured"
            LOG.exception('Unhandled exception occurred while '
                          'reloading {}'.format(skill_dir))
def send_skill_list(self, _):
    """Send list of loaded skills."""
    try:
        # Build {skill_id: {active, id}} for every known loader
        payload = {
            loader.skill_id: dict(
                active=loader.active and loader.loaded,
                id=loader.skill_id
            )
            for loader in self.skill_loaders.values()
        }
        self.bus.emit(Message('mycroft.skills.list', data=payload))
    except Exception:
        LOG.exception('Failed to send skill list')
def handle_utterance(self, message):
    """ Main entrypoint for handling user utterances with Mycroft skills

    Monitor the messagebus for 'recognizer_loop:utterance', typically
    generated by a spoken interaction but potentially also from a CLI
    or other method of injecting a 'user utterance' into the system.

    Utterances then work through this sequence to be handled:
    1) Active skills attempt to handle using converse()
    2) Adapt intent handlers
    3) Padatious intent handlers
    4) Other fallbacks

    Args:
        message (Message): The messagebus data
    """
    try:
        # Get language of the utterance
        lang = message.data.get('lang', "en-us")
        utterances = message.data.get('utterances', '')

        stopwatch = Stopwatch()
        with stopwatch:
            # Give active skills an opportunity to handle the utterance
            converse = self._converse(utterances, lang)

            if not converse:
                # No conversation, use intent system to handle utterance
                intent = self._adapt_intent_match(utterances, lang)
                # Score the first variant with Padatious as well so a
                # confident Padatious match can pre-empt Adapt below
                padatious_intent = PadatiousService.instance.calc_intent(
                    utterances[0])

        if converse:
            # Report that converse handled the intent and return
            # ('intent'/'padatious_intent' are only bound on the
            # non-converse path, hence the early return)
            ident = message.context['ident'] if message.context else None
            report_timing(ident, 'intent_service', stopwatch,
                          {'intent_type': 'converse'})
            return
        elif intent and not (padatious_intent and
                             padatious_intent.conf >= 0.95):
            # Send the message to the Adapt intent's handler unless
            # Padatious is REALLY sure it was directed at it instead.
            reply = message.reply(intent.get('intent_type'), intent)
        else:
            # Allow fallback system to handle utterance
            # NOTE: Padatious intents are handled this way, too
            reply = message.reply('intent_failure',
                                  {'utterance': utterances[0],
                                   'lang': lang})
        self.bus.emit(reply)
        self.send_metrics(intent, message.context, stopwatch)
    except Exception as e:
        LOG.exception(e)
def stop(self):
    """ Tell the manager to shutdown """
    self._stop_event.set()

    # Give every loaded skill a chance to shut down cleanly
    for name, skill_info in self.loaded_skills.items():
        instance = skill_info.get('instance')
        if not instance:
            continue
        try:
            instance.default_shutdown()
        except Exception:
            LOG.exception('Shutting down skill: ' + name)
def initialize(self):
    """Set up Mark 1 enclosure events, visuals and settings callbacks."""
    # Initialize...
    self.brightness_dict = self.translate_namedvalues('brightness.levels')
    self.color_dict = self.translate_namedvalues('colors')
    self.settings['web eye color'] = self.settings['eye color']

    try:
        # Handle changing the eye color once Mark 1 is ready to go
        # (Part of the startup sequence)
        self.add_event('mycroft.internet.connected',
                       self.handle_internet_connected)
        self.add_event('mycroft.eyes.default',
                       self.handle_default_eyes)

        # Handle the 'waking' visual
        self.add_event('recognizer_loop:record_begin',
                       self.handle_listener_started)
        self.add_event('recognizer_loop:record_end',
                       self.handle_listener_ended)
        self.add_event('mycroft.speech.recognition.unknown',
                       self.handle_failed_stt)

        self.start_idle_check()

        # Handle the 'busy' visual
        self.bus.on('mycroft.skill.handler.start',
                    self.on_handler_started)
        self.bus.on('mycroft.skill.handler.complete',
                    self.on_handler_complete)

        # Any of these events mean the device is interacting with the
        # user, which interrupts the idle face
        self.bus.on('recognizer_loop:audio_output_start',
                    self.on_handler_interactingwithuser)
        self.bus.on('enclosure.mouth.think',
                    self.on_handler_interactingwithuser)
        self.bus.on('enclosure.mouth.events.deactivate',
                    self.on_handler_interactingwithuser)
        self.bus.on('enclosure.mouth.text',
                    self.on_handler_interactingwithuser)
        self.bus.on('enclosure.mouth.viseme',
                    self.on_handler_speaking)
        self.bus.on('gui.page.show',
                    self.on_gui_page_show)
        self.bus.on('mycroft.skills.initialized', self.reset_face)
    except Exception:
        LOG.exception('In Mark 1 Skill')

    # Update use of wake-up beep
    self._sync_wake_beep_setting()

    self.settings.set_changed_callback(self.on_websettings_changed)
def handler(message): start, stop = message.data.get('fallback_range', (0, 101)) # indicate fallback handling start LOG.debug('Checking fallbacks in range ' '{} - {}'.format(start, stop)) bus.emit( message.forward("mycroft.skill.handler.start", data={'handler': "fallback"})) stopwatch = Stopwatch() handler_name = None with stopwatch: sorted_handlers = sorted(cls.fallback_handlers.items(), key=operator.itemgetter(0)) handlers = [ f[1] for f in sorted_handlers if start <= f[0] < stop ] for handler in handlers: try: if handler(message): # indicate completion status = True handler_name = get_handler_name(handler) bus.emit( message.forward( 'mycroft.skill.handler.complete', data={ 'handler': "fallback", "fallback_handler": handler_name })) break except Exception: LOG.exception('Exception in fallback.') else: status = False # indicate completion with exception warning = 'No fallback could handle intent.' bus.emit( message.forward('mycroft.skill.handler.complete', data={ 'handler': "fallback", 'exception': warning })) # return if the utterance was handled to the caller bus.emit(message.response(data={'handled': status})) # Send timing metric if message.context.get('ident'): ident = message.context['ident'] report_timing(ident, 'fallback_handler', stopwatch, {'handler': handler_name})
def __init__(self, bus):
    """Initialize the intent service and wire up all bus handlers.

    Args:
        bus: Mycroft messagebus connection
    """
    self.bus = bus

    # Dictionary for translating a skill id to a name
    self.skill_names = {}
    config = Configuration.get()
    self.adapt_service = AdaptService(config.get('context', {}))
    try:
        self.padatious_service = PadatiousService(bus, config['padatious'])
    except Exception as err:
        # Padatious is optional; Adapt/fallback still work without it
        LOG.exception('Failed to create padatious handlers '
                      '({})'.format(repr(err)))
    self.fallback = FallbackService(bus)

    self.bus.on('register_vocab', self.handle_register_vocab)
    self.bus.on('register_intent', self.handle_register_intent)
    self.bus.on('recognizer_loop:utterance', self.handle_utterance)
    self.bus.on('detach_intent', self.handle_detach_intent)
    self.bus.on('detach_skill', self.handle_detach_skill)
    # Context related handlers
    self.bus.on('add_context', self.handle_add_context)
    self.bus.on('remove_context', self.handle_remove_context)
    self.bus.on('clear_context', self.handle_clear_context)

    # Converse method
    self.bus.on('mycroft.speech.recognition.unknown', self.reset_converse)
    self.bus.on('mycroft.skills.loaded', self.update_skill_name_dict)

    def add_active_skill_handler(message):
        self.add_active_skill(message.data['skill_id'])

    self.bus.on('active_skill_request', add_active_skill_handler)
    self.active_skills = []  # [skill_id , timestamp]
    self.converse_timeout = 5  # minutes to prune active_skills

    # Intents API
    self.registered_vocab = []
    self.bus.on('intent.service.intent.get', self.handle_get_intent)
    self.bus.on('intent.service.skills.get', self.handle_get_skills)
    self.bus.on('intent.service.active_skills.get',
                self.handle_get_active_skills)
    self.bus.on('intent.service.adapt.get', self.handle_get_adapt)
    self.bus.on('intent.service.adapt.manifest.get',
                self.handle_adapt_manifest)
    self.bus.on('intent.service.adapt.vocab.manifest.get',
                self.handle_vocab_manifest)
    self.bus.on('intent.service.padatious.get',
                self.handle_get_padatious)
    self.bus.on('intent.service.padatious.manifest.get',
                self.handle_padatious_manifest)
    self.bus.on('intent.service.padatious.entities.manifest.get',
                self.handle_entity_manifest)
def post_manifest(self, reload_skills_manifest=False):
    """Post the manifest of the device's skills to the backend."""
    upload_allowed = self.config['skills'].get('upload_skill_manifest')
    if not (upload_allowed and is_paired()):
        return
    if reload_skills_manifest:
        # TODO: Handle inside msm
        self.msm._device_skill_state = None
    try:
        DeviceApi().upload_skills_data(self.msm.device_skill_state)
    except Exception:
        LOG.exception('Could not upload skill manifest')
def _load_settings_meta_file(self):
    """Read the contents of the settingsmeta file into memory.

    JSON files are parsed with the json module, everything else is
    treated as YAML.  Failures are logged, leaving settings_meta as-is.
    """
    # Select the parser by file suffix.  (Removed a dead
    # os.path.splitext() call whose result was never used.)
    is_json_file = self.settings_meta_path.suffix == ".json"
    try:
        with open(str(self.settings_meta_path)) as meta_file:
            if is_json_file:
                self.settings_meta = json.load(meta_file)
            else:
                self.settings_meta = yaml.safe_load(meta_file)
    except Exception:
        log_msg = "Failed to load settingsmeta file: "
        LOG.exception(log_msg + str(self.settings_meta_path))
def _get_remote_settings(self):
    """Fetch this skill's settings from the server.

    Returns:
        dict or None: the remote settings, or None if the download failed
    """
    try:
        return self.api.get_skill_settings()
    except Exception:
        LOG.exception('Failed to download remote settings from server.')
        return None
def get_version():
    """Return the mycroft-core version string.

    Falls back to "dev-<git sha>" when the version module is missing,
    and to "development" when git also fails.
    """
    version = None
    try:
        from mycroft.version import CORE_VERSION_STR
        version = CORE_VERSION_STR
    except Exception as e:
        try:
            # NOTE(review): on Python 3 check_output returns bytes, so
            # this concatenation assumes Python 2 - confirm before
            # porting
            version = "dev-" + subprocess.check_output(
                ["git", "rev-parse", "--short", "HEAD"]).strip()
        except subprocess.CalledProcessError as e2:
            # Fixed Python-2-only "except E, e2" syntax
            version = "development"
            LOG.debug(e)
            LOG.exception(e2)
    # Previously the computed value was never returned, making the
    # function useless to callers
    return version
def stop(self):
    """Tell the manager to shutdown."""
    self._stop_event.set()
    self.settings_downloader.stop_downloading()

    # Do a clean shutdown of all skills
    for loader in self.skill_loaders.values():
        if loader.instance is None:
            continue
        try:
            loader.instance.default_shutdown()
        except Exception:
            LOG.exception('Failed to shut down skill: ' + loader.skill_id)
def on_message(self, message):
    """Deserialize an incoming bus message and re-emit it locally.

    Messages that fail to deserialize are silently dropped; errors
    raised by downstream handlers are logged with a traceback.
    """
    LOG.debug(message)
    try:
        deserialized_message = Message.deserialize(message)
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # propagate; malformed messages are still dropped silently
        return
    try:
        self.emitter.emit(deserialized_message.type, deserialized_message)
    except Exception as e:
        # Fixed Python-2-only "except Exception, e" syntax and removed
        # the dead trailing "pass"
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)
def handle_youtubempv_seek_intent(self, message):
    """Seek mpv playback by the number of seconds given as the third
    word of the utterance.
    """
    try:
        if (self.mpvExists()):
            # Third word of the utterance carries the seek amount
            msg = str(message.data.get("utterance")).split(" ")[2]
            if (msg != ''):
                secs = int(msg)
                self.mpvSeek(secs)
        else:
            self.speak_dialog("ytmpv.not.exists")
    except Exception as e:
        # str(e) instead of e.message: Exception.message does not exist
        # on Python 3 and raised AttributeError here
        LOG.exception("YoutubeMpv Error: " + str(e))
        self.speak_dialog("ytmpv.error")
def spotify_play(self, dev_id, uris=None, context_uri=None):
    """Begin playback on a Spotify device, reporting auth problems.

    Args:
        dev_id: Spotify device id to play on
        uris: optional list of track uris
        context_uri: optional album/artist/playlist context uri
    """
    try:
        LOG.info(u'spotify_play: {}'.format(dev_id))
        self.spotify.play(dev_id, uris, context_uri)
        self.start_monitor()
        self.dev_id = dev_id
    except spotipy.SpotifyException:
        # TODO: Catch other conditions?
        self.speak_dialog('NotAuthorized')
    except Exception as err:
        LOG.exception(err)
        self.speak_dialog('NotAuthorized')
def initialize(self):
    """Wire up bus and GUI events for the Xenon platform skill."""
    try:
        self.add_event('mycroft.gui.screen.close', self.show_home_screen)
        self.gui.register_handler('mycroft.gui.screen.close',
                                  self.show_home_screen)
        self.add_event("mycroft.gui.user.interaction", self.delay_event)
    except Exception:
        LOG.exception('In Xenon Platform Skill')
def handle_utterance(self, message):
    """Handle an utterance: converse with active skills first, then fall
    back to Adapt intent parsing, emitting either the matched intent
    message or an 'intent_failure' message.

    Args:
        message (Message): bus message with 'utterances' and 'lang' data
    """
    # Get language of the utterance
    lang = message.data.get('lang', "en-us")

    utterances = message.data.get('utterances', '')

    # check for conversation time-out
    self.active_skills = [skill for skill in self.active_skills
                          if time.time() - skill[1] <=
                          self.converse_timeout * 60]

    # check if any skill wants to handle utterance
    for skill in self.active_skills:
        if self.do_converse(utterances, skill[0], lang):
            # update timestamp, or there will be a timeout where
            # intent stops conversing whether its being used or not
            self.add_active_skill(skill[0])
            return

    # no skill wants to handle utterance
    best_intent = None
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            # Fixed invalid "except e:" which raised NameError at
            # exception time instead of logging the real failure
            LOG.exception(e)
            continue

    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        reply = message.reply(
            best_intent.get('intent_type'), best_intent)
        self.emitter.emit(reply)
        # update active skills
        skill_id = int(best_intent['intent_type'].split(":")[0])
        self.add_active_skill(skill_id)
    else:
        self.emitter.emit(Message("intent_failure", {
            "utterance": utterances[0],
            "lang": lang
        }))
def match_intent(self, utterances, _=None, __=None):
    """Search every utterance variant with the Adapt engine and return
    the highest-confidence intent found.

    Args:
        utterances (iterable): tuples of utterance variations (raw,
            normalized, and possibly others); each variant is tried.

    Returns:
        IntentMatch, or None if no match was found.
    """
    best_intent = {}

    def _consider(candidate, raw_utt):
        # Keep the candidate if it beats the current best confidence
        nonlocal best_intent
        current = best_intent.get('confidence', 0.0) if best_intent else 0.0
        if candidate.get('confidence', 0.0) > current:
            best_intent = candidate
            # TODO - Shouldn't Adapt do this?
            best_intent['utterance'] = raw_utt

    for variants in utterances:
        for variant in variants:
            try:
                found = list(self.engine.determine_intent(
                    variant, 100,
                    include_tags=True,
                    context_manager=self.context_manager))
                if found:
                    _consider(
                        max(found, key=lambda x: x.get('confidence', 0.0)),
                        variants[0])
            except Exception as err:
                LOG.exception(err)

    if not best_intent:
        return None

    self.update_context(best_intent)
    skill_id = best_intent['intent_type'].split(":")[0]
    return IntentMatch('Adapt', best_intent['intent_type'], best_intent,
                       skill_id)
def load_spellings(self):
    """Load phonetic spellings of words as dictionary"""
    path = join('text', self.lang.lower(), 'phonetic_spellings.txt')
    spellings_file = resolve_resource_file(path)
    if not spellings_file:
        return {}
    try:
        with open(spellings_file) as f:
            # Each non-empty line is "word: phonetic spelling"
            pairs = (ln.split(':')
                     for ln in f.read().split('\n') if ln)
            return {word.strip(): spelling.strip()
                    for word, spelling in pairs}
    except ValueError:
        LOG.exception('Failed to load phonetic spellings.')
        return {}
def load_spellings(self):
    """Load phonetic spellings of words as dictionary"""
    # NOTE(review): this variant does not lower-case self.lang -
    # confirm the lang code is already normalized by the caller
    path = join('text', self.lang, 'phonetic_spellings.txt')
    spellings_file = resolve_resource_file(path)
    if not spellings_file:
        return {}
    try:
        with open(spellings_file) as f:
            entries = [ln.split(':')
                       for ln in f.read().split('\n') if ln]
            return {word.strip(): spelling.strip()
                    for word, spelling in entries}
    except ValueError:
        LOG.exception('Failed to load phonetic spellings.')
        return {}
def handle_youtubempv_intent(self, message):
    """Search YouTube for the spoken query and start mpv playback."""
    try:
        cmd = str(message.data.get('Start'))
        # Strip the leading trigger word(s) to leave the search query
        msg = str(message.data.get('utterance')).replace(cmd + " ", "", 1)
        if (self.mpvExists()):
            # TODO adding translations in voc
            self.search = msg
            self.url = self.getResults(self.search)
            self.mpvStart()
        else:
            self.speak_dialog("ytmpv.not.exists")
    except Exception as e:
        # str(e) instead of e.message: Exception.message does not exist
        # on Python 3 and raised AttributeError here
        LOG.exception("YoutubeMpv Error: " + str(e))
        self.speak_dialog("ytmpv.error")
def handle_utterance(self, message):
    """ Main entrypoint for handling user utterances with Mycroft skills

    Monitor the messagebus for 'recognizer_loop:utterance', typically
    generated by a spoken interaction but potentially also from a CLI
    or other method of injecting a 'user utterance' into the system.

    Utterances then work through this sequence to be handled:
    1) Active skills attempt to handle using converse()
    2) Adapt intent handlers
    3) Padatious intent handlers
    4) Other fallbacks

    Args:
        message (Message): The messagebus data
    """
    try:
        # Get language of the utterance
        lang = message.data.get('lang', "en-us")
        utterances = message.data.get('utterances', '')

        stopwatch = Stopwatch()
        with stopwatch:
            # Give active skills an opportunity to handle the utterance
            converse = self._converse(utterances, lang)

            if not converse:
                # No conversation, use intent system to handle utterance
                intent = self._adapt_intent_match(utterances, lang)

        if converse:
            # Report that converse handled the intent and return
            # ('intent' is only bound on the non-converse path)
            ident = message.context['ident'] if message.context else None
            report_timing(ident, 'intent_service', stopwatch,
                          {'intent_type': 'converse'})
            return
        elif intent:
            # Send the message to the intent handler
            reply = message.reply(intent.get('intent_type'), intent)
        else:
            # Allow fallback system to handle utterance
            # NOTE: Padatious intents are handled this way, too
            reply = message.reply('intent_failure', {
                'utterance': utterances[0],
                'lang': lang
            })
        self.emitter.emit(reply)
        self.send_metrics(intent, message.context, stopwatch)
    except Exception as e:
        LOG.exception(e)
def initialize(): nonlocal instance, complete try: clazz = HotWordFactory.CLASSES[module] instance = clazz(hotword, config, lang=lang) except TriggerReload: complete.set() sleep(0.5) loop.reload() except Exception: LOG.exception( 'Could not create hotword. Falling back to default.') instance = None complete.set()
def save_phonemes(self, key, phonemes):
    """Write a phoneme string into the TTS cache.

    Arguments:
        key: Hash key for the sentence
        phonemes: phoneme string to save
    """
    pho_file = os.path.join(
        get_cache_directory("tts/" + self.tts_name), key + ".pho")
    try:
        with open(pho_file, "w") as cachefile:
            cachefile.write(json.dumps(phonemes))
    except Exception:
        LOG.exception("Failed to write {} to cache".format(pho_file))
def send_skill_list(self, message=None):
    """ Send list of loaded skills. """
    try:
        # Map skill directory basename -> {active, id}
        info = {
            basename(path): {
                'active': entry.get('active', True),
                'id': entry['id']
            }
            for path, entry in self.loaded_skills.items()
        }
        self.ws.emit(Message('mycroft.skills.list', data=info))
    except Exception as e:
        LOG.exception(e)
def create_hotword(hotword="hey mycroft", config=None, lang="en-us"):
    """Create a hotword engine for the given hotword name.

    Args:
        hotword (str): hotword name, used to look up its config section
        config (dict): "hotwords" configuration; loaded from the main
            Configuration if not provided
        lang (str): language code for the hotword engine

    Returns:
        HotWordEngine instance; falls back to pocketsphinx on failure.
    """
    LOG.info("creating " + hotword)
    if not config:
        config = Configuration.get().get("hotwords", {})
    # BUG FIX: config.get(hotword) returns None when the hotword has no
    # config section, and None.get(...) raised AttributeError before the
    # try block below could catch anything.  Default to {} instead.
    module = config.get(hotword, {}).get("module", "pocketsphinx")
    config = config.get(hotword, {"module": module})
    clazz = HotWordFactory.CLASSES.get(module)
    try:
        # clazz is None for an unknown module; the resulting TypeError is
        # handled by the fallback path below.
        return clazz(hotword, config, lang=lang)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        LOG.exception('Could not create hotword. Falling back to default.')
        return HotWordFactory.CLASSES['pocketsphinx']()
def run(self):
    """Thread main loop. Get audio and extra data from queue and play.

    The queue message is a tuple containing
        snd_type: 'mp3' or 'wav' telling the loop what format the data is in
        data: path to temporary audio data
        visemes: list of visemes to display while playing
        listen: if listening should be triggered at the end of the sentence.

    Playback of audio is started and the visemes are sent over the bus;
    the loop then waits for the playback process to finish before checking
    the next position in the queue.

    If the queue is empty tts.end_audio() is called, possibly triggering
    listening.
    """
    while not self._terminated:
        # BUG FIX: give `listen` a safe default so the generic exception
        # handler below can call end_audio() even when the failure occurs
        # before (or during) unpacking of the queue entry — previously
        # that path raised NameError.
        listen = False
        try:
            (snd_type, data,
             visemes, ident, listen) = self.queue.get(timeout=2)
            self.blink(0.5)
            if not self._processing_queue:
                self._processing_queue = True
                self.tts.begin_audio()

            stopwatch = Stopwatch()
            with stopwatch:
                if snd_type == 'wav':
                    self.p = play_wav(data, environment=self.pulse_env)
                elif snd_type == 'mp3':
                    self.p = play_mp3(data, environment=self.pulse_env)
                if visemes:
                    self.show_visemes(visemes)
                if self.p:
                    # communicate() already waits for process exit; the
                    # extra wait() is a harmless no-op kept for safety.
                    self.p.communicate()
                    self.p.wait()
            report_timing(ident, 'speech_playback', stopwatch)

            if self.queue.empty():
                self.tts.end_audio(listen)
                self._processing_queue = False
            self.blink(0.2)
        except Empty:
            # Timed-out queue read; loop again and re-check _terminated
            pass
        except Exception as e:
            LOG.exception(e)
            if self._processing_queue:
                self.tts.end_audio(listen)
                self._processing_queue = False
def save_phonemes(self, key, phonemes):
    """ Cache phonemes

    Args:
        key: Hash key for the sentence
        phonemes: phoneme string to save
    """
    target = os.path.join(get_cache_directory("tts/" + self.tts_name),
                          key + ".pho")
    try:
        with open(target, "w") as out:
            out.write(json.dumps(phonemes))
    except Exception:
        LOG.exception("Failed to write {} to cache".format(target))
def _register_intent_services(bus):
    """Start up the all intent services and connect them as needed.

    Arguments:
        bus: messagebus client to register the services on
    """
    # Fallback handler can be created independently of the intent service
    fallback_handler = FallbackSkill.make_intent_failure_handler(bus)
    intent_service = IntentService(bus)
    try:
        PadatiousService(bus, intent_service)
    except Exception as e:
        LOG.exception('Failed to create padatious handlers '
                      '({})'.format(repr(e)))
    # Register handler to trigger fallback system
    bus.on('intent_failure', fallback_handler)
def _unload_removed_skills(self):
    """Shutdown and forget skills whose directory was removed from disk."""
    skill_dirs = self._get_skill_directories()
    # Find loaded skills that don't exist on disk
    removed_skills = [
        s for s in self.skill_loaders.keys() if s not in skill_dirs
    ]
    for skill_dir in removed_skills:
        skill = self.skill_loaders[skill_dir]
        LOG.info('removing {}'.format(skill.skill_id))
        try:
            skill.unload()
        except Exception:
            # BUG FIX: was `skill.id` — the attribute is `skill_id` (used
            # in the info log above), so the handler itself raised
            # AttributeError instead of logging the failure.
            LOG.exception(
                'Failed to shutdown skill {}'.format(skill.skill_id))
        del self.skill_loaders[skill_dir]
def send_skill_list(self, message=None):
    """ Send list of loaded skills. """
    try:
        info = {}
        for path, entry in self.loaded_skills.items():
            # A skill is only reported active when it is flagged active
            # AND its instance has actually been created
            is_active = (entry.get('active', True) and
                         entry.get('instance') is not None)
            info[basename(path)] = {
                'active': is_active,
                'id': entry['id']
            }
        self.bus.emit(Message('mycroft.skills.list', data=info))
    except Exception as e:
        LOG.exception(e)
def on_message(self, message):
    """Handle an incoming websocket message.

    Deserialize and dispatch it on the local emitter, then forward the
    raw message to every connected client.
    """
    try:
        parsed = Message.deserialize(message)
    except:  # noqa: E722 -- malformed payloads are deliberately dropped
        return
    try:
        self.emitter.emit(parsed.type, parsed)
    except Exception as e:
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)
    # Echo the untouched message to all clients regardless of dispatch
    for client in client_connections:
        client.write_message(message)
def save_phonemes(self, key, phonemes):
    """ Cache phonemes

    Args:
        key: Hash key for the sentence
        phonemes: phoneme string to save
    """
    pho_path = os.path.join(
        mycroft.util.get_cache_directory("tts"), key + ".pho")
    try:
        with open(pho_path, "w") as cachefile:
            cachefile.write(phonemes)
    except Exception:
        LOG.exception("Failed to write {} to cache".format(pho_path))
def _unload_removed(self, paths):
    """ Shutdown removed skills.

    Arguments:
        paths: list of current directories in the skills folder
    """
    normalized = [p.rstrip('/') for p in paths]
    # Loaded skills whose directory no longer exists on disk
    gone = [str(key) for key in self.loaded_skills
            if str(key) not in normalized]
    for name in gone:
        LOG.info('removing {}'.format(name))
        try:
            entry = self.loaded_skills[name]
            LOG.debug('Removing: {}'.format(entry))
            entry['instance'].default_shutdown()
        except Exception as e:
            LOG.exception(e)
        self.loaded_skills.pop(name)
def __switch_page(self, namespace, pages):
    """ Switch page to an already loaded page.

    Args:
        namespace (str): skill namespace
        pages (list): pages (str) to switch to
    """
    try:
        page_index = self.loaded[0].pages.index(pages[0])
    except Exception as e:
        # Page not found (or nothing loaded): fall back to first page
        LOG.exception(repr(e))
        page_index = 0
    LOG.debug('Switching to already loaded page at '
              'index {} in namespace {}'.format(page_index, namespace))
    event = {"type": "mycroft.events.triggered",
             "namespace": namespace,
             "event_name": "page_gained_focus",
             "data": {"number": page_index}}
    self.send(event)
def initialize(): nonlocal instance, complete try: clazz = HotWordFactory.CLASSES[module] instance = clazz(hotword, config, lang=lang) except TriggerReload: complete.set() sleep(0.5) loop.reload() except NoModelAvailable: LOG.warning('Could not found find model for {} on {}.'.format( hotword, module )) instance = None except Exception: LOG.exception( 'Could not create hotword. Falling back to default.') instance = None complete.set()