def _clear_gui_timeout(self):
    """
    Scheduled-event handler that resets the GUI after a display timeout.
    """
    LOG.info("Reset GUI!")
    self.gui.clear()
def translate(self, text, target=None, source="auto"):
    """
    Translate text into the requested language via the `translate` package.

    :param text: string to translate
    :param target: target language code (defaults to skill's internal language)
    :param source: source language code (currently unused by the backend call)
    :return: translated string
    """
    from translate import Translator
    dest_lang = target or self.internal_language
    # TODO: This doesn't appear to work DM
    result = Translator(to_lang=dest_lang).translate(text)
    LOG.info(result)
    return result
def _write_line(data, file): try: with open(file, 'a+') as to_write: writer = csv.writer(to_write) writer.writerow(data) except IOError as e: LOG.error(e)
def get_stock_quote(symbol: str, **kwargs) -> dict:
    """
    Queries FMP for stock information for the specified company

    :param symbol: Stock ticker symbol
    :param kwargs: 'api_key' - optional str api_key to use for query
        (None to force remote lookup)
    :return: dict stock data
    """
    default_key = AUTH_CONFIG.get("financial_modeling_prep",
                                  {}).get("api_key")
    api_key = kwargs.get("api_key", default_key)
    if api_key:
        # Query FMP directly when we hold an API key
        encoded = urllib.parse.urlencode({"apikey": api_key})
        resp = query_fmp_api(
            "https://financialmodelingprep.com/api/v3/company/profile/"
            f"{symbol}?{encoded}")
    else:
        # No key available; route the request through the Neon API proxy
        resp = request_neon_api(NeonAPI.FINANCIAL_MODELING_PREP,
                                {**kwargs, "api": "quote"})
    data = json.loads(resp["content"])
    if data.get("Information"):
        LOG.warning(data.get("Information"))
        # TODO: Handle API Errors DM
    return data.get("profile")
def speak_dialog(self, key, data=None, expect_response=False, wait=False,
                 message=None, private=False, speaker=None):
    """
    Speak a random sentence from a dialog file.

    :param key: dialog file key (e.g. "hello" to speak from the file
        "locale/en-us/hello.dialog")
    :param data: information used to populate key
    :param expect_response: set to True if Mycroft should listen for a
        response immediately after speaking
    :param wait: set to True to block while the text is being spoken
    :param message: associated message from request
    :param private: private flag (server use only)
    :param speaker: optional dict of speaker info to use
    """
    data = data or {}
    LOG.debug(f"data={data}")
    # TODO: Pass index here to use non-random responses DM
    utterance = self.dialog_renderer.render(key, data)
    self.speak(utterance,
               expect_response,
               message=message,
               private=private,
               speaker=speaker,
               wait=wait,
               meta={'dialog': key, 'data': data})
def __init__(self, results_event, config=None):
    """
    Initialize a local DeepSpeech (0.8.1) streaming STT engine.

    :param results_event: Event set when transcription results are ready
    :param config: optional dict of STT configuration
    :raises ValueError: if the configured language is not English
    :raises FileNotFoundError: if no valid model file can be located
    """
    super(DeepSpeechLocalStreamingSTT,
          self).__init__(results_event, config)
    # override language with module specific language selection
    self.language = self.config.get('lang') or self.lang
    self.queue = None
    if not self.language.startswith("en"):
        raise ValueError("DeepSpeech is currently english only")

    model_path = self.config.get("model_path") or \
        os.path.expanduser(
            "~/.local/share/neon/deepspeech-0.8.1-models.pbmm")
    scorer_path = self.config.get("scorer_path") or \
        os.path.expanduser(
            "~/.local/share/neon/deepspeech-0.8.1-models.scorer")
    if not os.path.isfile(model_path):
        LOG.error("You need to provide a valid model file")
        LOG.error(model_path)
        LOG.info(
            "download a model from https://github.com/mozilla/DeepSpeech")
        # include the offending path in the exception for easier debugging
        raise FileNotFoundError(model_path)
    if not scorer_path or not os.path.isfile(scorer_path):
        LOG.warning("You should provide a valid scorer")
        LOG.info(
            "download scorer from https://github.com/mozilla/DeepSpeech")
        # don't pass a missing scorer file to DeepSpeech
        scorer_path = None

    self.client = deepspeech.Model(model_path)
    if scorer_path:
        self.client.enableExternalScorer(scorer_path)
def schedule_event(self, handler, when, data=None, name=None, context=None):
    """
    Schedule an event, accepting a numeric `when` as seconds from now.

    :param handler: method to call at the scheduled time
    :param when: datetime to run at, or int/float seconds from now
    :param data: data to pass to the handler
    :param name: optional event name
    :param context: optional message context
    """
    # TODO: should 'when' already be a datetime? DM
    if isinstance(when, (int, float)):
        from datetime import datetime as dt, timedelta
        when = to_system_time(dt.now(self.sys_tz)) + \
            timedelta(seconds=when)
        LOG.info(f"Made a datetime: {when}")
    super().schedule_event(handler, when, data, name, context)
def _do_net_check(self):
    """
    Check internet connectivity; prompt for network setup or start the
    out-of-box onboarding flow when the device is offline.
    """
    # TODO: This should live in the derived Enclosure, e.g. EnclosureMark1
    LOG.info("Checking internet connection")
    if connected():  # and self.conn_monitor is None:
        return
    if has_been_paired():
        # TODO: Enclosure/localization
        self.speak("This unit is not connected to the Internet. "
                   "Either plug in a network cable or setup your "
                   "wifi connection.")
        return
    # Begin the unit startup process, this is the first time it
    # is being run with factory defaults.
    # TODO: This logic should be in EnclosureMark1
    # TODO: Enclosure/localization

    # Don't listen to mic during this out-of-box experience
    self.bus.emit(Message("mycroft.mic.mute"))
    # Setup handler to unmute mic at the end of on boarding
    # i.e. after pairing is complete
    self.bus.once('mycroft.paired', self._handle_pairing_complete)

    self.speak(mycroft.dialog.get('mycroft.intro'))
    wait_while_speaking()
    time.sleep(2)  # a pause sounds better than just jumping in

    # Kick off wifi-setup automatically
    data = {'allow_timeout': False, 'lang': self.lang}
    self.bus.emit(Message('system.wifi.setup', data))
def neon_must_respond(self, message):
    """
    Checks if Neon must respond to an utterance (i.e. a server request)

    :param message: Message associated with the utterance
    :return: True if Neon is required to respond to this message
    """
    if self.server:
        title = message.context.get("klat_data", {}).get("title", "")
        LOG.debug(message.data.get("utterance"))
        if message.data.get("utterance").startswith(
                "Welcome to your private conversation"):
            return False
        if title.startswith("!PRIVATE:"):
            if ',' in title:
                users = title.split(':')[1].split(',')
                for idx, val in enumerate(users):
                    users[idx] = val.strip()
                if len(users) == 2 and "Neon" in users:
                    # Private with Neon
                    # LOG.debug("DM: Private Conversation with Neon")
                    return True
                # Fix: `startsWith` is not a Python str method; it raised
                # AttributeError whenever this branch was reached
                elif message.data.get(
                        "utterance").lower().startswith("neon"):
                    # Message starts with "neon", must respond
                    return True
            else:
                # Solo Private
                return True
    return False
def neon_in_request(self, message):
    """
    Checks if the utterance is intended for Neon. Server utilizes current
    conversation, otherwise wake-word status and message "Neon" parameter
    used.

    :param message: Message associated with the utterance
    :return: True if this request addresses Neon
    """
    if not is_neon_core():
        return True

    if message.context.get("neon_should_respond", False):
        return True
    elif message.data.get("Neon") or message.data.get("neon"):
        return True
    elif not self.server and self.local_config.get(
            "interface", {}).get("wake_word_enabled", True):
        return True
    # `title` may be missing from klat_data; default to "" so
    # `startswith` cannot raise AttributeError on None
    elif self.server and message.context.get(
            "klat_data", {}).get("title", "").startswith("!PRIVATE"):
        return True
    else:
        try:
            voc_match = self.voc_match(message.data.get("utterance"),
                                       "neon")
            if voc_match:
                return True
        except FileNotFoundError:
            LOG.error("No neon vocab found!")
        if "neon" in message.data.get("utterance").lower():
            return True
        LOG.debug("No Neon")
        return False
def converse(self, message=None):
    """
    Handle yes/no confirmation responses for pending caffeine actions.

    :param message: Message associated with the utterance
    :return: True if this skill handled the utterance
    """
    user = self.get_utterance_user(message)
    LOG.debug(self.actions_to_confirm)
    if user not in self.actions_to_confirm.keys():
        return False
    result = self.check_yes_no_response(message)
    if result == -1:
        # This isn't a response, ignore it
        return False
    if result:
        # User said yes
        LOG.info(self.results)
        self._get_drink_text(message)
        # self.speak(self._get_drink_text())
        # self.speak("Provided by CaffeineWiz.")
        self.speak("Provided by CaffeineWiz. Stay caffeinated!")
    else:
        # User said no
        if self.local_config.get("interface",
                                 {}).get("wake_word_enabled", True):
            self.speak_dialog("HowAboutMore", expect_response=True)
            self.enable_intent('CaffeineContentGoodbyeIntent')
            self.request_check_timeout(self.default_intent_timeout,
                                       'CaffeineContentGoodbyeIntent')
        else:
            self.speak_dialog("StayCaffeinated")
    return True
def _get_drink_text(self, message, caff_list=None):
    """
    Speak caffeine content for each unique drink in caff_list.

    :param message: Message associated with the request
    :param caff_list: optional list of (name, oz, caffeine_mg) rows;
        defaults to self.results
    """
    if not caff_list:
        caff_list = self.results
    LOG.info(caff_list)
    # The user's unit preference is loop-invariant; look it up once
    units = self.preference_unit(message)['measure']
    spoken = []
    for row in caff_list:
        drink = row[0]
        if drink in spoken:
            continue
        oz = float(row[1])
        caffeine = float(row[2])
        if units == "metric":
            caff_mg, caff_vol, drink_units = self.convert_metric(
                oz, caffeine)
        else:
            caff_mg = str(caffeine)
            caff_vol = str(oz)
            drink_units = 'ounces'
        self.speak_dialog(
            'MultipleCaffeine', {
                'drink': drink,
                'caffeine_content': caff_mg,
                'caffeine_units': self.translate('milligrams'),
                'drink_size': caff_vol,
                'drink_units': drink_units
            })
        spoken.append(drink)
        sleep(0.5)  # Prevent simultaneous speak inserts
def repo_is_neon(repo_url: str) -> bool:
    """
    Determines if the specified repository url is part of the NeonGeckoCom
    org on github

    Args:
        repo_url: string url to check
    Returns:
        True if the repository URL is known to be accessible using a neon
        auth key
    Raises:
        ValueError: if repo_url is not a valid (github) url
    """
    url = urlparse(repo_url)
    if not url.scheme or not url.netloc:
        raise ValueError(f"{repo_url} is not a valid url")
    # generator form avoids building an intermediate list for `any`
    if any(x in url.netloc
           for x in ("github.com", "githubusercontent.com")):
        try:
            author = url.path.split('/')[1]
        except IndexError:
            raise ValueError(f"{repo_url} is not a valid github url")
        if author.lower() == "neongeckocom":
            return True
        elif author.lower().startswith(
                "neon"):  # TODO: Get token and scrape org? DM
            LOG.info(f"Assuming repository uses Neon auth: {repo_url}")
            return True
    return False
def CQS_match_query_phrase(self, utt, message=None):
    """
    Common Query handler: attempt to match a caffeine-content question.

    :param utt: string utterance to evaluate
    :param message: Message associated with the request
    :return: (utt, confidence, spoken response, callback data) tuple,
        or None if this skill can't answer
    """
    # Pull the drink name out of phrases like "caffeine of X" / "... in X"
    if " of " in utt:
        drink = utt.split(" of ", 1)[1]
    elif " in " in utt:
        drink = utt.split(" in ", 1)[1]
    else:
        drink = utt
    drink = self._clean_drink_name(drink)
    if not drink:
        return None

    if not self._drink_in_database(drink):
        to_speak = self.dialog_renderer.render("NotFound",
                                               {"drink": drink})
        if not self.voc_match(utt, "caffeine"):
            return None
        conf = CQSMatchLevel.CATEGORY
        return utt, conf, to_speak, \
            {"user": self.get_utterance_user(message)}

    try:
        to_speak = self._generate_drink_dialog(drink, message)
        if self.voc_match(utt, "caffeine"):
            conf = CQSMatchLevel.EXACT
        elif drink.lower() in to_speak.lower().split():
            # If the exact drink name was matched, but caffeine not
            # requested, consider this a general match
            conf = CQSMatchLevel.GENERAL
        else:
            # We didn't match "caffeine" or an exact drink name, this
            # request isn't for this skill
            return None
    except Exception as e:
        LOG.error(e)
        LOG.error(drink)
        return None
    return utt, conf, to_speak, {"user": self.get_utterance_user(message)}
def handle_get_stt(self, message: Message):
    """
    Handles a request for stt.
    Emits a response to the sender with stt data or error data

    :param message: Message associated with request
    """
    if message.data.get("audio_data"):
        wav_file_path = self._write_encoded_file(
            message.data.pop("audio_data"))
    else:
        wav_file_path = message.data.get("audio_file")
    lang = message.data.get("lang")
    ident = message.context.get("ident") or "neon.get_stt.response"
    LOG.info(f"Handling STT request: {ident}")
    if not wav_file_path:
        self.bus.emit(message.reply(
            ident, data={"error": "audio_file not specified!"}))
        return
    if not os.path.isfile(wav_file_path):
        self.bus.emit(message.reply(
            ident, data={"error": f"{wav_file_path} Not found!"}))
        # Fix: without this return, we fell through and emitted a second
        # (error) reply when transcription of the missing file failed
        return
    try:
        _, parser_data, transcriptions = \
            self._get_stt_from_file(wav_file_path, lang)
        self.bus.emit(message.reply(ident,
                                    data={"parser_data": parser_data,
                                          "transcripts": transcriptions}))
    except Exception as e:
        LOG.error(e)
        self.bus.emit(message.reply(ident, data={"error": repr(e)}))
def run(self):
    """
    Pulse LED brightness up and down until exit_flag is set, then
    restore full brightness and blank the LEDs.
    """
    LOG.debug("pulse thread started")
    # Paint all ten LEDs with the configured color
    self.tmp_leds = [self.color_tup for _ in range(10)]
    self.led_obj.brightness = self.brightness / 100
    self.led_obj.set_leds(self.tmp_leds)

    while not self.exit_flag:
        proposed = self.brightness + self.step_size
        if proposed > 100 or proposed < 0:
            # Bounce off either end of the range and reverse direction
            self.brightness = self.brightness - self.step_size
            self.step_size = self.step_size * -1
        else:
            self.brightness = proposed
        self.led_obj.brightness = self.brightness / 100
        self.led_obj.set_leds(self.tmp_leds)
        time.sleep(self.delay)

    LOG.debug("pulse thread stopped")
    self.led_obj.brightness = 1.0
    self.led_obj.fill(self.pal_obj.BLACK)
def await_confirmation(self, user, actions, timeout=None):
    """
    Used to add an action for which to await a response (note: this will
    disable skill reload when called and enable on timeout)

    :param user: username ("local" for non-server)
    :param actions: string action name (or list of action names) we are
        confirming, handled in skill's converse method
    :param timeout: duration to wait in seconds before removing the
        action from the list
    """
    from datetime import datetime as dt, timedelta
    self.reload_skill = False
    actions = [actions] if isinstance(actions, str) else actions
    self.actions_to_confirm[user] = actions
    expiration = dt.now(self.sys_tz) + timedelta(
        seconds=timeout or self.default_intent_timeout)
    self.cancel_scheduled_event(user)
    time.sleep(1)
    self.schedule_event(self._confirmation_timeout,
                        to_system_time(expiration),
                        data={"user": user, "action": actions},
                        name=user)
    LOG.debug(f"Scheduled {user}")
def build_combined_skill_object(self, message=None) -> list:
    """
    Build a list of per-skill settings from the requesting user's profile.

    :param message: Message associated with the request
    :return: list of skill settings dicts
    """
    # TODO: Deprecated? DM
    # Fix: log message said "depreciated" (financial term) instead of
    # "deprecated"
    LOG.error("This method is deprecated!")
    user = self.get_utterance_user(message)
    skill_dict = message.context["nick_profiles"][user]["skills"]
    return list(skill_dict.values())
def _load_config(self):
    """
    Load configuration parameters from configuration and initialize
    self.microphone, self.responsive_recognizer
    """
    # self.config_core = self._init_config_core or Configuration.get()
    self.config_core = Configuration.get()
    self.config = self.config_core.get('listener')
    self._config_hash = recognizer_conf_hash(self.config_core)
    self.lang = self.config_core.get('lang')

    sample_rate = self.config.get('sample_rate')
    device_index = (self.config.get('device_index') or
                    self.config.get("dev_index"))
    device_name = self.config.get('device_name')
    if not device_index and device_name:
        device_index = find_input_device(device_name)
    LOG.debug('Using microphone (None = default): ' + str(device_index))

    self.microphone = MutableMicrophone(device_index, sample_rate,
                                        mute=self.mute_calls > 0)
    self.create_hotword_engines()
    self.state = RecognizerLoopState()
    self.responsive_recognizer = NeonResponsiveRecognizer(self)
def update_profile(self, new_preferences: dict, message: Message = None):
    """
    Updates a user profile with the passed new_preferences

    :param new_preferences: dict of updated preference values.
        Should follow {section: {key: val}} format
    :param message: Message associated with request
    """
    if self.server:
        nick = get_message_user(message) if message else None
        new_skills_prefs = new_preferences.pop("skills")
        old_skills_prefs = message.context["nick_profiles"][nick]["skills"]
        combined_skill_prefs = {**old_skills_prefs, **new_skills_prefs}
        combined_changes = {k: v for dic in new_preferences.values()
                            for k, v in dic.items()}
        if new_skills_prefs:
            combined_changes["skill_settings"] = \
                json.dumps(list(combined_skill_prefs.values()))
            new_preferences["skills"] = combined_skill_prefs
            LOG.debug(f"combined_skill_prefs={combined_skill_prefs}")
        combined_changes["username"] = nick
        self.socket_emit_to_server(
            "update profile",
            ["skill", combined_changes,
             message.context["klat_data"]["request_id"]])
        self.bus.emit(Message("neon.remove_cache_entry", {"nick": nick}))
        old_preferences = message.context["nick_profiles"][nick]
        message.context["nick_profiles"][nick] = {**old_preferences,
                                                  **new_preferences}
    else:
        # Fix: iterating the dicts directly yields only keys and raised
        # ValueError on tuple unpacking; iterate .items() instead
        for section, settings in new_preferences.items():
            # section in user, brands, units, etc.
            for key, val in settings.items():
                self.user_config[section][key] = val
        self.user_config.write_changes()
def process(self, audio, context=None):
    """
    Transcribe captured audio and emit the transcription on the bus.

    :param audio: captured audio to transcribe (may be None)
    :param context: optional dict of audio context
    """
    # NOTE: in the parent class context is a string for lang
    # in neon we pass a dict around instead
    context = context or {}
    lang = context.get("lang") or self.loop.stt.lang
    if audio is None:
        return
    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warning("Audio too short to be processed")
        return
    stopwatch = Stopwatch()
    with stopwatch:
        transcription = self.transcribe(audio, lang)
    if not transcription:
        return
    if isinstance(transcription, str):
        transcription = [transcription]
    ident = str(stopwatch.timestamp) + str(hash(transcription[0]))
    # STT succeeded, send the transcribed speech on for processing
    payload = {
        'utterances': transcription,
        'lang': lang,
        'ident': ident,
        'context': context
    }
    self.loop.emit("recognizer_loop:utterance", payload)
def voc_match(self, utt, voc_filename, lang=None, exact=False):
    """
    Determine if the given utterance contains the vocabulary provided,
    falling back to vocab bundled in neon_core when the skill's own
    vocab file is missing.

    :param utt: utterance to test
    :param voc_filename: name of the .voc file (without extension)
    :param lang: optional language override (defaults to self.lang)
    :param exact: if True, require the whole utterance to equal a vocab
        entry; otherwise match vocab entries as complete words
    :return: True if a vocab entry matches the utterance
    :raises FileNotFoundError: if no vocab file is found in the skill or
        in neon_core resources
    """
    # TODO: Handles bug to be addressed in: https://github.com/OpenVoiceOS/ovos_utils/issues/73
    try:
        return super().voc_match(utt, voc_filename, lang, exact)
    except FileNotFoundError:
        LOG.info(f"`{voc_filename}` not found, checking in neon_core")
        from mycroft.skills.skill_data import read_vocab_file
        from neon_utils.packaging_utils import get_core_root
        from itertools import chain
        import re
        lang = lang or self.lang
        # Look for the vocab resource bundled with the core instead
        voc = os.path.join(get_core_root(), "neon_core", "res", "text",
                           lang, f"{voc_filename}.voc")
        if not os.path.exists(voc):
            raise FileNotFoundError(voc)
        vocab = read_vocab_file(voc)
        # Cache the flattened vocab list, keyed by lang + filename, so
        # repeated lookups skip the file read
        cache_key = lang + voc_filename
        self.voc_match_cache[cache_key] = list(chain(*vocab))
        if utt:
            if exact:
                # Check for exact match
                return any(i.strip() == utt
                           for i in self.voc_match_cache[cache_key])
            else:
                # Check for matches against complete words
                return any([re.match(r'.*\b' + i + r'\b.*', utt)
                            for i in self.voc_match_cache[cache_key]])
        else:
            return False
def __init__(self, results_event, config=None):
    """
    Initialize a local DeepSpeech (0.9.3) streaming STT engine,
    accommodating both older and newer parent-class signatures.

    :param results_event: Event set when transcription results are ready
    :param config: optional dict of STT configuration (ignored when the
        parent class doesn't accept it)
    :raises ValueError: if the configured language is not English
    """
    # The parent __init__ signature changed across versions; only pass
    # our arguments through when the parent accepts exactly two params
    if len(
            signature(super(DeepSpeechLocalStreamingSTT,
                            self).__init__).parameters) == 2:
        super(DeepSpeechLocalStreamingSTT,
              self).__init__(results_event, config)
    else:
        LOG.warning(
            f"Shorter Signature Found; config will be ignored and results_event will not be handled!"
        )
        super(DeepSpeechLocalStreamingSTT, self).__init__()
        self.results_event = None

    # override language with module specific language selection
    self.language = self.config.get('lang') or self.lang
    self.queue = None
    if not self.language.startswith("en"):
        raise ValueError("DeepSpeech is currently english only")

    model_path = self.config.get("model_path") or \
        os.path.expanduser("~/.local/share/neon/deepspeech-0.9.3-models.pbmm")
    scorer_path = self.config.get("scorer_path") or \
        os.path.expanduser("~/.local/share/neon/deepspeech-0.9.3-models.scorer")
    if not os.path.isfile(model_path):
        # Missing model is recoverable here: fetch it before loading
        LOG.error("Model not found and will be downloaded!")
        LOG.error(model_path)
        get_model()
    self.client = deepspeech.Model(model_path)
    if not scorer_path or not os.path.isfile(scorer_path):
        LOG.warning("You should provide a valid scorer")
        LOG.info(
            "download scorer from https://github.com/mozilla/DeepSpeech")
    else:
        self.client.enableExternalScorer(scorer_path)
def __init__(self, name=None, bus=None, use_settings=True):
    """
    Initialize a NeonSkill: load user/local NGI configuration, set up
    caching, settings, server mode, and language support.

    :param name: skill name
    :param bus: messagebus connection
    :param use_settings: if True, initialize skill settings
    """
    self.user_config = NGIConfig("ngi_user_info")
    self.local_config = NGIConfig("ngi_local_conf")

    self.ngi_settings: Optional[NGIConfig] = None
    super(NeonSkill, self).__init__(name, bus, use_settings)

    self.cache_loc = os.path.expanduser(
        self.local_config.get('dirVars', {}).get('cacheDir') or
        "~/.local/share/neon/cache")
    self.lru_cache = LRUCache()

    # TODO: Deprecate these references, signal use is discouraged DM
    self.create_signal = create_signal
    self.check_for_signal = check_for_signal

    self.sys_tz = gettz()
    self.gui_enabled = self.configuration_available.get(
        "prefFlags", {}).get("guiEvents", False)

    if use_settings:
        self.settings = {}
        self._initial_settings = None
        self.init_settings()
    else:
        LOG.error(f"{name} Skill requested no settings!")
        self.settings = None

    self.scheduled_repeats = []

    # Server-specific imports and timeout setting
    # A server is a device that hosts the core and skills to serve clients,
    # but that a user will not interact with directly.
    # A server will likely serve multiple users and devices concurrently.
    if self.configuration_available.get(
            "devVars", {}).get("devType", "generic") == "server":
        self.server = True
        self.default_intent_timeout = 90
    else:
        self.server = False
        self.default_intent_timeout = 60

    self.neon_core = True
    self.actions_to_confirm = dict()

    self.skill_mode = self.user_config.content.get('response_mode').get(
        'speed_mode') or DEFAULT_SPEED_MODE
    self.extension_time = SPEED_MODE_EXTENSION_TIME.get(self.skill_mode)

    try:  # Lang support
        self.language_config = get_neon_lang_config()
        self.lang_detector = DetectorFactory.create()  # Default fastlang
        self.translator = TranslatorFactory.create()  # Default Amazon
    except Exception as e:
        LOG.error(e)
        # Fix: the success path assigns `lang_detector`, but the failure
        # path previously cleared `language_detector`, leaving
        # `self.lang_detector` undefined after an init failure
        self.language_config, self.lang_detector, self.translator = \
            None, None, None
def __init__(self):
    """
    Initialize a fastlang-based language detector.

    :raises ImportError: if the `fastlang` package is not installed
    """
    super().__init__()
    try:
        from fastlang import fastlang
    except ImportError:
        LOG.error("Run pip install fastlang")
        raise
    # Bind the detection callable used by the base class
    self._detect = fastlang
def handle_action(self):
    """
    Handle a press of the Mark2 action button, debounced to 10 seconds.
    """
    LOG.debug("Mark2:HardwareEnclosure:handle_action()")
    # debounce this 10 seconds
    if time.time() - self.last_action <= 10:
        return
    self.last_action = time.time()
    if self.overide_action is None:
        create_signal('buttonPress')
    else:
        self.overide_action()
def handle_vol_up(self):
    """
    Raise the hardware volume by one increment, capped at max_volume.
    """
    self.shadow_volume = self.hardware_volume.get_volume()
    LOG.debug("Mark2:HardwareEnclosure:handle_vol_up()-was %s" %
              (self.shadow_volume))
    if self.shadow_volume >= self.max_volume:
        return
    self.shadow_volume += self.volume_increment
    self.hardware_volume.set_volume(self.shadow_volume)
    self.show_volume(self.shadow_volume)
def terminate(self):
    """
    Shut down enclosure hardware: blank the LEDs and stop the switch
    polling thread.
    """
    LOG.info("Mark2:HardwareEnclosure:terminate()")
    self.cancel_watchdog()
    self.leds.fill(self.palette.BLACK)
    self.switches.terminate()
    self.switches._running = False
    handle = self.switches.thread_handle
    if handle is not None:
        handle.join()
def __init__(self):
    """
    Initialize a langdetect-based language detector.

    :raises ImportError: if the `langdetect` package is not installed
    """
    super().__init__()
    try:
        from langdetect import detect, detect_langs
    except ImportError:
        LOG.error("Run pip install langdetect")
        raise
    # Bind the detection callables used by the base class
    self._detect = detect
    self._detect_prob = detect_langs
def skill_needs_patching(skill):
    """
    Determines if the passed skill is running under a non-Neon core and
    needs to be patched for compatibility

    :param skill: MycroftSkill object to test
    :return: True if skill needs to be patched
    """
    # Fix: message said "depreciated" instead of "deprecated"
    LOG.warning("This method is deprecated. Please update your skill to "
                "extend NeonSkill instead.")
    return not hasattr(skill, "neon_core")