def initialize(self):
    """Perform skill setup.

    The default implementation does nothing; Skills override this to
    create their intent rules and register them.
    """
    LOG.debug("No initialize function implemented")
def _load_config(self):
    """Load listener configuration parameters from the global config.

    Caches a hash of the raw configuration so the running loop can
    detect configuration changes, then (re)creates the microphone and
    the wake word / wakeup / responsive recognizers.
    """
    config = Configuration.get()
    self.config_core = config
    # Hash is compared elsewhere to detect config changes at runtime.
    self._config_hash = hash(str(config))
    self.lang = config.get('lang')
    self.config = config.get('listener')
    rate = self.config.get('sample_rate')

    device_index = self.config.get('device_index')
    device_name = self.config.get('device_name')
    # Bug fix: use 'is None' so an explicitly configured device_index
    # of 0 (a valid device) is not overridden by the name lookup.
    if device_index is None and device_name:
        device_index = find_input_device(device_name)

    LOG.debug('Using microphone (None = default): ' + str(device_index))

    self.microphone = MutableMicrophone(device_index, rate,
                                        mute=self.mute_calls > 0)
    # TODO:19.02 - channels are not been used, remove from mycroft.conf
    # and from code.
    self.microphone.CHANNELS = self.config.get('channels')
    self.wakeword_recognizer = self.create_wake_word_recognizer()
    # TODO - localization
    self.wakeup_recognizer = self.create_wakeup_recognizer()
    self.responsive_recognizer = ResponsiveRecognizer(
        self.wakeword_recognizer)
    self.state = RecognizerLoopState()
def set_active(skill_name):
    """Mark a skill as the one currently active on the display manager.

    Args:
        skill_name (str): name of the skill to record as active
    """
    data = {"active_skill": skill_name}
    _write_data(data)
    LOG.debug("Setting active skill to " + skill_name)
def _upload_file(self, filename):
    """Queue a wake word sample and scp all pending files to the server.

    Successfully transferred files are removed from the pending list and
    deleted locally; failed transfers stay queued for the next attempt.

    Args:
        filename (str): path of the recorded sample to upload
    """
    server = self.upload_config['server']
    keyfile = resolve_resource_file('wakeword_rsa')
    userfile = expanduser('~/.mycroft/wakeword_rsa')

    if not isfile(userfile):
        # Copy the bundled key into the user dir; ssh requires 0600.
        shutil.copy2(keyfile, userfile)
        os.chmod(userfile, 0o600)
        keyfile = userfile

    address = self.upload_config['user'] + '@' + \
        server + ':' + self.upload_config['folder']

    with self.upload_lock:
        self.filenames_to_upload.append(filename)
        remaining = []
        # Bug fix: the original deleted entries from the list while
        # enumerating it, which skips the element after each success.
        # Iterate normally and rebuild the pending list instead.
        for fn in self.filenames_to_upload:
            LOG.debug('Uploading ' + fn + '...')
            os.chmod(fn, 0o666)
            # NOTE(review): filename/config values are interpolated into
            # a shell command -- confirm they are trusted.
            cmd = 'scp -o StrictHostKeyChecking=no -P ' + \
                str(self.upload_config['port']) + ' -i ' + \
                keyfile + ' ' + fn + ' ' + address
            if os.system(cmd) == 0:
                os.remove(fn)
            else:
                LOG.debug('Could not upload ' + fn + ' to ' + server)
                remaining.append(fn)
        self.filenames_to_upload[:] = remaining
def get(phrase, lang=None, context=None):
    """ Looks up a resource file for the given phrase.

    If no file is found, the requested phrase is returned as the string.
    This will use the default language for translations.

    Args:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        from mycroft.configuration import Configuration
        lang = Configuration.get().get("lang")

    filename = "text/" + lang.lower() + "/" + phrase + ".dialog"
    template = resolve_resource_file(filename)
    if not template:
        # Fall back to the raw phrase when no resource exists
        LOG.debug("Resource file not found: " + filename)
        return phrase

    renderer = MustacheDialogRenderer()
    renderer.load_template_file("template", template)
    return renderer.render("template", context or {})
def _load():
    """Load the cached identity from disk, falling back to a blank one."""
    LOG.debug('Loading identity')
    try:
        with FileSystemAccess('identity').open('identity2.json', 'r') as f:
            stored = json.load(f)
        IdentityManager.__identity = DeviceIdentity(**stored)
    except Exception:
        # Missing or unreadable file: start with an empty identity
        IdentityManager.__identity = DeviceIdentity()
def transcribe(self, audio):
    """Run STT on an audio clip and emit the utterance on the bus.

    On a 401 from the backend the fixed phrase "pair my device" is
    injected to start the pairing flow.

    Args:
        audio: audio clip to pass to the STT engine
    """
    text = None
    try:
        # Invoke the STT engine on the audio clip
        text = self.stt.execute(audio).lower().strip()
        LOG.debug("STT: " + text)
    except sr.RequestError as e:
        LOG.error("Could not request Speech Recognition {0}".format(e))
    except ConnectionError as e:
        LOG.error("Connection Error: {0}".format(e))
        self.emitter.emit("recognizer_loop:no_internet")
    except HTTPError as e:
        if e.response.status_code == 401:
            text = "pair my device"  # phrase to start the pairing process
            LOG.warning("Access Denied at mycroft.ai")
        else:
            # Bug fix: non-401 HTTP errors were previously swallowed
            # silently; log them like the other transcribe variants do.
            LOG.error(e.__class__.__name__ + ': ' + str(e))
    except Exception as e:
        LOG.error(e)
        LOG.error("Speech Recognition could not understand audio")
    if text:
        # STT succeeded, send the transcribed speech on for processing
        payload = {
            'utterances': [text],
            'lang': self.stt.lang,
            'session': SessionManager.get().session_id
        }
        self.emitter.emit("recognizer_loop:utterance", payload)
        self.metrics.attr('utterances', [text])
def __insert_new_namespace(self, namespace, pages):
    """ Insert new namespace and pages.

    This first sends a message adding a new namespace at the highest
    priority (position 0 in the namespace stack).

    Args:
        namespace (str): The skill namespace to create
        pages (str): Pages to insert (name matches QML)
    """
    LOG.debug("Inserting new namespace")
    self.send({"type": "mycroft.session.list.insert",
               "namespace": "mycroft.system.active_skills",
               "position": 0,
               "data": [{"skill_id": namespace}]
               })

    # Load any already stored Data
    stored = self.datastore.get(namespace, {})
    for key in stored:
        self.send({"type": "mycroft.session.set",
                   "namespace": namespace,
                   "data": {key: stored[key]}})

    LOG.debug("Inserting new page")
    self.send({"type": "mycroft.gui.list.insert",
               "namespace": namespace,
               "position": 0,
               "data": [{"url": p} for p in pages]
               })
    # Make sure the local copy is updated
    self.loaded.insert(0, Namespace(namespace, pages))
def execute(self, sentence, ident=None):
    """ Convert sentence to speech, preprocessing out unsupported ssml.

    Results are cached on disk keyed by the md5 of the sentence.

    Args:
        sentence: Sentence to be spoken
        ident: Id reference to current interaction
    """
    sentence = self.validate_ssml(sentence)

    create_signal("isSpeaking")
    # Swap in phonetic spellings for any configured words
    if self.phonetic_spelling:
        for word in re.findall(r"[\w']+", sentence):
            lowered = word.lower()
            if lowered in self.spellings:
                sentence = sentence.replace(word, self.spellings[lowered])

    key = str(hashlib.md5(
        sentence.encode('utf-8', 'ignore')).hexdigest())
    wav_file = os.path.join(
        mycroft.util.get_cache_directory("tts"),
        key + '.' + self.audio_ext)

    if os.path.exists(wav_file):
        LOG.debug("TTS cache hit")
        phonemes = self.load_phonemes(key)
    else:
        wav_file, phonemes = self.get_tts(sentence, wav_file)
        if phonemes:
            self.save_phonemes(key, phonemes)

    vis = self.visime(phonemes)
    self.queue.put((self.audio_ext, wav_file, vis, ident))
def transcribe(self, audio):
    """Run STT on an audio clip and return the transcription.

    Returns:
        str: lower-cased, stripped transcription; the fixed phrase
            "pair my device" on a 401 from the backend; or None when
            recognition failed.

    Note: except for the generic-Exception branch (which returns
    immediately), the error handlers fall through to the connectivity
    check at the bottom, which tells the user whether the backend or
    the internet connection is down.
    """
    try:
        # Invoke the STT engine on the audio clip
        text = self.stt.execute(audio).lower().strip()
        LOG.debug("STT: " + text)
        return text
    except sr.RequestError as e:
        LOG.error("Could not request Speech Recognition {0}".format(e))
    except ConnectionError as e:
        LOG.error("Connection Error: {0}".format(e))
        self.emitter.emit("recognizer_loop:no_internet")
    except HTTPError as e:
        if e.response.status_code == 401:
            LOG.warning("Access Denied at mycroft.ai")
            # phrase to start the pairing process
            return "pair my device"
        else:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
    except RequestException as e:
        LOG.error(e.__class__.__name__ + ': ' + str(e))
    except Exception as e:
        self.emitter.emit('recognizer_loop:speech.recognition.unknown')
        if isinstance(e, IndexError):
            LOG.info('no words were transcribed')
        else:
            LOG.error(e)
        LOG.error("Speech Recognition could not understand audio")
        # Unknown failure: skip the connectivity hint below
        return None
    if connected():
        dialog_name = 'backend.down'
    else:
        dialog_name = 'not connected to the internet'
    self.emitter.emit('speak', {'utterance': dialog.get(dialog_name)})
def main():
    """Create and run the enclosure matching the configured platform."""
    # Read the system configuration
    system_config = LocalConf(SYSTEM_CONFIG)
    platform = system_config.get("enclosure", {}).get("platform")

    if platform == "mycroft_mark_1":
        LOG.debug("Creating Mark I Enclosure")
        from mycroft.client.enclosure.mark1 import EnclosureMark1
        enclosure = EnclosureMark1()
    elif platform == "mycroft_mark_2":
        LOG.debug("Creating Mark II Enclosure")
        from mycroft.client.enclosure.mark2 import EnclosureMark2
        enclosure = EnclosureMark2()
    else:
        LOG.debug("Creating generic enclosure, platform='{}'".format(platform))
        # TODO: Mechanism to load from elsewhere.  E.g. read a script path
        # from the mycroft.conf, then load/launch that script.
        from mycroft.client.enclosure.generic import EnclosureGeneric
        enclosure = EnclosureGeneric()

    # NOTE(review): every branch above assigns an enclosure, so the
    # headless fallback below appears unreachable -- confirm intended.
    if enclosure:
        try:
            LOG.debug("Enclosure started!")
            enclosure.run()
        except Exception as e:
            print(e)
        finally:
            sys.exit()
    else:
        LOG.debug("No enclosure available for this hardware, running headless")
def _update(login=None):
    """Update the cached identity from a backend login response.

    Args:
        login (dict): login response with uuid, access/refresh tokens
            and an 'expiration' interval in seconds
    """
    # Bug fix: corrected log message typo ('Updaing' -> 'Updating')
    LOG.debug('Updating identity')
    login = login or {}
    expiration = login.get("expiration", 0)
    IdentityManager.__identity.uuid = login.get("uuid", "")
    IdentityManager.__identity.access = login.get("accessToken", "")
    IdentityManager.__identity.refresh = login.get("refreshToken", "")
    # Expiration is relative; store the absolute expiry time
    IdentityManager.__identity.expires_at = time.time() + expiration
def __init__(self, name, emitter=None, basedir=None):
    """Initialize the scheduled CRUD skill.

    Args:
        name (str): skill name
        emitter: message bus emitter
        basedir (str): deprecated, no longer required
    """
    super(ScheduledCRUDSkill, self).__init__(name, emitter)
    self.data = {}
    self.repeat_data = {}
    if basedir:
        # Bug fix: corrected misspelling 'depreciated' -> 'deprecated'
        LOG.debug('basedir argument is no longer required and is '
                  'deprecated.')
        self.basedir = basedir
def on_connection_closed(self, socket):
    """Tear down this connection; it cannot be reused once closed."""
    # Self-destruct (can't reconnect on the same port)
    LOG.debug("on_connection_closed")
    if self.socket:
        LOG.debug("Server stopped: {}".format(self.socket))
        # TODO: How to stop the webapp for this socket?
        # self.socket.stop()
        self.socket = None
    self.callback_disconnect(self.id)
def echo(message):
    """Log audio-service bus messages, ignoring everything else.

    Non-JSON payloads are logged as-is (best effort).

    Args:
        message (str): raw bus message
    """
    try:
        msg = json.loads(message)
        # Bug fix: a message without a 'type' key made .get() return
        # None, raising TypeError into the (previously bare) except and
        # logging unrelated messages.  Default to '' so they're skipped.
        if 'mycroft.audio.service' not in msg.get('type', ''):
            return
        message = json.dumps(msg)
    except Exception:
        # Not JSON: fall through and log the raw message
        pass
    LOG.debug(message)
def enable_intent(self, intent_name):
    """Reenable a previously registered (disabled) intent.

    Args:
        intent_name (str): name of the intent to reenable
    """
    for entry in self.registered_intents:
        name, intent = entry
        if name == intent_name:
            # Re-register under the same name; safe because we break
            # immediately after mutating the list.
            self.registered_intents.remove(entry)
            intent.name = name
            self.register_intent(intent, None)
            LOG.debug('Enabling intent ' + intent_name)
            break
    else:
        LOG.error('Could not enable ' + intent_name +
                  ', it hasn\'t been registered.')
def mimic_fallback_tts(utterance, ident):
    """Speak an utterance with a locally cached Mimic TTS instance.

    Used as a fallback when the connection to the primary TTS is lost.
    The Mimic instance is created lazily and reused across calls.

    Args:
        utterance: text to speak
        ident: id reference to the current interaction
    """
    global mimic_fallback_obj
    config = Configuration.get()
    if not mimic_fallback_obj:
        tts_config = config.get('tts', {}).get("mimic", {})
        lang = config.get("lang", "en-us")
        mimic_fallback_obj = Mimic(lang, tts_config)
    tts = mimic_fallback_obj
    LOG.debug("Mimic fallback, utterance : " + str(utterance))
    tts.init(bus)
    tts.execute(utterance, ident)
def get_version():
    """Return the mycroft-core version string.

    Falls back to a git short hash ('dev-<sha>') and finally to the
    literal 'development' when neither source is available.

    Returns:
        str: version string
    """
    version = None
    try:
        from mycroft.version import CORE_VERSION_STR
        version = CORE_VERSION_STR
    except Exception as e:
        try:
            # Bug fix: check_output returns bytes on Python 3; decode
            # before concatenating with the 'dev-' prefix.
            sha = subprocess.check_output(
                ["git", "rev-parse", "--short", "HEAD"]).decode().strip()
            version = "dev-" + sha
        # Bug fix: 'except E, e' is Python 2 syntax (SyntaxError on
        # Python 3).  Also catch OSError for a missing git binary.
        except (subprocess.CalledProcessError, OSError) as e2:
            version = "development"
            LOG.debug(e)
            LOG.exception(e2)
    return version
def run(self):
    """Start the loop and poll for configuration changes once a second."""
    self.start_async()
    while self.state.running:
        try:
            time.sleep(1)
            current_hash = hash(str(ConfigurationManager().get()))
            if current_hash != self._config_hash:
                LOG.debug('Config has changed, reloading...')
                self.reload()
        except KeyboardInterrupt as e:
            LOG.error(e)
            self.stop()
            raise  # Re-raise KeyboardInterrupt
def on_message(self, message):
    """Deserialize an incoming bus message and re-emit it locally.

    Args:
        message (str): serialized bus message
    """
    LOG.debug(message)
    try:
        deserialized_message = Message.deserialize(message)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are not swallowed; undeserializable data is dropped.
        return

    try:
        self.emitter.emit(deserialized_message.type, deserialized_message)
    # Bug fix: 'except Exception, e' is Python 2 syntax and a
    # SyntaxError on Python 3.
    except Exception as e:
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)
def _register_object(self, message, object_name, register_func):
    """Register a Padatious intent/entity file and schedule retraining.

    Args:
        message: bus message carrying 'file_name' and 'name' in data
        object_name (str): label for log output (e.g. 'intent')
        register_func: Padatious registration callable
    """
    file_name = message.data['file_name']
    name = message.data['name']

    LOG.debug('Registering Padatious ' + object_name + ': ' + name)

    if not isfile(file_name):
        LOG.warning('Could not find file ' + file_name)
        return

    register_func(name, file_name)
    # Delay training so batches of registrations train only once
    self.train_time = get_time() + self.train_delay
    self.wait_and_train()
def _load_or_reload_skill(self, skill_folder):
    """
        Check if unloaded skill or changed skill needs reloading
        and perform loading if necessary.

        Args:
            skill_folder (str): directory name of the skill under
                SKILLS_DIR
    """
    if skill_folder not in self.loaded_skills:
        # First encounter: create the bookkeeping entry with a stable id
        self.loaded_skills[skill_folder] = {
            "id": hash(os.path.join(SKILLS_DIR, skill_folder))
        }
    skill = self.loaded_skills.get(skill_folder)
    skill["path"] = os.path.join(SKILLS_DIR, skill_folder)
    # check if folder is a skill (must have __init__.py)
    if not MainModule + ".py" in os.listdir(skill["path"]):
        return

    # getting the newest modified date of skill
    modified = _get_last_modified_date(skill["path"])
    last_mod = skill.get("last_modified", 0)

    # checking if skill is loaded and wasn't modified
    if skill.get("loaded") and modified <= last_mod:
        return
    # check if skill was modified
    elif skill.get("instance") and modified > last_mod:
        # check if skill is allowed to reloaded
        if not skill["instance"].reload_skill:
            return
        LOG.debug("Reloading Skill: " + skill_folder)
        # removing listeners and stopping threads
        skill["instance"].shutdown()

        # Remove two local references that are known
        refs = sys.getrefcount(skill["instance"]) - 2
        if refs > 0:
            # Remaining references keep the old instance alive in
            # memory even though a fresh one will be loaded below.
            LOG.warning(
                "After shutdown of {} there are still "
                "{} references remaining. The skill "
                "won't be cleaned from memory."
                .format(skill['instance'].name, refs))
        del skill["instance"]

    # (Re)load the skill from disk
    with self.__msm_lock:  # Make sure msm isn't running
        skill["loaded"] = True
        desc = create_skill_descriptor(skill["path"])
        skill["instance"] = load_skill(desc,
                                       self.ws, skill["id"],
                                       BLACKLISTED_SKILLS)
        skill["last_modified"] = modified
def play(self, repeat=False):
    """ Start playback of the first track in the tracklist.

    TODO: add playlist support and repeat
    """
    self.cast.quit_app()

    track = self.tracklist[0]
    # Report start of playback to audioservice
    if self._track_start_callback:
        self._track_start_callback(track)
    LOG.debug('track: {}, type: {}'.format(track, guess_type(track)))
    # Fall back to a generic audio mimetype if one can't be guessed
    mime = guess_type(track)[0] or 'audio/mp3'
    self.cast.play_media(track, mime)
def save(login=None, lock=True):
    """Optionally merge a login response, then persist identity to disk.

    Args:
        login (dict): login response to merge in before saving
        lock (bool): whether to hold the identity lock while saving
    """
    LOG.debug('Saving identity')
    if lock:
        identity_lock.acquire()
    try:
        if login:
            IdentityManager._update(login)
        with FileSystemAccess('identity').open('identity2.json', 'w') as f:
            json.dump(IdentityManager.__identity.__dict__, f)
            # Force the data onto disk before releasing the lock
            f.flush()
            os.fsync(f.fileno())
    finally:
        if lock:
            identity_lock.release()
def __remove_page(self, namespace, pos):
    """ Delete page.

    Args:
        namespace (str): Namespace to remove from
        pos (int): Page position to remove
    """
    LOG.debug("Deleting {} from {}".format(pos, namespace))
    msg = {"type": "mycroft.gui.list.remove",
           "namespace": namespace,
           "position": pos,
           "items_number": 1}
    self.send(msg)
    # Keep the local representation in sync with the GUI
    self.loaded[0].pages.pop(pos)
def _restore_volume(self, message):
    """ Restore volume when mycroft has finished speaking.

    Args:
        message: message bus message, not used but required
    """
    if self.current:
        LOG.debug('restoring volume')
        self.volume_is_low = False
        # Wait briefly; only restore if nothing lowered the volume again
        time.sleep(2)
        if not self.volume_is_low:
            self.current.restore_volume()
    if self.pulse_restore:
        self.pulse_restore()
def _lower_volume(self, message=None):
    """ Lower volume when mycroft starts to speak.

    Args:
        message: message bus message, not used but required
    """
    if self.current:
        LOG.debug('lowering volume')
        self.current.lower_volume()
        self.volume_is_low = True
    try:
        if self.pulse_quiet:
            self.pulse_quiet()
    except Exception as exc:
        # Pulseaudio ducking is best-effort; log and carry on
        LOG.error(exc)
def _stop_running_process(self):
    """Terminate the playback subprocess, escalating to kill if needed."""
    if self._is_process_running():
        if self._paused:
            # The child process must be "unpaused" in order to be stopped
            self._resume()
        self.process.terminate()
        # Give the process up to a second to exit gracefully
        for _ in range(10):
            if not self._is_process_running():
                break
            sleep(0.1)
        if self._is_process_running():
            # Failed to shutdown when asked nicely.  Force the issue.
            LOG.debug("Killing currently playing audio...")
            self.process.kill()
    self.process = None
def on_message(self, message):
    """Deserialize and re-emit a bus message, then fan the raw message
    out to every connected websocket client.

    Args:
        message (str): serialized bus message
    """
    LOG.debug(message)
    try:
        deserialized_message = Message.deserialize(message)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are not swallowed; undeserializable data is dropped.
        return

    try:
        self.emitter.emit(deserialized_message.type, deserialized_message)
    except Exception as e:
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)

    for client in client_connections:
        client.write_message(message)
def _stop(self, message=None):
    """ Handler for mycroft.stop.  Stops any playing service.

    Args:
        message: message bus message, not used but required
    """
    # Ignore stop requests arriving within a second of starting playback
    if time.monotonic() - self.play_start_time <= 1:
        return
    LOG.debug('stopping all playing services')
    with self.service_lock:
        if self.current:
            name = self.current.name
            if self.current.stop():
                self.bus.emit(Message("mycroft.stop.handled",
                                      {"by": "audio:" + name}))
            self.current = None
def _request_my_settings(self, identifier):
    """ Get skill settings for this device associated with the identifier

    Args:
        identifier (str): a hashed_meta

    Returns:
        skill_settings (dict or None): returns a dict if matches
    """
    LOG.debug("getting skill settings from "
              "server for {}".format(self.name))
    settings = self._request_settings()
    # this loads the settings into memory for use in self.store
    for skill_settings in settings:
        if skill_settings['identifier'] != identifier:
            continue
        matched = self._type_cast(skill_settings, to_platform='core')
        self._remote_settings = matched
        return matched
    return None
def _skip_wake_word(self):
    """Check whether the wake word requirement should be bypassed.

    True when listening was triggered programmatically (for example
    during a dialog with the user), or when the Mark 1 button was
    pressed to begin recording.
    """
    if self._listen_triggered:
        return True

    # Pressing the Mark 1 button can start recording (unless it is
    # being used to mean 'stop' instead)
    if check_for_signal('buttonPress', 1):
        # Give other processes a moment to consume this signal if it
        # was meant to be a 'stop'
        sleep(0.25)
        if check_for_signal('buttonPress'):
            # Signal is still here, so it was intended to begin recording
            LOG.debug("Button Pressed, wakeword not needed")
            return True

    return False
def __insert_pages(self, namespace, pages):
    """ Insert pages into the namespace

    Args:
        namespace (str): Namespace to add to
        pages (list): Pages (str) to insert
    """
    LOG.debug("Inserting new pages")
    if not isinstance(pages, list):
        raise ValueError('Argument must be list of pages')

    payload = {
        "type": "mycroft.gui.list.insert",
        "namespace": namespace,
        "position": len(self.loaded[0].pages),
        "data": [{"url": page} for page in pages]
    }
    self.send(payload)
    # Keep the local representation in sync with the GUI
    self.loaded[0].pages += pages
def _play(self, message):
    """ Handler for mycroft.audio.service.play.

    Starts playback of a tracklist, using a specific backend if the
    utterance names one.

    Args:
        message: message bus message, not used but required
    """
    tracks = message.data['tracks']
    repeat = message.data.get('repeat', False)

    # Find if the user wants to use a specific backend
    prefered_service = None
    utterance = message.data.get('utterance', '')
    for s in self.service:
        if utterance and s.name in utterance:
            prefered_service = s
            LOG.debug(s.name + ' would be prefered')
            break

    self.play(tracks, prefered_service, repeat)
def get_playerid(self, backend):
    """Resolve a squeezebox player name to (name, playerid).

    Fuzzy-matches the requested backend against available LMS players.
    On a weak match, tries the configured default player, otherwise
    falls back to the first player found.

    Args:
        backend (str or None): requested player name; None uses the
            local hostname

    Returns:
        tuple: (player name, player id), or (None, None) when no
            players are available
    """
    lmsplayers = self.lms.get_players()
    if not lmsplayers:
        LOG.error("No player available")
        return None, None

    if backend is None:
        backend = socket.gethostname()
    LOG.debug("Requested backend: {}".format(backend))

    players_id_by_name = {p["name"]: p["playerid"] for p in lmsplayers}
    best_name, confidence = extractOne(
        backend,
        players_id_by_name.keys(),
        processor=self.processor,
        scorer=self.scorer,
        score_cutoff=0,
    )
    confidence = confidence / 100.0
    LOG.debug("Player confidence: {}".format(confidence))

    if confidence > 0.5:
        LOG.debug("Extracted backend: {}".format(best_name))
        return best_name, players_id_by_name[best_name]

    if self.default_player_name is not None:
        for player in lmsplayers:
            if player["name"] == self.default_player_name:
                return player["name"], player["playerid"]
        # NOTE(review): a configured-but-absent default player falls
        # through and returns None implicitly -- confirm intended.
    else:
        LOG.error("Couldn't find player matching: {}".format(backend))
        # fallback to first found
        return lmsplayers[0]["name"], lmsplayers[0]["playerid"]
def initialize(self):
    """Set up intent engine, fallback handlers, paths and intent files."""
    self.engine = IntentDeterminationEngine()

    # Settings with fallbacks to sensible defaults
    fallback_setting = self.settings.get('enable_fallback_ex')
    self.enable_fallback = (True if fallback_setting is None
                            else fallback_setting)
    self.public_path = (self.settings.get('public_path_ex') or
                        self.file_system.path + "/public")
    self.local_path = (self.settings.get('local_path_ex') or
                       self.file_system.path + "/private")
    self.allow_category = (self.settings.get('allow_category_ex') or
                           "humor,love,science")
    LOG.debug('local path enabled: %s' % self.local_path)

    self.save_path = self.file_system.path + "/mycroft-skills"
    self.saved_utt = ""
    self.save_answer = ""

    if self.enable_fallback is True:
        self.register_fallback(self.handle_fallback, 6)
    self.register_fallback(self.handle_save_fallback, 99)
    # TODO: fallback load skill intents
    self.add_event('speak', self.save_action)
    LOG.debug('Learning-skil-fallback enabled: %s' % self.enable_fallback)

    skillfolder = Configuration.get()['skills']['directory']
    self.lang_paths = []
    # Path of second language files, if configured
    if 'translations_dir' in Configuration.get():
        self.lang_paths.append(Configuration.get()['translations_dir'])
        self.log.info("set lang path to translation_dir")
    pootle_dir = os.path.expanduser(skillfolder +
                                    "/PootleSync/mycroft-skills")
    if os.path.isdir(pootle_dir):
        self.lang_paths.append(pootle_dir)
        self.log.info("set lang path to PootleSync")

    # Intent files
    self.register_intent_file('will_let_you_know.intent',
                              self.will_let_you_know_intent)
    self.register_intent_file('say.differently.intent',
                              self.say_differently_intent)
    self.register_intent_file('work.on.dialog.intent', self.work_on_dialog)
    self.register_intent_file('something_for_my_skill.intent',
                              self.something_for_my_skill_intent)
def download(self, message=None):
    """Download the settings stored on the backend and check for changes

    When used as a messagebus handler a message is passed but not used.
    """
    if not self.sync_enabled:
        return
    if is_paired():
        remote_settings = self._get_remote_settings()
        if remote_settings:
            if self.last_download_result != remote_settings:
                LOG.debug('Skill settings changed since last download')
                self._emit_settings_change_events(remote_settings)
                self.last_download_result = remote_settings
                save_remote_settings_cache(remote_settings)
            else:
                LOG.debug('No skill settings changes since last download')
    else:
        LOG.debug('Settings not downloaded - device is not paired')
    # If this method is called outside of the timer loop, ensure the
    # existing timer is canceled before starting a new one.
    if self.download_timer:
        self.download_timer.cancel()
    if self.continue_downloading:
        # Poll again in a minute
        self.download_timer = Timer(ONE_MINUTE, self.download)
        self.download_timer.daemon = True
        self.download_timer.start()
def listen(self, source, emitter, stream=None):
    """Listens for chunks of audio that Mycroft should perform STT on.

    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.

    Args:
        source (AudioSource):  Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when
                               recording begins and ends.
        stream (AudioStreamHandler): Stream target that will receive
                                     chunks of the utterance audio
                                     while it is being recorded

    Returns:
        AudioData: audio with the user's utterance, minus the
                   wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"

    #        bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection.  This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    #       speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)

    LOG.debug("Waiting for wake word...")
    self._wait_until_wake_word(source, sec_per_buffer)
    if self._stop_signaled:
        return

    LOG.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")

    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if self.config.get('confirm_listening'):
        audio_file = resolve_resource_file(
            self.config.get('sounds').get('start_listening'))
        if audio_file:
            # Mute the mic while the tone plays so the tone itself is
            # not captured in the recording
            source.mute()
            play_wav(audio_file).wait()
            source.unmute()

    frame_data = self._record_phrase(source, sec_per_buffer, stream)
    audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    if self.save_utterances:
        # Persist the raw utterance for debugging / data collection
        LOG.info("Recording utterance")
        stamp = str(datetime.datetime.now())
        filename = "/tmp/mycroft_utterance%s.wav" % stamp
        with open(filename, 'wb') as filea:
            filea.write(audio_data.get_wav_data())
    LOG.debug("Thinking...")

    return audio_data
def add_unspecified_reminder(self, msg=None):
    """ Starts a dialog to add a reminder when no time was supplied
        for the reminder.
    """
    reminder = msg.data['reminder']
    # Handle the case where padatious mistook the time/date for the
    # reminder text
    if self.contains_datetime(msg.data['reminder']):
        msg.data['timedate'] = msg.data['reminder']
        msg.data['reminder'] = None
        return self.add_unnamed_reminder_at(msg)

    # Handle the case where padatious misses the time/date
    if self.contains_datetime(msg.data['utterance']):
        return self.add_new_reminder(msg)

    response = self.get_response('ParticularTime')
    if response and self.is_affirmative(response):
        # Check if a time was also in the response
        dt, rest = (extract_datetime(response, now_local(), self.lang,
                                     default_time=DEFAULT_TIME) or
                    (None, None))
        if dt is None:
            # No time found in the response; ask explicitly
            response = self.get_response('SpecifyTime')
            # Idiom fix: was 'if not response is None'
            if response is not None:
                dt, rest = (extract_datetime(response, now_local(),
                                             self.lang,
                                             default_time=DEFAULT_TIME) or
                            (None, None))
            if dt:
                self.speak_dialog('Understood')
            else:
                # No time found
                self.speak_dialog('NoDateTimeCancel')
                return
        self.__save_reminder_local(reminder, dt)
    else:
        LOG.debug('put into general reminders')
        self.__save_unspecified_reminder(reminder)
def handle_automation_intent(self, message):
    """Handle automation/scene/script trigger intents.

    Currently disabled: speaks 'not supported' and returns.  The code
    below is retained for when support is (re)enabled.
    """
    # TODO not supported yet
    self.speak_dialog('fhem.error.notsupported')
    return
    # self._setup()
    if self.fhem is None:
        self.speak_dialog('fhem.error.setup')
        return
    entity = message.data["Entity"]
    allowed_types = ['automation', 'scene', 'script']  # TODO
    LOG.debug("Entity: %s" % entity)
    # also handle scene and script requests
    try:
        fhem_entity = self.fhem.find_device(entity, allowed_types)
    except ConnectionError:
        self.speak_dialog('fhem.error.offline')
        return
    if fhem_entity is None:
        self.speak_dialog('fhem.device.unknown', data={
            "dev_name": entity})
        return
    # Bug fix: build the payload only after the None check; previously
    # fhem_entity['id'] was read before verifying fhem_entity was found.
    fhem_data = {'entity_id': fhem_entity['id']}

    # IDEA: set context for 'turn it off again' or similar
    # self.set_context('Entity', fhem_entity['dev_name'])
    LOG.debug("Triggered automation/scene/script: {}".format(fhem_data))
    if "automation" in fhem_entity['id']:
        self.fhem.execute_service('automation', 'trigger', fhem_data)
        self.speak_dialog('fhem.automation.trigger',
                          data={"dev_name": fhem_entity['dev_name']})
    elif "script" in fhem_entity['id']:
        self.speak_dialog('fhem.automation.trigger',
                          data={"dev_name": fhem_entity['dev_name']})
        self.fhem.execute_service("fhem", "turn_on", data=fhem_data)
    elif "scene" in fhem_entity['id']:
        self.speak_dialog('fhem.device.on', data=fhem_entity)
        self.fhem.execute_service("fhem", "turn_on", data=fhem_data)
def transcribe(self, audio):
    """Run STT on an audio clip and emit the utterance on the bus.

    A "tell me more" utterance is expanded into "tell me about <word>"
    using the hot word stashed in hotWordFile.txt, which is emptied
    after use.
    """
    LOG.debug("Transcribing audio")
    text = None
    try:
        # Invoke the STT engine on the audio clip
        text = self.stt.execute(audio).lower().strip()
        LOG.debug("STT: --------->" + text)
    except sr.RequestError as e:
        LOG.error("Could not request Speech Recognition {0}".format(e))
    except ConnectionError as e:
        LOG.error("Connection Error: {0}".format(e))
        self.emitter.emit("recognizer_loop:no_internet")
    except HTTPError as e:
        if e.response.status_code == 401:
            text = "pair my device"  # phrase to start the pairing process
            LOG.warning("Access Denied at mycroft.ai")
    except Exception as e:
        LOG.error(e)
        LOG.error("Speech Recognition could not understand audio")
    if text:
        # STT succeeded, send the transcribed speech on for processing
        LOG.error("maine samjha tune bola " + text)
        if text == "tell me more":
            LOG.info("found tell me more in listener****")
            # Bug fix: "rw+" is not a valid open() mode and raises
            # ValueError; "r+" opens an existing file for read/write.
            with open("hotWordFile.txt", "r+") as hotWordTemp:
                prevHotWord = hotWordTemp.read()
                hotWordTemp.truncate(0)
            text = "tell me about " + prevHotWord
            LOG.error(" naya wala maine samjha tune bola " + text)
        payload = {
            'utterances': [text],
            'lang': self.stt.lang,
            'session': SessionManager.get().session_id
        }
        self.emitter.emit("recognizer_loop:utterance", payload)
        self.metrics.attr('utterances', [text])
def CPS_match_query_phrase(self, phrase):
    """Match a play phrase against the TuneIn regexes.

    Checks the most specific pattern first; the matched pattern is
    stripped from the phrase to produce the station name.

    Returns:
        tuple: (phrase, match level, extracted station data)
    """
    # (regex name, confidence level), ordered most to least specific
    patterns = (
        ('internet_radio_on_tunein', CPSMatchLevel.EXACT),
        ('radio_on_tunein', CPSMatchLevel.EXACT),
        ('on_tunein', CPSMatchLevel.EXACT),
        ('internet_radio', CPSMatchLevel.CATEGORY),
        ('radio', CPSMatchLevel.CATEGORY),
    )
    for name, level in patterns:
        regex = self.translate_regex(name)
        if re.search(regex, phrase):
            data = re.sub(regex, '', phrase)
            LOG.debug("CPS Match (" + name + "): " + data)
            return phrase, level, data
    return phrase, CPSMatchLevel.GENERIC, phrase
def play(self, tracks, prefered_service):
    """ Start playback on the preferred service if it supports the URI.

    Otherwise fall back to the default backend, then to any service
    reporting support for the URI type.

    Args:
        tracks: list of tracks to play.
        prefered_service: service the user prefers for these tracks.
    """
    self._stop()

    uri_type = tracks[0].split(':')[0]

    # check if user requested a particular service
    if prefered_service and uri_type in prefered_service.supported_uris():
        selected_service = prefered_service
    elif self.default and uri_type in self.default.supported_uris():
        # check if default supports the uri
        LOG.debug("Using default backend ({})".format(self.default.name))
        selected_service = self.default
    else:
        # Check if any other service can play the media
        LOG.debug("Searching the services")
        selected_service = None
        for candidate in self.service:
            if uri_type in candidate.supported_uris():
                LOG.debug("Service {} supports URI {}".format(candidate,
                                                              uri_type))
                selected_service = candidate
                break
        if selected_service is None:
            LOG.info('No service found for uri_type: ' + uri_type)
            return

    selected_service.clear_list()
    selected_service.add_list(tracks)
    selected_service.play()
    self.current = selected_service
def __init__(self, id, config, callback_disconnect, enclosure):
    """Set up a dedicated GUI websocket server for one GUI client.

    Args:
        id: identifier for this connection
        config (dict): core configuration; reads the 'gui_websocket'
            section (host, route, base_port)
        callback_disconnect: callable invoked with this connection's id
            when the socket closes
        enclosure: enclosure object this connection belongs to
    """
    LOG.debug("Creating GUIConnection")
    self.id = id
    self.socket = None
    self.callback_disconnect = callback_disconnect
    self.enclosure = enclosure

    # Each connection will run its own Tornado server.  If the
    # connection drops, the server is killed.
    websocket_config = config.get("gui_websocket")
    host = websocket_config.get("host")
    route = websocket_config.get("route")
    base_port = websocket_config.get("base_port")

    # Try successive ports (base_port + class-level counter) until one
    # can be bound.
    # NOTE(review): a persistent bind failure makes this loop spin
    # forever while incrementing the port -- confirm intended.
    while True:
        self.port = base_port + GUIConnection._last_idx
        GUIConnection._last_idx += 1

        try:
            self.webapp = tornado.web.Application(
                [(route, GUIWebsocketHandler)], **gui_app_settings
            )
            # Hacky way to associate socket with this object:
            self.webapp.gui = self
            self.webapp.listen(self.port, host)
        except Exception as e:
            LOG.debug('Error: {}'.format(repr(e)))
            continue
        break

    # Can't run two IOLoop's in the same process
    if not GUIConnection.server_thread:
        GUIConnection.server_thread = create_daemon(
            ioloop.IOLoop.instance().start)
    LOG.debug('IOLoop started @ '
              'ws://{}:{}{}'.format(host, self.port, route))
def download(self):
    """Download the settings stored on the backend and check for changes"""
    if not is_paired():
        LOG.debug('Settings not downloaded - device is not paired')
    else:
        download_success = self._get_remote_settings()
        if download_success:
            self.settings_changed = (self.last_download_result !=
                                     self.remote_settings)
            if self.settings_changed:
                LOG.debug('Skill settings changed since last download')
                self._emit_settings_change_events()
                self.last_download_result = self.remote_settings
            else:
                LOG.debug('No skill settings changes since last download')

    # If this method is called outside of the timer loop, ensure the
    # existing timer is canceled before starting a new one.
    if self.download_timer:
        self.download_timer.cancel()
    if self.continue_downloading:
        # Poll again in a minute
        self.download_timer = Timer(ONE_MINUTE, self.download)
        self.download_timer.daemon = True
        self.download_timer.start()
def upload(self):
    """Upload the contents of the settingsmeta file to Mycroft servers.

    The settingsmeta file does not change often, if at all.  Only
    perform the upload if a change in the file is detected.  Retries
    every minute until the upload succeeds or the uploader is stopped.
    """
    synced = False
    if not is_paired():
        LOG.debug('settingsmeta.json not uploaded - device is not paired')
    else:
        self.api = DeviceApi()
        if not self.api.identity.uuid:
            LOG.debug('settingsmeta.json not uploaded - no identity')
        elif self.json_path.is_file() or self.yaml_path.is_file():
            # A settingsmeta file exists; load it and push the result
            self._load_settings_meta_file()
            self._update_settings_meta()
            LOG.debug('Uploading settings meta for ' + self.skill_gid)
            synced = self._issue_api_call()

    if not synced and not self._stopped:
        self.upload_timer = Timer(ONE_MINUTE, self.upload)
        self.upload_timer.daemon = True
        self.upload_timer.start()
def transcribe(self, audio):
    """Convert an audio clip to text using the configured STT engine.

    Returns the lower-cased transcription, the pairing phrase when the
    backend denies access (HTTP 401), or None on failure.  The except
    branches that do not return fall through to the trailing 'speak'
    emit announcing a backend or connectivity problem.
    """
    def send_unknown_intent():
        """ Send message that nothing was transcribed. """
        self.emitter.emit('recognizer_loop:speech.recognition.unknown')

    try:
        # Invoke the STT engine on the audio clip
        text = self.stt.execute(audio)
        if text is not None:
            text = text.lower().strip()
            LOG.debug("STT: " + text)
        else:
            send_unknown_intent()
            LOG.info('no words were transcribed')
        return text
    except sr.RequestError as e:
        LOG.error("Could not request Speech Recognition {0}".format(e))
    except ConnectionError as e:
        LOG.error("Connection Error: {0}".format(e))
        self.emitter.emit("recognizer_loop:no_internet")
    except HTTPError as e:
        if e.response.status_code == 401:
            LOG.warning("Access Denied at mycroft.ai")
            return "pair my device"  # phrase to start the pairing process
        else:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
    except RequestException as e:
        LOG.error(e.__class__.__name__ + ': ' + str(e))
    except Exception as e:
        # Generic failure: report the unknown transcription and bail out
        # without emitting the backend/connectivity dialog below.
        send_unknown_intent()
        LOG.error(e)
        LOG.error("Speech Recognition could not understand audio")
        return None
    # Reached only from the non-returning except branches above: tell
    # the user whether the backend or the network connection is at fault.
    if connected():
        dialog_name = 'backend.down'
    else:
        dialog_name = 'not connected to the internet'
    self.emitter.emit('speak', {'utterance': dialog.get(dialog_name)})
def parse_feed(url, author=""):
    """Parse an RSS/Atom feed into a list of news-item dicts.

    Args:
        url (str): feed url to fetch and parse
        author (str): fallback source name for entries without one

    Returns:
        list(dict): one dict per entry with title, source, url, caption
                    and date fields, plus tags/imgLink when available.
    """
    news = []
    LOG.debug("Parsing feed: " + url)
    data = feedparser.parse(url)
    for new in data["entries"]:
        # Use the entry's own source when present; fall back to the
        # author argument.  (Previously the parameter itself was
        # overwritten, leaking one entry's source into later entries.)
        try:
            source = new["source"]["title"]
        except (KeyError, TypeError):
            source = author
        caption = new["summary"]
        dt = datetime.fromtimestamp(mktime(new["published_parsed"]))
        d = {
            "title": new["title"],
            "source": source,
            "url": new["link"],
            "caption": caption,
            "date_str": dt.strftime("%Y-%m-%d"),
            "datetime": dt
        }
        if new.get("tags"):
            d["tags"] = [t["term"] for t in new["tags"]]

        # Prefer an explicitly linked jpeg; otherwise scrape the summary
        # html for an inline <img> tag.
        img = None
        for link in new["links"]:
            if link["type"] in ["image/jpeg"]:
                img = link["href"]
        if img is None:
            soup = bs4.BeautifulSoup(new["summary"], "html.parser")
            img_tag = soup.find("img")
            if img_tag:
                # Use the plain text of the html summary as caption
                d["caption"] = soup.text.strip()
                img = img_tag["src"]
        if img:
            d["imgLink"] = img
        d["utterance"] = d["caption"] or d["title"] or "News from " + source
        news.append(d)
    return news
def enable_intent(self, intent_name):
    """(Re)Enable a registered intent if it belongs to this skill.

    Args:
        intent_name: name of the intent to be enabled

    Returns:
        bool: True if enabled, False if it wasn't registered
    """
    # Single scan over (name, intent) pairs instead of building two
    # parallel lists and indexing into them.
    for name, intent in self.registered_intents:
        if name == intent_name:
            self.registered_intents.remove((name, intent))
            intent.name = intent_name
            self.register_intent(intent, None)
            LOG.debug('Enabling intent ' + intent_name)
            return True

    LOG.error('Could not enable ' + intent_name + ', it hasn\'t been '
              'registered.')
    return False
def enable_intent(self, intent_name):
    """(Re)Enable a registered intent if it belongs to this skill.

    Arguments:
        intent_name: name of the intent to be enabled

    Returns:
        bool: True if enabled, False if it wasn't registered
    """
    intent = self.intent_service.get_intent(intent_name)
    if not intent:
        LOG.error('Could not enable '
                  '{}, it hasn\'t been registered.'.format(intent_name))
        return False

    # File-based intents (*.intent) are re-registered through the file
    # API, the rest through the regular intent registration.
    if ".intent" in intent_name:
        self.register_intent_file(intent_name, None)
    else:
        intent.name = intent_name
        self.register_intent(intent, None)
    LOG.debug('Enabling intent {}'.format(intent_name))
    return True
def __move_namespace(self, from_pos, to_pos):
    """Move an existing namespace to a new position in the stack.

    Args:
        from_pos (int): Position in the stack to move from
        to_pos (int): Position to move to
    """
    LOG.debug("Activating existing namespace")
    # Seems like the namespace is moved to the top automatically when
    # a page change is done. Deactivating this for now.
    if self.explicit_move:
        LOG.debug("move {} to {}".format(from_pos, to_pos))
        move_msg = {
            "type": "mycroft.session.list.move",
            "namespace": "mycroft.system.active_skills",
            "from": from_pos,
            "to": to_pos,
            "items_number": 1
        }
        self.send(move_msg)

    # Keep the local representation of the skill stack in sync.
    self.loaded.insert(to_pos, self.loaded.pop(from_pos))
def write_fb(im, dev='/dev/fb0'):
    """ Write Image Object to framebuffer.

    The image is vertically centered by writing background-colored
    padding rows above and below it.

        TODO: Check memory mapping
    """
    start_time = time.time()
    cols = []
    # range(im.size[1]): the previous range(im.size[1] - 1) dropped the
    # image's last row, leaving the framebuffer one row short of the
    # padding calculation below.
    for j in range(im.size[1]):
        for i in range(im.size[0]):
            R, G, B, A = im.getpixel((i, j))
            # Write color data in the correct order for the screen
            cols.append(struct.pack('BBBB', B, G, R, A))
    LOG.info('Row time: {}'.format(time.time() - start_time))

    with open(dev, 'wb') as f:
        # Background-colored padding, ordered B, G, R, A for the screen
        color = [BACKGROUND.blue, BACKGROUND.green, BACKGROUND.red, 0]
        padding = struct.pack('BBBB', *color) * \
            ((SCREEN.height - im.size[1]) // 2 * SCREEN.width)
        f.write(padding)
        f.write(b''.join(cols))
        f.write(padding)
    LOG.debug('Draw time: {}'.format(time.time() - start_time))
def load_regex_from_file(path, skill_id):
    """Load regex from file

    The regex is sent to the intent handler using the message bus

    Args:
        path: path to regex file (*.rx)
        skill_id: skill_id the regex is tied to

    Returns:
        list(str): munged regexes read from the file
    """
    regexes = []
    if path.endswith('.rx'):
        with open(path, 'r', encoding='utf8') as reg_file:
            # Iterate the file directly instead of materializing all
            # lines with readlines().
            for line in reg_file:
                if line.startswith("#"):
                    continue  # skip comment lines
                stripped = line.strip()
                LOG.debug('regex pre-munge: ' + stripped)
                regex = munge_regex(stripped, skill_id)
                LOG.debug('regex post-munge: ' + regex)
                # Raise error if regex can't be compiled
                re.compile(regex)
                regexes.append(regex)

    return regexes
def generate_cache_text(cache_audio_dir, cache_text_file):
    """ This prepares a text file with all the sentences from *.dialog files
    present in mycroft/res/text/en-us and mycroft-wifi setup skill

    Args:
        cache_audio_dir (path): path to store .wav files
        cache_text_file (file): path of the file holding the sentences
    """
    try:
        if not os.path.isfile(cache_text_file):
            # exist_ok: a pre-existing audio dir previously raised and
            # aborted cache generation.
            os.makedirs(cache_audio_dir, exist_ok=True)
            # 'with' guarantees the file is closed even if writing a
            # dialog file's sentences fails part-way through.
            with open(cache_text_file, 'w') as cache_file:
                for each_path in cache_dialog_path:
                    if os.path.exists(each_path):
                        write_cache_text(each_path, cache_file)
            LOG.debug("Completed generating cache")
        else:
            LOG.debug("Cache file 'cache_text.txt' already exists")
    except Exception:
        LOG.error("Could not open text file to write cache")
def main():
    """Launch one of the available enclosure implementations.

    This depends on the configured platform and can currently either be
    mycroft_mark_1 or mycroft_mark_2, if unconfigured a generic enclosure
    with only the GUI bus will be started.
    """
    # Read the system configuration to find the hardware platform
    platform = LocalConf(SYSTEM_CONFIG).get("enclosure", {}).get("platform")

    enclosure = create_enclosure(platform)
    if not enclosure:
        LOG.info("No enclosure available for this hardware, running headless")
        return

    try:
        LOG.debug("Enclosure started!")
        reset_sigint_handler()
        create_daemon(enclosure.run)
        wait_for_exit_signal()
    except Exception as e:
        print(e)
def load(config=None):
    """Merge remote (backend) configuration into the given config dict.

    When the server "update" flag is set, fetch the settings and
    location from the device api and merge them in; fall back to the
    locally cached copy if the fetch fails.

    Returns:
        dict: the (possibly updated) configuration
    """
    RemoteConfiguration.validate(config)
    if not config.get("server", {}).get("update"):
        LOG.debug("Remote configuration not activated.")
        return config

    try:
        from mycroft.api import DeviceApi
        api = DeviceApi()
        setting = api.find_setting()
        location = api.find_location()
        if location:
            setting["location"] = location
        RemoteConfiguration.__load(config, setting)
        RemoteConfiguration.__store_cache(setting)
    except Exception as e:
        LOG.warning("Failed to fetch remote configuration: %s" % repr(e))
        RemoteConfiguration.__load_cache(config)

    return config
def start_playlist_playback(self, dev, playlist):
    """Start playback of a named user playlist on a spotify device.

    Args:
        dev (dict): spotify device to play on ('id' and 'name' keys)
        playlist (str): playlist name, expected key of self.playlists

    Returns:
        bool: True if playback was started, otherwise False
    """
    LOG.info(u'Playlist: {}'.format(playlist))
    if not playlist and not self.playlists:
        LOG.debug('No playlists available')
        return False  # different default action when no lists defined?
    if dev and playlist:
        pl = self.playlists.get(playlist)
        if pl is None:
            # Guard: an unknown playlist name previously crashed with
            # a KeyError on self.playlists[playlist].
            LOG.info('Playlist {} not found'.format(playlist))
            return False
        LOG.info(u'playing {} using {}'.format(playlist, dev['name']))
        self.speak_dialog('ListeningToPlaylist',
                          data={'playlist': playlist})
        time.sleep(2)
        tracks = self.spotify.user_playlist_tracks(pl['owner']['id'],
                                                   pl['id'])
        uris = [t['track']['uri'] for t in tracks['items']]
        self.spotify_play(dev['id'], uris=uris)
        return True
    elif not dev:
        LOG.info('No spotify devices found')
    else:
        LOG.info('No playlist found')
    return False
def on_gui_client_connected(self, message):
    """Handle a GUI announcing its presence on the main bus.

    Spins up a dedicated GUIConnection (with its own websocket server)
    for the announced gui_id and replies with the port it should use.
    """
    LOG.debug("on_gui_client_connected")
    gui_id = message.data.get("gui_id")

    # Spin up a new communication socket for this GUI
    if gui_id in self.GUIs:
        # TODO: Close it?
        pass

    # Tornado needs an event loop in this thread; create one if missing
    try:
        asyncio.get_event_loop()
    except RuntimeError:
        asyncio.set_event_loop(asyncio.new_event_loop())

    connection = GUIConnection(gui_id, self.global_config,
                               self.callback_disconnect, self)
    self.GUIs[gui_id] = connection
    LOG.debug("Heard announcement from gui_id: {}".format(gui_id))

    # Announce connection, the GUI should connect on it soon
    self.bus.emit(Message("mycroft.gui.port",
                          {"port": connection.port,
                           "gui_id": gui_id}))
def get_stream_url(self, youtube_url):
    """Resolve a youtube page url to its best available audio stream.

    Args:
        youtube_url (str): path part appended to the youtube base url

    Returns:
        str: direct url of the selected audio stream
    """
    full_url = base_url + youtube_url
    LOG.debug('pafy processing: ' + full_url)
    video = pafy.new(full_url)
    LOG.debug('audiostreams found: ' + str(video.audiostreams))
    best = video.getbestaudio()
    LOG.debug('audiostream selected: ' + str(best))
    return best.url
def execute(self, sentence, ident=None):
    """Convert sentence to speech, preprocessing out unsupported ssml

    The method caches results if possible using the hash of the
    sentence.

    Args:
        sentence: Sentence to be spoken
        ident: Id reference to current interaction
    """
    sentence = self.validate_ssml(sentence)

    create_signal("isSpeaking")

    # Substitute known phonetic spellings for hard-to-pronounce words
    if self.phonetic_spelling:
        for word in re.findall(r"[\w']+", sentence):
            spelling = self.spellings.get(word.lower())
            if spelling is not None:
                sentence = sentence.replace(word, spelling)

    for chunk in self._preprocess_sentence(sentence):
        # Cache key: md5 of the chunk text
        key = str(hashlib.md5(
            chunk.encode('utf-8', 'ignore')).hexdigest())
        wav_file = os.path.join(
            mycroft.util.get_cache_directory("tts/" + self.tts_name),
            key + '.' + self.audio_ext)

        if os.path.exists(wav_file):
            LOG.debug("TTS cache hit")
            phonemes = self.load_phonemes(key)
        else:
            wav_file, phonemes = self.get_tts(chunk, wav_file)
            if phonemes:
                self.save_phonemes(key, phonemes)

        vis = self.viseme(phonemes)
        self.queue.put((self.audio_ext, wav_file, vis, ident))