def remove_pages(self, page_names):
    """Remove a list of pages in the GUI.

    Arguments:
        page_names (list): List of page names (str) to display, such as
                           ["Weather.qml", "Forecast.qml", "Other.qml"]
    """
    if not isinstance(page_names, list):
        raise ValueError('page_names must be a list')

    use_remote = self.config.get('remote')

    # Convert pages to full reference
    page_urls = []
    for name in page_names:
        # SYSTEM pages live in mycroft-core resources; everything else is
        # looked up in the skill's own ui/ directory.
        if name.startswith("SYSTEM"):
            resolved = resolve_resource_file(join('ui', name))
        else:
            resolved = self.skill.find_resource(name, 'ui')
        if not resolved:
            raise FileNotFoundError("Unable to find page: {}".format(name))
        if use_remote:
            page_urls.append(self.remote_url + "/" + resolved)
        else:
            page_urls.append("file://" + resolved)

    self.skill.bus.emit(
        Message("gui.page.delete",
                {"page": page_urls,
                 "__from": self.skill.skill_id}))
def get(phrase, lang=None, context=None):
    """Look up a resource file for the given phrase.

    If no file is found, the requested phrase is returned as the string.
    This will use the default language for translations.

    Args:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        # fall back to the configured default language
        from mycroft.configuration import ConfigurationManager
        lang = ConfigurationManager.instance().get("lang")

    filename = "text/" + lang.lower() + "/" + phrase + ".dialog"
    template = resolve_resource_file(filename)
    if not template:
        logger.debug("Resource file not found: " + filename)
        return phrase

    renderer = MustacheDialogRenderer()
    renderer.load_template_file("template", template)
    return renderer.render("template", context or {})
def _upload_file(self, filename):
    """Queue a wake word sample and upload all pending samples via scp.

    Args:
        filename (str): path of the local file to queue for upload
    """
    server = self.upload_config['server']
    keyfile = resolve_resource_file('wakeword_rsa')
    userfile = expanduser('~/.mycroft/wakeword_rsa')
    if not isfile(userfile):
        # private key must not be world-readable for ssh/scp to accept it
        shutil.copy2(keyfile, userfile)
        os.chmod(userfile, 0o600)
    keyfile = userfile
    address = self.upload_config['user'] + '@' + \
        server + ':' + self.upload_config['folder']

    with self.upload_lock:
        self.filenames_to_upload.append(filename)
        remaining = []
        # Iterate over a snapshot: the old `del list[i]` inside
        # enumerate() mutated the list being iterated and silently
        # skipped the file following every successful upload.
        for fn in list(self.filenames_to_upload):
            LOG.debug('Uploading ' + fn + '...')
            os.chmod(fn, 0o666)
            # NOTE(review): shell-string scp breaks on paths containing
            # spaces/metacharacters; consider a subprocess argument list.
            cmd = 'scp -o StrictHostKeyChecking=no -P ' + \
                str(self.upload_config['port']) + ' -i ' + \
                keyfile + ' ' + fn + ' ' + address
            if os.system(cmd) == 0:
                os.remove(fn)
            else:
                LOG.debug('Could not upload ' + fn + ' to ' + server)
                remaining.append(fn)
        # keep only the files that failed to upload
        self.filenames_to_upload[:] = remaining
def _translate_word(name, lang):
    """Helper to get word translations.

    Args:
        name (str): Word name. Returned as the default value if not
                    translated.
        lang (str): Language code, e.g. "en-us"

    Returns:
        str: translated version of resource name
    """
    from mycroft.util import resolve_resource_file

    lang_code = get_full_lang_code(lang)
    filename = resolve_resource_file(join("text", lang_code, name + ".word"))
    if not filename:
        return name  # no resource file found; use resource name as the word

    try:
        with open(filename, 'r', encoding='utf8') as word_file:
            for raw_line in word_file:
                candidate = raw_line.strip()
                # skip comment lines; first remaining line is the word
                if not candidate.startswith("#"):
                    return candidate
    except Exception:
        pass  # best effort: unreadable file falls through to the default
    return name
def _upload_file(self, filename):
    """Queue a wake word sample and upload pending samples via scp.

    Stops at the first failed upload, keeping that file and all
    not-yet-attempted files queued for a later retry.

    Args:
        filename (str): path of the local file to queue for upload
    """
    server = self.upload_config['server']
    keyfile = resolve_resource_file('wakeword_rsa')
    userfile = expanduser('~/.mycroft/wakeword_rsa')
    if not isfile(userfile):
        # private key must not be world-readable for ssh/scp to accept it
        shutil.copy2(keyfile, userfile)
        os.chmod(userfile, 0o600)
    keyfile = userfile
    address = '{}@{}:{}'.format(self.upload_config['user'], server,
                                self.upload_config['folder'])

    with self.upload_lock:
        self.filenames_to_upload.append(filename)
        # Iterate over a snapshot: the old `del list[i]` inside
        # enumerate() mutated the list being iterated and silently
        # skipped the file following every successful upload.
        pending = list(self.filenames_to_upload)
        for pos, fn in enumerate(pending):
            LOG.debug('Uploading wake word...')
            os.chmod(fn, 0o666)
            scp_status = Popen([
                'scp', '-o', 'StrictHostKeyChecking=no',
                '-P', str(self.upload_config['port']),
                '-i', keyfile, fn, address
            ], stdout=PIPE, stderr=PIPE).wait()
            if scp_status == 0:
                os.remove(fn)
            else:
                LOG.debug('Failed to upload wake word to metrics server')
                # keep the failed file and everything not yet attempted
                self.filenames_to_upload = pending[pos:]
                break
        else:
            self.filenames_to_upload = []
def __init__(self):
    """Initialize skill state and default settings, then kick off the
    precise installation and optional sound backup."""
    MycroftSkill.__init__(self)
    self.record_process = None
    self.start_time = 0
    self.last_index = 24  # index of last pixel in countdowns
    self.source_path = self.file_system.path
    self.piep = resolve_resource_file('snd/start_listening.wav')
    self.settings["name"] = self.config_core.get(
        'listener', {}).get('wake_word').replace(' ', '-')
    # `.get(key, default)` replaces the old
    # `get(key) if get(key) is not None else default` ternaries; this
    # matches the style used by the newer initialize() variants.
    self.settings["soundbackup"] = self.settings.get('soundbackup', False)
    self.settings["min_free_disk"] = 100  # min mb to leave free on disk
    self.settings["rate"] = 16000  # sample rate, hertz
    self.settings["channels"] = 1  # recording channels (1 = mono)
    self.settings["file_path"] = self.file_system.path + "/data"
    self.settings["sell_path"] = "/tmp/mycroft_wake_words"
    self.settings["duration"] = -1  # default = unknown
    self.settings["formate"] = "S16_LE"
    self.settings["selling"] = self.settings.get('selling', 15)
    self.settings["improve"] = 10
    self.settings['savewakewords'] = self.settings.get('savewakewords',
                                                       False)
    self.log.info("settings get: " +
                  str(self.settings.get('savewakewords')))
    if not os.path.isdir(self.file_system.path +
                         "/precise/mycroft_precise.egg-info"):
        self.log.info("no precise installed. beginn installation")
        # NOTE(review): method name looks misspelled ("precice") — confirm
        # it matches the actual definition before renaming it.
        self.install_precice_source()
    if self.settings["soundbackup"] is True:
        self.download_sounds()
    self.save_wakewords()
def initialize(self):
    """Set up recorder state and default settings, then start the
    background installation/backup tasks that are needed."""
    self.record_process = None
    self.start_time = 0
    self.last_index = 24  # index of last pixel in countdowns
    self.source_path = self.file_system.path
    self.piep = resolve_resource_file('snd/start_listening.wav')
    self.precisefolder = self.file_system.path + "/Precise-Community-Data"
    wake_word = self.config_core.get('listener', {}).get('wake_word')
    self.settings["Name"] = wake_word.replace(' ', '-')

    # user-overridable settings: keep an existing value, else the default
    overridable = {
        "soundbackup": False,
        "selling": 15,
        "onlyPrecise": True,
        "savewakewords": False,
        "repo": 'https://github.com/MycroftAI/Precise-Community-Data.git',
    }
    for key, default in overridable.items():
        self.settings[key] = self.settings.get(key, default)

    # fixed recording parameters
    self.settings["min_free_disk"] = 100  # min mb to leave free on disk
    self.settings["rate"] = 16000  # sample rate, hertz
    self.settings["channels"] = 1  # recording channels (1 = mono)
    self.settings["file_path"] = self.file_system.path + "/data/"
    self.settings["sell_path"] = "/tmp/mycroft_wake_words"
    self.settings["duration"] = -1  # default = unknown
    self.settings["formate"] = "S16_LE"
    self.settings["improve"] = 10

    egg_info = self.file_system.path + "/precise/mycroft_precise.egg-info"
    if not os.path.isdir(egg_info):
        self.log.info("no precise installed. beginn installation")
        _thread.start_new_thread(self.install_precise_source, ())
    if self.settings["soundbackup"] is True:
        _thread.start_new_thread(self.download_sounds, ())
    self.save_wakewords()
def get(phrase, lang=None, context=None):
    """Look up a resource file for the given phrase.

    If no file is found, the requested phrase is returned as the string.
    This will use the default language for translations.

    Args:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        # fall back to the configured default language
        from mycroft.configuration import ConfigurationManager
        lang = ConfigurationManager.instance().get("lang")

    filename = "text/" + lang.lower() + "/" + phrase + ".dialog"
    template = resolve_resource_file(filename)
    if not template:
        LOG.debug("Resource file not found: " + filename)
        return phrase

    renderer = MustacheDialogRenderer()
    renderer.load_template_file("template", template)
    return renderer.render("template", context if context else {})
def find_download_exe(self):
    """Locate the engine executable, downloading it if necessary.

    Search order: bundled resource, executable on PATH, then
    ~/.mycroft/precise/.  As a last resort the binary for this machine's
    architecture is downloaded from the distribution URL.

    Returns:
        str: path (or bare name) of a runnable executable
    """
    exe_file = resolve_resource_file(self.exe_name)
    if exe_file:
        return exe_file
    try:
        # Probe PATH: exit status 0 means the binary exists and runs
        if call(self.exe_name + ' < /dev/null', shell=True) == 0:
            return self.exe_name
    except OSError:
        pass
    exe_file = expanduser('~/.mycroft/precise/' + self.exe_name)
    if isfile(exe_file):
        return exe_file

    import platform
    import stat

    def snd_msg(cmd):
        """Send message to faceplate"""
        Popen('echo "' + cmd + '" > /dev/ttyAMA0', shell=True)

    arch = platform.machine()
    url = self.dist_url + arch + '/' + self.exe_name
    snd_msg('mouth.text=Updating Listener...')
    self.download(url, exe_file)
    snd_msg('mouth.reset')
    # mark the freshly downloaded binary as executable
    os.chmod(exe_file, os.stat(exe_file).st_mode | stat.S_IEXEC)
    # (removed a redundant raw `echo "mouth.reset" > /dev/ttyAMA0` Popen
    # that duplicated the snd_msg('mouth.reset') call above)
    return exe_file
def get(phrase, lang=None, context=None):
    """Looks up a resource file for the given phrase.

    If no file is found, the requested phrase is returned as the string.
    This will use the default language for translations.

    Arguments:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        # fall back to the configured default language
        from mycroft.configuration import Configuration
        lang = Configuration.get().get('lang')

    filename = join('text', lang.lower(), phrase + '.dialog')
    template = resolve_resource_file(filename)
    if not template:
        LOG.debug('Resource file not found: {}'.format(filename))
        return phrase

    renderer = MustacheDialogRenderer()
    renderer.load_template_file('template', template)
    return renderer.render('template', context or {})
def listen(self, source, emitter, stream=None):
    """Listens for chunks of audio that Mycroft should perform STT on.

    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.

    Args:
        source (AudioSource):  Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when recording
                                begins and ends.
        stream (AudioStreamHandler): Stream target that will receive chunks
                                     of the utterance audio while it is
                                     being recorded

    Returns:
        AudioData: audio with the user's utterance, minus the wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"

    #        bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection.  This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    #       speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)

    LOG.debug("Waiting for wake word...")
    self._wait_until_wake_word(source, sec_per_buffer)
    if self._stop_signaled:
        # shutdown requested while waiting; implicitly returns None
        return

    LOG.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")

    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.  Mute while playing so the sound
    # itself is not recorded.
    if self.config.get('confirm_listening'):
        audio_file = resolve_resource_file(
            self.config.get('sounds').get('start_listening'))
        if audio_file:
            source.mute()
            play_wav(audio_file).wait()
            source.unmute()

    frame_data = self._record_phrase(source, sec_per_buffer, stream)
    audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    if self.save_utterances:
        # keep a wav copy of the utterance for debugging/training
        LOG.info("Recording utterance")
        stamp = str(datetime.datetime.now())
        filename = "/tmp/mycroft_utterance%s.wav" % stamp
        with open(filename, 'wb') as filea:
            filea.write(audio_data.get_wav_data())
    LOG.debug("Thinking...")
    return audio_data
def _upload_file(self, filename):
    """Queue a wake word sample and upload all pending samples via scp.

    Args:
        filename (str): path of the local file to queue for upload
    """
    server = self.upload_config['server']
    keyfile = resolve_resource_file('wakeword_rsa')
    userfile = expanduser('~/.mycroft/wakeword_rsa')
    if not isfile(userfile):
        # private key must not be world-readable for ssh/scp to accept it
        shutil.copy2(keyfile, userfile)
        os.chmod(userfile, 0o600)
    keyfile = userfile
    address = self.upload_config['user'] + '@' + \
        server + ':' + self.upload_config['folder']

    with self.upload_lock:
        self.filenames_to_upload.append(filename)
        remaining = []
        # Iterate over a snapshot: the old `del list[i]` inside
        # enumerate() mutated the list being iterated and silently
        # skipped the file following every successful upload.
        for fn in list(self.filenames_to_upload):
            logger.debug('Uploading ' + fn + '...')
            os.chmod(fn, 0o666)
            # NOTE(review): shell-string scp breaks on paths containing
            # spaces/metacharacters; consider a subprocess argument list.
            cmd = 'scp -o StrictHostKeyChecking=no -P ' + \
                str(self.upload_config['port']) + ' -i ' + \
                keyfile + ' ' + fn + ' ' + address
            if os.system(cmd) == 0:
                os.remove(fn)
            else:
                logger.debug('Could not upload ' + fn + ' to ' + server)
                remaining.append(fn)
        # keep only the files that failed to upload
        self.filenames_to_upload[:] = remaining
def test_source_package(self, mock_conf):
    """Check file shipped in the mycroft res folder."""
    mock_conf.get.return_value = test_config
    expected_path = join(MYCROFT_ROOT_PATH, 'mycroft', 'res', 'text',
                         'en-us', 'and.word')
    resolved = resolve_resource_file('text/en-us/and.word')
    self.assertEqual(normpath(resolved), normpath(expected_path))
def listen(self, source, emitter):
    """Listens for chunks of audio that Mycroft should perform STT on.

    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.

    Args:
        source (AudioSource):  Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when recording
                                begins and ends.

    Returns:
        AudioData: audio with the user's utterance, minus the wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"

    #        bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection.  This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    #       speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)

    LOG.debug("Waiting for wake word...")
    self._wait_until_wake_word(source, sec_per_buffer)
    if self._stop_signaled:
        # shutdown requested while waiting; implicitly returns None
        return

    LOG.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")

    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.  Mute while playing so the sound
    # itself is not recorded.
    if self.config.get('confirm_listening'):
        audio_file = resolve_resource_file(
            self.config.get('sounds').get('start_listening'))
        if audio_file:
            source.mute()
            play_wav(audio_file).wait()
            source.unmute()

    frame_data = self._record_phrase(source, sec_per_buffer)
    audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    if self.save_utterances:
        # keep a wav copy of the utterance for debugging/training
        LOG.info("Recording utterance")
        stamp = str(datetime.datetime.now())
        filename = "/tmp/mycroft_utterance%s.wav" % stamp
        with open(filename, 'wb') as filea:
            filea.write(audio_data.get_wav_data())
    LOG.debug("Thinking...")
    return audio_data
def test_dot_mycroft(self, mock_isfile, mock_conf):
    """Files present under ~/.mycroft/ are resolved from there."""
    mock_conf.get.return_value = test_config
    # pretend only paths under .mycroft/ exist
    mock_isfile.side_effect = lambda path: '.mycroft/' in path
    self.assertEqual(resolve_resource_file('1984.txt'),
                     expanduser('~/.mycroft/1984.txt'))
def handle_record_end():
    """Log the end of recording, optionally play the confirmation sound,
    and announce the event on the message bus."""
    logger.info("End Recording...")
    if config.get('confirm_listening'):
        snd = resolve_resource_file(
            config.get('sounds').get('end_listening'))
        if snd:
            play_wav(snd)
    ws.emit(Message('recognizer_loop:record_end'))
def show_pages(self, page_names, index=0, override_idle=None,
               override_animations=False):
    """Begin showing the list of pages in the GUI.

    Arguments:
        page_names (list): List of page names (str) to display, such as
                           ["Weather.qml", "Forecast.qml", "Details.qml"]
        index (int): Page number (0-based) to show initially.  For the
                     above list a value of 1 would start on "Forecast.qml"
        override_idle (boolean, int):
            True: Takes over the resting page indefinitely
            (int): Delays resting page for the specified number of
                   seconds.
        override_animations (boolean):
            True: Disables showing all platform skill animations.
            False: 'Default' always show animations.

    Raises:
        ValueError: if page_names is not a list or index is out of range.
        FileNotFoundError: if a named page cannot be resolved.
    """
    if not isinstance(page_names, list):
        raise ValueError('page_names must be a list')

    # Off-by-one fix: `index > len(page_names)` let index == len through
    # and page_names[index] below raised IndexError instead.
    if index >= len(page_names):
        raise ValueError('Default index is larger than page list length')

    self.page = page_names[index]

    # First sync any data...
    data = self.__session_data.copy()
    data.update({'__from': self.skill.skill_id})
    self.skill.bus.emit(Message("gui.value.set", data))

    # Convert pages to full reference
    page_urls = []
    for name in page_names:
        if name.startswith("SYSTEM"):
            page = resolve_resource_file(join('ui', name))
        else:
            page = self.skill.find_resource(name, 'ui')
        if page:
            if self.config.get('remote'):
                page_urls.append(self.remote_url + "/" + page)
            else:
                page_urls.append("file://" + page)
        else:
            raise FileNotFoundError("Unable to find page: {}".format(name))

    self.skill.bus.emit(
        Message("gui.page.show",
                {"page": page_urls,
                 "index": index,
                 "__from": self.skill.skill_id,
                 "__idle": override_idle,
                 "__animations": override_animations}))
def handle_record_begin(self):
    """Play the 'start listening' sound (when configured) as recording
    begins."""
    LOGGER.info("Lsst - Begin Recording...")
    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if not config.get('confirm_listening'):
        return
    snd = resolve_resource_file(
        config.get('sounds').get('start_listening'))
    if snd:
        self.playsmpl(snd)
def mute_and_confirm_listening(self, source):
    """Mute the source, play the 'start listening' sound, then unmute.

    Returns:
        bool: True if the sound was found and played, otherwise False.
    """
    sound = self.config.get('sounds').get('start_listening')
    audio_file = resolve_resource_file(sound)
    if not audio_file:
        return False
    # mute while playing so the confirmation tone is not recorded
    source.mute()
    play_wav(audio_file).wait()
    source.unmute()
    return True
def test_data_dir(self, mock_isfile, mock_conf):
    """Check for file in the "configured data dir"/res/"""
    mock_conf.get.return_value = test_config
    # pretend only paths under the configured data dir exist
    mock_isfile.side_effect = lambda path: 'datadir' in path
    expected = join(test_config['data_dir'], 'res', '1984.txt')
    self.assertEqual(resolve_resource_file('1984.txt'), expected)
def handle_record_begin():
    """Log recording start, play the confirmation sound if enabled, and
    announce the event on the message bus."""
    logger.info("Begin Recording...")
    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if config.get('confirm_listening'):
        snd = resolve_resource_file(
            config.get('sounds').get('start_listening'))
        if snd:
            play_wav(snd)
    ws.emit(Message('recognizer_loop:record_begin'))
def handle_record_begin(self):
    """Confirm listening audibly, then hand control over to the local
    listener."""
    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    if self.audioconfig.get('confirm_listening'):
        sound = self.audioconfig.get('sounds').get('start_listening')
        snd_file = resolve_resource_file(sound)
        if snd_file:
            play_wav(snd_file)
    LOG.info("deactivating speech recognition")
    # order matters: put the main recognizer to sleep before starting
    # the local listener, then announce that recording has begun
    self.emit("recognizer_loop:sleep")
    self.emit("recognizer_loop:local_listener.start")
    self.emit('recognizer_loop:record_begin')
def train(self, message=None):
    """Train the intent container and signal completion.

    Args:
        message (Message): bus message; `single_thread` in its data
            forces single-threaded training.
    """
    # NOTE(review): the message=None default would crash at message.data —
    # confirm all callers pass a message, or guard with `message and ...`.
    single_thread = message.data.get('single_thread', False)
    self.finished_training_event.clear()

    LOG.info('Training...')
    self.container.train(single_thread=single_thread)
    LOG.info('Training complete.')
    self.load_config = Configuration.get()

    # Guard: resolve_resource_file() returns None when the sound is
    # missing; previously that None was passed straight to play_wav().
    file = resolve_resource_file("snd/wellcome.WAV")
    if file:
        play_wav(file)

    self.finished_training_event.set()
    self.finished_initial_train = True
def get_voc(self, voc_filename):
    """Load a .voc file from skill or mycroft-core resources.

    Arguments:
        voc_filename (str): vocabulary file name without extension

    Returns:
        list: flattened vocabulary entries

    Raises:
        FileNotFoundError: if no matching .voc file exists
    """
    # Check for both skill resources and mycroft-core resources
    voc = (self.find_resource(voc_filename + '.voc', 'vocab') or
           resolve_resource_file(
               join('text', self.lang, voc_filename + '.voc')))
    if not voc or not exists(voc):
        raise FileNotFoundError(
            'Could not find {}.voc file'.format(voc_filename))
    # load vocab and flatten into a simple list
    return list(chain(*read_vocab_file(voc)))
def load_spellings(self):
    """Load phonetic spellings of words as dictionary.

    Returns:
        dict: word -> phonetic spelling; {} when the resource file is
        missing or malformed.
    """
    path = join('text', self.lang, 'phonetic_spellings.txt')
    spellings_file = resolve_resource_file(path)
    if not spellings_file:
        return {}
    try:
        # Explicit encoding: phonetic spellings routinely contain
        # non-ASCII characters and the platform default codec may not
        # decode them (UnicodeDecodeError is a ValueError, caught below).
        with open(spellings_file, encoding='utf-8') as f:
            lines = filter(bool, f.read().split('\n'))
            lines = [i.split(':') for i in lines]
            return {key.strip(): value.strip() for key, value in lines}
    except ValueError:
        LOG.exception('Failed to load phonetic spellings.')
        return {}
def load_spellings(self):
    """Load phonetic spellings of words as dictionary"""
    path = join('text', self.lang.lower(), 'phonetic_spellings.txt')
    spellings_file = resolve_resource_file(path)
    if not spellings_file:
        return {}
    try:
        with open(spellings_file) as f:
            # drop blank lines, then split each "word: spelling" entry
            entries = [line for line in f.read().split('\n') if line]
        pairs = (entry.split(':') for entry in entries)
        return {word.strip(): spelling.strip() for word, spelling in pairs}
    except ValueError:
        LOG.exception('Failed to load phonetic spellings.')
        return {}
def initialize(self):
    """Set recorder defaults, ensure precise is installed, and start the
    optional sound backup and sample upload server."""
    self.record_process = None
    self.start_time = 0
    self.last_index = 24  # index of last pixel in countdowns
    self.source_path = self.file_system.path
    self.piep = resolve_resource_file('snd/start_listening.wav')
    self.precisefolder = self.file_system.path + "/Precise-Community-Data"
    self.settings["Name"] = self.config_core.get(
        'listener', {}).get('wake_word').replace(' ', '-')
    self.settings["soundbackup"] = self.settings.get('soundbackup', False)
    self.settings["min_free_disk"] = 100  # min mb to leave free on disk
    self.settings["rate"] = 16000  # sample rate, hertz
    self.settings["channels"] = 1  # recording channels (1 = mono)
    self.settings["file_path"] = self.file_system.path + "/data/"
    self.settings["sell_path"] = "/tmp/mycroft_wake_words"
    self.settings["duration"] = -1  # default = unknown
    self.settings["formate"] = "S16_LE"
    self.settings["selling"] = self.settings.get('selling', 15)
    self.settings["improve"] = 10
    self.settings["onlyPrecise"] = self.settings.get('onlyPrecise', True)
    self.settings["usevalidator"] = self.settings.get('usevalidator', True)
    self.settings['savewakewords'] = self.settings.get(
        'savewakewords', False)
    self.settings['oploadserver'] = self.settings.get(
        'oploadserver', False)
    self.settings["wwnr"] = self.settings.get('wwnr', 12)
    self.settings["nowwnr"] = self.settings.get('nowwnr', 12)
    self.settings["repo"] = self.settings.get(
        'repo', 'https://github.com/MycroftAI/Precise-Community-Data.git')

    if not os.path.isdir(self.file_system.path +
                         "/precise/mycroft_precise.egg-info"):
        self.log.info("no precise installed. beginn installation")
        _thread.start_new_thread(self.install_precise_source, ())
    if self.settings["soundbackup"] is True:
        _thread.start_new_thread(self.download_sounds, ())
    self.save_wakewords()
    if self.settings['oploadserver']:
        # Serve recorded samples over HTTP; own process group (setsid)
        # so the server can be killed cleanly later.
        self.recording_server = subprocess.Popen(
            'python -m http.server 8082',
            cwd=self.file_system.path + "/data",
            preexec_fn=os.setsid, shell=True)
        self.log.info("load server success")
    self.recordfile = ""
def get_global_config_definition(context, config, value):
    """Get config definitions included with Mycroft.

    Args:
        context: behave test context
        config: config value to fetch from the file
        value: predefined value to fetch

    Returns:
        Patch dictionary or None.
    """
    resource = join('text', context.lang, 'configurations.json')
    configs_path = resolve_resource_file(resource)
    return get_config_file_definition(configs_path, config, value)
def listen(self, source, emitter):
    """Listens for chunks of audio that Mycroft should perform STT on.

    This will listen continuously for a wake-up-word, then return the
    audio chunk containing the spoken phrase that comes immediately
    afterwards.

    Args:
        source (AudioSource):  Source producing the audio chunks
        emitter (EventEmitter): Emitter for notifications of when recording
                                begins and ends.

    Returns:
        AudioData: audio with the user's utterance, minus the wake-up-word
    """
    assert isinstance(source, AudioSource), "Source must be an AudioSource"

    #        bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
    sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

    # Every time a new 'listen()' request begins, reset the threshold
    # used for silence detection.  This is as good of a reset point as
    # any, as we expect the user and Mycroft to not be talking.
    # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
    #       speech is detected, but there is no code to actually do that.
    self.adjust_for_ambient_noise(source, 1.0)

    logger.debug("Waiting for wake word...")
    self._wait_until_wake_word(source, sec_per_buffer)

    logger.debug("Recording...")
    emitter.emit("recognizer_loop:record_begin")

    # If enabled, play a wave file with a short sound to audibly
    # indicate recording has begun.
    # NOTE(review): unlike later revisions, the source is not muted while
    # the confirmation sound plays, so the tone may end up in the
    # recording — confirm whether that is acceptable here.
    if config.get('confirm_listening'):
        file = resolve_resource_file(
            config.get('sounds').get('start_listening'))
        if file:
            play_wav(file)

    frame_data = self._record_phrase(source, sec_per_buffer)
    audio_data = self._create_audio_data(frame_data, source)
    emitter.emit("recognizer_loop:record_end")
    logger.debug("Thinking...")
    return audio_data
def test_get(self):
    """get() returns a line from the resource file, or the phrase itself
    when no matching resource exists."""
    phrase = 'i didn\'t catch that'
    res_file = pathlib.Path('text/en-us/').joinpath(phrase + '.dialog')
    print(res_file)
    resource = resolve_resource_file(str(res_file))
    with open(resource) as f:
        expected = [line.strip() for line in f]

    self.assertIn(get(phrase), expected)

    # Check that the filename is returned if phrase is missing for lang
    self.assertEqual(get(phrase, lang='ne-ne'), phrase)

    # Check that name is returned if phrase is missing
    self.assertEqual(get('testing aardwark'), 'testing aardwark')
def acknowledge(self):
    """Acknowledge a successful request.

    This method plays a sound to acknowledge a request that does not
    require a verbal response. This is intended to provide simple feedback
    to the user that their request was handled successfully.
    """
    audio_file = resolve_resource_file(
        self.config_core.get('sounds').get('acknowledge'))
    if not audio_file:
        LOG.warning("Could not find 'acknowledge' audio file!")
        return
    # play_audio_file returns a falsy value when playback could not start
    if not play_audio_file(audio_file):
        LOG.warning("Unable to play 'acknowledge' audio file!")
def voc_match(self, utt, voc_filename, lang=None, exact=False):
    """Determine if the given utterance contains the vocabulary provided.

    Checks for vocabulary match in the utterance instead of the other
    way around to allow the user to say things like "yes, please" and
    still match against "Yes.voc" containing only "yes". The method first
    checks in the current skill's .voc files and secondly the "res/text"
    folder of mycroft-core. The result is cached to avoid hitting the
    disk each time the method is called.

    Arguments:
        utt (str): Utterance to be tested
        voc_filename (str): Name of vocabulary file (e.g. 'yes' for
                            'res/text/en-us/yes.voc')
        lang (str): Language code, defaults to self.lang
        exact (bool): comparison using "==" instead of "in"

    Returns:
        bool: True if the utterance contains the given vocabulary

    Raises:
        FileNotFoundError: if no matching .voc file can be located
    """
    lang = lang or self.lang
    cache_key = lang + voc_filename
    if cache_key not in self.voc_match_cache:
        # Check for both skill resources and mycroft-core resources
        voc = self.find_resource(voc_filename + '.voc', 'vocab')
        if not voc:
            # Check for vocab in mycroft core resources
            voc = resolve_resource_file(
                join('text', lang, voc_filename + '.voc'))
        if not voc or not exists(voc):
            raise FileNotFoundError(
                'Could not find {}.voc file'.format(voc_filename))
        # load vocab and flatten into a simple list
        vocab = read_vocab_file(voc)
        self.voc_match_cache[cache_key] = list(chain(*vocab))
    if utt:
        if exact:
            # Check for exact match
            return any(i.strip() == utt
                       for i in self.voc_match_cache[cache_key])
        else:
            # Check for matches against complete words
            # NOTE(review): vocab entries are interpolated into the regex
            # unescaped — entries containing regex metacharacters will
            # misbehave; confirm whether that is intentional.
            return any([
                re.match(r'.*\b' + i + r'\b.*', utt)
                for i in self.voc_match_cache[cache_key]
            ])
    else:
        return False
def topics(self):
    """Return the topic vocabulary lines, cached per language."""
    voc_filename = "topics"
    cache_key = self.lang + voc_filename
    if cache_key in self.voc_match_cache:
        return self.voc_match_cache[cache_key]

    # Check for both skill resources and mycroft-core resources
    voc = self.find_resource(voc_filename + '.voc', 'vocab')
    if not voc:
        voc = resolve_resource_file(
            join('text', self.lang, voc_filename + '.voc'))
    if not voc or not exists(voc):
        raise FileNotFoundError(
            'Could not find {}.voc file'.format(voc_filename))

    with open(voc) as f:
        self.voc_match_cache[cache_key] = f.read().splitlines()
    return self.voc_match_cache[cache_key]
def record_wav(self):
    """Record ~10 seconds of audio to a temporary wav file.

    Plays the 'start listening' sound, mutes Mycroft's own mic handling
    while arecord captures audio, then unmutes.

    Returns:
        str: path of the recorded wav file
    """
    import os

    audio_file = resolve_resource_file(
        self.config_core.get('sounds').get('start_listening'))
    if audio_file:
        play_wav(audio_file).wait()
    self.bus.emit(Message('mycroft.mic.mute'))
    try:
        fd, tmp_file = mkstemp('.wav')
        # mkstemp hands back an open descriptor; arecord writes to the
        # path itself, so close the descriptor immediately — previously
        # it was leaked on every call.
        os.close(fd)
        subprocess.Popen([
            "arecord",
            "-f", "S16_LE",
            "-r", str(16000),
            "-c", str(1),
            "-d", str(10),
            tmp_file
        ]).wait()
    finally:
        self.bus.emit(Message('mycroft.mic.unmute'))
    return tmp_file
def get_response(self, dialog='', data=None, announcement='',
                 validator=None, on_fail=None, num_retries=-1):
    """ Prompt user and wait for response

    The given dialog or announcement will be spoken, the immediately
    listen and return user response.  The response can optionally be
    validated.

    Example:
        color = self.get_response('ask.favorite.color')

    Args:
        dialog (str): Announcement dialog to read to the user
        data (dict): Data used to render the dialog
        announcement (str): Literal string (overrides dialog)
        validator (any): Function with following signature
            def validator(utterance):
                return utterance != "red"
        on_fail (any): Dialog or function returning literal string
                       to speak on invalid input.  For example:
            def on_fail(utterance):
                return "nobody likes the color red, pick another"
        num_retries (int): Times to ask user for input, -1 for infinite
            NOTE: User can not respond and timeout or say "cancel" to stop

    Returns:
        str: User's reply or None if timed out or canceled
    """
    data = data or {}

    def get_announcement():
        # literal announcement wins over a rendered dialog file
        return announcement or self.dialog_renderer.render(dialog, data)

    if not get_announcement():
        raise ValueError('announcement or dialog message required')

    def on_fail_default(utterance):
        # default failure feedback: render on_fail dialog if given,
        # otherwise just repeat the original prompt
        fail_data = data.copy()
        fail_data['utterance'] = utterance
        if on_fail:
            return self.dialog_renderer.render(on_fail, fail_data)
        else:
            return get_announcement()

    # TODO: Load with something like mycroft.dialog.get_all()
    cancel_voc = 'text/' + self.lang + '/cancel.voc'
    # NOTE(review): resolve_resource_file() can return None; open(None)
    # would raise TypeError here — confirm the cancel.voc resource is
    # always shipped for the active language.
    with open(resolve_resource_file(cancel_voc)) as f:
        cancel_words = list(filter(bool, f.read().split('\n')))

    def is_cancel(utterance):
        return utterance in cancel_words

    def validator_default(utterance):
        # accept anything except 'cancel'
        return not is_cancel(utterance)

    validator = validator or validator_default
    on_fail_fn = on_fail if callable(on_fail) else on_fail_default

    self.speak(get_announcement(), expect_response=True)
    num_fails = 0
    while True:
        response = self.__get_response()

        if response is None:
            # if nothing said, prompt one more time
            num_none_fails = 1 if num_retries < 0 else num_retries
            if num_fails >= num_none_fails:
                return None
        else:
            if validator(response):
                return response

            # catch user saying 'cancel'
            if is_cancel(response):
                return None

        num_fails += 1
        if 0 < num_retries < num_fails:
            return None

        line = on_fail_fn(response)
        self.speak(line, expect_response=True)