def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
    """Set up the Precise hotword engine and launch its stream process."""
    super(PreciseHotword, self).__init__(key_phrase, config, lang)
    self.update_freq = 24  # model update frequency, in hours

    precise_config = Configuration.get()['precise']
    self.dist_url = precise_config['dist_url']
    self.models_url = precise_config['models_url']
    self.exe_name = 'precise-stream'

    # Model file is named after the configured wake word.
    wake_word = Configuration.get()['listener']['wake_word']
    model_name = wake_word.replace(' ', '-') + '.pb'
    model_folder = expanduser('~/.mycroft/precise')
    if not isdir(model_folder):
        mkdir(model_folder)
    model_path = join(model_folder, model_name)

    exe_file = self.find_download_exe()
    LOG.info('Found precise executable: ' + exe_file)
    self.update_model(model_name, model_path)

    # Spawn the precise-stream subprocess and watch its stdout.
    args = [exe_file, model_path, '1024']
    self.proc = Popen(args, stdin=PIPE, stdout=PIPE)
    self.has_found = False
    self.cooldown = 20
    watcher = Thread(target=self.check_stdout)
    watcher.daemon = True
    watcher.start()
def get_skills_dir():
    """Return the skills directory.

    The SKILLS_DIR environment variable takes precedence; otherwise the
    path is built from the configured data_dir and msm skills directory.
    """
    override = expanduser(os.environ.get('SKILLS_DIR', ''))
    if override:
        return override
    core_config = Configuration.get()
    return expanduser(join(
        core_config['data_dir'],
        core_config['skills']['msm']['directory']
    ))
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
    """Base hotword setup: key phrase, phoneme estimate and config."""
    self.key_phrase = str(key_phrase).lower()

    # Rough estimate: one phoneme per two characters.
    # NOTE(review): true division yields a float on Python 3 — confirm
    # downstream consumers tolerate a non-integer phoneme count.
    self.num_phonemes = len(key_phrase) / 2 + 1

    if config is None:
        hot_words = Configuration.get().get("hot_words", {})
        config = hot_words.get(self.key_phrase, {})
    self.config = config
    self.listener_config = Configuration.get().get("listener", {})
    self.lang = str(self.config.get("lang", lang)).lower()
def __init__(self, args):
    """Parse CLI args, load optional config, extend sys.path, connect."""
    params = self.__build_params(args)

    if params.config:
        # Layer the user-supplied config file on top of the defaults.
        Configuration.get([params.config])

    if exists(params.lib) and isdir(params.lib):
        sys.path.append(params.lib)
    sys.path.append(params.dir)

    self.dir = params.dir
    self.enable_intent = params.enable_intent
    self.__init_client(params)
def run(self):
    """Load skills and update periodically from disk and internet."""
    self.remove_git_locks()
    self._connected_event.wait()
    has_loaded = False

    # Check once, up front, whether skill updates are enabled.
    update = Configuration.get()["skills"]["auto_update"]

    # Scan the skills folder; reload any skill whose files changed on disk.
    while not self._stop_event.is_set():
        # Download skill updates once an hour when enabled.
        if update and time.time() >= self.next_download:
            self.download_skills()

        # Collect every skill directory currently present.
        skill_paths = glob(join(self.msm.skills_dir, '*/'))
        still_loading = False
        for skill_path in skill_paths:
            loading = self._load_or_reload_skill(skill_path)
            still_loading = loading or still_loading

        # Announce readiness exactly once, after the first quiet scan.
        if not has_loaded and not still_loading and skill_paths:
            has_loaded = True
            self.bus.emit(Message('mycroft.skills.initialized'))

        self._unload_removed(skill_paths)
        # Pause briefly before beginning the next scan.
        time.sleep(2)
def main():
    """Entry point for the voice service: wire the recognizer loop to the bus."""
    global ws
    global loop
    global config
    lock = PIDLock("voice")  # prevent multiple voice-service instances
    ws = WebsocketClient()
    config = Configuration.get()
    Configuration.init(ws)
    loop = RecognizerLoop()
    loop.on('recognizer_loop:utterance', handle_utterance)
    loop.on('speak', handle_speak)
    loop.on('recognizer_loop:record_begin', handle_record_begin)
    loop.on('recognizer_loop:wakeword', handle_wakeword)
    loop.on('recognizer_loop:record_end', handle_record_end)
    loop.on('recognizer_loop:no_internet', handle_no_internet)
    ws.on('open', handle_open)
    ws.on('complete_intent_failure', handle_complete_intent_failure)
    ws.on('recognizer_loop:sleep', handle_sleep)
    ws.on('recognizer_loop:wake_up', handle_wake_up)
    ws.on('mycroft.mic.mute', handle_mic_mute)
    ws.on('mycroft.mic.unmute', handle_mic_unmute)
    ws.on("mycroft.paired", handle_paired)
    ws.on('recognizer_loop:audio_output_start', handle_audio_start)
    ws.on('recognizer_loop:audio_output_end', handle_audio_end)
    ws.on('mycroft.stop', handle_stop)
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()
    try:
        loop.run()
    except KeyboardInterrupt as e:
        # Fix: "except KeyboardInterrupt, e" is Python-2-only syntax;
        # "as e" is valid on both Python 2.6+ and Python 3.
        LOG.exception(e)
        sys.exit()
def __init__(self, bus, schedule_file='schedule.json'):
    """Create an event scheduler thread.

    Sends messages at predetermined times to the registered targets.

    Args:
        bus:            Mycroft messagebus (mycroft.messagebus)
        schedule_file:  file used to persist pending events on shutdown
    """
    super(EventScheduler, self).__init__()
    data_dir = expanduser(Configuration.get()['data_dir'])

    self.events = {}
    self.event_lock = Lock()
    self.bus = bus
    self.isRunning = True
    self.schedule_file = join(data_dir, schedule_file)
    if self.schedule_file:
        # Restore any events persisted by a previous run.
        self.load()

    self.bus.on('mycroft.scheduler.schedule_event',
                self.schedule_event_handler)
    self.bus.on('mycroft.scheduler.remove_event',
                self.remove_event_handler)
    self.bus.on('mycroft.scheduler.update_event',
                self.update_event_handler)
    self.bus.on('mycroft.scheduler.get_event', self.get_event_handler)
    self.start()
def create():
    """Factory method to create a TTS engine based on configuration.

    The configuration file ``mycroft.conf`` contains a ``tts`` section with
    the name of a TTS module to be read by this method.

    "tts": {
        "module": <engine_name>
    }

    Returns:
        A validated TTS instance.

    Raises:
        ValueError: if the configured module has no registered class.
    """
    from mycroft.tts.remote_tts import RemoteTTS
    config = Configuration.get().get('tts', {})
    module = config.get('module', 'mimic')
    # Fix: guard against a missing per-module section so a sparse config
    # does not raise AttributeError on None.
    module_config = config.get(module, {})
    lang = module_config.get('lang')
    voice = module_config.get('voice')
    clazz = TTSFactory.CLASSES.get(module)
    if clazz is None:
        # Fix: previously issubclass(None, ...) raised an opaque TypeError.
        raise ValueError('TTS module not found: ' + module)
    if issubclass(clazz, RemoteTTS):
        # Remote engines additionally need the service URL.
        url = module_config.get('url')
        tts = clazz(lang, voice, url)
    else:
        tts = clazz(lang, voice)
    tts.validator.validate()
    return tts
def main():
    """Entry point for the audio service."""
    global ws
    global config
    ws = WebsocketClient()
    Configuration.init(ws)
    config = Configuration.get()
    speech.init(ws)

    # Setup control of pulse audio
    setup_pulseaudio_handlers(config.get('Audio').get('pulseaudio'))

    def echo(message):
        # Best-effort log filter: only audio-service messages get through.
        try:
            _message = json.loads(message)
            if 'mycroft.audio.service' not in _message.get('type'):
                return
            message = json.dumps(_message)
        except Exception:
            # Fix: narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are not swallowed; malformed messages are
            # still logged as-is below.
            pass
        LOG.debug(message)

    # Fix: corrected "Staring" -> "Starting" in the startup log message.
    LOG.info("Starting Audio Services")
    ws.on('message', echo)
    ws.once('open', load_services_callback)
    try:
        ws.run_forever()
    except KeyboardInterrupt as e:
        # Fix: "as" syntax replaces the Python-2-only comma form.
        LOG.exception(e)
        speech.shutdown()
        sys.exit()
def get(phrase, lang=None, context=None):
    """Look up a resource file for the given phrase.

    If no translation file is found the phrase itself is returned.
    This will use the default language for translations.

    Args:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        from mycroft.configuration import Configuration
        lang = Configuration.get().get("lang")

    filename = "text/" + lang.lower() + "/" + phrase + ".dialog"
    template = resolve_resource_file(filename)
    if not template:
        LOG.debug("Resource file not found: " + filename)
        return phrase

    renderer = MustacheDialogRenderer()
    renderer.load_template_file("template", template)
    return renderer.render("template", context or {})
def __init__(self, emitter):
    """Wire the intent service to the messagebus and set up context."""
    self.config = Configuration.get().get('context', {})
    self.engine = IntentDeterminationEngine()

    # Dictionary for translating a skill id to a name
    self.skill_names = {}

    # Context related initializations
    self.context_keywords = self.config.get('keywords', [])
    self.context_max_frames = self.config.get('max_frames', 3)
    self.context_timeout = self.config.get('timeout', 2)
    self.context_greedy = self.config.get('greedy', False)
    self.context_manager = ContextManager(self.context_timeout)

    self.emitter = emitter
    self.emitter.on('register_vocab', self.handle_register_vocab)
    self.emitter.on('register_intent', self.handle_register_intent)
    self.emitter.on('recognizer_loop:utterance', self.handle_utterance)
    self.emitter.on('detach_intent', self.handle_detach_intent)
    self.emitter.on('detach_skill', self.handle_detach_skill)
    # Context related handlers
    self.emitter.on('add_context', self.handle_add_context)
    self.emitter.on('remove_context', self.handle_remove_context)
    self.emitter.on('clear_context', self.handle_clear_context)
    # Converse method
    self.emitter.on('skill.converse.response',
                    self.handle_converse_response)
    self.emitter.on('mycroft.speech.recognition.unknown',
                    self.reset_converse)
    self.emitter.on('mycroft.skills.loaded', self.update_skill_name_dict)

    def _on_active_skill_request(message):
        # Track which skill most recently requested to be active.
        self.add_active_skill(message.data['skill_id'])

    self.emitter.on('active_skill_request', _on_active_skill_request)
    self.active_skills = []  # [skill_id, timestamp]
    self.converse_timeout = 5  # minutes to prune active_skills
def __init__(self, bus):
    """Set up skill loading/updating state and messagebus handlers."""
    super(SkillManager, self).__init__()
    self._stop_event = Event()
    self._connected_event = Event()
    self.loaded_skills = {}
    self.bus = bus
    self.enclosure = EnclosureAPI(bus)

    # Schedule install/update of default skill
    self.msm = self.create_msm()
    self.num_install_retries = 0

    self.update_interval = Configuration.get()['skills']['update_interval']
    self.update_interval = int(self.update_interval * 60 * MINUTES)
    self.dot_msm = join(self.msm.skills_dir, '.msm')
    if exists(self.dot_msm):
        # Resume the update schedule from the last successful run.
        self.next_download = (os.path.getmtime(self.dot_msm) +
                              self.update_interval)
    else:
        # Never updated before: schedule a download immediately.
        self.next_download = time.time() - 1

    # Conversation management
    bus.on('skill.converse.request', self.handle_converse_request)
    # Update on initial connection
    bus.on('mycroft.internet.connected',
           lambda x: self._connected_event.set())
    # Update upon request
    bus.on('skillmanager.update', self.schedule_now)
    bus.on('skillmanager.list', self.send_skill_list)
    bus.on('skillmanager.deactivate', self.deactivate_skill)
    bus.on('skillmanager.keep', self.deactivate_except)
    bus.on('skillmanager.activate', self.activate_skill)
def main():
    """Entry point for the legacy skills service."""
    global ws
    # Create PID file to prevent multiple instances of this service.
    mycroft.lock.Lock('skills')
    # Connect this skill-management process to the websocket.
    ws = WebsocketClient()
    Configuration.init(ws)
    ignore_logs = Configuration.get().get("ignore_logs")

    # Listen for messages and echo them for logging.
    def _echo(message):
        try:
            _message = json.loads(message)
            if _message.get("type") in ignore_logs:
                return
            if _message.get("type") == "registration":
                # do not log tokens from registration messages
                _message["data"]["token"] = None
                message = json.dumps(_message)
        except BaseException:
            pass
        LOG('SKILLS').debug(message)

    ws.on('message', _echo)
    # Startup will be called after the websocket is fully live.
    ws.once('open', _starting_up)
    ws.run_forever()
def __init__(self, wake_word_recognizer):
    """Configure the responsive recognizer from listener settings."""
    self.config = Configuration.get()
    listener_config = self.config.get('listener')
    self.upload_url = listener_config['wake_word_upload']['url']
    self.upload_disabled = listener_config['wake_word_upload']['disable']
    self.wake_word_name = wake_word_recognizer.key_phrase
    self.overflow_exc = listener_config.get('overflow_exception', False)

    speech_recognition.Recognizer.__init__(self)
    self.wake_word_recognizer = wake_word_recognizer
    self.audio = pyaudio.PyAudio()
    self.multiplier = listener_config.get('multiplier')
    self.energy_ratio = listener_config.get('energy_ratio')

    # check the config for the flag to save wake words.
    self.save_utterances = listener_config.get('record_utterances', False)

    self.upload_lock = Lock()
    self.filenames_to_upload = []
    self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
    self._stop_signaled = False

    # The maximum audio in seconds to keep for transcribing a phrase.
    # The wake word must fit in this time.
    num_phonemes = wake_word_recognizer.num_phonemes
    len_phoneme = listener_config.get('phoneme_duration', 120) / 1000.0
    self.TEST_WW_SEC = num_phonemes * len_phoneme
    self.SAVED_WW_SEC = max(3, self.TEST_WW_SEC)

    try:
        self.account_id = DeviceApi().get()['user']['uuid']
    except (requests.RequestException, AttributeError):
        # Offline or unpaired: fall back to an anonymous account id.
        self.account_id = '0'
def __init__(self):
    """Load STT configuration: language, module section and credentials."""
    core_config = Configuration.get()
    self.lang = str(self.init_language(core_config))
    stt_config = core_config.get("stt", {})
    # Pick the per-module config section named by "stt.module".
    self.config = stt_config.get(stt_config.get("module"), {})
    self.credential = self.config.get("credential", {})
    self.recognizer = Recognizer()
def _load_config(self):
    """ Load configuration parameters from configuration """
    config = Configuration.get()
    self.config_core = config
    # Hash lets callers detect config changes and trigger a reload.
    self._config_hash = hash(str(config))
    self.lang = config.get('lang')
    self.config = config.get('listener')

    sample_rate = self.config.get('sample_rate')
    device_index = self.config.get('device_index')
    device_name = self.config.get('device_name')
    if not device_index and device_name:
        device_index = find_input_device(device_name)

    LOG.debug('Using microphone (None = default): ' + str(device_index))

    self.microphone = MutableMicrophone(device_index, sample_rate,
                                        mute=self.mute_calls > 0)
    # TODO:19.02 - channels are not been used, remove from mycroft.conf
    # and from code.
    self.microphone.CHANNELS = self.config.get('channels')
    self.wakeword_recognizer = self.create_wake_word_recognizer()
    # TODO - localization
    self.wakeup_recognizer = self.create_wakeup_recognizer()
    self.responsive_recognizer = ResponsiveRecognizer(
        self.wakeword_recognizer)
    self.state = RecognizerLoopState()
def main():
    """Start the websocket messagebus service."""
    import tornado.options
    lock = Lock("service")
    tornado.options.parse_command_line()

    def reload_hook():
        """ Hook to release lock when autoreload is triggered. """
        lock.delete()

    autoreload.add_reload_hook(reload_hook)
    config = Configuration.get().get("websocket")

    host = config.get("host")
    port = config.get("port")
    route = config.get("route")
    validate_param(host, "websocket.host")
    validate_param(port, "websocket.port")
    validate_param(route, "websocket.route")

    routes = [(route, WebsocketEventHandler)]
    application = web.Application(routes, **settings)
    application.listen(port, host)
    ioloop.IOLoop.instance().start()
def validate_connection(self):
    """Ensure IBM Watson credentials are configured; raise otherwise."""
    config = Configuration.get().get("tts", {}).get("watson", {})
    user = config.get("user") or config.get("username")
    password = config.get("password")
    # Guard clause: both a user and a password are required.
    if not (user and password):
        raise ValueError('user and/or password for IBM tts is not defined')
def mimic_fallback_tts(utterance, ident):
    """Speak via local Mimic when the remote TTS connection is lost."""
    config = Configuration.get()
    tts_config = config.get('tts', {}).get("mimic", {})
    lang = config.get("lang", "en-us")
    fallback_engine = Mimic(lang, tts_config)
    fallback_engine.init(bus)
    fallback_engine.execute(utterance, ident)
def __init__(self, lang, voice="en-US_AllisonVoice",
             url="https://stream.watsonplatform.net/text-to-speech/api"):
    """Set up Watson TTS with HTTP basic auth from configuration."""
    super(WatsonTTS, self).__init__(lang, voice, url, '/v1/synthesize',
                                    WatsonTTSValidator(self))
    self.type = "wav"
    self.config = Configuration.get().get("tts", {}).get("watson", {})
    # Accept either "user" or the legacy "username" key.
    user = self.config.get("user") or self.config.get("username")
    password = self.config.get("password")
    self.auth = HTTPBasicAuth(user, password)
def main():
    """Record audio from the configured microphone and play it back.

    Command-line utility for testing microphone input: optionally lists
    input devices, records for a fixed duration, then plays the result.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        default=False, help="Add extra output regarding the recording")
    parser.add_argument(
        '-l', '--list', dest='show_devices', action='store_true',
        # Fix: corrected "availabile" typo in the user-facing help text.
        default=False, help="List all available input devices")
    args = parser.parse_args()

    if args.show_devices:
        print(" Initializing... ")
        pa = pyaudio.PyAudio()
        print(" ====================== Audio Devices ======================")
        print(" Index Device Name")
        for device_index in range(pa.get_device_count()):
            dev = pa.get_device_info_by_index(device_index)
            if dev['maxInputChannels'] > 0:
                print(' {}: {}'.format(device_index, dev['name']))
        print()

    config = Configuration.get()
    if "device_name" in config["listener"]:
        dev = config["listener"]["device_name"]
    elif "device_index" in config["listener"]:
        dev = "Device at index {}".format(config["listener"]["device_index"])
    else:
        dev = "Default device"
    samplerate = config["listener"]["sample_rate"]
    play_cmd = config["play_wav_cmdline"].replace("%1", "WAV_FILE")
    print(" ========================== Info ===========================")
    print(" Input device: {} @ Sample rate: {} Hz".format(dev, samplerate))
    print(" Playback commandline: {}".format(play_cmd))
    print()
    print(" ===========================================================")
    print(" == STARTING TO RECORD, MAKE SOME NOISE! ==")
    print(" ===========================================================")

    if not args.verbose:
        with mute_output():
            record(args.filename, args.duration)
    else:
        record(args.filename, args.duration)

    print(" ===========================================================")
    # Fix: this string literal was broken across two lines by a bad merge;
    # rejoined into a single banner line.
    print(" == DONE RECORDING, PLAYING BACK... ==")
    print(" ===========================================================")
    status = play_wav(args.filename).wait()
    if status:
        # Fix: corrected "occured" typo in the user-facing error message.
        print('An error occurred while playing back audio ({})'.format(
            status))
def __init__(self, lang, config):
    """Set up Bing TTS from the 'tts.bing' configuration section."""
    super(BingTTS, self).__init__(lang, config, BingTTSValidator(self))
    self.type = 'wav'
    from bingtts import Translator
    self.config = Configuration.get().get("tts", {}).get("bing", {})
    api_key = self.config.get("api_key")
    self.bing = Translator(api_key)
    self.gender = self.config.get("gender", "Male")
    self.format = self.config.get("format", "riff-16khz-16bit-mono-pcm")
def __init__(self, path):
    """Build API client settings from the layered local configuration."""
    self.path = path
    config = Configuration.get([LocalConf(DEFAULT_CONFIG),
                                LocalConf(SYSTEM_CONFIG),
                                LocalConf(USER_CONFIG)],
                               cache=False)
    server_config = config.get("server")
    self.url = server_config.get("url")
    self.version = server_config.get("version")
    self.identity = IdentityManager.get()
def create_hotword(cls, hotword="hey mycroft", config=None, lang="en-us",
                   loop=None):
    """Create a hotword engine, falling back to pocketsphinx on failure."""
    if not config:
        config = Configuration.get()['hotwords']
    config = config[hotword]
    module = config.get("module", "precise")
    # Try the configured module first, then the pocketsphinx loader, and
    # finally a bare pocketsphinx instance as a last resort.
    return (cls.load_module(module, hotword, config, lang, loop) or
            cls.load_module('pocketsphinx', hotword, config, lang, loop) or
            cls.CLASSES['pocketsphinx']())
def get():
    """Return the stored core/enclosure versions, or None values if absent."""
    data_dir = expanduser(Configuration.get()['data_dir'])
    version_file = join(data_dir, 'version.json')
    if exists(version_file) and isfile(version_file):
        try:
            with open(version_file) as f:
                return json.load(f)
        except Exception:
            LOG.error("Failed to load version from '%s'" % version_file)
    # Missing or unreadable file: report unknown versions.
    return {"coreVersion": None, "enclosureVersion": None}
def handle_speak(event):
    """ Handle "speak" message """
    config = Configuration.get()
    # NOTE(review): re-initializing Configuration on every speak message
    # looks unintended — confirm whether this belongs in service startup.
    Configuration.init(bus)
    global _last_stop_signal

    # Get conversation ID
    if event.context and 'ident' in event.context:
        ident = event.context['ident']
    else:
        ident = 'unknown'

    start = time.time()  # Time of speech request
    with lock:
        stopwatch = Stopwatch()
        stopwatch.start()
        utterance = event.data['utterance']
        if event.data.get('expect_response', False):
            # When expect_response is requested, the listener will be restarted
            # at the end of the next bit of spoken audio.
            bus.once('recognizer_loop:audio_output_end', _start_listener)

        # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
        # for 30 seconds fairly often, so we don't want to break on periods
        # (decreasing the chance of encountering the block). But we will
        # keep the split for non-Picroft installs since it give user feedback
        # faster on longer phrases.
        #
        # TODO: Remove or make an option? This is really a hack, anyway,
        # so we likely will want to get rid of this when not running on Mimic
        if (config.get('enclosure', {}).get('platform') != "picroft" and
                len(re.findall('<[^>]*>', utterance)) == 0):
            # Split on sentence boundaries (., ; or ?) while avoiding
            # abbreviation-like sequences; each chunk is spoken separately.
            chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\;|\?)\s',
                              utterance)
            for chunk in chunks:
                # Check if somthing has aborted the speech
                if (_last_stop_signal > start or
                        check_for_signal('buttonPress')):
                    # Clear any newly queued speech
                    tts.playback.clear()
                    break
                try:
                    mute_and_speak(chunk, ident)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    LOG.error('Error in mute_and_speak', exc_info=True)
        else:
            mute_and_speak(utterance, ident)

        stopwatch.stop()
    report_timing(ident, 'speech', stopwatch, {'utterance': utterance,
                                               'tts': tts.__class__.__name__})
def write_skills_data(data: dict):
    """Write the skills manifest to disk and upload it when paired.

    Args:
        data: skills manifest dictionary to persist and optionally upload.
    """
    skills_data_file = expanduser('~/.mycroft/skills.json')
    with open(skills_data_file, 'w') as f:
        json.dump(data, f)

    # Fix: is_paired is a callable; referencing it without calling made the
    # condition always truthy, so the manifest was uploaded unconditionally.
    if (is_paired() and
            Configuration.get()['skills'].get('upload_skill_manifest')):
        upload_data = SkillManager.convert_skills_data(data)
        try:
            DeviceApi().upload_skills_data(upload_data)
        except Exception as e:
            LOG.error('An error occured ({})'.format(e))
def mimic_fallback_tts(utterance, ident):
    """Speak via a cached local Mimic instance when remote TTS fails."""
    global mimic_fallback_obj
    # fallback if connection is lost
    config = Configuration.get()
    tts_config = config.get('tts', {}).get("mimic", {})
    lang = config.get("lang", "en-us")
    if not mimic_fallback_obj:
        # Lazily build the Mimic engine once and reuse it afterwards.
        mimic_fallback_obj = Mimic(lang, tts_config)
    engine = mimic_fallback_obj
    LOG.debug("Mimic fallback, utterance : " + str(utterance))
    engine.init(bus)
    engine.execute(utterance, ident)
def __init__(self, path):
    """Read server settings from local configs only.

    The REMOTE_CONFIG layer is deliberately skipped because this object is
    what fetches the remote config in the first place.
    """
    self.path = path
    config = Configuration.get([DEFAULT_CONFIG,
                                SYSTEM_CONFIG,
                                USER_CONFIG],
                               cache=False)
    server_config = config.get("server")
    self.url = server_config.get("url")
    self.version = server_config.get("version")
    self.identity = IdentityManager.get()
def __init_client(self, params):
    """Connect to the messagebus, filling missing host/port from config."""
    config = Configuration.get().get("websocket")
    # Mutate params in place so later readers see the resolved values.
    if not params.host:
        params.host = config.get('host')
    if not params.port:
        params.port = config.get('port')

    self.ws = WebsocketClient(host=params.host,
                              port=params.port,
                              ssl=params.use_ssl)
    # Connect configuration manager to message bus to receive updates
    Configuration.init(self.ws)
def __init__(self, wake_word_recognizer):
    """Configure the responsive recognizer from listener settings."""
    self.config = Configuration.get()
    listener_config = self.config.get('listener')
    self.upload_url = listener_config['wake_word_upload']['url']
    self.upload_disabled = listener_config['wake_word_upload']['disable']
    self.wake_word_name = wake_word_recognizer.key_phrase
    self.overflow_exc = listener_config.get('overflow_exception', False)

    speech_recognition.Recognizer.__init__(self)
    self.wake_word_recognizer = wake_word_recognizer
    self.audio = pyaudio.PyAudio()
    self.multiplier = listener_config.get('multiplier')
    self.energy_ratio = listener_config.get('energy_ratio')

    # check the config for the flag to save wake words.
    if 'record_utterances' in listener_config:
        # TODO: 19.08 remove this backwards compatibility
        self.save_utterances = listener_config.get('record_utterances')
    else:
        self.save_utterances = listener_config.get('save_utterances', False)

    self.save_wake_words = listener_config.get('record_wake_words')
    self.saved_wake_words_dir = join(gettempdir(), 'mycroft_wake_words')

    self.upload_lock = Lock()
    self.filenames_to_upload = []
    self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
    self._stop_signaled = False

    # The maximum audio in seconds to keep for transcribing a phrase.
    # The wake word must fit in this time.
    num_phonemes = wake_word_recognizer.num_phonemes
    len_phoneme = listener_config.get('phoneme_duration', 120) / 1000.0
    self.TEST_WW_SEC = num_phonemes * len_phoneme
    self.SAVED_WW_SEC = max(3, self.TEST_WW_SEC)

    try:
        self.account_id = DeviceApi().get()['user']['uuid']
    except (requests.RequestException, AttributeError):
        # Offline or unpaired: fall back to an anonymous account id.
        self.account_id = '0'
def __init__(self, name=None, emitter=None):
    """Initialize common skill state: config, settings and logging."""
    self.name = name or self.__class__.__name__
    # Get directory of skill
    self._dir = dirname(abspath(sys.modules[self.__module__].__file__))
    self.settings = SkillSettings(self._dir, self.name)

    self.bind(emitter)
    self.config_core = Configuration.get()
    self.config = self.config_core.get(self.name) or {}
    self.dialog_renderer = None
    self.vocab_dir = None
    self.root_dir = None
    self.file_system = FileSystemAccess(join('skills', self.name))
    self.registered_intents = []
    self.log = LOG.create_logger(self.name)
    self.reload_skill = True  # allow reloading
    self.events = []
    self.scheduled_repeats = []
    self.skill_id = ''  # will be set from the path, so guaranteed unique
def main(alive_hook=on_alive, started_hook=on_started, ready_hook=on_ready,
         error_hook=on_error, stopping_hook=on_stopping, watchdog=None):
    """Start the skills service and report its lifecycle status."""
    reset_sigint_handler()
    # Create PID file, prevent multiple instances of this service
    mycroft.lock.Lock('skills')
    config = Configuration.get()
    # Set the active lang to match the configured one
    set_active_lang(config.get('lang', 'en-us'))

    # Connect this process to the Mycroft message bus
    bus = start_message_bus_client("SKILLS")
    _register_intent_services(bus)
    event_scheduler = EventScheduler(bus)
    callbacks = StatusCallbackMap(on_started=started_hook,
                                  on_alive=alive_hook,
                                  on_ready=ready_hook,
                                  on_error=error_hook,
                                  on_stopping=stopping_hook)
    status = ProcessStatus('skills', bus, callbacks)

    skill_manager = _initialize_skill_manager(bus, watchdog)

    status.set_started()
    _wait_for_internet_connection()

    if skill_manager is None:
        # First attempt may run before connectivity; retry once online.
        skill_manager = _initialize_skill_manager(bus, watchdog)

    device_primer = DevicePrimer(bus, config)
    device_primer.prepare_device()
    skill_manager.start()
    while not skill_manager.is_alive():
        time.sleep(0.1)
    status.set_alive()

    while not skill_manager.is_all_loaded():
        time.sleep(0.1)
    status.set_ready()

    wait_for_exit_signal()
    status.set_stopping()
    shutdown(skill_manager, event_scheduler)
def create():
    """Factory method to create a TTS engine based on configuration.

    The configuration file ``mycroft.conf`` contains a ``tts`` section with
    the name of a TTS module to be read by this method.

    "tts": {
        "module": <engine_name>
    }
    """
    config = Configuration.get()
    lang = config.get("lang", "en-us")
    tts_module = config.get('tts', {}).get('module', 'mimic')
    tts_config = config.get('tts', {}).get(tts_module, {})
    tts_lang = tts_config.get('lang', lang)
    audio_file_error = resolve_resource_file(
        config.get('sounds').get('dont_understand'))
    try:
        if tts_module in TTSFactory.CLASSES:
            clazz = TTSFactory.CLASSES[tts_module]
        else:
            # Unknown name: try loading it as an external TTS plugin.
            clazz = load_tts_plugin(tts_module)
            LOG.info('Loaded plugin {}'.format(tts_module))
        if clazz is None:
            raise ValueError('TTS module not found')

        tts = clazz(tts_lang, tts_config)
        tts.validator.validate()
        tts.audio_file_error = audio_file_error
    except Exception:
        # Fallback to mimic if an error occurs while loading.
        if tts_module != 'mimic':
            LOG.exception('The selected TTS backend couldn\'t be loaded. '
                          'Falling back to Mimic')
            clazz = TTSFactory.CLASSES.get('mimic')
            tts_config = config.get('tts', {}).get('mimic', {})
            tts = clazz(tts_lang, tts_config)
            tts.validator.validate()
        else:
            LOG.exception('The TTS could not be loaded.')
            raise

    return tts
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
    """Wake word engine backed by Mycroft Precise.

    Updates the precise binary and model as needed, then starts a
    PreciseRunner that is fed audio through an in-memory stream.
    """
    super().__init__(key_phrase, config, lang)
    from precise_runner import (
        PreciseRunner, PreciseEngine, ReadWriteStream
    )

    # Migration: drop the obsolete default dist_url from the user config
    # so the packaged default is used instead.
    local_conf = LocalConf(USER_CONFIG)
    if (local_conf.get('precise', {}).get('dist_url') ==
            'http://bootstrap.mycroft.ai/artifacts/static/daily/'):
        del local_conf['precise']['dist_url']
        local_conf.store()
        Configuration.updated(None)

    self.download_complete = True
    # Placeholder timer; replaced when a real download starts.
    self.show_download_progress = Timer(0, lambda: None)
    precise_config = Configuration.get()['precise']
    precise_exe = self.update_precise(precise_config)

    # A locally provided model file takes precedence over a download.
    local_model = self.config.get('local_model_file')
    if local_model:
        self.precise_model = expanduser(local_model)
    else:
        self.precise_model = self.install_model(
            precise_config['model_url'], key_phrase.replace(' ', '-')
        ).replace('.tar.gz', '.pb')

    self.has_found = False
    self.stream = ReadWriteStream()

    def on_activation():
        # Callback from PreciseRunner when the wake word is detected.
        self.has_found = True

    trigger_level = self.config.get('trigger_level', 3)
    sensitivity = self.config.get('sensitivity', 0.5)

    self.runner = PreciseRunner(
        PreciseEngine(precise_exe, self.precise_model),
        trigger_level, sensitivity,
        stream=self.stream, on_activation=on_activation,
    )
    self.runner.start()
def __init__(self):
    """Load fart sounds and detect the enclosure platform."""
    super(FartingSkill, self).__init__(name="FartingSkill")
    self.audioservice = None
    # Flag indicating whether random farting mode is active.
    self.random_farting = False
    # Incremented to make each scheduled event name unique.
    self.counter = 0

    # Search the sounds directory for sound files and load into a list.
    valid_codecs = ['.mp3']  # , '.wav']
    self.path_to_sound_files = path.join(abspath(dirname(__file__)),
                                         'sounds')
    self.sound_files = [
        f for f in listdir(self.path_to_sound_files)
        if splitext(f)[1] in valid_codecs
    ]

    # Cater for the picroft platform, which behaves a bit differently
    # from the mark1.
    self.platform = "unknown"
    config = Configuration.get([SYSTEM_CONFIG, USER_CONFIG], cache=False)
    if "enclosure" in config:
        self.platform = config.get("enclosure").get("platform", "unknown")
def __init__(self, lang="en-us", config=None):
    """Set up Amazon Polly TTS with credentials from configuration."""
    import boto3
    config = config or Configuration.get().get("tts", {}).get("polly", {})
    super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),
                                   audio_ext="mp3",
                                   ssml_tags=["speak", "say-as", "voice",
                                              "prosody", "break",
                                              "emphasis", "sub", "lang",
                                              "phoneme", "w", "whisper",
                                              "amazon:auto-breaths",
                                              "p", "s", "amazon:effect",
                                              "mark"])
    self.voice = self.config.get("voice", "Matthew")
    self.key_id = self.config.get("access_key_id", '')
    self.key = self.config.get("secret_access_key", '')
    self.region = self.config.get("region", 'us-east-1')
    self.polly = boto3.Session(
        aws_access_key_id=self.key_id,
        aws_secret_access_key=self.key,
        region_name=self.region).client('polly')
def init(websocket):
    """Start speech related handlers."""
    global ws
    global tts
    global tts_hash
    global config

    ws = websocket
    Configuration.init(ws)
    config = Configuration.get()
    ws.on('mycroft.stop', handle_stop)
    ws.on('mycroft.audio.speech.stop', handle_stop)
    ws.on('speak', handle_speak)

    tts = TTSFactory.create()
    tts.init(ws)
    # NOTE(review): despite the name, this stores the raw 'tts' config
    # section rather than a hash — confirm intended.
    tts_hash = config.get('tts')
def create():
    """ Factory method to create a TTS engine based on configuration.

    The configuration file ``mycroft.conf`` contains a ``tts`` section with
    the name of a TTS module to be read by this method.

    "tts": {
        "module": <engine_name>
    }
    """
    config = Configuration.get()
    default_lang = config.get("lang", "en-us")
    tts_module = config.get('tts', {}).get('module', 'mimic')
    tts_config = config.get('tts', {}).get(tts_module, {})
    # Per-module language overrides the global default.
    tts_lang = tts_config.get('lang', default_lang)
    clazz = TTSFactory.CLASSES.get(tts_module)
    tts = clazz(tts_lang, tts_config)
    tts.validator.validate()
    return tts
def __init__(self, skill_directory: str, skill_name: str):
    """Prepare settingsmeta upload state for a single skill.

    Args:
        skill_directory: path of the skill's root directory
        skill_name: identifier used when uploading settingsmeta
    """
    self.skill_directory = Path(skill_directory)
    self.skill_name = skill_name
    self.json_path = self.skill_directory.joinpath('settingsmeta.json')
    self.yaml_path = self.skill_directory.joinpath('settingsmeta.yaml')
    self.config = Configuration.get()
    self.settings_meta = {}
    self.api = None
    self.upload_timer = None
    self.sync_enabled = self.config["server"].get("sync_skill_settings",
                                                  False)
    if not self.sync_enabled:
        LOG.info("Skill settings sync is disabled, settingsmeta will "
                 "not be uploaded")
    self._stopped = None

    # Property placeholders
    self._msm = None
    self._skill_gid = None
def __init__(self, ws):
    """
    Args:
        ws: Websocket instance to use
    """
    self.ws = ws
    self.config = Configuration.get().get("Audio")

    self.default = None
    self.service = []
    self.current = None
    self.volume_is_low = False
    # PulseAudio handles, populated lazily by the handlers below.
    self.pulse = None
    self.pulse_quiet = None
    self.pulse_restore = None
    self.muted_sinks = []

    # Setup control of pulse audio
    self.setup_pulseaudio_handlers(self.config.get('pulseaudio'))
    ws.once('open', self.load_services_callback)
def train(self, message=None):
    """Train the padatious container, honoring single-thread overrides."""
    default_single = Configuration.get()['padatious']['single_thread']
    if message is None:
        single_thread = default_single
    else:
        # A message may override the configured threading mode.
        single_thread = message.data.get('single_thread', default_single)

    self.finished_training_event.clear()
    LOG.info('Training... (single_thread={})'.format(single_thread))
    self.container.train(single_thread=single_thread)
    LOG.info('Training complete.')
    self.finished_training_event.set()
    if not self.finished_initial_train:
        # First successful training: announce overall readiness.
        LOG.info("Mycroft is all loaded and ready to roll!")
        self.bus.emit(Message('mycroft.ready'))
        self.finished_initial_train = True
def main():
    """ROS node entry point bridging Mycroft STT to ROS."""
    rospy.init_node('mycroft_stt')
    rospy.loginfo(rospy.get_caller_id() + " started")

    global bus
    global loop
    global config
    reset_sigint_handler()
    PIDLock("voice")
    # Mycroft messagebus, see mycroft.messagebus
    bus = WebsocketClient()
    Configuration.init(bus)
    config = Configuration.get()

    # Register handlers on internal RecognizerLoop bus
    loop = RecognizerLoop()
    loop.on('recognizer_loop:utterance', handle_utterance)
    loop.on('recognizer_loop:speech.recognition.unknown', handle_unknown)
    loop.on('speak', handle_speak)
    loop.on('recognizer_loop:record_begin', handle_record_begin)
    loop.on('recognizer_loop:awoken', handle_awoken)
    loop.on('recognizer_loop:wakeword', handle_wakeword)
    loop.on('recognizer_loop:record_end', handle_record_end)
    loop.on('recognizer_loop:no_internet', handle_no_internet)

    # Register handlers for events on main Mycroft messagebus
    bus.on('open', handle_open)
    bus.on('complete_intent_failure', handle_complete_intent_failure)
    bus.on('recognizer_loop:sleep', handle_sleep)
    bus.on('recognizer_loop:wake_up', handle_wake_up)
    bus.on('mycroft.mic.mute', handle_mic_mute)
    bus.on('mycroft.mic.unmute', handle_mic_unmute)
    bus.on('mycroft.mic.get_status', handle_mic_get_status)
    bus.on("mycroft.paired", handle_paired)
    bus.on('recognizer_loop:audio_output_start', handle_audio_start)
    bus.on('recognizer_loop:audio_output_end', handle_audio_end)
    bus.on('mycroft.stop', handle_stop)

    create_daemon(bus.run_forever)
    create_daemon(loop.run)

    wait_for_exit_signal()
    rospy.spin()
def __init__(self):
    """Connect the enclosure/GUI service to the messagebus and register
    handlers for GUI client connections and GUI data updates."""
    # Establish Enclosure's websocket connection to the messagebus
    self.bus = WebsocketClient()

    # Load full config
    Configuration.init(self.bus)
    config = Configuration.get()

    self.lang = config['lang']
    self.config = config.get("enclosure")
    self.global_config = config

    # Listen for new GUI clients to announce themselves on the main bus
    self.GUIs = {}  # GUIs, either local or remote
    self.active_namespaces = []
    self.bus.on("mycroft.gui.connected", self.on_gui_client_connected)
    self.register_gui_handlers()

    # First send any data:
    self.bus.on("gui.value.set", self.on_gui_set_value)
    self.bus.on("gui.page.show", self.on_gui_show_page)
def __init__(self, bus):
    """Initialize the audio service controller.

    Args:
        bus: Mycroft messagebus
    """
    self.bus = bus
    # "Audio" section of the core configuration.
    self.config = Configuration.get().get("Audio")
    self.service_lock = Lock()

    # Playback backend bookkeeping; populated when services are loaded.
    self.default = None
    self.service = []
    self.current = None
    self.volume_is_low = False

    # PulseAudio handles for ducking/muting other audio streams.
    self.pulse = None
    self.pulse_quiet = None
    self.pulse_restore = None
    self.muted_sinks = []

    # Setup control of pulse audio
    self.setup_pulseaudio_handlers(self.config.get('pulseaudio'))

    # Defer backend loading until the bus connection is open.
    bus.once('open', self.load_services_callback)
def activate(self, state, token):
    """Activate the device pairing with the backend.

    Args:
        state: pairing state string from the pairing flow
        token: pairing token issued by the backend

    Returns:
        The backend response from the POST /activate request.
    """
    version = VersionManager.get()

    # load just the local configs to get platform info
    config = Configuration.get(cache=False, remote=False)
    if "enclosure" in config:
        enclosure = config.get("enclosure")
        platform = enclosure.get("platform", "unknown")
        platform_build = enclosure.get("platform_build", "")
    else:
        platform = "unknown"
        platform_build = ""

    payload = {
        "state": state,
        "token": token,
        "coreVersion": version.get("coreVersion"),
        "platform": platform,
        "platform_build": platform_build,
        "enclosureVersion": version.get("enclosureVersion"),
    }
    return self.request({"method": "POST",
                         "path": "/activate",
                         "json": payload})
def __init__(self, bus, schedule_file='schedule.json'):
    """Create the event scheduler thread.

    Args:
        bus: Mycroft messagebus
        schedule_file: filename (within the configured data_dir) used to
            persist scheduled events across restarts
    """
    super().__init__()
    data_dir = expanduser(Configuration.get()['data_dir'])

    # event name -> scheduling data; guarded by event_lock.
    self.events = {}
    self.event_lock = Lock()

    self.bus = bus
    self.is_running = True
    self.schedule_file = join(data_dir, schedule_file)
    if self.schedule_file:
        # Restore persisted events from disk.
        self.load()

    self.bus.on('mycroft.scheduler.schedule_event',
                self.schedule_event_handler)
    self.bus.on('mycroft.scheduler.remove_event',
                self.remove_event_handler)
    self.bus.on('mycroft.scheduler.update_event',
                self.update_event_handler)
    self.bus.on('mycroft.scheduler.get_event',
                self.get_event_handler)

    # Start the scheduler thread immediately.
    self.start()
def get_tts(self, sentence, wav_file):
    """Synthesize ``sentence`` to ``wav_file`` with the external ivona
    command-line tool.

    The binary path and extra command-line flags come from the
    ``tts.ivonaComand`` section of the configuration; configured
    ``params`` are appended as ``flag value`` pairs.

    Args:
        sentence (str): text to synthesize
        wav_file (str): path of the wav file to write

    Returns:
        tuple: (wav_file, None) — this engine produces no phoneme data
    """
    config = Configuration.get().get("tts").get("ivonaComand")
    binary = config.get("path", "")

    # Build the argv list: binary -t <sentence> [flag value ...] <wav_file>.
    # Passing a list (shell=False) avoids quoting/injection problems with
    # arbitrary sentence text.
    args = [binary, "-t", sentence]
    for key, value in config.get("params", {}).items():
        args.append(key)
        args.append(str(value))
    args.append(wav_file)

    subprocess.call(args)
    return (wav_file, None)  # No phonemes
def update_version(self):
    """Report the current core/enclosure versions and platform to the
    backend for this device.

    Returns:
        The backend response from the PATCH request.
    """
    version = VersionManager.get()

    # load just the local configs to get platform info
    config = Configuration.get([SYSTEM_CONFIG, USER_CONFIG], cache=False)
    if "enclosure" in config:
        enclosure = config.get("enclosure")
        platform = enclosure.get("platform", "unknown")
        platform_build = enclosure.get("platform_build", "")
    else:
        platform = "unknown"
        platform_build = ""

    payload = {
        "coreVersion": version.get("coreVersion"),
        "platform": platform,
        "platform_build": platform_build,
        "enclosureVersion": version.get("enclosureVersion"),
    }
    return self.request({"method": "PATCH",
                         "path": "/" + self.identity.uuid,
                         "json": payload})
def init(messagebus):
    """Start speech related handlers.

    Stores the bus and configuration in module-level globals, registers
    the speak/stop handlers and creates the TTS engine.

    Arguments:
        messagebus: Connection to the Mycroft messagebus
    """
    # Module-level state shared with the handler functions in this file.
    global bus
    global tts
    global tts_hash
    global config

    bus = messagebus
    Configuration.set_config_update_handlers(bus)
    config = Configuration.get()
    bus.on('mycroft.stop', handle_stop)
    bus.on('mycroft.audio.speech.stop', handle_stop)
    bus.on('speak', handle_speak)

    tts = TTSFactory.create()
    tts.init(bus)
    # Hash of the tts config section, used elsewhere to detect changes
    # that require recreating the engine.
    tts_hash = hash(str(config.get('tts', '')))
def handle_speak(event):
    """Handle "speak" messages: synthesize and play the utterance.

    Args:
        event (Message): carries 'utterance' and optionally
            'expect_response' in its data.
    """
    config = Configuration.get()
    Configuration.init(ws)
    global _last_stop_signal

    # Mild abuse of the signal system to allow other processes to detect
    # when TTS is happening.  See mycroft.util.is_speaking()
    utterance = event.data['utterance']
    if event.data.get('expect_response', False):
        # When expect_response is requested, the listener will be restarted
        # at the end of the next bit of spoken audio.
        ws.once('recognizer_loop:audio_output_end', _start_listener)

    # This is a bit of a hack for Picroft.  The analog audio on a Pi blocks
    # for 30 seconds fairly often, so we don't want to break on periods
    # (decreasing the chance of encountering the block).  But we will
    # keep the split for non-Picroft installs since it give user feedback
    # faster on longer phrases.
    #
    # TODO: Remove or make an option?  This is really a hack, anyway,
    # so we likely will want to get rid of this when not running on Mimic
    if not config.get('enclosure', {}).get('platform') == "picroft":
        start = time.time()
        # Split on sentence boundaries so feedback starts sooner.
        chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s',
                          utterance)
        for chunk in chunks:
            try:
                mute_and_speak(chunk)
            except Exception:
                # Was a bare `except:` which also swallowed SystemExit /
                # KeyboardInterrupt; log and continue with remaining chunks.
                LOG.error('Error in mute_and_speak', exc_info=True)
            # Abort remaining chunks if a stop was requested meanwhile.
            if _last_stop_signal > start or check_for_signal('buttonPress'):
                break
    else:
        mute_and_speak(utterance)
def __init__(self, ready_hook=on_ready, error_hook=on_error,
             stopping_hook=on_stopping, alive_hook=on_alive,
             started_hook=on_started, watchdog=lambda: None,
             speech_config=None, daemonic=False):
    """
    Creates a Speech service thread
    :param ready_hook: function callback when service is ready
    :param error_hook: function callback to handle uncaught exceptions
    :param stopping_hook: function callback when service is stopping
    :param alive_hook: function callback when service is alive
    :param started_hook: function callback when service is started
    :param watchdog: function called periodically by the recognizer loop
    :param speech_config: global core configuration override
    :param daemonic: if True, run this thread as a daemon
    """
    Thread.__init__(self)
    self.setDaemon(daemonic)
    # Init messagebus and handlers
    self.bus = get_messagebus()
    from neon_utils.signal_utils import init_signal_handlers, \
        init_signal_bus
    init_signal_bus(self.bus)
    init_signal_handlers()
    self.user_config = get_neon_user_config()
    if speech_config:
        # NOTE(review): the override is still used for self.config below,
        # only the recognizer loop ignores it.
        LOG.warning("Passed configuration will not be handled in listener")
    self.config = speech_config or Configuration.get()
    self.lock = Lock()
    callbacks = StatusCallbackMap(on_ready=ready_hook,
                                  on_error=error_hook,
                                  on_stopping=stopping_hook,
                                  on_alive=alive_hook,
                                  on_started=started_hook)
    self.status = ProcessStatus('speech', self.bus, callbacks)
    self.status.set_started()
    self.status.bind(self.bus)
    # Recognizer loop drives wake word detection and STT.
    self.loop = NeonRecognizerLoop(self.bus, watchdog)
    self.connect_loop_events()
    self.connect_bus_events()
    self.api_stt = STTFactory.create(config=self.config,
                                     results_event=None)
def _load_config(self):
    """ Load configuration parameters from configuration

    Reads the core config, caches a hash of it for change detection and
    (re)creates the microphone and recognizer objects from the
    'listener' section.
    """
    config = Configuration.get()
    self.config_core = config
    # Hash of the whole config; used to detect configuration changes.
    self._config_hash = hash(str(config))
    self.lang = config.get('lang')
    self.config = config.get('listener')
    rate = self.config.get('sample_rate')

    device_index = self.config.get('device_index')

    # Keep the mic muted if there are outstanding mute requests.
    self.microphone = MutableMicrophone(device_index, rate,
                                        mute=self.mute_calls > 0)
    # FIXME - channels are not been used
    self.microphone.CHANNELS = self.config.get('channels')
    self.wakeword_recognizer = self.create_wake_word_recognizer()
    # TODO - localization
    self.wakeup_recognizer = self.create_wakeup_recognizer()
    self.responsive_recognizer = ResponsiveRecognizer(
        self.wakeword_recognizer)
    self.state = RecognizerLoopState()
def send(message_to_send, data_to_send=None):
    """Send a single message over the websocket.

    Args:
        message_to_send (str): Message to send
        data_to_send (dict): data structure to go along with the message,
            defaults to empty dict.
    """
    if not data_to_send:
        data_to_send = {}

    # Resolve the standard Mycroft messagebus websocket address from the
    # stacked configuration files.
    ws_config = Configuration.get(
        [DEFAULT_CONFIG, SYSTEM_CONFIG, USER_CONFIG],
        cache=False).get("websocket")
    url = MessageBusClient.build_url(ws_config.get("host"),
                                     ws_config.get("port"),
                                     ws_config.get("route"),
                                     ws_config.get("ssl"))

    # Open a one-shot connection, send the serialized message, close.
    connection = create_connection(url)
    connection.send(Message(message_to_send, data_to_send).serialize())
    connection.close()
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
    """Set up the Precise wake word engine.

    Downloads/updates the model and executable if needed, then launches
    the precise-stream subprocess and a daemon thread that watches its
    stdout for detections.

    Args:
        key_phrase (str): wake word phrase
        config (dict): hotword engine configuration
        lang (str): language code
    """
    super(PreciseHotword, self).__init__(key_phrase, config, lang)
    self.update_freq = 24  # in hours

    precise_config = Configuration.get()['precise']
    self.dist_url = precise_config['dist_url']
    self.models_url = precise_config['models_url']
    self.exe_name = 'precise-stream'

    model_name, model_path = self.get_model_info()

    exe_file = self.find_download_exe()
    LOG.info('Found precise executable: ' + exe_file)
    self.update_model(model_name, model_path)

    # Stream audio to the engine over stdin; read detections from stdout.
    args = [exe_file, model_path, '1024']
    self.proc = Popen(args, stdin=PIPE, stdout=PIPE)
    self.has_found = False
    # Frames to ignore after a detection to avoid double triggers
    # (assumption — TODO confirm against check_stdout).
    self.cooldown = 20
    t = Thread(target=self.check_stdout)
    t.daemon = True
    t.start()
def default_timezone():
    """Get the default timezone

    Uses the user's location.timezone.code setting when available,
    otherwise falls back to the system timezone.

    Returns:
        (datetime.tzinfo): Definition of the default timezone
    """
    try:
        # Obtain from user's configurated settings
        #   location.timezone.code (e.g. "America/Chicago")
        #   location.timezone.name (e.g. "Central Standard Time")
        #   location.timezone.offset (e.g. -21600000)
        from mycroft.configuration import Configuration
        tz_code = Configuration.get()["location"]["timezone"]["code"]
        return gettz(tz_code)
    except Exception:
        # No (valid) user setting — just go with system default timezone
        return tzlocal()
def __init__(self, bus):
    """Wire the intent service to the messagebus.

    Args:
        bus: Mycroft messagebus
    """
    self.config = Configuration.get().get('context', {})
    self.engine = IntentDeterminationEngine()

    # Dictionary for translating a skill id to a name
    self.skill_names = {}

    # Context related initializations
    self.context_keywords = self.config.get('keywords', [])
    self.context_max_frames = self.config.get('max_frames', 3)
    self.context_timeout = self.config.get('timeout', 2)
    self.context_greedy = self.config.get('greedy', False)
    self.context_manager = ContextManager(self.context_timeout)

    self.bus = bus
    self.bus.on('register_vocab', self.handle_register_vocab)
    self.bus.on('register_intent', self.handle_register_intent)
    self.bus.on('recognizer_loop:utterance', self.handle_utterance)
    self.bus.on('detach_intent', self.handle_detach_intent)
    self.bus.on('detach_skill', self.handle_detach_skill)
    # Context related handlers
    self.bus.on('add_context', self.handle_add_context)
    self.bus.on('remove_context', self.handle_remove_context)
    self.bus.on('clear_context', self.handle_clear_context)
    # Converse method
    self.bus.on('skill.converse.response', self.handle_converse_response)
    self.bus.on('skill.converse.error', self.handle_converse_error)
    self.bus.on('mycroft.speech.recognition.unknown', self.reset_converse)
    self.bus.on('mycroft.skills.loaded', self.update_skill_name_dict)
    # own Loomo Method
    self.bus.on('loomoMessage', self.handle_loomo_message)

    def add_active_skill_handler(message):
        # Promote the requesting skill in the converse ordering.
        self.add_active_skill(message.data['skill_id'])

    self.bus.on('active_skill_request', add_active_skill_handler)

    # Converse bookkeeping.
    self.active_skills = []  # [skill_id , timestamp]
    self.converse_timeout = 5  # minutes to prune active_skills
    self.waiting_for_converse = False
    self.converse_result = False
    self.converse_skill_id = ""
def run(self):
    """ Load skills and update periodically from disk and internet

    Main loop of the skill manager thread: waits for network, then
    repeatedly updates and (re)loads skills until stopped.
    """
    self.remove_git_locks()
    self._connected_event.wait()
    has_loaded = False

    # check if skill updates are enabled
    update = Configuration.get()["skills"]["auto_update"]

    # Scan the file folder that contains Skills. If a Skill is updated,
    # unload the existing version from memory and reload from the disk.
    while not self._stop_event.is_set():
        # Update skills once an hour if update is enabled
        if time.time() >= self.next_download and update:
            self.download_skills()

        # Look for recently changed skill(s) needing a reload
        # checking skills dir and getting all skills there
        skill_paths = glob(join(self.msm.skills_dir, '*/'))
        still_loading = False
        for skill_path in skill_paths:
            try:
                still_loading = (
                    self._load_or_reload_skill(skill_path) or
                    still_loading
                )
            except Exception as e:
                # One broken skill must not stop the others from loading.
                LOG.error('(Re)loading of {} failed ({})'.format(
                    skill_path, repr(e)))
        if not has_loaded and not still_loading and len(skill_paths) > 0:
            # First time everything finished loading: announce readiness.
            has_loaded = True
            LOG.info("Skills all loaded!")
            self.bus.emit(Message('mycroft.skills.initialized'))

        # Drop skills whose directories have been removed.
        self._unload_removed(skill_paths)
        # Pause briefly before beginning next scan
        time.sleep(2)
def __init__(self, bus, service):
    """Set up the Padatious intent fallback service.

    Args:
        bus: Mycroft messagebus
        service: intent service this instance belongs to
    """
    FallbackSkill.__init__(self)
    if not PadatiousService.instance:
        PadatiousService.instance = self

    self.padatious_config = Configuration.get()['padatious']
    self.service = service
    intent_cache = expanduser(self.padatious_config['intent_cache'])

    try:
        from padatious import IntentContainer
    except ImportError:
        # Padatious is optional; warn on screen and bail out of setup.
        LOG.error('Padatious not installed. Please re-run dev_setup.sh')
        try:
            call(['notify-send', 'Padatious not installed',
                  'Please run build_host_setup and dev_setup again'])
        except OSError:
            pass
        return

    self.container = IntentContainer(intent_cache)

    # NOTE(review): assigns self._bus but reads self.bus below —
    # presumably the base class exposes a `bus` property backed by
    # `_bus`; verify, otherwise this raises AttributeError.
    self._bus = bus
    self.bus.on('padatious:register_intent', self.register_intent)
    self.bus.on('padatious:register_entity', self.register_entity)
    self.bus.on('detach_intent', self.handle_detach_intent)
    self.bus.on('detach_skill', self.handle_detach_skill)
    self.bus.on('mycroft.skills.initialized', self.train)
    self.register_fallback(self.handle_fallback, 5)
    self.finished_training_event = Event()
    self.finished_initial_train = False

    self.train_delay = self.padatious_config['train_delay']
    self.train_time = get_time() + self.train_delay

    self.registered_intents = []
def __init__(self, bus):
    """Create the skill manager thread (not yet started).

    Args:
        bus: Mycroft messagebus
    """
    super(SkillManager, self).__init__()
    self._stop_event = Event()
    self._connected_event = Event()
    # directory path -> loaded skill data
    self.loaded_skills = {}
    self.bus = bus
    self.enclosure = EnclosureAPI(bus)

    # Schedule install/update of default skill
    self.msm = self.create_msm()
    self.thread_lock = self.get_lock()
    self.num_install_retries = 0

    # Configured update interval (hours in config) converted to seconds.
    self.update_interval = Configuration.get()['skills']['update_interval']
    self.update_interval = int(self.update_interval * 60 * MINUTES)
    self.dot_msm = join(self.msm.skills_dir, '.msm')
    # Update immediately if the .msm or installed skills file is missing
    # otherwise according to timestamp on .msm
    if exists(self.dot_msm) and exists(self.installed_skills_file):
        self.next_download = os.path.getmtime(self.dot_msm) + \
            self.update_interval
    else:
        self.next_download = time.time() - 1

    # Conversation management
    bus.on('skill.converse.request', self.handle_converse_request)

    # Update on initial connection
    bus.on('mycroft.internet.connected',
           lambda x: self._connected_event.set())

    # Update upon request
    bus.on('skillmanager.update', self.schedule_now)
    bus.on('skillmanager.list', self.send_skill_list)
    bus.on('skillmanager.deactivate', self.deactivate_skill)
    bus.on('skillmanager.keep', self.deactivate_except)
    bus.on('skillmanager.activate', self.activate_skill)