def main():
    global ws
    # Create PID file, prevent multiple instances of this service
    mycroft.lock.Lock('skills')
    # Connect this Skill management process to the websocket
    ws = WebsocketClient()
    Configuration.init(ws)
    ignore_logs = Configuration.get().get("ignore_logs")

    # Listen for messages and echo them for logging
    def _echo(message):
        try:
            _message = json.loads(message)
            if _message.get("type") in ignore_logs:
                return
            if _message.get("type") == "registration":
                # do not log tokens from registration messages
                _message["data"]["token"] = None
                message = json.dumps(_message)
        except BaseException:
            pass
        LOG('SKILLS').debug(message)

    ws.on('message', _echo)
    # Startup will be called after websocket is fully live
    ws.once('open', _starting_up)
    ws.run_forever()
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
    super(PreciseHotword, self).__init__(key_phrase, config, lang)
    self.update_freq = 24  # in hours

    precise_config = Configuration.get()['precise']
    self.dist_url = precise_config['dist_url']
    self.models_url = precise_config['models_url']
    self.exe_name = 'precise-stream'

    ww = Configuration.get()['listener']['wake_word']
    model_name = ww.replace(' ', '-') + '.pb'
    model_folder = expanduser('~/.mycroft/precise')

    if not isdir(model_folder):
        mkdir(model_folder)
    model_path = join(model_folder, model_name)

    exe_file = self.find_download_exe()
    LOG.info('Found precise executable: ' + exe_file)
    self.update_model(model_name, model_path)

    args = [exe_file, model_path, '1024']
    self.proc = Popen(args, stdin=PIPE, stdout=PIPE)
    self.has_found = False
    self.cooldown = 20
    t = Thread(target=self.check_stdout)
    t.daemon = True
    t.start()
def main():
    global ws
    global config
    ws = WebsocketClient()
    Configuration.init(ws)
    config = Configuration.get()
    speech.init(ws)

    # Setup control of pulse audio
    setup_pulseaudio_handlers(config.get('Audio').get('pulseaudio'))

    def echo(message):
        try:
            _message = json.loads(message)
            if 'mycroft.audio.service' not in _message.get('type'):
                return
            message = json.dumps(_message)
        except Exception:
            pass
        LOG.debug(message)

    LOG.info("Starting Audio Services")
    ws.on('message', echo)
    ws.once('open', load_services_callback)
    try:
        ws.run_forever()
    except KeyboardInterrupt as e:
        LOG.exception(e)
        speech.shutdown()
        sys.exit()
def main():
    global ws
    global loop
    global config
    lock = PIDLock("voice")
    ws = WebsocketClient()
    config = Configuration.get()
    Configuration.init(ws)
    loop = RecognizerLoop()
    loop.on('recognizer_loop:utterance', handle_utterance)
    loop.on('speak', handle_speak)
    loop.on('recognizer_loop:record_begin', handle_record_begin)
    loop.on('recognizer_loop:wakeword', handle_wakeword)
    loop.on('recognizer_loop:record_end', handle_record_end)
    loop.on('recognizer_loop:no_internet', handle_no_internet)
    ws.on('open', handle_open)
    ws.on('complete_intent_failure', handle_complete_intent_failure)
    ws.on('recognizer_loop:sleep', handle_sleep)
    ws.on('recognizer_loop:wake_up', handle_wake_up)
    ws.on('mycroft.mic.mute', handle_mic_mute)
    ws.on('mycroft.mic.unmute', handle_mic_unmute)
    ws.on("mycroft.paired", handle_paired)
    ws.on('recognizer_loop:audio_output_start', handle_audio_start)
    ws.on('recognizer_loop:audio_output_end', handle_audio_end)
    ws.on('mycroft.stop', handle_stop)
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()
    try:
        loop.run()
    except KeyboardInterrupt as e:
        LOG.exception(e)
        sys.exit()
def get_skills_dir():
    return (
        expanduser(os.environ.get('SKILLS_DIR', '')) or
        expanduser(join(
            Configuration.get()['data_dir'],
            Configuration.get()['skills']['msm']['directory']
        ))
    )
def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
    self.key_phrase = str(key_phrase).lower()
    # rough estimate: 1 phoneme per 2 chars
    self.num_phonemes = len(key_phrase) / 2 + 1
    if config is None:
        config = Configuration.get().get("hot_words", {})
        config = config.get(self.key_phrase, {})
    self.config = config
    self.listener_config = Configuration.get().get("listener", {})
    self.lang = str(self.config.get("lang", lang)).lower()
def handle_speak(event):
    """ Handle "speak" message """
    config = Configuration.get()
    Configuration.init(bus)
    global _last_stop_signal

    # Get conversation ID
    if event.context and 'ident' in event.context:
        ident = event.context['ident']
    else:
        ident = 'unknown'

    start = time.time()  # Time of speech request
    with lock:
        stopwatch = Stopwatch()
        stopwatch.start()
        utterance = event.data['utterance']
        if event.data.get('expect_response', False):
            # When expect_response is requested, the listener will be
            # restarted at the end of the next bit of spoken audio.
            bus.once('recognizer_loop:audio_output_end', _start_listener)

        # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
        # for 30 seconds fairly often, so we don't want to break on periods
        # (decreasing the chance of encountering the block). But we will
        # keep the split for non-Picroft installs since it gives user feedback
        # faster on longer phrases.
        #
        # TODO: Remove or make an option? This is really a hack, anyway,
        # so we likely will want to get rid of this when not running on Mimic
        if (config.get('enclosure', {}).get('platform') != "picroft" and
                len(re.findall('<[^>]*>', utterance)) == 0):
            chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\;|\?)\s',
                              utterance)
            for chunk in chunks:
                # Check if something has aborted the speech
                if (_last_stop_signal > start or
                        check_for_signal('buttonPress')):
                    # Clear any newly queued speech
                    tts.playback.clear()
                    break
                try:
                    mute_and_speak(chunk, ident)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    LOG.error('Error in mute_and_speak', exc_info=True)
        else:
            mute_and_speak(utterance, ident)

        stopwatch.stop()
    report_timing(ident, 'speech', stopwatch,
                  {'utterance': utterance, 'tts': tts.__class__.__name__})
def __init_client(self, params):
    config = Configuration.get().get("websocket")

    if not params.host:
        params.host = config.get('host')
    if not params.port:
        params.port = config.get('port')

    self.ws = WebsocketClient(host=params.host,
                              port=params.port,
                              ssl=params.use_ssl)

    # Connect configuration manager to message bus to receive updates
    Configuration.init(self.ws)
def __init__(self):
    self.ws = WebsocketClient()
    self.ws.on("open", self.on_ws_open)

    Configuration.init(self.ws)
    self.config = Configuration.get().get("enclosure")
    self.__init_serial()

    self.reader = EnclosureReader(self.serial, self.ws)
    self.writer = EnclosureWriter(self.serial, self.ws)

    # initiates the web sockets on display manager
    # NOTE: this is a temporary place to initiate display manager sockets
    initiate_display_manager_ws()
def __init__(self, args):
    params = self.__build_params(args)

    if params.config:
        Configuration.get([params.config])

    if exists(params.lib) and isdir(params.lib):
        sys.path.append(params.lib)

    sys.path.append(params.dir)
    self.dir = params.dir

    self.enable_intent = params.enable_intent

    self.__init_client(params)
def get(phrase, lang=None, context=None):
    """Look up a resource file for the given phrase.

    If no file is found, the requested phrase is returned as the string.
    This will use the default language for translations.

    Args:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        from mycroft.configuration import Configuration
        lang = Configuration.get().get("lang")

    filename = "text/" + lang.lower() + "/" + phrase + ".dialog"
    template = resolve_resource_file(filename)
    if not template:
        LOG.debug("Resource file not found: " + filename)
        return phrase

    stache = MustacheDialogRenderer()
    stache.load_template_file("template", template)
    if not context:
        context = {}
    return stache.render("template", context)
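A minimal usage sketch of the dialog lookup above; the phrase name and context key are illustrative and assume a matching text/en-us/my.phrase.dialog resource exists on the device.

import mycroft.dialog

# Renders a random line from my.phrase.dialog with {{name}} substituted,
# or falls back to the phrase string itself if no resource file is found.
line = mycroft.dialog.get("my.phrase", lang="en-us",
                          context={"name": "Mycroft"})
print(line)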
def create():
    """Factory method to create a TTS engine based on configuration.

    The configuration file ``mycroft.conf`` contains a ``tts`` section with
    the name of a TTS module to be read by this method.

    "tts": {
        "module": <engine_name>
    }
    """
    from mycroft.tts.remote_tts import RemoteTTS
    config = Configuration.get().get('tts', {})
    module = config.get('module', 'mimic')
    lang = config.get(module).get('lang')
    voice = config.get(module).get('voice')
    clazz = TTSFactory.CLASSES.get(module)

    if issubclass(clazz, RemoteTTS):
        url = config.get(module).get('url')
        tts = clazz(lang, voice, url)
    else:
        tts = clazz(lang, voice)

    tts.validator.validate()
    return tts
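A hedged usage sketch of this factory, assuming a working Mycroft install with a valid "tts" section in mycroft.conf; the spoken text and ident are illustrative.

from mycroft.tts import TTSFactory

# Instantiates whichever engine the "tts.module" config key names
# (mimic by default) and speaks one utterance through it.
tts = TTSFactory.create()
tts.execute("Hello from the TTS factory", ident="example")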
def run(self):
    """ Load skills and update periodically from disk and internet """
    self.remove_git_locks()
    self._connected_event.wait()
    has_loaded = False

    # check if skill updates are enabled
    update = Configuration.get()["skills"]["auto_update"]

    # Scan the file folder that contains Skills. If a Skill is updated,
    # unload the existing version from memory and reload from the disk.
    while not self._stop_event.is_set():
        # Update skills once an hour if update is enabled
        if time.time() >= self.next_download and update:
            self.download_skills()

        # Look for recently changed skill(s) needing a reload
        # checking skills dir and getting all skills there
        skill_paths = glob(join(self.msm.skills_dir, '*/'))
        still_loading = False
        for skill_path in skill_paths:
            still_loading = (
                self._load_or_reload_skill(skill_path) or
                still_loading
            )
        if not has_loaded and not still_loading and len(skill_paths) > 0:
            has_loaded = True
            self.bus.emit(Message('mycroft.skills.initialized'))

        self._unload_removed(skill_paths)
        # Pause briefly before beginning next scan
        time.sleep(2)
def __init__(self, bus):
    super(SkillManager, self).__init__()
    self._stop_event = Event()
    self._connected_event = Event()
    self.loaded_skills = {}
    self.bus = bus
    self.enclosure = EnclosureAPI(bus)

    # Schedule install/update of default skill
    self.msm = self.create_msm()
    self.num_install_retries = 0

    self.update_interval = Configuration.get()['skills']['update_interval']
    self.update_interval = int(self.update_interval * 60 * MINUTES)
    self.dot_msm = join(self.msm.skills_dir, '.msm')
    if exists(self.dot_msm):
        self.next_download = os.path.getmtime(self.dot_msm) + \
            self.update_interval
    else:
        self.next_download = time.time() - 1

    # Conversation management
    bus.on('skill.converse.request', self.handle_converse_request)

    # Update on initial connection
    bus.on('mycroft.internet.connected',
           lambda x: self._connected_event.set())

    # Update upon request
    bus.on('skillmanager.update', self.schedule_now)
    bus.on('skillmanager.list', self.send_skill_list)
    bus.on('skillmanager.deactivate', self.deactivate_skill)
    bus.on('skillmanager.keep', self.deactivate_except)
    bus.on('skillmanager.activate', self.activate_skill)
def _load_config(self):
    """ Load configuration parameters from configuration """
    config = Configuration.get()
    self.config_core = config
    self._config_hash = hash(str(config))
    self.lang = config.get('lang')
    self.config = config.get('listener')
    rate = self.config.get('sample_rate')

    device_index = self.config.get('device_index')
    device_name = self.config.get('device_name')
    if not device_index and device_name:
        device_index = find_input_device(device_name)

    LOG.debug('Using microphone (None = default): ' + str(device_index))

    self.microphone = MutableMicrophone(device_index, rate,
                                        mute=self.mute_calls > 0)
    # TODO:19.02 - channels are not being used, remove from mycroft.conf
    #              and from code.
    self.microphone.CHANNELS = self.config.get('channels')
    self.wakeword_recognizer = self.create_wake_word_recognizer()
    # TODO - localization
    self.wakeup_recognizer = self.create_wakeup_recognizer()
    self.responsive_recognizer = ResponsiveRecognizer(
        self.wakeword_recognizer)
    self.state = RecognizerLoopState()
def __init__(self):
    config_core = Configuration.get()
    self.lang = str(self.init_language(config_core))
    config_stt = config_core.get("stt", {})
    self.config = config_stt.get(config_stt.get("module"), {})
    self.credential = self.config.get("credential", {})
    self.recognizer = Recognizer()
def __init__(self, wake_word_recognizer):
    self.config = Configuration.get()
    listener_config = self.config.get('listener')
    self.upload_url = listener_config['wake_word_upload']['url']
    self.upload_disabled = listener_config['wake_word_upload']['disable']
    self.wake_word_name = wake_word_recognizer.key_phrase

    self.overflow_exc = listener_config.get('overflow_exception', False)

    speech_recognition.Recognizer.__init__(self)
    self.wake_word_recognizer = wake_word_recognizer
    self.audio = pyaudio.PyAudio()
    self.multiplier = listener_config.get('multiplier')
    self.energy_ratio = listener_config.get('energy_ratio')

    # check the config for the flag to save wake words.
    self.save_utterances = listener_config.get('record_utterances', False)

    self.upload_lock = Lock()
    self.filenames_to_upload = []
    self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
    self._stop_signaled = False

    # The maximum audio in seconds to keep for transcribing a phrase
    # The wake word must fit in this time
    num_phonemes = wake_word_recognizer.num_phonemes
    len_phoneme = listener_config.get('phoneme_duration', 120) / 1000.0
    self.TEST_WW_SEC = num_phonemes * len_phoneme
    self.SAVED_WW_SEC = max(3, self.TEST_WW_SEC)

    try:
        self.account_id = DeviceApi().get()['user']['uuid']
    except (requests.RequestException, AttributeError):
        self.account_id = '0'
def main():
    global bus
    reset_sigint_handler()
    # Create PID file, prevent multiple instances of this service
    mycroft.lock.Lock('skills')
    # Connect this Skill management process to the Mycroft Messagebus
    bus = WebsocketClient()
    Configuration.init(bus)

    bus.on('message', create_echo_function('SKILLS'))
    # Startup will be called after the connection with the Messagebus is done
    bus.once('open', _starting_up)

    create_daemon(bus.run_forever)
    wait_for_exit_signal()
    shutdown()
def __init__(self, bus, schedule_file='schedule.json'):
    """Create an event scheduler thread.

    Will send messages at a predetermined time to the registered targets.

    Args:
        bus:            Mycroft messagebus (mycroft.messagebus)
        schedule_file:  File to store pending events to on shutdown
    """
    super(EventScheduler, self).__init__()
    data_dir = expanduser(Configuration.get()['data_dir'])

    self.events = {}
    self.event_lock = Lock()

    self.bus = bus
    self.isRunning = True
    self.schedule_file = join(data_dir, schedule_file)
    if self.schedule_file:
        self.load()

    self.bus.on('mycroft.scheduler.schedule_event',
                self.schedule_event_handler)
    self.bus.on('mycroft.scheduler.remove_event',
                self.remove_event_handler)
    self.bus.on('mycroft.scheduler.update_event',
                self.update_event_handler)
    self.bus.on('mycroft.scheduler.get_event',
                self.get_event_handler)
    self.start()
def __init__(self, emitter):
    self.config = Configuration.get().get('context', {})
    self.engine = IntentDeterminationEngine()

    # Dictionary for translating a skill id to a name
    self.skill_names = {}

    # Context related initializations
    self.context_keywords = self.config.get('keywords', [])
    self.context_max_frames = self.config.get('max_frames', 3)
    self.context_timeout = self.config.get('timeout', 2)
    self.context_greedy = self.config.get('greedy', False)
    self.context_manager = ContextManager(self.context_timeout)

    self.emitter = emitter
    self.emitter.on('register_vocab', self.handle_register_vocab)
    self.emitter.on('register_intent', self.handle_register_intent)
    self.emitter.on('recognizer_loop:utterance', self.handle_utterance)
    self.emitter.on('detach_intent', self.handle_detach_intent)
    self.emitter.on('detach_skill', self.handle_detach_skill)
    # Context related handlers
    self.emitter.on('add_context', self.handle_add_context)
    self.emitter.on('remove_context', self.handle_remove_context)
    self.emitter.on('clear_context', self.handle_clear_context)
    # Converse method
    self.emitter.on('skill.converse.response',
                    self.handle_converse_response)
    self.emitter.on('mycroft.speech.recognition.unknown',
                    self.reset_converse)
    self.emitter.on('mycroft.skills.loaded', self.update_skill_name_dict)

    def add_active_skill_handler(message):
        self.add_active_skill(message.data['skill_id'])

    self.emitter.on('active_skill_request', add_active_skill_handler)
    self.active_skills = []  # [skill_id, timestamp]
    self.converse_timeout = 5  # minutes to prune active_skills
def main():
    import tornado.options
    lock = Lock("service")
    tornado.options.parse_command_line()

    def reload_hook():
        """ Hook to release lock when autoreload is triggered. """
        lock.delete()

    autoreload.add_reload_hook(reload_hook)

    config = Configuration.get().get("websocket")
    host = config.get("host")
    port = config.get("port")
    route = config.get("route")
    validate_param(host, "websocket.host")
    validate_param(port, "websocket.port")
    validate_param(route, "websocket.route")

    routes = [
        (route, WebsocketEventHandler)
    ]
    application = web.Application(routes, **settings)
    application.listen(port, host)
    ioloop.IOLoop.instance().start()
def mimic_fallback_tts(utterance, ident):
    # fallback if connection is lost
    config = Configuration.get()
    tts_config = config.get('tts', {}).get("mimic", {})
    lang = config.get("lang", "en-us")
    tts = Mimic(lang, tts_config)
    tts.init(bus)
    tts.execute(utterance, ident)
def validate_connection(self):
    config = Configuration.get().get("tts", {}).get("watson", {})
    user = config.get("user") or config.get("username")
    password = config.get("password")
    if user and password:
        return
    else:
        raise ValueError('user and/or password for IBM tts is not defined')
def main():
    """ Main function. Run when file is invoked. """
    reset_sigint_handler()
    check_for_signal("isSpeaking")
    bus = WebsocketClient()  # Connect to the Mycroft Messagebus
    Configuration.init(bus)
    speech.init(bus)

    LOG.info("Starting Audio Services")
    bus.on('message', create_echo_function('AUDIO',
                                           ['mycroft.audio.service']))
    audio = AudioService(bus)  # Connect audio service instance to message bus
    create_daemon(bus.run_forever)

    wait_for_exit_signal()

    speech.shutdown()
    audio.shutdown()
def __init__(self):
    # Establish Enclosure's websocket connection to the messagebus
    self.bus = WebsocketClient()

    # Load full config
    Configuration.init(self.bus)
    config = Configuration.get()

    self.lang = config['lang']
    self.config = config.get("enclosure")
    self.global_config = config

    # This datastore holds the data associated with the GUI provider. Data
    # is stored in Namespaces, so you can have:
    # self.datastore["namespace"]["name"] = value
    # Typically the namespace is a meaningless identifier, but there is a
    # special "SYSTEM" namespace.
    self.datastore = {}

    # self.loaded is a list, each element consists of a namespace named
    # tuple.
    # The namespace namedtuple has the properties "name" and "pages"
    # The name contains the namespace name as a string and pages is a
    # mutable list of loaded pages.
    #
    # [Namespace name, [List of loaded qml pages]]
    # [
    #   ["SKILL_NAME", ["page1.qml", "page2.qml", ..., "pageN.qml"]],
    #   [...]
    # ]
    self.loaded = []  # list of lists in order.
    self.explicit_move = True  # Set to true to send reorder commands

    # Listen for new GUI clients to announce themselves on the main bus
    self.GUIs = {}  # GUIs, either local or remote
    self.active_namespaces = []
    self.bus.on("mycroft.gui.connected", self.on_gui_client_connected)
    self.register_gui_handlers()

    # First send any data:
    self.bus.on("gui.value.set", self.on_gui_set_value)
    self.bus.on("gui.page.show", self.on_gui_show_page)
    self.bus.on("gui.page.delete", self.on_gui_delete_page)
    self.bus.on("gui.clear.namespace", self.on_gui_delete_namespace)
    self.bus.on("gui.event.send", self.on_gui_send_event)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--filename', dest='filename', default="/tmp/test.wav",
        help="Filename for saved audio (Default: /tmp/test.wav)")
    parser.add_argument(
        '-d', '--duration', dest='duration', type=int, default=10,
        help="Duration of recording in seconds (Default: 10)")
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true', default=False,
        help="Add extra output regarding the recording")
    parser.add_argument(
        '-l', '--list', dest='show_devices', action='store_true',
        default=False, help="List all available input devices")
    args = parser.parse_args()

    if args.show_devices:
        print(" Initializing... ")
        pa = pyaudio.PyAudio()

        print(" ====================== Audio Devices ======================")
        print("  Index    Device Name")
        for device_index in range(pa.get_device_count()):
            dev = pa.get_device_info_by_index(device_index)
            if dev['maxInputChannels'] > 0:
                print('   {}:       {}'.format(device_index, dev['name']))
        print()

    config = Configuration.get()
    if "device_name" in config["listener"]:
        dev = config["listener"]["device_name"]
    elif "device_index" in config["listener"]:
        dev = "Device at index {}".format(config["listener"]["device_index"])
    else:
        dev = "Default device"
    samplerate = config["listener"]["sample_rate"]
    play_cmd = config["play_wav_cmdline"].replace("%1", "WAV_FILE")
    print(" ========================== Info ===========================")
    print(" Input device: {} @ Sample rate: {} Hz".format(dev, samplerate))
    print(" Playback commandline: {}".format(play_cmd))
    print()

    print(" ===========================================================")
    print(" ==        STARTING TO RECORD, MAKE SOME NOISE!           ==")
    print(" ===========================================================")

    if not args.verbose:
        with mute_output():
            record(args.filename, args.duration)
    else:
        record(args.filename, args.duration)

    print(" ===========================================================")
    print(" ==           DONE RECORDING, PLAYING BACK...             ==")
    print(" ===========================================================")
    status = play_wav(args.filename).wait()
    if status:
        print('An error occurred while playing back audio ({})'.format(status))
def __init__(self, lang, config):
    super(BingTTS, self).__init__(lang, config, BingTTSValidator(self))
    self.type = 'wav'
    from bingtts import Translator
    self.config = Configuration.get().get("tts", {}).get("bing", {})
    api = self.config.get("api_key")
    self.bing = Translator(api)
    self.gender = self.config.get("gender", "Male")
    self.format = self.config.get("format", "riff-16khz-16bit-mono-pcm")
def __init__(self, lang, voice="en-US_AllisonVoice",
             url="https://stream.watsonplatform.net/text-to-speech/api"):
    super(WatsonTTS, self).__init__(lang, voice, url, '/v1/synthesize',
                                    WatsonTTSValidator(self))
    self.type = "wav"
    self.config = Configuration.get().get("tts", {}).get("watson", {})
    user = self.config.get("user") or self.config.get("username")
    password = self.config.get("password")
    self.auth = HTTPBasicAuth(user, password)
def create_hotword(cls, hotword="hey mycroft", config=None,
                   lang="en-us", loop=None):
    if not config:
        config = Configuration.get()['hotwords']
    config = config[hotword]

    module = config.get("module", "precise")
    return cls.load_module(module, hotword, config, lang, loop) or \
        cls.load_module('pocketsphinx', hotword, config, lang, loop) or \
        cls.CLASSES['pocketsphinx']()
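A hedged sketch of calling this factory for the default wake word; the import path assumes the usual mycroft.client.speech layout, and the detection call is left as a comment because it needs live audio frames.

from mycroft.client.speech.hotword_factory import HotWordFactory

# Builds the engine named in the "hotwords.hey mycroft.module" config key,
# falling back to pocketsphinx if that module cannot be loaded.
engine = HotWordFactory.create_hotword("hey mycroft")
# found = engine.found_wake_word(frame_data)  # frame_data: raw audio bytes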
def get():
    data_dir = expanduser(Configuration.get()['data_dir'])
    version_file = join(data_dir, 'version.json')
    if exists(version_file) and isfile(version_file):
        try:
            with open(version_file) as f:
                return json.load(f)
        except Exception:
            LOG.error("Failed to load version from '%s'" % version_file)
    return {"coreVersion": None, "enclosureVersion": None}
from twisted.internet import reactor, ssl
from autobahn.twisted.websocket import WebSocketClientFactory, \
    WebSocketClientProtocol
from twisted.internet.protocol import ReconnectingClientFactory
import json
from threading import Thread

from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG as logger
from mycroft.configuration import Configuration

platform = "JarbasClient:" + Configuration.get().get("enclosure", {}).get(
    "platform", "linux")


class JarbasClientProtocol(WebSocketClientProtocol):
    def onConnect(self, response):
        logger.info("Server connected: {0}".format(response.peer))
        self.factory.emitter.emit(
            Message("server.connected",
                    {"server_id": response.headers["server"]}))

    def onOpen(self):
        logger.info("WebSocket connection open.")
        self.factory.client = self
        self.factory.emitter.emit(Message("server.websocket.open"))

    def onMessage(self, payload, isBinary):
def create_msm():
    LOG.debug('instantiating msm via static method...')
    msm_config = build_msm_config(Configuration.get())
    msm_instance = msm_creator(msm_config)
    return msm_instance
def __init__(self, skill):
    self.__session_data = {}  # synced to GUI for use by this skill's pages
    self.page = None  # the active GUI page (e.g. QML template) to show
    self.skill = skill
    self.on_gui_changed_callback = None
    self.config = Configuration.get()
from mycroft.configuration import Configuration
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.skills.core import load_skill, create_skill_descriptor, \
    MainModule, FallbackSkill
from mycroft.skills.event_scheduler import EventScheduler
from mycroft.skills.intent_service import IntentService
from mycroft.skills.padatious_service import PadatiousService
from mycroft.util import connected
from mycroft.util.log import LOG

ws = None
event_scheduler = None
skill_manager = None

skills_config = Configuration.get().get("skills")
BLACKLISTED_SKILLS = skills_config.get("blacklisted_skills", [])
PRIORITY_SKILLS = skills_config.get("priority_skills", [])
SKILLS_DIR = '/opt/mycroft/skills'

installer_config = Configuration.get().get("SkillInstallerSkill")
MSM_BIN = installer_config.get("path",
                               join(MYCROFT_ROOT_PATH, 'msm', 'msm'))

MINUTES = 60  # number of seconds in a minute (syntactic sugar)


def connect():
    global ws
    ws.run_forever()
import json
from threading import Thread
import base64

from autobahn.twisted.websocket import WebSocketClientFactory, \
    WebSocketClientProtocol
from mycroft.configuration import Configuration
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG as logger
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory

platform = "JarbasDrone:" + Configuration.get().get("enclosure", {}).get(
    "platform", "linux")


class JarbasClientProtocol(WebSocketClientProtocol):
    def onConnect(self, response):
        logger.info("Server connected: {0}".format(response.peer))
        self.factory.emitter.emit(
            Message("hive.mind.connected",
                    {"server_id": response.headers["server"]}))
        self.factory.client = self
        self.factory.status = "connected"

    def onOpen(self):
        logger.info("WebSocket connection open.")
        self.factory.emitter.emit(Message("hive.mind.websocket.open"))

    def onMessage(self, payload, isBinary):
        logger.info("status: " + self.factory.status)
class DuckduckgoSkill(CommonQuerySkill):
    config = Configuration.get()
    # Set the active lang to match the configured one
    lang = config.get('lang', 'en-us')

    # Confirmations vocabs
    with open((dirname(realpath(__file__)) + "/locale/" + lang +
               "/text.json"), encoding='utf8') as f:
        texts = json.load(f)

    # Only ones that make sense in
    # <question_word> <question_verb> <noun>
    question_words = texts.get('question_words')
    # Note the spaces
    question_verbs = texts.get('question_verbs')
    articles = texts.get('articles')
    start_words = texts.get('start_words')
    is_verb = texts.get('is_verb')
    in_word = texts.get('in_word')

    def __init__(self):
        super(DuckduckgoSkill, self).__init__()
        self.autotranslate = self.settings.get('autotranslate', True)
        self.log.debug("autotranslate: {}".format(self.autotranslate))
        config = Configuration.get()
        self.lang = config.get('lang', 'en-us')

    @classmethod
    def format_related(cls, abstract, query):
        LOG.debug('Original abstract: ' + abstract)
        ans = abstract

        if ans[-2:] == '..':
            while ans[-1] == '.':
                ans = ans[:-1]

            phrases = ans.split(', ')
            first = ', '.join(phrases[:-1])
            last = phrases[-1]
            if last.split()[0] in cls.start_words:
                ans = first
            last_word = ans.split(' ')[-1]
            while last_word in cls.start_words or last_word[-3:] == 'ing':
                ans = ans.replace(' ' + last_word, '')
                last_word = ans.split(' ')[-1]

        category = None
        match = re.search(r'\(([a-z ]+)\)', ans)
        if match:
            start, end = match.span(1)
            if start <= len(query) * 2:
                category = match.group(1)
                ans = ans.replace('(' + category + ')', '()')

        words = ans.split()
        for article in cls.articles:
            article = article.title()
            if article in words:
                index = words.index(article)
                if index <= 2 * len(query.split()):
                    name, desc = words[:index], words[index:]
                    desc[0] = desc[0].lower()
                    ans = ' '.join(name) + cls.is_verb + ' '.join(desc)
                    break

        if category:
            ans = ans.replace('()', cls.in_word + category)

        if ans[-1] not in '.?!':
            ans += '.'
        return ans

    def respond(self, query):
        if len(query) == 0:
            return 0.0

        if self.autotranslate and self.lang[:2] != 'en':
            query_tr = translate(query, from_language=self.lang[:2],
                                 to_language='en')
            self.log.debug("translation: {}".format(query_tr))
        else:
            # no translation needed, query DuckDuckGo with the raw phrase
            query_tr = query

        r = ddg.query(query_tr)
        LOG.debug('Query: ' + str(query))
        LOG.debug('Query_tr: ' + str(query_tr))
        LOG.debug('Type: ' + r.type)

        if (r.answer is not None and r.answer.text and
                "HASH" not in r.answer.text):
            LOG.debug('Answer: ' + str(r.answer.text))
            if self.autotranslate and self.lang[:2] != 'en':
                response = translate(r.answer.text, from_language='en',
                                     to_language=self.lang[:2])
            else:
                response = r.answer.text
            return (query + self.is_verb + response + '.')
        elif len(r.abstract.text) > 0:
            LOG.debug('Abstract: ' + str(r.abstract.text))
            sents = split_sentences(r.abstract.text)
            if self.autotranslate and self.lang[:2] != 'en':
                for sent in sents:
                    sent = translate(sent, from_language='en',
                                     to_language=self.lang[:2])
            return sents[0]
        elif len(r.related) > 0 and len(r.related[0].text) > 0:
            related = split_sentences(r.related[0].text)[0]
            answer = self.format_related(related, query)
            LOG.debug('Related: ' + str(answer))
            if self.autotranslate and self.lang[:2] != 'en':
                answer = translate(answer, from_language='en',
                                   to_language=self.lang[:2])
            return (answer)
        else:
            return None

    def CQS_match_query_phrase(self, query):
        answer = None
        for noun in self.question_words:
            for verb in self.question_verbs:
                for article in [i + ' ' for i in self.articles] + ['']:
                    test = noun + verb + ' ' + article
                    if query[:len(test)] == test:
                        answer = self.respond(query[len(test):])
                        break

        if answer:
            return (query, CQSMatchLevel.CATEGORY, answer)
        else:
            return None

    def stop(self):
        pass
import stat
import subprocess
from threading import Thread
from time import time, sleep

import os.path
from os.path import exists, join, expanduser

from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.tts import TTS, TTSValidator
from mycroft.util.download import download
from mycroft.util.log import LOG

config = Configuration.get().get("tts").get("mimic")
data_dir = expanduser(Configuration.get()['data_dir'])

BIN = config.get("path",
                 os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))

if not os.path.isfile(BIN):
    # Search for mimic on the path
    import distutils.spawn

    BIN = distutils.spawn.find_executable("mimic")

SUBSCRIBER_VOICES = {'trinity': join(data_dir, 'voices/mimic_tn')}


def download_subscriber_voices(selected_voice):
def load_mycroft_config(bus):
    """ Load the mycroft config and connect it to updates over the
    messagebus.
    """
    Configuration.set_config_update_handlers(bus)
    return Configuration.get()
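A short sketch of how this helper might be used, reusing load_mycroft_config() defined just above; the printed key is illustrative.

from mycroft.messagebus.client.ws import WebsocketClient

bus = WebsocketClient()
# Registers the config update handlers on the bus and returns the merged
# configuration dictionary.
config = load_mycroft_config(bus)
print(config.get('lang', 'en-us'))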
def handle_speak(event):
    """ Handle "speak" message """
    config = Configuration.get()
    Configuration.init(bus)
    global _last_stop_signal

    # Get conversation ID
    if event.context and 'ident' in event.context:
        ident = event.context['ident']
    else:
        ident = 'unknown'

    start = time.time()  # Time of speech request
    with lock:
        stopwatch = Stopwatch()
        stopwatch.start()
        utterance = event.data['utterance']
        if event.data.get('expect_response', False):
            # When expect_response is requested, the listener will be
            # restarted at the end of the next bit of spoken audio.
            bus.once('recognizer_loop:audio_output_end', _start_listener)

        # This is a bit of a hack for Picroft. The analog audio on a Pi blocks
        # for 30 seconds fairly often, so we don't want to break on periods
        # (decreasing the chance of encountering the block). But we will
        # keep the split for non-Picroft installs since it gives user feedback
        # faster on longer phrases.
        #
        # TODO: Remove or make an option? This is really a hack, anyway,
        # so we likely will want to get rid of this when not running on Mimic
        if (config.get('enclosure', {}).get('platform') != "picroft" and
                len(re.findall('<[^>]*>', utterance)) == 0):
            # Remove any whitespace present after the period,
            # if a character (only alpha) ends with a period
            # ex: A. Lincoln -> A.Lincoln
            # so that we don't split at the period
            utterance = re.sub(r'\b([A-za-z][\.])(\s+)', r'\g<1>', utterance)
            chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\;|\?)\s',
                              utterance)
            for chunk in chunks:
                # Check if something has aborted the speech
                if (_last_stop_signal > start or
                        check_for_signal('buttonPress')):
                    # Clear any newly queued speech
                    tts.playback.clear()
                    break
                try:
                    mute_and_speak(chunk, ident)
                except KeyboardInterrupt:
                    raise
                except Exception:
                    LOG.error('Error in mute_and_speak', exc_info=True)
        else:
            mute_and_speak(utterance, ident)

        stopwatch.stop()
    report_timing(ident, 'speech', stopwatch, {
        'utterance': utterance,
        'tts': tts.__class__.__name__
    })
def check_connection():
    """ Check for network connection. If not paired trigger pairing.

    Runs as a Timer every second until connection is detected.
    """
    if connected():
        enclosure = EnclosureAPI(bus)

        if is_paired():
            # Skip the sync message when unpaired because the prompt to go
            # to home.mycroft.ai will be displayed by the pairing skill
            enclosure.mouth_text(dialog.get("message_synching.clock"))

        # Force a sync of the local clock with the internet
        config = Configuration.get()
        platform = config['enclosure'].get("platform", "unknown")
        if platform in ['mycroft_mark_1', 'picroft', 'mycroft_mark_2pi']:
            bus.wait_for_response(Message('system.ntp.sync'),
                                  'system.ntp.sync.complete', 15)

            if not is_paired():
                try_update_system(platform)

            # Check if the time skewed significantly. If so, reboot
            skew = abs((time.monotonic() - start_ticks) -
                       (time.time() - start_clock))
            if skew > 60 * 60:
                # Time moved by over an hour in the NTP sync. Force a reboot
                # to prevent weird things from occurring due to the
                # 'time warp'.
                data = {'utterance': dialog.get("time.changed.reboot")}
                bus.emit(Message("speak", data))
                wait_while_speaking()

                # provide visual indicators of the reboot
                enclosure.mouth_text(dialog.get("message_rebooting"))
                enclosure.eyes_color(70, 65, 69)  # soft gray
                enclosure.eyes_spin()

                # give the system time to finish processing enclosure messages
                time.sleep(1.0)

                # reboot
                bus.emit(Message("system.reboot"))
                return
            else:
                bus.emit(Message("enclosure.mouth.reset"))
                time.sleep(0.5)

        enclosure.eyes_color(189, 183, 107)  # dark khaki
        enclosure.mouth_text(dialog.get("message_loading.skills"))

        bus.emit(Message('mycroft.internet.connected'))
        # check for pairing, if not automatically start pairing
        try:
            if not is_paired(ignore_errors=False):
                payload = {'utterances': ["pair my device"], 'lang': "en-us"}
                bus.emit(Message("recognizer_loop:utterance", payload))
            else:
                api = DeviceApi()
                api.update_version()
        except BackendDown:
            data = {'utterance': dialog.get("backend.down")}
            bus.emit(Message("speak", data))
            bus.emit(Message("backend.down"))
    else:
        thread = Timer(1, check_connection)
        thread.daemon = True
        thread.start()
import re
import ast
from os.path import join, isdir, basename

from pyee import EventEmitter

from mycroft.messagebus.message import Message
from mycroft.skills.core import create_skill_descriptor, load_skill, \
    MycroftSkill, FallbackSkill
from mycroft.skills.settings import SkillSettings
from mycroft.configuration import Configuration

MainModule = '__init__'

DEFAULT_EVALUAITON_TIMEOUT = 30

# Set a configuration value to allow skills to check if they're in a test
Configuration.get()['test_env'] = True


# Easy way to show colors on terminals
class clr:
    PINK = '\033[95m'
    BLUE = '\033[94m'
    CYAN = '\033[96m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    DKGRAY = '\033[90m'
    # Classes
    USER_UTT = '\033[96m'  # cyan
    MYCROFT = '\033[33m'   # bright yellow
    HEADER = '\033[94m'    # blue
import sys
import os, time, platform
from datetime import datetime
import json

import dialog

from mycroft.configuration import Configuration
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.util import create_daemon, wait_for_exit_signal

__author__ = 'jcasoft'

# ----- Variables for Websocket
configWS = Configuration.get().get('websocket')
host = configWS.get('host')
port = configWS.get('port')
route = configWS.get('route')
ssl = configWS.get('ssl')

# ----- Variables for Proximity
configPr = Configuration.get().get('Proximity')
proximity_enabled = configPr.get('proximity_enabled')
proximity_data_array = configPr.get('proximity_data')

# ----- Variables for Language
lang = Configuration.get().get("lang")

ws = None
def config(self):
    """Property representing the device configuration."""
    return Configuration.get()
def __init__(self):
    super(DuckduckgoSkill, self).__init__()
    self.autotranslate = self.settings.get('autotranslate', True)
    self.log.debug("autotranslate: {}".format(self.autotranslate))
    config = Configuration.get()
    self.lang = config.get('lang', 'en-us')
def get_skills_dir():
    if len(sys.argv) > 1:
        return expanduser(sys.argv[-1])
    return expanduser(join(
        Configuration.get()['data_dir'],
        Configuration.get()['skills']['msm']['directory']))
import os
import base64

from twisted.internet import reactor, ssl
from autobahn.twisted.websocket import WebSocketServerProtocol, \
    WebSocketServerFactory

from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG as logger
from mycroft.configuration import Configuration

from jarbas_hive_mind.utils import create_self_signed_cert
from jarbas_hive_mind.database.client import ClientDatabase

author = "jarbasAI"

NAME = Configuration.get().get("server", {}).get("name", "JarbasMindv0.1")


def root_dir():
    """ Returns root directory for this project """
    return os.path.dirname(os.path.realpath(__file__ + '/.'))


users = ClientDatabase()


# protocol
class JarbasServerProtocol(WebSocketServerProtocol):
    def onConnect(self, request):
        logger.info("Client connecting: {0}".format(request.peer))
def create():
    config = Configuration.get().get("stt", {})
    module = config.get("module", "mycroft")
    clazz = STTFactory.CLASSES.get(module)
    return clazz()
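A hedged usage sketch of this factory; the commented execute() call assumes audio is a speech_recognition.AudioData instance, as used elsewhere in the codebase.

from mycroft.stt import STTFactory

# Builds whichever backend the "stt.module" config key names
# ("mycroft" by default).
stt = STTFactory.create()
# text = stt.execute(audio)  # audio: speech_recognition.AudioData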
def get_skills_dir():
    return (expanduser(os.environ.get('SKILLS_DIR', '')) or
            expanduser(join(
                Configuration.get()['data_dir'],
                Configuration.get()['skills']['msm']['directory'])))
import os.path
from os.path import exists, join, expanduser
import stat
import subprocess
from threading import Thread
from time import sleep

from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.util.download import download
from mycroft.util.log import LOG
from mycroft.tts.tts import TTS, TTSValidator

CONFIG = Configuration.get().get("tts").get("mimic")
DATA_DIR = expanduser(Configuration.get()['data_dir'])

BIN = CONFIG.get("path",
                 os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))

if not os.path.isfile(BIN):
    # Search for mimic on the path
    import distutils.spawn

    BIN = distutils.spawn.find_executable("mimic")

SUBSCRIBER_VOICES = {'trinity': join(DATA_DIR, 'voices/mimic_tn')}


def download_subscriber_voices(selected_voice):
from mycroft.client.enclosure.api import EnclosureAPI
from mycroft.skills.event_scheduler import EventScheduler
from mycroft.skills.intent_service import IntentService
from mycroft.skills.padatious_service import PadatiousService
from mycroft.util import connected, wait_while_speaking
from mycroft.util.log import LOG

ws = None
event_scheduler = None
skill_manager = None

# Remember "now" at startup. Used to detect clock changes.
start_ticks = monotonic.monotonic()
start_clock = time.time()

DEBUG = Configuration.get().get("debug", False)
skills_config = Configuration.get().get("skills")
BLACKLISTED_SKILLS = skills_config.get("blacklisted_skills", [])
PRIORITY_SKILLS = skills_config.get("priority_skills", [])
SKILLS_DIR = '/opt/mycroft/skills'

installer_config = Configuration.get().get("SkillInstallerSkill")
MSM_BIN = installer_config.get("path",
                               join(MYCROFT_ROOT_PATH, 'msm', 'msm'))

MINUTES = 60  # number of seconds in a minute (syntactic sugar)


def direct_update_needed():
    """Determine need for an update.

    A direct update is needed if the .msm file doesn't exist, if it's older
    than 12 hours (or as configured), or if any of the default skills are
    missing.
def get_filtered_alerts(self, entries, status="Actual",
                        msgType="Alert,Update", scope="Public",
                        sent=date.today(), expires=date.today(),
                        max_entries=999):
    alerts = []
    count = 0
    for e in entries:
        count += 1
        # some service feeds contain the complete history of alert
        # entries, cut them after x messages
        if count > self.maximum_entries:
            break
        # when entry contains CAP data do some quick filtering without
        # loading and parsing the actual CAP alert
        if self.is_cap_entry(e):
            self.log.info("CAP entry")
            if (e.cap_status not in status) or \
                    (e.cap_msgtype not in msgType):
                self.log.info(e.id)
                self.log.info("status: {} msgtype: {}".format(
                    e.cap_status, e.cap_msgtype))
                continue
            if not ((self._get_datetime(e.cap_sent).date() == sent) or
                    (self._get_datetime(e.cap_expires).date() >= expires)):
                self.log.info(e.id)
                self.log.info("sent: {} expires: {}".format(
                    e.cap_sent, e.cap_expires))
                continue
            if "cap_severity" in e.keys() and \
                    e.cap_severity not in self.severity:
                continue
            if "cap_urgency" in e.keys() and \
                    e.cap_urgency not in self.urgency:
                continue
            if "cap_certainty" in e.keys() and \
                    e.cap_certainty not in self.certainty:
                continue

        caplink = self.get_cap_alert_link(e.links)
        self.log.debug(caplink)
        # fix for Spain, first link is download of compressed feed
        if ".tar.gz" in caplink:
            continue
        r = requests.get(caplink, headers=self.service['hdr_atom'])
        if r.status_code != 200:
            self.log.info("request status: {}".format(r.status_code))
            continue
        a = xmltodict.parse(r.content.decode("utf-8"),
                            namespaces={'cap': None})
        if "alert" not in a.keys():
            self.log.info("no alert")
            continue
        alert = a["alert"]
        info = self.get_alert_info_by_lang(alert,
                                           language=self.service['lang'])
        if not info:
            continue

        if not self.is_cap_entry(e):
            if (alert["status"] not in status) or \
                    (alert["msgType"] not in msgType):
                self.log.info("status: {} msgtype: {}".format(
                    alert["status"], alert["msgType"]))
                continue
            if "expires" in info.keys():
                if not ((self._get_datetime(alert["sent"]).date() == sent) or
                        (self._get_datetime(info["expires"]).date() >=
                         expires)):
                    self.log.info("sent: {} expires: {}".format(
                        alert["sent"], info["expires"]))
                    continue
            else:
                if not (self._get_datetime(alert["sent"]).date() == sent):
                    self.log.info("sent: {} ".format(alert["sent"]))
                    continue
            if "severity" in info.keys() and \
                    info["severity"] not in self.severity:
                continue
            if "urgency" in info.keys() and \
                    info["urgency"] not in self.urgency:
                continue
            if "certainty" in info.keys() and \
                    info["certainty"] not in self.certainty:
                continue

        # location filtering
        if self.location_filter == "geoloc":
            lon = config.get()['location']['coordinate']['longitude']
            lat = config.get()['location']['coordinate']['latitude']
            if not self.in_geo_location(info, longitude=lon, latitude=lat):
                self.log.info("skipping for reason: geoloc")
                continue
        elif self.location_filter == "areadesc" and self.location_text:
            if not self.in_geo_location(info, areadesc=self.location_text):
                self.log.info("skipping for reason: areadesc - {}".format(
                    self.location_text))
                continue
        elif self.location_filter == "geocode" and self.location_text:
            if not self.in_geo_location(info,
                                        geocodevalue=self.location_text):
                self.log.info("skipping for reason: geocode - {}".format(
                    self.location_text))
                continue

        # all filter criteria passed, add alert to result list
        self.log.info("add alert {}".format(alert["identifier"]))
        alerts.append(alert)
        if len(alerts) == max_entries:
            break
    return alerts
def check_connection():
    """ Check for network connection. If not paired trigger pairing.

    Runs as a Timer every second until connection is detected.
    """
    if connected():
        enclosure = EnclosureAPI(ws)

        if is_paired():
            # Skip the sync message when unpaired because the prompt to go
            # to home.mycroft.ai will be displayed by the pairing skill
            enclosure.mouth_text(mycroft.dialog.get("message_synching.clock"))

        # Force a sync of the local clock with the internet
        config = Configuration.get()
        platform = config['enclosure'].get("platform", "unknown")
        if platform in ['mycroft_mark_1', 'picroft']:
            ws.emit(Message("system.ntp.sync"))
            time.sleep(15)  # TODO: Generate/listen for a message response...

            # Check if the time skewed significantly. If so, reboot
            skew = abs((monotonic.monotonic() - start_ticks) -
                       (time.time() - start_clock))
            if skew > 60 * 60:
                # Time moved by over an hour in the NTP sync. Force a reboot
                # to prevent weird things from occurring due to the
                # 'time warp'.
                ws.emit(Message(
                    "speak",
                    {'utterance': mycroft.dialog.get("time.changed.reboot")}))
                wait_while_speaking()

                # provide visual indicators of the reboot
                enclosure.mouth_text(mycroft.dialog.get("message_rebooting"))
                enclosure.eyes_color(70, 65, 69)  # soft gray
                enclosure.eyes_spin()

                # give the system time to finish processing enclosure messages
                time.sleep(1.0)

                # reboot
                ws.emit(Message("system.reboot"))
                return
            else:
                ws.emit(Message("enclosure.mouth.reset"))
                time.sleep(0.5)

        ws.emit(Message('mycroft.internet.connected'))
        # check for pairing, if not automatically start pairing
        if not is_paired():
            # begin the process
            payload = {'utterances': ["pair my device"], 'lang': "en-us"}
            ws.emit(Message("recognizer_loop:utterance", payload))
        else:
            from mycroft.api import DeviceApi
            api = DeviceApi()
            api.update_version()
    else:
        thread = Timer(1, check_connection)
        thread.daemon = True
        thread.start()
def __init__(self, name):
    self.name = name.lower()
    self.config = Configuration.get().get(self.name, {})
    self.intent_samples = {}
    self.entity_samples = {}
    self.regex_samples = {}
def validate_lang(self):
    config = Configuration.get().get("tts", {}).get("yandex", {})
    lang = config.get("lang")
    if lang in ["en-US", "ru-RU", "tr-TR"]:
        return True
    raise ValueError("Unsupported language for Yandex TTS")
def skill_is_blacklisted(skill):
    blacklist = Configuration.get()['skills']['blacklisted_skills']
    return os.path.basename(skill.path) in blacklist or \
        skill.name in blacklist
def create_echo_function(name, whitelist=None):
    """Standard logging mechanism for Mycroft processes.

    This handles the setup of the basic logging for all Mycroft
    messagebus-based processes.

    Args:
        name (str): Reference name of the process
        whitelist (list, optional): List of "type" strings. If defined, only
                                    messages in this list will be logged.

    Returns:
        func: The echo function
    """
    from mycroft.configuration import Configuration
    blacklist = Configuration.get().get("ignore_logs")

    # Make sure whitelisting doesn't remove the log level setting command
    if whitelist:
        whitelist.append('mycroft.debug.log')

    def echo(message):
        global _log_all_bus_messages
        try:
            msg = json.loads(message)
            msg_type = msg.get("type", "")

            # Whitelist match beginning of message
            # i.e 'mycroft.audio.service' will allow the message
            # 'mycroft.audio.service.play' for example
            if whitelist and not any([msg_type.startswith(e)
                                      for e in whitelist]):
                return

            if blacklist and msg_type in blacklist:
                return

            if msg_type == "mycroft.debug.log":
                # Respond to requests to adjust the logger settings
                lvl = msg["data"].get("level", "").upper()
                if lvl in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]:
                    LOG.level = lvl
                    LOG(name).info("Changing log level to: {}".format(lvl))
                    try:
                        logging.getLogger().setLevel(lvl)
                        logging.getLogger('urllib3').setLevel(lvl)
                    except Exception:
                        pass  # We don't really care about if this fails...
                else:
                    LOG(name).info("Invalid level provided: {}".format(lvl))

                # Allow enable/disable of messagebus traffic
                log_bus = msg["data"].get("bus", None)
                if log_bus is not None:
                    LOG(name).info("Bus logging: {}".format(log_bus))
                    _log_all_bus_messages = log_bus
            elif msg_type == "registration":
                # do not log tokens from registration messages
                msg["data"]["token"] = None
                message = json.dumps(msg)
        except Exception as e:
            LOG.info("Error: {}".format(repr(e)), exc_info=True)

        if _log_all_bus_messages:
            # Listen for messages and echo them for logging
            LOG(name).info("BUS: {}".format(message))

    return echo
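A usage sketch mirroring how other entries in this collection wire the echo function to the bus; the import location of create_echo_function is assumed to be mycroft.util.

from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.util import create_echo_function

# Log bus traffic for this process under the 'AUDIO' name, whitelisting
# only messages whose type starts with 'mycroft.audio.service'.
bus = WebsocketClient()
bus.on('message', create_echo_function('AUDIO', ['mycroft.audio.service']))
bus.run_forever()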
from threading import Thread

from mycroft.stt import STTFactory
from mycroft.configuration import Configuration
from mycroft.util.log import LOG
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message

import speech_recognition as sr

authors = ["forslund", "jarbas"]

ws = None
config = Configuration.get()


def connect():
    ws.run_forever()


def read_wave_file(wave_file):
    # use the audio file as the audio source
    r = sr.Recognizer()
    with sr.AudioFile(wave_file) as source:
        audio = r.record(source)
    return audio


class FileConsumer:
    def __init__(self, emitter=None):
        super(FileConsumer, self).__init__()
        self.stt = None