def _do_net_check(self):
    # TODO: This should live in the derived Enclosure, e.g. Enclosure_Mark1
    """Check internet connectivity and guide the user if offline.

    If the unit has been paired before, speak reconnection instructions.
    On a factory-fresh unit (never paired) start the out-of-box
    experience: mute the mic, play the intro dialog and launch the
    wifi-setup flow on the messagebus.
    """
    LOG.info("Checking internet connection")
    if not connected():  # and self.conn_monitor is None:
        if has_been_paired():
            # TODO: Enclosure/localization
            self.speak("This unit is not connected to the Internet. "
                       "Either plug in a network cable or hold the "
                       "button on top for two seconds, then select "
                       "wifi from the menu")
        else:
            # Begin the unit startup process, this is the first time it
            # is being run with factory defaults.
            # TODO: This logic should be in Enclosure_Mark1
            # TODO: Enclosure/localization

            # Don't listen to mic during this out-of-box experience
            self.bus.emit(Message("owo.mic.mute"))
            # Setup handler to unmute mic at the end of on boarding
            # i.e. after pairing is complete
            self.bus.once('owo.paired', self._handle_pairing_complete)

            self.speak(owo.dialog.get('owo.intro'))
            wait_while_speaking()
            time.sleep(2)  # a pause sounds better than just jumping in

            # Kick off wifi-setup automatically
            data = {'allow_timeout': False, 'lang': self.lang}
            self.bus.emit(Message('system.wifi.setup', data))
def get(phrase, lang=None, context=None):
    """ Looks up a resource file for the given phrase.

    If no file is found, the requested phrase is returned as the
    string. This will use the default language for translations.

    Args:
        phrase (str): resource phrase to retrieve/translate
        lang (str): the language to use
        context (dict): values to be inserted into the string

    Returns:
        str: a randomized and/or translated version of the phrase
    """
    if not lang:
        # Fall back to the configured default language
        from owo.configuration import Configuration
        lang = Configuration.get().get("lang")

    filename = 'text/{}/{}.dialog'.format(lang.lower(), phrase)
    template = resolve_resource_file(filename)
    if not template:
        LOG.debug("Resource file not found: " + filename)
        return phrase

    renderer = MustacheDialogRenderer()
    renderer.load_template_file("template", template)
    return renderer.render("template", context or {})
def __init__(self, bus, service):
    """Create the Padatious fallback intent service.

    Args:
        bus: messagebus connection used for intent/entity registration
        service: parent intent service this instance reports to
    """
    FallbackSkill.__init__(self)
    if not PadatiousService.instance:
        # First instance becomes the singleton
        PadatiousService.instance = self

    self.config = Configuration.get()['padatious']
    self.service = service
    intent_cache = expanduser(self.config['intent_cache'])

    try:
        from padatious import IntentContainer
    except ImportError:
        LOG.error('Padatious not installed. Please re-run dev_setup.sh')
        try:
            # Best-effort desktop notification; ignored when
            # notify-send is unavailable
            call(['notify-send', 'Padatious not installed',
                  'Please run build_host_setup and dev_setup again'])
        except OSError:
            pass
        return  # service is left non-functional without padatious

    self.container = IntentContainer(intent_cache)

    self.bus = bus
    self.bus.on('padatious:register_intent', self.register_intent)
    self.bus.on('padatious:register_entity', self.register_entity)
    self.bus.on('detach_intent', self.handle_detach_intent)
    self.bus.on('owo.skills.initialized', self.train)
    self.register_fallback(self.handle_fallback, 5)
    self.finished_training_event = Event()
    self.finished_initial_train = False

    # Training is debounced: it happens train_delay seconds after the
    # most recent registration
    self.train_delay = self.config['train_delay']
    self.train_time = get_time() + self.train_delay
def enable_intent(self, intent_name):
    """ (Re)Enable a registered intent if it belongs to this skill

    Args:
        intent_name: name of the intent to be enabled

    Returns:
        bool: True if enabled, False if it wasn't registered
    """
    # Search for the first registration matching the requested name
    for name, intent in list(self.registered_intents):
        if name != intent_name:
            continue
        # Drop the stale registration, then re-register
        self.registered_intents.remove((intent_name, intent))
        if ".intent" in intent_name:
            self.register_intent_file(intent_name, None)
        else:
            intent.name = intent_name
            self.register_intent(intent, None)
        LOG.debug('Enabling intent ' + intent_name)
        return True

    LOG.error('Could not enable ' + intent_name + ', it hasn\'t been '
              'registered.')
    return False
def handle_utterance(event):
    """Forward a transcribed utterance onto the messagebus.

    Moves any 'ident' from the event payload into the message context.
    """
    LOG.info("Utterance: " + str(event['utterances']))
    context = {'client_name': 'OwO_listener'}
    if 'ident' in event:
        context['ident'] = event.pop('ident')
    bus.emit(Message('recognizer_loop:utterance', event, context))
def schedule_repeating_event(self, handler, when, frequency,
                             data=None, name=None):
    """ Schedule a repeating event.

    Args:
        handler: method to be called
        when (datetime): time for calling the handler or None to
                         initially trigger <frequency> seconds from now
        frequency (float/int): time in seconds between calls
        data (dict, optional): data to send along to the handler
        name (str, optional): friendly name parameter
    """
    # Do not schedule if this event is already scheduled by the skill
    if name in self.scheduled_repeats:
        LOG.debug('The event is already scheduled, cancel previous '
                  'event if this scheduling should replace the last.')
        return

    payload = data if data else {}
    # No start time given: first trigger is one full interval from now
    start = when or (datetime.now() + timedelta(seconds=frequency))
    self._schedule_event(handler, start, payload, name, frequency)
def read(self, size, of_exc=False):
    """ Read data from stream.

    Arguments:
        size (int): Number of bytes to read
        of_exc (bool): flag determining if the audio producer thread
                       should throw IOError at overflows.

    Returns:
        Data read from device
    """
    chunks = []
    remaining = size
    while remaining > 0:
        avail = min(self.wrapped_stream.get_read_available(), remaining)
        if not avail:
            # Nothing buffered yet; back off briefly and retry
            sleep(.01)
            continue
        chunk = self.wrapped_stream.read(avail,
                                         exception_on_overflow=of_exc)
        chunks.append(chunk)
        remaining -= avail

    if self.muted:
        # Data was consumed from the device but silence is returned
        return self.muted_buffer

    input_latency = self.wrapped_stream.get_input_latency()
    if input_latency > 0.2:
        LOG.warning("High input latency: %f" % input_latency)
    return b"".join(chunks)
def execute(self, sentence, ident=None):
    """ Convert sentence to speech, preprocessing out unsupported ssml

    The method caches results if possible using the hash of the
    sentence.

    Args:
        sentence: Sentence to be spoken
        ident: Id reference to current interaction
    """
    sentence = self.validate_ssml(sentence)

    create_signal("isSpeaking")
    if self.phonetic_spelling:
        # Substitute words that have a known phonetic spelling so the
        # TTS engine pronounces them correctly
        for word in re.findall(r"[\w']+", sentence):
            if word.lower() in self.spellings:
                sentence = sentence.replace(word,
                                            self.spellings[word.lower()])

    # Cache key is derived from the (possibly rewritten) sentence text
    key = str(hashlib.md5(sentence.encode('utf-8', 'ignore')).hexdigest())
    wav_file = os.path.join(owo.util.get_cache_directory("tts"),
                            key + '.' + self.audio_ext)

    if os.path.exists(wav_file):
        LOG.debug("TTS cache hit")
        phonemes = self.load_phonemes(key)
    else:
        # Synthesize and persist phonemes for future cache hits
        wav_file, phonemes = self.get_tts(sentence, wav_file)
        if phonemes:
            self.save_phonemes(key, phonemes)

    vis = self.visime(phonemes)
    # Hand the audio (plus mouth visemes) to the playback queue
    self.queue.put((self.audio_ext, wav_file, vis, ident))
def default_shutdown(self):
    """Parent function called internally to shut down everything.

    Shuts down known entities and calls skill specific shutdown method.
    """
    try:
        self.shutdown()
    except Exception as e:
        LOG.error('Skill specific shutdown function encountered '
                  'an error: {}'.format(repr(e)))

    # Store settings
    if exists(self._dir):
        self.settings.store()
        self.settings.stop_polling()

    # removing events
    self.cancel_all_repeating_events()
    for e, f in self.events:
        self.bus.remove(e, f)
    self.events = []  # Remove reference to wrappers

    self.bus.emit(
        Message("detach_skill", {"skill_id": str(self.skill_id) + ":"}))
    try:
        self.stop()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors and keep traceback.
        LOG.error("Failed to stop skill: {}".format(self.name),
                  exc_info=True)
def __init__(self):
    """Initialize the GoVivace STT backend.

    Raises:
        NotImplementedError: for languages other than English or
        Spanish, which the service does not support.
    """
    super(GoVivaceSTT, self).__init__()
    self.default_uri = "https://services.govivace.com:49149/telephony"

    supported = ("en", "es")
    if not any(self.lang.startswith(prefix) for prefix in supported):
        LOG.error("GoVivace STT only supports english and spanish")
        raise NotImplementedError
def _adapt_intent_match(self, utterances, lang):
    """ Run the Adapt engine to search for an matching intent

    Args:
        utterances (list): list of utterances
        lang (string): 4 letter ISO language code

    Returns:
        Intent structure, or None if no match was found.
    """
    best_intent = None
    # NOTE(review): each successful parse overwrites best_intent, so
    # the LAST utterance that parses wins -- confirm this is intended.
    for utterance in utterances:
        try:
            # normalize() changes "it's a boy" to "it is boy", etc.
            best_intent = next(self.engine.determine_intent(
                normalize(utterance, lang), 100,
                include_tags=True,
                context_manager=self.context_manager))
            # TODO - Should Adapt handle this?
            best_intent['utterance'] = utterance
        except StopIteration:
            # don't show error in log
            continue
        except Exception as e:
            LOG.exception(e)
            continue

    if best_intent and best_intent.get('confidence', 0.0) > 0.0:
        self.update_context(best_intent)
        # update active skills
        skill_id = best_intent['intent_type'].split(":")[0]
        self.add_active_skill(skill_id)
        return best_intent
    # Implicitly returns None when nothing matched with confidence > 0
def validate_connection(self):
    """Verify that the mimic binary is present and runnable.

    Raises:
        Exception: if the mimic executable cannot be executed.
    """
    try:
        subprocess.call([BIN, '--version'])
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Typically this is OSError/FileNotFoundError
        # when the binary is missing.
        LOG.info("Failed to find mimic at: " + BIN)
        raise Exception(
            'Mimic was not found. Run install-mimic.sh to install it.')
def _read_data():
    """ Reads the dictionary of state data from the IPC directory.

    (The previous docstring said "Writes"; this function only reads,
    creating an empty file when none exists.)

    Returns:
        dict: loaded state information
    """
    managerIPCDir = os.path.join(get_ipc_directory(), "managers")

    path = os.path.join(managerIPCDir, "disp_info")
    permission = "r" if os.path.isfile(path) else "w+"

    if permission == "w+" and os.path.isdir(managerIPCDir) is False:
        os.makedirs(managerIPCDir)

    data = {}
    try:
        with open(path, permission) as dispFile:
            if os.stat(str(dispFile.name)).st_size != 0:
                data = json.load(dispFile)
    except Exception as e:
        LOG.error(e)
        os.remove(path)
        # Retry once after removing the corrupt file and RETURN that
        # result (the original discarded the recursive call's value).
        return _read_data()

    return data
def _poll_skill_settings(self):
    """ If identifier exists for this skill poll to backend to
        request settings and store it if it changes
        TODO: implement as websocket
    """
    # Snapshot of current settings, compared later to detect changes
    original = hash(str(self))
    try:
        if not is_paired():
            pass  # not paired: nothing to sync yet
        elif not self._complete_intialization:
            self.initialize_remote_settings()
            if not self._complete_intialization:
                return  # unable to do remote sync
        else:
            self.update_remote()
    except Exception as e:
        LOG.exception('Failed to fetch skill settings: {}'.format(repr(e)))
    finally:
        # Call callback for updated settings
        if self.changed_callback and hash(str(self)) != original:
            self.changed_callback()

        # Cancel any previous poll timer before re-arming
        if self._poll_timer:
            self._poll_timer.cancel()

        if not self._is_alive:
            return

        # continues to poll settings every minute
        self._poll_timer = Timer(60, self._poll_skill_settings)
        self._poll_timer.daemon = True
        self._poll_timer.start()
def simple_cli():
    """Run a minimal CLI: read lines from stdin, emit them as utterances.

    Blocks until the user quits with Ctrl+C.
    """
    global bus
    global bSimple
    bSimple = True

    bus = WebsocketClient()  # OwO messagebus connection
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()
    bus.on('speak', handle_speak)
    try:
        while True:
            # Sleep for a while so all the output that results
            # from the previous command finishes before we print.
            time.sleep(1.5)
            print("Input (Ctrl+C to quit):")
            line = sys.stdin.readline()
            bus.emit(
                Message("recognizer_loop:utterance",
                        {'utterances': [line.strip()]}))
    except KeyboardInterrupt:
        # User hit Ctrl+C to quit.  (A second, unreachable
        # `except KeyboardInterrupt` clause has been removed.)
        print("")
    # Thread objects have no exit() method (calling it raised
    # AttributeError); the daemon thread dies with the process.
    sys.exit()
def mute_and_speak(utterance, ident):
    """ Mute mic and start speaking the utterance using selected tts backend.

    Args:
        utterance: The sentence to be spoken
        ident: Ident tying the utterance to the source query
    """
    global tts_hash

    # update TTS object if configuration has changed since the hash
    # of the 'tts' config section was last recorded
    if tts_hash != hash(str(config.get('tts', ''))):
        global tts
        # Stop tts playback thread
        tts.playback.stop()
        tts.playback.join()
        # Create new tts instance
        tts = TTSFactory.create()
        tts.init(bus)
        tts_hash = hash(str(config.get('tts', '')))

    LOG.info("Speak: " + utterance)
    try:
        tts.execute(utterance, ident)
    except Exception as e:
        # Speech failure should not crash the speech service
        LOG.error('TTS execution failed ({})'.format(repr(e)))
def ensure_directory_exists(directory, domain=None):
    """ Create a directory and give access rights to all

    Args:
        directory (str): base directory to create
        domain (str): The IPC domain.  Basically a subdirectory to
                      prevent overlapping signal filenames.

    Returns:
        str: a path to the directory
    """
    if domain:
        directory = os.path.join(directory, domain)
    directory = os.path.normpath(directory)

    if not os.path.isdir(directory):
        # Bind `save` BEFORE the try so the finally clause can never hit
        # a NameError (originally it was assigned inside the try).
        save = os.umask(0)
        try:
            # exist_ok avoids a race when another process creates the
            # directory between the isdir() check and makedirs().
            os.makedirs(directory, 0o777, exist_ok=True)
        except OSError:
            LOG.warning("Failed to create: " + directory)
        finally:
            os.umask(save)
    return directory
def supported_uris(self):
    """ Return supported uris of chromecast. """
    LOG.info("Chromecasts found: " + str(self.cast))
    # http/https playback is only possible once a cast device exists
    return ['http', 'https'] if self.cast else []
def load_module(module, hotword, config, lang, loop):
    """Instantiate a hotword engine in a background thread with a timeout.

    Args:
        module: name of the hotword engine class to instantiate
        hotword: wake word phrase
        config: engine configuration
        lang: language code
        loop: listener loop, used to trigger a reload if requested

    Returns:
        Engine instance, or None on failure or when loading times out.
    """
    LOG.info('Loading "{}" wake word via {}'.format(hotword, module))
    instance = None
    complete = Event()

    def initialize():
        # Runs in a daemon thread so a hung engine cannot block startup
        nonlocal instance, complete
        try:
            clazz = HotWordFactory.CLASSES[module]
            instance = clazz(hotword, config, lang=lang)
        except TriggerReload:
            # Engine requested a listener reload; signal completion
            # first so the caller is unblocked
            complete.set()
            sleep(0.5)
            loop.reload()
        except Exception:
            LOG.exception(
                'Could not create hotword. Falling back to default.')
            instance = None
        complete.set()

    Thread(target=initialize, daemon=True).start()
    if not complete.wait(INIT_TIMEOUT):
        # Timed out: give up and return None (thread may still finish)
        LOG.info('{} is taking too long to load'.format(module))
        complete.set()
    return instance
def during_download(self, first_run=False):
    """Periodically report progress while the listener download runs."""
    LOG.info('Still downloading executable...')
    if first_run:  # TODO: Localize
        self._snd_msg('mouth.text=Updating listener...')
    if not self.download_complete:
        # Check again in 30 seconds
        timer = Timer(30, self.during_download)
        self.show_download_progress = timer
        timer.start()
def process(self, audio):
    """Transcribe captured audio and emit the resulting utterance.

    Always emits 'recognizer_loop:wakeword'; when the audio is long
    enough it transcribes it, emits 'recognizer_loop:utterance' on
    success and reports STT timing metrics either way.
    """
    SessionManager.touch()
    payload = {
        'utterance': self.wakeword_recognizer.key_phrase,
        'session': SessionManager.get().session_id,
    }
    self.emitter.emit("recognizer_loop:wakeword", payload)

    if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
        LOG.warning("Audio too short to be processed")
    else:
        stopwatch = Stopwatch()
        with stopwatch:
            transcription = self.transcribe(audio)
        if transcription:
            # ident ties this transcription to downstream handling
            ident = str(stopwatch.timestamp) + str(hash(transcription))
            # STT succeeded, send the transcribed speech on for processing
            payload = {
                'utterances': [transcription],
                'lang': self.stt.lang,
                'session': SessionManager.get().session_id,
                'ident': ident
            }
            self.emitter.emit("recognizer_loop:utterance", payload)
            self.metrics.attr('utterances', [transcription])
        else:
            ident = str(stopwatch.timestamp)
        # Report timing metrics
        report_timing(ident, 'stt', stopwatch, {
            'transcription': transcription,
            'stt': self.stt.__class__.__name__
        })
def deactivate_skill(self, message):
    """ Deactivate a skill. """
    try:
        skill = message.data['skill']
        loaded_names = [basename(s) for s in self.loaded_skills]
        if skill in loaded_names:
            self.__deactivate_skill(skill)
    except Exception as e:
        # Best effort: log and carry on
        LOG.error('Couldn\'t deactivate skill, {}'.format(repr(e)))
def flush(self):
    """Drain queued commands and write them to the serial port."""
    while self.alive:
        try:
            command = self.commands.get()
            # Each command is newline-terminated on the wire
            self.serial.write((command + '\n').encode())
            self.commands.task_done()
        except Exception as e:
            LOG.error("Writing error: {0}".format(e))
def __play(self, req):
    """Save and play the synthesized audio once the request resolves."""
    resp = req.result()
    if resp.status_code != 200:
        LOG.error('%s Http Error: %s for url: %s' %
                  (resp.status_code, resp.reason, resp.url))
        return
    self.__save(resp.content)
    # Block until playback finishes
    play_wav(self.filename).communicate()
def is_speaking():
    """Determine if Text to Speech is occurring

    Returns:
        bool: True while still speaking
    """
    # Fixed typo in the logged warning: "depreciated" -> "deprecated"
    LOG.info("owo.utils.is_speaking() is deprecated, use "
             "owo.audio.is_speaking() instead.")
    return owo.audio.is_speaking()
def load_vocab_files(self, root_directory):
    """Load skill vocabulary, preferring vocab/<lang> over locale/<lang>."""
    vocab_dir = join(root_directory, 'vocab', self.lang)
    locale_dir = join(root_directory, 'locale', self.lang)
    if exists(vocab_dir):
        load_vocabulary(vocab_dir, self.bus, self.skill_id)
    elif exists(locale_dir):
        load_vocabulary(locale_dir, self.bus, self.skill_id)
    else:
        LOG.debug('No vocab loaded')
def get():
    """Return saved core/enclosure version info, or null defaults."""
    data_dir = expanduser(Configuration.get()['data_dir'])
    version_file = join(data_dir, 'version.json')
    if exists(version_file) and isfile(version_file):
        try:
            with open(version_file) as f:
                return json.load(f)
        except Exception:
            LOG.error("Failed to load version from '%s'" % version_file)
    # No readable version file: report unknown versions
    return {"coreVersion": None, "enclosureVersion": None}
def wait_while_speaking():
    """Pause as long as Text to Speech is still happening

    Pause while Text to Speech is still happening.  This always pauses
    briefly to ensure that any preceding request to speak has time to
    begin.
    """
    # Fixed typos: "preceeding" -> "preceding" (docstring) and
    # "depreciated" -> "deprecated" (log message)
    LOG.info("owo.utils.wait_while_speaking() is deprecated, use "
             "owo.audio.wait_while_speaking() instead.")
    return owo.audio.wait_while_speaking()
def play(self):
    """ Start playback of the first queued track on the chromecast. """
    self.cast.quit_app()

    track = self.tracklist[0]
    if self._track_start_callback:
        # Report start of playback to audioservice
        self._track_start_callback(track)
    guessed = guess_type(track)
    LOG.debug('track: {}, type: {}'.format(track, guessed))
    # Fall back to a generic mp3 mime type when the guess fails
    self.cast.play_media(track, guessed[0] or 'audio/mp3')
def autodetect(config, bus):
    """
    Autodetect chromecasts on the network and create backends for each
    """
    casts = pychromecast.get_chromecasts(timeout=5, tries=2, retry_wait=2)
    services = []
    for cast in casts:
        LOG.info(cast.name + " found.")
        services.append(
            ChromecastService(config, bus, cast.name.lower(), cast))
    return services