def test_deserialize(self):
    """Check that deserialized messages round-trip back to their strings."""
    # Deserialize each reference string, then re-serialize and verify
    # the output matches the input exactly (only the first three have
    # dict/None payloads suitable for this round trip).
    for serialized in self.serialized[:3]:
        message = Message.deserialize(serialized)
        self.assertEqual(message.serialize(), serialized)
def setUp(self):
    """Create reference messages and their expected serialized
    (JSON string) forms for use by the test methods.
    """
    self.empty_message = Message("empty")       # type only
    self.message1 = Message("enclosure.reset")  # type only
    self.message2 = Message("enclosure.system.blink",
                            {'target': 4}, {'target': 5})  # data + context
    self.message3 = Message("status", "OK")     # non-dict data payload
    # serialized results of each of the messages
    # (a backslash-newline inside a string literal continues the
    # literal without inserting any characters)
    self.serialized = ['{"data": {}, "type": "empty", "context": null}',
                       '{"data": {}, "type": "enclosure.reset",\
 "context": null}',
                       '{"data": { "target": 4}, \
"type": "enclosure.system.blink", \
"context": {"target": 5}}',
                       '{"data": "OK", "type": "status", \
"context": null}']
def on_message(self, message):
    """Deserialize an incoming bus string and re-emit it locally.

    Args:
        message (str): serialized Message received from the socket
    """
    LOG.debug(message)
    try:
        deserialized_message = Message.deserialize(message)
    except Exception:
        # Malformed payloads are dropped silently (best effort).
        return

    try:
        self.emitter.emit(deserialized_message.type, deserialized_message)
    except Exception as e:  # fixed: Python-2 "except Exception, e" syntax
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)
def on_message(self, message):
    """Re-emit a deserialized bus message locally, then broadcast the
    raw string to every connected websocket client.
    """
    try:
        parsed = Message.deserialize(message)
    except:  # noqa: E722 - silently drop anything that fails to parse
        return
    try:
        self.emitter.emit(parsed.type, parsed)
    except Exception as e:
        LOG.exception(e)
        traceback.print_exc(file=sys.stdout)
    # Fan the original (still-serialized) string out to all clients.
    for client in client_connections:
        client.write_message(message)
def _handle_trigger(self, message: Message):
    """Answer a trigger message if this skill can handle the request.

    Rebuilds the IoTRequest from the message payload and asks the
    skill whether it can handle it.  If so, replies on the bus with
    this skill's id and any callback data.

    Args:
        message: Message carrying a serialized IoTRequest
    """
    payload = message.data
    request = IoTRequest.from_dict(payload[IoTRequest.__name__])
    handled, callback_data = self.can_handle(request)
    if not handled:
        return
    payload.update({"skill_id": self.skill_id,
                    "callback_data": callback_data})
    self.bus.emit(message.response(payload))
def eyes_narrow(self):
    """Narrow the eyes, as if squinting."""
    narrow_msg = Message("enclosure.eyes.narrow")
    self.ws.emit(narrow_msg)
def __emit_paired(self, paired):
    """Broadcast the current pairing state on the bus.

    Args:
        paired (bool): whether the device is paired
    """
    self.emitter.emit(
        Message('mycroft.paired', metadata={'paired': paired}))
def on_message(self, message):
    """Forward a raw bus string and its parsed form to local listeners."""
    parsed = Message.deserialize(message)
    # Raw string first on the generic channel, then the typed event.
    self.emitter.emit('message', message)
    self.emitter.emit(parsed.type, parsed)
def _load_or_reload_skill(self, skill_folder):
    """
    Check if unloaded skill or changed skill needs reloading
    and perform loading if necessary.

    Returns True if the skill was loaded/reloaded
    """
    if skill_folder not in self.loaded_skills:
        # First sighting of this folder: register with a stable id
        self.loaded_skills[skill_folder] = {
            "id": hash(os.path.join(SKILLS_DIR, skill_folder))
        }
    skill = self.loaded_skills.get(skill_folder)
    skill["path"] = os.path.join(SKILLS_DIR, skill_folder)

    # check if folder is a skill (must have __init__.py)
    if not MainModule + ".py" in os.listdir(skill["path"]):
        return False

    # getting the newest modified date of skill
    modified = _get_last_modified_date(skill["path"])
    last_mod = skill.get("last_modified", 0)

    # checking if skill is loaded and hasn't been modified on disk
    if skill.get("loaded") and modified <= last_mod:
        return False  # Nothing to do!

    # check if skill was modified
    elif skill.get("instance") and modified > last_mod:
        # check if skill has been blocked from reloading
        if not skill["instance"].reload_skill:
            return False

        LOG.debug("Reloading Skill: " + skill_folder)
        # removing listeners and stopping threads
        skill["instance"].shutdown()
        if DEBUG:
            gc.collect()  # Collect garbage to remove false references
            # Remove two local references that are known
            refs = sys.getrefcount(skill["instance"]) - 2
            if refs > 0:
                LOG.warning("After shutdown of {} there are still "
                            "{} references remaining. The skill "
                            "won't be cleaned from memory.".format(
                                skill['instance'].name, refs))
        del skill["instance"]
        self.ws.emit(Message("mycroft.skills.shutdown",
                             {"folder": skill_folder,
                              "id": skill["id"]}))

    # (Re)load the skill from disk
    with self.__msm_lock:  # Make sure msm isn't running
        skill["loaded"] = True
        desc = create_skill_descriptor(skill["path"])
        skill["instance"] = load_skill(desc, self.ws, skill["id"],
                                       BLACKLISTED_SKILLS)
        skill["last_modified"] = modified
        if skill['instance'] is not None:
            # Announce the successful (re)load on the bus
            self.ws.emit(Message('mycroft.skills.loaded',
                                 {'folder': skill_folder,
                                  'id': skill['id'],
                                  'name': skill['instance'].name,
                                  'modified': modified}))
            return True
        else:
            self.ws.emit(Message('mycroft.skills.loading_failure',
                                 {'folder': skill_folder,
                                  'id': skill['id']}))
            return False
def on_message(self, ws, message):
    """Publish the raw bus string, then dispatch the parsed message
    asynchronously on the worker pool.
    """
    self.emitter.emit('message', message)
    parsed = Message.deserialize(message)
    # Hand off to the pool so slow handlers don't block the socket.
    self.pool.apply_async(self.emitter.emit, (parsed.type, parsed))
def disable_intent(self, intent_name):
    """Disable a registered intent"""
    LOG.debug('Disabling intent ' + intent_name)
    # Intents are registered under "<skill_id>:<intent_name>"
    qualified_name = str(self.skill_id) + ':' + intent_name
    self.emitter.emit(
        Message("detach_intent", {"intent_name": qualified_name}))
def train_wake_word_intent(self, message):
    """Record wake-word and not-wake-word samples and train a model.

    Records self.settings["wwnr"] positive samples followed by
    self.settings["nowwnr"] negative samples into /tmp, then moves
    them into the data folder and starts model calculation.

    Args:
        message: intent Message; optional data keys "number" (sample
            count) and "name" (model name)
    """
    if message.data.get("number"):
        self.settings["wwnr"] = int(message.data.get("number"))
    if message.data.get("name"):
        name = message.data.get("name")
    else:
        name = self.get_response('witch.wakeword')
        if name is None:
            self.speak_dialog('no')
            return
    name = name.replace(' ', '-')
    if os.path.isdir(self.settings["file_path"] + name):
        # A model with this name already exists; offer to overwrite it
        if self.ask_yesno("model.available", data={"name": name}) == "yes":
            if os.path.isdir(self.settings["file_path"] + name):
                rmtree(self.settings["file_path"] + name)
    if os.path.isdir("/tmp/mycroft_wakeword/"):
        rmtree("/tmp/mycroft_wakeword/")
    if self.settings["wwnr"] >= 1:
        self.speak_dialog("word.wake", data={
            "name": name,
            "number": self.settings["wwnr"]
        })
    else:
        self.speak_dialog("none.wake.word")
    # Throw away any previous recording
    i = 1
    self.halt = False
    source = "/tmp/mycroft_wakeword/" + name
    nopath = "/not-wake-word/" + self.lang[:2] + "-short/"
    if not os.path.isdir(source + nopath):
        os.makedirs(source + nopath)
    yespath = "/wake-word/" + self.lang[:2] + "-short/"
    if not os.path.isdir(source + yespath):
        os.makedirs(source + yespath)
    self.new_name = name
    wait_while_speaking()
    ### Record test files to tmp
    while i <= self.settings["wwnr"] + self.settings["nowwnr"]:
        # Wait for the current recording (if any) to finish
        while self.record_process:
            time.sleep(1)
        time.sleep(2)
        if self.halt is True:
            # User aborted the recording session
            self.remove_event('recognizer_loop:record_end')
            self.remove_event('recognizer_loop:record_begin')
            self.remove_instance_handlers()
            if self.ask_yesno("calculate.anyway") == "yes":
                self.speak_dialog("start.calculating")
                self.calculating_intent(self.new_name)
                return
            else:
                rmtree(source)
                self.speak_dialog("no")
                wait_while_speaking()
                return
        elif self.halt is "break":
            # NOTE(review): identity comparison ("is") with a str
            # literal relies on CPython interning and is fragile;
            # this should almost certainly be "==" -- confirm.
            self.remove_event('recognizer_loop:record_end')
            self.remove_event('recognizer_loop:record_begin')
            self.remove_instance_handlers()
            self.record_file_mover(yespath, nopath, source)
            if self.ask_yesno("calculate.anyway") == "yes":
                # NOTE(review): this path falls back into the loop
                # after calculating -- confirm a return isn't intended
                self.speak_dialog("start.calculating")
                self.calculating_intent(self.new_name)
            else:
                self.speak_dialog("break")
                wait_while_speaking()
                return
        elif self.halt is None:
            # Presumably the last sample was rejected by the validator:
            # file it with the negatives and repeat the step -- confirm
            shutil.move(
                self.recordpath + self.recordfile,
                source + nopath + "not" + self.new_name + "-" +
                self.lang[:2] + "-" + str(uuid.uuid1()) + ".wav")
            if i <= self.settings["wwnr"] - 1:
                i = i - 1
        self.log.info("step number " + str(i))
        if i < self.settings["wwnr"]:
            # Still collecting positive (wake word) samples
            #play_wav(self.piep)
            self.recordpath = source + yespath
            self.recordfile = str(self.new_name + "-" + self.lang[:2] +
                                  "-" + str(uuid.uuid1()) + ".wav")
        elif i == self.settings["wwnr"]:
            # Switching over to negative (not-wake-word) samples
            time.sleep(2)
            self.speak_dialog("none.wake.word")
            wait_while_speaking()
            #play_wav(self.piep)
            self.recordpath = source + nopath
            self.recordfile = str("not" + self.new_name + "-" +
                                  self.lang[:2] + "-" +
                                  str(uuid.uuid1()) + ".wav")
        else:
            #play_wav(self.piep)
            self.recordpath = source + nopath
            self.recordfile = str("not" + self.new_name + "-" +
                                  self.lang[:2] + "-" +
                                  str(uuid.uuid1()) + ".wav")
        #time.sleep(2)
        self.log.info(self.recordfile)
        wait_while_speaking()
        i = i + 1
        #play_wav(self.piep).wait()
        if i <= 2:
            # First pass only: hook up the recording event handlers
            # and kick off the first listen; subsequent recordings are
            # presumably driven by the record_begin/record_end events
            # -- TODO confirm
            self.add_event('recognizer_loop:record_end', self.rec_stop)
            self.add_event('recognizer_loop:record_begin', self.loop)
            self.register_fallback(self.handle_validator, 1)
            self.bus.emit(Message('mycroft.mic.listen'))
            self.start_recording()
            #self.bus.emit(Message('mycroft.volume.unmute',
            #                      {"speak_message": False}))
    else:
        # while-else: all samples collected without an abort
        self.log.info("end records")
        self.remove_event('recognizer_loop:record_end')
        self.remove_event('recognizer_loop:record_begin')
        self.remove_instance_handlers()
        #### Save wakewords in data folder
        if self.ask_yesno("is.all.ok") == "no":
            rmtree(source)
            return
        wait_while_speaking()
        self.record_file_mover(yespath, nopath, source)
        self.calculating_intent(self.new_name)
        self.speak_dialog("start.calculating")
def stop(self):
    """Report a stop event on the enclosure bus."""
    stop_msg = Message("metadata", {"type": "stop"})
    self.enclosure.bus.emit(stop_msg)
def get_response(self, dialog='', data=None, validator=None, on_fail=None,
                 num_retries=-1):
    """Get response from user.

    If a dialog is supplied it is spoken, followed immediately by
    listening for a user response. If the dialog is omitted listening
    is started directly.  The response can optionally be validated
    before returning.

    Example:
        color = self.get_response('ask.favorite.color')

    Arguments:
        dialog (str): Optional dialog to speak to the user
        data (dict): Data used to render the dialog
        validator (any): Function with following signature
            def validator(utterance):
                return utterance != "red"
        on_fail (any): Dialog or function returning literal string
            to speak on invalid input.  For example:
            def on_fail(utterance):
                return "nobody likes the color red, pick another"
        num_retries (int): Times to ask user for input, -1 for infinite
            NOTE: User can not respond and timeout or say "cancel" to
            stop

    Returns:
        str: User's reply or None if timed out or canceled
    """
    data = data or {}

    def default_fail_msg(utterance):
        # Render the on_fail dialog when one was given, otherwise just
        # repeat the original prompt.
        if on_fail:
            fail_data = data.copy()
            fail_data['utterance'] = utterance
            return self.dialog_renderer.render(on_fail, fail_data)
        return self.dialog_renderer.render(dialog, data)

    def is_cancel(utterance):
        return self.voc_match(utterance, 'cancel')

    def default_validator(utterance):
        # accept anything except 'cancel'
        return not is_cancel(utterance)

    fail_handler = on_fail if callable(on_fail) else default_fail_msg
    check_response = validator or default_validator

    # Speak query and wait for user response
    prompt = self.dialog_renderer.render(dialog, data)
    if prompt:
        self.speak(prompt, expect_response=True, wait=True)
    else:
        self.bus.emit(Message('mycroft.mic.listen'))
    return self._wait_response(is_cancel, check_response, fail_handler,
                               num_retries)
def __stop_timeout():
    # stop() ran for more than 100ms; assume the skill handled the
    # request and report that on the bus.
    payload = {'skill_id': str(self.skill_id) + ':'}
    self.bus.emit(Message('mycroft.stop.handled', payload))
def create_intent_envelope(intent):
    """Wrap a parsed intent's attributes in an untyped bus Message."""
    payload = intent.__dict__
    return Message(None, metadata=payload, context={})
def speak(self, utterance):
    """Request TTS output of the given utterance over the bus."""
    payload = {'utterance': utterance}
    self.emitter.emit(Message("speak", metadata=payload))
def detach(self):
    """Detach every intent this instance has registered."""
    for intent_name in self.registered_intents:
        detach_msg = Message("detach_intent",
                             metadata={"intent_name": intent_name})
        self.emitter.emit(detach_msg)
def run(self, loader):
    """
    Run a test for a skill. The skill, test_case_file and emitter is
    already set up in the __init__ method.

    Args:
        loader:  A list of loaded skills
    """
    s = [s for s in loader.skills if s and s.root_dir == self.skill]
    if s:
        s = s[0]
    else:
        raise Exception('Skill couldn\'t be loaded')

    print("")
    print(color.HEADER + "=" * 20 + " RUNNING TEST " + "=" * 20 +
          color.RESET)
    print('Test file: ', self.test_case_file)
    with open(self.test_case_file, 'r') as f:
        test_case = json.load(f)
    print('Test:', json.dumps(test_case, indent=4, sort_keys=False))

    # Optionally shadow the skill's settings for the duration of the test
    original_settings = None
    if 'settings' in test_case:
        original_settings = s.settings
        s.settings = TestSettings('/tmp/', self.test_case_file)
        for key in test_case['settings']:
            s.settings[key] = test_case['settings'][key]
        print(color.YELLOW, 'will run test with custom settings:',
              '\n{}'.format(s.settings), color.RESET)

    if 'responses' in test_case:
        # Replace get_response with a stub that pops canned responses
        # from the test case instead of listening to the user
        def get_response(dialog='', data=None, announcement='',
                         validator=None, on_fail=None, num_retries=-1):
            data = data or {}
            utt = announcement or s.dialog_renderer.render(dialog, data)
            print(color.MYCROFT + ">> " + utt + color.RESET)
            s.speak(utt)
            response = test_case['responses'].pop(0)
            print("SENDING RESPONSE:",
                  color.USER_UTT + response + color.RESET)
            return response
        s.get_response = get_response

    # If we keep track of test status for the entire skill, then
    # get all intents from the skill, and mark current intent tested
    if self.test_status:
        self.test_status.append_intent(s)
        if 'intent_type' in test_case:
            self.test_status.set_tested(test_case['intent_type'])

    evaluation_rule = EvaluationRule(test_case, s)

    # Set up queue for emitted events. Because the evaluation method
    # expects events to be received in convoy, and be handled one by
    # one.  We can't make assumptions about threading in the core or
    # the skill.
    q = Queue()
    s.bus.q = q

    # Set up context before calling intent.  This option makes it
    # possible to better isolate (reduce dependence) between test_cases
    cxt = test_case.get('remove_context', None)
    if cxt:
        if isinstance(cxt, list):
            for x in cxt:
                MycroftSkill.remove_context(s, x)
        else:
            MycroftSkill.remove_context(s, cxt)

    cxt = test_case.get('set_context', None)
    if cxt:
        for key, value in cxt.items():
            MycroftSkill.set_context(s, key, value)

    # Emit an utterance, just like the STT engine does.  This sends the
    # provided text to the skill engine for intent matching and it then
    # invokes the skill.
    utt = test_case.get('utterance', None)
    print("UTTERANCE:", color.USER_UTT + utt + color.RESET)
    self.emitter.emit('recognizer_loop:utterance',
                      Message('recognizer_loop:utterance',
                              {'utterances': [utt]}))

    # Wait up to X seconds for the test_case to complete
    timeout = time.time() + int(test_case.get('evaluation_timeout')) \
        if test_case.get('evaluation_timeout', None) and \
        isinstance(test_case['evaluation_timeout'], int) \
        else time.time() + DEFAULT_EVALUAITON_TIMEOUT
    while not evaluation_rule.all_succeeded():
        try:
            event = q.get(timeout=1)
            # Derive '__type__' from the message type for the evaluator
            if ':' in event.type:
                event.data['__type__'] = event.type.split(':')[1]
            else:
                event.data['__type__'] = event.type

            evaluation_rule.evaluate(event.data)
            if event.type == 'mycroft.skill.handler.complete':
                break
        except Empty:
            pass
        if time.time() > timeout:
            break

    # Stop emitter from sending on queue
    s.bus.q = None

    # remove the skill which is not responding
    self.emitter.remove_all_listeners('speak')
    self.emitter.remove_all_listeners('mycroft.skill.handler.complete')

    # Report test result if failed
    if not evaluation_rule.all_succeeded():
        self.failure_msg = str(evaluation_rule.get_failure())
        print(color.FAIL + "Evaluation failed" + color.RESET)
        print(color.FAIL + "Failure:", self.failure_msg + color.RESET)
        return False

    # Restore the skill's original settings if they were shadowed
    if original_settings:
        s.settings = original_settings
    return True
def load_skills(self):
    """Load all skills, train the parser and hand back the raw emitter."""
    loaded = load_skills(self.emitter, self.skills_root)
    self.skills = [skill for skill in loaded if skill]
    self.ps.train(Message('', data=dict(single_thread=True)))
    return self.emitter.emitter  # kick out the underlying emitter
def handle_cmd(cmd):
    """Dispatch a typed ':'-command from the CLI entry line.

    Returns 1 to signal the caller to exit the CLI, 0 otherwise.
    """
    global show_meter
    global screen_mode
    global log_filters
    global cy_chat_area
    global find_str
    global show_last_key

    if "show" in cmd and "log" in cmd:
        pass
    elif "help" in cmd:
        show_help()
    elif "exit" in cmd or "quit" in cmd:
        return 1
    elif "clear" in cmd and "log" in cmd:
        clear_log()
    elif "keycode" in cmd:
        # debugging keyboard
        if "hide" in cmd or "off" in cmd:
            show_last_key = False
        elif "show" in cmd or "on" in cmd:
            show_last_key = True
    elif "meter" in cmd:
        # microphone level meter
        if "hide" in cmd or "off" in cmd:
            show_meter = False
        elif "show" in cmd or "on" in cmd:
            show_meter = True
    elif "find" in cmd:
        find_str = _get_cmd_param(cmd)
        rebuild_filtered_log()
    elif "filter" in cmd:
        if "show" in cmd or "list" in cmd:
            # display active filters
            add_log_message("Filters: " + str(log_filters))
            return

        if "reset" in cmd or "clear" in cmd:
            log_filters = list(default_log_filters)
        else:
            # extract last word(s)
            param = _get_cmd_param(cmd)
            if "remove" in cmd and param in log_filters:
                log_filters.remove(param)
            else:
                log_filters.append(param)

        rebuild_filtered_log()
        add_log_message("Filters: " + str(log_filters))
    elif "history" in cmd:
        # extract last word(s) and clamp to what fits on screen
        lines = int(_get_cmd_param(cmd))
        if lines < 1:
            lines = 1
        max_chat_area = curses.LINES - 7
        if lines > max_chat_area:
            lines = max_chat_area
        cy_chat_area = lines
    elif "skills" in cmd:
        # List loaded skill
        message = ws.wait_for_response(Message('skillmanager.list'),
                                       reply_type='mycroft.skills.list')
        if message:
            show_skills(message.data)
            scr.get_wch()  # blocks
            screen_mode = 0  # back to main screen
            set_screen_dirty()
    elif "deactivate" in cmd:
        skills = cmd.split()[1:]
        if len(skills) > 0:
            for s in skills:
                ws.emit(Message("skillmanager.deactivate",
                                data={'skill': s}))
        else:
            add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
    elif "keep" in cmd:
        s = cmd.split()
        if len(s) > 1:
            ws.emit(Message("skillmanager.keep", data={'skill': s[1]}))
        else:
            add_log_message('Usage :keep SKILL')
    elif "activate" in cmd:
        skills = cmd.split()[1:]
        if len(skills) > 0:
            for s in skills:
                ws.emit(Message("skillmanager.activate", data={'skill': s}))
        else:
            add_log_message('Usage :activate SKILL [SKILL2] [...]')

    # TODO: More commands
    return 0  # do nothing upon return
def __api_error(self, e):
    """On a 401 from the backend, announce that the device isn't paired."""
    if e.response.status_code != 401:
        return
    self.emitter.emit(Message("mycroft.not.paired"))
def detach(self):
    """Detach all of this skill's registered intents from the service."""
    for intent_name, _intent in self.registered_intents:
        # Intents live on the bus as "<skill_id>:<intent_name>"
        qualified = str(self.skill_id) + ':' + intent_name
        self.emitter.emit(Message("detach_intent",
                                  {"intent_name": qualified}))
def collect(self):
    """Ask idle screens to report in, wait briefly, then display."""
    collect_msg = Message("mycroft.mark2.collect_idle")
    self.bus.emit(collect_msg)
    time.sleep(1)  # allow responses to arrive before showing
    self.show()
def register_regex(self, regex_str):
    """Register a regex vocabulary entry on the bus.

    Raises:
        re.error: if regex_str is not a valid pattern
    """
    re.compile(regex_str)  # fail fast on an invalid pattern
    payload = {'regex': regex_str}
    self.emitter.emit(Message('register_vocab', payload))
def handle_record_begin():
    """Log and rebroadcast the start of a recording."""
    logger.info("Begin Recording...")
    begin_msg = Message('recognizer_loop:record_begin')
    client.emit(begin_msg)
def check_connection():
    """
    Check for network connection. If not paired trigger pairing.
    Runs as a Timer every second until connection is detected.
    """
    if connected():
        enclosure = EnclosureAPI(ws)

        if is_paired():
            # Skip the sync message when unpaired because the prompt to
            # go to home.mycroft.ai will be displayed by the pairing skill
            enclosure.mouth_text(mycroft.dialog.get("message_synching.clock"))

        # Force a sync of the local clock with the internet
        config = Configuration.get()
        platform = config['enclosure'].get("platform", "unknown")
        if platform in ['mycroft_mark_1', 'picroft']:
            ws.emit(Message("system.ntp.sync"))
            time.sleep(15)  # TODO: Generate/listen for a message response...

            # Check if the time skewed significantly.  If so, reboot
            skew = abs((monotonic.monotonic() - start_ticks) -
                       (time.time() - start_clock))
            if skew > 60 * 60:
                # Time moved by over an hour in the NTP sync. Force a
                # reboot to prevent weird things from occurring due to
                # the 'time warp'.
                #
                ws.emit(Message(
                    "speak",
                    {'utterance':
                     mycroft.dialog.get("time.changed.reboot")}))
                wait_while_speaking()

                # provide visual indicators of the reboot
                enclosure.mouth_text(mycroft.dialog.get("message_rebooting"))
                enclosure.eyes_color(70, 65, 69)  # soft gray
                enclosure.eyes_spin()

                # give the system time to finish processing enclosure
                # messages
                time.sleep(1.0)

                # reboot
                ws.emit(Message("system.reboot"))
                return
        else:
            ws.emit(Message("enclosure.mouth.reset"))
            time.sleep(0.5)

        ws.emit(Message('mycroft.internet.connected'))
        # check for pairing, if not automatically start pairing
        if not is_paired():
            # begin the process
            payload = {
                'utterances': ["pair my device"],
                'lang': "en-us"
            }
            ws.emit(Message("recognizer_loop:utterance", payload))
        else:
            from mycroft.api import DeviceApi
            api = DeviceApi()
            api.update_version()
    else:
        # Not connected yet; poll again in one second
        thread = Timer(1, check_connection)
        thread.daemon = True
        thread.start()
def handle_record_end():
    """Log and rebroadcast the end of a recording."""
    logger.info("End Recording...")
    end_msg = Message('recognizer_loop:record_end')
    client.emit(end_msg)
def _notify_backend_down(self):
    """Notify user of inability to communicate with the backend."""
    notice = Message("backend.down")
    self._speak_dialog(dialog_id="backend.down")
    self.bus.emit(notice)
def handle_wakeword(event):
    """Log the detected wake word and rebroadcast the event."""
    logger.info("Wakeword Detected: " + event['utterance'])
    client.emit(Message('recognizer_loop:wakeword', event))
def on_message(self, ws, message):
    """Publish the raw string, then dispatch the typed message via the
    worker pool so handlers can't block the socket thread.
    """
    self.emitter.emit('message', message)
    msg = Message.deserialize(message)
    self.pool.apply_async(self.emitter.emit, (msg.type, msg))
def eyes_blink(self, side):
    """Make the eyes blink.

    Args:
        side (str): 'r', 'l', or 'b' for 'right', 'left' or 'both'
    """
    blink_msg = Message("enclosure.eyes.blink", {'side': side})
    self.ws.emit(blink_msg)
class TestMessageMethods(unittest.TestCase):
    """Tests for the Message class serialization functions."""

    def setUp(self):
        """Set up some basic messages and their expected serialized
        (JSON string) forms for testing.
        """
        self.empty_message = Message("empty")
        self.message1 = Message("enclosure.reset")
        self.message2 = Message("enclosure.system.blink",
                                {'target': 4}, {'target': 5})
        self.message3 = Message("status", "OK")
        # serialized results of each of the messages
        # (a backslash-newline inside a string literal continues the
        # literal without inserting any characters)
        self.serialized = ['{"data": {}, "type": "empty", "context": null}',
                           '{"data": {}, "type": "enclosure.reset",\
 "context": null}',
                           '{"data": { "target": 4}, \
"type": "enclosure.system.blink", \
"context": {"target": 5}}',
                           '{"data": "OK", "type": "status", \
"context": null}']

    def test_serialize(self):
        """Test that each message serializes to the expected string."""
        self.assertEqual(self.empty_message.serialize(), self.serialized[0])
        self.assertEqual(self.message1.serialize(), self.serialized[1])
        self.assertEqual(self.message2.serialize(), self.serialized[2])
        self.assertEqual(self.message3.serialize(), self.serialized[3])

    def test_deserialize(self):
        """Test the deserialize method via a serialize round trip."""
        messages = []
        # create the messages from the serialized strings above
        messages.append(Message.deserialize(self.serialized[0]))
        messages.append(Message.deserialize(self.serialized[1]))
        messages.append(Message.deserialize(self.serialized[2]))
        # check the created messages match the strings
        self.assertEqual(messages[0].serialize(), self.serialized[0])
        self.assertEqual(messages[1].serialize(), self.serialized[1])
        self.assertEqual(messages[2].serialize(), self.serialized[2])

    def test_reply(self):
        """Test the reply method.

        This is probably incomplete as the use of the reply message
        escapes me.
        """
        message = self.empty_message.reply("status", "OK")
        self.assertEqual(message.serialize(),
                         '{"data": "OK", "type": "status", "context": {}}')
        message = self.message1.reply("status", "OK")
        self.assertEqual(message.serialize(),
                         '{"data": "OK", "type": "status", "context": {}}')
        message = self.message2.reply("status", "OK")

    def test_publish(self):
        """Placeholder for testing the publish method.

        TODO: Needs to be completed
        """
        pass
def gui_main(stdscr):
    """Curses CLI main loop.

    Wires up the websocket, starts the background connect/draw threads
    and translates keystrokes into commands and utterances until the
    user exits.
    """
    global scr
    global ws
    global line
    global log_line_lr_scroll
    global longest_visible_line
    global find_str
    global last_key
    global history
    global screen_lock

    scr = stdscr
    init_screen()
    scr.keypad(1)
    scr.notimeout(1)

    ws = WebsocketClient()
    ws.on('speak', handle_speak)
    ws.on('message', handle_message)
    ws.on('recognizer_loop:utterance', handle_utterance)
    event_thread = Thread(target=connect)
    event_thread.setDaemon(True)
    event_thread.start()

    gui_thread = ScreenDrawThread()
    gui_thread.setDaemon(True)  # this thread won't prevent prog from exiting
    gui_thread.start()

    hist_idx = -1  # index, from the bottom
    try:
        while True:
            set_screen_dirty()

            try:
                c = scr.get_wch()  # unicode char or int for special keys
            except KeyboardInterrupt:
                # User hit Ctrl+C to quit
                if find_str:
                    # End the find session
                    find_str = None
                    rebuild_filtered_log()
                else:
                    break
            except curses.error:
                # This happens in odd cases, such as when you Ctrl+Z
                # suspend the CLI and then resume.  Curses fails on
                # get_wch().
                continue

            if isinstance(c, int):
                code = c
            else:
                code = ord(c)

            # Convert VT100 ESC codes generated by some terminals
            if code == 27:
                # NOTE: Not sure exactly why, but the screen can get
                # corrupted if we draw to the screen while doing a
                # scr.getch().  So lock screen updates until the VT100
                # sequence has been completely read.
                with screen_lock:
                    scr.timeout(0)
                    c1 = -1
                    start = time.time()
                    while c1 == -1:
                        c1 = scr.getch()
                        if time.time() - start > 1:
                            break  # 1 second timeout waiting for ESC code

                    c2 = -1
                    while c2 == -1:
                        c2 = scr.getch()
                        if time.time() - start > 1:  # 1 second timeout
                            break  # 1 second timeout waiting for ESC code
                    scr.timeout(-1)

                # Map the VT100 sequence onto the curses key constants
                if c1 == 79 and c2 == 120:
                    c = curses.KEY_UP
                elif c1 == 79 and c2 == 116:
                    c = curses.KEY_LEFT
                elif c1 == 79 and c2 == 114:
                    c = curses.KEY_DOWN
                elif c1 == 79 and c2 == 118:
                    c = curses.KEY_RIGHT
                elif c1 == 79 and c2 == 121:
                    c = curses.KEY_PPAGE  # aka PgUp
                elif c1 == 79 and c2 == 115:
                    c = curses.KEY_NPAGE  # aka PgDn
                elif c1 == 79 and c2 == 119:
                    c = curses.KEY_HOME
                elif c1 == 79 and c2 == 113:
                    c = curses.KEY_END
                else:
                    c = c1

                if c1 != -1:
                    last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
                    code = c
                else:
                    last_key = "ESC"
            else:
                # NOTE(review): both branches are identical; one of them
                # was presumably meant to record the printable character
                # itself -- confirm intent
                if code < 33:
                    last_key = str(code)
                else:
                    last_key = str(code)

            if code == 27:
                # Hitting ESC twice clears the entry line
                hist_idx = -1
                line = ""
            elif c == curses.KEY_RESIZE:
                # Generated by Curses when window/screen has been resized
                y, x = scr.getmaxyx()
                curses.resizeterm(y, x)

                # resizeterm() causes another curses.KEY_RESIZE, so
                # we need to capture that to prevent a loop of resizes
                c = scr.get_wch()
            elif screen_mode == 1:
                # in Help mode, any key goes to next page
                show_next_help()
                continue
            elif c == '\n' or code == 10 or code == 13 or code == 343:
                # ENTER sends the typed line to be processed by Mycroft
                if line == "":
                    continue

                if line[:1] == ":":
                    # Lines typed like ":help" are 'commands'
                    if handle_cmd(line[1:]) == 1:
                        break
                else:
                    # Treat this as an utterance
                    ws.emit(Message("recognizer_loop:utterance",
                                    {'utterances': [line.strip()],
                                     'lang': 'en-us'}))
                hist_idx = -1
                line = ""
            elif code == 16 or code == 545:
                # Ctrl+P or Ctrl+Left (Previous)
                # Move up the history stack
                hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
                if hist_idx >= 0:
                    line = history[len(history) - hist_idx - 1]
                else:
                    line = ""
            elif code == 14 or code == 560:
                # Ctrl+N or Ctrl+Right (Next)
                # Move down the history stack
                hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
                if hist_idx >= 0:
                    line = history[len(history) - hist_idx - 1]
                else:
                    line = ""
            elif c == curses.KEY_LEFT:
                # scroll long log lines left
                log_line_lr_scroll += curses.COLS // 4
            elif c == curses.KEY_RIGHT:
                # scroll long log lines right
                log_line_lr_scroll -= curses.COLS // 4
                if log_line_lr_scroll < 0:
                    log_line_lr_scroll = 0
            elif c == curses.KEY_HOME:
                # HOME scrolls log lines all the way to the start
                log_line_lr_scroll = longest_visible_line
            elif c == curses.KEY_END:
                # END scrolls log lines all the way to the end
                log_line_lr_scroll = 0
            elif c == curses.KEY_UP:
                scroll_log(False, 1)
            elif c == curses.KEY_DOWN:
                scroll_log(True, 1)
            elif c == curses.KEY_NPAGE:  # aka PgDn
                # PgDn to go down a page in the logs
                scroll_log(True)
            elif c == curses.KEY_PPAGE:  # aka PgUp
                # PgUp to go up a page in the logs
                scroll_log(False)
            elif code == 2 or code == 550:  # Ctrl+B or Ctrl+PgDn
                scroll_log(True, max_log_lines)
            elif code == 20 or code == 555:  # Ctrl+T or Ctrl+PgUp
                scroll_log(False, max_log_lines)
            elif code == curses.KEY_BACKSPACE or code == 127:
                # Backspace to erase a character in the utterance
                line = line[:-1]
            elif code == 6:  # Ctrl+F (Find)
                line = ":find "
            elif code == 18:  # Ctrl+R (Redraw)
                scr.erase()
            elif code == 24:  # Ctrl+X (Exit)
                if find_str:
                    # End the find session
                    find_str = None
                    rebuild_filtered_log()
            elif code > 31 and isinstance(c, str):
                # Accept typed character in the utterance
                line += c
    finally:
        scr.erase()
        scr.refresh()
        scr = None
        pass
def handle_utterance(event):
    """Log a recognized utterance and rebroadcast the event."""
    logger.info("Utterance: " + str(event['utterances']))
    client.emit(Message('recognizer_loop:utterance', event))
def make_response(_):
    """Reply on the bus that this skill will not converse."""
    self.emitter.emit(Message('skill.converse.response',
                              dict(result=False)))
def __speak(self, utterance):
    """Emit a 'speak' request carrying the utterance and session id."""
    session_id = SessionManager.get().session_id
    speak_msg = Message("speak",
                        metadata={'utterance': utterance,
                                  'session': session_id})
    self.emitter.emit("speak", speak_msg)
def eyes_off(self):
    """Turn off or hide the eyes."""
    off_msg = Message("enclosure.eyes.off")
    self.ws.emit(off_msg)