def on_direct_message(self, event):
    """Handle an incoming direct-message event and echo it to the handler."""
    payload = json.loads(event["data"]["post"])
    author = event["data"]["sender_name"]
    text = payload["message"]
    chan = payload["channel_id"]
    LOG.info("Direct Message from: " + author)
    LOG.info("Message: " + text)
    # echo
    self.handle_direct_message(text, author, chan)
def on_volume_get(self, message):
    """Read the hardware volume, normalise to 0..1, cache it and reply."""
    vol = self.m2enc.hardware_volume.get_volume()
    # hardware may report on a 0..10 scale; fold it into 0..1
    if vol > 1.0:
        vol = vol / 10
    self.current_volume = vol
    LOG.info('Mark2:interface.py get and emit volume %s' % (vol,))
    reply = message.response(data={'percent': vol, 'muted': False})
    self.bus.emit(reply)
def __init__(self):
    """Initialise the Mark2 enclosure: state flags, hardware, LEDs, monitoring."""
    LOG.info('** Initialize EnclosureMark2 **')
    super().__init__()
    self.display_bus_client = None
    self._define_event_handlers()
    self.finished_loading = False
    self.active_screen = 'loading'
    self.paused_screen = None
    self.is_pairing = False
    self.active_until_stopped = None
    # fixed indices of the special-purpose LEDs on the ring
    self.reserved_led = 10
    self.mute_led = 11
    self.chaseLedThread = None
    self.pulseLedThread = None
    self.system_volume = 0.5  # pulse audio master system volume
    # if you want to do anything with the system volume
    # (ala pulseaudio, etc) do it here!
    self.current_volume = 0.5  # hardware/board level volume
    # TODO these need to come from a config value
    self.m2enc = HardwareEnclosure("Mark2", "sj201r4")
    self.m2enc.client_volume_handler = self.async_volume_handler
    # start the temperature monitor thread
    self.temperatureMonitorThread = temperatureMonitorThread(
        self.m2enc.fan, self.m2enc.leds, self.m2enc.palette)
    self.temperatureMonitorThread.start()
    # blank out the user-visible LEDs (ten of them)
    self.m2enc.leds.set_leds([
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK,
        self.m2enc.palette.BLACK
    ])
    # reserved LED shown dimmed magenta
    self.m2enc.leds._set_led_with_brightness(self.reserved_led,
                                             self.m2enc.palette.MAGENTA,
                                             0.5)
    # set mute led based on reality
    mute_led_color = self.m2enc.palette.GREEN
    if self.m2enc.switches.SW_MUTE == 1:
        mute_led_color = self.m2enc.palette.RED
    self.m2enc.leds._set_led_with_brightness(self.mute_led,
                                             mute_led_color,
                                             1.0)
    self.default_caps = EnclosureCapabilities()
    LOG.info('** EnclosureMark2 initalized **')
    # wait for the skills service before declaring the device ready
    self.bus.once('mycroft.skills.trained', self.is_device_ready)
def on_capabilities_get(self, message):
    """Reply on the bus with default plus hardware-specific capabilities."""
    LOG.info('Mark2:interface.py get capabilities requested')
    caps = {
        'default': self.default_caps.caps,
        'extra': self.m2enc.capabilities,
        'board_type': self.m2enc.board_type,
        'leds': self.m2enc.leds.capabilities,
        'volume': self.m2enc.hardware_volume.capabilities,
        'switches': self.m2enc.switches.capabilities,
    }
    self.bus.emit(message.response(data=caps))
def on_error(e):
    """Speak and log the error."""
    if isinstance(e, AbortEvent):
        # deliberate abort: nothing to speak
        LOG.info("Skill execution aborted")
    else:
        # Convert "MyFancySkill" to "My Fancy Skill" for speaking
        spoken_name = camel_case_split(self.name)
        msg = dialog.get('skill.error', self.lang, {'skill': spoken_name})
        self.speak(msg)
        LOG.exception(msg)
    # append exception information in message
    skill_data['exception'] = repr(e)
def sync_appstores(self, merge=False, new_only=True, threaded=False):
    """Synchronise the skill list of every active appstore."""
    for appstore_id, store in self.get_active_appstores().items():
        LOG.info("Syncing skills from " + appstore_id)
        store.authenticate()
        if threaded:
            # TODO this will cause auth issues
            self._threads.append(
                store.sync_skills_list_threaded(merge, new_only))
        else:
            store.sync_skills_list(merge, new_only)
            store.clear_authentication()
def bootstrap(self, new_only=True):
    """Seed an empty skill database from the bundled list, else download it."""
    if len(self.db):
        # database already populated, nothing to do
        return
    LOG.info("Bootstrapping {database}, this might take a "
             "while!".format(database=self.name))
    bundled = join(dirname(dirname(__file__)), "res",
                   self.db.name + ".jsondb")
    if isfile(bundled):
        LOG.debug("Bootstrapping from bundled skill list")
        shutil.copyfile(bundled, self.db.path)
        self.db.reset()
    else:
        LOG.debug("Downloading skill list")
        self.sync_skills_list(new_only=new_only)
def main(ready_hook=on_ready, error_hook=on_error, stopping_hook=on_stopping):
    """Create the Mark2 enclosure and log all detected capabilities."""
    enclosure = EnclosureMark2()
    enclosure.default_caps = EnclosureCapabilities()
    hw = enclosure.m2enc
    LOG.info("Enclosure created, capabilities ===>%s" %
             (enclosure.default_caps.caps, ))
    LOG.info("Mark2 detected[%s], additional capabilities ===>%s" %
             (hw.board_type, hw.capabilities))
    LOG.info("Leds ===>%s" % (hw.leds.capabilities))
    LOG.info("Volume ===>%s" % (hw.hardware_volume.capabilities))
    LOG.info("Switches ===>%s" % (hw.switches.capabilities))
def start_announcing(self):
    """Advertise the HiveMind NodeRed websocket via zeroconf (only once)."""
    if self.zero is not None:
        # already announcing
        return
    socket_url = self.listener.address.replace("0.0.0.0", get_ip())
    LOG.info("Registering zeroconf:HiveMind-NodeRed-websocket " + socket_url)
    self.zero = ZeroConfAnnounce(uuid=uuid.uuid4(),
                                 port=self.port,
                                 host=socket_url)
    self.zero.daemon = True
    self.zero.start()
def create_gui_service(enclosure, config):
    """Start the tornado application that serves the GUI message bus."""
    import tornado.options
    LOG.info('Starting message bus for GUI...')
    # Disable all tornado logging so mycroft loglevel isn't overridden
    tornado.options.parse_command_line(['--logging=None'])
    app = web.Application([(config['route'], GUIWebsocketHandler)],
                          debug=True)
    app.enclosure = enclosure
    app.listen(config['base_port'], config['host'])
    # run the IO loop in a background daemon thread
    create_daemon(ioloop.IOLoop.instance().start)
    LOG.info('GUI Message bus started!')
    return app
def on_typing(self, event):
    """Log who is typing and whether it is a DM to us or a channel post."""
    typist_id = event["data"]["user_id"]
    chan_id = event["broadcast"]["channel_id"]
    chan_name = self.driver.channels.get_channel(chan_id)["name"]
    username = self.driver.users.get_user(user_id=typist_id)["username"]
    # a direct-message channel between two users is named "<id1>__<id2>"
    if chan_name == self.user_id + "__" + typist_id:
        LOG.info(username + " is typing a direct message")
    else:
        LOG.info(username + " is typing a message in channel: " + chan_name)
def handle_incoming_mycroft(self, message, client):
    """
    external skill client sent a message

    message (Message): mycroft bus message object
    """
    # message from a skill (note: both branches may fire for one message)
    if message.context.get("skill_id"):
        self.ee.emit("localhive.skill", message)
    # message from a terminal
    if message.msg_type == "recognizer_loop:utterance":
        LOG.info(f"Utterance: {message.data['utterances']} "
                 f"Peer: {client.peer}")
        message.context["source"] = client.peer
        self.ee.emit("localhive.utterance", message)
def discover_hivemind(name="JarbasPushToTalkTerminal",
                      access_key="RESISTENCEisFUTILE",
                      crypto_key="resistanceISfutile"):
    """Scan the LAN forever for HiveMind nodes, attaching a PTT terminal."""
    discovery = LocalDiscovery()
    auth_headers = HiveMindConnection.get_headers(name, access_key)
    while True:
        LOG.info("Scanning...")
        for url in discovery.scan():
            LOG.info("Fetching Node data: {url}".format(url=url))
            discovery.nodes[url].connect(crypto_key=crypto_key,
                                         node_type=JarbasPtTTerminal,
                                         headers=auth_headers)
        sleep(5)
def on_gui_client_connected(self, message):
    """Answer a GUI's presence announcement with the websocket port to use."""
    # GUI has announced presence
    LOG.info('GUI HAS ANNOUNCED!')
    port = self.global_config["gui_websocket"]["base_port"]
    LOG.debug("on_gui_client_connected")
    gui_id = message.data.get("gui_id")
    LOG.debug("Heard announcement from gui_id: {}".format(gui_id))
    # Announce connection, the GUI should connect on it soon
    reply = Message("mycroft.gui.port", {"port": port, "gui_id": gui_id})
    self.bus.emit(reply)
def prune_dead_streams(cls, ttl=60):
    """ remove dead streams from channel list
    set stream status as OK for ttl minutes"""
    # iterate over a snapshot so delete_channel() may mutate cls.channels
    for idx, ch in dict(cls.channels).items():
        if cls.channels[idx]["status"] != StreamStatus.OK:
            # re-probe the stream and refresh its expiry window
            cls.channels[idx]["status"] = cls.get_channel_status(ch)
            cls.channels[idx]["expires"] = time.time() + ttl * 60
            if cls.channels[idx]["status"] == StreamStatus.OK:
                # recovered: reset the failure streak
                cls.channels[idx]["_dead_counter"] = 0
            else:
                # NOTE(review): assumes "_dead_counter" is initialised when the
                # channel is added — confirm, otherwise this raises KeyError
                cls.channels[idx]["_dead_counter"] += 1
                # evict after remove_threshold consecutive failed probes
                if cls.channels[idx]["_dead_counter"] >= \
                        cls.remove_threshold:
                    LOG.info(f"Removing dead stream: {idx}")
                    cls.dead_channels[idx] = ch
                    cls.delete_channel(idx)
def __init__(self, *args, **kwargs):
    """Initialise the VOD skill: default settings, playback type, collection."""
    super().__init__(*args, **kwargs)
    # fill in any settings the user has not overridden
    defaults = {
        "max_videos": 500,
        "min_duration": -1,
        "max_duration": -1,
        "shuffle_menu": False,
        "filter_live": False,
        "filter_date": False,
        "min_score": 40,
        "match_description": True,
        "match_tags": True,
        "match_title": True,
        "filter_trailers": True,
        "filter_behind_scenes": True,
        # after matching and ordering by title
        # will match/search metadata for N videos
        # some collection can be huge and matching everything will cause
        # a timeout, collections with less than N videos wont have any
        # problem
        "search_depth": 500,
    }
    for key, value in defaults.items():
        if key not in self.settings:
            self.settings[key] = value
    if pyvod is None:
        LOG.error("py_VOD not installed!")
        LOG.info("pip install py_VOD>=0.4.0")
        raise ImportError
    self.playback_type = CPSPlayback.GUI
    self.media_type = CPSMatchType.VIDEO
    self.default_bg = "https://github.com/OpenVoiceOS/ovos_assets/raw/master/Logo/ovos-logo-512.png"
    self.default_image = resolve_ovos_resource_file("ui/images/moviesandfilms.png")
    db_path = join(dirname(__file__), "res", self.name + ".jsondb")
    self.message_namespace = basename(dirname(__file__)) + ".ovos_utils"
    self.media_collection = pyvod.Collection(self.name,
                                             logo=self.default_image,
                                             db_path=db_path)
def ti_start_sequence(self):
    ''' Start Sequence for the TAS5806 '''
    # Register writes below follow a fixed power-up order; do not reorder.
    LOG.info("Start the TI Amp")
    self.write_ti_data(0x01, 0x11)  # reset chip
    self.write_ti_data(0x78, 0x80)  # clear fault - works
    self.write_ti_data(0x01, 0x00)  # remove reset
    self.write_ti_data(0x78, 0x00)  # remove clear fault
    # NOTE(review): 0x33/0x30 register values are undocumented here —
    # presumably from the TAS5806 datasheet; confirm against it
    self.write_ti_data(0x33, 0x03)
    self.set_volume(0.5)
    self.write_ti_data(0x30, 0x01)
    self.write_ti_data(0x03, 0x00)  # Deep Sleep
    self.write_ti_data(0x03, 0x02)  # HiZ
    # Indicate the first coefficient of a BQ is starting to write
    self.write_ti_data(0x5C, 0x01)
    self.write_ti_data(0x03, 0x03)  # Play
def discover_hivemind(name="JarbasCliTerminal",
                      access_key="RESISTENCEisFUTILE",
                      crypto_key="resistanceISfutile",
                      curses=False):
    """Scan the LAN forever for HiveMind nodes, attaching a CLI or curses terminal."""
    discovery = LocalDiscovery()
    auth_headers = HiveMindConnection.get_headers(name, access_key)
    terminal_cls = JarbasCursesTerminal if curses else JarbasCliTerminal
    while True:
        print("Scanning...")
        for url in discovery.scan():
            LOG.info("Fetching Node data: {url}".format(url=url))
            discovery.nodes[url].connect(crypto_key=crypto_key,
                                         node_type=terminal_cls,
                                         headers=auth_headers)
        sleep(5)
def on_mention(self, event):
    """Handle a channel post that mentions the bot: strip tags, dispatch."""
    payload = json.loads(event["data"]["post"])
    sender = event["data"]["sender_name"]
    text = payload["message"]
    chan_id = payload["channel_id"]
    user_id = payload["user_id"]
    chan_name = self.driver.channels.get_channel(chan_id)["name"]
    # remove the bot's @-tags from the text before handling it
    for tag in self.tags:
        text = text.replace(tag, "")
    LOG.info("New mention at channel: " + chan_name)
    LOG.info(sender + " said: " + text)
    self.handle_mention(text, sender, chan_id)
def _skip_wake_word(self, source):
    """Check if told programatically to skip the wake word

    For example when we are in a dialog with the user.
    """
    signaled = False
    if check_for_signal('startListening') or self._listen_triggered:
        signaled = True
    # Pressing the Mark 1 button can start recording (unless
    # it is being used to mean 'stop' instead)
    elif check_for_signal('buttonPress', 1):
        # give other processes time to consume this signal if
        # it was meant to be a 'stop'
        sleep(0.25)
        if check_for_signal('buttonPress'):
            # Signal is still here, assume it was intended to
            # begin recording
            LOG.debug("Button Pressed, wakeword not needed")
            signaled = True
    if signaled:
        LOG.info("Listen signal detected")
        # If enabled, play a wave file with a short sound to audibly
        # indicate listen signal was detected.
        sound = self.config["listener"].get('listen_sound')
        audio_file = resolve_resource_file(sound)
        if audio_file:
            try:
                # mute the mic while the acknowledgement plays so the
                # sound itself is not recorded
                source.mute()
                if audio_file.endswith(".wav"):
                    play_wav(audio_file).wait()
                elif audio_file.endswith(".mp3"):
                    play_mp3(audio_file).wait()
                elif audio_file.endswith(".ogg"):
                    play_ogg(audio_file).wait()
                else:
                    play_audio(audio_file).wait()
                source.unmute()
            except Exception as e:
                # best effort: a failed sound must not block listening
                LOG.warning(e)
    return signaled
def discover_hivemind(email, password, access_key,
                      name="JarbasDeltaChatBridge", crypto_key=None):
    """Scan the LAN forever for HiveMind nodes, attaching a DeltaChat bridge."""
    discovery = LocalDiscovery()
    auth_headers = HiveMindConnection.get_headers(name, access_key)
    while True:
        print("Scanning...")
        for url in discovery.scan():
            LOG.info("Fetching Node data: {url}".format(url=url))
            discovery.nodes[url].connect(crypto_key=crypto_key,
                                         node_type=JarbasDeltaChatBridge,
                                         headers=auth_headers,
                                         email=email,
                                         password=password)
        sleep(5)
def onConnect(self, request):
    """Accept a websocket client: record its platform and notify the bus."""
    LOG.info("Client connecting: {0}".format(request.peer))
    # assumes request.peer is "proto:host:port" — index 1 is the host
    ip = request.peer.split(":")[1]
    context = {"source": self.peer}
    self.platform = request.headers.get("platform", "unknown")
    self.crypto_key = None
    self.blacklist = {"messages": [], "skills": [], "intents": []}
    # send message to internal mycroft bus
    self.factory.mycroft_send("hive.client.connect",
                              {"ip": ip, "headers": request.headers},
                              context)
    # return a pair with WS protocol spoken (or None for any) and
    # custom headers to send in initial WS opening handshake HTTP response
    return (None, {"server": self.platform})
def find_input_device(device_name):
    """ Find audio input device by name.

        Arguments:
            device_name: device name or regex pattern to match

        Returns: device_index (int) or None if device wasn't found
    """
    LOG.info('Searching for input device: {}'.format(device_name))
    LOG.debug('Devices: ')
    pa = pyaudio.PyAudio()
    pattern = re.compile(device_name)
    try:
        for device_index in range(pa.get_device_count()):
            dev = pa.get_device_info_by_index(device_index)
            LOG.debug('   {}'.format(dev['name']))
            if dev['maxInputChannels'] > 0 and pattern.match(dev['name']):
                LOG.debug('    ^-- matched')
                return device_index
        return None
    finally:
        # release PortAudio resources — the original never terminated the
        # PyAudio instance, leaking a native handle per call
        pa.terminate()
def discover_hivemind(name="JarbasVoiceTerminal",
                      access_key="RESISTENCEisFUTILE",
                      crypto_key=None):
    """Scan the LAN for HiveMind nodes, attaching a voice terminal, until Ctrl-C."""
    discovery = LocalDiscovery()
    auth_headers = HiveMindConnection.get_headers(name, access_key)
    while True:
        try:
            LOG.info("Scanning...")
            for url in discovery.scan():
                LOG.info("Fetching Node data: {url}".format(url=url))
                discovery.nodes[url].connect(crypto_key=crypto_key,
                                             node_type=JarbasVoiceTerminal,
                                             headers=auth_headers)
            sleep(5)
        except KeyboardInterrupt:
            # user requested shutdown
            break
def speak(self, utterance):
    """Synthesize ``utterance`` with the configured TTS engine and play it.

    The audio is written under the system temp dir, keyed by a digest of
    the utterance text, then played with the player matching its extension.
    """
    from hashlib import md5  # local import: only used for cache file naming
    LOG.info("SPEAK: " + utterance)
    temppath = join(gettempdir(), self.tts.tts_name)
    if not isdir(temppath):
        makedirs(temppath)
    # md5 of the text gives a stable, collision-resistant cache name; the
    # previous str(hash(utterance))[1:] was randomized per interpreter run
    # (PYTHONHASHSEED) and sliced off a significant digit
    fname = md5(utterance.encode("utf-8")).hexdigest()
    audio_file = join(temppath, fname + "." + self.tts.audio_ext)
    self.tts.get_tts(utterance, audio_file)
    try:
        if audio_file.endswith(".wav"):
            play_wav(audio_file).wait()
        elif audio_file.endswith(".mp3"):
            play_mp3(audio_file).wait()
        elif audio_file.endswith(".ogg"):
            play_ogg(audio_file).wait()
        else:
            play_audio(audio_file).wait()
    except Exception as e:
        # playback failure is non-fatal; log and continue
        LOG.warning(e)
def is_device_ready(self, message):
    """Poll until the core services report ready, then announce it.

    Args:
        message: triggering bus message (unused)
    Returns:
        True once all services are ready.
    Raises:
        Exception: if services are not ready within 60 seconds.
    """
    # Bus service assumed to be alive if messages sent and received
    # Enclosure assumed to be alive if this method is running
    services = {'audio': False, 'speech': False, 'skills': False}
    start = time.monotonic()
    # original had a redundant is_ready flag plus a dead `if is_ready`
    # check after the loop; the loop below is equivalent
    while not self.check_services_ready(services):
        if time.monotonic() - start >= 60:
            raise Exception('Timeout waiting for services start.')
        time.sleep(3)
    LOG.info("Mycroft is all loaded and ready to roll!")
    self.bus.emit(Message('mycroft.ready'))
    return True
def concat(self, files, wav_file):
    """ generate output wav file from input files

    Builds and runs:
        sox -c <ch> -r <rate> in1 [...] out channels <ch> rate <rate>

    Args:
        files: input audio file paths (missing files are skipped)
        wav_file: output path
    Returns:
        wav_file
    """
    # subprocess argv entries must be strings; self.channels / self.rate
    # are presumably numeric, so cast defensively (no-op if already str)
    channels = str(self.channels)
    rate = str(self.rate)
    cmd = ["sox"]
    for file in files:
        if not isfile(file):
            continue
        cmd.extend(["-c", channels, "-r", rate, file])
    cmd.extend([wav_file, "channels", channels, "rate", rate])
    LOG.info(subprocess.check_output(cmd))
    return wav_file
def _display(self, message):
    """
        Handler for ovos.ccanvas.play. Starts window of a picturelist.
        Also determines if the user requested a special service.

        Args:
            message: message bus message, not used but required
    """
    try:
        pictures = message.data['pictures']
        prefered_service = self.get_prefered(message.data.get("utterance", ""))
        # uri scheme (e.g. "file", "http") of the first picture decides
        # which display service can handle the whole list
        if isinstance(pictures[0], str):
            uri_type = pictures[0].split(':')[0]
        else:
            uri_type = pictures[0][0].split(':')[0]
        # check if user requested a particular service
        if prefered_service and uri_type in prefered_service.supported_uris():
            selected_service = prefered_service
        # check if default supports the uri
        elif self.default and uri_type in self.default.supported_uris():
            LOG.debug("Using default backend ({})".format(self.default.name))
            selected_service = self.default
        else:
            # Check if any other service can play the media
            LOG.debug("Searching the services")
            for s in self.services:
                if uri_type in s.supported_uris():
                    LOG.debug("Service {} supports URI {}".format(s, uri_type))
                    selected_service = s
                    break
            else:
                # no service at all can display this uri scheme — give up
                LOG.info('No service found for uri_type: ' + uri_type)
                return
        # hand the picture list to the chosen service and show it
        selected_service.clear_pictures()
        selected_service.add_pictures(pictures)
        selected_service.window()
        self.current = selected_service
    except Exception as e:
        LOG.exception(e)
def _write_data(dictionary):
    """ Writes the dictionary of state data to the IPC directory.

    Args:
        dictionary (dict): information to place in the 'disp_info' file
    """
    managerIPCDir = os.path.join(get_ipc_directory(), "managers")
    # change read/write permissions based on if file exists or not
    path = os.path.join(managerIPCDir, "disp_info")
    permission = "r+" if os.path.isfile(path) else "w+"
    if permission == "w+" and os.path.isdir(managerIPCDir) is False:
        # first write ever: create the shared directory world-writable
        os.makedirs(managerIPCDir)
        os.chmod(managerIPCDir, 0o777)
    try:
        with open(path, permission) as dispFile:
            # check if file is empty
            if os.stat(str(dispFile.name)).st_size != 0:
                data = json.load(dispFile)
            else:
                data = {}
                LOG.info("Display Manager is creating " + dispFile.name)
            # merge the new keys over any existing state
            for key in dictionary:
                data[key] = dictionary[key]
            # rewrite from the start and drop any stale tail bytes
            dispFile.seek(0)
            dispFile.write(json.dumps(data))
            dispFile.truncate()
        os.chmod(path, 0o777)
    except Exception as e:
        LOG.error(e)
        LOG.error("Error found in display manager file, deleting...")
        # NOTE(review): retries by recursing after deleting the (presumably
        # corrupt) file; if the failure is not file-content related this
        # can recurse indefinitely — confirm intended
        os.remove(path)
        _write_data(dictionary)
def handle_intent_service_message(self, message):
    """Route IntentService traffic: converse requests/responses, matched
    intents, and intent registrations, forwarding to the owning skill peer."""
    # accept serialized messages as well as Message objects
    if isinstance(message, str):
        message = Message.deserialize(message)
    skill_id = message.context.get("skill_id")
    peers = message.context.get("destination") or []
    # converse method handling
    if message.msg_type in ["skill.converse.request"]:
        skill_id = message.data.get("skill_id")
        message.context["skill_id"] = skill_id
        skill_peer = self.skill2peer(skill_id)
        LOG.info(f"Converse: {message.msg_type} "
                 f"Skill: {skill_id} "
                 f"Peer: {skill_peer}")
        message.context['source'] = "IntentService"
        message.context['destination'] = peers
        self.send2peer(message, skill_peer)
    elif message.msg_type in ["skill.converse.response"]:
        # just logging that it was received, converse method handled by
        # skill
        skill_id = message.data.get("skill_id")
        response = message.data.get("result")
        message.context["skill_id"] = skill_id
        skill_peer = self.skill2peer(skill_id)
        LOG.info(f"Converse Response: {response} "
                 f"Skill: {skill_id} "
                 f"Peer: {skill_peer}")
        message.context['source'] = skill_id
        message.context['destination'] = peers
    # intent found
    elif message.msg_type in self.intent2skill:
        skill_id = self.intent2skill[message.msg_type]
        skill_peer = self.skill2peer(skill_id)
        message.context["skill_id"] = skill_id
        LOG.info(f"Intent: {message.msg_type} "
                 f"Skill: {skill_id} "
                 f"Source: {peers} "
                 f"Target: {skill_peer}")
        # trigger the skill
        message.context['source'] = "IntentService"
        LOG.debug(f"Triggering intent: {skill_peer}")
        self.send2peer(message, skill_peer)
    # skill registering intent
    elif message.msg_type in ["register_intent",
                              "padatious:register_intent"]:
        LOG.info(f"Register Intent: {message.data['name']} "
                 f"Skill: {message.context['skill_id']}")
        # NOTE(review): stores the skill_id read from the incoming context
        # at the top — confirm context always carries skill_id here,
        # otherwise None is mapped to this intent name
        self.intent2skill[message.data["name"]] = skill_id