def play(self):
    """Start playback of the currently selected entry.

    Reads self.playback_data["playing"], stops any previous playback,
    then dispatches on the requested playback type: AUDIO plays via the
    audio service, SKILL delegates to the matching skill over the bus,
    GUI playback happens inside display_ui().

    Raises:
        ValueError: if the requested playback type is unknown.
        KeyError: if the entry lacks "skill_id" or "playback".
    """
    data = self.playback_data.get("playing") or {}
    uri = data.get("stream") or data.get("uri") or data.get("url")
    skill_id = self.active_skill = data["skill_id"]
    # stop whatever is currently playing before starting the new track
    self.stop()
    if data["playback"] == CPSPlayback.AUDIO:
        data["status"] = CPSTrackStatus.PLAYING_AUDIOSERVICE
        # resolve the playable url (may differ from the search-time uri)
        real_url = self.get_stream(uri)
        self.audio_service.play(real_url)
    elif data["playback"] == CPSPlayback.SKILL:
        data["status"] = CPSTrackStatus.PLAYING
        if data.get("is_old_style"):
            # legacy mycroft common play skill - use the old bus api
            self.bus.emit(Message('play:start',
                                  {"skill_id": skill_id,
                                   "callback_data": data,
                                   "phrase": data["phrase"]}))
        else:
            self.bus.emit(Message(f'better_cps.{skill_id}.play', data))
    elif data["playback"] == CPSPlayback.GUI:
        pass  # plays in display_ui
    else:
        raise ValueError("invalid playback request")
    self.update_status(data)
    self._set_now_playing(data)
    self.display_ui()
    self.update_player_status("Playing")
def test_common_query_events_routing(self):
    """Common query message life cycle: replies addressed to the
    original "source" must reach it; everything else stays internal."""
    # common query message life cycle
    self.bus.emitted_msgs = []
    self.cc.handle_question(Message(
        "fallback_cycle_test",
        {"utterance": "what is the speed of light"},
        {"source": "unittests", "destination": "common_query"}))

    # "source" should receive these
    # (set comprehension instead of set([list-comp]) - same result)
    unittest_msgs = {m["type"] for m in self.bus.emitted_msgs
                     if m["context"].get("destination", "") == "unittests"}
    self.assertEqual(unittest_msgs,
                     {'question:query', 'question:query.response',
                      'question:action', 'add_context', 'speak'})

    # internal to mycroft, "source" should NOT receive these
    # TODO fix bug - these messages should not be dropping context
    # these should in fact also be sent ...
    cc_msgs = {m["type"] for m in self.bus.emitted_msgs
               if m["context"].get("destination", "") != "unittests"}
    for m in cc_msgs:
        # startswith accepts a tuple of prefixes - one call, not an `or`
        self.assertTrue(m.startswith(("enclosure.", "gui.")))
def mouth_display_png(self, image_absolute_path,
                      invert=False, x=0, y=0, refresh=True):
    """Send an image to the enclosure.

    Args:
        image_absolute_path (string): The absolute path of the image
        invert (bool): inverts the image being drawn.
        x (int): x offset for image
        y (int): y offset for image
        refresh (bool): specify whether to clear the faceplate before
                        displaying the new image or not.
                        Useful if you'd like to display multiple images
                        on the faceplate at once.
    """
    self.bus.emit(Message("enclosure.mouth.display_image",
                          {'img_path': image_absolute_path,
                           'xOffset': x,
                           'yOffset': y,
                           'invert': invert,
                           'clearPrev': refresh}))
def abort(_):
    # Kill the running get_response wait thread and undo its side effects.
    # NOTE: `t`, `skill`, `stop_tts`, `call_stop`, `exc`, `callback` and
    # `args` are closed over from the enclosing scope (not visible here).
    if not t.is_alive():
        return
    if stop_tts:
        # silence any speech currently being produced
        skill.bus.emit(Message("mycroft.audio.speech.stop"))
    if call_stop:
        # call stop on parent skill
        skill.stop()
    # ensure no orphan get_response daemons
    # this is the only killable daemon that core itself will
    # create, users should also account for this condition with
    # callbacks if using the decorator for other purposes
    skill._handle_killed_wait_response()
    try:
        # keep injecting the exception until the thread actually dies
        while t.is_alive():
            t.raise_exc(exc)
            time.sleep(0.1)
    except threading.ThreadError:
        pass  # already killed
    except AssertionError:
        pass  # could not determine thread id ?
    if callback is not None:
        if len(signature(callback).parameters) == 1:
            # class method, needs self
            callback(args[0])
        else:
            callback()
def emit_to_ona(self, client, msg_type, data):
    """Wrap *data* in a bus Message addressed to *client* and push it
    through the normal mycroft-to-client forwarding path."""
    self.on_message_from_mycroft(Message(msg_type, data, {
        "source": "ona-listener",
        "destination": client.peer,
        "client_name": "OnaWebInterface",
    }))
def resume(self):
    """Resume playback on whichever backend is currently active."""
    backend = self.active_backend
    if backend == CPSTrackStatus.PLAYING_GUI:
        # video is rendered by the GUI itself
        self.gui.resume_video()
    elif backend == CPSTrackStatus.PLAYING_AUDIOSERVICE:
        self.audio_service.resume()
    elif backend is not None:
        # playback is delegated to a skill; ask it over the bus
        self.bus.emit(Message(f'better_cps.{self.active_skill}.resume'))
    self.update_status({"status": backend})
def play_prev(self):
    """Skip to the previous track on the active backend."""
    # TODO playlist handling
    backend = self.active_backend
    if backend == CPSTrackStatus.PLAYING_GUI:
        # no previous-track support for GUI playback yet
        pass
    elif backend == CPSTrackStatus.PLAYING_AUDIOSERVICE:
        self.audio_service.prev()
    elif backend is not None:
        # delegate to the skill handling playback
        self.bus.emit(Message(f'better_cps.{self.active_skill}.prev'))
def pause(self):
    """Pause playback on the active backend and publish PAUSED status."""
    self.update_status({"status": CPSTrackStatus.PAUSED})
    backend = self.active_backend
    if backend == CPSTrackStatus.PLAYING_GUI:
        self.gui.pause_video()
    elif backend == CPSTrackStatus.PLAYING_AUDIOSERVICE:
        self.audio_service.pause()
    elif backend is not None:
        # delegate to the skill handling playback
        self.bus.emit(Message(f'better_cps.{self.active_skill}.pause'))
def send_query(self, phrase, media_type=CPSMatchType.GENERIC):
    """Broadcast an old-style common play query on the bus.

    Args:
        phrase (str): the search phrase
        media_type: CPSMatchType hint (default GENERIC)
    """
    # reset per-phrase bookkeeping before skills start replying
    self.query_replies[phrase] = []
    self.query_extensions[phrase] = []
    payload = {"phrase": phrase, "media_type": media_type}
    self.bus.emit(Message('play:query', payload))
def eyes_volume(self, volume):
    """Indicate the volume using the eyes

    Args:
        volume (int): 0 to 11

    Raises:
        ValueError: if volume is outside 0-11.
    """
    if not 0 <= volume <= 11:
        raise ValueError(f'volume ({volume}) must be between 0-11')
    self.bus.emit(Message("enclosure.eyes.volume", {'volume': volume}))
def handle_broadcast_message(self, data, client):
    """Handle a hivemind broadcast arriving at this node.

    Args:
        data (dict): hivemind envelope containing "payload" and "route"
        client: connection the broadcast arrived on (currently unused)
    """
    payload = data["payload"]
    LOG.info("Received broadcast message at: " + self.node_id)
    LOG.debug("ROUTE: " + str(data["route"]))
    LOG.debug("PAYLOAD: " + str(payload))
    # echo to nodered (all connections/flows)
    # TODO skip source peer
    self.nodered_send(message=Message("hivemind.broadcast", payload))
def _stop(self, message=None):
    """Stop the currently active window service, if there is one.

    Args:
        message: bus message that triggered the stop (unused)
    """
    LOG.debug('stopping window services')
    with self.service_lock:
        active = self.current
        if not active:
            return
        # capture the name before stop() in case stop mutates state
        name = active.name
        if active.stop():
            # acknowledge on the bus which service handled the stop
            self.bus.emit(Message("mycroft.stop.handled",
                                  {"by": "window:" + name}))
        self.current = None
def eyes_look(self, side):
    """Make the eyes look to the given side

    Args:
        side (str): 'r' for right
                    'l' for left
                    'u' for up
                    'd' for down
                    'c' for crossed
    """
    msg = Message("enclosure.eyes.look", {'side': side})
    self.bus.emit(msg)
def on_volume_set(self, message):
    """Apply a volume change requested over the bus and announce it.

    Args:
        message: bus message; data may carry a "percent" value
    """
    volume = message.data.get("percent", self.current_volume)
    self.current_volume = volume
    LOG.info('Mark2:interface.py set volume to %s' % (volume, ))
    self.m2enc.hardware_volume.set_volume(float(volume))
    # notify anybody listening on the bus who cares
    self.bus.emit(Message("hardware.volume",
                          {"volume": volume},
                          context={"source": ["enclosure"]}))
def async_volume_handler(self, vol):
    """Record a volume value reported asynchronously by the hardware.

    Args:
        vol (float): new volume; values > 1.0 are scaled down by 10
    """
    # NOTE(review): ERROR level for a routine status message - looks like
    # leftover debugging; consider LOG.debug
    LOG.error("ASYNC SET VOL PASSED IN %s" % (vol, ))
    if vol > 1.0:
        # presumably normalising a 0-10 scale to 0.0-1.0 - TODO confirm
        # (dividing by 10, not 100, so input is not a percent)
        vol = vol / 10
    self.current_volume = vol
    LOG.error("ASYNC SET VOL TO %s" % (self.current_volume, ))
    # notify anybody listening on the bus who cares
    self.bus.emit(Message("hardware.volume",
                          {"volume": self.current_volume},
                          context={"source": ["enclosure"]}))
def eyes_fill(self, percentage):
    """Use the eyes as a type of progress meter

    Args:
        percentage (int): 0-49 fills the right eye, 50-100 also covers left

    Raises:
        ValueError: if percentage is outside 0-100.
    """
    if percentage < 0 or percentage > 100:
        raise ValueError('percentage ({}) must be between 0-100'.format(
            str(percentage)))
    self.bus.emit(
        Message("enclosure.eyes.fill", {'percentage': percentage}))
def nodered_send(self, message):
    """Serialize *message* and fan it out to every connected nodered client.

    Args:
        message: a Message, a dict, or an already-serialized string.
    """
    if isinstance(message, Message):
        payload = Message.serialize(message)
    elif isinstance(message, dict):
        # NOTE(review): repr() wraps the json text in quotes - presumably
        # the nodered side expects that framing; confirm before changing
        payload = repr(json.dumps(message))
    else:
        payload = message
    encoded = payload.encode()
    # snapshot the peer keys so client disconnects mid-loop are safe
    for peer in set(self.clients):
        self.clients[peer]["instance"].sendMessage(encoded)
def check_services_ready(self, services):
    """Report if all specified services are ready.

    Args:
        services (dict): service name -> bool, updated in place.

    Returns:
        bool: True if every queried service reported ready.
    """
    for ser in services:
        services[ser] = False
        response = self.bus.wait_for_response(
            Message('mycroft.{}.is_ready'.format(ser)))
        # .get() so a reply without a "status" field counts as
        # "not ready" instead of raising KeyError
        if response and response.data.get('status'):
            services[ser] = True
    return all(services.values())
def stop(self):
    """Stop the active playback backend and clear playback state.

    Returns:
        bool: True if a backend was actually active.
    """
    backend = self.active_backend
    if backend == CPSTrackStatus.PLAYING_GUI:
        self.gui.stop_video()
    elif backend == CPSTrackStatus.PLAYING_AUDIOSERVICE:
        self.audio_service.stop()
    elif backend is not None:
        # delegate to the skill handling playback
        self.bus.emit(Message(f'better_cps.{self.active_skill}.stop'))
    self.update_status({"status": CPSTrackStatus.END_OF_MEDIA})
    was_playing = backend is not None
    self.active_backend = None
    self.active_skill = None
    return was_playing
def eyes_color(self, r=255, g=255, b=255):
    """Change the eye color to the given RGB color

    Args:
        r (int): 0-255, red value
        g (int): 0-255, green value
        b (int): 0-255, blue value
    """
    rgb = {'r': r, 'g': g, 'b': b}
    self.bus.emit(Message("enclosure.eyes.color", rgb))
def _picture_info(self, message):
    """Returns picture info on the message bus.

    Args:
        message: message bus message, not used but required
    """
    # empty dict when no canvas service is active
    info = self.current.picture_info() if self.current else {}
    self.bus.emit(Message('ovos.ccanvas.picture_info_reply', data=info))
def CPS_extend_timeout(self, timeout=0.5):
    """Request more time for searching.

    Limits are defined by the better-common-play framework; by default
    max total time is 5 seconds per query.

    Args:
        timeout (float): extra seconds requested
    """
    if not self._current_query:
        # nothing being searched right now - nothing to extend
        return
    self.bus.emit(Message("better_cps.query.response", {
        "phrase": self._current_query,
        "skill_id": self.skill_id,
        "timeout": timeout,
        "searching": True,
    }))
def is_device_ready(self, message):
    """Block until all core services report ready, then announce it.

    Emits 'mycroft.ready' (or 'mycroft.wifi.setup' when offline) once
    the audio, speech and skills services are all up.

    Args:
        message: bus message that triggered the check (unused)

    Returns:
        bool: True once all services are ready.

    Raises:
        TimeoutError: if services are not ready within 60 seconds.
    """
    is_ready = False
    # Bus service assumed to be alive if messages sent and received
    # Enclosure assumed to be alive if this method is running
    services = {'audio': False, 'speech': False, 'skills': False}
    start = time.monotonic()
    while not is_ready:
        is_ready = self.check_services_ready(services)
        if is_ready:
            break
        if time.monotonic() - start >= 60:
            # TimeoutError subclasses Exception, so existing callers
            # catching the old generic Exception still work
            raise TimeoutError('Timeout waiting for services start.')
        time.sleep(3)
    # loop only exits with is_ready True (or raises), so no re-check needed
    LOG.info("All Mycroft Services have reported ready.")
    if connected():
        self.bus.emit(Message('mycroft.ready'))
    else:
        self.bus.emit(Message('mycroft.wifi.setup'))
    return is_ready
def on_message_from_mycroft(self, message=None):
    """Translate an internal mycroft bus message into a simplified Ona
    client payload and deliver it to each targeted connected client.

    Args:
        message: Message instance, serialized str, or dict.
    """
    # forward internal messages to clients if they are the target
    if isinstance(message, dict):
        message = json.dumps(message)
    if isinstance(message, str):
        message = Message.deserialize(message)
    # (Catalan log text: "mycroft message received")
    LOG.debug("Missatge de mycroft rebut: {0}".format(message.serialize()))
    message.context = message.context or {}
    # map known mycroft message types onto the client protocol;
    # anything unrecognised is dropped below
    if message.msg_type == "speak":
        payload = {"msg_type": "speak",
                   "utterance": message.data["utterance"]}
    elif message.msg_type == "ona:recognized":
        payload = {"msg_type": "recognized",
                   "utterance": message.data["utterance"]}
    elif message.msg_type == "ona:hotword_start":
        payload = {"msg_type": "waiting_for_hotword", }
    elif message.msg_type == "ona:hotword_detected":
        payload = {"msg_type": "listening", }
    elif message.msg_type == "play:status":
        payload = {"msg_type": "play", "data": message.data}
    elif message.msg_type == "mycroft.stop":
        payload = {"msg_type": "stop"}
    else:
        return
    # deliver only to the peers named in the message context
    peers = message.context.get("destination") or []
    if not isinstance(peers, list):
        peers = [peers]
    for peer in peers:
        if peer and peer in self.clients:
            client = self.clients[peer].get("instance")
            if payload["msg_type"] == "speak":
                # speech is synthesised per-client with its own tts config
                tts_engine = self.clients[peer].get("tts_engine")
                tts_voice = self.clients[peer].get("tts_voice")
                self.audio_source_queue.put(
                    (payload, client, tts_engine, tts_voice))
            else:
                self.interface.send(payload, client)
def get_channel_status(cls, channel):
    """Resolve and validate the stream for a channel.

    Args:
        channel (str or dict): channel name (looked up via find_channel)
            or the channel data itself.

    Returns:
        result of check_stream() on the resolved stream url.

    Raises:
        KeyError: if no stream can be resolved for the channel.
    """
    if isinstance(channel, str):
        channel = cls.find_channel(channel)
    stream = channel.get("stream")
    if not stream:
        if channel.get("stream_callback"):
            # callback mode to ask stream provider (skill) for actual url
            # - we have a message type in payload instead of stream
            # - we send that bus message and wait reply with actual stream
            # - allows searching without extracting (slow), eg, youtube
            # FIX: the messagebus client API is wait_for_response;
            # wait_response does not exist and raised AttributeError
            # NOTE(review): reply handling assumes a dict-like response
            # with .get("stream") - confirm against the bus wrapper used
            data = cls.bus.wait_for_response(
                Message(channel["stream_callback"]))
            if data and data.get("stream"):
                return check_stream(data["stream"], timeout=5)
        raise KeyError("channel has no associated stream")
    return check_stream(stream, timeout=5)
def get_eyes_color(self):
    """Get the eye RGB color for all pixels

    Returns:
        pixels (list): list of (r, g, b) tuples for each eye pixel

    Raises:
        TimeoutError: if the enclosure does not reply in time.
    """
    query = Message("enclosure.eyes.rgb.get",
                    context={"source": "enclosure_api",
                             "destination": "enclosure"})
    reply = self.bus.wait_for_response(query, "enclosure.eyes.rgb")
    if not reply:
        raise TimeoutError("Enclosure took too long to respond")
    return reply.data["pixels"]
def search(self, phrase, media_type=CPSMatchType.GENERIC):
    """Query all common play skills for playback candidates.

    Blocks up to max_timeout (+3s bonus for generic queries) while
    skills reply over the bus; replies accumulate in
    self.query_replies[phrase] via handlers registered elsewhere.

    Args:
        phrase (str): the search phrase
        media_type: CPSMatchType hint for skills (default GENERIC)

    Returns:
        list: reply dicts that contain at least one result
    """
    self.query_replies[phrase] = []
    self.query_timeouts[phrase] = self.min_timeout
    self.search_start = time.time()
    self.waiting = True
    self.bus.emit(Message('better_cps.query',
                          {"phrase": phrase,
                           "media_type": media_type}))
    # old common play will send the messages expected by the official
    # mycroft stack, but skills are know to over match, dont support
    # match type, and the GUI is different for every skill, it may also
    # cause issues with status tracking and mess up playlists
    if self.old_cps:
        self.old_cps.send_query(phrase, media_type)
    # if there is no match type defined, lets increase timeout a bit
    # since all skills need to search
    if media_type == CPSMatchType.GENERIC:
        bonus = 3  # timeout bonus
    else:
        bonus = 0
    # busy-wait; self.waiting is cleared by a reply handler elsewhere
    # or by the timeout below
    while self.waiting and \
            time.time() - self.search_start <= self.max_timeout + bonus:
        time.sleep(0.1)
    self.waiting = False
    # convert the returned data to the expected new format, playback
    # type is consider Skill, better cps will not handle the playback
    # life cycle but instead delegate to the skill
    if self.old_cps:
        old_style = self.old_cps.get_results(phrase)
        self.query_replies[phrase] += self._convert_to_new_style(
            old_style, media_type)
    if self.query_replies.get(phrase):
        return [s for s in self.query_replies[phrase] if s.get("results")]
    # fallback to generic media type (single-level recursion: the
    # recursive call always uses GENERIC, which cannot recurse again)
    if self.media_fallback and media_type != CPSMatchType.GENERIC:
        LOG.debug("BetterCPS falling back to CPSMatchType.GENERIC")
        return self.search(phrase, media_type=CPSMatchType.GENERIC)
    return []
def _real_wait_response(self, is_cancel, validator, on_fail, num_retries):
    """Loop until a valid response is received from the user or the
    retry limit is reached.

    Arguments:
        is_cancel (callable): function checking cancel criteria
        validator (callable): function checking for a valid response
        on_fail (callable): function handling retries
        num_retries (int): max retries; negative means retry until valid
    """
    num_fails = 0
    while True:
        if self._response is not False:
            # usually None when aborted externally
            # also allows overriding returned result from other events
            return self._response

        # name-mangled private helper on this class; blocks for the
        # user's spoken reply (None when nothing was said)
        response = self.__get_response()

        if response is None:
            # if nothing said, prompt one more time
            num_none_fails = 1 if num_retries < 0 else num_retries
            if num_fails >= num_none_fails:
                self._response = None
                return
        else:
            if validator(response):
                self._response = response
                return

            # catch user saying 'cancel'
            if is_cancel(response):
                self._response = None
                return

        num_fails += 1
        if 0 < num_retries < num_fails or self._response is not False:
            self._response = None
            return

        line = on_fail(response)
        if line:
            self.speak(line, expect_response=True)
        else:
            # no retry prompt configured; just reopen the microphone
            self.bus.emit(Message('mycroft.mic.listen',
                                  context={"skill_id": self.skill_id}))
def eyes_setpixel(self, idx, r=255, g=255, b=255):
    """Set individual pixels of the Mark 1 neopixel eyes

    Args:
        idx (int): 0-11 for the right eye, 12-23 for the left
        r (int): The red value to apply
        g (int): The green value to apply
        b (int): The blue value to apply

    Raises:
        ValueError: if idx is outside 0-23.
    """
    if not 0 <= idx <= 23:
        raise ValueError('row ({}) must be between 0-23'.format(str(idx)))
    payload = {'row': idx, 'r': r, 'g': g, 'b': b}
    self.bus.emit(Message("enclosure.eyes.setpixel", payload))
def play_video_event(self, message):
    """Handle a GUI video selection and hand it to better-cps playback."""
    video_data = message.data["modelData"]
    if video_data["skill_id"] != self.skill_id:
        # event belongs to another skill's GUI model - ignore
        return
    # ensure all data fields present
    logo = video_data.get("logo")
    defaults = {
        "match_confidence": 100,
        "media_type": self.media_type,
        "playback": self.playback_type,
        "skill_icon": self.skill_icon,
        "skill_logo": self.skill_logo,
        "bg_image": logo or self.default_bg,
        "image": logo or self.default_image,
        "author": self.name,
    }
    track = merge_dict(video_data, defaults)
    self.bus.emit(Message("better_cps.play", {"tracks": [track]}))