def start_async(self):
    """Start consumer and producer threads.

    Wires a shared Queue between an AudioProducer (fed by the microphone
    through the responsive recognizer) and an AudioConsumer (which hands
    audio to the STT engine), then starts both threads.
    """
    self.state.running = True
    # build the STT engine from the core configuration
    stt = STTFactory.create(self.config_core["stt"])
    queue = Queue()
    stream_handler = None
    # engines that support streaming get a handler backed by the same queue
    if stt.can_stream:
        stream_handler = AudioStreamHandler(queue)
    LOG.debug("Using STT engine: " + stt.__class__.__name__)
    self.producer = AudioProducer(self.state, queue, self.microphone,
                                  self.responsive_recognizer, self,
                                  stream_handler)
    self.producer.start()
    self.consumer = AudioConsumer(self.state, queue, self, stt)
    self.consumer.start()
def on_typing(self, event):
    """Log which user is typing, and where (DM vs. channel)."""
    typing_user_id = event["data"]["user_id"]
    typing_channel_id = event["broadcast"]["channel_id"]
    channel_name = self.driver.channels.get_channel(
        typing_channel_id)["name"]
    username = self.driver.users.get_user(
        user_id=typing_user_id)["username"]
    # a direct-message channel is named "<my_id>__<their_id>"
    dm_channel_name = self.user_id + "__" + typing_user_id
    if channel_name == dm_channel_name:
        LOG.info(f"{username} is typing a direct message")
    else:
        LOG.info(f"{username} is typing a message in channel: "
                 f"{channel_name}")
def play_audio(uri, play_cmd="play %1"):
    """Play an audio file.

    Each "%1" token in play_cmd is substituted with the uri before
    launching the player.

    Args:
        uri: path/uri of the audio file to play
        play_cmd: player command template, split on spaces

    Returns:
        subprocess.Popen object, or None if the player failed to launch
    """
    args = [uri if token == "%1" else token
            for token in str(play_cmd).split(" ")]
    try:
        return subprocess.Popen(args)
    except Exception as e:
        LOG.error("Failed to launch WAV: {}".format(args))
        LOG.debug("Error: {}".format(repr(e)), exc_info=True)
        return None
def discover_hivemind(name="JarbasPushToTalkTerminal",
                      access_key="RESISTENCEisFUTILE",
                      crypto_key="resistanceISfutile"):
    """Continuously scan the LAN for HiveMind nodes and connect to them.

    Runs forever: a LocalDiscovery scan is performed every 5 seconds and
    each discovered node is connected as a JarbasPtTTerminal.

    Args:
        name: terminal name announced in the connection headers
        access_key: HiveMind access key used to build the headers
        crypto_key: key used to encrypt the node connection
    """
    discovery = LocalDiscovery()
    headers = HiveMindConnection.get_headers(name, access_key)
    while True:
        LOG.info("Scanning...")
        for node_url in discovery.scan():
            LOG.info("Fetching Node data: {url}".format(url=node_url))
            node = discovery.nodes[node_url]
            node.connect(crypto_key=crypto_key,
                         node_type=JarbasPtTTerminal,
                         headers=headers)
        sleep(5)
def handle_incoming_mycroft(self, message, client):
    """Route a message received from an external client.

    Skill messages (those carrying a skill_id in their context) are
    re-emitted on the local "localhive.skill" channel; terminal
    utterances are tagged with the sending peer and re-emitted on
    "localhive.utterance".

    Args:
        message (Message): mycroft bus message object
        client: connection object of the sender (provides .peer)
    """
    # message from a skill
    if message.context.get("skill_id"):
        self.ee.emit("localhive.skill", message)
    # message from a terminal
    if message.msg_type == "recognizer_loop:utterance":
        LOG.info(f"Utterance: {message.data['utterances']} "
                 f"Peer: {client.peer}")
        message.context["source"] = client.peer
        self.ee.emit("localhive.utterance", message)
def on_message_from_mycroft(self, message=None):
    """Translate selected mycroft-bus messages into simplified client
    payloads and forward them to the clients named as destination.

    Args:
        message: mycroft bus Message; dict and serialized-string forms
            are accepted and deserialized first
    """
    # forward internal messages to clients if they are the target
    if isinstance(message, dict):
        message = json.dumps(message)
    if isinstance(message, str):
        message = Message.deserialize(message)
    # NOTE: the log text below is Catalan for "mycroft message received"
    LOG.debug("Missatge de mycroft rebut: {0}".format(message.serialize()))
    message.context = message.context or {}
    # map bus message types onto the reduced client protocol
    if message.msg_type == "speak":
        payload = {
            "msg_type": "speak",
            "utterance": message.data["utterance"]
        }
    elif message.msg_type == "ona:recognized":
        payload = {
            "msg_type": "recognized",
            "utterance": message.data["utterance"]
        }
    elif message.msg_type == "ona:hotword_start":
        payload = {
            "msg_type": "waiting_for_hotword",
        }
    elif message.msg_type == "ona:hotword_detected":
        payload = {
            "msg_type": "listening",
        }
    elif message.msg_type == "play:status":
        payload = {"msg_type": "play", "data": message.data}
    elif message.msg_type == "mycroft.stop":
        payload = {"msg_type": "stop"}
    else:
        # any other message type is silently dropped
        return
    peers = message.context.get("destination") or []
    if not isinstance(peers, list):
        peers = [peers]
    for peer in peers:
        if peer and peer in self.clients:
            client = self.clients[peer].get("instance")
            if payload["msg_type"] == "speak":
                # speech is queued for TTS with the client's configured
                # engine/voice instead of being sent directly
                tts_engine = self.clients[peer].get("tts_engine")
                tts_voice = self.clients[peer].get("tts_voice")
                self.audio_source_queue.put(
                    (payload, client, tts_engine, tts_voice))
            else:
                self.interface.send(payload, client)
def load_phonemes(self, key):
    """Load phonemes from the TTS cache.

    Args:
        key: key identifying the phoneme cache entry

    Returns:
        cached phoneme data, or None if missing/unreadable
    """
    cache_path = os.path.join(get_cache_directory("tts"), key + ".pho")
    if not os.path.exists(cache_path):
        return None
    try:
        with open(cache_path, "r") as fh:
            return json.load(fh)
    except Exception as e:
        LOG.error("Failed to read .PHO from cache ({})".format(e))
    return None
def save_phonemes(self, key, phonemes):
    """Cache phonemes for a sentence.

    Args:
        key: hash key for the sentence
        phonemes: phoneme string to save
    """
    target = os.path.join(get_cache_directory("tts"), key + ".pho")
    try:
        with open(target, "w") as fh:
            fh.write(json.dumps(phonemes))
    except Exception:
        LOG.exception("Failed to write {} to cache".format(target))
def overdrive(self, gain_db=20.0, colour=20.0):
    """Apply non-linear distortion (sox 'overdrive' effect).

    Parameters:
        gain_db : float, default=20
            Amount of distortion (dB).
        colour : float, default=20
            Amount of even harmonic content in the output (dB).
    """
    LOG.debug("overdrive")
    self.effects += ['overdrive', f'{gain_db:f}', f'{colour:f}']
def tremolo(self, speed=6.0, depth=40.0):
    """Apply a tremolo (low-frequency amplitude modulation) effect.

    Parameters:
        speed : float
            Tremolo speed in Hz.
        depth : float
            Tremolo depth as a percentage of the total amplitude
            (default 40).
    """
    LOG.debug("tremolo")
    self.effects += ['tremolo', f'{speed:f}', f'{depth:f}']
def prune_dead_streams(cls, ttl=60):
    """Re-check non-OK streams and drop streams that stay dead.

    A stream whose status is not OK is re-probed; its status is then
    considered valid for `ttl` minutes (recorded in "expires").  A
    stream that fails `cls.remove_threshold` consecutive checks is moved
    to cls.dead_channels and removed from the channel list.

    Args:
        ttl: minutes a freshly checked stream status stays valid
    """
    # iterate over a copy since delete_channel mutates cls.channels
    for idx, ch in dict(cls.channels).items():
        if cls.channels[idx]["status"] != StreamStatus.OK:
            cls.channels[idx]["status"] = cls.get_channel_status(ch)
            cls.channels[idx]["expires"] = time.time() + ttl * 60
            if cls.channels[idx]["status"] == StreamStatus.OK:
                cls.channels[idx]["_dead_counter"] = 0
            else:
                # use .get so the very first failed check (when the
                # counter key was never initialized) does not KeyError
                cls.channels[idx]["_dead_counter"] = \
                    cls.channels[idx].get("_dead_counter", 0) + 1
                if cls.channels[idx]["_dead_counter"] >= \
                        cls.remove_threshold:
                    LOG.info(f"Removing dead stream: {idx}")
                    cls.dead_channels[idx] = ch
                    cls.delete_channel(idx)
def contrast(self, amount=75):
    """Modify the signal to sound louder (comparable with compression).

    Parameters:
        amount : float
            Amount of enhancement between 0 and 100.

    Raises:
        ValueError: if amount is outside [0, 100].
    """
    LOG.debug("contrast")
    if amount > 100 or amount < 0:
        raise ValueError('amount must be a number between 0 and 100.')
    self.effects.extend(['contrast', f'{amount:f}'])
def sudo_exec(cmdline, passwd="root"):
    """Run a command through sudo, feeding it the given password.

    NOTE(review): the body below is visibly corrupted — the text between
    the Darwin prompt assignment and the sendline call has been
    redacted/mangled (the `'******'` fragment is not valid Python), so
    the pexpect spawn/expect of the sudo prompt is missing and `child`
    is undefined.  Restore from upstream before use.

    Args:
        cmdline: command line to execute via sudo
        passwd: sudo password to supply when prompted
    """
    osname = platform.system()
    if osname == 'Linux':
        prompt = r'\[sudo\] password for %s: ' % os.environ['USER']
    elif osname == 'Darwin':
        prompt = 'Password:'******'sudo password was asked.')
    child.sendline(passwd)
    child.expect(pexpect.EOF)
    return child.before
def enclosure2rootdir(enclosure=None):
    """Map an enclosure type to its mycroft-core root directory.

    Args:
        enclosure: MycroftEnclosures value; detected when None.

    Returns:
        the matching MycroftRootLocations entry, defaulting to HOME
        (~/mycroft-core) for unknown enclosures.
    """
    enclosure = enclosure or detect_enclosure()
    known_roots = {
        MycroftEnclosures.OLD_MARK1: MycroftRootLocations.OLD_MARK1,
        MycroftEnclosures.MARK1: MycroftRootLocations.MARK1,
        MycroftEnclosures.MARK2: MycroftRootLocations.MARK2,
        MycroftEnclosures.PICROFT: MycroftRootLocations.PICROFT,
        MycroftEnclosures.OVOS: MycroftRootLocations.OVOS,
        MycroftEnclosures.BIGSCREEN: MycroftRootLocations.BIGSCREEN,
    }
    root = known_roots.get(enclosure)
    if root is not None:
        return root
    LOG.warning("Assuming mycroft-core location is ~/mycroft-core")
    return MycroftRootLocations.HOME
def detect_lang_neural(text, return_multiple=False, return_dict=False,
                       hint_language=None, filter_unreliable=False):
    """Detect the language of a text using pycld3.

    Args:
        text: text to analyze
        return_multiple: if True return a list of all detected
            languages, else only the top prediction
        return_dict: if True return dicts with lang_code/lang/conf
            instead of bare language codes
        hint_language: language code; if any prediction matches it, that
            prediction is returned as the single result
        filter_unreliable: if True, drop predictions cld3 flags as
            unreliable

    Returns:
        str, dict, list, or None when nothing (reliable) was detected

    Raises:
        ImportError: if pycld3 is not installed
    """
    if cld3 is None:
        LOG.debug("run pip install pycld3")
        raise ImportError("pycld3 not installed")
    languages = []
    if return_multiple or hint_language:
        # rank up to 5 candidate languages by probability, best first
        preds = sorted(cld3.get_frequent_languages(text, num_langs=5),
                       key=lambda i: i.probability, reverse=True)
        for pred in preds:
            if filter_unreliable and not pred.is_reliable:
                continue
            if return_dict:
                languages += [{
                    "lang_code": pred.language,
                    "lang": code_to_name(pred.language),
                    "conf": pred.probability
                }]
            else:
                languages.append(pred.language)
            # the hint matched -> keep only this prediction
            if hint_language and hint_language == pred.language:
                languages = [languages[-1]]
                break
    else:
        pred = cld3.get_language(text)
        if filter_unreliable and not pred.is_reliable:
            # unreliable single prediction -> leave languages empty
            pass
        elif return_dict:
            languages = [{
                "lang_code": pred.language,
                "lang": code_to_name(pred.language),
                "conf": pred.probability
            }]
        else:
            languages = [pred.language]
    # return top language only
    if not return_multiple:
        if not len(languages):
            return None
        return languages[0]
    return languages
def search(self, phrase, media_type=CPSMatchType.GENERIC):
    """Query common-play skills for media matching a phrase.

    Emits a `better_cps.query` bus message and busy-waits (bounded by
    max_timeout, plus a bonus for GENERIC searches) for replies, which
    are collected into self.query_replies by a separate handler.
    Results from the old common-play API are converted and merged in.

    Args:
        phrase: search phrase
        media_type: CPSMatchType used to route/filter the query

    Returns:
        list of reply dicts that contain results; may fall back to a
        GENERIC search, or return [] when nothing matched
    """
    self.query_replies[phrase] = []
    self.query_timeouts[phrase] = self.min_timeout
    self.search_start = time.time()
    self.waiting = True
    self.bus.emit(
        Message('better_cps.query', {
            "phrase": phrase,
            "media_type": media_type
        }))
    # old common play will send the messages expected by the official
    # mycroft stack, but skills are known to over-match, don't support
    # match type, and the GUI is different for every skill; it may also
    # cause issues with status tracking and mess up playlists
    if self.old_cps:
        self.old_cps.send_query(phrase, media_type)
    # if there is no match type defined, increase the timeout a bit
    # since all skills need to search
    if media_type == CPSMatchType.GENERIC:
        bonus = 3  # timeout bonus
    else:
        bonus = 0
    while self.waiting and \
            time.time() - self.search_start <= self.max_timeout + bonus:
        time.sleep(0.1)
    self.waiting = False
    # convert the returned data to the expected new format; playback
    # type is considered Skill — better cps will not handle the playback
    # life cycle but instead delegate to the skill
    if self.old_cps:
        old_style = self.old_cps.get_results(phrase)
        self.query_replies[phrase] += self._convert_to_new_style(
            old_style, media_type)
    if self.query_replies.get(phrase):
        return [s for s in self.query_replies[phrase] if s.get("results")]
    # fallback to generic media type
    if self.media_fallback and media_type != CPSMatchType.GENERIC:
        LOG.debug("BetterCPS falling back to CPSMatchType.GENERIC")
        return self.search(phrase, media_type=CPSMatchType.GENERIC)
    return []
def __init__(self, *args, **kwargs):
    """Initialize the video-collection skill.

    Fills in default settings (without overwriting values the user has
    already configured), verifies py_VOD is available, and loads the
    media collection database.

    Raises:
        ImportError: if py_VOD is not installed.
    """
    super().__init__(*args, **kwargs)
    # default settings, applied only for keys the user has not set;
    # a data-driven table replaces thirteen repetitive `if key not in
    # settings` blocks with identical behavior
    default_settings = {
        "max_videos": 500,
        "min_duration": -1,
        "max_duration": -1,
        "shuffle_menu": False,
        "filter_live": False,
        "filter_date": False,
        "min_score": 40,
        "match_description": True,
        "match_tags": True,
        "match_title": True,
        "filter_trailers": True,
        "filter_behind_scenes": True,
        # after matching and ordering by title, metadata is
        # matched/searched for only the first N videos; some collections
        # are huge and matching everything would cause a timeout —
        # collections with fewer than N videos are unaffected
        "search_depth": 500,
    }
    for key, value in default_settings.items():
        if key not in self.settings:
            self.settings[key] = value
    if pyvod is None:
        LOG.error("py_VOD not installed!")
        LOG.info("pip install py_VOD>=0.4.0")
        raise ImportError
    self.playback_type = CPSPlayback.GUI
    self.media_type = CPSMatchType.VIDEO
    self.default_bg = "https://github.com/OpenVoiceOS/ovos_assets/raw/master/Logo/ovos-logo-512.png"
    self.default_image = resolve_ovos_resource_file(
        "ui/images/moviesandfilms.png")
    db_path = join(dirname(__file__), "res", self.name + ".jsondb")
    self.message_namespace = basename(dirname(__file__)) + ".ovos_utils"
    self.media_collection = pyvod.Collection(self.name,
                                             logo=self.default_image,
                                             db_path=db_path)
def on_mention(self, event):
    """Handle a bot mention: strip mention tags and dispatch the text."""
    post = json.loads(event["data"]["post"])
    sender = event["data"]["sender_name"]
    channel_id = post["channel_id"]
    user_id = post["user_id"]
    channel_name = self.driver.channels.get_channel(channel_id)["name"]
    # remove every known mention tag from the message body
    msg = post["message"]
    for tag in self.tags:
        msg = msg.replace(tag, "")
    LOG.info("New mention at channel: " + channel_name)
    LOG.info(sender + " said: " + msg)
    self.handle_mention(msg, sender, channel_id)
def discover_hivemind(name="JarbasCliTerminal",
                      access_key="RESISTENCEisFUTILE",
                      crypto_key="resistanceISfutile",
                      curses=False):
    """Continuously scan the LAN for HiveMind nodes and connect a
    CLI (or curses) terminal to each one.

    Runs forever: a LocalDiscovery scan is performed every 5 seconds.

    Args:
        name: terminal name announced in the connection headers
        access_key: HiveMind access key used to build the headers
        crypto_key: key used to encrypt the node connection
        curses: if True connect a JarbasCursesTerminal instead of the
            plain CLI terminal
    """
    discovery = LocalDiscovery()
    headers = HiveMindConnection.get_headers(name, access_key)
    # pick the terminal implementation up front
    clazz = JarbasCursesTerminal if curses else JarbasCliTerminal
    while True:
        print("Scanning...")
        for node_url in discovery.scan():
            LOG.info("Fetching Node data: {url}".format(url=node_url))
            node = discovery.nodes[node_url]
            node.connect(crypto_key=crypto_key,
                         node_type=clazz,
                         headers=headers)
        sleep(5)
def run(self):
    """Main loop: wait for the hotword, stream microphone audio to the
    gRPC STT service, and emit the final recognized utterance.
    """
    while self.keep_running():
        self.wait_for_hotword()
        if not self.keep_running():
            break
        # NOTE(review): `channel` is not defined in this method — it is
        # presumably a module-level gRPC channel; confirm it exists at
        # module scope.
        stub = SttServiceStub(channel)
        results = stub.StreamingRecognize(self.vad_generator())
        try:
            for r in results:
                try:
                    # only act on the final chunk of a recognition
                    if r.chunks[0].final:
                        self.emit_utterance(
                            r.chunks[0].alternatives[0].text)
                        break
                except LookupError:
                    LOG.debug('No available chunks')
        except grpc._channel._Rendezvous as err:
            LOG.error('Error code %s, message: %s' % (err._state.code,
                                                      err._state.details))
    self.stop()
def detect_lang_naive(text, return_multiple=False, return_dict=False,
                      hint_language=None, filter_unreliable=False):
    """Detect the language of a text using pycld2.

    :param text: text to analyze
    :param return_multiple: bool if True return a list of all languages
        detected, else the top language
    :param return_dict: bool if True returns all data, E.g., pt ->
        {'lang': 'Portuguese', 'lang_code': 'pt', 'conf': 0.96}
    :param hint_language: str E.g., 'ITALIAN' or 'it' boosts Italian
    :param filter_unreliable: bool if True return None when cld2 flags
        the overall prediction as unreliable
    :return: language code/name/dict/list, or None
    :raises ImportError: if pycld2 is not installed
    """
    if cld2 is None:
        LOG.debug("run pip install pycld2")
        raise ImportError("pycld2 not installed")
    isReliable, textBytesFound, details = cld2.detect(
        text, hintLanguage=hint_language)
    languages = []
    # filter unreliable predictions
    if not isReliable and filter_unreliable:
        return None
    # select first language only
    if not return_multiple:
        details = [details[0]]
    for name, code, score, _ in details:
        # "un" is cld2's code for "unknown" — skipped; note that when
        # the single top detail is "un" this function returns None
        if code == "un":
            continue
        if return_dict:
            languages.append({
                "lang": name.lower().capitalize(),
                "lang_code": code,
                "conf": score / 100
            })
        else:
            languages.append(code)
    # return top language only
    if not return_multiple:
        if not len(languages):
            return None
        return languages[0]
    return languages
def ti_start_sequence(self):
    '''Start sequence for the TAS5806 amplifier.

    The register writes below are order-sensitive (reset, fault clear,
    then power-state transitions Deep Sleep -> HiZ -> Play).
    '''
    LOG.info("Start the TI Amp")
    self.write_ti_data(0x01, 0x11)  # reset chip
    self.write_ti_data(0x78, 0x80)  # clear fault - works
    self.write_ti_data(0x01, 0x00)  # remove reset
    self.write_ti_data(0x78, 0x00)  # remove clear fault
    # NOTE(review): the two writes below are undocumented here —
    # presumably per the TAS5806 datasheet; confirm before changing
    self.write_ti_data(0x33, 0x03)
    self.set_volume(0.5)
    self.write_ti_data(0x30, 0x01)
    self.write_ti_data(0x03, 0x00)  # Deep Sleep
    self.write_ti_data(0x03, 0x02)  # HiZ
    # Indicate the first coefficient of a BQ is starting to write
    self.write_ti_data(0x5C, 0x01)
    self.write_ti_data(0x03, 0x03)  # Play
def _register_public_api(self):
    """Find and register api methods.

    Api methods are tagged with the `api_method` member; for each method
    where this is found, a message bus handler is registered.  Finally a
    handler for fetching the api info from any requesting skill is
    created.
    """

    def wrap_method(func):
        """Boiler plate for returning the response to the sender."""
        def wrapper(message):
            result = func(*message.data['args'], **message.data['kwargs'])
            self.bus.emit(message.response(data={'result': result}))

        return wrapper

    # candidate attributes: non-properties that carry a __name__
    methods = [
        attr_name for attr_name in get_non_properties(self)
        if hasattr(getattr(self, attr_name), '__name__')
    ]
    for attr_name in methods:
        method = getattr(self, attr_name)
        if hasattr(method, 'api_method'):
            doc = method.__doc__ or ''
            name = method.__name__
            self.public_api[name] = {
                'help': doc,
                'type': '{}.{}'.format(self.skill_id, name),
                'func': method
            }
    for key in self.public_api:
        if ('type' in self.public_api[key] and
                'func' in self.public_api[key]):
            LOG.debug('Adding api method: '
                      '{}'.format(self.public_api[key]['type']))
            # remove the function member since it shouldn't be
            # reused and can't be sent over the messagebus
            func = self.public_api[key].pop('func')
            self.add_event(self.public_api[key]['type'],
                           wrap_method(func))
    if self.public_api:
        self.add_event('{}.public_api'.format(self.skill_id),
                       self._send_public_api)
def play_error():
    """If enabled, play a short sound to audibly indicate that speech
    recognition failed."""
    sound = CONFIGURATION["listener"].get('error_sound')
    audio_file = resolve_resource_file(sound)
    if not audio_file:
        return
    # pick the player matching the file extension; generic fallback
    players = {".wav": play_wav, ".mp3": play_mp3, ".ogg": play_ogg}
    player = play_audio
    for ext, candidate in players.items():
        if audio_file.endswith(ext):
            player = candidate
            break
    try:
        player(audio_file).wait()
    except Exception as e:
        LOG.warning(e)
def handle_fallback(self, message):
    """Attempt to answer an utterance via the intent engine.

    Returns True when a sufficiently confident intent was found and
    emitted, False otherwise.
    """
    utterance = message.data.get('utterance')
    LOG.debug(self.engine.name + " fallback attempt: " + utterance)
    # the engine cannot answer until training has completed
    if not self.finished_training_event.is_set():
        LOG.debug('Waiting for training to finish...')
        self.finished_training_event.wait()
    intent = self.engine.calc_intent(utterance)
    if intent["conf"] < 0.5:
        return False
    self.make_active()
    self.emitter.emit(message.reply(intent["name"], data=intent))
    return True
def reverb(self, reverberance=50, high_freq_damping=50, room_scale=100,
           stereo_depth=100, pre_delay=0, wet_gain=0, wet_only=False):
    """Add reverberation to the audio using the 'freeverb' algorithm.

    Applying a small amount of stereo reverb to a (dry) mono signal will
    usually make it sound more natural.

    Parameters:
        reverberance : float, default=50
            Percentage of reverberance.
        high_freq_damping : float, default=50
            Percentage of high-frequency damping.
        room_scale : float, default=100
            Scale of the room as a percentage.
        stereo_depth : float, default=100
            Stereo depth as a percentage.
        pre_delay : float, default=0
            Pre-delay in milliseconds.
        wet_gain : float, default=0
            Amount of wet gain in dB.
        wet_only : bool, default=False
            If True, only output the wet signal.
    """
    LOG.debug("reverb")
    args = ['reverb']
    if wet_only:
        args.append('-w')
    args += ['{:f}'.format(v) for v in (reverberance, high_freq_damping,
                                        room_scale, stereo_depth,
                                        pre_delay, wet_gain)]
    self.effects += args
def merge_duplicate_channels(cls):
    """Merge channels that were registered as duplicates of each other.

    For every primary index in cls._duplicates, each listed duplicate is
    merged into it; on a successful merge both originals are deleted and
    the merged channel is added back.
    """
    for idx, chs in cls._duplicates.items():
        ch = cls.channels.get(idx)
        if not ch:
            # primary already gone (e.g. removed by an earlier merge)
            continue
        for idx2 in chs:
            if idx2 == idx:
                continue
            ch2 = cls.channels.get(idx2)
            if not ch2:
                continue
            merged_ch = cls.create_merged_channel(ch, ch2)
            if merged_ch:
                # NOTE(review): after this merge, `ch` still refers to
                # the pre-merge data for any remaining duplicates in
                # `chs` — confirm that is intended
                LOG.debug(f"merging channel data {idx}:{idx2}")
                cls.delete_channel(idx)
                cls.delete_channel(idx2)
                cls.add_channel(merged_ch)
def onConnect(self, request):
    """Handle a new websocket client connection handshake.

    Notifies the internal mycroft bus of the connection and resets the
    per-connection state (platform, crypto key, blacklist).

    Args:
        request: websocket ConnectionRequest (provides .peer, .headers)

    Returns:
        tuple: (subprotocol, headers) for the WS opening handshake
    """
    LOG.info("Client connecting: {0}".format(request.peer))
    # peer is presumably formatted like "tcp4:host:port" — the second
    # field is taken as the IP; TODO confirm for this transport
    ip = request.peer.split(":")[1]
    context = {"source": self.peer}
    self.platform = request.headers.get("platform", "unknown")
    self.crypto_key = None
    # send message to internal mycroft bus
    data = {"ip": ip, "headers": request.headers}
    self.blacklist = {"messages": [], "skills": [], "intents": []}
    self.factory.mycroft_send("hive.client.connect", data, context)
    # return a pair with WS protocol spoken (or None for any) and
    # custom headers to send in initial WS opening handshake HTTP response
    headers = {"server": self.platform}
    return (None, headers)
def get_main_branch_from_github_url(url: str) -> str:
    """
    Determine the main branch for the specified URL.

    @param url: Repository URL
    @return: default branch name
    @raise GithubHTTPRateLimited: if GitHub served its rate-limit page
    @raise GithubInvalidUrl: if the default branch could not be parsed
    """
    html = None
    try:
        url = normalize_github_url(url)
        html = requests.get(url).text
        if "<title>Rate limit · GitHub</title>" in html:
            raise GithubHTTPRateLimited
        # the default branch is embedded base64-encoded in the page
        encoded = html.split("default-branch=\"")[1].split('"')[0]
        return base64.b64decode(encoded).decode("utf-8")
    except GithubHTTPRateLimited:
        # don't let the broad handler below mask the rate-limit signal
        # as an invalid URL
        raise
    except Exception as e:
        LOG.error(f"html={html}")
        LOG.error(e)
        raise GithubInvalidUrl
def videos(self):
    """Return the video catalog as sorted json dicts, tagged with this
    skill's id and a resolved playback url; [] on any failure."""
    try:
        # load video catalog
        catalog = [entry.as_json()
                   for entry in self.media_collection.entries]
        for video in catalog:
            # set skill_id
            video["skill"] = self.skill_id
            # set url: prefer the first stream, fall back to
            # stream/url fields
            streams = video.get("streams", [])
            if len(streams):
                video["url"] = streams[0]
            else:
                video["url"] = video.get("stream") or video.get("url")
        # return sorted
        return self.sort_videos(catalog)
    except Exception as e:
        LOG.exception(e)
        return []