Example #1
    def validate_connection(self):
        try:
            gTTS(text='Hi')
        except Exception:
            LOG.warning(
                'GoogleTTS server could not be verified. Please check your '
                'internet connection.')
Example #2
def ensure_directory_exists(directory, domain=None):
    """ Create a directory and give access rights to all

    Args:
        directory (str): path of the directory to create
        domain (str): The IPC domain.  Basically a subdirectory to prevent
            overlapping signal filenames.

    Returns:
        str: a path to the directory
    """
    if domain:
        directory = os.path.join(directory, domain)

    # Expand and normalize the path
    directory = os.path.normpath(directory)
    directory = os.path.expanduser(directory)

    if not os.path.isdir(directory):
        try:
            save = os.umask(0)
            os.makedirs(directory, 0o777)  # give everyone rights to r/w here
        except OSError:
            LOG.warning("Failed to create: " + directory)
        finally:
            os.umask(save)

    return directory
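
A minimal usage sketch for ensure_directory_exists(); the base path and domain name below are placeholders chosen for illustration, not values taken from the project:

# Hypothetical call: creates /tmp/mycroft/ipc/signal (world-writable) if
# needed and returns the normalized path.
ipc_dir = ensure_directory_exists("/tmp/mycroft/ipc", domain="signal")
print(ipc_dir)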
Example #3
    def read(self, size, of_exc=False):
        """Read data from stream.

        Arguments:
            size (int): Number of bytes to read
            of_exc (bool): flag determining if the audio producer thread
                           should throw IOError at overflows.

        Returns:
            (bytes) Data read from device
        """
        frames = deque()
        remaining = size
        with self.read_lock:
            while remaining > 0:
                # If muted during read return empty buffer. This ensures no
                # reads occur while the stream is stopped
                if self.muted:
                    return self.muted_buffer

                to_read = min(self.wrapped_stream.get_read_available(),
                              remaining)
                if to_read <= 0:
                    sleep(.01)
                    continue
                result = self.wrapped_stream.read(to_read,
                                                  exception_on_overflow=of_exc)
                frames.append(result)
                remaining -= to_read

        input_latency = self.wrapped_stream.get_input_latency()
        if input_latency > 0.2:
            LOG.warning("High input latency: %f" % input_latency)
        audio = b"".join(list(frames))
        return audio
Example #4
    def process_message(self, client, payload, isBinary):
        """
       Process message from client, decide what to do internally here
       """
        LOG.info("processing message from client: " + str(client.peer))
        client_data = self.clients[client.peer]
        client_protocol, ip, sock_num = client.peer.split(":")

        if isBinary:
            # TODO receive files
            pass
        else:
            # add context for this message
            payload = payload.decode("utf-8")
            message = Message.deserialize(payload)
            message.context["source"] = client.peer
            message.context["destination"] = "skills"
            if "platform" not in message.context:
                message.context["platform"] = client_data.get(
                    "platform", "unknown")

            # messages/skills/intents per user
            if message.type in client.blacklist.get("messages", []):
                LOG.warning(client.peer + " sent a blacklisted message "
                                          "type: " + message.type)
                return
            # TODO check intent / skill that will trigger

            # send client message to internal mycroft bus
            self.mycroft_send(message.type, message.data, message.context)
Example #5
    def process(self, audio):
        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            transcription = self.transcribe(audio)
            if transcription:
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
Example #6
    def store(self, path=None):
        """Store the configuration locally."""
        path = path or self.path
        if not path:
            LOG.warning("config path not set, updating user config!!")
            update_mycroft_config(self)
            return
        path = expanduser(path)
        if not isdir(dirname(path)):
            makedirs(dirname(path))
        with open(path, 'w', encoding="utf-8") as f:
            json.dump(self, f, indent=4, ensure_ascii=False)
Example #7
    def __init__(self):
        path = None
        # TODO check system config platform and go directly to correct path if it exists
        paths = [
            "/opt/venvs/mycroft-core/lib/python3.7/site-packages/",  # mark1/2
            "/opt/venvs/mycroft-core/lib/python3.4/site-packages/",  # old mark1 installs
            "/home/pi/mycroft-core"  # picroft
        ]
        for p in paths:
            p = join(p, "mycroft", "configuration", "mycroft.conf")
            if isfile(p):
                path = p
        super().__init__(path)
        if not self.path or not isfile(self.path):
            LOG.warning("mycroft root path not found")
Example #8
    def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
        super().__init__(key_phrase, config, lang)
        # Hotword module imports
        from snowboydecoder import HotwordDetector
        # Hotword module config
        module = self.config.get("module")
        if module != "snowboy":
            LOG.warning(module + " module does not match with Hotword class "
                                 "snowboy")
        # Hotword params
        models = self.config.get("models", {})
        paths = [models[key] for key in models]
        sensitivity = self.config.get("sensitivity", 0.5)
        self.snowboy = HotwordDetector(paths,
                                       sensitivity=[sensitivity] * len(paths))
        self.lang = str(lang).lower()
        self.key_phrase = str(key_phrase).lower()
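
The constructor above only reads the "module", "models" and "sensitivity" keys from its config, so a hypothetical config dict like the following would satisfy it (the model path is a placeholder):

config = {
    "module": "snowboy",
    "models": {
        # placeholder path to a snowboy model file
        "hey_mycroft": "/path/to/hey_mycroft.pmdl"
    },
    "sensitivity": 0.5
}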
Example #9
    def initialize():
        nonlocal instance, complete
        try:
            clazz = HotWordFactory.CLASSES[module]
            instance = clazz(hotword, config, lang=lang)
        except TriggerReload:
            complete.set()
            sleep(0.5)
            loop.reload()
        except NoModelAvailable:
            LOG.warning('Could not find model for {} on {}.'.format(
                hotword, module
            ))
            instance = None
        except Exception:
            LOG.exception(
                'Could not create hotword. Falling back to default.')
            instance = None
        complete.set()
Example #10
    def get_mixer(self, control="Master"):
        if self._mixer is None:
            try:
                mixer = Mixer(control)
            except Exception:
                try:
                    # retry once before falling back
                    mixer = Mixer(control)
                except Exception:
                    try:
                        if control != "Master":
                            LOG.warning("could not allocate requested mixer, "
                                        "falling back to 'Master'")
                            mixer = Mixer("Master")
                        else:
                            raise
                    except Exception as e:
                        LOG.error("Couldn't allocate mixer")
                        LOG.exception(e)
                        raise
            self._mixer = mixer
        return self._mixer
Example #11
    def run(self):
        restart_attempts = 0
        with self.mic as source:
            self.recognizer.adjust_for_ambient_noise(source)
            while self.state.running:
                try:
                    audio = self.recognizer.listen(source, self.emitter,
                                                   self.stream_handler)
                    if audio is not None:
                        self.queue.put((AUDIO_DATA, audio))
                    else:
                        LOG.warning("Audio contains no data.")
                except IOError as e:
                    # IOError will be thrown if the read is unsuccessful.
                    # If self.recognizer.overflow_exc is False (default)
                    # input buffer overflow IOErrors due to not consuming the
                    # buffers quickly enough will be silently ignored.
                    LOG.exception('IOError Exception in AudioProducer')
                    if e.errno == pyaudio.paInputOverflowed:
                        pass  # Ignore overflow errors
                    elif restart_attempts < MAX_MIC_RESTARTS:
                        # restart the mic
                        restart_attempts += 1
                        LOG.info('Restarting the microphone...')
                        source.restart()
                        LOG.info('Restarted...')
                    else:
                        LOG.error('Restarting mic doesn\'t seem to work. '
                                  'Stopping...')
                        raise
                except Exception:
                    LOG.exception('Exception in AudioProducer')
                    raise
                else:
                    # Reset restart attempt counter on successful audio read
                    restart_attempts = 0
                finally:
                    if self.stream_handler is not None:
                        self.stream_handler.stream_stop()
Example #12
    def register_client(self, client, platform=None):
        """
        Add client to list of managed connections.
        """
        platform = platform or "unknown"
        LOG.info("registering client: " + str(client.peer))
        t, ip, sock = client.peer.split(":")
        # see if ip address is blacklisted
        if ip in self.ip_list and self.blacklist:
            LOG.warning("Blacklisted ip tried to connect: " + ip)
            self.unregister_client(client, reason=u"Blacklisted ip")
            return
        # see if ip address is whitelisted
        elif ip not in self.ip_list and not self.blacklist:
            LOG.warning("Unknown ip tried to connect: " + ip)
            #  if not whitelisted kick
            self.unregister_client(client, reason=u"Unknown ip")
            return
        self.clients[client.peer] = {
            "object": client,
            "status": "connected",
            "platform": platform
        }
Example #13
    def _normalized_numbers(self, sentence):
        """normalized numbers to word equivalent.

        Args:
            sentence (str): setence to speak

        Returns:
            stf: normalized sentences to speak
        """
        try:
            from lingua_franca.format import pronounce_number
            numbers = re.findall(r'-?\d+', sentence)
            normalized_num = [
                (num, pronounce_number(int(num)))
                for num in numbers
            ]
            for num, norm_num in normalized_num:
                sentence = sentence.replace(num, norm_num, 1)
        except TypeError:
            LOG.exception("type error in mimic2_tts.py _normalized_numbers()")
        except ImportError:
            LOG.warning("lingua_franca not installed, can not normalize numbers")
        return sentence
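
A small sketch of the helper used above; it assumes lingua_franca is installed and that the installed release requires a language to be loaded first:

import lingua_franca
from lingua_franca.format import pronounce_number

# Newer lingua_franca releases require loading a language first.
lingua_franca.load_language("en")
# pronounce_number(5) returns "five", so the method above would turn
# "wait 5 seconds" into "wait five seconds".
print(pronounce_number(5))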
Example #14
def start_mind(config=None, bus=None):
    # server
    config = config or {}
    host = config.get("host", "0.0.0.0")
    port = config.get("port", DEFAULT_PORT)
    # TODO non-ssl support
    use_ssl = config.get("ssl", USE_SSL)
    max_connections = config.get("max_connections", -1)
    address = u"wss://" + str(host) + u":" + str(port)
    cert = config.get("cert_file", DEFAULT_SSL_CRT)
    key = config.get("key_file", DEFAULT_SSL_KEY)

    factory = JarbasMind(bus=bus)
    factory.protocol = JarbasMindProtocol
    if max_connections >= 0:
        factory.setProtocolOptions(maxConnections=max_connections)

    if not exists(key) or not exists(cert):
        LOG.warning("ssl keys dont exist, creating self signed")
        name = key.split("/")[-1].replace(".key", "")
        create_self_signed_cert(CERTS_PATH, name)
        cert = CERTS_PATH + "/" + name + ".crt"
        key = CERTS_PATH + "/" + name + ".key"
        LOG.info("key created at: " + key)
        LOG.info("crt created at: " + cert)
        # update config with new keys
        config["cert_file"] = cert
        config["key_file"] = key
        # factory.config_update({"mind": config}, True)

    # SSL server context: load server key and certificate
    contextFactory = ssl.DefaultOpenSSLContextFactory(key, cert)

    reactor.listenSSL(port, factory, contextFactory)
    print("Starting mind: ", address)
    reactor.run()
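
A hypothetical way to launch the server above; every value here is a placeholder, and any omitted key falls back to the module defaults (DEFAULT_PORT, USE_SSL, DEFAULT_SSL_CRT, DEFAULT_SSL_KEY):

start_mind(config={
    "host": "0.0.0.0",
    "port": 5678,                       # placeholder port
    "ssl": True,
    "max_connections": -1,              # with -1 no connection limit is set
    "cert_file": "/path/to/mind.crt",   # placeholder paths; a self-signed
    "key_file": "/path/to/mind.key"     # cert is generated if they are missing
})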
Example #15
    def _wait_until_wake_word(self, source, sec_per_buffer, bus):
        """Listen continuously on source until a wake word is spoken

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each chunk
            bus: message bus connection used to emit hotword and utterance
                events
        """
        num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                               source.SAMPLE_WIDTH)

        silence = get_silence(num_silent_bytes)

        # bytearray to store audio in
        byte_data = silence

        buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
        buffers_since_check = 0.0

        # Max bytes for byte_data before audio is removed from the front
        max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
        test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)

        said_wake_word = False

        # Rolling buffer to track the audio energy (loudness) heard on
        # the source recently.  An average audio energy is maintained
        # based on these levels.
        energies = []
        idx_energy = 0
        avg_energy = 0.0
        energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs
        counter = 0

        # These are frames immediately after wake word is detected
        # that we want to keep to send to STT
        ww_frames = deque(maxlen=7)

        while not said_wake_word and not self._stop_signaled:
            if self._skip_wake_word():
                break
            chunk = self.record_sound_chunk(source)
            ww_frames.append(chunk)

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            if energy < self.energy_threshold * self.multiplier:
                self._adjust_threshold(energy, sec_per_buffer)

            if len(energies) < energy_avg_samples:
                # build the average
                energies.append(energy)
                avg_energy += float(energy) / energy_avg_samples
            else:
                # maintain the running average and rolling buffer
                avg_energy -= float(energies[idx_energy]) / energy_avg_samples
                avg_energy += float(energy) / energy_avg_samples
                energies[idx_energy] = energy
                idx_energy = (idx_energy + 1) % energy_avg_samples

                # maintain the threshold using average
                if energy < avg_energy * 1.5:
                    if energy > self.energy_threshold:
                        # bump the threshold to just above this value
                        self.energy_threshold = energy * 1.2

            counter += 1

            # At first, the buffer is empty and must fill up.  After that
            # just drop the first chunk bytes to keep it the same size.
            needs_to_grow = len(byte_data) < max_size
            if needs_to_grow:
                byte_data += chunk
            else:  # Remove beginning of audio and add new chunk to end
                byte_data = byte_data[len(chunk):] + chunk

            buffers_since_check += 1.0
            self.feed_hotwords(chunk)
            if buffers_since_check > buffers_per_check:
                buffers_since_check -= buffers_per_check
                chopped = byte_data[-test_size:] \
                    if test_size < len(byte_data) else byte_data
                audio_data = chopped + silence
                said_hot_word = False
                for hotword in self.check_for_hotwords(audio_data, bus):
                    said_hot_word = True
                    engine = self.hotword_engines[hotword]["engine"]
                    sound = self.hotword_engines[hotword]["sound"]
                    utterance = self.hotword_engines[hotword]["utterance"]
                    listen = self.hotword_engines[hotword]["listen"]

                    LOG.debug("Hot Word: " + hotword)
                    # If enabled, play a wave file with a short sound to audibly
                    # indicate hotword was detected.
                    if sound:
                        try:
                            audio_file = resolve_resource_file(sound)
                            source.mute()
                            if audio_file.endswith(".wav"):
                                play_wav(audio_file).wait()
                            elif audio_file.endswith(".mp3"):
                                play_mp3(audio_file).wait()
                            elif audio_file.endswith(".ogg"):
                                play_ogg(audio_file).wait()
                            else:
                                play_audio(audio_file).wait()
                            source.unmute()
                        except Exception as e:
                            LOG.warning(e)

                    # Hot Word succeeded
                    payload = {
                        'hotword': hotword,
                        'start_listening': listen,
                        'sound': sound,
                        "engine": engine.__class__.__name__
                    }
                    bus.emit("recognizer_loop:hotword", payload)

                    if utterance:
                        # send the transcribed word on for processing
                        payload = {'utterances': [utterance]}
                        bus.emit("recognizer_loop:utterance", payload)

                    audio = None
                    mtd = self._compile_metadata(hotword)
                    if self.save_wake_words:
                        # Save wake word locally
                        audio = self._create_audio_data(byte_data, source)

                        if not isdir(self.saved_wake_words_dir):
                            os.mkdir(self.saved_wake_words_dir)

                        fn = join(
                            self.saved_wake_words_dir,
                            '_'.join(str(mtd[k])
                                     for k in sorted(mtd)) + '.wav')
                        with open(fn, 'wb') as f:
                            f.write(audio.get_wav_data())

                        fn = join(
                            self.saved_wake_words_dir,
                            '_'.join(str(mtd[k])
                                     for k in sorted(mtd)) + '.json')
                        with open(fn, 'w') as f:
                            json.dump(mtd, f, indent=4)

                    if listen:
                        said_wake_word = True

                if said_hot_word:
                    # Reset the audio buffer, otherwise the same wake word
                    # audio would keep triggering serial detections
                    byte_data = silence
Example #16
    def execute(self, audio, language=None):
        LOG.warning("WITSTT language should be configured at wit.ai settings.")
        return self.recognizer.recognize_wit(audio, self.token)

    def validate_connection(self):
        r = requests.get("https://responsivevoice.org")
        if r.status_code == 200:
            return True
        LOG.warning("Could not reach https://responsivevoice.org")