Example #1
    def _wait_for_listen_signal(self, source):
        """Listen continuously on source until a listen signal is detected
        Args:
            source (AudioSource):  Source producing the audio chunks
        """

        while not self._stop_signaled and not self._is_listen_signaled():
            if check_for_signal('adjustAmbientNoise') or \
                    self._should_adjust_noise:
                self._adjust_ambient_noise(source)
            sleep(self.sec_between_signal_checks)

        # If enabled, play a wave file with a short sound to audibly
        # indicate listen signal was detected.
        sound = self.config["listener"].get('listen_sound')
        audio_file = resolve_resource_file(sound)
        try:
            if audio_file:
                if audio_file.endswith(".wav"):
                    play_wav(audio_file).wait()
                elif audio_file.endswith(".mp3"):
                    play_mp3(audio_file).wait()
                elif audio_file.endswith(".ogg"):
                    play_ogg(audio_file).wait()
                else:
                    play_audio(audio_file).wait()
        except Exception as e:
            LOG.warning(e)
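
Note: the extension-based playback dispatch above is repeated verbatim in several examples below. A minimal helper could factor it out; this is a sketch that assumes the same play_wav/play_mp3/play_ogg/play_audio helpers and LOG object the surrounding examples already import:

import os

def play_sound_file(audio_file):
    """Play audio_file with the player matching its extension,
    falling back to the generic player; log errors instead of raising."""
    players = {".wav": play_wav, ".mp3": play_mp3, ".ogg": play_ogg}
    player = players.get(os.path.splitext(audio_file)[1].lower(), play_audio)
    try:
        player(audio_file).wait()
    except Exception as e:
        LOG.warning(e)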
Example #2
    def add_channel(cls, channel):
        url = channel.get("stream")
        channel_id = cls.channel2id(channel)

        if url in [ch.get("stream") for ch in cls.dead_channels.values()]:
            LOG.error("Channel has been previously flagged DEAD, refusing "
                      "to add channel")
            LOG.debug(str(channel))
            return

        for idx, ch in cls.channels.items():
            ch_url = ch["stream"]
            if url != ch_url:
                continue
            LOG.debug(f"Stream previously added: {url}")
            if is_compatible_dict(ch, channel):
                LOG.debug(f"merging channel data {channel_id}:{idx}")
                cls.channels[idx] = cls.create_merged_channel(ch, channel)
                return
            else:
                if channel_id in cls.channels:
                    LOG.error(f"channel data doesn't "
                              f"match, {channel_id} already in database")
                LOG.warning("refused to merge, replacing channel")

        LOG.info(f"Adding channel: {channel_id}")
        channel["expires"] = 0
        channel["status"] = StreamStatus.UNKNOWN
        channel["_dead_counter"] = 0
        cls.channels[channel_id] = channel
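
The merge above hinges on is_compatible_dict; a hypothetical implementation consistent with its use here would treat two channel dicts as mergeable when every key they share carries the same value:

def is_compatible_dict(a: dict, b: dict) -> bool:
    # compatible when no shared key holds conflicting values
    return all(a[k] == b[k] for k in a.keys() & b.keys())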
Example #3
def get_manifest_from_github_url(url, branch=None):
    branch = branch or get_branch_from_github_url(url)
    url = manifest_url_from_github_url(url, branch)
    manifest = requests.get(url).text
    data = yaml.safe_load(manifest)
    if not data:
        # most likely just the template full of comments
        raise InvalidManifest
    if 'dependencies' in data:
        return data
    # some skills in the wild have the manifest without the top-level key
    LOG.warning(
        "{url} contains an invalid manifest, attempting recovery".format(
            url=url))
    recovered = {"dependencies": {}}
    if "python" in data:
        recovered["dependencies"]["python"] = data["python"]
    if "skill" in data:
        recovered["dependencies"]["skill"] = data["skill"]
    if "system" in data:
        recovered["dependencies"]["system"] = data["system"]
    if not len(recovered["dependencies"]):
        # suspicious, doesn't follow standard
        raise InvalidManifest
    return recovered
Example #4
def get_manifest_from_github_url(url: str,
                                 branch: Optional[str] = None) -> dict:
    """
    Get requirements specified in the repository manifest file
    @param url: Repository URL to query
    @param branch: Optional branch spec, otherwise default branch will be used
    @return: dict parsed requirements
    """
    branch = branch or get_branch_from_github_url(url)
    url = manifest_url_from_github_url(url, branch)
    manifest = requests.get(url).text
    if "<title>Rate limit &middot; GitHub</title>" in manifest:
        raise GithubHTTPRateLimited
    data = yaml.safe_load(manifest)
    if not data:
        # most likely just the template full of comments
        raise InvalidManifest
    if 'dependencies' in data:
        return data
    # some skills in the wild have the manifest without the top-level key
    LOG.warning(
        "{url} contains an invalid manifest, attempting recovery".format(
            url=url))
    recovered = {"dependencies": {}}
    if "python" in data:
        recovered["dependencies"]["python"] = data["python"]
    if "skill" in data:
        recovered["dependencies"]["skill"] = data["skill"]
    if "system" in data:
        recovered["dependencies"]["system"] = data["system"]
    if not len(recovered["dependencies"]):
        # suspicious, doesn't follow standard
        raise InvalidManifest
    return recovered
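
To illustrate the recovery branch, consider a manifest.yml that omits the top-level dependencies key (a hypothetical example; the skill name is a placeholder):

import yaml

manifest = """
python:
  - requests
skill:
  - ovos-skill-hello-world.openvoiceos
"""
data = yaml.safe_load(manifest)
# the recovery path wraps the recognised sections as:
# {"dependencies": {"python": ["requests"],
#                   "skill": ["ovos-skill-hello-world.openvoiceos"]}}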
Example #5
def ensure_directory_exists(directory, domain=None):
    """ Create a directory and give access rights to all

    Args:
        domain (str): The IPC domain.  Basically a subdirectory to prevent
            overlapping signal filenames.

    Returns:
        str: a path to the directory
    """
    if domain:
        directory = os.path.join(directory, domain)

    # Expand and normalize the path
    directory = os.path.normpath(directory)
    directory = os.path.expanduser(directory)

    if not os.path.isdir(directory):
        try:
            save = os.umask(0)
            os.makedirs(directory, 0o777)  # give everyone rights to r/w here
        except OSError:
            LOG.warning("Failed to create: " + directory)
        finally:
            os.umask(save)

    return directory
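
Typical usage (the path here is illustrative):

ipc_dir = ensure_directory_exists("/tmp/mycroft/ipc", domain="signal")
# -> "/tmp/mycroft/ipc/signal", created 0o777 if it did not already exist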
Example #6
    def read(self, size, of_exc=False):
        """Read data from stream.

        Arguments:
            size (int): Number of bytes to read
            of_exc (bool): flag determining if the audio producer thread
                           should throw IOError at overflows.

        Returns:
            (bytes) Data read from device
        """
        frames = deque()
        remaining = size
        with self.read_lock:
            while remaining > 0:
                # If muted during read return empty buffer. This ensures no
                # reads occur while the stream is stopped
                if self.muted:
                    return self.muted_buffer

                to_read = min(self.wrapped_stream.get_read_available(),
                              remaining)
                if to_read <= 0:
                    sleep(.01)
                    continue
                result = self.wrapped_stream.read(to_read,
                                                  exception_on_overflow=of_exc)
                frames.append(result)
                remaining -= to_read

        input_latency = self.wrapped_stream.get_input_latency()
        if input_latency > 0.2:
            LOG.warning("High input latency: %f" % input_latency)
        audio = b"".join(list(frames))
        return audio
Example #7
    def stretch(self, factor, window=20):
        """
        Change the audio duration (but not its pitch). Unless factor is close to 1, use the tempo effect instead.

        This effect is broadly equivalent to the tempo effect with search set to zero, so in general, its results are comparatively poor; it is retained as it can sometimes out-perform tempo for small factors.

        Parameters:
        factor : float
        The ratio of the new tempo to the old tempo. For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%. Note - this argument is the inverse of what is passed to the sox stretch effect for consistency with tempo.

        window : float, default=20
        Window size in milliseconds
        """
        LOG.debug("stretch")
        if factor <= 0:
            raise ValueError("factor must be a positive number")

        if factor < 0.5 or factor > 2:
            LOG.warning("Using an extreme time stretching factor. "
                        "Quality of results will be poor")

        if abs(factor - 1.0) > 0.1:
            LOG.warning("For this stretch factor, "
                        "the tempo effect has better performance.")

        if window <= 0:
            raise ValueError("window must be a positive number.")

        effect_args = ['stretch', '{:f}'.format(factor), '{:f}'.format(window)]

        self.effects.extend(effect_args)
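
A usage sketch, assuming a hypothetical Transformer-style class that exposes the method above and collects sox arguments in self.effects:

tfm = Transformer()  # hypothetical holder of the effect methods shown here
tfm.stretch(1.02)    # small factor, the range where stretch can out-perform tempo
assert tfm.effects == ['stretch', '1.020000', '20.000000']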
Example #8
def validate_manifest(content):
    if isinstance(content, str):
        data = yaml.safe_load(content)
    else:
        assert isinstance(content, dict)
        data = content
    if not data:
        # most likely just the template full of comments
        raise InvalidManifest
    if 'dependencies' in data:
        return data

    # some skills in the wild have the manifest without the top-level key
    LOG.warning("invalid manifest, attempting recovery")
    recovered = {"dependencies": {}}
    if "python" in data:
        recovered["dependencies"]["python"] = data["python"]
    if "skill" in data:
        recovered["dependencies"]["skill"] = data["skill"]
    if "system" in data:
        recovered["dependencies"]["system"] = data["system"]
    if not len(recovered["dependencies"]):
        # suspicious, doesn't follow standard
        raise InvalidManifest
    return recovered
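
For example, a one-section manifest string recovers cleanly:

recovered = validate_manifest("python:\n  - requests\n")
# -> {"dependencies": {"python": ["requests"]}}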
Example #9
    def validate_connection(self):
        try:
            gTTS(text='Hi')
        except Exception:
            LOG.warning(
                'GoogleTTS server could not be verified. Please check your '
                'internet connection.')
Example #10
    def register_client(self, client, platform=None):
        """
       Add client to list of managed connections.
       """
        platform = platform or "unknown"
        LOG.info("registering client: " + str(client.peer))
        t, ip, sock = client.peer.split(":")
        # see if ip address is blacklisted
        if ip in self.ip_list and self.blacklist:
            LOG.warning("Blacklisted ip tried to connect: " + ip)
            self.unregister_client(client, reason="Blacklisted ip")
            return
        # see if ip address is whitelisted
        elif ip not in self.ip_list and not self.blacklist:
            LOG.warning("Unknown ip tried to connect: " + ip)
            #  if not whitelisted kick
            self.unregister_client(client, reason="Unknown ip")
            return

        audio_queue = Queue()
        audio_listener = WebsocketAudioListener(self, client, audio_queue)
        self.clients[client.peer] = {
            "instance": client,
            "status": "connected",
            "platform": platform,
            "audio_queue": audio_queue,
            "audio_listener": audio_listener,
            "use_hotword": True,
            "tts_engine": "festival",
            "tts_voice": "ona"
        }
        audio_listener.start()
Example #11
    def transcribe(self, audio):
        def send_unknown_intent():
            """ Send message that nothing was transcribed. """
            self.emitter.emit('recognizer_loop:speech.recognition.unknown')

        try:
            # Invoke the STT engine on the audio clip
            text = self.stt.execute(audio)
            if text is not None:
                text = text.lower().strip()
                LOG.debug("STT: " + text)
            else:
                send_unknown_intent()
                LOG.info('no words were transcribed')
            if self.save_utterances:
                mtd = self._compile_metadata(text)

                filename = os.path.join(self.saved_utterances_dir, mtd["name"])
                with open(filename, 'wb') as f:
                    f.write(audio.get_wav_data())

                filename = os.path.join(self.saved_utterances_dir,
                                        mtd["name"].replace(".wav", ".json"))
                with open(filename, 'w') as f:
                    json.dump(mtd, f, indent=4)

            return text
        except sr.RequestError as e:
            LOG.error("Could not request Speech Recognition {0}".format(e))
        except ConnectionError as e:
            LOG.error("Connection Error: {0}".format(e))

            self.emitter.emit("recognizer_loop:no_internet")
        except RequestException as e:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
        except Exception as e:
            send_unknown_intent()
            LOG.error(e)
            LOG.error("Speech Recognition could not understand audio")
            # If enabled, play a wave file with a short sound to audibly
            # indicate speech recognition failed
            sound = CONFIGURATION["listener"].get('error_sound')
            audio_file = resolve_resource_file(sound)
            try:
                if audio_file:
                    if audio_file.endswith(".wav"):
                        play_wav(audio_file).wait()
                    elif audio_file.endswith(".mp3"):
                        play_mp3(audio_file).wait()
                    elif audio_file.endswith(".ogg"):
                        play_ogg(audio_file).wait()
                    else:
                        play_audio(audio_file).wait()
            except Exception as e:
                LOG.warning(e)
            return None

        # reached only when a network-related error was handled above
        dialog_name = 'not connected to the internet'
        self.emitter.emit('speak', {'utterance': dialog_name})
Example #12
    def process(self, audio):
        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            transcription = self.transcribe(audio)
            if transcription:
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
Example #13
    def store(self, path=None):
        """Store the configuration locally."""
        path = path or self.path
        if not path:
            LOG.warning("config path not set, updating user config!!")
            update_mycroft_config(self)
            return
        path = expanduser(path)
        if not isdir(dirname(path)):
            makedirs(dirname(path))
        with open(path, 'w', encoding="utf-8") as f:
            json.dump(self, f, indent=4, ensure_ascii=False)
Example #14
def enclosure2rootdir(enclosure=None):
    enclosure = enclosure or detect_enclosure()
    if enclosure == MycroftEnclosures.OLD_MARK1:
        return MycroftRootLocations.OLD_MARK1
    elif enclosure == MycroftEnclosures.MARK1:
        return MycroftRootLocations.MARK1
    elif enclosure == MycroftEnclosures.MARK2:
        return MycroftRootLocations.MARK2
    elif enclosure == MycroftEnclosures.PICROFT:
        return MycroftRootLocations.PICROFT
    elif enclosure == MycroftEnclosures.OVOS:
        return MycroftRootLocations.OVOS
    elif enclosure == MycroftEnclosures.BIGSCREEN:
        return MycroftRootLocations.BIGSCREEN
    LOG.warning("Assuming mycroft-core location is ~/mycroft-core")
    return MycroftRootLocations.HOME
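
Since this is a one-to-one mapping, an equivalent table-driven form (a sketch using the same enum members) avoids the elif chain:

_ROOTDIRS = {
    MycroftEnclosures.OLD_MARK1: MycroftRootLocations.OLD_MARK1,
    MycroftEnclosures.MARK1: MycroftRootLocations.MARK1,
    MycroftEnclosures.MARK2: MycroftRootLocations.MARK2,
    MycroftEnclosures.PICROFT: MycroftRootLocations.PICROFT,
    MycroftEnclosures.OVOS: MycroftRootLocations.OVOS,
    MycroftEnclosures.BIGSCREEN: MycroftRootLocations.BIGSCREEN,
}

def enclosure2rootdir(enclosure=None):
    enclosure = enclosure or detect_enclosure()
    rootdir = _ROOTDIRS.get(enclosure)
    if rootdir is None:
        LOG.warning("Assuming mycroft-core location is ~/mycroft-core")
        return MycroftRootLocations.HOME
    return rootdir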
Example #15
    def play_error():
        # If enabled, play a wave file with a short sound to audibly
        # indicate speech recognition failed
        sound = CONFIGURATION["listener"].get('error_sound')
        audio_file = resolve_resource_file(sound)
        if audio_file:
            try:
                if audio_file.endswith(".wav"):
                    play_wav(audio_file).wait()
                elif audio_file.endswith(".mp3"):
                    play_mp3(audio_file).wait()
                elif audio_file.endswith(".ogg"):
                    play_ogg(audio_file).wait()
                else:
                    play_audio(audio_file).wait()
            except Exception as e:
                LOG.warning(e)
Example #16
    def tempo(self, factor, audio_type=None, quick=False):
        """Time stretch audio without changing pitch.

        This effect uses the WSOLA algorithm. The audio is chopped up into segments which are then shifted in the time domain and overlapped (cross-faded) at points where their waveforms are most similar as determined by measurement of least squares.

        Parameters:
        factor : float
        The ratio of new tempo to the old tempo. For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.

        audio_type : str
        Type of audio, which optimizes algorithm parameters. One of:
        m : Music,
        s : Speech,
        l : Linear (useful when factor is close to 1)

        quick : bool, default=False
        If True, this effect will run faster but with lower sound quality.
        """
        LOG.debug("tempo")
        if factor <= 0:
            raise ValueError("factor must be a positive number")

        if factor < 0.5 or factor > 2:
            LOG.warning("Using an extreme time stretching factor. "
                        "Quality of results will be poor")

        if abs(factor - 1.0) <= 0.1:
            LOG.warning("For this stretch factor, "
                        "the stretch effect has better performance.")

        if audio_type not in [None, 'm', 's', 'l']:
            raise ValueError(
                "audio_type must be one of None, 'm', 's', or 'l'.")

        if not isinstance(quick, bool):
            raise ValueError("quick must be a boolean.")

        effect_args = ['tempo']

        if quick:
            effect_args.append('-q')

        if audio_type is not None:
            effect_args.append('-{}'.format(audio_type))

        effect_args.append('{:f}'.format(factor))
        self.effects += effect_args
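
Usage sketch, with the same hypothetical Transformer-style class as in the stretch example above:

tfm = Transformer()                          # hypothetical
tfm.tempo(1.25, audio_type='s', quick=True)  # speech, 25% faster, pitch kept
assert tfm.effects == ['tempo', '-q', '-s', '1.250000']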
Example #17
    def _skip_wake_word(self, source):
        """Check if told programatically to skip the wake word

        For example when we are in a dialog with the user.
        """

        signaled = False
        if check_for_signal('startListening') or self._listen_triggered:
            signaled = True

        # Pressing the Mark 1 button can start recording (unless
        # it is being used to mean 'stop' instead)
        elif check_for_signal('buttonPress', 1):
            # give other processes time to consume this signal if
            # it was meant to be a 'stop'
            sleep(0.25)
            if check_for_signal('buttonPress'):
                # Signal is still here, assume it was intended to
                # begin recording
                LOG.debug("Button Pressed, wakeword not needed")
                signaled = True

        if signaled:
            LOG.info("Listen signal detected")
            # If enabled, play a wave file with a short sound to audibly
            # indicate listen signal was detected.
            sound = self.config["listener"].get('listen_sound')
            audio_file = resolve_resource_file(sound)
            if audio_file:
                try:
                    source.mute()
                    if audio_file.endswith(".wav"):
                        play_wav(audio_file).wait()
                    elif audio_file.endswith(".mp3"):
                        play_mp3(audio_file).wait()
                    elif audio_file.endswith(".ogg"):
                        play_ogg(audio_file).wait()
                    else:
                        play_audio(audio_file).wait()
                except Exception as e:
                    LOG.warning(e)
                finally:
                    # ensure the mic is unmuted even if playback failed
                    source.unmute()

        return signaled
Example #18
    def process(self, audio, source=None):
        if source:
            LOG.debug("Muting microphone during STT")
            source.mute()
        if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
            LOG.warning("Audio too short to be processed")
        else:
            transcription = self.transcribe(audio)
            if transcription:
                # STT succeeded, send the transcribed speech on for processing
                payload = {
                    'utterances': [transcription],
                    'lang': self.stt.lang
                }
                self.emitter.emit("recognizer_loop:utterance", payload)
        if source:
            LOG.debug("Unmuting microphone")
            source.unmute()
Example #19
    def speed(self, factor):
        """
        Adjust the audio speed (pitch and tempo together).

        Technically, the speed effect only changes the sample rate information, leaving the samples themselves untouched. The rate effect is invoked automatically to resample to the output sample rate, using its default quality/speed. For higher quality or higher speed resampling, in addition to the speed effect, specify the rate effect with the desired quality option.

        Parameters:
        factor : float
        The ratio of the new speed to the old speed. For ex. 1.1 speeds up the audio by 10%; 0.9 slows it down by 10%. Note - this argument is the inverse of what is passed to the sox stretch effect for consistency with tempo.
        """
        LOG.debug("speed: " + str(factor))

        if factor < 0.5 or factor > 2:
            LOG.warning(
                "Using an extreme factor. Quality of results will be poor")

        effect_args = ['speed', '{:f}'.format(factor)]

        self.effects.extend(effect_args)
Example #20
    def speak(self, utterance):
        LOG.info("SPEAK: " + utterance)
        temppath = join(gettempdir(), self.tts.tts_name)
        if not isdir(temppath):
            makedirs(temppath)
        audio_file = join(temppath,
                          str(hash(utterance))[1:] + "." + self.tts.audio_ext)
        self.tts.get_tts(utterance, audio_file)
        try:
            if audio_file.endswith(".wav"):
                play_wav(audio_file).wait()
            elif audio_file.endswith(".mp3"):
                play_mp3(audio_file).wait()
            elif audio_file.endswith(".ogg"):
                play_ogg(audio_file).wait()
            else:
                play_audio(audio_file).wait()
        except Exception as e:
            LOG.warning(e)
Example #21
    def run(self):
        restart_attempts = 0
        with self.mic as source:
            LOG.info("Adjusting for ambient noise, be silent!!!")
            self.recognizer.adjust_for_ambient_noise(source)
            LOG.info("Ambient noise profile has been created")
            while self.state.running:
                try:
                    audio = self.recognizer.listen(source, self.emitter,
                                                   self.stream_handler)
                    if audio is not None:
                        self.queue.put((AUDIO_DATA, audio, source))
                    else:
                        LOG.warning("Audio contains no data.")
                except IOError as e:
                    # IOError will be thrown if the read is unsuccessful.
                    # If self.recognizer.overflow_exc is False (default)
                    # input buffer overflow IOErrors due to not consuming the
                    # buffers quickly enough will be silently ignored.
                    LOG.exception('IOError Exception in AudioProducer')
                    if e.errno == pyaudio.paInputOverflowed:
                        pass  # Ignore overflow errors
                    elif restart_attempts < MAX_MIC_RESTARTS:
                        # restart the mic
                        restart_attempts += 1
                        LOG.info('Restarting the microphone...')
                        source.restart()
                        LOG.info('Restarted...')
                    else:
                        LOG.error('Restarting mic doesn\'t seem to work. '
                                  'Stopping...')
                        raise
                except Exception:
                    LOG.exception('Exception in AudioProducer')
                    raise
                else:
                    # Reset restart attempt counter on successful audio read
                    restart_attempts = 0
                finally:
                    if self.stream_handler is not None:
                        self.stream_handler.stream_stop()
Example #22
 def get_mixer(self, control="Master"):
     if self._mixer is None:
         try:
             mixer = alsaaudio.Mixer(control)
         except Exception as e:
             try:
                 mixer = alsaaudio.Mixer(control)
             except Exception as e:
                 try:
                     if control != "Master":
                         LOG.warning("could not allocate requested mixer, "
                                     "falling back to 'Master'")
                         mixer = alsaaudio.Mixer("Master")
                     else:
                         raise
                 except Exception as e:
                     LOG.error("Couldn't allocate mixer")
                     LOG.exception(e)
                     raise
         self._mixer = mixer
     return self.mixer
Example #23
    def _normalized_numbers(self, sentence):
        """Normalize numbers to their word equivalents.

        Args:
            sentence (str): sentence to speak

        Returns:
            str: normalized sentence to speak
        """
        try:
            from lingua_franca.format import pronounce_number
            numbers = re.findall(r'-?\d+', sentence)
            normalized_num = [
                (num, pronounce_number(int(num)))
                for num in numbers
            ]
            for num, norm_num in normalized_num:
                sentence = sentence.replace(num, norm_num, 1)
        except TypeError:
            LOG.exception("type error in mimic2_tts.py _normalized_numbers()")
        except ImportError:
            LOG.warning("lingua_franca not installed, can not normalize numbers")
        return sentence
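
A standalone sketch of the same normalization, assuming lingua_franca is installed (recent lingua_franca releases require load_language before formatting):

import re

import lingua_franca
from lingua_franca.format import pronounce_number

lingua_franca.load_language("en")
sentence = "set an alarm for 7 30"
for num in re.findall(r'-?\d+', sentence):
    sentence = sentence.replace(num, pronounce_number(int(num)), 1)
print(sentence)  # -> "set an alarm for seven thirty"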
Example #24
    def uuid(self) -> str:
        # a unique identifier
        # github_repo.github_author , case insensitive
        # should be guaranteed to be unique
        if self.url:
            try:
                author, folder = author_repo_from_github_url(self.url)
            except Exception as e:
                LOG.error(e)
                return ""
        else:
            LOG.warning(
                "Skill installation from local source; uuid may have collisions"
            )
            author = self.skill_author or None
            folder = self.skill_folder or None
        if folder and author:
            return f"{folder}.{author}".lower()
        else:
            LOG.warning(
                "repo or author not defined, skill uuid cannot be determined!"
            )
            return ""
Example #25
    def on_volume_unduck(self, message):
        # TODO duck it anyway using set vol
        LOG.warning("Mark2 volume unduck deprecated! use volume set instead.")
        self.m2enc.hardware_volume.set_volume(float(self.current_volume))
Example #26
    def on_volume_duck(self, message):
        # TODO duck it anyway using set vol
        LOG.warning("Mark2 volume duck deprecated! use volume set instead.")
        self.m2enc.hardware_volume.set_volume(
            float(0.1))  # TODO make configurable 'duck_vol'
Example #27
    def _wait_until_wake_word(self, source, sec_per_buffer, bus):
        """Listen continuously on source until a wake word is spoken

        Args:
            source (AudioSource):  Source producing the audio chunks
            sec_per_buffer (float):  Fractional number of seconds in each chunk
            bus: message bus, used to emit hotword and utterance events
        """
        num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
                               source.SAMPLE_WIDTH)

        silence = get_silence(num_silent_bytes)

        # bytearray to store audio in
        byte_data = silence

        buffers_per_check = self.sec_between_ww_checks / sec_per_buffer
        buffers_since_check = 0.0

        # Max bytes for byte_data before audio is removed from the front
        max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
        test_size = self.sec_to_bytes(self.TEST_WW_SEC, source)

        said_wake_word = False

        # Rolling buffer to track the audio energy (loudness) heard on
        # the source recently.  An average audio energy is maintained
        # based on these levels.
        energies = []
        idx_energy = 0
        avg_energy = 0.0
        energy_avg_samples = int(5 / sec_per_buffer)  # avg over last 5 secs
        counter = 0

        # These are frames immediately after wake word is detected
        # that we want to keep to send to STT
        ww_frames = deque(maxlen=7)

        while not said_wake_word and not self._stop_signaled:
            if self._skip_wake_word(source):
                break
            chunk = self.record_sound_chunk(source)
            ww_frames.append(chunk)

            energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
            if energy < self.energy_threshold * self.multiplier:
                self._adjust_threshold(energy, sec_per_buffer)

            if len(energies) < energy_avg_samples:
                # build the average
                energies.append(energy)
                avg_energy += float(energy) / energy_avg_samples
            else:
                # maintain the running average and rolling buffer
                avg_energy -= float(energies[idx_energy]) / energy_avg_samples
                avg_energy += float(energy) / energy_avg_samples
                energies[idx_energy] = energy
                idx_energy = (idx_energy + 1) % energy_avg_samples

                # maintain the threshold using average
                if energy < avg_energy * 1.5:
                    if energy > self.energy_threshold:
                        # bump the threshold to just above this value
                        self.energy_threshold = energy * 1.2

            counter += 1

            # At first, the buffer is empty and must fill up.  After that
            # just drop the first chunk bytes to keep it the same size.
            needs_to_grow = len(byte_data) < max_size
            if needs_to_grow:
                byte_data += chunk
            else:  # Remove beginning of audio and add new chunk to end
                byte_data = byte_data[len(chunk):] + chunk

            buffers_since_check += 1.0
            self.feed_hotwords(chunk)
            if buffers_since_check > buffers_per_check:
                buffers_since_check -= buffers_per_check
                chopped = byte_data[-test_size:] \
                    if test_size < len(byte_data) else byte_data
                audio_data = chopped + silence
                said_hot_word = False
                for hotword in self.check_for_hotwords(audio_data, bus):
                    said_hot_word = True
                    engine = self.hotword_engines[hotword]["engine"]
                    sound = self.hotword_engines[hotword]["sound"]
                    utterance = self.hotword_engines[hotword]["utterance"]
                    listen = self.hotword_engines[hotword]["listen"]

                    LOG.debug("Hot Word: " + hotword)
                    # If enabled, play a wave file with a short sound to audibly
                    # indicate hotword was detected.
                    if sound:
                        try:
                            audio_file = resolve_resource_file(sound)
                            source.mute()
                            if audio_file.endswith(".wav"):
                                play_wav(audio_file).wait()
                            elif audio_file.endswith(".mp3"):
                                play_mp3(audio_file).wait()
                            elif audio_file.endswith(".ogg"):
                                play_ogg(audio_file).wait()
                            else:
                                play_audio(audio_file).wait()
                        except Exception as e:
                            LOG.warning(e)
                        finally:
                            # unmute even if playback failed
                            source.unmute()

                    # Hot Word succeeded
                    payload = {
                        'hotword': hotword,
                        'start_listening': listen,
                        'sound': sound,
                        "engine": engine.__class__.__name__
                    }
                    bus.emit("recognizer_loop:hotword", payload)

                    if utterance:
                        # send the transcribed word on for processing
                        payload = {'utterances': [utterance]}
                        bus.emit("recognizer_loop:utterance", payload)

                    audio = None
                    mtd = self._compile_metadata(hotword)
                    if self.save_wake_words:
                        # Save wake word locally
                        audio = self._create_audio_data(byte_data, source)

                        if not isdir(self.saved_wake_words_dir):
                            os.mkdir(self.saved_wake_words_dir)

                        fn = join(
                            self.saved_wake_words_dir,
                            '_'.join(str(mtd[k])
                                     for k in sorted(mtd)) + '.wav')
                        with open(fn, 'wb') as f:
                            f.write(audio.get_wav_data())

                        fn = join(
                            self.saved_wake_words_dir,
                            '_'.join(str(mtd[k])
                                     for k in sorted(mtd)) + '.json')
                        with open(fn, 'w') as f:
                            json.dump(mtd, f, indent=4)

                    if listen:
                        said_wake_word = True

                if said_hot_word:
                    # reset the audio buffer, otherwise the same wake word
                    # would be detected again on the following checks
                    byte_data = silence
Example #28
    def validate_connection(self):
        r = requests.get("https://responsivevoice.org")
        if r.status_code == 200:
            return True
        LOG.warning("Could not reach https://responsivevoice.org")
Example #29
    def validate(self):
        if self.validator:
            self.validator.validate()
        else:
            LOG.warning("could not validate " + self.tts_name)
Example #30
    def install(self,
                folder=None,
                default_branch="master",
                platform=None,
                update=True):
        if not update and self.is_previously_installed(folder):
            return False
        if self.branch_overrides:
            try:
                platform = platform or detect_enclosure()
            except Exception as e:
                LOG.error("Failed to detect platform")
                raise e
            if platform in self.branch_overrides:
                branch = self.branch_overrides[platform]
                if branch != self.branch:
                    LOG.info("Detected platform specific branch:" + branch)
                    skill = SkillEntry.from_github_url(self.url, branch)
                    return skill.install(folder, default_branch)

        LOG.info("Installing skill: {url} from branch: {branch}".format(
            url=self.url, branch=self.branch))

        # TODO: This is just patching a bug in requirements parsing DM
        if isinstance(self.requirements, list):
            LOG.warning(self.requirements)
            self._data["requirements"] = {"python": self.requirements}

        skills = self.requirements.get("skill", [])
        if skills:
            LOG.info('Installing required skills')
        for s in skills:
            skill = SkillEntry.from_github_url(s)
            skill.install(folder, default_branch)

        system = self.requirements.get("system")
        if system:
            LOG.info('Installing system requirements')
            install_system_deps(system)

        pyth = self.requirements.get("python")
        if pyth:
            LOG.info('Running pip install')
            pip_install(pyth)

        LOG.info("Downloading " + self.url)
        updated = self.download(folder)
        if self.json.get("desktopFile"):
            LOG.info("Creating desktop entry")
            # TODO support system wide? /usr/local/XXX ?
            desktop_dir = expanduser("~/.local/share/applications")
            icon_dir = expanduser("~/.local/share/icons")

            # copy the files to a unique path, this way duplicate file names
            # dont overwrite each other, eg, several skills with "icon.png"
            base_name = ".".join([self.skill_folder,
                                  self.skill_author]).lower()

            # copy icon file
            icon_file = join(icon_dir,
                             base_name + "." + self.skill_icon.split(".")[-1])
            if self.skill_icon.startswith("http"):
                content = requests.get(self.skill_icon).content
                with open(icon_file, "wb") as f:
                    f.write(content)
            elif isfile(self.skill_icon):
                shutil.copyfile(self.skill_icon, icon_file)

            # copy .desktop file
            desktop_file = join(desktop_dir, base_name + ".desktop")
            with open(desktop_file, "w") as f:
                f.write(self.desktop_file)

        return updated
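
End to end, the installer sketched above would typically be driven like this (the URL and target folder are placeholders):

skill = SkillEntry.from_github_url(
    "https://github.com/AUTHOR/SKILL-REPO")          # placeholder URL
skill.install(folder="~/.local/share/mycroft/skills")  # placeholder folder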