Code example #1
 def __init__(self, config=CONFIGURATION, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.config = config
     self.loop = RecognizerLoop(self.config)
     self.tts = TTSFactory.create(self.config["tts"])
     LOG.debug("Using TTS engine: " + self.tts.__class__.__name__)
     self.tts.validate()
Code example #2
    def select_best(self, results):
        # Look at any replies that arrived before the timeout
        # Find response(s) with the highest confidence
        best = None
        ties = []
        for handler in results:
            if not best or handler['match_confidence'] > best[
                    'match_confidence']:
                best = handler
                ties = [best]
            elif handler['match_confidence'] == best['match_confidence']:
                ties.append(handler)

        if ties:
            # select randomly
            selected = random.choice(ties)

            if self.gui_only:
                # select only from GUI results if preference is set
                # WARNING this can effectively make it so that the same
                # skill is always selected
                gui_results = [
                    r for r in ties if r["playback"] == CPSPlayback.GUI
                ]
                if len(gui_results):
                    selected = random.choice(gui_results)

            # TODO: Ask user to pick between ties or do it automagically
        else:
            selected = best
        LOG.debug(
            f"BetterCPS selected: {selected['skill_id']} - {selected['match_confidence']}"
        )
        return selected
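
For illustration, the tie-handling above can be exercised on plain dictionaries: every result that shares the top match_confidence ends up in ties, and one of them is chosen at random. A minimal sketch with made-up result dicts (the playback field uses placeholder strings instead of the CPSPlayback enum):

import random

results = [
    {"skill_id": "skill-news", "match_confidence": 80, "playback": "audio"},
    {"skill_id": "skill-music", "match_confidence": 95, "playback": "gui"},
    {"skill_id": "skill-radio", "match_confidence": 95, "playback": "audio"},
]

# every result tied for the highest confidence (here: skill-music and skill-radio)
top = max(r["match_confidence"] for r in results)
ties = [r for r in results if r["match_confidence"] == top]

# break the tie randomly, as select_best() does
selected = random.choice(ties)
print(selected["skill_id"], selected["match_confidence"])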
Code example #3
    def run(self):
        LOG.debug("pulse thread started")
        self.tmp_leds = []
        for x in range(0, 10):
            self.tmp_leds.append(self.color_tup)

        self.led_obj.brightness = self.brightness / 100
        self.led_obj.set_leds(self.tmp_leds)

        while not self.exit_flag:

            if (self.brightness + self.step_size) > 100:
                self.brightness = self.brightness - self.step_size
                self.step_size = self.step_size * -1

            elif (self.brightness + self.step_size) < 0:
                self.brightness = self.brightness - self.step_size
                self.step_size = self.step_size * -1

            else:
                self.brightness += self.step_size

            self.led_obj.brightness = self.brightness / 100
            self.led_obj.set_leds(self.tmp_leds)

            time.sleep(self.delay)

        LOG.debug("pulse thread stopped")
        self.led_obj.brightness = 1.0
        self.led_obj.fill(self.pal_obj.BLACK)
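
The loop above produces the pulsing effect by letting brightness bounce between 0 and 100: whenever the next step would leave that range, step_size flips sign. A standalone sketch of the same bounce logic with illustrative values (no LED hardware involved):

brightness = 50
step_size = 20

values = []
for _ in range(12):
    if (brightness + step_size) > 100 or (brightness + step_size) < 0:
        # reverse direction instead of overshooting
        brightness -= step_size
        step_size *= -1
    else:
        brightness += step_size
    values.append(brightness)

print(values)  # rises to 90, falls to 10, then climbs again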
Code example #4
    def bass(self, gain_db, frequency=100.0, slope=0.5):
        """
        Boost or cut the bass (lower) frequencies of the audio using a two-pole shelving filter with a response similar to that of a standard hi-fi’s tone-controls. This is also known as shelving equalisation.

        The filters are described in detail in http://musicdsp.org/files/Audio-EQ-Cookbook.txt

        Parameters:
        gain_db : float
        The gain at 0 Hz. For a large cut use -20, for a large boost use 20.

        frequency : float, default=100.0
        The filter’s cutoff frequency in Hz.

        slope : float, default=0.5
        The steepness of the filter’s shelf transition. For a gentle slope use 0.3, and use 1.0 for a steep slope.
        """
        LOG.debug("bass")
        if frequency <= 0:
            raise ValueError("frequency must be a positive number.")

        effect_args = [
            'bass', '{:f}'.format(gain_db), '{:f}'.format(frequency),
            '{:f}s'.format(slope)
        ]

        self.effects.extend(effect_args)
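
Note that bass() only validates its inputs and appends sox-style tokens to self.effects; the filter itself runs later when the effect chain is executed. A small runnable sketch that reproduces the tokens two illustrative calls would append (bass_args is a stand-in helper introduced here, not part of the original class; the values are made up):

def bass_args(gain_db, frequency=100.0, slope=0.5):
    # same formatting the method above uses
    return ['bass', '{:f}'.format(gain_db),
            '{:f}'.format(frequency), '{:f}s'.format(slope)]

effects = []
effects.extend(bass_args(-12.0))                # strong cut at the default 100 Hz shelf
effects.extend(bass_args(6.0, frequency=80.0))  # gentle boost below 80 Hz
print(effects)
# ['bass', '-12.000000', '100.000000', '0.500000s',
#  'bass', '6.000000', '80.000000', '0.500000s']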
Code example #5
    def bandreject(self, frequency, width_q=2.0):
        """
        Apply a two-pole Butterworth band-reject filter with the given central frequency, and (3dB-point) band-width. The filter rolls off at 6dB per octave (20dB per decade) and is described in detail in http://musicdsp.org/files/Audio-EQ-Cookbook.txt

        Parameters:
        frequency : float
        The filter’s center frequency in Hz.

        width_q : float, default=2.0
        The filter’s width as a Q-factor.
        """
        LOG.debug("bandreject")
        if frequency <= 0:
            raise ValueError("frequency must be a positive number.")

        if width_q <= 0:
            raise ValueError("width_q must be a positive number.")

        effect_args = [
            'bandreject', '{:f}'.format(frequency), '{:f}q'.format(width_q)
        ]

        self.effects.extend(effect_args)
Code example #6
    def lowpass(self, frequency, width_q=0.707, n_poles=2):
        """
        Apply a low-pass filter with 3dB point frequency. The filter can be either single-pole or double-pole.
        The filters roll off at 6dB per pole per octave (20dB per pole per decade).

        Parameters:
        frequency : float
        The filter’s cutoff frequency in Hz.

        width_q : float, default=0.707
        The filter’s width as a Q-factor. Applies only when n_poles=2. The default gives a Butterworth response.

        n_poles : int, default=2
        The number of poles in the filter. Must be either 1 or 2.
        """
        LOG.debug("lowpass")
        if frequency <= 0:
            raise ValueError("frequency must be a positive number.")

        if width_q <= 0:
            raise ValueError("width_q must be a positive number.")

        if n_poles not in [1, 2]:
            raise ValueError("n_poles must be 1 or 2.")

        effect_args = [
            'lowpass', '-{}'.format(n_poles), '{:f}'.format(frequency)
        ]

        if n_poles == 2:
            effect_args.append('{:f}q'.format(width_q))

        self.effects.extend(effect_args)
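
The n_poles argument changes the shape of the appended command: only the two-pole form carries the trailing Q-factor token. A small runnable sketch of the two variants (lowpass_args is an illustrative helper, not part of the original class):

def lowpass_args(frequency, width_q=0.707, n_poles=2):
    # same formatting the method above uses
    args = ['lowpass', '-{}'.format(n_poles), '{:f}'.format(frequency)]
    if n_poles == 2:
        args.append('{:f}q'.format(width_q))
    return args

print(lowpass_args(3000.0))             # ['lowpass', '-2', '3000.000000', '0.707000q']
print(lowpass_args(3000.0, n_poles=1))  # ['lowpass', '-1', '3000.000000']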
Code example #7
    def stretch(self, factor, window=20):
        """
        Change the audio duration (but not its pitch). Unless factor is close to 1, use the tempo effect instead.

        This effect is broadly equivalent to the tempo effect with search set to zero, so in general, its results are comparatively poor; it is retained as it can sometimes out-perform tempo for small factors.

        Parameters:
        factor : float
        The ratio of the new tempo to the old tempo. For example, 1.1 speeds up the tempo by 10% and 0.9 slows it down by 10%. Note: this argument is the inverse of what is passed to the sox stretch effect, for consistency with tempo.

        window : float, default=20
        Window size in milliseconds.
        """
        LOG.debug("stretch")
        if factor <= 0:
            raise ValueError("factor must be a positive number")

        if factor < 0.5 or factor > 2:
            LOG.warning("Using an extreme time stretching factor. "
                        "Quality of results will be poor")

        if abs(factor - 1.0) > 0.1:
            LOG.warning("For this stretch factor, "
                        "the tempo effect has better performance.")

        if window <= 0:
            raise ValueError("window must be a positive number.")

        effect_args = ['stretch', '{:f}'.format(factor), '{:f}'.format(window)]

        self.effects.extend(effect_args)
Code example #8
 def play_radio_drama(self, message):
     LOG.debug("Radio Theatre BetterCPS match")
     # TODO new type in next ovos_utils alpha release
     try:
         self._play(message, CPSMatchType.RADIO_THEATRE)
     except AttributeError:
         # RADIO_THEATRE is not available in this ovos_utils release yet
         self._play(message, CPSMatchType.AUDIOBOOK)
Code example #9
    def equalizer(self, frequency, width_q, gain_db):
        """
        Apply a two-pole peaking equalisation (EQ) filter to boost or reduce around a given frequency. This effect can be applied multiple times to produce complex EQ curves.

        Parameters:
        frequency : float
        The filter’s central frequency in Hz.

        width_q : float
        The filter’s width as a Q-factor.

        gain_db : float
        The filter’s gain in dB.
        """
        LOG.debug("equalizer")
        if frequency <= 0:
            raise ValueError("frequency must be a positive number.")

        if width_q <= 0:
            raise ValueError("width_q must be a positive number.")

        effect_args = [
            'equalizer', '{:f}'.format(frequency), '{:f}q'.format(width_q),
            '{:f}'.format(gain_db)
        ]
        self.effects.extend(effect_args)
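
Since the docstring notes the effect can be applied repeatedly, each call simply appends one more equalizer triple, so a multi-band EQ curve is just a sequence of calls. An illustrative sketch (eq_args is a stand-in helper introduced here; the band values are made up):

def eq_args(frequency, width_q, gain_db):
    # same formatting the method above uses
    return ['equalizer', '{:f}'.format(frequency),
            '{:f}q'.format(width_q), '{:f}'.format(gain_db)]

effects = []
effects.extend(eq_args(250.0, 1.0, -3.0))    # tame the low mids
effects.extend(eq_args(4000.0, 2.0, 2.5))    # add some presence
print(effects)
# ['equalizer', '250.000000', '1.000000q', '-3.000000',
#  'equalizer', '4000.000000', '2.000000q', '2.500000']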
Code example #10
 def play_behind_scenes(self, message):
     LOG.debug("Behind the Scenes BetterCPS match")
     # TODO new type in next ovos_utils alpha release
     try:
         self._play(message, CPSMatchType.BEHIND_THE_SCENES)
     except AttributeError:
         # BEHIND_THE_SCENES is not available in this ovos_utils release yet
         self._play(message, CPSMatchType.VIDEO)
Code example #11
    def transcribe(self, audio):
        def send_unknown_intent():
            """ Send message that nothing was transcribed. """
            self.emitter.emit('recognizer_loop:speech.recognition.unknown')

        try:
            # Invoke the STT engine on the audio clip
            text = self.stt.execute(audio)
            if text is not None:
                text = text.lower().strip()
                LOG.debug("STT: " + text)
            else:
                send_unknown_intent()
                LOG.info('no words were transcribed')
            self.save_utt(text, audio)
            return text
        except sr.RequestError as e:
            LOG.error("Could not request Speech Recognition {0}".format(e))
        except ConnectionError as e:
            LOG.error("Connection Error: {0}".format(e))
            self.emitter.emit("recognizer_loop:no_internet")
        except RequestException as e:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
        except sr.UnknownValueError:
            LOG.error("Speech Recognition could not understand audio")
        except Exception as e:
            send_unknown_intent()
            LOG.exception(e)
            LOG.error("Speech Recognition Error")
        self.play_error()
        self.save_utt("", audio)
        return None
Code example #12
    def _load_config(self, config=None):
        """Load configuration parameters from configuration."""
        config = config or self.config
        self.config_core = config
        self.lang = config.get('lang')
        self.config = config.get('listener')
        rate = self.config.get('sample_rate')

        device_index = self.config.get('device_index')
        device_name = self.config.get('device_name')
        if not device_index and device_name:
            device_index = find_input_device(device_name)

        LOG.debug('Using microphone (None = default): ' + str(device_index))

        self.microphone = MutableMicrophone(device_index,
                                            rate,
                                            mute=self.mute_calls > 0)

        # TODO - localization
        self.wakeup_recognizer = self.create_wakeup_recognizer()
        self.hotword_engines = {}
        self.create_hotword_engines()
        self.responsive_recognizer = ResponsiveRecognizer(self.hotword_engines)
        self.state = RecognizerLoopState()
Code example #13
 def on_preferences_changed(self, event):
     preferences = json.loads(event["data"]["preferences"])
     for pref in preferences:
         user_id = pref["user_id"]
         category = pref["category"]
         value = pref["value"]
         LOG.debug(category + ":" + value)
Code example #14
    def transcribe(self, audio):
        def send_unknown_intent():
            """ Send message that nothing was transcribed. """
            self.emitter.emit('recognizer_loop:speech.recognition.unknown')

        try:
            # Invoke the STT engine on the audio clip
            text = self.stt.execute(audio)
            if text is not None:
                text = text.lower().strip()
                LOG.debug("STT: " + text)
            else:
                send_unknown_intent()
                LOG.info('no words were transcribed')
            if self.save_utterances:
                mtd = self._compile_metadata(text)

                filename = os.path.join(self.saved_utterances_dir, mtd["name"])
                with open(filename, 'wb') as f:
                    f.write(audio.get_wav_data())

                filename = os.path.join(self.saved_utterances_dir,
                                        mtd["name"].replace(".wav", ".json"))
                with open(filename, 'w') as f:
                    json.dump(mtd, f, indent=4)

            return text
        except sr.RequestError as e:
            LOG.error("Could not request Speech Recognition {0}".format(e))
        except ConnectionError as e:
            LOG.error("Connection Error: {0}".format(e))

            self.emitter.emit("recognizer_loop:no_internet")
        except RequestException as e:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
        except Exception as e:
            send_unknown_intent()
            LOG.error(e)
            LOG.error("Speech Recognition could not understand audio")
            # If enabled, play a wave file with a short sound to audibly
            # indicate speech recognition failed
            sound = CONFIGURATION["listener"].get('error_sound')
            audio_file = resolve_resource_file(sound)
            try:
                if audio_file:
                    if audio_file.endswith(".wav"):
                        play_wav(audio_file).wait()
                    elif audio_file.endswith(".mp3"):
                        play_mp3(audio_file).wait()
                    elif audio_file.endswith(".ogg"):
                        play_ogg(audio_file).wait()
                    else:
                        play_audio(audio_file).wait()
            except Exception as e:
                LOG.warning(e)
            return None

        # Only reached after one of the network-related errors above,
        # since those handlers fall through without returning
        dialog_name = 'not connected to the internet'
        self.emitter.emit('speak', {'utterance': dialog_name})
Code example #15
File: protocol.py  Project: JarbasHiveMind/LocalHive
 def send2peer(self, message, peer):
     if peer in self.clients:
         LOG.debug(f"sending to: {peer}")
         client = self.clients[peer].get("instance")
         msg = HiveMessage(HiveMessageType.BUS,
                           source_peer=self.peer,
                           payload=message)
         self.interface.send(msg, client)
Code example #16
    def handle_vol_up(self):
        self.shadow_volume = self.hardware_volume.get_volume()
        LOG.debug("Mark2:HardwareEnclosure:handle_vol_up()-was %s" % (self.shadow_volume))
        if self.shadow_volume < self.max_volume:
            self.shadow_volume += self.volume_increment

        self.hardware_volume.set_volume(self.shadow_volume)
        self.show_volume(self.shadow_volume)
Code example #17
File: hardware_enclosure.py  Project: JarbasAl/mk2
 def handle_action(self):
     LOG.debug("Mark2:HardwareEnclosure:handle_action()")
     # debounce: ignore presses within 10 seconds of the last action
     if time.time() - self.last_action > 10:
         self.last_action = time.time()
         if self.overide_action is not None:
             self.overide_action()
         else:
             create_signal('buttonPress')
Code example #18
    def handle_broadcast_message(self, data, client):
        payload = data["payload"]

        LOG.info("Received broadcast message at: " + self.node_id)
        LOG.debug("ROUTE: " + str(data["route"]))
        LOG.debug("PAYLOAD: " + str(payload))
        # echo to nodered (all connections/flows)
        # TODO skip source peer
        self.nodered_send(message=Message("hivemind.broadcast", payload))
Code example #19
File: detect.py  Project: forslund/ovos_utils
def detect_lang_google(text, return_dict=False):
    if google_translator is None:
        LOG.debug("run pip install google_trans_new")
        raise ImportError("google_trans_new not installed")
    translator = google_translator()
    tx = translator.detect(text)
    if return_dict:
        return {"lang_code": tx[0], "lang": tx[1]}
    return tx[0]
Code example #20
File: loader.py  Project: JarbasHiveMind/LocalHive
 def handle_skill_emit(self, message):
     if isinstance(message, str):
         message = Message.deserialize(message)
     message.context["skill_id"] = self.skill_id
     if not message.context.get("source"):
         message.context["source"] = self.skill_id
     msg = HiveMessage(HiveMessageType.BUS, payload=message)
     LOG.debug(f"<<: {message.msg_type}")
     self.hive.emit(msg)
Code example #21
    def _stop(self, message=None):
        LOG.debug('stopping window services')
        with self.service_lock:
            if self.current:
                name = self.current.name
                if self.current.stop():
                    self.bus.emit(Message("mycroft.stop.handled",
                                          {"by": "window:" + name}))

                self.current = None
Code example #22
    def _register_object(self, message, object_name, register_func):
        name = message.data['name']
        samples = message.data['samples']

        LOG.debug(
            'Registering ' + self.engine.name + ' ' + object_name + ': ' + name)

        register_func(name, samples)
        self.train_time = get_time() + self.train_delay
        self.wait_and_train()
Code example #23
 def get_prefered(self, utterance=""):
     # Find if the user wants to use a specific backend
     for s in self.services:
         if s.name in utterance:
             prefered_service = s
             LOG.debug(s.name + ' would be preferred')
             break
     else:
         prefered_service = None
     return prefered_service
Code example #24
File: hardware_enclosure.py  Project: JarbasAl/mk2
 def handle_mute(self, val):
     LOG.debug("Mark2:HardwareEnclosure:handle_mute() - val = %s" % (val, ))
     if val != self.last_mute:
         self.last_mute = val
         if val == 0:
             self.leds._set_led_with_brightness(self.mute_led,
                                                self.palette.GREEN, 0.5)
         else:
             self.leds._set_led_with_brightness(self.mute_led,
                                                self.palette.RED, 0.5)
Code example #25
 def handle_stop_recording(self, message):
     background_color = self.m2enc.palette.BLUE
     foreground_color = self.m2enc.palette.BLACK
     LOG.debug("Got spoken stuff")
     if self.pulseLedThread is not None:
         self.pulseLedThread.exit_flag = True
         self.pulseLedThread.join()
         self.pulseLedThread = None
     if self.chaseLedThread is None:
         self.chaseLedThread = chaseLedThread(self.m2enc.leds, background_color, foreground_color)
         self.chaseLedThread.start()
Code example #26
def translate_google(text, lang="en-us", source_lang=None):
    if google_translator is None:
        LOG.debug("run pip install google_trans_new")
        raise ImportError("google_trans_new not installed")
    translator = google_translator()
    lang = lang.split("-")[0]
    if source_lang:
        source_lang = source_lang.split("-")[0]
        tx = translator.translate(text, lang_src=source_lang, lang_tgt=lang)
    else:
        tx = translator.translate(text, lang_tgt=lang)
    return tx.strip()
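
translate_google() strips language tags to their primary subtag, so full locales such as en-us are accepted; detect_lang_google() from code example #19 returns either the detected code or a dict with code and name. A usage sketch, assuming google_trans_new is installed and both helpers are importable (the import path is not shown in the snippets, and the outputs are only what one would expect):

# hypothetical usage; the import path is an assumption
text = translate_google("bom dia", lang="en-us", source_lang="pt-br")
print(text)          # expected: an English greeting such as "good morning"

lang_code = detect_lang_google("bonjour tout le monde")
print(lang_code)     # expected: "fr"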
Code example #27
    def _set_led(self, pixel, color):
        """ internal interface
            permits access to the 
            reserved leds """
        red_val = color[0]
        green_val = color[1]
        blue_val = color[2]

        cmd = "i2cset -y 1 %d %d %d %d %d i " % (self.device_addr, pixel,
                                                 red_val, green_val, blue_val)
        os.system(cmd)
        LOG.debug("Execute %s" % (cmd, ))
Code example #28
 def bootstrap(self, new_only=True):
     base_db = join(dirname(dirname(__file__)), "res",
                    self.db.name + ".jsondb")
     if not len(self.db):
         LOG.info("Bootstrapping {database}, this might take a "
                  "while!".format(database=self.name))
         if isfile(base_db):
             LOG.debug("Bootstrapping from bundled skill list")
             shutil.copyfile(base_db, self.db.path)
             self.db.reset()
         else:
             LOG.debug("Downloading skill list")
             self.sync_skills_list(new_only=new_only)
Code example #29
    def save_utt(self, text, audio):
        if self.save_utterances:
            LOG.debug("saving utterance")
            mtd = self._compile_metadata(text)

            filename = os.path.join(self.saved_utterances_dir, mtd["name"])
            with open(filename, 'wb') as f:
                f.write(audio.get_wav_data())

            filename = os.path.join(self.saved_utterances_dir,
                                    mtd["name"].replace(".wav", ".json"))
            with open(filename, 'w') as f:
                json.dump(mtd, f, indent=4)
Code example #30
 def init_dialog(self, root_directory):
     # If "<skill>/dialog/<lang>" exists, load from there.  Otherwise
     # load dialog from "<skill>/locale/<lang>"
     dialog_dir = get_language_dir(join(root_directory, 'dialog'),
                                   self.lang)
     locale_dir = get_language_dir(join(root_directory, 'locale'),
                                   self.lang)
     if exists(dialog_dir):
         self.dialog_renderer = load_dialogs(dialog_dir)
     elif exists(locale_dir):
         self.dialog_renderer = load_dialogs(locale_dir)
     else:
         LOG.debug('No dialog loaded')