Ejemplo n.º 1
0
    def init_settings(self):
        """
        Initializes yml-based skill config settings, updating from default dict as necessary for added parameters
        """
        # Prefer YAML settingsmeta; fall back to JSON; else no metadata
        if os.path.isfile(os.path.join(self.root_dir, "settingsmeta.yml")):
            skill_meta = NGIConfig("settingsmeta", self.root_dir).content
        elif os.path.isfile(os.path.join(self.root_dir, "settingsmeta.json")):
            with open(os.path.join(self.root_dir, "settingsmeta.json")) as f:
                skill_meta = json.load(f)
        else:
            skill_meta = None

        # Load defaults from settingsmeta
        default = {"__mycroft_skill_firstrun": True}
        if skill_meta:
            # LOG.info(skill_meta)
            LOG.info(skill_meta["skillMetadata"]["sections"])
            for section in skill_meta["skillMetadata"]["sections"]:
                for pref in section.get("fields", []):
                    if not pref.get("name"):
                        # Fields without a "name" carry no setting data
                        LOG.debug(f"non-data skill meta: {pref}")
                    else:
                        # Normalize string booleans and YAML maps to native types
                        if pref.get("value") == "true":
                            value = True
                        elif pref.get("value") == "false":
                            value = False
                        elif isinstance(pref.get("value"), CommentedMap):
                            value = dict(pref.get("value"))
                        else:
                            value = pref.get("value")
                        default[pref["name"]] = value

        # Load or init configuration; relocate any legacy config file found
        # in the skill directory to the skill's file_system path first
        if os.path.isfile(os.path.join(self.root_dir, f"{self.name}.yml")):
            LOG.warning(
                f"Config found in skill directory for {self.name}! Relocating to: {self.file_system.path}"
            )
            shutil.move(os.path.join(self.root_dir, f"{self.name}.yml"),
                        self.file_system.path)
        self.ngi_settings = NGIConfig(self.name, self.file_system.path)

        # Load any new or updated keys
        try:
            LOG.debug(self.ngi_settings.content)
            LOG.debug(default)
            if self.ngi_settings.content and len(
                    self.ngi_settings.content.keys()) > 0 and len(
                        default.keys()) > 0:
                # Existing settings present: merge in any new default keys
                self.ngi_settings.make_equal_by_keys(default, recursive=False)
            elif len(default.keys()) > 0:
                LOG.info("No settings to load, use default")
                self.ngi_settings.populate(default)
        except Exception as e:
            # On any merge failure, fall back to the parsed defaults
            LOG.error(e)
            self.ngi_settings.populate(default)

        # Make sure settings is initialized as a dictionary
        if self.ngi_settings.content:
            self.settings = self.ngi_settings.content  # Uses the default self.settings object for skills compat
        LOG.debug(f"loaded settings={self.settings}")
Ejemplo n.º 2
0
 def preference_skill(self, message=None) -> dict:
     """
     Returns the skill settings configuration
     Equivalent to self.settings for non-server
     :param message: Message associated with request
     :return: dict of skill preferences
     """
     # Per-user overrides only apply in server mode, and only when the
     # requesting user can be resolved from the message
     nick = get_message_user(message) if message else None
     if self.server and nick:
         try:
             skill = self.skill_id
             LOG.info(f"Get server prefs for skill={skill}")
             # User-specific skill settings stored in the message context
             user_overrides = message.context["nick_profiles"][nick][
                 "skills"].get(self.skill_id, dict())
             LOG.debug(user_overrides)
             # User overrides take precedence over the skill defaults
             merged_settings = {**self.settings, **user_overrides}
             if user_overrides.keys() != merged_settings.keys():
                 # Defaults contributed new keys; persist merged settings
                 LOG.info(
                     f"New settings keys: user={nick}|skill={self.skill_id}|user={user_overrides}"
                 )
                 self.update_skill_settings(merged_settings, message)
             return merged_settings
         except Exception as e:
             # Fall through to local settings on any lookup failure
             LOG.error(e)
     return self.settings
Ejemplo n.º 3
0
 def _clear_gui_timeout(self):
     """
     Scheduled-event handler that resets the GUI by clearing any
     currently displayed skill pages.
     """
     LOG.info("Reset GUI!")
     self.gui.clear()
Ejemplo n.º 4
0
 def voc_match(self, utt, voc_filename, lang=None, exact=False):
     """
     Determine whether the utterance matches the named vocab resource,
     falling back to vocab bundled in neon_core when the base class
     cannot find the requested .voc file.
     :param utt: utterance to test
     :param voc_filename: name of the .voc resource (without extension)
     :param lang: optional language override; defaults to self.lang
     :param exact: if True, an entry must equal the whole utterance
     :return: True if a vocab match is found
     :raises FileNotFoundError: if the vocab is absent from neon_core too
     """
     # TODO: Handles bug to be addressed in: https://github.com/OpenVoiceOS/ovos_utils/issues/73
     try:
         return super().voc_match(utt, voc_filename, lang, exact)
     except FileNotFoundError:
         # Base lookup failed: import the helpers the fallback path below
         # needs (names bound here remain function-local and in scope)
         LOG.info(f"`{voc_filename}` not found, checking in neon_core")
         from mycroft.skills.skill_data import read_vocab_file
         from neon_utils.packaging_utils import get_core_root
         from itertools import chain
         import re
     # NOTE: everything below runs only on the FileNotFoundError path,
     # since the try block returns on success
     lang = lang or self.lang
     voc = os.path.join(get_core_root(), "neon_core", "res", "text", lang, f"{voc_filename}.voc")
     if not os.path.exists(voc):
         raise FileNotFoundError(voc)
     vocab = read_vocab_file(voc)
     # Flatten the vocab entries and cache them for reuse
     cache_key = lang + voc_filename
     self.voc_match_cache[cache_key] = list(chain(*vocab))
     if utt:
         if exact:
             # Check for exact match
             return any(i.strip() == utt
                        for i in self.voc_match_cache[cache_key])
         else:
             # Check for matches against complete words
             return any([re.match(r'.*\b' + i + r'\b.*', utt)
                         for i in self.voc_match_cache[cache_key]])
     else:
         return False
Ejemplo n.º 5
0
    def __init__(self, results_event, config=None):
        """
        Initialize the DeepSpeech local streaming STT engine.
        Supports both base-class __init__ forms: the two-parameter
        (results_event, config) signature and an older no-argument one.
        :param results_event: event used to signal transcription results
        :param config: optional STT configuration dict
        :raises ValueError: if the configured language is not English
        """
        # Inspect the parent __init__ signature to remain compatible with
        # both old and new base-class APIs
        if len(
                signature(super(DeepSpeechLocalStreamingSTT,
                                self).__init__).parameters) == 2:
            super(DeepSpeechLocalStreamingSTT,
                  self).__init__(results_event, config)
        else:
            LOG.warning(
                f"Shorter Signature Found; config will be ignored and results_event will not be handled!"
            )
            super(DeepSpeechLocalStreamingSTT, self).__init__()
            self.results_event = None
        # override language with module specific language selection
        self.language = self.config.get('lang') or self.lang
        self.queue = None
        if not self.language.startswith("en"):
            raise ValueError("DeepSpeech is currently english only")

        # Model and scorer default to the shared neon data directory
        model_path = self.config.get("model_path") or \
            os.path.expanduser("~/.local/share/neon/deepspeech-0.9.3-models.pbmm")
        scorer_path = self.config.get("scorer_path") or \
            os.path.expanduser("~/.local/share/neon/deepspeech-0.9.3-models.scorer")
        if not os.path.isfile(model_path):
            # A missing model is downloaded rather than treated as fatal
            LOG.error("Model not found and will be downloaded!")
            LOG.error(model_path)
            get_model()

        self.client = deepspeech.Model(model_path)

        # The scorer is optional; skipped when absent
        if not scorer_path or not os.path.isfile(scorer_path):
            LOG.warning("You should provide a valid scorer")
            LOG.info(
                "download scorer from https://github.com/mozilla/DeepSpeech")
        else:
            self.client.enableExternalScorer(scorer_path)
Ejemplo n.º 6
0
    def send_email(self, title, body, message=None, email_addr=None, attachments=None):
        """
        Send an email to the registered user's email.
        Email address priority: email_addr, user prefs from message, fallback to DeviceApi for Mycroft method

        Arguments:
            title (str): Title of email
            body  (str): HTML body of email. This supports
                         simple HTML like bold and italics
            email_addr (str): Optional email address to use
            attachments (dict): Optional dict of file names to Base64 encoded files
            message (Message): Optional message to get email from
        """
        # Resolve recipient from user preferences when not given explicitly
        if not email_addr and message:
            email_addr = self.preference_user(message).get("email")

        if email_addr:
            # Known recipient: route the email through the Neon server
            LOG.info("Send email via Neon Server")
            try:
                LOG.debug(f"body={body}")
                self.bus.emit(Message("neon.send_email", {"title": title, "email": email_addr, "body": body,
                                                          "attachments": attachments}))
            except Exception as e:
                LOG.error(e)
        else:
            # No address available; fall back to the base (DeviceApi) method
            super().send_email(title, body)
Ejemplo n.º 7
0
def repo_is_neon(repo_url: str) -> bool:
    """
    Determines if the specified repository url is part of the NeonGeckoCom org on github
    Args:
        repo_url: string url to check
    Returns:
        True if the repository URL is known to be accessible using a neon auth key
    Raises:
        ValueError: if `repo_url` is not a valid absolute URL, or is a
            github URL with no author/org path component
    """
    url = urlparse(repo_url)
    if not url.scheme or not url.netloc:
        raise ValueError(f"{repo_url} is not a valid url")
    # Only github-hosted repositories are considered
    if any(x in url.netloc for x in ("github.com", "githubusercontent.com")):
        try:
            # Path is expected to look like "/<author>/<repository>"
            author = url.path.split('/')[1]
        except IndexError:
            raise ValueError(f"{repo_url} is not a valid github url")
        if author.lower() == "neongeckocom":
            return True
        if author.lower().startswith(
                "neon"):  # TODO: Get token and scrape org? DM
            LOG.info(f"Assuming repository uses Neon auth: {repo_url}")
            return True
    return False
Ejemplo n.º 8
0
    def _do_net_check(self):
        """
        Check for internet connectivity. If offline, either prompt a
        previously-paired unit to connect, or start the out-of-box wifi
        setup flow on a factory-fresh unit.
        """
        # TODO: This should live in the derived Enclosure, e.g. EnclosureMark1
        LOG.info("Checking internet connection")
        if not connected():  # and self.conn_monitor is None:
            if has_been_paired():
                # TODO: Enclosure/localization
                self.speak("This unit is not connected to the Internet. "
                           "Either plug in a network cable or setup your "
                           "wifi connection.")
            else:
                # Begin the unit startup process, this is the first time it
                # is being run with factory defaults.

                # TODO: This logic should be in EnclosureMark1
                # TODO: Enclosure/localization

                # Don't listen to mic during this out-of-box experience
                self.bus.emit(Message("mycroft.mic.mute"))
                # Setup handler to unmute mic at the end of on boarding
                # i.e. after pairing is complete
                self.bus.once('mycroft.paired', self._handle_pairing_complete)

                self.speak(mycroft.dialog.get('mycroft.intro'))
                wait_while_speaking()
                time.sleep(2)  # a pause sounds better than just jumping in

                # Kick off wifi-setup automatically
                data = {'allow_timeout': False, 'lang': self.lang}
                self.bus.emit(Message('system.wifi.setup', data))
Ejemplo n.º 9
0
    def __init__(self, results_event, config=None):
        """
        Initialize the DeepSpeech local streaming STT engine.
        :param results_event: event used to signal transcription results
        :param config: optional STT configuration dict
        :raises ValueError: if the configured language is not English
        :raises FileNotFoundError: if no valid model file is found
        """
        super(DeepSpeechLocalStreamingSTT,
              self).__init__(results_event, config)
        # override language with module specific language selection
        self.language = self.config.get('lang') or self.lang
        self.queue = None
        if not self.language.startswith("en"):
            raise ValueError("DeepSpeech is currently english only")

        # Model and scorer default to the shared neon data directory
        model_path = self.config.get("model_path") or \
            os.path.expanduser("~/.local/share/neon/deepspeech-0.8.1-models.pbmm")
        scorer_path = self.config.get("scorer_path") or \
            os.path.expanduser("~/.local/share/neon/deepspeech-0.8.1-models.scorer")
        if not os.path.isfile(model_path):
            # A missing model file is fatal
            LOG.error("You need to provide a valid model file")
            LOG.error(model_path)
            LOG.info(
                "download a model from https://github.com/mozilla/DeepSpeech")
            raise FileNotFoundError
        # The scorer is optional; skipped when absent
        if not scorer_path or not os.path.isfile(scorer_path):
            LOG.warning("You should provide a valid scorer")
            LOG.info(
                "download scorer from https://github.com/mozilla/DeepSpeech")

        self.client = deepspeech.Model(model_path)
        if scorer_path:
            self.client.enableExternalScorer(scorer_path)
Ejemplo n.º 10
0
    def handle_get_stt(self, message: Message):
        """
        Handles a request for stt.
        Emits a response to the sender with stt data or error data
        :param message: Message associated with request
        """
        if message.data.get("audio_data"):
            # Inline audio payload: decode it to a temporary wav file
            wav_file_path = self._write_encoded_file(
                message.data.pop("audio_data"))
        else:
            wav_file_path = message.data.get("audio_file")
        lang = message.data.get("lang")
        ident = message.context.get("ident") or "neon.get_stt.response"
        LOG.info(f"Handling STT request: {ident}")
        if not wav_file_path:
            self.bus.emit(message.reply(
                ident, data={"error": "audio_file not specified!"}))
            return

        if not os.path.isfile(wav_file_path):
            self.bus.emit(message.reply(
                ident, data={"error": f"{wav_file_path} Not found!"}))
            # Stop here; previously this fell through and attempted to
            # process the missing file anyway
            return

        try:
            _, parser_data, transcriptions = \
                self._get_stt_from_file(wav_file_path, lang)
            self.bus.emit(message.reply(ident,
                                        data={"parser_data": parser_data,
                                              "transcripts": transcriptions}))
        except Exception as e:
            LOG.error(e)
            self.bus.emit(message.reply(ident, data={"error": repr(e)}))
Ejemplo n.º 11
0
    def transcribe(self, audio, lang):
        """
        Run STT on an audio clip, falling back to a secondary STT plugin
        if the primary plugin raises.
        :param audio: audio to transcribe
        :param lang: language of the audio
        :return: list of normalized (lowercased, stripped) transcriptions,
            or None if recognition failed
        """
        def send_unknown_intent():
            """ Send message that nothing was transcribed. """
            self.loop.emit('recognizer_loop:speech.recognition.unknown')

        try:
            # Invoke the STT engine on the audio clip
            try:
                transcriptions = self.loop.stt.execute(audio, language=lang)
            except Exception as e:
                if self.loop.fallback_stt:
                    LOG.warning(f"Using fallback STT, main plugin failed: {e}")
                    transcriptions = self.loop.fallback_stt.execute(
                        audio, language=lang)
                else:
                    raise e
            # Normalize single-string results to a list for callers
            if isinstance(transcriptions, str):
                LOG.info("Casting str transcriptions to list")
                transcriptions = [transcriptions]
            if transcriptions is not None:
                transcriptions = [t.lower().strip() for t in transcriptions]
                LOG.debug(f"STT: {transcriptions}")
            else:
                # Nothing was transcribed; notify listeners
                send_unknown_intent()
                LOG.info('no words were transcribed')
            return transcriptions
        except Exception as e:
            send_unknown_intent()
            LOG.error(e)
            LOG.exception("Speech Recognition could not understand audio")
            return None
Ejemplo n.º 12
0
 def translate(self, text, target=None, source="auto"):
     """
     Translate text using the `translate` package.
     :param text: text to translate
     :param target: target language; defaults to the internal language
     :param source: source language hint (not passed to the backend)
     :return: translated text
     """
     from translate import Translator
     dest_lang = target or self.internal_language
     # TODO: This doesn't appear to work DM
     result = Translator(to_lang=dest_lang).translate(text)
     LOG.info(result)
     return result
Ejemplo n.º 13
0
 def _get_stt_from_file(self, wav_file: str,
                        lang: str = None) -> (AudioData, dict, list):
     """
     Performs STT and audio processing on the specified wav_file
     :param wav_file: wav audio file to process
     :param lang: language of passed audio
     :return: (AudioData of object, extracted context, transcriptions)
     """
     from neon_utils.file_utils import get_audio_file_stream
     lang = lang or 'en-us'  # TODO: read default from config
     # Build an AudioData object for the audio parsers below
     segment = AudioSegment.from_file(wav_file)
     audio_data = AudioData(segment.raw_data, segment.frame_rate,
                            segment.sample_width)
     audio_stream = get_audio_file_stream(wav_file)
     # Serialize access to the shared streaming STT engine (30s timeout)
     if self.lock.acquire(True, 30):
         LOG.info(f"Starting STT processing (lang={lang}): {wav_file}")
         self.api_stt.stream_start(lang)
         # Feed the file to the engine in chunks until the stream ends
         while True:
             try:
                 data = audio_stream.read(1024)
                 self.api_stt.stream_data(data)
             except EOFError:
                 break
         transcriptions = self.api_stt.stream_stop()
         self.lock.release()
     else:
         # Could not get the engine in time; return no transcriptions
         LOG.error(f"Timed out acquiring lock, not processing: {wav_file}")
         transcriptions = []
     if isinstance(transcriptions, str):
         # Normalize single-string results to a list
         LOG.warning("Transcriptions is a str, no alternatives provided")
         transcriptions = [transcriptions]
     # Run the audio parsers to extract message context
     audio, audio_context = self.loop.responsive_recognizer. \
         audio_consumers.transform(audio_data)
     LOG.info(f"Transcribed: {transcriptions}")
     return audio, audio_context, transcriptions
Ejemplo n.º 14
0
    def _get_drink_text(self, message, caff_list=None):
        """
        Speak a dialog line for each unique drink in caff_list.
        :param message: Message associated with the request
        :param caff_list: optional list of (name, oz, caffeine_mg) entries;
            defaults to self.results
        """
        spoken = []
        if not caff_list:
            caff_list = self.results
            LOG.info(caff_list)
        # The user's measurement system is loop-invariant; look it up once
        # instead of once per drink as before
        units = self.preference_unit(message)['measure']
        # Iterate entries directly (was an index loop with an unused
        # counter variable)
        for entry in caff_list:
            drink = entry[0]
            if drink in spoken:
                continue
            oz = float(entry[1])
            caffeine = float(entry[2])

            if units == "metric":
                caff_mg, caff_vol, drink_units = self.convert_metric(
                    oz, caffeine)
            else:
                caff_mg = str(caffeine)
                caff_vol = str(oz)
                drink_units = 'ounces'

            self.speak_dialog(
                'MultipleCaffeine', {
                    'drink': drink,
                    'caffeine_content': caff_mg,
                    'caffeine_units': self.translate('milligrams'),
                    'drink_size': caff_vol,
                    'drink_units': drink_units
                })
            spoken.append(drink)
            sleep(0.5)  # Prevent simultaneous speak inserts
Ejemplo n.º 15
0
 def converse(self, message=None):
     """
     Handle conversational follow-up to a caffeine query: consume the
     user's yes/no answer to the "more drinks?" prompt.
     :param message: Message associated with the utterance
     :return: True if this skill consumed the utterance
     """
     user = self.get_utterance_user(message)
     LOG.debug(self.actions_to_confirm)
     if user in self.actions_to_confirm.keys():
         result = self.check_yes_no_response(message)
         if result == -1:
             # This isn't a response, ignore it
             return False
         elif not result:
             # User said no
             if self.local_config.get("interface",
                                      {}).get("wake_word_enabled", True):
                 # Offer more results and arm the goodbye intent with a timeout
                 self.speak_dialog("HowAboutMore", expect_response=True)
                 self.enable_intent('CaffeineContentGoodbyeIntent')
                 self.request_check_timeout(self.default_intent_timeout,
                                            'CaffeineContentGoodbyeIntent')
             else:
                 self.speak_dialog("StayCaffeinated")
             return True
         elif result:
             # User said yes
             LOG.info(self.results)
             self._get_drink_text(message)
             # self.speak(self._get_drink_text())
             # self.speak("Provided by CaffeineWiz.")
             self.speak("Provided by CaffeineWiz. Stay caffeinated!")
             return True
     return False
Ejemplo n.º 16
0
 def schedule_event(self, handler, when, data=None, name=None, context=None):
     """
     Schedule an event, accepting `when` as either a datetime or a number
     of seconds from now.
     :param handler: method to call at the scheduled time
     :param when: datetime to schedule for, or seconds from now (int/float)
     :param data: optional data to pass to the handler
     :param name: optional event name
     :param context: optional message context
     """
     # TODO: should 'when' already be a datetime? DM
     if isinstance(when, (int, float)):
         from datetime import datetime as dt, timedelta
         # Interpret a numeric `when` as an offset in seconds from now
         when = to_system_time(dt.now(self.sys_tz)) + timedelta(seconds=when)
         LOG.info(f"Made a datetime: {when}")
     super().schedule_event(handler, when, data, name, context)
Ejemplo n.º 17
0
    def _generate_drink_dialog(self, drink: str, message) -> str:
        """
        Generates the dialog and populates self.results for the requested drink
        :param drink: raw input drink to find
        :param message: message associated with request
        :return: generated dialog to speak
        """
        self.results = self._get_matching_drinks(drink)
        LOG.debug(self.results)
        if len(self.results) == 1:
            '''Return the only result'''
            # self.speak(("The best match is: " + str(self.results[0][0]) +
            #             ", which has " + str(self.results[0][2]) + " milligrams caffeine in "
            #             + str(self.results[0][1])) + " ounces. Provided by CaffeineWiz")
            drink = str(self.results[0][0])
            caff_mg = float(self.results[0][2])
            caff_oz = float(self.results[0][1])

        else:
            '''Find the best match from all of the returned results'''
            # Candidate names for fuzzy matching against the request
            matched_drink_names = [
                self.results[i][0] for i in range(len(self.results))
            ]
            match = difflib.get_close_matches(drink, matched_drink_names, 1)
            if match:
                match2 = [i for i in self.results if i[0] in match]
            else:
                # No close match found; fall back to the first result's name
                match2 = [
                    i for i in self.results if i[0] in matched_drink_names[0]
                ]
            LOG.debug(match)
            LOG.debug(match2)
            drink = str(match2[0][0])
            caff_mg = float(match2[0][2])
            caff_oz = float(match2[0][1])
            # self.speak(("The best match is: " + str(match2[0][0]) +
            #             ", which has " + str(match2[0][2]) + " milligrams caffeine in "
            #             + str(match2[0][1])) + " ounces. Provided by CaffeineWiz")
        # Convert amounts per the user's unit preference
        preference_unit = self.preference_unit(message)
        # self.digits = preference_unit['measure'] \
        #     if preference_unit['measure'] else 'imperial'
        if preference_unit['measure'] == 'metric':
            caff_mg, caff_vol, drink_units = self.convert_metric(
                caff_oz, caff_mg)
        else:
            caff_mg = str(caff_mg)
            caff_vol = str(caff_oz)
            drink_units = 'ounces'

        LOG.info(f"{drink} | {caff_mg} | {caff_vol} | {drink_units}")
        to_speak = self.dialog_renderer.render(
            'DrinkCaffeine', {
                'drink': drink,
                'caffeine_content': caff_mg,
                'caffeine_units': self.translate('milligrams'),
                'drink_size': caff_vol,
                'drink_units': drink_units
            })
        return to_speak
Ejemplo n.º 18
0
    def terminate(self):
        """
        Shut down enclosure hardware: cancel the watchdog, blank the LEDs,
        and stop the switch-monitoring thread.
        """
        LOG.info("Mark2:HardwareEnclosure:terminate()")
        self.cancel_watchdog()
        # Turn all LEDs off
        self.leds.fill(self.palette.BLACK)
        self.switches.terminate()
        # NOTE(review): reaches into a private flag on the switches object;
        # presumably terminate() alone does not stop its thread loop — confirm
        self.switches._running = False

        # Wait for the switch thread to exit before returning
        if self.switches.thread_handle is not None:
            self.switches.thread_handle.join()
Ejemplo n.º 19
0
    def on_volume_set(self, message):
        """
        Bus handler that applies a requested volume level to the hardware
        and broadcasts the resulting level.
        """
        # Keep the current level when no percent was supplied
        self.current_volume = message.data.get("percent", self.current_volume)
        LOG.info('Mark2:interface.py set volume to %s' %
                 (self.current_volume, ))
        self.m2enc.hardware_volume.set_volume(float(self.current_volume))

        # notify anybody listening on the bus who cares
        volume_update = Message("hardware.volume",
                                {"volume": self.current_volume},
                                context={"source": ["enclosure"]})
        self.bus.emit(volume_update)
Ejemplo n.º 20
0
 def _confirmation_timeout(self, message):
     """
     Remove the pending confirmation action for the requesting user once
     it times out; re-enable skill reloading when none remain.
     """
     requesting_user = message.data.get("user", "local")
     try:
         # Drop the pending action if it is still awaiting this user
         if requesting_user in self.actions_to_confirm:
             pending = self.actions_to_confirm.pop(requesting_user)
             LOG.info(f"confirmation timed out ({time.time()}): {pending}")
     except Exception as e:
         # Catches if the item was already popped
         LOG.error(e)
     if not self.actions_to_confirm:
         self.reload_skill = True
Ejemplo n.º 21
0
    def __init__(self):
        """
        Initialize the Mark2 enclosure: event handlers, volume/LED state,
        the hardware interface, and the temperature monitor thread.
        """
        LOG.info('** Initialize EnclosureMark2 **')
        super().__init__()
        self.display_bus_client = None
        self._define_event_handlers()
        self.finished_loading = False
        self.active_screen = 'loading'
        self.paused_screen = None
        self.is_pairing = False
        self.active_until_stopped = None
        # LED ring indices used for status indicators
        self.reserved_led = 10
        self.mute_led = 11
        self.chaseLedThread = None
        self.pulseLedThread = None

        self.system_volume = 0.5  # pulse audio master system volume
        # if you want to do anything with the system volume
        # (ala pulseaudio, etc) do it here!
        self.current_volume = 0.5  # hardware/board level volume

        # TODO these need to come from a config value
        self.m2enc = HardwareEnclosure("Mark2", "sj201r4")
        self.m2enc.client_volume_handler = self.async_volume_handler

        # start the temperature monitor thread
        self.temperatureMonitorThread = temperatureMonitorThread(
            self.m2enc.fan, self.m2enc.leds, self.m2enc.palette)
        self.temperatureMonitorThread.start()

        # Set all ten user-facing LEDs to BLACK (off)
        self.m2enc.leds.set_leds([
            self.m2enc.palette.BLACK, self.m2enc.palette.BLACK,
            self.m2enc.palette.BLACK, self.m2enc.palette.BLACK,
            self.m2enc.palette.BLACK, self.m2enc.palette.BLACK,
            self.m2enc.palette.BLACK, self.m2enc.palette.BLACK,
            self.m2enc.palette.BLACK, self.m2enc.palette.BLACK
        ])

        # Light the reserved LED magenta at half brightness
        self.m2enc.leds._set_led_with_brightness(self.reserved_led,
                                                 self.m2enc.palette.MAGENTA,
                                                 0.5)

        # set mute led based on reality
        mute_led_color = self.m2enc.palette.GREEN
        if self.m2enc.switches.SW_MUTE == 1:
            mute_led_color = self.m2enc.palette.RED

        self.m2enc.leds._set_led_with_brightness(self.mute_led, mute_led_color,
                                                 1.0)

        self.default_caps = EnclosureCapabilities()

        LOG.info('** EnclosureMark2 initalized **')
        # Run readiness reporting once skills have finished training
        self.bus.once('mycroft.skills.trained', self.is_device_ready)
Ejemplo n.º 22
0
    def on_capabilities_get(self, message):
        """
        Bus handler that responds with the enclosure's default and
        hardware-specific capabilities.
        """
        LOG.info('Mark2:interface.py get capabilities requested')

        capabilities = {
            'default': self.default_caps.caps,
            'extra': self.m2enc.capabilities,
            'board_type': self.m2enc.board_type,
            'leds': self.m2enc.leds.capabilities,
            'volume': self.m2enc.hardware_volume.capabilities,
            'switches': self.m2enc.switches.capabilities
        }
        self.bus.emit(message.response(data=capabilities))
Ejemplo n.º 23
0
    def on_volume_get(self, message):
        """
        Bus handler that reads the hardware volume and responds with the
        current level and mute state.
        """
        level = self.m2enc.hardware_volume.get_volume()
        # Scale readings above 1.0 down by a factor of 10
        if level > 1.0:
            level = level / 10
        self.current_volume = level

        LOG.info('Mark2:interface.py get and emit volume %s' %
                 (self.current_volume, ))
        response_data = {
            'percent': self.current_volume,
            'muted': False
        }
        self.bus.emit(message.response(data=response_data))
Ejemplo n.º 24
0
def patch_config(config: dict = None):
    """
    Write the specified speech configuration to the global config file
    :param config: Mycroft-compatible configuration override
    """
    config = config or dict()
    # Overlay the requested overrides onto the current speech module config
    updated_config = {**get_speech_module_config(), **config}
    config_file = join(xdg_config_home(), "neon", "neon.conf")
    # exist_ok avoids the race between an isdir() check and creation
    os.makedirs(os.path.dirname(config_file), exist_ok=True)
    with open(config_file, "w+") as f:
        json.dump(updated_config, f)
    LOG.info(f"Updated config file: {config_file}")
Ejemplo n.º 25
0
def install_stt_plugin(plugin: str) -> bool:
    """
    Install an stt plugin using pip
    :param plugin: entrypoint of plugin to install
    :returns: True if the plugin installation is successful
    """
    import pip
    # Write a constraints file pinning neon-speech's dependencies
    fd, tmp_file = mkstemp()
    try:
        # fdopen adopts and closes the descriptor returned by mkstemp
        # (previously the fd was discarded and leaked)
        with os.fdopen(fd, 'w') as f:
            f.write('\n'.join(get_package_dependencies("neon-speech")))
        LOG.info(f"Requested installation of plugin: {plugin}")
        returned = pip.main(['install', _plugin_to_package(plugin),
                             "-c", tmp_file])
        LOG.info(f"pip status: {returned}")
        return returned == 0
    finally:
        # Remove the temporary constraints file
        os.remove(tmp_file)
Ejemplo n.º 26
0
    def _init_settings(self):
        """
        Initializes yml-based skill config settings, updating from default dict as necessary for added parameters
        """
        # TODO: This should just use the underlying Mycroft methods DM
        super()._init_settings()
        # Prefer YAML settingsmeta; fall back to JSON; else no metadata
        if os.path.isfile(os.path.join(self.root_dir, "settingsmeta.yml")):
            skill_meta = NGIConfig("settingsmeta", self.root_dir).content
        elif os.path.isfile(os.path.join(self.root_dir, "settingsmeta.json")):
            with open(os.path.join(self.root_dir, "settingsmeta.json")) as f:
                skill_meta = json.load(f)
        else:
            skill_meta = None

        # Load defaults from settingsmeta
        default = {}
        if skill_meta:
            # LOG.info(skill_meta)
            LOG.info(skill_meta["skillMetadata"]["sections"])
            for section in skill_meta["skillMetadata"]["sections"]:
                for pref in section.get("fields", []):
                    if not pref.get("name"):
                        # Fields without a "name" carry no setting data
                        LOG.debug(f"non-data skill meta: {pref}")
                    else:
                        # Normalize string booleans and YAML maps to native types
                        if pref.get("value") == "true":
                            value = True
                        elif pref.get("value") == "false":
                            value = False
                        elif isinstance(pref.get("value"), CommentedMap):
                            value = dict(pref.get("value"))
                        else:
                            value = pref.get("value")
                        default[pref["name"]] = value

        # Load or init configuration
        self._ngi_settings = NGIConfig(self.name, self.settings_write_path)

        # Load any new or updated keys
        try:
            LOG.debug(self._ngi_settings.content)
            LOG.debug(default)
            if self._ngi_settings.content and len(self._ngi_settings.content.keys()) > 0 and len(default.keys()) > 0:
                # Existing settings present: merge in any new default keys
                self._ngi_settings.make_equal_by_keys(default, recursive=False)
            elif len(default.keys()) > 0:
                LOG.info("No settings to load, use default")
                self._ngi_settings.populate(default)
        except Exception as e:
            # On any merge failure, fall back to the parsed defaults
            LOG.error(e)
            self._ngi_settings.populate(default)
Ejemplo n.º 27
0
 def _clean_drink_name(self, drink: str) -> [str, None]:
     """
     Normalize a requested drink name for lookup.
     :param drink: raw drink name parsed from an utterance
     :return: cleaned drink name, or None if no drink was passed
     """
     if not drink:
         return None
     drink = drink.lower()
     # Strip leading article "a" ("a coffee" -> "coffee"). The original
     # called str.lstrip("a") and discarded the result, which was a no-op
     # (strings are immutable) and targeted characters, not the word.
     words = drink.split(maxsplit=1)
     if words[0] == "a" and len(words) > 1:
         drink = words[1]
     if drink.startswith("cup of") or drink.startswith("glass of"):
         drink = drink.split(" of ", 1)[1]
     # Remove punctuation and normalize possessives
     drink = drink.translate({ord(i): None
                              for i in '?:!/;@#$'
                              }).rstrip().replace(" '", "'")
     # Check for common mis-matched names
     drink = self.translate_drinks.get(drink, drink)
     LOG.info(drink)
     return drink
Ejemplo n.º 28
0
    def handle_audio_input(self, message):
        """
        Handler for `neon.audio_input`.
        Handles remote audio input to Neon and replies with confirmation
        :param message: Message associated with request
        """

        def build_context(msg: Message):
            # Merge defaults under the incoming context, then force the
            # skills destination and attach transcription timing
            ctx: dict = message.context
            defaults = {'client_name': 'mycroft_listener',
                        'client': 'api',
                        'source': 'speech_api',
                        'ident': time(),
                        'username': self.user_config["user"]["username"] or
                        "local",
                        'user_profiles': [self.user_config.content]}
            ctx = {**defaults, **ctx, 'destination': ['skills'],
                   'timing': {'start': msg.data.get('time'),
                              'transcribed': time()}}
            return ctx

        ident = message.context.get("ident") or "neon.audio_input.response"
        LOG.info(f"Handling audio input: {ident}")
        if message.data.get("audio_data"):
            # Inline audio payload: decode it to a temporary wav file
            wav_file_path = self._write_encoded_file(
                message.data.pop("audio_data"))
        else:
            wav_file_path = message.data.get("audio_file")
        lang = message.data.get("lang")
        try:
            # Transcribe and run audio parsers over the input
            _, parser_data, transcriptions = \
                self._get_stt_from_file(wav_file_path, lang)
            message.context["audio_parser_data"] = parser_data
            context = build_context(message)
            data = {
                "utterances": transcriptions,
                "lang": message.data.get("lang", "en-us")
            }
            # Forward the transcriptions to skills as a normal utterance
            handled = self._emit_utterance_to_skills(Message(
                'recognizer_loop:utterance', data, context))
            self.bus.emit(message.reply(ident,
                                        data={"parser_data": parser_data,
                                              "transcripts": transcriptions,
                                              "skills_recv": handled}))
        except Exception as e:
            LOG.error(e)
            self.bus.emit(message.reply(ident, data={"error": repr(e)}))
Ejemplo n.º 29
0
 def ti_start_sequence(self):
     '''
     Start Sequence for the TAS5806

     Writes the amplifier's register start-up sequence.
     NOTE(review): the register addresses/values and their order appear
     deliberate — verify against the TAS5806 datasheet before changing.
     '''
     LOG.info("Start the TI Amp")
     self.write_ti_data(0x01, 0x11)  # reset chip
     self.write_ti_data(0x78, 0x80)  # clear fault - works
     self.write_ti_data(0x01, 0x00)  # remove reset
     self.write_ti_data(0x78, 0x00)  # remove clear fault
     self.write_ti_data(0x33, 0x03)
     self.set_volume(0.5)  # start at half volume
     self.write_ti_data(0x30, 0x01)
     self.write_ti_data(0x03, 0x00)  # Deep Sleep
     self.write_ti_data(0x03, 0x02)  # HiZ
     # Indicate the first coefficient of a BQ is starting to write
     self.write_ti_data(0x5C, 0x01)
     self.write_ti_data(0x03, 0x03)  # Play
Ejemplo n.º 30
0
def remove_old_logs(log_dir: str = LOG_DIR,
                    history_to_retain: timedelta = timedelta(weeks=6)):
    """
    Removes archived logs older than the specified history timedelta
    Args:
        log_dir: Path to archived logs
        history_to_retain: Timedelta of history to retain
    """
    from shutil import rmtree
    for entry in os.listdir(log_dir):
        full_path = os.path.join(log_dir, entry)
        # Only archive directories are candidates for removal
        if not os.path.isdir(full_path):
            continue
        modified = datetime.fromtimestamp(os.path.getmtime(full_path))
        if datetime.now() - modified > history_to_retain:
            LOG.info(f"removing {entry}")
            rmtree(full_path)