Example #1
 def download(url, filename):
     import shutil
     from urllib2 import urlopen
     LOG.info('Downloading: ' + url)
     req = urlopen(url)
     with open(filename, 'wb') as fp:
         shutil.copyfileobj(req, fp)
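
Note: the snippet above is Python 2 (urllib2). A minimal Python 3 sketch of the same idea, assuming LOG is the same project logger used throughout these examples, might look like this:

def download(url, filename):
    import shutil
    from urllib.request import urlopen  # Python 3 counterpart of urllib2.urlopen
    LOG.info('Downloading: ' + url)
    # Stream the response body straight to disk instead of reading it into memory
    with urlopen(url) as req, open(filename, 'wb') as fp:
        shutil.copyfileobj(req, fp)
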
Example #2
    def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
        super(PreciseHotword, self).__init__(key_phrase, config, lang)
        self.update_freq = 24  # in hours

        precise_config = Configuration.get()['precise']
        self.dist_url = precise_config['dist_url']
        self.models_url = precise_config['models_url']
        self.exe_name = 'precise-stream'

        ww = Configuration.get()['listener']['wake_word']
        model_name = ww.replace(' ', '-') + '.pb'
        model_folder = expanduser('~/.mycroft/precise')
        if not isdir(model_folder):
            mkdir(model_folder)
        model_path = join(model_folder, model_name)

        exe_file = self.find_download_exe()
        LOG.info('Found precise executable: ' + exe_file)
        self.update_model(model_name, model_path)

        args = [exe_file, model_path, '1024']
        self.proc = Popen(args, stdin=PIPE, stdout=PIPE)
        self.has_found = False
        self.cooldown = 20
        t = Thread(target=self.check_stdout)
        t.daemon = True
        t.start()
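
For reference, the Popen-plus-reader-thread combination used above is a common standard-library pattern. A generic sketch (the on_line callback and the argument list are illustrative, not from the source):

from subprocess import Popen, PIPE
from threading import Thread

def start_reader(args, on_line):
    """Spawn the command and feed each stdout line to on_line from a daemon thread."""
    proc = Popen(args, stdin=PIPE, stdout=PIPE)

    def pump():
        for line in proc.stdout:  # yields bytes lines until the process exits
            on_line(line)

    Thread(target=pump, daemon=True).start()
    return proc
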
Example #3
def handle_utterance(event):
    LOG.info("Utterance: " + str(event['utterances']))
    context = {'client_name': 'mycroft_listener'}
    if 'ident' in event:
        ident = event.pop('ident')
        context['ident'] = ident
    bus.emit(Message('recognizer_loop:utterance', event, context))
Example #4
def mute_and_speak(utterance):
    """
        Mute mic and start speaking the utterance using selected tts backend.

        Args:
            utterance: The sentence to be spoken
    """
    global tts_hash

    lock.acquire()
    # update TTS object if configuration has changed
    if tts_hash != hash(str(config.get('tts', ''))):
        global tts
        # Stop tts playback thread
        tts.playback.stop()
        tts.playback.join()
        # Create new tts instance
        tts = TTSFactory.create()
        tts.init(ws)
        tts_hash = hash(str(config.get('tts', '')))

    LOG.info("Speak: " + utterance)
    try:
        tts.execute(utterance)
    finally:
        lock.release()
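
A note on the locking here: the configuration-reload block sits between lock.acquire() and the try, so an exception while rebuilding the TTS object would leave the lock held. Using the lock as a context manager releases it on every exit path. A minimal sketch with a plain threading.Lock (the speak_fn parameter is illustrative):

import threading

lock = threading.Lock()

def speak_guarded(utterance, speak_fn):
    """Run speak_fn(utterance) while holding the lock.

    'with lock:' releases the lock on any exit path, including exceptions,
    so reload logic can safely live inside the guarded block as well.
    """
    with lock:
        speak_fn(utterance)
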
Example #5
    def _do_net_check(self):
        # TODO: This should live in the derived Enclosure, e.g. Enclosure_Mark1
        LOG.info("Checking internet connection")
        if not connected():  # and self.conn_monitor is None:
            if has_been_paired():
                # TODO: Enclosure/localization
                self.speak("This unit is not connected to the Internet. "
                           "Either plug in a network cable or hold the "
                           "button on top for two seconds, then select "
                           "wifi from the menu")
            else:
                # Begin the unit startup process, this is the first time it
                # is being run with factory defaults.

                # TODO: This logic should be in Enclosure_Mark1
                # TODO: Enclosure/localization

                # Don't listen to mic during this out-of-box experience
                self.ws.emit(Message("mycroft.mic.mute"))
                # Set up handler to unmute mic at the end of onboarding
                # i.e. after pairing is complete
                self.ws.once('mycroft.paired', self._handle_pairing_complete)

                self.speak(mycroft.dialog.get('mycroft.intro'))
                wait_while_speaking()
                time.sleep(2)  # a pause sounds better than just jumping in

                # Kick off wifi-setup automatically
                data = {'allow_timeout': False, 'lang': self.lang}
                self.ws.emit(Message('system.wifi.setup', data))
Example #6
    def load_module(module, hotword, config, lang, loop):
        LOG.info('Loading "{}" wake word via {}'.format(hotword, module))
        instance = None
        complete = Event()

        def initialize():
            nonlocal instance, complete
            try:
                clazz = HotWordFactory.CLASSES[module]
                instance = clazz(hotword, config, lang=lang)
            except TriggerReload:
                complete.set()
                sleep(0.5)
                loop.reload()
            except NoModelAvailable:
                LOG.warning('Could not find model for {} on {}.'.format(
                    hotword, module
                ))
                instance = None
            except Exception:
                LOG.exception(
                    'Could not create hotword. Falling back to default.')
                instance = None
            complete.set()

        Thread(target=initialize, daemon=True).start()
        if not complete.wait(INIT_TIMEOUT):
            LOG.info('{} is taking too long to load'.format(module))
            complete.set()
        return instance
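
The timeout-guarded construction above can be separated from the Mycroft-specific parts. A rough generic sketch of the same idea (factory and the INIT_TIMEOUT value below are illustrative):

from threading import Thread, Event

INIT_TIMEOUT = 10  # seconds; illustrative value

def load_with_timeout(factory, *args, **kwargs):
    """Call factory(*args, **kwargs) in a daemon thread.

    Returns the constructed object, or None if construction raised or did
    not finish within INIT_TIMEOUT seconds (a late result is discarded).
    """
    result = None
    done = Event()

    def build():
        nonlocal result
        try:
            result = factory(*args, **kwargs)
        except Exception:
            result = None
        finally:
            done.set()

    Thread(target=build, daemon=True).start()
    done.wait(INIT_TIMEOUT)
    return result
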
Example #7
    def transcribe(self, audio):
        try:
            # Invoke the STT engine on the audio clip
            text = self.stt.execute(audio).lower().strip()
            LOG.debug("STT: " + text)
            return text
        except sr.RequestError as e:
            LOG.error("Could not request Speech Recognition {0}".format(e))
        except ConnectionError as e:
            LOG.error("Connection Error: {0}".format(e))

            self.emitter.emit("recognizer_loop:no_internet")
        except HTTPError as e:
            if e.response.status_code == 401:
                LOG.warning("Access Denied at mycroft.ai")
                return "pair my device"  # phrase to start the pairing process
            else:
                LOG.error(e.__class__.__name__ + ': ' + str(e))
        except RequestException as e:
            LOG.error(e.__class__.__name__ + ': ' + str(e))
        except Exception as e:
            self.emitter.emit('recognizer_loop:speech.recognition.unknown')
            if isinstance(e, IndexError):
                LOG.info('no words were transcribed')
            else:
                LOG.error(e)
            LOG.error("Speech Recognition could not understand audio")
            return None
        if connected():
            dialog_name = 'backend.down'
        else:
            dialog_name = 'not connected to the internet'
        self.emitter.emit('speak', {'utterance': dialog.get(dialog_name)})
Example #8
def main():
    global ws
    global config
    ws = WebsocketClient()
    Configuration.init(ws)
    config = Configuration.get()
    speech.init(ws)

    # Setup control of pulse audio
    setup_pulseaudio_handlers(config.get('Audio').get('pulseaudio'))

    def echo(message):
        try:
            _message = json.loads(message)
            if 'mycroft.audio.service' not in _message.get('type'):
                return
            message = json.dumps(_message)
        except:
            pass
        LOG.debug(message)

    LOG.info("Staring Audio Services")
    ws.on('message', echo)
    ws.once('open', load_services_callback)
    try:
        ws.run_forever()
    except KeyboardInterrupt as e:
        LOG.exception(e)
        speech.shutdown()
        sys.exit()
Example #9
 def supported_uris(self):
     """ Return supported uris of chromecast. """
     LOG.info("Chromecasts found: " + str(self.cast))
     if self.cast:
         return ['http', 'https']
     else:
         return []
Example #10
    def shutdown(self):
        for s in self.service:
            try:
                LOG.info('shutting down ' + s.name)
                s.shutdown()
            except Exception as e:
                LOG.error('shutdown of ' + s.name + ' failed: ' + repr(e))

        # remove listeners
        self.bus.remove('mycroft.audio.service.play', self._play)
        self.bus.remove('mycroft.audio.service.queue', self._queue)
        self.bus.remove('mycroft.audio.service.pause', self._pause)
        self.bus.remove('mycroft.audio.service.resume', self._resume)
        self.bus.remove('mycroft.audio.service.stop', self._stop)
        self.bus.remove('mycroft.audio.service.next', self._next)
        self.bus.remove('mycroft.audio.service.prev', self._prev)
        self.bus.remove('mycroft.audio.service.track_info', self._track_info)
        self.bus.remove('mycroft.audio.service.seek_forward',
                        self._seek_forward)
        self.bus.remove('mycroft.audio.service.seek_backward',
                        self._seek_backward)
        self.bus.remove('recognizer_loop:audio_output_start',
                        self._lower_volume)
        self.bus.remove('recognizer_loop:record_begin', self._lower_volume)
        self.bus.remove('recognizer_loop:audio_output_end',
                        self._restore_volume)
        self.bus.remove('recognizer_loop:record_end', self._restore_volume)
        self.bus.remove('mycroft.stop', self._stop)
Example #11
def mute_and_speak(utterance, ident):
    """
        Mute mic and start speaking the utterance using selected tts backend.

        Args:
            utterance:  The sentence to be spoken
            ident:      Ident tying the utterance to the source query
    """
    global tts_hash

    # update TTS object if configuration has changed
    if tts_hash != hash(str(config.get('tts', ''))):
        global tts
        # Stop tts playback thread
        tts.playback.stop()
        tts.playback.join()
        # Create new tts instance
        tts = TTSFactory.create()
        tts.init(bus)
        tts_hash = hash(str(config.get('tts', '')))

    LOG.info("Speak: " + utterance)
    try:
        tts.execute(utterance, ident)
    except RemoteTTSTimeoutException as e:
        LOG.error(e)
        mimic_fallback_tts(utterance, ident)
    except Exception as e:
        LOG.error('TTS execution failed ({})'.format(repr(e)))
Example #12
 def validate_connection(self):
     try:
         subprocess.call([BIN, '--version'])
     except:
         LOG.info("Failed to find mimic at: " + BIN)
         raise Exception(
             'Mimic was not found. Run install-mimic.sh to install it.')
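
A possible narrowing of the bare except above: shutil.which (Python 3.3+) can check for the binary without invoking it. Sketch only, reusing BIN and LOG from the example:

import shutil

def validate_mimic():
    # shutil.which resolves BIN on PATH, or checks an absolute path directly
    if shutil.which(BIN) is None:
        LOG.info("Failed to find mimic at: " + BIN)
        raise Exception(
            'Mimic was not found. Run install-mimic.sh to install it.')
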
Example #13
    def _do_net_check(self):
        # TODO: This should live in the derived Enclosure, e.g. Enclosure_Mark1
        LOG.info("Checking internet connection")
        if not connected():  # and self.conn_monitor is None:
            if has_been_paired():
                # TODO: Enclosure/localization
                self.ws.emit(Message("speak", {
                    'utterance': "This unit is not connected to the Internet."
                                 " Either plug in a network cable or hold the "
                                 "button on top for two seconds, then select "
                                 "wifi from the menu"
                }))
            else:
                # Begin the unit startup process, this is the first time it
                # is being run with factory defaults.

                # TODO: This logic should be in Enclosure_Mark1
                # TODO: Enclosure/localization

                # Don't listen to mic during this out-of-box experience
                self.ws.emit(Message("mycroft.mic.mute"))
                # Set up handler to unmute mic at the end of onboarding
                # i.e. after pairing is complete
                self.ws.once('mycroft.paired', self._handle_pairing_complete)

                # Kick off wifi-setup automatically
                self.ws.emit(Message("mycroft.wifi.start",
                                     {'msg': "Hello I am Mycroft, your new "
                                      "assistant.  To assist you I need to be "
                                      "connected to the internet.  You can "
                                      "either plug me in with a network cable,"
                                      " or use wifi.  To setup wifi ",
                                      'allow_timeout': False}))
Example #14
    def _play(self, message=None):
        """ Implementation specific async method to handle playback.
            This allows mpg123 service to use the "next" method as well
            as basic play/stop.
        """
        LOG.info('Mpg123Service._play')
        self._is_playing = True
        track = self.tracks[self.index]

        # Replace file:// uri's with normal paths
        track = track.replace('file://', '')

        self.process = subprocess.Popen(['mpg123', track])
        # Wait for completion or stop request
        while self.process.poll() is None and not self._stop_signal:
            sleep(0.25)

        if self._stop_signal:
            self.process.terminate()
            self.process = None
            self._is_playing = False

            return
        self.index += 1
        # if there are more tracks available play next
        if self.index < len(self.tracks):
            self.emitter.emit(Message('Mpg123ServicePlay'))
        else:
            self._is_playing = False
Example #15
 def during_download(self, first_run=False):
     LOG.info('Still downloading executable...')
     if first_run:  # TODO: Localize
         self._snd_msg('mouth.text=Updating listener...')
     if not self.download_complete:
         self.show_download_progress = Timer(30, self.during_download)
         self.show_download_progress.start()
Example #16
 def create_hotword(hotword="hey mycroft", config=None, lang="en-us"):
     LOG.info("creating " + hotword)
     if not config:
         config = ConfigurationManager.get().get("hotwords", {})
     module = config.get(hotword).get("module", "pocketsphinx")
     config = config.get(hotword, {"module": module})
     clazz = HotWordFactory.CLASSES.get(module)
     return clazz(hotword, config, lang=lang)
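
One fragile spot above: config.get(hotword) returns None when the wake word has no entry, so the chained .get("module", ...) raises AttributeError. A hedged, more defensive variant (same names as the example; the ValueError on an unknown module is an addition):

def create_hotword(hotword="hey mycroft", config=None, lang="en-us"):
    LOG.info("creating " + hotword)
    if not config:
        config = ConfigurationManager.get().get("hotwords", {})
    # Fall back to an empty dict so a missing wake-word entry cannot
    # raise AttributeError on the chained .get()
    hw_config = config.get(hotword) or {}
    module = hw_config.get("module", "pocketsphinx")
    hw_config.setdefault("module", module)
    clazz = HotWordFactory.CLASSES.get(module)
    if clazz is None:
        raise ValueError("Unknown hotword module: " + module)
    return clazz(hotword, hw_config, lang=lang)
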
Example #17
def is_speaking():
    """Determine if Text to Speech is occurring

    Returns:
        bool: True while still speaking
    """
    LOG.info("mycroft.utils.is_speaking() is depreciated, use "
             "mycroft.audio.is_speaking() instead.")
    return mycroft.audio.is_speaking()
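
Deprecation shims like this one (and wait_while_speaking() further down) are often paired with a standard DeprecationWarning; a brief sketch using only the standard library:

import warnings

def is_speaking():
    """Deprecated wrapper kept for backwards compatibility."""
    warnings.warn(
        "mycroft.utils.is_speaking() is deprecated, use "
        "mycroft.audio.is_speaking() instead.",
        DeprecationWarning, stacklevel=2)
    return mycroft.audio.is_speaking()
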
Example #18
 def read(self):
     while self.alive:
         try:
             data = self.serial.readline()[:-2]
             if data:
                 self.process(data)
                 LOG.info("Reading: " + data)
         except Exception as e:
             LOG.error("Reading error: {0}".format(e))
Example #19
    def listen(self, source, emitter):
        """Listens for chunks of audio that Mycroft should perform STT on.

        This will listen continuously for a wake-up-word, then return the
        audio chunk containing the spoken phrase that comes immediately
        afterwards.

        Args:
            source (AudioSource):  Source producing the audio chunks
            emitter (EventEmitter): Emitter for notifications of when recording
                                    begins and ends.

        Returns:
            AudioData: audio with the user's utterance, minus the wake-up-word
        """
        assert isinstance(source, AudioSource), "Source must be an AudioSource"

        #        bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
        sec_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE

        # Every time a new 'listen()' request begins, reset the threshold
        # used for silence detection.  This is as good of a reset point as
        # any, as we expect the user and Mycroft to not be talking.
        # NOTE: adjust_for_ambient_noise() doc claims it will stop early if
        #       speech is detected, but there is no code to actually do that.
        self.adjust_for_ambient_noise(source, 1.0)

        LOG.debug("Waiting for wake word...")
        self._wait_until_wake_word(source, sec_per_buffer)
        if self._stop_signaled:
            return

        LOG.debug("Recording...")
        emitter.emit("recognizer_loop:record_begin")

        # If enabled, play a wave file with a short sound to audibly
        # indicate recording has begun.
        if self.config.get('confirm_listening'):
            audio_file = resolve_resource_file(
                self.config.get('sounds').get('start_listening'))
            if audio_file:
                source.mute()
                play_wav(audio_file).wait()
                source.unmute()

        frame_data = self._record_phrase(source, sec_per_buffer)
        audio_data = self._create_audio_data(frame_data, source)
        emitter.emit("recognizer_loop:record_end")
        if self.save_utterances:
            LOG.info("Recording utterance")
            stamp = str(datetime.datetime.now())
            filename = "/tmp/mycroft_utterance%s.wav" % stamp
            with open(filename, 'wb') as filea:
                filea.write(audio_data.get_wav_data())
            LOG.debug("Thinking...")

        return audio_data
Example #20
 def flush(self):
     while self.alive:
         try:
             cmd = self.commands.get()
             self.serial.write(cmd + '\n')
             LOG.info("Writing: " + cmd)
             self.commands.task_done()
         except Exception as e:
             LOG.error("Writing error: {0}".format(e))
Example #21
def wait_while_speaking():
    """Pause as long as Text to Speech is still happening

    Pause while Text to Speech is still happening.  This always pauses
    briefly to ensure that any preceding request to speak has time to
    begin.
    """
    LOG.info("mycroft.utils.wait_while_speaking() is depreciated, use "
             "mycroft.audio.wait_while_speaking() instead.")
    return mycroft.audio.wait_while_speaking()
Example #22
 def _delete_old_meta(self):
     """" Deletes the old meta data """
     if self._uuid_exist():
         try:
             LOG.info("a uuid exist for {}".format(self.name) +
                      " deleting old one")
             old_uuid = self._load_uuid()
             self._delete_metatdata(old_uuid)
         except Exception as e:
             LOG.info(e)
Example #23
    def _play(self, message):
        """ Implementation specific async method to handle playback.
            This allows the service to use the "next" method as well
            as basic play/stop.
        """
        LOG.info('SimpleAudioService._play')
        repeat = message.data.get('repeat', False)
        self._is_playing = True
        if isinstance(self.tracks[self.index], list):
            track = self.tracks[self.index][0]
            mime = self.tracks[self.index][1]
            mime = mime.split('/')
        else:  # Assume string
            track = self.tracks[self.index]
            mime = find_mime(track)
        # Indicate to audio service which track is being played
        if self._track_start_callback:
            self._track_start_callback(track)

        # Replace file:// uri's with normal paths
        track = track.replace('file://', '')
        try:
            if 'mpeg' in mime[1]:
                self.process = play_mp3(track)
            elif 'ogg' in mime[1]:
                self.process = play_ogg(track)
            elif 'wav' in mime[1]:
                self.process = play_wav(track)
            else:
                # If no mime info could be determined guess mp3
                self.process = play_mp3(track)
        except FileNotFoundError as e:
            LOG.error('Couldn\'t play audio, {}'.format(repr(e)))
            self.process = None

        # Wait for completion or stop request
        while (self.process and self.process.poll() is None and
                not self._stop_signal):
            sleep(0.25)

        if self._stop_signal:
            self.process.terminate()
            self.process = None
            self._is_playing = False
            return

        self.index += 1
        # if there are more tracks available play next
        if self.index < len(self.tracks) or repeat:
            if self.index >= len(self.tracks):
                self.index = 0
            self.bus.emit(Message('SimpleAudioServicePlay',
                                  {'repeat': repeat}))
        else:
            self._is_playing = False
Example #24
def autodetect(config, bus):
    """
        Autodetect chromecasts on the network and create backends for each
    """
    casts = pychromecast.get_chromecasts(timeout=5, tries=2, retry_wait=2)
    ret = []
    for c in casts:
        LOG.info(c.name + " found.")
        ret.append(ChromecastService(config, bus, c.name.lower(), c))

    return ret
Example #25
def autodetect(config, emitter):
    """
        Autodetect chromecasts on the network and create backends for each
    """
    casts = pychromecast.get_chromecasts()
    ret = []
    for c in casts:
        LOG.info(c.name + " found.")
        ret.append(ChromecastService(config, emitter, c.name.lower(), c))

    return ret
Example #26
 def on_download(self):
     LOG.info('Downloading Precise executable...')
     if isdir(join(self.folder, 'precise-stream')):
         rmtree(join(self.folder, 'precise-stream'))
     for old_package in glob(join(self.folder, 'precise-engine_*.tar.gz')):
         os.remove(old_package)
     self.download_complete = False
     self.show_download_progress = Timer(
         5, self.during_download, args=[True]
     )
     self.show_download_progress.start()
Example #27
 def __init_serial(self):
     try:
         self.port = self.config.get("port")
         self.rate = self.config.get("rate")
         self.timeout = self.config.get("timeout")
         self.serial = serial.serial_for_url(
             url=self.port, baudrate=self.rate, timeout=self.timeout)
         LOG.info("Connected to: %s rate: %s timeout: %s" %
                  (self.port, self.rate, self.timeout))
     except:
         LOG.error("Impossible to connect to serial port: "+str(self.port))
         raise
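
The bare except here also catches things like KeyboardInterrupt. pyserial reports connection failures as serial.SerialException, so a narrower sketch could be (catching ValueError as well, on the assumption that out-of-range parameters raise it):

def __init_serial(self):
    try:
        self.port = self.config.get("port")
        self.rate = self.config.get("rate")
        self.timeout = self.config.get("timeout")
        self.serial = serial.serial_for_url(
            url=self.port, baudrate=self.rate, timeout=self.timeout)
        LOG.info("Connected to: %s rate: %s timeout: %s" %
                 (self.port, self.rate, self.timeout))
    except (serial.SerialException, ValueError):
        LOG.error("Impossible to connect to serial port: " + str(self.port))
        raise
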
Example #28
    def _save_hash(self, hashed_meta):
        """ Saves hashed_meta to settings directory.

            Args:
                hashed_meta (int): hash of new settingsmeta
        """
        LOG.info("saving hash {}".format(str(hashed_meta)))
        directory = self.config.get("skills")["directory"]
        directory = join(directory, self.name)
        directory = expanduser(directory)
        hash_file = join(directory, 'hash')
        with open(hash_file, 'w') as f:
            f.write(str(hashed_meta))
Example #29
def _stop(message=None):
    """
        Handler for mycroft.stop. Stops any playing service.

        Args:
            message: message bus message, not used but required
    """
    global current
    LOG.info('stopping all playing services')
    if current:
        current.stop()
        current = None
    LOG.info('Stopped')
Example #30
 def deactivate_except(self, message):
     """ Deactivate all skills except the provided. """
     try:
         skill_to_keep = message.data['skill']
         LOG.info('DEACTIVATING ALL SKILLS EXCEPT {}'.format(skill_to_keep))
         if skill_to_keep in [basename(i) for i in self.loaded_skills]:
             for skill in self.loaded_skills:
                 if basename(skill) != skill_to_keep:
                     self.__deactivate_skill(skill)
         else:
             LOG.info('Couldn\'t find skill')
     except Exception as e:
         LOG.error('Error during skill removal, {}'.format(repr(e)))
Example #31
 def on_open(self):
     LOG.info("Connected")
     self.connected_event.set()
     self.emitter.emit("open")
     # Restore reconnect timer to 5 seconds on successful connect
     self.retry = 5
Example #32
 def play(self):
     LOG.info('Call Mpg123ServicePlay')
     self.index = 0
     self.emitter.emit(Message('Mpg123ServicePlay'))
Example #33
 def add_list(self, tracks):
     self.tracks += tracks
     LOG.info("Track list is " + str(tracks))
Example #34
def on_stopping():
    LOG.info('Enclosure is shutting down...')
Example #35
 def stop(self):
     super(FaceRecognizerSkill, self).shutdown()
     LOG.info("Face Recognizer Skill CLOSED")
     if self.socket:
         self.socket.close()
Example #36
    def handle_set_thermostat_intent(self, message):
        self._setup()
        if self.fhem is None:
            self.speak_dialog('fhem.error.setup')
            return
        LOG.debug("Starting Thermostat Intent")

        if message.data.get("device"):
            device = message.data.get("device")
        else:
            device = "thermostat"
        if message.data.get("room"):
            room = message.data.get("room")
        else:
            room = self.device_location

        LOG.debug("Device: %s" % device)
        LOG.debug("This is the message data: %s" % message.data)
        temperature = message.data["temp"]
        LOG.debug("desired temperature from message: %s" % temperature)

        allowed_types = 'thermostat'
        try:
            fhem_device = self._find_device(device, allowed_types, room)
        except ConnectionError:
            self.speak_dialog('fhem.error.offline')
            return
        if fhem_device is None:
            self.speak_dialog('fhem.device.unknown', data={"dev_name": device})
            return
        LOG.debug("Entity State: %s" % fhem_device['state'])

        device_id = fhem_device['id']
        target_device = device_id

        # defaults for min/max temp and step
        minValue = 5.0
        maxValue = 35.0
        minStep = 0.5
        unit = ""
        cmd = ""

        # check thermostat type, derive command and min/max values
        LOG.debug("fhem_device: %s" % fhem_device)
        # for that get thermostat device
        td = self.fhem.get_device(fhem_device['id'])
        if len(td) != 1:
            self.speak_dialog('fhem.device.unknown', data={"dev_name": device})
            return
        td = td[0]
        LOG.debug("td: %s" % td)
        if 'desired-temp' in td['Readings']:
            cmd = "desired-temp"
            if ('FBTYPE' in td['Readings']) and \
               (td['Readings']['FBTYPE'] == 'Comet DECT'):
                # LOG.debug("Comet DECT")
                minValue = 8.0
                maxValue = 28.0
            elif td['Internals']['TYPE'] == 'FHT':
                # LOG.debug("FHT")
                minValue = 6.0
                maxValue = 30.0
            elif td['Internals']['TYPE'] == 'CUL_HM':
                LOG.debug("HM")
                # test for Clima-Subdevice
                if 'channel_04' in td['Internals']:
                    target_device = td['Internals']['channel_04']
        elif 'desiredTemperature' in td['Readings']:
            # LOG.debug("MAX")
            cmd = "desiredTemperature"
            minValue = 4.5
            maxValue = 30.5
        elif 'desired' in td['Readings']:
            # LOG.debug("PID20")
            cmd = "desired"
        elif 'homebridgeMapping' in td['Attributes']:
            LOG.debug("homebridgeMapping")
            hbm = td['Attributes']['homebridgeMapping'].split(" ")
            for h in hbm:
                # TargetTemperature=desired-temp::desired-temp,
                # minValue=5,maxValue=35,minStep=0.5,nocache=1
                if h.startswith("TargetTemperature"):
                    targettemp = (h.split("=", 1)[1]).split(",")
                    LOG.debug("targettemp = %s" % targettemp)
                    for t in targettemp:
                        LOG.debug("t = %s" % t)
                        if t.startswith("desired-temp"):
                            t2 = t.split(":")
                            cmd = t2[0]
                            if t2[1] != '':
                                target_device = t2[1]
                        elif t.startswith("minValue"):
                            minValue = float(t.split("=")[1])
                        elif t.startswith("maxValue"):
                            maxValue = float(t.split("=")[1])
                        elif t.startswith("minStep"):
                            minStep = float(t.split("=")[1])

        if not cmd:
            LOG.info("FHEM device %s has unknown thermostat type" % device_id)
            self.speak_dialog('fhem.error.notsupported')
            return

        LOG.debug("target_device: %s cmd: %s" % (target_device, cmd))
        LOG.debug("minValue: %s maxValue: %s minStep: %s" %
                  (minValue, maxValue, minStep))

        # check if desired temperature is out of bounds
        if (float(temperature) < minValue) or (float(temperature) > maxValue) \
           or (float(temperature) % minStep != 0.0):
            self.speak_dialog('fhem.thermostat.badreq',
                              data={
                                  "minValue": minValue,
                                  "maxValue": maxValue,
                                  "minStep": minStep
                              })
            return

        action = "%s %s" % (cmd, temperature)
        LOG.debug("set %s %s" % (target_device, action))
        self.fhem.send_cmd("set {} {}".format(target_device, action))
        self.speak_dialog('fhem.set.thermostat',
                          data={
                              "dev_name": device,
                              "value": temperature,
                              "unit": unit
                          })
Example #37
 def handle_context_intent(self):
     if self.debug_mode:
         LOG.info("Kodi response: " + str(self.myKodi.Input.ContextMenu()))
     else:
         self.myKodi.Input.ContextMenu()  
Example #38
 def on_complete(self):
     LOG.info('Precise download complete!')
     self.download_complete = True
     self.show_download_progress.cancel()
     self._snd_msg('mouth.reset')
Example #39
    def download_skills(self, speak=False):
        """ Invoke MSM to install default skills and/or update installed skills

            Args:
                speak (bool, optional): Speak the result? Defaults to False
        """
        if not connected():
            LOG.error('msm failed, network connection not available')
            if speak:
                self.bus.emit(
                    Message("speak", {
                        'utterance':
                        dialog.get("not connected to the internet")
                    }))
            self.next_download = time.time() + 5 * MINUTES
            return False

        installed_skills = self.load_installed_skills()
        msm = SkillManager.create_msm()
        with msm.lock, self.thread_lock:
            default_groups = dict(msm.repo.get_default_skill_names())
            if msm.platform in default_groups:
                platform_groups = default_groups[msm.platform]
            else:
                LOG.info('Platform defaults not found, using DEFAULT '
                         'skills only')
                platform_groups = []
            default_names = set(
                chain(default_groups['default'], platform_groups))
            default_skill_errored = False

            def get_skill_data(skill_name):
                """ Get skill data structure from name. """
                for e in msm.skills_data.get('skills', []):
                    if e.get('name') == skill_name:
                        return e
                # if skill isn't in the list return empty structure
                return {}

            def install_or_update(skill):
                """Install missing defaults and update existing skills"""
                if get_skill_data(skill.name).get('beta'):
                    skill.sha = None  # Will update to latest head
                if skill.is_local:
                    skill.update()
                    if skill.name not in installed_skills:
                        skill.update_deps()
                elif skill.name in default_names:
                    try:
                        msm.install(skill, origin='default')
                    except Exception:
                        if skill.name in default_names:
                            LOG.warning('Failed to install default skill: ' +
                                        skill.name)
                            nonlocal default_skill_errored
                            default_skill_errored = True
                        raise
                installed_skills.add(skill.name)

            try:
                msm.apply(install_or_update, msm.list())
                if SkillManager.manifest_upload_allowed and is_paired():
                    try:
                        DeviceApi().upload_skills_data(msm.skills_data)
                    except Exception:
                        LOG.exception('Could not upload skill manifest')

            except MsmException as e:
                LOG.error('Failed to update skills: {}'.format(repr(e)))

        self.save_installed_skills(installed_skills)

        if speak:
            data = {'utterance': dialog.get("skills updated")}
            self.bus.emit(Message("speak", data))

        if default_skill_errored and self.num_install_retries < 10:
            self.num_install_retries += 1
            self.next_download = time.time() + 5 * MINUTES
            return False
        self.num_install_retries = 0

        with open(self.dot_msm, 'a'):
            os.utime(self.dot_msm, None)
        self.next_download = time.time() + self.update_interval

        return True
Example #40
 def write_plc(self, myTagName, myTagValue):
     LOG.info('Writing: ' + myTagName + ' A value of: ' + str(myTagValue))
     self.comm.Write(myTagName, myTagValue)
     self.comm.Close()
Example #41
    def process(self, data):
        # TODO: Look into removing this emit altogether.
        # We need to check if any other serial bus messages
        # are handled by other parts of the code
        if "mycroft.stop" not in data:
            self.ws.emit(Message(data))

        if "Command: system.version" in data:
            # This happens in response to the "system.version" message
            # sent during the construction of Enclosure()
            self.ws.emit(Message("enclosure.started"))

        if "mycroft.stop" in data:
            if has_been_paired():
                create_signal('buttonPress')
                self.ws.emit(Message("mycroft.stop"))

        if "volume.up" in data:
            self.ws.emit(
                Message("mycroft.volume.increase", {'play_sound': True}))

        if "volume.down" in data:
            self.ws.emit(
                Message("mycroft.volume.decrease", {'play_sound': True}))

        if "system.test.begin" in data:
            self.ws.emit(Message('recognizer_loop:sleep'))

        if "system.test.end" in data:
            self.ws.emit(Message('recognizer_loop:wake_up'))

        if "mic.test" in data:
            mixer = Mixer()
            prev_vol = mixer.getvolume()[0]
            mixer.setvolume(35)
            self.ws.emit(
                Message("speak", {'utterance': "I am testing one two three"}))

            time.sleep(0.5)  # Prevents recording the loud button press
            record("/tmp/test.wav", 3.0)
            mixer.setvolume(prev_vol)
            play_wav("/tmp/test.wav").communicate()

            # Test audio muting on arduino
            subprocess.call('speaker-test -P 10 -l 0 -s 1', shell=True)

        if "unit.shutdown" in data:
            # Eyes to soft gray on shutdown
            self.ws.emit(
                Message("enclosure.eyes.color", {
                    'r': 70,
                    'g': 65,
                    'b': 69
                }))
            self.ws.emit(Message("enclosure.eyes.timedspin",
                                 {'length': 12000}))
            self.ws.emit(Message("enclosure.mouth.reset"))
            time.sleep(0.5)  # give the system time to pass the message
            self.ws.emit(Message("system.shutdown"))

        if "unit.reboot" in data:
            # Eyes to soft gray on reboot
            self.ws.emit(
                Message("enclosure.eyes.color", {
                    'r': 70,
                    'g': 65,
                    'b': 69
                }))
            self.ws.emit(Message("enclosure.eyes.spin"))
            self.ws.emit(Message("enclosure.mouth.reset"))
            time.sleep(0.5)  # give the system time to pass the message
            self.ws.emit(Message("system.reboot"))

        if "unit.setwifi" in data:
            self.ws.emit(Message("system.wifi.setup", {'lang': self.lang}))

        if "unit.factory-reset" in data:
            self.ws.emit(
                Message("speak", {
                    'utterance':
                    mycroft.dialog.get("reset to factory defaults")
                }))
            subprocess.call('rm ~/.mycroft/identity/identity2.json',
                            shell=True)
            self.ws.emit(Message("system.wifi.reset"))
            self.ws.emit(Message("system.ssh.disable"))
            wait_while_speaking()
            self.ws.emit(Message("enclosure.mouth.reset"))
            self.ws.emit(Message("enclosure.eyes.spin"))
            self.ws.emit(Message("enclosure.mouth.reset"))
            time.sleep(5)  # give the system time to process all messages
            self.ws.emit(Message("system.reboot"))

        if "unit.enable-ssh" in data:
            # This is handled by the wifi client
            self.ws.emit(Message("system.ssh.enable"))
            self.ws.emit(
                Message("speak",
                        {'utterance': mycroft.dialog.get("ssh enabled")}))

        if "unit.disable-ssh" in data:
            # This is handled by the wifi client
            self.ws.emit(Message("system.ssh.disable"))
            self.ws.emit(
                Message("speak",
                        {'utterance': mycroft.dialog.get("ssh disabled")}))

        if "unit.enable-learning" in data or "unit.disable-learning" in data:
            enable = 'enable' in data
            word = 'enabled' if enable else 'disabled'

            LOG.info("Setting opt_in to: " + word)
            new_config = {'opt_in': enable}
            user_config = LocalConf(USER_CONFIG)
            user_config.merge(new_config)
            user_config.store()

            self.ws.emit(
                Message("speak",
                        {'utterance': mycroft.dialog.get("learning " + word)}))
Example #42
 def handle_scanaudio_intent(self):
     if self.debug_mode:
         LOG.info("Kodi response: " + str(self.myKodi.AudioLibrary.Scan()))
     else:
         self.myKodi.AudioLibrary.Scan()   
Example #43
 def open(self):
     LOG.info('Client IP: ' + self.request.remote_ip)
     self.peer = self.request.remote_ip
     clients[self.peer] = self
     self.create_internal_emitter()
     self.write_message("Welcome to Jarbas Web Client")
Example #44
 def create_wakeup_recognizer(self):
     LOG.info("creating stand up word engine")
     word = self.config.get("stand_up_word", "wake up")
     return HotWordFactory.create_hotword(word, lang=self.lang, loop=self)
Example #45
def handle_record_begin():
    LOG.info("Begin Recording...")
    ws.emit(Message('recognizer_loop:record_begin'))
Example #46
 def lower_volume(self, message):
     LOG.info('lowering volume')
     self.server.setvol(10)
     self.volume_is_low = True
Example #47
 def handle_info_intent(self):
     if self.debug_mode:
         LOG.info("Kodi response: " + str(self.myKodi.Input.Info()))
     else:
         self.myKodi.Input.Info()  
Example #48
 def __init__(self):
     super(FhemSkill, self).__init__(name="FhemSkill")
     LOG.info("__init__")
     self.fhem = None
     self.enable_fallback = False
     self.device_location = ""
Example #49
def run_apriori(logs_file_path, min_supp=0.05, min_confidence=0.8):
    hashes_temp = []
    table_csv = []
    csv_path = os.path.expanduser(
        "~/.mycroft/skills/HabitMinerSkill/inputApriori.csv")
    date_time_obj0 = datetime.strptime('2018-01-01 00:00:00.0',
                                       '%Y-%m-%d %H:%M:%S.%f')
    habit_manager = HabitsManager()
    habit_manager.load_files()

    if not os.path.getsize(logs_file_path):
        return
    """
    Open logs and put them in a list,
    same line if consecutive logs are within a 5-minute interval
    """
    with open(logs_file_path) as json_data:
        for i, line in enumerate(json_data):
            data = json.loads(line)
            date_time_obj1 = datetime.strptime(data['datetime'],
                                               '%Y-%m-%d %H:%M:%S.%f')
            delta = date_time_obj1 - date_time_obj0
            del data['datetime']
            hash = hashlib.md5(json.dumps(data)).hexdigest()

            if delta > timedelta(minutes=5):
                table_csv.append(hashes_temp)
                hashes_temp = []
                hashes_temp.append(hash)
                date_time_obj0 = date_time_obj1
            else:
                hashes_temp.append(hash)

    del table_csv[0]
    """
    Converts logs list to csv so that
    we can execute the apriori algorithm on them
    """
    with open(csv_path, 'w+') as fp:
        LOG.info('opened')
        writer = csv.writer(fp, delimiter=',')
        for row in table_csv:
            writer.writerow(row)

    in_file = data_from_file(csv_path)

    _, rules = apriori(in_file, min_supp, min_confidence)

    # reformat the rules and sort the tuples in it
    hashes_temp = []

    for rule in rules:
        hash = rule[0][0] + rule[0][1]
        hashes_temp.append(sorted(hash))

    # this is to remove duplicates
    hashes = []

    for tuple in hashes_temp:
        if tuple not in hashes:
            hashes.append(tuple)

    # Hash to json data
    habits = []
    habit = []
    intents = []

    for hash in hashes:
        for intent in hash:
            with open(logs_file_path) as json_data:
                for line in json_data:
                    data = json.loads(line)
                    del data['datetime']
                    hash = hashlib.md5(json.dumps(data)).hexdigest()
                    if hash == intent:
                        habit.append(json.dumps(data))
                        intents.append(data)
                        break
        if not habit_manager.check_skill_habit(intents):
            habits.append(habit)
        habit = []

    # format habits as expected and register them
    intents = []

    for habit in habits:
        for intent in habit:
            intent = {
                'last_utterance': json.loads(intent)['utterance'],
                'name': json.loads(intent)['type'],
                'parameters': json.loads(intent)['parameters']
            }
            intents.append(intent)
        habit_manager.register_habit("skill", intents)
        intents = []

    os.remove(csv_path)
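
One portability note on the hashing in this example: hashlib.md5() requires bytes, so on Python 3 the json.dumps(...) result must be encoded first. A small helper sketch (sort_keys is an addition that makes the digest independent of key order):

import hashlib
import json

def intent_hash(data):
    """Stable md5 hex digest of a log entry (datetime already removed)."""
    # .encode() is required on Python 3, where json.dumps returns str
    return hashlib.md5(
        json.dumps(data, sort_keys=True).encode('utf-8')).hexdigest()
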
Example #50
File: base.py  Project: wummel/mycroft-core
 def send_message(self, message):
     if isinstance(message, Message):
         self.write_message(message.serialize())
     else:
         LOG.info('message: {}'.format(message))
         self.write_message(str(message))
Example #51
 def stop(self, message=None):
     LOG.info('Handling stop request')
     self.server.clear()
     self.server.stop()
Example #52
def on_ready():
    LOG.info("Enclosure started!")
Example #53
 def load(self):
     LOG.info('ATTEMPTING TO LOAD SKILL: ' + self.skill_id)
     self._load()
Example #54
    def handle_blind_intent(self, message):
        self._setup()
        if self.fhem is None:
            self.speak_dialog('fhem.error.setup')
            return
        LOG.info("Starting Blind Intent")
        LOG.info("message.data {}".format(message.data))

        device = message.data.get("device")
        action = ""
        percent = None
        if message.data.get("open"):
            action = "open"
            speak_action = message.data.get("open")
            target_pct = 0
        elif message.data.get("close"):
            action = "closed"  # sic!
            speak_action = message.data.get("close")
            target_pct = 100
        elif message.data.get("percent"):
            percent = message.data.get("percent")
            if percent.isdigit():
                action = "pct"
                target_pct = int(percent)

        if not action:
            LOG.info("no action for blind intent found!")
            return False
        if message.data.get("room"):
            room = message.data.get("room")
        else:
            # if no room is given use device location
            room = self.device_location
        allowed_types = 'blind'
        LOG.info("Device: %s" % device)
        LOG.info("Action: %s" % action)
        LOG.info("Room: %s" % room)
        if percent:
            LOG.info("Percent: %s" % percent)
        try:
            fhem_device = self._find_device(device, allowed_types, room)
        except ConnectionError:
            self.speak_dialog('fhem.error.offline')
            return
        if fhem_device is None:
            self.speak_dialog('fhem.device.unknown', data={"dev_name": device})
            return
        LOG.info("Entity State: %s" % fhem_device['state'])

        open_pct = 0
        closed_pct = 100

        blind = self.fhem.get_device(fhem_device['id'])[0]
        if blind['Internals']['TYPE'] == 'ROLLO':
            if action == "pct":
                self.fhem.send_cmd("set {} pct {}".format(
                    fhem_device['id'], target_pct))
                self.speak_dialog('fhem.blind.set',
                                  data={
                                      "device": device,
                                      "percent": target_pct
                                  })
            else:
                self.fhem.send_cmd("set {} {}".format(fhem_device['id'],
                                                      action))
                self.speak_dialog('fhem.blind',
                                  data={
                                      "device": device,
                                      "action": speak_action,
                                      "room": room
                                  })
        else:
            self.speak_dialog('fhem.error.notsupported')
            return
Example #55
 def _skip_load(self):
     log_msg = 'Skill {} is blacklisted - it will not be loaded'
     LOG.info(log_msg.format(self.skill_id))
Example #56
 def reload(self):
     LOG.info('ATTEMPTING TO RELOAD SKILL: ' + self.skill_id)
     if self.instance:
         self._unload()
     self._load()
Example #57
 def handle_set_stack_light_intent(self, message):
     LOG.info('Condor.ai was asked: ' + message.data.get('utterance'))
     color_kw = message.data.get("ColorKeyword")
     self.speak_dialog("set_stacklight",
                       data={"result": str(color_kw)},
                       wait=True)
Example #58
File: ws.py  Project: kenstars/Jarvis-Basic
 def echo(message):
     LOG.info(message)
Example #59
 def handle_campus_intent(self, message):
     LOG.info('Condor.ai was asked: ' + message.data.get('utterance'))
     str_remainder = str(message.utterance_remainder())
     self.speak_dialog("campus_intro", wait=True)
     self.speak_dialog("campus", wait=True)
     self.card_conversation()
Example #60
 def handle_robot_start_intent(self, message):
     LOG.info('Condor.ai was asked: ' + message.data.get('utterance'))
     str_remainder = str(message.utterance_remainder())
     self.start_robot()