Example no. 1
    def handle_get_stt(self, message: Message):
        """
        Handles a request for stt.
        Emits a response to the sender with stt data or error data
        :param message: Message associated with request
        """
        if message.data.get("audio_data"):
            wav_file_path = self._write_encoded_file(
                message.data.pop("audio_data"))
        else:
            wav_file_path = message.data.get("audio_file")
        lang = message.data.get("lang")
        ident = message.context.get("ident") or "neon.get_stt.response"
        LOG.info(f"Handling STT request: {ident}")
        if not wav_file_path:
            self.bus.emit(message.reply(
                ident, data={"error": f"audio_file not specified!"}))
            return

        if not os.path.isfile(wav_file_path):
            self.bus.emit(message.reply(
                ident, data={"error": f"{wav_file_path} Not found!"}))
            return

        try:
            _, parser_data, transcriptions = \
                self._get_stt_from_file(wav_file_path, lang)
            self.bus.emit(message.reply(ident,
                                        data={"parser_data": parser_data,
                                              "transcripts": transcriptions}))
        except Exception as e:
            LOG.error(e)
            self.bus.emit(message.reply(ident, data={"error": repr(e)}))
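Examples 13 and 14 further down exercise this handler from the test side; a minimal client-side sketch of the same call (assuming a connected MessageBusClient named bus and an existing WAV file at an illustrative path) might look like:

# Minimal client-side sketch for the handler above. "bus" is assumed to be
# a connected MessageBusClient; the file path is illustrative.
context = {"ident": "neon.get_stt.response"}
resp = bus.wait_for_response(
    Message("neon.get_stt", {"audio_file": "/tmp/example.wav"}, context),
    context["ident"], 15.0)
if resp:
    print(resp.data.get("transcripts") or resp.data.get("error"))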
Example no. 2
def walkOnNetwork(selfGraph, startWord, lengthWalk):
    """
    Do a walk on selfGraph, starting from a concept, for lengthWalk steps
    or until the walk ends up at a dead end of the graph.
    """
    nStep = 0
    deadEnd = False
    word = startWord
    while nStep < lengthWalk and not deadEnd:
        # Reminder: selfGraph[word][1] is a dictionary of the neighbors.
        if len(selfGraph[word][1]) > 0:
            # random.choice needs a sequence, so materialize the keys.
            nextWord = random.choice(list(selfGraph[word][1].keys()))
            statement = "Path {} towards {}, {}. ".format(
                round(selfGraph[word][1][nextWord], 2), nextWord,
                round(selfGraph[nextWord][0], 3))
            print(statement)
            client.emit(Message('speak', data={'utterance': statement}))
            word = nextWord
            nStep += 1
        else:
            deadEnd = True
            client.emit(Message(
                'speak', data={'utterance': "It is a dead end of myself."}))
    if not deadEnd:
        client.emit(Message('speak', data={'utterance': "Walk ended here."}))
Example no. 3
    def on_no_internet(self, event=None):
        if connected():
            # One last check to see if connection was established
            return

        if time.time() - Enclosure._last_internet_notification < 30:
            # Don't bother the user with multiple notifications within 30 secs
            return

        Enclosure._last_internet_notification = time.time()

        # TODO: This should go into EnclosureMark1 subclass of Enclosure.
        if has_been_paired():
            # Handle the translation within that code.
            self.bus.emit(
                Message(
                    "speak", {
                        'utterance':
                        "This device is not connected to the Internet. "
                        "Either plug in a network cable or set up your "
                        "wifi connection."
                    }))
        else:
            # enter wifi-setup mode automatically
            self.bus.emit(Message('system.wifi.setup', {'lang': self.lang}))
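The rate limiting above relies on a class-level timestamp; a minimal sketch of the attribute it assumes on the surrounding Enclosure class (the class body is not shown in this listing):

class Enclosure:
    # Shared across instances so repeated on_no_internet() calls are
    # limited to one spoken notification per 30 seconds.
    _last_internet_notification = 0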
Example no. 4
    def on_manage_skills_pressed(self):
        """ Adds the checked skills to self.active_skills and the unchecked to
            self.unactive_skills and activates or deactivates those skills.
        """
        deactivated = []
        activated = []
        for cb in self.active_skills_checkBoxes:
            if not cb.isChecked():
                self.bus.emit(
                    Message('skillmanager.deactivate', {'skill': cb.text()}))
                deactivated.append(cb.text())

        for cb in self.unactive_skills_checkBoxes:
            if cb.isChecked():
                self.bus.emit(
                    Message('skillmanager.activate', {'skill': cb.text()}))
                activated.append(cb.text())

        self.active_skills = [
            skill for skill in self.active_skills if skill not in deactivated
        ]
        self.active_skills.extend(activated)

        self.unactive_skills = [
            skill for skill in self.unactive_skills if skill not in activated
        ]
        self.unactive_skills.extend(deactivated)

        self.skills_dialog.hide()
        self.on_skills_pressed()
Example no. 5
    def _do_net_check(self):
        # TODO: This should live in the derived Enclosure, e.g. EnclosureMark1
        LOG.info("Checking internet connection")
        if not connected():  # and self.conn_monitor is None:
            if has_been_paired():
                # TODO: Enclosure/localization
                self.speak("This unit is not connected to the Internet. "
                           "Either plug in a network cable or setup your "
                           "wifi connection.")
            else:
                # Begin the unit startup process, this is the first time it
                # is being run with factory defaults.

                # TODO: This logic should be in EnclosureMark1
                # TODO: Enclosure/localization

                # Don't listen to mic during this out-of-box experience
                self.bus.emit(Message("mycroft.mic.mute"))
                # Set up a handler to unmute the mic at the end of
                # onboarding, i.e. after pairing is complete
                self.bus.once('mycroft.paired', self._handle_pairing_complete)

                self.speak(mycroft.dialog.get('mycroft.intro'))
                wait_while_speaking()
                time.sleep(2)  # a pause sounds better than just jumping in

                # Kick off wifi-setup automatically
                data = {'allow_timeout': False, 'lang': self.lang}
                self.bus.emit(Message('system.wifi.setup', data))
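_handle_pairing_complete is registered above but not shown; a plausible minimal version (an assumption, not the project's actual handler) simply undoes the mute:

    def _handle_pairing_complete(self, message):
        # Hypothetical handler: unmute the mic once pairing completes,
        # reversing the mute set at the start of the out-of-box flow.
        self.bus.emit(Message("mycroft.mic.unmute"))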
Example no. 6
def loadSelf(firstTime, audibleSelfQuest, nSearch):
    """
    Load the VA's selfGraph and memory as last saved, or build them if
    this is the first time.
    """
    if firstTime:
        whatVAHeard = ""
        phrase = "Hatching self in process..."
        print(phrase)
        if audibleSelfQuest:
            client.emit(Message('speak', data={'utterance': phrase}))
        selfGraph, wordsMemory, description = coreQuest.hatchSelf(nSearch)
        print(description)
        if audibleSelfQuest:
            client.emit(Message('speak', data={'utterance': description}))

    else:
        with open('./workshop/data/selfgraph.txt', 'r') as json_file:
            # The graph is saved with json.dump (see selfQuest below), so
            # read it back with json.load rather than eval.
            selfGraph = json.load(json_file)
        with open('./workshop/data/wordsMemory.txt', "r") as f:
            # List of word concepts he has looked up; splitlines() avoids
            # trailing newlines that would break the counter comparisons.
            wordsMemory = f.read().splitlines()
        with open('./workshop/data/whatVAHeard.txt', "r") as f:
            # Last history heard before Chris learned
            whatVAHeard = f.read().replace('\n', '')
        phrase = "I am here. Self Quest can begin."
        print(phrase)
        if audibleSelfQuest:
            client.emit(Message('speak', data={'utterance': phrase}))

    return selfGraph, wordsMemory, whatVAHeard
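The three files read above are expected to hold, respectively, a JSON dict (selfQuest below saves it with json.dump), a newline-separated word list whose element 0 is a string counter (see selfMapping), and free text. A sketch that seeds them for a first run (contents illustrative):

import json

# Illustrative seed files in the formats loadSelf() expects.
with open('./workshop/data/selfgraph.txt', 'w') as f:
    json.dump({"self": (1.0, {})}, f)  # tuples round-trip as JSON lists
with open('./workshop/data/wordsMemory.txt', 'w') as f:
    f.write("0")  # element 0 is the counter; no words stored yet
with open('./workshop/data/whatVAHeard.txt', 'w') as f:
    f.write("")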
Example no. 7
def selfMapping(word, selfGraph, wordsMemory, ifMLDrift, lengthML, nSimMax,
                lengthWalk, walkNetwork, audibleSelfQuest):
    """
    Self mapping: look whether a specific word is related to his self, and
    possibly grow his selfGraph. This can be audible, and can integrate an
    ML drift and a walk on the network.
    """
    answer = ""
    drift = ""

    # (1) Ask Chris about a word on Wikipedia
    if audibleSelfQuest:
        question = "Tell me about " + word + "."  # No wake word needed?
        phrase = wonder(word)  # Phrase to be heard; could generate others
        client.emit(Message('speak', data={'utterance': phrase}))
        print(phrase)
        answer = askVA(question)  # The VA answers and reads the Wikipedia page here

    # (2) Possible ML drift from the last sentence (audibleSelfQuest case only)
    if ifMLDrift:
        drift = oneMLDrift(answer, lengthML)

    # (3) Self-awareness quest: look whether this word is related to self.
    # The selfGraph may be modified here!
    print("Looking if {} is related to Self...".format(word))
    selfGraph, ifadded, simWord, simScore = coreQuest.isSelf(
        selfGraph, word, nSimMax)
    nSelf = len(list(selfGraph.keys()))

    # (4) State the progress of his self-awareness quest, aloud if
    # audibleSelfQuest, otherwise only printed
    if ifadded:  # Case where the word was found to be related to self
        selfAwareness = ("Oh, " + word + " is similar to " + simWord +
                         " and hence to me at " + str(round(simScore, 2)) +
                         ". Now I know more about myself. ")
        print(selfAwareness)
    else:  # Case where the word was not found to be related to self
        selfAwareness = ("Whatever, " + word +
                         ", may not be very related to myself. ")
        print(selfAwareness)
    if audibleSelfQuest:
        client.emit(Message('speak', data={'utterance': selfAwareness}))

    # (5) Walk on the network if requested and the word was added
    if walkNetwork and ifadded:
        walkOnNetwork(selfGraph, word, lengthWalk)

    # (6) Add the looked-up word to memory, minding the case where memory
    # is full. Element 0 of wordsMemory is a string counter tracking the
    # most recently written slot.
    if len(wordsMemory) == maxWordsMemory + 1:
        # Memory full: overwrite the oldest entry.
        if wordsMemory[0] == str(len(wordsMemory) - 1):
            wordsMemory[0] = str(1)
        else:
            wordsMemory[0] = str(int(wordsMemory[0]) + 1)
        # List indices must be ints, so convert the counter before indexing.
        wordsMemory[int(wordsMemory[0])] = word
    else:
        # Store the word once its Wikipedia page has been checked; only
        # lowercased words are kept here.
        wordsMemory.append(word.lower())
        wordsMemory[0] = str(int(wordsMemory[0]) + 1)  # element 0 keeps count

    return selfGraph, wordsMemory, answer, drift, ifadded
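The wordsMemory bookkeeping in step (6) is easy to misread: element 0 is a string counter pointing at the most recently written slot, and the overwrite branch only triggers once the list holds maxWordsMemory words. A worked trace with maxWordsMemory = 3 (a hypothetical value):

# Worked trace of the step (6) bookkeeping, maxWordsMemory = 3:
#   ["0"]                        start: counter only, no words
#   ["1", "cat"]                 append "cat", counter -> "1"
#   ["2", "cat", "dog"]          append "dog", counter -> "2"
#   ["3", "cat", "dog", "sun"]   append "sun"; len == maxWordsMemory + 1
#   ["1", "ant", "dog", "sun"]   memory full: counter wraps to "1" and
#                                the oldest slot is overwritten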
Example no. 8
def selfQuest(firstTime=False,
              walkNetwork=False,
              audibleSelfQuest=True,
              visualizeGraph=True,
              ifMLDrift=False,
              lengthML=100,
              nSimMax=50,
              nSearch=100,
              lengthWalk=10,
              finetuned_ML_model=False,
              path_finetuned_ML_model='./workshop/models/gpt-2'):
    """
    Self quest with possible ML drifts, walks on the network, audible
    quest, visualization of the graph, etc. The VA is aiming at growing
    his self concept from what he has heard (in the file whatVAHeard).
    """

    # Initialize machine learning
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    if finetuned_ML_model:
        model = GPT2LMHeadModel.from_pretrained(path_finetuned_ML_model)
    else:
        model = GPT2LMHeadModel.from_pretrained("gpt2")

    # (0) Load SELF, the memory, and what the VA has heard since last time.
    # selfGraph is a dictionary whose keys are concepts and whose values
    # are (weight, neighbors) couples.
    selfGraph, wordsMemory, whatVAHeard = loadSelf(firstTime, audibleSelfQuest,
                                                   nSearch)

    # (1) Self quest over the text whatVAHeard, with possible walks on the
    # network, possible ML drifts, and parameters. Skipped on the first
    # run, where whatVAHeard is assumed to be empty.
    if not firstTime:
        selfGraph, wordsMemory, addedWords, blablaQuest = selfMapLoops(
            whatVAHeard, selfGraph, ifMLDrift, lengthML, nSimMax, wordsMemory,
            nSearch, walkNetwork, lengthWalk, audibleSelfQuest)

    # (2) State the new self
    nN = len(list(selfGraph.keys()))
    phrase = ("Self has now " + str(nN) + " nodes. Self is made of: " +
              ', '.join(list(selfGraph.keys())))
    print(phrase)
    if audibleSelfQuest:
        client.emit(Message('speak', data={'utterance': phrase}))

    # (3) Update the selfGraph and the memory
    with open('./workshop/data/selfgraph.txt', 'w') as outfile:
        json.dump(selfGraph, outfile)

    with open('./workshop/data/wordsMemory.txt', "w") as f:
        f.write("\n".join(wordsMemory))

    # (4) Visualize if desired
    if visualizeGraph:
        graph, descriptionSelf = coreQuest.createGraph(selfGraph)
        coreQuest.drawGraph(graph)
        if audibleSelfQuest:  # Description of himself; comment out if too chatty
            client.emit(Message('speak', data={'utterance': descriptionSelf}))
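A typical invocation of the whole quest, kept silent and without the graph drawing (flag values illustrative; the messagebus client, GPT-2 weights, and the ./workshop/data files must already be in place):

# Illustrative invocation of the quest defined above.
selfQuest(firstTime=False,
          walkNetwork=True,
          audibleSelfQuest=False,
          visualizeGraph=False,
          lengthWalk=5)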
Example no. 9
 def test_serialize_deserialize(self):
     """Assert that a serialized message is recreated when deserialized."""
     source = Message('test_type',
                      data={
                          'robot': 'marvin',
                          'android': 'data'
                      },
                      context={'origin': 'earth'})
     msg_string = source.serialize()
     reassembled = Message.deserialize(msg_string)
     self.assertEqual(source.msg_type, reassembled.msg_type)
     self.assertEqual(source.data, reassembled.data)
     self.assertEqual(source.context, reassembled.context)
Example no. 10
 def test_response(self):
     """Assert that the .response is added to the message type for response.
     """
     source = Message('test_type',
                      data={
                          'robot': 'marvin',
                          'android': 'data'
                      },
                      context={'origin': 'earth'})
     response_msg = source.response()
     self.assertEqual(response_msg.msg_type, "test_type.response")
     self.assertEqual(response_msg.data, {})
     self.assertEqual(response_msg.context, source.context)
Example no. 11
def reset(gpio, level, tick):
    if pins.read(24) == 1:
        client.emit(Message('speak', {"utterance": "Shutting Down", "lang": "en-us"}))
        light.pulse(.2, 100, 0, 10)
        time.sleep(3)
        # self.bus.emit(Message('system.shutdown'))  # docs say this "force[s] a linux shutdown", but I'll believe it when I see it (it wicked doesn't do that)
        subprocess.Popen(['sudo', 'shutdown', '-h', 'now'])
    else:
        client.emit(Message('speak', {"utterance": "Restarting", "lang": "en-us"}))
        light.pulse(.2, 100, 0, 6)
        time.sleep(3)
        # self.bus.emit(Message('system.restart'))  # same deal here
        subprocess.Popen(['sudo', 'shutdown', '-r', 'now'])
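The (gpio, level, tick) parameters match a pigpio edge callback; a sketch of how reset might be wired up, assuming pins is a pigpio.pi() connection and the button sits on GPIO 24 (the edge choice is a guess):

import pigpio

# Assumption: "pins" is the pigpio connection reset() reads from.
pins = pigpio.pi()
pins.callback(24, pigpio.FALLING_EDGE, reset)  # invoke reset() on a button edge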
Example no. 12
    def handle_radio_talanthus(self, message):
        print('Setting up client to connect to a local mycroft instance')
        client = MessageBusClient()
        client.run_in_thread()

        print('Sending speak message...')
        client.emit(Message('speak', data={'utterance': 'Currently at the top of the bounty board is the calamo five at ten million credits a head'}))
Example no. 13
 def test_get_stt_no_file(self):
     context = {"client": "tester", "ident": "123", "user": "******"}
     stt_resp = self.bus.wait_for_response(
         Message("neon.get_stt", {}, context), context["ident"])
     self.assertEqual(stt_resp.context, context)
     self.assertIsInstance(stt_resp.data.get("error"), str)
     self.assertEqual(stt_resp.data["error"], "audio_file not specified!")
Example no. 14
 def test_get_stt_invalid_file_path(self):
     context = {"client": "tester", "ident": "1234", "user": "******"}
     stt_resp = self.bus.wait_for_response(
         Message("neon.get_stt", {"audio_file": "~/invalid_file.wav"},
                 context), context["ident"])
     self.assertEqual(stt_resp.context, context)
     self.assertIsInstance(stt_resp.data.get("error"), str)
Example no. 15
def play():
    print('play')
#    client.emit(Message('mycroft.audio.service.resume'))
    client.emit(Message('play:start',
                       {'skill_id': 'mycroft-spotify.forslund',
                        'phrase': '',
                        'callback_data': {'type': 'continue'}}))
Example no. 16
    def mouth_display_png(self,
                          image_absolute_path,
                          invert=False,
                          x=0,
                          y=0,
                          refresh=True):
        """ Send an image to the enclosure.

        Args:
            image_absolute_path (string): The absolute path of the image
            invert (bool): inverts the image being drawn.
            x (int): x offset for image
            y (int): y offset for image
            refresh (bool): specify whether to clear the faceplate before
                            displaying the new image or not.
                            Useful if you'd like to display multiple images
                            on the faceplate at once.
        """
        self.display_manager.set_active(self.name)
        self.bus.emit(
            Message("enclosure.mouth.display_image", {
                'img_path': image_absolute_path,
                'xOffset': x,
                'yOffset': y,
                'invert': invert,
                'clearPrev': refresh
            },
                    context={"destination": ["enclosure"]}))
Example no. 17
    def test_audio_input_valid(self):
        handle_utterance = mock.Mock()
        self.bus.once("recognizer_loop:utterance", handle_utterance)
        context = {
            "client": "tester",
            "ident": "11111",
            "user": "******",
            "extra_data": "something"
        }
        audio_data = encode_file_to_base64_string(
            os.path.join(AUDIO_FILE_PATH, "stop.wav"))
        stt_resp = self.bus.wait_for_response(
            Message("neon.audio_input", {"audio_data": audio_data}, context),
            context["ident"], 60.0)
        self.assertIsInstance(stt_resp, Message)
        for key in context:
            self.assertIn(key, stt_resp.context)
            self.assertEqual(context[key], stt_resp.context[key])
        self.assertIsInstance(stt_resp.data.get("skills_recv"), bool,
                              stt_resp.serialize())

        handle_utterance.assert_called_once()
        message = handle_utterance.call_args[0][0]
        self.assertIsInstance(message, Message)
        for key in context:
            self.assertIn(key, message.context)
            self.assertEqual(context[key], message.context[key])
        self.assertIsInstance(message.data["utterances"], list, message.data)
        self.assertIn("stop", message.data["utterances"],
                      message.data.get("utterances"))
        self.assertIsInstance(message.context["timing"], dict)
        self.assertEqual(message.context["destination"], ["skills"])
Example no. 18
    def on_message(self, message):
        LOG.info("Received: {}".format(message))
        msg = json.loads(message)
        if (msg.get('type') == "mycroft.events.triggered" and
            (msg.get('event_name') == 'page_gained_focus'
             or msg.get('event_name') == 'system.gui.user.interaction')):
            # System event, a page was changed
            msg_type = 'gui.page_interaction'
            msg_data = {
                'namespace': msg['namespace'],
                'page_number': msg['parameters'].get('number'),
                'skill_id': msg['parameters'].get('skillId')
            }
        elif msg.get('type') == "mycroft.events.triggered":
            # A normal event was triggered
            msg_type = '{}.{}'.format(msg['namespace'], msg['event_name'])
            msg_data = msg['parameters']

        elif msg.get('type') == 'mycroft.session.set':
            # A value was changed; send it back to the skill
            msg_type = '{}.{}'.format(msg['namespace'], 'set')
            msg_data = msg['data']

        else:
            # Unknown message type: msg_type/msg_data were never set,
            # so there is nothing to forward.
            return

        message = Message(msg_type, msg_data)
        LOG.info('Forwarding to bus...')
        self.application.enclosure.bus.emit(message)
        LOG.info('Done!')
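As a worked example of the first branch, a focus change arriving on the GUI socket is rewritten into a gui.page_interaction bus message (payload values illustrative):

# Illustrative input payload and the bus message the code above builds:
# in:  {"type": "mycroft.events.triggered",
#       "namespace": "mycroft-weather.mycroftai",
#       "event_name": "page_gained_focus",
#       "parameters": {"number": 1, "skillId": "mycroft-weather.mycroftai"}}
# out: Message("gui.page_interaction",
#              {"namespace": "mycroft-weather.mycroftai",
#               "page_number": 1,
#               "skill_id": "mycroft-weather.mycroftai"})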
Example no. 19
 def reset(self):
     """The enclosure should restore itself to a started state.
     Typically this would be represented by the eyes being 'open'
     and the mouth reset to its default (smile or blank).
     """
     self.bus.emit(
         Message("enclosure.reset", context={"destination": ["enclosure"]}))
Example no. 20
def simple_cli(lang="en-us"):
    global bSimple
    bSimple = True

    bus.on('speak', handle_speak)
    try:
        while True:
            # Sleep for a while so all the output that results
            # from the previous command finishes before we print.
            time.sleep(1.5)
            print("Input (Ctrl+C to quit):")
            line = sys.stdin.readline()
            bus.emit(
                Message("recognizer_loop:utterance", {
                    'utterances': [line.strip()],
                    "lang": lang
                }, {
                    'client_name': 'mycroft_simple_cli',
                    'source': 'cli',
                    'destination': ["skills"]
                }))
    except KeyboardInterrupt:
        # User hit Ctrl+C to quit
        print("")
    except Exception as e:
        LOG.exception(e)
        event_thread.exit()
        sys.exit()
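handle_speak is registered at the top of simple_cli but not shown here; a minimal version (a sketch, not the project's actual handler) just prints what would have been spoken:

def handle_speak(message):
    # Sketch of the handler registered above: print the utterance
    # instead of sending it to TTS.
    print("> " + message.data.get("utterance", ""))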
Example no. 21
 def eyes_brightness(self, level=30):
     """Set the brightness of the eyes in the display.
     Args:
         level (int): 1-30, bigger numbers being brighter
     """
     self.bus.emit(
         Message("enclosure.eyes.level", {'level': level},
                 context={"destination": ["enclosure"]}))
Example no. 22
 def send(self, response_type=None, timeout=10):
     self.response = Message(None, None, None)
     if response_type is None:
         response_type = self.query.type + ".reply"
     self.add_response_type(response_type)
     self.bus.emit(self.query)
     self._wait_response(timeout)
     return self.response
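add_response_type and _wait_response are not shown in this listing; the usual shape of such a request/response helper (a sketch under the assumption that replies are captured with one-shot bus handlers and a threading.Event, not the actual implementation) is:

import threading

class BusQuerySketch:
    """Hypothetical skeleton behind send() above; names are assumptions."""

    def __init__(self, bus, query):
        self.bus = bus
        self.query = query
        self.response = Message(None, None, None)
        self._answered = threading.Event()

    def add_response_type(self, response_type):
        # One-shot handler: capture the first reply of this type.
        self.bus.once(response_type, self._receive_response)

    def _receive_response(self, message):
        self.response = message
        self._answered.set()

    def _wait_response(self, timeout):
        # Block until a reply arrives or the timeout passes.
        self._answered.wait(timeout)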
Example no. 23
 def eyes_blink(self, side):
     """Make the eyes blink
     Args:
         side (str): 'r', 'l', or 'b' for 'right', 'left' or 'both'
     """
     self.bus.emit(
         Message("enclosure.eyes.blink", {'side': side},
                 context={"destination": ["enclosure"]}))
Example no. 24
 def system_blink(self, times):
     """The 'eyes' should blink the given number of times.
     Args:
         times (int): number of times to blink
     """
     self.bus.emit(
         Message("enclosure.system.blink", {'times': times},
                 context={"destination": ["enclosure"]}))
Example no. 25
 def test_get_stt_invalid_file_type(self):
     context = {"client": "tester", "ident": "123456", "user": "******"}
     stt_resp = self.bus.wait_for_response(
         Message("neon.get_stt",
                 {"audio_file": os.path.join(AUDIO_FILE_PATH, "test.txt")},
                 context), context["ident"])
     self.assertEqual(stt_resp.context, context)
     self.assertIsInstance(stt_resp.data.get("error"), str)
Example no. 26
    def playback_time(self):
        """ Request information of current playback time.

            Returns:
                int: current playback position in seconds, or 0 if no
                track is playing or no response was received
        """
        response = self.bus.wait_for_response(
            Message('mycroft.audio.service.playback_time'))
        data = response.data if response else {"time": 0}
        return data.get("time") or 0
Example no. 27
    def available_backends(self):
        """Return available audio backends.

        Returns:
            dict with backend names as keys
        """
        msg = Message('mycroft.audio.service.list_backends')
        response = self.bus.wait_for_response(msg)
        return response.data if response else {}
Example no. 28
 def async_volume_handler(self, vol):
     LOG.error("ASYNC SET VOL PASSED IN %s" % (vol,))
     if vol > 1.0:
         vol = vol / 10
     self.current_volume = vol
     LOG.error("ASYNC SET VOL TO %s" % (self.current_volume,))
     # notify anybody listening on the bus who cares
     self.bus.emit(Message("hardware.volume", {
         "volume": self.current_volume}, context={"source": ["enclosure"]}))
Example no. 29
 def mouth_text(self, text=""):
     """Display text (scrolling as needed)
     Args:
         text (str): text string to display
     """
     self.display_manager.set_active(self.name)
     self.bus.emit(
         Message("enclosure.mouth.text", {'text': text},
                 context={"destination": ["enclosure"]}))
Example no. 30
    def on_volume_set(self, message):
        self.current_volume = message.data.get("percent", self.current_volume)
        LOG.info('Mark2:interface.py set volume to %s' %
                 (self.current_volume,))
        self.m2enc.hardware_volume.set_volume(float(self.current_volume))

        # notify anybody listening on the bus who cares
        self.bus.emit(Message("hardware.volume", {
            "volume": self.current_volume}, context={"source": ["enclosure"]}))