Example #1
 def mouth_smile(self):
     """Show a 'smile' image or animation"""
     self.bus.emit(
         Message("enclosure.mouth.smile",
                 context={"destination": ["enclosure"]}))
     self.display_manager.set_active(self.name)
Example #2
 def deactivate_mouth_events(self):
     """Disable movement of the mouth with speech"""
     self.bus.emit(
         Message('enclosure.mouth.events.deactivate',
                 context={"destination": ["enclosure"]}))
Example #3
def pause():
    print('pause')
    client.emit(Message('mycroft.audio.service.pause'))
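
The pause helper above assumes a bus client that is already connected; a minimal sketch of that setup plus the matching resume call, assuming the standalone mycroft-messagebus-client package (the audio-service message types are standard Mycroft ones):

from mycroft_bus_client import MessageBusClient, Message

client = MessageBusClient()
client.run_in_thread()

def resume():
    print('resume')
    client.emit(Message('mycroft.audio.service.resume'))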
Example #4
 def on_send_pressed(self):
     self.user_utterance = self.lineEdit_chat_message.text()
     self.bus.emit(
         Message('recognizer_loop:utterance',
                 {'utterances': [self.user_utterance]}))
     self.lineEdit_chat_message.setText('')
Example #5
def speak(utterance,
          expect_response=False,
          wait=False,
          meta=None,
          message=None,
          private=False,
          speaker=None):
    """
    Speak a sentence.
    Arguments:
        utterance (str):        sentence Mycroft should speak
        expect_response (bool): set to True if Mycroft should listen for a response immediately after
                                speaking the utterance
        wait (bool):            set to True to block while the text is being spoken
        meta:                   information about what built the sentence
        message (Message):      message associated with the input that triggered this speak
        private (bool):         flag indicating that this message contains data private to the requesting user
        speaker (dict):         dict containing language or voice data to override user preference values
    """
    from neon_utils import SKILL

    # registers the skill as being active
    meta = meta or {}
    meta['skill'] = SKILL.name
    SKILL.enclosure.register(SKILL.name)
    if utterance:
        LOG.debug(f">>>>> Skill speak! {utterance}")

        # Find the associated message
        if message:
            LOG.info('message passed to speak = ' + str(message.data))
            if not speaker:
                speaker = message.data.get("speaker", None)
        else:
            LOG.debug('message is None.')
            message = dig_for_message()

        if message:
            # filename = message.context.get("flac_filename", "")
            # cc_data = message.context.get("cc_data", {})
            # profiles = message.context.get("nick_profiles", {})
            if not speaker:
                speaker = message.data.get("speaker", speaker)
            # if message.data['flac_filename']:
            #     filename = message.data['flac_filename']
            # else:
            #     filename = ''
        else:
            message = dig_for_message()
            filename = ''
            # cc_data = {}
            # profiles = {}
            if message:
                # filename = message.context.get("flac_filename", "")
                # cc_data = message.context.get("cc_data", {})
                # profiles = message.context.get("nick_profiles", {})
                if not speaker:
                    speaker = message.data.get("speaker", {})

        # registers the skill as being active
        # print(f'{cc_data} is cc_data')
        # self.enclosure.register(self.name)
        nick = ""
        # LOG.debug(nick)
        data = {
            "utterance": utterance,
            "expect_response": expect_response,
            "meta": meta,
            "speaker": speaker
        }

        # skip these logs on the server; devices may not want them either, since unusual characters can cause logging errors
        if not SKILL.server:
            LOG.info(f'{speaker} Speak: {utterance}')
            # LOG.info('Speak data = ' + str(data))
        # LOG.info(filename)
        if not message:
            message = dig_for_message()

        if message and message.context.get("cc_data", {}).get("emit_response"):
            LOG.debug(f"DM: {data}")
            msg_to_emit = message.reply("skills:execute.response", data)

        elif message and message.msg_type != "mycroft.ready":
            # setdefault so the timestamp is kept even when the context has no timing dict yet
            message.context.setdefault("timing", {})["speech_start"] = time.time()
            LOG.info("message True, " + str(data))
            # LOG.info(message)
            # TODO: This is where we have the most complete timing profile for an utterance
            # LOG.debug(f"TIME: to_speak, {time.time()}, {message.context['flac_filename']}, {data['utterance']}, "
            #           f"{message.context}")
            # self.bus.emit(message.reply("speak", data))
            msg_to_emit = message.reply("speak", data)
            LOG.debug(f">>>> Skill speak! {data}, {message.context}")
        else:
            LOG.warning("message False, " + str(data))
            # self.bus.emit(Message("speak", data))
            msg_to_emit = Message("speak", data)
        LOG.debug(msg_to_emit.msg_type)
        SKILL.bus.emit(msg_to_emit)
    else:
        LOG.warning("Null utterance passed to speak")
        LOG.warning(f"{SKILL.name} | message={message}")

    if wait:
        wait_while_speaking()
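
A usage sketch of the speak helper above; the speaker keys and values shown are illustrative assumptions, not fixed by the function itself:

# hypothetical call to the speak() helper above (speaker contents are illustrative)
speak("I have set a timer for five minutes",
      expect_response=False,
      wait=True,                           # block until the audio has finished
      speaker={"language": "en-us"},       # assumed override structure
      private=False)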
Example #6
def spatial_ritual(i):
    """
    Spatial Ritual:
    - Listen to coordinates sent by the Arduino
    - If the new point is interesting (i.e. a turn), look up the closest Self concept and say it aloud
    - Save the event data for future use

    Input: int, step of the trajectory
    """

    global sent
    global x_vals
    global y_vals
    global trajectory
    global event_data
    global num_frames
    global trigger

    print("Frame {}".format(i))

    if i == num_frames - 1:  #NOTE: currently the last frame saves & closes the plot
        #plt.savefig('./outputs/full_trajectory_event_'+ event_id+ '.png')
        print("Ending Spatial Dance!")
        # Send signal to arduino to stop roomba trajectory
        sock.send('d')
        trigger = False
        plt.close()
    else:
        #---listen to Arduino
        message = roomba_listen()

        # if message contains coordinates
        if message and message != 'clearning' and message != 'docking':

            x, y = (message.split(',', 1))
            x = float(x)
            y = float(y)

            #--save data trajectory
            x_vals.append(x)
            y_vals.append(y)

            #--save plot frame
            plt.cla()
            plt.plot(x_vals,
                     y_vals,
                     color="mediumblue",
                     marker="2",
                     markevery=1,
                     markersize=5,
                     markeredgecolor="orangered")

            #----Check if the new point is roughly aligned with the previous 2 trajectory points (once the trajectory has at least 2 points)
            new_point = [x, y]

            #check whether the new point is aligned with the 2 previous points (needs at least 2 points)
            if len(trajectory) >= 2:
                aligned = approximately_colinear(
                    trajectory[-2],
                    trajectory[-1],
                    new_point,
                    threshold=COLINEARITY_THRESHOLD)
                if aligned:
                    #new point aligned with last 2, so replace last point with new point:
                    trajectory[-1] = new_point
                    #NOTE: This cleans the trajectory by removing intermediary points that lie on the same line

                else:
                    #a turn happened, so read aloud the concept closest to the previous point (note the slight delay, since we look at the previous point)
                    #get idx and distance of the nearest concept to this point
                    idx, dist = nearest_concept(embeddings2D, trajectory[-1])
                    #get word attached to that idx
                    new_closer_concept = list(custom_embeddings.keys())[idx]
                    #NOTE: Adjust EMBEDDINGS_BOUND to the range of the trajectory point values, otherwise the same concept is always output
                    print(
                        "--looking at trajectory point {}. Here is {}".format(
                            trajectory[-1], new_closer_concept))
                    #say it aloud
                    client.emit(
                        Message('speak',
                                data={'utterance': new_closer_concept}))

                    #--update event data
                    # save data of close concepts and distance
                    #NOTE: this concept may already be registered; in that case,
                    # update the trajectory point idx only if it is closer than the one registered last time
                    event_data = update_event_data(new_closer_concept, dist,
                                                   len(trajectory) - 1,
                                                   event_data)

                    #add new point to trajectory (at least temporarily)
                    trajectory.append(new_point)

            else:  #second point in traj
                trajectory.append(new_point)
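
approximately_colinear is only referenced in these examples, not shown; a plausible sketch, assuming it treats three 2D points as nearly collinear when the cross product of the two segment vectors (twice the area of the triangle they span) stays under the threshold:

def approximately_colinear(p1, p2, p3, threshold=0.1):
    """Hypothetical helper: True if the 2D points p1, p2, p3 are (almost) on one line."""
    # cross product of (p2 - p1) and (p3 - p1), i.e. twice the area of the triangle they span
    cross = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])
    return abs(cross) < threshold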
Example #7
 def eyes_reset(self):
     """Restore the eyes to their default (ready) state."""
     self.bus.emit(
         Message("enclosure.eyes.reset",
                 context={"destination": ["enclosure"]}))
Example #8
 def eyes_on(self):
     """Illuminate or show the eyes."""
     self.bus.emit(
         Message("enclosure.eyes.on",
                 context={"destination": ["enclosure"]}))
Example #9
 def _handle_pairing_complete(self, _):
     """
     Handler for 'mycroft.paired', unmutes the mic after the pairing is
     complete.
     """
     self.bus.emit(Message("mycroft.mic.unmute"))
Example #10
 def eyes_narrow(self):
     """Make the eyes look narrow, like a squint"""
     self.bus.emit(
         Message("enclosure.eyes.narrow",
                 context={"destination": ["enclosure"]}))
Example #11
 def speak(self, text):
     self.bus.emit(Message("speak", {'utterance': text}))
Example #12
def spatial_ritual(i, trajectory, end_reading):
    """
    Spatial Ritual:
    - Listen to coordinates sent by the Arduino
    - If the new point is interesting (i.e. a turn), look up the closest Self concept and say it aloud
    - Save the event data for future use

    Args:
        i: step of the trajectory
        trajectory: list of points saved
        end_reading: boolean, whether the reading should end
        event_data (global): dictionary whose keys are strings (concepts) and values are lists [float, int] (distance, resp. idx of the corresponding trajectory point)

    Output:
        trajectory: updated trajectory
        end_reading: boolean, whether the reading should end
    """

    global event_data
    global idx_event_concepts

    print("Frame {}".format(i - WARM_UP))

    # ---listen to Arduino
    message = roomba_listen()

    # if message contains coordinates
    if message and message != "clearning" and message != "docking":

        x, y = message.split(",", 1)
        x = float(x)
        y = float(y)

        # ----Check if the new point is roughly aligned with the previous 2 trajectory points (once the trajectory has at least 2 points)
        new_point = [x / scaling_factor, y / scaling_factor]

        # check whether the new point is aligned with the 2 previous points (needs at least 2 points)
        # and whether the maximum number of concepts per reading has not been reached:
        if len(trajectory) >= 2 and len(
                idx_event_concepts) < MAX_WORDS_PER_EVENT:
            aligned = approximately_colinear(
                trajectory[-2],
                trajectory[-1],
                new_point,
                threshold=COLINEARITY_THRESHOLD,
            )
            if aligned:
                # replace last point with new point
                # NOTE: thus removes intermediary points which are aligned
                trajectory[-1] = new_point

            else:
                # a turn happened, so read aloud the concept closest to the previous point
                # get idx and distance of the nearest concept to this point
                idx, dist, neue = nearest_concept(embeddings2D, trajectory[-1],
                                                  idx_event_concepts)

                if neue:  # say aloud only if new concept
                    # get word attached to that idx
                    new_closer_concept = list(custom_embeddings.keys())[idx]
                    print("--Trajectory point {}  understood as {}".format(
                        trajectory[-1], new_closer_concept))
                    # say it aloud
                    client.emit(
                        Message("speak",
                                data={"utterance": new_closer_concept}))

                    # --update event data
                    event_data[new_closer_concept] = [
                        dist, len(trajectory) - 1
                    ]
                    idx_event_concepts.append(idx)

                    # add new point to trajectory
                    trajectory.append(new_point)
                else:  # if no new concept, end reading
                    end_reading = True

        else:
            trajectory.append(new_point)

    return trajectory, end_reading
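
nearest_concept is likewise only referenced here; a sketch of the three-value variant used in this example, assuming embeddings2D is an (N, 2) array of concept coordinates and that the third return value flags whether the index has not been used in this reading yet:

import numpy as np

def nearest_concept(embeddings2D, point, idx_event_concepts):
    """Hypothetical helper: (idx, dist, neue) of the concept closest to point."""
    coords = np.asarray(embeddings2D, dtype=float)
    dists = np.linalg.norm(coords - np.asarray(point, dtype=float), axis=1)
    idx = int(np.argmin(dists))
    neue = idx not in idx_event_concepts  # True when this concept has not been spoken yet
    return idx, float(dists[idx]), neue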
Example #13
    def setup_ui(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.top, self.left, self.width, self.height)
        self.center_on_screen()
        self.centralwidget = QtWidgets.QWidget(self)
        self.setCentralWidget(self.centralwidget)
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)

        spacer_item = QtWidgets.QSpacerItem(40, 20,
                                            QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacer_item, 1, 2, 1, 3)

        self.verticalLayout_intents = QtWidgets.QVBoxLayout()
        self.gridLayout.addLayout(self.verticalLayout_intents, 2, 1, 1, 1)

        self.label_intent1 = self.create_intent_label()
        self.label_intent2 = self.create_intent_label()
        self.label_intent3 = self.create_intent_label()
        self.label_intent4 = self.create_intent_label()
        self.label_intent5 = self.create_intent_label()
        self.label_intent6 = self.create_intent_label()
        self.label_intent7 = self.create_intent_label()

        self.line = QtWidgets.QFrame(self.centralwidget)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                            QtWidgets.QSizePolicy.Maximum)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.line.sizePolicy().hasHeightForWidth())
        self.line.setSizePolicy(size_policy)
        self.line.setLineWidth(1)
        self.line.setMidLineWidth(0)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.gridLayout.addWidget(self.line, 3, 1, 1, 5)

        self.lineEdit_chat_message = QtWidgets.QLineEdit(self.centralwidget)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
                                            QtWidgets.QSizePolicy.Fixed)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.lineEdit_chat_message.sizePolicy().hasHeightForWidth())
        self.lineEdit_chat_message.setSizePolicy(size_policy)
        self.gridLayout.addWidget(self.lineEdit_chat_message, 8, 1, 1, 4)

        self.label_intents_title = QtWidgets.QLabel(self.centralwidget)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
                                            QtWidgets.QSizePolicy.Maximum)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.label_intents_title.sizePolicy().hasHeightForWidth())
        self.label_intents_title.setSizePolicy(size_policy)
        font_questions_title = QtGui.QFont()
        font_questions_title.setPointSize(16)
        self.label_intents_title.setFont(font_questions_title)
        self.gridLayout.addWidget(self.label_intents_title, 1, 1, 1, 1)

        self.scrollArea = QtWidgets.QScrollArea(self.centralwidget)
        size_policy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding,
            QtWidgets.QSizePolicy.MinimumExpanding)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.scrollArea.sizePolicy().hasHeightForWidth())
        self.scrollArea.setSizePolicy(size_policy)
        self.scrollArea.setWidgetResizable(True)
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 768, 410))
        self.verticalLayout = QtWidgets.QVBoxLayout(
            self.scrollAreaWidgetContents)
        self.gridLayout_conversation = QtWidgets.QGridLayout()
        self.verticalLayout.addLayout(self.gridLayout_conversation)
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        self.gridLayout.addWidget(self.scrollArea, 7, 1, 1, 5)

        self.label_chat_title = QtWidgets.QLabel(self.centralwidget)
        size_policy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding,
            QtWidgets.QSizePolicy.Maximum)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.label_chat_title.sizePolicy().hasHeightForWidth())
        font_chat_title = QtGui.QFont()
        font_chat_title.setPointSize(14)
        self.label_chat_title.setSizePolicy(size_policy)
        self.label_chat_title.setFont(font_chat_title)
        self.gridLayout.addWidget(self.label_chat_title, 5, 1)

        self.pushButton_mic = QtWidgets.QPushButton(self.centralwidget)
        size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum,
                                            QtWidgets.QSizePolicy.Maximum)
        size_policy.setHorizontalStretch(0)
        size_policy.setVerticalStretch(0)
        size_policy.setHeightForWidth(
            self.pushButton_mic.sizePolicy().hasHeightForWidth())
        self.pushButton_mic.setSizePolicy(size_policy)
        self.mic_icon = QtGui.QIcon()
        self.mic_icon.addPixmap(QtGui.QPixmap("imgs/mic.svg"))
        self.mic_muted_icon = QtGui.QIcon()
        self.mic_muted_icon.addPixmap(QtGui.QPixmap("imgs/mic_muted.svg"))
        self.pushButton_mic.setIcon(self.mic_icon)
        palette = QtGui.QPalette()
        brush = QtGui.QBrush(QtGui.QColor(115, 210, 22))
        brush.setStyle(QtCore.Qt.SolidPattern)
        palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
        self.pushButton_mic.setPalette(palette)
        self.pushButton_mic.clicked.connect(self.on_mic_pressed)
        self.gridLayout.addWidget(self.pushButton_mic, 5, 5)

        self.pushButton_send = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_send.setGeometry(QtCore.QRect(399, 550, 50, 30))
        self.pushButton_send.setPalette(palette)
        self.pushButton_send.clicked.connect(self.on_send_pressed)
        self.gridLayout.addWidget(self.pushButton_send, 8, 5, 1, 1)

        self.pushButton_logs = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_logs.setGeometry(QtCore.QRect(370, 10, 120, 40))
        self.logs_file_icon = QtGui.QIcon()
        self.logs_file_icon.addPixmap(QtGui.QPixmap("imgs/file.svg"))
        self.pushButton_logs.setIcon(self.logs_file_icon)
        self.pushButton_logs.clicked.connect(self.on_logs_pressed)
        self.gridLayout.addWidget(self.pushButton_logs, 1, 5, 1, 1)

        self.skills_dialog = QtWidgets.QDialog(self)
        self.skills_dialog.setWindowTitle('Mycroft Skills')
        self.skills_dialog.resize(600, 600)

        self.pushButton_skills = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_skills.setGeometry(QtCore.QRect(370, 60, 120, 40))
        self.skills_list_icon = QtGui.QIcon()
        self.skills_list_icon.addPixmap(QtGui.QPixmap("imgs/list.svg"))
        self.pushButton_skills.setIcon(self.skills_list_icon)
        self.pushButton_skills.clicked.connect(self.on_skills_pressed)
        self.gridLayout.addWidget(self.pushButton_skills, 2, 5, 1, 1)

        self.pushButton_manage_skills = QtWidgets.QPushButton(
            self.skills_dialog)
        self.pushButton_manage_skills.setGeometry(
            QtCore.QRect(470, 10, 120, 40))
        self.pushButton_manage_skills.clicked.connect(
            self.on_manage_skills_pressed)

        # List of the skills that the user should not interact with
        dangerous_skills = [
            'mycroft-volume.mycroftai', 'mycroft-stop.mycroftai',
            'fallback-unknown.mycroftai', 'fallback-query.mycroftai',
            'mycroft-configuration.mycroftai'
        ]

        # List of the skills in the /opt/mycroft/skills folder
        self.active_skills.extend(
            name for name in listdir('/opt/mycroft/skills/')
            if path.isdir('/opt/mycroft/skills/' + name) and name not in dangerous_skills)

        # Check if the chat needs to be updated every second
        self.timer = QtCore.QTimer(self)
        self.timer.setInterval(1000)
        self.timer.timeout.connect(self.check_for_chat_update)
        self.timer.start()

        self.retranslate_ui()

        # Send the webservice class to Mycroft
        server_socket = Thread(target=util.create_server_socket,
                               args=[self.ws])
        server_socket.setDaemon(True)
        server_socket.start()

        # Start Mycroft services
        subprocess.run([
            'bash',
            path.expanduser('~') + '/mycroft-core/start-mycroft.sh', 'all',
            'restart'
        ])

        # Wait until the Mycroft services have started; there might be a better solution
        time.sleep(15)

        # Thread connected to Mycroft MessageBusClient
        self.bus = MessageBusClient()
        self.bus.run_in_thread()
        self.bus.on('speak', self.handle_speak)
        self.bus.on('recognizer_loop:utterance', self.handle_utterance)

        # Deactivate the mycroft-volume.mycroftai skill; the mic behaves oddly while it is active
        self.bus.emit(
            Message('skillmanager.deactivate',
                    {'skill': 'mycroft-volume.mycroftai'}))
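
The handle_speak and handle_utterance callbacks registered on the bus above are not shown in this example; a minimal sketch, assuming the window keeps a simple chat_history list (an assumed name) that check_for_chat_update later renders:

    def handle_utterance(self, message):
        # hypothetical handler: record what the user said ('recognizer_loop:utterance' carries a list of utterances)
        self.chat_history.append(('You', message.data['utterances'][0]))

    def handle_speak(self, message):
        # hypothetical handler: record Mycroft's spoken reply ('speak' carries a single utterance)
        self.chat_history.append(('Mycroft', message.data['utterance']))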
Example #14
 def system_reset(self):
     """The enclosure hardware should reset any CPUs, etc."""
     self.bus.emit(
         Message("enclosure.system.reset",
                 context={"destination": ["enclosure"]}))
Example #15
 def eyes_spin(self):
     """Make the eyes 'roll'
     """
     self.bus.emit(
         Message("enclosure.eyes.spin",
                 context={"destination": ["enclosure"]}))
Example #16
 def system_unmute(self):
     """Unmute (turn on) the system speaker."""
     self.bus.emit(
         Message("enclosure.system.unmute",
                 context={"destination": ["enclosure"]}))
Example #17
 def eyes_timed_spin(self, length):
     """Make the eyes 'roll' for the given time.
     Args:
         length (int): duration in milliseconds of roll, None = forever
     """
     self.bus.emit(Message("enclosure.eyes.timedspin", {'length': length}))
Example #18
 def eyes_off(self):
     """Turn off or hide the eyes."""
     self.bus.emit(
         Message("enclosure.eyes.off",
                 context={"destination": ["enclosure"]}))
Example #19
 def mouth_reset(self):
     """Restore the mouth display to normal (blank)"""
     self.bus.emit(
         Message("enclosure.mouth.reset",
                 context={"destination": ["enclosure"]}))
     self.display_manager.set_active(self.name)
Example #20
 def __init__(self, message, bus=None):
     self.bus = bus or get_mycroft_bus()
     self._waiting = False
     self.response = Message(None, None, None)
     self.query = message
     self.valid_response_types = []
Example #21
 def mouth_talk(self):
     """Show a generic 'talking' animation for non-synched speech"""
     self.bus.emit(
         Message("enclosure.mouth.talk",
                 context={"destination": ["enclosure"]}))
     self.display_manager.set_active(self.name)
Example #22
def unhook(gpio, level, tick):
    client.emit(Message('mycroft.mic.listen'))
    light.pulse(0.1, 30, None, 1) #Flicker on
    light.pulse(0.05, 25, None, 1)
    light.pulse(0.1, 100, None, 1)
    light.pulse(0.15, 80, None, 1)
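
The unhook signature (gpio, level, tick) matches a pigpio edge callback; a sketch of how it might be wired up, assuming a running pigpio daemon (the pin number and edge are assumptions; client and light are assumed to be set up elsewhere in the original script):

import pigpio

pi = pigpio.pi()                      # connect to the local pigpio daemon
HOOK_PIN = 17                         # hypothetical GPIO pin for the hook switch
pi.set_mode(HOOK_PIN, pigpio.INPUT)
cb = pi.callback(HOOK_PIN, pigpio.RISING_EDGE, unhook)  # unhook(gpio, level, tick) fires on each rising edge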
Example #23
 def mouth_listen(self):
     """Show a 'thinking' image or animation"""
     self.bus.emit(
         Message("enclosure.mouth.listen",
                 context={"destination": ["enclosure"]}))
     self.display_manager.set_active(self.name)
Example #24
def next_track():
    print('next')
    client.emit(Message('mycroft.audio.service.next'))
Example #25
with open('wakewords.txt', 'r') as f:
    f_contense = f.read()  # assign the contents of the file to a variable
    l = f_contense.split("\n")  # split each line of the file into a list
    print(l)  #print the list

# establish communication with Mycroft
print('Setting up client to connect to a local mycroft instance')
client = MessageBusClient()
client.run_in_thread()

# set the speech-to-text state variable to 0 and then print it
stt_state = 0
print(stt_state)

print("Victor standing by")
client.emit(Message('speak', data={'utterance': 'Victor standing by'}))  # tell the user the script is ready

#forever loop
while True:
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone() as source:
        # listen for 1 second to calibrate the energy threshold for ambient noise levels
        r.adjust_for_ambient_noise(source)
        print("Say something!")
        audio = r.listen(source)

    # recognize speech using Google Speech Recognition
    try:
        # for testing purposes, we're just using the default API key
        # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`