Example #1
def task(quit_event):
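    """Wake-word loop: pick the first 'seeed' microphone, listen for the
    keyword, then recognize a follow-up utterance and echo it via TTS."""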
    #mic = Microphone(quit_event=quit_event)
    mic_index = None
    for i, microphone_name in enumerate(Microphone.list_microphone_names()):
        if 'seeed' in microphone_name:
            mic_index = i
            print("Using microphone {}".format(microphone_name))
            break
    if mic_index is None:  # 'not mic_index' would wrongly reject device index 0
        print("Could not find a proper microphone")
        exit()
    with Microphone(device_index=mic_index) as mic:
        recognizer = Recognizer()
        while not quit_event.is_set():
            pixel_ring.off()
            print("Listening for keyword")
            data = recognizer.listen(source=mic)
            kw_text = recognizer.recognize_sphinx(data)
            print("Heard '{}' while listening for keyword".format(kw_text))
            if kw_text == name:
                print('Wake up')
                pixel_ring.listen()
                data = recognizer.listen(mic)
                pixel_ring.think()
                text = recognizer.recognize_sphinx(data)
                print('Done listening')
                pixel_ring.off()
                if text:
                    print('Recognized {}'.format(text))
                    tts.say(text)
Example #2
def test_8mic():
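    """Continuously read chunks from the mic array and point the pixel ring
    at the estimated direction of arrival until SIGINT."""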
    import signal
    import time
    from pixel_ring import pixel_ring

    is_quit = threading.Event()

    def signal_handler(sig, num):
        is_quit.set()
        print('Quit')

    signal.signal(signal.SIGINT, signal_handler)
 
    with MicArray(16000, 4, 16000 // 4) as mic:  # integer chunk size
        for chunk in mic.read_chunks():
            direction = mic.get_direction(chunk)
            pixel_ring.set_direction(direction)
            pixel_ring.spin()
            #print(direction)
            #print(chunk)
            #print('Test before direction')
            print(int(direction))

            if is_quit.is_set():
                break

    pixel_ring.off()
Example #3
def main():
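    # Detect voice activity with WebRTC VAD, estimate the direction of
    # arrival over a window of chunks, and steer a servo toward the speaker.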
    vad = webrtcvad.Vad(3)

    servodir = 0
    speech_count = 0
    chunks = []
    #doa_chunks = int(DOA_FRAMES / VAD_FRAMES)
    doa_chunks = 128

    try:
        with MicArray(RATE, CHANNELS, RATE * VAD_FRAMES / 1000) as mic:
            # chunk size with doa_chunks for the no-VAD case
            for chunk in mic.read_chunks():
                # Use single channel audio to detect voice activity
                if vad.is_speech(chunk[0::CHANNELS].tobytes(), RATE):
                    speech_count += 1

                chunks.append(chunk)
                if len(chunks) == doa_chunks:
                    if speech_count > (doa_chunks / 2):
                        frames = np.concatenate(chunks)
                        horizontal, vertical = mic.get_direction(frames)
                        servodir = int(6273 - 45.5 * horizontal)
                        servo.setTarget(0, servodir)
                        print('\n H:{h} V:{v}'.format(h=int(horizontal),
                                                      v=int(vertical)))

                    speech_count = 0
                    chunks = []

    except KeyboardInterrupt:
        pass

    pixel_ring.off()
Example #4
    def initialize(self):
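        """Enable the LED power pin and wire pixel ring animations to
        Mycroft recognizer and skill-handler events."""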
        LOG.debug("initialising")
        self.en = mraa.Gpio(12)
        if os.geteuid() != 0:
            time.sleep(1)

        self.en.dir(mraa.DIR_OUT)
        self.en.write(0)
        pixel_ring.set_brightness(20)
        pixel_ring.wakeup()

        self.userkey = None
        try:
            self.userkey = InputDevice("/dev/input/event0")
        except Exception as e:
            LOG.debug("exception while reading InputDevice: {}".format(e))

        if self.userkey:
            self.schedule_repeating_event(self.handle_button, None, 0.1,
                                          'RespeakerIo')

        self.add_event('recognizer_loop:record_begin',
                       self.handle_listener_wakeup)
        self.add_event('recognizer_loop:record_end', self.handle_listener_off)
        self.add_event('recognizer_loop:audio_output_start',
                       self.handle_listener_speak)
        self.add_event('recognizer_loop:audio_output_end',
                       self.handle_listener_off)
        self.add_event('mycroft.skill.handler.start',
                       self.handle_listener_think)
        self.add_event('mycroft.skill.handler.complete',
                       self.handle_listener_off)
        pixel_ring.off()
Example #5
def turn_on_led(R, G, B):
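    """Show a solid color on the pixel ring for three seconds, then turn it off."""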
    try:
        pixel_ring.set_color(B, G, R)
        time.sleep(3)
        pixel_ring.off()
    except KeyboardInterrupt:
        print("bye")
Example #6
    def enable(self):
        self.log.info("Pixel Ring: Enabling")

        self.add_event('recognizer_loop:wakeword', self.handle_listener_wakeup)
        self.add_event('recognizer_loop:record_end', self.handle_listener_off)

        pixel_ring.off()
Example #7
def record(frames, foo):
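    """Open a PyAudio input stream on the ReSpeaker device and push audio
    chunks (or grouped snapshots) onto the shared frames deque."""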
    # print(type(frames))
    p = pyaudio.PyAudio()
    pixel_ring.off()

    stream = p.open(
        rate=SAMPLE_RATE,
        format=p.get_format_from_width(RESPEAKER_WIDTH),
        channels=RESPEAKER_CHANNELS,
        input=True,
        input_device_index=RESPEAKER_INDEX,
    )
    print("* listening")

    if not main.CHUNK_RECORDING:
        while True:

            data = stream.read(CHUNK, exception_on_overflow=False)
            frames.appendleft(data)
    else:
        counter = 0
        lst = []
        while main.keepRecording:
            data = stream.read(CHUNK, exception_on_overflow=False)
            lst.append(data)
            counter = (counter + 1) % NUM_OF_SNAPSHOTS_FOR_MUSIC
            if counter == 0:
                frames.appendleft(lst.copy())
                lst.clear()

    print("* done recording")
    stream.stop_stream()
    stream.close()
    p.terminate()
Example #8
def test_8mic():
    import signal
    import time
    from pixel_ring import pixel_ring

    is_quit = threading.Event()

    def signal_handler(sig, num):
        is_quit.set()
        print('Quit')

    signal.signal(signal.SIGINT, signal_handler)

    en_speech = np.zeros((1, ))
    raw = np.zeros((1, 8))
    with MicArray(16000, 8, 16000 // 8) as mic:  # integer chunk size
        for frames in mic.read_chunks():
            chunk = np.frombuffer(frames, dtype='int16')  # np.fromstring is deprecated
            direction = mic.get_direction(chunk)

            pixel_ring.set_direction(direction)
            print('@ {:.2f}'.format(direction))
            #  chunk = chunk / (2**15)
            #  chunk.shape = -1, 8

            #  raw = np.concatenate((raw, chunk), axis=0)
            #
            #  mic.separationAt(chunk, [direction])
            #  en_speech = np.concatenate((en_speech, mic.sep_queue.get()),axis=0)
            if is_quit.is_set():
                break

    pixel_ring.off()
    return raw, en_speech
Example #9
def process_event(event):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
    """
    print("event:" + str(event))

    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        subprocess.Popen(
            ["aplay", "/home/pi/piassistant/src/sample-audio/Fb.wav"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        pixel_ring.listen()

    if event.type == EventType.ON_RESPONDING_STARTED:
        pixel_ring.speak()

    if event.type in (EventType.ON_RESPONDING_FINISHED,
                      EventType.ON_CONVERSATION_TURN_TIMEOUT,
                      EventType.ON_NO_RESPONSE):
        pixel_ring.off()

    if event.type == EventType.ON_END_OF_UTTERANCE:
        pixel_ring.think()

    if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and event.args
            and not event.args['with_follow_on_turn']):
        pixel_ring.off()

    if event.type == EventType.ON_DEVICE_ACTION:
        for command, params in event.actions:
            print('Do command', command, 'with params', str(params))
Example #10
def main():
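    # Count voiced frames per DOA window; when a window is mostly speech,
    # compute the direction of arrival and light the matching LED.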
    vad = webrtcvad.Vad(3)

    speech_count = 0
    chunks = []
    doa_chunks = int(DOA_FRAMES / VAD_FRAMES)

    try:
        with MicArray(RATE, CHANNELS, RATE * VAD_FRAMES / 1000) as mic:
            for chunk in mic.read_chunks():
                # Use single channel audio to detect voice activity
                if vad.is_speech(chunk[0::CHANNELS].tobytes(), RATE):
                    speech_count += 1
                    sys.stdout.write('1')
                else:
                    sys.stdout.write('0')

                sys.stdout.flush()

                chunks.append(chunk)
                if len(chunks) == doa_chunks:
                    if speech_count > (doa_chunks / 2):
                        frames = np.concatenate(chunks)
                        direction = mic.get_direction(frames)
                        pixel_ring.set_direction(direction)
                        print('\n{}'.format(int(direction)))

                    speech_count = 0
                    chunks = []

    except KeyboardInterrupt:
        pass

    pixel_ring.off()
Example #12
def test_8mic():
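    """Smooth the direction-of-arrival estimate: collect readings, drop the
    extremes, and average the rest while updating the pixel ring."""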
    import signal
    import time
    from pixel_ring import pixel_ring

    is_quit = threading.Event()

    def signal_handler(sig, num):
        is_quit.set()
        print('Quit')

    signal.signal(signal.SIGINT, signal_handler)
 
    a = []
    with MicArray(16000, 8, 16000 // 4) as mic:
        for chunk in mic.read_chunks():
            direction = mic.get_direction(chunk)
            if len(a) > 6:
                # Drop the highest and lowest readings, average the rest
                a.remove(max(a))
                a.remove(min(a))
                angle = sum(a) / len(a)
                print(a)
                a = []
            else:
                a.append(direction)
            pixel_ring.set_direction(direction)
            #print(int(direction))

            if is_quit.is_set():
                break

    pixel_ring.off()
Example #13
def turn_onoff(onoff):
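    """Fire the IFTTT webhooks that switch the bedroom lights, showing the
    'think' animation while the requests are sent."""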
    pixel_ring.off()
    pixel_ring.think()
    print("Turned " + onoff)
    requests.post("https://maker.ifttt.com/trigger/turn_" + onoff + "_bedroom_light_request/with/key/"+iftttkey)
    requests.post("https://maker.ifttt.com/trigger/turn_" + onoff + "_bedside_light/with/key/"+iftttkey)

    pixel_ring.off()
Example #14
def on_message(client, userdata, message):
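    """Drive the pixel ring from Hermes MQTT events: wake up on hotword
    detection, think briefly after ASR capture, then turn off."""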
    if message.topic == "hermes/hotword/default/detected":
        pixel_ring.wakeup()
    if message.topic == "hermes/asr/textCaptured":
        pixel_ring.think()
        time.sleep(3)
        pixel_ring.off()
    #print("message received " ,str(message.payload.decode("utf-8")))
    print("message topic=", message.topic)
Example #15
def jarvis_parse_audio(audio):
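    """Wake the pixel ring, turn the audio into a word list, and dispatch it
    to the command parser (or power down if nothing was heard)."""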
    pixel_ring.wakeup()
    word_list = collect_word_list(audio)
    if word_list:
        parse_command_module(word_list)
    else:
        pixel_ring.off()
        time.sleep(1)
        power.off()
Example #16
    def init_pixelring():
        power = mraa.Gpio(12)
        time.sleep(1)
        power.dir(mraa.DIR_OUT)
        power.write(0)

        pixel_ring.wakeup(0)
        time.sleep(1)
        pixel_ring.off()
Example #17
def parse_command_module(word_list):
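    """Route a recognized word list to the light-command parser; anything
    else turns the ring off and powers down."""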
    pixel_ring.think()
    print("parsing command module from: "+ str(word_list))
    if (set(word_list) & set(LIGHT_WORDS)):
        parse_light_command(word_list)
    else:
        pixel_ring.off()
        time.sleep(1)
        power.off()
        print("no light command found, so i stop because i am simple")
Example #18
    def record_begin_handler(self, message):
        pixel_ring.wakeup()
        time.sleep(2)
        pixel_ring.think()
        time.sleep(2)
        pixel_ring.speak()
        time.sleep(3)
        pixel_ring.off()
        time.sleep(2)
        pixel_ring.off()
Example #19
    def on_handler_complete(self, message):
        """When a skill finishes executing turn off the LED ring"""
        handler = message.data.get('handler', '')
        if self._skip_handler(handler):
            return

        # If speaking has already begun, on_handler_audio_end will
        # turn off the LEDs
        if not self.speaking and not self.show_volume:
            pixel_ring.off()
Example #20
def main():
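    # Like the other VAD + DOA loops, but map the detected direction to one
    # of four quadrants and send a matching digit over the serial port.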
    vad = webrtcvad.Vad(3)

    speech_count = 0
    chunks = []
    doa_chunks = int(DOA_FRAMES / VAD_FRAMES)

    # bottom 151, 194, 210
    # left 225, 241, 300, 284
    # top 14, 358, 315, 30
    # right 61, 88, 45, 120

    try:
        with MicArray(RATE, CHANNELS, RATE * VAD_FRAMES / 1000) as mic:
            for chunk in mic.read_chunks():
                # Use single channel audio to detect voice activity
                if vad.is_speech(chunk[0::CHANNELS].tobytes(), RATE):
                    speech_count += 1

                chunks.append(chunk)
                if len(chunks) == doa_chunks:
                    if speech_count > (doa_chunks / 2):
                        print("Digital Logic --> Sending")
                        frames = np.concatenate(chunks)
                        direction = mic.get_direction(frames)
                        pixel_ring.set_direction(direction)
                        print('\n{}'.format(int(direction)))
                        if 45 <= int(direction) <= 135:
                            port.write(str(1))
                            sleep(3)
                        elif 135 < int(direction) <= 225:
                            port.write(str(3))
                            sleep(3)
                        elif 225 < int(direction) <= 315:
                            port.write(str(2))
                            sleep(3)
                        else:  # direction > 315 or direction < 45
                            port.write(str(4))
                            sleep(3)

                    speech_count = 0
                    chunks = []

    except KeyboardInterrupt:
        pass

    pixel_ring.off()
Example #21
def on_message(client, userdata, msg):
    if msg.topic.find("audioServer") != -1:
        return
    print(msg.topic)
    if msg.topic.find("hermes/hotword/default/detected") != -1:
        pixel_ring.wakeup()
        return
    if msg.topic.find("hermes/dialogueManager/sessionStarted") != -1:
        pixel_ring.think()
        return
    if msg.topic.find("hermes/dialogueManager/sessionEnded") != -1:
        pixel_ring.off()
        return
Example #22
    def _proc_audio(self, recog, audio):
        txt = recog.recognize_sphinx(audio)
        if txt == self.name:
            try:
                self.stop()
                tts.say(get_greeting())
                pixel_ring.listen()
                cmd_aud = recog.listen(self.mic, timeout=10)
            except speech_recognition.WaitTimeoutError:
                pixel_ring.off()
                return
            pixel_ring.think()
            cmd_txt = recog.recognize_sphinx(cmd_aud)
Example #23
def start_listening():
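    """Listen on the default microphone with the ring in wake-up mode and
    hand any recognized words to the command parser."""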
    mic = sr.Microphone()

    with mic as source:
        print("jarvis is listening")
        pixel_ring.wakeup()
        audio = r.listen(source, timeout=2)
        word_list = collect_word_list(audio)
        if word_list:
            print("found a word list")
            parse_command_module(word_list)
        else:
            pixel_ring.off()
            time.sleep(1)
            power.off()
Example #24
    def detect(self, keyword=None):
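        """Feed queued audio to the PocketSphinx decoder until the given
        keyword (or, with no keyword, any hypothesis) is detected."""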
        self.decoder.end_utt()
        self.decoder.start_utt()

        pixel_ring.off()

        self.detect_history.clear()

        self.detect_queue.queue.clear()
        self.status |= self.detecting_mask
        self.stream.start_stream()

        result = None
        logger.info('Start detecting')
        print('Start detecting')
        while not self.quit_event.is_set():
            size = self.detect_queue.qsize()
            if size > 4:
                logger.info('Too many delays, {} in queue'.format(size))

            data = self.detect_queue.get()
            self.detect_history.append(data)
            self.decoder.process_raw(data, False, False)

            hypothesis = self.decoder.hyp()
            if hypothesis:
                logger.info('Detected {}'.format(hypothesis.hypstr))
                if collecting_audio != 'no':
                    logger.debug(collecting_audio)
                    save_as_wav(b''.join(self.detect_history),
                                hypothesis.hypstr)
                self.detect_history.clear()
                if keyword:
                    if hypothesis.hypstr.find(keyword) >= 0:
                        result = hypothesis.hypstr
                        break
                    else:
                        self.decoder.end_utt()
                        self.decoder.start_utt()
                        self.detect_history.clear()
                else:
                    result = hypothesis.hypstr
                    break

        self.status &= ~self.detecting_mask
        self.stop()

        return result
Example #25
def main():
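    # For each mostly-voiced DOA window, compute direction, RMS volume and
    # dominant frequency, then POST them to a remote API.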
    vad = webrtcvad.Vad(3)

    speech_count = 0
    chunks = []
    doa_chunks = int(DOA_FRAMES / VAD_FRAMES)

    try:
        with MicArray(RATE, CHANNELS, RATE * VAD_FRAMES / 1000) as mic:
            for chunk in mic.read_chunks():
                # Use single channel audio to detect voice activity
                if vad.is_speech(chunk[0::CHANNELS].tobytes(), RATE):
                    speech_count += 1
                    sys.stdout.write('1')
                else:
                    sys.stdout.write('0')

                chunks.append(chunk)
                if len(chunks) == doa_chunks:
                    if speech_count > (doa_chunks / 2):
                        rms = audioop.rms(chunk, 2)
                        fft = abs(np.fft.fft(chunk).real)
                        fft = fft[:int(len(fft) / 2)]
                        freq = np.fft.fftfreq(CHUNK, 1.0 / RATE)
                        freq = freq[:int(len(freq) / 2)]
                        val = freq[np.where(fft == np.max(fft))[0][0]] + 1
                        frames = np.concatenate(chunks)
                        direction = mic.get_direction(frames)
                        pixel_ring.set_direction(direction)
                        try:
                            res = requests.post('http://13.209.217.37/api',
                                                data={
                                                    'location': int(direction),
                                                    'volume': int(rms),
                                                    'freq': int(val)
                                                }).json()
                            print('\ndirection: {} volume: {} frequency: {}'.
                                  format(int(direction), int(rms), int(val)))
                        except Exception:  # request or JSON decode failure
                            print('ready...')

                    speech_count = 0
                    chunks = []

    except KeyboardInterrupt:
        pass

    pixel_ring.off()
Example #26
def led_pixel_ring():
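    """Blink the pixel ring while the led_ring flag is set, then release the
    power-enable pin."""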
    en.dir(mraa.DIR_OUT)
    en.write(0)
    pixel_ring.set_brightness(20)

    while led_ring:
        try:
            pixel_ring.wakeup()
            time.sleep(0.01)
            pixel_ring.off()
            time.sleep(0.01)
        except KeyboardInterrupt:
            break

        pixel_ring.off()
    en.write(1)
Example #27
def main():
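    # Bind pixel ring animations to the Alexa client's state changes and run
    # the capture pipeline until SIGINT.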
    src = Source(rate=16000, frames_size=160, channels=2)
    ch0 = ChannelPicker(channels=src.channels, pick=0)
    ns = NS(rate=src.rate, channels=1)
    kws = KWS()
    doa = DOA(rate=16000, chunks=50)
    alexa = Alexa()

    alexa.state_listener.on_listening = pixel_ring.listen
    alexa.state_listener.on_thinking = pixel_ring.think
    alexa.state_listener.on_speaking = pixel_ring.speak
    alexa.state_listener.on_finished = pixel_ring.off

    # data flow between elements
    # ---------------------------
    # src -> ns -> kws -> alexa
    #    \
    #    doa
    src.pipeline(ch0, ns, kws, alexa)

    src.link(doa)

    def on_detected(keyword):
        direction = doa.get_direction()
        print('detected {} at direction {}'.format(keyword, direction))
        alexa.listen()
        pixel_ring.wakeup(direction)

    kws.on_detected = on_detected

    is_quit = []

    def signal_handler(sig, frame):
        is_quit.append(True)
        print('quit')

    signal.signal(signal.SIGINT, signal_handler)

    src.pipeline_start()
    while not is_quit:
        time.sleep(1)

    src.pipeline_stop()
    pixel_ring.off()

    # wait a second to allow other threads to exit
    time.sleep(1)
Example #28
    def enable(self):
        self.log.info("Pixel Ring: Enabling")

        self.add_event('recognizer_loop:wakeword', self.handle_listener_wakeup)
        self.add_event('recognizer_loop:record_end', self.handle_listener_off)

        self.add_event('mycroft.skill.handler.start',
                       self.handle_listener_think)
        self.add_event('mycroft.skill.handler.complete',
                       self.handle_listener_off)

        self.add_event('recognizer_loop:audio_output_start',
                       self.handle_listener_speak)
        self.add_event('recognizer_loop:audio_output_end',
                       self.handle_listener_off)

        pixel_ring.off()
Example #29
def main():
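    # Stabilize the DOA estimate with a small history: take the most common
    # angle (np.bincount) before lighting the corresponding LED.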
    vad = webrtcvad.Vad(3)

    speech_count = 0
    chunks = []
    doa_chunks = int(DOA_FRAMES / VAD_FRAMES)

    try:
        with MicArray(RATE, CHANNELS, RATE * VAD_FRAMES / 1000) as mic:
            a = []
            for chunk in mic.read_chunks():
                # Use single channel audio to detect voice activity
                if vad.is_speech(chunk[0::CHANNELS].tobytes(), RATE):
                    speech_count += 1
                    sys.stdout.write('1')
                else:
                    sys.stdout.write('0')

                sys.stdout.flush()

                chunks.append(chunk)
                if len(chunks) == doa_chunks:
                    if speech_count > (doa_chunks / 2):
                        frames = np.concatenate(chunks)
                        direction = mic.get_direction(frames)
                        if len(a) > 2:
                            angle = [np.bincount(a).argmax()]
                            b = angle[0]

                            position = int((b) / (360 / 12))
                            pixels = [0, 0, 0, 10] * 12
                            pixels[position * 4 + 2] = 10
                            pixel_ring.show(pixels)
                            print('\n{}'.format(int(b)))
                            a.remove(a[0])
                        else:
                            new_angle = angle_to_index_angle(direction)
                            a.append(new_angle)
                    speech_count = 0
                    chunks = []

    except KeyboardInterrupt:
        pass

    pixel_ring.off()
Example #30
    def initialize(self):
        LOG.debug("initialising")

        pixel_ring.set_brightness(10)
        pixel_ring.wakeup()

        self.add_event('recognizer_loop:record_begin',
                       self.handle_listener_wakeup)
        self.add_event('recognizer_loop:record_end', self.handle_listener_off)
        self.add_event('recognizer_loop:audio_output_start',
                       self.handle_listener_speak)
        self.add_event('recognizer_loop:audio_output_end',
                       self.handle_listener_off)
        self.add_event('mycroft.skill.handler.start',
                       self.handle_listener_think)
        self.add_event('mycroft.skill.handler.complete',
                       self.handle_listener_off)
        pixel_ring.off()
Example #31
    def run(self):
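        """Consume queued multichannel audio frame by frame, track voice
        activity and direction, and wake the pixel ring toward the speaker."""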
        has_voice = 0
        buffer = b''  # audio arrives from the queue as bytes
        count = 0
        pixel_ring_countdown = 0

        while not self.done:
            data = self.queue.get()
            buffer += data

            while len(buffer) >= self.frame_bytes:
                data = buffer[:self.frame_bytes]
                buffer = buffer[self.frame_bytes:]

                data = np.frombuffer(data, dtype='int16')  # fromstring is deprecated
                mono = data[0::self.channels].tobytes()

                mono = self.ap.process_stream(mono)
                has_voice = self.ap.has_voice()

                # sys.stdout.write('1' if has_voice else '0')
                # sys.stdout.flush()

                offset, direction = self._process(data)

                self.collections.append([direction, offset, has_voice])

                count += 1
                if count >= self.collections.maxlen:
                    direction = self.get_direction()
                    if direction:
                        print('@ {}'.format(direction))

                        pixel_ring.wakeup(direction)
                        pixel_ring_countdown = 10
                    else:
                        if pixel_ring_countdown > 0:
                            pixel_ring_countdown -= 1
                            if pixel_ring_countdown == 0:
                                pixel_ring.off()

                    count = 0

                super(DOA, self).put(mono)
Example #32
def test_8mic():
    import signal
    import time
    from pixel_ring import pixel_ring

    is_quit = threading.Event()

    def signal_handler(sig, num):
        is_quit.set()
        print('Quit')

    signal.signal(signal.SIGINT, signal_handler)
 
    with MicArray(16000, 8, 16000 // 4) as mic:  # integer chunk size
        for chunk in mic.read_chunks():
            direction = mic.get_direction(chunk)
            pixel_ring.set_direction(direction)
            print(int(direction))

            if is_quit.is_set():
                break

    pixel_ring.off()
Example #33
    def _off(self):
        self.write([0] * 4 * 12)

    def write(self, data):
        if type(data) is list:
            pixel_ring.write(3, data)
        else:
            pixel_ring.write(3, data.astype('uint8').tobytes())  # tostring() is deprecated


lights = GoogleHomeLights()


if __name__ == '__main__':
    while True:

        try:
            lights.wakeup()
            time.sleep(3)
            lights.think()
            time.sleep(3)
            lights.speak()
            time.sleep(3)
            lights.off()
            time.sleep(3)
        except KeyboardInterrupt:
            break


    pixel_ring.off()
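
Taken together, these examples exercise a small pixel_ring state API (wakeup, listen, think, speak, set_direction, off, set_brightness) plus, on some boards, an mraa GPIO that gates the LED power rail. Below is a minimal consolidated sketch of that pattern, assuming a ReSpeaker-style board where GPIO 12 enables the ring (driven low to power it on), as in Examples #4, #16 and #26; adjust the pin number and timings for your hardware.

import time

import mraa
from pixel_ring import pixel_ring

# Assumption: GPIO 12 gates LED power on this board (low = enabled),
# matching Examples #4, #16 and #26.
power = mraa.Gpio(12)
power.dir(mraa.DIR_OUT)
power.write(0)

pixel_ring.set_brightness(10)
try:
    # Cycle through the animations the examples bind to wake-word,
    # recording, processing and TTS events.
    for state in (pixel_ring.wakeup, pixel_ring.listen,
                  pixel_ring.think, pixel_ring.speak):
        state()
        time.sleep(2)
finally:
    pixel_ring.off()
    power.write(1)  # cut LED power again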