Example #1
from collections import namedtuple
import threading

from aiy.leds import Leds, Pattern


class MultiColorLed:
    Config = namedtuple('Config', ['channels', 'pattern'])

    OFF         = Config(channels=lambda color: Leds.rgb_off(),  # rgb_off() takes no color argument
                         pattern=None)
    ON          = Config(channels=Leds.rgb_on,
                         pattern=None)
    BLINK       = Config(channels=Leds.rgb_pattern,
                         pattern=Pattern.blink(500))
    BLINK_3     = BLINK
    BEACON      = BLINK
    BEACON_DARK = BLINK
    DECAY       = BLINK
    PULSE_SLOW  = Config(channels=Leds.rgb_pattern,
                         pattern=Pattern.breathe(500))
    PULSE_QUICK = Config(channels=Leds.rgb_pattern,
                         pattern=Pattern.breathe(100))

    def _update(self, state, brightness):
        with self._lock:
            if state is not None:
                self._state = state
            if brightness is not None:
                self._brightness = brightness

            color = (int(255 * self._brightness), 0, 0)
            if self._state.pattern:
                self._leds.pattern = self._state.pattern
            self._leds.update(self._state.channels(color))

    def __init__(self, channel):  # note: 'channel' is not used here
        self._lock = threading.Lock()
        self._brightness = 1.0  # Read and written atomically.
        self._state = self.OFF
        self._leds = Leds()

    def close(self):
        self._leds.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    @property
    def brightness(self):
        return self._brightness

    @brightness.setter
    def brightness(self, value):
        if value < 0.0 or value > 1.0:
            raise ValueError('Brightness must be between 0.0 and 1.0.')
        self._update(state=None, brightness=value)

    def _set_state(self, state):
        self._update(state=state, brightness=None)
    state = property(None, _set_state)
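
Callers are meant to drive this class through the write-only state property and the brightness property rather than calling _update() directly. A minimal standalone sketch (hypothetical use; on real hardware a working Leds() backend is required):

import time

with MultiColorLed(channel=None) as led:
    led.state = MultiColorLed.BLINK  # routed through _update() under the lock
    led.brightness = 0.5             # red channel becomes int(255 * 0.5)
    time.sleep(5)                    # __exit__ calls close(), which resets the LEDs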
Example #2
def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()
    with Board() as board:
        #board.led.state = Led.ON
        with Leds() as leds:
            while True:
                if hints:
                    logging.info('Say something, e.g. %s.', ', '.join(hints))
                else:
                    logging.info('Say something.')
                text = client.recognize(language_code=args.language,
                                        hint_phrases=hints)
                if text is None:
                    logging.info('You said nothing.')
                    continue

                logging.info('You said: "%s"', text)
                text = text.lower()
                if 'turn on the light' in text:
                    board.led.state = Led.ON
                elif 'turn off the light' in text:
                    board.led.state = Led.OFF
                elif 'blink the light' in text:
                    board.led.state = Led.BLINK
                elif 'goodbye' in text:
                    break
                elif 'happy' in text:
                    leds.pattern = Pattern.blink(50)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    audio.play_wav('laugh.wav')
                elif 'creep' in text:
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                elif 'cheer' in text:
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    audio.play_wav('people-cheering.wav')
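
get_hints() and locale_language() are helpers defined elsewhere in the demo file. A plausible minimal stand-in, assuming English hint phrases that match the branches above (hypothetical, not the original code):

import locale

def locale_language():
    language, _ = locale.getdefaultlocale()
    return language

def get_hints(language_code):
    # Hint phrases bias recognition toward the commands handled in main().
    if language_code.startswith('en_'):
        return ('turn on the light', 'turn off the light', 'blink the light',
                'goodbye', 'happy', 'creep', 'cheer')
    return None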
Example #3
def reponse_bouton(est_juste, ecart, action):
    with Leds() as leds:
        if est_juste:
            # Solid green for 3 seconds once the target frequency is reached
            leds.update(Leds.rgb_on(Color.GREEN))
            time.sleep(3)
            print('Corde accordée')  # "String in tune"
            tts.say('Corde accordée', lang='fr-FR')  # also say the phrase aloud
        else:
            period = 10 * ecart
            leds.pattern = Pattern.blink(period)  # sets the blink frequency
            print('Tourner la cheville')  # "Turn the tuning peg"
            tts.say('Tourner la cheville', lang='fr-FR')  # say the phrase aloud
            if action == 1:
                # Blink blue for 5 seconds to raise the pitch
                leds.update(Leds.rgb_pattern(Color.BLUE))
                time.sleep(5)
            else:
                # Blink red for 5 seconds to lower the pitch
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(5)
def reponse_bouton(est_juste, ecart):
    # The answer is conveyed by the button colour and the blink frequency
    with Leds() as leds:
        if est_juste:
            # Solid green for 3 seconds once the target frequency is reached
            leds.update(Leds.rgb_on(Color.GREEN))
            time.sleep(3)
            print('Corde accordée')  # "String in tune"
            tts.say('Corde accordée', lang='fr-FR')
        else:
            period = 10 * abs(ecart)
            leds.pattern = Pattern.blink(period)  # sets the blink frequency
            print("TOURNER LA CHEVILLE")  # "TURN THE TUNING PEG"
            if ecart > 0:
                tts.say('Tendre la corde', lang='fr-FR')  # "Tighten the string"
                # Blink blue for 5 seconds to raise the pitch
                leds.update(Leds.rgb_pattern(Color.BLUE))
                time.sleep(5)
            else:
                tts.say('Détendre la corde', lang='fr-FR')  # "Loosen the string"
                # Blink red for 5 seconds to lower the pitch
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(5)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'), help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true', help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,
                        action='store_true', help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))
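            # Leds() is not used as a context manager here, so the error blink
            # keeps running after main() returns (nothing calls reset()).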
    return 0
Example #6
    def menu(self):
        print('Press Arcade Button to begin photo shoot.\n')
        with Board() as board, Leds() as leds:
            while True:
                # pulse LED to indicate ready state
                leds.pattern = Pattern.blink(1000)
                leds.update(Leds.rgb_pattern(Color.WHITE))
                board.button.wait_for_press()
                startTime = datetime.datetime.now()
                board.led.state = Led.ON
                print('LED is on...')
                # update LED to green indicating shoot is live
                leds.update(Leds.rgb_on((107, 255, 0)))
                self.shoot()
                leds.pattern = Pattern.blink(1000)
                leds.update(Leds.rgb_pattern(Color.WHITE))
                print('Press Arcade Button to start again\n'
                      'OR....\n'
                      'Press and HOLD the Arcade Button for 5 seconds to quit')
                board.button.wait_for_press()
                pressTime = datetime.datetime.now()
                board.button.wait_for_release()
                releaseTime = datetime.datetime.now()
                board.led.state = Led.OFF
                print('OFF')

                pressDuration = releaseTime - pressTime
                sessionDuration = releaseTime - startTime
                if pressDuration.seconds >= 5:
                    leds.update(Leds.rgb_on(Color.PURPLE))
                    print('Photo booth session ran for ' +
                          str(sessionDuration.seconds) + ' seconds')
                    time.sleep(3)
                    TonePlayer(22).play('D5e', 'rq', 'C5e', 'rq', 'Be',
                                        'rq', 'Be', 'C5e', 'D5e')
                    break
                print('Done')
Example #7
    def think(self):
        from aiy.leds import Leds, Pattern, Color
        self._wakeup = False
        self._think = True
        with Leds() as leds:
            while self._think:
                # Re-applying the same pattern every second is redundant but harmless.
                leds.pattern = Pattern.blink(500)
                leds.update(Leds.rgb_pattern(Color.GREEN))
                time.sleep(1)
Example #8
    def wakeup(self):
        from aiy.board import Board, Led
        from aiy.leds import Leds, Pattern, Color
        self._wakeup = True
        with Board() as board:
            with Leds() as leds:
                while self._wakeup:
                    board.led.state = Led.ON
                    leds.pattern = Pattern.breathe(1000)
                    leds.update(Leds.rgb_pattern(Color.BLUE))
                    time.sleep(1)
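
Both loops spin until another thread clears the flag they poll. A hypothetical driver (assuming an owner class that defines think()/wakeup() and the _think/_wakeup flags, as above):

import threading
import time

assistant = Assistant()  # hypothetical owner of the methods above
worker = threading.Thread(target=assistant.think, daemon=True)
worker.start()
time.sleep(5)
assistant._think = False  # the loop exits; the Leds context then resets the LEDs
worker.join()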
Example #9
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames',
                        '-n',
                        type=int,
                        default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha',
                        '-pa',
                        type=preview_alpha,
                        default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate',
                        type=int,
                        default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name',
                        default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
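                # Sleep briefly so the blink is visible before the Leds context manager resets the LEDs.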
                time.sleep(1.0)

    return 0
Example #10
import argparse
import threading
import time
from os import path, walk

import alsaaudio
import numpy
import pygame

from aiy.board import Board
from aiy.leds import Color, Leds, Pattern


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    leds = Leds()
    leds.pattern = Pattern.breathe(4000)
    leds.update(Leds.rgb_on((0, 8, 0)))

    pygame.init()
    pygame.mixer.init()

    mix = alsaaudio.Mixer()
    mix.setvolume(30)

    # Files
    all_files = []
    for (dirpath, dirnames, filenames) in walk('/home/pi/jukidbox_store'):
        all_files.extend([path.join(dirpath, file) for file in filenames])

    while True:
        leds.update(Leds.rgb_on((0, 8, 0)))
        try:
            with Board() as board:
                while True:
                    print('Press button to start.')
                    board.button.wait_for_press()

                    done = threading.Event()
                    board.button.when_pressed = done.set

                    print('Playing...')
                    leds.update(Leds.rgb_pattern(Color.PURPLE))
                    # Get random file
                    file = numpy.random.choice(all_files)
                    print(file)
                    pygame.mixer.music.load(file)
                    pygame.mixer.music.play(-1)

                    while pygame.mixer.music.get_busy():
                        if done.is_set():
                            leds.update(Leds.rgb_on((32, 0, 0)))
                            pygame.mixer.music.stop()
                        time.sleep(0.5)

                    print("Finished ..")
                    leds.update(Leds.rgb_on((0, 8, 0)))
        except Exception as e:
            print(e)
            leds.update(Leds.rgb_on(Color.YELLOW))
            time.sleep(2)
Example #11
def main():
    with Leds() as leds:

        print('Windows Up')
        tuned_servo.min()
        # blueLED1.blink(.2, .2)  # risk of burning the servo if kept
        # blueLED2.blink(.2, .2)
        leds.pattern = Pattern.blink(500)
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Windows Down')
        tuned_servo.max()
        interior.on()
        yellowLED.on()
        leds.pattern = Pattern.breathe(1000)
        leds.update(Leds.rgb_pattern(Color.YELLOW))

        # Fade from yellow to red
        for i in range(32):
            color = Color.blend(Color.RED, Color.YELLOW, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)


        # leds.update({
        #     1: Leds.Channel(Leds.Channel.PATTERN, 64),
        #     2: Leds.Channel(Leds.Channel.OFF, 128),
        #     3: Leds.Channel(Leds.Channel.ON, 128),
        #     4: Leds.Channel(Leds.Channel.PATTERN, 64),
        # })

        time.sleep(5)
        leds.update(Leds.rgb_off())
        tuned_servo.close()
        yellowLED.close()
        interior.close()
        blueLED2.close()
Example #12
    def shoot(self):
        with PiCamera() as camera, Leds() as leds:
            countdown = self.initial_timing
            shots_remaining = self.num_shots

            # Configure camera
            camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
            camera.start_preview()
            # leds.update(Leds.privacy_on())

            print('Get ready for your photo shoot!')
            time.sleep(3)
            print('Starting in')
            time.sleep(2)
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.RED))
            while countdown > 0:
                print(countdown)
                countdown -= 1
                time.sleep(1)
            time.sleep(1)

            print('Smile :)')
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.GREEN))
            while shots_remaining > 0:
                # if shots_remaining != self.num_shots:
                time.sleep(self.timing)
                print('*** FLASH ***')
                camera.capture(
                    'photobooth_' + str(datetime.datetime.now()) + '.jpg')
                shots_remaining -= 1
            print('\nYou looked FABULOUS!!!\n')
            time.sleep(3)
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.RED))
            # Stop preview
            camera.stop_preview()
Example #13
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder',
                        default='~/Pictures/Data',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate',
                        type=int,
                        default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name',
                        default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        gather_data(5, args.image_format, args.image_folder,
                    args.enable_streaming, args.streaming_bitrate,
                    args.mdns_name)

    except KeyboardInterrupt:
        sys.exit()
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
def main():
    # board and leds are module-level objects defined elsewhere in this script.
    # If no entry has been made that day...
    check_for_entry_today()

    print("not journaled today 👇🏼")

    # callback to run when the button is pressed
    board.button.when_pressed = journal

    print('waiting for press 👇🏽')

    leds.pattern = Pattern.breathe(2000)
    leds.update(Leds.rgb_pattern(Color.YELLOW))
    # board.button.wait_for_press(60*15) # 15 minutes
    board.button.wait_for_press(15)  # 15 seconds
    # if no press...
    print('no press, exiting 👋🏽...')
    board.led.state = Led.OFF
Example #15
def alarm(done, leds):
    print("alarm thread")
    intensity = 0
    start = time.monotonic()
    duration = 0

    while not done.is_set():
        if intensity < 1:
            intensity += 5. / 70.
            if intensity > 1:
                intensity = 1

            set_volume(intensity * MAX_VOLUME)
            leds.pattern = Pattern.breathe(map(intensity, 0., 1., 1000., 100.))
            leds.update(Leds.rgb_pattern((0, 0, intensity * MAX_BRIGHTNESS)))

        duration = time.monotonic() - start
        print('Alarm [Press button to stop] %.02fs, intensity: %.02f' %
              (duration, intensity))

        play_wav(ALARM_SOUND_PATH)
        time.sleep(SLEEP_TIME)
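
The map() call above shadows the Python builtin; the script evidently defines an Arduino-style rescaling helper elsewhere. A minimal sketch of its assumed shape:

def map(value, in_min, in_max, out_min, out_max):
    # Linearly rescale value from [in_min, in_max] to [out_min, out_max],
    # e.g. map(0.5, 0., 1., 1000., 100.) == 550.
    return out_min + (out_max - out_min) * (value - in_min) / (in_max - in_min)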
Example #16
def main():
    with Board() as board:
        with Leds() as leds:
            # init volume and brightness
            set_volume(0)
            leds.pattern = Pattern.breathe(750)
            leds.update(Leds.rgb_pattern(Color.BLACK))

            done = threading.Event()
            board.button.when_pressed = done.set

            alarm_thread = threading.Thread(target=alarm,
                                            args=(done, leds),
                                            daemon=True)
            alarm_thread.start()

            if done.wait(timeout=TIMEOUT_LIMIT):
                set_volume(MAX_VOLUME)
                leds.update(Leds.rgb_on(Color.GREEN))
                print('GOOD MORNING!')
                play_wav(GOOD_MORNING_SOUND_PATH)
            else:
                print('Timed out.')
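
set_volume() is defined elsewhere in the alarm script. A minimal stand-in using alsaaudio, the same mixer API Example #10 uses (an assumption, not the original helper; it treats volume as a 0-100 percentage):

import alsaaudio

def set_volume(percent):
    # Clamp to 0-100 and apply to the default ALSA mixer.
    alsaaudio.Mixer().setvolume(int(max(0, min(100, percent))))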
Example #17
def button():
    with Leds() as leds:
        with Board() as board:
            st_play = True
            while True:
                leds.pattern = Pattern.breathe(3000)
                if st_play:
                    leds.update(Leds.rgb_pattern(Color.GREEN))
                else:
                    leds.update(Leds.rgb_pattern(Color.BLUE))

                board.button.wait_for_press()
                if st_play:
                    send_cmd("STOP")
                    print("> STOP")
                else:
                    send_cmd("PLAY")
                    print("> PLAY")

                board.led.state = Led.ON
                board.button.wait_for_release()
                board.led.state = Led.OFF
                st_play = not st_play
Example #18
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format, args.image_folder,
                     args.enable_streaming, args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
Example #19
            self.send_header('Content-Type', 'text/html')
            self.send_header('Location', '/info')
            self.end_headers()


class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    allow_reuse_address = True
    daemon_threads = True


# I know, query strings instead of global vars might be better
object_name, object_info, object_url = None, None, None
colour = (255, 255, 255)  # white as default
leds = Leds()

leds.pattern = Pattern.breathe(1000)

with picamera.PiCamera(resolution='640x480', framerate=24) as camera:
    output = StreamingOutput()
    camera.start_recording(output, format='mjpeg')
    try:
        address = ('', 8000)
        print("Your webpage is being served at http://your-pi-address:8000/")
        server = StreamingServer(address, StreamingHandler)
        server.serve_forever()

    finally:
        camera.stop_recording()
        leds.update(Leds.rgb_off())
        image = [
            img for img in os.listdir(os.getcwd()) if img.endswith('jpg')
Example #20
    def main_loop(self):
        while True:
            with Leds() as leds:
                leds.update(Leds.rgb_on(Color.RED))

                with Board() as board:
                    print("Waiting for input")
                    board.button.wait_for_press()
                    leds.update(Leds.rgb_on((0, 0, 250)))
                    #print('ON')
                    self.start = True
                    self.counter = 0
                    self.completed = False
                    self.stopwatch = time.time()
                    board.button.wait_for_release()
                    #print('OFF')
                    leds.update(Leds.rgb_off())

                while self.start:

                    # currentState is assumed to be updated elsewhere (e.g. by an inference callback).
                    classes = currentState
                    #print("current State: ", classes)
                    if classes == 0 and self.state != 0:
                        self.standing()
                    elif classes == 1 and self.state != 1:
                        self.empty()
                    elif classes == 2 and self.state != 2 and self.last_detected_state != 2:
                        self.squat()

                    # Selecting a State
                    if (time.time()-self.stopwatch) > 0.15:
                        print("State:\t ",states_names[self.state] , "\t| [selected]")

                        if self.state == 2 and self.last_detected_state != 2:  # Squat detected
                            self.counter += 1
                            leds.update(Leds.rgb_on((0, 0, 250)))
                            self._newSqaut()
                            #print("###  Current Score: ", self.counter,"###")

                        if self.state == 2 or self.state == 0:
                            #self.stopwatch = time.time()
                            leds.update(Leds.rgb_on(Color.WHITE))

                        if self.state == 1 and ((time.time()-self.stopwatch) > 1):
                            leds.update(Leds.rgb_off())
                        
                        self.last_detected_state = self.state
                        
                    # Resetting the counter if nobody is in the frame
                    if (time.time()-self.stopwatch) > 10:
                        if self.state == 1:  # if nobody is in the frame reset counter
                            print("###  Reset Score   ###")
                            self.counter = 0
                            self.start = False
                        
                            leds.pattern = Pattern.blink(500)
                            leds.update(Leds.rgb_pattern(Color.RED))
                            time.sleep(2)
                            leds.update(Leds.rgb_off())
                            self.stopwatch = time.time()

                    # Checking whether the workout is finished
                    if self.counter >= TOTAL_SQUATS:
                        self.completed = True
                        self.output.on()
                        self.counter = 0

                        print("Completed Workout")
                        self.start = False
                        
                        leds.pattern = Pattern.blink(500)
                        leds.update(Leds.rgb_pattern(Color.GREEN))
                        time.sleep(2)
                        leds.update(Leds.rgb_on(Color.GREEN))

                        with Board() as board:
                            print("Waiting for input")
                            board.button.wait_for_press()
                            print('ON')
                            board.led.state = Led.ON
                            self.start = False
                            self.counter = 0
                            self.completed = False
                            self.stopwatch = time.time()
                            board.button.wait_for_release()
                            print('OFF')

                            self.output.off()
                            board.led.state = Led.OFF
                            leds.pattern = Pattern.blink(500)
                            leds.update(Leds.rgb_pattern(Color.RED))
                            time.sleep(2)
                            leds.update(Leds.rgb_off())
Example #21
import time
from aiy.leds import Leds, Color, Pattern


with Leds() as leds:
    for _ in range(4):
        leds.update(Leds.privacy_on())
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)
        leds.update(Leds.rgb_off())
        time.sleep(1)
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)
        leds.update(Leds.rgb_off())
        time.sleep(1)
        leds.update(Leds.privacy_off())

with Leds() as leds:
    leds.pattern = Pattern.blink(500)
    leds.update(Leds.rgb_pattern(Color.GREEN))
    time.sleep(5)
def record_journal_entry():
    # grab the LEDs as we start up (note: nothing turns them blue until later)
    leds = Leds()

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    heading = ""
    file_path = ""
    try:
        paths = gen_paths()
        heading = paths["heading"]
        file_path = paths["file_path"]
    except Exception:
        print('>>> 🆘 there was an error setting the path...\n'
              '>>> saving dirty entry locally.')
        logging.warning('Unable to get the location. Using default paths.')
        date = str(datetime.now())
        heading = date + "\n\n\n"
        file_path = os.getcwd() + "/je_error_dump_%s.txt" % date

    with Board() as board:
        with open(file_path, 'w') as dump:
            dump.write(heading)
            print('>>> please tell me about your day 👂🏼')
            while True:
                leds.pattern = Pattern.breathe(2000)
                leds.update(Leds.rgb_pattern(Color.RED))
                text = client.recognize(
                    language_code=args.language,
                    hint_phrases=hints,
                    punctuation=True,
                )
                # client must return None when it gets a pause in speech
                if text is None:
                    continue

                logging.info('You said: "%s"', text)
                print("+ %s" % text)
                dump.write(text + "  ")

                if 'new line' in text.lower():
                    dump.write('\n\n')
                    logging.info('\n\n')
                elif 'cancel cancel cancel' in text.lower():
                    board.led.state = Led.OFF
                    exit(0)
                elif 'goodbye' in text.lower():
                    break

    leds.pattern = Pattern.breathe(1000)
    leds.update(Leds.rgb_pattern(Color.GREEN))
    logging.info('>>> wrapping and saving journal entry 📓')
    # try:
    #     with open(file_path) as file:
    #         lines = file.readlines()
    #         print("read the lines")
    #         with open(file_path, 'w') as wrapper:
    #             size = 70
    #             for line in lines:
    #                 print("+" + line)
    #                 if len(line) > size:
    #                     collated = collate(line, size)
    #                     for short in collated:
    #                         wrapper.write(short)
    #                         wrapper.write('\n')
    #                 else:
    #                     writer.write(line)
    # except:
    #     logging.error('There was an error wrapping %s' % file_path)
    time.sleep(3)
    board.led.state = Led.OFF  # note: the Board context manager already exited above
Example #23
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames',
                        '-n',
                        type=int,
                        default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha',
                        '-pa',
                        type=preview_alpha,
                        default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate',
                        type=int,
                        default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name',
                        default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    broker_address = "io.adafruit.com"
    print("creating new instance")
    user = "******"
    password = "******"
    print("connecting to broker")
    client = mqtt.Client("AIY_VISION_KIT")  # create new instance
    client.username_pw_set(user, password=password)
    client.on_log = on_log
    client.connect(broker_address, 1883, 60)  # connect to broker
    client.loop_start()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name, client)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
Example #24
def main():
    model_path = '/opt/aiy/models/retrained_graph.binaryproto'
    #model_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    #label_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet_labels.txt'
    # These assignments override the paths above.
    model_path = '/opt/aiy/models/rg_v3_new.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    input_height = 160
    input_width = 160
    input_layer = 'input'
    output_layer = 'final_result'
    threshold = 0.8
    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, input_height, input_width, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(model_path))
    labels = read_labels(label_path)
    new_labels = []
    for each_label in labels:
        if len(each_label) > 1:
            new_labels.append(each_label)
    labels = new_labels
    #print(labels)
    s = xmlrpc.client.ServerProxy("http://aiy.mdzz.info:8000/")
    player = TonePlayer(BUZZER_GPIO, 10)
    player.play(*MODEL_LOAD_SOUND)
    while True:
        while True:
            if s.camera() == 1:
                print('vision kit is woken up')
                with Leds() as leds:
                    leds.pattern = Pattern.blink(100)
                    leds.update(Leds.rgb_pattern(Color.RED))
                    time.sleep(2.0)
                start_time = round(time.time())
                break
            time.sleep(0.2)
            print('no signal, sleeping...')

        with PiCamera() as camera:
            # Configure camera
            camera.sensor_mode = 4
            camera.resolution = (1664, 1232)  # Full Frame, 16:9 (Camera v2)
            camera.framerate = 30
            camera.start_preview()
            while True:
                # Do inference on VisionBonnet
                #print('Start capturing')
                with CameraInference(face_detection.model()) as inference:
                    for result in inference.run():
                        #print(type(result))
                        faces = face_detection.get_faces(result)
                        if len(faces) >= 1:
                            #print('camera captures...')
                            extension = '.jpg'
                            filename = time.strftime('%Y-%m-%d %H:%M:%S') + extension
                            camera.capture(filename)
                            image_npp = np.empty((1664 * 1232 * 3,), dtype=np.uint8)
                            camera.capture(image_npp, 'rgb')
                            image_npp = image_npp.reshape((1232, 1664, 3))
                            image_npp = image_npp[:1232, :1640, :]
                            # image = Image.open('jj.jpg')
                            # draw = ImageDraw.Draw(image)
                            faces_data = []
                            faces_cropped = []
                            for i, face in enumerate(faces):
                                # print('Face #%d: %s' % (i, face))
                                x, y, w, h = face.bounding_box
                                #print(x,y,w,h)
                                w_rm = int(0.3 * w / 2)
                                face_cropped = crop_np((x, y, w, h), w_rm, image_npp)
                                if face_cropped is None:
                                    continue
                                # faces_data.append(image[y: y + h, x + w_rm: x + w - w_rm])
                                # image[y: y + h, x + w_rm: x + w - w_rm].save('1.jpg')
                                face_cropped.save('face_cropped_'+str(i)+'.jpg')
                                faces_cropped.append(face_cropped)
                                #break
                            break
                        # else:
                        #     tt = round(time.time()) - start_time
                        #     if tt > 10:
                        #         break
                    #print('face cutting finishes')

                #print(type(faces_cropped), len(faces_cropped))
                player.play(*BEEP_SOUND)
                flag = 0
                for each_face in faces_cropped:
                    if each_face is None:
                        flag = 1
                if len(faces_cropped) <= 0:
                    flag = 1
                if flag == 1:
                    continue
                with ImageInference(model) as img_inference:
                #with CameraInference(model) as img_inference:
                    print('Entering classify_hand_gestures()')
                    output = classify_hand_gestures(img_inference, faces_cropped, model=model, labels=labels,
                                                    output_layer=output_layer, threshold=threshold)
                #print(output)
                if output == 3:
                    player.play(*JOY_SOUND)
                    print('Yani face detected')
                    print(s.result("Owner", filename))
                else:
                    player.play(*SAD_SOUND)
                    print('Suspicious face detected')
                    print(s.result("Unknown Face", filename))
                upload(filename)
                # Stop preview #
                #break
                while s.camera() == 0:
                    print('sleeping')
                    time.sleep(.2)
                print('Woke up')
Example #25
def main():
    with Leds() as leds:
        print('RGB: Solid RED for 1 second')
        leds.update(Leds.rgb_on(Color.RED))
        time.sleep(1)

        print('RGB: Solid GREEN for 1 second')
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)

        print('RGB: Solid YELLOW for 1 second')
        leds.update(Leds.rgb_on(Color.YELLOW))
        time.sleep(1)

        print('RGB: Solid BLUE for 1 second')
        leds.update(Leds.rgb_on(Color.BLUE))
        time.sleep(1)

        print('RGB: Solid PURPLE for 1 second')
        leds.update(Leds.rgb_on(Color.PURPLE))
        time.sleep(1)

        print('RGB: Solid CYAN for 1 second')
        leds.update(Leds.rgb_on(Color.CYAN))
        time.sleep(1)

        print('RGB: Solid WHITE for 1 second')
        leds.update(Leds.rgb_on(Color.WHITE))
        time.sleep(1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=default)')
            leds.update(Leds.privacy_on())
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=5)')
            leds.update(Leds.privacy_on(5))
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)

        print('RGB: Blink RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Blink GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Blink BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)

        print('RGB: Breathe RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Breathe GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Breathe BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('RGB: Increase RED brightness for 3.2 seconds')
        for i in range(32):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for i in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for i in range(32):
            color = Color.blend(Color.BLUE, Color.GREEN, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),  # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),  # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),  # Privacy channel
        })
        time.sleep(5)

        print('Done')
Example #26
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames',
                        '-n',
                        type=int,
                        default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha',
                        '-pa',
                        type=preview_alpha,
                        default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder',
                        default='tmpImage',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server')
    parser.add_argument(
        '--streaming_bitrate',
        type=int,
        default=100000,  # 1000000
        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name',
                        default='',
                        help='Streaming server mDNS name')

    parser.add_argument('--cam_width',
                        type=int,
                        default=1640,
                        help='Camera Width')
    parser.add_argument('--cam_height',
                        type=int,
                        default=1232,
                        help='Camera Height')
    parser.add_argument('--fps',
                        type=int,
                        default=30,
                        help='Camera Frames Per Second')
    parser.add_argument(
        '--region',
        nargs=4,
        type=int,
        default=[504, 632, 632, 632],
        help='Region for entering/exiting face detection: x, y, width, height')
    parser.add_argument(
        '--enter_side',
        type=int,
        default=2,
        help='Which side of the region counts as "entering": 0 = right, 1 = left. '
             'In dual camera operation: 2 = entering, 3 = exiting')
    parser.add_argument(
        '--annotator',
        default=False,
        action='store_true',
        help='Show the annotator overlay (disables camera snapshots).')
    parser.add_argument('--url',
                        default="https://isrow.net",
                        help='Url to send the face captures that are taken.')
    parser.add_argument(
        '--username',
        default="CL-24-2",
        help='User name used to authenticate this device initially')
    parser.add_argument(
        '--password',
        default="1qw2!QW@1qw2",
        help='Password used to authenticate this device initially')
    parser.add_argument(
        '--image_dir',
        default="api/events/",
        help='{url + "/" + image_dir} will give us path to send the face data')
    parser.add_argument('--dev', default=20)
    args = parser.parse_args()

    try:
        monitor_run(args.num_frames, args.preview_alpha, args.image_format,
                    args.image_folder, args.enable_streaming,
                    args.streaming_bitrate, args.mdns_name, args.cam_width,
                    args.cam_height, args.fps, args.region, args.enter_side,
                    args.annotator, args.url, args.username, args.password,
                    args.image_dir, args.dev)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
Example #27
def listen_me():

    global text, duration

    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    # Load the Vokaturi emotion-analysis library
    Vokaturi.load("/home/pi/lib/piZero.so")

    # Initialize the Cloud Speech, Natural Language, and TTS clients
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()
    tts_client = texttospeech.TextToSpeechClient()

    pos_wavs = []
    neut_wavs = []
    neg_wavs = []
    intro_wavs = []

    pos_wavs.append(text_to_audio(tts_client, '진짜?', '0.wav'))   # "Really?"
    pos_wavs.append(text_to_audio(tts_client, '대박', '1.wav'))    # "No way!"
    pos_wavs.append(text_to_audio(tts_client, '우와', '2.wav'))    # "Wow"
    pos_wavs.append(text_to_audio(tts_client, '하하', '3.wav'))    # "Haha"

    neut_wavs.append(text_to_audio(tts_client, '응', '10.wav'))       # "Yeah"
    neut_wavs.append(text_to_audio(tts_client, '그렇구나', '11.wav'))  # "I see"
    neut_wavs.append(text_to_audio(tts_client, '그래서?', '12.wav'))   # "And then?"
    neut_wavs.append(text_to_audio(tts_client, '응응', '13.wav'))      # "Uh-huh"

    neg_wavs.append(text_to_audio(tts_client, '저런', '4.wav'))   # "Oh no"
    neg_wavs.append(text_to_audio(tts_client, '힘내', '5.wav'))   # "Cheer up"
    neg_wavs.append(text_to_audio(tts_client, '에휴', '6.wav'))   # "Sigh"

    intro_wavs.append(text_to_audio(tts_client, '들어줄게. 얘기해봐', 'intro0.wav'))  # "I'm listening. Tell me."
    intro_wavs.append(text_to_audio(tts_client, '무슨 일 이야?', 'intro1.wav'))       # "What's going on?"
    play_wav(random.choice(intro_wavs))

    logging.basicConfig(level=logging.INFO)

    with Board() as board:

        while True:

            print('말해보자.')  # "Let's talk."
            text = None
            duration = 0.
            emotion = None

            def wait():
                global text, duration
                start = time.monotonic()

                while text is None:

                    # Recognize the speech as text
                    text = client.recognize(language_code='ko-KR')
                    duration = time.monotonic() - start

            # Record while waiting for recognition to finish
            record_file(AudioFormat.CD,
                        filename=args.filename,
                        wait=wait,
                        filetype='wav')

            print(text)
            print('Recorded: %.02f seconds' % duration)

            # ("Thanks for listening", "Thanks for listening to my story", "Assistant", "Bye", "Bye")
            if text in ['들어줘서 고마워', '내 얘기 들어줘서 고마워', '어시스턴트', '잘가', '잘 가']:
                return

            # Text sentiment analysis
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment

            print('Text sentiment analysis *********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score,
                                             sentiment.magnitude))

            # These thresholds may be tuned after experimentation
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1

            # text sentiment analysis is enough
            if (sentiment.score < neg_standard
                    or sentiment.score > pos_standard):
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")

            else:
                # Audio sentiment analysis of the recording
                print('Audio sentiment analysis *********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                # print ("   sample rate %.3f Hz" % sample_rate)

                # print ("Allocating Vokaturi sample array...")
                buffer_length = len(samples)
                print("   %d samples, %d channels" %
                      (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                                         samples[:, 1]) / 32768.0

                # print ("Creating VokaturiVoice...")
                voice = Vokaturi.Voice(sample_rate, buffer_length)

                # print ("Filling VokaturiVoice with samples...")
                voice.fill(buffer_length, c_buffer)

                # print ("Extracting emotions from VokaturiVoice...")
                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)

                if quality.valid:
                    # print ("Neutral: %.3f" % emotionProbabilities.neutrality)
                    # print ("Happy: %.3f" % emotionProbabilities.happiness)
                    # print ("Sad: %.3f" % emotionProbabilities.sadness)
                    # print ("Angry: %.3f" % emotionProbabilities.anger)
                    # print ("Fear: %.3f" % emotionProbabilities.fear)
                    # Let's ignore the fear probability.

                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness +
                        emotionProbabilities.anger)

                    if wave_score > 0 and sentiment.score > 0.4:
                        print('@@@긍정')  # positive
                        emotion = True
                    elif wave_score < 0 and sentiment.score < 0.4:
                        print('@@@부정')  # negative
                        emotion = False

                    # If the text score and audio score disagree, react neutrally (emotion = None)

            # React from here on.

            with Leds() as leds:
                if emotion is True:
                    play_wav(random.choice(pos_wavs))
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                elif emotion is False:
                    play_wav(random.choice(neg_wavs))
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
                    # play_wav('people-cheering.wav')

                # Neutral reaction
                else:
                    play_wav(random.choice(neut_wavs))
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
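
text_to_audio() is a project helper not shown here. A plausible sketch matching the pre-2.0 google-cloud-texttospeech API that the other calls suggest (an assumption, not the original helper):

def text_to_audio(tts_client, text, filename):
    # Synthesize Korean speech into a WAV file and return its name.
    synthesis_input = texttospeech.types.SynthesisInput(text=text)
    voice = texttospeech.types.VoiceSelectionParams(language_code='ko-KR')
    audio_config = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.LINEAR16)
    response = tts_client.synthesize_speech(synthesis_input, voice, audio_config)
    with open(filename, 'wb') as out:
        out.write(response.audio_content)
    return filename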
Пример #28
0
def main():
    with Leds() as leds:
        print('RGB: Solid RED for 1 second')
        leds.update(Leds.rgb_on(Color.RED))
        time.sleep(1)

        print('RGB: Solid GREEN for 1 second')
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)

        print('RGB: Solid YELLOW for 1 second')
        leds.update(Leds.rgb_on(Color.YELLOW))
        time.sleep(1)

        print('RGB: Solid BLUE for 1 second')
        leds.update(Leds.rgb_on(Color.BLUE))
        time.sleep(1)

        print('RGB: Solid PURPLE for 1 second')
        leds.update(Leds.rgb_on(Color.PURPLE))
        time.sleep(1)

        print('RGB: Solid CYAN for 1 second')
        leds.update(Leds.rgb_on(Color.CYAN))
        time.sleep(1)

        print('RGB: Solid WHITE for 1 second')
        leds.update(Leds.rgb_on(Color.WHITE))
        time.sleep(1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=default)')
            leds.update(Leds.privacy_on())
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=5)')
            leds.update(Leds.privacy_on(5))
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)

        print('RGB: Blink RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Blink GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Blink BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)

        print('RGB: Breathe RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)

        print('RGB: Breathe GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)

        print('RGB: Breathe BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('RGB: Increase RED brightness for 3.2 seconds')
        for i in range(32):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for i in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for i in range(32):
            color = Color.blend(Color.BLUE, Color.GREEN, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
        })
        time.sleep(5)

        print('Done')
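
Color.blend above interpolates between two RGB tuples per 8-bit channel. A minimal sketch of equivalent behavior (assuming linear interpolation weighted toward the first argument, which matches the loop above sweeping from GREEN to BLUE as i / 32 grows):

def blend(color_a, color_b, alpha):
    # alpha=0.0 yields color_b, alpha=1.0 yields color_a.
    return tuple(int(alpha * a + (1.0 - alpha) * b)
                 for a, b in zip(color_a, color_b))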
Example #29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument(
        '--preview_alpha',
        '-pa',
        type=int,
        dest='preview_alpha',
        default=0,
        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format',
                        type=str,
                        dest='image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder',
                        type=str,
                        dest='image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error',
                        dest='blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming',
                        dest='enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error(
            'AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
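    # Apostrophes are deliberately omitted from the spoken text below;
    # an apostrophe would end the single-quoted shell string passed to espeak.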
    os.system(
        "espeak -s160 -g6 -ven+f3 'Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!'"
    )
    ##    os.system('pico2wave -w begin.wav "Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!" && aplay begin.wav')
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))
    return 0
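
The blink-on-error idiom above recurs across these examples and can be factored into a context manager. A sketch against the same aiy.leds API (the helper name is an assumption, not part of the library):

import contextlib

from aiy.leds import Color, Leds, Pattern

@contextlib.contextmanager
def blink_red_on_error(enabled=True, period_ms=500):
    try:
        yield
    except Exception:
        if enabled:
            # Same sequence as the except block above.
            leds = Leds()
            leds.pattern = Pattern.blink(period_ms)
            leds.update(Leds.rgb_pattern(Color.RED))
        raise  # re-raise so the caller still sees the failure

Usage would then be: with blink_red_on_error(args.blink_on_error): detector.run(...).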
Example #30
def listen_me():

    global text, duration

    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    # Load the Vokaturi library.
    Vokaturi.load("/home/pi/lib/piZero.so")

    # Initialize the Cloud Speech client and the text natural-language client.
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()

    logging.basicConfig(level=logging.INFO)

    with Board() as board:

        while True:

            print("Let's talk.")
            text = None
            duration = 0.
            emotion = None

            def wait():
                global text, duration
                start = time.monotonic()

                while text is None:

                    # Recognize the speech as text.
                    text = client.recognize(language_code='ko-KR')
                    duration = time.monotonic() - start

            # Record while recognition runs.
            record_file(AudioFormat.CD,
                        filename=args.filename,
                        wait=wait,
                        filetype='wav')

            print(text)
            print('Recorded: %.02f seconds' % duration)

            # Exit phrases: "thanks for listening (to me)", "assistant", "goodbye".
            if text in ['들어줘서 고마워', '내 얘기 들어줘서 고마워', '어시스턴트', '잘가', '잘 가']:
                return

            # Analyze the sentiment of the text.
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment

            print('Text sentiment analysis *********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score,
                                             sentiment.magnitude))

            ##################### Tunable after experimentation ####################
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1

            # text sentiment analysis is enough
            if (sentiment.score < neg_standard
                    or sentiment.score > pos_standard):
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")

            else:
                # Analyze the sentiment of the recorded audio.
                print('Audio sentiment analysis *********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                # print ("   sample rate %.3f Hz" % sample_rate)

                # print ("Allocating Vokaturi sample array...")
                buffer_length = len(samples)
                print("   %d samples, %d channels" %
                      (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo
                    # The + 0.0 promotes int16 to float before the sum,
                    # preventing integer overflow while downmixing to mono.
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 +
                                         samples[:, 1]) / 32768.0

                # print ("Creating VokaturiVoice...")
                voice = Vokaturi.Voice(sample_rate, buffer_length)

                # print ("Filling VokaturiVoice with samples...")
                voice.fill(buffer_length, c_buffer)

                # print ("Extracting emotions from VokaturiVoice...")
                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)

                if quality.valid:
                    # print ("Neutral: %.3f" % emotionProbabilities.neutrality)
                    # print ("Happy: %.3f" % emotionProbabilities.happiness)
                    # print ("Sad: %.3f" % emotionProbabilities.sadness)
                    # print ("Angry: %.3f" % emotionProbabilities.anger)
                    # print ("Fear: %.3f" % emotionProbabilities.fear)
                    # Ignore the fear probability.

                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness +
                        emotionProbabilities.anger)

                    if wave_score > 0:
                        print('@@@positive')
                        emotion = True
                    else:
                        print('@@@negative')
                        emotion = False

            # The text analysis was ambiguous and the wave analysis failed (usually because the utterance was too short).
            if emotion is None:
                print('please say again')
                # A neutral reaction could go here instead.
                continue

            # The reaction starts here.

            with Leds() as leds:
                if emotion is True:
                    # tts.say('I am glad to hear that.')
                    # tts.say('진짜? 대박.')  # "Really? Awesome."
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                else:
                    # tts.say('I am sorry to hear that.')
                    # tts.say('저런. 힘내.')  # "Oh no. Cheer up."
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_pattern(color))  # rgb_pattern so the breathe pattern takes effect
                    time.sleep(1)
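
The int16-to-float conversion above is easy to get wrong: summing two int16 channels can overflow before the division, which is why the original adds 0.0 first. A NumPy sketch of the same downmix (the helper name is an assumption):

import numpy as np

def to_vokaturi_samples(samples):
    samples = samples.astype(np.float64)  # promote before any arithmetic
    if samples.ndim == 2:                 # stereo -> mono
        samples = 0.5 * (samples[:, 0] + samples[:, 1])
    return samples / 32768.0              # scale the int16 range to [-1.0, 1.0)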
Example #31
for _ in range(3):
    print('Privacy: On (brightness=default)')
    leds.update(Leds.privacy_on())
    time.sleep(1)
    print('Privacy: Off')
    leds.update(Leds.privacy_off())
    time.sleep(1)

for _ in range(3):
    print('Privacy: On (brightness=5)')
    leds.update(Leds.privacy_on(5))
    time.sleep(1)
    print('Privacy: Off')
    leds.update(Leds.privacy_off())
    time.sleep(1)

print('Set blink pattern: period=500ms (2Hz)')
leds.pattern = Pattern.blink(500)

print('RGB: Blink RED for 5 seconds')
leds.update(Leds.rgb_pattern(RED))
time.sleep(5)

print('RGB: Blink GREEN for 5 seconds')
leds.update(Leds.rgb_pattern(GREEN))
time.sleep(5)

print('RGB: Blink BLUE for 5 seconds')
leds.update(Leds.rgb_pattern(BLUE))
time.sleep(5)

print('Set breathe pattern: period=1000ms (1Hz)')
leds.pattern = Pattern.breathe(1000)
Example #32
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument(
        '--preview_alpha',
        '-pa',
        type=int,
        dest='preview_alpha',
        default=0,
        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format',
                        type=str,
                        dest='image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder',
                        type=str,
                        dest='image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error',
                        dest='blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming',
                        dest='enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server.')
    parser.add_argument('--width',
                        dest='width',
                        default=640,
                        type=int,
                        help='Streaming video width.')

    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error(
            'AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    print('Initializing camera')
    with picamera.PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution that inference runs on.
        # Use half of that for video streaming (820x616).

        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image rightside up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        camera.sensor_mode = 4

        time.sleep(1)  # camera warm-up time
        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '',
            WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)
        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)
        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)
        print('Starting recording')
        camera.start_recording(output, 'yuv')
        print('Start Inference')
        detector = JoyDetector(camera, args)

        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            while True:
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        except Exception:
            # Blink red on unexpected errors, as --blink_on_error promises
            # (a finally block would also blink on normal exit and Ctrl-C).
            if args.blink_on_error:
                leds = Leds()
                leds.pattern = Pattern.blink(500)
                leds.update(Leds.rgb_pattern(RED_COLOR))
        finally:
            # Stop the frame producer first, shut the servers down next,
            # and join the threads once serve_forever() has returned.
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()