def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'), help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true', help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,
                        action='store_true', help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))
    return 0
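The snippet above is shown without its imports; JoyDetector, RED_COLOR and logger are defined elsewhere in its original file. A minimal sketch of the missing header, assuming the AIY library's aiy.leds module:

import argparse
import logging
import os

from aiy.leds import Leds, Pattern

# Placeholders for names the snippet takes from the rest of its original
# module; the real JoyDetector class is not shown on this page.
logger = logging.getLogger(__name__)
RED_COLOR = (0xFF, 0x00, 0x00)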
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    leds = Leds()
    leds.pattern = Pattern.breathe(4000)
    leds.update(Leds.rgb_on((0, 8, 0)))

    pygame.init()
    pygame.mixer.init()

    mix = alsaaudio.Mixer()
    mix.setvolume(30)

    # Files
    all_files = []
    for (dirpath, dirnames, filenames) in walk('/home/pi/jukidbox_store'):
        all_files.extend([path.join(dirpath, file) for file in filenames])

    while True:
        leds.update(Leds.rgb_on((0, 8, 0)))
        try:
            with Board() as board:
                while True:
                    print('Press button to start.')
                    board.button.wait_for_press()

                    done = threading.Event()
                    board.button.when_pressed = done.set

                    print('Playing...')
                    leds.update(Leds.rgb_pattern(Color.PURPLE))
                    # Get random file
                    file = numpy.random.choice(all_files)
                    print(file)
                    pygame.mixer.music.load(file)
                    pygame.mixer.music.play(-1)

                    while pygame.mixer.music.get_busy():
                        if done.is_set():
                            leds.update(Leds.rgb_on((32, 0, 0)))
                            pygame.mixer.music.stop()
                        time.sleep(0.5)

                    print("Finished ..")
                    leds.update(Leds.rgb_on((0, 8, 0)))
        except Exception as e:
            print(e)
            leds.update(Leds.rgb_on(Color.YELLOW))
            time.sleep(2)
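This jukebox snippet also omits its imports. A rough sketch of what it appears to rely on, assuming the AIY Board and Leds classes plus the third-party alsaaudio, numpy and pygame packages:

import threading
import time
from os import path, walk

import alsaaudio
import numpy
import pygame

from aiy.board import Board
from aiy.leds import Color, Leds, Pattern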
Example #4
            self.send_header('Content-Type', 'text/html')
            self.send_header('Location', '/info')
            self.end_headers()


class StreamingServer(socketserver.ThreadingMixIn, server.HTTPServer):
    allow_reuse_address = True
    daemon_threads = True


# I know, query strings instead of global vars might be better
object_name, object_info, object_url = None, None, None
colour = (255, 255, 255)  # white as default
leds = Leds()

leds.pattern = Pattern.breathe(1000)

with picamera.PiCamera(resolution='640x480', framerate=24) as camera:
    output = StreamingOutput()
    camera.start_recording(output, format='mjpeg')
    try:
        address = ('', 8000)
        print("Your webpage is being served at http://your-pi-address:8000/")
        server = StreamingServer(address, StreamingHandler)
        server.serve_forever()

    finally:
        camera.stop_recording()
        leds.update(Leds.rgb_off())
        image = [
            img for img in os.listdir(os.getcwd()) if img.endswith('jpg')
        ]
Example #5
def main():
    leds = Leds()

    print('RGB: Solid RED for 1 second')
    leds.update(Leds.rgb_on(RED))
    time.sleep(1)

    print('RGB: Solid GREEN for 1 second')
    leds.update(Leds.rgb_on(GREEN))
    time.sleep(1)

    print('RGB: Solid YELLOW for 1 second')
    leds.update(Leds.rgb_on(YELLOW))
    time.sleep(1)

    print('RGB: Solid BLUE for 1 second')
    leds.update(Leds.rgb_on(BLUE))
    time.sleep(1)

    print('RGB: Solid PURPLE for 1 second')
    leds.update(Leds.rgb_on(PURPLE))
    time.sleep(1)

    print('RGB: Solid CYAN for 1 second')
    leds.update(Leds.rgb_on(CYAN))
    time.sleep(1)

    print('RGB: Solid WHITE for 1 second')
    leds.update(Leds.rgb_on(WHITE))
    time.sleep(1)

    print('RGB: Off for 1 second')
    leds.update(Leds.rgb_off())
    time.sleep(1)

    for _ in range(3):
        print('Privacy: On (brightness=default)')
        leds.update(Leds.privacy_on())
        time.sleep(1)
        print('Privacy: Off')
        leds.update(Leds.privacy_off())
        time.sleep(1)

    for _ in range(3):
        print('Privacy: On (brightness=5)')
        leds.update(Leds.privacy_on(5))
        time.sleep(1)
        print('Privacy: Off')
        leds.update(Leds.privacy_off())
        time.sleep(1)

    print('Set blink pattern: period=500ms (2Hz)')
    leds.pattern = Pattern.blink(500)

    print('RGB: Blink RED for 5 seconds')
    leds.update(Leds.rgb_pattern(RED))
    time.sleep(5)

    print('RGB: Blink GREEN for 5 seconds')
    leds.update(Leds.rgb_pattern(GREEN))
    time.sleep(5)

    print('RGB: Blink BLUE for 5 seconds')
    leds.update(Leds.rgb_pattern(BLUE))
    time.sleep(5)

    print('Set breathe pattern: period=1000ms (1Hz)')
    leds.pattern = Pattern.breathe(1000)

    print('RGB: Breathe RED for 5 seconds')
    leds.update(Leds.rgb_pattern(RED))
    time.sleep(5)

    print('RGB: Breathe GREEN for 5 seconds')
    leds.update(Leds.rgb_pattern(GREEN))
    time.sleep(5)

    print('RGB: Breathe BLUE for 5 seconds')
    leds.update(Leds.rgb_pattern(BLUE))
    time.sleep(5)

    print('RGB: Increase RED brightness for 3.2 seconds')
    for i in range(32):
        leds.update(Leds.rgb_on((8 * i, 0, 0)))
        time.sleep(0.1)

    print('RGB: Decrease RED brightness for 3.2 seconds')
    for i in reversed(range(32)):
        leds.update(Leds.rgb_on((8 * i, 0, 0)))
        time.sleep(0.1)

    print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
    for i in range(32):
        leds.update(Leds.rgb_on(blend(BLUE, GREEN, i / 32)))
        time.sleep(0.1)

    print('RGB: Off for 1 second')
    leds.update(Leds.rgb_off())
    time.sleep(1)

    print('Privacy: On for 2 seconds')
    with PrivacyLed(leds):
        time.sleep(2)

    print('RGB: Solid GREEN for 2 seconds')
    with RgbLeds(leds, Leds.rgb_on(GREEN)):
        time.sleep(2)

    print('Custom configuration for 5 seconds')
    leds.update({
        1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
        2: Leds.Channel(Leds.Channel.OFF, 0),  # Green channel
        3: Leds.Channel(Leds.Channel.ON, 128),  # Blue channel
        4: Leds.Channel(Leds.Channel.PATTERN, 64),  # Privacy channel
    })
    time.sleep(5)

    print('Done')
    leds.reset()
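The LED demo above depends on module-level colour constants and a blend() helper that this page does not include. A minimal sketch, assuming the colours are plain 8-bit RGB tuples and blend() is a linear interpolation:

import time

from aiy.leds import Leds, Pattern, PrivacyLed, RgbLeds

# Assumed colour values; the original file defines its own.
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def blend(color_a, color_b, alpha):
    # Mix two RGB tuples: alpha=1.0 returns color_a, alpha=0.0 returns color_b.
    return tuple(int(alpha * a + (1.0 - alpha) * b)
                 for a, b in zip(color_a, color_b))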
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument(
        '--preview_alpha',
        '-pa',
        type=int,
        dest='preview_alpha',
        default=0,
        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format',
                        type=str,
                        dest='image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder',
                        type=str,
                        dest='image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error',
                        dest='blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming',
                        dest='enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error(
            'AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
    os.system(
        "espeak -s160 -g6 -ven+f3 'Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!'"
    )
    ##    os.system('pico2wave -w begin.wav "Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!" && aplay begin.wav')
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))
    return 0
Example #7
def record_journal_entry():
    # turn light blue as we start up
    leds = Leds()

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    heading = ""
    file_path = ""
    try:
        paths = gen_paths()
        heading = paths["heading"]
        file_path = paths["file_path"]
    except Exception:
        print(
            ">>> 🆘 there was an error setting the path...\n>>> saving dirty entry locally."
        )
        logging.warning('Unable to get the location.  Using default paths.')
        date = str(datetime.now())
        heading = date + "\n\n\n"
        file_path = os.getcwd() + "/je_error_dump_%s.txt" % date

    with Board() as board:
        with open(file_path, 'w') as dump:
            dump.write(heading)
            print('>>> please tell me about your day 👂🏼')
            while True:
                leds.pattern = Pattern.breathe(2000)
                leds.update(Leds.rgb_pattern(Color.RED))
                text = client.recognize(
                    language_code=args.language,
                    hint_phrases=hints,
                    punctuation=True,
                )
                # client must return None when it gets a pause in speech
                if text is None:
                    continue

                logging.info(' You said: "%s"' % text)
                print("+ %s" % text)
                dump.write(text + "  ")

                if 'new line' in text.lower():
                    dump.write('\n\n')
                    logging.info('\n\n')
                elif 'cancel cancel cancel' in text.lower():
                    board.led.state = Led.OFF
                    exit(0)
                elif 'goodbye' in text.lower():
                    break

    leds.pattern = Pattern.breathe(1000)
    leds.update(Leds.rgb_pattern(Color.GREEN))
    logging.info('>>> wrapping and saving journal entry 📓')
    # try:
    #     with open(file_path) as file:
    #         lines = file.readlines()
    #         print("read the lines")
    #         with open(file_path, 'w') as wrapper:
    #             size = 70
    #             for line in lines:
    #                 print("+" + line)
    #                 if len(line) > size:
    #                     collated = collate(line, size)
    #                     for short in collated:
    #                         wrapper.write(short)
    #                         wrapper.write('\n')
    #                 else:
    #                     writer.write(line)
    # except:
    #     logging.error('There was an error wrapping %s' % file_path)
    time.sleep(3)
    board.led.state = Led.OFF
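The journal recorder calls gen_paths(), get_hints() and collate(), none of which appear on this page. The sketch below is a hypothetical gen_paths(), included only to show the shape the caller expects, a dict with 'heading' and 'file_path' keys:

import os
from datetime import datetime


def gen_paths():
    # Hypothetical stand-in: the real helper presumably derives a heading and
    # a destination path (the snippet's fallback suggests it uses the device's
    # location) for the journal entry.
    date = str(datetime.now())
    return {
        'heading': date + '\n\n\n',
        'file_path': os.path.join(os.getcwd(), 'journal_entry_%s.txt' % date),
    }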
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--num_frames',
        '-n',
        type=int,
        dest='num_frames',
        default=-1,
        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument(
        '--preview_alpha',
        '-pa',
        type=int,
        dest='preview_alpha',
        default=0,
        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format',
                        type=str,
                        dest='image_format',
                        default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder',
                        type=str,
                        dest='image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error',
                        dest='blink_on_error',
                        default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming',
                        dest='enable_streaming',
                        default=False,
                        action='store_true',
                        help='Enable streaming server.')
    parser.add_argument('--width',
                        dest='width',
                        type=int,
                        default=640,
                        help='Streaming video width.')

    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error(
            'AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    print('Initializing camera')
    with picamera.PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        # Use half of that for video streaming (820x616).

        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image rightside up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        camera.sensor_mode = 4

        time.sleep(1)  # camera warm-up time
        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '',
            WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)
        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)
        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)
        print('Starting recording')
        camera.start_recording(output, 'yuv')
        print('Start Inference')
        detector = JoyDetector(camera, args)

        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            while True:
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        finally:
            if args.blink_on_error:
                leds = Leds()
                leds.pattern = Pattern.blink(500)
                leds.update(Leds.rgb_pattern(RED_COLOR))
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()
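The streaming variant above references module-level configuration constants (WIDTH, HEIGHT, FRAMERATE, VFLIP, HFLIP, WS_PORT, HTTP_PORT) defined earlier in its original script. Placeholder values consistent with the sensor-mode comment in the snippet, not the author's actual settings:

# Assumed placeholders only; the real values live elsewhere in the script.
WIDTH = 1640       # sensor mode 4 frame size the inference runs on
HEIGHT = 1232
FRAMERATE = 24
VFLIP = False      # flip vertically if the camera is mounted upside down
HFLIP = False
WS_PORT = 8084     # websocket port (assumed)
HTTP_PORT = 8082   # HTTP port (assumed)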