def reponse_bouton(est_juste, ecart):
    ### Feedback given through the button color and the blink frequency
    with Leds() as leds:
        if est_juste:
            leds.update(Leds.rgb_on(Color.GREEN))  # Solid green for 3 seconds once the target frequency is reached
            time.sleep(3)
            print('Corde accordée')
            tts.say('Corde accordée', lang='fr-FR')
        else:
            period = 10 * abs(ecart)
            leds.pattern = Pattern.blink(period)  # Blink period scales with the pitch deviation
            print("TOURNER LA CHEVILLE")
            if ecart > 0:
                tts.say('Tendre la corde', lang='fr-FR')
                leds.update(Leds.rgb_pattern(Color.BLUE))  # Blue blink for 5 seconds: tighten the string
                time.sleep(5)
            else:
                tts.say('Détendre la corde', lang='fr-FR')
                leds.update(Leds.rgb_pattern(Color.RED))  # Red blink for 5 seconds: loosen the string
                time.sleep(5)
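# Usage sketch (hypothetical values; assumes the surrounding module already
# provides time, tts, Leds, Pattern and Color as used above):
reponse_bouton(est_juste=True, ecart=0.0)    # in tune: solid green, spoken confirmation
reponse_bouton(est_juste=False, ecart=-1.5)  # negative deviation: red blink, 'Détendre la corde'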
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,
                        action='store_true',
                        help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))

    return 0
def initialize(self):
    self.settings_change_callback = self.on_settings_changed
    self.get_settings()
    try:
        self.leds = Leds()
        self.led_idle()
    except Exception:
        self.log.warning("Can't initialize LED - skill will not load")
        self.speak_dialog("error.initialize")
    try:
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.remove_event_detect(BUTTON)
        GPIO.add_event_detect(BUTTON, GPIO.FALLING, bouncetime=500)
    except Exception:
        self.log.warning("Can't initialize GPIO - skill will not load")
        self.speak_dialog("error.initialize")
    finally:
        self.schedule_repeating_event(self.button_press, None, 0.1, 'GoogleAIYv2')
        self.add_event('recognizer_loop:record_begin', self.on_listener_started)
        self.add_event('recognizer_loop:record_end', self.on_listener_ended)
        self.add_event('mycroft.skill.handler.complete', self.on_handler_complete)
        self.add_event('mycroft.speech.recognition.unknown', self.on_handler_complete)
def reponse_bouton(est_juste, ecart, action):
    with Leds() as leds:
        if est_juste:
            leds.update(Leds.rgb_on(Color.GREEN))  # Solid green for 3 seconds once the target frequency is reached
            time.sleep(3)
            print('Corde accordée')
            tts.say('Corde accordée', lang='fr-FR')  ####### Also speak the phrase #######
        else:
            period = 10 * abs(ecart)  # abs() keeps the blink period positive for a negative deviation
            leds.pattern = Pattern.blink(period)  # Blink period scales with the pitch deviation
            print('Tourner la cheville')
            tts.say('Tourner la cheville', lang='fr-FR')  ####### Speak the phrase #######
            if action == 1:
                leds.update(Leds.rgb_pattern(Color.BLUE))  # Blue blink for 5 seconds: tighten the string
                time.sleep(5)
            else:
                leds.update(Leds.rgb_pattern(Color.RED))  # Red blink for 5 seconds: loosen the string
                time.sleep(5)
def facedetect():
    with PiCamera() as camera, Leds() as leds:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()
        leds.update(Leds.privacy_on())

        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture(
                        'faces_' + str(datetime.datetime.now()) + '.jpg')
                    # 'led' is presumably a module-level indicator LED defined elsewhere.
                    # print(device.is_active)
                    print(led.is_active)
                    # device.on()
                    # bz.on()
                    led.on()
                    print(led.is_active)
                    # time.sleep(1)
                    # print(device.is_active)
                    led.off()
                    print(led.is_active)
                    break

        # Stop preview and turn the privacy LED back off
        camera.stop_preview()
        leds.update(Leds.privacy_off())
def run(self, num_frames, preview_alpha, image_format, image_folder):
    logger.info('Starting...')
    leds = Leds()
    player = Player(gpio=22, bpm=10)
    photographer = Photographer(image_format, image_folder)
    animator = Animator(leds, self._done)

    try:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        with PiCamera(sensor_mode=4, resolution=(1640, 1232)) as camera, PrivacyLed(leds):
            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(camera)

            # Blend the preview layer with the alpha value from the flags.
            if preview_alpha > 0:
                logger.info('Starting preview with alpha %d', preview_alpha)
                camera.start_preview(alpha=preview_alpha)
            else:
                logger.info('Not starting preview, alpha 0')

            button = Button(23)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(10)
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)

                    joy_score = joy_score_moving_average.next(
                        average_joy_score(faces))
                    animator.update_joy_score(joy_score)

                    if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        player.play(JOY_SOUND)
                    elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        player.play(SAD_SOUND)

                    prev_joy_score = joy_score

                    if self._done.is_set() or i == num_frames:
                        break
    finally:
        player.stop()
        photographer.stop()

        player.join()
        photographer.join()
        animator.join()
def think(self):
    from aiy.leds import (Leds, Pattern, Color)
    self._wakeup = False
    self._think = True
    with Leds() as leds:
        while self._think:
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(Color.GREEN))
            time.sleep(1)
def startup(self):
    with Board() as board, Leds() as leds:
        colors = [Color.RED, Color.YELLOW, Color.GREEN, Color.CYAN,
                  Color.BLUE, Color.PURPLE, Color.BLACK, Color.WHITE]
        board.led.state = Led.ON
        for color in colors:
            leds.update(Leds.rgb_on(color))
            time.sleep(0.25)
        TonePlayer(22).play(*jingleBells(6))
        board.led.state = Led.OFF
def run(self, num_frames, preview_alpha, image_format, image_folder, enable_streaming):
    logger.info('Starting...')
    leds = Leds()

    with contextlib.ExitStack() as stack:
        player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
        photographer = stack.enter_context(Photographer(image_format, image_folder))
        animator = stack.enter_context(Animator(leds))
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        # Use half of that for video streaming (820x616).
        camera = stack.enter_context(PiCamera(sensor_mode=4, resolution=(820, 616)))
        stack.enter_context(PrivacyLed(leds))

        server = None
        if enable_streaming:
            server = stack.enter_context(StreamingServer(camera))
            server.run()

        def take_photo():
            logger.info('Button pressed.')
            player.play(BEEP_SOUND)
            photographer.shoot(camera)

        if preview_alpha > 0:
            camera.start_preview(alpha=preview_alpha)

        button = Button(BUTTON_GPIO)
        button.when_pressed = take_photo

        joy_score_moving_average = MovingAverage(10)
        prev_joy_score = 0.0
        with CameraInference(face_detection.model()) as inference:
            logger.info('Model loaded.')
            player.play(MODEL_LOAD_SOUND)
            for i, result in enumerate(inference.run()):
                faces = face_detection.get_faces(result)
                photographer.update_faces(faces)

                joy_score = joy_score_moving_average.next(average_joy_score(faces))
                animator.update_joy_score(joy_score)

                if server:
                    data = server_inference_data(result.width, result.height,
                                                 faces, joy_score)
                    server.send_inference_data(data)

                if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                    player.play(JOY_SOUND)
                elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                    player.play(SAD_SOUND)

                prev_joy_score = joy_score

                if self._done.is_set() or i == num_frames:
                    break
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (kbps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
def wakeup(self):
    from aiy.board import Board, Led
    from aiy.leds import (Leds, Pattern, Color)
    self._wakeup = True
    with Board() as board:
        with Leds() as leds:
            while self._wakeup:
                board.led.state = Led.ON
                leds.pattern = Pattern.breathe(1000)
                leds.update(Leds.rgb_pattern(Color.BLUE))
                time.sleep(1)
def flag():
    for i in range(4):
        with Leds() as leds:
            leds.update(Leds.rgb_on(Color.BLUE))
            sleep(1)
            leds.update(Leds.rgb_on(Color.RED))
            sleep(1)
            leds.update(Leds.rgb_on(Color.WHITE))
            sleep(1)
def _run(self):
    logger.info('Starting...')
    leds = Leds()

    with contextlib.ExitStack() as stack:
        player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
        photographer = stack.enter_context(
            Photographer(self.args.image_format, self.args.image_folder))
        animator = stack.enter_context(Animator(leds))
        stack.enter_context(PrivacyLed(leds))

        server = None
        if self.args.enable_streaming:
            server = stack.enter_context(StreamingServer(self.camera))
            server.run()

        def take_photo():
            logger.info('Button pressed.')
            player.play(BEEP_SOUND)
            photographer.shoot(self.camera)

        button = Button(BUTTON_GPIO)
        button.when_pressed = take_photo

        joy_score_moving_average = MovingAverage(10)
        prev_joy_score = 0.0
        with CameraInference(face_detection.model()) as inference:
            logger.info('Model loaded.')
            player.play(MODEL_LOAD_SOUND)
            for i, result in enumerate(inference.run()):
                faces = face_detection.get_faces(result)
                photographer.update_faces(faces)

                avg_joy_score = average_joy_score(faces)
                joy_score = joy_score_moving_average.next(avg_joy_score)
                animator.update_joy_score(joy_score)

                if server:
                    data = server_inference_data(result.width, result.height,
                                                 faces, joy_score)
                    server.send_inference_data(data)

                if avg_joy_score > JOY_SCORE_MIN:
                    photographer.shoot(self.camera)

                # if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                #     player.play(JOY_SOUND)
                # elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                #     player.play(SAD_SOUND)

                prev_joy_score = joy_score

                if self._done.is_set() or i == self.args.num_frames:
                    break
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    leds = Leds()
    leds.pattern = Pattern.breathe(4000)
    leds.update(Leds.rgb_on((0, 8, 0)))

    pygame.init()
    pygame.mixer.init()
    mix = alsaaudio.Mixer()
    mix.setvolume(30)

    # Collect all playable files
    all_files = []
    for (dirpath, dirnames, filenames) in walk('/home/pi/jukidbox_store'):
        all_files.extend([path.join(dirpath, file) for file in filenames])

    while True:
        leds.update(Leds.rgb_on((0, 8, 0)))
        try:
            with Board() as board:
                while True:
                    print('Press button to start.')
                    board.button.wait_for_press()

                    done = threading.Event()
                    board.button.when_pressed = done.set

                    print('Playing...')
                    leds.update(Leds.rgb_pattern(Color.PURPLE))

                    # Get random file
                    file = numpy.random.choice(all_files)
                    print(file)

                    pygame.mixer.music.load(file)
                    pygame.mixer.music.play(-1)

                    # Consistently use pygame.mixer (the bare 'mixer' name was unbound)
                    while pygame.mixer.music.get_busy():
                        if done.is_set():
                            leds.update(Leds.rgb_on((32, 0, 0)))
                            pygame.mixer.music.stop()
                        time.sleep(0.5)

                    print("Finished ..")
                    leds.update(Leds.rgb_on((0, 8, 0)))
        except Exception as e:
            print(e)
            leds.update(Leds.rgb_on(Color.YELLOW))
            time.sleep(2)
def main():
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()
    with Board() as board:
        # board.led.state = Led.ON
        with Leds() as leds:
            while True:
                if hints:
                    logging.info('Say something, e.g. %s.' % ', '.join(hints))
                else:
                    logging.info('Say something.')
                text = client.recognize(language_code=args.language,
                                        hint_phrases=hints)
                if text is None:
                    logging.info('You said nothing.')
                    continue

                logging.info('You said: "%s"' % text)
                text = text.lower()
                if 'turn on the light' in text:
                    board.led.state = Led.ON
                elif 'turn off the light' in text:
                    board.led.state = Led.OFF
                elif 'blink the light' in text:
                    board.led.state = Led.BLINK
                elif 'goodbye' in text:
                    break
                elif 'happy' in text:
                    leds.pattern = Pattern.blink(50)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    audio.play_wav('laugh.wav')
                elif 'creep' in text:
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                elif 'cheer' in text:
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    audio.play_wav('people-cheering.wav')
def __enter__(self):
    # Forced sensor mode, 1640x1232, full FoV. See:
    # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
    # This is the resolution inference run on.
    with stopwatch('initialize camera'):
        self._camera = self._stack.enter_context(
            picamera.PiCamera(sensor_mode=4, resolution=CAPTURE_RESOLUTION))
    with stopwatch('initialize inference'):
        self._inference = self._stack.enter_context(_initialize_inference())
    leds = self._stack.enter_context(Leds())
    self._stack.enter_context(PrivacyLed(leds))
    return self
def startup():
    with Board() as board, Leds() as leds:
        colors = [Color.RED, Color.YELLOW, Color.GREEN, Color.CYAN,
                  Color.BLUE, Color.PURPLE, Color.BLACK, Color.WHITE]
        board.led.state = Led.ON
        for color in colors:
            leds.update(Leds.rgb_on(color))
            time.sleep(0.5)
        TonePlayer(22).play(*[
            'Be', 'rs', 'C5e', 'rs', 'D5e',
        ])
        board.led.state = Led.OFF
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures/Data',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (kbps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        gather_data(5, args.image_format, args.image_folder,
                    args.enable_streaming, args.streaming_bitrate,
                    args.mdns_name)
    except KeyboardInterrupt:
        sys.exit()
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
def menu(self):
    print('Press Arcade Button to begin photo shoot.' + '\n')
    with Board() as board, Leds() as leds:
        while True:
            # Pulse LED to indicate ready state
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.WHITE))
            board.button.wait_for_press()
            startTime = datetime.datetime.now()
            board.led.state = Led.ON
            print('LED is on...')
            # Update LED to green indicating shoot is live
            leds.update(Leds.rgb_on((107, 255, 0)))
            self.shoot()
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.WHITE))
            print('Press Arcade Button to start again' + '\n' +
                  'OR....' + '\n' +
                  'Press and HOLD the Arcade Button for 5 seconds to quit')
            board.button.wait_for_press()
            pressTime = datetime.datetime.now()
            board.button.wait_for_release()
            releaseTime = datetime.datetime.now()
            board.led.state = Led.OFF
            print('OFF')
            pressDuration = releaseTime - pressTime
            sessionDuration = releaseTime - startTime
            if pressDuration.seconds >= 5:
                leds.update(Leds.rgb_on(Color.PURPLE))
                print('Photo booth session ran for ' +
                      str(sessionDuration.seconds) + ' seconds')
                time.sleep(3)
                TonePlayer(22).play(*[
                    'D5e', 'rq', 'C5e', 'rq', 'Be', 'rq', 'Be', 'C5e', 'D5e'
                ])
                break
    print('Done')
def main():
    startup()
    print('Press Button to start. Press Button to stop camera. '
          'Press Button again (or press Ctrl-C) to quit.')
    pressDuration = 0
    with Board() as board, Leds() as leds:
        while True:
            board.button.wait_for_press()
            pressTime = datetime.datetime.now()
            board.led.state = Led.ON
            print('ON')
            print('Running facedetect')
            facedetect()
            leds.update(Leds.rgb_on((107, 255, 0)))
            board.button.wait_for_release()
            releaseTime = datetime.datetime.now()
            board.led.state = Led.OFF
            print('OFF')
            pressDuration = releaseTime - pressTime
            print('Program ran for ' + str(pressDuration.seconds) + ' seconds')
            if pressDuration.seconds >= 5:
                leds.update(Leds.rgb_on(Color.PURPLE))
                time.sleep(3)
                TonePlayer(22).play(*[
                    'D5e', 'rq', 'C5e', 'rq', 'Be', 'rq', 'Be', 'C5e', 'D5e'
                ])
                break
    print('Done')
def main(): print("Play tune") player = TonePlayer(gpio=BUZZER_GPIO_PIN, bpm=10) player.play(*START_SOUND) print("Initialize robot") robot = Robot() robot.resetPosition() print("Switch on leds") with Leds() as leds: leds.update(Leds.rgb_on(Color.GREEN)) print("Switch on camera") with PiCamera(sensor_mode=4, resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=30) as camera: if ENABLE_DISPLAY: camera.start_preview() annotator = Annotator(camera, dimensions=(320, 240)) else: annotator = None print("Load model") with CameraInference(face_detection.model()) as inference: loop(inference=inference, robot=robot, annotator=annotator, leds=leds) if ENABLE_DISPLAY: camera.stop_preview() player.play(*STOP_SOUND) # Give time for the user to remote its finger. sleep(3) robot.resetPosition()
def main():
    parser = argparse.ArgumentParser(
        'Image classification camera inference example.')
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--num_objects', '-c', type=int, default=2,
                        help='Sets the number of object inferences to print.')
    parser.add_argument('--nopreview', dest='preview', action='store_false', default=True,
                        help='Disable camera preview')
    args = parser.parse_args()

    with PiCamera(sensor_mode=4, framerate=30) as camera, \
         CameraPreview(camera, enabled=args.preview), \
         CameraInference(image_classification.model()) as inference, \
         Leds() as leds:
        leds.update(Leds.privacy_on())
        for result in inference.run(args.num_frames):
            classes = image_classification.get_classes(result,
                                                       top_k=args.num_objects,
                                                       threshold=0.3)
            print(classes_info(classes))
            if classes:
                # annotator.clear()
                camera.annotate_text = '%s (%.2f)' % classes[0]
                # classes[0] is a (label, score) pair, so match on the label
                if 'chicken' in classes[0][0]:
                    camera.capture('chickens.jpg')
                    print('Chicken captured')
def shoot(self):
    with PiCamera() as camera, Leds() as leds:
        countdown = self.initial_timing
        shots_remaining = self.num_shots

        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()
        # leds.update(Leds.privacy_on())

        print('Get ready for your photo shoot!')
        time.sleep(3)
        print('Starting in')
        time.sleep(2)

        leds.pattern = Pattern.blink(1000)
        leds.update(Leds.rgb_pattern(Color.RED))
        while countdown > 0:
            print(countdown)
            countdown -= 1
            time.sleep(1)
        time.sleep(1)

        print('Smile :)')
        leds.pattern = Pattern.blink(1000)
        leds.update(Leds.rgb_pattern(Color.GREEN))
        while shots_remaining > 0:
            # if shots_remaining != self.num_shots:
            time.sleep(self.timing)
            print('*** FLASH ***')
            camera.capture(
                'photobooth_' + str(datetime.datetime.now()) + '.jpg')
            shots_remaining -= 1

        print('\n' + 'You looked FABULOUS!!!' + '\n')
        time.sleep(3)
        leds.pattern = Pattern.blink(1000)
        leds.update(Leds.rgb_pattern(Color.RED))

        # Stop preview
        camera.stop_preview()
def main():
    with Leds() as leds:
        print('Windows Up')
        tuned_servo.min()
        # blueLED1.blink(.2, .2)  # risk of servo burning if kept
        # blueLED2.blink(.2, .2)
        leds.pattern = Pattern.blink(500)
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Windows Down')
        tuned_servo.max()
        interior.on()
        yellowLED.on()
        leds.pattern = Pattern.breathe(1000)
        leds.update(Leds.rgb_pattern(Color.YELLOW))

        # Fade from yellow to red
        for i in range(32):
            color = Color.blend(Color.RED, Color.YELLOW, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        # leds.update({
        #     1: Leds.Channel(Leds.Channel.PATTERN, 64),
        #     2: Leds.Channel(Leds.Channel.OFF, 128),
        #     3: Leds.Channel(Leds.Channel.ON, 128),
        #     4: Leds.Channel(Leds.Channel.PATTERN, 64),
        # })
        time.sleep(5)
        leds.update(Leds.rgb_off())

    tuned_servo.close()
    yellowLED.close()
    interior.close()
    blueLED2.close()
def button():
    with Leds() as leds:
        with Board() as board:
            st_play = True
            while True:
                leds.pattern = Pattern.breathe(3000)
                if st_play:
                    leds.update(Leds.rgb_pattern(Color.GREEN))
                else:
                    leds.update(Leds.rgb_pattern(Color.BLUE))
                board.button.wait_for_press()
                if st_play:
                    send_cmd("STOP")
                    print("> STOP")
                else:
                    send_cmd("PLAY")
                    print("> PLAY")
                board.led.state = Led.ON
                board.button.wait_for_release()
                board.led.state = Led.OFF
                st_play = not st_play
def main():
    with Board() as board:
        with Leds() as leds:
            # Init volume and brightness
            set_volume(0)
            leds.pattern = Pattern.breathe(750)
            leds.update(Leds.rgb_pattern(Color.BLACK))

            done = threading.Event()
            board.button.when_pressed = done.set

            alarm_thread = threading.Thread(target=alarm, args=(done, leds), daemon=True)
            alarm_thread.start()

            if done.wait(timeout=TIMEOUT_LIMIT):
                set_volume(MAX_VOLUME)
                leds.update(Leds.rgb_on(Color.GREEN))
                print('GOOD MORNING!')
                play_wav(GOOD_MORNING_SOUND_PATH)
            else:
                print('Timed out.')
def main():
    parser = argparse.ArgumentParser(
        'Image classification camera inference example.')
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--num_objects', '-c', type=int, default=3,
                        help='Sets the number of object inferences to print.')
    parser.add_argument('--nopreview', dest='preview', action='store_false', default=True,
                        help='Disable camera preview')
    args = parser.parse_args()

    with Leds() as leds:
        with PiCamera(sensor_mode=4, framerate=30) as camera, \
             CameraPreview(camera, enabled=args.preview), \
             CameraInference(image_classification.model()) as inference:
            for result in inference.run(args.num_frames):
                classes = image_classification.get_classes(
                    result, top_k=args.num_objects)
                print(classes_info(classes))
                # print("my class: " + classes[0][0])
                if classes:
                    camera.annotate_text = '%s (%.2f)' % classes[0]
                    if "mouse" in str(classes[0][0]):
                        leds.update(Leds.rgb_on(Color.RED))
                    else:
                        leds.update(Leds.rgb_off())
# RED is referenced below and was presumably defined alongside the other colors.
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def blend(color_a, color_b, alpha):
    # Linear interpolation: alpha=1.0 gives color_a, alpha=0.0 gives color_b.
    return tuple([
        math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i])
        for i in range(3)
    ])


leds = Leds()

print('RGB: Solid RED for 1 second')
leds.update(Leds.rgb_on(RED))
time.sleep(1)

print('RGB: Solid GREEN for 1 second')
leds.update(Leds.rgb_on(GREEN))
time.sleep(1)

print('RGB: Solid YELLOW for 1 second')
leds.update(Leds.rgb_on(YELLOW))
time.sleep(1)

print('RGB: Solid BLUE for 1 second')
leds.update(Leds.rgb_on(BLUE))
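# A minimal follow-on sketch (not part of the original snippet): the blend()
# helper above can drive a smooth GREEN-to-BLUE fade; the 32 steps and 0.05 s
# delay are arbitrary choices.
print('RGB: Fade GREEN to BLUE')
for i in range(32):
    leds.update(Leds.rgb_on(blend(BLUE, GREEN, i / 32)))
    time.sleep(0.05)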
def __init__(self, channel):
    self._lock = threading.Lock()
    self._brightness = 1.0  # Read and written atomically.
    self._state = self.OFF
    self._leds = Leds()
def _setColor(color):
    global state
    if state:
        Leds().update(Leds.rgb_on(color))
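# Alternative sketch (an assumption, not from the source): _setColor above
# constructs a new Leds() on every call; reusing a single module-level
# instance avoids reopening the LED device each time. The _leds and
# _setColorShared names are hypothetical.
_leds = Leds()

def _setColorShared(color):
    if state:
        _leds.update(Leds.rgb_on(color))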