def reponse_bouton(est_juste, ecart, action):
    """Give tuning feedback through the button LED and speech.

    Args:
        est_juste: True when the string has reached the target frequency.
        ecart: frequency gap to the target; may be negative.
        action: 1 to raise the pitch, anything else to lower it.
    """
    with Leds() as leds:
        if est_juste:
            # Solid green for 3 seconds once the target frequency is reached.
            leds.update(Leds.rgb_on(Color.GREEN))
            time.sleep(3)
            print('Corde accordée')
            tts.say('Corde accordée', lang='fr-FR')
        else:
            # BUG FIX: `ecart` can be negative, which would give an invalid
            # (negative) blink period; use the magnitude, matching the other
            # reponse_bouton() variant in this file's style.
            period = 10 * abs(ecart)
            leds.pattern = Pattern.blink(period)  # blink rate tracks the gap
            print('Tourner la cheville')
            tts.say('Tourner la cheville', lang='fr-FR')
            if action == 1:
                # Blue blinking for 5 seconds: tighten the string.
                leds.update(Leds.rgb_pattern(Color.BLUE))
                time.sleep(5)
            else:
                # Red blinking for 5 seconds: loosen the string.
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(5)
def process(self, joy_score):
    """Drive the RGB LED from the current joy score (off when not positive)."""
    if joy_score <= 0:
        self._leds.update(Leds.rgb_off())
        return
    mixed = blend(JOY_COLOR, SAD_COLOR, joy_score)
    self._leds.update(Leds.rgb_on(mixed))
def blink_led(color=RED, period=1, n_blinks=3):
    """Blink the RGB LED `n_blinks` times, leaving it off afterwards."""
    half_period = period / 2
    for _ in range(n_blinks):
        leds.update(Leds.rgb_off())
        sleep(half_period)
        leds.update(Leds.rgb_on(color))
        sleep(half_period)
    # Make sure the LED ends up dark.
    leds.update(Leds.rgb_off())
def _run(self):
    """Animator loop: mirror the shared joy score on the LED until done."""
    while not self._done.is_set():
        score = self._joy_score.value
        if score > 0:
            shade = blend(JOY_COLOR, SAD_COLOR, score)
            self._leds.update(Leds.rgb_on(shade))
        else:
            self._leds.update(Leds.rgb_off())
def run():
    # Long press (3 s) enters test mode; a short press plays one round of
    # rock-paper-scissors (janken).
    if KeepWatchForSeconds(3):
        print("Go test mode")
        leds.update(Leds.rgb_on(BLUE))
        test_mode(test_time=100)
        manual_screen()
        leds.update(Leds.rgb_on(WHITE))
    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)
        leds.update(Leds.rgb_on(RED))
        print("process")
        gu_servo.max()  # presumably raises the "rock" hand — TODO confirm
        first_gu()
        janken_screen()
        your_hand = hand_recog()
        janken(your_hand)
        print("Done")
        # Retract all three hand servos and return to the ready state.
        gu_servo.min()
        choki_servo.min()
        pa_servo.min()
        leds.update(Leds.rgb_on(WHITE))
        manual_screen()
def send_signal_to_pins(result0, gpio_logic):
    """Encode the recognized command on three GPIO pins and the RGB LED."""
    # (keyword, level A, level B, level C, LED channels), checked in the
    # same priority order as the original if/elif chain.
    command_table = [
        ('stop', 'LOW', 'LOW', 'LOW', Leds.rgb_on(RED)),
        ('left', 'LOW', 'LOW', 'HIGH', Leds.rgb_on(BLUE)),
        ('right', 'LOW', 'HIGH', 'LOW', Leds.rgb_on(PURPLE)),
        ('slow', 'LOW', 'HIGH', 'HIGH', Leds.rgb_on(GREEN)),
    ]
    for keyword, level_a, level_b, level_c, channels in command_table:
        if keyword in result0:
            break
    else:
        # Unrecognized command: A HIGH, B/C LOW, LED off.
        level_a, level_b, level_c = 'HIGH', 'LOW', 'LOW'
        channels = Leds.rgb_off()
    pinStatus(pin_A, level_a, gpio_logic)
    pinStatus(pin_B, level_b, gpio_logic)
    pinStatus(pin_C, level_c, gpio_logic)
    leds.update(channels)
    time.sleep(1)
def facedetect():
    """Detect one face, save a timestamped photo, flash the external LED.

    Runs VisionBonnet face detection on the live camera feed and stops at
    the first frame containing at least one face.
    """
    with PiCamera() as camera, Leds() as leds:
        # Configure camera
        camera.resolution = (1640, 922)  # Full Frame, 16:9 (Camera v2)
        camera.start_preview()
        leds.update(Leds.privacy_on())
        # Do inference on VisionBonnet
        with CameraInference(face_detection.model()) as inference:
            for result in inference.run():
                if len(face_detection.get_faces(result)) >= 1:
                    camera.capture(
                        'faces_' + str(datetime.datetime.now()) + '.jpg')
                    # Pulse the external LED once to signal the capture.
                    print(led.is_active)
                    led.on()
                    print(led.is_active)
                    led.off()
                    print(led.is_active)
                    break
        # Stop preview
        camera.stop_preview()
        # BUG FIX: the original called Leds.privacy_on() again here, leaving
        # the privacy LED lit after the camera session ended; turn it off.
        leds.update(Leds.privacy_off())
def reponse_bouton( est_juste, ecart ):
    # Feedback given through the button LED colour and blink frequency.
    with Leds() as leds:
        if est_juste:
            # Solid green for 3 seconds once the target frequency is reached.
            leds.update(Leds.rgb_on(Color.GREEN) )
            time.sleep(3)
            print('Corde accordée')
            tts.say('Corde accordée', lang='fr-FR')
        else:
            period = 10 * abs(ecart)
            # Blink frequency reflects how far off the pitch is.
            leds.pattern = Pattern.blink( period)
            print("TOURNER LA CHEVILLE")
            if ecart > 0:
                tts.say('Tendre la corde', lang='fr-FR')
                # Blue blinking for 5 seconds: tighten the string.
                leds.update( Leds.rgb_pattern(Color.BLUE) )
                time.sleep(5)
            else:
                tts.say('Détendre la corde', lang='fr-FR')
                # Red blinking for 5 seconds: loosen the string.
                leds.update( Leds.rgb_pattern(Color.RED) )
                time.sleep(5)
class MultiColorLed:
    """Drives the RGB button LED through the aiy.leds interface."""

    # Maps a logical LED state to (channel builder, blink/breathe pattern).
    Config = namedtuple('Config', ['channels', 'pattern'])

    OFF = Config(channels=lambda color: Leds.rgb_off(), pattern=None)
    ON = Config(channels=Leds.rgb_on, pattern=None)
    BLINK = Config(channels=Leds.rgb_pattern, pattern=Pattern.blink(500))
    # The following states are not distinguishable on this hardware,
    # so they all alias plain BLINK.
    BLINK_3 = BLINK
    BEACON = BLINK
    BEACON_DARK = BLINK
    DECAY = BLINK
    PULSE_SLOW = Config(channels=Leds.rgb_pattern, pattern=Pattern.breathe(500))
    PULSE_QUICK = Config(channels=Leds.rgb_pattern, pattern=Pattern.breathe(100))

    def _update(self, state, brightness):
        # Apply a new state and/or brightness under the lock so concurrent
        # setters cannot interleave a half-applied configuration.
        with self._lock:
            if state is not None:
                self._state = state
            if brightness is not None:
                self._brightness = brightness
            # Only the red channel is driven; brightness scales 0..255.
            color = (int(255 * self._brightness), 0, 0)
            if self._state.pattern:
                self._leds.pattern = self._state.pattern
            self._leds.update(self._state.channels(color))

    def __init__(self, channel):
        # `channel` is accepted for interface compatibility with the
        # single-colour driver but is unused here.
        self._lock = threading.Lock()
        self._brightness = 1.0  # Read and written atomically.
        self._state = self.OFF
        self._leds = Leds()

    def close(self):
        self._leds.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    @property
    def brightness(self):
        return self._brightness

    @brightness.setter
    def brightness(self, value):
        if value < 0.0 or value > 1.0:
            raise ValueError('Brightness must be between 0.0 and 1.0.')
        self._update(state=None, brightness=value)

    def _set_state(self, state):
        self._update(state=state, brightness=None)
    # Write-only attribute: assigning `led.state = ...` applies the state.
    state = property(None, _set_state)
def think(self):
    """Blink the LED green for as long as the think flag stays set."""
    from aiy.leds import (Leds, Pattern, Color)
    self._wakeup = False
    self._think = True
    with Leds() as led_ring:
        # Re-apply pattern and colour each second until another state
        # handler clears self._think.
        while self._think:
            led_ring.pattern = Pattern.blink(500)
            led_ring.update(Leds.rgb_pattern(Color.GREEN))
            time.sleep(1)
def startup(self):
    """Cycle the RGB LED through a colour sequence, then play Jingle Bells."""
    palette = (Color.RED, Color.YELLOW, Color.GREEN, Color.CYAN,
               Color.BLUE, Color.PURPLE, Color.BLACK, Color.WHITE)
    with Board() as board, Leds() as leds:
        board.led.state = Led.ON
        for shade in palette:
            leds.update(Leds.rgb_on(shade))
            time.sleep(0.25)
        TonePlayer(22).play(*jingleBells(6))
        board.led.state = Led.OFF
def main():
    """Arm the button callback and idle until interrupted with Ctrl-C."""
    import time  # local import keeps this fix self-contained in one block
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))
    try:
        # BUG FIX: the original spun in `while True: pass`, pegging a CPU
        # core; sleeping keeps the process alive without the busy-wait.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        leds.update(Leds.rgb_off())
def wakeup(self):
    """Breathe the LED blue for as long as the wakeup flag stays set."""
    from aiy.board import Board, Led
    from aiy.leds import (Leds, Pattern, Color)
    self._wakeup = True
    # Single combined context instead of two nested `with` statements.
    with Board() as board, Leds() as leds:
        while self._wakeup:
            board.led.state = Led.ON
            leds.pattern = Pattern.breathe(1000)
            leds.update(Leds.rgb_pattern(Color.BLUE))
            time.sleep(1)
def main():
    """Arm the button callback and poll pygame until ESC quits the program."""
    button.when_pressed = run
    leds.update(Leds.rgb_on(WHITE))
    manual_screen()
    while True:
        for event in pygame.event.get():
            # Exit cleanly when the user presses Escape.
            if event.type == KEYDOWN and event.key == K_ESCAPE:
                leds.update(Leds.rgb_off())
                sys.exit()
def flag():
    """Show blue, red and white (one second each), four times over."""
    sequence = (Color.BLUE, Color.RED, Color.WHITE)
    for _ in range(4):
        # A fresh Leds context per cycle, as in the original.
        with Leds() as leds:
            for shade in sequence:
                leds.update(Leds.rgb_on(shade))
                sleep(1)
def main():
    """Parse CLI flags and run the joy-detector demo."""
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    # BUG FIX (help text only): picamera expresses bitrate in bits per
    # second, and the default of 1000000 only makes sense as 1 Mbit/s,
    # so the old "(kbps)" label was misleading.
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            # Signal the failure on the device with a fast red blink.
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
def run():
    # Held for 3 seconds -> power the system off; short press -> beep and
    # run the (placeholder) process.
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")
    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)
        leds.update(Leds.rgb_on(RED))
        print("process")
        print("Done")
        leds.update(Leds.rgb_on(WHITE))
def main():
    """Voice-command loop: recognize speech and drive the board/LEDs."""
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()
    with Board() as board:
        #board.led.state = Led.ON
        with Leds() as leds:
            while True:
                if hints:
                    logging.info('Say something, e.g. %s.' % ', '.join(hints))
                else:
                    logging.info('Say something.')
                text = client.recognize(language_code=args.language,
                                        hint_phrases=hints)
                if text is None:
                    logging.info('You said nothing.')
                    continue
                logging.info('You said: "%s"' % text)
                text = text.lower()
                if 'turn on the light' in text:
                    board.led.state = Led.ON
                elif 'turn off the light' in text:
                    board.led.state = Led.OFF
                elif 'blink the light' in text:
                    board.led.state = Led.BLINK
                elif 'goodbye' in text:
                    break
                elif 'happy' in text:
                    # Fast yellow blink plus a laugh track.
                    leds.pattern = Pattern.blink(50)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    audio.play_wav('laugh.wav')
                elif 'creep' in text:
                    # NOTE(review): a breathe pattern is set but rgb_on()
                    # shows a solid colour, so the pattern appears unused —
                    # confirm whether Leds.rgb_pattern() was intended.
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                elif 'cheer' in text:
                    # NOTE(review): same pattern-vs-solid mismatch as above.
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    audio.play_wav('people-cheering.wav')
def initialize(self):
    """Set up the LEDs, the GPIO button, and the Mycroft event handlers."""
    self.settings_change_callback = self.on_settings_changed
    self.get_settings()
    try:
        self.leds = Leds()
        self.led_idle()
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        self.log.warning("Can't initialize LED - skill will not load")
        self.speak_dialog("error.initialize")
    try:
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(BUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.remove_event_detect(BUTTON)
        GPIO.add_event_detect(BUTTON, GPIO.FALLING, bouncetime=500)
    except Exception:
        # BUG FIX: narrowed bare `except:` here as well.
        self.log.warning("Can't initialize GPIO - skill will not load")
        self.speak_dialog("error.initialize")
    finally:
        # Poll the button every 100 ms and wire up the listener events
        # regardless of whether the GPIO setup succeeded.
        self.schedule_repeating_event(self.button_press, None, 0.1,
                                      'GoogleAIYv2')
        self.add_event('recognizer_loop:record_begin',
                       self.on_listener_started)
        self.add_event('recognizer_loop:record_end', self.on_listener_ended)
        self.add_event('mycroft.skill.handler.complete',
                       self.on_handler_complete)
        self.add_event('mycroft.speech.recognition.unknown',
                       self.on_handler_complete)
def startup():
    """Flash a colour sequence on the RGB LED, then play a three-note chime."""
    rainbow = (Color.RED, Color.YELLOW, Color.GREEN, Color.CYAN,
               Color.BLUE, Color.PURPLE, Color.BLACK, Color.WHITE)
    chime = ('Be', 'rs', 'C5e', 'rs', 'D5e')
    with Board() as board, Leds() as leds:
        board.led.state = Led.ON
        for shade in rainbow:
            leds.update(Leds.rgb_on(shade))
            time.sleep(0.5)
        TonePlayer(22).play(*chime)
        board.led.state = Led.OFF
def run(self, num_frames, preview_alpha, image_format, image_folder):
    """Joy-detector main loop: camera, face inference, sounds and LEDs.

    Args:
        num_frames: stop after this many frames (None = run until done flag).
        preview_alpha: preview overlay transparency; 0 disables the preview.
        image_format: file format for captured photos.
        image_folder: destination folder for captured photos.
    """
    logger.info('Starting...')
    leds = Leds()
    player = Player(gpio=22, bpm=10)
    photographer = Photographer(image_format, image_folder)
    animator = Animator(leds, self._done)

    try:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        with PiCamera(sensor_mode=4,
                      resolution=(1640, 1232)) as camera, PrivacyLed(leds):
            def take_photo():
                logger.info('Button pressed.')
                player.play(BEEP_SOUND)
                photographer.shoot(camera)

            # Blend the preview layer with the alpha value from the flags.
            if preview_alpha > 0:
                logger.info('Starting preview with alpha %d', preview_alpha)
                camera.start_preview(alpha=preview_alpha)
            else:
                logger.info('Not starting preview, alpha 0')

            button = Button(23)
            button.when_pressed = take_photo

            joy_score_moving_average = MovingAverage(10)
            prev_joy_score = 0.0
            with CameraInference(face_detection.model()) as inference:
                logger.info('Model loaded.')
                player.play(MODEL_LOAD_SOUND)
                for i, result in enumerate(inference.run()):
                    faces = face_detection.get_faces(result)
                    photographer.update_faces(faces)

                    # Smooth the joy score over the last 10 frames.
                    joy_score = joy_score_moving_average.next(
                        average_joy_score(faces))
                    animator.update_joy_score(joy_score)

                    # Play a sound only when the smoothed score crosses a
                    # threshold (rises above peak / falls below minimum).
                    if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                        player.play(JOY_SOUND)
                    elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                        player.play(SAD_SOUND)

                    prev_joy_score = joy_score
                    if self._done.is_set() or i == num_frames:
                        break
    finally:
        # Stop the worker threads first, then wait for them to exit.
        player.stop()
        photographer.stop()
        player.join()
        photographer.join()
        animator.join()
def menu(self):
    """Photo-booth loop: wait for the arcade button, shoot, repeat.

    Holding the button for at least 5 seconds after a shoot ends the
    session with a purple LED and a short good-bye tune.
    """
    print('Press Arcade Button to begin photo shoot.' + '\n')
    with Board() as board, Leds() as leds:
        while True:
            # pulse LED to indicate ready state
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.WHITE))
            board.button.wait_for_press()
            startTime = datetime.datetime.now()
            board.led.state = Led.ON
            print('LED is on...')
            # update LED to green indicating shoot is live
            leds.update(Leds.rgb_on((107, 255, 0)))
            self.shoot()
            # back to the pulsing "ready" indication
            leds.pattern = Pattern.blink(1000)
            leds.update(Leds.rgb_pattern(Color.WHITE))
            print('Press Arcade Button to start again' + '\n' + 'OR....' +
                  '\n' +
                  'Press and HOLD the Arcade Button for 5 seconds to quit')
            board.button.wait_for_press()
            pressTime = datetime.datetime.now()
            board.button.wait_for_release()
            releaseTime = datetime.datetime.now()
            board.led.state = Led.OFF
            print('OFF')
            # How long the button was held, and total session length.
            pressDuration = releaseTime - pressTime
            sessionDuration = releaseTime - startTime
            if pressDuration.seconds >= 5:
                leds.update(Leds.rgb_on(Color.PURPLE))
                print('Photo booth session ran for ' +
                      str(sessionDuration.seconds) + ' seconds')
                time.sleep(3)
                TonePlayer(22).play(*[
                    'D5e', 'rq', 'C5e', 'rq', 'Be', 'rq', 'Be', 'C5e', 'D5e'
                ])
                break
    print('Done')
def main():
    """Parse CLI flags and run the data-gathering demo."""
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures/Data',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    # BUG FIX (help text only): picamera expresses bitrate in bits per
    # second — 1000000 is 1 Mbit/s — so the old "(kbps)" label was wrong.
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        gather_data(5, args.image_format, args.image_folder,
                    args.enable_streaming, args.streaming_bitrate,
                    args.mdns_name)
    except KeyboardInterrupt:
        sys.exit()
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            # Signal the failure on the device with a fast red blink.
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
def run():
    # Announce the round with a beep and a red LED.
    print("Beep sound")
    toneplayer.play(*BEEP_SOUND)
    leds.update(Leds.rgb_on(RED))
    print("process")
    gu_servo.max()  # presumably presents the "rock" hand — TODO confirm
    first_gu()
    janken_screen()
    # Recognize the player's hand and play the round.
    your_hand = hand_recog()
    janken(your_hand)
    print("Done")
    # Retract all hand servos and show the ready (white) state again.
    gu_servo.min()
    choki_servo.min()
    pa_servo.min()
    leds.update(Leds.rgb_on(WHITE))
    manual_screen()
def run():
    # Held for 3 seconds -> power the system off; short press -> take a photo.
    if KeepWatchForSeconds(3):
        print("Going shutdown by GPIO")
        leds.update(Leds.rgb_off())
        os.system("/sbin/shutdown -h now 'Poweroff by GPIO'")
    else:
        print("Beep sound")
        toneplayer.play(*BEEP_SOUND)
        leds.update(Leds.rgb_on(RED))
        print("Taking photo")
        # Camera helper: load, trigger the shutter, save the image.
        sh.cameraLoad()
        sh.shutter()
        sh.cameraSave()
        print("Done")
        leds.update(Leds.rgb_on(WHITE))
def run(self, num_frames, preview_alpha, image_format, image_folder,
        enable_streaming):
    """Joy-detector main loop with optional video streaming."""
    logger.info('Starting...')
    leds = Leds()

    # ExitStack keeps the many resources' teardown in one place.
    with contextlib.ExitStack() as stack:
        player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
        photographer = stack.enter_context(Photographer(image_format,
                                                        image_folder))
        animator = stack.enter_context(Animator(leds))
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference run on.
        # Use half of that for video streaming (820x616).
        camera = stack.enter_context(PiCamera(sensor_mode=4,
                                              resolution=(820, 616)))
        stack.enter_context(PrivacyLed(leds))

        server = None
        if enable_streaming:
            server = stack.enter_context(StreamingServer(camera))
            server.run()

        def take_photo():
            logger.info('Button pressed.')
            player.play(BEEP_SOUND)
            photographer.shoot(camera)

        if preview_alpha > 0:
            camera.start_preview(alpha=preview_alpha)

        button = Button(BUTTON_GPIO)
        button.when_pressed = take_photo

        joy_score_moving_average = MovingAverage(10)
        prev_joy_score = 0.0
        with CameraInference(face_detection.model()) as inference:
            logger.info('Model loaded.')
            player.play(MODEL_LOAD_SOUND)
            for i, result in enumerate(inference.run()):
                faces = face_detection.get_faces(result)
                photographer.update_faces(faces)

                # Smooth the joy score over the last 10 inference frames.
                joy_score = joy_score_moving_average.next(
                    average_joy_score(faces))
                animator.update_joy_score(joy_score)

                if server:
                    data = server_inference_data(result.width, result.height,
                                                 faces, joy_score)
                    server.send_inference_data(data)

                # Sound cues fire only on threshold crossings.
                if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                    player.play(JOY_SOUND)
                elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                    player.play(SAD_SOUND)

                prev_joy_score = joy_score
                if self._done.is_set() or i == num_frames:
                    break
class Bonnet:
    """Communication with bonnet button led and bonnet microphone"""

    # Status colours as RGB tuples.
    col_boot = (255, 153, 0)     # Orange
    col_wait = (102, 255, 204)   # Turquoise
    col_working = (102, 0, 255)  # Purple
    col_speak = (68, 117, 47)    # Green

    def __init__(self):
        self.leds = Leds()
        self.board = Board()
        self.current_col = None  # last colour passed to set_colour()

    def wait_for_button(self):
        # NOTE(review): this blocks until the button is *released*, not
        # pressed — confirm that is the intended trigger.
        self.board.button.wait_for_release()

    def set_colour(self, col: tuple):
        # Show a solid colour and remember it.
        self.leds.update(Leds.rgb_on(col))
        self.current_col = col
def main():
    """Button-driven face-detection sessions.

    A press lasting at least 5 seconds (measured across the facedetect
    run) ends the program with a purple LED and a short tune.
    """
    startup()
    print('Press Button start. Press Button to stop camera.' +
          'Press Button again (or press Ctrl-C) to quit.')
    pressDuration = 0
    with Board() as board, Leds() as leds:
        while True:
            board.button.wait_for_press()
            pressTime = datetime.datetime.now()
            board.led.state = Led.ON
            print('ON')
            print('Running facedetect')
            facedetect()
            leds.update(Leds.rgb_on((107, 255, 0)))
            board.button.wait_for_release()
            releaseTime = datetime.datetime.now()
            board.led.state = Led.OFF
            print('OFF')
            pressDuration = releaseTime - pressTime
            print('Program ran for ' + str(pressDuration.seconds) +
                  ' seconds')
            if pressDuration.seconds >= 5:
                leds.update(Leds.rgb_on(Color.PURPLE))
                time.sleep(3)
                TonePlayer(22).play(*[
                    'D5e', 'rq', 'C5e', 'rq', 'Be', 'rq', 'Be', 'C5e', 'D5e'
                ])
                break
    print('Done')
def _run(self):
    """Inference loop: stream faces and auto-photograph when joy is seen."""
    logger.info('Starting...')
    leds = Leds()

    with contextlib.ExitStack() as stack:
        player = stack.enter_context(Player(gpio=BUZZER_GPIO, bpm=10))
        photographer = stack.enter_context(
            Photographer(self.args.image_format, self.args.image_folder))
        animator = stack.enter_context(Animator(leds))
        stack.enter_context(PrivacyLed(leds))

        server = None
        if self.args.enable_streaming:
            server = stack.enter_context(StreamingServer(self.camera))
            server.run()

        def take_photo():
            logger.info('Button pressed.')
            player.play(BEEP_SOUND)
            photographer.shoot(self.camera)

        button = Button(BUTTON_GPIO)
        button.when_pressed = take_photo

        joy_score_moving_average = MovingAverage(10)
        prev_joy_score = 0.0
        with CameraInference(face_detection.model()) as inference:
            logger.info('Model loaded.')
            player.play(MODEL_LOAD_SOUND)
            for i, result in enumerate(inference.run()):
                faces = face_detection.get_faces(result)
                photographer.update_faces(faces)

                avg_joy_score = average_joy_score(faces)
                # The moving average only drives the LED animation.
                joy_score = joy_score_moving_average.next(avg_joy_score)
                animator.update_joy_score(joy_score)

                if server:
                    data = server_inference_data(result.width, result.height,
                                                 faces, joy_score)
                    server.send_inference_data(data)

                # Auto-shoot on the *instantaneous* score, not the average.
                if avg_joy_score > JOY_SCORE_MIN:
                    photographer.shoot(self.camera)

                # Sound cues are disabled in this variant.
                # if joy_score > JOY_SCORE_PEAK > prev_joy_score:
                #     player.play(JOY_SOUND)
                # elif joy_score < JOY_SCORE_MIN < prev_joy_score:
                #     player.play(SAD_SOUND)

                prev_joy_score = joy_score
                if self._done.is_set() or i == self.args.num_frames:
                    break
def main():
    """Boot sequence for the face-tracking robot demo."""
    print("Play tune")
    player = TonePlayer(gpio=BUZZER_GPIO_PIN, bpm=10)
    player.play(*START_SOUND)

    print("Initialize robot")
    robot = Robot()
    robot.resetPosition()

    print("Switch on leds")
    with Leds() as leds:
        leds.update(Leds.rgb_on(Color.GREEN))

        print("Switch on camera")
        with PiCamera(sensor_mode=4,
                      resolution=(CAMERA_WIDTH, CAMERA_HEIGHT),
                      framerate=30) as camera:
            if ENABLE_DISPLAY:
                camera.start_preview()
                annotator = Annotator(camera, dimensions=(320, 240))
            else:
                annotator = None

            print("Load model")
            with CameraInference(face_detection.model()) as inference:
                # Main tracking loop; returns when the demo ends.
                loop(inference=inference, robot=robot, annotator=annotator,
                     leds=leds)

            if ENABLE_DISPLAY:
                camera.stop_preview()

    player.play(*STOP_SOUND)
    # Give time for the user to remove their finger.
    sleep(3)
    robot.resetPosition()
def main():
    """Parse CLI flags and run the joy-detector demo."""
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    # BUG FIX (help text only): picamera expresses bitrate in bits per
    # second — 1000000 is 1 Mbit/s — so the old "(kbps)" label was wrong.
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (bps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            # Signal the failure on the device with a fast red blink.
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
def __init__(self, channel):
    # `channel` is part of the shared LED-driver interface; unused here.
    self._lock = threading.Lock()
    self._brightness = 1.0  # Read and written atomically.
    self._state = self.OFF
    self._leds = Leds()
def process(self, joy_score):
    """Map the joy score onto the RGB LED; switch it off when not positive."""
    if joy_score <= 0:
        self._leds.update(Leds.rgb_off())
        return
    shade = Color.blend(JOY_COLOR, SAD_COLOR, joy_score)
    self._leds.update(Leds.rgb_on(shade))
    def brightness(self):
        # Current brightness in [0.0, 1.0]. NOTE(review): the @property
        # decorator for this getter appears to live outside this chunk.
        return self._brightness

    @brightness.setter
    def brightness(self, value):
        # Validate and store; the updater thread applies the new value.
        if value < 0.0 or value > 1.0:
            raise ValueError('Brightness must be between 0.0 and 1.0.')
        self._brightness = value

    def _set_state(self, state):
        # Queue the new state and wake the updater thread.
        self._queue.put(state)
        self._updated.set()
    # Write-only attribute: assigning `led.state = ...` queues the state.
    state = property(None, _set_state)


# Pick the LED driver matching the installed hardware.
if Leds.installed():
    Led = MultiColorLed
else:
    Led = SingleColorLed

BUTTON_PIN = 23
LED_PIN = 25


class Board:
    """Bundles the button and LED resources."""

    def __init__(self, button_pin=BUTTON_PIN, led_pin=LED_PIN):
        # led_pin is stored implicitly by later code not visible in this
        # chunk — TODO confirm against the full file.
        self._stack = contextlib.ExitStack()
        self._lock = threading.Lock()
        self._button_pin = button_pin
        self._button = None
def shutdown(self):
    # Turn the RGB LED fully off when the service stops.
    self._leds.update(Leds.rgb_off())
def main():
    """Exercise every Leds feature: solid colours, the privacy LED,
    blink/breathe patterns, brightness ramps, colour blending, the
    context-manager helpers and raw per-channel configuration."""
    solid_colors = (('RED', Color.RED), ('GREEN', Color.GREEN),
                    ('YELLOW', Color.YELLOW), ('BLUE', Color.BLUE),
                    ('PURPLE', Color.PURPLE), ('CYAN', Color.CYAN),
                    ('WHITE', Color.WHITE))
    pattern_colors = (('RED', Color.RED), ('GREEN', Color.GREEN),
                      ('BLUE', Color.BLUE))

    with Leds() as leds:
        # Solid colours, one second each.
        for name, color in solid_colors:
            print('RGB: Solid %s for 1 second' % name)
            leds.update(Leds.rgb_on(color))
            time.sleep(1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        # Privacy LED: three on/off cycles at default brightness,
        # then three more at brightness 5.
        for label, channels in (('default', Leds.privacy_on()),
                                ('5', Leds.privacy_on(5))):
            for _ in range(3):
                print('Privacy: On (brightness=%s)' % label)
                leds.update(channels)
                time.sleep(1)
                print('Privacy: Off')
                leds.update(Leds.privacy_off())
                time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)
        for name, color in pattern_colors:
            print('RGB: Blink %s for 5 seconds' % name)
            leds.update(Leds.rgb_pattern(color))
            time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)
        for name, color in pattern_colors:
            print('RGB: Breathe %s for 5 seconds' % name)
            leds.update(Leds.rgb_pattern(color))
            time.sleep(5)

        # Manual brightness ramps on the red channel (32 steps of 0.1 s).
        print('RGB: Increase RED brightness for 3.2 seconds')
        for step in range(32):
            leds.update(Leds.rgb_on((8 * step, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for step in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * step, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for step in range(32):
            mixed = Color.blend(Color.BLUE, Color.GREEN, step / 32)
            leds.update(Leds.rgb_on(mixed))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        # Context-manager helpers restore the previous state on exit.
        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        # Drive the four hardware channels directly.
        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
        })
        time.sleep(5)

        print('Done')