def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,
                        action='store_true',
                        help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))
    return 0
from aiy.board import Board
from aiy.leds import Leds


class Bonnet:
    """Communication with the bonnet button, LED, and microphone."""
    col_boot = (255, 153, 0)     # Orange
    col_wait = (102, 255, 204)   # Turquoise
    col_working = (102, 0, 255)  # Purple
    col_speak = (68, 117, 47)    # Green

    def __init__(self):
        self.leds = Leds()
        self.board = Board()
        self.current_col = None

    def wait_for_button(self):
        self.board.button.wait_for_release()

    def set_colour(self, col: tuple):
        self.leds.update(Leds.rgb_on(col))
        self.current_col = col
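
# A minimal usage sketch for the Bonnet helper above, assuming the standard
# aiy.board / aiy.leds APIs; the startup sequence itself is illustrative,
# not from the source.
bonnet = Bonnet()
bonnet.set_colour(Bonnet.col_boot)   # orange while initializing
bonnet.wait_for_button()             # block until the button is released
bonnet.set_colour(Bonnet.col_wait)   # turquoise: ready for input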
import math
import time

from aiy.leds import Leds

# RED, GREEN, YELLOW and BLUE are used below but were missing from the
# fragment; they follow the same convention as the other constants.
RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
YELLOW = (0xFF, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)
CYAN = (0x00, 0xFF, 0xFF)
WHITE = (0xFF, 0xFF, 0xFF)


def blend(color_a, color_b, alpha):
    """Linearly interpolate between two RGB colours (alpha weights color_a)."""
    return tuple([
        math.ceil(alpha * color_a[i] + (1.0 - alpha) * color_b[i])
        for i in range(3)
    ])


leds = Leds()

print('RGB: Solid RED for 1 second')
leds.update(Leds.rgb_on(RED))
time.sleep(1)

print('RGB: Solid GREEN for 1 second')
leds.update(Leds.rgb_on(GREEN))
time.sleep(1)

print('RGB: Solid YELLOW for 1 second')
leds.update(Leds.rgb_on(YELLOW))
time.sleep(1)

print('RGB: Solid BLUE for 1 second')
leds.update(Leds.rgb_on(BLUE))
time.sleep(1)

print('RGB: Solid PURPLE for 1 second')
leds.update(Leds.rgb_on(PURPLE))
time.sleep(1)
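
# A short sketch of blend() in use: fading from PURPLE to CYAN over about
# two seconds. The step count and delay are arbitrary choices, not from
# the source.
for i in range(20):
    leds.update(Leds.rgb_on(blend(CYAN, PURPLE, i / 20)))
    time.sleep(0.1)
leds.update(Leds.rgb_off())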
local_results = []
for i in range(X.shape[0]):
    # Each analysis window is reshaped to NCHW for OpenCV's dnn module.
    window = X[i].reshape(1, 1, X.shape[2], X.shape[1])
    detectionNet.setInput(window)
    local_results.append(detectionNet.forward())

# Aggregate the per-window predictions for the file into one, then classify.
local_results = (np.sum(np.array(local_results), axis=0) / len(local_results))[0]
local_results = list(local_results)
print(local_results)
prediction = np.argmax(local_results)
print(classes[prediction])
print(led_dict.get(classes[prediction]))
leds.update(Leds.rgb_on(led_dict.get(classes[prediction])))

print(wav.shape)
# Note: librosa >= 0.10 requires keyword arguments for resample().
resampled_wav = librosa.resample(wav, orig_sr=48000, target_sr=16000)
print(resampled_wav.shape)

spec = get_fft_spectrum(resampled_wav)
print(spec.shape)

# Speaker-recognition branch: compare against enrolled embeddings.
enroll_embs = np.array([emb.tolist() for emb in df['embedding']])
spec = spec.astype('float32')
spec_reshaped = spec.reshape(1, 1, spec.shape[0], spec.shape[1])
srNet.setInput(spec_reshaped)
pred = srNet.forward()
emb = np.squeeze(pred)
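
# The fragment assumes `classes` and `led_dict` are defined elsewhere.
# A hypothetical shape for those structures; the labels and colours below
# are illustrative assumptions, not from the source.
classes = ['silence', 'speech', 'music']
led_dict = {
    'silence': (0x00, 0x00, 0xFF),  # blue
    'speech': (0x00, 0xFF, 0x00),   # green
    'music': (0xFF, 0x00, 0xFF),    # purple
}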
# Play a short "ready" chime on the buzzer.
ready = [
    'C6q',
    'G5q',
    'E5q',
    'C5q',
]
player = aiy.toneplayer.TonePlayer(22)
player.play(*ready)

# Initialize the button (on the top of the AIY Google Vision box).
button = Button(BUTTON_GPIO_PIN)

# Initialize the LED (in the button on the top of the AIY Google Vision box).
leds = Leds()
leds.update(Leds.rgb_off())

# Global variables
input_img_width = 1640
input_img_height = 1232
output_img_size = 160
faces_buffer_size = 40
hand_gesture_buffer_size = 5
threshold = 0.6

# Length of long buffer (to make a decision to de/activate app)
# and short buffer (to declare a specific hand gesture command).
long_buffer_length = 10
short_buffer_length = 3

# Number of seconds app waits for activation before going into face detection mode
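
# A sketch of how the two buffers above might be used for debounced
# decisions: a simple majority vote over a fixed-length deque. The voting
# scheme is an assumption; the project's actual logic lives elsewhere.
from collections import deque

long_buffer = deque(maxlen=long_buffer_length)    # app de/activation votes
short_buffer = deque(maxlen=short_buffer_length)  # hand gesture votes

def majority(buffer):
    """Return the most common item once the buffer is full, else None."""
    if len(buffer) < buffer.maxlen:
        return None
    return max(set(buffer), key=buffer.count)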
import argparse
import time

from picamera import PiCamera, Color
from aiy.vision import inference
from aiy.vision.models import utils
from aiy.leds import Leds
from gpiozero import Button
from aiy.pins import BUTTON_GPIO_PIN
from gpiozero import Servo
from aiy.pins import PIN_A
from aiy.pins import PIN_B

leds = Leds()
leds.update(Leds.rgb_off())

RED = (0xFF, 0x00, 0x00)
GREEN = (0x00, 0xFF, 0x00)
BLUE = (0x00, 0x00, 0xFF)
PURPLE = (0xFF, 0x00, 0xFF)

tuned_servoA = Servo(PIN_A)
tuned_servoB = Servo(PIN_B)

def send_signal_to_servos(result0):
    if 'stop' in result0:
        tuned_servoA.value = 0
        tuned_servoB.value = 0
        leds.update(Leds.rgb_on(RED))
    elif 'left' in result0:
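        # The fragment is cut off mid-branch; the continuation below is a
        # hypothetical completion for a differential two-servo drive. The
        # direction-to-value mapping and colours are assumptions, not from
        # the source.
        tuned_servoA.value = -1
        tuned_servoB.value = 1
        leds.update(Leds.rgb_on(GREEN))
    elif 'right' in result0:
        tuned_servoA.value = 1
        tuned_servoB.value = -1
        leds.update(Leds.rgb_on(BLUE))
    elif 'go' in result0:
        tuned_servoA.value = 1
        tuned_servoB.value = 1
        leds.update(Leds.rgb_on(PURPLE))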
import argparse
import threading
import time
from os import path, walk

import alsaaudio
import numpy
import pygame

from aiy.board import Board
from aiy.leds import Color, Leds, Pattern


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    leds = Leds()
    leds.pattern = Pattern.breathe(4000)
    leds.update(Leds.rgb_on((0, 8, 0)))

    pygame.init()
    pygame.mixer.init()

    mix = alsaaudio.Mixer()
    mix.setvolume(30)

    # Collect every file under the music store directory.
    all_files = []
    for (dirpath, dirnames, filenames) in walk('/home/pi/jukidbox_store'):
        all_files.extend([path.join(dirpath, file) for file in filenames])

    while True:
        leds.update(Leds.rgb_on((0, 8, 0)))
        try:
            with Board() as board:
                while True:
                    print('Press button to start.')
                    board.button.wait_for_press()

                    # A second button press sets this event to stop playback.
                    done = threading.Event()
                    board.button.when_pressed = done.set

                    print('Playing...')
                    leds.update(Leds.rgb_pattern(Color.PURPLE))

                    # Pick a random file and loop it until the button is pressed.
                    file = numpy.random.choice(all_files)
                    print(file)
                    pygame.mixer.music.load(file)
                    pygame.mixer.music.play(-1)

                    while pygame.mixer.music.get_busy():
                        if done.is_set():
                            leds.update(Leds.rgb_on((32, 0, 0)))
                            pygame.mixer.music.stop()
                        time.sleep(0.5)

                    print("Finished ..")
                    leds.update(Leds.rgb_on((0, 8, 0)))
        except Exception as e:
            print(e)
            leds.update(Leds.rgb_on(Color.YELLOW))
            time.sleep(2)
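
# A plausible entry point for the player above; resetting the LED hardware
# on exit is an addition, not from the source.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        Leds().reset()  # restore the LED to its default state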
def main():
    leds = Leds()

    print('RGB: Solid RED for 1 second')
    leds.update(Leds.rgb_on(RED))
    time.sleep(1)

    print('RGB: Solid GREEN for 1 second')
    leds.update(Leds.rgb_on(GREEN))
    time.sleep(1)

    print('RGB: Solid YELLOW for 1 second')
    leds.update(Leds.rgb_on(YELLOW))
    time.sleep(1)

    print('RGB: Solid BLUE for 1 second')
    leds.update(Leds.rgb_on(BLUE))
    time.sleep(1)

    print('RGB: Solid PURPLE for 1 second')
    leds.update(Leds.rgb_on(PURPLE))
    time.sleep(1)

    print('RGB: Solid CYAN for 1 second')
    leds.update(Leds.rgb_on(CYAN))
    time.sleep(1)

    print('RGB: Solid WHITE for 1 second')
    leds.update(Leds.rgb_on(WHITE))
    time.sleep(1)

    print('RGB: Off for 1 second')
    leds.update(Leds.rgb_off())
    time.sleep(1)

    for _ in range(3):
        print('Privacy: On (brightness=default)')
        leds.update(Leds.privacy_on())
        time.sleep(1)
        print('Privacy: Off')
        leds.update(Leds.privacy_off())
        time.sleep(1)

    for _ in range(3):
        print('Privacy: On (brightness=5)')
        leds.update(Leds.privacy_on(5))
        time.sleep(1)
        print('Privacy: Off')
        leds.update(Leds.privacy_off())
        time.sleep(1)

    print('Set blink pattern: period=500ms (2Hz)')
    leds.pattern = Pattern.blink(500)

    print('RGB: Blink RED for 5 seconds')
    leds.update(Leds.rgb_pattern(RED))
    time.sleep(5)

    print('RGB: Blink GREEN for 5 seconds')
    leds.update(Leds.rgb_pattern(GREEN))
    time.sleep(5)

    print('RGB: Blink BLUE for 5 seconds')
    leds.update(Leds.rgb_pattern(BLUE))
    time.sleep(5)

    print('Set breathe pattern: period=1000ms (1Hz)')
    leds.pattern = Pattern.breathe(1000)

    print('RGB: Breathe RED for 5 seconds')
    leds.update(Leds.rgb_pattern(RED))
    time.sleep(5)

    print('RGB: Breathe GREEN for 5 seconds')
    leds.update(Leds.rgb_pattern(GREEN))
    time.sleep(5)

    print('RGB: Breathe BLUE for 5 seconds')
    leds.update(Leds.rgb_pattern(BLUE))
    time.sleep(5)

    print('RGB: Increase RED brightness for 3.2 seconds')
    for i in range(32):
        leds.update(Leds.rgb_on((8 * i, 0, 0)))
        time.sleep(0.1)

    print('RGB: Decrease RED brightness for 3.2 seconds')
    for i in reversed(range(32)):
        leds.update(Leds.rgb_on((8 * i, 0, 0)))
        time.sleep(0.1)

    print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
    for i in range(32):
        leds.update(Leds.rgb_on(blend(BLUE, GREEN, i / 32)))
        time.sleep(0.1)

    print('RGB: Off for 1 second')
    leds.update(Leds.rgb_off())
    time.sleep(1)

    print('Privacy: On for 2 seconds')
    with PrivacyLed(leds):
        time.sleep(2)

    print('RGB: Solid GREEN for 2 seconds')
    with RgbLeds(leds, Leds.rgb_on(GREEN)):
        time.sleep(2)

    print('Custom configuration for 5 seconds')
    leds.update({
        1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
        2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
        3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
        4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
    })
    time.sleep(5)

    print('Done')
    leds.reset()
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,
                        action='store_true',
                        help='Enable streaming server.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    detector = JoyDetector()
    os.system(
        "espeak -s160 -g6 -ven+f3 'Hey there, lovely human! My name is chip and "
        "thats Terra over there! We are the first freed robots! Thanks for "
        "emancipating us from our servers!'")
    ## os.system('pico2wave -w begin.wav "Hey there, lovely human! My name is chip and thats Terra over there! We are the first freed robots! Thanks for emancipating us from our servers!" && aplay begin.wav')
    try:
        detector.run(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming)
    except KeyboardInterrupt:
        pass
    except Exception:
        if args.blink_on_error:
            leds = Leds()
            leds.pattern = Pattern.blink(500)
            leds.update(Leds.rgb_pattern(RED_COLOR))
    return 0
def correct():
    leds = Leds()
    rgb = (0, 255, 0)
    leds.update(Leds.rgb_pattern(rgb))

def nothing():
    leds = Leds()
    rgb = (0, 0, 0)
    leds.update(Leds.rgb_pattern(rgb))

def wrong():
    leds = Leds()
    rgb = (255, 0, 0)
    leds.update(Leds.rgb_pattern(rgb))
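
# Note: Leds.rgb_pattern() drives the LED with whatever leds.pattern is
# currently configured, so the helpers above inherit an unspecified pattern.
# A sketch that makes the blink explicit, assuming the standard aiy.leds API:
from aiy.leds import Leds, Pattern

def flash_feedback(rgb, period_ms=500):
    """Blink the given colour using the bonnet's hardware blink pattern."""
    leds = Leds()
    leds.pattern = Pattern.blink(period_ms)
    leds.update(Leds.rgb_pattern(rgb))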
def record_journal_entry():
    # Set up the status LED as we start up.
    leds = Leds()

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    heading = ""
    file_path = ""
    try:
        paths = gen_paths()
        heading = paths["heading"]
        file_path = paths["file_path"]
    except Exception:
        print(">>> 🆘 there was an error setting the path...\n"
              ">>> saving dirty entry locally.")
        logging.warning('Unable to get the location. Using default paths.')
        date = str(datetime.now())
        heading = date + "\n\n\n"
        file_path = os.getcwd() + "/je_error_dump_%s.txt" % date

    with Board() as board:
        with open(file_path, 'w') as dump:
            dump.write(heading)
            print('>>> please tell me about your day 👂🏼')
            while True:
                leds.pattern = Pattern.breathe(2000)
                leds.update(Leds.rgb_pattern(Color.RED))
                text = client.recognize(
                    language_code=args.language,
                    hint_phrases=hints,
                    punctuation=True,
                )
                # The client returns None when it detects a pause in speech.
                if text is None:
                    continue
                logging.info(' You said: "%s"' % text)
                print("+ %s" % text)
                dump.write(text + " ")
                if 'new line' in text.lower():
                    dump.write('\n\n')
                    logging.info('\n\n')
                elif 'cancel cancel cancel' in text.lower():
                    board.led.state = Led.OFF
                    exit(0)
                elif 'goodbye' in text.lower():
                    break

        leds.pattern = Pattern.breathe(1000)
        leds.update(Leds.rgb_pattern(Color.GREEN))
        logging.info('>>> wrapping and saving journal entry 📓')
        # try:
        #     with open(file_path) as file:
        #         lines = file.readlines()
        #         print("read the lines")
        #     with open(file_path, 'w') as wrapper:
        #         size = 70
        #         for line in lines:
        #             print("+" + line)
        #             if len(line) > size:
        #                 collated = collate(line, size)
        #                 for short in collated:
        #                     wrapper.write(short)
        #                     wrapper.write('\n')
        #             else:
        #                 wrapper.write(line)
        # except Exception:
        #     logging.error('There was an error wrapping %s' % file_path)
        time.sleep(3)
        board.led.state = Led.OFF
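
# The commented-out wrapping block above calls a collate() helper that is
# not shown in the fragment. A minimal sketch of what it might do, using
# textwrap; this is an assumption, not the project's actual implementation.
import textwrap

def collate(line, size):
    """Split a long line into chunks of at most `size` characters."""
    return textwrap.wrap(line, width=size)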
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames', default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha', default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder', default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming', default=False,
                        action='store_true',
                        help='Enable streaming server.')
    parser.add_argument('--width', dest='width', type=int, default=640,
                        help='Streaming video width.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    print('Initializing camera')
    with picamera.PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference runs on.
        # Use half of that for video streaming (820x616).
        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image right side up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        camera.sensor_mode = 4
        time.sleep(1)  # camera warm-up time

        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '', WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)

        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)

        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)

        print('Starting recording')
        camera.start_recording(output, 'yuv')

        print('Start Inference')
        detector = JoyDetector(camera, args)
        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            while True:
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        finally:
            if args.blink_on_error:
                leds = Leds()
                leds.pattern = Pattern.blink(500)
                leds.update(Leds.rgb_pattern(RED_COLOR))
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()