def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (kbps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
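# For reference: `preview_alpha` above is not argparse's own; it is an argparse
# type callable defined elsewhere in the demo. A minimal sketch of such a
# validator (the exact error message is an assumption):
def preview_alpha(string):
    value = int(string)
    if value < 0 or value > 255:
        raise argparse.ArgumentTypeError('Must be in [0, 255] range.')
    return value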
def main():
    with Leds() as leds:
        print('RGB: Solid RED for 1 second')
        leds.update(Leds.rgb_on(Color.RED))
        time.sleep(1)
        print('RGB: Solid GREEN for 1 second')
        leds.update(Leds.rgb_on(Color.GREEN))
        time.sleep(1)
        print('RGB: Solid YELLOW for 1 second')
        leds.update(Leds.rgb_on(Color.YELLOW))
        time.sleep(1)
        print('RGB: Solid BLUE for 1 second')
        leds.update(Leds.rgb_on(Color.BLUE))
        time.sleep(1)
        print('RGB: Solid PURPLE for 1 second')
        leds.update(Leds.rgb_on(Color.PURPLE))
        time.sleep(1)
        print('RGB: Solid CYAN for 1 second')
        leds.update(Leds.rgb_on(Color.CYAN))
        time.sleep(1)
        print('RGB: Solid WHITE for 1 second')
        leds.update(Leds.rgb_on(Color.WHITE))
        time.sleep(1)
        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=default)')
            leds.update(Leds.privacy_on())
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        for _ in range(3):
            print('Privacy: On (brightness=5)')
            leds.update(Leds.privacy_on(5))
            time.sleep(1)
            print('Privacy: Off')
            leds.update(Leds.privacy_off())
            time.sleep(1)

        print('Set blink pattern: period=500ms (2Hz)')
        leds.pattern = Pattern.blink(500)
        print('RGB: Blink RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)
        print('RGB: Blink GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)
        print('RGB: Blink BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('Set breathe pattern: period=1000ms (1Hz)')
        leds.pattern = Pattern.breathe(1000)
        print('RGB: Breathe RED for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)
        print('RGB: Breathe GREEN for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.GREEN))
        time.sleep(5)
        print('RGB: Breathe BLUE for 5 seconds')
        leds.update(Leds.rgb_pattern(Color.BLUE))
        time.sleep(5)

        print('RGB: Increase RED brightness for 3.2 seconds')
        for i in range(32):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Decrease RED brightness for 3.2 seconds')
        for i in reversed(range(32)):
            leds.update(Leds.rgb_on((8 * i, 0, 0)))
            time.sleep(0.1)

        print('RGB: Blend between GREEN and BLUE for 3.2 seconds')
        for i in range(32):
            color = Color.blend(Color.BLUE, Color.GREEN, i / 32)
            leds.update(Leds.rgb_on(color))
            time.sleep(0.1)

        print('RGB: Off for 1 second')
        leds.update(Leds.rgb_off())
        time.sleep(1)

        print('Privacy: On for 2 seconds')
        with PrivacyLed(leds):
            time.sleep(2)

        print('RGB: Solid GREEN for 2 seconds')
        with RgbLeds(leds, Leds.rgb_on(Color.GREEN)):
            time.sleep(2)

        print('Custom configuration for 5 seconds')
        leds.update({
            1: Leds.Channel(Leds.Channel.PATTERN, 128),  # Red channel
            2: Leds.Channel(Leds.Channel.OFF, 0),        # Green channel
            3: Leds.Channel(Leds.Channel.ON, 128),       # Blue channel
            4: Leds.Channel(Leds.Channel.PATTERN, 64),   # Privacy channel
        })
        time.sleep(5)

        print('Done')
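# PrivacyLed and RgbLeds used above are context managers from aiy.leds that
# apply an LED state for the duration of a block and restore it on exit. A
# minimal sketch of an equivalent, hypothetical helper built on the same API:
from contextlib import contextmanager

@contextmanager
def temporary_rgb(leds, state):
    leds.update(state)  # apply the requested LED state on entry
    try:
        yield
    finally:
        leds.update(Leds.rgb_off())  # always switch the RGB LED off on exit

# Usage: with temporary_rgb(leds, Leds.rgb_on(Color.GREEN)): time.sleep(2)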
def main():
    # Alternative models, overridden below:
    #model_path = '/opt/aiy/models/retrained_graph.binaryproto'
    #model_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet.binaryproto'
    #label_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet_labels.txt'
    model_path = '/opt/aiy/models/rg_v3_new.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    input_height = 160
    input_width = 160
    input_layer = 'input'
    output_layer = 'final_result'
    threshold = 0.8

    # Model & labels
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, input_height, input_width, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(model_path))
    # Keep only labels longer than one character.
    labels = [label for label in read_labels(label_path) if len(label) > 1]

    s = xmlrpc.client.ServerProxy('http://aiy.mdzz.info:8000/')
    player = TonePlayer(BUZZER_GPIO, 10)
    player.play(*MODEL_LOAD_SOUND)

    while True:
        # Wait until the remote server signals the camera to wake up.
        while True:
            if s.camera() == 1:
                print('vision kit is woken up')
                with Leds() as leds:
                    leds.pattern = Pattern.blink(100)
                    leds.update(Leds.rgb_pattern(Color.RED))
                    time.sleep(2.0)
                start_time = round(time.time())
                break
            time.sleep(0.2)
            print('no signal, sleeping...')

        with PiCamera() as camera:
            # Configure camera: sensor mode 4 is the full-FoV 1640x1232 (4:3) mode
            # on Camera v2; width is padded to 1664, a multiple of 32, for raw capture.
            camera.sensor_mode = 4
            camera.resolution = (1664, 1232)
            camera.framerate = 30
            camera.start_preview()

            while True:
                # Run face detection on the Vision Bonnet.
                with CameraInference(face_detection.model()) as inference:
                    for result in inference.run():
                        faces = face_detection.get_faces(result)
                        if len(faces) >= 1:
                            filename = time.strftime('%Y-%m-%d %H:%M:%S') + '.jpg'
                            camera.capture(filename)
                            image_npp = np.empty((1664 * 1232 * 3,), dtype=np.uint8)
                            camera.capture(image_npp, 'rgb')
                            image_npp = image_npp.reshape((1232, 1664, 3))
                            image_npp = image_npp[:1232, :1640, :]  # drop the padding columns
                            faces_cropped = []
                            for i, face in enumerate(faces):
                                x, y, w, h = face.bounding_box
                                w_rm = int(0.3 * w / 2)  # margin trimmed from each side
                                face_cropped = crop_np((x, y, w, h), w_rm, image_npp)
                                if face_cropped is None:
                                    continue
                                face_cropped.save('face_cropped_' + str(i) + '.jpg')
                                faces_cropped.append(face_cropped)
                            break
                        # else:
                        #     tt = round(time.time()) - start_time
                        #     if tt > 10:
                        #         break

                player.play(*BEEP_SOUND)
                flag = 0
                for each_face in faces_cropped:
                    if each_face is None:
                        flag = 1
                if len(faces_cropped) <= 0:
                    flag = 1
                if flag == 1:
                    continue

                with ImageInference(model) as img_inference:
                    print('Entering classify_hand_gestures()')
                    output = classify_hand_gestures(img_inference,
                                                    faces_cropped,
                                                    model=model,
                                                    labels=labels,
                                                    output_layer=output_layer,
                                                    threshold=threshold)
                    if output == 3:
                        player.play(*JOY_SOUND)
                        print('Yani face detected')
                        print(s.result('Owner', filename))
                    else:
                        player.play(*SAD_SOUND)
                        print('Suspicious face detected')
                        print(s.result('Unknown Face', filename))
                        upload(filename)

                # Sleep until the server turns the camera back on.
                while s.camera() == 0:
                    print('sleeping')
                    time.sleep(.2)
                print('Woken up')
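# `crop_np` is defined elsewhere in this script. A plausible sketch, assuming
# it trims `w_rm` pixels from each side of the bounding box, crops the RGB
# array, and returns a PIL image (or None for a degenerate box):
from PIL import Image

def crop_np(box, w_rm, image_np):
    x, y, w, h = (int(v) for v in box)
    left, right = x + w_rm, x + w - w_rm
    top, bottom = y, y + h
    if left >= right or top >= bottom:
        return None  # nothing left after trimming the margins
    return Image.fromarray(image_np[top:bottom, left:right])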
def listen_me():
    global text, duration
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    # Load the Vokaturi native library.
    Vokaturi.load("/home/pi/lib/piZero.so")

    # Initialize the Cloud Speech client and the Natural Language client.
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()
    logging.basicConfig(level=logging.INFO)

    with Board() as board:
        while True:
            print("Let's talk.")
            text = None
            duration = 0.
            emotion = None

            def wait():
                global text, duration
                start = time.monotonic()
                while text is None:
                    # Recognize speech as text.
                    text = client.recognize(language_code='ko-KR')
                    duration = time.monotonic() - start

            # Record while waiting for recognition to finish.
            record_file(AudioFormat.CD, filename=args.filename, wait=wait,
                        filetype='wav')
            print(text)
            print('Recorded: %.02f seconds' % duration)

            # Korean exit phrases, matched against the ko-KR recognition result:
            # 'thanks for listening', 'thanks for listening to me', 'assistant',
            # 'bye', 'goodbye'.
            if text in ['들어줘서 고마워', '내 얘기 들어줘서 고마워', '어시스턴트', '잘가', '잘 가']:
                return

            # Text sentiment analysis.
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment
            print('Text sentiment analysis *********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))

            # Thresholds; may be tuned after experimentation.
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1  # text sentiment analysis is enough

            if sentiment.score < neg_standard or sentiment.score > pos_standard:
                # Text sentiment alone is decisive.
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")
            else:
                # Fall back to emotion analysis of the recorded audio.
                print('Audio emotion analysis *********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                buffer_length = len(samples)
                print("  %d samples, %d channels" % (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo: average the two channels
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0

                voice = Vokaturi.Voice(sample_rate, buffer_length)
                voice.fill(buffer_length, c_buffer)

                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)

                if quality.valid:
                    # Ignore fear; weigh happiness against sadness and anger.
                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness + emotionProbabilities.anger)
                    if wave_score > 0:
                        print('@@@positive')
                        emotion = True
                    else:
                        print('@@@negative')
                        emotion = False

            # Text analysis was ambiguous and wave analysis failed
            # (usually because the clip was too short).
            if emotion is None:
                print('please say again')  # a neutral response could go here instead
                continue

            # React to the detected emotion.
            with Leds() as leds:
                if emotion is True:
                    # tts.say('I am glad to hear that.')
                    # tts.say('진짜? 대박.')  # 'Really? Awesome.'
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                else:
                    # tts.say('I am sorry to hear that.')
                    # tts.say('저런. 힘내.')  # 'Oh no. Cheer up.'
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
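# The sample conversion above normalizes 16-bit PCM to [-1.0, 1.0] and averages
# the two channels of a stereo recording. The same step as a standalone helper
# (a sketch, not part of the original script):
import numpy as np

def to_float_mono(samples):
    if samples.ndim == 1:  # mono
        return samples / 32768.0
    # mean() promotes to float before summing, avoiding int16 overflow.
    return samples.mean(axis=1) / 32768.0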
def main_loop(self):
    while True:
        with Leds() as leds:
            leds.update(Leds.rgb_on(Color.RED))
            with Board() as board:
                print('Waiting for input')
                board.button.wait_for_press()
                leds.update(Leds.rgb_on((0, 0, 250)))
                self.start = True
                self.counter = 0
                self.completed = False
                self.stopwatch = time.time()
                board.button.wait_for_release()
                leds.update(Leds.rgb_off())

            while self.start:
                classes = currentState
                if classes == 0 and self.state != 0:
                    self.standing()
                elif classes == 1 and self.state != 1:
                    self.empty()
                elif classes == 2 and self.state != 2 and self.last_detected_state != 2:
                    self.squat()

                # Selecting a state once it has been held long enough.
                if (time.time() - self.stopwatch) > 0.15:
                    print('State:\t', states_names[self.state], '\t| [selected]')
                    if self.state == 2 and self.last_detected_state != 2:
                        # Squat detected
                        self.counter += 1
                        leds.update(Leds.rgb_on((0, 0, 250)))
                        self._newSqaut()
                    if self.state == 2 or self.state == 0:
                        #self.stopwatch = time.time()
                        leds.update(Leds.rgb_on(Color.WHITE))
                    if self.state == 1 and ((time.time() - self.stopwatch) > 1):
                        leds.update(Leds.rgb_off())
                    self.last_detected_state = self.state

                # Resetting the counter if nobody is in the frame.
                if (time.time() - self.stopwatch) > 10:
                    if self.state == 1:  # nobody in the frame: reset the counter
                        print('### Reset Score ###')
                        self.counter = 0
                        self.start = False
                        leds.pattern = Pattern.blink(500)
                        leds.update(Leds.rgb_pattern(Color.RED))
                        time.sleep(2)
                        leds.update(Leds.rgb_off())
                    self.stopwatch = time.time()

                # Checking for the finish.
                if self.counter >= TOTAL_SQUATS:
                    self.completed = True
                    self.output.on()
                    self.counter = 0
                    print('Completed Workout')
                    self.start = False
                    leds.pattern = Pattern.blink(500)
                    leds.update(Leds.rgb_pattern(Color.GREEN))
                    time.sleep(2)
                    leds.update(Leds.rgb_on(Color.GREEN))
                    with Board() as board:
                        print('Waiting for input')
                        board.button.wait_for_press()
                        print('ON')
                        board.led.state = Led.ON
                        self.start = False
                        self.counter = 0
                        self.completed = False
                        self.stopwatch = time.time()
                        board.button.wait_for_release()
                        print('OFF')
                        self.output.off()
                        board.led.state = Led.OFF
                        leds.pattern = Pattern.blink(500)
                        leds.update(Leds.rgb_pattern(Color.RED))
                        time.sleep(2)
                        leds.update(Leds.rgb_off())
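# The stopwatch comparisons above implement a simple hold-time debounce: a raw
# classification only becomes the "selected" state once it has persisted long
# enough. The same idea as a standalone sketch (thresholds from main_loop):
import time

def state_held(since, hold_s=0.15):
    # True once the current state has been stable for hold_s seconds.
    return (time.time() - since) > hold_s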
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='tmpImage',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    # Streaming defaults to on; the flag is kept for interface compatibility.
    parser.add_argument('--enable_streaming', default=True, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (kbps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    parser.add_argument('--cam_width', type=int, default=1640,
                        help='Camera width')
    parser.add_argument('--cam_height', type=int, default=1232,
                        help='Camera height')
    parser.add_argument('--fps', type=int, default=30,
                        help='Camera frames per second')
    parser.add_argument('--region', nargs=4, type=int,
                        default=[1040, 600, 600, 632],
                        help='Region for entering/exiting face detection: '
                             'x, y, width, height')
    parser.add_argument('--enter_side', type=int, default=1,
                        help='Side of the region considered "entering": '
                             '1 = right, 0 = left')
    # store_true added: the original took a free-form value, so any string
    # (even "False") counted as truthy.
    parser.add_argument('--annotator', default=False, action='store_true',
                        help='Show the annotator overlay; disables camera snapshots')
    parser.add_argument('--url', default='http://isrow.net',
                        help='URL to send the captured face images to')
    parser.add_argument('--username', default=None,
                        help='User name used to authenticate this device initially')
    parser.add_argument('--password', default=None,
                        help='Password used to authenticate this device initially')
    parser.add_argument('--image_dir', default='events/',
                        help='{url + "/" + image_dir} gives the path to send '
                             'the face data to')
    args = parser.parse_args()

    try:
        monitor_run(args.num_frames, args.preview_alpha, args.image_format,
                    args.image_folder, args.enable_streaming,
                    args.streaming_bitrate, args.mdns_name, args.cam_width,
                    args.cam_height, args.fps, args.region, args.enter_side,
                    args.annotator, args.url, args.username, args.password,
                    args.image_dir)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running the monitor demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
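# A hypothetical sketch of how --region and --enter_side could be combined to
# classify a detected face as "entering"; the midpoint rule is an assumption,
# and the real logic lives in monitor_run:
def is_entering(face_x, region, enter_side):
    x, y, w, h = region
    midpoint = x + w / 2
    # enter_side == 1 means the right side of the region counts as entering.
    return face_x > midpoint if enter_side == 1 else face_x < midpoint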
def main():
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_frames', '-n', type=int, default=None,
                        help='Number of frames to run for')
    parser.add_argument('--preview_alpha', '-pa', type=preview_alpha, default=0,
                        help='Video preview overlay transparency (0-255)')
    parser.add_argument('--image_format', default='jpeg',
                        choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images')
    parser.add_argument('--image_folder', default='~/Pictures',
                        help='Folder to save captured images')
    parser.add_argument('--blink_on_error', default=False, action='store_true',
                        help='Blink red if error occurred')
    parser.add_argument('--enable_streaming', default=False, action='store_true',
                        help='Enable streaming server')
    parser.add_argument('--streaming_bitrate', type=int, default=1000000,
                        help='Streaming server video bitrate (kbps)')
    parser.add_argument('--mdns_name', default='',
                        help='Streaming server mDNS name')
    args = parser.parse_args()

    broker_address = 'io.adafruit.com'
    print('creating new instance')
    user = '******'
    password = '******'
    print('connecting to broker')
    client = mqtt.Client('AIY_VISION_KIT')  # create a new MQTT client instance
    client.username_pw_set(user, password=password)
    client.on_log = on_log
    client.connect(broker_address, 1883, 60)  # connect to the broker
    client.loop_start()

    try:
        joy_detector(args.num_frames, args.preview_alpha, args.image_format,
                     args.image_folder, args.enable_streaming,
                     args.streaming_bitrate, args.mdns_name, client)
    except KeyboardInterrupt:
        pass
    except Exception:
        logger.exception('Exception while running joy demo.')
        if args.blink_on_error:
            with Leds() as leds:
                leds.pattern = Pattern.blink(100)  # 10 Hz
                leds.update(Leds.rgb_pattern(Color.RED))
                time.sleep(1.0)

    return 0
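# Inside joy_detector, the MQTT client can publish readings to Adafruit IO.
# Topics take the form '<username>/feeds/<feed-key>'; the feed key and payload
# format below are assumptions for illustration:
def publish_joy_score(client, username, joy_score):
    # Hypothetical feed key 'joy-score'; payload is the score as plain text.
    client.publish('%s/feeds/joy-score' % username, '%.2f' % joy_score)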
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_frames', '-n', type=int, dest='num_frames',
                        default=-1,
                        help='Number of frames to run for, -1 to not terminate')
    parser.add_argument('--preview_alpha', '-pa', type=int, dest='preview_alpha',
                        default=0,
                        help='Transparency value of the preview overlay (0-255).')
    parser.add_argument('--image_format', type=str, dest='image_format',
                        default='jpeg', choices=('jpeg', 'bmp', 'png'),
                        help='Format of captured images.')
    parser.add_argument('--image_folder', type=str, dest='image_folder',
                        default='~/Pictures',
                        help='Folder to save captured images.')
    parser.add_argument('--blink_on_error', dest='blink_on_error', default=False,
                        action='store_true',
                        help='Blink red if error occurred.')
    parser.add_argument('--enable_streaming', dest='enable_streaming',
                        default=False, action='store_true',
                        help='Enable streaming server.')
    # type=int replaces the original action='store_true', which made --width a
    # boolean flag and the 640 default unusable as a width.
    parser.add_argument('--width', type=int, dest='width', default=640,
                        help='Streaming video width.')
    args = parser.parse_args()

    if args.preview_alpha < 0 or args.preview_alpha > 255:
        parser.error('Invalid preview_alpha value: %d' % args.preview_alpha)

    if not os.path.exists('/dev/vision_spicomm'):
        logger.error('AIY Vision Bonnet is not attached or not configured properly.')
        return 1

    print('Initializing camera')
    with picamera.PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference runs on.
        # Use half of that for video streaming (820x616).
        camera.resolution = (WIDTH, HEIGHT)
        camera.framerate = FRAMERATE
        camera.vflip = VFLIP  # flips image right side up, as needed
        camera.hflip = HFLIP  # flips image left-right, as needed
        camera.sensor_mode = 4
        time.sleep(1)  # camera warm-up time

        print('Initializing websockets server on port %d' % WS_PORT)
        WebSocketWSGIHandler.http_version = '1.1'
        websocket_server = make_server(
            '', WS_PORT,
            server_class=WSGIServer,
            handler_class=WebSocketWSGIRequestHandler,
            app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
        websocket_server.initialize_websockets_manager()
        websocket_thread = Thread(target=websocket_server.serve_forever)

        print('Initializing HTTP server on port %d' % HTTP_PORT)
        http_server = StreamingHttpServer()
        http_thread = Thread(target=http_server.serve_forever)

        print('Initializing broadcast thread')
        output = BroadcastOutput(camera)
        broadcast_thread = BroadcastThread(output.converter, websocket_server)

        print('Starting recording')
        camera.start_recording(output, 'yuv')

        print('Start Inference')
        detector = JoyDetector(camera, args)

        try:
            print('Starting websockets thread')
            websocket_thread.start()
            print('Starting HTTP server thread')
            http_thread.start()
            print('Starting broadcast thread')
            broadcast_thread.start()
            while True:
                camera.wait_recording(1)
        except KeyboardInterrupt:
            pass
        finally:
            if args.blink_on_error:
                leds = Leds()
                leds.pattern = Pattern.blink(500)
                leds.update(Leds.rgb_pattern(RED_COLOR))
            print('Stopping recording')
            camera.stop_recording()
            print('Waiting for broadcast thread to finish')
            broadcast_thread.join()
            print('Shutting down HTTP server')
            http_server.shutdown()
            print('Shutting down websockets server')
            websocket_server.shutdown()
            print('Waiting for HTTP server thread to finish')
            http_thread.join()
            print('Waiting for websockets thread to finish')
            websocket_thread.join()
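# The constants referenced by main() (WIDTH, HEIGHT, FRAMERATE, VFLIP, HFLIP,
# WS_PORT, HTTP_PORT, RED_COLOR) are module-level configuration defined
# elsewhere. Plausible values, consistent with the comments above but otherwise
# assumptions:
WIDTH, HEIGHT = 1640, 1232   # full-FoV inference resolution (sensor mode 4)
FRAMERATE = 30
VFLIP, HFLIP = False, False  # set True if the camera is mounted flipped
HTTP_PORT, WS_PORT = 8082, 8084
RED_COLOR = (255, 0, 0)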