def model():
    print(os.path.exists(_COMPUTE_GRAPH_NAME))
    return ModelDescriptor(
        name='FaceRecognition',
        input_shape=(1, 160, 160, 3),
        input_normalizer=(127.5, 128),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

def model_descriptor(graph_name):
    # Face detection model has special implementation in VisionBonnet firmware.
    # input_shape, input_normalizer, and compute_graph params have no effect.
    return ModelDescriptor(
        name='MODEL' + graph_name,
        input_shape=(1, 0, 0, 3),
        input_normalizer=(0, 0),
        compute_graph=utils.load_compute_graph(graph_name))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='test_model', help='Model identifier.')
    parser.add_argument('--model_path', required=True, help='Path to model file.')
    parser.add_argument('--test_file', default=None, help='Path to test file.')
    args = parser.parse_args()

    model = ModelDescriptor(
        name=args.model_name,
        input_shape=(1, 192, 192, 3),
        input_normalizer=(0, 1),
        compute_graph=utils.load_compute_graph(args.model_path))

    if args.test_file:
        with ImageInference(model) as inference:
            image = Image.open(args.test_file)
            result = inference.run(image)
            print(tensors_info(result.tensors))
        return

    with PiCamera(sensor_mode=4, framerate=30):
        with CameraInference(model) as inference:
            for result in inference.run():
                print('#%05d (%5.2f fps): %s' %
                      (inference.count, inference.rate, tensors_info(result.tensors)))

def model():
    # Face detection model has special implementation in VisionBonnet firmware.
    # input_shape, input_normalizer, and compute_graph params have no effect.
    return ModelDescriptor(
        name='FaceDetection',
        input_shape=(1, 0, 0, 3),
        input_normalizer=(0, 0),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument('--input_height', type=int, required=True, help='Input height.')
    parser.add_argument('--input_width', type=int, required=True, help='Input width.')
    parser.add_argument('--input_mean', type=float, default=128.0, help='Input mean.')
    parser.add_argument('--input_std', type=float, default=128.0, help='Input std.')
    parser.add_argument('--input_depth', type=int, default=3, help='Input depth.')
    args = parser.parse_args()

    model = ModelDescriptor(
        name='test_run_model',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))

    with PiCamera(sensor_mode=4, framerate=30) as camera:
        with CameraInference(model) as camera_inference:
            last_time = time.time()
            for i, result in enumerate(camera_inference.run()):
                output_tensor_str = [
                    '%s [%d elements]' % (k, len(v.data))
                    for k, v in result.tensors.items()
                ]
                cur_time = time.time()
                fps = 1.0 / (cur_time - last_time)
                last_time = cur_time
                print('%d-th inference, fps: %.1f FPS, %s' %
                      (i, fps, ','.join(output_tensor_str)))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='test_model', help='Model identifier.')
    parser.add_argument('--model_path', required=True, help='Path to model file.')
    parser.add_argument('--speed', default=0.5, type=float, help='Reduction factor on speed.')
    args = parser.parse_args()

    model = ModelDescriptor(
        name=args.model_name,
        input_shape=(1, 192, 192, 3),
        input_normalizer=(0, 1),
        compute_graph=utils.load_compute_graph(args.model_path))

    left = Motor(PIN_A, PIN_B)
    right = Motor(PIN_C, PIN_D)
    print('spinning')
    try:
        with PiCamera(sensor_mode=4, framerate=30):
            with CameraInference(model) as inference:
                for result in inference.run():
                    data = [tensor.data for _, tensor in result.tensors.items()]
                    lspeed, rspeed = data[0]
                    print('#%05d (%5.2f fps): %1.2f/%1.2f' %
                          (inference.count, inference.rate, lspeed, rspeed))
                    if lspeed < 0:
                        left.reverse(-max(-1, lspeed) * args.speed)
                    else:
                        left.forward(min(1, lspeed) * args.speed)
                    if rspeed < 0:
                        right.reverse(-max(-1, rspeed) * args.speed)
                    else:
                        right.forward(min(1, rspeed) * args.speed)
    except Exception as e:
        left.stop()
        right.stop()
        print(e)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='test_model', help='Model identifier.')
    parser.add_argument('--model_path', required=True, help='Path to model file.')
    parser.add_argument('--input_height', type=int, required=True, help='Input height.')
    parser.add_argument('--input_width', type=int, required=True, help='Input width.')
    parser.add_argument('--input_depth', type=int, default=3, help='Input depth.')
    parser.add_argument('--input_mean', type=float, default=128.0, help='Input mean.')
    parser.add_argument('--input_std', type=float, default=128.0, help='Input std.')
    args = parser.parse_args()

    model = ModelDescriptor(
        name=args.model_name,
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))

    with PiCamera(sensor_mode=4, framerate=30):
        with CameraInference(model) as inference:
            for result in inference.run():
                print('#%05d (%5.2f fps): %s' %
                      (inference.count, inference.rate, tensors_info(result.tensors)))

def model():
    return ModelDescriptor(
        name='object_detection',
        input_shape=(1, 256, 256, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

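# Hedged usage sketch for the object_detection descriptor above, mirroring the
# camera-inference pattern used elsewhere in these snippets. get_objects() is
# taken from the public aiy.vision.models.object_detection module; the camera
# settings here are illustrative assumptions, not part of the snippet above.
from picamera import PiCamera

from aiy.vision.inference import CameraInference
from aiy.vision.models import object_detection

with PiCamera(sensor_mode=4, framerate=30):
    with CameraInference(object_detection.model()) as inference:
        for result in inference.run():
            objects = object_detection.get_objects(result)
            print('#%05d (%5.2f fps): %d object(s)' %
                  (inference.count, inference.rate, len(objects)))
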
def model(model_type):
    this_model = _MODELS[model_type]
    return ModelDescriptor(
        name=model_type,
        input_shape=this_model.input_shape,
        input_normalizer=this_model.input_normalizer,
        compute_graph=this_model.compute_graph())

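# Hedged sketch of the _MODELS registry that model() above indexes into. The
# field names mirror the attribute accesses in model(); the concrete entry and
# graph file below are placeholders, not the original project's values, and
# utils is assumed to be the same load_compute_graph helper used above.
from collections import namedtuple

_ModelSpec = namedtuple('_ModelSpec',
                        ['input_shape', 'input_normalizer', 'compute_graph'])

_MODELS = {
    'image_classification': _ModelSpec(
        input_shape=(1, 160, 160, 3),
        input_normalizer=(128.0, 128.0),
        # Stored as a callable so the graph bytes are only loaded when the
        # descriptor is actually built.
        compute_graph=lambda: utils.load_compute_graph(
            'mobilenet_v1_160res_0.5_imagenet.binaryproto')),
}
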
def model():
    return ModelDescriptor(
        name='DishDetection',
        input_shape=(1, 0, 0, 3),
        input_normalizer=(0, 0),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

# See the picamera documentation for details about the parameters.
frameWidth = 256
frameHeight = 256
frameRate = 20
contrast = 40
rotation = 180

# Set the picamera parameters.
camera = picamera.PiCamera()
camera.resolution = (frameWidth, frameHeight)
camera.framerate = frameRate
camera.contrast = contrast

model = ModelDescriptor(
    name="DarthVaderDetector",
    input_shape=(1, 256, 256, 3),
    input_normalizer=(128.0, 128.0),
    compute_graph=utils.load_compute_graph(
        os.path.join(os.getcwd(), "darthvader.binaryproto")))

# Start the video process.
with ImgCap(model, frameWidth, frameHeight, DEBUG) as img:
    camera.start_recording(img, format='rgb', splitter_port=1)
    try:
        while True:
            # With timeout=0 (the default), wait_recording returns immediately.
            camera.wait_recording(timeout=0)
            # if img.output is not None:
            #     print(img.output[0, 0, 0])
    except KeyboardInterrupt:
        pass  # cleanup body truncated in the original snippet

def model(model_type=MOBILENET):
    return ModelDescriptor(
        name=model_type,
        input_shape=(1, 160, 160, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME_MAP[model_type]))

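# Hedged sketch of the constants model() above depends on. The MOBILENET name
# and graph file follow the stock AIY image_classification module (the same
# .binaryproto appears in a later snippet); treat both as illustrative rather
# than this project's actual values.
MOBILENET = 'image_classification_mobilenet'

_COMPUTE_GRAPH_NAME_MAP = {
    MOBILENET: 'mobilenet_v1_160res_0.5_imagenet.binaryproto',
}
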
def model_roll():
    return ModelDescriptor(
        name='roll_inference',
        input_shape=(1, 64, 64, 3),
        input_normalizer=(128, 128),
        compute_graph=utils.load_compute_graph(_ROLL_GRAPH_NAME))

def model():
    return ModelDescriptor(
        name='dish_classifier',
        input_shape=(1, 192, 192, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

parser.add_argument('--input_mean', type=float, default=128.0, help='Input mean.')
parser.add_argument('--input_std', type=float, default=128.0, help='Input std.')
parser.add_argument('--debug', default=False, action='store_true')
args = parser.parse_args()
DEBUG = args.debug

model = ModelDescriptor(
    name=args.model_name,
    input_shape=(1, args.input_height, args.input_width, args.input_depth),
    input_normalizer=(args.input_mean, args.input_std),
    compute_graph=utils.load_compute_graph(args.model_path))

# Start the video process.
with ImgCap(model, frameWidth, frameHeight, DEBUG) as img:
    camera.start_recording(img, format='rgb', splitter_port=1)
    try:
        while True:
            # With timeout=0 (the default), wait_recording returns immediately.
            camera.wait_recording(timeout=0)
            # if img.output is not None:
            #     print(img.output[0, 0, 0])
    except KeyboardInterrupt:
        pass  # cleanup body truncated in the original snippet

def model():
    return ModelDescriptor(
        name='cifar10_classification',
        input_shape=(1, 32, 32, 3),
        input_normalizer=(127.5, 127.5),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

parser.add_argument('--label_path', required=True, help='Path to label file.')
parser.add_argument('--model_path', required=True, help='Path to model file.')
parser.add_argument('--input', required=True, help='Path to input image.')
parser.add_argument('--input_size', type=int, required=True,
                    help='Input size (height and width).')
parser.add_argument('--output_key', required=True)
args = parser.parse_args()

image = Image.open(args.input)
width, height = image.size

model = ModelDescriptor(
    name=args.model_name,
    input_shape=(1, args.input_size, args.input_size, 3),
    input_normalizer=(128, 128),
    compute_graph=utils.load_compute_graph(args.model_path))

inference = ImageInference(model)
if inference:
    starttime = datetime.now()
    result = inference.run(image)
    deltatime = datetime.now() - starttime
    print(str(deltatime.seconds) + "s " + str(deltatime.microseconds / 1000) + "ms")
    assert len(result.tensors) == 1
    tensor = result.tensors[args.output_key]

def model():
    return ModelDescriptor(
        name='image_classification',
        input_shape=(1, 160, 160, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(_COMPUTE_GRAPH_NAME))

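# Hedged sketch: running the classifier described above on a single image file
# instead of the camera stream. Assumes the stock AIY ImageInference and
# image_classification.get_classes helpers; 'test.jpg' is a hypothetical path.
from PIL import Image

from aiy.vision.inference import ImageInference
from aiy.vision.models import image_classification

with ImageInference(image_classification.model()) as inference:
    image = Image.open('test.jpg')  # hypothetical input path
    # get_classes returns (label, probability) pairs, highest score first.
    classes = image_classification.get_classes(inference.run(image), top_k=3)
    for label, score in classes:
        print('%s: %.2f' % (label, score))
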
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        required=True,
        help='Path to converted model file that can run on VisionKit.')
    parser.add_argument(
        '--label_path',
        required=True,
        help='Path to label file that corresponds to the model.')
    parser.add_argument('--input_height', type=int, required=True, help='Input height.')
    parser.add_argument('--input_width', type=int, required=True, help='Input width.')
    parser.add_argument('--input_layer', required=True, help='Name of input layer.')
    parser.add_argument('--output_layer', required=True, help='Name of output layer.')
    parser.add_argument(
        '--num_frames',
        type=int,
        default=-1,
        help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--input_mean', type=float, default=128.0, help='Input mean.')
    parser.add_argument('--input_std', type=float, default=128.0, help='Input std.')
    parser.add_argument('--input_depth', type=int, default=3, help='Input depth.')
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.6,
        help='Threshold for classification score (from output tensor).')
    parser.add_argument(
        '--preview',
        action='store_true',
        default=False,
        help='Enables camera preview in addition to printing result to terminal.')
    parser.add_argument(
        '--gpio_logic',
        default='NORMAL',
        help='Indicates if NORMAL or INVERSE logic is used in GPIO pins.')
    parser.add_argument(
        '--show_fps',
        action='store_true',
        default=False,
        help='Shows end-to-end FPS.')
    args = parser.parse_args()

    # Model & labels.
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera() as camera:
        # Forced sensor mode, 1640x1232, full FoV. See:
        # https://picamera.readthedocs.io/en/release-1.13/fov.html#sensor-modes
        # This is the resolution inference is run on.
        camera.sensor_mode = 4
        # Scaled and cropped resolution. If different from sensor mode implied
        # resolution, inference results must be adjusted accordingly. This is
        # true in particular when camera.start_recording is used to record an
        # encoded h264 video stream as the Pi encoder can't encode all native
        # sensor resolutions, or a standard one like 1080p may be desired.
        camera.resolution = (1640, 1232)
        # Start the camera stream.
        camera.framerate = 30
        camera.start_preview()

        while True:
            while True:
                long_buffer = []
                short_buffer = []
                pinStatus(pin_A, 'LOW', args.gpio_logic)
                pinStatus(pin_B, 'LOW', args.gpio_logic)
                pinStatus(pin_C, 'LOW', args.gpio_logic)
                leds.update(Leds.rgb_on(GREEN))
                face_box = detect_face()
                print("Entered the loop of face classifier")
                hand_box_params = determine_hand_box_params(face_box)
                if image_boundary_check(hand_box_params):
                    print("Hand gesture identified")
                    break

            # Start hand classifier.
            is_active = False
            leds.update(Leds.rgb_on(PURPLE))
            start_timer = time.time()
            with ImageInference(model) as img_inference:
                while True:
                    print("Entered the loop of gesture classifier")
                    # check_termination_trigger()
                    if is_active:
                        leds.update(Leds.rgb_on(RED))
                    hands_image = capture_hands_image(camera, hand_box_params)
                    output = classify_hand_gestures(
                        img_inference,
                        hands_image,
                        model=model,
                        labels=labels,
                        output_layer=args.output_layer,
                        threshold=args.threshold)

                    short_guess, num_short_guess = buffer_update(
                        output, short_buffer, short_buffer_length)
                    long_guess, num_long_guess = buffer_update(
                        output, long_buffer, long_buffer_length)

                    # Activation of classifier.
                    if ((long_guess == activation_index or long_guess == deactivation_index)
                            and not is_active
                            and num_long_guess >= (long_buffer_length - 3)):
                        is_active = True
                        leds.update(Leds.rgb_on(RED))
                        send_signal_to_pins(activation_index, args.gpio_logic)
                        long_buffer = []
                        num_long_guess = 0
                        time.sleep(1)

                    # Deactivation of classifier (go back to stable face detection).
                    if ((long_guess == activation_index or long_guess == deactivation_index)
                            and is_active
                            and num_long_guess >= (long_buffer_length - 3)):
                        is_active = False
                        leds.update(Leds.rgb_off())
                        long_buffer = []
                        num_long_guess = 0
                        send_signal_to_pins(deactivation_index, args.gpio_logic)
                        time.sleep(1)
                        break

                    # If not activated within max_no_activity_period seconds,
                    # go back to stable face detection.
                    if not is_active:
                        timer = time.time() - start_timer
                        if timer >= max_no_activity_period:
                            leds.update(Leds.rgb_off())
                            send_signal_to_pins(deactivation_index, args.gpio_logic)
                            time.sleep(1)
                            break
                    else:
                        start_timer = time.time()

                    # Displaying classified hand gesture commands.
                    if num_short_guess >= (short_buffer_length - 1) and is_active:
                        print_hand_command(short_guess)
                        send_signal_to_pins(short_guess, args.gpio_logic)

        camera.stop_preview()

def main():
    model_path = '/opt/aiy/models/retrained_graph.binaryproto'
    #model_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    #label_path = '/opt/aiy/models/mobilenet_v1_160res_0.5_imagenet_labels.txt'
    model_path = '/opt/aiy/models/rg_v3_new.binaryproto'
    label_path = '/opt/aiy/models/retrained_labels_new.txt'
    input_height = 160
    input_width = 160
    input_layer = 'input'
    output_layer = 'final_result'
    threshold = 0.8

    # Model & labels.
    model = ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, input_height, input_width, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(model_path))
    labels = read_labels(label_path)
    new_labels = []
    for eachLabel in labels:
        if len(eachLabel) > 1:
            new_labels.append(eachLabel)
    labels = new_labels
    #print(labels)

    s = xmlrpc.client.ServerProxy("http://aiy.mdzz.info:8000/")
    player = TonePlayer(BUZZER_GPIO, 10)
    player.play(*MODEL_LOAD_SOUND)

    while True:
        while True:
            if s.camera() == 1:
                print('vision kit is woken up')
                with Leds() as leds:
                    leds.pattern = Pattern.blink(100)
                    leds.update(Leds.rgb_pattern(Color.RED))
                    time.sleep(2.0)
                start_time = round(time.time())
                break
            time.sleep(0.2)
            print('no signal, sleeping...')

        with PiCamera() as camera:
            # Configure camera.
            camera.sensor_mode = 4
            camera.resolution = (1664, 1232)  # Full frame, 16:9 (Camera v2).
            camera.framerate = 30
            camera.start_preview()

            while True:
                # Do inference on VisionBonnet.
                #print('Start capturing')
                with CameraInference(face_detection.model()) as inference:
                    for result in inference.run():
                        #print(type(result))
                        faces = face_detection.get_faces(result)
                        if len(faces) >= 1:
                            #print('camera captures...')
                            extension = '.jpg'
                            filename = time.strftime('%Y-%m-%d %H:%M:%S') + extension
                            camera.capture(filename)
                            image_npp = np.empty((1664 * 1232 * 3,), dtype=np.uint8)
                            camera.capture(image_npp, 'rgb')
                            image_npp = image_npp.reshape((1232, 1664, 3))
                            image_npp = image_npp[:1232, :1640, :]
                            # image = Image.open('jj.jpg')
                            # draw = ImageDraw.Draw(image)
                            faces_data = []
                            faces_cropped = []
                            for i, face in enumerate(faces):
                                # print('Face #%d: %s' % (i, face))
                                x, y, w, h = face.bounding_box
                                #print(x, y, w, h)
                                w_rm = int(0.3 * w / 2)
                                face_cropped = crop_np((x, y, w, h), w_rm, image_npp)
                                if face_cropped is None:
                                    #print('face_cropped None')
                                    continue
                                # faces_data.append(image[y:y + h, x + w_rm:x + w - w_rm])
                                # image[y:y + h, x + w_rm:x + w - w_rm].save('1.jpg')
                                face_cropped.save('face_cropped_' + str(i) + '.jpg')
                                faces_cropped.append(face_cropped)
                                #break
                            break
                        # else:
                        #     tt = round(time.time()) - start_time
                        #     if tt > 10:
                        #         break

                #print('face cutting finishes')
                #print(type(faces_cropped), len(faces_cropped))
                player.play(*BEEP_SOUND)

                flag = 0
                for eachFace in faces_cropped:
                    #print(type(eachFace))
                    if eachFace is None:
                        flag = 1
                if len(faces_cropped) <= 0:
                    flag = 1
                if flag == 1:
                    continue

                with ImageInference(model) as img_inference:
                #with CameraInference(model) as img_inference:
                    print('Entering classify_hand_gestures()')
                    output = classify_hand_gestures(
                        img_inference,
                        faces_cropped,
                        model=model,
                        labels=labels,
                        output_layer=output_layer,
                        threshold=threshold)
                    #print(output)
                    if output == 3:
                        player.play(*JOY_SOUND)
                        print('Yani face detected')
                        print(s.result("Owner", filename))
                    else:
                        player.play(*SAD_SOUND)
                        print('Suspicious face detected')
                        print(s.result("Unknown Face", filename))
                        upload(filename)

                # Stop preview
                # break
                while s.camera() == 0:
                    print('sleeping')
                    time.sleep(.2)
                print('Waken up')