Example No. 1
0
def start_self_driving():
    """Load the driving model and stream predictions while the global `on` flag is set."""
    global on
    model = inference.ModelDescriptor(
        name='mobilenet_160',
        input_shape=(1, 160, 160, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(MODEL_NAME))
    with PiCamera(sensor_mode=4, resolution=(160, 160),
                  framerate=30) as camera:
        # Background frame-capture worker; daemonized so it dies with the process.
        capture_worker = threading.Thread(target=capture, args=(camera,))
        capture_worker.daemon = True
        capture_worker.start()

        with inference.CameraInference(model) as cam_inf:
            print('Model is ready. Type on/off to start/stop self-driving')
            sys.stdout.flush()

            # Worker that toggles the global `on` flag from user input.
            toggle_worker = threading.Thread(target=on_off)
            toggle_worker.daemon = True
            toggle_worker.start()
            for frame_result in cam_inf.run():
                if not on:
                    continue
                direction, probability = process(frame_result)
                print('prediction: {:.2f} {} {:.2f}'.format(
                    time.time(), direction, probability))
                sys.stdout.flush()
Example No. 2
0
    def run(self, input_layer, output_layer, num_frames, input_mean, input_std,
            threshold, top_k, detecting_list, message_threshold, model,
            labels):
        """Run camera inference until `num_frames` frames are seen or stop is requested.

        Plays a sound once the model is loaded, then processes each frame and
        hands the result to `detection_made`. The player is always stopped and
        joined on exit.
        """
        detection_count = 0
        logger.info('Starting...')
        player = Player(gpio=22, bpm=10)
        try:
            with PiCamera(sensor_mode=4, resolution=(1640, 1232),
                          framerate=30) as camera:
                with inference.CameraInference(model) as camera_inference:
                    prev_time = time.time()
                    logger.info('Model loaded.')
                    player.play(MODEL_LOAD_SOUND)
                    for frame_idx, result in enumerate(camera_inference.run()):
                        # Stop after the requested frame count or on external shutdown.
                        if frame_idx == num_frames or self._done.is_set():
                            break
                        processed_result = process(result, labels,
                                                   output_layer, threshold,
                                                   top_k)
                        # Notify (sound/message) when a detection crosses the threshold.
                        detection_made(processed_result, detection_count,
                                       message_threshold, player)
                        now = time.time()
                        # NOTE(review): fps and message are computed but never
                        # used — the logging lines were commented out upstream.
                        fps = 1.0 / (now - prev_time)
                        prev_time = now
                        message = get_message(processed_result, threshold,
                                              top_k)
        finally:
            player.stop()
            player.join()
Example No. 3
0
def main():
    """Parse CLI options, classify camera frames, and drive servos from the result."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--model_path', required=True,
        help='Path to converted model file that can run on VisionKit.')
    arg_parser.add_argument(
        '--label_path', required=True,
        help='Path to label file that corresponds to the model.')
    arg_parser.add_argument(
        '--input_height', type=int, required=True, help='Input height.')
    arg_parser.add_argument(
        '--input_width', type=int, required=True, help='Input width.')
    arg_parser.add_argument(
        '--input_layer', required=True, help='Name of input layer.')
    arg_parser.add_argument(
        '--output_layer', required=True, help='Name of output layer.')
    arg_parser.add_argument(
        '--num_frames', type=int, default=None,
        help='Sets the number of frames to run for, otherwise runs forever.')
    arg_parser.add_argument(
        '--input_mean', type=float, default=128.0, help='Input mean.')
    arg_parser.add_argument(
        '--input_std', type=float, default=128.0, help='Input std.')
    arg_parser.add_argument(
        '--input_depth', type=int, default=3, help='Input depth.')
    arg_parser.add_argument(
        '--threshold', type=float, default=0.1,
        help='Threshold for classification score (from output tensor).')
    arg_parser.add_argument(
        '--top_k', type=int, default=3, help='Keep at most top_k labels.')
    arg_parser.add_argument(
        '--preview', action='store_true', default=False,
        help='Enables camera preview in addition to printing result to terminal.')
    arg_parser.add_argument(
        '--show_fps', action='store_true', default=False,
        help='Shows end to end FPS.')
    args = arg_parser.parse_args()

    # Describe the on-device model from the parsed options.
    model = inference.ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, args.input_height, args.input_width, args.input_depth),
        input_normalizer=(args.input_mean, args.input_std),
        compute_graph=utils.load_compute_graph(args.model_path))
    labels = read_labels(args.label_path)

    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
        if args.preview:
            camera.start_preview()

        with inference.CameraInference(model) as cam_inf:
            for result in cam_inf.run(args.num_frames):
                processed = process(result, labels, args.output_layer,
                                    args.threshold, args.top_k)
                send_signal_to_servos(processed[0])
                message = get_message(processed, args.threshold, args.top_k)
                if args.show_fps:
                    message += '\nWith %.1f FPS.' % cam_inf.rate
                print(message)

                if args.preview:
                    camera.annotate_foreground = Color('black')
                    camera.annotate_background = Color('white')
                    # PiCamera text annotation only supports ascii.
                    ascii_message = message.encode(
                        'ascii', 'backslashreplace').decode('ascii')
                    camera.annotate_text = '\n %s' % ascii_message

        if args.preview:
            camera.stop_preview()
Example No. 4
0
def start_self_driving():
    """Continuously classify camera frames and steer the vehicle accordingly."""
    model = inference.ModelDescriptor(
        name='mobilenet_160',
        input_shape=(1, 160, 160, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph(MODEL_NAME))
    print('Model loaded')
    with PiCamera(sensor_mode=4, resolution=(160, 160), framerate=30) as camera:
        print('Connected to the Pi Camera')
        with inference.CameraInference(model) as classifier:
            for frame in classifier.run():
                direction, probability = process(frame)
                # Act on the prediction, then log it with a timestamp.
                RCool_drive.drive(direction)
                print('{:.2f} {} {:.2f}'.format(time.time(), direction, probability))
def test_mode(test_time=30):
    """Run the classifier with the camera preview on, overlaying each result.

    Args:
        test_time: number of frames to process (forwarded to
            CameraInference.run); defaults to 30.

    NOTE(review): this function relies on module-level names `model`,
    `labels`, and `args` being defined elsewhere in this file — confirm
    they exist before calling. The PEP 8 spacing of the default argument
    (`test_time = 30` -> `test_time=30`) was the only code change.
    """
    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
        camera.start_preview()

        with inference.CameraInference(model) as camera_inference:
            for result in camera_inference.run(test_time):
                processed_result = process(result, labels, args.output_layer,
                                           args.threshold, 1)
                message = get_message(processed_result, args.threshold, 1)

                camera.annotate_text_size = 120
                # PiCamera text annotation only supports ascii.
                camera.annotate_text = '\n %s' % message.encode(
                    'ascii', 'backslashreplace').decode('ascii')
def main():
    """Classify camera frames as crack/no-crack and signal the result on LEDs.

    Runs forever (num_frame=None). Green LED = clear, red LED = crack.

    Fixes: removed the unused `show_fps` local, cleaned the stray
    over-indented/whitespace-only lines, and wrapped the over-long
    `process(...)` call.
    """
    # Load the model and labels.
    model = inference.ModelDescriptor(
        name='mobilenet_based_classifier',
        input_shape=(1, 160, 160, 3),
        input_normalizer=(128.0, 128.0),
        compute_graph=utils.load_compute_graph('CrackClassification_graph.binaryproto'))
    print("Model loaded.")

    labels = read_labels(label_path + 'crack_label.txt')
    print("Labels loaded")

    # Classifier parameters.
    top_k = 3
    threshold = 0.4
    num_frame = None  # None => run until interrupted

    # LED setup: start in the "clear" state (green on, red off).
    ledRED = LED(PIN_B)
    ledGREEN = LED(PIN_A)
    ledRED.off()
    ledGREEN.on()

    with PiCamera(sensor_mode=4, resolution=(1640, 1232), framerate=30) as camera:
        with inference.CameraInference(model) as camera_inference:
            for result in camera_inference.run(num_frame):
                processed_result = process(result, labels, 'final_result',
                                           threshold, top_k)

                if processed_result[0][0] == 'positive':
                    print("CRACK")
                    ledGREEN.off()
                    ledRED.on()
                else:
                    print("CLEAR")
                    ledRED.off()
                    ledGREEN.on()

                print("Camera inference rate: " + str(camera_inference.rate))