Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of TFLite model.',
                        required=True)
    parser.add_argument('--label',
                        help='File path of label file.',
                        required=True)
    args = parser.parse_args()
    with open(args.label, 'r', encoding="utf-8") as f:
        pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
        labels = dict((int(k), v) for k, v in pairs)
    engine = edgetpu.classification.engine.ClassificationEngine(args.model)
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.framerate = 30
        _, width, height, channels = engine.get_input_tensor_shape()
        camera.start_preview()
        try:
            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream,
                                                 format='rgb',
                                                 use_video_port=True,
                                                 resize=(width, height)):
                stream.truncate()
                stream.seek(0)
                input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                start_ms = time.time()
                results = engine.ClassifyWithInputTensor(input, top_k=1)
                elapsed_ms = time.time() - start_ms
                if results:
                    camera.annotate_text = "%s %.2f\n%.2fms" % (labels[
                        results[0][0]], results[0][1], elapsed_ms * 1000.0)
        finally:
            camera.stop_preview()
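The snippet above is not self-contained. A minimal preamble and entry point for it could look like this (a sketch inferred from the names the example uses, not taken from the original source):

import argparse
import io
import time

import numpy as np
import picamera
import edgetpu.classification.engine

# ... def main() as defined above ...

if __name__ == '__main__':
    main()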
Example #2
    def authorized_get(self):
        if self.path == '/':
            self.send_response(301)
            self.send_header('Location', '/index.html')
            self.end_headers()
        elif self.path == '/index.html':
            content = PAGE.encode('utf-8')
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len(content))
            self.end_headers()
            self.wfile.write(content)
        elif self.path == '/stream.mjpg':
            self.send_response(200)
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-Type', 'multipart/x-mixed-replace; boundary=FRAME')
            self.end_headers()
            try:
                stream_video = io.BytesIO()
                stream_tpu = io.BytesIO()
                _, width, height, channels = engine.get_input_tensor_shape()
                
                while True:
                    camera.capture(stream_tpu,
                                   format='rgb',
                                   use_video_port=True,
                                   resize=(width, height))

                    stream_tpu.truncate()
                    stream_tpu.seek(0)
                    input = np.frombuffer(stream_tpu.getvalue(), dtype=np.uint8)
                    start_ms = time.time()
                    results = engine.ClassifyWithInputTensor(input, top_k=1)
                    elapsed_ms = time.time() - start_ms

                    if results:
                        camera.annotate_text = "%s %.2f\n%.2fms" % (
                            labels[results[0][0]], results[0][1], elapsed_ms*1000.0)

                    camera.capture(stream_video, format='jpeg', use_video_port=True)
                    stream_video.truncate()
                    stream_video.seek(0)

                    self.wfile.write(b'--FRAME\r\n')
                    self.send_header('Content-Type', 'image/jpeg')
                    self.send_header('Content-Length', len(stream_video.getvalue()))
                    self.end_headers()
                    self.wfile.write(stream_video.getvalue())
                    self.wfile.write(b'\r\n')

            except Exception as e:
                logging.warning(
                    'Removed streaming client %s: %s',
                    self.client_address, str(e))
        else:
            self.send_error(404)
            self.end_headers()
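Example #2 is only the request handler: it relies on module-level engine, camera, labels, and PAGE objects, and on a server that routes GET requests to authorized_get. One plausible wiring, following the standard picamera MJPEG-streaming recipe (an assumption; the model path and port are hypothetical):

from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn

import picamera
import edgetpu.classification.engine

PAGE = '<html><body><img src="stream.mjpg" /></body></html>'

class StreamingHandler(BaseHTTPRequestHandler):
    # paste authorized_get() from above into this class
    def do_GET(self):
        self.authorized_get()

class StreamingServer(ThreadingMixIn, HTTPServer):
    daemon_threads = True

engine = edgetpu.classification.engine.ClassificationEngine('model_edgetpu.tflite')
labels = {}  # load from the label file as in Example #1
with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:
    StreamingServer(('', 8000), StreamingHandler).serve_forever()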
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='File path of TFLite model.',
                        required=True)
    parser.add_argument('--label',
                        help='File path of label file.',
                        required=True)
    args = parser.parse_args()

    with open(args.label, 'r', encoding="utf-8") as f:
        pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
        labels = dict((int(k), v) for k, v in pairs)

    engine = edgetpu.classification.engine.ClassificationEngine(args.model)
    camera = cv2.VideoCapture(0)

    if camera.isOpened():
        camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10, 470)
        fontScale = 0.6
        fontColor = (255, 255, 255)
        lineType = 2

        annotate_text = ""
        annotate_text_time = time.time()
        time_to_show_prediction = 3.0
        min_confidence = 0.2

        _, width, height, channels = engine.get_input_tensor_shape()
        try:
            while True:
                if not camera.isOpened():
                    continue
                ret, img = camera.read()
                if not ret:
                    continue
                input = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                input = cv2.resize(input, (width, height))
                input = input.reshape((width * height * channels))
                start_ms = time.time()
                results = engine.ClassifyWithInputTensor(input, top_k=1)
                elapsed_ms = time.time() - start_ms

                if results:
                    print("%s %.2f\n%.2fms" %
                          (labels[results[0][0]], results[0][1],
                           elapsed_ms * 1000.0))
        finally:
            camera.release()
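Example #3 declares display variables (font, bottomLeftCornerOfText, annotate_text, annotate_text_time, time_to_show_prediction, min_confidence) but never draws with them, so it only prints results. A hypothetical display step using those variables might sit at the end of the loop body:

                if results and results[0][1] >= min_confidence:
                    annotate_text = '%s %.2f' % (labels[results[0][0]],
                                                 results[0][1])
                    annotate_text_time = time.time()
                # keep the last prediction on screen for a few seconds
                if time.time() - annotate_text_time < time_to_show_prediction:
                    cv2.putText(img, annotate_text, bottomLeftCornerOfText,
                                font, fontScale, fontColor, lineType)
                cv2.imshow('classify', img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break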
Example #4
def main():
    default_model_dir = "../all_models"
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    args = parser.parse_args()

    with open(args.labels, 'r') as f:
        pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
        labels = dict((int(k), v) for k, v in pairs)

    engine = edgetpu.classification.engine.ClassificationEngine(args.model)

    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.framerate = 30
        camera.annotate_text_size = 20
        _, width, height, channels = engine.get_input_tensor_shape()
        camera.start_preview()
        try:
            stream = io.BytesIO()
            fps = deque(maxlen=20)
            fps.append(time.time())
            for foo in camera.capture_continuous(stream,
                                                 format='rgb',
                                                 use_video_port=True,
                                                 resize=(width, height)):
                stream.truncate()
                stream.seek(0)
                input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                start_ms = time.time()
                results = engine.ClassifyWithInputTensor(input, top_k=3)
                inference_ms = (time.time() - start_ms) * 1000.0
                fps.append(time.time())
                fps_ms = len(fps) / (fps[-1] - fps[0])
                camera.annotate_text = "Inference: %5.2fms FPS: %3.1f" % (
                    inference_ms, fps_ms)
                for result in results:
                    camera.annotate_text += "\n%.0f%% %s" % (100 * result[1],
                                                             labels[result[0]])
                print(camera.annotate_text)
        finally:
            camera.stop_preview()
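Note on the FPS estimate above: the deque holds up to 20 timestamps, which delimit len(fps) - 1 frame intervals, so len(fps) / (fps[-1] - fps[0]) slightly overestimates the rate (and the name fps_ms is misleading; the value is frames per second, not milliseconds). A corrected helper (a sketch, not from the original):

from collections import deque

def sliding_fps(timestamps):
    # N timestamps delimit N - 1 completed frame intervals
    if len(timestamps) < 2:
        return 0.0
    return (len(timestamps) - 1) / (timestamps[-1] - timestamps[0])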
Example #5
def main():
    default_model_dir = "../all_models"
    default_model = 'mobilenet_v2_1.0_224_quant_edgetpu.tflite'
    default_labels = 'imagenet_labels.txt'
    parser = argparse.ArgumentParser()
    parser.add_argument('--model',
                        help='.tflite model path',
                        default=os.path.join(default_model_dir, default_model))
    parser.add_argument('--labels',
                        help='label file path',
                        default=os.path.join(default_model_dir,
                                             default_labels))
    args = parser.parse_args()

    with open(args.labels, 'r') as f:
        pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
        labels = dict((int(k), v) for k, v in pairs)

    engine = edgetpu.classification.engine.ClassificationEngine(args.model)

    pygame.init()
    pygame.camera.init()
    camlist = pygame.camera.list_cameras()

    camera = pygame.camera.Camera(camlist[0], (640, 480))
    _, width, height, channels = engine.get_input_tensor_shape()
    camera.start()
    try:
        fps = deque(maxlen=20)
        fps.append(time.time())
        while True:
            imagen = camera.get_image()
            imagen = pygame.transform.scale(imagen, (width, height))
            input = np.frombuffer(imagen.get_buffer(), dtype=np.uint8)
            start_ms = time.time()
            results = engine.ClassifyWithInputTensor(input, top_k=3)
            inference_ms = (time.time() - start_ms) * 1000.0
            fps.append(time.time())
            fps_ms = len(fps) / (fps[-1] - fps[0])
            annotate_text = "Inference: %5.2fms FPS: %3.1f" % (inference_ms,
                                                               fps_ms)
            for result in results:
                annotate_text += "\n%.0f%% %s" % (100 * result[1],
                                                  labels[result[0]])
            print(annotate_text)
    finally:
        camera.stop()
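Feeding imagen.get_buffer() straight to the classifier assumes the pygame Surface is packed 24-bit RGB; the buffer layout actually follows the surface's pixel format, which need not match. A safer conversion via pygame.surfarray (a sketch):

import numpy as np
import pygame

def surface_to_rgb(surface):
    # array3d() returns (width, height, 3) RGB regardless of the
    # surface's internal format; transpose to row-major (height, width, 3)
    arr = pygame.surfarray.array3d(surface).transpose(1, 0, 2)
    return np.ascontiguousarray(arr, dtype=np.uint8).ravel()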
Example #6
def main(model_path):
    label_file_path = '../label/for_lt_20190731/label.csv'
    labels = get_labels_from_csv(label_file_path)
    engine = edgetpu.classification.engine.ClassificationEngine(model_path)
    with picamera.PiCamera() as camera:
        camera.resolution = (225, 225)
        # camera.resolution = (640, 480)
        camera.vflip = True
        camera.hflip = True
        camera.framerate = 30
        _, width, height, channels = engine.get_input_tensor_shape()
        camera.start_preview()
        try:
            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream,
                                                 format='rgb',
                                                 use_video_port=True,
                                                 resize=(width, height)):
                stream.truncate()
                stream.seek(0)
                input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                start_ms = time.time()
                results = engine.ClassifyWithInputTensor(input, top_k=5)
                elapsed_ms = time.time() - start_ms
                if results:
                    # camera.annotate_text = "%s %.2f\n%.2fms" % (
                    #     labels[results[0][0]], results[0][1], elapsed_ms*1000.0)
                    if results[0][1] < 0.2:
                        label = ""
                        target_value = 0.0
                    else:
                        label = labels[results[0][0]]
                        target_value = results[0][1]
                    camera.annotate_text = "%s %.1f\n%.2fms" % (
                        label, target_value, elapsed_ms*1000.0)
                    print(results)
        finally:
            camera.stop_preview()
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
      '--model', help='File path of TFLite model.', required=True)
    parser.add_argument(
      '--label', help='File path of label file.', required=True)
    args = parser.parse_args()

    with open(args.label, 'r') as f:
        pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
        labels = dict((int(k), v) for k, v in pairs)

    engine = edgetpu.classification.engine.ClassificationEngine(args.model)

    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.framerate = 30
        _, width, height, channels = engine.get_input_tensor_shape()
        camera.start_preview()
        try:
            stream = io.BytesIO()
            for foo in camera.capture_continuous(stream,
                                                 format='rgb',
                                                 use_video_port=True,
                                                 resize=(width, height)):
                stream.truncate()
                stream.seek(0)
                input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
                start_ms = time.time()
                results = engine.ClassifyWithInputTensor(input, top_k=1)
                elapsed_ms = time.time() - start_ms
                if results:
                    camera.annotate_text = "%s %.2f\n%.2fms" % (
                        labels[results[0][0]], results[0][1], elapsed_ms*1000.0)
        finally:
            camera.stop_preview()
Example #8
import time
from time import sleep
from picamera import PiCamera
import edgetpu.classification.engine
from io import BytesIO
import numpy as np


if __name__ == "__main__":
    print('Starting up')

    engine = edgetpu.classification.engine.ClassificationEngine('test_data/mobilenet_v2_1.0_224_quant_edgetpu.tflite')
    print('Edge TPU initialised')
    _, width, height, channels = engine.get_input_tensor_shape()
    with open('test_data/imagenet_labels.txt', 'r') as f:
        pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
        labels = dict((int(k), v) for k, v in pairs)
    print('Labels read')
    with PiCamera(resolution='800x600', framerate=30, sensor_mode=2) as camera:
        print('Camera initialised')
        camera.start_preview()
        # Camera warm-up time
        sleep(2)

        print('Starting classify_image_stream')
        while True:
            stream = BytesIO()
            camera.capture(stream,
                           format='rgb',
                           use_video_port=True,
                           resize=(width, height))
            # The published snippet stops at the capture call; the
            # classification step below mirrors the pattern used in the
            # other examples on this page.
            input = np.frombuffer(stream.getvalue(), dtype=np.uint8)
            start_ms = time.time()
            results = engine.ClassifyWithInputTensor(input, top_k=1)
            elapsed_ms = time.time() - start_ms
            if results:
                print('%s %.2f %.2fms' % (labels[results[0][0]],
                                          results[0][1],
                                          elapsed_ms * 1000.0))
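Allocating a fresh BytesIO on every iteration works but churns memory; the other picamera examples on this page instead reuse one stream with capture_continuous. The equivalent loop here would be (a sketch):

        stream = BytesIO()
        for _ in camera.capture_continuous(stream,
                                           format='rgb',
                                           use_video_port=True,
                                           resize=(width, height)):
            stream.truncate()
            stream.seek(0)
            # classify stream.getvalue() as above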
Example #9
    def authorized_get(self):
        if self.path == '/':
            self.send_response(301)
            self.send_header('Location', '/index.html')
            self.end_headers()
        elif self.path == '/index.html':
            content = PAGE.encode('utf-8')
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', len(content))
            self.end_headers()
            self.wfile.write(content)
        elif self.path == '/stream.mjpg':
            self.send_response(200)
            self.send_header('Age', 0)
            self.send_header('Cache-Control', 'no-cache, private')
            self.send_header('Pragma', 'no-cache')
            self.send_header('Content-Type',
                             'multipart/x-mixed-replace; boundary=FRAME')
            self.end_headers()
            font = cv2.FONT_HERSHEY_SIMPLEX

            try:
                stream_video = io.BytesIO()
                stream_tpu = io.BytesIO()
                _, width, height, channels = engine.get_input_tensor_shape()

                while True:
                    ret, color_image = cap.read()
                    if not ret:
                        break

                    prepimg = color_image[:, :, ::-1].copy()
                    prepimg = cv2.resize(prepimg, (width, height))
                    prepimg = np.reshape(prepimg, (width * height * 3, ))

                    start_ms = time.time()
                    results = engine.ClassifyWithInputTensor(prepimg, top_k=1)
                    elapsed_ms = time.time() - start_ms

                    if results:
                        caption = "%s %.2f\n%.2fms" % (labels[results[0][0]],
                                                       results[0][1],
                                                       elapsed_ms * 1000.0)
                    else:
                        caption = "..."

                    imgRGB = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
                    cv2.putText(imgRGB, caption, (0, 30), font, 1,
                                (255, 255, 255), 2, cv2.LINE_AA)

                    jpg = Image.fromarray(imgRGB)
                    jpg.save(stream_video, 'JPEG')

                    stream_video.truncate()
                    stream_video.seek(0)
                    self.wfile.write(b'--FRAME\r\n')
                    # self.wfile.write(b"--jpgboundary")
                    self.send_header('Content-Type', 'image/jpeg')
                    self.send_header('Content-Length',
                                     len(stream_video.getvalue()))
                    self.end_headers()
                    self.wfile.write(stream_video.getvalue())
                    self.wfile.write(b'\r\n')

            except Exception as e:
                logging.warning('Removed streaming client %s: %s',
                                self.client_address, str(e))
        else:
            self.send_error(404)
            self.end_headers()
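The name authorized_get suggests this handler is dispatched from a do_GET that first checks credentials; that wrapper is not shown in the source. A hypothetical version using HTTP Basic auth:

import base64
from http.server import BaseHTTPRequestHandler

class StreamingHandler(BaseHTTPRequestHandler):
    # paste authorized_get() from above into this class

    def do_GET(self):
        # 'user:password' is a placeholder credential
        expected = 'Basic ' + base64.b64encode(b'user:password').decode('ascii')
        if self.headers.get('Authorization') == expected:
            self.authorized_get()
        else:
            self.send_response(401)
            self.send_header('WWW-Authenticate', 'Basic realm="camera"')
            self.end_headers()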