Example #1
def run(model, config_file):
    global nn, pre_process, post_process
    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)

    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)

    run_impl(config)
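
For context, a minimal sketch of how run() above might be wired to a command line. This is an assumption, not part of the example: only the -m flag is hinted at by the error message, and the argparse wrapper below is illustrative.

import argparse

def main():
    # Hypothetical CLI wrapper; flag names mirror the "-m" hint in the error message above.
    parser = argparse.ArgumentParser(description="Run inference with a .so or .pb model.")
    parser.add_argument("-m", "--model", required=True, help="Path to a .so or .pb model file")
    parser.add_argument("-c", "--config_file", required=True, help="Path to the YAML config file")
    args = parser.parse_args()
    run(args.model, args.config_file)

if __name__ == "__main__":
    main()
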
Example #2
def _run(model, image_data, config):
    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    if file_extension == '.so':  # Shared library
        # load and initialize the generated shared model
        nn = NNLib()
        nn.load(model)
        nn.init()

    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)
        nn.init()

    # run the graph
    output = nn.run(image_data)

    return output
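
The examples use NNLib and TensorflowGraphRunner interchangeably, so they implicitly assume a common runner interface. A rough sketch of that assumed interface is below; the GraphRunner name and the type hints are assumptions, only load/init/run appear in the examples.

import numpy as np
from typing import Protocol

class GraphRunner(Protocol):
    # Interface implied by the examples: load a model file, initialize it, run a batch.
    def load(self, model_path: str) -> None: ...
    def init(self) -> None: ...
    def run(self, data: np.ndarray) -> np.ndarray: ...
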
Example #3
def run(model, config_file):
    global nn, pre_process, post_process
    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)

    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)

    if config.TASK == "IMAGE.CLASSIFICATION":
        run_classification(config)

    if config.TASK == "IMAGE.OBJECT_DETECTION":
        run_object_detection(config)

    if config.TASK == "IMAGE.SEMANTIC_SEGMENTATION":
        run_sementic_segmentation(config)
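
The chain of if statements dispatching on config.TASK could also be written as a dictionary lookup. A small sketch of that alternative, assuming the same handler functions exist (names spelled exactly as in the example above):

TASK_HANDLERS = {
    "IMAGE.CLASSIFICATION": run_classification,
    "IMAGE.OBJECT_DETECTION": run_object_detection,
    "IMAGE.SEMANTIC_SEGMENTATION": run_sementic_segmentation,
}

def dispatch_task(config):
    # Look up the handler for the configured task and fail loudly on unknown tasks.
    handler = TASK_HANDLERS.get(config.TASK)
    if handler is None:
        raise ValueError("Unsupported task: %s" % config.TASK)
    handler(config)
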
Example #4
def run(model, config_file, port=80):
    global nn, pre_process, post_process, config, stream, pool

    filename, file_extension = os.path.splitext(model)
    supported_files = [".so", ".pb"]

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    if file_extension == ".so":  # Shared library
        nn = NNLib()
        nn.load(model)

    elif file_extension == ".pb":  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner

        nn = TensorflowGraphRunner(model)

    stream = VideoStream(CAMERA_SOURCE, CAMERA_WIDTH, CAMERA_HEIGHT,
                         CAMERA_FPS)

    config = load_yaml(config_file)

    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    pool = Pool(processes=1, initializer=_init_worker)

    try:
        server = ThreadedHTTPServer(("", port), MotionJpegHandler)
        print("server starting")
        server.serve_forever()
    except KeyboardInterrupt:
        print("KeyboardInterrpt in server - ending server")
        stream.release()
        pool.terminate()
        pool.join()
        server.socket.close()
        server.shutdown()

    return
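
ThreadedHTTPServer is not defined in the excerpt. The conventional way to build a threaded HTTP server from the standard library is the sketch below; the MotionJpegHandler request handler is assumed and not shown here.

from http.server import HTTPServer
from socketserver import ThreadingMixIn

class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle each request in a separate thread."""
    daemon_threads = True  # do not block shutdown on in-flight requests
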
Example #5
def _run(model, input_image, config):
    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']

    if file_extension not in supported_files:
        raise Exception("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))

    # load the image
    img = Image.open(input_image).convert("RGB")

    # convert into numpy array
    data = np.asarray(img)
    raw_image = data

    # pre-process the image
    data = _pre_process(data, config.PRE_PROCESSOR, config.DATA_FORMAT)

    # add the batch dimension
    data = np.expand_dims(data, axis=0)

    if file_extension == '.so':  # Shared library
        # load and initialize the generated shared model
        nn = NNLib()
        nn.load(model)
        nn.init()

    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)
        nn.init()

    # run the graph
    output = nn.run(data)

    return output, raw_image
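
A short usage sketch for _run above, assuming a YAML config loaded with load_yaml as in the other examples; the file paths are illustrative, and the output is assumed to be a NumPy array as returned by the runner.

# Hypothetical driver: paths and attribute names mirror the examples above.
config = load_yaml("config.yaml")
output, raw_image = _run("model.so", "sample.png", config)
print("output shape:", output.shape)  # output type depends on the runner; a NumPy array is assumed
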
Example #6
def run(library, config_file):
    global nn, pre_process, post_process
    nn = NNLib()
    nn.load(library)
    nn.init()

    config = load_yaml(config_file)

    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    if config.TASK == "IMAGE.CLASSIFICATION":
        run_classification(config)

    if config.TASK == "IMAGE.OBJECT_DETECTION":
        run_object_detection(config)