Code Example #1
def load_ort():
    # Load ONNX
    load_onnx = timer('Load ONNX Model')
    ort_session = ort.InferenceSession('alexnet.onnx')
    load_onnx.end()

    return ort_session, ort_session.get_inputs()[0].name
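Code Examples #1 through #4 read like fragments of a single benchmark script; the imports they rely on are not shown, but would presumably include roughly the following (the TensorRT helpers trt_runtime and load_engine are set up as in Code Examples #6 and #7):

import numpy as np
import onnxruntime as ort
import torch
import torchvision

# Custom
from log import timer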
Code Example #2
def load_trt():
    # load trt engine
    load_tensorrt = timer("Load TRT Engine")
    trt_path = 'alexnet.trt'
    engine = load_engine(trt_runtime, trt_path)
    load_tensorrt.end()

    return engine
Code Example #3
def load_model():
    # Load Model (timer variable named so it does not shadow this function)
    load_model_timer = timer('Load Normal Model')
    model = torchvision.models.alexnet(pretrained=True).eval().cuda()
    load_model_timer.end()
    return model
Code Example #4
def get_res(predict):
    # Get Labels
    with open('../imagenet_classes.txt') as f:
        t = [i.replace('\n', '') for i in f.readlines()]

    # softmax is assumed to be defined/imported elsewhere (e.g. scipy.special.softmax)
    predict = softmax(np.array(predict))
    print(f"Result : {t[np.argmax(predict)]} , {np.max(predict)}\n")


if __name__ == "__main__":

    ### Prepare
    img_pil, img_np = load_data()
    model = load_model()
    ort_session, in_name = load_ort()  # named so it does not shadow the onnxruntime module

    ### Normal Model Infer
    infer_torch = timer("Run Torch Infer")
    with torch.no_grad():
        # img_pil must already be a preprocessed CUDA tensor (the model lives on the GPU), despite the _pil suffix
        out_torch = model(img_pil)[0]
    infer_torch.end()
    get_res(out_torch.cpu())

    ### ORT Infer
    infer_onnx = timer('Run ORT Infer')
    out_ort = ort_session.run(None, {in_name: img_np})[0]
    infer_onnx.end()

    get_res(out_ort)
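load_data() is not shown in these fragments; judging from how its return values are used (a CUDA tensor fed to the PyTorch model, a NumPy array fed to ONNX Runtime) and from the preprocessing in Code Example #5, a plausible sketch is the following (the path and names are assumptions):

from PIL import Image
from torchvision import transforms as T


def load_data(path='../test_photo.jpg'):
    trans = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
    img_tensor = trans(Image.open(path)).unsqueeze(0)  # 1 x 3 x 224 x 224
    # first value feeds the CUDA model, second feeds ONNX Runtime
    return img_tensor.cuda(), img_tensor.numpy()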
Code Example #5
from PIL import Image
import numpy as np
import onnxruntime as ort
from torchvision import transforms as T

# Custom
from log import timer, logger

trans = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])

img = Image.open('../test_photo.jpg')
img_tensor = trans(img).unsqueeze(0)
img_np = np.array(img_tensor)

logger('Image : {} >>> {}'.format(np.shape(img), np.shape(img_tensor)))

# ONNX Run Time
load_onnx = timer('Load ONNX Model')
ort_session = ort.InferenceSession('alexnet.onnx')
load_onnx.end()

# run(output_names, input_feed, run_options=None)
input_name = ort_session.get_inputs()[0].name

infer_onnx = timer('Run Infer')
outputs = ort_session.run(None, {input_name: img_np})[0]
infer_onnx.end()

# Get Labels
with open('../imagenet_classes.txt') as f:
    t = [i.replace('\n', '') for i in f.readlines()]
logger("Result : {}".format(t[np.argmax(outputs)]))
Code Example #6
import numpy as np
from PIL import Image
from torchvision import transforms as T
import tensorrt as trt

# Custom
from log import timer, logger
# `common` (the buffer / inference helpers from NVIDIA's TensorRT Python samples)
# and `load_engine` (defined in Code Example #7) are assumed to be importable here.
import common

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)


def load_data(path):
    trans = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])

    img = Image.open(path)
    img_tensor = trans(img).unsqueeze(0)
    return np.array(img_tensor)


# load trt engine
load_trt = timer("Load TRT Engine")
trt_path = 'alexnet.trt'
engine = load_engine(trt_runtime, trt_path)
load_trt.end()

# allocate buffers
inputs, outputs, bindings, stream = common.allocate_buffers(engine)
# load data
inputs[0].host = load_data('../test_photo.jpg')

# inference
infer_trt = timer("TRT Inference")
with engine.create_execution_context() as context:
    trt_outputs = common.do_inference(context,
                                      bindings=bindings,
                                      inputs=inputs,
                                      outputs=outputs,
                                      stream=stream)
infer_trt.end()
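Assuming common.do_inference returns the host output buffers as flat NumPy arrays (as NVIDIA's sample helpers do), mapping the TensorRT result back to a label mirrors the ONNX path:

with open('../imagenet_classes.txt') as f:
    labels = [line.strip() for line in f]
logger("Result : {}".format(labels[int(np.argmax(trt_outputs[0]))]))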
Code Example #7

# Custom
from log import timer


def save_engine(engine, engine_path):

    buf = engine.serialize()
    with open(engine_path, 'wb') as f:
        f.write(buf)


def load_engine(trt_runtime, engine_path):

    with open(engine_path, 'rb') as f:
        engine_data = f.read()
    engine = trt_runtime.deserialize_cuda_engine(engine_data)

    return engine


if __name__ == "__main__":

    onnx_path = 'alexnet.onnx'
    trt_path = 'alexnet.trt'
    input_shape = [1, 224, 224, 3]

    build_trt = timer('Parse ONNX & Build TensorRT Engine')
    engine = build_engine(onnx_path, input_shape)
    build_trt.end()

    save_trt = timer('Save TensorRT Engine')
    save_engine(engine, trt_path)
    save_trt.end()
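build_engine() itself is not shown. A minimal sketch, assuming the TensorRT 7/8 Python API (trt.OnnxParser plus the since-deprecated builder.build_engine path), could look like the following; the body is an illustration matching the call above, not the author's implementation:

import tensorrt as trt

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)


def build_engine(onnx_path, input_shape):
    # Parse the ONNX graph and build a CUDA engine from it
    builder = trt.Builder(TRT_LOGGER)
    network = builder.create_network(
        1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
    parser = trt.OnnxParser(network, TRT_LOGGER)

    with open(onnx_path, 'rb') as f:
        if not parser.parse(f.read()):
            for i in range(parser.num_errors):
                print(parser.get_error(i))
            raise RuntimeError('Failed to parse ' + onnx_path)

    config = builder.create_builder_config()
    config.max_workspace_size = 1 << 30  # 1 GiB of build scratch space

    # input_shape is kept only to mirror the original call; the exported
    # ONNX graph already carries a fixed 1x3x224x224 input shape.
    return builder.build_engine(network, config)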