# ===== Example 1 =====
# ImageNet-style per-channel mean (RGB), baked into the graph as a constant
# input so the runtime does not have to subtract it on the host.
r, g, b = 123, 117, 104

params['mean'] = nd.array(np.array([r, g, b]).reshape([1, 3, 1, 1]))

# Convert the MXNet symbol + weights into an NNVM graph and parameter dict.
net, params = nnvm.frontend.from_mxnet(net, params)

# Dummy input, presumably used later for a smoke-test run of the module.
inputs = np.ones(shapes)

print("[*] Compile...")

# Apply the best autotvm schedules recorded for this model, then build the
# graph with full optimizations (opt_level=3).
with autotvm.apply_history_best('log/ssd-inceptionv3.log'):
    with compiler.build_config(opt_level=3):
        graph, lib, params = compiler.build(net,
                                            target, {
                                                "data": shapes,
                                                "mean": (1, 3, 1, 1)
                                            },
                                            params=params)

out = 'ssd-inceptionv3-512-det'

# Export the compiled operators as a shared library for deployment.
lib.export_library('so/{}.tvm.so'.format(out))

print('[*] Model is Compiled')
# ===== Example 2 =====
import argparse

# Command-line switch between the two supported compilation frontends.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-f", "--frontend",
    help="Frontend for compilation, nnvm or relay",
    type=str,
    default="nnvm")
args = parser.parse_args()

frontend = args.frontend
if frontend == "relay":
    # Relay path: import the MXNet graph, then build at opt_level=3.
    net, params = relay.frontend.from_mxnet(
        sym, {"data": dshape}, arg_params=arg_params, aux_params=aux_params)
    with relay.build_config(opt_level=3):
        graph, lib, params = relay.build(net, target, params=params)
elif frontend == "nnvm":
    # Legacy NNVM path.
    net, params = from_mxnet(sym, arg_params, aux_params)
    with compiler.build_config(opt_level=3):
        graph, lib, params = compiler.build(
            net, target, {"data": dshape}, params=params)
else:
    # Unrecognized frontend: show usage and exit.
    parser.print_help()
    parser.exit()

######################################################################
# Create TVM runtime and do inference

# Preprocess image: BGR uint8 (HWC) -> mean-centered float32 NCHW.
image = cv2.imread(test_image_path)
resized = cv2.resize(image, (dshape[2], dshape[3]))
# NOTE(review): cv2.resize takes (width, height); passing
# (dshape[2], dshape[3]) is only correct for square inputs — confirm.
rgb = resized[:, :, (2, 1, 0)].astype(np.float32)   # swap BGR -> RGB
rgb -= np.array([123, 117, 104])                    # subtract channel means
img_data = np.transpose(rgb, (2, 0, 1))[np.newaxis, ...]  # HWC->CHW + batch
# Build TVM runtime
# ===== Example 3 =====
# Unpack the model archive and the inference-symbol archive into `dir`.
# Context managers guarantee the zip handles are closed even if
# extraction raises, unlike the previous explicit open/close pairs.
with zipfile.ZipFile(model_file_path, 'r') as zip_ref:
    zip_ref.extractall(dir)
with zipfile.ZipFile(inference_symbol_path) as zip_ref:
    zip_ref.extractall(dir)

######################################################################
# Convert and compile model with NNVM for CPU.

# Load the serialized MXNet symbol graph for SSD-ResNet50 inference.
sym = mx.sym.load("%s/%s/ssd_resnet50_inference.json" % (dir, inference_symbol_folder))
# Load the epoch-0 checkpoint weights; the returned symbol is discarded.
_, arg_params, aux_params = load_checkpoint("%s/%s" % (dir, model_name), 0)
# Convert MXNet symbol + weights into an NNVM graph and parameter dict.
net, params = from_mxnet(sym, arg_params, aux_params)
with compiler.build_config(opt_level=3):
    graph, lib, params = compiler.build(net, target, {"data": dshape}, params=params)

######################################################################
# Create TVM runtime and do inference

# Read the test image (OpenCV yields BGR, HWC, uint8) and resize it
# to the network's spatial dimensions.
image = cv2.imread(test_image_path)
resized = cv2.resize(image, (dshape[2], dshape[3]))
# Reorder channels to RGB and centre with the per-channel mean.
chw = resized[:, :, (2, 1, 0)].astype(np.float32)
chw -= np.array([123, 117, 104])
chw = np.transpose(chw, (2, 0, 1))      # HWC -> CHW
img_data = chw[np.newaxis, ...]         # prepend the batch axis
# Build TVM runtime
m = graph_runtime.create(graph, lib, ctx)
m.set_input('data', tvm.nd.array(img_data.astype(dtype)))
m.set_input(**params)
# (truncated fragment — the call this "'model')" belonged to was lost in extraction)

# Per-channel RGB mean, stored as fp16 to match the fp16 build below.
r, g, b = 123, 117, 104

params['mean'] = nd.array(
    np.array([r, g, b]).astype(np.float16).reshape([1, 3, 1, 1]))

# Convert the MXNet symbol + weights into an NNVM graph and parameter dict.
net, params = nnvm.frontend.from_mxnet(net, params)

print("[*] Compile...")

with compiler.build_config(opt_level=3):
    # BUGFIX: every other build call in this file hands the converted
    # weights to compiler.build via params=...; omitting it here would
    # leave the exported module without pre-bound parameters.
    graph, lib, params = compiler.build(net,
                                        target, {
                                            "data": shapes,
                                            "mean": (1, 3, 1, 1)
                                        },
                                        params=params,
                                        dtype='float16')

out = 'ssd-inceptionv3-512-fp16-det'

# Export the compiled operators as a shared library for deployment.
lib.export_library('so/{}.tvm.so'.format(out))

print('[*] Model is Compiled')

# Runtime module bound to the freshly built graph.
m = graph_runtime.create(graph, lib, ctx)
# Input shapes for the standalone NMS graph: class scores, box offsets
# and anchor boxes produced upstream by the SSD detection network.
shapes = {
    'cls_prob': (1, 21, 5186),
    'loc_preds': (1, 20744),
    'anchor_boxes': (1, 5186, 4),
}

net = load_nms('model/deploy_ssd_inceptionv3_512-nms-symbol')

# No weights are loaded: the NMS symbol is purely structural.
net, params = nnvm.frontend.from_mxnet(net)

print("[*] Compile...")

with compiler.build_config(opt_level=3):
    graph, lib, params = compiler.build(net, target, shapes)

lib.export_library('so/{}.tvm.so'.format('ssd-inceptionv3-512-nms'))

print('[*] Model is Compiled')

m = graph_runtime.create(graph, lib, ctx)

# Feed zero tensors of the declared shapes as placeholder inputs.
# (.items() avoids the double dict lookup; dtype=np.float32 allocates
# float32 directly instead of building float64 and copying.)
for name, shape in shapes.items():
    m.set_input(name, tvm.nd.array(np.zeros(shape, dtype=np.float32)))

start = time()

print(graph)
# ===== Example 6 =====
target = tvm.target.cuda()

# Single NCHW input shape, shared by the build and the dummy run below.
shapes = (1, 3, 224, 224)

inputs = np.ones(shapes)

net, params, input_shape = load_model('test')  # load mxnet model

# Convert the MXNet symbol + weights into an NNVM graph and parameter dict.
net, params = nnvm.frontend.from_mxnet(net, params)

with autotvm.apply_history_best('test.log'):
    print("[*] Compile...")
    with compiler.build_config(opt_level=3):
        # Reuse `shapes` rather than re-typing the literal so the build
        # shape cannot drift from the dummy-input shape above.
        graph, lib, params = compiler.build(net,
                                            target, {"data": shapes},
                                            params=params)

m = graph_runtime.create(graph, lib, ctx)

# Smoke-test the compiled module with an all-ones input.
m.set_input('data', tvm.nd.array(inputs.astype(np.float32)))

m.set_input(**params)

m.run()

tvm_output = m.get_output(0)

save_tvm_params('test', params)

save_tvm_graph('test', graph)