Example #1
# Load the SSD Inception v3 checkpoint (epoch 215); the ResNet-50 variant is
# kept as a commented-out alternative.
#net, params = load_mxnet_model('deploy_ssd_resnet50_512-det', 0, 'model')
net, params = load_mxnet_model('deploy_ssd_inceptionv3_512-det', 215, 'model')

# Per-channel mean pixel values (RGB) used for input normalization
r, g, b = 123, 117, 104

# Store the mean as an extra parameter, reshaped for NCHW broadcasting
#params['mean'] = nd.array(np.array([r, g, b]).astype(np.uint8).reshape([1, 3, 1, 1]))
params['mean'] = nd.array(np.array([r, g, b]).reshape([1, 3, 1, 1]))

# Convert the MXNet symbol and parameters to an NNVM graph
net, params = nnvm.frontend.from_mxnet(net, params)

# Dummy input matching the expected data shape (`shapes` is defined earlier in the script)
inputs = np.ones(shapes)

print("[*] Compile...")

# Reuse the best autotvm tuning records from a previous tuning run, then build
with autotvm.apply_history_best('log/ssd-inceptionv3.log'):
    with compiler.build_config(opt_level=3):
        graph, lib, params = compiler.build(net, target,
                                            {"data": shapes, "mean": (1, 3, 1, 1)},
                                            params=params)
    #graph, lib, params = compiler.build(net, target, {"data": shapes, "mean" : (1, 3, 1, 1)}, params = params, dtype = dtypes)
    #graph, lib, params = compiler.build(net, target, {"data": shapes, "mean" : (1, 3, 1, 1)}, params = params, dtype = {'data' : 'uint8', 'mean' : 'uint8'})

# Base name for the compiled artifacts; other model variants are kept commented out
#out = 'ssd-mobilenetv2-680-det'
#out = 'ssd-resnetx50-512-det'
#out = 'ssd-resnet50-512-det'

out = 'ssd-inceptionv3-512-det'
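
# Saving the compiled artifacts under the `out` base name lets the detector be
# reloaded later without recompiling. Minimal sketch, not part of the original
# snippet, assuming the NNVM-era export API (Module.export_library, Graph.json,
# nnvm.compiler.save_param_dict):
lib.export_library('%s.so' % out)
with open('%s.json' % out, 'w') as f:
    f.write(graph.json())
with open('%s.params' % out, 'wb') as f:
    f.write(nnvm.compiler.save_param_dict(params))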
Example #2
# Download the inference symbol and extract both archives into the working directory
download(inference_symbol_url, inference_symbol_path)

with zipfile.ZipFile(model_file_path, 'r') as zip_ref:
    zip_ref.extractall(dir)
with zipfile.ZipFile(inference_symbol_path, 'r') as zip_ref:
    zip_ref.extractall(dir)

######################################################################
# Convert and compile model with NNVM for CPU.

sym = mx.sym.load("%s/%s/ssd_resnet50_inference.json" % (dir, inference_symbol_folder))
_, arg_params, aux_params = load_checkpoint("%s/%s" % (dir, model_name), 0)
net, params = from_mxnet(sym, arg_params, aux_params)
with compiler.build_config(opt_level=3):
    graph, lib, params = compiler.build(net, target, {"data": dshape}, params=params)

######################################################################
# Create TVM runtime and do inference

# Preprocess image: resize, convert BGR (OpenCV) to RGB, subtract the
# per-channel mean, and reorder to NCHW with a batch dimension
image = cv2.imread(test_image_path)
img_data = cv2.resize(image, (dshape[3], dshape[2]))  # cv2.resize expects (width, height)
img_data = img_data[:, :, (2, 1, 0)].astype(np.float32)
img_data -= np.array([123, 117, 104])
img_data = np.transpose(img_data, (2, 0, 1))
img_data = np.expand_dims(img_data, axis=0)
# Build TVM runtime
m = graph_runtime.create(graph, lib, ctx)
m.set_input('data', tvm.nd.array(img_data.astype(dtype)))
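
######################################################################
# Run the module and read back the raw detections. Minimal sketch, not part of
# the original snippet, assuming the standard graph_runtime API; MXNet SSD
# outputs one row per detection as (class_id, score, xmin, ymin, xmax, ymax).
m.set_input(**params)
m.run()
tvm_output = m.get_output(0)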