# Imports assumed for these tests (numpy plus the top-level sclblonnx helpers
# exercised below; xpb2 is the usual alias for the ONNX protobuf module):
import numpy as np
from onnx import onnx_ml_pb2 as xpb2
from sclblonnx import graph_from_file, run


def test_run():
    g = graph_from_file("files/add.onnx")
    example = {"x1": np.array([2]).astype(np.float32), "x2": np.array([5]).astype(np.float32)}
    result = run(g, inputs=example, outputs=["sum"])
    assert result[0] == 7, "Add output not correct."
    result = run(g, inputs="", outputs="sum")
    assert not result, "Model with this input should not run."
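# The test above relies on a fixture at files/add.onnx. A minimal sketch (not part of the
# original tests) of how such a two-input Add graph could be produced with the package-level
# sclblonnx helpers; the file name and value names simply mirror the test:
def _build_add_fixture(path="files/add.onnx"):
    import sclblonnx as so

    g = so.empty_graph()
    g = so.add_node(g, so.node("Add", inputs=["x1", "x2"], outputs=["sum"]))
    g = so.add_input(g, "x1", "FLOAT", [1])
    g = so.add_input(g, "x2", "FLOAT", [1])
    g = so.add_output(g, "sum", "FLOAT", [1])
    so.graph_to_file(g, path)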
def test_graph_from_file():
    g = graph_from_file("files/non-existing-file.onnx")
    assert not g, "Graph from file should return False for a non-existing file."
    g = graph_from_file("files/example01.onnx")
    assert type(g) is xpb2.GraphProto, "Graph from file failed to open file."
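# A minimal sketch (not part of the original tests) of the calling pattern the test above
# verifies: graph_from_file() returns False on failure and an xpb2.GraphProto on success,
# so calling code can guard before using the graph. The path reuses the test fixture name.
def _load_or_fail(path="files/example01.onnx"):
    g = graph_from_file(path)
    if not g:
        raise RuntimeError("Could not load ONNX graph from %s" % path)
    assert type(g) is xpb2.GraphProto
    return g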
import sclblonnx as so
import numpy as np
from PIL import Image

"""
EXAMPLE 3: Using a previously exported PyTorch model

Here we open an existing, pre-trained ResNet model (trained on the CIFAR-10 data).
For training details see:
https://github.com/scailable/sclbl-tutorials/tree/master/sclbl-pytorch-onnx
Here we simply evaluate one specific image.
"""

# Retrieve the graph from the stored .onnx model:
g = so.graph_from_file("onnx/cifar10-resnet20.onnx")

# Clean, check, and display (this model passes all the checks).
g = so.clean(g)
so.check(g)
so.display(g)


# To open an image we write a small utility function using Pillow to transform an image
# into a numpy array.
def process_image(image_path):
    # Load the image
    img = Image.open(image_path)

    # Get the dimensions of the image
    width, height = img.size
# Test the resize graph:
large_input = {"large_image": large_img.astype(np.int32)}
result = so.run(sg1, inputs=large_input, outputs=["small_image"])

# Round the values in the array and cast to 8-bit integers to store the result back as a JPG:
img_arr = np.array(np.round(result[0]), dtype=np.uint8)
out = Image.fromarray(img_arr, mode="RGB")
out.save("images/1-Resized.JPG")  # Yes, this works.

# Store the resize ONNX:
so.graph_to_file(sg1, "onnx/resize-image-450x600-300x400.onnx")

# So, now we have a working (sub)graph that resizes an image (which obviously we can
# just load next time).

# Now, we open up the original image processing graph:
sg2 = so.graph_from_file("onnx/check-container.onnx")

# The outputs of sg1 and the inputs of sg2 need to match; let's examine them:
so.list_outputs(sg1)
so.list_inputs(sg2)

# Merge the two graphs; the listed outputs are matched to the listed inputs in order of appearance:
g = so.merge(sg1, sg2, outputs=["small_image"], inputs=["in"])
so.check(g)
so.display(g)

# And now it works with the large image:
result = so.run(g, inputs=large_input, outputs=["result"])

# Print the result:
if result[0]:
    print("The container in the large image is empty.")
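# As noted above, the stored resize (sub)graph can simply be reloaded on a later run instead
# of being rebuilt. A minimal sketch of that round trip (not in the original example),
# reusing the file name and the input/output names from the code above:
sg1_reloaded = so.graph_from_file("onnx/resize-image-450x600-300x400.onnx")
reloaded_result = so.run(sg1_reloaded, inputs=large_input, outputs=["small_image"])
print("Reloaded resize graph produced an image of shape:", np.array(reloaded_result[0]).shape)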
dnn_model.compile(loss="mean_absolute_error", optimizer=tf.keras.optimizers.SGD())

# Train the model (use .predict() for local predictions):
history = dnn_model.fit(X, y, validation_split=0.2, verbose=0, epochs=300)

# Save the model (note: the convert_keras() function is undergoing change across
# versions of tf / onnx).
# You might need: tf.compat.v1.disable_eager_execution()
# or use the tf2onnx tool at https://github.com/onnx/tensorflow-onnx
onnx_model = keras2onnx.convert_keras(dnn_model, dnn_model.name)
keras2onnx.save_model(onnx_model, "onnx/tf-keras-dynamic.onnx")

# Load the model using sclblonnx:
g = so.graph_from_file("onnx/tf-keras-dynamic.onnx")
# so.display(g)

# check() and clean():
so.check(g)
g = so.clean(g)  # Fails due to dynamic size

# Note: while this model passes check(), clean() produces a warning message because of the
# dynamic input (Nx10). This occurs because the training data is N rows long; for inference,
# however, we would like the input to be 1x10.
# Let's fix this by changing the input to a static size:
so.list_inputs(g)
g = so.replace_input(g, "input_1", "FLOAT", [1, 10])

# And do the same for the output:
g = so.replace_output(g, "output_1", "FLOAT", [1, 1])

# Check this one...
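# To follow up on the note above: a minimal sketch (not from the original example) of
# re-checking the now-static graph and running it on a single 1x10 input, using the
# input/output names set above:
import numpy as np  # assumed already imported earlier in this example

so.check(g)
example = {"input_1": np.random.rand(1, 10).astype(np.float32)}
result = so.run(g, inputs=example, outputs=["output_1"])
print("Prediction for one example row:", result[0])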