def test_run():
    g = graph_from_file("files/add.onnx")
    example = {
        "x1": np.array([2]).astype(np.float32),
        "x2": np.array([5]).astype(np.float32)
    }
    result = run(g, inputs=example, outputs=["sum"])
    assert result[0] == 7, "Add output not correct."

    result = run(g, inputs="", outputs="sum")
    assert not result, "Model with this input should not run."
def test_concat():
    """ Functional test for concat """
    g1 = empty_graph("G1")
    n1 = node('Add', inputs=['x1_1', 'x1_2'], outputs=['sum_1'], name="node_name")
    g1 = add_input(g1, 'x1_1', "FLOAT", [1])
    g1 = add_input(g1, 'x1_2', "FLOAT", [1])
    g1 = add_output(g1, 'sum_1', "FLOAT", [1])
    g1 = add_node(g1, n1)

    g2 = empty_graph("G2")
    n2 = node('Add', inputs=['x2_1', 'x2_2'], outputs=['sum_2'], name="node_name")
    g2 = add_input(g2, 'x2_2', "FLOAT", [1])
    g2 = add_output(g2, 'sum_2', "FLOAT", [1])
    g2 = add_node(g2, n2)

    g = concat(g1, g2, False, True, edge_match=[("x1_2", "x2_1")])

    data = {
        "x1_1": np.array([2]).astype(np.float32),
        "x1_2": np.array([5]).astype(np.float32),
        "x2_2": np.array([5]).astype(np.float32)
    }
    result = run(g, inputs=data, outputs=["sum_1", "sum_2"])
    assert result[0], "Sum of 2 and 5 should be 7. Concat failed."
def test_merge():
    """ Functional test of merge(). """
    # Subgraph 1
    sg1 = empty_graph("Graph 1")
    n1 = node('Add', inputs=['x1', 'x2'], outputs=['sum'])
    sg1 = add_node(sg1, n1)
    sg1 = add_input(sg1, 'x1', "FLOAT", [1])
    sg1 = add_input(sg1, 'x2', "FLOAT", [1])
    sg1 = add_output(sg1, 'sum', "FLOAT", [1])

    # Subgraph 2
    sg2 = empty_graph("Graph 2")
    sg2 = add_constant(sg2, "const", np.array([7]), "FLOAT")
    n2 = node("Equal", inputs=['sum', 'const'], outputs=['equal'])
    sg2 = add_node(sg2, n2)
    sg2 = add_input(sg2, 'sum', "FLOAT", [1])
    sg2 = add_output(sg2, 'equal', "BOOL", [1])

    g = merge(sg1, sg2, outputs=["sum"], inputs=["sum"])

    data = {
        "x1": np.array([2]).astype(np.float32),
        "x2": np.array([5]).astype(np.float32)
    }
    result = run(g, inputs=data, outputs=["equal"])
    assert result[0], "Sum of 2 and 5 should be equal to constant 7. Merge failed."
def test_add_constant():
    # Simple add graph
    g = empty_graph()
    n1 = node('Add', inputs=['x1', 'x2'], outputs=['sum'])
    g = add_node(g, n1)

    # Add the two constants
    g = add_constant(g, 'x1', np.array([1]), "INT64")
    g = add_constant(g, 'x2', np.array([5]), "INT64")

    # Output:
    g = add_output(g, 'sum', "INT64", [1])

    # This works, but seems to fail for other data types...
    result = run(g, inputs={}, outputs=["sum"])
    assert result[0] == 6, "Add constant failed."
    # todo(McK): Does not work for INT16 / INT8, check?
def test_split():
    """ Functional test for split """
    g1 = empty_graph("G1")
    n1 = node('Add', inputs=['x1_1', 'x1_2'], outputs=['sum_1'], name="n1")
    g1 = add_input(g1, 'x1_1', "FLOAT", [1])
    g1 = add_input(g1, 'x1_2', "FLOAT", [1])
    g1 = add_output(g1, 'sum_1', "FLOAT", [1])
    g1 = add_node(g1, n1)

    g2 = empty_graph("G2")
    n2 = node('Add', inputs=['x2_1', 'x2_2'], outputs=['sum_2'], name="n2")
    g2 = add_input(g2, 'x2_1', "FLOAT", [1])
    g2 = add_input(g2, 'x2_2', "FLOAT", [1])
    g2 = add_output(g2, 'sum_2', "FLOAT", [1])
    g2 = add_node(g2, n2)

    g3 = empty_graph("G3")
    n3 = node('Add', inputs=['x3_1', 'x3_2'], outputs=['sum_3'], name="n3")
    g3 = add_input(g3, 'x3_1', "FLOAT", [1])
    g3 = add_input(g3, 'x3_2', "FLOAT", [1])
    g3 = add_output(g3, 'sum_3', "FLOAT", [1])
    g3 = add_node(g3, n3)

    g = split(g1, g2, g3, cg1_match=[("sum_1", "x2_2")], cg2_match=[("sum_1", "x3_1")])

    data = {
        "x1_1": np.array([1]).astype(np.float32),
        "x1_2": np.array([2]).astype(np.float32),
        "x2_1": np.array([3]).astype(np.float32),
        "x3_2": np.array([4]).astype(np.float32),
    }
    result = run(g, inputs=data, outputs=["sum_2", "sum_3"])
    assert result[0], "Sum of 1, 2, and 3 should be 6. Split failed."
# Add output:
g = so.add_output(g, "result", "BOOL", [1])

# After which it passes all the checks
so.check(g)

# Let's inspect:
so.display(g)

# Let's clean:
g = so.clean(g)

# Let's try it out for the first image:
img_data = np.array(Image.open("images/1.JPG"), dtype=np.int32)
example = {"in": img_data.astype(np.int32)}
result = so.run(g, inputs=example, outputs=['result'])

# Print the result
if result[0]:
    print("The container is empty.")
else:
    print("The container is filled.")

# Store the graph
so.graph_to_file(g, "onnx/check-container.onnx")

'''
Additional usage of sclblpy for upload and evaluation:

# Import sclblpy
import sclblpy as sp
"FLOAT") # Note the empty fields for roi and scales. e2 = so.constant("scales", np.array([]), "FLOAT") c1 = so.constant("size", np.array([300, 400, 3]), "INT64") n1 = so.node("Resize", inputs=['large_image', 'roi', 'scales', 'size'], outputs=['small_image']) sg1 = so.add_nodes(sg1, [e1, e2, c1, n1]) sg1 = so.add_output(sg1, "small_image", "INT32", [300, 400, 3]) # Check and clean sg1 = so.clean(sg1) so.check(sg1) # Test the resize graph: large_input = {"large_image": large_img.astype(np.int32)} result = so.run(sg1, inputs=large_input, outputs=['small_image']) # Round values in array and cast as 8-bit integer to store back as JPG: img_arr = np.array(np.round(result[0]), dtype=np.uint8) out = Image.fromarray(img_arr, mode="RGB") out.save("images/1-Resized.JPG") # Yes, this works. # Store the resize onnx: so.graph_to_file(sg1, "onnx/resize-image-450x600-300x400.onnx") # So, now we have a working (sub)graph that resizes an image (which obviously we can just load next time) # Now, we open up the original image processing graph sg2 = so.graph_from_file("onnx/check-container.onnx") # The outputs of sg1 and the inputs of sg2 need to match; lets examine them so.list_outputs(sg1)
# Now, a few tricks to sanitize the graph which are always useful.
# so.clean() provides a lossless reduction of the graph. If successful, the cleaned graph is returned.
g = so.clean(g)

# so.display() tries to open the graph using Netron to inspect it. This works on most systems if Netron is installed.
# Get Netron at https://github.com/lutzroeder/netron
so.display(g)

# Now, use the default ONNX runtime to do a test run of the graph.
# Note that the input dimensions and types need to match the specification of the graph.
# run() returns all the outputs named in the outputs list.
example = {
    "x1": np.array([1.2]).astype(np.float32),
    "x2": np.array([2.5]).astype(np.float32)
}
result = so.run(g, inputs=example, outputs=["sum"])
print(result)

# We can easily store the graph to a file for upload at http://admin.sclbl.net:
so.graph_to_file(g, "onnx/add-scalars.onnx")

# Or, we can upload it to Scailable using the sclblpy package.
# See the sclblpy package docs for more details: https://pypi.org/project/sclblpy/
# sp.upload_onnx("onnx/add-scalars.onnx", docs={"name": "Example_01: Add", "documentation": "Empty.."})

# so.sclbl_input(inputs) converts an example input to the input that can be used on the device:
example_input = so.sclbl_input(example, "pb")
print(example_input)

# You can use the example to set up your own REST call:
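# Below is a minimal, hypothetical sketch of such a REST call using the `requests` package.
# The task URL (and the <your-cfid> placeholder in it) is an assumption for illustration only,
# not a value taken from this repository; substitute the endpoint of your own deployment.
# import requests
#
# task_url = "https://example.sclbl.net/task/<your-cfid>"  # placeholder endpoint, replace with your own
# response = requests.post(task_url, data=example_input)   # example_input as generated by so.sclbl_input() above
# print(response.text)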
    # Normalize based on the preset mean and standard deviation
    img[0] = (img[0] - 0.4914) / 0.2023
    img[1] = (img[1] - 0.4822) / 0.1994
    img[2] = (img[2] - 0.4465) / 0.2010

    # Add a fourth dimension to the beginning to indicate batch size
    # img = img[np.newaxis, :].astype(np.float16)
    img = img[np.newaxis, :]
    return img


# Open the image and execute the graph:
img_data = process_image("images/horse5.png").astype(np.float32)
example = {"input": img_data}
out = so.run(g, inputs=example, outputs=['output'])

# Pretty printing
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
print("The ONNX model predicts the image is a", classes[np.argmax(out[0])] + ".")

# And store the file (since we did clean it):
so.graph_to_file(g, "onnx/cifar10-resnet20-clean.onnx")

'''
Additional usage of sclblpy for upload and evaluation:

# Import sclblpy
import sclblpy as sp
    img = img[np.newaxis, :]
    return img


# Open the image
img_input = process_image("images/horse5.png").astype(np.float32)

# Load the cifar model:
g = so.graph_from_file("onnx/cifar10-resnet20-clean.onnx")

# Check its output; we see the name, type, and dimensions
so.list_outputs(g)

# Run the model to see the outputs:
result = so.run(g, inputs={"input": img_input}, outputs=["output"])
print(result)

# Add an ArgMax node to find the highest output in the output vector.
# Note the keepdims and axis; the output of the ArgMax node should align with the defined output.
n1 = so.node('ArgMax', inputs=['output'], outputs=['argmax'], keepdims=0, axis=1)
g = so.add_node(g, n1)  # Note, this adds the node, but the output is still "output"

g = so.delete_output(g, "output")  # Remove the old output
g = so.add_output(g, 'argmax', "INT64", [1])  # Add the new output (for testing only)
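# A brief sketch (not part of the original example) of how the modified graph could be tested:
# run it again, now requesting the new 'argmax' output, and map the returned index back to the
# cifar-10 class labels. The `classes` tuple is repeated here so the snippet is self-contained.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
out = so.run(g, inputs={"input": img_input}, outputs=['argmax'])
print("The ArgMax of the model output is", out[0][0], "which maps to class", classes[int(out[0][0])] + ".")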