import numpy as np
import onnx
import pytest
from onnx import helper, TensorProto
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE

# Harness helpers (make_onnx_graph, run_onnx, ONNXConfig, Compiler, Frontend,
# assert_almost_equal) are provided by the suite's shared utility module,
# which is not part of this excerpt.


def test_arith_binop(test_dir, backend, Op, a_shape, b_shape, dtype):
    onnx_to_np_op = {"Add": np.add, "Sub": np.subtract, "Mul": np.multiply}
    a = np.random.randn(*a_shape).astype(dtype)
    b = np.random.randn(*b_shape).astype(dtype)
    out = onnx_to_np_op[Op](a, b)
    node = helper.make_node(
        Op,
        inputs=["a", "b"],
        outputs=["out"],
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[b],
        tensor_names=["b"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
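# The @pytest.mark.parametrize decorators that drive these tests are stripped
# in this excerpt. A hypothetical parametrization for test_arith_binop might
# look like the following (shapes and dtypes here are illustrative only, not
# the suite's actual values):
#
# @pytest.mark.parametrize("Op", ["Add", "Sub", "Mul"])
# @pytest.mark.parametrize("a_shape, b_shape", [((4, 4), (4, 4)), ((2, 3), (3,))])
# @pytest.mark.parametrize("dtype", [np.float32])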
def test_non_linear(test_dir, backend, Op, a_shape, dtype):
    a = np.random.randn(*a_shape).astype(dtype)
    if Op == "Relu":
        out = np.clip(a, 0, np.inf)
    elif Op == "Sigmoid":
        out = 1.0 / (1.0 + np.exp(np.negative(a)))
    elif Op == "Tanh":
        out = np.tanh(a)
    elif Op == "Sqrt":
        out = np.sqrt(a)
    node = helper.make_node(
        Op,
        inputs=["a"],
        outputs=["out"],
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
def test_equal(test_dir, backend, a_val, b_val, dtype):
    Op = "Equal"
    a = np.array(a_val).astype(dtype)
    b = np.array(b_val).astype(dtype)
    out = np.equal(a, b)
    node = helper.make_node(
        Op,
        inputs=["a", "b"],
        outputs=["out"],
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[b],
        tensor_names=["b"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
def test_batch_norm(test_dir, backend, a_shape, scale_val, bias_val, mean_val, var_val, dtype):
    Op = "BatchNormalization"
    a = np.random.randn(*a_shape).astype(dtype)
    scale = np.array(scale_val).astype(dtype)
    bias = np.array(bias_val).astype(dtype)
    mean = np.array(mean_val).astype(dtype)
    var = np.array(var_val).astype(dtype)
    out = _batchnorm_test_mode(a, scale, bias, mean, var).astype(dtype)
    node = onnx.helper.make_node(
        Op,
        inputs=["a", "scale", "bias", "mean", "var"],
        outputs=["out"],
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[scale, bias, mean, var],
        tensor_names=["scale", "bias", "mean", "var"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
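# _batchnorm_test_mode is not defined in this excerpt. A minimal sketch,
# assuming it mirrors the inference-mode reference implementation used in
# ONNX's own backend tests (per-channel normalize, scale, and shift):


def _batchnorm_test_mode(x, s, bias, mean, var, epsilon=1e-5):
    # Reshape the per-channel parameters to (C, 1, ..., 1) so they broadcast
    # across the spatial dimensions of x, which is laid out as (N, C, ...).
    dims_x = len(x.shape)
    dim_ones = (1,) * (dims_x - 2)
    s = s.reshape(-1, *dim_ones)
    bias = bias.reshape(-1, *dim_ones)
    mean = mean.reshape(-1, *dim_ones)
    var = var.reshape(-1, *dim_ones)
    return s * (x - mean) / np.sqrt(var + epsilon) + bias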
def test_global_avgpool(test_dir, backend, a_shape, dtype):
    a = dtype(np.random.randn(*a_shape))
    out = np.mean(a, axis=tuple(range(2, np.ndim(a))), keepdims=True)
    Op = "GlobalAveragePool"
    node = helper.make_node(
        Op,
        inputs=["a"],
        outputs=["out"],
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
def test_uop(test_dir, backend, Op, a_shape, dtype):
    a = dtype(np.random.randn(*a_shape))
    if Op == "Neg":
        out = np.negative(a)
    elif Op == "Floor":
        out = np.floor(a)
    elif Op == "Identity":
        out = a
    node = helper.make_node(
        Op,
        inputs=["a"],
        outputs=["out"],
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
def test_shape(test_dir, backend, a_shape, start, end, dtype):
    Op = "Shape"
    a = dtype(np.random.randn(*a_shape))
    out = np.array(a.shape[start:end]).astype(np.int64)
    kwargs = {}
    if start is not None:
        kwargs["start"] = start
    if end is not None:
        kwargs["end"] = end
    node = onnx.helper.make_node(Op, inputs=["a"], outputs=["out"], **kwargs)
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
def test_reducemean(test_dir, backend, a_shape, axes, keepdims, dtype):
    Op = "ReduceMean"
    a = dtype(np.random.randn(*a_shape))
    out = np.mean(
        a, axis=(None if axes is None else tuple(axes)), keepdims=keepdims == 1
    )
    kwargs = {"name": Op, "inputs": ["a"], "outputs": ["out"], "keepdims": keepdims}
    if axes is not None:
        kwargs["axes"] = axes
    node = helper.make_node(Op, **kwargs)
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
def test_conv(test_dir, backend, a_shape, kernel_shape, pads, strides, output_shape, group, dtype):
    Op = "Conv"
    if len(a_shape) == 4:
        version = 2  # 2d
    elif len(a_shape) == 5:
        version = 3  # 3d
    if version == 3 and backend in ["2PC_HE", "2PC_OT"]:
        pytest.skip("[conv3d] Missing Support in SCI")
    a = np.random.randn(*a_shape).astype(dtype)
    kernel = np.random.randn(*kernel_shape).astype(dtype)
    # Only need this for its shape
    out = np.zeros(output_shape).astype(dtype)
    hw_kernel_shape = kernel_shape[-version:]
    node = onnx.helper.make_node(
        Op,
        inputs=["a", "kernel"],
        outputs=["output"],
        kernel_shape=hw_kernel_shape,
        pads=pads,
        strides=strides,
        group=group,
        # Remaining attributes (e.g. dilations) take their ONNX defaults.
    )
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[kernel],
        tensor_names=["kernel"],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    config.config["scale"] = 12
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
def test_gemm(test_dir, backend, a_shape, b_shape, c_shape, alpha, beta, transA, transB, dtype):
    Op = "Gemm"
    a = np.random.randn(*a_shape).astype(dtype)
    b = np.random.randn(*b_shape).astype(dtype)
    kwargs = {"inputs": ["a", "b"], "outputs": ["out"]}
    npkwargs = {}
    if c_shape is not None:
        kwargs["inputs"].append("c")
        c = dtype(np.random.randn(*c_shape))
        npkwargs["C"] = c
    if alpha is not None:
        kwargs["alpha"] = alpha
        npkwargs["alpha"] = alpha
    if beta is not None:
        kwargs["beta"] = beta
        npkwargs["beta"] = beta
    if transA == 1:
        kwargs["transA"] = 1
        npkwargs["transA"] = 1
    if transB == 1:
        kwargs["transB"] = 1
        npkwargs["transB"] = 1
    out = gemm_reference_implementation(a, b, **npkwargs)
    node = onnx.helper.make_node(Op, **kwargs)
    kwargs = {
        "inputs": [a],
        "outputs": [out],
        "tensors": [b],
        "tensor_names": ["b"],
        "name": Op + "_test",
    }
    if c_shape is not None:
        kwargs["tensors"].append(c)
        kwargs["tensor_names"].append("c")
    graph = make_onnx_graph(node, **kwargs)
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
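# gemm_reference_implementation is not defined in this excerpt. A minimal
# sketch, assuming it follows the reference used in ONNX's backend tests
# (Y = alpha * A' @ B' + beta * C, with optional transposes on A and B):


def gemm_reference_implementation(A, B, C=None, alpha=1.0, beta=1.0, transA=0, transB=0):
    A = A if transA == 0 else A.T
    B = B if transB == 0 else B.T
    # A scalar zero broadcasts correctly when no bias tensor is supplied.
    C = C if C is not None else np.array(0)
    return alpha * np.dot(A, B) + beta * C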
def test_constant(test_dir, backend, shape, attribute):
    Op = "Constant"
    kwargs = {}
    if attribute == "value":
        values = np.random.randn(*shape).astype(np.float32)
        kwargs[attribute] = onnx.helper.make_tensor(
            name="const_tensor",
            data_type=onnx.TensorProto.FLOAT,
            dims=values.shape,
            vals=values.flatten().astype(float),
        )
    elif attribute == "value_float":
        values = np.random.randn(1).astype(np.float32)
        kwargs[attribute] = values[0]
    elif attribute == "value_floats":
        values = np.random.randn(*shape).astype(np.float32)
        kwargs[attribute] = values.flatten().astype(float)
    elif attribute == "value_int":
        values = np.array(np.random.randint(-(2 ** 32 - 1), 2 ** 32 - 1)).astype(
            np.int64
        )
        kwargs[attribute] = int(values)
    elif attribute == "value_ints":
        values = np.random.randint(-(2 ** 32 - 1), 2 ** 32 - 1, shape).astype(np.int32)
        kwargs[attribute] = values.flatten().astype(int)
    kwargs["inputs"] = []
    kwargs["outputs"] = ["values"]
    node = helper.make_node(Op, **kwargs)
    graph = make_onnx_graph(
        node,
        inputs=[],
        outputs=[values],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
def test_maxpool(
    test_dir,
    backend,
    a_shape,
    kernel_shape,
    pads,
    strides,
    auto_pad,
    output_shape,
    dtype,
):
    Op = "MaxPool"
    a = np.random.randn(*a_shape).astype(dtype)
    # Only need this for its shape
    out = np.zeros(output_shape).astype(dtype)
    kwargs = {
        "inputs": ["a"],
        "outputs": ["output"],
        "kernel_shape": kernel_shape,
        "strides": strides,
    }
    if auto_pad == "NOTSET":
        kwargs["pads"] = pads
    else:
        kwargs["auto_pad"] = auto_pad
    node = onnx.helper.make_node(Op, **kwargs)
    graph = make_onnx_graph(
        node,
        inputs=[a],
        outputs=[out],
        tensors=[],
        tensor_names=[],
        name=Op + "_test",
    )
    expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return
def test_matmul(test_dir, backend, a_shape, b_shape, bisModel, dtype):
    if backend == "2PC_HE" and a_shape[0] != 1:
        pytest.skip("HE only supports vector matrix multiplication")
    Op = "MatMul"
    a = np.random.randn(*a_shape).astype(dtype)
    b = np.random.randn(*b_shape).astype(dtype)
    out = np.matmul(a, b)
    node = onnx.helper.make_node(
        Op,
        inputs=["a", "b"],
        outputs=["out"],
    )
    if not bisModel:
        graph = make_onnx_graph(
            node,
            inputs=[a, b],
            outputs=[out],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [a, b])
    else:
        graph = make_onnx_graph(
            node,
            inputs=[a],
            outputs=[out],
            tensors=[b],
            tensor_names=["b"],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [a])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    if not bisModel:
        mpc_output = compiler.compile_and_run([a, b])
    else:
        mpc_output = compiler.compile_and_run([a])
    assert_almost_equal(model_output=expected_output, mpc_tensor=mpc_output, precision=2)
    return
def test_cast(test_dir, backend, from_type, to_type, compile_time):
    Op = "Cast"
    shape = (3, 4)
    if from_type != "STRING":
        input = np.random.random_sample(shape).astype(
            TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, from_type)]
        )
        if to_type == "STRING":
            # Convert the input to str, then give it object dtype for script generation.
            ss = []
            for i in input.flatten():
                s = str(i).encode("utf-8")
                su = s.decode("utf-8")
                ss.append(su)
            output = np.array(ss).astype(object).reshape([3, 4])
        else:
            output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
    else:
        input = np.array(
            [
                "0.47892547",
                "0.48033667",
                "0.49968487",
                "0.81910545",
                "0.47031248",
                "0.816468",
                "0.21087195",
                "0.7229038",
                "NaN",
                "INF",
                "+INF",
                "-INF",
            ],
            dtype=np.dtype(object),
        ).reshape([3, 4])
        output = input.astype(TENSOR_TYPE_TO_NP_TYPE[getattr(TensorProto, to_type)])
    node = onnx.helper.make_node(
        Op,
        inputs=["input"],
        outputs=["output"],
        to=getattr(TensorProto, to_type),
    )
    if compile_time:
        graph = make_onnx_graph(
            node,
            inputs=[],
            outputs=[output],
            tensors=[input],
            tensor_names=["input"],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [])
    else:
        graph = make_onnx_graph(
            node,
            inputs=[input],
            outputs=[output],
            tensors=[],
            tensor_names=[],
            name=Op + "_test",
        )
        expected_output = run_onnx(graph, [input])
    config = ONNXConfig(backend).parse_io(graph)
    compiler = Compiler(graph, config, test_dir, Frontend.ONNX)
    if compile_time:
        mpc_output = compiler.compile_and_run([])
    else:
        mpc_output = compiler.compile_and_run([input])
    assert_almost_equal(
        model_output=expected_output, mpc_tensor=mpc_output, precision=2
    )
    return