def test_penalty_update(self):
    x = numpy.random.randn(10, 1).astype(numpy.float32)

    def fct(x):
        return numpy.sign(x) * 0.1 + (x * 0.9 * 2)

    exp_loss = x - fct(x)
    onx = function_onnx_graph(
        'update_penalty_elastic_error', target_opset=get_max_opset(),
        dtype=numpy.float32, l1=0.1, l2=0.9)
    oinf = OnnxInference(onx)
    got = oinf.run({'X': x})
    self.assertEqualArray(exp_loss, got['Y'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    got = sess.run(None, {'X': x})
    self.assertEqualArray(exp_loss, got[0], decimal=5)
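
# Added sketch (not in the original file): a pure numpy check that fct in
# test_penalty_update is the derivative of the elastic penalty
# p(x) = l1 * |x| + l2 * x**2, i.e. dp/dx = l1 * sign(x) + 2 * l2 * x,
# verified here with a central finite difference. The method name is new.
def test_penalty_update_gradient_sketch(self):
    # keep x away from 0 so the |x| term is differentiable at every sample
    x = numpy.abs(numpy.random.randn(10, 1)).astype(numpy.float64) + 0.5
    l1, l2 = 0.1, 0.9

    def penalty(x):
        return l1 * numpy.abs(x) + l2 * x ** 2

    eps = 1e-6
    num_grad = (penalty(x + eps) - penalty(x - eps)) / (2 * eps)
    ana_grad = numpy.sign(x) * l1 + x * l2 * 2
    self.assertEqualArray(ana_grad, num_grad, decimal=4)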

def test_onnx_rewrite_operator(self):
    opset = get_max_opset()
    node1 = OnnxReciprocal('X', output_names=['Y'], op_version=opset)
    onx1 = node1.to_onnx(
        inputs={'X': FloatTensorType()},
        outputs={'Y': FloatTensorType()},
        target_opset=opset)
    onx1.graph.name = "jjj"
    oinf1 = OnnxInference(onx1)

    node2 = OnnxDiv(numpy.array([1], dtype=numpy.float32), 'X',
                    output_names=['Y'], op_version=opset)
    onx2 = node2.to_onnx(
        inputs={'X': FloatTensorType()},
        outputs={'Y': FloatTensorType()},
        target_opset=opset)
    oinf2 = OnnxInference(onx2)

    X = numpy.array([[5, 6]], dtype=numpy.float32)
    y1 = oinf1.run({'X': X})['Y']
    y2 = oinf2.run({'X': X})['Y']
    self.assertEqualArray(y1, y2)

    onx3 = onnx_rewrite_operator(onx1, 'Reciprocal', onx2)
    self.assertNotIn('Reciprocal', str(onx3))
    oinf3 = OnnxInference(onx3)
    y3 = oinf3.run({'X': X})['Y']
    self.assertEqualArray(y1, y3)
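
# Added sketch (not in the original suite): the same rewriting pattern
# applied to another operator, replacing Sqrt(X) by Pow(X, 0.5). OnnxSqrt
# and OnnxPow are assumed to be available from skl2onnx.algebra.onnx_ops
# like the operators used above; the method name is new.
def test_onnx_rewrite_operator_sqrt_sketch(self):
    from skl2onnx.algebra.onnx_ops import OnnxPow, OnnxSqrt
    opset = get_max_opset()
    node1 = OnnxSqrt('X', output_names=['Y'], op_version=opset)
    onx1 = node1.to_onnx(
        inputs={'X': FloatTensorType()},
        outputs={'Y': FloatTensorType()},
        target_opset=opset)
    node2 = OnnxPow('X', numpy.array([0.5], dtype=numpy.float32),
                    output_names=['Y'], op_version=opset)
    onx2 = node2.to_onnx(
        inputs={'X': FloatTensorType()},
        outputs={'Y': FloatTensorType()},
        target_opset=opset)
    onx3 = onnx_rewrite_operator(onx1, 'Sqrt', onx2)
    X = numpy.array([[4, 9]], dtype=numpy.float32)
    y1 = OnnxInference(onx1).run({'X': X})['Y']
    y3 = OnnxInference(onx3).run({'X': X})['Y']
    self.assertEqualArray(y1, y3, decimal=5)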

def test_penalty_3w(self):
    loss = numpy.random.randn(1, 1).astype(numpy.float32)
    w1 = numpy.random.randn(10, 1).astype(numpy.float32)
    w2 = numpy.random.randn(5, 1).astype(numpy.float32)

    def fct(x):
        return numpy.abs(x).sum() * 0.1 + (x ** 2).sum() * 0.9

    exp_loss = loss + fct(w1) + fct(w2)
    onx = function_onnx_graph(
        'n_penalty_elastic_error', target_opset=get_max_opset(),
        dtype=numpy.float32, n_tensors=2,
        l1_weight=0.1, l2_weight=0.9, weight_name='weight')
    oinf = OnnxInference(onx)
    got = oinf.run({'loss': loss, 'W0': w1, 'W1': w2})
    self.assertEqualArray(exp_loss.reshape((-1, )), got['Y'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    got = sess.run(None, {'loss': loss, 'W0': w1, 'W1': w2})
    self.assertEqualArray(exp_loss.reshape((-1, )), got[0], decimal=5)
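
# Added sketch (not in the original file): the two-tensor penalty checked
# in test_penalty_3w sums the elementwise elastic penalty over every
# coefficient, so it equals the penalty of the stacked weights. The method
# name is new.
def test_penalty_3w_stacked_sketch(self):
    def fct(x):
        return numpy.abs(x).sum() * 0.1 + (x ** 2).sum() * 0.9

    w1 = numpy.random.randn(10, 1).astype(numpy.float32)
    w2 = numpy.random.randn(5, 1).astype(numpy.float32)
    stacked = numpy.vstack([w1, w2])
    self.assertAlmostEqual(
        float(fct(w1) + fct(w2)), float(fct(stacked)), delta=1e-3)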

def common_check(self, name, fct, weight_name=None, output_name='Y'):
    onx = function_onnx_graph(
        name, target_opset=get_max_opset(),
        dtype=numpy.float32, weight_name=weight_name)
    expected = numpy.random.randn(10, 1).astype(numpy.float32)
    predicted = numpy.random.randn(10, 1).astype(numpy.float32)
    w = numpy.random.rand(10).astype(numpy.float32)
    if weight_name is None:
        fin = fct(expected, predicted)
    else:
        fin = fct(expected, predicted, w)

    oinf = OnnxInference(onx)
    if weight_name is None:
        got = oinf.run({'X1': expected, 'X2': predicted})
    else:
        got = oinf.run({'X1': expected, 'X2': predicted, 'weight': w})
    self.assertEqualArray(fin, got[output_name], decimal=5)
    if weight_name is not None:
        # when the weight input is omitted, the graph must default to 1
        got = oinf.run({'X1': expected, 'X2': predicted})
        fin1 = fct(expected, predicted,
                   numpy.array([1], dtype=expected.dtype))
        self.assertEqualArray(fin1, got[output_name], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    if weight_name is None:
        got = sess.run(None, {'X1': expected, 'X2': predicted})
    else:
        got = sess.run(
            None, {'X1': expected, 'X2': predicted, 'weight': w})
    self.assertEqualArray(fin, got[0], decimal=5)
    if weight_name is not None:
        # same default-weight check with onnxruntime
        got = sess.run(None, {'X1': expected, 'X2': predicted})
        fin1 = fct(expected, predicted,
                   numpy.array([1], dtype=expected.dtype))
        self.assertEqualArray(fin1, got[0], decimal=5)

def test_251(self):
    onx = function_onnx_graph(
        "grad_loss_square_error", target_opset=get_max_opset(),
        dtype=numpy.float32, weight_name='weight')
    expected = numpy.random.randn(25, 1).astype(numpy.float32)
    predicted = numpy.random.randn(25, 1).astype(numpy.float32)
    oinf = OnnxInference(onx)
    got1 = oinf.run({'X1': expected, 'X2': predicted})
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so,
        providers=['CPUExecutionProvider'])
    got2 = sess.run(None, {'X1': expected, 'X2': predicted})
    self.assertEqualArray(got1['Y'], got2[0], decimal=5)
    self.assertEqualArray(got1['Y_grad'], got2[1])

def common_unary(self, name, fct):
    onx = function_onnx_graph(
        name, target_opset=get_max_opset(), dtype=numpy.float32)
    x = numpy.random.randn(10, 1).astype(numpy.float32)
    fin = fct(x)
    oinf = OnnxInference(onx)
    got = oinf.run({'X': x})
    self.assertEqualArray(fin, got['Y'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    got = sess.run(None, {'X': x})
    self.assertEqualArray(fin, got[0], decimal=5)

def test_check(self):
    opset = get_max_opset()
    min_values = [
        -41.621277, -40.621277, -30.621277, -20.621277,
        -19, -18, -17, -15, -14, -13, -12, -11, -10, -5, -2]
    data = numpy.array([[0]], dtype=numpy.float32)
    node = OnnxSigmoid('X', op_version=opset, output_names=['Y'])
    onx = node.to_onnx({'X': FloatTensorType()},
                       {'Y': FloatTensorType()},
                       target_opset=opset)
    rts = ['numpy', 'python', 'onnxruntime1']
    for mv in min_values:
        data[:, 0] = mv
        for rt in rts:
            if rt == 'numpy':
                y = expit(data)
            else:
                oinf = OnnxInference(onx, runtime=rt)
                y = oinf.run({'X': data})['Y']
            self.assertNotEmpty(y)
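
# Added sketch (not in the original file): for the very negative inputs
# probed by test_check, sigmoid(x) = 1 / (1 + exp(-x)) is numerically
# indistinguishable from exp(x) in float32, which is the regime the runtime
# comparison above explores. The method name is new.
def test_check_sigmoid_exp_sketch(self):
    x = numpy.array([[-20.0], [-15.0], [-10.0]], dtype=numpy.float32)
    self.assertEqualArray(expit(x), numpy.exp(x), decimal=5)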

def common_check_1(self, name, fct, weight_name=None, **kwargs):
    onx = function_onnx_graph(
        name, target_opset=get_max_opset(), dtype=numpy.float32,
        weight_name=weight_name, **kwargs)
    x = numpy.random.randn(10, 1).astype(numpy.float32)
    exp_loss, exp_grad = fct(x)
    oinf = OnnxInference(onx)
    got = oinf.run({'X': x})
    self.assertEqualArray(exp_loss, got['Y'], decimal=5)
    self.assertEqualArray(exp_grad, got['Y_grad'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    got = sess.run(None, {'X': x})
    self.assertEqualArray(exp_loss, got[0], decimal=5)
    self.assertEqualArray(exp_grad, got[1], decimal=5)

def common_check_alpha_beta(self, name, fct):
    onx = function_onnx_graph(
        name, target_opset=get_max_opset(), dtype=numpy.float32)
    x1 = numpy.random.randn(10, 1).astype(numpy.float32)
    x2 = numpy.random.randn(10, 1).astype(numpy.float32)
    g = numpy.random.randn(10, 1).astype(numpy.float32)
    alpha = numpy.random.randn(1).astype(numpy.float32)
    beta = numpy.random.randn(1).astype(numpy.float32)
    y, z = fct(x1, x2, g, alpha, beta)

    oinf = OnnxInference(onx)
    got = oinf.run({
        'X1': x1, 'X2': x2, 'alpha': alpha, 'beta': beta, 'G': g})
    self.assertEqualArray(y, got['Y'], decimal=5)
    self.assertEqualArray(z, got['Z'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    got = sess.run(None, {
        'X1': x1, 'X2': x2, 'alpha': alpha, 'beta': beta, 'G': g})
    self.assertEqualArray(y, got[0], decimal=5)
    self.assertEqualArray(z, got[1], decimal=5)

def common_check_3(self, name, fct):
    onx = function_onnx_graph(
        name, target_opset=get_max_opset(), dtype=numpy.float32)
    x = numpy.random.randn(10, 1).astype(numpy.float32)
    a = numpy.random.randn(10, 1).astype(numpy.float32).T
    b = numpy.random.randn(10, 1).astype(numpy.float32)
    y = fct(x, a, b)

    code = export2onnx(onx)
    self.assertIn("'OnnxAdd'", code)

    oinf = OnnxInference(onx)
    got = oinf.run({'X': x, 'A': a, 'B': b})
    self.assertEqualArray(y, got['Y'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    got = sess.run(None, {'X': x, 'A': a, 'B': b})
    self.assertEqualArray(y, got[0], decimal=5)

def common_check_2(self, name, fct, weight_name=None, verbose=0,
                   classification=False, rnd=True,
                   second_name='Y_grad', **kwargs):
    onx = function_onnx_graph(
        name, target_opset=get_max_opset(), dtype=numpy.float32,
        weight_name=weight_name, **kwargs)
    if verbose > 0:
        with open(name + ".onnx", "wb") as f:
            f.write(onx.SerializeToString())
    if classification:
        N = 10
        p = numpy.random.randn(N, 1).astype(numpy.float32)
        p[0, :] = 0
        p[1, :] = 100
        p[2, :] = -100
        p[3, :] = 1
        p[4, :] = -1
        y = (numpy.random.randn(N, 1).astype(numpy.float32) > 0).astype(
            numpy.int64)
        x2 = p
        x1 = y
    else:
        if rnd:
            x1 = numpy.random.randn(10, 1).astype(numpy.float32)
            x2 = numpy.random.randn(10, 1).astype(numpy.float32)
        else:
            x1 = numpy.zeros((10, 1), dtype=numpy.float32)
            x2 = numpy.zeros((10, 1), dtype=numpy.float32) + 1
    if rnd:
        w = numpy.random.rand(10).astype(numpy.float32)
    else:
        w = numpy.zeros(10, dtype=numpy.float32) + 0.2
    if weight_name is None:
        exp_loss, exp_grad = fct(x1, x2)
    else:
        exp_loss, exp_grad = fct(x1, x2, w.reshape((-1, 1)))

    oinf = OnnxInference(onx)
    run_params = dict(verbose=verbose, fLOG=print) if verbose > 0 else {}
    if verbose > 0:
        print(f"\n+++++ name(1)={name!r}")
    if weight_name is None:
        got = oinf.run({'X1': x1, 'X2': x2}, **run_params)
    else:
        got = oinf.run({'X1': x1, 'X2': x2, 'weight': w}, **run_params)
    self.assertEqual(len(exp_grad.shape), 2)
    self.assertEqual(exp_grad.shape[-1], 1)
    self.assertEqualArray(exp_grad, got[second_name], decimal=5)
    self.assertEqualArray(exp_loss, got['Y'], decimal=5)

    providers = device_to_providers('cpu')
    so = SessionOptions()
    so.log_severity_level = 0 if verbose > 0 else 4
    so.log_verbosity_level = 0 if verbose > 0 else 4
    sess = InferenceSession(
        onx.SerializeToString(), so, providers=providers)
    if verbose > 0:
        print("+++ run")
    if weight_name is None:
        got = sess.run(None, {'X1': x1, 'X2': x2})
    else:
        got = sess.run(None, {'X1': x1, 'X2': x2, 'weight': w})
    self.assertEqualArray(exp_loss, got[0], decimal=5)
    self.assertEqualArray(exp_grad, got[1], decimal=5)
    if weight_name is not None:
        if verbose > 0:
            print("+++ run*")
        # when the weight input is omitted, the graph must behave as weight=1
        got = sess.run(None, {'X1': x1, 'X2': x2})
        exp_loss2, exp_grad2 = fct(
            x1, x2, numpy.array([1], dtype=x1.dtype))
        self.assertEqualArray(exp_loss2, got[0], decimal=5)
        self.assertEqualArray(exp_grad2, got[1], decimal=5)

    if 'grad' in name:
        # the unreduced loss keeps one score per sample instead of the sum
        rew = unreduced_onnx_loss(onx)
        if 'ReduceSum' in str(rew):
            raise AssertionError(f"Issue with:\n{rew!r}")
        if verbose > 0:
            with open(name + ".unreduced.onnx", "wb") as f:
                f.write(rew.SerializeToString())
        if verbose > 0:
            print(f"\n+++++ name(2)={name!r}")
        oinf = OnnxInference(rew)
        if weight_name is None:
            got = oinf.run({'X1': x1, 'X2': x2}, **run_params)
        else:
            got = oinf.run({'X1': x1, 'X2': x2, 'weight': w}, **run_params)
        score = got['score']
        self.assertEqual(len(score.shape), 2)
        self.assertEqual(score.shape[0], 10)
        self.assertEqual(score.shape[1], 1)
        self.assertEqualFloat(exp_loss, score.sum())

        sess = InferenceSession(
            rew.SerializeToString(), so, providers=providers)
        if verbose > 0:
            print("+++ run")
        if weight_name is None:
            got = sess.run(None, {'X1': x1, 'X2': x2})
        else:
            got = sess.run(None, {'X1': x1, 'X2': x2, 'weight': w})
        score = got[0]
        self.assertEqual(len(score.shape), 2)
        self.assertEqual(score.shape[0], 10)
        self.assertEqual(score.shape[1], 1)
        self.assertEqualFloat(exp_loss, score.sum())

def taylor_approximation_exp(x, degre=50):
    y = numpy.zeros(x.shape, dtype=x.dtype)
    a = numpy.ones(x.shape, dtype=x.dtype)
    for i in range(1, degre + 1):
        a *= x / i
        y += a
    return y


def taylor_sigmoid(x, degre=50):
    den = one + taylor_approximation_exp(-x, degre)
    return one / (den)


opset = get_max_opset()
N = 300
min_values = [-20 + float(i) * 10 / N for i in range(N)]
data = numpy.array([0], dtype=numpy.float32)
node = OnnxSigmoid('X', op_version=opset, output_names=['Y'])
onx = node.to_onnx({'X': FloatTensorType()},
                   {'Y': FloatTensorType()},
                   target_opset=opset)
rts = ['numpy', 'python', 'onnxruntime', 'taylor20', 'taylor40']

oinf = OnnxInference(onx)
sess = InferenceSession(onx.SerializeToString())
graph = []
for mv in tqdm(min_values):
    data[0] = mv