def test_onnx_example_constant_of_shape(self):
    """ConstantOfShape: default zero fill, then an explicit fill value."""
    x = np.array([1, 2, 4, 5, 5, 4]).astype(np.float32).reshape((3, 2))
    opv = _TARGET_OPSET_

    # No 'value' attribute: the output tensor is filled with zeros.
    node = OnnxConstantOfShape(
        OnnxShape('input', op_version=opv),
        output_names=['mat'], op_version=opv)
    model_def = node.to_onnx(
        {'input': x}, outputs=[('mat', FloatTensorType())])
    sess = InferenceSession(model_def.SerializeToString())
    got = sess.run(None, {'input': x})
    assert_almost_equal(np.zeros((3, 2), dtype=np.float32), got[0])

    # Explicit 'value' attribute: the output tensor is filled with -5.
    tensor_value = onnx.helper.make_tensor(
        "value", onnx.TensorProto.FLOAT, (1, ), [-5])
    node = OnnxConstantOfShape(
        OnnxShape('input', op_version=opv), value=tensor_value,
        output_names=['mat'], op_version=opv)
    model_def = node.to_onnx(
        {'input': x}, outputs=[('mat', FloatTensorType())])
    sess = InferenceSession(model_def.SerializeToString())
    got = sess.run(None, {'input': x})
    assert_almost_equal(np.full((3, 2), -5.), got[0])
def test_onnxt_runtime_shape(self):
    """Shape operator returns the dimensions of its input tensor."""
    mat = numpy.random.randn(20, 2).astype(numpy.float32)
    expected = mat.shape
    node = OnnxShape('X', output_names=['Y'])
    model_def = node.to_onnx({'X': mat.astype(numpy.float32)})
    got = OnnxInference(model_def).run({'X': mat})
    self.assertEqualArray(expected, got['Y'])
def test_onnx_micro_runtime_shape(self):
    """Shape through OnnxMicroRuntime returns the input dimensions
    as an int64 tensor."""
    opset = TestOnnxMicroRuntime.opset
    x = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
        (3, 2))
    cop = OnnxShape('X', op_version=opset, output_names=['Y'])
    model_def = cop.to_onnx({'X': x}, target_opset=opset)
    rt = OnnxMicroRuntime(model_def)
    out = rt.run({'X': x})
    # FIX: plain unittest assertEqual compares with `==`, which on a
    # multi-element numpy array raises "truth value of an array is
    # ambiguous".  Use the array-aware assertion, consistent with the
    # other shape tests in this file.
    self.assertEqualArray(
        numpy.array(x.shape, dtype=numpy.int64), out['Y'])
def test_constant_of_shape(self):
    """ConstantOfShape with an explicit fill value appears in dot export."""
    mat = numpy.array([1, 2, 4, 5, 5, 4]).astype(numpy.float32).reshape(
        (3, 2))
    value = make_tensor(
        "value", TensorProto.FLOAT, (1, ), [-5])  # pylint: disable=E1101
    node = OnnxConstantOfShape(
        OnnxShape('input'), value=value, output_names=['mat'])
    model_def = node.to_onnx(
        {'input': mat}, outputs=[('mat', FloatTensorType())])
    oinf = OnnxInference(model_def, skip_run=True)
    dot = oinf.to_dot()
    self.assertIn('ConstantOfShape', dot)
def live_decorrelate_transformer_converter(scope, operator, container):
    """ONNX converter for LiveDecorrelateTransformer.

    Rebuilds the transform Y = (X - mean(X)) @ V^(-1/2) as an ONNX
    graph, where V is the covariance of the centered input plus
    alpha * I.
    """
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # The unique input; its element type decides the graph's dtype.
    inp = operator.inputs[0]
    proto_dtype = guess_proto_type(inp.type)
    dtype = guess_numpy_type(inp.type)

    # mean_ = numpy.mean(X, axis=0, keepdims=True)
    mean_node = OnnxReduceMean(inp, axes=[0], keepdims=1, op_version=opv)
    # Prefixing intermediate output names makes big graphs readable:
    # every node below this one prefixes its outputs with the string.
    mean_node.set_onnx_name_prefix('mean')

    # X2 = X - mean_
    centered = OnnxSub(inp, mean_node, op_version=opv)

    # V = X2.T @ X2 / X2.shape[0]
    n_rows = OnnxGatherElements(
        OnnxShape(inp, op_version=opv),
        numpy.array([0], dtype=numpy.int64), op_version=opv)
    n_rows_f = OnnxCast(n_rows, to=proto_dtype, op_version=opv)
    n_rows_f.set_onnx_name_prefix('N')
    cov = OnnxDiv(
        OnnxMatMul(OnnxTranspose(centered, op_version=opv),
                   centered, op_version=opv),
        n_rows_f, op_version=opv)
    cov.set_onnx_name_prefix('V1')

    # V += numpy.identity(V.shape[0]) * self.alpha
    cov = OnnxAdd(cov,
                  op.alpha * numpy.identity(op.nf_, dtype=dtype),
                  op_version=opv)
    cov.set_onnx_name_prefix('V2')

    # L, P = numpy.linalg.eig(V)
    eig = OnnxEig(cov, eigv=True, op_version=opv)
    eig.set_onnx_name_prefix('LP')

    # Linv = L ** (-0.5)
    # eig[0] selects the first output of OnnxEig (the eigenvalues),
    # eig[1] the second one (the eigenvectors); using `eig` alone
    # would be ambiguous.
    inv_sqrt = OnnxPow(eig[0], numpy.array([-0.5], dtype=dtype),
                       op_version=opv)
    inv_sqrt.set_onnx_name_prefix('Linv')

    # diag = numpy.diag(Linv)
    diag_node = OnnxMul(
        OnnxEyeLike(numpy.zeros((op.nf_, op.nf_), dtype=numpy.int64),
                    k=0, op_version=opv),
        inv_sqrt, op_version=opv)
    diag_node.set_onnx_name_prefix('diag')

    # root = P @ diag @ P.transpose()
    p_t = OnnxTranspose(eig[1], op_version=opv)
    half = OnnxMatMul(eig[1], diag_node, op_version=opv)
    half.set_onnx_name_prefix('coef_left')
    root = OnnxMatMul(half, p_t, op_version=opv)
    root.set_onnx_name_prefix('coef')

    # Y = X2 @ root, bound to the converter's declared output.
    final = OnnxMatMul(centered, root, op_version=opv,
                       output_names=out[:1])
    final.set_onnx_name_prefix('Y')

    # Adds every node built above to the ONNX graph.
    final.add_to(scope, container)
def live_decorrelate_transformer_converter(scope, operator, container):
    """ONNX converter for LiveDecorrelateTransformer.

    Implements Y = (X - mean(X)) @ V^(-1/2) as an ONNX graph, with
    V = cov(X - mean) + alpha * I.
    """
    op = operator.raw_operator
    opv = container.target_opset
    out = operator.outputs

    # We retrieve the unique input; its type decides the graph's dtype.
    X = operator.inputs[0]
    proto_dtype = guess_proto_type(X.type)
    dtype = guess_numpy_type(X.type)

    # mean_ = numpy.mean(X, axis=0, keepdims=True)
    mean = OnnxReduceMean(X, axes=[0], keepdims=1, op_version=opv)
    mean.set_onnx_name_prefix('mean')

    # X2 = X - mean_
    X2 = OnnxSub(X, mean, op_version=opv)

    # V = X2.T @ X2 / X2.shape[0]
    N = OnnxGatherElements(
        OnnxShape(X, op_version=opv),
        numpy.array([0], dtype=numpy.int64), op_version=opv)
    Nf = OnnxCast(N, to=proto_dtype, op_version=opv)
    Nf.set_onnx_name_prefix('N')
    V = OnnxDiv(
        OnnxMatMul(OnnxTranspose(X2, op_version=opv),
                   X2, op_version=opv),
        Nf, op_version=opv)
    V.set_onnx_name_prefix('V1')

    # V += numpy.identity(V.shape[0]) * self.alpha
    V = OnnxAdd(V, op.alpha * numpy.identity(op.nf_, dtype=dtype),
                op_version=opv)
    V.set_onnx_name_prefix('V2')

    # L, P = numpy.linalg.eig(V)
    LP = OnnxEig(V, eigv=True, op_version=opv)
    LP.set_onnx_name_prefix('LP')

    # Linv = L ** (-0.5)
    Linv = OnnxPow(LP[0], numpy.array([-0.5], dtype=dtype),
                   op_version=opv)
    Linv.set_onnx_name_prefix('Linv')

    # diag = numpy.diag(Linv)
    # FIX: ONNX EyeLike requires a 2-D input tensor and produces an
    # identity matrix of the same shape.  The previous code passed the
    # 1-D array [nf_, nf_] (shape (2,)), which is not a valid EyeLike
    # input; feed it a (nf_, nf_) zero matrix instead, matching the
    # sibling converter earlier in this file.
    diag = OnnxMul(
        OnnxEyeLike(numpy.zeros((op.nf_, op.nf_), dtype=numpy.int64),
                    k=0, op_version=opv),
        Linv, op_version=opv)
    diag.set_onnx_name_prefix('diag')

    # root = P @ diag @ P.transpose()
    trv = OnnxTranspose(LP[1], op_version=opv)
    coef_left = OnnxMatMul(LP[1], diag, op_version=opv)
    coef_left.set_onnx_name_prefix('coef_left')
    coef = OnnxMatMul(coef_left, trv, op_version=opv)
    coef.set_onnx_name_prefix('coef')

    # Y = X2 @ coef, bound to the converter's declared output.
    Y = OnnxMatMul(X2, coef, op_version=opv, output_names=out[:1])
    Y.set_onnx_name_prefix('Y')
    Y.add_to(scope, container)