def test_less():
    """ops.less compares the initializer against the broadcast input column.

    Each input row (shape [1]) is broadcast against the 4-element
    initializer, producing a [None, 4] boolean output. A NaN input row
    compares False against every value, hence the all-False row.
    """
    g = graph.create_graph()
    a = graph.create_initializer(
        g, "a", onnx.TensorProto.FLOAT, [4], [1.1, 2.3, 3.5, 9.6])
    b = graph.create_input(g, "b", onnx.TensorProto.FLOAT, [None, 1])

    l = ops.less()(graph.merge(a, b))
    l = graph.add_output(
        l, l.transients[0].name, onnx.TensorProto.BOOL, [None, 4])
    assert_model_result(
        l,
        input={'b': [
            [0.1],
            [1.2],
            [11],
            [4.2],
            [np.nan],  # np.NaN was removed in NumPy 2.0; np.nan is the portable spelling
        ]},
        expected_result=[[
            [False, False, False, False],
            [True, False, False, False],
            [True, True, True, True],
            [True, True, True, False],
            [False, False, False, False],
        ]])
def _get_bin_index_on_continuous_value(g):
    """Map each continuous input value to the index of its bin.

    Compares the (double-cast) input against every bin edge with
    less-or-equal, multiplies the boolean mask by the edge indices, and
    takes the argmax to select the highest matching bin. Relies on
    ``bin_edges`` from the enclosing scope.
    """
    edge_count = len(bin_edges)
    edge_indices = list(range(edge_count))
    init_index_range = graph.create_initializer(
        g, "bin_index_range",
        onnx.TensorProto.FLOAT, [edge_count], edge_indices)
    init_edges = graph.create_initializer(
        g, "bin_edges",
        onnx.TensorProto.DOUBLE, [edge_count], bin_edges)

    g = ops.cast(onnx.TensorProto.DOUBLE)(g)
    g = ops.less_or_equal()(graph.merge(init_edges, g))
    g = ops.cast(onnx.TensorProto.FLOAT)(g)
    g = ops.mul()(graph.merge(g, init_index_range))
    return ops.argmax(axis=1)(g)
def _get_bin_score_1d(g):
    """Look up the per-class score of each bin index for a 1-d feature.

    Gathers rows of ``bin_scores`` (from the enclosing scope) by index and
    reshapes the result to (batch, 1, class_count).
    """
    init_scores = graph.create_initializer(
        g, "bin_scores",
        onnx.TensorProto.FLOAT, bin_scores.shape, bin_scores.flatten())
    init_shape = graph.create_initializer(
        g, "score_reshape",
        onnx.TensorProto.INT64, [3], [-1, 1, bin_scores.shape[1]])

    # gather score for each class
    g = ops.gather_nd()(graph.merge(init_scores, g))
    return ops.reshape()(graph.merge(g, init_shape))
def _get_bin_score_2d(g): init_bin_scores = graph.create_initializer( g, "bin_scores", onnx.TensorProto.FLOAT, [bin_scores.shape[0], bin_scores.shape[1]], bin_scores.flatten(), ) init_reshape = graph.create_initializer( g, "score_reshape", onnx.TensorProto.INT64, [3], [-1, 1, 1], ) g = ops.concat(axis=1)(g) g = ops.gather_nd()(graph.merge(init_bin_scores, g)) g = ops.reshape()(graph.merge(g, init_reshape)) return g
def _predict_value(g):
    """Flatten the score tensor and expose it as the "predict" output."""
    init_shape = graph.create_initializer(
        g, "reshape",
        onnx.TensorProto.INT64, [1], [0])
    g = ops.reshape()(graph.merge(g, init_shape))
    return ops.identity("predict")(g)
def _compute_class_score(g):
    """Sum the per-feature scores and add the model intercept.

    Returns a ``(graph, name)`` tuple where ``name`` is the transient
    holding the concatenated per-feature scores, kept so callers can
    expose it as a separate output. Relies on ``intercept`` from the
    enclosing scope.
    """
    init_intercept = graph.create_initializer(
        g, "intercept",
        onnx.TensorProto.FLOAT, [intercept.shape[0]], intercept)
    init_axis = graph.create_initializer(
        g, "sum_axis",
        onnx.TensorProto.INT64, [1], [1])

    g = ops.concat(axis=1)(g)
    feature_scores_name = g.transients[0].name
    g = ops.reduce_sum(keepdims=0)(graph.merge(g, init_axis))
    g = ops.add()(graph.merge(g, init_intercept))
    return g, feature_scores_name
def test_create_initializer():
    """create_initializer registers exactly one tensor, mirrored in transients."""
    g = graph.create_graph()
    values = [0.1, 0.2, 0.3, 0.4]
    result = graph.create_initializer(
        g, "foo", onnx.TensorProto.FLOAT, [4], values)

    expected = onnx.helper.make_tensor(
        'foo_0', onnx.TensorProto.FLOAT, [4], values)
    assert len(result.initializers) == 1
    assert result.initializers == [expected]
    assert result.initializers == result.transients
def _predict_proba(g):
    """Turn raw class scores into probabilities via softmax ("predict" output).

    For binary models the graph carries a single raw score column; it is
    multiplied by the [0.0, 1.0] constant so class 0 keeps a constant zero
    logit before the softmax. ``binary`` comes from the enclosing scope.
    """
    if binary is True:
        init_zeros = graph.create_initializer(
            g, "zeros",
            onnx.TensorProto.FLOAT, [2], [0.0, 1.0])
        # Fixed: removed the unused "reshape" initializer that was created
        # here but never merged into the graph (dead code).
        g = ops.mul()(graph.merge(g, init_zeros))

    g = ops.softmax(axis=1)(g)
    g = ops.identity("predict")(g)
    return g
def test_add():
    """ops.add broadcasts the scalar initializer over a 1-d input."""
    g = graph.create_graph()
    const = graph.create_initializer(
        g, "a", onnx.TensorProto.FLOAT, [1], [0.3])
    inp = graph.create_input(g, "i", onnx.TensorProto.FLOAT, [None])

    result = ops.add()(graph.merge(inp, const))
    result = graph.add_output(
        result, result.transients[0].name, onnx.TensorProto.FLOAT, [None])
    assert_model_result(
        result,
        input={'i': [0.1, 1.2, 11, 4.2]},
        expected_result=[[0.4, 1.5, 11.3, 4.5]])
def test_reshape():
    """ops.reshape with shape [0] flattens the (None, 1) input column."""
    g = graph.create_graph()
    target_shape = graph.create_initializer(
        g, "shape", onnx.TensorProto.INT64, [1], [0])
    inp = graph.create_input(g, "i", onnx.TensorProto.FLOAT, [None, 1])

    result = ops.reshape()(graph.merge(inp, target_shape))
    result = graph.add_output(
        result, result.transients[0].name, onnx.TensorProto.FLOAT, [None])
    assert_model_result(
        result,
        input={'i': [
            [0.1],
            [1.2],
            [11],
            [4.2],
        ]},
        expected_result=[[0.1, 1.2, 11, 4.2]])
def test_mul():
    """ops.mul multiplies each input row element-wise by the initializer."""
    g = graph.create_graph()
    factors = graph.create_initializer(
        g, "a", onnx.TensorProto.FLOAT, [3], [1.0, 2.0, 3.0])
    inp = graph.create_input(g, "b", onnx.TensorProto.FLOAT, [None, 3])

    result = ops.mul()(graph.merge(factors, inp))
    result = graph.add_output(
        result, result.transients[0].name, onnx.TensorProto.FLOAT, [None, 3])
    assert_model_result(
        result,
        input={'b': [
            [0.1, 0.1, 0.1],
            [0.1, 0.2, 0.3],
        ]},
        expected_result=[[
            [0.1, 0.2, 0.3],
            [0.1, 0.4, 0.9],
        ]])
def test_reduce_sum():
    """ops.reduce_sum over axis 1 with keepdims=0 collapses rows to scalars."""
    g = graph.create_graph()
    axis = graph.create_initializer(
        g, "axis", onnx.TensorProto.INT64, [1], [1])
    inp = graph.create_input(g, "i", onnx.TensorProto.FLOAT, [None, 3])

    result = ops.reduce_sum(keepdims=0)(graph.merge(inp, axis))
    result = graph.add_output(
        result, result.transients[0].name, onnx.TensorProto.FLOAT, [None])
    assert_model_result(
        result,
        input={'i': [
            [0.1, 1.0, 1.2],
            [1.2, 0.4, 0.9],
            [11, 0.8, -0.2],
            [4.2, 3.2, -6.4],
        ]},
        expected_result=[[2.3, 2.5, 11.6, 1.0]])
def test_gather_elements():
    """ops.gather_elements picks one value per row by the given index."""
    g = graph.create_graph()
    table = graph.create_initializer(
        g, "a", onnx.TensorProto.FLOAT, [3, 1], [0.1, 0.2, 0.3])
    indices = graph.create_input(g, "b", onnx.TensorProto.INT64, [None, 1])

    result = ops.gather_elements()(graph.merge(table, indices))
    result = graph.add_output(
        result, result.transients[0].name, onnx.TensorProto.FLOAT, [None, 1])
    assert_model_result(
        result,
        input={'b': [
            [2],
            [1],
            [0],
        ]},
        expected_result=[[
            [0.3],
            [0.2],
            [0.1],
        ]])
def test_gather_nd():
    """ops.gather_nd reads one scalar per (row, col) coordinate pair."""
    g = graph.create_graph()
    values = np.array([
        [0.1, 0.2, 0.3],
        [1.1, 2.2, 3.3],
        [0.1, 20.2, 30.3],
    ])
    table = graph.create_initializer(
        g, "a", onnx.TensorProto.FLOAT, [3, 3], values.flatten())
    coords = graph.create_input(g, "b", onnx.TensorProto.INT64, [None, 2])

    result = ops.gather_nd()(graph.merge(table, coords))
    result = graph.add_output(
        result, result.transients[0].name, onnx.TensorProto.FLOAT, [None])
    assert_model_result(
        result,
        input={'b': [
            [2, 0],
            [1, 1],
            [0, 1],
        ]},
        expected_result=np.array([[0.1, 2.2, 0.2]]))
def test_merge():
    """graph.merge concatenates parts; transients interleave in call order."""
    g = graph.create_graph()
    init1 = graph.create_initializer(
        g, "foo", onnx.TensorProto.FLOAT, [4], [0.1, 0.2, 0.3, 0.4])
    init2 = graph.create_initializer(
        g, "foo", onnx.TensorProto.FLOAT, [4], [1.1, 1.2, 3.3, 4.4])
    input1 = graph.create_input(g, "bar1", onnx.TensorProto.FLOAT, [None, 3])
    input2 = graph.create_input(g, "bar2", onnx.TensorProto.FLOAT, [None, 4])

    merged = graph.merge(init1, input1, init2, input2)

    # Expected building blocks, named once so each assertion stays short.
    tensor1 = onnx.helper.make_tensor(
        'foo_0', onnx.TensorProto.FLOAT, [4], [0.1, 0.2, 0.3, 0.4])
    tensor2 = onnx.helper.make_tensor(
        'foo_1', onnx.TensorProto.FLOAT, [4], [1.1, 1.2, 3.3, 4.4])
    info1 = onnx.helper.make_tensor_value_info(
        'bar1', onnx.TensorProto.FLOAT, [None, 3])
    info2 = onnx.helper.make_tensor_value_info(
        'bar2', onnx.TensorProto.FLOAT, [None, 4])

    assert len(merged.initializers) == 2
    assert len(merged.inputs) == 2
    assert len(merged.transients) == 4
    assert merged.initializers == [tensor1, tensor2]
    assert merged.inputs == [info1, info2]
    assert merged.transients == [tensor1, info1, tensor2, info2]