Example #1
 def test_compress(self):
   if legacy_opset_pre_ver(9):
     raise unittest.SkipTest(
         "ONNX version {} doesn't support Compress.".format(
             defs.onnx_opset_version()))
   axis = 1
   node_def = helper.make_node(
       "Compress", inputs=['X', 'condition'], outputs=['Y'], axis=axis)
   x = self._get_rnd([5, 5, 5])
   cond = np.array([1, 0, 1])
   output = run_node(node_def, inputs=[x, cond])
   np.testing.assert_almost_equal(output['Y'], np.compress(cond, x, axis=axis))
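Note that cond has only three entries while axis 1 of x has length 5; np.compress (and the ONNX Compress spec) only considers the slices the condition covers and drops the rest. A minimal standalone check of that behavior:

import numpy as np

x = np.arange(125).reshape(5, 5, 5)
cond = np.array([1, 0, 1])
# slices 3 and 4 along axis 1 are not covered by cond, so only 2 slices survive
assert np.compress(cond, x, axis=1).shape == (5, 2, 5)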
Example #2
  def test_shrink(self):
    if legacy_opset_pre_ver(9):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support Shrink.".format(
              defs.onnx_opset_version()))

    node_def = helper.make_node("Shrink", ["X"], ["Y"], bias=1.5, lambd=1.5)

    X = np.arange(-2.0, 2.1, dtype=np.float32)
    Y = np.array([-0.5, 0, 0, 0, 0.5], dtype=np.float32)
    output = run_node(node_def, [X])
    np.testing.assert_almost_equal(output["Y"], Y)
Example #3
 def test_mod(self):
   if legacy_opset_pre_ver(10):
     raise unittest.SkipTest("ONNX version {} doesn't support Mod.".format(
         defs.onnx_opset_version()))
   x = self._get_rnd([5, 5])
   y = self._get_rnd([5, 5])
   node_def = helper.make_node("Mod", ["X", "Y"], ["Z"], fmod=0)
   output = run_node(node_def, [x, y])
   np.testing.assert_almost_equal(output["Z"], np.mod(x, y))
   node_def = helper.make_node("Mod", ["X", "Y"], ["Z"], fmod=1)
   output = run_node(node_def, [x, y])
   np.testing.assert_almost_equal(output["Z"], np.fmod(x, y))
Example #4
 def test_eye_like(self):
   if legacy_opset_pre_ver(9):
     raise unittest.SkipTest("ONNX version {} doesn't support EyeLike.".format(
         defs.onnx_opset_version()))
   for shape in [[6, 10], [10, 6]]:
     for off_diagonal_offset in [-10, -6, -3, 0, 3, 6, 7, 10]:
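       # dtype=1 is TensorProto.FLOAT in the ONNX TensorProto enum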
       node_def = helper.make_node(
           "EyeLike", ['x'], ['y'], dtype=1, k=off_diagonal_offset)
       x = np.random.randint(0, 100, size=shape, dtype=np.int32)
       y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
       output = run_node(node_def, [x])
       np.testing.assert_equal(output['y'], y)
Example #5
  def test_cast(self):
    if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
      test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                    ("INT8", tf.int8), ("UINT16", tf.uint16), ("INT16",
                                                               tf.int16),
                    ("INT32", tf.int32), ("INT64", tf.int64), ("BOOL", tf.bool),
                    ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                    ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
    else:
      test_cases = [(TensorProto.FLOAT,
                     tf.float32), (TensorProto.UINT8,
                                   tf.uint8), (TensorProto.INT8, tf.int8),
                    (TensorProto.UINT16,
                     tf.uint16), (TensorProto.INT16,
                                  tf.int16), (TensorProto.INT32, tf.int32),
                    (TensorProto.INT64,
                     tf.int64), (TensorProto.BOOL,
                                 tf.bool), (TensorProto.FLOAT16, tf.float16),
                    (TensorProto.DOUBLE,
                     tf.float64), (TensorProto.COMPLEX64,
                                   tf.complex64), (TensorProto.COMPLEX128,
                                                   tf.complex128)]
      if not legacy_opset_pre_ver(9):
         test_cases.append((TensorProto.STRING, tf.string))
    for ty, tf_type in test_cases:
      node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
      vector = [2, 3]
      output = run_node(node_def, [vector])
      np.testing.assert_equal(output["output"].dtype, tf_type)

    if not legacy_opset_pre_ver(9):
      test_cases2 = [(TensorProto.FLOAT, tf.float32),
                     (TensorProto.INT32, tf.int32),
                     (TensorProto.INT64, tf.int64),
                     (TensorProto.DOUBLE, tf.float64)]
      for ty, tf_type in test_cases2:
        node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
        vector = ['2', '3']
        output = run_node(node_def, [vector])
        np.testing.assert_equal(output["output"].dtype, tf_type)
Example #6
 def test_non_max_suppression(self):
   if legacy_opset_pre_ver(10):
     raise unittest.SkipTest(
         "ONNX version {} doesn't support NonMaxSuppression.".format(
             defs.onnx_opset_version()))
   boxes = np.array([[[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],
                      [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],
                      [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0, 101.0]],
                     [[0.0, 0.0, 1.0, 1.0], [0.0, 0.1, 1.0, 1.1],
                      [0.0, -0.1, 1.0, 0.9], [0.0, 10.0, 1.0, 11.0],
                      [0.0, 10.1, 1.0, 11.1], [0.0, 100.0, 1.0,
                                               101.0]]]).astype(np.float32)
   scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]],
                      [[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
   max_output_boxes_per_class = np.array([2]).astype(np.int64)
   iou_threshold = np.array([0.5]).astype(np.float32)
   score_threshold = np.array([0.0]).astype(np.float32)
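   # each selected_indices row is [batch_index, class_index, box_index]
   # (ONNX spec); e.g. [0, 0, 3] picks box 3 of batch 0, class 0, the
   # top-scoring box (0.95)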
   selected_indices = np.array([[0, 0, 3], [0, 0, 0], [1, 0, 3],
                                [1, 0, 0]]).astype(np.int64)
   node_def = helper.make_node("NonMaxSuppression", [
       "boxes", "scores", "max_output_boxes_per_class", "iou_threshold",
       "score_threshold"
   ], ["selected_indices"],
                               center_point_box=0)
   graph_def = helper.make_graph(
       [node_def],
       name="test_unknown_shape",
       inputs=[
           helper.make_tensor_value_info("boxes", TensorProto.FLOAT,
                                         [None, None, None]),
           helper.make_tensor_value_info("scores", TensorProto.FLOAT,
                                         [None, None, None]),
           helper.make_tensor_value_info("max_output_boxes_per_class",
                                         TensorProto.INT64, [None]),
           helper.make_tensor_value_info("iou_threshold", TensorProto.FLOAT,
                                         [None]),
           helper.make_tensor_value_info("score_threshold", TensorProto.FLOAT,
                                         [None])
       ],
       outputs=[
           helper.make_tensor_value_info("selected_indices", TensorProto.INT64,
                                         [None, None])
       ])
   tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
   output = tf_rep.run({
       "boxes": boxes,
       "scores": scores,
       "max_output_boxes_per_class": max_output_boxes_per_class,
       "iou_threshold": iou_threshold,
       "score_threshold": score_threshold
   })
   np.testing.assert_almost_equal(output["selected_indices"], selected_indices)
Example #7
 def test_topk(self):
   x = np.arange(15, dtype=np.float32).reshape(3, 5)
   values = np.array([[4, 3], [9, 8], [14, 13]], dtype=np.float32)
   indices = np.array([[4, 3], [4, 3], [4, 3]], dtype=np.int64)
   if legacy_opset_pre_ver(10): # for opset = 1
     node_def = helper.make_node("TopK", ["x"], ["values", "indices"], k=2)
     output = run_node(node_def, [x])
   elif legacy_opset_pre_ver(11): # for opset = 10
     k = np.array([2], dtype=np.int64)
     node_def = helper.make_node("TopK", ["x", "k"], ["values", "indices"])
     output = run_node(node_def, [x, k])
   else: # for opset = 11
     x = np.array([[3, 2, 5, 10, 7], [12, 15, 10, 7, 20], [21, 16, 5, 3, 6]],
                  dtype=np.float32)
     values = np.array([[3, 2], [10, 7], [5, 3]], dtype=np.float32)
     indices = np.array([[0, 1], [2, 3], [2, 3]], dtype=np.int64)
     k = np.array([2], dtype=np.int64)
     node_def = helper.make_node(
         "TopK", ["x", "k"], ["values", "indices"], largest=0, sorted=0)
     output = run_node(node_def, [x, k])
   np.testing.assert_almost_equal(output["values"], values)
   np.testing.assert_almost_equal(output["indices"], indices)
Example #8
 def test_dropout(self):
   # Since current ONNX only supports inference, and
   # dropout at inference is a no-op, Dropout is always
   # a no-op operator in ONNX.
   node_def = helper.make_node("Dropout", ["X"], ["Y"])
   if legacy_opset_pre_ver(7):
     # in inference mode, is_test is always set to 1
     node_def = helper.make_node("Dropout", ["X"], ["Y"], is_test=1)
   x = self._get_rnd([3, 4, 5])
   y = x
   output = run_node(node_def, [x])
   np.testing.assert_equal(output["Y"], y)
Example #9
  def test_max_pool_with_argmax_2d_dilations_ceil_pads(self):
    if legacy_opset_pre_ver(10):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support dilations nor ceil mode.".format(
              defs.onnx_opset_version()))
 
    kernel_shape = [3, 3]
    strides = [2, 2]
    dilations = [3, 3]
    pads = [1, 1, 2, 2]
    ceil_mode = True

    input_shape = [10, 3, 23, 23]
    x = self._get_rnd_float32(shape=input_shape) - 2

    node_def = helper.make_node("MaxPool", ["X"], ["Y", "Ind"],
                                kernel_shape=kernel_shape,
                                strides=strides,
                                dilations=dilations,
                                pads=pads,
                                ceil_mode=ceil_mode)

    graph_def = helper.make_graph(
        [node_def],
        name="test_unknown_shape",
        inputs=[
            helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                          [None, None, None, None]),
        ],
        outputs=[
            helper.make_tensor_value_info("Y", TensorProto.FLOAT,
                                          [None, None, None, None]),
            helper.make_tensor_value_info("Ind", TensorProto.INT64,
                                          [None, None, None, None])
        ])

    tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
    output = tf_rep.run({"X": x})

    test_output, test_ind = py_pool(x,
                                    kernel_shape=kernel_shape,
                                    strides=strides,
                                    dilations=dilations,
                                    padding=pads,
                                    ceil_mode=ceil_mode,
                                    pooling_type="MAX")

    np.testing.assert_almost_equal(output["Y"], test_output)
    np.testing.assert_almost_equal(output["Ind"], test_ind)
Example #10
 def test_onehot(self):
   if legacy_opset_pre_ver(9):
     raise unittest.SkipTest("ONNX version {} doesn't support OneHot.".format(
         defs.onnx_opset_version()))
   indices = np.array([[0, 2], [1, 2], [0, 1]])
   depth = np.array([5], dtype=np.int32)
   on_value = 6.0
   off_value = 2.0
   values = np.array([off_value, on_value])
   node_def = helper.make_node(
       'OneHot', inputs=['indices', 'depth', 'values'], outputs=['y'], axis=-1)
   y = (np.arange(depth) == indices[..., None]).astype(int)
   y = y * (on_value - off_value) + off_value
   output = run_node(node_def, inputs=[indices, depth, values])
   np.testing.assert_equal(output['y'], y)
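The expected value is built with numpy broadcasting: indices[..., None] has shape (3, 2, 1) and compares against np.arange(depth) of shape (5,), yielding a (3, 2, 5) boolean one-hot mask that is then rescaled to the off/on values. The same construction as a standalone sketch:

import numpy as np

indices = np.array([[0, 2], [1, 2], [0, 1]])
depth, on_value, off_value = 5, 6.0, 2.0
# (3, 2, 1) == (5,) broadcasts to a (3, 2, 5) one-hot mask
y = (np.arange(depth) == indices[..., None]).astype(np.float64)
y = y * (on_value - off_value) + off_value
assert y.shape == (3, 2, 5) and y[0, 0, 0] == on_value and y[0, 0, 1] == off_value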
Example #11
 def test_topk(self):
     x = np.arange(15, dtype=np.float32).reshape(3, 5)
     values = np.array([[4, 3], [9, 8], [14, 13]], dtype=np.float32)
     indices = np.array([[4, 3], [4, 3], [4, 3]], dtype=np.int64)
     if legacy_opset_pre_ver(10):  # for opset = 1
         node_def = helper.make_node("TopK", ["x"], ["values", "indices"],
                                     k=2)
         output = run_node(node_def, [x])
     else:  # for opset = 10
         k = np.array([2], dtype=np.int64)
         node_def = helper.make_node("TopK", ["x", "k"],
                                     ["values", "indices"])
         output = run_node(node_def, [x, k])
     np.testing.assert_almost_equal(output["values"], values)
     np.testing.assert_almost_equal(output["indices"], indices)
Example #12
  def test_mean_variance_normalization(self):
    if legacy_opset_pre_ver(9):
      raise unittest.SkipTest(
          "ONNX version {} doesn't have a test for MeanVarianceNormalization."
          .format(defs.onnx_opset_version()))

    input_data = self._get_rnd([2, 2, 2, 2])
    # Calculate expected output data using the formula:
    # (Input - Mean) / SD
    mean = np.mean(input_data, keepdims=1, axis=(0, 2, 3))
    std = np.std(input_data, keepdims=1, axis=(0, 2, 3))
    expected_output = (input_data - mean) / std
    # Testing without "axes" argument should default to axes=[0,2,3]
    node_def = helper.make_node("MeanVarianceNormalization", ["X"], ["Y"])
    output = run_node(node_def, [input_data])
    np.testing.assert_almost_equal(output["Y"], expected_output, decimal=5)
Example #13
 def test_batch_normalization(self):
     if legacy_opset_pre_ver(6):
         raise unittest.SkipTest("Backend doesn't support consumed flag")
     node_def = helper.make_node("BatchNormalization",
                                 ["X", "scale", "bias", "mean", "var"],
                                 ["Y"],
                                 epsilon=0.001)
     graph_def = helper.make_graph(
         [node_def],
         name="test_unknown_shape",
         inputs=[
             helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                           [None, None, None, None]),
             helper.make_tensor_value_info("scale", TensorProto.FLOAT,
                                           [None]),
             helper.make_tensor_value_info("bias", TensorProto.FLOAT,
                                           [None]),
             helper.make_tensor_value_info("mean", TensorProto.FLOAT,
                                           [None]),
             helper.make_tensor_value_info("var", TensorProto.FLOAT, [None])
         ],
         outputs=[
             helper.make_tensor_value_info("Y", TensorProto.FLOAT,
                                           [None, None, None, None])
         ])
     x_shape = [3, 5, 4, 2]
     param_shape = [5]
     _param_shape = [1, 5, 1, 1]
     x = self._get_rnd_float32(0, 1, shape=x_shape)
     m = self._get_rnd_float32(0, 1, shape=param_shape)
     _m = m.reshape(_param_shape)
     v = self._get_rnd_float32(0, 1, shape=param_shape)
     _v = v.reshape(_param_shape)
     scale = self._get_rnd_float32(0, 1, shape=param_shape)
     _scale = scale.reshape(_param_shape)
     bias = self._get_rnd_float32(0, 1, shape=param_shape)
     _bias = bias.reshape(_param_shape)
     golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
     tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
     # export to tf.saved_model
     model_path = 'test_dynamic_shape/batch_normalization'
     tf_rep.export_graph(model_path)
     # load the saved_model back
     tf_model = tf.saved_model.load(model_path)
     # run the model
     tf_model_output = tf_model(X=x, scale=scale, bias=bias, mean=m, var=v)
     np.testing.assert_almost_equal(tf_model_output[0], golden, decimal=5)
Example #14
 def test_batch_normalization(self):
     if legacy_opset_pre_ver(6):
         raise unittest.SkipTest("Backend doesn't support consumed flag")
     node_def = helper.make_node("BatchNormalization",
                                 ["X", "scale", "bias", "mean", "var"],
                                 ["Y"],
                                 epsilon=0.001)
     graph_def = helper.make_graph(
         [node_def],
         name="test_unknown_shape",
         inputs=[
             helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                           [None, None, None, None]),
             helper.make_tensor_value_info("scale", TensorProto.FLOAT,
                                           [None]),
             helper.make_tensor_value_info("bias", TensorProto.FLOAT,
                                           [None]),
             helper.make_tensor_value_info("mean", TensorProto.FLOAT,
                                           [None]),
             helper.make_tensor_value_info("var", TensorProto.FLOAT, [None])
         ],
         outputs=[
             helper.make_tensor_value_info("Y", TensorProto.FLOAT,
                                           [None, None, None, None])
         ])
     x_shape = [3, 5, 4, 2]
     param_shape = [5]
     _param_shape = [1, 5, 1, 1]
     x = self._get_rnd_float32(0, 1, shape=x_shape)
     m = self._get_rnd_float32(0, 1, shape=param_shape)
     _m = m.reshape(_param_shape)
     v = self._get_rnd_float32(0, 1, shape=param_shape)
     _v = v.reshape(_param_shape)
     scale = self._get_rnd_float32(0, 1, shape=param_shape)
     _scale = scale.reshape(_param_shape)
     bias = self._get_rnd_float32(0, 1, shape=param_shape)
     _bias = bias.reshape(_param_shape)
     golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
     tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
     output = tf_rep.run({
         "X": x,
         "scale": scale,
         "bias": bias,
         "mean": m,
         "var": v
     })
     np.testing.assert_almost_equal(output["Y"], golden, decimal=5)
Example #15
 def test_is_inf(self):
   if legacy_opset_pre_ver(10):
     raise unittest.SkipTest("ONNX version {} doesn't support IsInf.".format(
         defs.onnx_opset_version()))
   inp = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf],
                  dtype=np.float32)
   expected_output = np.isinf(inp)
   node_def = helper.make_node("IsInf", ["X"], ["Y"])
   graph_def = helper.make_graph(
       [node_def],
       name="test_unknown_shape",
       inputs=[
           helper.make_tensor_value_info("X", TensorProto.FLOAT, [None]),
       ],
       outputs=[helper.make_tensor_value_info("Y", TensorProto.BOOL, [None])])
   tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
   output = tf_rep.run({"X": inp})
   np.testing.assert_equal(output["Y"], expected_output)
Example #16
 def test_scatter_nd(self):
     if legacy_opset_pre_ver(11):
         raise unittest.SkipTest(
             "ONNX version {} doesn't support ScatterND.".format(
                 defs.onnx_opset_version()))
     # valid positive and negative indices for slices
     data = np.reshape(np.arange(1, 25, dtype=np.float32), [2, 3, 4])
     indices = np.array([[-1]], dtype=np.int64)
     updates = np.array(
         [[[43, 44, 45, 46], [47, 48, 49, 50], [51, 52, 53, 54]]],
         dtype=np.float32)
     ref_output = np.array(
         [[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
          [[43, 44, 45, 46], [47, 48, 49, 50], [51, 52, 53, 54]]],
         dtype=np.float32)
     node_def = helper.make_node("ScatterND",
                                 ["data", "indices", "updates"],
                                 ["outputs"])
     graph_def = helper.make_graph(
         [node_def],
         name="test_unknown_shape",
         inputs=[
             helper.make_tensor_value_info("data", TensorProto.FLOAT,
                                           [None, None, None]),
             helper.make_tensor_value_info("indices", TensorProto.INT64,
                                           [None, None]),
             helper.make_tensor_value_info("updates", TensorProto.FLOAT,
                                           [None, None, None])
         ],
         outputs=[
             helper.make_tensor_value_info("outputs", TensorProto.FLOAT,
                                           [None, None, None])
         ])
     tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
     # export to tf.saved_model
     model_path = 'test_dynamic_shape/scatter_nd'
     tf_rep.export_graph(model_path)
     # load the saved_model back
     tf_model = tf.saved_model.load(model_path)
     # run the model
     tf_model_output = tf_model(data=data, indices=indices, updates=updates)
     np.testing.assert_almost_equal(tf_model_output[0], ref_output)
Example #17
 def test_batch_normalization(self):
   if legacy_opset_pre_ver(6):
     raise unittest.SkipTest("Backend doesn't support consumed flag")
   node_def = helper.make_node(
       "BatchNormalization", ["X", "scale", "bias", "mean", "var"], ["Y"],
       epsilon=0.001)
   x_shape = [3, 5, 4, 2]
   param_shape = [5]
   _param_shape = [1, 5, 1, 1]
   x = self._get_rnd(x_shape, 0, 1)
   m = self._get_rnd(param_shape, 0, 1)
   _m = m.reshape(_param_shape)
   v = self._get_rnd(param_shape, 0, 1)
   _v = v.reshape(_param_shape)
   scale = self._get_rnd(param_shape, 0, 1)
   _scale = scale.reshape(_param_shape)
   bias = self._get_rnd(param_shape, 0, 1)
   _bias = bias.reshape(_param_shape)
   golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
   output = run_node(node_def, [x, scale, bias, m, v])
   np.testing.assert_almost_equal(output["Y"], golden, decimal=5)
Example #18
  def test_constant(self):
    shape = [16, 16]
    values = np.random.randn(*shape).flatten().astype(float)
    const2_onnx = helper.make_tensor("const2", TensorProto.DOUBLE, shape,
                                     values)
    node_def = helper.make_node("Constant", [], ["Y"], value=const2_onnx)
    output = run_node(node_def, [])
    np.testing.assert_equal(output["Y"].shape, shape)
    np.testing.assert_almost_equal(output["Y"].flatten(), values)

    # test sparse tensor
    if not legacy_opset_pre_ver(11):
      expected = np.array([[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]])
      x = np.array([[0, 0], [1, 2]]).flatten().astype(np.int64)
      values = helper.make_tensor("values", TensorProto.INT32, [2], [1, 2])
      indices = helper.make_tensor("indices", TensorProto.INT64, [2, 2], x)
      a = helper.make_sparse_tensor(values, indices, [3, 4])
      node_def = helper.make_node("Constant", [], ["Y"], sparse_value=a)
      output = run_node(node_def, [])
      b = tf.sparse_to_dense(output["Y"].indices, output["Y"].dense_shape,
                             output["Y"].values)
      result = b.eval(session=tf.Session())
      np.testing.assert_equal(result, expected)
Example #19
 def test_is_inf(self):
   if legacy_opset_pre_ver(10):
     raise unittest.SkipTest("ONNX version {} doesn't support IsInf.".format(
         defs.onnx_opset_version()))
   inp = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf],
                  dtype=np.float32)
   expected_output = {
       "node_def": np.isinf(inp),
       "node_def_neg_false": np.isposinf(inp),
       "node_def_pos_false": np.isneginf(inp)
   }
   node_defs = {
       "node_def": helper.make_node("IsInf", ["X"], ["Y"]),
       "node_def_neg_false": helper.make_node("IsInf", ["X"], ["Y"],
                                              detect_negative=0),
       "node_def_pos_false": helper.make_node("IsInf", ["X"], ["Y"],
                                              detect_positive=0)
   }
   for key in node_defs:
     output = run_node(node_defs[key], [inp])
     np.testing.assert_equal(output["Y"], expected_output[key])
Example #20
 def test_constant_fill(self):
   if not legacy_opset_pre_ver(9):
     raise unittest.SkipTest(
         "ONNX version {} doesn't support ConstantFill.".format(
             defs.onnx_opset_version()))
   shape = [1, 2, 3, 4]
   extra_shape = [5, 6]
   value = 3.
   node_def = helper.make_node(
       "ConstantFill",
       ["X"],
       ["Y"],
       value=value,
       extra_shape=extra_shape,
       dtype=1,
   )
   x = self._get_rnd(shape)
   y = np.zeros(shape + extra_shape)
   y.fill(value)
   output = run_node(node_def, [x])
   np.testing.assert_equal(output["Y"].dtype, tf.float32)
   np.testing.assert_equal(output["Y"], y)
Example #21
 def test_eye_like(self):
   if legacy_opset_pre_ver(9):
     raise unittest.SkipTest("ONNX version {} doesn't support EyeLike.".format(
         defs.onnx_opset_version()))
   shape = [6, 10]
   off_diagonal_offset = -3
   x = self._get_rnd_int(0, 100, shape=shape)
   y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
   node_def = helper.make_node("EyeLike", ["x"], ["y"],
                               dtype=TensorProto.FLOAT,
                               k=off_diagonal_offset)
   graph_def = helper.make_graph(
       [node_def],
       name="test_unknown_shape",
       inputs=[
           helper.make_tensor_value_info("x", TensorProto.INT32, [None, None])
       ],
       outputs=[
           helper.make_tensor_value_info("y", TensorProto.FLOAT, [None, None])
       ])
   tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
   output = tf_rep.run({"x": x})
   np.testing.assert_equal(output["y"], y)
Example #22
 def test_arg_max(self):
     if legacy_opset_pre_ver(12):
         raise unittest.SkipTest(
             "ONNX version {} doesn't support select_last_index attribute for ArgMax that depends on shape."
             .format(defs.onnx_opset_version()))
     axis = 1
     node_def = helper.make_node("ArgMax",
                                 inputs=['X'],
                                 outputs=['Y'],
                                 axis=axis,
                                 keepdims=0,
                                 select_last_index=1)
     graph_def = helper.make_graph(
         [node_def],
         name="test_unknown_shape",
         inputs=[
             helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                           [None, None])
         ],
         outputs=[
             helper.make_tensor_value_info("Y", TensorProto.FLOAT,
                                           [None, None])
         ])
     x = np.array([[1, 2, 3, 5, 3, 4, 5, 1], [2, 9, 3, 5, 9, 4, 5,
                                              1]]).astype(np.float32)
     # get tf_rep
     tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
     # export to tf.saved_model
     model_path = 'test_dynamic_shape/arg_max'
     tf_rep.export_graph(model_path)
     # load the saved_model back
     tf_model = tf.saved_model.load(model_path)
     # run the model
     tf_model_output = tf_model(X=x)
     expected_output = np.argmax(np.flip(x, axis), axis=axis)
     expected_output = x.shape[axis] - expected_output - 1
     np.testing.assert_almost_equal(tf_model_output[0], expected_output)
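The expected output emulates select_last_index: flipping the axis makes np.argmax find the last maximum first, and the index is then mapped back to the original orientation. For the second row [2, 9, 3, 5, 9, 4, 5, 1], plain argmax gives 1 while select_last_index should give 4:

import numpy as np

row = np.array([2, 9, 3, 5, 9, 4, 5, 1], dtype=np.float32)
flipped_idx = np.argmax(np.flip(row))      # first max in the flipped row
last_idx = row.shape[0] - flipped_idx - 1  # map back to the original order
assert last_idx == 4  # the last occurrence of the max value 9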
Example #23
backend_test.exclude(r'test_mod_[a-z,_]*uint[0-9]+')
backend_test.exclude(r'test_mod_[a-z,_]*int(8|(16))+')

# TF doesn't support most of the attributes of the Resize op;
# test_node.py covers those tests
backend_test.exclude(r'test_resize_[a-z,_]*')

# Range uses Loop in the model test, but the output datatypes are
# all missing in the Loop body attribute
backend_test.exclude(r'test_range_float_type_positive_delta_expanded[a-z,_]*')
backend_test.exclude(r'test_range_int32_type_negative_delta_expanded[a-z,_]*')

# skip all the CumSum test cases because the axis in each test case
# is created as a 1-D, one-element tensor, but the spec clearly states
# that axis should be a 0-D tensor (scalar)
if legacy_opset_pre_ver(13):
    backend_test.exclude(r'test_cumsum_[a-z,_]*')

# TF session run does not support sequence/RaggedTensor as model inputs
backend_test.exclude(r'test_loop13_seq[a-z,_]*')

# TF minimum/maximum do not support uint64 when auto-cast is False (default)
backend_test.exclude(r'test_min_uint64_[a-z,_]*')
backend_test.exclude(r'test_max_uint64_[a-z,_]*')

if legacy_opset_pre_ver(7):
    backend_test.exclude(r'[a-z,_]*Upsample[a-z,_]*')

if 'TRAVIS' in os.environ:
    backend_test.exclude('test_vgg19')
    backend_test.exclude('zfnet512')
Example #24
        model = onnx_model
        tf_rep = prepare(model)
        output_onnx_tf = tf_rep.run(backend_feed_dict)

        assert len(output_tf) == len(output_onnx_tf)
        for tf_output, onnx_backend_output in zip(output_tf, output_onnx_tf):
            np.testing.assert_allclose(tf_output,
                                       onnx_backend_output,
                                       rtol=1e-3,
                                       atol=1e-7)

    return do_test_expected


if legacy_opset_pre_ver(10):
    dir_path = os.path.dirname(os.path.realpath(__file__))
    with open(dir_path + "/test_model.yaml", 'r') as config:
        try:
            for test_model in yaml.safe_load_all(config):
                for device in test_model["devices"]:
                    if supports_device(device):
                        test_method = create_test(test_model)
                        test_name_parts = ["test", test_model["name"], device]
                        test_name = str("_".join(map(str, test_name_parts)))
                        test_method.__name__ = test_name
                        setattr(TestModel, test_method.__name__, test_method)
        except yaml.YAMLError as exception:
            print(exception)

if __name__ == '__main__':
    unittest.main()
Example #25
    def test_if_with_sequence(self):
        if legacy_opset_pre_ver(14):
            raise unittest.SkipTest(
                "ONNX version {} doesn't support helper.make_tensor_sequence_value_info."
                .format(defs.onnx_opset_version()))

        # S = [a]
        # if cond is True
        #   S = [a,b]
        # else
        #   S = [a,c]
        a = np.random.randn(2, 1, 2).astype(np.float32)
        b = np.random.randn(1, 1, 2).astype(np.float32)
        c = np.random.randn(3, 1, 2).astype(np.float32)
        seq_construct_node = helper.make_node('SequenceConstruct', ['a'],
                                              ['S'])
        seq_insert_node1 = helper.make_node('SequenceInsert', ['S', 'b'],
                                            ['Sb'])
        seq_insert_node2 = helper.make_node('SequenceInsert', ['S', 'c'],
                                            ['Sc'])

        a_in = helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,
                                             [2, 1, 2])
        b_in = helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,
                                             [1, 1, 2])
        c_in = helper.make_tensor_value_info('c', onnx.TensorProto.FLOAT,
                                             [3, 1, 2])
        cond_in = helper.make_tensor_value_info('cond', TensorProto.BOOL, [])
        s_in = helper.make_sequence_value_info('S', TensorProto.FLOAT,
                                               [None, None, None, None])

        sb_out = helper.make_sequence_value_info('Sb', TensorProto.FLOAT,
                                                 [None, None, None, None])
        sc_out = helper.make_sequence_value_info('Sc', TensorProto.FLOAT,
                                                 [None, None, None, None])
        s_final_out = helper.make_sequence_value_info('S_final',
                                                      TensorProto.FLOAT,
                                                      [None, None, None, None])

        then_graph = helper.make_graph(nodes=[seq_insert_node1],
                                       name="then_graph",
                                       inputs=[s_in, b_in],
                                       outputs=[sb_out])
        else_graph = helper.make_graph(nodes=[seq_insert_node2],
                                       name="else_graph",
                                       inputs=[s_in, c_in],
                                       outputs=[sc_out])
        if_node = helper.make_node('If', ['cond'], ['S_final'],
                                   then_branch=then_graph,
                                   else_branch=else_graph)

        graph_def = helper.make_graph(nodes=[seq_construct_node, if_node],
                                      name='test_if',
                                      inputs=[a_in, b_in, c_in, cond_in],
                                      outputs=[s_final_out])
        tf_rep = prepare(helper.make_model(graph_def))
        output = tf_rep.run({
            'a': a,
            'b': b,
            'c': c,
            'cond': np.array(True, dtype=bool)
        })
        np.testing.assert_almost_equal(output['S_final'][0], a)
        np.testing.assert_almost_equal(output['S_final'][1], b)
        output = tf_rep.run({
            'a': a,
            'b': b,
            'c': c,
            'cond': np.array(False, dtype=bool)
        })
        np.testing.assert_almost_equal(output['S_final'][0], a)
        np.testing.assert_almost_equal(output['S_final'][1], c)
Example #26
# https://github.com/onnx/onnx/issues/349
backend_test.exclude(r'[a-z,_]*GLU[a-z,_]*')

# TF does not support dilation and strides at the same time:
# it produces "strides > 1 not supported in conjunction with dilation_rate > 1"
backend_test.exclude(r'[a-z,_]*dilated_strided[a-z,_]*')
backend_test.exclude(r'[a-z,_]*Conv2d_dilated[a-z,_]*')

# TF does not have column major max_pool_with_argmax
backend_test.exclude(
    r'[a-z,_]*maxpool_with_argmax_2d_precomputed_strides[a-z,_]*')

# PRelu OnnxBackendPyTorchConvertedModelTest has the wrong dim for broadcasting
backend_test.exclude(r'[a-z,_]*PReLU_[0-9]d_multiparam[a-z,_]*')

if legacy_opset_pre_ver(7):
    backend_test.exclude(r'[a-z,_]*Upsample[a-z,_]*')

if 'TRAVIS' in os.environ:
    backend_test.exclude('test_vgg19')
    backend_test.exclude('zfnet512')

if legacy_onnx_pre_ver(1, 2):
    # The following tests fail by a tiny margin with onnx<1.2:
    backend_test.exclude('test_operator_add_broadcast_cpu')
    backend_test.exclude('test_operator_add_size1_broadcast_cpu')
    backend_test.exclude('test_operator_add_size1_right_broadcast_cpu')
    backend_test.exclude('test_operator_add_size1_singleton_broadcast_cpu')
    backend_test.exclude('test_averagepool_3d_default_cpu')
    # Do not support consumed flag:
    backend_test.exclude('test_batch_normalization')
Example #27
    def test_slice(self):
        # test case 1 with normal inputs
        axes = [0, 1, 2]
        starts = [0, 0, 0]
        ends = [2, 2, 2]

        if legacy_opset_pre_ver(10):
            node_def = helper.make_node("Slice", ["X"], ["S"],
                                        axes=axes,
                                        starts=starts,
                                        ends=ends)
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None])
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        else:
            node_def = helper.make_node("Slice",
                                        ["X", "starts", "ends", "axes"], ["S"])
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None]),
                    helper.make_tensor_value_info("starts", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("ends", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("axes", TensorProto.INT32,
                                                  [None]),
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)

        if legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({"X": x})
            np.testing.assert_almost_equal(output["S"], x[0:2, 0:2, 0:2])
        else:
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({
                "X": x,
                "starts": starts,
                "ends": ends,
                "axes": axes
            })
            np.testing.assert_almost_equal(output["S"], x[0:2, 0:2, 0:2])

        # test case 2 with negative, out-of-bound and default inputs
        axes = [0, 2]
        starts = [0, -7]
        ends = [-8, 20]
        steps = [1, 1]

        if legacy_opset_pre_ver(10):
            node_def = helper.make_node("Slice", ["X"], ["S"],
                                        axes=axes,
                                        starts=starts,
                                        ends=ends)
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None])
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        else:
            node_def = helper.make_node(
                "Slice", ["X", "starts", "ends", "axes", "steps"], ["S"])
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None]),
                    helper.make_tensor_value_info("starts", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("ends", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("axes", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("steps", TensorProto.INT32,
                                                  [None]),
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        if legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({"X": x})
            np.testing.assert_almost_equal(output["S"], x[0:-8, :, -7:20])
        else:
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({
                "X": x,
                "starts": starts,
                "ends": ends,
                "axes": axes,
                "steps": steps
            })
            np.testing.assert_almost_equal(output["S"], x[0:-8, :, -7:20])

        # test case 3 with non-default steps
        axes = [0, 1, 2]
        starts = [0, 0, 0]
        ends = [2, 2, 2]
        steps = [2, -2, -1]

        if not legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            output = tf_rep.run({
                "X": x,
                "starts": starts,
                "ends": ends,
                "axes": axes,
                "steps": steps
            })
            np.testing.assert_almost_equal(output["S"], x[0:2:2, 0:2:-2,
                                                          0:2:-1])
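In test case 3, two of the requested slices run backwards from a start that is not greater than the end, so they select nothing and the expected output has zero-size dimensions. A quick numpy check:

import numpy as np

x = np.arange(1000, dtype=np.float32).reshape(10, 10, 10)
# a negative step with start < end selects nothing on that axis
assert x[0:2:2, 0:2:-2, 0:2:-1].shape == (1, 0, 0)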
Example #28
    def test_matmul_integer(self):
        if legacy_opset_pre_ver(10):
            raise unittest.SkipTest(
                "ONNX version {} doesn't support MatMulInteger.".format(
                    defs.onnx_opset_version()))

        node_def = helper.make_node("MatMulInteger",
                                    ["A", "B", "a_zero_point", "b_zero_point"],
                                    ["Z"])
        # A & B are 3-D tensor and a_zero_point & b_zero_point are scalar
        A = self._get_rnd_int(-20, 20, shape=(2, 3, 4), dtype=np.int8)
        B = self._get_rnd_int(-20, 20, shape=(2, 4, 6), dtype=np.int8)
        a_zero_point = self._get_rnd_int(-20, 20, dtype=np.int8)
        b_zero_point = self._get_rnd_int(-20, 20, dtype=np.int8)
        A_minus_zero_point = np.subtract(A.astype(np.int32),
                                         a_zero_point.astype(np.int32))
        B_minus_zero_point = np.subtract(B.astype(np.int32),
                                         b_zero_point.astype(np.int32))
        z = np.matmul(A_minus_zero_point, B_minus_zero_point)
        graph_def = helper.make_graph(
            [node_def],
            name="test_unknown_shape",
            inputs=[
                helper.make_tensor_value_info("A", TensorProto.INT8,
                                              [None, None, None]),
                helper.make_tensor_value_info("B", TensorProto.INT8,
                                              [None, None, None]),
                helper.make_tensor_value_info("a_zero_point", TensorProto.INT8,
                                              []),
                helper.make_tensor_value_info("b_zero_point", TensorProto.INT8,
                                              [])
            ],
            outputs=[
                helper.make_tensor_value_info("Z", TensorProto.INT32,
                                              [None, None, None])
            ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        output = tf_rep.run({
            "A": A,
            "B": B,
            "a_zero_point": a_zero_point,
            "b_zero_point": b_zero_point
        })
        np.testing.assert_almost_equal(output["Z"], z)
        # A & B are 4-D tensor and a_zero_point & b_zero_point are 1-D tensor
        A = self._get_rnd_int(-20, 20, shape=(2, 5, 3, 4), dtype=np.int8)
        B = self._get_rnd_int(-20, 20, shape=(2, 1, 4, 6), dtype=np.int8)
        a_zero_point = self._get_rnd_int(-20,
                                         20,
                                         shape=(A.shape[-2]),
                                         dtype=np.int8)
        b_zero_point = self._get_rnd_int(-20,
                                         20,
                                         shape=(B.shape[-1]),
                                         dtype=np.int8)
        a_zero_point_with_reshape = np.reshape(a_zero_point, [A.shape[-2], 1])
        A_minus_zero_point = np.subtract(
            A.astype(np.int32), a_zero_point_with_reshape.astype(np.int32))
        B_minus_zero_point = np.subtract(B.astype(np.int32),
                                         b_zero_point.astype(np.int32))
        z = np.matmul(A_minus_zero_point, B_minus_zero_point)
        graph_def = helper.make_graph(
            [node_def],
            name="test_unknown_shape",
            inputs=[
                helper.make_tensor_value_info("A", TensorProto.INT8,
                                              [None, None, None, None]),
                helper.make_tensor_value_info("B", TensorProto.INT8,
                                              [None, None, None, None]),
                helper.make_tensor_value_info("a_zero_point", TensorProto.INT8,
                                              [None]),
                helper.make_tensor_value_info("b_zero_point", TensorProto.INT8,
                                              [None])
            ],
            outputs=[
                helper.make_tensor_value_info("Z", TensorProto.INT32,
                                              [None, None, None, None])
            ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        output = tf_rep.run({
            "A": A,
            "B": B,
            "a_zero_point": a_zero_point,
            "b_zero_point": b_zero_point
        })
        np.testing.assert_almost_equal(output["Z"], z)
Example #29
("test_sqrt", tf.sqrt, "Sqrt", [get_rnd([10, 10])], {}),
("test_squeeze", tf.squeeze, "Squeeze", [get_rnd([1, 1, 10, 10])], {"axis":[0, 1]}),
("test_subtract", tf.subtract, "Sub", [get_rnd([10, 10]), get_rnd([10, 10])], {}),
("test_tanh", tf.tanh, "Tanh", [get_rnd([10, 10])], {}),
("test_top_k", tf.nn.top_k, "TopKV2", [get_rnd([10, 10, 10, 10])], {"k": 3}),
# Use reverse to test ignore_unimplemented
("test_unimplemented", tf.reverse, "ReverseV2", [get_rnd([1, 2, 3, 4]), [3]], {}, {"ignore_unimplemented": True}),
("test_unpack", tf.unstack, "unstack", [get_rnd([2, 3, 4])], {}),
("test_xor", tf.logical_xor, "LogicalXor", [get_rnd([10, 10], dtype=np.bool_), get_rnd([10, 10], dtype=np.bool_)], {}),
("test_transpose", tf.transpose, "transpose", [get_rnd([2, 10])], {"perm":[1, 0]}),
("test_concat", tf.concat, "concat", [[get_rnd([1, 10]),get_rnd([10, 10]),get_rnd([20, 10])], 0], {}),
("test_bias_add_nchw", tf.nn.bias_add, "BiasAdd", [get_rnd([10, 32, 10, 10]),get_rnd([32])], {"data_format":"NCHW"}),
("test_bias_add_nhwc", tf.nn.bias_add, "BiasAdd", [get_rnd([10, 10, 10, 32]),get_rnd([32])], {"data_format":"NHWC"}),
]

if not legacy_opset_pre_ver(6):
    test_cases.append(("test_tile", tf.tile, "Tile", [get_rnd([1, 2, 3, 4]), np.random.randint(1, 10, (4,), dtype=np.int32)], {}))

if not legacy_opset_pre_ver(9):
    test_cases.append(("test_strided_slice", tf.strided_slice, "StridedSlice", [get_rnd([5, 5]), [0, 0], [1, 5], [1, 1]], {}))
    test_cases.append(("test_strided_slice_shrink", tf.strided_slice, "StridedSlice", [get_rnd([5, 5]), [0, 0], [1, 3], [1, 1]], {"shrink_axis_mask":1}))

# yapf: enable

for k, val in enumerate(test_cases):
    test_method = create_test(val)
    test_method.__name__ = str(val[0])
    setattr(TestNode, test_method.__name__, test_method)

if __name__ == '__main__':
    unittest.main()
Example #30
    def test_slice(self):
        # test case 1 with normal inputs
        axes = [0, 1, 2]
        starts = [0, 0, 0]
        ends = [2, 2, 2]

        if legacy_opset_pre_ver(10):
            node_def = helper.make_node("Slice", ["X"], ["S"],
                                        axes=axes,
                                        starts=starts,
                                        ends=ends)
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None])
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        else:
            node_def = helper.make_node("Slice",
                                        ["X", "starts", "ends", "axes"], ["S"])
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None]),
                    helper.make_tensor_value_info("starts", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("ends", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("axes", TensorProto.INT32,
                                                  [None]),
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        # export to tf.saved_model
        model_path = 'test_dynamic_shape/slice'
        tf_rep.export_graph(model_path)
        # load the saved_model back
        tf_model = tf.saved_model.load(model_path)

        if legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            tf_model_output = tf_model(X=x)
            np.testing.assert_almost_equal(tf_model_output[0], x[0:2, 0:2,
                                                                 0:2])
        else:
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            tf_model_output = tf_model(X=x,
                                       starts=starts,
                                       ends=ends,
                                       axes=axes)
            np.testing.assert_almost_equal(tf_model_output[0], x[0:2, 0:2,
                                                                 0:2])

        # test case 2 with negative, out-of-bound and default inputs
        axes = [0, 2]
        starts = [0, -7]
        ends = [-8, 20]
        steps = [1, 1]

        if legacy_opset_pre_ver(10):
            node_def = helper.make_node("Slice", ["X"], ["S"],
                                        axes=axes,
                                        starts=starts,
                                        ends=ends)
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None])
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        else:
            node_def = helper.make_node(
                "Slice", ["X", "starts", "ends", "axes", "steps"], ["S"])
            graph_def = helper.make_graph(
                [node_def],
                name="test_unknown_shape",
                inputs=[
                    helper.make_tensor_value_info("X", TensorProto.FLOAT,
                                                  [None, None, None]),
                    helper.make_tensor_value_info("starts", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("ends", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("axes", TensorProto.INT32,
                                                  [None]),
                    helper.make_tensor_value_info("steps", TensorProto.INT32,
                                                  [None]),
                ],
                outputs=[
                    helper.make_tensor_value_info("S", TensorProto.FLOAT,
                                                  [None, None, None])
                ])
        tf_rep = onnx_graph_to_tensorflow_rep(graph_def)
        # export to tf.saved_model
        model_path = 'test_dynamic_shape/slice'
        tf_rep.export_graph(model_path)
        # load the saved_model back
        tf_model = tf.saved_model.load(model_path)

        if legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            tf_model_output = tf_model(X=x)
            np.testing.assert_almost_equal(tf_model_output[0], x[0:-8, :,
                                                                 -7:20])
        else:
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            tf_model_output = tf_model(X=x,
                                       starts=starts,
                                       ends=ends,
                                       axes=axes,
                                       steps=steps)
            np.testing.assert_almost_equal(tf_model_output[0], x[0:-8, :,
                                                                 -7:20])

        # test case 3 with non-default steps
        axes = [0, 1, 2]
        starts = [0, 0, 0]
        ends = [2, 2, 2]
        steps = [2, -2, -1]

        if not legacy_opset_pre_ver(10):
            x = self._get_rnd_float32(shape=[1000]).reshape([10, 10, 10])
            tf_model_output = tf_model(X=x,
                                       starts=starts,
                                       ends=ends,
                                       axes=axes,
                                       steps=steps)
            np.testing.assert_almost_equal(tf_model_output[0], x[0:2:2, 0:2:-2,
                                                                 0:2:-1])