Example No. 1
 def test_cast(self):
     if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
         test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                       ("INT8", tf.int8), ("UINT16", tf.uint16),
                       ("INT16", tf.int16), ("INT32", tf.int32),
                       ("INT64", tf.int64), ("BOOL", tf.bool),
                       ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                       ("COMPLEX64", tf.complex64),
                       ("COMPLEX128", tf.complex128)]
     else:
         test_cases = [(TensorProto.FLOAT, tf.float32),
                       (TensorProto.UINT8, tf.uint8),
                       (TensorProto.INT8, tf.int8),
                       (TensorProto.UINT16, tf.uint16),
                       (TensorProto.INT16, tf.int16),
                       (TensorProto.INT32, tf.int32),
                       (TensorProto.INT64, tf.int64),
                       (TensorProto.BOOL, tf.bool),
                       (TensorProto.FLOAT16, tf.float16),
                       (TensorProto.DOUBLE, tf.float64),
                       (TensorProto.COMPLEX64, tf.complex64),
                       (TensorProto.COMPLEX128, tf.complex128)]
     for ty, tf_type in test_cases:
         node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
         vector = [2, 3]
         output = run_node(node_def, [vector])
         np.testing.assert_equal(output["output"].dtype, tf_type)
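To see what the Cast test exercises end to end, here is a minimal, self-contained sketch that builds a one-node Cast model and runs it through onnx_tf.backend.prepare (the same entry point used in Example No. 5). The graph name, tensor names, and shapes are illustrative and not taken from the test harness:

import numpy as np
from onnx import helper, TensorProto
from onnx_tf.backend import prepare

# Build a single Cast node that converts an INT32 input to FLOAT16.
node = helper.make_node("Cast", ["input"], ["output"], to=TensorProto.FLOAT16)
graph = helper.make_graph(
    [node],
    "cast_example",
    inputs=[helper.make_tensor_value_info("input", TensorProto.INT32, [2])],
    outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT16, [2])])
model = helper.make_model(graph)

# Convert to TensorFlow and run, as in Example No. 5.
tf_rep = prepare(model)
result = tf_rep.run({"input": np.array([2, 3], dtype=np.int32)})
print(result["output"].dtype)  # expected: float16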
Example No. 2
 def test_tile(self):
   if legacy_onnx_pre_ver(1, 2):
     raise unittest.SkipTest(
         "The current version of ONNX does not record correctly the opset of Tile."
     )
   node_def = helper.make_node("Tile", ["X1", "X2"], ["Z"])
   x = self._get_rnd([3, 5, 5, 3])
   repeats = [1, 1, 2, 1]
   output = run_node(node_def, [x, repeats])
   np.testing.assert_allclose(output["Z"], np.tile(x, repeats), rtol=1e-3)
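For reference, np.tile, which the assertion above compares against, repeats the input along each axis by the corresponding entry of repeats. A tiny worked illustration, unrelated to the test data above:

import numpy as np

x = np.array([[1, 2],
              [3, 4]])
print(np.tile(x, [1, 2]))
# [[1 2 1 2]
#  [3 4 3 4]]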
Example No. 3
 def test_convert_to_tf(self):
     if legacy_onnx_pre_ver(1, 2, 1):
         raise unittest.SkipTest(
             "The current version of ONNX uses dead model link.")
     for model_name, url in _ONNX_MODELS:
         model_dir = self.prepare_model(model_name, url)
         subprocess.check_call([
             "onnx-tf",
             "convert",
             "-i",
             os.path.join(model_dir, '{}.onnx'.format(model_name)),
             "-o",
             os.path.join(model_dir, '{}.pb'.format(model_name)),
         ])
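The subprocess call above simply drives the onnx-tf command-line converter. As a rough sketch, the same conversion can be done programmatically; the file paths below are placeholders (the test derives them from model_dir and model_name), and export_graph is assumed to be available on the prepared representation:

import onnx
from onnx_tf.backend import prepare

model = onnx.load("model.onnx")   # load the ONNX model from disk
tf_rep = prepare(model)           # build the TensorFlow representation
tf_rep.export_graph("model.pb")   # write the TensorFlow graph to disk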
Example No. 4
 def prepare_model(model_name, url):
     if legacy_onnx_pre_ver(1, 5, 0):
         prepare_model_data = Runner._prepare_model_data
     else:
         prepare_model_data = Runner.prepare_model_data
     if IS_PYTHON3:
         params = list(
             inspect.signature(prepare_model_data).parameters.keys())
     else:
         params = inspect.getargspec(prepare_model_data).args
     runner_class = Runner
     if params[0] == "self":
         runner_class = Runner(TensorflowBackend)
         if legacy_onnx_pre_ver(1, 5, 0):
             prepare_model_data = runner_class._prepare_model_data
         else:
             prepare_model_data = runner_class.prepare_model_data
     if legacy_onnx_pre_ver(1, 4, 0):
         tc = TestCase(name="test_{}".format(model_name),
                       model_name=model_name,
                       url=url,
                       model_dir=None,
                       model=None,
                       data_sets=None,
                       kind='real')
     else:
         tc = TestCase(name="test_{}".format(model_name),
                       model_name=model_name,
                       url=url,
                       model_dir=None,
                       model=None,
                       data_sets=None,
                       kind='real',
                       rtol=1e-3,
                       atol=1e-7)
     return prepare_model_data(model_test=tc)
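Hypothetical usage, mirroring how Example No. 3 calls this helper: the model name and URL below are placeholders rather than entries from the real _ONNX_MODELS list, and prepare_model is assumed to be callable as the plain function defined above.

import os

# Placeholder model name and URL for illustration only.
model_dir = prepare_model("some_model", "https://example.com/some_model.tar.gz")
model_path = os.path.join(model_dir, "some_model.onnx")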
Example No. 5
    def test_initializer(self):
        if legacy_onnx_pre_ver(1, 2):
            raise unittest.SkipTest(
                "The current version of ONNX does not record correctly the opset of Cast."
            )
        X = np.array([[1, 2], [3, 4]]).astype(np.float32)
        Y = np.array([[1, 2], [3, 4]]).astype(np.float32)
        weight = np.array([[1, 0], [0, 1]])
        graph_def = helper.make_graph(
            [
                helper.make_node("Add", ["X", "Y"], ["Z0"]),
                helper.make_node("Cast", ["Z0"], ["Z"], to=TensorProto.FLOAT),
                helper.make_node("Mul", ["Z", "weight"], ["W"]),
                helper.make_node("Tanh", ["W"], ["W1"]),
                helper.make_node("Sigmoid", ["W1"], ["W2"])
            ],
            name="test_initializer",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 2)),
                helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2)),
                helper.make_tensor_value_info("weight", TensorProto.FLOAT,
                                              (2, 2)),
            ],
            outputs=[
                helper.make_tensor_value_info("W2", TensorProto.FLOAT, (2, 2))
            ],
            initializer=[
                helper.make_tensor("weight", TensorProto.FLOAT, [2, 2],
                                   weight.flatten().astype(float))
            ])

        def sigmoid(x):
            return 1 / (1 + np.exp(-x))

        W_ref = sigmoid(np.tanh((X + Y) * weight))
        tf_rep = prepare(helper.make_model(graph_def))
        output = tf_rep.run({"X": X, "Y": Y})
        np.testing.assert_almost_equal(output["W2"], W_ref)
# TF session run does not support sequence/RaggedTensor as model inputs
backend_test.exclude(r'test_loop13_seq[a-z,_]*')

# TF minimum/maximum do not support uint64 when auto-cast is False (default)
backend_test.exclude(r'test_min_uint64_[a-z,_]*')
backend_test.exclude(r'test_max_uint64_[a-z,_]*')

if legacy_opset_pre_ver(7):
    backend_test.exclude(r'[a-z,_]*Upsample[a-z,_]*')

if 'TRAVIS' in os.environ:
    backend_test.exclude('test_vgg19')
    backend_test.exclude('zfnet512')

if legacy_onnx_pre_ver(1, 2):
    # The following tests fail by a tiny margin with onnx<1.2:
    backend_test.exclude('test_operator_add_broadcast_cpu')
    backend_test.exclude('test_operator_add_size1_broadcast_cpu')
    backend_test.exclude('test_operator_add_size1_right_broadcast_cpu')
    backend_test.exclude('test_operator_add_size1_singleton_broadcast_cpu')
    backend_test.exclude('test_averagepool_3d_default_cpu')
    # The consumed flag is not supported:
    backend_test.exclude('test_batch_normalization')
    # RNN testing is not supported on onnx<1.2 due to incorrect tests:
    backend_test.exclude(r'test_operator_rnn_cpu')
    backend_test.exclude(r'test_operator_lstm_cpu')
    backend_test.exclude(r'test_operator_rnn_single_layer_cpu')

# The ONNX test for cast from float to string does not work
if not legacy_opset_pre_ver(9):