def test_compile_with_explicit_signature(self):
        with tf.Graph().as_default(), tf.compat.v1.Session().as_default(
        ) as session:
            input_x = tf.compat.v1.placeholder(dtype=tf.float32,
                                               shape=[3, 4],
                                               name='x')
            input_y = tf.compat.v1.placeholder(dtype=tf.float32,
                                               shape=[3, 4],
                                               name='y')
            output_z = tf.add(input_x, input_y, name='z')

        compiled = compiler.compile_source(source=TensorFlowModel(
            inputs=[Input(tensor=input_x),
                    Input(tensor=input_y)],
            outputs=[output_z],
            session=session),
                                           config=Config(
                                               input_signature=['foo', 'bar'],
                                               output_signature=['baz']))

        self.assertEqual(len(compiled.inputs), 2)
        self.assertEqual(compiled.inputs[0].name, 'foo')
        self.assertEqual(compiled.inputs[0].tensor, input_x)
        self.assertIsNone(compiled.inputs[0].data_format)
        self.assertEqual(compiled.inputs[1].name, 'bar')
        self.assertEqual(compiled.inputs[1].tensor, input_y)
        self.assertIsNone(compiled.inputs[1].data_format)

        self.assertEqual(len(compiled.outputs), 1)
        self.assertEqual(compiled.outputs[0].name, 'baz')
        self.assertIs(compiled.outputs[0].tensor, output_z)

        self.assertIs(compiled.session, session)
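
# Another snippet in this collection builds its compiler config with
# config_type.from_json(...); assuming this Config class follows the same
# convention (an assumption, not confirmed by the test above), the explicit
# signatures could equivalently be supplied as JSON-style data:
explicit_config = Config.from_json({'input_signature': ['foo', 'bar'],
                                    'output_signature': ['baz']})
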
def _make_frozen_graph_model(func):
    with eager_context.graph_mode(), tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
        input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='y')
        output_z = func(input_x, input_y, session)

    return frozen_graph_compiler.compile_source(
        source=TensorFlowModel(inputs=[Input(tensor=input_x), Input(tensor=input_y)],
                               outputs=[output_z],
                               session=session)
    )
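
# The helper above delegates freezing to the project's frozen_graph_compiler.
# As a rough illustration of what "freezing" means, the standard TensorFlow API
# tf.compat.v1.graph_util.convert_variables_to_constants bakes variables into
# constants so the resulting GraphDef is self-contained. This is only a sketch
# (assuming `import tensorflow as tf`), not the project's actual implementation.
def _freeze_graph_sketch():
    with tf.Graph().as_default(), tf.compat.v1.Session() as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
        weight = tf.Variable(initial_value=1.5, dtype=tf.float32)
        tf.multiply(input_x, weight, name='z')

        session.run(weight.initializer)

        # Replace variable reads with constants and prune the graph to the 'z' output.
        return tf.compat.v1.graph_util.convert_variables_to_constants(
            session, session.graph_def, output_node_names=['z'])
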
def _make_onnx_model(func, batch_size_1, batch_size_2):
    with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[batch_size_1, 4], name='x')
        input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[batch_size_2, 4], name='y')
        output_z = func(input_x, input_y, session)

    frozen_graph_model = frozen_graph_compiler.compile_source(
        source=TensorFlowModel(inputs=[TfInput(tensor=input_x), TfInput(tensor=input_y)],
                               outputs=[output_z],
                               session=session)
    )

    return onnx_compiler.compile_source(frozen_graph_model)
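
# For reference, turning a frozen GraphDef into ONNX is commonly done with the
# tf2onnx package; the call below is an assumed, illustrative equivalent of an
# ONNX compilation step, not the project's confirmed onnx_compiler internals.
def _graph_def_to_onnx_sketch(frozen_graph_def):
    from tf2onnx import convert  # requires the tf2onnx package

    model_proto, _ = convert.from_graph_def(frozen_graph_def,
                                            input_names=['x:0', 'y:0'],
                                            output_names=['z:0'],
                                            opset=13)

    return model_proto
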
def _make_tensorrt_model() -> TensorRTModel:
    with tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 4], name='x')
        input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 4], name='y')
        weight = tf.Variable(initial_value=[2.0, 3.0, 4.0, 5.0], dtype=tf.float32)
        output_z = tf.add(input_x + input_y, weight, name='z')

        session.run(weight.initializer)

    compiler, config_type = compiler_repository.REPOSITORY.get(TensorFlowModel, TensorRTModel)

    return compiler(source=TensorFlowModel(inputs=[TfInput(tensor=input_x), TfInput(tensor=input_y)],
                                           outputs=[output_z],
                                           session=session),
                    config=config_type.from_json({'max_batch_size': 4}))
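
# The REPOSITORY.get(TensorFlowModel, TensorRTModel) call above implies a
# registry keyed by (source type, target type) that yields a compiler function
# and its config class. A minimal, purely hypothetical sketch of that pattern
# (not the project's actual implementation) might look like:
class _CompilerRegistrySketch:
    def __init__(self):
        self._compilers = {}

    def register(self, source_type, target_type, compiler_func, config_type):
        self._compilers[source_type, target_type] = (compiler_func, config_type)

    def get(self, source_type, target_type):
        return self._compilers[source_type, target_type]
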
def _make_onnx_model():
    with tf.Graph().as_default(), tf.compat.v1.Session().as_default(
    ) as session:
        input_x = tf.compat.v1.placeholder(dtype=tf.float32,
                                           shape=[3, 4],
                                           name='x')
        input_y = tf.compat.v1.placeholder(dtype=tf.float32,
                                           shape=[3, 4],
                                           name='y')
        weight = tf.Variable(initial_value=4.2, dtype=tf.float32)
        output_z = tf.multiply(input_x + input_y, weight, name='z')

        session.run(weight.initializer)

    frozen_graph_model = tf_model_compiler.compile_source(
        source=TensorFlowModel(
            inputs=[Input(
                tensor=input_x), Input(tensor=input_y)],
            outputs=[output_z],
            session=session))

    return frozen_graph_compiler.compile_source(frozen_graph_model)
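
# Quick sanity check of the graph the helper above compiles: z = (x + y) * weight,
# so with weight = 4.2 and all-ones inputs every element of z should be 8.4.
# This sketch only uses plain TensorFlow and mirrors the helper's graph.
def _expected_output_sketch():
    with tf.Graph().as_default(), tf.compat.v1.Session() as session:
        x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
        y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='y')
        weight = tf.Variable(initial_value=4.2, dtype=tf.float32)
        z = tf.multiply(x + y, weight, name='z')

        session.run(weight.initializer)

        ones = [[1.0] * 4] * 3

        return session.run(z, feed_dict={x: ones, y: ones})  # ~8.4 everywhere
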
def test_compile_simple(self):
        with eager_context.graph_mode(), tf.Graph().as_default(), tf.compat.v1.Session().as_default() as session:
            input_x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
            input_y = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='y')
            output_z = tf.add(input_x, input_y, name='z')

        compiled = compiler.compile_source(source=TensorFlowModel(inputs=[Input(tensor=input_x), Input(tensor=input_y)],
                                                                  outputs=[output_z],
                                                                  session=session),
                                           config=Config())

        self.assertEqual(len(compiled.inputs), 2)
        self.assertEqual(compiled.inputs[0].name, 'x:0')
        self.assertEqual(compiled.inputs[0].tensor, input_x)
        self.assertIsNone(compiled.inputs[0].data_format)
        self.assertEqual(compiled.inputs[1].name, 'y:0')
        self.assertEqual(compiled.inputs[1].tensor, input_y)
        self.assertIsNone(compiled.inputs[1].data_format)

        self.assertEqual(len(compiled.outputs), 1)
        self.assertEqual(compiled.outputs[0].name, 'z:0')
        self.assertIs(compiled.outputs[0].tensor, output_z)

        self.assertIs(compiled.session, session)
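
# The default names asserted above ('x:0', 'z:0') are just TensorFlow's tensor
# naming scheme: a tensor is named '<op name>:<output index>', so the first
# output of the op named 'x' is 'x:0'. A quick self-contained illustration:
def _tensor_naming_sketch():
    with tf.Graph().as_default():
        x = tf.compat.v1.placeholder(dtype=tf.float32, shape=[3, 4], name='x')
        z = tf.add(x, x, name='z')

    assert x.name == 'x:0' and z.name == 'z:0'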