Example #1
    def create_model():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                for _ in range(2):
                    x = tf.nn.max_pool2d(x, (1, 1), (1, 1), "SAME", "NHWC")
                return x

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        return converter.convert()
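All of these snippets configure the TFLite converter for full int8 quantization, so a quick sanity check is to feed the returned flatbuffer to tf.lite.Interpreter and confirm the integer input/output types. A minimal sketch, assuming the closure variable ifm_shape is bound to e.g. [1, 8, 8, 3] (that value is an assumption, not taken from the original test):

    import numpy as np
    import tensorflow as tf

    ifm_shape = [1, 8, 8, 3]                     # assumed example value
    tflite_buf = create_model()                  # helper from the example above

    interpreter = tf.lite.Interpreter(model_content=tflite_buf)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]
    assert input_details["dtype"] == np.int8     # int8 I/O after quantization

    data = np.random.randint(-128, 128, size=ifm_shape, dtype=np.int8)
    interpreter.set_tensor(input_details["index"], data)
    interpreter.invoke()
    print(interpreter.get_tensor(output_details["index"]).dtype)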
Example #2
    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                if pooling_type == "MAX":
                    op = tf.nn.max_pool(x, pool_shape, strides, padding)
                elif pooling_type == "AVG":
                    op = tf.nn.avg_pool(x, pool_shape, strides, padding)
                if activation_function == "RELU":
                    op = tf.nn.relu(op)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #3
    def create_tflite_graph():
        tf.config.run_functions_eagerly(True)

        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                return tf.nn.max_pool(x, [1, 2], [1, 2], "SAME")

        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple([1, 3, 4, 3]))
                yield [data.astype(np.float32)]

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec([1, 3, 4, 3], dtype=tf.float32)
        )

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #4
    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def depthwise_conv2d(self, x):
                weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                # The strides passed to the TensorFlow API need to be of shape 1x4
                tf_strides = [1, strides[0], strides[1], 1]
                op = tf.nn.depthwise_conv2d(
                    x, weight, strides=tf_strides, padding=padding, dilations=dilation
                )
                if activation:
                    op = tf.nn.relu(op)
                return op

        model = Model()
        concrete_func = model.depthwise_conv2d.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #5
    def create_mod_from_tflite():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_graph = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

        mod, _ = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"ifm": ifm_shape},
            dtype_dict={"ifm": dtype},
        )
        input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
        return mod, input_data, output_data
Example #6
    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def abs_func(self, x):
                if operator_type == "ABS":
                    op = tf.math.abs(x)
                return op

        model = Model()

        # Get a concrete function from the model
        concrete_func = model.abs_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #7
    def create_tflite_graph_two_outs():
        """Create a model with 2 output tensors"""
        class Model(tf.Module):
            """Simple TFLite test model"""
            @tf.function
            def tf_function(self, tf_input_x):
                """Single TFLite function with two convolutions"""
                tf_strides = [1, strides[0], strides[1], 1]
                filter_shape = [kernel_shape[0], kernel_shape[1], 3, 3]
                filter1 = tf.constant(
                    np.arange(np.prod(filter_shape)).reshape(filter_shape),
                    dtype=tf.float32,
                )
                first_conv2d = tf.nn.conv2d(
                    tf_input_x,
                    filters=filter1,
                    strides=tf_strides,
                    padding=padding,
                    dilations=dilation,
                )
                first_conv2d = tf.nn.relu(first_conv2d)

                filter2 = tf.constant(
                    1000 +
                    np.arange(np.prod(filter_shape)).reshape(filter_shape),
                    dtype=tf.float32,
                )
                second_conv2d = tf.nn.conv2d(
                    tf_input_x,
                    filters=filter2,
                    strides=strides,
                    padding=padding,
                    data_format="NHWC",
                    dilations=dilation,
                )
                second_conv2d = tf.nn.relu(second_conv2d)
                return first_conv2d, second_conv2d

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #8
    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def tf_func(self, x):
                weight_shape = (3, 3, ifm_shape[3], 4)
                weight = tf.constant(np.random.uniform(low=0,
                                                       high=0.3,
                                                       size=weight_shape),
                                     dtype=tf.float32)
                # The strides passed to the TensorFlow API need to be of shape 1x4
                op = tf.nn.conv2d(x,
                                  weight,
                                  strides=(1, 2, 2, 1),
                                  padding="SAME",
                                  dilations=(1, 1))
                op = tf.nn.tanh(op)
                op = tf.nn.tanh(op)

                weight_shape2 = (2, 3, 4, 1)
                weight2 = tf.constant(np.random.uniform(low=0,
                                                        high=0.3,
                                                        size=weight_shape2),
                                      dtype=tf.float32)
                op = tf.nn.depthwise_conv2d(op,
                                            weight2,
                                            strides=(1, 1, 1, 1),
                                            padding="VALID",
                                            dilations=(2, 2))
                op = tf.nn.sigmoid(op)
                op = tf.nn.max_pool(op, (1, 1),
                                    strides=(1, 1, 1, 1),
                                    padding="SAME")
                op = tf.nn.tanh(op)
                return op

        model = Model()
        concrete_func = model.tf_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = 0.7 * np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #9
    def create_tflite_graph_two_outs():
        """Create a model with 2 output tensors"""
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                # Use the tf.nn API to create the model
                tf_strides = [1, strides[0], strides[1], 1]
                op = tf.nn.conv2d(
                    x,
                    filters=tf.constant(
                        np.random.uniform(
                            size=[kernel_shape[0], kernel_shape[1], 3, 3]),
                        dtype=tf.float32,
                    ),
                    strides=tf_strides,
                    padding=padding,
                    dilations=dilation,
                )
                op = tf.nn.relu(op)
                # Second convolution
                op2 = tf.nn.conv2d(
                    x,
                    filters=tf.constant(
                        np.random.uniform(size=(kernel_shape[0],
                                                kernel_shape[1], 3, 3)),
                        dtype=tf.float32,
                    ),
                    strides=strides,
                    padding=padding,
                    data_format="NHWC",
                    dilations=dilation,
                )
                op2 = tf.nn.relu(op2)
                return op, op2

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #10
def create_conv2d_tflite_model(ifm_shape, kernel_shape, strides, dilation,
                               padding, activation):
    """ This method prepares TFlite graph with a single Conv2d layer """
    import tensorflow as tf

    class Model(tf.Module):
        @tf.function
        def tf_function(self, x):
            # Use the tf.nn API to create the model
            tf_strides = [1, strides[0], strides[1], 1]
            op = tf.nn.conv2d(
                x,
                filters=tf.constant(
                    np.random.uniform(
                        size=[kernel_shape[0], kernel_shape[1], 3, 3]),
                    dtype=tf.float32,
                ),
                strides=tf_strides,
                padding=padding,
                dilations=dilation,
            )
            if activation:
                op = tf.nn.relu(op)
            return op

    model = Model()
    concrete_func = model.tf_function.get_concrete_function(
        tf.TensorSpec(ifm_shape, dtype=tf.float32))

    def representative_dataset():
        for _ in range(100):
            data = np.random.rand(*tuple(ifm_shape))
            yield [data.astype(np.float32)]

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [concrete_func])
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
    tflite_model = converter.convert()
    return tflite_model
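Unlike the other snippets, the helper in Example #10 takes its shape and convolution parameters explicitly, so it can be called directly. A minimal usage sketch with assumed argument values (the input must have 3 channels because the last two filter dimensions are hard-coded to 3; the output file name is illustrative):

    tflite_model = create_conv2d_tflite_model(
        ifm_shape=(1, 16, 16, 3),   # NHWC input; 3 channels to match the filter
        kernel_shape=(3, 3),
        strides=(1, 1),
        dilation=(1, 1),
        padding="SAME",
        activation=True,
    )
    # converter.convert() returns the serialized flatbuffer bytes
    with open("conv2d_int8.tflite", "wb") as f:
        f.write(tflite_model)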
Example #11
    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, lhs, rhs):
                if operator_type == "ADD":
                    op = tf.math.add(lhs, rhs)
                elif operator_type == "SUB":
                    op = tf.math.subtract(lhs, rhs)
                elif operator_type == "MUL":
                    op = tf.math.multiply(lhs, rhs)
                elif operator_type == "MIN":
                    op = tf.math.minimum(lhs, rhs)
                elif operator_type == "MAX":
                    op = tf.math.maximum(lhs, rhs)
                if activation_function == "RELU":
                    op = tf.nn.relu(op)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32),
            tf.TensorSpec(ifm2_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                data2 = np.random.rand(*tuple(ifm2_shape)) * 2
                yield [data.astype(np.float32), data2.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #12
    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def model_func(self, x):
                weight_shape = [3, 3, 6, 1]  # [filter_height, filter_width, in_channels, channel_multiplier]
                weight = tf.constant(np.random.uniform(size=weight_shape),
                                     dtype=tf.float32)
                op = tf.nn.depthwise_conv2d(x,
                                            weight,
                                            strides=[1, 1, 1, 1],
                                            padding="SAME")
                op = tf.nn.relu(op)
                op = tf.reshape(op, [1, 8, 6, 3])
                op = tf.nn.pool(op, [2, 2], "MAX")
                op = tf.strided_slice(op, [0, 2, 3, 1], [1, 6, 5, 2])
                return op

        model = Model()
        concrete_func = model.model_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
Example #13
    def create_model():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                for _ in range(3):
                    x = tf.nn.conv2d(
                        x,
                        filters=tf.constant(
                            np.random.uniform(size=kernel_shape),
                            dtype=tf.float32),
                        strides=(1, 1),
                        padding="SAME",
                        data_format="NHWC",
                        dilations=1,
                    )
                return x

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions(
            [concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        return converter.convert()
Example #14
def device_api_main_func():
    # Ideally we should have a sample Target registered here
    # but we're going to re-use this for now
    pytest.importorskip("ethosu.vela")
    import tensorflow as tf
    import tflite.Model

    from tests.python.contrib.test_ethosu.infra import create_test_runner, generate_ref_data_tflite
    from tvm.relay.op.contrib.ethosu import partition_for_ethosu

    tf.config.run_functions_eagerly(True)

    class Model(tf.Module):
        @tf.function
        def tf_function(self, x):
            return tf.nn.max_pool(x, [1, 2], [1, 2], "SAME")

    def representative_dataset():
        for _ in range(100):
            data = np.random.rand(1, 3, 4, 3)
            yield [data.astype(np.float32)]

    model = Model()
    concrete_func = model.tf_function.get_concrete_function(
        tf.TensorSpec([1, 3, 4, 3], dtype=tf.float32))

    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [concrete_func])
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8

    tflite_graph = converter.convert()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    relay_module, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"x": [1, 3, 4, 3]},
        dtype_dict={"x": "int8"},
    )
    mod = partition_for_ethosu(relay_module, params)

    # Generate reference data
    input_data, output_data = generate_ref_data_tflite(tflite_graph)

    def compile_to_main_func(interface_api="c", use_unpacked_api=True):
        test_runner = create_test_runner()
        compiled_models = compile_models(
            models=AOTTestModel(
                module=mod,
                inputs=input_data,
                outputs=output_data,
            ),
            interface_api=interface_api,
            use_unpacked_api=use_unpacked_api,
            workspace_byte_alignment=16,
            pass_config=test_runner.pass_config,
        )
        main_ir_module = compiled_models[0].executor_factory.lowered_ir_mods.items()[0][1]
        main_func = main_ir_module["__tvm_main__"]
        return main_func

    return compile_to_main_func
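As written, device_api_main_func returns the inner compile_to_main_func helper rather than the lowered function itself, so a caller would obtain the TIR main function roughly as follows (the argument values simply restate the defaults):

    compile_to_main_func = device_api_main_func()
    main_func = compile_to_main_func(interface_api="c", use_unpacked_api=True)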