    def save_mlir(self):
        manager = tf.train.CheckpointManager(self.checkpoint,
                                             directory=self.check_dir,
                                             max_to_keep=1)  # max_to_keep is required; any value works for a restore-only manager
        self.checkpoint.restore(manager.latest_checkpoint)

        # The dtype belongs inside the TensorSpec, not as a second argument
        # to get_concrete_function.
        fff = tf.function(self.model).get_concrete_function(
            tf.TensorSpec([None, 3920], tf.float32))
        frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(fff)

        input_tensors = [
            tensor for tensor in frozen_func.inputs
            if tensor.dtype != tf.resource
        ]

        output_tensors = frozen_func.outputs
        graph_def = run_graph_optimizations(
            graph_def,
            input_tensors,
            output_tensors,
            config=get_grappler_config(['pruning', 'function', 'constfold', 'shape', 'remap',
                                        'memory', 'common_subgraph_elimination', 'arithmetic',
                                        'loop', 'dependency', 'debug_stripper']),
            graph=frozen_func.graph)

        tf_mlir_graph = tf.mlir.experimental.convert_graph_def(graph_def)
        outfile = open("./result/kws.mlir", 'wb')
        outfile.write(tf_mlir_graph.encode())
        outfile.close()
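
All of these snippets lean on the same TensorFlow-internal helpers. Example #5 below spells the imports out inline; as a minimal sketch (TF 2.x paths; these are private API and may move between releases):

import tensorflow as tf
from tensorflow.lite.python.util import run_graph_optimizations, get_grappler_config
from tensorflow.python.framework.convert_to_constants import (
    convert_variables_to_constants_v2,
    convert_variables_to_constants_v2_as_graph)
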
def convert_saved_model_to_graph_def(saved_model_dir_path, model_output_dir_path, signature_name):
    imported = tf.saved_model.load(saved_model_dir_path)
    f = imported.signatures[signature_name]
    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(f, lower_control_flow=False)

    input_tensors = [tensor for tensor in frozen_func.inputs if tensor.dtype != tf.resource]
    output_tensors = frozen_func.outputs

    input_tensor_names = [tensor.name for tensor in frozen_func.inputs if tensor.dtype != tf.resource]
    output_tensor_names = [output.name for output in frozen_func.outputs]

    print('input_tensor_names:', input_tensor_names)
    print('output_tensor_names:', output_tensor_names)

    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold", "function"]),
        graph=frozen_func.graph)

    tf.io.write_graph(graph_or_graph_def=graph_def,
                      logdir=model_output_dir_path,
                      name='model_v2.pb',
                      as_text=False)
    
    print(f'Output model_v2.pb to {model_output_dir_path}/model_v2.pb')
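
A hedged usage sketch for the function above; both paths are placeholders, and 'serving_default' is the customary SavedModel signature key rather than anything this example mandates:

convert_saved_model_to_graph_def(
    saved_model_dir_path='./saved_model',   # hypothetical input path
    model_output_dir_path='./frozen',       # hypothetical output directory
    signature_name='serving_default')       # typical default signature key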
Example #3
def convert_keras_model(model):
    """Converts a Keras model to TFLite flatbuffer.

    Returns:
      The converted data in serialized format.
    """
    if not tf.executing_eagerly():
        raise RuntimeError(
            "Graph mode is not supported. Please enable eager execution using "
            "tf.enable_eager_execution() when using TensorFlow 1.x")
    func = concrete_function_from_keras_model(model)
    if version.parse(tf.__version__) >= version.parse("1.15"):
        frozen_func = convert_variables_to_constants_v2(
            func, lower_control_flow=False)
    else:
        frozen_func = convert_variables_to_constants_v2(func)
    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.dtypes.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = frozen_func.graph.as_graph_def()
    # Run constant folding with grappler, since folding is not currently
    # implemented for LCE custom ops.
    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold"]),
        graph=frozen_func.graph,
    )

    # Check the dimensions of the input tensors.
    for tensor in input_tensors:
        # Note that shape_list might be empty for scalar shapes.
        shape_list = tensor.shape.as_list()
        if None in shape_list[1:]:
            raise ValueError(
                "None is only supported in the 1st dimension. Tensor '{0}' has "
                "invalid shape '{1}'.".format(get_tensor_name(tensor),
                                              shape_list))
        elif shape_list and shape_list[0] is None:
            # Set the batch size to 1 if undefined.
            shape = tensor.shape.as_list()
            shape[0] = 1
            tensor.set_shape(shape)

    return convert_graphdef_to_tflite_flatbuffer(
        graph_def.SerializeToString(),
        [get_tensor_name(tensor) for tensor in input_tensors],
        [
            DataType.Name(tensor.dtype.as_datatype_enum)
            for tensor in input_tensors
        ],
        [tensor.shape.as_list() for tensor in input_tensors],
        [get_tensor_name(tensor) for tensor in output_tensors],
    )
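
A hedged usage sketch for convert_keras_model, assuming the converter above is importable; the throwaway Keras model and output path are placeholders:

model = tf.keras.Sequential([
    tf.keras.layers.Dense(12, activation='softmax', input_shape=(3920,))])
flatbuffer_bytes = convert_keras_model(model)  # serialized TFLite flatbuffer
with open('model.tflite', 'wb') as f:          # hypothetical output path
    f.write(flatbuffer_bytes)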
def main(_):
    if FLAGS.quantize:
        try:
            _ = tf.contrib
        except AttributeError as e:
            msg = e.args[0]
            msg += ('\n\n The --quantize option still requires contrib, which is not '
                    'part of TensorFlow 2.0. Please install a previous version:'
                    '\n    `pip install tensorflow<=1.15`')
            e.args = (msg,)
            raise e

    # Create the model and load its weights.
    sess = tf.compat.v1.InteractiveSession()
    input_tensor, output_tensor = create_inference_graph(
        FLAGS.wanted_words, FLAGS.sample_rate, FLAGS.clip_duration_ms,
        FLAGS.clip_stride_ms, FLAGS.window_size_ms, FLAGS.window_stride_ms,
        FLAGS.feature_bin_count, FLAGS.model_architecture, FLAGS.preprocess)
    if FLAGS.quantize:
        tf.contrib.quantize.create_eval_graph()

    models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)

    # Turn all the variables into inline constants inside the graph and save it.
    frozen_graph_def = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ['labels_softmax'])

    # Also convert the frozen model to MLIR. Caveat: tf.function needs a
    # callable that runs the network (the `models` module is not one), so
    # substitute your model function here; TensorSpec uses None, not -1,
    # for the dynamic batch dimension.
    fff = tf.function(models).get_concrete_function(
        tf.TensorSpec([None, models['fingerprint_size']], tf.float32))

    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(fff)

    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(
            ['pruning', 'function', 'constfold', 'shape', 'remap', 'memory', 'common_subgraph_elimination',
             'arithmetic', 'loop', 'dependency', 'debug_stripper']),
        graph=frozen_func.graph)

    tf_mlir_graph = tf.mlir.experimental.convert_graph_def(graph_def)

    with open('pix2pix.mlir', 'wb') as outfile:
        outfile.write(tf_mlir_graph.encode())
Example #5
def main(_):
    """Export model to MLIR."""
    config = get_config(FLAGS.model_name, FLAGS.dataset_cfg, FLAGS.hparam_str)
    model = effnetv2_model.EffNetV2Model(FLAGS.model_name, config.model)
    # Use call (not build) to match the name scope: tensorflow issues/29576
    model(tf.ones([1, 224, 224, 3]), False)
    if FLAGS.model_dir:
        ckpt = FLAGS.model_dir
        if tf.io.gfile.isdir(ckpt):
            ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
        utils.restore_tf2_ckpt(model,
                               ckpt,
                               exclude_layers=('_head', 'optimizer'))
    model.summary()

    from tensorflow.lite.python.util import run_graph_optimizations, get_grappler_config
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2_as_graph

    fff = tf.function(model).get_concrete_function(
        tf.TensorSpec([1, 224, 224, 3], tf.float32))

    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(fff)

    input_tensors = [
        tensor for tensor in frozen_func.inputs if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = run_graph_optimizations(graph_def,
                                        input_tensors,
                                        output_tensors,
                                        config=get_grappler_config([
                                            'pruning', 'function', 'constfold',
                                            'shape', 'remap', 'memory',
                                            'common_subgraph_elimination',
                                            'arithmetic', 'loop', 'dependency',
                                            'debug_stripper'
                                        ]),
                                        graph=frozen_func.graph)

    tf_mlir_graph = tf.mlir.experimental.convert_graph_def(graph_def)

    print('Exporting model to {}.mlir'.format(FLAGS.model_name))
    export_dir = FLAGS.export_dir or '.'
    os.makedirs(export_dir, exist_ok=True)
    with open('{}/{}.mlir'.format(export_dir, FLAGS.model_name),
              'wb') as outfile:
        outfile.write(tf_mlir_graph.encode())
Example #6
def frozen_keras_graph(func_model):
    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(
        func_model)

    input_tensors = [
        tensor for tensor in frozen_func.inputs if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs
    graph_def = run_graph_optimizations(graph_def,
                                        input_tensors,
                                        output_tensors,
                                        config=get_grappler_config(
                                            ["constfold", "function"]),
                                        graph=frozen_func.graph)

    return graph_def
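
A hedged usage sketch: trace a concrete function from a Keras model (`model` is hypothetical here) and write the optimized GraphDef to disk, mirroring the tf.io.write_graph calls in the surrounding examples:

func = tf.function(model).get_concrete_function(
    tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
graph_def = frozen_keras_graph(func)
tf.io.write_graph(graph_def, './frozen_graph', 'model.pb', as_text=False)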
Example #7
    def to_proto(self):
        self.assert_to_proto_paths()
        utils.dump2json(obj=vars(self.args),
                        fp=os.path.join(self.checkpoints_config,
                                        'to_proto_cmd.json'))

        model = self.create_model(trainable=False)
        self.build_model(model, input_shape=self.input_shape)
        model.load_weights(self.checkpoints_restore)
        print("[*] Successfully loaded weights!")

        # Configure model input
        input_dtype = tf.float32
        compiled_model = model.get_keras_model(
            tf.keras.Input(shape=self.input_shape[1:],
                           batch_size=1,
                           name="low"))
        compiled_model.summary()

        # Freezing model weights, obtaining graph
        infer_func = tf.function(compiled_model)
        infer_func = infer_func.get_concrete_function(
            tf.TensorSpec(shape=compiled_model.inputs[0].shape,
                          dtype=input_dtype))

        frozen_func, graph_def = convert_to_constants(infer_func)
        input_tensors = [
            tensor for tensor in frozen_func.inputs
            if tensor.dtype != tf.resource
        ]
        output_tensors = frozen_func.outputs

        graph_def = run_graph_optimizations(graph_def,
                                            input_tensors,
                                            output_tensors,
                                            config=get_grappler_config(
                                                ["constfold", "function"]),
                                            graph=frozen_func.graph)
        tf.io.write_graph(graph_or_graph_def=graph_def,
                          logdir=self.checkpoints_to_proto,
                          name="model.pb",
                          as_text=False)
    def save_frozen_graph(self, path):
        real_model_function = tf.function(self.model)
        real_model = real_model_function.get_concrete_function(
            tf.TensorSpec(self.model.inputs[0].shape,
                          self.model.inputs[0].dtype))
        frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(
            real_model)

        input_tensors = [
            tensor for tensor in frozen_func.inputs
            if tensor.dtype != tf.resource
        ]
        output_tensors = frozen_func.outputs

        graph_def = run_graph_optimizations(graph_def,
                                            input_tensors,
                                            output_tensors,
                                            config=get_grappler_config(
                                                ["constfold", "function"]),
                                            graph=frozen_func.graph)

        tf.io.write_graph(graph_def, './frozen_graph', path)
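
A hedged sanity check for the frozen graph written above: parse the .pb back into a GraphDef. The path assumes `path` was 'model.pb' and is otherwise a placeholder:

loaded = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile('./frozen_graph/model.pb', 'rb') as f:  # hypothetical path
    loaded.ParseFromString(f.read())
print('node count:', len(loaded.node))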
def _create_frozen_graph(model):
    real_model = tf.function(model).get_concrete_function(
        tf.TensorSpec(model.inputs[0].shape,
                      model.inputs[0].dtype,
                      name=model.inputs[0].name))
    frozen_func = convert_variables_to_constants_v2(real_model)
    graph_def = frozen_func.graph.as_graph_def()

    input_tensors = [
        tensor for tensor in frozen_func.inputs if tensor.dtype != tf.resource
    ]
    output_tensors = frozen_func.outputs

    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(["constfold", "function"]),
        graph=frozen_func.graph,
    )

    return graph_def
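
As a hedged follow-up, the returned GraphDef can be handed to the same MLIR converter the save_mlir examples use; `model` is a hypothetical Keras model and the output name is a placeholder:

graph_def = _create_frozen_graph(model)
mlir_text = tf.mlir.experimental.convert_graph_def(graph_def)
with open('model.mlir', 'wb') as f:
    f.write(mlir_text.encode())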
Example #10
def save_mlir(checkpoint, model_func, out_file):
    # Note: `checkpoint` is unused in this body; restore weights into
    # `model_func` before calling. The dtype goes inside the TensorSpec.
    fff = tf.function(model_func).get_concrete_function(
        tf.TensorSpec([None, 3920], tf.float32))
    frozen_func, graph_def = convert_variables_to_constants_v2_as_graph(fff)

    input_tensors = [
        tensor for tensor in frozen_func.inputs
        if tensor.dtype != tf.resource
    ]

    output_tensors = frozen_func.outputs
    graph_def = run_graph_optimizations(
        graph_def,
        input_tensors,
        output_tensors,
        config=get_grappler_config(['pruning', 'function', 'constfold', 'shape', 'remap',
                                    'memory', 'common_subgraph_elimination', 'arithmetic',
                                    'loop', 'dependency', 'debug_stripper']),
        graph=frozen_func.graph)

    tf_mlir_graph = tf.mlir.experimental.convert_graph_def(graph_def)
    with open(out_file, 'wb') as outfile:
        outfile.write(tf_mlir_graph.encode())
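
A hedged usage sketch; the Dense layer and output path are placeholders, with the input width chosen to match the 3920-wide TensorSpec hard-coded above:

model = tf.keras.Sequential([
    tf.keras.layers.Dense(12, input_shape=(3920,))])
save_mlir(checkpoint=None, model_func=model, out_file='./result/kws.mlir')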