    def test_failure_at_PrepareCompositeFunctionsPass(self):
        class NgramsLayer(tf.keras.layers.Layer):
            def call(self, input_tensor, **kwargs):
                return mock_ngrams(input_tensor,
                                   width=2,
                                   axis=-1,
                                   string_separator=' ')

        # Registers a fake WhitespaceTokenizeWithOffsets op so that the TFText
        # fusing logic is enabled on the MLIR side.
        custom_opdefs_str = (
            'name: \'WhitespaceTokenizeWithOffsets\' input_arg: {name: \'Input1\' '
            'type: DT_FLOAT} input_arg: {name: \'Input2\' type: DT_FLOAT} '
            'output_arg: {name: \'Output\' type: DT_FLOAT}')
        register_custom_opdefs([custom_opdefs_str])

        model = tf.keras.models.Sequential([NgramsLayer()])
        model.predict(tf.constant(['test']))
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        converter.allow_custom_ops = True
        self.convert_and_check_location_info(
            converter, converter_error_data_pb2.ConverterErrorData.UNKNOWNLOC)
        exported_error = metrics._gauge_conversion_errors.get_cell(
            'CONVERT_TF_TO_TFLITE_MODEL', 'PrepareCompositeFunctionsPass', '',
            'UNKNOWN').value()
        self.assertEqual(exported_error,
                         "'width' attribute is not set or not an integer\n")

    def _createGraphWithCustomOp(self, opname='CustomAdd'):
        custom_opdefs_str = (
            'name: \'' + opname +
            '\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
            'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
            '\'Output\' type: DT_FLOAT}')

        # Create a graph that has one add op.
        new_graph = graph_pb2.GraphDef()
        with ops.Graph().as_default():
            with session.Session() as sess:
                in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                  dtype=dtypes.float32,
                                                  name='input')
                out_tensor = in_tensor + in_tensor
                inputs = {'x': in_tensor}
                outputs = {'z': out_tensor}

                new_graph.CopyFrom(sess.graph_def)

        # Rename the Add op type to opname.
        for node in new_graph.node:
            if node.op.startswith('Add'):
                node.op = opname
                del node.attr['T']

        # Register custom op defs to import modified graph def.
        register_custom_opdefs([custom_opdefs_str])

        return (new_graph, inputs, outputs)
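
    # --- Illustrative sketch (not part of the original snippet) ---
    # One way the graph returned above might be converted, assuming `tf` is
    # imported as in the test above: hand the GraphDef and the input/output
    # tensors to the TF1 converter constructor and allow custom ops so the
    # renamed op survives conversion. The method name is hypothetical.
    def _exampleConvertGraphWithCustomOp(self):
        new_graph, inputs, outputs = self._createGraphWithCustomOp()
        converter = tf.compat.v1.lite.TFLiteConverter(new_graph,
                                                      list(inputs.values()),
                                                      list(outputs.values()))
        converter.allow_custom_ops = True  # keep the renamed op as a custom op
        return converter.convert()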

    def _createSavedModelWithCustomOp(self):
        custom_opdefs_str = (
            'name: \'CustomAdd\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
            'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
            '\'Output\' type: DT_FLOAT}')

        # Create a graph that has one add op.
        new_graph = graph_pb2.GraphDef()
        with ops.Graph().as_default():
            with session.Session() as sess:
                in_tensor = array_ops.placeholder(shape=[1, 16, 16, 3],
                                                  dtype=dtypes.float32,
                                                  name='input')
                out_tensor = in_tensor + in_tensor
                inputs = {'x': in_tensor}
                outputs = {'z': out_tensor}

                new_graph.CopyFrom(sess.graph_def)

        # Rename the Add op type to CustomAdd.
        for node in new_graph.node:
            if node.op.startswith('Add'):
                node.op = 'CustomAdd'
                del node.attr['T']

        # Register custom op defs to import modified graph def.
        register_custom_opdefs([custom_opdefs_str])

        # Store saved model.
        saved_model_dir = self._getFilepath('model')
        with ops.Graph().as_default():
            with session.Session() as sess:
                import_graph_def(new_graph, name='')
                saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
        return (saved_model_dir, custom_opdefs_str)
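
    # --- Illustrative sketch (not part of the original snippet) ---
    # Shows how the saved model produced above might be converted, assuming the
    # public tf.compat.v1.lite.TFLiteConverter.from_saved_model API and that
    # `tf` is imported as in the test above; allow_custom_ops keeps 'CustomAdd'
    # as a custom op in the output flatbuffer. The method name is hypothetical.
    def _exampleConvertSavedModelWithCustomOp(self):
        saved_model_dir, _ = self._createSavedModelWithCustomOp()
        converter = tf.compat.v1.lite.TFLiteConverter.from_saved_model(
            saved_model_dir)
        converter.allow_custom_ops = True
        return converter.convert()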


def _convert_tf1_model(flags):
    """Calls function to convert the TensorFlow 1.X model into a TFLite model.

  Args:
    flags: argparse.Namespace object.

  Raises:
    ValueError: Invalid flags.
  """
    # Register custom opdefs before converter object creation.
    if flags.custom_opdefs:
        register_custom_opdefs(_parse_array(flags.custom_opdefs))

    # Create converter.
    converter = _get_tflite_converter(flags)
    if flags.inference_type:
        converter.inference_type = _parse_inference_type(
            flags.inference_type, "inference_type")
    if flags.inference_input_type:
        converter.inference_input_type = _parse_inference_type(
            flags.inference_input_type, "inference_input_type")
    if flags.output_format:
        converter.output_format = _toco_flags_pb2.FileFormat.Value(
            flags.output_format)

    if flags.mean_values and flags.std_dev_values:
        input_arrays = converter.get_input_arrays()
        std_dev_values = _parse_array(flags.std_dev_values, type_fn=float)

        # In quantized inference, mean_value has to be integer so that the real
        # value 0.0 is exactly representable.
        if converter.inference_type == dtypes.float32:
            mean_values = _parse_array(flags.mean_values, type_fn=float)
        else:
            mean_values = _parse_array(flags.mean_values, type_fn=int)
        quant_stats = list(zip(mean_values, std_dev_values))
        if ((not flags.input_arrays and len(input_arrays) > 1)
                or (len(input_arrays) != len(quant_stats))):
            raise ValueError(
                "Mismatching --input_arrays, --std_dev_values, and "
                "--mean_values. The flags must have the same number of "
                "items. The current input arrays are '{0}'. "
                "--input_arrays must be present when specifying "
                "--std_dev_values and --mean_values with multiple input "
                "tensors in order to map between names and "
                "values.".format(",".join(input_arrays)))
        converter.quantized_input_stats = dict(
            list(zip(input_arrays, quant_stats)))
    if (flags.default_ranges_min is not None) and (flags.default_ranges_max
                                                   is not None):
        converter.default_ranges_stats = (flags.default_ranges_min,
                                          flags.default_ranges_max)

    if flags.drop_control_dependency:
        converter.drop_control_dependency = flags.drop_control_dependency
    if flags.reorder_across_fake_quant:
        converter.reorder_across_fake_quant = flags.reorder_across_fake_quant
    if flags.change_concat_input_ranges:
        converter.change_concat_input_ranges = (
            flags.change_concat_input_ranges == "TRUE")

    if flags.allow_custom_ops:
        converter.allow_custom_ops = flags.allow_custom_ops

    if flags.target_ops:
        ops_set_options = lite.OpsSet.get_options()
        converter.target_spec.supported_ops = set()
        for option in six.ensure_str(flags.target_ops).split(","):
            if option not in ops_set_options:
                raise ValueError("Invalid value for --target_ops. Options: "
                                 "{0}".format(",".join(ops_set_options)))
            converter.target_spec.supported_ops.add(lite.OpsSet(option))

    if flags.experimental_select_user_tf_ops:
        if lite.OpsSet.SELECT_TF_OPS not in converter.target_spec.supported_ops:
            raise ValueError(
                "--experimental_select_user_tf_ops can only be set if "
                "--target_ops contains SELECT_TF_OPS.")
        user_op_set = set()
        for op_name in six.ensure_str(
                flags.experimental_select_user_tf_ops).split(","):
            user_op_set.add(op_name)
        converter.target_spec.experimental_select_user_tf_ops = list(
            user_op_set)

    if flags.post_training_quantize:
        converter.optimizations = [lite.Optimize.DEFAULT]
        if converter.inference_type != dtypes.float32:
            print(
                "--post_training_quantize quantizes a graph of inference_type "
                "FLOAT. Overriding inference_type to FLOAT.")
            converter.inference_type = dtypes.float32

    if flags.quantize_to_float16:
        converter.target_spec.supported_types = [dtypes.float16]
        if not flags.post_training_quantize:
            print("--quantize_to_float16 will only take effect with the "
                  "--post_training_quantize flag enabled.")

    if flags.dump_graphviz_dir:
        converter.dump_graphviz_dir = flags.dump_graphviz_dir
    if flags.dump_graphviz_video:
        converter.dump_graphviz_video = flags.dump_graphviz_video
    if flags.conversion_summary_dir:
        converter.conversion_summary_dir = flags.conversion_summary_dir

    converter.experimental_new_converter = flags.experimental_new_converter

    if flags.experimental_new_quantizer is not None:
        converter.experimental_new_quantizer = flags.experimental_new_quantizer

    # Convert model.
    output_data = converter.convert()
    with open(flags.output_file, "wb") as f:
        f.write(six.ensure_binary(output_data))
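

# --- Illustrative sketch (not part of the original module) ---
# Demonstrates, in isolation, how _convert_tf1_model pairs --input_arrays with
# --mean_values and --std_dev_values to build quantized_input_stats: each input
# array name maps to its (mean, std_dev) tuple. The tensor names and values
# below are made up for illustration.
def _example_quantized_input_stats():
    input_arrays = ["input_a", "input_b"]  # hypothetical input tensor names
    mean_values = [127, 127]  # integers so the real value 0.0 stays representable
    std_dev_values = [1.3, 1.3]
    quant_stats = list(zip(mean_values, std_dev_values))
    # -> {'input_a': (127, 1.3), 'input_b': (127, 1.3)}
    return dict(zip(input_arrays, quant_stats))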