def build_toco_flags(inference_type=dtypes.float32,
                     inference_input_type=None,
                     input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                     output_format=lite_constants.TFLITE,
                     default_ranges_stats=None,
                     drop_control_dependency=True,
                     reorder_across_fake_quant=False,
                     allow_custom_ops=False,
                     post_training_quantize=False,
                     quantize_to_float16=False,
                     dump_graphviz_dir=None,
                     dump_graphviz_video=False,
                     target_ops=None,
                     conversion_summary_dir=None,
                     select_user_tf_ops=None,
                     enable_tflite_resource_variables=False,
                     unfold_batchmatmul=True,
                     lower_tensor_list_ops=True,
                     **_):
  """Build the TOCO flags object from params."""
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = convert_inference_tf_type_to_tflite_type(
      inference_type, usage="inference_type flag")
  if inference_input_type:
    toco.inference_input_type = convert_inference_tf_type_to_tflite_type(
        inference_input_type, usage="inference_input_type flag")
  else:
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  if select_user_tf_ops:
    toco.select_user_tf_ops.extend(select_user_tf_ops)
  toco.post_training_quantize = post_training_quantize
  toco.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    toco.conversion_summary_dir = conversion_summary_dir
  if target_ops:
    if OpsSet.SELECT_TF_OPS in set(target_ops):
      toco.enable_select_tf_ops = True
    if set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.force_select_tf_ops = True
  toco.enable_tflite_resource_variables = enable_tflite_resource_variables
  toco.unfold_batchmatmul = unfold_batchmatmul
  toco.lower_tensor_list_ops = lower_tensor_list_ops
  return toco
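
# Illustrative usage sketch (hypothetical helper, assuming the `dtypes` and
# `OpsSet` names imported by this module): shows how `target_ops` maps onto
# the select-TF-ops flags above.
def _example_build_toco_flags():
  flags = build_toco_flags(
      inference_type=dtypes.float32,
      target_ops=[OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS])
  # Mixing TFLITE_BUILTINS with SELECT_TF_OPS enables the flex fallback
  # without forcing every op through it; only [SELECT_TF_OPS] alone forces it.
  assert flags.enable_select_tf_ops
  assert not flags.force_select_tf_ops
  return flags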
def _requires_input_stats(toco_flags: _toco_flags_pb2.TocoFlags) -> bool:
  """Checks if the `input_stats` flag is required for conversion.

  Args:
    toco_flags: A protocol buffer describing the conversion process.

  Returns:
    True, if the `inference_type` or the `inference_input_type` is a quantized
    type and it is not post training quantization, else False.
  """
  quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]
  return ((toco_flags.inference_type in quantized_inference_types or
           toco_flags.inference_input_type in quantized_inference_types) and
          not toco_flags.post_training_quantize)
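
# Illustration (hypothetical helper, assuming the `_toco_flags_pb2` and
# `_types_pb2` proto modules imported by this module): input stats are
# required only when a quantized inference type is requested and post-training
# quantization is off.
def _example_requires_input_stats():
  flags = _toco_flags_pb2.TocoFlags()
  flags.inference_type = _types_pb2.QUANTIZED_UINT8
  assert _requires_input_stats(flags)  # Quantization-aware path needs stats.
  flags.post_training_quantize = True
  assert not _requires_input_stats(flags)  # Post-training quantization does not.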
def build_toco_flags(inference_type=lite_constants.FLOAT,
                     inference_input_type=None,
                     input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                     output_format=lite_constants.TFLITE,
                     default_ranges_stats=None,
                     drop_control_dependency=True,
                     reorder_across_fake_quant=False,
                     allow_custom_ops=False,
                     custom_opdefs=None,
                     post_training_quantize=False,
                     quantize_to_float16=False,
                     dump_graphviz_dir=None,
                     dump_graphviz_video=False,
                     target_ops=None,
                     conversion_summary_dir=None,
                     **_):
  """Build the TOCO flags object from params."""
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = util.convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  if custom_opdefs:
    toco.custom_opdefs.extend(custom_opdefs)
  toco.post_training_quantize = post_training_quantize
  toco.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    toco.conversion_summary_dir = conversion_summary_dir
  if target_ops:
    if OpsSet.SELECT_TF_OPS in set(target_ops):
      toco.enable_select_tf_ops = True
    if set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.force_select_tf_ops = True
  return toco
def _run(self, sess, in_tensor, out_tensor, should_succeed):
  """Use toco binary to check conversion from graphdef to tflite.

  Args:
    sess: Active TensorFlow session containing graph.
    in_tensor: TensorFlow tensor to use as input.
    out_tensor: TensorFlow tensor to use as output.
    should_succeed: Whether this is a valid conversion.
  """
  # Build all protos and extract graphdef
  graph_def = sess.graph_def
  toco_flags = toco_flags_pb2.TocoFlags()
  toco_flags.input_format = toco_flags_pb2.TENSORFLOW_GRAPHDEF
  toco_flags.output_format = toco_flags_pb2.TFLITE
  toco_flags.inference_input_type = types_pb2.FLOAT
  toco_flags.inference_type = types_pb2.FLOAT
  toco_flags.allow_custom_ops = True
  model_flags = model_flags_pb2.ModelFlags()
  input_array = model_flags.input_arrays.add()
  input_array.name = TensorName(in_tensor)
  input_array.shape.dims.extend(map(int, in_tensor.shape))
  model_flags.output_arrays.append(TensorName(out_tensor))
  # Shell out to run toco (in case it crashes)
  with tempfile.NamedTemporaryFile() as fp_toco, \
       tempfile.NamedTemporaryFile() as fp_model, \
       tempfile.NamedTemporaryFile() as fp_input, \
       tempfile.NamedTemporaryFile() as fp_output:
    fp_model.write(model_flags.SerializeToString())
    fp_toco.write(toco_flags.SerializeToString())
    fp_input.write(graph_def.SerializeToString())
    fp_model.flush()
    fp_toco.flush()
    fp_input.flush()
    tflite_bin = resource_loader.get_path_to_datafile("toco_from_protos.par")
    cmdline = " ".join([
        tflite_bin, fp_model.name, fp_toco.name, fp_input.name, fp_output.name
    ])
    exitcode = os.system(cmdline)
    if exitcode == 0:
      stuff = fp_output.read()
      self.assertEqual(stuff is not None, should_succeed)
    else:
      self.assertFalse(should_succeed)
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              custom_opdefs=None,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              quantize_to_float16=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False,
                              debug_info=None,
                              conversion_summary_dir=None,
                              saved_model_dir=None,
                              saved_model_version=0,
                              saved_model_tags=None,
                              saved_model_exported_names=None):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.shape` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8, tf.int8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization. Must
      be `{tf.float32, tf.uint8, tf.int8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only needed if `inference_input_type` is `QUANTIZED_UINT8` or `INT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range
      values for all arrays without a specified range. Intended for
      experimenting with quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    custom_opdefs: List of strings representing custom ops OpDefs that are
      included in the GraphDef. Required when using custom operations with the
      MLIR-based converter. (default None)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges
      for inputs and outputs of the concat operator for quantized models.
      Changes the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will
      be latency improvements (at the cost of accuracy). (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)
    debug_info: `GraphDebugInfo` proto containing the stack traces for the
      original nodes referred by the converted graph.
    conversion_summary_dir: A string, the path to the generated conversion
      logs.
    saved_model_dir: Filepath of the saved model to be converted. This value
      is non-empty only when the SavedModel import path is used; otherwise,
      the GraphDef-based conversion is processed.
    saved_model_version: SavedModel file format version of the saved model
      file to be converted. This value is set only when the SavedModel import
      path is used.
    saved_model_tags: Set of string SavedModel tags, formatted as a
      comma-separated value. This value is set only when the SavedModel import
      path is used.
    saved_model_exported_names: Names to be exported (default: export all)
      when the SavedModel import path is on. This value is set only when the
      SavedModel import path is used.

  Returns:
    model_flags, toco_flags, debug_info: three protocol buffers describing the
      conversion process and debug information.

  Raises:
    ValueError:
      If the input tensor type is unknown
      Missing mean_values or std_dev_values
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = util.convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  if custom_opdefs:
    toco.custom_opdefs.extend(custom_opdefs)
  toco.post_training_quantize = post_training_quantize
  toco.quantize_to_float16 = quantize_to_float16
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if conversion_summary_dir:
    toco.conversion_summary_dir = conversion_summary_dir
  if target_ops:
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
      toco.force_select_tf_ops = True

  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    input_array.name = util.get_tensor_name(input_tensor)
    input_array.data_type = util.convert_dtype_to_tflite_type(
        input_tensor.dtype)

    if _requires_input_stats(toco) and quantized_input_stats:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]

    if input_shapes is None:
      shape = input_tensor.shape
    else:
      shape = input_shapes[idx]

    # Create shapes with -1 for unknown dimensions.
    dims = []
    for dim in shape:
      if (dim is None or
          (isinstance(dim, tensor_shape.Dimension) and dim.value is None)):
        dims.append(-1)
      else:
        dims.append(int(dim))
    input_array.shape.dims.extend(dims)

  for output_tensor in output_tensors:
    model.output_arrays.append(util.get_tensor_name(output_tensor))

  model.allow_nonexistent_arrays = allow_nonexistent_arrays

  if saved_model_dir:
    model.saved_model_dir = saved_model_dir
  model.saved_model_version = saved_model_version
  if saved_model_tags:
    model.saved_model_tags.extend(saved_model_tags)
  if saved_model_exported_names:
    model.saved_model_exported_names.extend(saved_model_exported_names)

  return model, toco, debug_info
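
# End-to-end sketch (hypothetical helper using TF1-style graph construction
# via `tf.compat.v1`): builds the conversion protos for a trivial graph.
def _example_build_toco_convert_protos():
  import tensorflow as tf
  with tf.compat.v1.Graph().as_default():
    in_tensor = tf.compat.v1.placeholder(
        dtype=tf.float32, shape=[1, 4], name="input")
    out_tensor = in_tensor + in_tensor
    model_flags, toco_flags, _ = build_toco_convert_protos(
        input_tensors=[in_tensor], output_tensors=[out_tensor])
  # Each input array records the tensor name, dtype, and static shape.
  assert list(model_flags.input_arrays[0].shape.dims) == [1, 4]
  return model_flags, toco_flags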
def build_conversion_flags(inference_type=dtypes.float32,
                           inference_input_type=None,
                           input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                           output_format=lite_constants.TFLITE,
                           default_ranges_stats=None,
                           drop_control_dependency=True,
                           reorder_across_fake_quant=False,
                           allow_custom_ops=False,
                           post_training_quantize=False,
                           quantize_to_float16=False,
                           dump_graphviz_dir=None,
                           dump_graphviz_video=False,
                           target_ops=None,
                           conversion_summary_dir=None,
                           select_user_tf_ops=None,
                           allow_all_select_tf_ops=False,
                           enable_tflite_resource_variables=True,
                           unfold_batchmatmul=True,
                           lower_tensor_list_ops=True,
                           default_to_single_batch_in_tensor_list_ops=False,
                           accumulation_type=None,
                           allow_bfloat16=False,
                           unfold_large_splat_constant=False,
                           supported_backends=None,
                           disable_per_channel_quantization=False,
                           enable_mlir_dynamic_range_quantizer=False,
                           tf_quantization_mode=None,
                           disable_infer_tensor_range=False,
                           use_fake_quant_num_bits=False,
                           enable_dynamic_update_slice=False,
                           **_):
  """Builds protocol buffer describing a conversion of a model.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    inference_type: Data type of numeric arrays, excluding the input layer.
      (default tf.float32, must be in {tf.float32, tf.int8, tf.uint8})
    inference_input_type: Data type of the numeric arrays in the input layer.
      If `inference_input_type` is in {tf.int8, tf.uint8}, then
      `quantized_input_stats` must be provided. (default is the value assigned
      to `inference_type`, must be in {tf.float32, tf.int8, tf.uint8})
    input_format: Type of data to read. (default TENSORFLOW_GRAPHDEF, must be
      in {TENSORFLOW_GRAPHDEF})
    output_format: Output file format. (default TFLITE, must be in {TFLITE,
      GRAPHVIZ_DOT})
    default_ranges_stats: Tuple of integers representing (min, max) range
      values for all arrays without a specified range. Intended for
      experimenting with quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will
      be latency improvements (at the cost of accuracy). (default False)
    quantize_to_float16: Boolean indicating whether to convert float buffers to
      float16. (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    conversion_summary_dir: A string, the path to the generated conversion
      logs.
    select_user_tf_ops: List of user-defined TensorFlow ops that need to be
      supported in the TensorFlow Lite runtime. These ops will be supported as
      select TensorFlow ops.
    allow_all_select_tf_ops: If True, automatically add all TF ops (including
      custom TF ops) to the converted model as flex ops.
    enable_tflite_resource_variables: Experimental flag, subject to change.
      Enables conversion of resource variables. (default True)
    unfold_batchmatmul: Whether to unfold tf.BatchMatMul to a set of
      tfl.fully_connected ops. If not, translate to tfl.batch_matmul.
    lower_tensor_list_ops: Whether to lower tensor list ops to builtin ops. If
      not, use Flex tensor list ops.
    default_to_single_batch_in_tensor_list_ops: Whether to force batch size
      one when a tensor list op has an unspecified batch size.
    accumulation_type: Data type of the accumulators in quantized inference.
      Typically used for float16 quantization and is either fp16 or fp32.
    allow_bfloat16: Whether the converted model supports reduced precision
      inference with the bfloat16 type.
    unfold_large_splat_constant: Whether to unfold large splat constant
      tensors in the flatbuffer model to reduce size.
    supported_backends: List of TFLite backends which need to be checked for
      compatibility.
    disable_per_channel_quantization: Disable per-channel quantized weights
      for dynamic range quantization. Only per-tensor quantization will be
      used.
    enable_mlir_dynamic_range_quantizer: Enable MLIR dynamic range
      quantization. If False, the old converter dynamic range quantizer is
      used.
    tf_quantization_mode: Indicates the mode of TF Quantization when the
      output model is used for TF Quantization.
    disable_infer_tensor_range: Disable inferring tensor ranges.
    use_fake_quant_num_bits: Allow quantization parameters to be calculated
      from the num_bits attribute.
    enable_dynamic_update_slice: Enable conversion to the DynamicUpdateSlice
      op. (default: False)

  Returns:
    conversion_flags: protocol buffer describing the conversion process.

  Raises:
    ValueError: If the input tensor type is unknown.
  """
  conversion_flags = _conversion_flags_pb2.TocoFlags()
  conversion_flags.inference_type = convert_inference_tf_type_to_tflite_type(
      inference_type, usage="inference_type flag")
  if inference_input_type:
    conversion_flags.inference_input_type = (
        convert_inference_tf_type_to_tflite_type(
            inference_input_type, usage="inference_input_type flag"))
  else:
    conversion_flags.inference_input_type = conversion_flags.inference_type
  conversion_flags.input_format = input_format
  conversion_flags.output_format = output_format
  if default_ranges_stats:
    conversion_flags.default_ranges_min = default_ranges_stats[0]
    conversion_flags.default_ranges_max = default_ranges_stats[1]
  conversion_flags.drop_control_dependency = drop_control_dependency
  conversion_flags.reorder_across_fake_quant = reorder_across_fake_quant
  conversion_flags.allow_custom_ops = allow_custom_ops
  conversion_flags.post_training_quantize = post_training_quantize
  conversion_flags.quantize_to_float16 = quantize_to_float16
  if dump_graphviz_dir:
    conversion_flags.dump_graphviz_dir = dump_graphviz_dir
  conversion_flags.dump_graphviz_include_video = dump_graphviz_video
  if target_ops:
    if OpsSet.SELECT_TF_OPS in target_ops:
      conversion_flags.enable_select_tf_ops = True
    if set(target_ops) == {OpsSet.SELECT_TF_OPS}:
      conversion_flags.force_select_tf_ops = True
  if conversion_summary_dir:
    conversion_flags.conversion_summary_dir = conversion_summary_dir
  if select_user_tf_ops:
    conversion_flags.select_user_tf_ops.extend(select_user_tf_ops)
  conversion_flags.allow_all_select_tf_ops = allow_all_select_tf_ops
  conversion_flags.enable_tflite_resource_variables = (
      enable_tflite_resource_variables)
  conversion_flags.unfold_batchmatmul = unfold_batchmatmul
  conversion_flags.lower_tensor_list_ops = lower_tensor_list_ops
  conversion_flags.default_to_single_batch_in_tensor_list_ops = (
      default_to_single_batch_in_tensor_list_ops)
  if accumulation_type:
    conversion_flags.accumulation_type = convert_tensor_tf_type_to_tflite_type(
        accumulation_type, usage="accumulation_type flag")
  conversion_flags.allow_bfloat16 = allow_bfloat16
  conversion_flags.unfold_large_splat_constant = unfold_large_splat_constant
  if supported_backends:
    conversion_flags.supported_backends.extend(supported_backends)
  conversion_flags.disable_per_channel_quantization = (
      disable_per_channel_quantization)
  conversion_flags.enable_mlir_dynamic_range_quantizer = (
      enable_mlir_dynamic_range_quantizer)
  conversion_flags.enable_dynamic_update_slice = enable_dynamic_update_slice
  if tf_quantization_mode:
    conversion_flags.tf_quantization_mode = tf_quantization_mode
  conversion_flags.disable_infer_tensor_range = disable_infer_tensor_range
  conversion_flags.use_fake_quant_num_bits = use_fake_quant_num_bits
  return conversion_flags
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{FLOAT, QUANTIZED_UINT8}`. (default FLOAT)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization. Must
      be `{FLOAT, QUANTIZED_UINT8}`. (default `inference_type`)
    input_format: Type of data to read. Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only needed if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range
      values for all arrays without a specified range. Intended for
      experimenting with quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges
      for inputs and outputs of the concat operator for quantized models.
      Changes the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will
      be latency improvements (at the cost of accuracy). (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet options
      indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist or
      are unused in the final graph. (default False)

  Returns:
    model_flags, toco_flags: two protocol buffers describing the conversion
      process.

  Raises:
    ValueError: If the input tensor type is unknown
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = inference_type
  if inference_input_type:
    toco.inference_input_type = inference_input_type
  else:
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  toco.post_training_quantize = post_training_quantize
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if target_ops:
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.allow_flex_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.allow_flex_ops = True
      toco.force_flex_ops = True

  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    if toco.inference_input_type == lite_constants.QUANTIZED_UINT8:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    input_array.name = tensor_name(input_tensor)
    if input_shapes is None:
      shape = input_tensor.get_shape()
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(map(int, shape))

  for output_tensor in output_tensors:
    model.output_arrays.append(tensor_name(output_tensor))

  model.allow_nonexistent_arrays = allow_nonexistent_arrays

  return model, toco
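
# Sketch of the quantized-input path in this older API (hypothetical helper;
# the (mean, std) values are illustrative only): when `inference_type` is
# QUANTIZED_UINT8, each input tensor must come with a (mean, std) tuple, which
# is copied into the per-input ModelFlags entry.
def _example_quantized_protos(in_tensor, out_tensor):
  model_flags, toco_flags = build_toco_convert_protos(
      input_tensors=[in_tensor],
      output_tensors=[out_tensor],
      inference_type=lite_constants.QUANTIZED_UINT8,
      quantized_input_stats=[(127.5, 127.5)])  # real = (quantized - mean) / std
  assert toco_flags.inference_input_type == lite_constants.QUANTIZED_UINT8
  assert model_flags.input_arrays[0].mean_value == 127.5
  return model_flags, toco_flags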