def _freeze_graph(sess, input_tensors, output_tensors):
  """Returns a frozen GraphDef.

  Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the
  existing GraphDef is returned. The Grappler pass is only run on models that
  are frozen in order to inline the functions in the graph.

  Args:
    sess: TensorFlow Session.
    input_tensors: List of input tensors.
    output_tensors: List of output tensors (only .name is used from this).

  Returns:
    Frozen GraphDef.
  """
  # Runs a Grappler pass in order to inline any functions in the graph.
  graph_def = _run_graph_optimizations(sess.graph_def, input_tensors,
                                       output_tensors)
  if not _is_frozen_graph(sess):
    output_arrays = [_tensor_name(tensor) for tensor in output_tensors]
    return _tf_graph_util.convert_variables_to_constants(
        sess, graph_def, output_arrays)
  else:
    return sess.graph_def
def get_input_arrays(self):
  """Returns a list of the names of the input tensors.

  Returns:
    List of strings.
  """
  if self._has_valid_tensors():
    return [_tensor_name(tensor) for tensor in self._input_tensors]
  else:
    return [name for name, _ in self._input_arrays_with_shape]
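# A minimal usage sketch, assuming a TF 1.x-style tf.lite.TFLiteConverter: the
# names returned by get_input_arrays() are the keys expected by
# `quantized_input_stats`. The graph file "model.pb" and the tensor names
# "input"/"output" are hypothetical placeholders, not values from this module.
def _example_quantized_input_stats():
  import tensorflow as tf

  converter = tf.lite.TFLiteConverter.from_frozen_graph(
      "model.pb", input_arrays=["input"], output_arrays=["output"])
  # Provide one (mean, std_dev) pair per input array name; the stats only take
  # effect for quantized inference types.
  converter.quantized_input_stats = {
      name: (127.5, 127.5) for name in converter.get_input_arrays()
  }
  return converter.convert()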
def _freeze_graph(sess, output_tensors):
  """Returns a frozen GraphDef.

  Freezes a graph with Variables in it. Otherwise the existing GraphDef is
  returned.

  Args:
    sess: TensorFlow Session.
    output_tensors: List of output tensors (only .name is used from this).

  Returns:
    Frozen GraphDef.
  """
  if not _is_frozen_graph(sess):
    output_arrays = [_tensor_name(tensor) for tensor in output_tensors]
    return _tf_graph_util.convert_variables_to_constants(
        sess, sess.graph_def, output_arrays)
  else:
    return sess.graph_def
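# A minimal usage sketch, assuming TF 1.x session semantics (via
# tensorflow.compat.v1): _freeze_graph() is an internal helper, and the public
# path that exercises it is tf.lite.TFLiteConverter.from_session(), which
# freezes any Variables in the session graph before conversion. The tiny
# matmul graph below is a stand-in, not a model from this module.
def _example_freeze_and_convert():
  import tensorflow.compat.v1 as tf
  tf.disable_eager_execution()

  with tf.Graph().as_default(), tf.Session() as sess:
    inp = tf.placeholder(tf.float32, shape=[1, 4], name="input")
    weights = tf.Variable(tf.ones([4, 2]), name="weights")
    out = tf.matmul(inp, weights, name="output")
    sess.run(tf.global_variables_initializer())

    # from_session() exercises the variable-freezing path shown above.
    converter = tf.lite.TFLiteConverter.from_session(sess, [inp], [out])
    return converter.convert()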
def convert(self):
  """Converts a TensorFlow GraphDef based on instance variables.

  Returns:
    The converted data in serialized format. Either a TFLite Flatbuffer or a
    Graphviz graph depending on value in `output_format`.

  Raises:
    ValueError:
      Input shape is not specified.
      None value for dimension in input_tensor.
  """
  # Checks dimensions in input tensor.
  if self._has_valid_tensors():
    for tensor in self._input_tensors:
      shape = tensor.get_shape()
      if not shape:
        raise ValueError("Provide an input shape for input array "
                         "'{0}'.".format(_tensor_name(tensor)))
      # Note that shape_list might be empty for scalar shapes.
      shape_list = shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        self._set_batch_size(batch_size=1)

  # Get quantization stats. Ensures there is one stat per name if the stats
  # are specified.
  if self.quantized_input_stats:
    quantized_stats = []
    invalid_stats = []
    for name in self.get_input_arrays():
      if name in self.quantized_input_stats:
        quantized_stats.append(self.quantized_input_stats[name])
      else:
        invalid_stats.append(name)

    if invalid_stats:
      raise ValueError("Quantization input stats are not available for input "
                       "tensors '{0}'.".format(",".join(invalid_stats)))
  else:
    quantized_stats = None

  if self.representative_dataset:
    if not isinstance(self.representative_dataset, RepresentativeDataset):
      raise TypeError("representative_dataset must be an instance of "
                      "RepresentativeDataset")
    if self.representative_dataset.input_gen is None:
      raise ValueError("Provide an input generator for representative_dataset")

  # TODO(shashishekhar): For now, the order of optimizations is ignored.
  # Both size and latency optimizations decide whether to apply post-training
  # optimizations.
  post_training_optimize = bool(
      len(
          set(self.optimizations)
          & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
  # Do weights-only quantization if there is no dataset for calibration.
  weights_only_quantize_flag = (
      post_training_optimize and (self.representative_dataset is None))

  converter_kwargs = {
      "inference_type": self.inference_type,
      "inference_input_type": self.inference_input_type,
      "input_format": constants.TENSORFLOW_GRAPHDEF,
      "output_format": self.output_format,
      "quantized_input_stats": quantized_stats,
      "default_ranges_stats": self.default_ranges_stats,
      "drop_control_dependency": self.drop_control_dependency,
      "reorder_across_fake_quant": self.reorder_across_fake_quant,
      "change_concat_input_ranges": self.change_concat_input_ranges,
      "allow_custom_ops": self.allow_custom_ops,
      "post_training_quantize": weights_only_quantize_flag,
      "target_ops": self.target_ops,
      "dump_graphviz_dir": self.dump_graphviz_dir,
      "dump_graphviz_video": self.dump_graphviz_video
  }

  optimized_graph = None
  if self.inference_type == constants.QUANTIZED_UINT8:
    optimized_graph = self._graph_def
  else:
    try:
      optimized_graph = _run_graph_optimizations(
          self._graph_def, self._input_tensors, self._output_tensors)
    except Exception:
      optimized_graph = self._graph_def

  # Converts model.
  if self._has_valid_tensors():
    result = _toco_convert_impl(
        input_data=optimized_graph,
        input_tensors=self._input_tensors,
        output_tensors=self._output_tensors,
        **converter_kwargs)
  else:
    result = _toco_convert_graph_def(
        input_data=optimized_graph,
        input_arrays_with_shape=self._input_arrays_with_shape,
        output_arrays=self._output_arrays,
        **converter_kwargs)

  if self.representative_dataset and post_training_optimize:
    calibrate_quantize = _calibrator.Calibrator(result)
    result = calibrate_quantize.calibrate_and_quantize(
        self.representative_dataset.input_gen)

  return result
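# A minimal usage sketch, assuming a TF release where this convert() variant
# is active: setting `optimizations` alone yields weights-only quantization,
# while also supplying a `representative_dataset` routes the flatbuffer
# through the Calibrator for full post-training quantization. The SavedModel
# path "/tmp/saved_model" and the 1x224x224x3 input shape are hypothetical
# placeholders.
def _example_post_training_quantization():
  import numpy as np
  import tensorflow as tf

  def representative_data_gen():
    # Yield a few calibration samples shaped like the model's input.
    for _ in range(10):
      yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

  converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/saved_model")
  converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
  converter.representative_dataset = tf.lite.RepresentativeDataset(
      representative_data_gen)
  return converter.convert()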
def convert(self):
  """Converts a TensorFlow GraphDef based on instance variables.

  Returns:
    The converted data in serialized format.

  Raises:
    ValueError:
      Input shape is not specified.
      None value for dimension in input_tensor.
  """
  graph_def = _convert_to_constants.convert_variables_to_constants_v2(
      self._func)
  input_tensors = [
      tensor for tensor in self._func.inputs
      if tensor.dtype != _dtypes.resource
  ]
  output_tensors = self._func.outputs

  # Run a Grappler pass.
  graph_def = _run_graph_optimizations(graph_def, input_tensors,
                                       output_tensors, self._func.graph)

  # Checks dimensions in input tensor.
  for tensor in input_tensors:
    # Note that shape_list might be empty for scalar shapes.
    shape_list = tensor.get_shape().as_list()
    if None in shape_list[1:]:
      raise ValueError(
          "None is only supported in the 1st dimension. Tensor '{0}' has "
          "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
    elif shape_list and shape_list[0] is None:
      self._set_batch_size(batch_size=1)

  if self.representative_dataset:
    if not isinstance(self.representative_dataset, RepresentativeDataset):
      raise TypeError("representative_dataset must be an instance of "
                      "RepresentativeDataset")
    if self.representative_dataset.input_gen is None:
      raise ValueError("Provide an input generator for representative_dataset")

  # TODO(shashishekhar): For now, the order of optimizations is ignored.
  # Both size and latency optimizations decide whether to apply post-training
  # optimizations.
  post_training_optimize = bool(
      len(
          set(self.optimizations)
          & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
  # Do weights-only quantization if there is no dataset for calibration.
  weights_only_quantize_flag = (
      post_training_optimize and (self.representative_dataset is None))

  converter_kwargs = {
      "input_format": constants.TENSORFLOW_GRAPHDEF,
      "allow_custom_ops": self.allow_custom_ops,
      "post_training_quantize": weights_only_quantize_flag,
      "target_ops": self.target_ops,
  }

  # Converts model.
  result = _toco_convert_impl(
      input_data=graph_def,
      input_tensors=input_tensors,
      output_tensors=output_tensors,
      **converter_kwargs)

  if self.representative_dataset and post_training_optimize:
    calibrate_quantize = _calibrator.Calibrator(result)
    result = calibrate_quantize.calibrate_and_quantize(
        self.representative_dataset.input_gen)

  return result
def convert(self):
  """Converts a TensorFlow GraphDef based on instance variables.

  Returns:
    The converted data in serialized format. Either a TFLite Flatbuffer or a
    Graphviz graph depending on value in `output_format`.

  Raises:
    ValueError:
      Input shape is not specified.
      None value for dimension in input_tensor.
  """
  # Checks dimensions in input tensor.
  if self._has_valid_tensors():
    for tensor in self._input_tensors:
      shape = tensor.get_shape()
      if not shape or not shape.as_list():
        raise ValueError("Provide an input shape for input array "
                         "'{0}'.".format(_tensor_name(tensor)))
      shape_list = shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
      elif shape_list[0] is None:
        self._set_batch_size(batch_size=1)

  # Get quantization stats. Ensures there is one stat per name if the stats
  # are specified.
  if self.quantized_input_stats:
    quantized_stats = []
    invalid_stats = []
    for name in self.get_input_arrays():
      if name in self.quantized_input_stats:
        quantized_stats.append(self.quantized_input_stats[name])
      else:
        invalid_stats.append(name)

    if invalid_stats:
      raise ValueError("Quantization input stats are not available for input "
                       "tensors '{0}'.".format(",".join(invalid_stats)))
  else:
    quantized_stats = None

  converter_kwargs = {
      "inference_type": self.inference_type,
      "inference_input_type": self.inference_input_type,
      "input_format": constants.TENSORFLOW_GRAPHDEF,
      "output_format": self.output_format,
      "quantized_input_stats": quantized_stats,
      "default_ranges_stats": self.default_ranges_stats,
      "drop_control_dependency": self.drop_control_dependency,
      "reorder_across_fake_quant": self.reorder_across_fake_quant,
      "change_concat_input_ranges": self.change_concat_input_ranges,
      "allow_custom_ops": self.allow_custom_ops,
      "post_training_quantize": self.post_training_quantize,
      "target_ops": self.target_ops,
      "dump_graphviz_dir": self.dump_graphviz_dir,
      "dump_graphviz_video": self.dump_graphviz_video
  }

  optimized_graph = None
  if self.inference_type == constants.QUANTIZED_UINT8:
    optimized_graph = self._graph_def
  else:
    try:
      optimized_graph = _run_graph_optimizations(
          self._graph_def, [t.name for t in self._output_tensors])
    except Exception:
      optimized_graph = self._graph_def

  # Converts model.
  if self._has_valid_tensors():
    result = _toco_convert_impl(
        input_data=optimized_graph,
        input_tensors=self._input_tensors,
        output_tensors=self._output_tensors,
        **converter_kwargs)
  else:
    result = _toco_convert_graph_def(
        input_data=optimized_graph,
        input_arrays_with_shape=self._input_arrays_with_shape,
        output_arrays=self._output_arrays,
        **converter_kwargs)

  return result
def convert(self):
  """Converts a TensorFlow GraphDef based on instance variables.

  Returns:
    The converted data in serialized format. Either a TFLite Flatbuffer or a
    Graphviz graph depending on value in `output_format`.

  Raises:
    ValueError:
      Input shape is not specified.
      None value for dimension in input_tensor.
  """
  # Checks dimensions in input tensor.
  if self._has_valid_tensors():
    for tensor in self._input_tensors:
      shape = tensor.get_shape()
      if not shape:
        raise ValueError("Provide an input shape for input array "
                         "'{0}'.".format(_tensor_name(tensor)))
      # Note that shape_list might be empty for scalar shapes.
      shape_list = shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        self._set_batch_size(batch_size=1)

  # Get quantization stats. Ensures there is one stat per name if the stats
  # are specified.
  if self.quantized_input_stats:
    quantized_stats = []
    invalid_stats = []
    for name in self.get_input_arrays():
      if name in self.quantized_input_stats:
        quantized_stats.append(self.quantized_input_stats[name])
      else:
        invalid_stats.append(name)

    if invalid_stats:
      raise ValueError("Quantization input stats are not available for input "
                       "tensors '{0}'.".format(",".join(invalid_stats)))
  else:
    quantized_stats = None

  converter_kwargs = {
      "inference_type": self.inference_type,
      "inference_input_type": self.inference_input_type,
      "input_format": constants.TENSORFLOW_GRAPHDEF,
      "output_format": self.output_format,
      "quantized_input_stats": quantized_stats,
      "default_ranges_stats": self.default_ranges_stats,
      "drop_control_dependency": self.drop_control_dependency,
      "reorder_across_fake_quant": self.reorder_across_fake_quant,
      "change_concat_input_ranges": self.change_concat_input_ranges,
      "allow_custom_ops": self.allow_custom_ops,
      "post_training_quantize": self.post_training_quantize,
      "target_ops": self.target_ops,
      "dump_graphviz_dir": self.dump_graphviz_dir,
      "dump_graphviz_video": self.dump_graphviz_video
  }

  optimized_graph = None
  if self.inference_type == constants.QUANTIZED_UINT8:
    optimized_graph = self._graph_def
  else:
    try:
      optimized_graph = _run_graph_optimizations(
          self._graph_def, self._input_tensors, self._output_tensors)
    except Exception:
      optimized_graph = self._graph_def

  # Converts model.
  if self._has_valid_tensors():
    result = _toco_convert_impl(
        input_data=optimized_graph,
        input_tensors=self._input_tensors,
        output_tensors=self._output_tensors,
        **converter_kwargs)
  else:
    result = _toco_convert_graph_def(
        input_data=optimized_graph,
        input_arrays_with_shape=self._input_arrays_with_shape,
        output_arrays=self._output_arrays,
        **converter_kwargs)

  return result
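# A minimal usage sketch, assuming a TF 1.x release where the converter
# exposes a boolean `post_training_quantize` attribute (as in the variant
# above) rather than the later `optimizations` list. The GraphDef file
# "model.pb" and the tensor names are hypothetical placeholders.
def _example_weights_only_quantization():
  import tensorflow as tf

  converter = tf.lite.TFLiteConverter.from_frozen_graph(
      "model.pb", input_arrays=["input"], output_arrays=["output"])
  converter.post_training_quantize = True  # Quantize weights to 8 bits.
  return converter.convert()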
def convert(self):
  """Converts a TensorFlow GraphDef based on instance variables.

  Returns:
    The converted data in serialized format. Either a TFLite Flatbuffer or a
    Graphviz graph depending on value in `output_format`.

  Raises:
    ValueError:
      Input shape is not specified.
      None value for dimension in input_tensor.
  """
  # Checks dimensions in input tensor.
  if self._has_valid_tensors():
    for tensor in self._input_tensors:
      shape = tensor.get_shape()
      if not shape:
        raise ValueError("Provide an input shape for input array "
                         "'{0}'.".format(_tensor_name(tensor)))
      # Note that shape_list might be empty for scalar shapes.
      shape_list = shape.as_list()
      if None in shape_list[1:]:
        raise ValueError(
            "None is only supported in the 1st dimension. Tensor '{0}' has "
            "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
      elif shape_list and shape_list[0] is None:
        self._set_batch_size(batch_size=1)

  # Get quantization stats. Ensures there is one stat per name if the stats
  # are specified.
  if self.quantized_input_stats:
    quantized_stats = []
    invalid_stats = []
    for name in self.get_input_arrays():
      if name in self.quantized_input_stats:
        quantized_stats.append(self.quantized_input_stats[name])
      else:
        invalid_stats.append(name)

    if invalid_stats:
      raise ValueError("Quantization input stats are not available for input "
                       "tensors '{0}'.".format(",".join(invalid_stats)))
  else:
    quantized_stats = None

  if self.representative_dataset:
    if not isinstance(self.representative_dataset, RepresentativeDataset):
      raise TypeError("representative_dataset must be an instance of "
                      "RepresentativeDataset")
    if self.representative_dataset.input_gen is None:
      raise ValueError("Provide an input generator for representative_dataset")

  # TODO(shashishekhar): For now, the order of optimizations is ignored.
  # Both size and latency optimizations decide whether to apply post-training
  # optimizations.
  post_training_optimize = bool(
      len(
          set(self.optimizations)
          & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
  # Do weights-only quantization if there is no dataset for calibration.
  weights_only_quantize_flag = (
      post_training_optimize and (self.representative_dataset is None))

  converter_kwargs = {
      "inference_type": self.inference_type,
      "inference_input_type": self.inference_input_type,
      "input_format": constants.TENSORFLOW_GRAPHDEF,
      "output_format": self.output_format,
      "quantized_input_stats": quantized_stats,
      "default_ranges_stats": self.default_ranges_stats,
      "drop_control_dependency": self.drop_control_dependency,
      "reorder_across_fake_quant": self.reorder_across_fake_quant,
      "change_concat_input_ranges": self.change_concat_input_ranges,
      "allow_custom_ops": self.allow_custom_ops,
      "post_training_quantize": weights_only_quantize_flag,
      "target_ops": self.target_ops,
      "dump_graphviz_dir": self.dump_graphviz_dir,
      "dump_graphviz_video": self.dump_graphviz_video
  }

  optimized_graph = None
  if self.inference_type == constants.QUANTIZED_UINT8:
    optimized_graph = self._graph_def
  else:
    try:
      optimized_graph = _run_graph_optimizations(
          self._graph_def, self._input_tensors, self._output_tensors)
    except Exception:
      optimized_graph = self._graph_def

  # Converts model.
  if self._has_valid_tensors():
    result = _toco_convert_impl(
        input_data=optimized_graph,
        input_tensors=self._input_tensors,
        output_tensors=self._output_tensors,
        **converter_kwargs)
  else:
    result = _toco_convert_graph_def(
        input_data=optimized_graph,
        input_arrays_with_shape=self._input_arrays_with_shape,
        output_arrays=self._output_arrays,
        **converter_kwargs)

  if self.representative_dataset and post_training_optimize:
    calibrate_quantize = _calibrator.Calibrator(result)
    result = calibrate_quantize.calibrate_and_quantize(
        self.representative_dataset.input_gen)

  return result
def convert(self):
  """Converts a TensorFlow GraphDef based on instance variables.

  Returns:
    The converted data in serialized format.

  Raises:
    ValueError:
      Input shape is not specified.
      None value for dimension in input_tensor.
  """
  frozen_func = _convert_to_constants.convert_variables_to_constants_v2(
      self._func)
  input_tensors = [
      tensor for tensor in frozen_func.inputs
      if tensor.dtype != _dtypes.resource
  ]
  output_tensors = frozen_func.outputs

  # Run a Grappler pass.
  graph_def = _run_graph_optimizations(frozen_func.graph.as_graph_def(),
                                       input_tensors, output_tensors,
                                       frozen_func.graph)

  # Checks dimensions in input tensor.
  for tensor in input_tensors:
    # Note that shape_list might be empty for scalar shapes.
    shape_list = tensor.get_shape().as_list()
    if None in shape_list[1:]:
      raise ValueError(
          "None is only supported in the 1st dimension. Tensor '{0}' has "
          "invalid shape '{1}'.".format(_tensor_name(tensor), shape_list))
    elif shape_list and shape_list[0] is None:
      # Set the batch size to 1 if undefined.
      shape = tensor.get_shape().as_list()
      shape[0] = 1
      tensor.set_shape(shape)

  if self.representative_dataset:
    if not isinstance(self.representative_dataset, RepresentativeDataset):
      raise TypeError("`representative_dataset` must be an instance of "
                      "`RepresentativeDataset`")
    if self.representative_dataset.input_gen is None:
      raise ValueError(
          "Provide an input generator for `representative_dataset`")

  # TODO(shashishekhar): For now, the order of optimizations is ignored.
  # Both size and latency optimizations decide whether to apply post-training
  # optimizations.
  post_training_optimize = bool(
      len(
          set(self.optimizations)
          & set([Optimize.OPTIMIZE_FOR_LATENCY, Optimize.OPTIMIZE_FOR_SIZE])))
  # Do weights-only quantization if there is no dataset for calibration.
  weights_only_quantize_flag = (
      post_training_optimize and (self.representative_dataset is None))

  converter_kwargs = {
      "input_format": constants.TENSORFLOW_GRAPHDEF,
      "allow_custom_ops": self.allow_custom_ops,
      "post_training_quantize": weights_only_quantize_flag,
      "target_ops": self.target_spec.supported_ops,
  }

  # Converts model.
  result = _toco_convert_impl(
      input_data=graph_def,
      input_tensors=input_tensors,
      output_tensors=output_tensors,
      **converter_kwargs)

  if self.representative_dataset and post_training_optimize:
    calibrate_quantize = _calibrator.Calibrator(result)
    result = calibrate_quantize.calibrate_and_quantize(
        self.representative_dataset.input_gen)

  return result
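# A minimal usage sketch, assuming TF 2.x eager execution: this convert()
# variant belongs to the 2.x converter, which starts from a ConcreteFunction,
# freezes variables with convert_variables_to_constants_v2, and then runs a
# Grappler pass. The classmethod name has varied across releases;
# from_concrete_functions() is assumed here, and the tiny matmul model is a
# stand-in.
def _example_convert_concrete_function():
  import tensorflow as tf

  root = tf.Module()
  root.weights = tf.Variable(tf.ones([4, 2]))
  root.matmul = tf.function(lambda x: tf.matmul(x, root.weights))
  concrete_func = root.matmul.get_concrete_function(
      tf.TensorSpec([1, 4], tf.float32))

  converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
  return converter.convert()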