def _write_metadata(model_path: str, label_map_path: str, mean: List[float], std: List[float]):
    """Add normalization option and label map TFLite metadata to the model.

    Args:
      model_path: The path of the TFLite model
      label_map_path: The path of the label map file
      mean: The mean value used to normalize input image tensor
      std: The standard deviation used to normalize input image tensor
    """
    # Root flatbuffer object describing the whole model.
    model_info = _metadata_fb.ModelMetadataT()

    # Input tensor metadata: attach a normalization process unit so
    # consumers know how to preprocess the image before inference.
    norm_unit = _metadata_fb.ProcessUnitT()
    norm_unit.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    norm_unit.options = _metadata_fb.NormalizationOptionsT()
    norm_unit.options.mean = mean
    norm_unit.options.std = std

    tensor_in = _metadata_fb.TensorMetadataT()
    tensor_in.processUnits = [norm_unit]

    # Output tensor metadata: associate the label map file so consumers
    # can map output axis indices back to human-readable labels.
    labels = _metadata_fb.AssociatedFileT()
    labels.name = os.path.basename(label_map_path)
    labels.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS

    tensor_out = _metadata_fb.TensorMetadataT()
    tensor_out.associatedFiles = [labels]

    # Wire the input/output tensor metadata into a single subgraph and
    # hang the subgraph off the model description.
    graph = _metadata_fb.SubGraphMetadataT()
    graph.inputTensorMetadata = [tensor_in]
    graph.outputTensorMetadata = [tensor_out]
    model_info.subgraphMetadata = [graph]

    # Serialize the metadata tree with the flatbuffer builder.
    builder = flatbuffers.Builder(0)
    builder.Finish(
        model_info.Pack(builder),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    metadata_buf = builder.Output()

    # Write the metadata buffer and the label file into the model file.
    populator = _metadata.MetadataPopulator.with_model_file(model_path)
    populator.load_metadata_buffer(metadata_buf)
    populator.load_associated_files([label_map_path])
    populator.populate()
def create_input_metadata():
    """Build the TensorMetadataT describing the model's RGB image input.

    Returns a tensor metadata object carrying image content properties,
    a per-channel mean normalization step, and an empty stats record.
    """
    meta = _metadata_fb.TensorMetadataT()
    meta.name = "image"
    meta.description = VggFaceMetadata.Layers.ANDROID_INPUT

    # Describe the tensor content as an RGB image.
    content = _metadata_fb.ContentT()
    content.contentProperties = _metadata_fb.ImagePropertiesT()
    content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB
    content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    meta.content = content

    # Normalization: subtract per-channel means (presumably the VGGFace
    # dataset means — confirm); std of 1 leaves the scale unchanged.
    norm = _metadata_fb.ProcessUnitT()
    norm.optionsType = _metadata_fb.ProcessUnitOptions.NormalizationOptions
    norm.options = _metadata_fb.NormalizationOptionsT()
    norm.options.mean = [91.4953, 103.8827, 131.0912]
    norm.options.std = [1, 1, 1]
    meta.processUnits = [norm]

    # Empty stats placeholder.
    meta.stats = _metadata_fb.StatsT()
    return meta
def _create_metadata(self):
    """Creates the metadata for an image classifier.

    Builds the full ModelMetadataT tree — model info, input image tensor
    (with normalization and stats), output probability tensor (with its
    label file) — and serializes it into ``self.metadata_buf``.
    """
    # Creates model info.
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = self.model_info.name
    model_meta.description = ("Identify the most prominent breed of a dog in the "
                              "image from a set of %d breeds." %
                              self.model_info.num_classes)
    model_meta.version = self.model_info.version
    model_meta.author = "https://github.com/puntogris/"
    model_meta.license = ("Apache License. Version 2.0 "
                          "http://www.apache.org/licenses/LICENSE-2.0.")

    # Creates input info.
    input_meta = _metadata_fb.TensorMetadataT()
    input_meta.name = "image"
    input_meta.description = (
        "Input image to be classified. The expected image is {0} x {1}, with "
        "three channels (red, blue, and green) per pixel. Each value in the "
        "tensor is a single byte between {2} and {3}.".format(
            self.model_info.image_width, self.model_info.image_height,
            self.model_info.image_min, self.model_info.image_max))
    input_meta.content = _metadata_fb.ContentT()
    input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
    input_meta.content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    input_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    input_normalization = _metadata_fb.ProcessUnitT()
    input_normalization.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    input_normalization.options = _metadata_fb.NormalizationOptionsT()
    input_normalization.options.mean = self.model_info.mean
    input_normalization.options.std = self.model_info.std
    input_meta.processUnits = [input_normalization]
    input_stats = _metadata_fb.StatsT()
    input_stats.max = [self.model_info.image_max]
    input_stats.min = [self.model_info.image_min]
    input_meta.stats = input_stats

    # Creates output info.
    output_meta = _metadata_fb.TensorMetadataT()
    output_meta.name = "probability"
    output_meta.description = ("Probabilities of the %d labels respectively." %
                               self.model_info.num_classes)
    output_meta.content = _metadata_fb.ContentT()
    # BUG FIX: was `content_properties` (snake_case), which set a dead
    # attribute and left the FeatureProperties union value unserialized
    # while contentPropertiesType claimed FeatureProperties was present.
    output_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
    output_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    output_stats = _metadata_fb.StatsT()
    output_stats.max = [1.0]
    output_stats.min = [0.0]
    output_meta.stats = output_stats
    label_file = _metadata_fb.AssociatedFileT()
    label_file.name = os.path.basename(self.label_file_path)
    label_file.description = "Labels for objects that the model can recognize."
    label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
    output_meta.associatedFiles = [label_file]

    # Creates subgraph info.
    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [input_meta]
    subgraph.outputTensorMetadata = [output_meta]
    model_meta.subgraphMetadata = [subgraph]

    b = flatbuffers.Builder(0)
    b.Finish(
        model_meta.Pack(b),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = b.Output()
def _create_metadata(self):
    """Creates the metadata for an image classifier.

    Assembles model info, the normalized RGB image input, and three
    detection outputs (location, confidence, landmark), then serializes
    the tree into ``self.metadata_buf``.
    """
    # Model-level description.
    meta = _metadata_fb.ModelMetadataT()
    meta.name = self.model_info.name
    meta.description = ("Identify the most prominent object in the "
                        "image from a set of %d categories." %
                        self.model_info.num_classes)
    meta.version = self.model_info.version
    meta.author = "TensorFlow"
    meta.license = ("Apache License. Version 2.0 "
                    "http://www.apache.org/licenses/LICENSE-2.0.")

    # Input: RGB image with normalization and min/max stats.
    image_meta = _metadata_fb.TensorMetadataT()
    image_meta.name = "image"
    image_meta.description = (
        "Input image to be classified. The expected image is {0} x {1}, with "
        "three channels (red, blue, and green) per pixel. Each value in the "
        "tensor is a single byte between {2} and {3}.".format(
            self.model_info.image_width, self.model_info.image_height,
            self.model_info.image_min, self.model_info.image_max))
    image_content = _metadata_fb.ContentT()
    image_content.contentProperties = _metadata_fb.ImagePropertiesT()
    image_content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    image_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    image_meta.content = image_content

    norm = _metadata_fb.ProcessUnitT()
    norm.optionsType = _metadata_fb.ProcessUnitOptions.NormalizationOptions
    norm.options = _metadata_fb.NormalizationOptionsT()
    norm.options.mean = self.model_info.mean
    norm.options.std = self.model_info.std
    image_meta.processUnits = [norm]

    stats = _metadata_fb.StatsT()
    stats.max = [self.model_info.image_max]
    stats.min = [self.model_info.image_min]
    image_meta.stats = stats

    # Output 1: bounding-box locations (boundaries, ratio coordinates).
    location_meta = _metadata_fb.TensorMetadataT()
    location_meta.name = "location"
    location_meta.description = "The locations of the detected boxes."
    location_content = _metadata_fb.ContentT()
    location_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.BoundingBoxProperties)
    location_content.contentProperties = _metadata_fb.BoundingBoxPropertiesT()
    location_content.contentProperties.index = [1, 0, 3, 2]
    location_content.contentProperties.type = (
        _metadata_fb.BoundingBoxType.BOUNDARIES)
    location_content.contentProperties.coordinateType = (
        _metadata_fb.CoordinateType.RATIO)
    location_content.range = _metadata_fb.ValueRangeT()
    location_content.range.min = 0
    location_content.range.max = 255
    location_meta.content = location_content

    # Output 2: per-box confidences. (A label file was once associated
    # here but is currently disabled.)
    confidence_meta = _metadata_fb.TensorMetadataT()
    confidence_meta.name = "confidence"
    confidence_meta.description = "The confidences of the detected boxes."
    confidence_content = _metadata_fb.ContentT()
    confidence_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    confidence_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    confidence_content.range = _metadata_fb.ValueRangeT()
    confidence_content.range.min = 0
    confidence_content.range.max = 255
    confidence_meta.content = confidence_content

    # Output 3: per-box landmarks.
    landmark_meta = _metadata_fb.TensorMetadataT()
    landmark_meta.name = "landmark"
    landmark_meta.description = "The landmark of the detected boxes."
    landmark_content = _metadata_fb.ContentT()
    landmark_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    landmark_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    landmark_content.range = _metadata_fb.ValueRangeT()
    landmark_content.range.min = 0
    landmark_content.range.max = 255
    landmark_meta.content = landmark_content

    # Single subgraph tying input and outputs together.
    graph = _metadata_fb.SubGraphMetadataT()
    graph.inputTensorMetadata = [image_meta]
    graph.outputTensorMetadata = [
        confidence_meta if False else location_meta,
        confidence_meta,
        landmark_meta,
    ] if False else [location_meta, confidence_meta, landmark_meta]
    meta.subgraphMetadata = [graph]

    builder = flatbuffers.Builder(0)
    builder.Finish(meta.Pack(builder),
                   _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = builder.Output()
def _create_metadata(self):
    """Creates the metadata for an object detector.

    Produces model info, the normalized image input, the four standard
    detection outputs (location/category/score/count) grouped as a
    "detection result", and serializes into ``self.metadata_buf``.
    """
    # Model-level description.
    meta = _metadata_fb.ModelMetadataT()
    meta.name = self.model_info.name
    meta.description = (
        "Identify which of a known set of objects might be present and provide "
        "information about their positions within the given image or a video "
        "stream.")
    meta.version = self.model_info.version
    meta.author = "TensorFlow Lite Model Maker"
    meta.license = ("Apache License. Version 2.0 "
                    "http://www.apache.org/licenses/LICENSE-2.0.")

    # Input: RGB image with normalization and min/max stats.
    image_meta = _metadata_fb.TensorMetadataT()
    image_meta.name = "image"
    image_meta.description = (
        "Input image to be detected. The expected image is {0} x {1}, with "
        "three channels (red, blue, and green) per pixel. Each value in the "
        "tensor is a single byte between 0 and 255.".format(
            self.model_info.image_width, self.model_info.image_height))
    image_content = _metadata_fb.ContentT()
    image_content.contentProperties = _metadata_fb.ImagePropertiesT()
    image_content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    image_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    image_meta.content = image_content

    norm = _metadata_fb.ProcessUnitT()
    norm.optionsType = _metadata_fb.ProcessUnitOptions.NormalizationOptions
    norm.options = _metadata_fb.NormalizationOptionsT()
    norm.options.mean = self.model_info.mean
    norm.options.std = self.model_info.std
    image_meta.processUnits = [norm]

    stats = _metadata_fb.StatsT()
    stats.max = [self.model_info.image_max]
    stats.min = [self.model_info.image_min]
    image_meta.stats = stats

    # Output 1: bounding-box locations (boundaries, ratio coordinates).
    location_meta = _metadata_fb.TensorMetadataT()
    location_meta.name = "location"
    location_meta.description = "The locations of the detected boxes."
    location_content = _metadata_fb.ContentT()
    location_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.BoundingBoxProperties)
    location_content.contentProperties = _metadata_fb.BoundingBoxPropertiesT()
    location_content.contentProperties.index = [1, 0, 3, 2]
    location_content.contentProperties.type = (
        _metadata_fb.BoundingBoxType.BOUNDARIES)
    location_content.contentProperties.coordinateType = (
        _metadata_fb.CoordinateType.RATIO)
    location_content.range = _metadata_fb.ValueRangeT()
    location_content.range.min = 2
    location_content.range.max = 2
    location_meta.content = location_content

    # Output 2: category indices, mapped to labels via the associated file.
    category_meta = _metadata_fb.TensorMetadataT()
    category_meta.name = "category"
    category_meta.description = "The categories of the detected boxes."
    category_content = _metadata_fb.ContentT()
    category_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    category_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    category_content.range = _metadata_fb.ValueRangeT()
    category_content.range.min = 2
    category_content.range.max = 2
    category_meta.content = category_content

    labels = _metadata_fb.AssociatedFileT()
    labels.name = os.path.basename(self.associated_files[0])
    labels.description = "Label of objects that this model can recognize."
    labels.type = _metadata_fb.AssociatedFileType.TENSOR_VALUE_LABELS
    category_meta.associatedFiles = [labels]

    # Output 3: per-box scores.
    score_meta = _metadata_fb.TensorMetadataT()
    score_meta.name = "score"
    score_meta.description = "The scores of the detected boxes."
    score_content = _metadata_fb.ContentT()
    score_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    score_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    score_content.range = _metadata_fb.ValueRangeT()
    score_content.range.min = 2
    score_content.range.max = 2
    score_meta.content = score_content

    # Output 4: number of detections (scalar, no value range).
    count_meta = _metadata_fb.TensorMetadataT()
    count_meta.name = "number of detections"
    count_meta.description = "The number of the detected boxes."
    count_content = _metadata_fb.ContentT()
    count_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    count_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    count_meta.content = count_content

    # Group the per-box tensors so consumers treat them as one result.
    result_group = _metadata_fb.TensorGroupT()
    result_group.name = "detection result"
    result_group.tensorNames = [
        location_meta.name, category_meta.name, score_meta.name
    ]

    graph = _metadata_fb.SubGraphMetadataT()
    graph.inputTensorMetadata = [image_meta]
    graph.outputTensorMetadata = [
        location_meta, category_meta, score_meta, count_meta
    ]
    graph.outputTensorGroups = [result_group]
    meta.subgraphMetadata = [graph]

    builder = flatbuffers.Builder(0)
    builder.Finish(meta.Pack(builder),
                   _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = builder.Output()
'http://www.apache.org/licenses/LICENSE-2.0. ') input_meta = _metadata_fb.TensorMetadataT() input_meta.name = 'Image' input_meta.description = ( "Input image to be classified. The expected image is {0} x {1}, with " "three channels (red, blue, green) per pixel. Each value in the " "tensor is a single byte between 0 and 255. ".format(640, 640)) input_meta.content = _metadata_fb.ContentT() input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT() input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties input_normalization = _metadata_fb.ProcessUnitT() input_normalization.optionsType = _metadata_fb.ProcessUnitOptions.NormalizationOptions input_normalization.options = _metadata_fb.NormalizationOptionsT() input_normalization.options.mean = [127.5] input_normalization.options.std = [127.5] input_meta.processUnits = [input_normalization] input_stats = _metadata_fb.StatsT() input_stats.max = [255] input_stats.min = [0] input_meta.stats = input_stats labelmap_file = "F:/Android/object_detection/android/app/src/main/assets/labelmap.txt" exported_model_path = "F:/Android/object_detection/android/app/src/main/assets/model_fpnlite640.tflite" output_tensorgroups = _metadata_fb.TensorGroupT() output_tensorgroups.name = "detection result" output_tensorgroups.tensorNames = ["location", "category", "score"]
def create_metadata(model_file_name, label_map_file_name, num_labels):
    """Creates metadata for a detection model and writes it into the model file.

    Args:
      model_file_name: path to the .tflite model the metadata is packed into.
      label_map_file_name: path to the label file associated with the
        classes output tensor.
      num_labels: number of class labels (upper bound for the classes stats).
    """
    # Imported lazily so importing this module does not require
    # tflite_support to be installed.
    from tflite_support import flatbuffers
    from tflite_support import metadata as _metadata
    from tflite_support import metadata_schema_py_generated as _metadata_fb

    # Creates model info.
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = 'MobileNetV1 image classifier'
    model_meta.description = ('Identify the most prominent object in the '
                              'image from a set of 1,001 categories such as '
                              'trees, animals, food, vehicles, person etc.')
    model_meta.version = 'v1'
    model_meta.author = 'TensorFlow'
    model_meta.license = ('Apache License. Version 2.0 '
                          'http://www.apache.org/licenses/LICENSE-2.0.')

    # Creates input info.
    input_meta = _metadata_fb.TensorMetadataT()
    input_meta.name = 'image'
    input_meta.description = (
        'Input image to be classified. The expected image is {0} x {1}, with '
        'three channels (red, blue, and green) per pixel. Each value in the '
        'tensor is a single byte between 0 and 255.'.format(416, 416))
    input_meta.content = _metadata_fb.ContentT()
    input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
    input_meta.content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    input_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    input_normalization = _metadata_fb.ProcessUnitT()
    input_normalization.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    input_normalization.options = _metadata_fb.NormalizationOptionsT()
    input_normalization.options.mean = [127.5]
    input_normalization.options.std = [127.5]
    input_meta.processUnits = [input_normalization]
    input_stats = _metadata_fb.StatsT()
    input_stats.max = [255]
    input_stats.min = [0]
    input_meta.stats = input_stats

    # Creates output info.
    # BUG FIX (applies to all outputs below): these blocks assigned
    # `content.content_properties` (snake_case), a dead attribute on the
    # generated object; the real field is `contentProperties`, so the
    # FeatureProperties union value was never serialized.
    bbox_meta = _metadata_fb.TensorMetadataT()
    bbox_meta.name = 'bbox'
    bbox_meta.description = '.'
    bbox_meta.content = _metadata_fb.ContentT()
    bbox_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
    bbox_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    bbox_stats = _metadata_fb.StatsT()
    bbox_stats.max = [416.0]
    bbox_stats.min = [0.0]
    bbox_meta.stats = bbox_stats

    classes_meta = _metadata_fb.TensorMetadataT()
    classes_meta.name = 'classes'
    classes_meta.description = '.'
    classes_meta.content = _metadata_fb.ContentT()
    classes_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
    classes_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    classes_stats = _metadata_fb.StatsT()
    classes_stats.max = [num_labels]
    classes_stats.min = [0]
    classes_meta.stats = classes_stats
    label_file = _metadata_fb.AssociatedFileT()
    label_file.name = os.path.basename(label_map_file_name)
    label_file.description = 'Labels for objects that the model can recognize.'
    label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
    classes_meta.associatedFiles = [label_file]

    confidence_meta = _metadata_fb.TensorMetadataT()
    confidence_meta.name = 'confidence'
    confidence_meta.description = '.'
    confidence_meta.content = _metadata_fb.ContentT()
    confidence_meta.content.contentProperties = (
        _metadata_fb.FeaturePropertiesT())
    confidence_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    confidence_stats = _metadata_fb.StatsT()
    confidence_stats.max = [1.0]
    confidence_stats.min = [0.0]
    confidence_meta.stats = confidence_stats

    num_dets_meta = _metadata_fb.TensorMetadataT()
    num_dets_meta.name = 'num_dets'
    num_dets_meta.description = '.'
    num_dets_meta.content = _metadata_fb.ContentT()
    num_dets_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
    num_dets_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    num_dets_stats = _metadata_fb.StatsT()
    num_dets_stats.max = [200]
    num_dets_stats.min = [0]
    num_dets_meta.stats = num_dets_stats

    # NOTE(review): built but intentionally left out of the subgraph outputs;
    # kept for a possible raw-output export path.
    raw_output_meta = _metadata_fb.TensorMetadataT()
    raw_output_meta.name = 'raw_output'
    raw_output_meta.description = '.'
    raw_output_meta.content = _metadata_fb.ContentT()
    raw_output_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
    raw_output_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)

    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [input_meta]
    # BUG FIX: the fourth entry was `num_dets_stats` (a StatsT), not the
    # tensor metadata object `num_dets_meta`.
    subgraph.outputTensorMetadata = [
        bbox_meta, classes_meta, confidence_meta, num_dets_meta
    ]  # raw_output_meta
    model_meta.subgraphMetadata = [subgraph]

    b = flatbuffers.Builder(0)
    b.Finish(model_meta.Pack(b),
             _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    metadata_buf = b.Output()

    # Packs metadata and the label file into the model file in place.
    populator = _metadata.MetadataPopulator.with_model_file(model_file_name)
    populator.load_metadata_buffer(metadata_buf)
    populator.load_associated_files([label_map_file_name])
    populator.populate()
def _create_metadata(self):
    """Creates the metadata for an image classifier.

    Assembles model info, the normalized image input, and the four
    detection outputs (location/category/score/count) plus their tensor
    group, and serializes into ``self.metadata_buf``.
    """
    # Model-level description.
    meta = _metadata_fb.ModelMetadataT()
    meta.name = self.model_info.name
    meta.description = ("Equipment.")
    meta.version = self.model_info.version
    meta.author = "TensorFlow"
    meta.license = ("Apache License. Version 2.0 "
                    "http://www.apache.org/licenses/LICENSE-2.0.")

    # Input: RGB image with normalization and min/max stats.
    image_meta = _metadata_fb.TensorMetadataT()
    image_meta.name = "image"
    image_meta.description = (
        "The expected image is 300 x 300, with three channels "
        "(re, blue, and green) per pixel. Each value in the tensor is between"
        " 0 and 1.")
    image_content = _metadata_fb.ContentT()
    image_content.contentProperties = _metadata_fb.ImagePropertiesT()
    image_content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    image_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    image_meta.content = image_content

    norm = _metadata_fb.ProcessUnitT()
    norm.optionsType = _metadata_fb.ProcessUnitOptions.NormalizationOptions
    norm.options = _metadata_fb.NormalizationOptionsT()
    norm.options.mean = self.model_info.mean
    norm.options.std = self.model_info.std
    image_meta.processUnits = [norm]

    stats = _metadata_fb.StatsT()
    stats.max = [self.model_info.image_max]
    stats.min = [self.model_info.image_min]
    image_meta.stats = stats

    # Output 1: bounding-box locations (boundaries, ratio coordinates).
    location_meta = _metadata_fb.TensorMetadataT()
    location_meta.name = "location"
    location_meta.description = "The locations of the detected boxes."
    location_content = _metadata_fb.ContentT()
    location_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.BoundingBoxProperties)
    location_content.contentProperties = _metadata_fb.BoundingBoxPropertiesT()
    location_content.contentProperties.index = [1, 0, 3, 2]
    location_content.contentProperties.type = (
        _metadata_fb.BoundingBoxType.BOUNDARIES)
    location_content.contentProperties.coordinateType = (
        _metadata_fb.CoordinateType.RATIO)
    location_content.range = _metadata_fb.ValueRangeT()
    location_content.range.min = 2
    location_content.range.max = 2
    location_meta.content = location_content

    # Output 2: category indices, mapped to labels via the associated file.
    category_meta = _metadata_fb.TensorMetadataT()
    category_meta.name = "category"
    category_meta.description = "The categories of the detected boxes."
    category_content = _metadata_fb.ContentT()
    category_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    category_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    category_content.range = _metadata_fb.ValueRangeT()
    category_content.range.min = 2
    category_content.range.max = 2
    category_meta.content = category_content

    labels = _metadata_fb.AssociatedFileT()
    labels.name = os.path.basename(self.label_file_path)
    labels.description = "Label of objects that this model can recognize."
    labels.type = _metadata_fb.AssociatedFileType.TENSOR_VALUE_LABELS
    category_meta.associatedFiles = [labels]

    # Output 3: per-box scores.
    score_meta = _metadata_fb.TensorMetadataT()
    score_meta.name = "score"
    score_meta.description = "The scores of the detected boxes."
    score_content = _metadata_fb.ContentT()
    score_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    score_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    score_content.range = _metadata_fb.ValueRangeT()
    score_content.range.min = 2
    score_content.range.max = 2
    score_meta.content = score_content

    # Output 4: number of detections (scalar, no value range).
    count_meta = _metadata_fb.TensorMetadataT()
    count_meta.name = "number of detections"
    count_meta.description = "The number of the detected boxes."
    count_content = _metadata_fb.ContentT()
    count_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    count_content.contentProperties = _metadata_fb.FeaturePropertiesT()
    count_meta.content = count_content

    # Group the per-box tensors so consumers treat them as one result.
    result_group = _metadata_fb.TensorGroupT()
    result_group.name = "detection result"
    result_group.tensorNames = [
        location_meta.name, category_meta.name, score_meta.name
    ]

    graph = _metadata_fb.SubGraphMetadataT()
    graph.inputTensorMetadata = [image_meta]
    graph.outputTensorMetadata = [
        location_meta, category_meta, score_meta, count_meta
    ]
    graph.outputTensorGroups = [result_group]
    meta.subgraphMetadata = [graph]

    builder = flatbuffers.Builder(0)
    builder.Finish(meta.Pack(builder),
                   _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = builder.Output()
def _create_metadata(self):
    """Creates the metadata for the selfie2anime model.

    Describes an image-to-image model: a 256x256 RGB selfie input
    (normalized to [0, 1]) and an anime-styled RGB image output
    (denormalized back to [0, 255]). Serializes into ``self.metadata_buf``.
    """
    # Model-level description.
    meta = _metadata_fb.ModelMetadataT()
    meta.name = "Selfie2Anime"
    meta.description = ("Convert selfie to anime.")
    meta.version = "v1"
    meta.author = "TensorFlow"
    meta.license = ("Apache License. Version 2.0 "
                    "http://www.apache.org/licenses/LICENSE-2.0.")

    # Input: the selfie image, scaled to [0, 1] via mean 0 / std 255.
    selfie_meta = _metadata_fb.TensorMetadataT()
    selfie_meta.name = "selfie_image"
    selfie_meta.description = (
        "The expected image is 256 x 256, with three channels "
        "(red, blue, and green) per pixel. Each value in the tensor is between"
        " 0 and 1.")
    selfie_content = _metadata_fb.ContentT()
    selfie_content.contentProperties = _metadata_fb.ImagePropertiesT()
    selfie_content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    selfie_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    selfie_meta.content = selfie_content

    selfie_norm = _metadata_fb.ProcessUnitT()
    selfie_norm.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    selfie_norm.options = _metadata_fb.NormalizationOptionsT()
    selfie_norm.options.mean = [0.0]
    selfie_norm.options.std = [255]
    selfie_meta.processUnits = [selfie_norm]

    selfie_stats = _metadata_fb.StatsT()
    selfie_stats.max = [1.0]
    selfie_stats.min = [0.0]
    selfie_meta.stats = selfie_stats

    # Output: the anime-styled image; std 1/255 maps [0, 1] back to [0, 255].
    anime_meta = _metadata_fb.TensorMetadataT()
    anime_meta.name = "anime_image"
    anime_meta.description = "Image styled."
    anime_content = _metadata_fb.ContentT()
    anime_content.contentProperties = _metadata_fb.ImagePropertiesT()
    anime_content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    anime_content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    anime_meta.content = anime_content

    anime_norm = _metadata_fb.ProcessUnitT()
    anime_norm.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    anime_norm.options = _metadata_fb.NormalizationOptionsT()
    anime_norm.options.mean = [0.0]
    anime_norm.options.std = [0.003921568627]  # 1/255
    anime_meta.processUnits = [anime_norm]

    anime_stats = _metadata_fb.StatsT()
    anime_stats.max = [1.0]
    anime_stats.min = [0.0]
    anime_meta.stats = anime_stats

    # Single subgraph: one image in, one image out.
    graph = _metadata_fb.SubGraphMetadataT()
    graph.inputTensorMetadata = [selfie_meta]
    graph.outputTensorMetadata = [anime_meta]
    meta.subgraphMetadata = [graph]

    builder = flatbuffers.Builder(0)
    builder.Finish(meta.Pack(builder),
                   _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = builder.Output()