def save_metadata_to_model_file(metadata, model_filename):
    """Serialize *metadata* into a flatbuffer and write it into the model file.

    Args:
        metadata: A metadata object-API table (e.g. ``ModelMetadataT``)
            supporting ``Pack``.
        model_filename: Path of the TFLite model file, populated in place.
    """
    builder = flatbuffers.Builder(0)
    builder.Finish(
        metadata.Pack(builder),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    packed_buffer = builder.Output()

    populator = _metadata.MetadataPopulator.with_model_file(model_filename)
    populator.load_metadata_buffer(packed_buffer)
    # No associated files for this model (e.g. no label files):
    # populator.load_associated_files(["your_path_to_label_file"])
    populator.populate()
# NOTE(review): this chunk is the tail of a detection-metadata builder whose
# beginning lies outside this view; the first line closes an assignment
# (presumably ``...content.contentProperties = (``) started in the missing part.
    _metadata_fb.FeaturePropertiesT())

# Group the three detection outputs (locations, classes, scores) so that
# consumers can read them as a single "detection result".
group = _metadata_fb.TensorGroupT()
group.name = "detection result"
group.tensorNames = [
    output_location_meta.name, output_class_meta.name, output_score_meta.name
]

# Wire input/output tensor metadata into the single subgraph.
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.inputTensorMetadata = [input_meta]
subgraph.outputTensorMetadata = [
    output_location_meta, output_class_meta, output_score_meta,
    output_number_meta
]
subgraph.outputTensorGroups = [group]
model_meta.subgraphMetadata = [subgraph]

# Pack the metadata into a flatbuffer.
b = flatbuffers.Builder(0)
b.Finish(model_meta.Pack(b),
         _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
# NOTE(review): "metadta" is a typo, but it is used consistently below.
metadta_buf = b.Output()

# your own path here
populator = _metadata.MetadataPopulator.with_model_file("detect.tflite")
populator.load_metadata_buffer(metadta_buf)
# your own path here
populator.load_associated_files(["label.txt"])
populator.populate()

# # Metadata visualization
# import os
# displayer = _metadata.MetadataDisplayer.with_model_file("C:/Users/lee01/Jupyter_Projects/Safety-Helmet-Wearing-Dataset/ProcessingTfliteDetectionModel/final_model.tflite")
# export_json_file = os.path.join(FLAGS.export_directory,
def build_metadata(name: str,
                   version: str,
                   labels: List[LabelDescription],
                   output_types: List[OutputTensorType],
                   output_interpretation: str = "",
                   task: Optional[str] = None,
                   author: str = "",
                   task_params: str = "") -> bytearray:
    """Pack a TFLite metadata flatbuffer describing the model.

    Args:
        name: Model name, also embedded in the JSON description.
        version: Model version string.
        labels: Label descriptions embedded in the model description.
        output_types: Types of the model output tensors, in order.
        output_interpretation: Optional hint on how to interpret outputs.
        task: Task identifier; when None/empty it is inferred from the set of
            ``output_types`` (object detection vs. object localization).
        author: Model author recorded in the metadata.
        task_params: Serialized task-specific parameters.

    Returns:
        The packed metadata flatbuffer as a ``bytearray``.

    Raises:
        ValueError: If ``task`` is not given and cannot be inferred from
            ``output_types``.
    """
    if not task:
        # Infer the task from the exact set of output tensor types.
        type_set = set(output_types)
        detection_outputs = {
            OutputTensorType.BOX_SHAPE,
            OutputTensorType.OBJECTNESS,
            OutputTensorType.CLASSES,
        }
        localization_outputs = {
            OutputTensorType.BOX_SHAPE,
            OutputTensorType.OBJECTNESS,
        }
        if type_set == detection_outputs:
            task = TaskType.OBJECT_DETECTION.value
        elif type_set == localization_outputs:
            task = TaskType.OBJECT_LOCALIZATION.value
        else:
            raise ValueError(
                f"Cannot infer task_type from output_types: {output_types}")

    description = ModelMetadata(
        name=name,
        version=version,
        task=task,
        output_interpretation=output_interpretation,
        labels=labels,
        task_params=task_params,
    )

    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = name
    # The structured description rides along as JSON in the free-text field.
    model_meta.description = json.dumps(description.asdict)
    model_meta.version = version
    model_meta.author = author

    def _named_tensor(tensor_name: str):
        """Build a TensorMetadataT carrying only a name."""
        tensor_meta = _metadata_fb.TensorMetadataT()
        tensor_meta.name = tensor_name
        return tensor_meta

    image_input = _named_tensor("image")
    output_metas = [_named_tensor(t.value) for t in output_types]

    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [image_input]
    subgraph.outputTensorMetadata = output_metas
    model_meta.subgraphMetadata = [subgraph]

    builder = flatbuffers.Builder(0)
    builder.Finish(
        model_meta.Pack(builder),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    return builder.Output()
def build_metadata(
        model_file_path: str,
        #model_name: str,
        #model_description: str,
        #model_author: str,
        #model_license: str,
        vocab_file_path: Optional[str] = None,
        sentencepiece_model_path: Optional[str] = None):
    """Build BERT-style text-classifier metadata and write it into the model.

    Args:
        model_file_path: Path of the TFLite model to populate in place.
        vocab_file_path: Optional path to a WordPiece vocabulary file; when
            given, a BertTokenizer process unit is attached.
        sentencepiece_model_path: Optional path to a SentencePiece model file;
            when given, a SentencePieceTokenizer process unit is attached.
    """

    def _feature_tensor(name, description):
        """Build a TensorMetadataT with plain feature content properties."""
        meta = _metadata_fb.TensorMetadataT()
        meta.name = name
        meta.description = description
        meta.content = _metadata_fb.ContentT()
        meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
        meta.content.contentPropertiesType = (
            _metadata_fb.ContentProperties.FeatureProperties)
        return meta

    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = 'distilbert news'
    model_meta.description = 'some model description'
    model_meta.version = 'v1'
    model_meta.author = 'Unknown'
    model_meta.license = 'Apache License. Version 2.0'
    model_meta.minParserVersion = '1.1.0'

    # Creates input info: token ids, segment ids and attention mask.
    ids = _feature_tensor("ids", "Tokenized ids of input text.")
    segment_ids = _feature_tensor(
        "segment_ids",
        "0 for the first sequence, 1 for the second sequence if exits.")
    mask = _feature_tensor(
        "mask", "Mask with 1 for real tokens and 0 for padding tokens.")

    # Creates output info: the predicted class id. No label file is attached
    # for this model.
    output_meta = _feature_tensor("output", "the id of the output class")

    input_process_units = []
    if sentencepiece_model_path is not None:
        # Add sentencepiece-specific process unit.
        sentencepiece_process_unit = _metadata_fb.ProcessUnitT()
        sentencepiece_process_unit.optionsType = (
            _metadata_fb.ProcessUnitOptions.SentencePieceTokenizerOptions)
        sentencepiece_process_unit.options = (
            _metadata_fb.SentencePieceTokenizerOptionsT())
        sentencepiece_model = _metadata_fb.AssociatedFileT()
        # Bug fix: these assignments previously ended with stray commas,
        # turning the attribute values into one-element tuples, not strings.
        sentencepiece_model.name = "30k-clean-model"
        sentencepiece_model.description = "The sentence piece model file."
        sentencepiece_process_unit.options.sentencePieceModel = [
            sentencepiece_model
        ]
        input_process_units.append(sentencepiece_process_unit)
    if vocab_file_path is not None:
        # Add BERT (WordPiece) tokenizer process unit.
        model_process_unit = _metadata_fb.ProcessUnitT()
        model_process_unit.optionsType = (
            _metadata_fb.ProcessUnitOptions.BertTokenizerOptions)
        model_process_unit.options = _metadata_fb.BertTokenizerOptionsT()
        vocab_file = _metadata_fb.AssociatedFileT()
        # Bug fix: same trailing-comma tuple issue as above.
        vocab_file.name = "jp word piece vocab"
        vocab_file.description = "Japanese Vocabulary file for the BERT tokenizer."
        vocab_file.type = _metadata_fb.AssociatedFileType.VOCABULARY
        model_process_unit.options.vocabFile = [vocab_file]
        input_process_units.append(model_process_unit)

    # Put metadata together.
    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [ids, mask, segment_ids]
    subgraph.outputTensorMetadata = [output_meta]
    subgraph.inputProcessUnits = input_process_units
    model_meta.subgraphMetadata = [subgraph]

    # Create flat buffer for the metadata.
    b = flatbuffers.Builder(0)
    b.Finish(
        model_meta.Pack(b),
        _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    metadata_buf = b.Output()

    # Associate files to metadata and write everything into the model file.
    populator = _metadata.MetadataPopulator.with_model_file(model_file_path)
    populator.load_metadata_buffer(metadata_buf)
    # Bug fix: the original loaded [sentencepiece_model_path] (possibly
    # [None]) whenever no vocab file was given, and silently dropped the
    # sentencepiece file when both were given. Attach exactly the files that
    # were provided.
    files = [
        path for path in (vocab_file_path, sentencepiece_model_path)
        if path is not None
    ]
    if files:
        populator.load_associated_files(files)
    populator.populate()
def create_metadata(model_file_name, label_map_file_name, num_labels):
    """Creates and populates TFLite metadata for a detection-style model.

    Args:
        model_file_name: Path to the .tflite model; metadata is written into
            it in place.
        label_map_file_name: Path of the label file, packed as an associated
            TENSOR_AXIS_LABELS file on the class output.
        num_labels: Number of classes; used as the upper bound of the class
            output stats.
    """
    from tflite_support import flatbuffers
    from tflite_support import metadata as _metadata
    from tflite_support import metadata_schema_py_generated as _metadata_fb

    def _feature_tensor(name):
        """Returns a TensorMetadataT describing a plain feature tensor.

        Bug fix: the original assigned ``content_properties`` (snake_case),
        which is not a field of ``ContentT``, so the feature properties were
        silently missing from the packed metadata.
        """
        meta = _metadata_fb.TensorMetadataT()
        meta.name = name
        meta.description = '.'
        meta.content = _metadata_fb.ContentT()
        meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
        meta.content.contentPropertiesType = (
            _metadata_fb.ContentProperties.FeatureProperties)
        return meta

    # Creates model info.
    # NOTE(review): the name/description talk about an image classifier, but
    # the outputs below (bbox/classes/confidence/num_dets) describe a
    # detector — confirm the intended wording.
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = 'MobileNetV1 image classifier'
    model_meta.description = ('Identify the most prominent object in the '
                              'image from a set of 1,001 categories such as '
                              'trees, animals, food, vehicles, person etc.')
    model_meta.version = 'v1'
    model_meta.author = 'TensorFlow'
    model_meta.license = ('Apache License. Version 2.0 '
                          'http://www.apache.org/licenses/LICENSE-2.0.')

    # Creates input info: a 416 x 416 RGB image with byte values.
    input_meta = _metadata_fb.TensorMetadataT()
    input_meta.name = 'image'
    input_meta.description = (
        'Input image to be classified. The expected image is {0} x {1}, with '
        'three channels (red, blue, and green) per pixel. Each value in the '
        'tensor is a single byte between 0 and 255.'.format(416, 416))
    input_meta.content = _metadata_fb.ContentT()
    input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
    input_meta.content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    input_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    # Normalization maps the [0, 255] byte range to [-1, 1].
    input_normalization = _metadata_fb.ProcessUnitT()
    input_normalization.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    input_normalization.options = _metadata_fb.NormalizationOptionsT()
    input_normalization.options.mean = [127.5]
    input_normalization.options.std = [127.5]
    input_meta.processUnits = [input_normalization]
    input_stats = _metadata_fb.StatsT()
    input_stats.max = [255]
    input_stats.min = [0]
    input_meta.stats = input_stats

    # Creates output info: boxes, class ids, confidences, detection count.
    bbox_meta = _feature_tensor('bbox')
    bbox_stats = _metadata_fb.StatsT()
    bbox_stats.max = [416.0]
    bbox_stats.min = [0.0]
    bbox_meta.stats = bbox_stats

    classes_meta = _feature_tensor('classes')
    classes_stats = _metadata_fb.StatsT()
    classes_stats.max = [num_labels]
    classes_stats.min = [0]
    classes_meta.stats = classes_stats
    label_file = _metadata_fb.AssociatedFileT()
    label_file.name = os.path.basename(label_map_file_name)
    label_file.description = 'Labels for objects that the model can recognize.'
    label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
    classes_meta.associatedFiles = [label_file]

    confidence_meta = _feature_tensor('confidence')
    confidence_stats = _metadata_fb.StatsT()
    confidence_stats.max = [1.0]
    confidence_stats.min = [0.0]
    confidence_meta.stats = confidence_stats

    num_dets_meta = _feature_tensor('num_dets')
    num_dets_stats = _metadata_fb.StatsT()
    num_dets_stats.max = [200]
    num_dets_stats.min = [0]
    num_dets_meta.stats = num_dets_stats

    raw_output_meta = _feature_tensor('raw_output')  # built but not exported

    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [input_meta]
    # Bug fix: the original listed ``num_dets_stats`` (a StatsT) in the output
    # tensor list instead of ``num_dets_meta``.
    subgraph.outputTensorMetadata = [
        bbox_meta, classes_meta, confidence_meta, num_dets_meta
    ]  # raw_output_meta
    model_meta.subgraphMetadata = [subgraph]

    b = flatbuffers.Builder(0)
    b.Finish(model_meta.Pack(b),
             _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    metadata_buf = b.Output()

    populator = _metadata.MetadataPopulator.with_model_file(model_file_name)
    populator.load_metadata_buffer(metadata_buf)
    populator.load_associated_files([label_map_file_name])
    populator.populate()
def _create_metadata(self):
    """Creates the metadata for the selfie2anime model.

    Packs the model/input/output descriptions into a flatbuffer and stores the
    result in ``self.metadata_buf``.
    """
    # Model info.
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = "Selfie2Anime"
    model_meta.description = ("Convert selfie to anime.")
    model_meta.version = "v1"
    model_meta.author = "TensorFlow"
    model_meta.license = ("Apache License. Version 2.0 "
                          "http://www.apache.org/licenses/LICENSE-2.0.")

    def _rgb_image_tensor(tensor_name, tensor_description, norm_mean, norm_std):
        """Builds TensorMetadataT for an RGB image tensor with one
        normalization process unit and value stats in [0, 1]."""
        meta = _metadata_fb.TensorMetadataT()
        meta.name = tensor_name
        meta.description = tensor_description
        meta.content = _metadata_fb.ContentT()
        meta.content.contentProperties = (
            _metadata_fb.ImagePropertiesT())
        meta.content.contentProperties.colorSpace = (
            _metadata_fb.ColorSpaceType.RGB)
        meta.content.contentPropertiesType = (
            _metadata_fb.ContentProperties.ImageProperties)
        normalization = _metadata_fb.ProcessUnitT()
        normalization.optionsType = (
            _metadata_fb.ProcessUnitOptions.NormalizationOptions)
        normalization.options = _metadata_fb.NormalizationOptionsT()
        normalization.options.mean = norm_mean
        normalization.options.std = norm_std
        meta.processUnits = [normalization]
        stats = _metadata_fb.StatsT()
        stats.max = [1.0]
        stats.min = [0.0]
        meta.stats = stats
        return meta

    # Input: selfie image, bytes scaled into [0, 1] (divide by 255).
    selfie_meta = _rgb_image_tensor(
        "selfie_image",
        ("The expected image is 256 x 256, with three channels "
         "(red, blue, and green) per pixel. Each value in the tensor is between"
         " 0 and 1."),
        [0.0],
        [255])

    # Output: stylized anime image, scaled back with std 1/255.
    anime_meta = _rgb_image_tensor(
        "anime_image",
        "Image styled.",
        [0.0],
        [0.003921568627])  # 1/255

    # Subgraph wiring: one input, one output.
    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [selfie_meta]
    subgraph.outputTensorMetadata = [anime_meta]
    model_meta.subgraphMetadata = [subgraph]

    builder = flatbuffers.Builder(0)
    builder.Finish(model_meta.Pack(builder),
                   _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = builder.Output()
def _create_metadata(self):
    """Creates the metadata for an image classifier.

    Builds a ModelMetadataT describing the model, its image input (with
    normalization and value-range stats from ``self.model_info``) and its
    probability output (with ``self.label_file_path`` attached as a label
    file), packs it into a flatbuffer and stores it in ``self.metadata_buf``.
    """
    # Creates model info.
    model_meta = _metadata_fb.ModelMetadataT()
    model_meta.name = self.model_info.name
    model_meta.description = ("Identify the most prominent object in the "
                              "image from a set of categories.")
    model_meta.version = self.model_info.version
    model_meta.author = "TFLite Model Maker"
    model_meta.license = ("Apache License. Version 2.0 "
                          "http://www.apache.org/licenses/LICENSE-2.0.")

    # Creates input info.
    input_meta = _metadata_fb.TensorMetadataT()
    input_meta.name = "image"
    input_meta.description = (
        "Input image to be classified. The expected image is {0} x {1}, with "
        "three channels (red, blue, and green) per pixel. Each value in the "
        "tensor is a single byte between {2} and {3}.".format(
            self.model_info.image_width, self.model_info.image_height,
            self.model_info.image_min, self.model_info.image_max))
    input_meta.content = _metadata_fb.ContentT()
    input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT()
    input_meta.content.contentProperties.colorSpace = (
        _metadata_fb.ColorSpaceType.RGB)
    input_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.ImageProperties)
    input_normalization = _metadata_fb.ProcessUnitT()
    input_normalization.optionsType = (
        _metadata_fb.ProcessUnitOptions.NormalizationOptions)
    input_normalization.options = _metadata_fb.NormalizationOptionsT()
    input_normalization.options.mean = self.model_info.mean
    input_normalization.options.std = self.model_info.std
    input_meta.processUnits = [input_normalization]
    input_stats = _metadata_fb.StatsT()
    input_stats.max = [self.model_info.image_max]
    input_stats.min = [self.model_info.image_min]
    input_meta.stats = input_stats

    # Creates output info.
    output_meta = _metadata_fb.TensorMetadataT()
    output_meta.name = "probability"
    output_meta.description = "Probabilities of the labels respectively."
    output_meta.content = _metadata_fb.ContentT()
    # Bug fix: the original assigned ``content_properties`` (snake_case),
    # which is not a field of ContentT, so the feature properties were
    # silently missing from the packed metadata.
    output_meta.content.contentProperties = _metadata_fb.FeaturePropertiesT()
    output_meta.content.contentPropertiesType = (
        _metadata_fb.ContentProperties.FeatureProperties)
    output_stats = _metadata_fb.StatsT()
    output_stats.max = [1.0]
    output_stats.min = [0.0]
    output_meta.stats = output_stats
    label_file = _metadata_fb.AssociatedFileT()
    label_file.name = os.path.basename(self.label_file_path)
    label_file.description = "Labels that %s can recognize." % model_meta.name
    label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS
    output_meta.associatedFiles = [label_file]

    # Creates subgraph info.
    subgraph = _metadata_fb.SubGraphMetadataT()
    subgraph.inputTensorMetadata = [input_meta]
    subgraph.outputTensorMetadata = [output_meta]
    model_meta.subgraphMetadata = [subgraph]

    b = flatbuffers.Builder(0)
    b.Finish(model_meta.Pack(b),
             _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
    self.metadata_buf = b.Output()