def _get_detection_module(self, experiment_name):
  """Builds a DetectionModule for `experiment_name` suitable for tests.

  Uses a ResNet-18 backbone and batched NMS so the model stays small and
  exportable.
  """
  config = exp_factory.get_exp_config(experiment_name)
  # Shrink the backbone so building the test model is fast.
  config.task.model.backbone.resnet.model_id = 18
  config.task.model.detection_generator.use_batched_nms = True
  return detection.DetectionModule(
      config, batch_size=1, input_image_size=[640, 640])
def test_build_model_fail_with_none_batch_size(self):
  """DetectionModule must reject construction with a None batch size."""
  config = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
  expected_message = 'batch_size cannot be None for detection models.'
  with self.assertRaisesRegex(ValueError, expected_message):
    detection.DetectionModule(
        config, batch_size=None, input_image_size=[640, 640])
def test_export_tflite_detection(self, experiment, quant_type,
                                 input_image_size):
  """Exports a detection model and converts it to TFLite bytes.

  Writes a small synthetic detection TFRecord for calibration, exports a
  saved_model with `tflite` input type, then runs the TFLite converter.
  """
  tfrecord_path = os.path.join(self.get_temp_dir(), 'det_test.tfrecord')
  example = tfexample_utils.create_detection_test_example(
      image_height=input_image_size[0],
      image_width=input_image_size[1],
      image_channel=3,
      num_instances=10)
  self._create_test_tfrecord(
      tfrecord_file=tfrecord_path, example=example, num_samples=10)

  config = exp_factory.get_exp_config(experiment)
  # Point both splits at the synthetic data so calibration can read it.
  config.task.validation_data.input_path = tfrecord_path
  config.task.train_data.input_path = tfrecord_path

  export_dir = self.get_temp_dir()
  saved_model_dir = os.path.join(export_dir, 'saved_model')
  module = detection_serving.DetectionModule(
      params=config,
      batch_size=1,
      input_image_size=input_image_size,
      input_type='tflite')
  self._export_from_module(
      module=module, input_type='tflite', saved_model_dir=saved_model_dir)

  tflite_model = export_tflite_lib.convert_tflite_model(
      saved_model_dir=saved_model_dir,
      quant_type=quant_type,
      params=config,
      calibration_steps=5)
  self.assertIsInstance(tflite_model, bytes)
def test_build_model_fail_with_batched_nms_false(self):
  """DetectionModule must reject configs that disable batched NMS."""
  config = exp_factory.get_exp_config('retinanet_resnetfpn_coco')
  config.task.model.detection_generator.use_batched_nms = False
  with self.assertRaisesRegex(ValueError, 'Only batched_nms is supported.'):
    detection.DetectionModule(
        config, batch_size=1, input_image_size=[640, 640])
def _get_detection_module(self, experiment_name, input_type):
  """Builds a DetectionModule for `experiment_name` with the given input type.

  Uses a ResNet-18 backbone and the batched NMS version so the model stays
  small and exportable.
  """
  config = exp_factory.get_exp_config(experiment_name)
  # Shrink the backbone so building the test model is fast.
  config.task.model.backbone.resnet.model_id = 18
  config.task.model.detection_generator.nms_version = 'batched'
  return detection.DetectionModule(
      config,
      batch_size=1,
      input_image_size=[640, 640],
      input_type=input_type)
def test_export_tflite_detection(self, experiment, quant_type,
                                 input_image_size):
  """Exports a detection saved_model and converts it to TFLite bytes."""
  config = exp_factory.get_exp_config(experiment)
  export_dir = self.get_temp_dir()
  saved_model_dir = os.path.join(export_dir, 'saved_model')

  module = detection_serving.DetectionModule(
      params=config, batch_size=1, input_image_size=input_image_size)
  self._export_from_module(
      module=module, input_type='tflite', saved_model_dir=saved_model_dir)

  tflite_model = export_tflite_lib.convert_tflite_model(
      saved_model_dir=saved_model_dir,
      quant_type=quant_type,
      params=config,
      calibration_steps=5)
  self.assertIsInstance(tflite_model, bytes)
def get_export_module(experiment: str,
                      batch_size: int,
                      config_files: Optional[str] = None):
  """Gets the export module according to the experiment config.

  Args:
    experiment: `str`, look up for ExperimentConfig factory methods.
    batch_size: `int`, batch size of inference.
    config_files: Optional iterable of paths to yaml files that override the
      experiment config. Overrides are applied in order.

  Returns:
    An export module instance matching the experiment's task type.

  Raises:
    ValueError: If no export module is implemented for the task type.
  """
  params = exp_factory.get_exp_config(experiment)
  for config_file in config_files or []:
    params = hyperparams.override_params_dict(
        params, config_file, is_strict=True)
  params.validate()
  params.lock()

  # Obtain relevant serving object; all modules share the same constructor
  # arguments, with the input size taken from the model config.
  kwargs = dict(
      params=params,
      batch_size=batch_size,
      input_image_size=params.task.model.input_size[:2],
      num_channels=3)
  if isinstance(params.task,
                configs.image_classification.ImageClassificationTask):
    export_module = image_classification.ClassificationModule(**kwargs)
  elif isinstance(params.task, (configs.retinanet.RetinaNetTask,
                                configs.maskrcnn.MaskRCNNTask)):
    export_module = detection.DetectionModule(**kwargs)
  elif isinstance(params.task,
                  configs.semantic_segmentation.SemanticSegmentationTask):
    export_module = semantic_segmentation.SegmentationModule(**kwargs)
  elif isinstance(params.task, configs.yolo.YoloTask):
    export_module = yolo.YoloModule(**kwargs)
  elif isinstance(params.task, multi_cfg.MultiTaskConfig):
    export_module = multitask.MultitaskModule(**kwargs)
  else:
    raise ValueError('Export module not implemented for {} task.'.format(
        type(params.task)))
  return export_module
def export_inference_graph(
    input_type: str,
    batch_size: Optional[int],
    input_image_size: List[int],
    params: cfg.ExperimentConfig,
    checkpoint_path: str,
    export_dir: str,
    num_channels: Optional[int] = 3,
    export_module: Optional[export_base.ExportModule] = None,
    export_checkpoint_subdir: Optional[str] = None,
    export_saved_model_subdir: Optional[str] = None,
    save_options: Optional[tf.saved_model.SaveOptions] = None,
    argmax_outputs: bool = False,
    visualise_outputs: bool = False,
    class_present_outputs: bool = False):
  """Exports inference graph for the model specified in the exp config.

  Saved model is stored at export_dir/saved_model, checkpoint is saved
  at export_dir/checkpoint, and params is saved at export_dir/params.yaml.

  Args:
    input_type: One of `image_tensor`, `image_bytes`, `tf_example`.
    batch_size: 'int', or None.
    input_image_size: List or Tuple of height and width.
    params: Experiment params.
    checkpoint_path: Trained checkpoint path or directory.
    export_dir: Export directory path.
    num_channels: The number of input image channels.
    export_module: Optional export module to be used instead of using params
      to create one. If None, the params will be used to create an export
      module.
    export_checkpoint_subdir: Optional subdirectory under export_dir to store
      checkpoint.
    export_saved_model_subdir: Optional subdirectory under export_dir to store
      saved model.
    save_options: `SaveOptions` for `tf.saved_model.save`. (applicable for
      Segmentation and MultiTask export definitions only)
    argmax_outputs: Set true to argmax the last channel of all outputs.
    visualise_outputs: Set true to apply colormap to all single channel
      outputs.
    class_present_outputs: Set true to gather unique values of outputs.
  """
  # Resolve output directories: when no subdir is given, both the checkpoint
  # and the saved model land directly in export_dir.
  if export_checkpoint_subdir:
    output_checkpoint_directory = os.path.join(
        export_dir, export_checkpoint_subdir)
  else:
    output_checkpoint_directory = export_dir

  if export_saved_model_subdir:
    output_saved_model_directory = os.path.join(
        export_dir, export_saved_model_subdir)
  else:
    output_saved_model_directory = export_dir

  # TODO(arashwan): Offers a direct path to use ExportModule with Task objects.
  # Build an export module from the task config unless the caller supplied one.
  if not export_module:
    if isinstance(params.task,
                  configs.image_classification.ImageClassificationTask):
      export_module = image_classification.ClassificationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          num_channels=num_channels)
    elif isinstance(params.task, configs.retinanet.RetinaNetTask) or isinstance(
        params.task, configs.maskrcnn.MaskRCNNTask):
      # RetinaNet and Mask R-CNN tasks share the same detection export module.
      export_module = detection.DetectionModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          num_channels=num_channels)
    elif isinstance(params.task,
                    configs.semantic_segmentation.SemanticSegmentationTask):
      # Segmentation additionally honours the argmax/visualise output flags.
      export_module = semantic_segmentation.SegmentationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          num_channels=num_channels,
          argmax_outputs=argmax_outputs,
          visualise_outputs=visualise_outputs)
    elif isinstance(params.task, configs.yolo.YoloTask):
      export_module = yolo.YoloModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          num_channels=num_channels)
    elif isinstance(params.task, multi_cfg.MultiTaskConfig):
      # Multi-task export takes all output post-processing flags.
      export_module = multitask.MultitaskModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          num_channels=num_channels,
          argmax_outputs=argmax_outputs,
          visualise_outputs=visualise_outputs,
          class_present_outputs=class_present_outputs)
    else:
      raise ValueError('Export module not implemented for {} task.'.format(
          type(params.task)))
  # Write the saved model with a single serving signature keyed by input_type.
  export_base.export(
      export_module,
      function_keys=[input_type],
      export_savedmodel_dir=output_saved_model_directory,
      checkpoint_path=checkpoint_path,
      timestamped=False,
      save_options=save_options)
  # Also save a fresh checkpoint of the (restored) model weights.
  ckpt = tf.train.Checkpoint(model=export_module.model)
  ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
  # Persist the experiment config next to the exported artifacts.
  train_utils.serialize_config(params, export_dir)
def export_inference_graph(input_type,
                           batch_size,
                           input_image_size,
                           params,
                           checkpoint_path,
                           export_dir,
                           export_checkpoint_subdir=None,
                           export_saved_model_subdir=None):
  """Exports inference graph for the model specified in the exp config.

  Saved model is stored at export_dir/saved_model, checkpoint is saved
  at export_dir/checkpoint, and params is saved at export_dir/params.yaml.

  Args:
    input_type: One of `image_tensor`, `image_bytes`, `tf_example`.
    batch_size: 'int', or None.
    input_image_size: List or Tuple of height and width.
    params: Experiment params.
    checkpoint_path: Trained checkpoint path or directory.
    export_dir: Export directory path.
    export_checkpoint_subdir: Optional subdirectory under export_dir to store
      checkpoint.
    export_saved_model_subdir: Optional subdirectory under export_dir to store
      saved model.
  """
  output_checkpoint_directory = (
      os.path.join(export_dir, export_checkpoint_subdir)
      if export_checkpoint_subdir else export_dir)
  output_saved_model_directory = (
      os.path.join(export_dir, export_saved_model_subdir)
      if export_saved_model_subdir else export_dir)

  # Build the export module matching the experiment's task type.
  task = params.task
  if isinstance(task, configs.image_classification.ImageClassificationTask):
    export_module = image_classification.ClassificationModule(
        params=params,
        batch_size=batch_size,
        input_image_size=input_image_size)
  elif isinstance(task, (configs.retinanet.RetinaNetTask,
                         configs.maskrcnn.MaskRCNNTask)):
    export_module = detection.DetectionModule(
        params=params,
        batch_size=batch_size,
        input_image_size=input_image_size)
  else:
    raise ValueError(
        'Export module not implemented for {} task.'.format(type(task)))

  # Restore weights from the trained checkpoint (directory or file).
  model = export_module.build_model()
  ckpt = tf.train.Checkpoint(model=model)
  ckpt_dir_or_file = checkpoint_path
  if tf.io.gfile.isdir(ckpt_dir_or_file):
    ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
  status = ckpt.restore(ckpt_dir_or_file).expect_partial()

  # Pick the serving function and its input signature for the input type.
  if input_type == 'image_tensor':
    input_signature = tf.TensorSpec(
        shape=[batch_size, input_image_size[0], input_image_size[1], 3],
        dtype=tf.uint8)
    inference_fn = export_module.inference_from_image_tensors
  elif input_type == 'image_bytes':
    input_signature = tf.TensorSpec(shape=[batch_size], dtype=tf.string)
    inference_fn = export_module.inference_from_image_bytes
  elif input_type == 'tf_example':
    input_signature = tf.TensorSpec(shape=[batch_size], dtype=tf.string)
    inference_fn = export_module.inference_from_tf_example
  else:
    raise ValueError('Unrecognized `input_type`')
  signatures = {
      'serving_default': inference_fn.get_concrete_function(input_signature)
  }

  status.assert_existing_objects_matched()
  ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
  tf.saved_model.save(
      export_module, output_saved_model_directory, signatures=signatures)
  train_utils.serialize_config(params, export_dir)
def export_inference_graph(
    input_type: str,
    batch_size: Optional[int],
    input_image_size: List[int],
    params: cfg.ExperimentConfig,
    checkpoint_path: str,
    export_dir: str,
    num_channels: Optional[int] = 3,
    export_module: Optional[export_base.ExportModule] = None,
    export_checkpoint_subdir: Optional[str] = None,
    export_saved_model_subdir: Optional[str] = None,
    save_options: Optional[tf.saved_model.SaveOptions] = None,
    log_model_flops_and_params: bool = False):
  """Exports inference graph for the model specified in the exp config.

  Saved model is stored at export_dir/saved_model, checkpoint is saved
  at export_dir/checkpoint, and params is saved at export_dir/params.yaml.

  Args:
    input_type: One of `image_tensor`, `image_bytes`, `tf_example` or
      `tflite`.
    batch_size: 'int', or None.
    input_image_size: List or Tuple of height and width.
    params: Experiment params.
    checkpoint_path: Trained checkpoint path or directory.
    export_dir: Export directory path.
    num_channels: The number of input image channels.
    export_module: Optional export module to be used instead of using params
      to create one. If None, the params will be used to create an export
      module.
    export_checkpoint_subdir: Optional subdirectory under export_dir to store
      checkpoint.
    export_saved_model_subdir: Optional subdirectory under export_dir to store
      saved model.
    save_options: `SaveOptions` for `tf.saved_model.save`.
    log_model_flops_and_params: If True, writes model FLOPs to model_flops.txt
      and model parameters to model_params.txt.
  """
  # Unlike older variants, the checkpoint copy is only written when a
  # checkpoint subdirectory is explicitly requested (directory stays None
  # otherwise).
  if export_checkpoint_subdir:
    output_checkpoint_directory = os.path.join(export_dir,
                                               export_checkpoint_subdir)
  else:
    output_checkpoint_directory = None

  if export_saved_model_subdir:
    output_saved_model_directory = os.path.join(export_dir,
                                                export_saved_model_subdir)
  else:
    output_saved_model_directory = export_dir

  # TODO(arashwan): Offers a direct path to use ExportModule with Task objects.
  # Build an export module from the task config unless the caller supplied one.
  if not export_module:
    if isinstance(params.task,
                  configs.image_classification.ImageClassificationTask):
      export_module = image_classification.ClassificationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels)
    elif isinstance(params.task, configs.retinanet.RetinaNetTask) or isinstance(
        params.task, configs.maskrcnn.MaskRCNNTask):
      # RetinaNet and Mask R-CNN tasks share the same detection export module.
      export_module = detection.DetectionModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels)
    elif isinstance(
        params.task, configs.semantic_segmentation.SemanticSegmentationTask):
      export_module = semantic_segmentation.SegmentationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels)
    elif isinstance(params.task,
                    configs.video_classification.VideoClassificationTask):
      export_module = video_classification.VideoClassificationModule(
          params=params,
          batch_size=batch_size,
          input_image_size=input_image_size,
          input_type=input_type,
          num_channels=num_channels)
    else:
      raise ValueError(
          'Export module not implemented for {} task.'.format(
              type(params.task)))

  # Write the saved model with a single serving signature keyed by input_type.
  export_base.export(
      export_module,
      function_keys=[input_type],
      export_savedmodel_dir=output_saved_model_directory,
      checkpoint_path=checkpoint_path,
      timestamped=False,
      save_options=save_options)

  if output_checkpoint_directory:
    ckpt = tf.train.Checkpoint(model=export_module.model)
    ckpt.save(os.path.join(output_checkpoint_directory, 'ckpt'))
  # Persist the experiment config next to the exported artifacts.
  train_utils.serialize_config(params, export_dir)

  if log_model_flops_and_params:
    inputs_kwargs = None
    if isinstance(
        params.task,
        (configs.retinanet.RetinaNetTask, configs.maskrcnn.MaskRCNNTask)):
      # We need to create inputs_kwargs argument to specify the input shapes for
      # subclass model that overrides model.call to take multiple inputs,
      # e.g., RetinaNet model.
      inputs_kwargs = {
          'images':
              tf.TensorSpec([1] + input_image_size + [num_channels],
                            tf.float32),
          'image_shape':
              tf.TensorSpec([1, 2], tf.float32)
      }
      dummy_inputs = {
          k: tf.ones(v.shape.as_list(), tf.float32)
          for k, v in inputs_kwargs.items()
      }
      # Must do forward pass to build the model.
      export_module.model(**dummy_inputs)
    else:
      # FLOPs/params logging is only implemented for detection tasks here;
      # other tasks skip the reports entirely.
      logging.info(
          'Logging model flops and params not implemented for %s task.',
          type(params.task))
      return
    train_utils.try_count_flops(export_module.model, inputs_kwargs,
                                os.path.join(export_dir, 'model_flops.txt'))
    train_utils.write_model_params(export_module.model,
                                   os.path.join(export_dir,
                                                'model_params.txt'))