def test_model_with_objectness(self):
    """Train and export an FPN model that carries a single objectness task.

    Uses a tiny ResNet backbone (one unit per block, two last blocks) on
    64x64 RGB input to keep the test fast.
    """
    side = 64
    net = resnet.ResNetBackbone(
        input_shape=(side, side, 3),
        units_per_block=(1, 1),
        num_last_blocks=2,
    )
    model_builder = FPNBuilder(
        backbone=net,
        tasks=[standard_tasks.get_objectness_task()],
    )
    self.train_export_model(model_builder)
def test_simple_cnn_model_with_many_tasks_many_fms(self):
    """Train and export an FPN model with three heads on a SimpleCNN backbone.

    Covers objectness, box-shape regression, and a 10-class softmax head
    (focal-loss gamma disabled) on 64x64 RGB input.
    """
    side = 64
    net = SimpleCNNBackbone(
        input_shape=(side, side, 3),
        init_filters=16,
        num_last_blocks=1,
    )
    heads = [
        standard_tasks.get_objectness_task(),
        standard_tasks.get_box_shape_task(),
        standard_tasks.get_multiclass_task(
            num_classes=10, fl_gamma=0, activation="softmax"
        ),
    ]
    model_builder = FPNBuilder(backbone=net, tasks=heads)
    self.train_export_model(model_builder)
def __init__(
    self,
    num_classes: int,
    image_dim: int = 128,
    mnet_alpha: float = 1.0,
    min_fm_size: int = 10,
    weights: str = "imagenet",
    label_smoothing: float = 0.01,
    box_shape_task: str = "box_shape",
    last_conv_filters: int = 64,
    class_activation: str = "sigmoid",
    box_obj_task: str = "center_ignore_margin",
):
    """Assemble a MobileNetV2-backed FPN detector and hand it to the base class.

    Args:
        num_classes: number of classes for the multiclass head.
        image_dim: square input image size in pixels.
        mnet_alpha: MobileNetV2 width multiplier.
        min_fm_size: minimum feature-map size kept by the backbone.
        weights: backbone weight initialization (e.g. "imagenet").
        label_smoothing: label smoothing applied to objectness and class heads.
        box_shape_task: name/type of the box-shape task.
        last_conv_filters: filter count of each head's final conv layer.
        class_activation: activation of the multiclass head.
        box_obj_task: objectness-target class (margin-ignoring center by default).
    """
    self._image_dim = image_dim
    self._num_classes = num_classes

    # Head definitions; all share the same final-conv width.
    objectness_head = standard_tasks.get_objectness_task(
        obj_class=box_obj_task,
        label_smoothing=label_smoothing,
        num_filters=last_conv_filters,
    )
    box_shape_head = standard_tasks.get_box_shape_task(
        box_shape_task, num_filters=last_conv_filters
    )
    multiclass_head = standard_tasks.get_multiclass_task(
        self._num_classes,
        fl_gamma=0.0,
        label_smoothing=label_smoothing,
        num_filters=last_conv_filters,
        activation=class_activation,
    )

    feature_extractor = mobilenetv2.MobileNetV2Backbone(
        input_shape=(self._image_dim, self._image_dim, 3),
        alpha=mnet_alpha,
        min_fm_size=min_fm_size,
        weights=weights,
    )
    super().__init__(
        FPNBuilder(
            backbone=feature_extractor,
            tasks=[objectness_head, box_shape_head, multiclass_head],
        )
    )
def test_debug_quantized_model(self):
    """Build a small objectness model, quantize it, and run the TFLite
    quantization debugger over one representative sample, printing the
    per-output diffs between float and quantized execution.
    """
    image_dim = 64
    backbone = resnet.ResNetBackbone(
        input_shape=(image_dim, image_dim, 3),
        units_per_block=(1, 1),
        num_last_blocks=2,
    )
    tasks = [standard_tasks.get_objectness_task()]
    builder = FPNBuilder(backbone=backbone, tasks=tasks)
    builder.build()
    # Re-read the input size from the built model rather than trusting the
    # local constant; input_shape is presumably (batch, H, W, C) — TODO confirm.
    image_dim = builder.input_shape[1]
    raw_dataset = datasets_ops.from_numpy_generator(
        utils.create_fake_detection_dataset_generator(num_steps=100))
    # num_epochs=-1 makes the dataset repeat indefinitely; consumption is
    # bounded below by the debugger's max_samples=1.
    train_dataset = datasets_ops.prepare_dataset(
        raw_dataset,
        model_image_size=(image_dim, image_dim),
        augmentation_fn=aug_fn,
        num_epochs=-1,
        batch_size=2,
        shuffle_buffer_size=1,
        prefetch_buffer_size=1,
    )
    prepared_train_dataset = train_dataset.map(
        builder.get_build_training_targets_fn())

    def representative_dataset():
        # Yield single images with a leading batch dim of 1, as the TFLite
        # calibration/debugging API expects one sample per yield.
        for features, labels in prepared_train_dataset:
            for image in features["image"]:
                yield np.expand_dims(image, 0)

    quantized_model = builder.build_quantized()
    outputs_diffs = tflite_debugger.debug_model_quantization(
        representative_dataset(), quantized_model, max_samples=1)
    print(outputs_diffs)