def train_export_model(self, builder: FPNBuilder):
        """Train a tiny model from ``builder`` on fake data, then export it.

        Exercises the full pipeline: dataset preparation, a short training
        run, weight saving, float TFLite conversion, quantization-aware
        fine-tuning, and quantized TFLite conversion.  All artifacts are
        written under ``self.test_dir``.
        """
        # Model input is square; take the spatial dim from the builder.
        image_dim = builder.input_shape[1]
        raw_dataset = datasets_ops.from_numpy_generator(
            utils.create_fake_detection_dataset_generator(num_steps=100))

        # num_epochs=-1 repeats forever; training length is bounded by
        # steps_per_epoch in the fit() calls below.
        train_dataset = datasets_ops.prepare_dataset(
            raw_dataset,
            model_image_size=(image_dim, image_dim),
            augmentation_fn=aug_fn,
            num_epochs=-1,
            batch_size=2,
            shuffle_buffer_size=1,
            prefetch_buffer_size=1,
        )
        model = builder.build()

        # Map raw samples to the per-task training targets the model expects.
        prepared_train_dataset = train_dataset.map(
            builder.get_build_training_targets_fn())

        optimizer = tf.keras.optimizers.Adam(learning_rate=0.005, beta_2=0.99)
        model.compile(optimizer, **builder.get_model_compile_args())
        model.fit(prepared_train_dataset, epochs=1, steps_per_epoch=5)
        model.save_weights(f"{self.test_dir}/model.h5")

        # Float export from the saved weights.
        # NOTE(review): the first positional argument is a weights path here
        # but a model object in the quantized export below — presumably
        # convert_to_tflite accepts either; confirm against its signature.
        builder.convert_to_tflite(
            f"{self.test_dir}/model.h5",
            save_path=f"{self.test_dir}/model.tflite",
            export_batch_size=1,
            raw_dataset=raw_dataset,
            num_dataset_samples=2,
            num_test_steps=1,
            merge_feature_maps=True,
            postprocess_outputs=True,
        )

        # Quantization-aware model initialized from the float weights,
        # briefly fine-tuned with a fresh optimizer (optimizer state is not
        # shared between models).
        qmodel = builder.build_quantized(
            non_quantized_model_weights=f"{self.test_dir}/model.h5")
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.005, beta_2=0.99)
        qmodel.compile(optimizer, **builder.get_model_compile_args())
        qmodel.fit(prepared_train_dataset, epochs=1, steps_per_epoch=5)

        # Quantized export from the live model object.
        builder.convert_to_tflite(
            qmodel,
            save_path=f"{self.test_dir}/qmodel.tflite",
            export_batch_size=1,
            raw_dataset=raw_dataset,
            num_dataset_samples=2,
            num_test_steps=1,
            merge_feature_maps=True,
            postprocess_outputs=True,
        )
    def test_model_with_box_shape(self):
        """Train/export smoke test: single box-shape task on a small ResNet."""
        side = 64
        resnet_backbone = resnet.ResNetBackbone(
            input_shape=(side, side, 3),
            units_per_block=(1, 1),
            num_last_blocks=1,
        )
        self.train_export_model(
            FPNBuilder(backbone=resnet_backbone,
                       tasks=[standard_tasks.get_box_shape_task()]))
    def test_model_with_objectness_and_fpn_backbone(self):
        """Train/export smoke test: objectness task on a ResNet wrapped in an FPN."""
        side = 64
        base_backbone = resnet.ResNetBackbone(
            input_shape=(side, side, 3),
            units_per_block=(1, 1, 1),
            num_last_blocks=3,
        )
        fpn_backbone = FPNBackbone(base_backbone, depth=16, num_first_blocks=1)
        self.train_export_model(
            FPNBuilder(backbone=fpn_backbone,
                       tasks=[standard_tasks.get_objectness_task()]))
    def test_simple_cnn_model_with_many_tasks_many_fms(self):
        """Train/export smoke test: simple CNN backbone with three heads."""
        side = 64
        cnn_backbone = SimpleCNNBackbone(input_shape=(side, side, 3),
                                         init_filters=16,
                                         num_last_blocks=1)
        heads = [
            standard_tasks.get_objectness_task(),
            standard_tasks.get_box_shape_task(),
            standard_tasks.get_multiclass_task(num_classes=10,
                                               fl_gamma=0,
                                               activation="softmax"),
        ]
        self.train_export_model(FPNBuilder(backbone=cnn_backbone, tasks=heads))
    def test_model_with_many_tasks_many_fms(self):
        """Train/export smoke test: ResNet backbone with three prediction heads."""
        side = 64
        resnet_backbone = resnet.ResNetBackbone(
            input_shape=(side, side, 3),
            units_per_block=(1, 1, 1),
            num_last_blocks=2,
        )
        heads = [
            standard_tasks.get_objectness_task(),
            standard_tasks.get_box_shape_task(),
            standard_tasks.get_multiclass_task(num_classes=10),
        ]
        self.train_export_model(
            FPNBuilder(backbone=resnet_backbone, tasks=heads))
# Beispiel #6 (scraped sample separator; vote count: 0)
    def __init__(
        self,
        num_classes: int,
        image_dim: int = 128,
        mnet_alpha: float = 1.0,
        min_fm_size: int = 10,
        weights: str = "imagenet",
        label_smoothing: float = 0.01,
        box_shape_task: str = "box_shape",
        last_conv_filters: int = 64,
        class_activation: str = "sigmoid",
        box_obj_task: str = "center_ignore_margin",
    ):
        """Assemble a MobileNetV2-backed FPN detector with objectness,
        box-shape and multiclass heads, and hand the builder to the parent.
        """
        self._image_dim = image_dim
        self._num_classes = num_classes

        # Shared feature extractor; `weights` selects (pre)initialization.
        mnet_backbone = mobilenetv2.MobileNetV2Backbone(
            input_shape=(image_dim, image_dim, 3),
            alpha=mnet_alpha,
            min_fm_size=min_fm_size,
            weights=weights,
        )

        # One prediction head per detection sub-task.
        objectness_head = standard_tasks.get_objectness_task(
            obj_class=box_obj_task,
            label_smoothing=label_smoothing,
            num_filters=last_conv_filters,
        )
        box_shape_head = standard_tasks.get_box_shape_task(
            box_shape_task, num_filters=last_conv_filters)
        multiclass_head = standard_tasks.get_multiclass_task(
            num_classes,
            fl_gamma=0.0,
            label_smoothing=label_smoothing,
            num_filters=last_conv_filters,
            activation=class_activation,
        )

        super().__init__(
            FPNBuilder(
                backbone=mnet_backbone,
                tasks=[objectness_head, box_shape_head, multiclass_head],
            ))
    def test_debug_quantized_model(self):
        """Run the TFLite quantization debugger on a small quantized model."""
        backbone = resnet.ResNetBackbone(
            input_shape=(64, 64, 3),
            units_per_block=(1, 1),
            num_last_blocks=2,
        )
        builder = FPNBuilder(backbone=backbone,
                             tasks=[standard_tasks.get_objectness_task()])
        builder.build()

        side = builder.input_shape[1]
        raw_dataset = datasets_ops.from_numpy_generator(
            utils.create_fake_detection_dataset_generator(num_steps=100))

        # num_epochs=-1 repeats forever; the debugger bounds consumption
        # via max_samples below.
        batched_dataset = datasets_ops.prepare_dataset(
            raw_dataset,
            model_image_size=(side, side),
            augmentation_fn=aug_fn,
            num_epochs=-1,
            batch_size=2,
            shuffle_buffer_size=1,
            prefetch_buffer_size=1,
        )
        prepared_train_dataset = batched_dataset.map(
            builder.get_build_training_targets_fn())

        def representative_dataset():
            # Unbatch to single-image batches of size 1 for calibration.
            for features, _labels in prepared_train_dataset:
                for image in features["image"]:
                    yield np.expand_dims(image, 0)

        quantized_model = builder.build_quantized()

        outputs_diffs = tflite_debugger.debug_model_quantization(
            representative_dataset(), quantized_model, max_samples=1)
        print(outputs_diffs)