def create_fake_detection_batched_dataset(
    image_size: Tuple[int, int] = (32, 24),
    num_epochs: int = 2,
    batch_size: int = 2,
    num_steps: int = 10,
    num_boxes: int = 5,
    num_classes: int = 10,
) -> tf.data.Dataset:
    """Create a small batched tf.data pipeline of fake detection samples for tests."""

    dataset = datasets_ops.from_numpy_generator(
        create_fake_detection_dataset_generator(
            num_steps=num_steps, num_boxes=num_boxes, num_classes=num_classes
        )
    )

    def aug_fn(image_data):
        # Normalize pixel values from [0, 255] to [0, 1].
        image = tf.cast(image_data.features.image, tf.float32)
        image = image / 255.0
        return image_data.replace_image(image)

    return datasets_ops.prepare_dataset(
        dataset,
        model_image_size=image_size,
        augmentation_fn=aug_fn,
        num_epochs=num_epochs,
        batch_size=batch_size,
        shuffle_buffer_size=1,
        prefetch_buffer_size=1,
    )
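
# Usage sketch (illustrative, not from the original file): build the fake
# batched dataset and inspect its element structure. Only generic tf.data
# attributes are used here, since the exact element layout is defined by
# datasets_ops.prepare_dataset.
import tensorflow as tf

fake_dataset = create_fake_detection_batched_dataset(batch_size=2, num_steps=4)
print(fake_dataset.element_spec)  # nested structure and shapes of one batch
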
    def train_export_model(self, builder: FPNBuilder):
        """Train a model, export it to TFLite, then fine-tune and export a quantized variant."""

        image_dim = builder.input_shape[1]
        raw_dataset = datasets_ops.from_numpy_generator(
            utils.create_fake_detection_dataset_generator(num_steps=100))

        train_dataset = datasets_ops.prepare_dataset(
            raw_dataset,
            model_image_size=(image_dim, image_dim),
            augmentation_fn=aug_fn,
            num_epochs=-1,
            batch_size=2,
            shuffle_buffer_size=1,
            prefetch_buffer_size=1,
        )
        model = builder.build()

        prepared_train_dataset = train_dataset.map(
            builder.get_build_training_targets_fn())

        optimizer = tf.keras.optimizers.Adam(learning_rate=0.005, beta_2=0.99)
        model.compile(optimizer, **builder.get_model_compile_args())
        model.fit(prepared_train_dataset, epochs=1, steps_per_epoch=5)
        model.save_weights(f"{self.test_dir}/model.h5")

        builder.convert_to_tflite(
            f"{self.test_dir}/model.h5",
            save_path=f"{self.test_dir}/model.tflite",
            export_batch_size=1,
            raw_dataset=raw_dataset,
            num_dataset_samples=2,
            num_test_steps=1,
            merge_feature_maps=True,
            postprocess_outputs=True,
        )

        qmodel = builder.build_quantized(
            non_quantized_model_weights=f"{self.test_dir}/model.h5")
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.005, beta_2=0.99)
        qmodel.compile(optimizer, **builder.get_model_compile_args())
        qmodel.fit(prepared_train_dataset, epochs=1, steps_per_epoch=5)

        builder.convert_to_tflite(
            qmodel,
            save_path=f"{self.test_dir}/qmodel.tflite",
            export_batch_size=1,
            raw_dataset=raw_dataset,
            num_dataset_samples=2,
            num_test_steps=1,
            merge_feature_maps=True,
            postprocess_outputs=True,
        )
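
# Sanity-check sketch (hypothetical follow-up, not part of the test above): the
# exported file can be loaded with the standard TensorFlow Lite interpreter to
# confirm its input/output signatures; the path below is illustrative.
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
print(interpreter.get_input_details())
print(interpreter.get_output_details())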
    def prepare_test_dataset(
        self, raw_dataset: tf.data.Dataset, batch_size: int, num_epochs: int = 1
    ) -> tf.data.Dataset:
        """Resize and batch a raw dataset, keeping only the input features for inference."""

        dataset = prepare_dataset(
            dataset=raw_dataset,
            augmentation_fn=None,
            model_image_size=(self.input_shape[0], self.input_shape[1]),
            batch_size=batch_size,
            num_epochs=num_epochs,
        )

        def prepare_features_fn(data):
            # Keep only the input features; labels are not needed at inference time.
            data: ImageData[tf.Tensor] = ImageData.from_dict(data)
            return data.features.to_dict(), {}

        return dataset.map(prepare_features_fn)
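
# Usage sketch (assumptions: `builder` is an FPNBuilder-like object exposing
# prepare_test_dataset, and `raw_dataset` comes from datasets_ops.from_numpy_generator;
# the dataset yields (features, {}) pairs, so it can be fed straight to Keras).
test_dataset = builder.prepare_test_dataset(raw_dataset, batch_size=2)
model = builder.build()
predictions = model.predict(test_dataset, steps=1)
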
    def test_debug_quantized_model(self):
        """Run the TFLite quantization debugger on a small quantized FPN model."""

        image_dim = 64
        backbone = resnet.ResNetBackbone(
            input_shape=(image_dim, image_dim, 3),
            units_per_block=(1, 1),
            num_last_blocks=2,
        )
        tasks = [standard_tasks.get_objectness_task()]

        builder = FPNBuilder(backbone=backbone, tasks=tasks)
        builder.build()

        image_dim = builder.input_shape[1]
        raw_dataset = datasets_ops.from_numpy_generator(
            utils.create_fake_detection_dataset_generator(num_steps=100))

        train_dataset = datasets_ops.prepare_dataset(
            raw_dataset,
            model_image_size=(image_dim, image_dim),
            augmentation_fn=aug_fn,
            num_epochs=-1,
            batch_size=2,
            shuffle_buffer_size=1,
            prefetch_buffer_size=1,
        )
        prepared_train_dataset = train_dataset.map(
            builder.get_build_training_targets_fn())

        def representative_dataset():
            # Yield single images with an explicit batch dimension of 1.
            for features, labels in prepared_train_dataset:
                for image in features["image"]:
                    yield np.expand_dims(image, 0)

        quantized_model = builder.build_quantized()

        outputs_diffs = tflite_debugger.debug_model_quantization(
            representative_dataset(), quantized_model, max_samples=1)
        print(outputs_diffs)
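
# Hypothetical follow-up (assumes outputs_diffs behaves like a mapping from model
# output names to per-sample error arrays; the actual return type of
# tflite_debugger.debug_model_quantization may differ):
import numpy as np

for name, diff in outputs_diffs.items():
    print(name, float(np.mean(np.abs(diff))))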