# Ejemplo n.º 1 (Example no. 1)
# 0  -- score/vote metadata from the source page, not code
def create_fake_detection_batched_dataset(
    image_size: Tuple[int, int] = (32, 24),
    num_epochs: int = 2,
    batch_size: int = 2,
    num_steps: int = 10,
    num_boxes: int = 5,
    num_classes: int = 10,
) -> tf.data.Dataset:
    """Build a small fake detection dataset, normalized and batched for tests.

    Images are cast to float32 and scaled into [0, 1] before batching.
    """
    generator = create_fake_detection_dataset_generator(
        num_steps=num_steps, num_boxes=num_boxes, num_classes=num_classes
    )
    raw_dataset = datasets_ops.from_numpy_generator(generator)

    def normalize_image(image_data):
        # Scale uint8 pixels to [0, 1] floats; keep the rest of the example intact.
        scaled = tf.cast(image_data.features.image, tf.float32) / 255.0
        return image_data.replace_image(scaled)

    return datasets_ops.prepare_dataset(
        raw_dataset,
        model_image_size=image_size,
        augmentation_fn=normalize_image,
        num_epochs=num_epochs,
        batch_size=batch_size,
        shuffle_buffer_size=1,
        prefetch_buffer_size=1,
    )
    def train_export_model(self, builder: FPNBuilder):
        """Train a model from *builder*, export it to TFLite, then fine-tune and
        export its quantization-aware variant.

        NOTE(review): `aug_fn` is not defined in this method — presumably a
        module-level helper in the original file; confirm it is in scope.
        """
        image_dim = builder.input_shape[1]

        raw_dataset = datasets_ops.from_numpy_generator(
            utils.create_fake_detection_dataset_generator(num_steps=100))
        train_dataset = datasets_ops.prepare_dataset(
            raw_dataset,
            model_image_size=(image_dim, image_dim),
            augmentation_fn=aug_fn,
            num_epochs=-1,  # repeat forever; fit() bounds work via steps_per_epoch
            batch_size=2,
            shuffle_buffer_size=1,
            prefetch_buffer_size=1,
        )

        model = builder.build()
        prepared_train_dataset = train_dataset.map(
            builder.get_build_training_targets_fn())

        model.compile(
            tf.keras.optimizers.Adam(learning_rate=0.005, beta_2=0.99),
            **builder.get_model_compile_args(),
        )
        model.fit(prepared_train_dataset, epochs=1, steps_per_epoch=5)

        weights_path = f"{self.test_dir}/model.h5"
        model.save_weights(weights_path)

        # Shared export settings for both the float and quantized conversions.
        export_kwargs = dict(
            export_batch_size=1,
            raw_dataset=raw_dataset,
            num_dataset_samples=2,
            num_test_steps=1,
            merge_feature_maps=True,
            postprocess_outputs=True,
        )
        builder.convert_to_tflite(
            weights_path,
            save_path=f"{self.test_dir}/model.tflite",
            **export_kwargs,
        )

        # Quantization-aware copy initialized from the just-trained weights.
        qmodel = builder.build_quantized(
            non_quantized_model_weights=weights_path)
        qmodel.compile(
            tf.keras.optimizers.Adam(learning_rate=0.005, beta_2=0.99),
            **builder.get_model_compile_args(),
        )
        qmodel.fit(prepared_train_dataset, epochs=1, steps_per_epoch=5)

        builder.convert_to_tflite(
            qmodel,
            save_path=f"{self.test_dir}/qmodel.tflite",
            **export_kwargs,
        )
# Ejemplo n.º 3 (Example no. 3)
# 0  -- score/vote metadata from the source page, not code
def create_backbone_fake_representative_dataset(
    input_shape: Tuple[int, int, int, int] = (1, 64, 64, 3), num_steps: int = 100
) -> tf.data.Dataset:
    """Create a fake image dataset shaped as backbone inputs.

    Each element is ``({"image": float32 tensor in [0, 1]}, {})`` with the
    static shape pinned to *input_shape*.
    """
    generator = create_fake_detection_dataset_generator(
        num_steps=num_steps, image_shape=input_shape
    )
    source = datasets_ops.from_numpy_generator(generator)

    def to_model_inputs(sample):
        # Normalize to [0, 1] and pin the static shape so downstream
        # consumers see a fully-defined input signature.
        image = tf.cast(sample["features"]["image"], dtype=tf.float32) / 255.0
        image.set_shape(input_shape)
        return {"image": image}, {}

    return source.map(to_model_inputs)
    def test_debug_quantized_model(self):
        """Smoke-test the quantization debugger on a tiny ResNet-backed FPN.

        NOTE(review): `aug_fn` is not defined in this method — presumably a
        module-level helper in the original file; confirm it is in scope.
        """
        image_dim = 64
        backbone = resnet.ResNetBackbone(
            input_shape=(image_dim, image_dim, 3),
            units_per_block=(1, 1),
            num_last_blocks=2,
        )
        builder = FPNBuilder(
            backbone=backbone, tasks=[standard_tasks.get_objectness_task()])
        builder.build()

        # Re-read the dimension from the builder in case build() adjusted it.
        image_dim = builder.input_shape[1]
        raw_dataset = datasets_ops.from_numpy_generator(
            utils.create_fake_detection_dataset_generator(num_steps=100))
        train_dataset = datasets_ops.prepare_dataset(
            raw_dataset,
            model_image_size=(image_dim, image_dim),
            augmentation_fn=aug_fn,
            num_epochs=-1,
            batch_size=2,
            shuffle_buffer_size=1,
            prefetch_buffer_size=1,
        )
        prepared_train_dataset = train_dataset.map(
            builder.get_build_training_targets_fn())

        def representative_dataset():
            # Unbatch, then yield one image per step as a batch of size 1.
            for features, _ in prepared_train_dataset:
                for image in features["image"]:
                    yield np.expand_dims(image, 0)

        quantized_model = builder.build_quantized()
        outputs_diffs = tflite_debugger.debug_model_quantization(
            representative_dataset(), quantized_model, max_samples=1)
        print(outputs_diffs)
    def test_create_dataset_generator(self):
        """Smoke-test wrapping the random-rectangles generator as a dataset."""
        generator = random_rects.create_random_rectangles_dataset_generator()
        dataset = datasets_ops.from_numpy_generator(generator)
        print(dataset)