def test_autoensemble_end_to_end(self):
    """End-to-end: repeated tuner + auto-ensemble phases produce a MeanEnsemble."""
    holdout_train, holdout_test = testing_utils.get_holdout_data(
        train_samples=128,
        test_samples=64,
        input_shape=(10, ),
        num_classes=10,
        random_seed=42)

    # TODO: Consider performing `tf.data.Dataset` transformations
    # within get_holdout_data function.
    holdout_train = holdout_train.batch(32)
    holdout_test = holdout_test.batch(32)

    def build_model(hp):
        """Builds a one-hidden-layer classifier from tuner hyperparameters."""
        dense_units = hp.Int('units', min_value=32, max_value=512, step=32)
        learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
        model = tf.keras.Sequential([
            tf.keras.layers.Dense(units=dense_units, activation='relu'),
            tf.keras.layers.Dense(10, activation='softmax'),
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model

    # Shared storage so every auto-ensemble phase created inside the repeat
    # phase sees the ensembles produced by earlier repetitions.
    shared_storage = InMemoryStorage()
    data_phase = InputPhase(holdout_train, holdout_test)

    def make_tuner_phase():
        """Fresh tuner phase per repetition; timestamped project name avoids reuse."""
        tuner = tuners.RandomSearch(
            build_model,
            objective='val_accuracy',
            max_trials=3,
            executions_per_trial=1,
            directory=self.test_subdirectory,
            project_name='helloworld_' + str(int(time.time())),
            overwrite=True)
        return KerasTunerPhase(tuner)

    def make_ensemble_phase():
        """Fresh auto-ensemble phase per repetition, backed by the shared storage."""
        return AutoEnsemblePhase(
            ensemblers=[
                MeanEnsembler('sparse_categorical_crossentropy', 'adam',
                              ['accuracy'])
            ],
            ensemble_strategies=[GrowStrategy()],
            storage=shared_storage)

    repeat_phase = RepeatPhase([make_tuner_phase, make_ensemble_phase],
                               repetitions=3)

    controller = SequentialController(phases=[data_phase, repeat_phase])
    searcher = ModelSearch(controller)
    searcher.run()
    self.assertIsInstance(
        searcher.get_best_models(num_models=1)[0], MeanEnsemble)
# Example #2
    def test_tuner_end_to_end(self):
        """End-to-end: tuner phase feeding a mean-ensemble training phase."""
        ds_train, ds_test = testing_utils.get_test_data(
            train_samples=128,
            test_samples=64,
            input_shape=(10, ),
            num_classes=10,
            random_seed=42)

        # TODO: Consider performing `tf.data.Dataset` transformations
        # within get_test_data function.
        ds_train = ds_train.batch(32)
        ds_test = ds_test.batch(32)

        def build_model(hp):
            """Builds a one-hidden-layer classifier from tuner hyperparameters."""
            dense_units = hp.Int('units', min_value=32, max_value=512, step=32)
            learning_rate = hp.Choice('learning_rate',
                                      values=[1e-2, 1e-3, 1e-4])
            model = tf.keras.Sequential([
                tf.keras.layers.Dense(units=dense_units, activation='relu'),
                tf.keras.layers.Dense(10, activation='softmax'),
            ])
            model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate),
                          loss='sparse_categorical_crossentropy',
                          metrics=['accuracy'])
            return model

        # Define phases.
        tuner = tuners.RandomSearch(build_model,
                                    objective='val_accuracy',
                                    max_trials=3,
                                    executions_per_trial=1,
                                    directory=self.test_subdirectory,
                                    project_name='helloworld')

        tuner_phase = KerasTunerPhase(tuner,
                                      ds_train,
                                      validation_data=ds_test)

        def build_ensemble():
            """Averages the two best tuned submodels into a single ensemble."""
            best_submodels = tuner_phase.get_best_models(num_models=2)
            ensemble = MeanEnsemble(submodels=best_submodels)
            ensemble.compile(optimizer=tf.keras.optimizers.Adam(0.01),
                             loss='mse',
                             metrics=['mae'])
            return [ensemble]

        ensemble_phase = TrainKerasModelsPhase(build_ensemble,
                                               dataset=ds_train)

        controller = SequentialController(
            phases=[tuner_phase, ensemble_phase])

        # Execute phases.
        searcher = ModelSearch(controller)
        searcher.run()
        self.assertIsInstance(
            searcher.get_best_models(num_models=1)[0], MeanEnsemble)
# Example #3
        decay_rate=hp.Choice("decay_rate", [0.5, 0.75, 0.95]),
        staircase=True,
    )

    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=tf.keras.optimizers.RMSprop(learning_rate=lr_schedule),
        metrics=["sparse_categorical_accuracy"],
    )
    return model


# Random search over the hyperparameters declared in `build_model`; each of
# the 5 trials is trained 3 times, with results written under "test_dir".
tuner = tuners.RandomSearch(
    build_model,
    objective="val_sparse_categorical_accuracy",
    max_trials=5,
    executions_per_trial=3,
    directory="test_dir",
)

# Print the declared hyperparameter search space before running the search.
tuner.search_space_summary()

# Load CIFAR-10 and wrap the numpy arrays as tf.data pipelines.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
# NOTE(review): presumably consumed by shuffle()/batch() calls further down
# this script (not visible here) — confirm against the full file.
BUFFER_SIZE = 10000
BATCH_SIZE = 64


def scale(image, label):
    image = tf.cast(image, tf.float32)