Example #1
    def test_full_problem(self):
        import pytest

        from deephyper.nas.preprocessing import minmaxstdscaler
        from deephyper.problem import NaProblem

        # Assumption: NaProblemError lived under deephyper.core.exceptions.problem
        # in the releases this test targets; the path moved between versions.
        from deephyper.core.exceptions.problem import NaProblemError

        pb = NaProblem()

        # Dummy loader returning ((X_train, y_train), (X_valid, y_valid)).
        def load_data(prop):
            return ([[10]], [1]), ([10], [1])

        pb.load_data(load_data, prop=1.0)

        pb.preprocessing(minmaxstdscaler)

        # Search-space factory assumed defined at module level (see the sketch below).
        pb.search_space(OneLayerSpace)

        pb.hyperparameters(
            batch_size=64,
            learning_rate=0.001,
            optimizer="adam",
            num_epochs=10,
            loss_metric="mse",
        )

        # Requesting a metric-based objective before the metric is declared
        # should fail.
        with pytest.raises(NaProblemError):
            pb.objective("r2")

        pb.loss("mse")
        pb.metrics(["r2"])

        possible_objective = ["loss", "val_loss", "r2", "val_r2"]
        for obj in possible_objective:
            pb.objective(obj)
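The test assumes a OneLayerSpace search-space factory defined elsewhere in the test module. Below is a minimal sketch of such a factory; the module paths and constructor signatures are assumptions based on the deephyper.nas.space layout of the same era and moved between releases.

# Hypothetical stand-in for the OneLayerSpace fixture used by the test.
# Import paths are assumptions (they changed between DeepHyper releases).
from deephyper.nas.space import KSearchSpace
from deephyper.nas.space.node import VariableNode
from deephyper.nas.space.op.op1d import Dense, Identity

def OneLayerSpace(input_shape=(1,), output_shape=(1,)):
    ss = KSearchSpace(input_shape, output_shape)
    node = VariableNode()
    node.add_op(Identity())         # choice 1: pass the input through
    node.add_op(Dense(10, "relu"))  # choice 2: a single dense layer
    ss.connect(ss.input_nodes[0], node)
    return ss
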
Example #2
from deephyper.problem import NaProblem
from molnet.molnet.load_data import load_data
from molnet.molnet.search_space import create_search_space
from deephyper.search.nas.model.preprocessing import minmaxstdscaler

Problem = NaProblem(seed=2019)

Problem.load_data(load_data)

Problem.preprocessing(minmaxstdscaler)

Problem.search_space(create_search_space, num_layers=3)

Problem.hyperparameters(
    batch_size=32,
    learning_rate=0.01,
    optimizer='adam',
    num_epochs=20,
    callbacks=dict(EarlyStopping=dict(
        monitor='val_r2',  # or 'val_acc' ?
        mode='max',
        verbose=0,
        patience=5)))

Problem.loss('mse')  # or 'categorical_crossentropy' ?

Problem.metrics(['r2'])  # or 'acc' ?

Problem.objective('val_r2__last')  # or 'val_acc__last' ?
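# Note: the '__last' suffix selects the metric value from the final epoch,
# while '__max' / '__min' (e.g. 'val_r2__max') reduce over all epochs to the
# best value; suffix semantics as documented for this generation of DeepHyper.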

# Just to print your problem, to test its definition and imports in the
# current python environment.
if __name__ == '__main__':
    print(Problem)

Example #3
from nas_problems.polynome2.load_data import load_data
from nas_problems.polynome2.search_space import create_search_space
from deephyper.problem import NaProblem

Problem = NaProblem()

Problem.load_data(load_data, size=1000)

Problem.search_space(create_search_space)

Problem.hyperparameters(batch_size=128,
                        learning_rate=0.001,
                        optimizer="rmsprop",
                        num_epochs=5)

Problem.loss("mse")

Problem.metrics(["r2"])

Problem.objective("val_r2__last")

Problem.post_training(
    num_epochs=60,
    metrics=["r2"],
    model_checkpoint={
        "monitor": "val_r2",
        "mode": "max",
        "save_best_only": True,
        "verbose": 1,
    },
    early_stopping={
        # The snippet is truncated here; the settings below are assumed to
        # mirror the model_checkpoint block above.
        "monitor": "val_r2",
        "mode": "max",
        "verbose": 1,
        "patience": 10,
    },
)
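The post_training stage retrains the best architectures found by the search for longer (60 epochs here); the model_checkpoint and early_stopping dictionaries map onto the keyword arguments of the corresponding Keras callbacks.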
Example #4
from deephyper.problem import NaProblem
from nas_big_data.combo.load_data import load_data_cache
from nas_big_data.combo.search_space_shared import create_search_space

Problem = NaProblem(seed=2019)

Problem.load_data(load_data_cache)

Problem.search_space(create_search_space, num_layers=5)

# schedules: https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules

Problem.hyperparameters(
    lsr_batch_size=True,
    lsr_learning_rate=True,
    batch_size=Problem.add_hyperparameter((16, 2048, "log-uniform"),
                                          "batch_size"),
    learning_rate=Problem.add_hyperparameter(
        (1e-4, 0.01, "log-uniform"),
        "learning_rate",
    ),
    optimizer=Problem.add_hyperparameter(
        ["sgd", "rmsprop", "adagrad", "adam", "adadelta", "adamax", "nadam"],
        "optimizer"),
    patience_ReduceLROnPlateau=Problem.add_hyperparameter(
        (3, 30), "patience_ReduceLROnPlateau"),
    patience_EarlyStopping=Problem.add_hyperparameter(
        (3, 30), "patience_EarlyStopping"),
    num_epochs=100,
    verbose=0,
    callbacks=dict(
        # Truncated in the original; a plausible completion, assuming the two
        # patience hyperparameters above are wired into these callbacks.
        ReduceLROnPlateau=dict(monitor="val_r2", mode="max", verbose=0),
        EarlyStopping=dict(monitor="val_r2", mode="max", verbose=0),
    ),
)
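Problem.add_hyperparameter follows the convention used elsewhere in DeepHyper: a (low, high) tuple declares a numeric range, an optional third element such as "log-uniform" sets the sampling prior, and a list declares a categorical choice. A hedged illustration, with a hypothetical extra hyperparameter:

# Hypothetical, for illustration only: a float sampled uniformly from [0.0, 0.5].
dropout = Problem.add_hyperparameter((0.0, 0.5), "dropout")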