Example #1
from litebo.facade.bo_facade import BayesianOptimization

def tune_meta_learner():
    cs = build_configspace()
    def_value = obj_function(cs.get_default_configuration())
    print("Default Value: %.2f" % (def_value))

    bo = BayesianOptimization(obj_function, cs, max_runs=50, time_limit_per_trial=1200)
    bo.run()
    inc_value = bo.get_incumbent()
    config = inc_value[0][0]

    print('Best hyperparameter config found', config)
    return config
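
The helpers build_configspace and obj_function are not shown in this example. A minimal sketch, assuming a small illustrative search space and an objective that returns a validation loss to minimise (the hyperparameters and the loss computation below are placeholders, not the original meta-learner code):

from ConfigSpace import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
    UniformIntegerHyperparameter

def build_configspace():
    # Illustrative search space; the real meta-learner space is not shown above.
    cs = ConfigurationSpace()
    cs.add_hyperparameters([
        UniformFloatHyperparameter("learning_rate", 1e-3, 1.0,
                                   default_value=0.1, log=True),
        UniformIntegerHyperparameter("n_estimators", 50, 500, default_value=100),
    ])
    return cs

def obj_function(config):
    # Placeholder objective: the real code would train the meta-learner with
    # `config` and return a validation loss (lower is better).
    params = config.get_dictionary()
    return abs(params["learning_rate"] - 0.05) + 1.0 / params["n_estimators"]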
Example #2
import pickle as pk

from litebo.facade.bo_facade import BayesianOptimization

def tune_meta_learner():
    cs = build_configspace()
    def_value = objective_function(cs.get_default_configuration())
    print("Default Value: %.2f" % (def_value))

    bo = BayesianOptimization(objective_function, cs, max_runs=50, time_limit_per_trial=150)
    bo.run()
    inc_value = bo.get_incumbent()
    config = inc_value[0][0]

    with open(meta_dir + 'meta_learner_%s_%s_%s_config.pkl' % (meta_algo, metric, hash_id), 'wb') as f:
        pk.dump(config, f)
    print('Best hyperparameter config found', config)
    return config
Example #3
def evaluate(mode, dataset, run_id, metric):
    print(mode, dataset, run_id, metric)

    metric = get_metric(metric)
    train_data, test_data = load_train_test_data(dataset,
                                                 task_type=MULTICLASS_CLS)

    cs = _classifiers[algo_name].get_hyperparameter_search_space()
    model = UnParametrizedHyperparameter("estimator", algo_name)
    cs.add_hyperparameter(model)
    default_hpo_config = cs.get_default_configuration()

    fe_evaluator = ClassificationEvaluator(default_hpo_config,
                                           scorer=metric,
                                           name='fe',
                                           resampling_strategy='holdout',
                                           seed=1)

    hpo_evaluator = ClassificationEvaluator(default_hpo_config,
                                            scorer=metric,
                                            data_node=train_data,
                                            name='hpo',
                                            resampling_strategy='holdout',
                                            seed=1)

    fe_optimizer = BayesianOptimizationOptimizer(task_type=CLASSIFICATION,
                                                 input_data=train_data,
                                                 evaluator=fe_evaluator,
                                                 model_id=algo_name,
                                                 time_limit_per_trans=600,
                                                 mem_limit_per_trans=5120,
                                                 number_of_unit_resource=10,
                                                 seed=1)

    def objective_function(config):
        if benchmark == 'fe':
            return fe_optimizer.evaluate_function(config)
        else:
            return hpo_evaluator(config)

    if mode == 'bo':
        bo = BO(objective_function,
                config_space,
                max_runs=max_runs,
                surrogate_model='prob_rf')
        bo.run()
        print('BO result')
        print(bo.get_incumbent())
        perf = bo.history_container.incumbent_value
        runs = [bo.configurations, bo.perfs]
    elif mode == 'lite_bo':
        from litebo.facade.bo_facade import BayesianOptimization
        bo = BayesianOptimization(objective_function,
                                  config_space,
                                  max_runs=max_runs)
        bo.run()
        print('BO result')
        print(bo.get_incumbent())
        perf = bo.history_container.incumbent_value
        runs = [bo.configurations, bo.perfs]
    elif mode.startswith('tlbo'):
        _, gp_fusion = mode.split('_')
        meta_feature_vec = metafeature_dict[dataset]
        past_datasets = test_datasets.copy()
        if dataset in past_datasets:
            past_datasets.remove(dataset)
        past_history = load_runhistory(past_datasets)

        gp_models = [
            gp_models_dict[dataset_name] for dataset_name in past_datasets
        ]
        tlbo = TLBO(objective_function,
                    config_space,
                    past_history,
                    gp_models=gp_models,
                    dataset_metafeature=meta_feature_vec,
                    max_runs=max_runs,
                    gp_fusion=gp_fusion)
        tlbo.run()
        print('TLBO result')
        print(tlbo.get_incumbent())
        runs = [tlbo.configurations, tlbo.perfs]
        perf = tlbo.history_container.incumbent_value
    else:
        raise ValueError('Invalid mode.')
    file_saved = '%s_%s_%s_result_%d_%d_%s.pkl' % (mode, algo_name, dataset,
                                                   max_runs, run_id, benchmark)
    with open(data_dir + file_saved, 'wb') as f:
        pk.dump([perf, runs], f)
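
This snippet reads several module-level names that are defined elsewhere in the original script: algo_name, benchmark, config_space, max_runs and data_dir, plus the TLBO metadata (metafeature_dict, test_datasets, gp_models_dict and load_runhistory). A hypothetical set-up for the plain 'bo' path, with placeholder values only:

# Hypothetical module-level settings; the values are placeholders, not the original script's.
algo_name = 'random_forest'   # key into _classifiers
benchmark = 'hpo'             # 'fe' routes configurations to fe_optimizer instead
max_runs = 50
data_dir = './data/'

if __name__ == '__main__':
    # config_space (used by the BO/TLBO branches) must also be defined at module
    # level, e.g. built from the chosen classifier's search space.
    evaluate('bo', 'pc4', run_id=1, metric='bal_acc')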
Example #4
def test_branin():
    space_dict = {
        "parameters": {
            "x1": {
                "type": "float",
                "bound": [-5, 10],
                "default": 0
            },
            "x2": {
                "type": "float",
                "bound": [0, 15]
            },
        }
    }

    cs = get_config_space_from_dict(space_dict)
    print(cs)

    bo = BayesianOptimization(branin,
                              cs,
                              max_runs=30,
                              time_limit_per_trial=3,
                              logging_dir='logs')
    bo.run()
    inc_value = bo.get_incumbent()
    print('BO', '=' * 30)
    print(inc_value)

    # Evaluate the random search.
    bo = BayesianOptimization(branin,
                              cs,
                              max_runs=30,
                              time_limit_per_trial=3,
                              sample_strategy='random',
                              logging_dir='logs')
    bo.run()
    inc_value = bo.get_incumbent()
    print('RANDOM', '=' * 30)
    print(inc_value)

    # Evaluate batch BO with median imputation.
    bo = BatchBayesianOptimization(branin,
                                   cs,
                                   max_runs=10,
                                   batch_size=3,
                                   time_limit_per_trial=3,
                                   sample_strategy='median_imputation',
                                   logging_dir='logs')
    bo.run()
    inc_value = bo.get_incumbent()
    print('MEDIAN IMPUTATION BATCH BO', '=' * 30)
    print(inc_value)

    # Evaluate batch BO with local penalization.
    bo = BatchBayesianOptimization(branin,
                                   cs,
                                   max_runs=10,
                                   batch_size=3,
                                   time_limit_per_trial=3,
                                   sample_strategy='local_penalization',
                                   logging_dir='logs')
    bo.run()
    inc_value = bo.get_incumbent()
    print('LOCAL PENALIZATION BATCH BO', '=' * 30)
    print(inc_value)
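
branin itself is not defined in this example. A minimal sketch matching the x1/x2 search space above, i.e. the standard two-dimensional Branin function, where the optimizer passes a configuration that can be indexed by parameter name and expects the value to minimise in return:

import numpy as np

def branin(config):
    # Branin function over x1 in [-5, 10], x2 in [0, 15]; global minimum ~0.398.
    x1, x2 = config['x1'], config['x2']
    a = 1.0
    b = 5.1 / (4 * np.pi ** 2)
    c = 5.0 / np.pi
    r, s, t = 6.0, 10.0, 1.0 / (8 * np.pi)
    return a * (x2 - b * x1 ** 2 + c * x1 - r) ** 2 + s * (1 - t) * np.cos(x1) + s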
Example #5
            "default": 0
        },
        "x2": {
            "type": "float",
            "bound": [0, 15]
        },
    }
}

from litebo.utils.config_space.space_utils import get_config_space_from_dict
from litebo.facade.bo_facade import BayesianOptimization
cs = get_config_space_from_dict(space_dict)
print(cs)

bo = BayesianOptimization(branin,
                          cs,
                          max_runs=90,
                          time_limit_per_trial=3,
                          logging_dir='logs')
bo.run()
inc_value = bo.get_incumbent()
print('BO', '=' * 30)
print(inc_value)

# Evaluate the random search.
bo = BayesianOptimization(branin,
                          cs,
                          max_runs=90,
                          time_limit_per_trial=3,
                          sample_strategy='random',
                          logging_dir='logs')
bo.run()
Example #6
def evaluate(mth, dataset, run_id):
    print(mth, dataset, run_id)
    train_data, test_data = load_train_test_data(dataset,
                                                 test_size=0.3,
                                                 task_type=MULTICLASS_CLS)

    def objective_function(config):
        metric = get_metric('bal_acc')
        _, estimator = get_estimator(config.get_dictionary())
        X_train, y_train = train_data.data
        X_test, y_test = test_data.data
        estimator.fit(X_train, y_train)
        return -metric(estimator, X_test, y_test)

    def tpe_objective_function(config):
        metric = get_metric('bal_acc')
        _, estimator = get_estimator(config)
        X_train, y_train = train_data.data
        X_test, y_test = test_data.data
        estimator.fit(X_train, y_train)
        return -metric(estimator, X_test, y_test)

    config_space = get_configspace()

    if mth == 'gp_bo':
        bo = BO(objective_function, config_space, max_runs=max_runs)
        bo.run()
        print('new BO result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    elif mth == 'rf_bo':
        bo = BO(objective_function,
                config_space,
                surrogate_model='prob_rf',
                max_runs=max_runs)
        bo.run()
        print('new BO result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    elif mth == 'lite_bo':
        from litebo.facade.bo_facade import BayesianOptimization
        bo = BayesianOptimization(objective_function,
                                  config_space,
                                  max_runs=max_runs)
        bo.run()
        print('lite BO result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    elif mth == 'smac':
        from smac.scenario.scenario import Scenario
        from smac.facade.smac_facade import SMAC
        # Scenario object
        scenario = Scenario({
            "run_obj": "quality",
            "runcount-limit": max_runs,
            "cs": config_space,
            "deterministic": "true"
        })
        smac = SMAC(scenario=scenario,
                    rng=np.random.RandomState(42),
                    tae_runner=objective_function)
        incumbent = smac.optimize()
        perf_bo = objective_function(incumbent)
        print('SMAC BO result')
        print(perf_bo)
    elif mth == 'tpe':
        config_space = get_configspace('tpe')
        from hyperopt import tpe, fmin, Trials
        trials = Trials()
        fmin(tpe_objective_function,
             config_space,
             tpe.suggest,
             max_runs,
             trials=trials)
        perfs = [trial['result']['loss'] for trial in trials.trials]
        perf_bo = min(perfs)
    elif mth == 'tpe_bo':
        from mindware.components.transfer_learning.tlbo.tpe_optimizer import TPE_BO
        bo = TPE_BO(objective_function, config_space, max_runs=max_runs)
        bo.run()
        print('TPE BO result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    elif mth == 'random_search':
        from mindware.components.transfer_learning.tlbo.tpe_optimizer import TPE_BO
        bo = TPE_BO(objective_function,
                    config_space,
                    surrogate_model=mth,
                    max_runs=max_runs)
        bo.run()
        print('random search result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    else:
        raise ValueError('Invalid method.')
    return perf_bo
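
Note the two objective signatures: objective_function receives a ConfigSpace Configuration (hence config.get_dictionary()), while tpe_objective_function receives the plain dictionary sampled by hyperopt, so get_configspace('tpe') is expected to return a hyperopt-style space. A hypothetical invocation, assuming max_runs is supplied as a module-level global as in the snippet and using placeholder arguments:

# Hypothetical driver; the dataset name and run id are placeholders.
max_runs = 50

if __name__ == '__main__':
    perf = evaluate('lite_bo', 'pc4', run_id=1)
    print('best objective value (negated balanced accuracy):', perf)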
Example #7
    "gamma", ["auto", "value"],
    default_value="auto")  # only rbf, poly, sigmoid
gamma_value = UniformFloatHyperparameter("gamma_value",
                                         0.0001,
                                         8,
                                         default_value=1)
cs.add_hyperparameters([gamma, gamma_value])
# We only activate gamma_value if gamma is set to "value"
cs.add_condition(InCondition(child=gamma_value, parent=gamma,
                             values=["value"]))
# And again we can restrict the use of gamma in general to the choice of the kernel
cs.add_condition(
    InCondition(child=gamma, parent=kernel, values=["rbf", "poly", "sigmoid"]))

# Example call of the objective function with the default configuration.
def_value = svm_from_cfg(cs.get_default_configuration())
print("Default Value: %.2f" % (def_value))

# Optimize, using a SMAC-object
print("Optimizing! Depending on your machine, this might take a few minutes.")
bo = BayesianOptimization(svm_from_cfg,
                          cs,
                          max_runs=30,
                          time_limit_per_trial=30,
                          logging_dir='logs')
bo.run()
inc_value = bo.get_incumbent()

print(inc_value)
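
svm_from_cfg is not defined in this excerpt. A minimal sketch, assuming the familiar pattern of this SVM example: train an sklearn SVC with the sampled hyperparameters on a placeholder dataset (iris here) and return 1 minus the cross-validated accuracy as the cost to minimise. The gamma/gamma_value handling mirrors the conditional space defined above; any remaining hyperparameters are assumed to map directly onto SVC keyword arguments.

import numpy as np
from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

iris = datasets.load_iris()

def svm_from_cfg(cfg):
    # Keep only active hyperparameters and convert to plain keyword arguments.
    cfg = {k: v for k, v in cfg.get_dictionary().items() if v is not None}
    # Fold the conditional gamma parameters back into a single SVC argument.
    if "gamma" in cfg:
        cfg["gamma"] = cfg.pop("gamma_value") if cfg["gamma"] == "value" else "auto"
        cfg.pop("gamma_value", None)
    clf = svm.SVC(**cfg, random_state=42)
    scores = cross_val_score(clf, iris.data, iris.target, cv=5)
    return 1 - np.mean(scores)  # cost: lower is better
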
def evaluate(mth, dataset, run_id):
    print(mth, dataset, run_id)
    train_data, test_data = load_train_test_data(dataset,
                                                 test_size=0.3,
                                                 task_type=MULTICLASS_CLS)

    cs = _classifiers[algo_name].get_hyperparameter_search_space()
    model = UnParametrizedHyperparameter("estimator", algo_name)
    cs.add_hyperparameter(model)
    default_hpo_config = cs.get_default_configuration()
    metric = get_metric('bal_acc')

    fe_evaluator = ClassificationEvaluator(default_hpo_config,
                                           scorer=metric,
                                           name='fe',
                                           resampling_strategy='holdout',
                                           seed=1)
    fe_optimizer = BayesianOptimizationOptimizer(task_type=MULTICLASS_CLS,
                                                 input_data=train_data,
                                                 evaluator=fe_evaluator,
                                                 model_id=algo_name,
                                                 time_limit_per_trans=600,
                                                 mem_limit_per_trans=5120,
                                                 number_of_unit_resource=10,
                                                 seed=1)
    config_space = fe_optimizer.hyperparameter_space

    def objective_function(config):
        return fe_optimizer.evaluate_function(config)

    if mth == 'gp_bo':
        bo = BO(objective_function, config_space, max_runs=max_runs)
        bo.run()
        print('new BO result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    elif mth == 'lite_bo':
        from litebo.facade.bo_facade import BayesianOptimization
        bo = BayesianOptimization(objective_function,
                                  config_space,
                                  max_runs=max_runs)
        bo.run()
        print('lite BO result')
        print(bo.get_incumbent())
        perf_bo = bo.history_container.incumbent_value
    elif mth == 'smac':
        from smac.scenario.scenario import Scenario
        from smac.facade.smac_facade import SMAC
        # Scenario object
        scenario = Scenario({
            "run_obj": "quality",
            "runcount-limit": max_runs,
            "cs": config_space,
            "deterministic": "true"
        })
        smac = SMAC(scenario=scenario,
                    rng=np.random.RandomState(42),
                    tae_runner=objective_function)
        incumbent = smac.optimize()
        perf_bo = objective_function(incumbent)
        print('SMAC BO result')
        print(perf_bo)
    else:
        raise ValueError('Invalid method.')
    return perf_bo