Пример #1
0
    def test_camelback(self):
        """Check that each documented optimum of Camelback evaluates to f_opt.

        Fix: the original fetched ``get_meta_information()`` once per loop
        iteration *and* once per assertion; the metadata is loop-invariant,
        so it is now queried a single time and reused.
        """
        camelback = synthetic_functions.Camelback()
        meta_information = camelback.get_meta_information()

        for optimum in meta_information["optima"]:
            # significant=9 matches values to nine significant figures.
            np.testing.assert_approx_equal(camelback(optimum),
                                           meta_information["f_opt"],
                                           significant=9)
Пример #2
0
class Camelback2D(AbstractFunction):
    """Two-dimensional six-hump camelback benchmark wrapped as an
    ``AbstractFunction``.

    Exposes the hpobench Camelback objective together with its known
    minimum/maximum metadata and box bounds.  ``INVERT = True`` flips the
    sign convention (presumably because the surrounding framework
    maximizes — confirm against ``AbstractFunction``).
    """

    _camelback = hpobench.Camelback()
    # Fix: query the benchmark metadata once instead of once per constant.
    _meta_information = _camelback.get_meta_information()

    ORIGINAL_MIN = _meta_information["f_opt"]
    ORIGINAL_MIN_ARGUMENT = np.array(_meta_information["optima"])
    # The maximum within the box lies at a corner; evaluate it there.
    ORIGINAL_MAX_ARGUMENT = np.array([[-3, -2.], [3, 2.]])
    ORIGINAL_MAX = _camelback.objective_function(ORIGINAL_MAX_ARGUMENT[0])["function_value"]
    ORIGINAL_UPPER_BOUNDS = np.array([3., 2.])
    ORIGINAL_LOWER_BOUNDS = np.array([-3., -2.])

    INVERT = True

    @classmethod
    def base_function(cls, x):
        """Evaluate the raw (non-inverted) camelback objective at ``x``."""
        return cls._camelback.objective_function(x)["function_value"]
sys.path.insert(0, path_join(dirname(__file__), "pysgmcmc_development"))
from collections import OrderedDict
from functools import partial
from itertools import product

from pysgmcmc_experiments.experiment_wrapper import to_experiment

import numpy as np
from robo.fmin import (bayesian_optimization, entropy_search, random_search,
                       bohamiann)
from robo.fmin.keras_bohamiann import bohamiann as keras_bohamiann
import hpolib.benchmarks.synthetic_functions as hpobench

# Synthetic benchmark functions, keyed by a short name, in the order the
# experiments iterate them.
BENCHMARKS = OrderedDict([
    ("branin", hpobench.Branin()),
    ("hartmann3", hpobench.Hartmann3()),
    ("hartmann6", hpobench.Hartmann6()),
    ("camelback", hpobench.Camelback()),
    ("goldstein_price", hpobench.GoldsteinPrice()),
    ("rosenbrock", hpobench.Rosenbrock()),
    ("sin_one", hpobench.SinOne()),
    ("sin_two", hpobench.SinTwo()),
    ("bohachevsky", hpobench.Bohachevsky()),
    ("levy", hpobench.Levy()),
])

# Optimizers under comparison.  The three RoBO BO variants share one entry
# point and differ only in their surrogate ``model_type``.
METHODS = OrderedDict([
    ("rf", partial(bayesian_optimization, model_type="rf")),
    ("gp", partial(bayesian_optimization, model_type="gp")),
    ("gp_mcmc", partial(bayesian_optimization, model_type="gp_mcmc")),
    ("entropy_search", entropy_search),
    ("random_search", random_search),
    ("bohamiann", bohamiann),
    ("keras_bohamiann", keras_bohamiann),
])
Пример #4
0
tpeOptimizer = TPEOptimizer()
randomOptimizer = RandomSearchOptimizer()

# Display name -> optimizer instance.  NOTE(review): ``atpeOptimizer`` is
# created earlier in the file (outside this chunk) — presumably an ATPE
# optimizer; confirm upstream.
algorithms = dict([
    ("ATPE", atpeOptimizer),
    ("TPE", tpeOptimizer),
    ("Random", randomOptimizer),
])

# Run Scipy.minimize on artificial testfunctions.
# Instantiate the hpobench synthetic benchmarks.  NOTE(review): only
# ``logreg`` is iterated below; these instances are presumably consumed
# further down in the file — confirm before removing any of them.

h3 = hpobench.Hartmann3()
h6 = hpobench.Hartmann6()
b = hpobench.Branin()
bo = hpobench.Bohachevsky()
cb = hpobench.Camelback()
fo = hpobench.Forrester()
gp = hpobench.GoldsteinPrice()
le = hpobench.Levy()
rb = hpobench.Rosenbrock()

# SVM-on-MNIST benchmark (a real, non-synthetic objective).
logreg = svm_benchmark.SvmOnMnist()

# For each benchmark, print a banner and build a JSON-schema-style search
# space from its metadata.  (Loop body continues beyond this excerpt.)
for f in [logreg]:
    info = f.get_meta_information()

    print("=" * 50)
    print(info['name'])

    # Hyperparameter search space in JSON-schema form; properties are
    # presumably filled in from ``info`` below — not visible here.
    space = {"type": "object", "properties": {}}
Пример #5
0
 def __init__(self, path=None):
     """Wrap the Camelback synthetic function as a benchmark instance.

     The inherited domain is then replaced with a tighter box so that
     function values stay within a reasonable range.
     """
     super().__init__(synthetic_functions.Camelback(), path)
     lower = np.array([-2, -1])
     upper = np.array([2, 1])
     self._domain = ContinuousDomain(lower, upper)
Пример #6
0
 def make_hpo_fn(self):
     """Build and return a fresh Camelback benchmark instance."""
     camelback = synthetic_functions.Camelback()
     return camelback