# Example #1
    def test_rosenbrock(self):
        """Every documented optimum of Rosenbrock must evaluate to f_opt."""
        benchmark = synthetic_functions.Rosenbrock()

        for optimum in benchmark.get_meta_information()["optima"]:
            np.testing.assert_approx_equal(
                benchmark(optimum),
                benchmark.get_meta_information()["f_opt"],
                significant=9,
            )
# Example #2
class Rosenbrock2D(AbstractFunction):
    """2D Rosenbrock benchmark wrapped as an ``AbstractFunction``.

    Delegates evaluation to the HPOlib ``Rosenbrock`` benchmark and exposes
    the bounds/extrema metadata the surrounding framework reads as class
    attributes.
    """

    # Single shared HPOlib benchmark instance; `base_function` below only
    # calls its objective, so one instance serves all evaluations.
    _rosenbrock = hpobench.Rosenbrock()

    # Hard-coded maximum of the (un-inverted) function on the box below.
    # NOTE(review): matches the standard Rosenbrock value at (10, -5)
    # (the ORIGINAL_MAX_ARGUMENT declared below) — confirm.
    ORIGINAL_MAX = 1102581
    # Global minimum value and its location(s), taken from HPOlib metadata.
    ORIGINAL_MIN = _rosenbrock.get_meta_information()["f_opt"]
    ORIGINAL_MIN_ARGUMENT = np.array(
        _rosenbrock.get_meta_information()["optima"])
    ORIGINAL_MAX_ARGUMENT = np.array([[10., -5.]])
    # Search box used by the framework; note the asymmetric [-5, 10] range
    # is encoded as separate per-dimension upper/lower bound vectors.
    ORIGINAL_UPPER_BOUNDS = np.array([10., 10.])
    ORIGINAL_LOWER_BOUNDS = np.array([-5., -5.])

    # presumably tells the framework to negate values (turning the
    # minimization benchmark into a maximization target) — confirm
    # against AbstractFunction.
    INVERT = True

    @classmethod
    def base_function(cls, x):
        # HPOlib returns a dict; the scalar objective lives under
        # "function_value".
        return cls._rosenbrock.objective_function(x)["function_value"]
from collections import OrderedDict
from functools import partial
from itertools import product

import numpy as np
from robo.fmin import (bayesian_optimization, entropy_search, random_search,
                       bohamiann)
from robo.fmin.keras_bohamiann import bohamiann as keras_bohamiann
import hpolib.benchmarks.synthetic_functions as hpobench

from pysgmcmc_experiments.experiment_wrapper import to_experiment

# Synthetic HPOlib benchmarks to optimize, keyed by a short name.
# Insertion order is preserved by OrderedDict.
BENCHMARKS = OrderedDict([
    ("branin", hpobench.Branin()),
    ("hartmann3", hpobench.Hartmann3()),
    ("hartmann6", hpobench.Hartmann6()),
    ("camelback", hpobench.Camelback()),
    ("goldstein_price", hpobench.GoldsteinPrice()),
    ("rosenbrock", hpobench.Rosenbrock()),
    ("sin_one", hpobench.SinOne()),
    ("sin_two", hpobench.SinTwo()),
    ("bohachevsky", hpobench.Bohachevsky()),
    ("levy", hpobench.Levy()),
])

# Optimizer entry points, keyed by a short name. The three RoBO
# bayesian_optimization variants differ only in their surrogate model.
METHODS = OrderedDict([
    ("rf", partial(bayesian_optimization, model_type="rf")),
    ("gp", partial(bayesian_optimization, model_type="gp")),
    ("gp_mcmc", partial(bayesian_optimization, model_type="gp_mcmc")),
    ("entropy_search", entropy_search),
    ("random_search", random_search),
    ("bohamiann", bohamiann),
    ("keras_bohamiann", keras_bohamiann),
])

CONFIGURATIONS = tuple(({
    "benchmark": benchmark,
# Example #4
    "ATPE": atpeOptimizer,
    "TPE": tpeOptimizer,
    "Random": randomOptimizer
}

# Run Scipy.minimize on artificial testfunctions

# Instantiate the HPOlib synthetic benchmarks once up front so they can be
# referred to by short names below.
h3 = hpobench.Hartmann3()
h6 = hpobench.Hartmann6()
b = hpobench.Branin()
bo = hpobench.Bohachevsky()
cb = hpobench.Camelback()
fo = hpobench.Forrester()
gp = hpobench.GoldsteinPrice()
le = hpobench.Levy()
rb = hpobench.Rosenbrock()

# SVM-on-MNIST benchmark; the loop below iterates only over this one.
# NOTE(review): variable is named `logreg` but constructs SvmOnMnist —
# presumably a leftover name; confirm before renaming.
logreg = svm_benchmark.SvmOnMnist()

for f in [logreg]:
    info = f.get_meta_information()

    print("=" * 50)
    print(info['name'])

    space = {"type": "object", "properties": {}}

    for boundIndex, bound in enumerate(info['bounds']):
        space['properties'][str(boundIndex)] = {
            "type": "number",
            "scaling": "linear",
 def __init__(self, path=None):
     """Construct the wrapper around the Rosenbrock synthetic benchmark.

     :param path: optional path forwarded to the parent constructor
         (semantics defined by the base class — not visible here).
     """
     super().__init__(synthetic_functions.Rosenbrock(), path)