Example #1
def _get_gp_est(space, **kwargs):
    import numpy as np  # used below but missing from the original imports
    from skopt.utils import Space
    from skopt.utils import normalize_dimensions
    from skopt.utils import ConstantKernel, HammingKernel, Matern
    from skopt.learning import GaussianProcessRegressor

    # Set space
    space = Space(space)
    space = Space(normalize_dimensions(space.dimensions))
    n_dims = space.transformed_n_dims

    cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
    # If all dimensions are categorical, use Hamming kernel
    if space.is_categorical:
        other_kernel = HammingKernel(length_scale=np.ones(n_dims))
    else:
        other_kernel = Matern(length_scale=np.ones(n_dims),
                              length_scale_bounds=[(0.01, 100)] * n_dims,
                              nu=2.5)

    base_estimator = GaussianProcessRegressor(kernel=cov_amplitude *
                                              other_kernel,
                                              normalize_y=True,
                                              noise="gaussian",
                                              n_restarts_optimizer=2)

    base_estimator.set_params(**kwargs)
    return base_estimator
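A minimal usage sketch (the two-dimensional real space and the kwarg are purely illustrative): the cooked estimator can be handed to skopt's Optimizer as base_estimator.

from skopt import Optimizer

dims = [(0.0, 1.0), (0.0, 1.0)]
gp = _get_gp_est(dims, n_restarts_optimizer=4)
opt = Optimizer(dimensions=dims, base_estimator=gp)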
Example #2
    def bay_opt(self):
        # Initialize sample
        for i in range(self.niter):
            # ConstantKernel * RBF (the name m52 suggests Matern 5/2, but this is an RBF kernel)
            m52_1 = ConstantKernel(1) * RBF(np.array([100] * 12))
            self.gpr_obj = GaussianProcessRegressor(kernel=m52_1,
                                                    alpha=10,
                                                    noise="gaussian")

            m52_2 = ConstantKernel(1) * RBF(np.array([100] * 12))
            self.gpr_constraint = GaussianProcessRegressor(kernel=m52_2,
                                                           alpha=10,
                                                           noise="gaussian")

            # Update Gaussian process with existing samples
            #print(self.x.shape,self.y_obj.shape )
            self.gpr_obj.fit(self.x, self.y_obj)
            #print(self.gpr_obj.predict(self.x))
            self.gpr_constraint.fit(self.x, self.y_constraint)

            # Obtain next sampling point from the acquisition function (expected_improvement)
            X_next = self.propose_location()

            # Obtain next noisy sample from the objective function
            Y_next1 = np.array([self.obj_func(X_next)]).reshape(-1, 1)
            Y_next2 = np.array([self.constraint_func(X_next)]).reshape(-1, 1)
            #print(Y_next1, Y_next1.shape, Y_next2,Y_next2.shape)
            # Add sample to previous samples
            self.x = np.vstack((self.x, X_next))
            self.y_obj = np.vstack((self.y_obj, Y_next1))
            self.y_constraint = np.vstack((self.y_constraint, Y_next2))
        idx = np.where(self.y_constraint > 0)[0]
        t = idx[np.argmin(self.y_obj[idx])]
        self.f_best = self.y_obj[t]
        self.min_x = self.x[t]
        return self.f_best, self.min_x
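To make the feasible-best selection at the end of bay_opt concrete, here is a tiny self-contained numpy sketch with made-up values (constraint values > 0 are treated as feasible):

import numpy as np

y_obj = np.array([[3.0], [1.0], [2.0]])          # objective samples
y_constraint = np.array([[-1.0], [0.5], [2.0]])  # > 0 means feasible
idx = np.where(y_constraint > 0)[0]              # feasible indices: [1, 2]
t = idx[np.argmin(y_obj[idx])]                   # index of the best feasible sample
assert t == 1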
Example #3
def cook_estimator(base_estimator, space=None, **kwargs):
    """Cook a default estimator.
    For the special base_estimator called "DUMMY" the return value is None.
    This corresponds to sampling points at random, hence there is no need
    for an estimator.
    Parameters
    ----------
    base_estimator : "GP", "RF", "ET", "GBRT", "DUMMY" or sklearn regressor
        Should inherit from `sklearn.base.RegressorMixin`.
        In addition the `predict` method should have an optional `return_std`
        argument, which returns `std(Y | x)` along with `E[Y | x]`.
        If base_estimator is one of ["GP", "RF", "ET", "GBRT", "DUMMY"], a
        surrogate model corresponding to the relevant `X_minimize` function
        is created.
    space : Space instance
        Has to be provided if the base_estimator is a Gaussian process.
        Ignored otherwise.
    kwargs : dict
        Extra parameters provided to the base_estimator at init time.
    """
    if isinstance(base_estimator, str):
        base_estimator = base_estimator.upper()
        if base_estimator not in ["GP", "ET", "RF", "GBRT", "DUMMY"]:
            raise ValueError("Valid strings for the base_estimator parameter "
                             " are: 'RF', 'ET', 'GP', 'GBRT' or 'DUMMY' not "
                             "%s." % base_estimator)
    elif not is_regressor(base_estimator):
        raise ValueError("base_estimator has to be a regressor.")

    if base_estimator == "GP":
        if space is not None:
            space = Space(space)
            space = Space(normalize_dimensions(space.dimensions))
            n_dims = space.transformed_n_dims
            is_cat = space.is_categorical

        else:
            raise ValueError("Expected a Space instance, not None.")

        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
        # only special if *all* dimensions are categorical
        if is_cat:
            other_kernel = HammingKernel(length_scale=np.ones(n_dims))
        else:
            other_kernel = Matern(length_scale=np.ones(n_dims),
                                  length_scale_bounds=[(0.01, 100)] * n_dims,
                                  nu=2.5)

        base_estimator = GaussianProcessRegressor(kernel=cov_amplitude *
                                                  other_kernel,
                                                  normalize_y=True,
                                                  noise="gaussian",
                                                  n_restarts_optimizer=2)

    if ('n_jobs' in kwargs.keys()) and not hasattr(base_estimator, 'n_jobs'):
        del kwargs['n_jobs']

    base_estimator.set_params(**kwargs)
    return base_estimator
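A minimal call sketch, assuming the module-level names this function relies on (np, Space, normalize_dimensions, the kernels, and GaussianProcessRegressor) are imported as in skopt.utils:

est = cook_estimator("GP", space=[(0.0, 1.0), (-5.0, 5.0)],
                     n_restarts_optimizer=4)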
Example #4
def test_gpr_uses_noise():
    """ Test that gpr is using WhiteKernel by default"""
    X = np.random.normal(size=[100, 2])
    Y = np.random.normal(size=[100])

    g_gaussian = GaussianProcessRegressor()
    g_gaussian.fit(X, Y)
    m, sigma = g_gaussian.predict(X[0:1], return_cov=True)

    assert sigma > 0
Example #5
def test_gpr_uses_noise():
    """ Test that gpr is using WhiteKernel"""

    X = np.random.normal(size=[100, 2])
    Y = np.random.normal(size=[100])

    g_gaussian = GaussianProcessRegressor(noise='gaussian')
    g_gaussian.fit(X, Y)
    m, sigma = g_gaussian.predict(X[0:1], return_cov=True)
    assert sigma > 0
Example #6
def test_acquisition_api():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    y = rng.randn(10)
    gpr = GaussianProcessRegressor()
    gpr.fit(X, y)

    for method in [gaussian_ei, gaussian_lcb, gaussian_pi]:
        assert_array_equal(method(X, gpr).shape, 10)
        assert_raises(ValueError, method, rng.rand(10), gpr)
Example #8
File: bo.py Project: mborisyak/abo
def gpbo_cycle(ndim,
               space,
               target_f,
               n_iters=10,
               acq_function=ei,
               model=None,
               n_multi_start=100,
               show_progress=True):
    xrange = (lambda title, n: tqdm_notebook(range(n), postfix=title)
              ) if show_progress else (lambda title, n: range(n))

    space = np.array(space)

    if model is None:
        kernel = WhiteKernel(0.001, noise_level_bounds=[1.0e-5, 1.0e-3]) + \
                 Matern(1.0, nu=1.5, length_scale_bounds=[1.0e-3, 1.0e+3])

        model = GaussianProcessRegressor(kernel=kernel,
                                         normalize_y=False,
                                         noise=None,
                                         n_restarts_optimizer=2)

    known_points = []
    known_values = []
    cost = []

    for i in xrange('BO iteration', n_iters):
        acq = acq_function(model, known_points, known_values)

        candidates = []
        for _ in xrange('acquisition', n_multi_start):
            x0 = np.random.uniform(size=(ndim, ))

            x, f, _ = fmin_l_bfgs_b(maxiter=1000,
                                    func=acq,
                                    x0=x0,
                                    approx_grad=False,
                                    bounds=[(0, 1)] * ndim)

            candidates.append((x, f))

        best = np.argmin([f for x, f in candidates])
        suggestion, _ = candidates[best]
        suggestion = reverse_transform(suggestion.reshape(1, -1), space)[0, :]

        point_cost, observed = target_f(suggestion)

        known_points.append(suggestion)
        known_values.append(observed)
        cost.append(point_cost)

        model.fit(transform(np.array(known_points), space),
                  np.array(known_values))

        yield model, acq, space, known_points, known_values, cost
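A hypothetical driver for the generator above; the quadratic target and its unit cost are made up, and the helpers the function relies on (ei, transform, reverse_transform) are assumed to come from the same project file:

import numpy as np

def target(x):
    # target_f must return a (cost, observed_value) pair
    return 1.0, float(np.sum((x - 0.3) ** 2))

for model, acq, sp, points, values, cost in gpbo_cycle(
        2, [(0.0, 1.0), (0.0, 1.0)], target, n_iters=5, show_progress=False):
    pass  # inspect `points` and `values` per iteration if desired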
Example #9
def test_acquisition_gradient():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    X_new = rng.randn(5)
    mat = Matern()
    wk = WhiteKernel()
    gpr = GaussianProcessRegressor(kernel=mat + wk)
    gpr.fit(X, y)

    for acq_func in ["LCB", "PI", "EI"]:
        check_gradient_correctness(X_new, gpr, acq_func, np.max(y))
Example #10
def test_noise_equals_gaussian():
    gpr1 = GaussianProcessRegressor(rbf + wk).fit(X, y)

    # gpr2 sets the noise component to zero at predict time.
    gpr2 = GaussianProcessRegressor(rbf, noise="gaussian").fit(X, y)
    assert_false(gpr1.noise_)
    assert_true(gpr2.noise_)
    assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
    mean1, std1 = gpr1.predict(X, return_std=True)
    mean2, std2 = gpr2.predict(X, return_std=True)
    assert_array_almost_equal(mean1, mean2, 4)
    assert_false(np.any(std1 == std2))
Example #12
    def optimize(self,
                 num_vars,
                 objective_function,
                 gradient_function=None,
                 variable_bounds=None,
                 initial_point=None):

        callbacks = []

        def alt_obj_fn(pars):
            fn = objective_function(pars)
            callbacks.append({'point': pars, 'fn': fn})
            return fn

        result = super().optimize(num_vars, alt_obj_fn, gradient_function,
                                  variable_bounds, initial_point)

        if self._num_restarts is not None:
            for i in range(self._num_restarts - 1):
                if variable_bounds is not None:
                    init_pt = [
                        np.random.uniform(dn, up) for dn, up in variable_bounds
                    ]
                else:
                    init_pt = [
                        np.random.uniform(-np.pi, +np.pi)
                        for _ in range(num_vars)
                    ]
                result_new = super().optimize(num_vars, alt_obj_fn,
                                              gradient_function,
                                              variable_bounds, init_pt)
                if result_new[1] < result[1]:
                    result = result_new

        X = [step['point'] for step in callbacks]
        y = [step['fn'] for step in callbacks]
        if self._make_model and (len(callbacks) < self._max_model_points):
            model = GaussianProcessRegressor()
            model.fit(X, y)
        else:
            model = None

        if variable_bounds is not None:
            space = Space([Real(low, high) for low, high in variable_bounds])
        else:
            space = None

        self.optimization_result = create_result(
            X, y, space=space, models=[model] if model is not None else None)

        return result
Example #13
def test_mean_gradient():
    length_scale = np.arange(1, 6)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    X_new = rng.randn(5)

    rbf = RBF(length_scale=length_scale, length_scale_bounds="fixed")
    gpr = GaussianProcessRegressor(rbf, random_state=0).fit(X, y)

    mean, std, mean_grad = gpr.predict(
        np.expand_dims(X_new, axis=0),
        return_std=True, return_cov=False, return_mean_grad=True)
    num_grad = optimize.approx_fprime(
        X_new, lambda x: predict_wrapper(x, gpr)[0], 1e-4)
    assert_array_almost_equal(mean_grad, num_grad, decimal=3)
Example #14
    def __init__(self, seq_len, embedder, Xinit, yinit, noise_std=None):
        self.seq_len = seq_len
        self.noise_std = noise_std
        self.current_best_seq = None
        self.current_best_val = np.inf
        self.X_sample = []
        self.y_sample = []
        self.nqueries = 0
        m52 = ConstantKernel(1.0) * StringEmbedKernel(seq_len=seq_len,
                                                      embedder=embedder)
        if noise_std is None:
            noise_std = np.std(yinit)
        gpr = GaussianProcessRegressor(kernel=m52, alpha=noise_std**2)
        gpr.fit(Xinit, yinit)
        self.gpr = gpr
Example #16
def mediator_triage(mediator_id, base_params_dict):
    # Mediator id 1 is the optimal
    if mediator_id == 1:
        return OptimalMediator(**base_params_dict)
    elif 2 <= mediator_id <= 4:
        # Poly Mediator Individual. Need to add an additional parameter for the degree of the polynomial.
        poly_mediator_params = base_params_dict.copy()
        poly_mediator_params['degree'] = mediator_id
        return PolyMediatorIndividual(**poly_mediator_params)
    elif mediator_id == 5:
        # Default Bayes Mediator. Does not take in any additional parameters.
        return BayesMediatorSocial(**base_params_dict)
    elif 6 <= mediator_id <= 8:
        # Polynomial Bayes Mediator, Social.
        base_mediator_params = base_params_dict.copy()
        base_mediator_params['base_estimator'] = \
            GaussianProcessRegressor(
                kernel=Exponentiation(
                    Sum(Product(ConstantKernel(), DotProduct()),
                        ConstantKernel(1.0, (0.01, 1000.0))),
                    float(mediator_id) - 4.0),
                normalize_y=True,
                noise="gaussian",
                n_restarts_optimizer=2)
        return BayesMediatorSocial(**base_mediator_params)
    elif mediator_id == 9:
        # GP Mediator Individual with default kernel, which is the same as that used by BayesMediator (Matern kernel).
        return GPMediatorIndividual(**base_params_dict)
    elif mediator_id == 10:
        # GP Mediator Social with default kernel, which is the same as that used by BayesMediator (Matern kernel).
        return GPMediatorSocial(**base_params_dict)
    elif 11 <= mediator_id <= 13:
        poly_mediator_params = base_params_dict.copy()
        poly_mediator_params['degree'] = mediator_id - 9
        return PolyMediatorSocial(**poly_mediator_params)
    else:
        raise ValueError('Unknown mediator with id = {}'.format(mediator_id))
Example #17
    def __init__(self, space, **kwargs):
        super(BayesianOptimizer, self).__init__(space)

        self.optimizer = Optimizer(
            base_estimator=GaussianProcessRegressor(**kwargs),
            dimensions=convert_orion_space_to_skopt_space(space))

        self.strategy = "cl_min"
Example #18
def choose_optimizer(optimizer):
    """
    Choose a surrogate model for Bayesian Optimization

    :param optimizer: list of setting of the BO experiment
    :type optimizer: Optimizer
    :return: surrogate model
    :rtype: scikit object
    """
    params_space_list = dimensions_aslist(optimizer.search_space)
    estimator = None
    # Choice of the surrogate model
    # Random forest
    if optimizer.surrogate_model == "RF":
        estimator = RandomForestRegressor(n_estimators=100,
                                          min_samples_leaf=3,
                                          random_state=optimizer.random_state)
    # Extra Tree
    elif optimizer.surrogate_model == "ET":
        estimator = ExtraTreesRegressor(n_estimators=100,
                                        min_samples_leaf=3,
                                        random_state=optimizer.random_state)
    # GP Minimize
    elif optimizer.surrogate_model == "GP":
        estimator = GaussianProcessRegressor(
            kernel=optimizer.kernel, random_state=optimizer.random_state)
    # Random Search
    elif optimizer.surrogate_model == "RS":
        estimator = "dummy"

    if estimator == "dummy":
        opt = skopt_optimizer(
            params_space_list,
            base_estimator=estimator,
            acq_func=optimizer.acq_func,
            acq_optimizer='sampling',
            initial_point_generator=optimizer.initial_point_generator,
            random_state=optimizer.random_state)
    else:
        opt = skopt_optimizer(
            params_space_list,
            base_estimator=estimator,
            acq_func=optimizer.acq_func,
            acq_optimizer='sampling',
            n_initial_points=optimizer.n_random_starts,
            initial_point_generator=optimizer.initial_point_generator,
            # only works with scikit-optimize 0.8
            acq_optimizer_kwargs={
                "n_points": 10000,
                "n_restarts_optimizer": 5,
                "n_jobs": 1
            },
            acq_func_kwargs={
                "xi": 0.01,
                "kappa": 1.96
            },
            random_state=optimizer.random_state)
    return opt
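For context, a sketch of the ask/tell loop the returned skopt optimizer supports (`optimizer` holds the experiment settings and `objective` is a placeholder):

opt = choose_optimizer(optimizer)
for _ in range(10):
    x = opt.ask()              # next candidate configuration
    opt.tell(x, objective(x))  # report the observed score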
Example #19
def test_white_kernel_as_noise():
    # first .fit()
    gpr1 = GaussianProcessRegressor(rbf + wk).fit(X, y)
    gpr2 = GaussianProcessRegressor(rbf, noise="gaussian").fit(X, y)
    mean1, std1 = gpr1.predict(X, return_std=True)
    mean2, std2 = gpr2.predict(X, return_std=True)
    assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
    assert not np.any(std1 == std2)
    assert _param_for_white_kernel_in_Sum(gpr1.kernel_)[1] == 'k2'
    assert _param_for_white_kernel_in_Sum(gpr2.kernel_)[1] == 'k2'
    # second .fit()
    gpr1 = gpr1.fit(X, y)
    gpr2 = gpr2.fit(X, y)
    mean1, std1 = gpr1.predict(X, return_std=True)
    mean2, std2 = gpr2.predict(X, return_std=True)
    assert_almost_equal(gpr1.kernel_.k2.noise_level, gpr2.noise_, 4)
    assert _param_for_white_kernel_in_Sum(gpr1.kernel_)[1] == 'k2'
    assert _param_for_white_kernel_in_Sum(gpr2.kernel_)[1] == 'k2'
    assert not np.any(std1 == std2)
Example #20
def interpolate(thetas,
                z_thetas,
                xx,
                yy,
                method='linear',
                z_uncertainties_thetas=None,
                matern_exponent=0.5,
                length_scale_min=0.001,
                length_scale_default=1.,
                length_scale_max=1000.,
                noise_level=0.001,
                subtract_min=False):
    if method == 'cubic':

        interpolator = CloughTocher2DInterpolator(thetas[:], z_thetas)

        zz = interpolator(np.dstack((xx.flatten(), yy.flatten())))
        zi = zz.reshape(xx.shape)

    elif method == 'gp':

        if z_uncertainties_thetas is not None:
            gp = GaussianProcessRegressor(
                normalize_y=True,
                kernel=ConstantKernel(1.0, (1.e-9, 1.e9)) * Matern(
                    length_scale=[length_scale_default],
                    length_scale_bounds=[(length_scale_min, length_scale_max)],
                    nu=matern_exponent) + WhiteKernel(noise_level),
                n_restarts_optimizer=10,
                alpha=z_uncertainties_thetas)
        else:
            gp = GaussianProcessRegressor(
                normalize_y=True,
                kernel=ConstantKernel(1.0, (1.e-9, 1.e9)) * Matern(
                    length_scale=length_scale_default,
                    length_scale_bounds=(length_scale_min, length_scale_max),
                    nu=matern_exponent) + WhiteKernel(noise_level),
                n_restarts_optimizer=10)

        gp.fit(thetas[:], z_thetas[:])

        zz, _ = gp.predict(np.c_[xx.ravel(), yy.ravel()], return_std=True)
        zi = zz.reshape(xx.shape)

    elif method == 'linear':
        interpolator = LinearNDInterpolator(thetas[:], z_thetas)
        zz = interpolator(np.dstack((xx.flatten(), yy.flatten())))
        zi = zz.reshape(xx.shape)

    else:
        raise ValueError("Unknown interpolation method: {!r}".format(method))

    mle = np.unravel_index(zi.argmin(), zi.shape)

    if subtract_min:
        zi -= zi[mle]

    return zi, mle
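A hypothetical call with random data, purely to illustrate the expected shapes (20 sampled thetas evaluated on a 30x30 grid):

import numpy as np

thetas = np.random.rand(20, 2)
z_thetas = np.sum(thetas ** 2, axis=1)
xx, yy = np.meshgrid(np.linspace(0.0, 1.0, 30), np.linspace(0.0, 1.0, 30))
zi, mle = interpolate(thetas, z_thetas, xx, yy, method='gp')
assert zi.shape == xx.shape  # one interpolated value per grid point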
Example #21
    def _init_optimizer(self):
        if self.optimizer is None:
            self.optimizer = Optimizer(
                base_estimator=GaussianProcessRegressor(
                    alpha=self.alpha,
                    n_restarts_optimizer=self.n_restarts_optimizer,
                    normalize_y=self.normalize_y),
                dimensions=convert_orion_space_to_skopt_space(self.space),
                n_initial_points=self.n_initial_points,
                acq_func=self.acq_func)
Example #22
def test_gpr_handles_similar_points():
    """
    This tests whether our implementation of GPR
    does not crash when the covariance matrix whose
    inverse is calculated during fitting of the
    regressor is singular.
    Singular covariance matrix often indicates
    that same or very close points are explored
    during the optimization procedure.

    Essentially checks that the default value of `alpha` is non zero.
    """
    X = np.random.rand(8, 3)
    y = np.random.rand(8)

    X[:3, :] = 0.0
    y[:3] = 1.0

    model = GaussianProcessRegressor()
    # this fails if singular matrix is not handled
    model.fit(X, y)
Example #24
def load_state(path):
    if os.path.exists(path):
        with open(path, 'rb') as f:
            dic = pickle.load(f)
        optimizer, unfinished, finished, reported = (dic['optimizer'], dic['unfinished'],
                                                     dic['finished'], dic['reported'])
    else:
        optimizer = Optimizer(base_estimator=GaussianProcessRegressor(),
                              dimensions=[Integer(5, 20) for i in range(8)],
                              acq_optimizer='sampling')
        unfinished = []
        finished = {}
        reported = {}
    return optimizer, unfinished, finished, reported
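A hypothetical save_state counterpart (not part of the original snippet) that writes the same dictionary layout load_state reads back:

def save_state(path, optimizer, unfinished, finished, reported):
    # Persist the exact keys that load_state expects.
    with open(path, 'wb') as f:
        pickle.dump({'optimizer': optimizer, 'unfinished': unfinished,
                     'finished': finished, 'reported': reported}, f)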
Example #25
def hpscan():
    run_for = 20
    start = time.time()
    space = [
        #Integer(25, 25), #name ='epochs'),
        #Integer(5, 8), #name ='batch_size'),
        #Integer(8, 10), #name='latent size'),
        Real(1, 8),  #name='gen_weight'),
        Real(0.1, 10),  #name='aux_weight'),
        Real(0.1, 10),  #name='ecal_weight'),
        #Real(10**-5, 10**0, "log-uniform"), #name ='lr'),
        #Real(8, 9), #name='rho'),
        #Real(0, 0.0001), #name='decay'),
        #Categorical([True,False]), #name='dflag'),
        #Integer(4, 64), #name='df'),
        #Integer(2, 16), #name='dx'),
        #Integer(2, 16), #name='dy'),
        #Integer(2, 16), #name='dz'),
        #Real(0.01, 0.5), #name='dp'),
        #Categorical([True,False]), #name='gflag'),
        #Integer(4, 64), #name='gf'),
        #Integer(2, 16), #name='gx'),
        #Integer(2, 16), #name='gy'),
        #Integer(2, 16)] #name='gz')
    ]
    externalize = externalfunc(prog=evaluate_threaded, names=space)
    use_func = externalize

    o = Optimizer(
        n_initial_points=5,
        acq_func='gp_hedge',
        acq_optimizer='auto',
        base_estimator=GaussianProcessRegressor(
            alpha=0.0,
            copy_X_train=True,
            #kernel=1**2 * Matern(length_scale=[1, 1], nu=2.5),
            n_restarts_optimizer=2,
            noise='gaussian',
            normalize_y=True,
            optimizer='fmin_l_bfgs_b'),
        dimensions=space,
    )

    m = manager(n=4, skobj=o, iterations=run_for, func=use_func, wait=0)
    start = time.time()
    m.run()
    # `res_gp` was never defined here; read the results from the manager's
    # skopt optimizer instead (assumed to be exposed as `m.sk`, as in the
    # threaded example further below; get_result() assumed available).
    import numpy as np
    best = np.argmin(m.sk.yi)
    x_best = m.sk.Xi[best]
    print(
        "Best parameters:\nLoss Weights:\n_ Weight Gen loss ={}\n_ Weight Aux loss ={}\n_ Weight Ecal loss ={}"
        .format(x_best[0], x_best[1], x_best[2]))
    print("Time taken {} seconds".format(time.time() - start))
    plot_convergence(m.sk.get_result()).savefig("result_hyp.pdf")
Example #26
    def __init__(self, dimensions_file: str, min_num_results_to_fit: int = 8, lease_timout='2 days'):
        self.__all_experiments = pd.DataFrame()
        self.__all_experiments['status'] = [self.WAITING] * len(self.__all_experiments)
        self.__all_experiments['last_update'] = pd.Series(pd.Timestamp(float('NaN')))
        self.__all_experiments['client'] = [""] * len(self.__all_experiments)

        self.__lease_duration = pd.to_timedelta(lease_timout)
        self.__leased_experiments = []

        dims = self.__load_dimensions(dimensions_file)
        self.__dimension_names = list(dims.keys())
        self.__dimensions = list(dims.values())
        self.__min_num_results_to_fit = min_num_results_to_fit

        # Initialize

        dim_types = [check_dimension(d) for d in self.__dimensions]
        is_cat = all([isinstance(check_dimension(d), Categorical) for d in dim_types])
        if is_cat:
            transformed_dims = [check_dimension(d, transform="identity") for d in self.__dimensions]
        else:
            transformed_dims = []
            for dim_type, dim in zip(dim_types, self.__dimensions):
                if isinstance(dim_type, Categorical):
                    transformed_dims.append(check_dimension(dim, transform="onehot"))
                # To make sure that GP operates in the [0, 1] space
                else:
                    transformed_dims.append(check_dimension(dim, transform="normalize"))

        space = Space(transformed_dims)
        # Default GP
        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))

        if is_cat:
            other_kernel = HammingKernel(length_scale=np.ones(space.transformed_n_dims))
            acq_optimizer = "lbfgs"
        else:
            other_kernel = Matern(
                length_scale=np.ones(space.transformed_n_dims),
                length_scale_bounds=[(0.01, 100)] * space.transformed_n_dims,
                nu=2.5)

        base_estimator = GaussianProcessRegressor(
            kernel=cov_amplitude * other_kernel,
            normalize_y=True, random_state=None, alpha=0.0, noise='gaussian',
            n_restarts_optimizer=2)

        self.__opt = Optimizer(self.__dimensions, base_estimator, acq_optimizer="lbfgs",
                               n_random_starts=100, acq_optimizer_kwargs=dict(n_points=10000))
Example #27
    def _initialize(self):
        """Initialize the optimizer once the space is transformed"""
        self.optimizer = Optimizer(
            base_estimator=GaussianProcessRegressor(
                alpha=self.alpha,
                n_restarts_optimizer=self.n_restarts_optimizer,
                noise=self.noise,
                normalize_y=self.normalize_y,
            ),
            dimensions=orion_space_to_skopt_space(self.space),
            n_initial_points=self.n_initial_points,
            acq_func=self.acq_func,
        )

        self.seed_rng(self.seed)
Example #28
    def _get_gp_regressor(length_scale=1., nu=2.5, noise=0.1):
        """Creates the GaussianProcessRegressor model

        Args:
            length_scale (Union[float, list]): Length scale of the GP kernel. If float, it is the
                same for all dimensions, if array each element defines the length scale of the
                dimension
            nu (float): Controls the smoothness of the approximation.
                see https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.kernels.Matern.html
            noise (float): Standard deviation of the assumed observation noise;
                passed to the regressor as alpha = noise ** 2

        Returns:
            A skopt.learning.GaussianProcessRegressor with the given parameters

        """
        kernel = ConstantKernel(1.0) * Matern(length_scale=length_scale, nu=nu)
        return GaussianProcessRegressor(kernel=kernel, alpha=noise ** 2)
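Usage sketch: with noise=0.1 the regressor's alpha becomes 0.01, the variance of the assumed observation noise (the random data below is illustrative; numpy is assumed imported as np):

gpr = _get_gp_regressor(length_scale=[1.0, 1.0], nu=1.5, noise=0.1)
gpr.fit(np.random.rand(10, 2), np.random.rand(10))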
Example #29
    def optimizer(self):
        """Return skopt's optimizer."""
        if self._optimizer is None:
            self._optimizer = Optimizer(
                base_estimator=GaussianProcessRegressor(
                    alpha=self.alpha,
                    n_restarts_optimizer=self.n_restarts_optimizer,
                    noise=self.noise,
                    normalize_y=self.normalize_y),
                dimensions=orion_space_to_skopt_space(self.space),
                n_initial_points=self.n_initial_points,
                acq_func=self.acq_func)

            self.seed_rng(self.seed)

        return self._optimizer
Example #31
def optimize(x0, y0, n_calls):

    estimator = GaussianProcessRegressor(alpha=1e-4,
                                         normalize_y=True,
                                         noise='gaussian',
                                         n_restarts_optimizer=10,
                                         kernel=RBF())

    w = gp_minimize(black_box, [(low, high)] * dim,
                    base_estimator=estimator,
                    acq_func="EI",
                    n_calls=n_calls,
                    verbose=False,
                    x0=x0,
                    y0=y0,
                    n_random_starts=1,
                    n_jobs=-1)
    return w.x_iters, w.func_vals.tolist()
Example #32
def cook_estimator(base_estimator, space=None, **kwargs):
    if isinstance(base_estimator, str):
        base_estimator = base_estimator.upper()
        allowed_estimators = ['GP', 'ET', 'RF', 'GBRT', 'DUMMY']
        if base_estimator not in allowed_estimators:
            raise ValueError(
                'invalid estimator, should be in {}, got {}'.format(
                    allowed_estimators, base_estimator))
    elif not is_regressor(base_estimator):
        raise ValueError('base estimator should be a regressor, got {}'.format(
            base_estimator))

    if base_estimator == 'GP':
        if space is not None:
            # space = Space(space)
            space = Space(normalize_param_space(space))
            n_params = space.transformed_n_params
            is_cat = space.is_categorical
        else:
            raise ValueError('expected a space instance, got None')
        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
        if is_cat:
            other_kernel = HammingKernel(length_scale=np.ones(n_params))
        else:
            other_kernel = Matern(length_scale=np.ones(n_params),
                                  length_scale_bounds=[(0.01, 100)] * n_params,
                                  nu=2.5)
        base_estimator = GaussianProcessRegressor(kernel=cov_amplitude *
                                                  other_kernel,
                                                  normalize_y=True,
                                                  noise='gaussian',
                                                  n_restarts_optimizer=2)
    elif base_estimator == 'RF':
        base_estimator = RandomForestRegressor(n_estimators=100,
                                               min_samples_leaf=3)
    elif base_estimator == 'ET':
        base_estimator = ExtraTreesRegressor(n_estimators=100,
                                             min_samples_leaf=3)
    elif base_estimator == 'GBRT':
        grbt = GradientBoostingRegressor(n_estimators=30, loss='quantile')
        base_estimator = GradientBoostingQuantileRegressor(base_estimator=grbt)
    elif base_estimator == 'DUMMY':
        return None

    base_estimator.set_params(**kwargs)
    return base_estimator
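A short sketch of cooking each estimator type this variant supports (the kwargs are illustrative; the GP branch additionally needs the module's normalize_param_space helper):

gp = cook_estimator('GP', space=[(0.0, 1.0)], n_restarts_optimizer=4)
rf = cook_estimator('RF', n_estimators=200)   # overrides the default 100 trees
dummy = cook_estimator('DUMMY')               # returns None: sample at random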
Example #33
def test_categorical_only2():
    from numpy import linalg
    from skopt.space import Categorical
    from skopt.learning import GaussianProcessRegressor
    space = [Categorical([1, 2, 3]), Categorical([4, 5, 6])]
    opt = Optimizer(space,
                    base_estimator=GaussianProcessRegressor(alpha=1e-7),
                    acq_optimizer='lbfgs',
                    n_initial_points=10,
                    n_jobs=2)

    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
    opt.tell(next_x, [linalg.norm(x) for x in next_x])
    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
    opt.tell(next_x, [linalg.norm(x) for x in next_x])
    next_x = opt.ask(n_points=4)
    assert len(next_x) == 4
Example #34
def test_bayes_opt_base_estimator():
    from skopt.learning import GaussianProcessRegressor
    from skopt.learning.gaussian_process.kernels import ConstantKernel
    from skopt.learning.gaussian_process.kernels import Matern
    cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
    matern = Matern(
        length_scale=np.ones(2), length_scale_bounds=[(0.01, 100)] * 2, nu=2.5)
    base_estimator = GaussianProcessRegressor(
        kernel=cov_amplitude * matern,
        normalize_y=True, random_state=0, alpha=0.0,
        noise="gaussian", n_restarts_optimizer=2)
    opt = SkOptOptimizer(
        dimensions=[(-1.0, 1.0), (-1.0, 1.0)], base_estimator=base_estimator,
        random_state=0)
    opt.init(2)
    params = np.empty(2)
    for _ in range(10):
        opt.get_next_parameters(params)
        feedback = [-np.linalg.norm(params - 0.5384 * np.ones(2))]
        opt.set_evaluation_feedback(feedback)
    assert_greater(opt.get_best_fitness(), -0.3)
Example #35
        dimensions=dim,
        n_calls=run_for,
    )

    print("GPM best value", res.fun, "at", res.x)
    #print(res)
    print("took", time.mktime(time.gmtime()) - start, "[s]")

    o = Optimizer(
        n_initial_points=5,
        acq_func='gp_hedge',
        acq_optimizer='auto',
        base_estimator=GaussianProcessRegressor(alpha=0.0, copy_X_train=True,
                                                n_restarts_optimizer=2,
                                                noise='gaussian', normalize_y=True,
                                                optimizer='fmin_l_bfgs_b'),
        dimensions=dim,
    )

    m = manager(n=4,
                skobj=o,
                iterations=run_for,
                func=use_func,
                wait=0)
    start = time.mktime(time.gmtime())
    m.run()
    import numpy as np
    best = np.argmin(m.sk.yi)
    print("Threaded GPM best value", m.sk.yi[best], "at", m.sk.Xi[best])
Example #36
# TODO: data structure for bounds are inconsistent among multiple mediators. Should have the same data structure everywhere.
lower_bounds = [
    float(scenario.getroot()[0][i].attrib['lowerbound'])
    for i in range(0, num_issues)
]
upper_bounds = [
    float(scenario.getroot()[0][i].attrib['upperbound'])
    for i in range(0, num_issues)
]

# Run the mediator
num_init_random_points = 1
num_random_restarts = 5
base_estimator = GaussianProcessRegressor(
    kernel=Exponentiation(
        Sum(Product(ConstantKernel(), DotProduct()),
            ConstantKernel(1.0, (0.01, 1000.0))),
        2.0),
    normalize_y=True,
    noise="gaussian",
    n_restarts_optimizer=2)
# Overrides the estimator built above; BayesMediatorSocial then falls back to its default.
base_estimator = None
bayes_mediator_social = BayesMediatorSocial(
    num_issues=num_issues,
    num_agents=2,
    u_funcs=u_funcs,
    lower_bounds=lower_bounds,
    upper_bounds=upper_bounds,
    num_init_random_points=num_init_random_points,
    num_random_restarts=num_random_restarts,
    base_estimator=base_estimator,
    plot_mediator=num_issues == 1,
    verbose=True)
maximizer, max_value = bayes_mediator_social.mediate(10)