Example #1
# imports assumed by this test snippet; `nt` aliases numpy.testing.
import numpy.testing as nt
import pygp

def test_init_basic():
    # make sure we can initialize correctly.
    _ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'se'))
    _ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'matern1'))
    _ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'matern3'))
    _ = pygp.BasicGP.from_gp(pygp.BasicGP(1, 1, 1, 0, 2, 'matern5'))

    # throw an error with an unknown kernel.
    nt.assert_raises(ValueError,
                     pygp.inference.BasicGP, 1, 1, 1, 0, 2, 'foo')

    # throw an error for from_gp with incorrect kernel.
    likelihood = pygp.likelihoods.Gaussian(1)
    kernel = pygp.kernels.Periodic(1, 1, 1)
    gp = pygp.inference.ExactGP(likelihood, kernel, 0)
    nt.assert_raises(ValueError, pygp.BasicGP.from_gp, gp)
Example #2
# imports assumed by this test snippet; `demo` is taken to be the package
# that ships the example data file `xy.npz` (not resolved here).
import os
import numpy as np
import numpy.testing as nt
import pygp

def test_optimization():
    # load the data.
    cdir = os.path.abspath(os.path.dirname(demo.__file__))
    data = np.load(os.path.join(cdir, 'xy.npz'))
    X = data['X']
    y = data['y']

    # create the model and add data.
    gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
    gp.add_data(X, y)

    # optimize the model; passing `{'sn': None}` constrains the noise
    # hyperparameter `sn` to stay fixed at its current value.
    pygp.optimize(gp, {'sn': None})

    # make sure our constraint is satisfied
    nt.assert_equal(gp.get_hyper()[0], np.log(0.1))
Example #3
# imports assumed by this test snippet; `spop` aliases scipy.optimize.
import numpy as np
import numpy.testing as nt
import scipy.optimize as spop
import pygp

def check_acq_gradient(policy):
    # randomly generate some data.
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    y = rng.rand(10)

    # create the model.
    model = pygp.BasicGP(0.5, 1, [1, 1])
    model.add_data(X, y)

    # get the computed gradients.
    index = policy(model)
    xtest = rng.rand(20, 2)
    _, grad = index(xtest, grad=True)

    # numerically approximate the gradients.
    index_ = lambda x: index(x[None])
    grad_ = np.array([spop.approx_fprime(x, index_, 1e-8) for x in xtest])

    nt.assert_allclose(grad, grad_, rtol=1e-6, atol=1e-6)
Example #4
File: bayesopt.py  Project: wavelets/pybo
# imports assumed by this excerpt; `rstate` and `get_components` are
# helpers defined elsewhere in pybo.
import numpy as np
from numpy.lib.recfunctions import append_fields
import pygp

def solve_bayesopt(objective,
                   bounds,
                   niter=100,
                   init='middle',
                   policy='ei',
                   solver='lbfgs',
                   recommender='latent',
                   model=None,
                   noisefree=False,
                   ftrue=None,
                   rng=None,
                   callback=None):
    """
    Maximize the given function using Bayesian Optimization.

    Args:
        objective: function handle representing the objective function.
        bounds: bounds of the search space as a (d,2)-array.
        niter: horizon for optimization.
        init: the initialization component.
        policy: the acquisition component.
        solver: the inner-loop solver component.
        recommender: the recommendation component.
        model: the Bayesian model instantiation.
        noisefree: a boolean denoting that the model is noisefree; this only
                   applies if a default model is used (i.e. it is ignored if
                   the `model` argument is given).
        ftrue: a ground-truth function (for evaluation).
        rng: either a RandomState object or an integer used to seed the state;
             this will be fed to each component that requests randomness.
        callback: a function to call on each iteration for visualization.

    Note that the modular way in which this function has been written allows
    one to also pass parameters directly to some of the components. This works
    for the `init`, `policy`, `solver`, and `recommender` inputs. Each of these
    components can be passed as either a string, a function, or a 2-tuple whose
    first item is a string/function and whose second item is a dictionary of
    additional arguments to pass to the component (see the short usage sketch
    after this function).

    Returns:
        A numpy record array containing a trace of the optimization process.
        The fields of this array are `x`, `y`, and `xbest` corresponding to the
        query locations, outputs, and recommendations at each iteration. If
        ground-truth is known an additional field `fbest` will be included.
    """
    # make sure the bounds are a 2d-array.
    bounds = np.array(bounds, dtype=float, ndmin=2)

    # see if the query object itself defines ground truth.
    if (ftrue is None) and hasattr(objective, 'get_f'):
        ftrue = objective.get_f

    # initialize the random number generator.
    rng = rstate(rng)

    # get the model components.
    init, policy, solver, recommender = \
        get_components(init, policy, solver, recommender, rng)

    # create a list of initial points to query.
    X = init(bounds)
    Y = [objective(x) for x in X]

    if model is None:
        # initialize parameters of a simple GP model.
        sf = np.std(Y) if (len(Y) > 1) else 10.
        mu = np.mean(Y)
        ell = bounds[:, 1] - bounds[:, 0]

        # FIXME: this may not be a great setting for the noise parameter
        sn = 1e-5 if noisefree else 1e-3

        # specify a hyperprior for the GP.
        prior = {
            'sn': (None if noisefree else
                   pygp.priors.Horseshoe(scale=0.1, min=1e-5)),
            'sf': pygp.priors.LogNormal(mu=np.log(sf), sigma=1., min=1e-6),
            'ell': pygp.priors.Uniform(ell / 100, ell * 2),
            'mu': pygp.priors.Gaussian(mu, sf),
        }

        # create the GP model (with hyperprior).
        model = pygp.BasicGP(sn, sf, ell, mu, kernel='matern5')
        model = pygp.meta.MCMC(model, prior, n=10, burn=100, rng=rng)

    # add any initial data to our model.
    model.add_data(X, Y)

    # allocate a datastructure containing "convergence" info.
    info = np.zeros(niter, [('x', np.float, (len(bounds), )), ('y', np.float),
                            ('xbest', np.float, (len(bounds), ))])

    # initialize the data.
    info['x'][:len(X)] = X
    info['y'][:len(Y)] = Y
    info['xbest'][:len(Y)] = [X[np.argmax(Y[:i + 1])] for i in xrange(len(Y))]

    for i in xrange(model.ndata, niter):
        # get the next point to evaluate.
        index = policy(model)
        x, _ = solver(index, bounds)

        # deal with any visualization.
        if callback is not None:
            callback(model, bounds, info[:i], x, index, ftrue)

        # make an observation and record it.
        y = objective(x)
        model.add_data(x, y)

        # record everything.
        info[i] = (x, y, recommender(model, bounds))

    if ftrue is not None:
        fbest = ftrue(info['xbest'])
        info = append_fields(info, 'fbest', fbest, usemask=False)

    return info
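
The 2-tuple convention described in the docstring, together with the returned
record array, can be exercised with a short sketch like the following. This is
a minimal sketch, not part of the pybo sources: the toy objective and the `xi`
keyword passed to the EI policy are assumptions for illustration.

import pybo

def objective(x):
    # toy one-dimensional objective with its maximum at x = 1.
    return float(-(x[0] - 1.0) ** 2)

# pass the policy as a (name, kwargs) 2-tuple; `xi` is an assumed
# exploration parameter for the EI policy.
info = pybo.solve_bayesopt(objective,
                           bounds=[0., 2.],
                           niter=20,
                           policy=('ei', {'xi': 0.1}))

# `info` is a numpy record array with fields `x`, `y`, and `xbest`.
print(info['xbest'][-1])    # final recommendation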
Example #5
File: advanced.py  Project: wavelets/pybo
    # this excerpt assumes `np`, `pygp`, `pybo`, and `rng` are defined
    # earlier in advanced.py.
    noise = 1e-1                                        # observation noise

    # define the objective function
    objective = pybo.functions.Gramacy(noise)
    bounds = objective.bounds                           # bounds of search space
    dim = bounds.shape[0]                               # dimension of space

    # prescribe initial hyperparameters
    sn = noise                                          # likelihood std dev
    sf = 1.0                                            # kernel amplitude
    ell = 0.25 * (bounds[:, 1] - bounds[:, 0])          # kernel length scale
    mu = 0.0                                            # prior mean

    # define model
    kernel = 'matern3'                                  # kernel family
    gp = pygp.BasicGP(sn, sf, ell, mu, kernel=kernel)   # initialize base GP
    prior = {                                           # hyperparameter priors
        'sn': pygp.priors.Horseshoe(0.1, min=1e-5),
        'sf': pygp.priors.LogNormal(np.log(sf), sigma=1., min=1e-6),
        'ell': pygp.priors.Uniform(ell / 100, ell * 2),
        'mu': pygp.priors.Gaussian(mu, sf)}
    model = pygp.meta.MCMC(gp, prior, n=10, rng=rng)    # meta-model for MCMC
                                                        # marginalization

    info = pybo.solve_bayesopt(
        objective,
        bounds,
        niter=30*dim,
        init='sobol',                                   # initialization policy
        policy='ei',                                    # exploration policy
        recommender='incumbent')                        # recommendation policy
Example #6
import os
import numpy as np
import matplotlib.pyplot as pl

import pygp
import pygp.priors
import pygp.plotting as pp

if __name__ == '__main__':
    # load the data.
    cdir = os.path.abspath(os.path.dirname(__file__))
    data = np.load(os.path.join(cdir, 'xy.npz'))
    X = data['X']
    y = data['y']

    # create the model and add data to it.
    model = pygp.BasicGP(sn=.1, sf=1, ell=.1)
    model.add_data(X, y)

    # find the ML hyperparameters.
    pygp.optimize(model)

    # create a prior structure.
    priors = {
        'sn': pygp.priors.Uniform(0.01, 1.0),
        'sf': pygp.priors.Uniform(0.01, 5.0),
        'ell': pygp.priors.Uniform(0.01, 1.0),
        'mu': pygp.priors.Uniform(-2, 2)
    }

    # create sample-based models.
    mcmc = pygp.meta.MCMC(model, priors, n=200, burn=100)
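
    # A hedged continuation of this demo: the lines below assume the MCMC
    # meta-model can be passed to `pp.plot_posterior` just like the base GP
    # (as in the basic demo in Example #7 below).
    pl.figure(1)
    pl.clf()
    pp.plot_posterior(mcmc)
    pl.draw()
    pl.show()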
Example #7
File: basic.py  Project: fagan2888/pygp
"""
Basic demo showing how to instantiate a simple GP model, add data to it, and
optimize its hyperparameters.
"""

import os
import numpy as np
import matplotlib.pyplot as pl

import pygp
import pygp.plotting as pp

if __name__ == '__main__':
    # load the data.
    cdir = os.path.abspath(os.path.dirname(__file__))
    data = np.load(os.path.join(cdir, 'xy.npz'))
    X = data['X']
    y = data['y']

    # create the model, add data, and optimize it.
    gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
    gp.add_data(X, y)
    pygp.optimize(gp)

    # plot the posterior.
    pl.figure(1)
    pl.clf()
    pp.plot_posterior(gp)
    pl.legend(loc=2)
    pl.draw()
    pl.show()