Example #1
def test_rstate():
    """Test the rstate helper."""
    rng = rstate()
    rng = rstate(rng)
    rng1 = rstate(1)
    rng2 = rstate(1)

    nt.assert_equal(rng1.randint(5), rng2.randint(5))
    nt.assert_raises(ValueError, rstate, 'foo')
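
For reference, the contract this test pins down can be satisfied by a helper along the following lines (a minimal sketch consistent with the test above, not necessarily pygp's exact implementation):

import numpy as np

def rstate(rng=None):
    """Return a numpy RandomState given a seed, a state, or None."""
    if rng is None:
        # no seed given: return a fresh RandomState.
        return np.random.RandomState()
    if isinstance(rng, np.random.RandomState):
        # an existing RandomState passes through unchanged.
        return rng
    if isinstance(rng, int):
        # an integer seeds a new RandomState, so equal seeds (e.g. rng1 and
        # rng2 above) produce identical draws.
        return np.random.RandomState(rng)
    raise ValueError('unknown seed or random state: %r' % (rng,))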
Example #2
def _sample_prior(model, priors, n, rng=None):
    rng = rstate(rng)

    # unpack priors
    # TODO -- Bobak: This snippet is copied from learning/sampling.py
    # and should probably be put into a Prior base class.
    priors = dict(priors)
    active = np.ones(model.nhyper, dtype=bool)
    logged = np.ones(model.nhyper, dtype=bool)

    for (key, block, log) in get_params(model):
        inactive = (key in priors) and (priors[key] is None)
        logged[block] = log
        active[block] = not inactive
        if inactive:
            del priors[key]
        else:
            priors[key] = (block, log, priors[key])
    priors = priors.values()

    # sample hyperparameters from prior
    hypers = np.tile(model.get_hyper(), (n, 1))
    for (block, log, prior) in priors:
        hypers[:, block] = (np.log(prior.sample(n, rng=rng)) if log else
                            prior.sample(n, rng=rng))

    return hypers
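
A hypothetical call, assuming `model` is a pygp.BasicGP whose hyperparameter blocks are named 'sn', 'sf', 'ell', and 'mu' (the prior constructors mirror those used in the solve_bayesopt examples below). Note that every block needs an entry; a value of None marks that block inactive so it keeps the model's current setting:

priors = {'sn': None,
          'sf': pygp.priors.LogNormal(mu=0., sigma=1.),
          'ell': pygp.priors.Uniform(0.01, 2.),
          'mu': pygp.priors.Gaussian(0., 1.)}
hypers = _sample_prior(model, priors, 100, rng=0)
# hypers has shape (100, model.nhyper); the inactive 'sn' column is constant.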
Example #3
File: _base.py Project: amoliu/pygp
    def sample(self, X, m=None, latent=True, rng=None):
        """
        Sample values from the posterior at points `X`. Given an `(n,d)`-array
        `X` this will return an `n`-vector corresponding to the resulting
        sample.

        If `m` is not `None` an `(m,n)`-array will be returned instead,
        corresponding to `m` such samples. If `latent` is `False` the sample
        will instead be returned corrupted by the observation noise. Finally
        `rng` can be used to seed the randomness.
        """
        X = self._kernel.transform(X)

        # this boolean indicates whether we'll flatten the sample to return a
        # vector, or if we'll return a set of samples as an array.
        flatten = (m is None)

        # get the relevant sizes.
        m = 1 if flatten else m
        n = len(X)

        # if a seed or instantiated RandomState is given use that, otherwise
        # use the global object.
        rng = rstate(rng)

        # add a tiny amount to the diagonal to make the cholesky of Sigma
        # stable and then add this correlated noise onto mu to get the sample.
        mu, Sigma = self._full_posterior(X)
        Sigma += 1e-10 * np.eye(n)
        f = mu[None] + np.dot(rng.normal(size=(m, n)), sla.cholesky(Sigma))

        if not latent:
            f = self._likelihood.sample(f.ravel(), rng).reshape(m, n)

        return f.ravel() if flatten else f
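
A hypothetical usage, assuming `gp` is a fitted pygp model over 1-d inputs; equal seeds give reproducible draws, and the first of several joint samples coincides with the single seeded draw:

X = np.linspace(0, 1, 50)[:, None]
f1 = gp.sample(X, rng=0)                # one latent sample: a 50-vector
f2 = gp.sample(X, m=3, rng=0)           # three joint samples: a (3, 50)-array
y1 = gp.sample(X, latent=False, rng=0)  # the same draw, plus observation noise
assert np.allclose(f1, f2[0])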
Example #4
File: _base.py Project: fagan2888/pygp
    def sample(self, X, m=None, latent=True, rng=None):
        """
        Sample values from the posterior at points `X`. Given an `(n,d)`-array
        `X` this will return an `n`-vector corresponding to the resulting
        sample.

        If `m` is not `None` an `(m,n)`-array will be returned instead,
        corresponding to `m` such samples. If `latent` is `False` the sample
        will instead be returned corrupted by the observation noise. Finally
        `rng` can be used to seed the randomness.
        """
        X = self._kernel.transform(X)

        # this boolean indicates whether we'll flatten the sample to return a
        # vector, or if we'll return a set of samples as an array.
        flatten = (m is None)

        # get the relevant sizes.
        m = 1 if flatten else m
        n = len(X)

        # if a seed or instantiated RandomState is given use that, otherwise
        # use the global object.
        rng = rstate(rng)

        # add a tiny amount to the diagonal to make the cholesky of Sigma
        # stable and then add this correlated noise onto mu to get the sample.
        mu, Sigma = self._full_posterior(X)
        Sigma += 1e-10 * np.eye(n)
        f = mu[None] + np.dot(rng.normal(size=(m, n)), sla.cholesky(Sigma))

        if not latent:
            f = self._likelihood.sample(f.ravel(), rng).reshape(m, n)

        return f.ravel() if flatten else f
Example #5
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        if self._std.ndim == 1:
            sample = self._mu + self._std * rng.randn(size, self.ndim)
        elif self._std.ndim == 2:
            # a full covariance: correlate the draws via its Cholesky factor.
            sample = self._mu + np.dot(rng.randn(size, self.ndim), self._std)

        return sample
Example #6
File: matern.py Project: amoliu/pygp
    def sample_spectrum(self, N, rng=None):
        rng = rstate(rng)
        sf2 = np.exp(self._logsf * 2)
        ell = np.exp(self._logell)
        a = self._d / 2.
        g = np.tile(rng.gamma(a, 1 / a, N), (self.ndim, 1)).T
        W = (rng.randn(N, self.ndim) / ell) / np.sqrt(g)
        return W, sf2
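
The sampled spectrum is what drives the random-Fourier-feature construction in the FourierSample example further below (Example #15). A minimal sketch of that connection, assuming `kernel` is an instantiated Matern kernel and `X` is an (n,d)-array:

N = 200
rng = rstate(0)
W, sf2 = kernel.sample_spectrum(N, rng)   # random frequencies, signal variance
b = rng.rand(N) * 2 * np.pi               # random phases
Phi = np.sqrt(2 * sf2 / N) * np.cos(np.dot(X, W.T) + b)
# np.dot(Phi, Phi.T) approximates the kernel matrix K(X, X) for large N.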
Example #7
File: priors.py Project: amoliu/pygp
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        if self._std.ndim == 1:
            sample = self._mu + self._std * rng.randn(size, self.ndim)
        elif self._std.ndim == 2:
            # a full covariance: correlate the draws via its Cholesky factor.
            sample = self._mu + np.dot(rng.randn(size, self.ndim), self._std)

        return sample
Example #8
File: matern.py Project: fagan2888/pygp
    def sample_spectrum(self, N, rng=None):
        rng = rstate(rng)
        sf2 = np.exp(self._logsf * 2)
        ell = np.exp(self._logell)
        a = self._d / 2.
        g = np.tile(rng.gamma(a, 1 / a, N), (self.ndim, 1)).T
        W = (rng.randn(N, self.ndim) / ell) / np.sqrt(g)
        return W, sf2
Example #9
File: gps.py Project: wavelets/pybo
    def __init__(self, bounds, gp, N=None, rng=None):
        self.bounds = np.array(bounds, dtype=float, ndmin=2)
        self._gp = gp.copy()
        self._rng = random.rstate(rng)

        # generate some sampled observations.
        N = N if (N is not None) else 100 * len(self.bounds)
        X = random.latin(bounds, N, self._rng)
        y = self._gp.sample(X, latent=False, rng=self._rng)

        # add them back to get a new "posterior".
        self._gp.add_data(X, y)
Example #10
def _slice_sample(logprob,
                  x0,
                  sigma=1.0,
                  step_out=True,
                  max_steps_out=1000,
                  rng=None):
    """
    Implementation of slice sampling taken almost directly from Snoek's
    spearmint package (with a few minor modifications).
    """
    rng = rstate(rng)

    def direction_slice(direction, x0):
        def dir_logprob(z):
            return logprob(direction * z + x0)

        upper = sigma * rng.rand()
        lower = upper - sigma
        llh_s = np.log(rng.rand()) + dir_logprob(0.0)

        l_steps_out = 0
        u_steps_out = 0
        if step_out:
            while dir_logprob(lower) > llh_s and l_steps_out < max_steps_out:
                l_steps_out += 1
                lower -= sigma
            while dir_logprob(upper) > llh_s and u_steps_out < max_steps_out:
                u_steps_out += 1
                upper += sigma

        while True:
            new_z = (upper - lower) * rng.rand() + lower
            new_llh = dir_logprob(new_z)
            if np.isnan(new_llh):
                raise Exception("Slice sampler got a NaN")
            if new_llh > llh_s:
                break
            elif new_z < 0:
                lower = new_z
            elif new_z > 0:
                upper = new_z
            else:
                raise Exception("Slice sampler shrank to zero!")

        return new_z * direction + x0

    # FIXME: I've removed how blocks work because I want to rewrite that bit.
    # so right now this samples everything as one big block.
    direction = rng.randn(x0.shape[0])
    direction = direction / np.sqrt(np.sum(direction**2))
    return direction_slice(direction, x0)
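
A hypothetical usage, drawing MCMC samples from a standard 2-d Gaussian given only its log-density:

rng = rstate(0)
logprob = lambda x: -0.5 * np.sum(x ** 2)   # unnormalized log-density
x = np.zeros(2)
samples = []
for _ in xrange(1000):
    x = _slice_sample(logprob, x, rng=rng)
    samples.append(x)
# np.mean(samples, axis=0) should be near 0 and np.var(samples, axis=0) near 1.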
Example #11
File: sampling.py Project: amoliu/pygp
def _slice_sample(logprob,
                  x0,
                  sigma=1.0,
                  step_out=True,
                  max_steps_out=1000,
                  rng=None):
    """
    Implementation of slice sampling taken almost directly from Snoek's
    spearmint package (with a few minor modifications).
    """
    rng = rstate(rng)

    def direction_slice(direction, x0):
        def dir_logprob(z):
            return logprob(direction*z + x0)

        upper = sigma*rng.rand()
        lower = upper - sigma
        llh_s = np.log(rng.rand()) + dir_logprob(0.0)

        l_steps_out = 0
        u_steps_out = 0
        if step_out:
            while dir_logprob(lower) > llh_s and l_steps_out < max_steps_out:
                l_steps_out += 1
                lower -= sigma
            while dir_logprob(upper) > llh_s and u_steps_out < max_steps_out:
                u_steps_out += 1
                upper += sigma

        while True:
            new_z = (upper - lower)*rng.rand() + lower
            new_llh = dir_logprob(new_z)
            if np.isnan(new_llh):
                raise Exception("Slice sampler got a NaN")
            if new_llh > llh_s:
                break
            elif new_z < 0:
                lower = new_z
            elif new_z > 0:
                upper = new_z
            else:
                raise Exception("Slice sampler shrank to zero!")

        return new_z*direction + x0

    # FIXME: I've removed how blocks work because I want to rewrite that bit.
    # so right now this samples everything as one big block.
    direction = rng.randn(x0.shape[0])
    direction = direction / np.sqrt(np.sum(direction**2))
    return direction_slice(direction, x0)
Example #12
    def __init__(self, model, prior, n=100, rng=None):
        self._prior = prior
        self._n = n
        self._rng = rstate(rng)

        # we won't add any data unless the model already has it.
        data = None

        if model.ndata > 0:
            data = model.data
            model = model.copy()
            model.reset()

        self._samples = [model.copy(h)
                         for h in _sample_prior(model, prior, n, rng=self._rng)]
        self._logweights = np.zeros(n) - np.log(n)
        self._loglikes = np.zeros(n)

        if data is not None:
            self.add_data(data[0], data[1])
Example #13
File: mcmc.py Project: fagan2888/pygp
    def __init__(self, model, prior, n=100, burn=100, rng=None):
        self._model = model.copy()
        self._prior = prior
        self._samples = []
        self._n = n
        self._burn = burn
        self._rng = rstate(rng)

        if self._model.ndata > 0:
            if self._burn > 0:
                sample(self._model, self._prior, self._burn, rng=self._rng)
            self._samples = sample(self._model,
                                   self._prior,
                                   self._n,
                                   raw=False,
                                   rng=self._rng)

        else:
            # FIXME: the likelihood won't play a role, so we can sample
            # directly from the prior. This of course requires the prior to
            # also be a well-defined distribution.
            pass
Example #14
File: mcmc.py Project: amoliu/pygp
    def __init__(self, model, prior, n=100, burn=100, rng=None):
        self._model = model.copy()
        self._prior = prior
        self._samples = []
        self._n = n
        self._burn = burn
        self._rng = rstate(rng)

        if self._model.ndata > 0:
            if self._burn > 0:
                sample(self._model, self._prior, self._burn, rng=self._rng)
            self._samples = sample(self._model,
                                   self._prior,
                                   self._n,
                                   raw=False,
                                   rng=self._rng)

        else:
            # FIXME: the likelihood won't play a role, so we can sample
            # directly from the prior. This of course requires the prior to
            # also be a well-defined distribution.
            pass
Example #15
    def __init__(self, N, likelihood, kernel, mean, X, y, rng=None):
        # if given a seed or an instantiated RandomState make sure that we use
        # it here, but also within the sample_spectrum code.
        rng = rstate(rng)

        if not isinstance(likelihood, Gaussian):
            raise ValueError('Fourier samples only defined for Gaussian '
                             'likelihoods')

        # this randomizes the feature.
        W, alpha = kernel.sample_spectrum(N, rng)

        self._W = W
        self._b = rng.rand(N) * 2 * np.pi
        self._a = np.sqrt(2 * alpha / N)
        self._mean = mean
        self._theta = None

        if X is not None:
            # evaluate the features
            Z = np.dot(X, self._W.T) + self._b
            Phi = np.cos(Z) * self._a

            # get the components for regression
            A = np.dot(Phi.T, Phi) + likelihood.s2 * np.eye(Phi.shape[1])
            R = sla.cholesky(A)
            r = y - mean
            p = np.sqrt(likelihood.s2) * rng.randn(N)

            # FIXME: we can do a smarter update here when the number of points
            # is less than the number of features.

            self._theta = sla.cho_solve((R, False), np.dot(Phi.T, r))
            self._theta += sla.solve_triangular(R, p)

        else:
            self._theta = rng.randn(N)
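
Given the weights constructed above, evaluating the sampled function at new inputs is one more pass through the same feature map. A sketch (a hypothetical helper written against the attributes this __init__ defines):

def fourier_sample_call(fs, X):
    # features at X, exactly as in the regression step above.
    Z = np.dot(X, fs._W.T) + fs._b
    Phi = np.cos(Z) * fs._a
    # the sampled function is the mean plus a linear function of the features.
    return fs._mean + np.dot(Phi, fs._theta)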
Example #16
    def solve_bayesopt_dim(self,
                           objective,
                           bounds,
                           kernel="se",
                           niter=150,
                           dims=2,
                           init='sobol',
                           policy='ei',
                           solver='direct',
                           recommender='latent'):
        """
        The following borrows some code from pybo and therefore retains a
        similar structure and parameters.

        From Pybo (with updates):
            Maximize the given function using Bayesian Optimization.
            Args:
                objective: function handle representing the objective function.
                bounds: bounds of the search space as a (d,2)-array.
                niter: horizon for optimization.
                dims: number of dimensions per permutation
                init: the initialization component.
                policy: the acquisition component.
                solver: the inner-loop solver component.
                recommender: the recommendation component.

            Note that the modular way in which this function has been written allows
            one to also pass parameters directly to some of the components. This works
            for the `init`, `policy`, `solver`, and `recommender` inputs. These
            components can be passed as either a string, a function, or a 2-tuple where
            the first item is a string/function and the second is a dictionary of
            additional arguments to pass to the component.
            Returns:
               Nothing, everything is written to a file
        """
        filename = self.outputdir + "_".join([policy[0], kernel, str(niter)])
        bounds = np.array(bounds, dtype=float, ndmin=2)
        # initialize the random number generator.
        rng = rstate(None)
        # get the model components.
        init, policy, solver, recommender = \
            self.get_components(init, policy, solver, recommender, rng)
        # Create the bmodel with the data
        model = self.initialmodel(objective, init, bounds, kernel)
        with open(filename, "a+") as f:
            for i, y in enumerate(model.data[1]):
                f.write("0," + str(y) + ",0,0," + ",".join([str(a) for a in model.data[0][i]]) + "\n")
        # Set up Dimension Scheduler Dictionaries
        modelsDict = {}
        dimDict = {}
        bY = np.argmax(model.data[1])
        bX = model.data[0][bY]
        objective.set_initial(bX)
        objective.set_best(model.data[1][bY])
        dimensionsProb = [1.0/len(bounds)] * len(bounds)
        self.console("Starting Bayesian Optimization with Dimensions Scheduler", 1)
        self.console("Time,Objective,Mu,Var,X", 2)
        for i in xrange(model.ndata, niter):
            start = time.clock()

            # Update the probabilities
            if (i % 50 == 0):
                mydata = model.data[0]
                mu = mydata.mean(axis=0)
                sigma = mydata.std(axis=0)
                a = (mydata - mu) / sigma
                C = np.cov(a.T)
                evals, evecs = np.linalg.eig(C)
                vars = evals / float(len(evals))
                dimensionsProb = vars / vars.sum()

            # generate new dimension and update the bounds and set the dimensions on the objective
            d = self.getRandomDimensions(dimensionsProb, dims)
            objective.set_dimensions(d)
            boundslowerdim = objective.get_bounds()
            # check if the model exists, else create a new one
            if str(d) not in modelsDict:
                tX, tY = self.getInputsDimRed(model, d)
                m = self.createNewModel(tX, tY, boundslowerdim, kernel)
                try:
                    m.add_data(tX, tY)
                except Exception:
                    self.console("An error occurred while adding data points "
                                 "to the GP during model creation. Try "
                                 "increasing the GP:sn parameter.")
                    break
                modelsDict[str(d)] = m
                dimDict[str(d)] = d
            # get model for current dimensions
            modellowerdim = modelsDict.get(str(d))

            # get the next point to evaluate.
            index = policy(modellowerdim)

            x, _ = solver(index, boundslowerdim)

            # make an observation and record it.
            try:
                y = objective(x)
                yp = str(y)
                curX = objective.get_prev_input()
                model.add_data(curX, y)
                modellowerdim.add_data(x, y)

                if objective.get_best() is not None:
                    if (y > objective.get_best()):
                        objective.set_best(y)
                        objective.change_initial(x)
                else:
                    objective.set_best(y)
                    objective.change_initial(x)
            except Exception:
                curX = objective.get_prev_input()
                yp = "Failed"

            glomu, glovar = model.posterior(curX, grad=False)[:2]
            # Write to the file
            interval = time.clock() - start
            data = str(interval) + "," + yp + "," + str(glomu[0]) + "," + str(glovar[0]) + "," + ",".join(
                [str(a) for a in curX])
            self.console(data, 2)
            with open(filename, "a+") as f:
                f.write(data + "\n")


        # Get the recommender point from the model
        recx = recommender(model, bounds)
        recmu, recvar = model.posterior(recx, grad=False)[:2]
        with open(filename, "a+") as f:
            f.write("0,Rec.," + str(recmu) + "," + str(recvar) + "," + ",".join([str(s) for s in recx]) + "\n")
Example #17
File: se.py Project: cottrell/pygp
    def sample_spectrum(self, N, rng=None):
        rng = rstate(rng)
        sf2 = np.exp(self._logsf * 2)
        ell = np.exp(self._logell)
        W = rng.randn(N, self.ndim) / ell
        return W, sf2
Example #18
File: bayesopt.py Project: jhartford/pybo
def solve_bayesopt(objective,
                   bounds,
                   grad,
                   niter=100,
                   init='middle',
                   policy='ei',
                   solver='lbfgs',
                   recommender='latent',
                   model=None,
                   noisefree=False,
                   ftrue=None,
                   rng=None,
                   callback=None,
                   n_grad=20):
    """
    Maximize the given function using Bayesian Optimization.

    Args:
        objective: function handle representing the objective function.
        bounds: bounds of the search space as a (d,2)-array.
        grad: function handle for the gradient of the objective (accepted
              here but unused in the body shown).
        niter: horizon for optimization.
        init: the initialization component.
        policy: the acquisition component.
        solver: the inner-loop solver component.
        recommender: the recommendation component.
        model: the Bayesian model instantiation.
        noisefree: a boolean denoting that the model is noisefree; this only
                   applies if a default model is used (ie. it is ignored if the
                   model argument is used).
        ftrue: a ground-truth function (for evaluation).
        rng: either an RandomState object or an integer used to seed the state;
             this will be fed to each component that requests randomness.
        callback: a function to call on each iteration for visualization.
        n_grad: number of gradient steps (accepted here but unused in the
                body shown).

    Note that the modular way in which this function has been written allows
    one to also pass parameters directly to some of the components. This works
    for the `init`, `policy`, `solver`, and `recommender` inputs. These
    components can be passed as either a string, a function, or a 2-tuple where
    the first item is a string/function and the second is a dictionary of
    additional arguments to pass to the component.

    Returns:
        A numpy record array containing a trace of the optimization process.
        The fields of this array are `x`, `y`, and `xbest` corresponding to the
        query locations, outputs, and recommendations at each iteration. If
        ground-truth is known an additional field `fbest` will be included.
    """
    # make sure the bounds are a 2d-array.
    bounds = np.array(bounds, dtype=float, ndmin=2)

    # see if the query object itself defines ground truth.
    if (ftrue is None) and hasattr(objective, 'get_f'):
        ftrue = objective.get_f

    # initialize the random number generator.
    rng = rstate(rng)

    # get the model components.
    init, policy, solver, recommender = \
        get_components(init, policy, solver, recommender, rng)

    # create a list of initial points to query.
    X = init(bounds)
    Y = [objective(x) for x in X]

    if model is None:
        # initialize parameters of a simple GP model.
        sf = np.std(Y) if (len(Y) > 1) else 10.
        mu = np.mean(Y)
        ell = bounds[:, 1] - bounds[:, 0]

        # FIXME: this may not be a great setting for the noise parameter
        sn = 1e-5 if noisefree else 1e-3

        # specify a hyperprior for the GP.
        prior = {
            'sn': (
                None if noisefree else
                pygp.priors.Horseshoe(scale=0.1, min=1e-5)),
            'sf': pygp.priors.LogNormal(mu=np.log(sf), sigma=1., min=1e-6),
            'ell': pygp.priors.Uniform(ell / 100, ell * 2),
            'mu': pygp.priors.Gaussian(mu, sf)}

        # create the GP model (with hyperprior).
        model = pygp.BasicGP(sn, sf, ell, mu, kernel='matern5')
        model = pygp.meta.MCMC(model, prior, n=10, burn=100, rng=rng)

    # add any initial data to our model.
    model.add_data(X, Y)

    # allocate a datastructure containing "convergence" info.
    info = np.zeros(niter, [('x', np.float, (len(bounds),)),
                            ('y', np.float),
                            ('xbest', np.float, (len(bounds),))])

    # initialize the data.
    info['x'][:len(X)] = X
    info['y'][:len(Y)] = Y
    info['xbest'][:len(Y)] = [X[np.argmax(Y[:i+1])] for i in xrange(len(Y))]

    for i in xrange(model.ndata, niter):
        # get the next point to evaluate.
        index = policy(model)
        x, _ = solver(index, bounds)

        # deal with any visualization.
        if callback is not None:
            callback(model, bounds, info[:i], x, index, ftrue)

        # make an observation and record it.
        y = objective(x)
        model.add_data(x, y)

        # record everything.
        info[i] = (x, y, recommender(model, bounds))

    if ftrue is not None:
        fbest = ftrue(info['xbest'])
        info = append_fields(info, 'fbest', fbest, usemask=False)

    return info
Example #19
    def solve_bayesopt(self,
                       objective,
                       bounds,
                       kernel="se",
                       niter=150,
                       init='sobol',
                       policy='ei',
                       solver='direct',
                       recommender='latent'):
        """
        The following borrows some code from pybo and therefore retains a
        similar structure and parameters.

        From Pybo:
            Maximize the given function using Bayesian Optimization.
            Args:
                objective: function handle representing the objective function.
                bounds: bounds of the search space as a (d,2)-array.
                niter: horizon for optimization.
                init: the initialization component.
                policy: the acquisition component.
                solver: the inner-loop solver component.
                recommender: the recommendation component.

            Note that the modular way in which this function has been written allows
            one to also pass parameters directly to some of the components. This works
            for the `init`, `policy`, `solver`, and `recommender` inputs. These
            components can be passed as either a string, a function, or a 2-tuple where
            the first item is a string/function and the second is a dictionary of
            additional arguments to pass to the component.
            Returns:
               A dictionary with the model, recommended values, and finished iteration
            """
        filename = self.outputdir + "_".join([policy[0], kernel, str(niter)])
        bounds = np.array(bounds, dtype=float, ndmin=2)
        rng = rstate(None)
        # get the model components.
        init, policy, solver, recommender = \
            self.get_components(init, policy, solver, recommender, rng)
        # Create the model with the data
        model = self.initialmodel(objective, init, bounds, kernel)
        # Write the initial data to a file
        with open(filename, "a+") as f:
            for i, y in enumerate(model.data[1]):
                f.write("0," + str(y) + ",0,0," + ",".join([str(a) for a in model.data[0][i]]) + "\n")
        # Send data to the observer
        self.pipeout.send({"inidata": {"x": model.data[0].tolist(), "y": model.data[1].tolist()}})
        self.console("Starting Bayesian Optimization")
        # Start BayesOpt
        curiter = model.ndata  # so the return below is defined even if the loop never runs.
        for i in xrange(model.ndata, niter):
            # Record current time and iteration
            start = time.clock()
            curiter = i
            # Check if Observer has terminated the process
            if self.pipeout.poll():
                o = self.pipeout.recv()
                if "stop" in o:
                    self.stop = o["stop"]
            if self.stop:
                break
            # get the next point to evaluate.
            index = policy(model)
            x, _ = solver(index, bounds)
            glomu, glovar = model.posterior(x, grad=False)[:2]
            # make an observation and record it.
            try:
                y = objective(x)
                model.add_data(x, y)
                yp = str(y)
            except Exception:
                yp = "Failed"
            # Send out the data to the observer and write to the file
            interval = time.clock() - start
            with open(filename, "a+") as f:
                f.write(str(interval) + "," + yp + "," + str(glomu[0]) + "," + str(glovar[0]) + "," + ",".join(
                    [str(a) for a in x]) + "\n")
            if yp != "Failed":
                self.pipeout.send({"data": {"y": y, "x": x, "mu": glomu[0], "var": glovar[0], "time": interval}})
        # Get the recommender point from the model
        recx = recommender(model, bounds)
        recmu, recvar = model.posterior(recx, grad=False)[:2]
        with open(filename, "a+") as f:
            f.write("0,Rec.," + str(recmu[0]) + "," + str(recvar[0]) + "," + ",".join([str(s) for s in recx]) + "\n")
        self.console("Finished experiment")
        return {"model": model, "iterfinish": curiter, "rec": {"x": recx, "mu": recmu, "var": recvar}}
Example #20
File: gaussian.py Project: amoliu/pygp
    def sample(self, f, rng=None):
        rng = rstate(rng)
        return f + rng.normal(size=len(f), scale=np.exp(self._logsigma))
Example #21
File: sampling.py Project: amoliu/pygp
def sample(gp, priors, n, raw=True, rng=None):
    rng = rstate(rng)
    priors = dict(priors)
    active = np.ones(gp.nhyper, dtype=bool)
    logged = np.ones(gp.nhyper, dtype=bool)

    for (key, block, log) in get_params(gp):
        inactive = (key in priors) and (priors[key] is None)
        logged[block] = log
        active[block] = not inactive
        if inactive:
            del priors[key]
        else:
            priors[key] = (block, log, priors[key])

    # priors is now just a list of the form (block, log, prior).
    priors = priors.values()

    # get the initial hyperparameters and transform into the non-log space.
    hyper0 = gp.get_hyper()
    hyper0[logged] = np.exp(hyper0[logged])

    def logprob(x):
        # copy the initial hyperparameters and then assign the "active"
        # parameters that come from x.
        hyper = hyper0.copy()
        hyper[active] = x
        logprob = 0

        # compute the prior probabilities. we do this first so that if there
        # are any infs they'll be caught in the least expensive computations
        # first.
        for block, log, prior in priors:
            logprob += prior.logprior(hyper[block])
            if np.isinf(logprob):
                break

        # now compute the likelihood term. note that we'll have to take the log
        # of any logspace parameters before calling set_hyper.
        if not np.isinf(logprob):
            hyper[logged] = np.log(hyper[logged])
            gp.set_hyper(hyper)
            logprob += gp.loglikelihood()

        return logprob

    # create a big list of the hyperparameters so that we can just assign to
    # the components that are active. also get an initial sample x
    # corresponding only to the active parts of hyper0.
    hypers = np.tile(hyper0, (n, 1))
    x = hyper0.copy()[active]

    # do the sampling.
    for i in xrange(n):
        x = _slice_sample(logprob, x, rng=rng)
        hypers[i][active] = x

    # change the logspace components back into logspace.
    hypers[:, logged] = np.log(hypers[:, logged])

    # make sure the gp gets updated to the last sampled hyperparameter.
    gp.set_hyper(hypers[-1])

    if raw:
        return hypers
    else:
        return [gp.copy(h) for h in hypers]
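
A hypothetical call, assuming `gp` is a pygp.BasicGP with hyperparameter blocks 'sn', 'sf', 'ell', and 'mu' (prior constructors as in the solve_bayesopt examples):

priors = {'sn': pygp.priors.Horseshoe(scale=0.1, min=1e-5),
          'sf': pygp.priors.LogNormal(mu=0., sigma=1.),
          'ell': pygp.priors.Uniform(0.01, 2.),
          'mu': None}                 # None holds the mean block fixed.
hypers = sample(gp, priors, 50, rng=0)             # a (50, gp.nhyper) array
models = sample(gp, priors, 50, raw=False, rng=0)  # 50 sampled model copies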
Example #22
File: priors.py Project: amoliu/pygp
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        return np.vstack(self._min + rng.gamma(k, s, size=size)
                         for k, s in zip(self._k, self._scale)).T
Example #23
File: functions.py Project: aaronkl/pybo
    def __init__(self, sigma=0.0, rng=None):
        self._sigma = sigma
        self._rng = rstate(rng)
Example #24
File: bayesopt.py Project: wavelets/pybo
def solve_bayesopt(objective,
                   bounds,
                   niter=100,
                   init='middle',
                   policy='ei',
                   solver='lbfgs',
                   recommender='latent',
                   model=None,
                   noisefree=False,
                   ftrue=None,
                   rng=None,
                   callback=None):
    """
    Maximize the given function using Bayesian Optimization.

    Args:
        objective: function handle representing the objective function.
        bounds: bounds of the search space as a (d,2)-array.
        niter: horizon for optimization.
        init: the initialization component.
        policy: the acquisition component.
        solver: the inner-loop solver component.
        recommender: the recommendation component.
        model: the Bayesian model instantiation.
        noisefree: a boolean denoting that the model is noisefree; this only
                   applies if a default model is used (ie. it is ignored if the
                   model argument is used).
        ftrue: a ground-truth function (for evaluation).
        rng: either an RandomState object or an integer used to seed the state;
             this will be fed to each component that requests randomness.
        callback: a function to call on each iteration for visualization.

    Note that the modular way in which this function has been written allows
    one to also pass parameters directly to some of the components. This works
    for the `init`, `policy`, `solver`, and `recommender` inputs. These
    components can be passed as either a string, a function, or a 2-tuple where
    the first item is a string/function and the second is a dictionary of
    additional arguments to pass to the component.

    Returns:
        A numpy record array containing a trace of the optimization process.
        The fields of this array are `x`, `y`, and `xbest` corresponding to the
        query locations, outputs, and recommendations at each iteration. If
        ground-truth is known an additional field `fbest` will be included.
    """
    # make sure the bounds are a 2d-array.
    bounds = np.array(bounds, dtype=float, ndmin=2)

    # see if the query object itself defines ground truth.
    if (ftrue is None) and hasattr(objective, 'get_f'):
        ftrue = objective.get_f

    # initialize the random number generator.
    rng = rstate(rng)

    # get the model components.
    init, policy, solver, recommender = \
        get_components(init, policy, solver, recommender, rng)

    # create a list of initial points to query.
    X = init(bounds)
    Y = [objective(x) for x in X]

    if model is None:
        # initialize parameters of a simple GP model.
        sf = np.std(Y) if (len(Y) > 1) else 10.
        mu = np.mean(Y)
        ell = bounds[:, 1] - bounds[:, 0]

        # FIXME: this may not be a great setting for the noise parameter
        sn = 1e-5 if noisefree else 1e-3

        # specify a hyperprior for the GP.
        prior = {
            'sn': (None if noisefree else pygp.priors.Horseshoe(scale=0.1,
                                                                min=1e-5)),
            'sf':
            pygp.priors.LogNormal(mu=np.log(sf), sigma=1., min=1e-6),
            'ell':
            pygp.priors.Uniform(ell / 100, ell * 2),
            'mu':
            pygp.priors.Gaussian(mu, sf)
        }

        # create the GP model (with hyperprior).
        model = pygp.BasicGP(sn, sf, ell, mu, kernel='matern5')
        model = pygp.meta.MCMC(model, prior, n=10, burn=100, rng=rng)

    # add any initial data to our model.
    model.add_data(X, Y)

    # allocate a datastructure containing "convergence" info.
    info = np.zeros(niter, [('x', np.float, (len(bounds), )), ('y', np.float),
                            ('xbest', np.float, (len(bounds), ))])

    # initialize the data.
    info['x'][:len(X)] = X
    info['y'][:len(Y)] = Y
    info['xbest'][:len(Y)] = [X[np.argmax(Y[:i + 1])] for i in xrange(len(Y))]

    for i in xrange(model.ndata, niter):
        # get the next point to evaluate.
        index = policy(model)
        x, _ = solver(index, bounds)

        # deal with any visualization.
        if callback is not None:
            callback(model, bounds, info[:i], x, index, ftrue)

        # make an observation and record it.
        y = objective(x)
        model.add_data(x, y)

        # record everything.
        info[i] = (x, y, recommender(model, bounds))

    if ftrue is not None:
        fbest = ftrue(info['xbest'])
        info = append_fields(info, 'fbest', fbest, usemask=False)

    return info
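
A hypothetical end-to-end run on a toy 1-d objective, including a component passed as a 2-tuple with extra arguments (whether the EI policy accepts an `xi` exploration parameter is an assumption):

objective = lambda x: -np.sum((x - 0.3) ** 2)
info = solve_bayesopt(objective,
                      bounds=[[0., 1.]],
                      niter=30,
                      policy=('ei', {'xi': 0.1}),
                      rng=0)
xbest = info['xbest'][-1]   # the recommendation after the final iteration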
Example #25
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        return np.vstack(self._min + rng.lognormal(m, s, size=size)
                         for m, s in zip(self._mu, self._sigma)).T
Example #26
File: bayesopt.py Project: jhartford/pybo
def solve_hyperopt(objective,
                   bounds,
                   niter=100,
                   init='middle',
                   policy='ei',
                   solver='lbfgs',
                   recommender='latent',
                   model=None,
                   noisefree=False,
                   ftrue=None,
                   rng=None,
                   callback=None,
                   grad=None,
                   n_grad=20):
    """
    Maximize the given function using Bayesian Optimization.

    Args:
        objective: function handle representing the objective function.
        bounds: bounds of the search space as a (d,2)-array.
        niter: horizon for optimization.
        init: the initialization component.
        policy: the acquisition component.
        solver: the inner-loop solver component.
        recommender: the recommendation component.
        model: the Bayesian model instantiation.
        noisefree: a boolean denoting that the model is noisefree; this only
                   applies if a default model is used (ie. it is ignored if the
                   model argument is used).
        ftrue: a ground-truth function (for evaluation).
        rng: either an RandomState object or an integer used to seed the state;
             this will be fed to each component that requests randomness.
        callback: a function to call on each iteration for visualization.
        grad: function handle for the gradient of the objective, fed to the
              inner BFGS steps.
        n_grad: number of gradient steps to take per outer iteration.

    Note that the modular way in which this function has been written allows
    one to also pass parameters directly to some of the components. This works
    for the `init`, `policy`, `solver`, and `recommender` inputs. These
    components can be passed as either a string, a function, or a 2-tuple where
    the first item is a string/function and the second is a dictionary of
    additional arguments to pass to the component.

    Returns:
        A numpy record array containing a trace of the optimization process.
        The fields of this array are `x`, `y`, and `xbest` corresponding to the
        query locations, outputs, and recommendations at each iteration. If
        ground-truth is known an additional field `fbest` will be included.
    """
    # make sure the bounds are a 2d-array.
    bounds = np.array(bounds, dtype=float, ndmin=2)

    # see if the query object itself defines ground truth.
    if (ftrue is None) and hasattr(objective, 'get_f'):
        ftrue = objective.get_f

    # initialize the random number generator.
    rng = rstate(rng)

    # get the model components.
    init, policy, solver, recommender = \
        get_components(init, policy, solver, recommender, rng)

    # create a list of initial points to query.
    X = init(bounds)
    Y = [objective(x) for x in X]

    # add any initial data to our model.
    model.add_data(X, Y)

    # allocate a datastructure containing "convergence" info.
    info = np.zeros(niter, [('x', np.float, (len(bounds),)),
                            ('y', np.float),
                            ('xbest', np.float, (len(bounds),))])

    # initialize the data.
    info['x'][:len(X)] = X
    info['y'][:len(Y)] = Y
    info['xbest'][:len(Y)] = [X[np.argmax(Y[:i+1])] for i in xrange(len(Y))]

    # BFGS callback
    X_y = ([], [])
    def callbackF(Xi):
        # the closure appends in place, so no global declaration is needed.
        X_y[0].append(Xi)
        X_y[1].append(objective(Xi))

    # starting point for the gradient steps (an assumption: begin from the
    # last initial query point).
    x0 = X[-1]

    for i in xrange(niter):
        # take n_grad gradient steps
        res = minimize(objective, x0, method='BFGS',
                   jac=grad,
                   options={'gtol': 1e-6, 'disp': True, 'maxiter': n_grad},
                   callback=callbackF)

        # get the next point to evaluate.
        index = policy(model)
        x, _ = solver(index, bounds)

        # deal with any visualization.
        if callback is not None:
            callback(model, bounds, info[:i], x, index, ftrue)

        # make an observation and record it.
        y = objective(x)
        model.add_data(x, y)

        # record everything.
        info[i] = (x, y, recommender(model, bounds))

    if ftrue is not None:
        fbest = ftrue(info['xbest'])
        info = append_fields(info, 'fbest', fbest, usemask=False)

    return info
Example #27
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        return self._a + (self._b - self._a) * rng.rand(size, self.ndim)
Example #28
    def __init__(self, sigma=0.0, rng=None):
        self._sigma = sigma
        self._rng = rstate(rng)
Example #29
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        return np.vstack(self._min + rng.gamma(k, s, size=size)
                         for k, s in zip(self._k, self._scale)).T
Example #30
    def sample_spectrum(self, N, rng=None):
        rng = rstate(rng)
        sf2 = np.exp(self._logsf * 2)
        ell = np.exp(self._logell)
        W = rng.randn(N, self.ndim) / ell
        return W, sf2
Example #31
File: priors.py Project: amoliu/pygp
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        return np.vstack(self._min + rng.lognormal(m, s, size=size)
                         for m, s in zip(self._mu, self._sigma)).T
Example #32
    def solve_bayesopt(self,
                       objective,
                       bounds,
                       kernel="se",
                       niter=150,
                       init='sobol',
                       policy='ei',
                       solver='direct',
                       recommender='latent'):
        """
        The following borrows some code from pybo and therefore retains a
        similar structure and parameters.

        From Pybo:
            Maximize the given function using Bayesian Optimization.
            Args:
                objective: function handle representing the objective function.
                bounds: bounds of the search space as a (d,2)-array.
                niter: horizon for optimization.
                init: the initialization component.
                policy: the acquisition component.
                solver: the inner-loop solver component.
                recommender: the recommendation component.

            Note that the modular way in which this function has been written allows
            one to also pass parameters directly to some of the components. This works
            for the `init`, `policy`, `solver`, and `recommender` inputs. These
            components can be passed as either a string, a function, or a 2-tuple where
            the first item is a string/function and the second is a dictionary of
            additional arguments to pass to the component.
            Returns:
               Nothing, everything is written to a file
        """
        filename = self.outputdir + "_".join([policy[0], kernel, str(niter)])
        bounds = np.array(bounds, dtype=float, ndmin=2)
        rng = rstate(None)
        # get the model components.
        init, policy, solver, recommender = \
            self.get_components(init, policy, solver, recommender, rng)
        # Create the model with the data
        model = self.initialmodel(objective, init, bounds, kernel)
        with open(filename, "a+") as f:
            for i, y in enumerate(model.data[1]):
                f.write("0," + str(y) + ",0,0," + ",".join([str(a) for a in model.data[0][i]]) + "\n")

        # Update the console
        self.console("Starting Bayesian Optimization", 1)
        self.console("Time,Objective,Mu,Var,X", 2)
        # Start BayesOpt
        for i in xrange(model.ndata, niter):
            # Record current time
            start = time.clock()
            # get the next point to evaluate.
            index = policy(model)

            x, _ = solver(index, bounds)
            glomu, glovar = model.posterior(x, grad=False)[:2]

            try:
                # make an observation and record it.
                y = objective(x)
                model.add_data(x, y)
                yp = str(y)
            except Exception:
                yp = "Failed"
            # Write to the file
            interval = time.clock() - start
            data = str(interval) + "," + yp + "," + str(glomu[0]) + "," + str(glovar[0]) + "," + ",".join(
                [str(a) for a in x])
            self.console(data, 2)
            with open(filename, "a+") as f:
                f.write(data + "\n")
        # Get the recommender point from the model
        recx = recommender(model, bounds)
        recmu, recvar = model.posterior(recx, grad=False)[:2]
        with open(filename, "a+") as f:
            f.write("0,Rec.," + str(recmu[0]) + "," + str(recvar[0]) + "," + ",".join([str(s) for s in recx]) + "\n")
Example #33
File: priors.py Project: amoliu/pygp
    def sample(self, size=1, rng=None):
        rng = rstate(rng)
        return self._a + (self._b - self._a) * rng.rand(size, self.ndim)
Example #34
def solve_hyperopt(objective,
                   bounds,
                   niter=100,
                   init='middle',
                   policy='ei',
                   solver='lbfgs',
                   recommender='latent',
                   model=None,
                   noisefree=False,
                   ftrue=None,
                   rng=None,
                   callback=None,
                   grad=None,
                   n_grad=20):
    """
    Maximize the given function using Bayesian Optimization.

    Args:
        objective: function handle representing the objective function.
        bounds: bounds of the search space as a (d,2)-array.
        niter: horizon for optimization.
        init: the initialization component.
        policy: the acquisition component.
        solver: the inner-loop solver component.
        recommender: the recommendation component.
        model: the Bayesian model instantiation.
        noisefree: a boolean denoting that the model is noisefree; this only
                   applies if a default model is used (ie. it is ignored if the
                   model argument is used).
        ftrue: a ground-truth function (for evaluation).
        rng: either an RandomState object or an integer used to seed the state;
             this will be fed to each component that requests randomness.
        callback: a function to call on each iteration for visualization.
        grad: function handle for the gradient of the objective, fed to the
              inner BFGS steps.
        n_grad: number of gradient steps to take per outer iteration.

    Note that the modular way in which this function has been written allows
    one to also pass parameters directly to some of the components. This works
    for the `init`, `policy`, `solver`, and `recommender` inputs. These
    components can be passed as either a string, a function, or a 2-tuple where
    the first item is a string/function and the second is a dictionary of
    additional arguments to pass to the component.

    Returns:
        A numpy record array containing a trace of the optimization process.
        The fields of this array are `x`, `y`, and `xbest` corresponding to the
        query locations, outputs, and recommendations at each iteration. If
        ground-truth is known an additional field `fbest` will be included.
    """
    # make sure the bounds are a 2d-array.
    bounds = np.array(bounds, dtype=float, ndmin=2)

    # see if the query object itself defines ground truth.
    if (ftrue is None) and hasattr(objective, 'get_f'):
        ftrue = objective.get_f

    # initialize the random number generator.
    rng = rstate(rng)

    # get the model components.
    init, policy, solver, recommender = \
        get_components(init, policy, solver, recommender, rng)

    # create a list of initial points to query.
    X = init(bounds)
    Y = [objective(x) for x in X]

    # add any initial data to our model.
    model.add_data(X, Y)

    # allocate a datastructure containing "convergence" info.
    info = np.zeros(niter, [('x', np.float, (len(bounds), )), ('y', np.float),
                            ('xbest', np.float, (len(bounds), ))])

    # initialize the data.
    info['x'][:len(X)] = X
    info['y'][:len(Y)] = Y
    info['xbest'][:len(Y)] = [X[np.argmax(Y[:i + 1])] for i in xrange(len(Y))]

    # BFGS callback
    X_y = ([], [])

    def callbackF(Xi):
        # the closure appends in place, so no global declaration is needed.
        X_y[0].append(Xi)
        X_y[1].append(objective(Xi))

    # starting point for the gradient steps (an assumption: begin from the
    # last initial query point).
    x0 = X[-1]

    for i in xrange(niter):
        # take n_grad gradient steps
        res = minimize(objective,
                       x0,
                       method='BFGS',
                       jac=grad,
                       options={
                           'gtol': 1e-6,
                           'disp': True,
                           'maxiter': n_grad
                       },
                       callback=callbackF)

        # get the next point to evaluate.
        index = policy(model)
        x, _ = solver(index, bounds)

        # deal with any visualization.
        if callback is not None:
            callback(model, bounds, info[:i], x, index, ftrue)

        # make an observation and record it.
        y = objective(x)
        model.add_data(x, y)

        # record everything.
        info[i] = (x, y, recommender(model, bounds))

    if ftrue is not None:
        fbest = ftrue(info['xbest'])
        info = append_fields(info, 'fbest', fbest, usemask=False)

    return info
Example #35
def sample(gp, priors, n, raw=True, rng=None):
    rng = rstate(rng)
    priors = dict(priors)
    active = np.ones(gp.nhyper, dtype=bool)
    logged = np.ones(gp.nhyper, dtype=bool)

    for (key, block, log) in get_params(gp):
        inactive = (key in priors) and (priors[key] is None)
        logged[block] = log
        active[block] = not inactive
        if inactive:
            del priors[key]
        else:
            priors[key] = (block, log, priors[key])

    # priors is now just a list of the form (block, log, prior).
    priors = priors.values()

    # get the initial hyperparameters and transform into the non-log space.
    hyper0 = gp.get_hyper()
    hyper0[logged] = np.exp(hyper0[logged])

    def logprob(x):
        # copy the initial hyperparameters and then assign the "active"
        # parameters that come from x.
        hyper = hyper0.copy()
        hyper[active] = x
        logprob = 0

        # compute the prior probabilities. we do this first so that if there
        # are any infs they'll be caught in the least expensive computations
        # first.
        for block, log, prior in priors:
            logprob += prior.logprior(hyper[block])
            if np.isinf(logprob):
                break

        # now compute the likelihood term. note that we'll have to take the log
        # of any logspace parameters before calling set_hyper.
        if not np.isinf(logprob):
            hyper[logged] = np.log(hyper[logged])
            gp.set_hyper(hyper)
            logprob += gp.loglikelihood()

        return logprob

    # create a big list of the hyperparameters so that we can just assign to
    # the components that are active. also get an initial sample x
    # corresponding only to the active parts of hyper0.
    hypers = np.tile(hyper0, (n, 1))
    x = hyper0.copy()[active]

    # do the sampling.
    for i in xrange(n):
        x = _slice_sample(logprob, x, rng=rng)
        hypers[i][active] = x

    # change the logspace components back into logspace.
    hypers[:, logged] = np.log(hypers[:, logged])

    # make sure the gp gets updated to the last sampled hyperparameter.
    gp.set_hyper(hypers[-1])

    if raw:
        return hypers
    else:
        return [gp.copy(h) for h in hypers]