Example #1
def test_repeated_prediction_cache():
    kernel = kernels.ExpSquaredKernel(1.0)
    gp = GP(kernel)

    x = np.array((-1, 0, 1))
    gp.compute(x)

    t = np.array((-.5, .3, 1.2))

    y = x / x.std()
    mu0, mu1 = (gp.predict(y, t, return_cov=False) for _ in range(2))
    assert np.array_equal(mu0, mu1), \
        "Identical training data must give identical predictions " \
        "(problem with GP cache)."

    y2 = 2 * y
    mu2 = gp.predict(y2, t, return_cov=False)
    assert not np.array_equal(mu0, mu2), \
        "Different training data must give different predictions " \
        "(problem with GP cache)."

    a0 = gp._alpha
    gp.kernel[0] += 0.1
    gp.recompute()
    gp._compute_alpha(y2, True)
    a1 = gp._alpha
    assert not np.allclose(a0, a1), \
        "Different kernel parameters must give different alphas " \
        "(problem with GP cache)."

    mu, cov = gp.predict(y2, t)
    _, var = gp.predict(y2, t, return_var=True)
    assert np.allclose(np.diag(cov), var), \
        "The predictive variance must be equal to the diagonal of the " \
        "predictive covariance."
Example #2
File: gp.py Project: Cadair/k2sc
class GeorgeGP(object):
    def __init__(self, kernel):
        self.kernel = kernel
        self._gp = GP(kernel._k)
        self._y     = None       ## Cached values
        self._x     = None       ## Cached inputs
        self._dirty = True       ## Flag telling if the arrays are up to date


    @property
    def is_dirty(self):
        return self._dirty

    def set_dirty(self, is_dirty=True):
        self._dirty = is_dirty

    def set_pv(self, pv=None):
        if pv is not None and not array_equal(pv, self.kernel._pv):
            self.kernel.set_pv(pv)
            self._gp.kernel = self.kernel._k
            self.set_dirty()

    def set_inputs(self, x=None):
        if x is not None and not array_equal(x, self._x):
            self._x = x
            self.set_dirty()

    def _covariance_matrix(self, x1, x2=None, pv=None, separate=False):
        self.set_pv(pv)
        if separate:
            return (self.kernel._k1.value(x1, x2),
                    self.kernel._k2.value(x1, x2))
        else:
            return self.kernel._k.value(x1, x2)
    
    def compute(self, x=None, pv=None):
        self.set_pv(pv)
        self.set_inputs(x)
        if self.is_dirty:
            self._gp.compute(self._x, yerr=self.kernel._pm[-1], sort=False)
            self.set_dirty(False)
    
    def negll(self, pv, y=None):
        y = y if y is not None else self._y
        self.compute(self._x, pv)
        return -self._gp.lnlikelihood(y)

    def predict(self, x, mean_only=True):
        return self._gp.predict(self._y, x, mean_only=mean_only)

    def predict_components(self, pv, y, x1, x2=None):
        self.compute(x1, pv)
        b  = self._gp.solver.apply_inverse(y)
        K1 = self.kernel._k1.value(x1, x2)
        K2 = self.kernel._k2.value(x1, x2)
        mu_time = dot(K1,b)
        mu_pos  = dot(K2,b)
        return mu_time, mu_pos
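
A minimal sketch of the additive-component prediction that predict_components performs, written with plain george objects instead of the k2sc kernel wrapper. The kernels and toy data below are hypothetical, and the calls assume the same older george API (kernel.value, solver.apply_inverse) used in this example:

import numpy as np
from george import GP, kernels

x = np.sort(np.random.uniform(0.0, 10.0, 200))
y = np.sin(x) + 0.1 * np.random.randn(200)

k1 = 1.0 * kernels.ExpSquaredKernel(3.0)   # slowly varying ("time") component
k2 = 0.1 * kernels.ExpSquaredKernel(0.3)   # rapidly varying ("position") component
gp = GP(k1 + k2)
gp.compute(x, yerr=0.1)

b = gp.solver.apply_inverse(y.copy())      # b = (K1 + K2 + yerr^2 I)^{-1} y; copy because
                                           # some george versions apply the solve in place
mu_slow = np.dot(k1.value(x), b)           # mean attributed to the first kernel
mu_fast = np.dot(k2.value(x), b)           # mean attributed to the second kernel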
Example #3
class GeorgeGP(object):
    def __init__(self, kernel):
        self.kernel = kernel
        self._gp = GP(kernel._k)
        self._y = None  ## Cached values
        self._x = None  ## Cached inputs
        self._dirty = True  ## Flag telling if the arrays are up to date

    @property
    def is_dirty(self):
        return self._dirty

    def set_dirty(self, is_dirty=True):
        self._dirty = is_dirty

    def set_pv(self, pv=None):
        if pv is not None and not array_equal(pv, self.kernel._pv):
            self.kernel.set_pv(pv)
            self._gp.kernel = self.kernel._k
            self.set_dirty()

    def set_inputs(self, x=None):
        if x is not None and not array_equal(x, self._x):
            self._x = x
            self.set_dirty()

    def _covariance_matrix(self, x1, x2=None, pv=None, separate=False):
        self.set_pv(pv)
        if separate:
            return (self.kernel._k1.value(x1, x2),
                    self.kernel._k2.value(x1, x2))
        else:
            return self.kernel._k.value(x1, x2)

    def compute(self, x=None, pv=None):
        self.set_pv(pv)
        self.set_inputs(x)
        if self.is_dirty:
            self._gp.compute(self._x, yerr=self.kernel._pm[-1], sort=False)
            self.set_dirty(False)

    def negll(self, pv, y=None):
        y = y if y is not None else self._y
        self.compute(self._x, pv)
        return -self._gp.lnlikelihood(y)

    def predict(self, x, mean_only=True):
        return self._gp.predict(self._y, x, mean_only=mean_only)

    def predict_components(self, pv, y, x1, x2=None):
        self.compute(x1, pv)
        b = self._gp.solver.apply_inverse(y)
        K1 = self.kernel._k1.value(x1, x2)
        K2 = self.kernel._k2.value(x1, x2)
        mu_time = dot(K1, b)
        mu_pos = dot(K2, b)
        return mu_time, mu_pos
Example #4
def test_predict_single(solver, seed=1234, N=201, yerr=0.1):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)
    mu0, var0 = gp.predict(y, [0.0], return_var=True)
    mu, var = gp.predict(y, [0.0, 1.0], return_var=True)
    _, cov = gp.predict(y, [0.0, 1.0])
    assert np.allclose(mu0, mu[0])
    assert np.allclose(var0, var[0])
    assert np.allclose(var0, cov[0, 0])
Example #5
def test_prediction(solver, seed=42):
    """Basic sanity checks for GP regression."""

    np.random.seed(seed)

    kernel = kernels.ExpSquaredKernel(1.0)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, white_noise=0.0, **kwargs)

    x0 = np.linspace(-10, 10, 500)
    x = np.sort(np.random.uniform(-10, 10, 300))
    gp.compute(x)

    y = np.sin(x)
    mu, cov = gp.predict(y, x0)

    Kstar = gp.get_matrix(x0, x)
    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += 1.0
    mu0 = np.dot(Kstar, np.linalg.solve(K, y))
    print(np.abs(mu - mu0).max())
    assert np.allclose(mu, mu0)
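
For reference, the mu0 computed by hand above is the standard GP predictive mean K_* (K + sigma_n^2 I)^{-1} y, with white-noise variance exp(0.0) = 1.0 added on the training diagonal (white_noise is the log of the added variance). The analogous covariance check could be appended inside test_prediction as sketched below; it is not part of the original test, and exact agreement may depend on how the installed george version treats white noise at the test inputs:

    # predictive covariance: K(x0, x0) - K_* (K + sigma_n^2 I)^{-1} K_*^T
    cov0 = gp.get_matrix(x0) - np.dot(Kstar, np.linalg.solve(K, Kstar.T))
    print(np.abs(cov - cov0).max())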
Example #6
def plotpred_QP(p, t, y):
    kern, sig = kern_QP(p)
    gp = GP(kern)
    yerr = np.ones(len(y)) * sig
    gp.compute(t, yerr)
    mu, cov = gp.predict(y, t)
    var = np.diag(cov)                # predictive variance
    sigma = np.sqrt(var + yerr**2)    # total std. dev. including the white-noise term
    pl.fill_between(t, mu + 2 * sigma, mu - 2 * sigma, \
                    color='c', alpha=0.3)
    pl.plot(t, mu, color='c', lw=2)
    nper = (len(p) - 1) // 4          # number of kernel components (four parameters each)
    # if nper > 1:
    #     cols = ['c','m','y','k']
    #     for i in range(nper):
    #         p1 = np.append(p[i*4:i*4+4], p[-1])
    #         k1, sig = kern_QP(p1)
    #         b = gp.solver.apply_inverse(y)
    #         X = np.transpose([t])
    #         K1 = k1.value(t, t)
    #         mu1 = np.dot(K1, b)
    #         col = np.roll(cols, -i)[0]
    #         pl.plot(t, mu, color = col, lw = 2)
    return
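
plotpred_QP additionally assumes matplotlib imported as pl, plus the project-specific kern_QP helper, which (judging from its use here) returns a george kernel and a white-noise level sig for a parameter vector p. The matplotlib part of that assumption is simply:

import matplotlib.pyplot as pl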
Example #7
    def run(self):
        gp = GP(self._kernel)
        gp.compute(self._x)
        y = self._y
        pipe = self._out_pipe

        for cmd, args, kwargs in iter(pipe.recv, None):
            if cmd == 'predict':
                result = gp.predict(y, *args, **kwargs)
                if len(result) == 2:
                    # only return the diagonal of the covariance matrix
                    result = result[0], result[1].diagonal()

            elif cmd == 'get_kernel_pars':
                result = gp.kernel.pars

            elif cmd == 'set_kernel_pars':
                gp.kernel.pars = args[0]
                result = None

            elif cmd == 'train':
                prior, nstarts, verbose = args

                # define function for negative log-likelihood and its gradient
                def nll(vector, bad_return=(1e30, np.zeros(len(gp.kernel)))):
                    # prevent exp overflow
                    if np.any(vector > 100.):
                        return bad_return
                    gp.kernel.vector = vector
                    ll = gp.lnlikelihood(y, quiet=True)
                    if not np.isfinite(ll):
                        return bad_return
                    grad = gp.grad_lnlikelihood(y, quiet=True)
                    return -ll, -grad

                if verbose:
                    print(self.name, 'starting training')

                # sample random initial positions from prior
                # run optimization for each
                result = tuple(
                    optimize.minimize(nll, x0, jac=True, **kwargs)
                    for x0 in np.log(prior.rvs(nstarts))
                )

                if verbose:
                    print(self.name, 'training complete')
                    # Print a table of results.
                    # Since results are sure to repeat,
                    # group them and output a row for each group:
                    #   number ll *hyperpars
                    for nll, group in itertools.groupby(
                            sorted(result, key=lambda r: r.fun),
                            key=lambda r: round(r.fun, 2)
                    ):
                        for n, r in enumerate(group, start=1):
                            pass
                        print(' ', n, -nll, _format_number_list(*np.exp(r.x)))

                # set hyperparameters to opt result with best likelihood
                gp.kernel.vector = min(result, key=lambda r: r.fun).x

            else:
                result = ValueError('Unknown command: {}.'.format(cmd))

            pipe.send(result)
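
The loop above reads (cmd, args, kwargs) tuples from a multiprocessing pipe, answers each with a single pipe.send, and exits when it receives None. The surrounding process class is not shown in this excerpt, so the client-side sketch below is a hypothetical illustration of that protocol rather than the project's actual driver code:

import multiprocessing as mp

parent_conn, child_conn = mp.Pipe()
# child_conn would play the role of the worker's self._out_pipe; the worker
# process itself is assumed to be constructed and started elsewhere.

parent_conn.send(('get_kernel_pars', (), {}))
pars = parent_conn.recv()

parent_conn.send(('predict', ([0.0, 0.5, 1.0],), {}))
mu, var = parent_conn.recv()   # the worker returns (mean, diagonal of the covariance)

parent_conn.send(None)         # terminates the iter(pipe.recv, None) loop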