Example No. 1
def create_gp(params):
    # GP parameters
    s2 = np.exp(params["log_s2"])
    taux = np.exp(params["log_taux"])
    tauy = np.exp(params["log_tauy"])

    gpx = GP(s2 * ExpSquaredKernel(taux))
    gpy = GP(s2 * ExpSquaredKernel(tauy))

    return gpx, gpy
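A minimal usage sketch for this helper, assuming the same george imports the snippet relies on (GP and ExpSquaredKernel) and a hypothetical params dictionary of log-scale hyperparameters; the dictionary keys are the ones the function reads, everything else is illustrative:

import numpy as np
from george import GP
from george.kernels import ExpSquaredKernel

params = {"log_s2": np.log(0.5), "log_taux": np.log(3.0), "log_tauy": np.log(10.0)}
gpx, gpy = create_gp(params)

# Condition each GP on a common time grid before sampling or predicting.
t = np.linspace(0.0, 10.0, 200)
gpx.compute(t, yerr=0.01)
gpy.compute(t, yerr=0.01)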
Example No. 2
def test_gp_mean(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3), mean=5.0, fit_mean=True)
    gp.compute(x)
    check_gradient(gp, y)
Example No. 3
def test_dtype(seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)
Example No. 4
def test_apply_inverse(solver, seed=1234, N=201, yerr=0.1):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-10
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)

    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += yerr**2

    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)

    y = gp.sample(x, size=5).T
    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)
Example No. 5
def test_repeated_prediction_cache():
    kernel = kernels.ExpSquaredKernel(1.0)
    gp = GP(kernel)

    x = np.array((-1, 0, 1))
    gp.compute(x)

    t = np.array((-.5, .3, 1.2))

    y = x / x.std()
    mu0, mu1 = (gp.predict(y, t, return_cov=False) for _ in range(2))
    assert np.array_equal(mu0, mu1), \
        "Identical training data must give identical predictions " \
        "(problem with GP cache)."

    y2 = 2 * y
    mu2 = gp.predict(y2, t, return_cov=False)
    assert not np.array_equal(mu0, mu2), \
        "Different training data must give different predictions " \
        "(problem with GP cache)."

    a0 = gp._alpha
    gp.kernel[0] += 0.1
    gp.recompute()
    gp._compute_alpha(y2, True)
    a1 = gp._alpha
    assert not np.allclose(a0, a1), \
        "Different kernel parameters must give different alphas " \
        "(problem with GP cache)."

    mu, cov = gp.predict(y2, t)
    _, var = gp.predict(y2, t, return_var=True)
    assert np.allclose(np.diag(cov), var), \
        "The predictive variance must be equal to the diagonal of the " \
        "predictive covariance."
Example No. 6
def _general_metric(metric, N=100, ndim=3):
    kernel = 0.1 * kernels.ExpSquaredKernel(metric, ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix.
    M2 = np.empty((N, N))
    for i in range(N):
        for j in range(N):
            r = x[i] - x[j]
            r2 = np.dot(r, np.linalg.solve(metric, r))
            M2[i, j] = 0.1 * np.exp(-0.5 * r2)

    if not np.allclose(M0, M2):
        print(M0)
        print()
        print(M2)
        print()
        print(M0 - M2)
        print()
        print(M0 / M2)

        L = np.linalg.cholesky(metric)
        i = N - 1
        j = N - 2
        r = x[j] - x[i]
        print(x[i], x[j])
        print("r = ", r)
        print("L.r = ", np.dot(L, r))
    assert np.allclose(M0, M2)
Example No. 7
    def __init__(self,
                 data=None,
                 param={},
                 bounds={},
                 fixed=[],
                 group=None,
                 **kwargs):
        """

        :param data:
        :param param:
        :param bounds:
        :param group:
        """
        self.data = []
        self.param, self.param_name, self.param_bounds = np.array(
            []), np.array([]), []
        self.Ndata = 0
        self.index_param_indiv = []
        self.index_bounds_indiv = []
        self.index_param_common = []
        self.index_bounds_common = []
        self.group_indiv = []
        self.fixed = []
        self.group_common = []
        self.detrendvar = {}
        self._varind = 0
        self.gp = GP(solver=BasicSolver)
        self._mcmc_parinit_optimized = False
        self.add_data(data, param, bounds, fixed, group, **kwargs)
Example No. 8
def plotsample_QP(p, t, y, tsel, ysel):
    kern, sig = kern_QP(p)
    gp = GP(kern)
    yerr = np.ones(len(ysel)) * sig
    gp.compute(tsel, yerr)
    mu = gp.sample_conditional(ysel, t)
    pl.plot(t, mu, color='c', alpha=0.3)
    return
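plotsample_QP here and plotpred_QP in Example No. 20 both call a kern_QP(p) helper that returns a kernel and a white-noise level but is not shown. A sketch of what such a helper might look like, assuming a hypothetical packing of the parameter vector as [log_amp, metric, gamma, log_period, ..., sigma] with four kernel parameters per quasi-periodic component (the packing is an assumption, not the original code):

import numpy as np
from george import kernels

def kern_QP(p):
    # Hypothetical layout: four kernel parameters per component, white-noise sigma last.
    log_amp, metric, gamma, log_period = p[:4]
    sig = p[-1]
    kern = np.exp(log_amp) * kernels.ExpSquaredKernel(metric) * \
        kernels.ExpSine2Kernel(gamma=gamma, log_period=log_period)
    return kern, sig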
Example No. 9
def test_gp_callable_mean(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    mean = CallableModel(lambda x: 5.0 * x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3), mean=mean)
    gp.compute(x)
    check_gradient(gp, y)
Example No. 10
def test_pickle(solver, success, N=50, seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel, solver=solver)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)

    s = pickle.dumps(gp, -1)
    gp = pickle.loads(s)
    if success:
        gp.compute = _fake_compute
    gp.lnlikelihood(np.sin(x))
Example No. 11
def test_gp_callable_white_noise(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3),
            mean=5.0,
            white_noise=LinearWhiteNoise(-6, 0.01),
            fit_white_noise=True)
    gp.compute(x)
    check_gradient(gp, y)

    gp.freeze_parameter("white_noise:m")
    check_gradient(gp, y)
Example No. 12
    def create(self,
               rseed=0,
               ldcs=None,
               wnsigma=None,
               rnsigma=None,
               rntscale=0.5,
               nights=1):
        ldcs = ldcs if ldcs is not None else self.ldcs
        seed(rseed)

        self.time = linspace(-0.5 * float(self.t_total_d),
                             0.5 * float(self.t_total_d), self.n_exp)
        self.time = (tile(self.time, [nights, 1]) +
                     (self.p * arange(nights))[:, newaxis]).ravel()
        self.npt = self.time.size

        self.transit = zeros([self.npt, 4])
        for i, (ldc, c) in enumerate(zip(ldcs, self.contamination)):
            self.transit[:, i] = MA().evaluate(self.time,
                                               self.k0,
                                               ldc,
                                               0,
                                               self.p,
                                               self.a,
                                               self.i,
                                               c=c)

        # White noise
        # -----------
        if wnsigma is not None:
            self.wnoise = multivariate_normal(
                zeros(atleast_2d(self.transit).shape[1]),
                diag(wnsigma)**2, self.npt)
        else:
            self.wnoise = zeros_like(self.transit)

        # Red noise
        # ---------
        if rnsigma and with_george:
            self.gp = GP(rnsigma**2 * ExpKernel(rntscale))
            self.gp.compute(self.time)
            self.rnoise = self.gp.sample(self.time, self.npb).T
            self.rnoise -= self.rnoise.mean(0)
        else:
            self.rnoise = zeros_like(self.transit)

        # Final light curve
        # -----------------
        self.time_h = Qty(self.time, 'd').rescale('h')
        self.flux = self.transit + self.wnoise + self.rnoise
        return self.time_h, self.flux
Example No. 13
def test_bounds():
    kernel = 10 * kernels.ExpSquaredKernel(1.0, metric_bounds=[(None, 4.0)])
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    # Test bounds length.
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())
    gp.freeze_all_parameters()
    gp.thaw_parameter("white_noise:m")
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())

    # Test invalid bounds specification.
    with pytest.raises(ValueError):
        kernels.ExpSine2Kernel(gamma=0.1, log_period=5.0, bounds=[10.0])
Example No. 14
def test_axis_aligned_metric(seed=1234, N=100, ndim=3):
    np.random.seed(seed)

    kernel = 0.1 * kernels.ExpSquaredKernel(np.ones(ndim), ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix.
    M2 = 0.1 * np.exp(-0.5 * np.sum(
        (x[None, :, :] - x[:, None, :])**2, axis=-1))
    assert np.allclose(M0, M2)
Example No. 15
def test_predict_single(solver, seed=1234, N=201, yerr=0.1):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)
    mu0, var0 = gp.predict(y, [0.0], return_var=True)
    mu, var = gp.predict(y, [0.0, 1.0], return_var=True)
    _, cov = gp.predict(y, [0.0, 1.0])
    assert np.allclose(mu0, mu[0])
    assert np.allclose(var0, var[0])
    assert np.allclose(var0, cov[0, 0])
Example No. 16
    def __init__(self, mean, covariance, points, lnlikes, quiet=True):
        self.mean = mean
        self.cov = covariance

        #Let's try only interpolating over points that are
        #better than, say, 10-ish sigma
        dof = len(self.mean)
        inds = np.fabs(np.max(lnlikes) - lnlikes) < 100 * dof
        print(inds)
        print(lnlikes)

        self.points = points[inds]
        self.lnlikes_true = lnlikes[inds]
        self.lnlike_max = np.max(lnlikes[inds])
        self.lnlikes = lnlikes[inds] - self.lnlike_max  #max is now at 0
        self.x = self._transform_data(self.points)

        print(max(self.lnlikes), min(self.lnlikes))

        _guess = 4.5  #kernel length guess
        kernel = kernels.ExpSquaredKernel(metric=_guess, ndim=dof)
        lnPmin = np.min(self.lnlikes)
        gp = GP(kernel, mean=lnPmin - np.fabs(lnPmin * 3))

        gp.compute(self.x)

        def neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(self.lnlikes)

        def grad_neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(self.lnlikes)

        result = minimize(neg_ln_likelihood,
                          gp.get_parameter_vector(),
                          jac=grad_neg_ln_likelihood)
        if not quiet:
            print(result)
        gp.set_parameter_vector(result.x)
        self.gp = gp
Example No. 17
def test_gradient(solver, white_noise, seed=123, N=305, ndim=3, eps=1.32e-3):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5, ndim=ndim)
    kwargs = dict()
    if white_noise is not None:
        kwargs = dict(white_noise=white_noise, fit_white_noise=True)
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.random.rand(N, ndim)
    x = x[np.argsort(x[:, 0])]
    y = gp.sample(x)
    gp.compute(x, yerr=0.1)

    # Compute the initial gradient.
    grad0 = gp.grad_log_likelihood(y)
    vector = gp.get_parameter_vector()

    for i, v in enumerate(vector):
        # Compute the centered finite difference approximation to the gradient.
        vector[i] = v + eps
        gp.set_parameter_vector(vector)
        lp = gp.lnlikelihood(y)

        vector[i] = v - eps
        gp.set_parameter_vector(vector)
        lm = gp.lnlikelihood(y)

        vector[i] = v
        gp.set_parameter_vector(vector)

        grad = 0.5 * (lp - lm) / eps
        assert np.abs(grad - grad0[i]) < 5 * eps, \
            "Gradient computation failed in dimension {0} ({1})\n{2}" \
            .format(i, solver.__name__, np.abs(grad - grad0[i]))
Example No. 18
    def add_spots(self,QP=[5e-5,0.5,30.,28.]):
        """
        This attribute add stellar variability using a Quasi-periodic Kernel
        The activity is added using a george Kernel
        """

        if not hasattr(self,'flux_spots'):

            A  = QP[0]
            le = QP[1]
            lp = QP[2]
            P  = QP[3]

            from george import kernels, GP
            k = A * kernels.ExpSine2Kernel(gamma=1. / 2 / lp, log_period=np.log(P)) * \
                kernels.ExpSquaredKernel(metric=le)
            gp = GP(k)
            self.flux_spots = 1 + gp.sample(self.time)

        self.flux = self.flux * self.flux_spots

        self.spots = True
Example No. 19
def test_parameters():
    kernel = 10 * kernels.ExpSquaredKernel(1.0)
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    n = len(gp.get_parameter_vector())
    assert n == len(gp.get_parameter_names())
    assert n - 2 == len(kernel.get_parameter_names())

    gp.freeze_parameter(gp.get_parameter_names()[0])
    assert n - 1 == len(gp.get_parameter_names())
    assert n - 1 == len(gp.get_parameter_vector())

    gp.freeze_all_parameters()
    assert len(gp.get_parameter_names()) == 0
    assert len(gp.get_parameter_vector()) == 0

    gp.kernel.thaw_all_parameters()
    gp.white_noise.thaw_all_parameters()
    assert n == len(gp.get_parameter_vector())
    assert n == len(gp.get_parameter_names())

    assert np.allclose(kernel[0], np.log(10.))
Example No. 20
def plotpred_QP(p, t, y):
    kern, sig = kern_QP(p)
    gp = GP(kern)
    yerr = np.ones(len(y)) * sig
    gp.compute(t, yerr)
    mu, cov = gp.predict(y, t)
    sigma = np.diag(cov)
    sigma = np.sqrt(sigma**2 + yerr**2)
    pl.fill_between(t, mu + 2 * sigma, mu - 2 * sigma, \
                    color='c', alpha=0.3)
    pl.plot(t, mu, color='c', lw=2)
    nper = (len(p) - 1) // 4
    # if nper > 1:
    #     cols = ['c','m','y','k']
    #     for i in range(nper):
    #         p1 = np.append(p[i*4:i*4+4], p[-1])
    #         k1, sig = kern_QP(p1)
    #         b = gp.solver.apply_inverse(y)
    #         X = np.transpose([t])
    #         K1 = k1.value(t, t)
    #         mu1 = np.dot(K1, b)
    #         col = np.roll(cols, -i)[0]
    #         pl.plot(t, mu, color = col, lw = 2)
    return
Example No. 21
def test_prediction(solver, seed=42):
    """Basic sanity checks for GP regression."""

    np.random.seed(seed)

    kernel = kernels.ExpSquaredKernel(1.0)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, white_noise=0.0, **kwargs)

    x0 = np.linspace(-10, 10, 500)
    x = np.sort(np.random.uniform(-10, 10, 300))
    gp.compute(x)

    y = np.sin(x)
    mu, cov = gp.predict(y, x0)

    Kstar = gp.get_matrix(x0, x)
    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += 1.0
    mu0 = np.dot(Kstar, np.linalg.solve(K, y))
    print(np.abs(mu - mu0).max())
    assert np.allclose(mu, mu0)
Example No. 22
    def __init__(self, target, datasets, filters, model='pb_independent_k', fit_wn=True, **kwargs):
        super().__init__(target, datasets, filters, model, **kwargs)

        pbl = [LParameter('bl_{:d}'.format(i), 'baseline', '', N(1, 1e-2), bounds=(-inf,inf)) for i in range(self.nlc)]
        self.ps.thaw()
        self.ps.add_lightcurve_block('baseline', 1, self.nlc, pbl)
        self.ps.freeze()

        self._slbl = self.ps.blocks[-1].slice
        self._stbl = self.ps.blocks[-1].start

        self.logwnvar = log(array(self.wn) ** 2)
        self._create_kernel()
        self.covariates = [cv[:, self.covids] for cv in self.covariates]
        self.freeze = []
        self.standardize = []

        self.gps = [GP(self._create_kernel(),
                       mean=0., fit_mean=False,
                       white_noise=0.8*wn, fit_white_noise=fit_wn) for wn in self.logwnvar]

        # Freeze the GP hyperparameters marked as frozen in _create_kernel
        for gp in self.gps:
            pars = gp.get_parameter_names()
            [gp.freeze_parameter(pars[i]) for i in self.freeze]

        # Standardize the covariates marked to be standardized in _create_kernel
        for c in self.covariates:
            for i in self.standardize:
                c[:,i] = (c[:,i] - c[:,i].min()) / c[:,i].ptp()

        self.compute_always = False
        self.compute_gps()
        self.de = None
        self.gphpres = None
        self.gphps = None
Example No. 23
    def train(self, kernel=None):
        """Train a Gaussian Process to interpolate the log-likelihood
        of the training samples.

        Args:
            kernel (george.kernels.Kernel object): kernel to use, or any 
                acceptable object that can be accepted by the george.GP object

        """
        inds = self.training_inds
        x = self.chain_rotated_regularized[inds]
        lnL = self.lnlikes[inds]
        _guess = 4.5
        if kernel is None:
            kernel = kernels.ExpSquaredKernel(metric=_guess, ndim=len(x[0]))
        #Note: the mean is set slightly lower that the minimum lnlike
        lnPmin = np.min(self.lnlikes)
        gp = GP(kernel, mean=lnPmin - np.fabs(lnPmin * 3))
        gp.compute(x)

        def neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(lnL)

        def grad_neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(lnL)

        result = minimize(neg_ln_likelihood,
                          gp.get_parameter_vector(),
                          jac=grad_neg_ln_likelihood)
        print(result)
        gp.set_parameter_vector(result.x)
        self.gp = gp
        self.lnL_training = lnL
        return
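Once train has run, the surrogate can be queried for the interpolated log-likelihood at new points. A small sketch of such a query, written as a hypothetical companion method (not part of the original class) that reuses the attributes set above (self.gp and self.lnL_training):

    def predict_lnlike(self, x_new):
        """Hypothetical helper: interpolated log-likelihood at new points.

        The points must already be in the rotated/regularized coordinates
        used to build the training set above.
        """
        x_new = np.atleast_2d(x_new)
        mu, var = self.gp.predict(self.lnL_training, x_new, return_var=True)
        return mu, var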
Example No. 24
    def create_gp(self):
        gp = GP(kernel=self.kk, mean=self.mean)
        gp.compute(self.x_train, self.sigma_n)
        return gp
Example No. 25
    def __init__(self, kernel):
        self.kernel = kernel
        self._gp = GP(kernel._k)
        self._y = None  # Cached target values
        self._x = None  # Cached input locations
        self._dirty = True  # Flag indicating whether the cached arrays are up to date
Example No. 26
def get_simple_gp(pars):
    amp, length = pars
    kernel = amp * ExpSquaredKernel(length)
    return GP(kernel)
Example No. 27
def get_linear_gp(pars):
    amp, sigma = pars
    kernel = (amp * PolynomialKernel(sigma, order=2))
    return GP(kernel)
Example No. 28
def get_composite_gp(pars):
    amp, sigma, amp2, length = pars
    kernel = (amp * PolynomialKernel(sigma, order=1) +
              amp2 * ExpSquaredKernel(length))
    return GP(kernel)
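A short usage sketch for these factory helpers, assuming the kernels come from george.kernels as imported elsewhere in the snippet's module; the data and parameter values here are purely illustrative:

import numpy as np

x = np.linspace(0.0, 10.0, 100)
y = np.sin(x) + 0.1 * np.random.randn(100)

# pars = [amp, sigma, amp2, length]
gp = get_composite_gp([1.0, 0.5, 0.3, 2.0])
gp.compute(x, yerr=0.1)
print(gp.log_likelihood(y))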
Example No. 29
    def create_gp(self):
        # Build a GP with the kernel defined above and condition it on the training data
        gp = GP(kernel=self.kk, mean=self.mean)
        gp.compute(self.x_train, self.sigma_n)
        return gp
Example No. 30
def get_fancy_gp(pars):
    amp, a, b, c, d = pars
    kernel = amp * MyDijetKernelSimp(a, b, c, d)
    return GP(kernel)