Example #1
def get_gp(profile, t):
    # Define the objective function (negative log-likelihood in this case).
    def nll(p):
        gp.set_parameter_vector(p)
        ll = gp.log_likelihood(y, quiet=True)
        return -ll if np.isfinite(ll) else 1e25

    # And the gradient of the objective function.
    def grad_nll(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(y, quiet=True)

    k1 = 1e-5 * kernels.ExpSquaredKernel(metric=10.0)
    k2 = 1.0 * kernels.ExpSquaredKernel(metric=10)
    kernel = k1  #+ k2
    y = profile
    gp = george.GP(kernel,
                   mean=np.mean(y),
                   fit_mean=True,
                   white_noise=np.log(1e-5),
                   fit_white_noise=True)
    # You need to compute the GP once before starting the optimization.
    gp.compute(t)

    # Print the initial ln-likelihood.
    print(gp.log_likelihood(y))

    # Run the optimization routine.
    p0 = gp.get_parameter_vector()
    results = op.minimize(nll, p0, jac=grad_nll, method="L-BFGS-B")

    # Update the kernel and print the final log-likelihood.
    gp.set_parameter_vector(results.x)
    return gp
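A minimal usage sketch for get_gp, assuming it lives in a module that already imports numpy as np, george, george.kernels as kernels, and scipy.optimize as op; the profile and grid below are purely illustrative.

import numpy as np

# Hypothetical inputs: a smooth 1-D profile sampled on a regular grid.
t = np.linspace(0.0, 10.0, 100)
profile = np.exp(-0.5 * (t - 5.0) ** 2) + 1e-3 * np.random.randn(t.size)

gp = get_gp(profile, t)             # optimise the hyperparameters
print(gp.log_likelihood(profile))   # final log-likelihood after the fit

# Conditional prediction on a finer grid.
t_fine = np.linspace(0.0, 10.0, 500)
mu, var = gp.predict(profile, t_fine, return_var=True)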
Example #2
    def __init__(self, keyword=None, **kwargs):
        """Initialises the priors, initial hp values, and kernel."""

        # Set up kernel
        # -------------
        k_spatial = 1.0 * kernels.ExpSquaredKernel(
            metric=[1.0, 1.0], ndim=3, axes=[0, 1])
        k_temporal = 1.0 * kernels.ExpSquaredKernel(metric=1.0, ndim=3, axes=2)
        k_total = k_spatial + k_temporal

        if keyword in ('long', 'long_timescale'):
            default_sigt = 16
        elif isinstance(keyword, (int, float)):
            default_sigt = keyword + 1e-5
        else:
            default_sigt = np.exp(0.36 / 2)

        super().__init__(kernel=k_total,
                         parameter_names=('ln_Axy', '2ln_sigx', '2ln_sigy',
                                          'ln_At', '2ln_sigt'),
                         default_values=(-12.86, -3.47, -4.34, -12.28,
                                         2 * np.log(default_sigt)),
                         keyword=keyword,
                         **kwargs)

        self.default_X_cols = ['x', 'y', 't']
Example #3
    def __init__(self, x, y, yerr, subdivisions):
        """
        Initialize global variables of Gaussian Process Regression Interpolator
    
        Args:
            x (array): Independent variable
            y (array): Dependent variable
            yerr (array): Uncertainty on y
            subdivisions: The number of subdivisions between data points
        """

        # Define kernels
        kernel_expsq = 38**2 * kernels.ExpSquaredKernel(metric=10**2)
        kernel_periodic = 150**2 * kernels.ExpSquaredKernel(
            2**2) * kernels.ExpSine2Kernel(gamma=0.05, log_period=np.log(11))
        kernel_poly = 5**2 * kernels.RationalQuadraticKernel(
            log_alpha=np.log(.78), metric=1.2**2)
        kernel_extra = 5**2 * kernels.ExpSquaredKernel(1.6**2)
        kernel = kernel_expsq + kernel_periodic + kernel_poly + kernel_extra

        # Create GP object
        self.gp = george.GP(kernel, mean=np.mean(y), fit_mean=False)
        self.gp.compute(x, yerr)

        # Set global variables
        self.ndim = len(self.gp)
        self.x = x
        self.y = y
        self.yerr = yerr
        self.subdivisions = subdivisions
        self.priors = [prior.Prior(0, 1) for i in range(self.ndim)]
        self.x_predict = np.linspace(min(self.x), max(self.x),
                                     subdivisions * (len(self.x) - 1) + 1)
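Only the constructor is shown, so the class's own prediction interface is unknown; below is a self-contained sketch of the same setup driven directly through george, with toy data standing in for x, y, and yerr.

import numpy as np
import george
from george import kernels

# Toy data in place of the real (x, y, yerr); purely illustrative.
x = np.linspace(0.0, 50.0, 30)
y = np.sin(x / 5.0) + 0.1 * np.random.randn(x.size)
yerr = 0.1 * np.ones_like(x)
subdivisions = 4

# The same composite kernel as in the constructor above.
kernel = (38**2 * kernels.ExpSquaredKernel(metric=10**2)
          + 150**2 * kernels.ExpSquaredKernel(2**2)
          * kernels.ExpSine2Kernel(gamma=0.05, log_period=np.log(11))
          + 5**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(.78),
                                                   metric=1.2**2)
          + 5**2 * kernels.ExpSquaredKernel(1.6**2))

gp = george.GP(kernel, mean=np.mean(y), fit_mean=False)
gp.compute(x, yerr)

# Interpolate onto the subdivided grid, mirroring x_predict above.
x_predict = np.linspace(x.min(), x.max(), subdivisions * (len(x) - 1) + 1)
mu, var = gp.predict(y, x_predict, return_var=True)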
Example #4
    def __init__(self, P=None, *args, **kwargs):
        """Initialises the priors, initial hp values, and kernel."""

        # Set up kernel
        # -------------
        k_spatial = 1.0 * kernels.ExpSquaredKernel(
            metric=[1.0, 1.0], ndim=3, axes=[0, 1])
        k_temporal = (1.0
                      * kernels.ExpSquaredKernel(metric=1.0, ndim=3, axes=2)
                      * kernels.ExpSine2Kernel(gamma=2, log_period=1,
                                               ndim=3, axes=2))
        k_total = k_spatial + k_temporal

        if P is None:
            P = 0.5

        # NOTE: sigt always starts as a multiple of the period
        super().__init__(kernel=k_total,
                         parameter_names=('ln_Axy', '2ln_sigx', '2ln_sigy',
                                          'ln_At', '2ln_sigt', 'gamma', 'lnP'),
                         default_values=(-12.86, -3.47, -4.34, -12.28,
                                         2 * np.log(4 * P), 1.0, np.log(P)),
                         *args,
                         **kwargs)

        # self.set_hyperpriors(keyword=keyword)
        # self.set_bounds(keyword=keyword)
        # self.parameter_names = ('ln_Axy', '2ln_sigx', '2ln_sigy',
        # 				 		'ln_At', '2ln_sigt', 'gamma', 'lnP')
        # self.default_values = (-12.86, -3.47, -4.34, -12.28,
        # 					   max(2*np.log(4*P), self.bounds[4][0] + 1e-6),
        # 					   1.0, np.log(P))
        # self.kernel = k_total
        # self.kernel.set_parameter_vector(np.array(self.default_values))

        # # Additional potential tools
        # self.get_parameter_vector = self.kernel.get_parameter_vector
        # self.set_parameter_vector = self.kernel.set_parameter_vector

        if np.log(P) < self.bounds[-1][0] or np.log(P) > self.bounds[-1][1]:
            raise PriorInitialisationError(
                ("Initial period is out of bounds\nperiod: {},\n"
                 "lnP: {}, \nbounds: {}".format(P, np.log(P),
                                                self.bounds[-1])))
        elif not np.isfinite(self.log_prior(self.default_values)):
            raise PriorInitialisationError(
                ("Initial hyperparameter values are out of "
                 "prior bounds.\n"
                 "hp_default: {}\n"
                 "bounds: {}\n"
                 "P: {}".format(self.default_values, self.bounds, P)))

        self.default_X_cols = ['x', 'y', 't']
Example #5
def _general_metric(metric, N=100, ndim=3):
    kernel = 0.1 * kernels.ExpSquaredKernel(metric, ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix.
    M2 = np.empty((N, N))
    for i in range(N):
        for j in range(N):
            r = x[i] - x[j]
            r2 = np.dot(r, np.linalg.solve(metric, r))
            M2[i, j] = 0.1 * np.exp(-0.5 * r2)

    if not np.allclose(M0, M2):
        print(M0)
        print()
        print(M2)
        print()
        print(M0 - M2)
        print()
        print(M0 / M2)

        L = np.linalg.cholesky(metric)
        i = N - 1
        j = N - 2
        r = x[j] - x[i]
        print(x[i], x[j])
        print("r = ", r)
        print("L.r = ", np.dot(L, r))
    assert np.allclose(M0, M2)
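A usage sketch, assuming the helper above is in scope; the expected-value loop inside it solves and Cholesky-factors the metric, so it expects a full positive-definite matrix rather than a scalar.

import numpy as np

np.random.seed(42)
# Illustrative positive-definite 3x3 metric.
metric = np.array([[1.0, 0.3, 0.0],
                   [0.3, 2.0, 0.1],
                   [0.0, 0.1, 0.5]])
_general_metric(metric, N=50, ndim=3)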
Example #6
def generate_data(params, N, rng=(-5, 5)):

    # Create GP object
    # It needs a kernel to be supplied.
    gp = george.GP(0.1 * kernels.ExpSquaredKernel(3.3))

    # Generate an array for the independent variable.
    # In this case, the array goes from -5 to +5.
    # In case of a spectrum this is the wavelength.
    t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))

    # Generate the dependent variable array. Flux in case of spectra.
    # The sample method of the gp object draws samples from the distribution
    # used to define the gp object.
    y = gp.sample(t)  # A smooth random realization drawn from the GP prior

    # Add the Gaussian "absorption" or "emission" feature on top of the GP sample
    y += Model(**params).get_value(t)

    # Generate array for errors and add it to the dependent variable.
    # This has a base error of 0.05 and then another random error that
    # has a magnitude between 0 and 0.05.
    yerr = 0.05 + 0.05 * np.random.rand(N)
    y += yerr * np.random.randn(
        N)  # randn draws samples from the normal distribution
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.errorbar(t, y, yerr=yerr, fmt='.k', capsize=0)
    plt.show()
    """

    return t, y, yerr
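Model(**params).get_value(t) refers to a mean model that is not shown here. A plausible stand-in, following george's modeling protocol with a Gaussian feature, is sketched below; the parameter names amp, location, and log_sigma2 are assumptions, not taken from the original code.

import numpy as np
from george.modeling import Model as GeorgeModel

class Model(GeorgeModel):
    # Hypothetical Gaussian "absorption"/"emission" feature; the real
    # parameterisation may differ.
    parameter_names = ("amp", "location", "log_sigma2")

    def get_value(self, t):
        return self.amp * np.exp(
            -0.5 * (t.flatten() - self.location) ** 2 * np.exp(-self.log_sigma2))

# Example call mirroring generate_data above:
# t, y, yerr = generate_data(dict(amp=-1.0, location=0.1, log_sigma2=np.log(0.4)), 50)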
Example #7
 def __init__(self):
     print('Initialize sigma_d emulator')
     base_dir = os.path.dirname(os.path.abspath(__file__))
     self.cosmos = np.loadtxt(base_dir + '/../data/cparams_4d.dat')
     self.ydata = np.loadtxt(base_dir + '/../learned_data/sigmad/coeff_all.dat')
     self.yavg = np.loadtxt(base_dir + '/../learned_data/sigmad/sigd_avg.dat')
     self.ystd = np.loadtxt(base_dir + '/../learned_data/sigmad/sigd_std.dat')
     self.gp_params = np.loadtxt(base_dir + '/../learned_data/sigmad/gp_params.dat')
     self.ktypes = np.loadtxt(base_dir + '/../learned_data/sigmad/ktypes.dat')
     if self.ktypes == 10:
         kernel = 1. * \
             kernels.Matern52Kernel(np.ones(4), ndim=4) + \
             kernels.ConstantKernel(1e-4, ndim=4)
     elif self.ktypes == 6:
         kernel = 1. * \
             kernels.ExpSquaredKernel(
                 np.ones(4), ndim=4) + kernels.ConstantKernel(1e-4, ndim=4)
     else:
         print('kernel types 6 and 10 are the only supported types.')
     self.gp = george.GP(kernel)
     self.gp.compute(self.cosmos[:800])
     self.gp.set_parameter_vector(self.gp_params)
     self.As_fid = np.exp(3.094)
Example #8
 def load_sigma_gp(self):
     base_dir = os.path.dirname(os.path.abspath(__file__))
     self.cosmos = np.loadtxt(base_dir + '/../data/cparams_4d.dat')
     self.ydata = np.loadtxt(base_dir + '/../learned_data/sigmaM/coeff_all.dat')
     self.eigdata = np.loadtxt(base_dir + '/../learned_data/sigmaM/pca_eigvec.dat')
     self.ymean = np.loadtxt(base_dir + '/../learned_data/sigmaM/pca_mean.dat')
     self.ystd = np.loadtxt(base_dir + '/../learned_data/sigmaM/pca_std.dat')
     self.yavg = np.loadtxt(base_dir + '/../learned_data/sigmaM/pca_avg.dat')
     self.gp_params = np.loadtxt(base_dir + '/../learned_data/sigmaM/gp_params.dat')
     self.ktypes = np.loadtxt(base_dir + '/../learned_data/sigmaM/ktypes.dat')
     self.gps = []
     for i in range(4):
         if self.ktypes[i] == 10:
             kernel = 1. * \
                 kernels.Matern52Kernel(
                     np.ones(4), ndim=4) + kernels.ConstantKernel(1e-4, ndim=4)
         elif self.ktypes[i] == 6:
             kernel = 1. * \
                 kernels.ExpSquaredKernel(
                     np.ones(4), ndim=4) + kernels.ConstantKernel(1e-4, ndim=4)
         else:
             print('kernel types 6 and 10 are the only supported types.')
         gp = george.GP(kernel)
         gp.compute(self.cosmos[:800])
         gp.set_parameter_vector(self.gp_params[i])
         self.gps.append(gp)
Example #9
    def lnlike_gp(self):

        if (self.t is None) or (self.y is None):
            raise ValueError(
                "Data is not properly initialized. Received Nones.")

        elif len(self.t) == 1:
            raise ValueError(
                "Time data is not properly initialized. Expected array of "
                "size greater than 1.")

        else:

            t, y = self.t, self.y

            if self.kernel_type == "Standard":
                kernel = 1. * kernels.ExpSquaredKernel(
                    5.) + kernels.WhiteKernel(2.)
                gp = george.GP(kernel, mean=self.meanfnc)
                gp.compute(t, self.yerr1)
                return gp.lnlikelihood(y - self.model)

            else:
                kernel = kernels.PythonKernel(self.kernelfnc)
                gp = george.GP(kernel, mean=self.meanfnc)
                gp.compute(t)
                return gp.lnlikelihood(y - self.model)
Example #10
def test_apply_inverse(solver, seed=1234, N=201, yerr=0.1):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-10
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)

    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += yerr**2

    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)

    y = gp.sample(x, size=5).T
    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)
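The test takes the solver class as an argument (in george's own suite this is typically driven by pytest parametrisation); a sketch of calling it directly against both solvers, assuming the function above is importable:

from george.solvers import BasicSolver, HODLRSolver

test_apply_inverse(BasicSolver)
test_apply_inverse(HODLRSolver)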
Example #11
def test_gp_mean(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3), mean=5.0, fit_mean=True)
    gp.compute(x)
    check_gradient(gp, y)
Example #12
    def lnlike(self, p):
        """
        GP likelihood function for probability of data given the kernel parameters

        :return lnlike: likelihood of kernel amplitude and length-scale parameters
        """
        # Update the kernel and compute the lnlikelihood.
        a, tau = 10.0**p[0], 10.0**p[1:]

        lnlike = 0.0
        try:

            if self.kernel == 'sqexp':
                self.gaussproc = george.GP(
                    a * kernels.ExpSquaredKernel(tau, ndim=len(tau)))
            elif self.kernel == 'matern32':
                self.gaussproc = george.GP(
                    a * kernels.Matern32Kernel(tau, ndim=len(tau)))
            elif self.kernel == 'matern52':
                self.gaussproc = george.GP(
                    a * kernels.Matern52Kernel(tau, ndim=len(tau)))

            self.gaussproc.compute(self.x, self.yerr)

            lnlike = self.gaussproc.log_likelihood(self.y, quiet=True)

        except np.linalg.LinAlgError:

            lnlike = -np.inf

        return lnlike
Example #13
def test_repeated_prediction_cache():
    kernel = kernels.ExpSquaredKernel(1.0)
    gp = GP(kernel)

    x = np.array((-1, 0, 1))
    gp.compute(x)

    t = np.array((-.5, .3, 1.2))

    y = x / x.std()
    mu0, mu1 = (gp.predict(y, t, return_cov=False) for _ in range(2))
    assert np.array_equal(mu0, mu1), \
        "Identical training data must give identical predictions " \
        "(problem with GP cache)."

    y2 = 2 * y
    mu2 = gp.predict(y2, t, return_cov=False)
    assert not np.array_equal(mu0, mu2), \
        "Different training data must give different predictions " \
        "(problem with GP cache)."

    a0 = gp._alpha
    gp.kernel[0] += 0.1
    gp.recompute()
    gp._compute_alpha(y2, True)
    a1 = gp._alpha
    assert not np.allclose(a0, a1), \
        "Different kernel parameters must give different alphas " \
        "(problem with GP cache)."

    mu, cov = gp.predict(y2, t)
    _, var = gp.predict(y2, t, return_var=True)
    assert np.allclose(np.diag(cov), var), \
        "The predictive variance must be equal to the diagonal of the " \
        "predictive covariance."
Example #14
def test_dtype(seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)
Example #15
def _test_solver(Solver, N=300, seed=1234, **kwargs):
    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(1.0)
    solver = Solver(kernel, **kwargs)

    # Sample some data.
    np.random.seed(seed)
    x = np.atleast_2d(np.sort(10 * np.random.randn(N))).T
    yerr = np.ones(N)
    solver.compute(x, yerr)

    # Build the matrix.
    K = kernel.get_value(x)
    K[np.diag_indices_from(K)] += yerr**2

    # Check the determinant.
    sgn, lndet = np.linalg.slogdet(K)
    assert sgn == 1.0, "Invalid determinant"
    assert np.allclose(solver.log_determinant, lndet), "Incorrect determinant"

    y = np.sin(x[:, 0])
    b0 = np.linalg.solve(K, y)
    b = solver.apply_inverse(y).flatten()
    assert np.allclose(b, b0)

    # Check the inverse.
    assert np.allclose(solver.apply_inverse(K), np.eye(N)), "Incorrect inverse"
Example #16
        def kernel3(data):
            def neg_ln_like(p):
                gp.set_parameter_vector(p)
                return -gp.log_likelihood(y)

            def grad_neg_ln_like(p):
                gp.set_parameter_vector(p)
                return -gp.grad_log_likelihood(y)

            try:
                x, y, err = data
                ls = get_ls(x, y, err)
                kernel = np.var(y) * kernels.ExpSquaredKernel(ls**2)

                gp = george.GP(kernel,
                               fit_mean=True,
                               white_noise=np.max(err)**2,
                               fit_white_noise=True)

                gp.compute(x, err)
                results = optimize.minimize(neg_ln_like,
                                            gp.get_parameter_vector(),
                                            jac=grad_neg_ln_like,
                                            method="L-BFGS-B",
                                            tol=1e-5)
                # Update the kernel and print the final log-likelihood.
                gp.set_parameter_vector(results.x)

            except Exception:  # fall back to the simpler kernel1 fit if anything fails
                gp, results = kernel1(data)

            return gp, results
Example #17
    def fit_gp(self, X, y, ye):
        Xc, yc, yerrc = self.clean_data(X, y, ye)
        scaler = preprocessing.StandardScaler().fit(Xc)
        scaler_y = preprocessing.StandardScaler().fit(yc.reshape(-1, 1))
        nX = scaler.transform(Xc)
        ny = scaler_y.transform(yc.reshape(-1, 1))
        nye = yerrc * scaler_y.scale_

        kernel = np.var(ny.flatten()) * kernels.ExpSquaredKernel(
            0.5, ndim=Xc.shape[1])
        gp = george.GP(kernel, fit_white_noise=True)
        gp.compute(nX, np.sqrt(nye).flatten())

        def neg_ln_like(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(ny.flatten())

        def grad_neg_ln_like(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(ny.flatten())

        result = minimize(neg_ln_like, [1., 1],
                          jac=grad_neg_ln_like,
                          method="L-BFGS-B")
        print('[{}]: fit {}'.format(self.nbody.domain.rank, result))
        gp.set_parameter_vector(result.x)

        return gp
Example #18
def optLC(lcsb, a, b, id=1):
    global t, r, rerr, rdev, gp

    t = lcsb.time
    r = lcsb.rmag
    rerr = lcsb.rerr
    rdev = r - np.median(r)
    #    kernel = kernels.ExpSquaredKernel(a, bounds=(400,40000))
    kernel = kernels.ExpSquaredKernel(a)
    gp = george.GP(kernel, white_noise=np.log(b * b), fit_white_noise=True)
    gp.compute(t, rerr)

    # Print the initial ln-likelihood.
    print(gp.lnlikelihood(rdev))

    # Run the optimization routine.
    p0 = gp.get_parameter_vector()
    results = op.minimize(nll, p0, jac=grad_nll)
    print(results.x)

    # Update the kernel and print the final log-likelihood.
    gp.set_parameter_vector(results.x)
    print(gp.lnlikelihood(rdev))

    t_pred = np.linspace(np.min(t), np.max(t), 5000)
    pred_r, pred_rerr = gp.predict(rdev, t_pred, return_var=True)

    return t, rdev, rerr, t_pred, pred_r, pred_rerr
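optLC relies on module-level nll and grad_nll helpers that are not shown. A sketch of what they presumably look like, using the same lnlikelihood spelling the snippet itself uses and the globals gp and rdev that optLC sets up:

import numpy as np

def nll(p):
    # Negative log-likelihood of the detrended magnitudes under the current GP.
    gp.set_parameter_vector(p)
    ll = gp.lnlikelihood(rdev, quiet=True)
    return -ll if np.isfinite(ll) else 1e25

def grad_nll(p):
    # Gradient of the negative log-likelihood with respect to the parameters.
    gp.set_parameter_vector(p)
    return -gp.grad_lnlikelihood(rdev, quiet=True)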
Example #19
def kern_QP(p):
    nper = (len(p) - 1) // 4  # four parameters per quasi-periodic component
    for i in range(nper):
        log_a, log_gamma, period, log_tau = p[i * 4:i * 4 + 4]
        a_sq = 10.0**(2 * log_a)
        gamma = 10.0**log_gamma
        tau_sq = 10.0**(2 * log_tau)
        if i == 0:
            kern = a_sq * kernels.ExpSine2Kernel(gamma, period) * \
              kernels.ExpSquaredKernel(tau_sq)
        else:
            kern += a_sq * kernels.ExpSine2Kernel(gamma, period) * \
              kernels.ExpSquaredKernel(tau_sq)
    log_sig_ppt = p[-1]
    sig = 10.0**(log_sig_ppt - 3)
    return kern, sig
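A brief usage sketch, assuming kern_QP above is in scope: the parameter vector holds four numbers per quasi-periodic component (log_a, log_gamma, period, log_tau) plus a final white-noise level in log10(ppt), and the returned kernel and scatter feed straight into a GP.

import numpy as np
import george

p = [-0.5, 0.3, 12.3, 1.0, 0.0]    # one component plus the noise term; illustrative values
kern, sig = kern_QP(p)

t = np.linspace(0.0, 100.0, 200)   # illustrative time grid
gp = george.GP(kern)
gp.compute(t, yerr=sig)            # the returned scatter used as the per-point error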
Example #20
    def fit_predict(self):
        kernel = kernels.ExpSquaredKernel(np.ones([self.x.shape[1]]),
                                          ndim=self.x.shape[1])
        gp = george.GP(kernel)
        gp.compute(self.x[self.init_id])
        pred_mean, pred_var = gp.predict(self.y, self.x, return_var=True)

        return pred_mean, pred_var
Example #21
def computeModel():
    global gp
    kernel = (np.var(y) * kernels.ExpSquaredKernel(0.5)
              * kernels.ExpSine2Kernel(log_period=0.5, gamma=1))
    gp = george.GP(kernel)
    gp.compute(x, y)
    model = gp.predict(y, x, return_var=True)
    result = minimize(neg_ln_like, gp.get_parameter_vector(), jac=grad_neg_ln_like)
    gp.set_parameter_vector(result.x)
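computeModel uses module-level x, y, neg_ln_like, and grad_neg_ln_like that are not shown (note that george treats the second argument of gp.compute as per-point uncertainties). The helpers presumably follow the same pattern as the other snippets on this page; a sketch under that assumption:

def neg_ln_like(p):
    # Negative log-likelihood of the global data y under the global gp.
    gp.set_parameter_vector(p)
    return -gp.log_likelihood(y)

def grad_neg_ln_like(p):
    # Its gradient, for use with a gradient-based optimiser such as L-BFGS-B.
    gp.set_parameter_vector(p)
    return -gp.grad_log_likelihood(y)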
Example #22
 def create_kernel(self):
     # This function creates the covariance function kernel for the Gaussian Process
     if self.kern == 'SE':
         return self.sigma_f * kernels.ExpSquaredKernel(self.l_param, ndim=self.n_dim)
     elif self.kern == 'M32':
         return self.sigma_f * kernels.Matern32Kernel(self.l_param, ndim=self.n_dim)
     elif self.kern == 'M52':
         return self.sigma_f * kernels.Matern52Kernel(self.l_param, ndim=self.n_dim)
Example #23
def test_gp_callable_mean(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    mean = CallableModel(lambda x: 5.0 * x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3), mean=mean)
    gp.compute(x)
    check_gradient(gp, y)
Example #24
def generate_data(params, N, rng=(-5, 5)):
    gp = george.GP(0.1 * kernels.ExpSquaredKernel(3.3))
    t = rng[0] + np.diff(rng) * np.sort(np.random.rand(N))
    y = gp.sample(t)
    y += model(params, t)
    yerr = 0.05 + 0.05 * np.random.rand(N)
    y += yerr * np.random.randn(N)
    return t, y, yerr
Example #25
def generate_data(params, N, rng=(-5, 5)):
    gp = george.GP(0.1 * kernels.ExpSquaredKernel(3.3))
    t = rng[0] + np.diff(rng) * np.sort(
        np.random.rand(N))  # N sorted random points spanning the rng interval
    y = gp.sample(t)
    y += Model(**params).get_value(t)
    yerr = 0.05 + 0.05 * np.random.rand(N)
    y += yerr * np.random.randn(N)
    return t, y, yerr
Example #26
 def create_kernel(self):
     if self.kern == 'SE':
         return self.sigma_f * kernels.ExpSquaredKernel(self.l_param,
                                                        ndim=self.n_dim)
     elif self.kern == 'M32':
         return self.sigma_f * kernels.Matern32Kernel(self.l_param,
                                                      ndim=self.n_dim)
     elif self.kern == 'M52':
         return self.sigma_f * kernels.Matern52Kernel(self.l_param,
                                                      ndim=self.n_dim)
Example #27
    def GP_double_exp_squared(self):
        """ A GP noise model including a double exponenetial squared
        kernel for corellated noise and white noise (jitter term). """

        scaling = self.param["scaling"]

        norm1 = self.param["norm1"]
        length1 = self.param["length1"]

        norm2 = self.param["norm2"]
        length2 = self.param["length2"]

        kernel = (norm1**2 * kernels.ExpSquaredKernel(length1**2) +
                  norm2**2 * kernels.ExpSquaredKernel(length2**2))

        self.gp = george.GP(kernel)
        self.gp.compute(self.x, self.y_err * scaling)

        self.corellated = True
Example #28
def test_strange_hodlr_bug():
    np.random.seed(1234)
    x = np.sort(np.random.uniform(0, 10, 50000))
    yerr = 0.1 * np.ones_like(x)
    y = np.sin(x)

    kernel = np.var(y) * kernels.ExpSquaredKernel(1.0)

    gp_hodlr = george.GP(kernel, solver=HODLRSolver, seed=42)
    n = 200
    gp_hodlr.compute(x[:n], yerr[:n])
    gp_hodlr.log_likelihood(y[:n])
Example #29
    def GP_exp_squared(self, a, b, l):
        """ A GP noise model including an exponenetial squared kernel
        for corellated noise and white noise (jitter term). """

        scaling = a

        norm = b
        length = l

        kernel = norm**2 * kernels.ExpSquaredKernel(length**2)
        self.gp = george.GP(kernel)
        self.gp.compute(self.x, self.y_err * scaling)
 def lnprior(p_var_current):
     """
     """
     # The parameters are stored as a vector of values, so unpack them?
     #CAREFUL OF SIGMA?
     #prior_type= 'uniform'
     #prior_type= 'gaussian-simple'
     prior_type= 'gaussian-with-cov'
     if prior_type=='uniform':
         # We're using only uniform priors (for now - try sparsity!)
         if np.logical_or((p_var_current<-2).any(),(p_var_current>2).any()):
             return -np.inf
         return 0 #prior up to constant -> log up to constant
     if prior_type=='gaussian-simple':
         #sigma_prior= 0.1#key is ratio to sigma in likelihood??
         sigma_prior= 2.0# typical variation in rates. Notes: 1.0 works well, 5.0 not so well - 'wiggly'
         denom= np.power(sigma_prior,2)
         #(la.norm(p_var-np.mean(p_var),ord=penalty_order)**penalty_order)
         #print 'lnprior'
         #print -0.5*(np.power(la.norm(np.divide(p_var_current-p_var0,denom),2),2)+len(p_var0)*np.log(denom*2*np.pi))
         return -0.5*(np.power(la.norm(np.divide(p_var_current-p0[p_var_i],denom),2),2)+len(p_var_i)*np.log(denom*2*np.pi))
         #return -0.5*(np.power(la.norm(np.divide(p_var_current-p_var0,denom),2),2)+len(p_var0)*np.log(denom*2*np.pi))
     if prior_type=='gaussian-with-cov':
         #use 'george' gaussian process package for now. Could do manual or could extend to proper gaussian process.
         #-correlation matrix 2.0 usual.
         #parameter_correlation_length= 2.0
         parameter_correlation_length= 5.0
         kernel = kernels.ExpSquaredKernel(0.5*parameter_correlation_length)
         #kernel = kernels.ExpSquaredKernel(0.5) #0.5 -> correlation length approx. 1 parameter; 2*L.
         gp = george.GP(kernel)
         correlation_matrix= gp.get_matrix(p_var_i)
         #-standard deviations
         sd_decay_region=3 #number of parameters from end over which sd decays.
         #sd_prior= 1.0*np.append(np.ones(len(p_var_i)-sd_decay_region),correlation_matrix[0,1:1+sd_decay_region]) #correlation_matrix[0,:]#
         sd_prior= 0.5*np.append(np.ones(len(p_var_i)-sd_decay_region),correlation_matrix[0,1:1+sd_decay_region]) #correlation_matrix[0,:]#
         #sd_prior= 1.0*np.append(np.ones(len(p_var_i)-1),0.3) #correlation_matrix[0,:]#
         var_prior= np.outer(sd_prior,sd_prior)
         sigma= var_prior*correlation_matrix #note: element-wise product
         #sd=2.0
         #sigma= sd*gp.get_matrix(p_var_i)
         dist_dim = float(len(p_var_i))
         det = np.linalg.det(sigma)
         if det == 0:
             raise NameError("The covariance matrix can't be singular")
         #note: calculation is in log form.
         norm_const1 = -0.5*dist_dim*np.log(2.*np.pi)
         norm_const2 = -0.5*np.log(det)
         err = p_var_current-p0[p_var_i]
         #print 'here 2'
         #print err
         numerator = -0.5*np.dot(err,np.dot(np.linalg.inv(sigma),err))
         return norm_const1+norm_const2+numerator