Example 1
def create_gp(params):
    # GP parameters
    s2 = np.exp(params["log_s2"])
    taux = np.exp(params["log_taux"])
    tauy = np.exp(params["log_tauy"])

    gpx = GP(s2 * ExpSquaredKernel(taux))
    gpy = GP(s2 * ExpSquaredKernel(tauy))

    return gpx, gpy
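
A minimal usage sketch for create_gp above; the imports, parameter values, and time grid are illustrative assumptions, not part of the original:

import numpy as np
from george import GP
from george.kernels import ExpSquaredKernel

# Hypothetical log-parameters; create_gp exponentiates them internally.
params = {"log_s2": 0.0, "log_taux": np.log(2.0), "log_tauy": np.log(5.0)}
gpx, gpy = create_gp(params)

# Factorize each GP on a time grid before evaluating likelihoods or predictions.
t = np.linspace(0, 10, 50)
gpx.compute(t, yerr=1e-3 * np.ones_like(t))
gpy.compute(t, yerr=1e-3 * np.ones_like(t))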
Example 2
    def ip_model(self):

        gpparams = np.array([1.0, 1.0, 80.0, 1.0, 3.0])
        kernel = (
            gpparams[0]**2 * ExpSquaredKernel(gpparams[2], ndim=3, dim=2) *
            ExpSquaredKernel(gpparams[3], ndim=3, dim=1) +
            gpparams[1]**2 * ExpSquaredKernel(gpparams[4], ndim=3, dim=0)
        )  # this line would be the inside-order model.
        # Need to think about what this would look like.

        gp = george.GP(kernel)
Example 3
def make_plots(id, RESULTS_DIR="/Users/ruthangus/projects/GProtation/code/" \
               "results_acfprior_03_10"):
    """
    Make a plot of the fit to the light curve and the posteriors.
    """
    """ load lc """
    x, y = load_suzanne_lcs(id)
    yerr = np.ones(len(y)) * 1e-5
    m = x < 100
    x, y, yerr = x[m], y[m], yerr[m]
    """ load posteriors """
    fn = os.path.join(RESULTS_DIR, "{}.h5".format(id))
    df = pd.read_hdf(fn, key="samples")
    """ find medians """
    theta = [np.median(df.iloc[:, i]) for i in range(5)]
    """ fit GP """
    print(np.exp(theta[-1]), "period")
    k = theta[0] * ExpSquaredKernel(theta[1]) \
        * ExpSine2Kernel(theta[2], theta[4]) + WhiteKernel(theta[3])
    gp = george.GP(k, solver=george.HODLRSolver)
    gp.compute(x - x[0], yerr)
    xs = np.linspace((x - x[0])[0], (x - x[0])[-1], 1000)
    mu, cov = gp.predict(y, xs)
    """ plot fit """
    plt.clf()
    plt.plot(x, y, "k.")
    plt.plot(xs, mu)
    plt.xlim(0, 100)
    # v = np.std(y)
    # plt.ylim(-10*v, 10*v)
    plt.savefig("{}_fit".format(id))
Example 4
def george_example(seed=None, ndata=10):
    if seed is not None:
        np.random.seed(seed)

    # Generate some fake noisy data.
    x = 10 * np.sort(np.random.rand(ndata))
    yerr = 0.2 * np.ones_like(x)
    y = np.sin(x) + yerr * np.random.randn(len(x))

    # Set up the Gaussian process.
    kernel = ExpSquaredKernel(1.0)
    gp = george.GP(kernel)

    # Pre-compute the factorization of the matrix.
    gp.compute(x, yerr)

    # Compute the log likelihood.
    print(gp.lnlikelihood(y))

    t = np.linspace(0, 10, 500)
    mu, cov = gp.predict(y, t)
    #std = np.sqrt(np.diag(cov))

    realy = np.sin(t)
    fig = plot_gp_stuff(t, mu, cov, realy, x, y, yerr)
    return fig
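
The plot_gp_stuff helper used above is not shown in this snippet. A plausible stand-in, assuming it shades a one-sigma predictive band, might look like the following (the implementation is a guess, not the original helper):

import numpy as np
import matplotlib.pyplot as plt

def plot_gp_stuff(t, mu, cov, realy, x, y, yerr):
    # Hypothetical stand-in for the undefined helper referenced above.
    std = np.sqrt(np.diag(cov))
    fig, ax = plt.subplots()
    ax.fill_between(t, mu - std, mu + std, color="k", alpha=0.2)  # 1-sigma band
    ax.plot(t, mu, "k", label="GP mean")
    ax.plot(t, realy, "--", color="gray", label="truth")
    ax.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
    ax.legend()
    return fig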
Example 5
    def __init__(self, parameters, redshifts, k, power_spectra,
                 number_of_principle_components=6, kernel=None):
        parameters = np.array(parameters)
        redshifts = np.array(redshifts)
        k = np.array(k)
        power_spectra = np.array(power_spectra)

        if parameters.ndim != 2:
            raise Exception("Parameters must be 2D array.")
        if power_spectra.ndim != 2:
            raise Exception("Power spectra must be a 2D array of dimensions "+
                            "N_parameters x (N_k*N_z).")
        if len(parameters) != len(power_spectra):
            raise Exception("Power spectra must be a 2D array of dimensions "+
                            "N_parameters x (N_k*N_z).")
        if len(redshifts)*len(k) != len(power_spectra[0]):
            raise Exception("Power spectra must be a 2D array of dimensions "+
                            "N_parameters x (N_k*N_z).")
        
        self.parameters = parameters
        self.redshifts = redshifts
        self.k = k
        self.power_spectra = power_spectra
        self.Npars = len(self.parameters[0])

        self.NPC = number_of_principle_components
        metric_guess = np.std(self.parameters, 0)
        if kernel is None:
            kernel = 1.*ExpSquaredKernel(metric=metric_guess, ndim=self.Npars)
        self.kernel = kernel
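
A hedged construction sketch for the constructor above, mainly to show the expected array shapes; the class name Emulator and all values below are hypothetical:

import numpy as np

Nsamples, Npars, Nz, Nk = 20, 3, 4, 10
parameters = np.random.rand(Nsamples, Npars)             # one parameter vector per training sample
redshifts = np.linspace(0.0, 3.0, Nz)
k = np.logspace(-2, 0, Nk)
power_spectra = np.random.rand(Nsamples, Nz * Nk)        # N_parameters x (N_k*N_z), as the checks require
emu = Emulator(parameters, redshifts, k, power_spectra)  # hypothetical enclosing class name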
Example 6
def initialize_emulator(am_param_train, wprp_train, wprp_err):
    """
	Given a set of abundance matching parameters and projectedtwo point 
	correlation functions to use for training, build an emulator.

	Parameters:
		am_param_train: A numpy array containing the abundance matching 
			parameters for each training point. This should have dimensions
			(n_training points x nun_params)
		wprp_train: The projected two point correlation function for each 
			set of abundance matching parameters

	Returns:
		An emulator initialized to the training points provided
	"""
    # Get the number of parameters for your abundance matching model
    n_am_params = am_param_train.shape[1]
    # Randomly initialize our emulator parameters. The exact number of
    # parameters required depends on the kernel.
    em_vec = np.random.rand(n_am_params + 2)

    sf = em_vec[0]
    sx = em_vec[-1]
    # This kernel was suggested by Sean. Will likely have to experiment with
    # different kernel varieties.
    kernel = sf * ExpSquaredKernel(em_vec[1:n_am_params + 1],
                                   ndim=n_am_params) + sx
    emulator = george.GP(kernel, mean=np.mean(wprp_train))
    emulator.compute(am_param_train, yerr=wprp_err)
    return emulator
Example 7
def multilnlike(theta, x1, x2, x3, x4, y1, y2, y3, y4,
                yerr1, yerr2, yerr3, yerr4, p):
    lnlike = []
    theta = np.exp(theta)
    k = theta[0] * ExpSquaredKernel(theta[1]) * ExpSine2Kernel(theta[2], p)
    gp = george.GP(k)
    try:
        gp.compute(x1, np.sqrt(theta[3]+yerr1**2))
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    lnlike.append(-gp.lnlikelihood(y1, quiet=True))
    try:
        gp.compute(x2, np.sqrt(theta[4]+yerr2**2))
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    lnlike.append(-gp.lnlikelihood(y2, quiet=True))
    try:
        gp.compute(x3, np.sqrt(theta[5]+yerr3**2))
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    lnlike.append(-gp.lnlikelihood(y3, quiet=True))
    try:
        gp.compute(x4, np.sqrt(theta[6]+yerr4**2))
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    lnlike.append(-gp.lnlikelihood(y4, quiet=True))
    return np.logaddexp.reduce(np.array(lnlike), axis=0)
Example 8
def neglnlike(theta, x, y, yerr):
    theta = np.exp(theta)
    k = theta[0] * ExpSine2Kernel(theta[2], theta[1]) * ExpSquaredKernel(
        theta[3])
    gp = george.GaussianProcess(k)
    gp.compute(x, (theta[4] * yerr**2))
    return -gp.lnlikelihood(y)
Example 9
    def gp_kernel(self, theta):
        A = np.exp(theta[0])
        l = np.exp(theta[1])
        G = np.exp(theta[2])
        sigma = np.exp(theta[3])
        P = np.exp(theta[4])
        return A * ExpSquaredKernel(l) * ExpSine2Kernel(G, P) + WhiteKernel(sigma)
Example 10
def ret_product(params, time, flux, yerr):
    (period, T0, rprs, impact, noiseA1, noiseW1, noiseA2, noiseW2,
     noiseM2) = params
    M = LCModel()
    M.add_star(rho=0.0073, ld1=0.5, ld2=0.4)
    M.add_planet(T0=T0, period=period, impact=impact, rprs=rprs)
    M.add_data(time=time)
    resid = flux - M.transitmodel
    kernel = (((noiseA1 * ExpSquaredKernel(noiseW1)) *
               (noiseA2 * ExpSquaredKernel(noiseW2))) + noiseM2)
    gp = george.GaussianProcess(kernel)
    lnlike = 0.
    for i in np.arange(len(time) // 1000)[0:10]:
        section = np.arange(i * 1000, i * 1000 + 1000)
        gp.compute(time[section], yerr[section])
        lnlike += gp.lnlikelihood(resid[section])
    return -lnlike
Example 11
    def train(self, df, t):
        x, y, z = sph_to_xyz(df['station.latitude'].values, df['station.longitude'].values)
        stdev = 0.237 - 0.170 * df.cs

        kernel = 0.0809**2 * Matern52Kernel(0.0648, ndim=3) + 0.169**2 * ExpSquaredKernel(0.481, ndim=3)
        self.gp = george.GP(kernel)
        self.gp.compute(np.column_stack((x,y,z)), stdev + 1e-3)
        self.z = t
Example 12
def neglnlike(theta, x, y, yerr, p):
    theta = np.exp(theta)
    k = theta[0] * ExpSquaredKernel(theta[1]) * ExpSine2Kernel(theta[2], p)
    gp = george.GP(k)
    try:
        gp.compute(x, np.sqrt(theta[3]+yerr**2))
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    return -gp.lnlikelihood(y, quiet=True)
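
A common way to use a negative log-likelihood like this is to minimize it over the log-parameters. The sketch below assumes the george imports from the other examples; the fake data and starting point are illustrative assumptions:

import numpy as np
import scipy.optimize as spo

# Illustrative fake data, not from the original.
rng = np.random.RandomState(42)
p = 2.5                                    # fixed period passed through to ExpSine2Kernel
x = np.sort(10 * rng.rand(50))
y = np.sin(2 * np.pi * x / p) + 0.1 * rng.randn(50)
yerr = 0.1 * np.ones_like(x)

theta0 = np.log([1.0, 1.0, 1.0, 1e-4])     # log-space start, since neglnlike exponentiates theta
result = spo.minimize(neglnlike, theta0, args=(x, y, yerr, p), method="Nelder-Mead")
print(np.exp(result.x))                    # best-fit hyperparameters in linear space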
Example 13
def lnlike(theta, x, y, yerr):
    theta = np.exp(theta)
    k = theta[0] * ExpSine2Kernel(theta[2], theta[1]) * ExpSquaredKernel(
        theta[3])
    gp = george.GaussianProcess(k)
    #     j2 = np.exp(2)*theta[4]
    #     gp.compute(x, np.sqrt(yerr**2 + j2))
    gp.compute(x, (theta[4] * yerr**2))
    return gp.lnlikelihood(y)
Example 14
def predict(xs, x, y, yerr, theta):
    theta = np.exp(theta)
    k = theta[0] * ExpSine2Kernel(theta[2], theta[1]) * ExpSquaredKernel(
        theta[3])
    gp = george.GaussianProcess(k)
    #     j2 = np.exp(2)*theta[4]
    #     gp.compute(x, np.sqrt(yerr**2 + j2))
    gp.compute(x, (theta[4] * yerr**2))
    return gp.predict(y, xs)
Example 15
def make_plot(sampler,
              x,
              y,
              yerr,
              ID,
              DIR,
              traces=False,
              tri=False,
              prediction=True):

    nwalkers, nsteps, ndims = np.shape(sampler)
    flat = np.reshape(sampler, (nwalkers * nsteps, ndims))
    mcmc_result = map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                      zip(*np.percentile(flat, [16, 50, 84], axis=0)))
    mcmc_result = np.array([i[0] for i in mcmc_result])
    print("\n", np.exp(np.array(mcmc_result[-1])), "period (days)", "\n")
    print(mcmc_result)
    np.savetxt("%s/%s_result.txt" % (DIR, ID), mcmc_result)

    fig_labels = ["A", "l", "G", "s", "P"]

    if traces:
        print("Plotting traces")
        for i in range(ndims):
            plt.clf()
            plt.plot(sampler[:, :, i].T, 'k-', alpha=0.3)
            plt.ylabel(fig_labels[i])
            plt.savefig("%s/%s_%s.png" % (DIR, ID, fig_labels[i]))

    if tri:
        print("Making triangle plot")
        flat[:, -1] = np.exp(flat[:, -1])
        try:
            fig = corner.corner(flat, labels=fig_labels)
        except:
            fig = triangle.corner(flat, labels=fig_labels)
        fig.savefig("%s/%s_triangle" % (DIR, ID))
        print("%s/%s_triangle.png" % (DIR, ID))

    if prediction:
        print("plotting prediction")
        theta = np.exp(np.array(mcmc_result))
        k = theta[0] * ExpSquaredKernel(theta[1]) \
                * ExpSine2Kernel(theta[2], theta[4])
        gp = george.GP(k, solver=george.HODLRSolver)
        gp.compute(x, yerr)
        xs = np.linspace(x[0], x[-1], 1000)
        mu, cov = gp.predict(y, xs)
        plt.clf()
        plt.errorbar(x - x[0], y, yerr=yerr, **reb)
        plt.xlabel(r"$\mathrm{Time~(days)}$")
        plt.ylabel(r"$\mathrm{Normalised~Flux}$")
        plt.plot(xs, mu, color=cols.lightblue)
        plt.xlim(min(x), max(x))
        plt.savefig("%s/%s_prediction" % (DIR, ID))
        print("%s/%s_prediction.png" % (DIR, ID))
Example 16
def MCMC(theta, x, y, yerr, fname, burn_in, nsteps, nruns):

    # calculate initial likelihood and plot initial hparams
    xs = np.linspace(min(x), max(x), 1000)
    k = theta[0] * ExpSquaredKernel(theta[1]) * ExpSine2Kernel(theta[2], theta[4])
    k += WhiteKernel(theta[3])
    gp = george.GP(k)
    print('initial lnlike = ', lnlike(theta, x, y, yerr))
    mu, cov = predict(theta, xs, x, y, yerr)
    plt.clf()
    plt.errorbar(x, y, yerr=yerr, fmt='k.', capsize=0)
    plt.plot(xs, mu, 'r')
    std = np.sqrt(np.diag(cov))
#     plt.fill_between(mu-std, mu+std, color='r', alpha='.5')
    plt.savefig('%s_init' % fname)

    # setup sampler
    nwalkers, ndim = 32, len(theta)
    p0 = [theta+1e-4*np.random.rand(ndim) for i in range(nwalkers)]
    args = [x, y, yerr]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=args)

    print("Burning in...")
    p0, lp, state = sampler.run_mcmc(p0, burn_in)
    sampler.reset()

    for i in range(nruns):

        print('Running... ', i)
        p0, lp, state = sampler.run_mcmc(p0, nsteps)

        # results
        samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
        # list() so the result can be used twice under Python 3
        mcmc_result = list(map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                               zip(*np.percentile(samples, [16, 50, 84], axis=0))))
        mres = np.array(mcmc_result)[:, 0]
        print('mcmc_result = ', np.exp(mres))
        np.savetxt("parameters_%s.txt" % fname, np.array(mcmc_result))

        print("saving samples")
        f = h5py.File("samples%s" % fname, "w")
        data = f.create_dataset("samples", np.shape(sampler.chain))
        data[:,:] = np.array(sampler.chain)
        f.close()

    # make triangle plot
    fig_labels = ["$A$", "$l1$", "$l2$", "$wn$", "$P$"]
    fig = triangle.corner(samples, truths=mres, labels=fig_labels)
    fig.savefig("triangle_%s.png" % fname)

    # plot result
    mu, cov = predict(mres, xs, x, y, yerr)
    plt.clf()
    plt.errorbar(x, y, yerr=yerr, fmt='k.', capsize=0)
    plt.plot(xs, mu, 'r')
    plt.savefig('%s_final' % fname)
Example 17
def lnlike(theta, x, y, yerr):
    theta = np.exp(theta)
    k = theta[0] * ExpSquaredKernel(theta[1]) \
        * ExpSine2Kernel(theta[2], theta[4]) + WhiteKernel(theta[3])
    gp = george.GP(k, solver=george.HODLRSolver)
    try:
        gp.compute(x, np.sqrt(theta[3]+yerr**2))
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    return gp.lnlikelihood(y, quiet=True)
Example 18
    def __init__(
        self,
        parameters,
        covariance_matrices,
        NPC_D=1,
        NPC_L=1,
        kernel_D=None,
        kernel_lp=None,
    ):
        Cs = np.atleast_3d(covariance_matrices)
        self.N = Cs.shape[0]
        parameters = np.atleast_2d(parameters).reshape(self.N, -1)
        self.Npars = len(parameters[0])
        assert len(parameters) == len(Cs), f"{parameters.shape} vs {Cs.shape}"
        assert parameters.ndim == 2, parameters.ndim
        assert Cs.ndim == 3, Cs.ndim
        msg = "all covariances must have the same dimension"
        assert all(len(C) == len(C[0]) for C in Cs), msg

        # Save all attributes
        self.NPC_D = NPC_D
        self.NPC_L = NPC_L
        self.covariance_matrices = Cs
        self.parameters = parameters

        # Create kernels for the emulator
        metric_guess = np.std(self.parameters, 0)
        self.kernel_D = kernel_D or 1.0 * ExpSquaredKernel(
            metric=metric_guess, ndim=self.Npars
        )
        self.kernel_lp = kernel_lp or 1.0 * ExpSquaredKernel(
            metric=metric_guess, ndim=self.Npars
        )

        # Call methods that start to build the emulator
        self.breakdown_matrices()
        self.create_training_data()
        self.build_emulator()
        self.train_emulator()
Example 19
def main():
    print("Running fit")
    param = float(sys.argv[1])
    kernel = ExpSquaredKernel(param)

    gp = george.GP(kernel)
    lc = np.loadtxt('../files/SN13689_data.dat')
    ph, mag, magerr = lc[:, 0], lc[:, 1], lc[:, 2]
    magerr /= max(mag)
    mag = mag / max(mag)

    gp.compute(ph, magerr)

    t = np.linspace(ph.min(), ph.max(), 500)

    def lnprob(p):
        if np.any((-10 > p) + (p > 10)):
            return -np.inf
        lnprior = float(sys.argv[2])
        kernel.pars = np.exp(p)
        return lnprior + gp.lnlikelihood(mag, quiet=True)
    # set up the sampler
    nwalkers, ndim = 10, len(kernel)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
   

    # initialise the walkers
    p0 = [np.log(kernel.pars) + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
    print("Running burn-in")
    st = time()
    p0, _, _ = sampler.run_mcmc(p0, 2000)
    end = time()
    print("It took", end - st, "seconds to burn in")
    print("Running production chain")

    sampler.run_mcmc(p0, 2000)
    prod = time()
    print('it took', prod - st, 'seconds')
    param_arr = []
    for i in range(50):
        w = np.random.randint(sampler.chain.shape[0])
        n = np.random.randint(2000, sampler.chain.shape[1])

        gp.kernel.pars = np.exp(sampler.chain[w, n])
        # param_arr.append(gp.kernel.value)
        plt.errorbar(ph, mag, magerr, fmt='ro')
        plt.plot(t, gp.sample_conditional(mag, t), "k", alpha=0.3)

    print('The kernel parameter is:', gp.kernel.value)
    plt.savefig('../img/gaussfit_margin_' + sys.argv[1] + '_' + sys.argv[2] + '.png')
    plt.show()
Example 20
def multilnlike_emcee_comb(theta, x, y, yerr):

    yerr[:121] = np.sqrt(theta[3]+yerr[:121]**2)
    yerr[122:304] = np.sqrt(theta[4]+yerr[122:304]**2)
    yerr[305:332] = np.sqrt(theta[5]+yerr[305:332]**2)
    yerr[333:] = np.sqrt(theta[6]+yerr[333:]**2)

    theta = np.exp(theta)
    k = theta[0] * ExpSquaredKernel(theta[1]) * ExpSine2Kernel(theta[2], theta[7])
    gp = george.GP(k)
    try:
        gp.compute(x, yerr)
    except (ValueError, np.linalg.LinAlgError):
        return 10e25
    return gp.lnlikelihood(y, quiet=True)
Example 21
    def __init__(self, lc, dist_factor=10.0, time_factor=0.1, matern=False):
        self.time = lc.time
        self.flux = lc.flux - 1.0
        self.ferr = lc.ferr

        # Convert to parts per thousand.
        self.flux *= 1e3
        self.ferr *= 1e3

        # Hackishly build a kernel.
        tau = np.median(np.diff(self.time)) * integrated_time(self.flux)
        tau = max(0.1, tau)  # Tau should be floored.
        amp = np.median((self.flux - np.median(self.flux))**2)
        self.kernel = amp * ExpSquaredKernel(tau**2)
        self.gp = george.GP(self.kernel, solver=george.HODLRSolver)
        self.gp.compute(self.time, self.ferr, seed=1234)

        # Compute the likelihood of the null model.
        self.ll0 = self.lnlike()
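
For reference, the names used in this constructor plausibly come from imports like these (an assumption inferred from the call signatures; they are not shown in the snippet):

import numpy as np
import george
from george.kernels import ExpSquaredKernel
from emcee.autocorr import integrated_time  # autocorrelation-time estimate used to set tau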
Example 22
def sample_gp_function(t, variance=1000, seed=0):
    """
    Sample a function from GP with zeros on the boundary.
    :param t:           an array of time points; left and right values will be zero
    :param variance:   variance of the kernel
    :param seed:        random seed of the sampler (=of numpy)
    :return: a sample from a GP at points t, the first and the last values are zero
    """
    np.random.seed(seed)

    period = len(t)
    kernel = ExpSquaredKernel(variance)
    gp = george.GP(kernel)
    gp.compute(np.array([0, period - 1]))

    target = np.zeros(len(t))
    target[1:-1] = gp.sample_conditional(np.array([0, 0]), t[1:-1])

    np.random.seed(None)
    return target
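
A short usage sketch for sample_gp_function; the grid, seed, and plotting calls are assumptions for illustration:

import numpy as np
import matplotlib.pyplot as plt

t = np.arange(200)
f = sample_gp_function(t, variance=1000, seed=3)
assert f[0] == 0.0 and f[-1] == 0.0  # the boundary values are pinned to zero

plt.plot(t, f)
plt.savefig("gp_sample.png")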
Example 23
    def __init__(self, Filename):
        '''
        This is the constructor of the class. By calling the constructor, the emissions will be read from file 
        (filling the EmissionRec) and the parameters will be read from the parameter file.
        :param Filename: path and filename of the parameter file.
        '''
        #print("A simple climate model was born")
        # get emissions normalizations
        #self.CO2_norm = theta[1]
        #self.CH4_norm = theta[2]
        #self.N2O_norm = theta[3]
        #self.SOx_norm = theta[4]

        self._ReadParameters(Filename)
        # get start and end year of simulation
        self.startYr = int(self._GetParameter('Start year'))
        self.endYr = int(self._GetParameter('End year'))
        self.yrs = np.arange(self.startYr, self.endYr + 1)
        fileload = get_example_data_file_path('EmissionsForSCM.dat',
                                              data_dir='pySCM')
        self.emissions = self._ReadEmissions(self._GetParameter(fileload))
        self.simYears = int(
            self._GetParameter('Years to evaluate response functions'))
        self.oceanMLDepth = float(
            self._GetParameter('Ocean mixed layer depth [in meters]'))

        self.ems_CO2 = self.emissions['CO2']
        self.ems_CH4 = self.emissions['CH4']
        self.ems_N2O = self.emissions['N2O']
        self.ems_SOx = self.emissions['SOx']

        self.ems_CO2_err = np.ones_like(self.emissions['CO2']) * 0.75
        self.ems_CH4_err = np.ones_like(self.emissions['CH4']) * 0.75
        self.ems_N2O_err = np.ones_like(self.emissions['N2O']) * 0.75
        self.ems_SOx_err = np.ones_like(self.emissions['SOx']) * 0.75

        # set up gaussian process
        self.kernel = ExpSquaredKernel(1.0)
        self.gp = george.GP(self.kernel)
Example 24
def plot_lc(koi):
    """
    Make demo plot of a light curve.
    """

    # Load the data
    print(LC_DIR)
    x, y, yerr = kd.load_kepler_data(LC_DIR)
    x -= x[0]
    m = x < 500
    x, y, yerr = x[m], y[m], yerr[m]

    # Load the posterior samples.
    df = pd.read_hdf(os.path.join(DATA_DIR, "KOI-{}.h5".format(int(koi))),
                     key="samples")
    a = np.exp(MAP(df.ln_A.values))
    l = np.exp(MAP(df.ln_l.values))
    g = np.exp(MAP(df.ln_G.values))
    s = np.exp(MAP(df.ln_sigma.values))
    p = np.exp(MAP(df.ln_period.values))
    print("ln(a) = ", np.log(a), "ln(l) = ", np.log(l), "ln(G) = ", np.log(g),
          "ln(s) = ", np.log(s), "ln(p) = ", np.log(p), "p = ", p)

    xs = np.linspace(min(x), max(x), 500)
    k = a * ExpSquaredKernel(l) \
        * ExpSine2Kernel(g, p) + WhiteKernel(s)
    gp = george.GP(k)
    gp.compute(x, yerr)
    mu, cov = gp.predict(y, xs)

    plt.clf()
    plt.plot(x, y, "k.")
    plt.plot(xs, mu, color="CornFlowerBlue")
    plt.xlabel(r"$\mathrm{Time~(days)}$")
    plt.ylabel(r"$\mathrm{Normalised~flux}$")
    plt.subplots_adjust(left=.18)
    plt.savefig(os.path.join(FIG_DIR, "koi_lc_demo.pdf"))
Example 25
def gp_estimate(x, x_data, y_data, y_err, kernel_value=1.0):
    """
    Use a simple george GP (exponential-squared kernel) to estimate values 
    at points x given data at points (x_data, y_data).
    
    Parameters
    ----------
    x: NumPy Array
        x-coordinates to get predicted values at
    x_data: NumPy Array
        x-coordiantes of the data points
    y_data: NumPy Array
        y-coordinates of the data points (same length as x_data)
    y_err: float NumPy Array
        error on the y values of the data points
        if float, use the same error for all points
        if array, needs to be the same length as x_data and y_data
        
    Returns
    -------
    NumPy Array:
        predicted values (same shape as x)
    NumPy Array:
        covaraince matrix on the predicted points (NxN if x has N points)
    """
    if not isIterable(y_err):
        y_err = np.full_like(y_data, y_err)

    kernel = ExpSquaredKernel(kernel_value)
    gp = george.GP(kernel)
    gp.compute(x_data, y_err)

    print('GP log likelihood is {}'.format(gp.lnlikelihood(y_data)))

    mu, cov = gp.predict(y_data, x)
    return mu, cov
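
And a minimal call of gp_estimate, assuming the isIterable helper from the source module is available; the data values are illustrative:

import numpy as np

x_data = np.linspace(0, 10, 20)
y_data = np.cos(x_data)
x = np.linspace(0, 10, 200)

mu, cov = gp_estimate(x, x_data, y_data, y_err=0.05)
std = np.sqrt(np.diag(cov))  # 1-sigma uncertainty from the covariance diagonal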
Example 26
def Kernel(hyperparams):
    return hyperparams[0] * ExpSquaredKernel(hyperparams[1])
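
Minimal illustrative use of this factory, assuming george is imported; the hyperparameter convention is (amplitude, squared-exponential metric) and the values are made up:

import george

k = Kernel([2.0, 0.5])  # 2.0 * ExpSquaredKernel(0.5)
gp = george.GP(k)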
Example 27
import os
import sys
import urllib.request, json
import pandas as pd
import numpy as np
import george
from george.kernels import Matern32Kernel, Matern52Kernel, ExpSquaredKernel, ConstantKernel
from cs import cs_to_stdev, stdev_to_cs
import scipy.optimize as op
from multiprocessing import Pool

NSAMPLES = 1000

kernel = 0.0809**2 * Matern52Kernel(0.0648, ndim=3) + 0.169**2 * ExpSquaredKernel(0.481, ndim=3)

def get_data(url):
    with urllib.request.urlopen(url) as res:
        data = json.loads(res.read().decode())

    return data

def get_stations():
    data = get_data('http://localhost:%s/stations.json' % (os.getenv('API_PORT')))
    st = {}
    for row in data:
        station = row['station']
        st[station['id']] = { 'latitude': float(station['latitude']), 'longitude': float(station['longitude']) }

    return st

def sph_to_xyz(lat, lon):
Example 28
from __future__ import division, print_function

import os
import sys
import numpy as np
import matplotlib.pyplot as pl

#d = os.path.dirname
#sys.path.insert(0, d(d(os.path.abspath(__file__))))  # was meant to add the parent dir to sys.path; doesn't work
import george
from george.kernels import ExpSquaredKernel

np.random.seed(12345)

kernel = ExpSquaredKernel([3, 0.5], ndim=2)
#gp = george.HODLRGP(kernel, tol=1e-10)
gp = george.GP(kernel, solver=george.HODLRSolver, tol=1e-10)  # see the george tutorials

x, y = np.linspace(-5, 5, 62), np.linspace(-5, 5, 60)  # define a work plane over [-5, 5] x [-5, 5]
x, y = np.meshgrid(x, y, indexing="ij")  # expand to a 62x60 grid with matrix-style ("ij") indexing
shape = x.shape  # grid dimensions: (62, 60)
samples = np.vstack((x.flatten(), y.flatten())).T  # flatten the grids and stack into a (3720, 2) array of points
gp.compute(samples, 1e-4*np.ones(len(samples)), sort=False)  # factorize, with a constant yerr of 1e-4 per sample

print(len(samples))  # number of sample points (3720)
i = george.utils.nd_sort_samples(samples)  # indices that sort the samples for the solver

img = gp.get_matrix(samples[i])  # rebuild the kernel matrix from the sorted samples
pl.imshow(img, cmap="gray", interpolation="nearest")  # grayscale image of the kernel matrix
pl.gca().set_xticklabels([])
Example 29
import numpy as np
import matplotlib.pyplot as pl

import george
from george.kernels import ExpSquaredKernel

np.random.seed(1234)

# Generate some fake noisy data.
x = 10 * np.sort(np.random.rand(10))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))

# Set up the Gaussian process.
kernel = ExpSquaredKernel(1.0)
gp = george.GP(kernel)

# Pre-compute the factorization of the matrix.
gp.compute(x, yerr)

# Compute the log likelihood.
print(gp.lnlikelihood(y))

# Compute the predictive conditional distribution.
t = np.linspace(0, 10, 500)
mu, cov = gp.predict(y, t)
std = np.sqrt(np.diag(cov))

pl.fill_between(t, mu + std, mu - std, color="k", alpha=0.1)
pl.plot(t, mu + std, color="k", alpha=1, lw=0.25)
Example 30
g2 = hf.get('cell list')
celllist = np.array(g2.get('cells in group'))
#for i in range(celllist.shape[0]):
#    print(i, " ", celllist[i][0], " ", celllist[i][1])
goodcols = np.array(g2.get('cells with good NIRAMS data'))
g3 = hf.get('emulator')
kertype = np.array(g3.get('Kernel type'))
if (kertype != 'ExpSquared'):
    print("Unrecognized kernel type, exiting!")
    exit()
kerpar = np.array(g3.get('Kernel parameters'))
groupav = np.array(g3.get('group average'))
x = np.array(g3.get('x'))
y = np.array(g3.get('y'))
yerr = np.array(g3.get('yerr'))
kernel = 100.0 * ExpSquaredKernel([1.08, 0.0015, 0.00005, 0.15], ndim=4)
gp = george.GP(kernel)
gp.kernel.set_parameter_vector(kerpar)
#print x
#print gp.kernel.get_parameter_vector()
#exit()
ct = 0
while (~(goodcols[ct])):
    ct += 1
gp.compute(x, yerr[:, ct])
mu, cov = gp.predict(groupav, x)
#print np.linalg.inv(cov)
g4 = hf.get('calibration')
obserror = np.array(g4.get('observation error'))
postsamps = np.array(g4.get('posterior samples'))
postsamps = postsamps[72000:, :]