def load_sigma_gp(self):
    dirname = os.path.dirname(os.path.abspath(__file__))
    self.cosmos = np.loadtxt(dirname + '/../data/cparams_4d.dat')
    self.ydata = np.loadtxt(dirname + '/../learned_data/sigmaM/coeff_all.dat')
    self.eigdata = np.loadtxt(dirname + '/../learned_data/sigmaM/pca_eigvec.dat')
    self.ymean = np.loadtxt(dirname + '/../learned_data/sigmaM/pca_mean.dat')
    self.ystd = np.loadtxt(dirname + '/../learned_data/sigmaM/pca_std.dat')
    self.yavg = np.loadtxt(dirname + '/../learned_data/sigmaM/pca_avg.dat')
    self.gp_params = np.loadtxt(dirname + '/../learned_data/sigmaM/gp_params.dat')
    self.ktypes = np.loadtxt(dirname + '/../learned_data/sigmaM/ktypes.dat')
    self.gps = []
    for i in range(4):
        if self.ktypes[i] == 10:
            kernel = 1. * kernels.Matern52Kernel(np.ones(4), ndim=4) \
                + kernels.ConstantKernel(1e-4, ndim=4)
        elif self.ktypes[i] == 6:
            kernel = 1. * kernels.ExpSquaredKernel(np.ones(4), ndim=4) \
                + kernels.ConstantKernel(1e-4, ndim=4)
        else:
            # raise instead of print: otherwise `kernel` would be unbound (or stale)
            raise ValueError('kernel types 6 and 10 are the only supported types.')
        gp = george.GP(kernel)
        gp.compute(self.cosmos[:800])
        gp.set_parameter_vector(self.gp_params[i])
        self.gps.append(gp)
def __init__(self):
    print('Initialize sigma_d emulator')
    dirname = os.path.dirname(os.path.abspath(__file__))
    self.cosmos = np.loadtxt(dirname + '/../data/cparams_4d.dat')
    self.ydata = np.loadtxt(dirname + '/../learned_data/sigmad/coeff_all.dat')
    self.yavg = np.loadtxt(dirname + '/../learned_data/sigmad/sigd_avg.dat')
    self.ystd = np.loadtxt(dirname + '/../learned_data/sigmad/sigd_std.dat')
    self.gp_params = np.loadtxt(dirname + '/../learned_data/sigmad/gp_params.dat')
    self.ktypes = np.loadtxt(dirname + '/../learned_data/sigmad/ktypes.dat')
    if self.ktypes == 10:
        kernel = 1. * kernels.Matern52Kernel(np.ones(4), ndim=4) \
            + kernels.ConstantKernel(1e-4, ndim=4)
    elif self.ktypes == 6:
        kernel = 1. * kernels.ExpSquaredKernel(np.ones(4), ndim=4) \
            + kernels.ConstantKernel(1e-4, ndim=4)
    else:
        # raise instead of print: otherwise `kernel` would be unbound below
        raise ValueError('kernel types 6 and 10 are the only supported types.')
    self.gp = george.GP(kernel)
    self.gp.compute(self.cosmos[:800])
    self.gp.set_parameter_vector(self.gp_params)
    self.As_fid = np.exp(3.094)
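# A hedged usage sketch for the emulator above: the function name `get_sigmad`
# and the de-whitening step with ystd/yavg are assumptions based on the
# attributes loaded in __init__, not a documented API.
def get_sigmad(emu, cparams):
    # the GP was trained on whitened targets, so undo the normalization
    mu = emu.gp.predict(emu.ydata, np.atleast_2d(cparams), return_cov=False)
    return mu[0] * emu.ystd + emu.yavg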
def get_kernel(self, p0):
    if george.__version__ == '0.3.1':
        p0 = np.exp(p0)
    k1 = kernels.ExpSquaredKernel(p0, ndim=len(p0))
    k2 = kernels.Matern32Kernel(p0, ndim=len(p0))
    k3 = kernels.ConstantKernel(0.1, ndim=len(p0))
    # k4 = kernels.WhiteKernel(0.1, ndim=len(p0))
    k5 = kernels.ConstantKernel(0.1, ndim=len(p0))
    kernel_dict = {
        'M32ExpConst': k1 * k5 + k2,
        'M32ExpConst2': k1 * k5 + k2 + k3,
        'M32Const': k2 + k5,
    }
    assert self.kernel_name in kernel_dict, f"{self.kernel_name} not in dict!"
    kernel = kernel_dict[self.kernel_name]
    return kernel
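# A minimal, runnable sketch of how one of the compositions above ('M32Const',
# i.e. k2 + k5) plugs into a GP; the toy data here is purely illustrative.
import numpy as np
import george
from george import kernels

p0 = np.full(3, 0.1)
kernel = kernels.Matern32Kernel(p0, ndim=3) + kernels.ConstantKernel(0.1, ndim=3)
X = np.random.rand(50, 3)
y = np.sin(X.sum(axis=1))
gp = george.GP(kernel, mean=float(np.mean(y)))
gp.compute(X, np.full(50, 1e-3))
print(gp.log_likelihood(y))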
def __init__(self):
    print('Initialize pklin emulator')
    self.klist = np.logspace(-3, 1, 200)
    self.logklist = np.log(self.klist)
    dirname = os.path.dirname(os.path.abspath(__file__))
    self.cosmos = np.loadtxt(dirname + '/../data/cparams_3d.dat')
    self.ydata = np.loadtxt(dirname + '/../learned_data/pklin/coeff_all.dat')
    self.eigdata = np.loadtxt(dirname + '/../learned_data/pklin/pca_eigvec.dat')
    self.ymean = np.loadtxt(dirname + '/../learned_data/pklin/pca_mean.dat')
    self.ystd = np.loadtxt(dirname + '/../learned_data/pklin/pca_std.dat')
    self.yavg = np.loadtxt(dirname + '/../learned_data/pklin/pca_avg.dat')
    self.gp_params = np.loadtxt(dirname + '/../learned_data/pklin/gp_params.dat')
    self.ktypes = np.loadtxt(dirname + '/../learned_data/pklin/ktypes.dat')
    self.gps = []
    for i in range(20):
        if self.ktypes[i] == 10:
            kernel = 1. * kernels.Matern52Kernel(np.ones(3), ndim=3) \
                + kernels.ConstantKernel(1e-4, ndim=3)
        elif self.ktypes[i] == 6:
            kernel = 1. * kernels.ExpSquaredKernel(np.ones(3), ndim=3) \
                + kernels.ConstantKernel(1e-4, ndim=3)
        else:
            # raise instead of print: otherwise `kernel` would be unbound (or stale)
            raise ValueError('kernel types 6 and 10 are the only supported types.')
        gp = george.GP(kernel)
        gp.compute(self.cosmos[:800])
        gp.set_parameter_vector(self.gp_params[i])
        self.gps.append(gp)
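# A hedged sketch of how the PCA pieces above are typically recombined into a
# linear power spectrum prediction. The coeff_all layout (samples x components)
# and the de-whitening order are assumptions; only gp.predict follows george's API.
def get_pklin(emu, cparams):
    coeff = np.array([gp.predict(emu.ydata[:800, i], np.atleast_2d(cparams),
                                 return_cov=False)[0]
                      for i, gp in enumerate(emu.gps)])
    logp = (coeff * emu.ystd + emu.ymean) @ emu.eigdata + emu.yavg
    return emu.klist, np.exp(logp)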
def init_gps(self):
    self.gps = []
    for i in range(self.nparams):
        if self.k_type[i] == 0:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.ExpSquaredKernel(metric=np.eye(6), ndim=6)
        elif self.k_type[i] == 1:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.ExpKernel(metric=np.eye(6), ndim=6)
        elif self.k_type[i] == 2:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.Matern32Kernel(metric=np.eye(6), ndim=6)
        elif self.k_type[i] == 3:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.Matern52Kernel(metric=np.eye(6), ndim=6)
        else:
            # guard added: without it, an unknown type silently reuses the
            # kernel from the previous iteration
            raise ValueError(f'unsupported kernel type: {self.k_type[i]}')
        # copy.copy() of a freshly constructed GP was a no-op; dropped
        gp = george.GP(kernel)
        gp.compute(self.xall, self.yerr[:, i])
        self.gps.append(gp)
def setup_george_kernel(kernelnames, kernel_base=1, fit_bias=False):
    """Set up the Gaussian process kernel for george.

    Parameters
    ----------
    kernelnames : list of str
        List of abbreviated names for the kernels. Choices:
        'Exp2' for an `ExpSquaredKernel`,
        'ESin2' for an `ExpSine2Kernel`,
        'Exp2ESin2' for an `ExpSquaredKernel` multiplied by an `ExpSine2Kernel`,
        'RatQ' for a `RationalQuadraticKernel`,
        'Mat32' for a `Matern32Kernel`,
        'Exp' for an `ExpKernel`, and
        'B' for a `ConstantKernel` (bias).
    kernel_base : float, optional
        The initial "strength" of the kernels.
    fit_bias : bool, optional
        Adds a `ConstantKernel` if the kernel does not already contain one.

    Returns
    -------
    name : str
        The kernel names concatenated by underscores and prepended by '_gp'.
    kernel : george.kernels.Kernel
        The covariance kernel for use with george.GP.
    """
    kernel = None
    kname = "_gp"
    for kn in kernelnames:
        krn = george_kernels.get(kn, None)
        if krn is None:
            # not found in the list of available kernels
            continue
        if kn in ["B", "W"]:
            # don't scale the constant or white kernels
            krnl = krn(0.25 * kernel_base)
        else:
            krnl = kernel_base * krn
        kernel = kernel + krnl if hasattr(kernel, "is_kernel") else krnl
        kname += "_" + kn
    if fit_bias and "B" not in kernelnames:
        krnl = kernels.ConstantKernel(kernel_base)
        kernel = kernel + krnl if hasattr(kernel, "is_kernel") else krnl
        kname += "_b"
    return kname, kernel
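# Example usage, assuming the module-level `george_kernels` dict maps these
# abbreviations to george kernels as the docstring describes:
kname, kernel = setup_george_kernel(["Exp2", "ESin2"], kernel_base=1.0,
                                    fit_bias=True)
# kname == "_gp_Exp2_ESin2_b"
gp = george.GP(kernel)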
def get_kernel(architecture, kernel_name, domain_name_lst, n_dims):
    if architecture == "trans":
        mapping = trans_domain_kernel_mapping
    else:
        mapping = None
    kernel = None
    initial_ls = np.ones([n_dims])
    if kernel_name == "constant":
        kernel = kernels.ConstantKernel(1, ndim=n_dims)
    elif kernel_name == "polynomial":
        kernel = kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    elif kernel_name == "linear":
        kernel = kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    elif kernel_name == "dotproduct":
        kernel = kernels.DotProductKernel(ndim=n_dims)
    elif kernel_name == "exp":
        kernel = kernels.ExpKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "expsquared":
        kernel = kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern32":
        kernel = kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern52":
        kernel = kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "rationalquadratic":
        kernel = kernels.RationalQuadraticKernel(log_alpha=1, metric=initial_ls,
                                                 ndim=n_dims)
    elif kernel_name == "expsine2":
        kernel = kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    elif kernel_name == "heuristic":
        # requires architecture == "trans", otherwise `mapping` is None here
        kernel = mapping[domain_name_lst[0]](ndim=n_dims, axes=0)
        for i, d in enumerate(domain_name_lst[1:]):
            kernel += mapping[d](ndim=n_dims, axes=i)
    elif kernel_name == "logsquared":
        kernel = kernels.LogSquaredKernel(initial_ls, ndim=n_dims)
    return kernel
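# Example: build a 3-D Matern-5/2 kernel by name ("mlp" is a placeholder
# architecture string; anything other than "trans" works for the
# non-heuristic kernels):
kernel = get_kernel("mlp", "matern52", [], n_dims=3)
gp = george.GP(kernel)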
def setup_GP(self, **kwargs):
    """Set up GP kernels and the GP object.

    Returns
    -------
    gp : GP object
    """
    if len(self.kernel_input_arrays) == 0:
        self.set_inputs()
    for i in range(self.nkernels):
        if i == 0:
            kernel = self.get_kernel(self.kernel_types[i], i)
        else:
            kernel += self.get_kernel(self.kernel_types[i], i)
    kernel = kernels.ConstantKernel(self.coeffs['A'], ndim=self.nkernels,
                                    axes=np.arange(self.nkernels)) * kernel
    if self.gp_code_name == 'george':
        gp = george.GP(kernel, white_noise=self.coeffs['WN'],
                       fit_white_noise=self.fit_white_noise,
                       mean=0, fit_mean=False)
        # solver=george.solvers.HODLRSolver
    if self.gp_code_name == 'tinygp':
        gp = tinygp.GaussianProcess(kernel, diag=self.coeffs['WN']**2, mean=0)
        # tinygp code:
        # kernel = tinygp.kernels.ConstantKernel(self.coeffs['A'], ndim=self.nkernels,
        #                                        axes=np.arange(self.nkernels)) * kernel
    return gp
    offset = np.zeros(len(t))
    idx = t < 57161
    offset[idx] = self.offset1
    offset[~idx] = self.offset2
    return rv1 + rv2 + offset

# ==============================================================================
# GP
# ==============================================================================
from george import kernels

k1 = kernels.ExpSine2Kernel(gamma=1, log_period=np.log(3200),
                            bounds=dict(gamma=(-3, 1), log_period=(0, 10)))
k2 = kernels.ConstantKernel(log_constant=np.log(1.),
                            bounds=dict(log_constant=(-5, 5))) \
    * kernels.ExpSquaredKernel(1.)
kernel = k1 * k2

truth = dict(P1=8., tau1=1., k1=np.std(y) / 100, w1=0., e1=0.4,
             P2=100, tau2=1., k2=np.std(y) / 100, w2=0., e2=0.4,
             offset1=0., offset2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(7.5, 8.5), k1=(0, 0.1), w1=(-2 * np.pi, 2 * np.pi),
                        e1=(0, 0.9), tau2=(-50, 50), k2=(0, 0.2),
                        w2=(-2 * np.pi, 2 * np.pi), e2=(0, 0.9))
mean_model = Model(**kwargs)
gp = george.GP(kernel, mean=mean_model, fit_mean=True)
# gp = george.GP(kernel, mean=mean_model, fit_mean=True,
#                white_noise=np.log(0.5**2), fit_white_noise=True)
gp.compute(t, yerr)

def lnprob2(p):
    gp.set_parameter_vector(p)
    return gp.log_likelihood(y, quiet=True) + gp.log_prior()
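# A hedged sketch of how lnprob2 is typically handed to emcee (emcee is an
# assumption here; only the standard ensemble-sampler pattern is shown):
import emcee

initial = gp.get_parameter_vector()
ndim, nwalkers = len(initial), 32
p0_walkers = initial + 1e-5 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob2)
sampler.run_mcmc(p0_walkers, 1000)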
             d_harps2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(0.2, 0.3), k1=(0, 0.3), w1=(-2 * np.pi, 2 * np.pi),
                        e1=(0, 0.9), P2=(2.5, 3.5), k2=(0, 0.3),
                        w2=(-2 * np.pi, 2 * np.pi), e2=(0, 0.9))

if star == 'HD7449':
    k1 = kernels.ExpSine2Kernel(gamma=1, log_period=np.log(14),
                                bounds=dict(gamma=(0, 100), log_period=(0, 3)))
    k2 = kernels.ConstantKernel(log_constant=np.log(0.1),
                                bounds=dict(log_constant=(-5, 0))) \
        * kernels.ExpSquaredKernel(100)
    kernel = k1 * k2
    truth = dict(P1=12.75, tau1=0.1, k1=np.std(y) / 100, w1=0., e1=0.8,
                 P2=40.46, tau2=0.1, k2=np.std(y) / 100, w2=0., e2=0.5,
                 d_harps1=0., d_harps2=0.)
    kwargs = dict(**truth)
def gaussian_process_smooth(self, per=None, minobs=10, phase_offset=None,
                            recompute=False, scalemin=None, scalemax=None):
    """
    per = cheaty hackish thing to get a Gaussian process with some
        continuity at the end points
    minobs = minimum number of observations in each filter before we fit
    """
    outgp = getattr(self, 'outgp', None)
    if outgp is not None:
        if not recompute:
            return outgp
    else:
        outgp = {}

    # note that we explicitly ask for the non-smoothed and non-GPR light curve
    # here, since we're trying to compute the GPR
    outlc = self.get_lc(recompute=False, per=per, smoothed=False, gpr=False,
                        phase_offset=phase_offset)

    for i, pb in enumerate(outlc):
        thislc = outlc.get(pb)
        thisphase, thisFlux, thisFluxErr = thislc
        nobs = len(thisphase)

        # if we do not have enough observations in this passband, skip it
        if nobs < minobs:
            continue

        # TODO : REVISIT KERNEL CHOICE
        if per == 1:
            # periodic
            kernel = kernels.ConstantKernel(1.) * kernels.ExpSine2Kernel(1.0, 0.0)
        elif per == 2:
            # quasiperiodic
            kernel = kernels.ConstantKernel(1.) * kernels.ExpSquaredKernel(100.) \
                * kernels.ExpSine2Kernel(1.0, 0.0)
        else:
            # non-periodic
            kernel = kernels.ConstantKernel(1.) * kernels.ExpSquaredKernel(1.)

        gp = george.GP(kernel, mean=thisFlux.mean(), fit_mean=True,
                       fit_white_noise=True,
                       white_noise=np.log(thisFluxErr.mean()**2.))

        # define the objective function
        def nll(p):
            gp.set_parameter_vector(p)
            ll = gp.lnlikelihood(thisFlux, quiet=True)
            return -ll if np.isfinite(ll) else 1e25

        # define the gradient of the objective function
        def grad_nll(p):
            gp.set_parameter_vector(p)
            return -gp.grad_lnlikelihood(thisFlux, quiet=True)

        # pre-compute the kernel
        gp.compute(thisphase, thisFluxErr)
        p0 = gp.get_parameter_vector()

        max_white_noise = np.log((3. * np.median(thisFluxErr))**2.)
        min_white_noise = np.log((0.3 * np.median(thisFluxErr))**2.)

        # coarse optimization with scipy.optimize
        # TODO : almost anything is better than scipy.optimize
        if per == 1:
            # mean, white_noise, amplitude, gamma, FIXED_period
            results = op.minimize(nll, p0, jac=grad_nll,
                                  bounds=[(None, None),
                                          (min_white_noise, max_white_noise),
                                          (None, None), (None, None), (0., 0.)])
        elif per == 2:
            # mean, white_noise, amplitude, variation_timescale, gamma, FIXED_period
            results = op.minimize(nll, p0, jac=grad_nll,
                                  bounds=[(None, None),
                                          (min_white_noise, max_white_noise),
                                          (None, None), (scalemin, scalemax),
                                          (None, None), (0., 0.)])
        else:
            # mean, white_noise, amplitude, variation_timescale
            results = op.minimize(nll, p0, jac=grad_nll,
                                  bounds=[(None, None),
                                          (min_white_noise, max_white_noise),
                                          (None, None), (scalemin, scalemax)])

        gp.set_parameter_vector(results.x)

        # george is a little different than sklearn in that the prediction
        # stage needs the input data
        outgp[pb] = (gp, thisphase, thisFlux, thisFluxErr)

    self.outgp = outgp
    return outgp
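# A hedged sketch of evaluating the cached GPs returned above on a dense phase
# grid (`obj` stands in for the instance; the grid size is arbitrary):
outgp = obj.gaussian_process_smooth(per=None)
for pb, (gp, phase, flux, fluxerr) in outgp.items():
    grid = np.linspace(phase.min(), phase.max(), 500)
    mu, var = gp.predict(flux, grid, return_var=True)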
k1 = kernels.ExpSine2Kernel(gamma=1, log_period=np.log(100),
                            bounds=dict(gamma=(-3, 3), log_period=(0, 10)))
k2 = np.std(y) * kernels.ExpSquaredKernel(1.)
k3 = 0.66**2 * kernels.RationalQuadraticKernel(log_alpha=np.log(0.78),
                                               metric=1.2**2)
kernel = k1 * k2 + k3

truth = dict(P1=0.25, tau1=0.1, k1=np.std(y) / 100, w1=0., e1=0.4,
             P2=3.1, tau2=0.1, k2=np.std(y) / 100, w2=0., e2=0.4,
             d_aat=0., d_harps1=0., d_harps2=0.)
kwargs = dict(**truth)
kwargs["bounds"] = dict(P1=(0.2, 0.3), k1=(0, 0.3), w1=(-2 * np.pi, 2 * np.pi),
                        e1=(0, 0.9), P2=(2.5, 3.5), k2=(0, 0.3),
                        w2=(-2 * np.pi, 2 * np.pi), e2=(0, 0.9))

if star == 'HD7449':
    k1 = kernels.ExpSine2Kernel(gamma=1, log_period=np.log(13.3),
                                bounds=dict(gamma=(-3, 1),
                                            log_period=(np.log(13.3 - 2.56),
                                                        np.log(13.3 + 2.56))))
    k2 = kernels.ConstantKernel(log_constant=np.log(1.0),
                                bounds=dict(log_constant=(-3, 4))) \
        * kernels.ExpSquaredKernel(10**2.)
    kernel = k1 * k2
    truth = dict(P1=12.50, tau1=0.1, k1=np.std(y) / 100, w1=0., e1=0.9,
                 P2=160., tau2=0.1, k2=np.std(y) / 100,
                 d_harps1=0., d_harps2=0.)
    kwargs = dict(**truth)
    kwargs["bounds"] = dict(P1=(12.0, 13.0), k1=(0, 1.),
                            w1=(-2 * np.pi, 2 * np.pi), e1=(0.7, 0.99),
                            P2=(120, 220), k2=(0, 2.))

mean_model = Model(**kwargs)
gp = george.GP(kernel, mean=mean_model, fit_mean=True,
               white_noise=np.log(0.5**2), fit_white_noise=True)
# gp.freeze_parameter('kernel:k2:k1:log_constant')
gp.compute(x, yerr)
lnp1 = gp.log_likelihood(y)

def lnprob2(p):
import numpy as np
from george import kernels, GP


def test_dtype(seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)


kernels_to_test = [
    kernels.ConstantKernel(log_constant=0.1),
    kernels.ConstantKernel(log_constant=10.0, ndim=2),
    kernels.ConstantKernel(log_constant=5.0, ndim=5),
    kernels.DotProductKernel(),
    kernels.DotProductKernel(ndim=2),
    kernels.DotProductKernel(ndim=5, axes=0),
    kernels.CosineKernel(log_period=1.0),
    kernels.CosineKernel(log_period=0.5, ndim=2),
    kernels.CosineKernel(log_period=0.5, ndim=2, axes=1),
    kernels.CosineKernel(log_period=0.75, ndim=5, axes=[2, 3]),
    kernels.ExpSine2Kernel(gamma=0.4, log_period=1.0),
    kernels.ExpSine2Kernel(gamma=12., log_period=0.5, ndim=2),
    kernels.ExpSine2Kernel(gamma=17., log_period=0.5, ndim=2, axes=1),
    kernels.ExpSine2Kernel(gamma=13.7, log_period=-0.75, ndim=5, axes=[2, 3]),
    kernels.ExpSine2Kernel(gamma=-0.7, log_period=0.75, ndim=5, axes=[2, 3]),
    kernels.ExpSine2Kernel(gamma=-10, log_period=0.75),
def get_const_kernel(c, ndim):
    # NOTE: the passed `c` is immediately overwritten by a fixed log-amplitude,
    # so the first argument currently has no effect
    c = K.ConstantKernel(-0.69, ndim=ndim, bounds=[(-7.0, 4.0)])
    return c
        # yerr[ss] = Ystd[j]
        # yerr[ss] = GP_error[j] / 2.303
        # yerr[ss] = np.log10(GP_error[j])
        y2[ss2] = y[ss]
        ss += 1
        ss2 += 1

######
# 15 initial values for the 7 HOD and 8 cosmo params
p0 = np.full(nparams, 0.1)
k1 = kernels.ExpSquaredKernel(p0, ndim=len(p0))
k2 = kernels.Matern32Kernel(p0, ndim=len(p0))
k3 = kernels.ConstantKernel(0.1, ndim=len(p0))
# k4 = kernels.WhiteKernel(0.1, ndim=len(p0))
k5 = kernels.ConstantKernel(0.1, ndim=len(p0))
kernel = k2 + k5
# kernel = np.var(y) * k1

ppt = pp[j]
gp = george.GP(kernel, mean=np.mean(y), solver=george.BasicSolver)
# gp = george.GP(kernel, solver=george.BasicSolver)
gp.compute(rr, yerr)
# gp.kernel.vector = ppt
gp.set_parameter_vector(ppt)
gp.compute(rr, yerr)
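# A hedged sketch of the usual next step: predict on a new separation grid
# (`rr_new` is hypothetical, and y2 is assumed to be the target vector
# assembled above):
mu, var = gp.predict(y2, rr_new, return_var=True)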