def __init__(self, keyword=None, *args, **kwargs):
    """Initialises the priors, initial hyperparameter values, and kernel."""
    # Set up kernel
    # -------------
    k_spatial = 1.0 * kernels.Matern52Kernel(
        metric=[1.0, 1.0], ndim=3, axes=[0, 1])
    k_temporal = 1.0 * kernels.Matern52Kernel(metric=1.0, ndim=3, axes=2)
    k_total = k_spatial + k_temporal

    if keyword in ('long', 'long_timescale'):
        default_sigt = 16
    elif isinstance(keyword, (int, float)):
        default_sigt = keyword + 1e-5
    else:
        default_sigt = np.exp(0.36 / 2)

    super().__init__(kernel=k_total,
                     parameter_names=('ln_Axy', '2ln_sigx', '2ln_sigy',
                                      'ln_At', '2ln_sigt'),
                     default_values=(-12.86, -3.47, -4.34, -12.28,
                                     2 * np.log(default_sigt)),
                     keyword=keyword, *args, **kwargs)
    self.default_X_cols = ['x', 'y', 't']
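# A minimal sketch (not from the source) of the kernel composition above:
# a 3-D input [x, y, t] where the Matern-5/2 spatial term acts on axes 0-1
# and the temporal term on axis 2, summed into one kernel with the five
# hyperparameters named in parameter_names.
import numpy as np
from george import kernels

k_spatial = 1.0 * kernels.Matern52Kernel(metric=[1.0, 1.0], ndim=3, axes=[0, 1])
k_temporal = 1.0 * kernels.Matern52Kernel(metric=1.0, ndim=3, axes=2)
k_total = k_spatial + k_temporal
print(k_total.get_parameter_names())  # five internal george parameter names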
def load_sigma_gp(self):
    data_dir = os.path.dirname(os.path.abspath(__file__))
    self.cosmos = np.loadtxt(data_dir + '/../data/cparams_4d.dat')
    self.ydata = np.loadtxt(data_dir + '/../learned_data/sigmaM/coeff_all.dat')
    self.eigdata = np.loadtxt(data_dir + '/../learned_data/sigmaM/pca_eigvec.dat')
    self.ymean = np.loadtxt(data_dir + '/../learned_data/sigmaM/pca_mean.dat')
    self.ystd = np.loadtxt(data_dir + '/../learned_data/sigmaM/pca_std.dat')
    self.yavg = np.loadtxt(data_dir + '/../learned_data/sigmaM/pca_avg.dat')
    self.gp_params = np.loadtxt(data_dir + '/../learned_data/sigmaM/gp_params.dat')
    self.ktypes = np.loadtxt(data_dir + '/../learned_data/sigmaM/ktypes.dat')

    self.gps = []
    for i in range(4):
        if self.ktypes[i] == 10:
            kernel = 1. * kernels.Matern52Kernel(np.ones(4), ndim=4) \
                + kernels.ConstantKernel(1e-4, ndim=4)
        elif self.ktypes[i] == 6:
            kernel = 1. * kernels.ExpSquaredKernel(np.ones(4), ndim=4) \
                + kernels.ConstantKernel(1e-4, ndim=4)
        else:
            # Without this, `kernel` would be undefined below.
            raise ValueError('kernel types 6 and 10 are the only supported types.')
        gp = george.GP(kernel)
        gp.compute(self.cosmos[:800])
        gp.set_parameter_vector(self.gp_params[i])
        self.gps.append(gp)
def lnlike(self, p):
    """
    GP log-likelihood of the data given the kernel parameters.

    :param p: log10 kernel parameters [amplitude, length-scales...]
    :return lnlike: log-likelihood of the kernel amplitude and
        length-scale parameters
    """
    # Update the kernel and compute the log-likelihood.
    a, tau = 10.0**p[0], 10.0**p[1:]

    lnlike = 0.0
    try:
        if self.kernel == 'sqexp':
            self.gaussproc = george.GP(
                a * kernels.ExpSquaredKernel(tau, ndim=len(tau)))
        elif self.kernel == 'matern32':
            self.gaussproc = george.GP(
                a * kernels.Matern32Kernel(tau, ndim=len(tau)))
        elif self.kernel == 'matern52':
            self.gaussproc = george.GP(
                a * kernels.Matern52Kernel(tau, ndim=len(tau)))

        self.gaussproc.compute(self.x, self.yerr)
        lnlike = self.gaussproc.log_likelihood(self.y, quiet=True)
    except np.linalg.LinAlgError:
        lnlike = -np.inf

    return lnlike
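# Hedged sketch (not from the source): maximising lnlike with scipy, where
# `gproc` stands for an instance of the class this method belongs to, with
# x, y, yerr and kernel already set; the start point is simply its p0.
from scipy.optimize import minimize

res = minimize(lambda p: -gproc.lnlike(p), x0=gproc.p0, method='Nelder-Mead')
print(res.x)  # maximum-likelihood [log10 a, log10 tau_1, ...]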
def __init__(self):
    print('Initialize sigma_d emulator')
    data_dir = os.path.dirname(os.path.abspath(__file__))
    self.cosmos = np.loadtxt(data_dir + '/../data/cparams_4d.dat')
    self.ydata = np.loadtxt(data_dir + '/../learned_data/sigmad/coeff_all.dat')
    self.yavg = np.loadtxt(data_dir + '/../learned_data/sigmad/sigd_avg.dat')
    self.ystd = np.loadtxt(data_dir + '/../learned_data/sigmad/sigd_std.dat')
    self.gp_params = np.loadtxt(data_dir + '/../learned_data/sigmad/gp_params.dat')
    self.ktypes = np.loadtxt(data_dir + '/../learned_data/sigmad/ktypes.dat')

    if self.ktypes == 10:
        kernel = 1. * kernels.Matern52Kernel(np.ones(4), ndim=4) \
            + kernels.ConstantKernel(1e-4, ndim=4)
    elif self.ktypes == 6:
        kernel = 1. * kernels.ExpSquaredKernel(np.ones(4), ndim=4) \
            + kernels.ConstantKernel(1e-4, ndim=4)
    else:
        # Without this, `kernel` would be undefined below.
        raise ValueError('kernel types 6 and 10 are the only supported types.')

    self.gp = george.GP(kernel)
    self.gp.compute(self.cosmos[:800])
    self.gp.set_parameter_vector(self.gp_params)
    self.As_fid = np.exp(3.094)
def create_kernel(self):
    """Create the covariance-function kernel for the Gaussian process."""
    if self.kern == 'SE':
        return self.sigma_f * kernels.ExpSquaredKernel(self.l_param, ndim=self.n_dim)
    elif self.kern == 'M32':
        return self.sigma_f * kernels.Matern32Kernel(self.l_param, ndim=self.n_dim)
    elif self.kern == 'M52':
        return self.sigma_f * kernels.Matern52Kernel(self.l_param, ndim=self.n_dim)
def step(theta):
    l1 = np.exp(theta[0])
    l2 = np.exp(theta[1])
    sigma = np.exp(theta[2])
    yerr = np.diagonal(np.sqrt(noise2)) + theta[3]

    # kernel = sigma * kernels.ExpSquaredKernel([l1, l2], ndim=2, axes=[0, 1])
    kernel = sigma * kernels.Matern52Kernel([l1, l2], ndim=2, axes=[0, 1])
    gp = george.GP(kernel)
    # compute() factorises the covariance matrix (Cholesky), so the
    # log-determinant in the likelihood is cheap.
    gp.compute(X_train, yerr)
    return -gp.lnlikelihood(D)
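# step() returns the negative log-likelihood, so it can be passed straight
# to a minimiser; the starting values below are illustrative only.
from scipy.optimize import minimize

theta0 = [0.0, 0.0, 0.0, 0.1]  # [ln l1, ln l2, ln sigma, extra yerr term]
res = minimize(step, theta0, method='Nelder-Mead')
print(res.x)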
def __init__(self):
    print('Initialize pklin emulator')
    self.klist = np.logspace(-3, 1, 200)
    self.logklist = np.log(self.klist)

    data_dir = os.path.dirname(os.path.abspath(__file__))
    self.cosmos = np.loadtxt(data_dir + '/../data/cparams_3d.dat')
    self.ydata = np.loadtxt(data_dir + '/../learned_data/pklin/coeff_all.dat')
    self.eigdata = np.loadtxt(data_dir + '/../learned_data/pklin/pca_eigvec.dat')
    self.ymean = np.loadtxt(data_dir + '/../learned_data/pklin/pca_mean.dat')
    self.ystd = np.loadtxt(data_dir + '/../learned_data/pklin/pca_std.dat')
    self.yavg = np.loadtxt(data_dir + '/../learned_data/pklin/pca_avg.dat')
    self.gp_params = np.loadtxt(data_dir + '/../learned_data/pklin/gp_params.dat')
    self.ktypes = np.loadtxt(data_dir + '/../learned_data/pklin/ktypes.dat')

    self.gps = []
    for i in range(20):
        if self.ktypes[i] == 10:
            kernel = 1. * kernels.Matern52Kernel(np.ones(3), ndim=3) \
                + kernels.ConstantKernel(1e-4, ndim=3)
        elif self.ktypes[i] == 6:
            kernel = 1. * kernels.ExpSquaredKernel(np.ones(3), ndim=3) \
                + kernels.ConstantKernel(1e-4, ndim=3)
        else:
            # Without this, `kernel` would be undefined below.
            raise ValueError('kernel types 6 and 10 are the only supported types.')
        gp = george.GP(kernel)
        gp.compute(self.cosmos[:800])
        gp.set_parameter_vector(self.gp_params[i])
        self.gps.append(gp)
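# Hedged sketch (not from the source) of how a PCA+GP emulator like the one
# above is typically evaluated: each GP predicts one PCA coefficient at a new
# parameter point, and the curve is rebuilt from the eigenvectors. The method
# name and the de-whitening convention (ystd/ymean/yavg) are assumptions.
def _predict_pklin_sketch(self, cpar):
    cpar = np.atleast_2d(cpar)  # one 3-D cosmological parameter point
    coeffs = np.array([gp.predict(self.ydata[:800, i], cpar)[0][0]
                       for i, gp in enumerate(self.gps)])
    return np.exp((coeffs * self.ystd + self.ymean) @ self.eigdata + self.yavg)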
def get_kernel(architecture, kernel_name, domain_name_lst, n_dims):
    if architecture == "trans":
        mapping = trans_domain_kernel_mapping
    else:
        mapping = None

    kernel = None
    initial_ls = np.ones([n_dims])
    if kernel_name == "constant":
        kernel = kernels.ConstantKernel(1, ndim=n_dims)
    elif kernel_name == "polynomial":
        kernel = kernels.PolynomialKernel(log_sigma2=1, order=3, ndim=n_dims)
    elif kernel_name == "linear":
        kernel = kernels.LinearKernel(log_gamma2=1, order=3, ndim=n_dims)
    elif kernel_name == "dotproduct":
        kernel = kernels.DotProductKernel(ndim=n_dims)
    elif kernel_name == "exp":
        kernel = kernels.ExpKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "expsquared":
        kernel = kernels.ExpSquaredKernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern32":
        kernel = kernels.Matern32Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "matern52":
        kernel = kernels.Matern52Kernel(initial_ls, ndim=n_dims)
    elif kernel_name == "rationalquadratic":
        kernel = kernels.RationalQuadraticKernel(
            log_alpha=1, metric=initial_ls, ndim=n_dims)
    elif kernel_name == "expsine2":
        kernel = kernels.ExpSine2Kernel(1, 2, ndim=n_dims)
    elif kernel_name == "heuristic":
        # Each domain's kernel acts on its own input axis (the original
        # indexing reused axis 0 for the second domain).
        kernel = mapping[domain_name_lst[0]](ndim=n_dims, axes=0)
        for i, d in enumerate(domain_name_lst[1:], start=1):
            kernel += mapping[d](ndim=n_dims, axes=i)
    elif kernel_name == "logsquared":
        kernel = kernels.LogSquaredKernel(initial_ls, ndim=n_dims)
    return kernel
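# Hedged usage sketch; "mlp" is a hypothetical architecture name (anything
# other than "trans" simply skips the domain-kernel mapping).
import george

k = get_kernel(architecture="mlp", kernel_name="matern52",
               domain_name_lst=[], n_dims=3)
gp = george.GP(k)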
def lnlike(theta, inc, R, pc0, Epc0, noise2):
    N = len(pc0)
    X = np.ones(shape=(2, N))
    X[0] = pc0
    X[1] = inc
    X = X.T

    l1 = np.exp(theta[0])
    l2 = np.exp(theta[1])
    sigma = np.exp(theta[2])
    err = theta[3]
    yerr = np.diagonal(np.sqrt(noise2)) + err

    # kernel = sigma * kernels.ExpSquaredKernel([l1, l2], ndim=2, axes=[0, 1])
    kernel = sigma * kernels.Matern52Kernel([l1, l2], ndim=2, axes=[0, 1])
    gp = george.GP(kernel)
    gp.compute(X, yerr)
    return gp.lnlikelihood(R)
def init_gps(self):
    self.gps = []
    for i in range(self.nparams):
        if self.k_type[i] == 0:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.ExpSquaredKernel(metric=np.eye(6), ndim=6)
        elif self.k_type[i] == 1:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.ExpKernel(metric=np.eye(6), ndim=6)
        elif self.k_type[i] == 2:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.Matern32Kernel(metric=np.eye(6), ndim=6)
        elif self.k_type[i] == 3:
            kernel = kernels.ConstantKernel(self.logyvars[i], ndim=6) \
                * kernels.Matern52Kernel(metric=np.eye(6), ndim=6)
        gp = george.GP(kernel)
        gp.compute(self.xall, self.yerr[:, i])
        self.gps.append(gp)
def __init__(self, x, y, p0, pmin, pmax, kernel='sqexp', yerr=None):
    """
    :param x: simulation coordinates [in unit hypercube space]
    :param y: data values
    :param p0: initial values of kernel parameters
    :param pmin: lower sampling boundaries of kernel parameters
    :param pmax: upper sampling boundaries of kernel parameters
    :param kernel: string denoting kernel function type [default = sqexp]
    :param yerr: uncertainties on data values
    :return gaussproc: instance of a George GP object
    """
    self.x = x            # simulation coordinates
    self.y = y            # data values
    self.yerr = yerr      # uncertainties
    self.kernel = kernel  # kernel type: other choices are matern32, matern52
    self.gaussproc = None
    self.p0 = p0          # initial values for kernel parameters

    a, tau = 10.0**self.p0[0], 10.0**self.p0[1:]
    if self.kernel == 'sqexp':
        self.gaussproc = george.GP(
            a * kernels.ExpSquaredKernel(tau, ndim=len(tau)))
    elif self.kernel == 'matern32':
        # ndim belongs to the kernel, not to george.GP
        self.gaussproc = george.GP(
            a * kernels.Matern32Kernel(tau, ndim=len(tau)))
    elif self.kernel == 'matern52':
        self.gaussproc = george.GP(
            a * kernels.Matern52Kernel(tau, ndim=len(tau)))
    self.gaussproc.compute(self.x, self.yerr)

    self.pmax = pmax  # sampling max
    self.pmin = pmin  # sampling min
    self.emcee_flatchain = None
    self.emcee_flatlnprob = None
    self.emcee_kernel_map = None
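# Hedged sketch of sampling the kernel parameters with emcee, using the
# pmin/pmax bounds stored on the object as a uniform prior. `GaussProc` is
# a hypothetical name for the class defined above.
import emcee

def lnpost(p, gproc):
    # Flat prior inside the sampling box, GP likelihood within it
    if np.any(p < gproc.pmin) or np.any(p > gproc.pmax):
        return -np.inf
    return gproc.lnlike(p)

# gproc = GaussProc(x, y, p0, pmin, pmax, kernel='matern52', yerr=yerr)
# nwalkers, ndim = 32, len(gproc.p0)
# sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, args=[gproc])
# sampler.run_mcmc(gproc.p0 + 1e-4 * np.random.randn(nwalkers, ndim), 1000)
# gproc.emcee_flatchain = sampler.get_chain(flat=True)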
def get_gp(self, Kernel="Exp", amplitude=1e-3, metric=10., gamma=10., period=10.):
    """
    Citlalicue uses the kernels provided by george; the current options are
    "Exp", "Matern32", "Matern52", and quasi-periodic "QP".
    Users can modify the hyperparameters amplitude, metric, gamma, and period.
    """
    import george
    from george import kernels

    if Kernel == "Matern32":
        kernel = amplitude * kernels.Matern32Kernel(metric)
    elif Kernel == "Matern52":
        kernel = amplitude * kernels.Matern52Kernel(metric)
    elif Kernel == "Exp":
        kernel = amplitude * kernels.ExpKernel(metric)
    elif Kernel == "QP":
        log_period = np.log(period)
        kernel = amplitude * kernels.ExpKernel(metric) \
            * kernels.ExpSine2Kernel(gamma, log_period)

    # Save the kernel as an attribute
    self.kernel = kernel
    # Build the GP object with George
    self.gp = george.GP(self.kernel, mean=1)
    # Compute the covariance matrix using the binned data
    self.gp.compute(self.time_bin, self.ferr_bin)
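# Hypothetical usage sketch: `lc` stands for a light-curve object whose
# time_bin and ferr_bin attributes are already set, and `flux_bin` is an
# assumed attribute holding the binned fluxes the GP is conditioned on.
lc.get_gp(Kernel="QP", amplitude=1e-3, metric=20., gamma=5., period=12.)
mu, var = lc.gp.predict(lc.flux_bin, lc.time_bin, return_var=True)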
print('Reading data')
data = json.load(open(args.inf, 'r'))
ytruth = np.array(data['bkg_model'])
x = np.array(data['x'])
y_toys = data['toys']
if 'sig_model' in data:
    sig_model = np.array(data['sig_model'])
x_pred = np.linspace(min(x), max(x), 500)

y = np.array(y_toys[0])
yerr = np.sqrt(y)

print('Defining GP')
# kernel = np.var(y) * kernels.ExpSquaredKernel(0.5)
kernel = np.var(y) * kernels.Matern52Kernel(0.5)
# kernel = np.var(y) * kernels.ExpKernel(10)
gp = george.GP(kernel)
gp.compute(x, yerr)

sig_mus = np.linspace(-5, 6, 500)
my_llrs = []
my_max_llr = -1000000
max_llr_result = 0
fitted_y = y
fitted_mu = -10
for mu in sig_mus:
    y_minus_sig = y - mu * sig_model
    llr, res = ut.gp_fit(gp, x, y_minus_sig, yerr, x_pred, 0)
    my_llrs.append(llr)
def plot_Rinc(ax, T, Input, pc0_lim=[-1, 1], color='red', scatter=False,
              binned=False, xlabel=True, ylabel=True, X_twin=True, Y_twin=True,
              band1='r', band2='w2'):
    pgc = Input[0]
    r_w1 = Input[1]
    pc0 = Input[2]
    inc = Input[3]

    AB = T[2]
    table = T[5]
    a0, b0 = AB[0], AB[1]
    Er_w1 = table['Er_w1']
    Epc0 = table['Epc0']
    Einc = table['inc_e']

    a, b, c, d, alpha, beta, gamma, Ealpha, Ebeta = getReddening_params(
        band1=band1, band2=band2)

    q2 = 10**(-1. * gamma)
    F = log_a_b(inc, q2)
    dF2 = Elogab2(inc, q2, Einc)

    A = F * (a * pc0**3 + b * pc0**2 + c * pc0 + d)
    dA = np.sqrt(dF2 * (a * pc0**3 + b * pc0**2 + c * pc0 + d)**2
                 + (F * (3 * a * pc0**2 + 2 * b * pc0 + c) * Epc0)**2)

    R = r_w1 - (alpha * pc0 + beta)
    dR = np.sqrt(Er_w1**2 + (alpha * Epc0)**2 + (Ealpha * pc0)**2 + Ebeta**2)
    D = R - A

    N = len(pc0)
    dR2 = Er_w1**2 + (alpha * Epc0)**2 + (Ealpha * pc0)**2
    noise2 = (dR2 + dA**2) * np.eye(N)

    X = np.ones(shape=(2, N))
    X[0] = pc0
    X[1] = inc
    X = X.T
    y = D

    # Pre-fitted GP hyperparameters for each band
    if band1 == 'u':
        theta = [2.12107366, 4.62358258, -2.67502258, 0.12339909]
    if band1 == 'g':
        theta = [3.29876662, 7.30325097, 0.24330005, 0.07810393]
    if band1 == 'r':
        theta = [3.5360765, 7.51644074, 0.32119272, 0.07308647]
    if band1 == 'i':
        theta = [3.6535672, 8.04667536, 0.40698926, 0.08026879]
    if band1 == 'z':
        theta = [3.78077626, 8.14568691, 0.09446865, 0.08405013]
    if band1 == 'w1':
        theta = [2.20842506e+01, 1.32881714e+01, -2.13119882e+01, 1.01638499e-02]

    l1 = np.exp(theta[0])
    l2 = np.exp(theta[1])
    sigma = np.exp(theta[2])
    yerr = np.diagonal(np.sqrt(noise2)) + theta[3]

    kernel = sigma * kernels.Matern52Kernel([l1, l2], ndim=2, axes=[0, 1])
    gp = george.GP(kernel)
    gp.compute(X, yerr)

    # Restrict to the requested pc0 range
    index = np.where(pc0 >= pc0_lim[0])
    r_w1 = r_w1[index]
    pc0 = pc0[index]
    pgc = pgc[index]
    inc = inc[index]
    Er_w1 = Er_w1[index]
    Epc0 = Epc0[index]

    index = np.where(pc0 < pc0_lim[1])
    r_w1 = r_w1[index]
    pc0 = pc0[index]
    pgc = pgc[index]
    inc = inc[index]
    Er_w1 = Er_w1[index]
    Epc0 = Epc0[index]

    R = r_w1 - (alpha * pc0 + beta)
    dR = np.sqrt(Er_w1**2 + (alpha * Epc0)**2 + (Ealpha * pc0)**2 + Ebeta**2)

    ### Model
    _pc, _inc = np.linspace(pc0_lim[0], pc0_lim[1], 20), np.linspace(45, 90, 20)
    _pc, _inc = np.meshgrid(_pc, _inc)
    X_ = np.c_[_pc.ravel(), _inc.ravel()]
    _A, var_A = gp.predict(y, X_, return_var=True)
    _A = _A.reshape(_pc.shape)
    F__ = log_a_b(_inc, q2)
    _A += F__ * (a * _pc**3 + b * _pc**2 + c * _pc + d)

    inc__ = np.linspace(45, 90, 20)
    N = len(inc__)
    r_min = np.zeros(N)
    r_max = np.zeros(N)
    r_med = np.zeros(N)
    for ii in range(N):
        indx = np.where(_inc == inc__[ii])
        r_min[ii] = np.min(_A[indx])
        r_max[ii] = np.max(_A[indx])
        r_med[ii] = np.median(_A[indx])

    # if band1 != 'w1':
    ax.fill_between(inc__, r_min, r_max, alpha=0.35, facecolor=color)

    if scatter:
        ax.plot(inc, R, 'o', color='black', markersize=1, alpha=0.4)

    if binned:
        xl = []
        yl = []
        yel = []
        low = 45
        high = 90
        for i in np.arange(low, high, 5):
            x = []
            ybin = []
            for ii in range(len(R)):
                xi = inc[ii]
                if xi > i and xi <= i + 5:
                    x.append(xi)
                    ybin.append(R[ii])
            if len(x) > 0:
                x = np.asarray(x)
                ybin = np.asarray(ybin)
                average = np.median(ybin)
                stdev = np.std(ybin)
                # 2-sigma clipping around the bin median
                index = np.where(ybin < average + 2. * stdev)
                x = x[index]
                ybin = ybin[index]
                index = np.where(ybin > average - 2. * stdev)
                x = x[index]
                ybin = ybin[index]
                ax.errorbar(np.median(x), np.median(ybin), yerr=np.std(ybin),
                            fmt='o', color=color, markersize=5)
                xl.append(np.median(x))
                yl.append(np.median(ybin))
                yel.append(np.std(ybin))

    ### Fitting a curve
    ax.plot(inc__, r_med, 'k--')

    ax.tick_params(which='major', length=6, width=1.5, direction='in')
    ax.tick_params(which='minor', length=4, color='#000033', width=1.0,
                   direction='in')
    ax.minorticks_on()

    # ax.text(45, 0.8, r'' + "%.0f" % (c21w_[0]) + '$< c21W_1 <' + "%.0f" % (c21w_[1]) + '$', color=color, fontsize=11)
    ax.text(52, -0.7, r'' + "%.1f" % (pc0_lim[0]) + '$< P_{0,' + band2 + '} <$'
            + "%.1f" % (pc0_lim[1]), fontsize=13)
    ax.text(47, 1.4, band1, fontsize=14, color=color)
    ax.set_ylim([-0.9, 1.7])
    ax.set_xlim([41, 99])
    ax.plot([0, 100], [0, 0], 'k:')

    # if xlabel: ax.set_xlabel(r'$inclination \/ [deg]$', fontsize=16)
    # if ylabel: ax.set_ylabel(r'$A_{w2}^{(inc)}$', fontsize=16)

    if Y_twin:
        y_ax = ax.twinx()
        y_ax.set_ylim(-0.9, 1.7)
        y_ax.set_yticklabels([])
        y_ax.minorticks_on()
        y_ax.tick_params(which='major', length=6, width=1.5, direction='in')
        y_ax.tick_params(which='minor', length=4, color='#000033', width=1.0,
                         direction='in')

    if X_twin:
        x_ax = ax.twiny()
        x_ax.set_xlim(41, 99)
        x_ax.set_xticklabels([])
        x_ax.minorticks_on()
        x_ax.tick_params(which='major', length=6, width=1.0, direction='in')
        x_ax.tick_params(which='minor', length=4, color='#000033', width=1.0,
                         direction='in')

    if len(dR) > 0:
        x0 = 47
        y0 = 1.
        plt.errorbar([x0], [y0], yerr=[np.median(dR)], color='k', fmt='o',
                     alpha=0.7, capsize=3, markersize=5)

    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(12)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(12)
# ----------------------------------------------------------------------------
# Select pixels to fit the model
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    X, np.vstack((y, err)).T, test_size=0.5, random_state=0)

# Create the model based on selecting 50% of the pixels
import george
from george import kernels

# Set up the Gaussian process:
# k1 = np.mean(err) ** 2 * kernels.ExpKernel(1.0)
# k1 = np.mean(err) ** 2 * kernels.ExpSquaredKernel(1.0)
k1 = np.mean(err)**2 * kernels.Matern32Kernel(1.0)
k2 = np.mean(err)**2 * kernels.Matern52Kernel(15.0)
kernel = k1 + k2
gp = george.GP(kernel)

# Pre-compute the factorization of the matrix.
gp.compute(X_train, y_train[:, 1])

# Compute the log likelihood.
print(gp.lnlikelihood(y_train[:, 0]))

# Predict on a fine grid
t = np.linspace(0, 200, 500)
mu, cov = gp.predict(y_train[:, 0], t)
std = np.sqrt(np.diag(cov))

# Plot
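# A minimal plotting sketch for the prediction above (the original "# Plot"
# section is truncated, so this is illustrative only).
import matplotlib.pyplot as plt

plt.errorbar(X_train, y_train[:, 0], yerr=y_train[:, 1], fmt='.k', ms=3)
plt.plot(t, mu, label='GP mean')
plt.fill_between(t, mu - std, mu + std, alpha=0.3, label=r'GP $1\sigma$ band')
plt.legend()
plt.show()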