def gp_sho(idx, data, resid, params):
    """Model light-curve residuals with an SHO + jitter celerite GP.

    Parameters
    ----------
    idx : array-like
        Index (or boolean mask) selecting visit times from ``data.t_vis``.
    data : object
        Light-curve container; must expose ``err``, ``flux`` and ``t_vis``.
    resid : array-like
        Residuals (data minus model) to be described by the GP.
    params : sequence of 4 floats
        ``(logQ, logw, logS, log_jit)``: log quality factor, log angular
        frequency and log power of the SHO term, plus the log jitter
        amplitude.

    Returns
    -------
    list
        ``[1.0 + mu, gp_lnlike]`` where ``mu`` is the GP predictive mean at
        the data times and ``gp_lnlike`` is the GP log-likelihood of
        ``resid``.
    """
    logQ, logw, logS, log_jit = params
    # Fractional (normalised) per-point uncertainties.
    err = data.err / data.flux
    t = data.t_vis[idx]

    kernel = (terms.SHOTerm(log_S0=logS, log_Q=logQ, log_omega0=logw)
              + terms.JitterTerm(log_sigma=log_jit))
    gp = celerite.GP(kernel, fit_mean=True)
    gp.compute(t, err, check_sorted=False)  # t must be ascending!

    mu = gp.predict(resid, t, return_cov=False)
    gp_lnlike = gp.log_likelihood(resid)
    return [1.0 + np.array(mu), gp_lnlike]
def Fit0_Jitter(x, y, yerr=None, verbose=True, doPlot=False, xpred=None):
    """Fit a Matern-3/2 + jitter celerite GP to a 1-D dataset.

    The jitter amplitude is seeded from the median of ``yerr`` (or the
    median absolute point-to-point difference of ``y`` when ``yerr`` is
    None), and the hyperparameters are optimised by minimising ``NLL0``.

    Parameters
    ----------
    x, y : array-like
        Independent variable and observations.
    yerr : array-like, optional
        Per-point uncertainties; only used to seed the jitter level.
    verbose : bool, optional
        Print initial and fitted hyperparameter vectors.
    doPlot : bool, optional
        Plot data and prediction (only when ``xpred`` is given).
    xpred : array-like, optional
        Locations at which to evaluate the predictive distribution.

    Returns
    -------
    soln.x, or (soln.x, mu, std) when ``xpred`` is supplied.
    """
    k = terms.Matern32Term(log_sigma=0.0, log_rho=0.0)
    # Seed the white-noise level from reported errors if available,
    # otherwise from the point-to-point scatter of y.
    if yerr is None:
        wn = np.median(abs(np.diff(y)))
    else:
        wn = np.median(yerr)
    k += terms.JitterTerm(log_sigma=np.log(wn))

    gp = GP(k, mean=1.0)
    gp.compute(x)
    HP_init = gp.get_parameter_vector()
    soln = minimize(NLL0, gp.get_parameter_vector(), jac=True, args=(gp, y))
    gp.set_parameter_vector(soln.x)
    if verbose:
        # Fixed: the original used Python 2 `print` statements, which are
        # syntax errors under Python 3 (used elsewhere in this codebase).
        print('Initial pars:', HP_init)
        print('Fitted pars:', soln.x)
    if xpred is None:
        return soln.x
    mu, var = gp.predict(y, xpred, return_var=True)
    std = np.sqrt(var)
    if doPlot:
        plt.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
        plt.plot(xpred, mu, 'C0')
        plt.fill_between(xpred, mu + std, mu - std,
                         color='C0', alpha=0.4, lw=0)
    return soln.x, mu, std
def dSHO_maxlikelihood(lc, npeaks=1):
    """Maximum-likelihood fit of a damped-SHO GP model to a light curve.

    Builds a non-periodic SHO background term (Q frozen at 1/sqrt(2)),
    ``npeaks`` periodic SHO terms, and a jitter term, then optimises the
    kernel hyperparameters with L-BFGS-B.

    Parameters
    ----------
    lc : mapping
        Must provide 'Time', 'NormFlux' and 'NormErr' arrays.
    npeaks : int, optional
        Number of periodic SHO components to add.

    Returns
    -------
    celerite.GP
        The GP with its parameter vector set to the ML solution.
    """
    flux_var = np.var(lc['NormFlux'])
    bounds = dict(log_S0=(-16, 16), log_Q=(-15, 15), log_omega0=(-15, 15))

    # Non-periodic (background) component; Q is pinned at 1/sqrt(2).
    q_bg = 1.0 / np.sqrt(2.0)
    w_bg = 3.0
    kernel = terms.SHOTerm(log_S0=np.log(flux_var / (w_bg * q_bg)),
                           log_Q=np.log(q_bg),
                           log_omega0=np.log(w_bg),
                           bounds=bounds)
    kernel.freeze_parameter("log_Q")  # We don't want to fit for "Q" here

    # One periodic SHO term per requested peak, seeded identically.
    for _ in range(npeaks):
        q_osc = 1.0
        w_osc = 3.0
        kernel += terms.SHOTerm(log_S0=np.log(flux_var / (w_osc * q_osc)),
                                log_Q=np.log(q_osc),
                                log_omega0=np.log(w_osc),
                                bounds=bounds)

    # White-noise term seeded from the median reported uncertainty.
    kernel += terms.JitterTerm(log_sigma=np.log(np.median(lc['NormErr'])))

    gp = celerite.GP(kernel, mean=np.mean(lc['NormFlux']))
    gp.compute(lc['Time'], lc['NormErr'])  # compute must be called once
    print("Initial log likelihood: {0}".format(
        gp.log_likelihood(lc['NormFlux'])))

    r = minimize(neg_log_like,
                 gp.get_parameter_vector(),
                 method="L-BFGS-B",
                 bounds=gp.get_parameter_bounds(),
                 args=(lc['NormFlux'], gp))
    gp.set_parameter_vector(r.x)

    print("Final log likelihood: {0}".format(gp.log_likelihood(
        lc['NormFlux'])))
    print("Maximum Likelihood Soln: {}".format(gp.get_parameter_dict()))
    return gp
def _compute_modes_kernel(self, params, white=0):
    """Build the summed celerite kernel for a set of oscillation modes.

    Parameters
    ----------
    params : array-like, shape (n_modes, n_pars)
        One row of mode parameters per mode; each row is passed to
        ``self._compute_mode_kernel``.
    white : float, optional
        If positive, a ``JitterTerm`` with ``log_sigma = log(white)`` is
        added to model white noise.

    Returns
    -------
    kernel
        Sum of the per-mode kernels, plus the optional jitter term.
    """
    # Seed the sum with the first mode, then accumulate the rest
    # (replaces the original `if i == 0` branch inside the loop).
    kernel = self._compute_mode_kernel(params[0, :])
    for i in range(1, len(params)):
        kernel += self._compute_mode_kernel(params[i, :])
    if white > 0:
        kernel += terms.JitterTerm(log_sigma=np.log(white))
    return kernel
def get_basic_kernel(t, y, yerr):
    """Build a basic granulation (SHO, Q frozen) + jitter kernel.

    Parameters
    ----------
    t, y, yerr : array-like
        Times, fluxes and per-point uncertainties (t is unused here but
        kept for interface symmetry with callers).

    Returns
    -------
    kernel
        SHO term (log_Q frozen) plus a jitter term.
    """
    kernel = terms.SHOTerm(
        log_S0=np.log(np.var(y)),
        log_Q=-np.log(4.0),
        log_omega0=np.log(2 * np.pi / 10.),
        bounds=dict(
            log_S0=(-20.0, 10.0),
            log_omega0=(np.log(2 * np.pi / 80.0), np.log(2 * np.pi / 2.0)),
        ),
    )
    kernel.freeze_parameter('log_Q')

    # Finally some jitter. Fixed: seed from the *median* uncertainty —
    # the original passed np.log(yerr) (the whole array) as the scalar
    # log_sigma hyperparameter; the sibling version of this function uses
    # the median, and celerite expects a scalar here.
    kernel += terms.JitterTerm(log_sigma=np.log(np.median(yerr)),
                               bounds=[(-20.0, 5.0)])
    return kernel
def compute_background(t, amp, freqs, white=0):
    """Build a granulation-background GP: jitter plus one SHO term per
    (amplitude, frequency) pair, with Q fixed at 1/sqrt(2)."""
    # A zero white-noise level would make log() blow up below; floor it.
    if white == 0:
        white = 1e-6

    kernel = terms.JitterTerm(log_sigma=np.log(white))

    S0 = calculateS0(amp, freqs)
    print(f"S0: {S0}")

    # One Harvey-like SHO component per granulation timescale.
    for i, _ in enumerate(amp):
        kernel += terms.SHOTerm(log_S0=np.log(S0[i]),
                                log_Q=np.log(1 / np.sqrt(2)),
                                log_omega0=np.log(2 * np.pi * freqs[i]))

    gp = celerite.GP(kernel)
    return kernel, gp, S0
def get_basic_kernel(t, y, yerr, period=False):
    """Build a granulation (SHO, Q frozen) + jitter kernel for a light
    curve; jitter is centred on the median reported uncertainty."""
    if not period:
        period = 0.5  # default period; not otherwise used in this body

    omega_lo = np.log(2 * np.pi / 100.)
    omega_hi = np.log(2 * np.pi / (10))
    kernel = terms.SHOTerm(
        log_S0=np.log(np.var(y)),
        log_Q=-np.log(4.0),
        log_omega0=np.log(2 * np.pi / 20.),
        bounds=dict(
            log_S0=(-20.0, 10.0),
            log_omega0=(omega_lo, omega_hi),
        ),
    )
    kernel.freeze_parameter('log_Q')
    ## tau = 2*np.exp(-1*np.log(4.0))/np.exp(log_omega0)

    # Jitter seeded at the median uncertainty, free to move +/- 5 in log.
    log_med_err = np.log(np.median(yerr))
    kernel += terms.JitterTerm(log_sigma=log_med_err,
                               bounds=[(log_med_err - 5.0, log_med_err + 5.0)])
    return kernel
def init_gp(self, log_amp=-5, log_tau=-3, log_sigma=-5):
    """Construct and pre-compute the transit-mean Matern-3/2 + jitter GP,
    storing it on ``self.gp``."""
    # Transit-parameter bounds: t0 unbounded; p, r positive;
    # k, b, q1, q2 restricted to [0, 1].
    mean_bounds = [(-np.inf, np.inf), (0, np.inf), (0, 1),
                   (0, np.inf), (0, 1), (0, 1), (0, 1)]
    names = 't0,p,k,r,b,q1,q2'.split(',')
    init = {name: self.init_params.get(name) for name in names}
    mean_model = TransitMeanModel(**init, bounds=mean_bounds)

    cov = terms.Matern32Term(log_sigma=log_amp, log_rho=log_tau,
                             bounds=[(-15, 5), (-15, 5)])
    cov += terms.JitterTerm(log_sigma=log_sigma, bounds=[(-10, 0)])

    self.gp = celerite.GP(cov, mean=mean_model, fit_mean=True)
    self.gp.compute(self.t)
# --- Fragment: RV granulation + oscillation GP setup (enclosing scope not
# visible in this chunk; `Q`, `w0`, `fiber`, `muhz2omega`, `RVModel` and
# `loc` are defined elsewhere). ---

# Harvey-like background: SHO term whose Q is frozen just below.
S0 = np.var(fiber['rv']) / (w0*Q)
kernel = terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q),
                       log_omega0=np.log(w0),
                       bounds=[(-20, 20), (-15, 15),
                               (np.log(muhz2omega(0.1)),
                                np.log(muhz2omega(100)))])  # omega bounds 0.1-100 muhz
kernel.freeze_parameter("log_Q")  # to make it a Harvey model

# numax: oscillation-envelope component.
Q = np.exp(3.0)
w0 = muhz2omega(25)  # peak of oscillations
S0 = np.var(fiber['rv']) / (w0*Q)
kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q),
                        log_omega0=np.log(w0),
                        bounds=[(-20, 20), (0.1, 5),
                                (np.log(muhz2omega(5)),
                                 np.log(muhz2omega(50)))])
# White-noise term.
kernel += terms.JitterTerm(log_sigma=1, bounds=[(-20,40)])

# initial guess of RV (Keplerian) model
initial = RVModel(vfiber=0, K=50, w=90, e=0.01, Tr=5613.2744, period=30.37,
                  bounds=[(-20,20), (0,100), (0,360), (0,0.99),
                          (5600,5700), (28,32)])
# parameter_names = ("vfiber", "K", "w", "e", "Tr","P")

# NOTE(review): `as_matrix` was removed in modern pandas — this assumes an
# old pandas version; confirm before upgrading.
time, rv, erv = fiber.as_matrix(['time','rv','erv']).T.astype(float)
gp = celerite.GP(kernel, mean=initial, fit_mean=True)
gp.compute(time, erv)

# Labels match kernel hyperparameters followed by the RV-model parameters.
labels = ['logS01', 'logomega01', 'logS0osc', 'logQosc', 'logomega0osc',
          'logsigma', "v", "K", "w", "e", "Tr", "P"]
# Load MCMC samples previously written by other code.
samples = np.load(loc+'/samples.npy')
kernel.freeze_parameter("terms[1]:log_Q") #to make it a Harvey model #numax Q = np.exp(3.0) w0 = muhz2omega(135) #peak of oscillations at 133 uhz S0 = np.var(lc) / (w0 * Q) kernel += terms.SHOTerm(log_S0=np.log(S0), log_Q=np.log(Q), log_omega0=np.log(w0)) initial = TransitModel(log_ror=np.log(0.02), log_aor=np.log(10), log_T0=np.log(133.9), log_per=np.log(42.3), log_b=np.log(89)) kernel += terms.JitterTerm(log_sigma=0) gp = celerite.GP( kernel, mean=initial, fit_mean=True ) #, log_white_noise=np.log(np.mean(yerr)**2/len(t)), fit_white_noise=False) gp.compute(time, elc) gp.set_parameter_vector(logmedians) #print(gp.get_parameter_vector()) ######PSD############# p2 = kernel.get_psd(muhz2omega(f)) p2 = p2 / (2 * np.pi) #ppm^2/Hz (same as end of gatspy) df = (f[1] - f[0]) / 1e6 lhs = (1 / len(time)) * np.sum(lc**2)
# --- Fragment: accuracy benchmark of the celerite solver (enclosing scope
# not visible; `N`, `J`, `alpha_error` and `k` are defined elsewhere). ---

# Random sorted time stamps and uniform error bars.
t = np.sort(np.random.uniform(0, N.max() * 0.8, N.max()))
yerr = np.random.uniform(1.0, 1.5, len(t))

for ix, j in enumerate(J):
    # Start from two real terms, then add SHO terms until the kernel's
    # coefficient count (reals + 2x complex pairs) reaches 2*j.
    kernel = terms.RealTerm(np.random.uniform(-1, 1),
                            np.random.uniform(-5, -1))
    kernel += terms.RealTerm(np.random.uniform(-1, 1),
                             np.random.uniform(-5, -1))
    while (len(kernel.coefficients[0]) +
           2 * len(kernel.coefficients[2]) < 2 * j):
        kernel += terms.SHOTerm(
            log_S0=np.random.uniform(-1, 1),
            log_omega0=np.random.uniform(-5, 0),
            log_Q=np.random.uniform(0, 1),
        )
    kernel += terms.JitterTerm(np.random.uniform(-1, 1))
    assert (len(kernel.coefficients[0]) +
            2 * len(kernel.coefficients[2]) == 2 * j)

    gp = celerite.GP(kernel)
    for iy, n in enumerate(N):
        gp.compute(t[:n], yerr[:n])
        # Draw a known solution alpha_true, build y = K @ alpha_true, and
        # check that apply_inverse recovers alpha_true.
        alpha_true = np.random.randn(n)
        args = [kernel.jitter] + list(kernel.coefficients)
        args += [t[:n], alpha_true]
        y = gp.solver.dot(*args)[:, 0] + yerr[:n]**2 * alpha_true
        alpha = gp.apply_inverse(y[:n])[:, 0]
        logdet = gp.solver.log_determinant()
        alpha_error[k, ix, iy] = np.max(np.abs(alpha - alpha_true))
# --- Fragment: tail of a custom rotation-term method plus GP setup for a
# MultiNest fit (the enclosing `def` starts before this chunk). ---

    # Tail of a RotationTerm coefficient method — presumably
    # get_complex_coefficients; TODO confirm against the full file.
    f = np.exp(log_factor)
    return (
        np.exp(log_amp) / (2.0 + f),
        0.0,
        np.exp(-log_timescale),
        2*np.pi*np.exp(-log_period),
    )

# Rotation kernel seeded from the variance of the median-subtracted flux.
rot_kernel = terms.TermSum(RotationTerm(
    log_amp=np.log(np.var((f-np.median(f)))),
    log_timescale=np.log(10.0),
    log_period=np.log(3.0),
    log_factor=np.log(1.0)))

# Jitter term: 100 ppm initial amplitude.
kernel_jitter = terms.JitterTerm(np.log(100*1e-6))

# Wrap GP object to compute likelihood
kernel = rot_kernel + kernel_jitter
gp = celerite.GP(kernel, mean=0.0)
gp.compute(t)

# Period/timescale limits derived from the sampling cadence and baseline.
min_timescale = np.log(np.min(np.abs(np.diff(t)))/2.)
max_timescale = np.log(np.max(t)-np.min(t))
print('Setting maximum and minimum timescales of period to:',
      np.exp(min_timescale), np.exp(max_timescale))

# Now define MultiNest priors and log-likelihood:
def prior(cube, ndim, nparams):
    # Prior on "median flux" is uniform:
    cube[0] = utils.transform_uniform(cube[0], 0.5, 1.5)
    # Prior on log-"amplitude" is uniform:
    cube[1] = utils.transform_uniform(cube[1], -30, 30.)
# --- Fragment: lookup tables of named kernels (the george `kernels` dict
# opens before this chunk; the celerite dict is not closed within it). ---
    "Mat32": kernels.Matern32Kernel((0.5)**2),
    "Exp": kernels.ExpKernel((0.5)**2),
    # "W": kernels.WhiteKernel,  # deprecated, delegated to `white_noise`
    "B": kernels.ConstantKernel,
}

# Available george solver backends.
george_solvers = {
    "basic": george.BasicSolver,
    "HODLR": george.HODLRSolver,
}

# Named celerite terms with broad default bounds.
celerite_terms = {
    "N": terms.Term(),
    "B": terms.RealTerm(log_a=-6., log_c=-np.inf,
                        bounds={"log_a": [-30, 30],
                                "log_c": [-np.inf, np.inf]}),
    "W": terms.JitterTerm(log_sigma=-25,
                          bounds={"log_sigma": [-30, 30]}),
    "Mat32": terms.Matern32Term(
        log_sigma=1., log_rho=1.,
        bounds={"log_sigma": [-30, 30],
                # The `celerite` version of the Matern-3/2
                # kernel has problems with very large `log_rho`
                # values. -7.4 is empirical.
                "log_rho": [-7.4, 16]}),
    "SHO0": terms.SHOTerm(log_S0=-6, log_Q=1.0 / np.sqrt(2.), log_omega0=0.,
                          bounds={"log_S0": [-30, 30],
                                  "log_Q": [-30, 30],
                                  "log_omega0": [-30, 30]}),
    "SHO1": terms.SHOTerm(log_S0=-6, log_Q=-2., log_omega0=0.,
                          bounds={"log_S0": [-10, 10],
                                  "log_omega0": [-10, 10]}),
# --- Fragment: transit mean-model construction (the constructor call opens
# before this chunk; `kernel`, `mean`, `t`, `yerr` and `res` are defined
# elsewhere; the trailing function continues past the chunk). ---
    log_rho=np.log(0.008),
    log_T0=np.log(res['pnum0']['T0']),
    log_per=np.log(res['pnum0']['period']),
    log_imp=np.log(res['pnum0']['impact']),
    bounds=[(-5, 0), (-6.0, -3.0),
            (np.log(res['pnum0']['T0'] - 1),
             np.log(res['pnum0']['period'])),
            (1, 3),
            (np.log(0.01), np.log(1.1))])
# ecosw=res['pnum0']['ecosw'], esinw=res['pnum0']['esinw'],

# Only the period is held fixed during the fit; the commented lines show
# other parameters that were frozen in earlier experiments.
#mean.freeze_parameter("log_rho")
mean.freeze_parameter("log_per")
#mean.freeze_parameter("log_T0")
#mean.freeze_parameter("log_imp")

kernel += terms.JitterTerm(log_sigma=-10, bounds=[(-20, 20)])
gp = celerite.GP(kernel, mean=mean, fit_mean=True)
gp.compute(t, yerr)

# find max likelihood params
from scipy.optimize import minimize

def neg_log_like(params, y, gp):
    # Negative log-likelihood objective for `minimize`; returns a large
    # penalty for non-finite likelihoods (body continues past this chunk).
    gp.set_parameter_vector(params)
    ll = gp.log_likelihood(y)
    if not np.isfinite(ll):
        return 1e10
# --- Fragment: celerite test-suite code (the enclosing test starts before
# this chunk; `gp`, `y`, `x` and `ll` are defined in it). ---

    # Dense (numpy) reference computation of the GP log-likelihood, to
    # cross-check the celerite solver value `ll`.
    K = gp.get_matrix(include_diagonal=True)
    ll0 = -0.5 * np.dot(y, np.linalg.solve(K, y))
    ll0 -= 0.5 * np.linalg.slogdet(K)[1]
    ll0 -= 0.5 * len(x) * np.log(2*np.pi)
    assert np.allclose(ll, ll0), "face"


@pytest.mark.parametrize(
    "kernel",
    [
        terms.RealTerm(log_a=0.1, log_c=0.5),
        terms.RealTerm(log_a=0.1, log_c=0.5) +
        terms.RealTerm(log_a=-0.1, log_c=0.7),
        terms.ComplexTerm(log_a=0.1, log_c=0.5, log_d=0.1),
        terms.ComplexTerm(log_a=0.1, log_b=-0.2, log_c=0.5, log_d=0.1),
        terms.JitterTerm(log_sigma=0.1),
        terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5) +
        terms.JitterTerm(log_sigma=0.1),
        terms.SHOTerm(log_S0=0.1, log_Q=-1, log_omega0=0.5),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) +
        terms.RealTerm(log_a=0.1, log_c=0.4),
        terms.SHOTerm(log_S0=0.1, log_Q=1.0, log_omega0=0.5) *
        terms.RealTerm(log_a=0.1, log_c=0.4),
    ]
)
def test_grad_log_likelihood(kernel, seed=42, eps=1.34e-7):
    # Finite-difference check of the likelihood gradient for a variety of
    # kernels (body continues past this chunk).
    np.random.seed(seed)
    x = np.sort(np.random.rand(100))
    yerr = np.random.uniform(0.1, 0.5, len(x))
    y = np.sin(x)
def GPSpec_2Comp(wav, flux, flux_err, shifts_in=None, nsteps=2000,
                 nrange=3, prefix='RR2'):
    """Two-component GP spectral disentangling with emcee MCMC.

    NOTE(review): this function is Python 2 (print statements, raw_input)
    and is interactive (prompts for the burn-in length).

    Parameters
    ----------
    wav, flux, flux_err : 2-D arrays, shape (K, N)
        Wavelengths (nm), fluxes (continuum ~1) and uncertainties for K
        spectra of N pixels each.
    shifts_in : array, optional
        Initial guesses for the 2*(K-1) velocity shifts.
    nsteps : int
        Number of MCMC iterations.
    nrange : int
        Number of wavelength panels in the combined-spectrum plot.
    prefix : str or None
        Filename prefix for output figures; None skips all plotting output.

    Returns
    -------
    par_MAP, par50, lower, upper[, figs] — MAP and median parameters (km/s)
    with 16/84-percentile errors; figures only when `prefix` is not None.
    """
    # NB: input wavelengths should be in nm, flux continuum should be about 1
    K, N = wav.shape
    # Create 2-D array of scaled log wavelengths for fitting
    lwav = np.log(wav * 1e-9)  # in m
    lw0, lw1 = lwav.min(), lwav.max()
    x = (lwav - lw0) / (lw1 - lw0)

    # First do GP fit to individual spectra to get estimate of GP HPs
    print 'GP fit to individual spectra'
    HPs = np.zeros((K, 3))
    for i in range(K):
        xx = x[i, :].flatten()
        yy = flux[i, :].flatten()
        ee = flux_err[i, :].flatten()
        HPs[i, :] = Fit0_Jitter(xx, yy, ee, verbose=False)
    # Median hyperparameters across spectra, shared by both components.
    HPs = np.median(HPs, axis=0)
    print 'GP HPs:', HPs
    k = terms.Matern32Term(log_sigma=HPs[0], log_rho=HPs[1])
    k += terms.JitterTerm(log_sigma=HPs[2])
    gp1 = GP(k, mean=1.0)
    gp2 = GP(k, mean=1.0)

    # Initial (ML) estimate of parameters
    print "Starting ML fit"
    if shifts_in is None:
        shifts_in = np.zeros(2 * (K - 1))
    # Convert velocity shifts (km/s implied by SPEED_OF_LIGHT scaling) to
    # the scaled log-wavelength coordinate.
    par_in = shifts_in / SPEED_OF_LIGHT / (lw1 - lw0)
    ML_par = np.array(Fit2(x, flux, gp1, gp2, verbose=False, par_in=par_in))
    par_ML = np.copy(ML_par)
    par_ML *= (lw1 - lw0) * SPEED_OF_LIGHT * 1e-3
    print "ML fit done"

    # MCMC: walkers initialised in a tight ball around the ML solution.
    print "Starting MCMC"
    ndim = len(ML_par)
    nwalkers = ndim * 4
    p0 = ML_par + 1e-4 * np.random.randn(nwalkers, ndim)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, LP2,
                                    args=[gp1, gp2, x, flux])
    for i, result in enumerate(sampler.sample(p0, iterations=nsteps)):
        # Text progress bar.
        n = int((30 + 1) * float(i) / nsteps)
        print i
        sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (30 - n)))
    sys.stdout.write("\n")
    print("MCMC done")

    # find MAP parameters
    iMAP = np.argmax(sampler.flatlnprobability)
    MAP_par = sampler.flatchain[iMAP, :].flatten()
    # extract MCMC chains
    samples = sampler.chain
    Lprob = sampler.lnprobability
    # convert chains back to physical units: shifts in km/s
    samples_tpl = np.copy(samples)
    samples_tpl *= (lw1 - lw0) * SPEED_OF_LIGHT * 1e-3
    par_MAP = np.copy(MAP_par)
    par_MAP *= (lw1 - lw0) * SPEED_OF_LIGHT * 1e-3

    # parameter names for plots
    labels = []
    for i in range(K - 1):
        labels.append(r'$\delta v^1_{%d}$ (km/s)' % (i + 1))
    for i in range(K - 1):
        labels.append(r'$\delta v^2_{%d}$ (km/s)' % (i + 1))
    labels = np.array(labels)
    names = []
    for i in range(K - 1):
        names.append('dv1_%d (km/s)' % (i + 1))
    for i in range(K - 1):
        names.append('dv2_%d (km/s)' % (i + 1))
    names = np.array(names)

    # Plot the chains (top panel: log-probability; one panel per parameter)
    fig1 = plt.figure(figsize=(12, 2 * (K - 1) + 2))
    gs1 = gridspec.GridSpec(ndim + 1, 1)
    gs1.update(left=0.1, right=0.98, bottom=0.07, top=0.98, hspace=0)
    ax1 = plt.subplot(gs1[0, 0])
    plt.setp(ax1.get_xticklabels(), visible=False)
    plt.plot(Lprob.T, 'k-', alpha=0.2)
    plt.ylabel(r'$\ln P$')
    for i in range(ndim):
        print i, ndim, len(labels)
        axc = plt.subplot(gs1[i + 1, 0], sharex=ax1)
        if i < (ndim - 1):
            plt.setp(axc.get_xticklabels(), visible=False)
        plt.plot(samples_tpl[:, :, i].T, 'k-', alpha=0.2)
        plt.ylabel(labels[i])
    plt.xlim(0, nsteps)
    plt.xlabel('iteration number')

    # Discard burnout (interactive prompt; Python 2 raw_input)
    nburn = int(raw_input('Enter no. steps to discard as burnout: '))
    plt.axvline(nburn)

    # Evaluate and print the parameter ranges
    print '\n{:20s}: {:10s} {:10s} {:10s} - {:7s} + {:7s}'.format('Parameter', 'ML', 'MAP', \
        'Median','Error','Error')
    par50 = np.zeros(ndim)
    par84 = np.zeros(ndim)
    par16 = np.zeros(ndim)
    for i in range(ndim):
        sam = samples_tpl[:, :, i].flatten()
        b, m, f = np.percentile(sam, [16, 50, 84])
        par50[i] = m
        par16[i] = b
        par84[i] = f
        print '{:20s}: {:10.5f} {:10.5f} {:10.5f} - {:7.5f} + {:7.5f}'.format(names[i], \
            par_ML[i], \
            par_MAP[i], \
            m, m-b, f-m)
    if prefix is None:
        return par_MAP, par50, par50 - par16, par84 - par50
    plt.savefig('%s_chains.png' % prefix)

    samples_flat = samples[:, nburn:, :].reshape(-1, ndim)
    samples_tpl_flat = samples_tpl[:, nburn:, :].reshape(-1, ndim)
    # Plot the parameter distributions
    fig2 = corner.corner(samples_tpl_flat, truths = par_MAP, labels = labels,
                         show_titles = True, quantiles = [0.16, 0.84])
    plt.savefig('%s_corner.png' % prefix)

    # Plot the individual spectra with MAP fit
    xpred = np.copy(x)
    fpred, fpred_err, f1pred, f2pred = Pred2_2D(MAP_par, gp1, gp2, x, flux,
                                                flux_err, xpred=xpred)
    lwpred = (lw1 - lw0) * xpred + lw0
    wpred = np.exp(lwpred) * 1e9
    fig3 = plt.figure(figsize=(12, K + 1))
    gs3 = gridspec.GridSpec(K, 1)
    gs3.update(left=0.1, right=0.98, bottom=0.07, top=0.98, hspace=0)
    for i in range(K):
        if i == 0:
            ax1 = plt.subplot(gs3[0, 0])
        else:
            axc = plt.subplot(gs3[i, 0], sharex=ax1, sharey=ax1)
        if i < (K - 1):
            plt.setp(ax1.get_xticklabels(), visible=False)
        plt.plot(wpred[i, :], f1pred[i, :], 'C1')
        plt.plot(wpred[i, :], f2pred[i, :], 'C2')
        plt.fill_between(wpred[i,:], fpred[i,:] + 2 * fpred_err[i,:], \
                         fpred[i,:] - fpred_err[i,:],
                         color = 'C0', alpha = 0.4, lw = 0)
        plt.plot(wpred[i, :], fpred[i, :], 'C0')
        plt.ylabel('spec. %d' % (i + 1))
        plt.errorbar(wav[i,:], flux[i,:], yerr = flux_err[i,:], \
                     fmt = ".k", ms = 3, mec = 'none', capsize = 0,
                     alpha = 0.5, lw=0.5)
    plt.xlim(wav.min(), wav.max())
    plt.xlabel('wavelength (nm)')
    plt.savefig('%s_spectra.png' % prefix)

    # Plot the combined spectra with samples from MCMC chain.
    # Shift each spectrum by the MAP shifts and build the joint covariance.
    s1 = np.append(0.0, MAP_par[:K - 1])
    x11d = (x + s1[:, None]).flatten()
    lw11d = (lw1 - lw0) * x11d + lw0
    w11d = np.exp(lw11d) * 1e9
    K1 = gp1.get_matrix(x11d)
    s2 = np.append(0.0, MAP_par[K - 1:])
    x21d = (x + s2[:, None]).flatten()
    lw21d = (lw1 - lw0) * x21d + lw0
    w21d = np.exp(lw21d) * 1e9
    K2 = gp2.get_matrix(x21d)
    y1derr = flux_err.flatten()
    Ktot = K1 + K2 + np.diag(y1derr**2)
    y1d = flux.flatten() - 1.0
    # Component-subtracted spectra (each shown against its own component).
    y11d = (flux - f2pred).flatten() + 1
    y21d = (flux - f1pred).flatten() + 1
    offset = 1.5 * (y11d.min() - 1)
    # Cholesky solve for the joint GP weights.
    L = sla.cho_factor(Ktot)
    b = sla.cho_solve(L, y1d)
    fig4 = plt.figure(figsize=(12, 2 * nrange + 1))
    gs4 = gridspec.GridSpec(nrange, 1)
    gs4.update(left=0.1, right=0.98, bottom=0.07, top=0.98, hspace=0.15)
    ws = min(w11d.min(), w21d.min())
    wr = (max(w11d.max(), w21d.max()) - ws) / float(nrange)
    for i in range(nrange):
        if i == 0:
            ax1 = plt.subplot(gs4[0, 0])
        else:
            axc = plt.subplot(gs4[i, 0], sharey=ax1)
        # Panel edges with 5% overlap.
        wmin = ws + (i - 0.05) * wr
        wmax = ws + (i + 1.05) * wr
        l = (w11d >= wmin) * (w11d <= wmax)
        plt.errorbar(w11d[l], y11d[l], yerr = y1derr[l], fmt = ".k", capsize = 0, \
                     alpha = 0.5, ms = 2, mec='none')
        l = (w21d >= wmin) * (w21d <= wmax)
        plt.errorbar(w21d[l], y21d[l] + offset, yerr = y1derr[l], fmt = ".k", capsize = 0, \
                     alpha = 0.5, ms = 2, mec='none')
        wpred = np.linspace(wmin, wmax, 1000)
        lwpred = np.log(wpred * 1e-9)
        xpred = (lwpred - lw0) / (lw1 - lw0)
        # Overplot predictions for 10 random posterior samples.
        isamp = np.random.randint(nsteps - nburn, size=10)
        for j in isamp:
            samp_params = samples_flat[j, :].flatten()
            s1 = samp_params[:K - 1]
            x1pred = (xpred + s1[:, None]).flatten()
            lw1pred = (lw1 - lw0) * x1pred + lw0
            w1pred = np.exp(lw1pred) * 1e9
            K1s = gp1.get_matrix(x1pred, x11d)
            s2 = samp_params[K - 1:]
            x2pred = (xpred + s2[:, None]).flatten()
            lw2pred = (lw1 - lw0) * x2pred + lw0
            w2pred = np.exp(lw2pred) * 1e9
            K2s = gp2.get_matrix(x2pred, x21d)
            Ks = K1s + K2s
            K1ss = gp1.get_matrix(x1pred)
            K2ss = gp2.get_matrix(x2pred)
            Kss = K1ss + K2ss
            mu1 = np.dot(K1s, b).reshape(x1pred.shape) + 1
            mu2 = np.dot(K2s, b).reshape(x2pred.shape) + 1
            inds1 = np.argsort(w1pred)
            plt.plot(w1pred[inds1], mu1[inds1], 'C0-', lw=0.5, alpha=0.5)
            inds2 = np.argsort(w2pred)
            plt.plot(w2pred[inds2], mu2[inds2] + offset, 'C1-', lw=0.5,
                     alpha=0.5)
        plt.xlim(wmin, wmax)
        plt.ylabel('flux')
    plt.xlabel('wavelength (nm)')
    plt.savefig('%s_combined.png' % prefix)
    return par_MAP, par50, par50 - par16, par84 - par50, [
        fig1, fig2, fig3, fig4
    ]
# --- Fragment: rotation + SHO + jitter kernel setup and the start of a
# polynomial trend model class (the rotation-term constructor call opens
# before this chunk; `kernel`, `flux`, `t`, `min_period`, `max_period`,
# `log_var` are defined elsewhere). ---
        log_P=(np.log(min_period), np.log(max_period)),
        mix_par=(-5.0, 5.0),
        log_b2=(-5.0, 5.0),
        log_f2=(-5.0, 5.0),
    ),
)

# Non-periodic SHO (granulation-like) term; its Q is frozen below.
kernel += terms.SHOTerm(log_S0=log_var,
                        log_Q=-0.5 * np.log(2),
                        log_omega0=np.log(2 * np.pi / 10.0),
                        bounds=dict(log_S0=(-20.0, 20.0),
                                    log_omega0=(np.log(2 * np.pi / 80.0),
                                                np.log(2 * np.pi / 2.0))))
kernel.terms[1].freeze_parameter("log_Q")

# Jitter seeded from the point-to-point scatter of the flux.
kernel += terms.JitterTerm(log_sigma=np.log(np.median(np.abs(np.diff(flux)))),
                           bounds=[(-10.0, 10.0)])

# Constant mean at the sample mean of the flux, held fixed.
mean = celerite.modeling.ConstantModel(np.mean(flux),
                                       bounds=[(-5000.0, 5000.0)])
mean.freeze_parameter("value")

gp = celerite.GP(kernel, mean=mean)
gp.compute(t)


class PolynomialModel(modeling.ModelSet):
    # Per-section polynomial trend model (class body continues past this
    # chunk).

    def __init__(self, gp, sections, t, y, order=3):
        self.t = t
        self.y = y
        # Vandermonde design matrix in normalised time.
        A = np.vander((t - np.mean(t)) / (np.max(t) - np.min(t)), order)
        sections = np.atleast_1d(sections)
        s = np.unique(sections)
# --- Fragment: kernel-dispatch helpers (the final function is cut off at
# the end of this chunk). ---

def neo_update_kernel(theta, params):
    # Stub: constructs a george GP but does nothing with it.
    # NOTE(review): `jitt` is not defined in this chunk — confirm it exists
    # at module level.
    gp = george.GP(mean=0.0, fit_mean=False, white_noise=jitt)
    pass


from celerite import terms as cterms

# 2 or sp.log(10.) ?
# Prototype celerite terms keyed by name, used as building blocks below.
T = {
    'Constant': 1.**2,
    'RealTerm': cterms.RealTerm(log_a=2., log_c=2.),
    'ComplexTerm': cterms.ComplexTerm(log_a=2., log_b=2., log_c=2.,
                                      log_d=2.),
    'SHOTerm': cterms.SHOTerm(log_S0=2., log_Q=2., log_omega0=2.),
    'Matern32Term': cterms.Matern32Term(log_sigma=2., log_rho=2.0),
    'JitterTerm': cterms.JitterTerm(log_sigma=2.0)
}


def neo_term(terms):
    # Multiply together the prototype terms named in the first group;
    # handling of the remaining groups continues past this chunk.
    t_out = T[terms[0][0]]
    for f in range(len(terms[0])):
        if f == 0:
            pass
        else:
            t_out *= T[terms[0][f]]
    for i in range(len(terms)):
        if i == 0:
            pass
        else:
# --- Fragment: celerite Jacobian tests (the first loop belongs to a test
# whose start is before this chunk; `jac`, `jac0`, `k`, `eps`, `v` are
# defined there; the last function is cut off). ---
    for i, pval in enumerate(v):
        # Central finite difference of the coefficient vector w.r.t. each
        # parameter, compared against the analytic jacobian `jac`.
        v[i] = pval + eps
        k.set_parameter_vector(v)
        coeffs = np.concatenate(k.coefficients)
        v[i] = pval - eps
        k.set_parameter_vector(v)
        coeffs -= np.concatenate(k.coefficients)
        jac0[i] = 0.5 * coeffs / eps
        v[i] = pval
    assert np.allclose(jac, jac0)


@pytest.mark.parametrize("k", [
    terms.JitterTerm(log_sigma=0.5),
    terms.RealTerm(log_a=0.5, log_c=0.1),
    terms.RealTerm(log_a=0.5, log_c=0.1) + terms.JitterTerm(log_sigma=0.3),
    terms.JitterTerm(log_sigma=0.5) + terms.JitterTerm(log_sigma=0.1),
])
def test_jitter_jacobian(k, eps=1.34e-7):
    # Without autograd the jitter jacobian is unavailable and must raise.
    if not terms.HAS_AUTOGRAD:
        with pytest.raises(ImportError):
            jac = k.get_jitter_jacobian()
        return
    v = k.get_parameter_vector()
    jac = k.get_jitter_jacobian()
    assert len(jac) == len(v)
    jac0 = np.empty_like(jac)
    for i, pval in enumerate(v):
# --- Fragment: duplicate kernel-dispatch helpers (the final function is
# cut off at the end of this chunk). ---

def neo_update_kernel(theta, params):
    # Stub: constructs a george GP but does nothing with it.
    # NOTE(review): `jitt` is not defined in this chunk — confirm it exists
    # at module level.
    gp = george.GP(mean=0.0, fit_mean=False, white_noise=jitt)
    pass


import celerite
from celerite import terms as cterms

# 2 or sp.log(10.) ?
# Prototype celerite terms keyed by name, used as building blocks below.
T = {
    'Constant': 1.**2,
    'RealTerm': cterms.RealTerm(log_a=2., log_c=2.),
    'ComplexTerm': cterms.ComplexTerm(log_a=2., log_b=2., log_c=2.,
                                      log_d=2.),
    'SHOTerm': cterms.SHOTerm(log_S0=2., log_Q=2., log_omega0=2.),
    'Matern32Term': cterms.Matern32Term(log_sigma=2., log_rho=2.),
    'JitterTerm': cterms.JitterTerm(log_sigma=1e-8)
}


def neo_init_terms(terms):
    # Multiply together the prototype terms named in the first group;
    # handling of the remaining groups continues past this chunk.
    t_out = T[terms[0][0]]
    for f in range(len(terms[0])):
        if f == 0:
            pass
        else:
            t_out *= T[terms[0][f]]
    for i in range(len(terms)):
        if i == 0:
            pass
        else: