def exp(sigma, cts):
    '''
    Exponential covariance function

        C(t,t') = sigma^2 * exp(-|t - t'|/cts)

    Parameters
    ----------
    sigma [mm] : Standard deviation of displacements
    cts [yr] : Characteristic time-scale
    '''
    return gauss.gpexp((0.0, sigma**2, cts), dim=1)
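# Minimal usage sketch (an assumption, not from the source): construct the
# prior and evaluate its covariance at a few times. Assumes `rbf.gauss` is
# imported as `gauss` and that `gpexp` returns a GaussianProcess exposing
# a `covariance` method.
import numpy as np
from rbf import gauss

gp = exp(sigma=2.0, cts=0.5)           # 2 mm amplitude, 0.5 yr time-scale
t = np.linspace(0.0, 2.0, 5)[:, None]  # observation times as an (n, 1) array
C = gp.covariance(t, t)                # 5 x 5 covariance matrix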
def fogm(sigma, w):
    '''
    First-order Gauss-Markov process

        C(t,t') = sigma^2/(2*w) * exp(-w*|t - t'|)

    Parameters
    ----------
    sigma [mm/yr^0.5] : Standard deviation of the forcing term
    w [yr^-1] : Cutoff angular frequency
    '''
    coeff = sigma**2 / (2 * w)
    cts = 1.0 / w
    return gauss.gpexp((0.0, coeff, cts), dim=1)
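# Hedged sanity check (not from the source): at zero lag the FOGM covariance
# should reduce to sigma^2/(2*w). Assumes the same GaussianProcess interface
# as above.
import numpy as np

gp = fogm(sigma=1.0, w=4.0)
t = np.array([[0.0]])
assert np.isclose(gp.covariance(t, t)[0, 0], 1.0 / (2 * 4.0))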
def estimate_scales(tslength):
    '''
    Estimate the random walk scale for time series truncated to `tslength`,
    using both maximum likelihood (ML) and restricted maximum likelihood
    (REML), and append the resulting statistics to `ml_file` and `reml_file`.
    '''
    def ml_objective(theta, d, cov_rw, cov_w, p):
        '''negative log-likelihood of the random walk scale `theta`'''
        cov = theta**2 * cov_rw + w_scale**2 * cov_w
        mu = np.zeros(d.shape[0])
        return -likelihood(d, mu, cov, p)

    def reml_objective(theta, d, cov_rw, cov_w, p):
        '''negative restricted log-likelihood of `theta`'''
        cov = theta**2 * cov_rw + w_scale**2 * cov_w
        mu = np.zeros(d.shape[0])
        return -restricted_likelihood(d, mu, cov, p)

    ml_solns = []
    reml_solns = []
    # time indices to use
    idx = time < tslength
    P = gppoly(1).basis(time[idx, None])
    COV_RW = gpbrown(1.0).covariance(time[idx, None], time[idx, None])
    COV_W = gpexp((0.0, 1.0, 1e-10)).covariance(time[idx, None], time[idx, None])
    for data in data_sets:
        ans = fmin_pos(ml_objective, [1.0],
                       args=(data[idx], COV_RW, COV_W, P), disp=False)
        ml_solns += [ans[0]]
        ans = fmin_pos(reml_objective, [1.0],
                       args=(data[idx], COV_RW, COV_W, P), disp=False)
        reml_solns += [ans[0]]

    # compute statistics on the solutions
    mean = np.mean(ml_solns)
    percs = np.percentile(ml_solns, np.arange(0, 105, 5))
    entry = '%s %s %s\n' % (tslength, mean, ' '.join(percs.astype(str)))
    ml_file.write(entry)
    ml_file.flush()

    mean = np.mean(reml_solns)
    percs = np.percentile(reml_solns, np.arange(0, 105, 5))
    entry = '%s %s %s\n' % (tslength, mean, ' '.join(percs.astype(str)))
    reml_file.write(entry)
    reml_file.flush()
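# A hypothetical driver (not in the source): `estimate_scales` reads the
# module-level `time`, `data_sets`, `w_scale`, `ml_file`, and `reml_file`,
# so a sweep over time-series lengths might look like this. The lengths are
# assumed values.
for tslength in np.arange(0.5, 13.0, 0.5):
    estimate_scales(tslength)

ml_file.close()
reml_file.close()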
        x1, x2 = np.meshgrid(x1, x2, indexing='ij')
        # Brownian motion covariance: coeff * min(t, t')
        out = coeff * np.min([x1, x2], axis=0)
        return out

    out = GaussianProcess(mean, cov, dim=1)
    return out


rw_scale = 1.3
w_scale = 1.1
# sampling period in years
dt = 1.0 / 365.25
time = np.arange(dt, 2.5, dt)

# true noise model: random walk plus white noise (an exponential
# covariance with a negligible time-scale)
noise_true = gpbrown(rw_scale**2)
noise_true += gpexp((0.0, w_scale**2, 1e-10))

# view a sample in the time and frequency domain to make sure
# everything looks ok
mu, sigma = noise_true(time[:, None])
sample = noise_true.sample(time[:, None])
freq, pow = periodogram(sample, 1.0 / dt)
freq, pow = freq[1:], pow[1:]
# analytical power spectrum of the true noise model
pow_true = rw_scale**2 / (2 * np.pi**2 * freq**2) + 2 * w_scale**2 * dt

fig, axs = plt.subplots(2, 1)
axs[0].plot(time, mu, color='k', zorder=2, label='expected')
axs[0].fill_between(time, mu - sigma, mu + sigma, color='k', alpha=0.2)
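# The script is truncated above mid-figure. A plausible continuation for the
# frequency-domain panel, sketched here as an assumption rather than the
# original code, compares the sample periodogram against `pow_true`:
axs[0].legend()
axs[1].loglog(freq, pow, color='C0', label='sample periodogram')
axs[1].loglog(freq, pow_true, color='k', label='true power spectrum')
axs[1].set_xlabel('frequency [1/yr]')
axs[1].set_ylabel('power')
axs[1].legend()
plt.tight_layout()
plt.show()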
def exp(sigma, cls):
    '''
    Exponential covariance function

    Parameters
    ----------
    sigma : Standard deviation of displacements
    cls : Characteristic length-scale
    '''
    return gauss.gpexp((0.0, sigma**2, cls), dim=2)
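# Minimal 2-D usage sketch (an assumption, not from the source): with dim=2
# the covariance is evaluated at (n, 2) arrays of positions rather than
# times.
import numpy as np
from rbf import gauss

gp = exp(sigma=1.5, cls=10.0)                # length-scale in spatial units
x = np.random.uniform(0.0, 50.0, (20, 2))    # 20 station positions
C = gp.covariance(x, x)                      # 20 x 20 covariance matrix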
def objective(x, t, d):
    '''objective function to be minimized'''
    gp = gpexp((0.0, x[0], x[1]))
    return -gp.likelihood(t, d)
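# Hedged usage sketch (not from the source): minimize `objective` with
# scipy's downhill simplex. `t` and `d` are assumed to be the observation
# points and data, and the initial guess is arbitrary. Note that a plain
# simplex can wander into negative variances; a positivity-constrained
# variant (as used elsewhere in these scripts) avoids that.
from scipy.optimize import fmin

x_opt = fmin(objective, [1.0, 1.0], args=(t, d), disp=False)
variance_opt, timescale_opt = x_opt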
'''
This script demonstrates how to optimize the hyperparameters for a Gaussian
process based on the marginal likelihood. Optimization is performed in two
ways: first with a grid search method and then with a downhill simplex
method.
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin
from rbf.gauss import gpexp

np.random.seed(3)

# True signal which we want to recover. This is an exponential function
# with mean=0.0, variance=2.0, and time-scale=0.1. For graphical
# purposes, we will only estimate the variance and time-scale.
a, b, c = 0.0, 2.0, 0.1
gp = gpexp((a, b, c))

n = 500  # number of observations
time = np.linspace(-5.0, 5.0, n)[:, None]  # observation points
data = gp.sample(time)  # signal which we want to describe

# find the optimal hyperparameters with a brute force grid search
b_search = 10**np.linspace(-2, 2, 30)
c_search = 10**np.linspace(-2, 1, 30)
likelihoods = np.zeros((30, 30))
for i, b_test in enumerate(b_search):
    for j, c_test in enumerate(c_search):
        gp = gpexp((0.0, b_test, c_test))
        likelihoods[i, j] = gp.likelihood(time, data)

# find the optimal hyperparameters with a positively constrained
# downhill simplex method
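# A sketch of the simplex step announced above (an assumption, not the
# original code): enforce positivity by searching over log-parameters with
# scipy's `fmin`, then map the optimum back with `exp`.
def neg_log_likelihood(logparams):
    b_test, c_test = np.exp(logparams)
    gp = gpexp((0.0, b_test, c_test))
    return -gp.likelihood(time, data)

b_opt, c_opt = np.exp(fmin(neg_log_likelihood, np.log([1.0, 1.0]), disp=False))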