def sample(model, t, noise_f):
    """Sample from a model.

    Args:
        model (:class:`gpcm.model.AbstractGPCM`): Model to sample from.
        t (vector): Time points to sample at.
        noise_f (vector): Noise for the sample of the function. Should have the
            same size as `t`.

    Returns:
        tuple[vector, ...]: Tuple containing kernel samples, filter samples, and
            function samples.
    """
    # In the below, we look at the third inducing point, because that is the one
    # determining the value of the filter at zero: the CGPCM adds two extra
    # inducing points to the left.

    def interpolate(u_vals):
        # Condition a GP with the filter kernel on the inducing values and
        # return its posterior mean evaluated at `t`.
        p = GP(model.k_h())
        p = p | (p(model.t_u), u_vals)
        return p(t).mean.flatten()

    # Rejection-sample a smooth filter sample: keep drawing until the filter is
    # approximately zero at time zero.
    u_smooth = B.ones(model.n_u)
    while B.abs(u_smooth[2]) > 1e-2:
        u_smooth = B.sample(model.compute_K_u())[:, 0]
    u_smooth_full = interpolate(u_smooth)

    # Rejection-sample a rough filter sample: keep drawing until the filter is
    # clearly positive at time zero.
    u_rough = B.zeros(model.n_u)
    while u_rough[2] < 0.5:
        u_rough = B.sample(model.compute_K_u())[:, 0]
    u_rough_full = interpolate(u_rough)

    ks, us, fs = [], [], []
    with wbml.out.Progress(name="Sampling", total=5) as progress:
        for c in [0, 0.1, 0.23, 0.33, 0.5]:
            # Sample kernel from the mixture of the rough and smooth sample.
            K = model.kernel_approx(t, t, c * u_rough + (1 - c) * u_smooth)
            wbml.out.kv("Sampled variance", K[0, 0])
            K = K / K[0, 0]
            ks.append(K[0, :])

            # Store filter.
            us.append(c * u_rough_full + (1 - c) * u_smooth_full)

            # Sample function.
            fs.append(B.matmul(B.chol(closest_psd(K)), noise_f))

            progress()
    return ks, us, fs
def sample(model, t, noise_f):
    """Sample from a model.

    Args:
        model (:class:`gpcm.model.AbstractGPCM`): Model to sample from.
        t (vector): Time points to sample at.
        noise_f (vector): Noise for the sample of the function. Should have the
            same size as `t`.

    Returns:
        tuple[vector]: Tuple containing kernel samples and function samples.
    """
    ks, fs = [], []
    with wbml.out.Progress(name="Sampling", total=5) as progress:
        # Draw five independent samples. The loop index is unused, so name it
        # `_` rather than `i`.
        for _ in range(5):
            # Sample kernel: draw inducing values for the filter and form the
            # induced kernel approximation, normalised to unit variance.
            u = B.sample(model.compute_K_u())[:, 0]
            K = model.kernel_approx(t, t, u)
            wbml.out.kv("Sampled variance", K[0, 0])
            K = K / K[0, 0]
            ks.append(K[0, :])

            # Sample function by colouring the given noise with the Cholesky
            # factor of the (projected-to-PSD) kernel matrix.
            f = B.matmul(B.chol(closest_psd(K)), noise_f)
            fs.append(f)

            progress()
    return ks, fs
def test_prior_power(Model):
    """Estimate the prior power of the model at lag zero with Monte Carlo."""
    t_u = B.zeros(1)
    model = Model(window=2, scale=1, n_u=10, t=(0, 10))
    K_u = model.compute_K_u()
    # Estimate power with Monte Carlo: for every draw of inducing values,
    # evaluate the induced kernel approximation at lag zero.
    powers = [
        model.kernel_approx(t_u, t_u, B.sample(K_u)[:, 0])[0, 0]
        for _ in range(2_000)
    ]
def test_sample_conversion():
    """A sample from an integer-valued constant matrix must be floating."""
    drawn = B.sample(Constant(1, 5, 5))
    assert B.issubdtype(B.dtype(drawn), np.floating)
def _est_cov(a):
    """Check that the empirical covariance of many samples of `a` matches `a`
    to within 10% relative error."""
    num = 500_000
    xs = B.dense(B.sample(a, num=num))
    # Empirical covariance of the drawn samples.
    estimate = B.matmul(xs, xs, tr_b=True) / num
    dense_a = B.dense(a)
    rel_err = B.max(B.abs(dense_a - estimate)) / B.max(B.abs(dense_a))
    assert rel_err < 1e-1
def sample(a: Woodbury, num=1):
    """Sample a Woodbury matrix by summing independent samples of its diagonal
    part and its low-rank part."""
    diag_sample = B.sample(a.diag, num=num)
    lr_sample = B.sample(a.lr, num=num)
    return diag_sample + lr_sample