Example #1
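The excerpt below assumes setup that is not shown. Here is a minimal sketch of that setup, written against the older Stheno/varz API that this example uses; the kernel choices and initial values are illustrative assumptions, not the original code.

import matplotlib.pyplot as plt
import tensorflow as tf
import wbml.out
from varz.tensorflow import Vars, minimise_l_bfgs_b
from stheno.tensorflow import B, Graph, GP, EQ, Delta

# Points to predict at and observation locations (assumed ranges).
x = B.linspace(tf.float64, 0, 5, 100)
x_obs = B.linspace(tf.float64, 0, 3, 20)


def model(vs):
    g = Graph()
    # Smooth random fluctuation around the parametric form:
    u = GP(vs.pos(0.5, name='u/var') * EQ().stretch(vs.pos(0.5, name='u/scale')), graph=g)
    # Observation noise:
    e = GP(vs.pos(0.5, name='e/var') * Delta(), graph=g)
    # Prior knowledge about the form of the function: x ** alpha with a
    # learnable exponent (read back later as vs['alpha']).
    alpha = vs.pos(1.2, name='alpha')
    f = u + (lambda x_: x_ ** alpha)
    # Return the latent function and the noisy observed process.
    return f, f + e
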
# Sample a true, underlying function and observations.
vs = Vars(tf.float64)
f_true = x ** 1.8 + B.sin(2 * B.pi * x)
f, y = model(vs)
y_obs = (y | (f(x), f_true))(x_obs).sample()


def objective(vs):
    f, y = model(vs)
    evidence = y(x_obs).logpdf(y_obs)
    return -evidence


# Learn hyperparameters.
minimise_l_bfgs_b(tf.function(objective, autograph=False), vs)
f, y = model(vs)

# Print the learned parameters.
wbml.out.kv('Alpha', vs['alpha'])
wbml.out.kv('Prior', y.display(wbml.out.format))

# Condition on the observations to make predictions.
mean, lower, upper = (f | (y(x_obs), y_obs))(x).marginals()

# Plot result.
plt.plot(x, B.squeeze(f_true), label='True', c='tab:blue')
plt.scatter(x_obs, B.squeeze(y_obs), label='Observations', c='tab:red')
plt.plot(x, mean, label='Prediction', c='tab:green')
plt.plot(x, lower, ls='--', c='tab:green')
plt.plot(x, upper, ls='--', c='tab:green')
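The excerpt stops before showing the figure; a standard closing step would be:

plt.legend()
plt.show()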
Example #2
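This excerpt likewise assumes earlier setup that is not shown: `data`, `locs`, `x_pred`, `x_data`, `y_data_norm`, `data_mean`, and `data_scale` are prepared beforehand, and `objective`, `predict`, `project`, and `model` are user-defined functions of the latent-process model. A minimal sketch of the imports it relies on:

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import wbml.out
from varz.tensorflow import Vars, minimise_l_bfgs_b
from stheno.tensorflow import B
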
locs = tf.constant(locs)
x_pred = tf.constant(x_pred)
x_data = tf.constant(x_data)
y_data_norm = tf.constant(y_data_norm)

# Model parameters:
n = data.shape[0]  # Number of data points
p = data.shape[1]  # Number of outputs
m = 10  # Number of latent processes

# Learn.
vs = Vars(tf.float64)
objective_compiled = tf.function(
    lambda vs_: objective(vs_, m, x_data, y_data_norm, locs),
    autograph=False,
)
minimise_l_bfgs_b(objective_compiled, vs=vs, trace=True, iters=200)
wbml.out.kv('Learned spatial scales', vs['scales'])

# Predict.
lat_preds, obs_preds = predict(vs, m, x_data, y_data_norm, locs, x_pred)

# Convert to NumPy and undo normalisation.
obs_preds = [
    # Undo normalisation: multiply by the scale and add back the mean.
    tuple(x * data_scale[0, i] + data_mean[0, i] for x in B.to_numpy(tup))
    for i, tup in enumerate(obs_preds)
]

# Plot first four latent processes.
plt.figure(figsize=(15, 5))
y_proj, _, S, _ = B.to_numpy(project(vs, m, y_data_norm, locs))
xs, _, _ = model(vs, m)
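The plotting code itself is truncated in the excerpt. A sketch of what the loop could look like, plotting the projection of the data onto each of the first four latent processes (the layout and content of the original figure are assumptions):

for i in range(4):
    plt.subplot(2, 2, i + 1)
    plt.title(f'Latent process {i + 1}')
    plt.plot(B.to_numpy(x_data), y_proj[i], c='tab:blue')
plt.show()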
Example #3
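The excerpt below starts inside the model definition: only its final return statement survives. Here is a minimal sketch of the assumed setup and model head, in the spirit of a two-output GPAR-style model where the second output takes the first output as an extra input; kernel choices and initialisations are illustrative assumptions, not the original code.

import numpy as np
import tensorflow as tf
from varz.tensorflow import Vars, minimise_l_bfgs_b
from stheno.tensorflow import B, Measure, GP, EQ, Delta

# Inputs: x to predict at; x_obs1 and x_obs2 are the observation
# locations of the two outputs, with inds2 indexing which first-output
# observations pair with x_obs2. The observations y1_obs and y2_obs are
# assumed given.
x = B.linspace(tf.float64, 0, 10, 200)
x_obs1 = B.linspace(tf.float64, 0, 10, 30)
inds2 = np.random.permutation(30)[:10]
x_obs2 = B.take(x_obs1, inds2)


def model(vs):
    prior = Measure()
    # First output: a latent process plus noise.
    f1 = GP(vs.pos(1.0, name='f1/var') * EQ().stretch(vs.pos(1.0, name='f1/scale')), measure=prior)
    y1 = f1 + GP(vs.pos(0.1, name='e1/var') * Delta(), measure=prior)
    # Second output: a process over (input, first output) pairs, plus noise.
    f2 = GP(vs.pos(1.0, name='f2/var') * EQ().stretch(vs.pos(np.array([1.0, 0.5]), name='f2/scales')), measure=prior)
    y2 = f2 + GP(vs.pos(0.1, name='e2/var') * Delta(), measure=prior)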
    return f1, y1, f2, y2


def objective(vs):
    f1, y1, f2, y2 = model(vs)

    x1 = x_obs1
    x2 = B.stack(x_obs2, B.take(y1_obs, inds2), axis=1)
    evidence = y1(x1).logpdf(y1_obs) + y2(x2).logpdf(y2_obs)

    return -evidence


# Learn hyperparameters.
vs = Vars(tf.float64)
minimise_l_bfgs_b(objective, vs)

# Compute posteriors.
f1, y1, f2, y2 = model(vs)
x1 = x_obs1
x2 = B.stack(x_obs2, B.take(y1_obs, inds2), axis=1)
post1 = f1.measure | (y1(x1), y1_obs)
post2 = f2.measure | (y2(x2), y2_obs)
f1_post = post1(f1)
f2_post = post2(f2)

# Predict first output.
mean1, lower1, upper1 = f1_post(x).marginals()

# Predict second output with Monte Carlo.
samples = [
    # Completion sketch of the truncated excerpt: propagate samples of
    # the first output through the second layer.
    f2_post(B.stack(x, x1_sample, axis=1)).sample()[:, 0]
    for x1_sample in B.transpose(f1_post(x).sample(100))
]
# Summarise the Monte Carlo samples into a mean and credible bounds.
mean2 = np.mean(samples, axis=0)
lower2 = np.percentile(samples, 2.5, axis=0)
upper2 = np.percentile(samples, 97.5, axis=0)
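Example #4
This excerpt revisits Example #1 using the newer Stheno API, in which a model returns a GP together with a noise variance and conditioning goes through the process's measure. A minimal sketch of the assumed setup (kernel choices and initial values are illustrative; the style-aware plotting below additionally relies on wbml.plot):

import matplotlib.pyplot as plt
import tensorflow as tf
import wbml.out as out
from varz.tensorflow import Vars, minimise_l_bfgs_b
from wbml.plot import tweak
from stheno.tensorflow import B, GP, EQ

# Points to predict at and observation locations (assumed ranges).
x = B.linspace(tf.float64, 0, 5, 100)
x_obs = B.linspace(tf.float64, 0, 3, 20)


def model(vs):
    # Smooth random fluctuation around the parametric form:
    u = GP(vs.pos(0.5, name="u/var") * EQ().stretch(vs.pos(0.5, name="u/scale")))
    # Prior knowledge about the form: x ** alpha with a learnable exponent.
    alpha = vs.pos(1.2, name="alpha")
    f = u + (lambda x_: x_**alpha)
    # Return the latent function and the observation noise variance.
    return f, vs.pos(0.5, name="noise")
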
# Sample a true, underlying function and observations.
vs = Vars(tf.float64)
f_true = x**1.8 + B.sin(2 * B.pi * x)
f, noise = model(vs)
post = f.measure | (f(x), f_true)
y_obs = post(f(x_obs)).sample()


def objective(vs):
    f, noise = model(vs)
    evidence = f(x_obs, noise).logpdf(y_obs)
    return -evidence


# Learn hyperparameters.
minimise_l_bfgs_b(objective, vs, jit=True)
f, noise = model(vs)

# Print the learned parameters.
out.kv("Prior", f.display(out.format))
vs.print()

# Condition on the observations to make predictions.
f_post = f | (f(x_obs, noise), y_obs)
mean, lower, upper = f_post(x).marginal_credible_bounds()

# Plot result.
plt.plot(x, B.squeeze(f_true), label="True", style="test")
plt.scatter(x_obs, B.squeeze(y_obs), label="Observations", style="train", s=20)
plt.plot(x, mean, label="Prediction", style="pred")
plt.fill_between(x, lower, upper, style="pred")
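The excerpt ends without closing the figure; with wbml.plot imported as above, the standard finishing step would be:

tweak()
plt.show()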