Example #1

import numpy as np
import torch
from gpytorch import settings

# Get the (already trained) model and likelihood into evaluation
# (predictive posterior) mode
model.eval()
likelihood.eval()


# fast_pred_var() turns on LOVE for fast predictive (co)variances
with torch.no_grad(), settings.fast_pred_var():
    # Make predictions
    y_pred = likelihood(model(x_test))

    mean = y_pred.mean.numpy()
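    # note: the 1e3 factor below reads like a deliberate rescaling of the
    # variance for plotting; drop it to plot the raw predictive variance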
    var = y_pred.variance.numpy() * 1e3
    plot_gp(mean,
            var,
            x_test.numpy(),
            X_train=x_train.numpy(),
            Y_train=y_train.numpy())

# Training loop: fit the hyperparameters by maximizing the marginal log
# likelihood (`optimizer`, `mll`, and `training_iter` are assumed to be
# set up where the model was defined)
model.train()
likelihood.train()
for i in range(training_iter):
    optimizer.zero_grad()
    loss = -mll(model(x_train), y_train)
    loss.backward()
    print(
        f'Iter {i + 1} - Loss: {loss.item()}   noise: {model.likelihood.noise.item()}'
    )
    optimizer.step()

model.eval()
likelihood.eval()

with torch.no_grad(), settings.fast_pred_var(), \
        settings.max_root_decomposition_size(25):
    # cap the root decomposition rank at 25 (faster, slightly less accurate)
    x_test = torch.from_numpy(
        np.linspace(1870, 2030, 200)[:, np.newaxis]).type(torch.float32)
    x_test = x_test.cuda()  # the model is assumed to live on the GPU
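    # f_preds is the noise-free latent function posterior; applying the
    # likelihood adds observation noise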
    f_preds = model(x_test)
    y_pred = likelihood(f_preds)

# plot
with torch.no_grad():
    mean = y_pred.mean.cpu().numpy()
    var = y_pred.variance.cpu().numpy()
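    # draw a single sample path from the predictive distribution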
    samples = y_pred.sample().cpu().numpy()
    plot_gp(mean,
            var,
            x_test.cpu().numpy(),
            X_train=x_train.cpu().numpy(),
            Y_train=y_train.cpu().numpy(),
            samples=samples)
import pods
from GPy.models import GPRegression

# time-course gene expression data for gene 937 from della Gatta et al.
data = pods.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',
                                                       gene_number=937)

x = data['X']
y = data['Y']

# standardize the targets to zero mean and unit variance
offset = np.mean(y)
scale = np.sqrt(np.var(y))

yhat = (y - offset) / scale

# alternative kernels to try:
#kernel = RBF(input_dim=1, variance=100)
#kernel = Matern32(input_dim=1, variance=2.0, lengthscale=200)
model = GPRegression(x, yhat)
model.kern.lengthscale = 20  # the fit widens as this grows (try 100 or 200)
#model.likelihood.variance = 0.001

print(model.log_likelihood())  # before optimization
model.optimize()
print(model.log_likelihood())  # after optimization (should be higher)

xt = np.linspace(-20, 260, 100)[:, np.newaxis]
yt_mean, yt_var = model.predict(xt)

plot_gp(yt_mean,
        yt_var,
        xt,
        X_train=model.X.flatten(),
        Y_train=model.Y.flatten())

import deepgp
from GPy.kern import RBF

# Stack a two-layer deep GP on the same data; the hidden layer is assumed
# to be one-dimensional
dgp = deepgp.DeepGP(
    [yhat.shape[1], 1, x.shape[1]],  # the dimensions of each layer
    Y=yhat,
    X=x,
    kernels=[RBF(1, ARD=True),
             RBF(x.shape[1], ARD=True)],  # the kernels for each layer
    num_inducing=50,
    back_constraint=False)

dgp.optimize(messages=True, max_iters=100)
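
# `pred_range` is assumed to be a small helper defined elsewhere in the
# notebook; a minimal sketch consistent with how it is used below:
def pred_range(x, portion=0.2, points=200):
    """Return test inputs spanning x, extended by `portion` on each side."""
    span = x.max() - x.min()
    return np.linspace(x.min() - portion * span,
                       x.max() + portion * span, points)[:, np.newaxis]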

# plot layer1: from the observed inputs to the hidden layer
xt = pred_range(x)
yt_mean, yt_var = dgp.layers[1].predict(xt)

samples = dgp.layers[1].posterior_samples(xt, size=1).squeeze(1)

plot_gp(yt_mean,
        yt_var,
        xt,
        dgp.layers[1].X.flatten(),
        dgp.layers[1].Y.mean,
        samples=samples,
        title="layer1 observables to hidden layer")

# plot layer0: from the hidden layer to the targets
x = dgp.layers[1].Y.mean
xt = pred_range(x)
yt_mean, yt_var = dgp.layers[0].predict(xt)

samples = dgp.layers[0].posterior_samples(xt, size=1).squeeze(1)

plot_gp(yt_mean,
        yt_var,
        xt,
        dgp.layers[0].X.mean,
        dgp.layers[0].Y.flatten(),
        samples=samples,
        title="layer0 hidden layer to targets")

# choose a kernel
#kernel = Matern32(input_dim=1, variance=2.0)
#kernel = GridRBF(input_dim=1)
#kernel = RBF(input_dim=1, variance=2.0)


# GP regression: optimize the hyperparameters by maximizing the log likelihood
# (x_train and y_train are assumed to be loaded in an earlier cell)
gp_regression = GPRegression(x_train, y_train)

#gp_regression.kern.lengthscale = 500
#gp_regression.likelihood.variance = 0.001

print("loglikelihood: ", gp_regression.log_likelihood())

gp_regression.optimize()

print("loglikelihood: ", gp_regression.log_likelihood())


# predict new unseen samples
x_test = np.linspace(1870, 2030, 200)[:, np.newaxis]
yt_mean, yt_var = gp_regression.predict(x_test)
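# GPy's predict() returns the predictive mean and variance, including
# observation noise by default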
yt_sd = np.sqrt(yt_var)  # predictive standard deviation

# draw some samples from the posterior
samples = gp_regression.posterior_samples(x_test, size=1).squeeze(1)

# plot
plot_gp(yt_mean,
        yt_var,
        x_test,
        X_train=x_train,
        Y_train=y_train,
        samples=samples)