Example #1

import tensorflow as tf
from varz.spec import parametrised, Positive
from varz.tensorflow import Vars
from wbml.net import rnn as rnn_constructor

from stheno.tensorflow import B, GP, EQ, Measure


# The `Positive` annotations are resolved by varz's `parametrised` decorator.
@parametrised
def model(vs,
          a_scale: Positive = 0.1,
          b_scale: Positive = 0.1,
          noise: Positive = 0.01):
    # Construct an RNN.
    f_rnn = rnn_constructor(output_size=1,
                            widths=(10, ),
                            nonlinearity=B.tanh,
                            final_dense=True)

    # Set the weights for the RNN.
    num_weights = f_rnn.num_weights(input_size=1)
    weights = Vars(tf.float32,
                   source=vs.get(shape=(num_weights, ), name="rnn"))
    f_rnn.initialise(input_size=1, vs=weights)

    with Measure():
        # Construct GPs that modulate the RNN.
        a = GP(1e-2 * EQ().stretch(a_scale))
        b = GP(1e-2 * EQ().stretch(b_scale))

        # GP-RNN model:
        f_gp_rnn = (1 + a) * (lambda x: f_rnn(x)) + b

    return f_rnn, f_gp_rnn, noise, a, b
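
For context, a minimal sketch of how a parametrised model like the one above might be driven, with hypothetical observations `x_obs` and `y_obs`:

vs = Vars(tf.float32)

# Calling the decorated model pulls `a_scale`, `b_scale`, and `noise` out of
# `vs`, creating them at their default values on first use.
f_rnn, f_gp_rnn, noise, a, b = model(vs)

# Log-likelihood of the hypothetical observations `y_obs` at inputs `x_obs`;
# `f_gp_rnn(x_obs, noise)` attaches observation noise to the prediction.
lml = f_gp_rnn(x_obs, noise).logpdf(y_obs)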
Example #2

import tensorflow as tf
from varz.tensorflow import Vars
from wbml.net import rnn as rnn_constructor

from stheno.tensorflow import B, Graph, GP, EQ, Delta


def model(vs):
    g = Graph()

    # Construct an RNN.
    f_rnn = rnn_constructor(output_size=1,
                            widths=(10, ),
                            nonlinearity=B.tanh,
                            final_dense=True)

    # Set the weights for the RNN.
    num_weights = f_rnn.num_weights(input_size=1)
    weights = Vars(tf.float32,
                   source=vs.get(shape=(num_weights, ), name='rnn'))
    f_rnn.initialise(input_size=1, vs=weights)

    # Construct GPs that modulate the RNN.
    a = GP(1e-2 * EQ().stretch(vs.pos(0.1, name='a/scale')), graph=g)
    b = GP(1e-2 * EQ().stretch(vs.pos(0.1, name='b/scale')), graph=g)
    e = GP(vs.pos(1e-2, name='e/var') * Delta(), graph=g)

    # GP-RNN model:
    f_gp_rnn = (1 + a) * (lambda x: f_rnn(x)) + b
    y_gp_rnn = f_gp_rnn + e

    return f_rnn, f_gp_rnn, y_gp_rnn, a, b
Example #3

from varz.spec import parametrised, Positive

from stheno import GP, EQ


# The `Positive` annotations are resolved by varz's `parametrised` decorator.
@parametrised
def model(
    vs,
    var1: Positive = 1,
    scale1: Positive = 1,
    noise1: Positive = 0.1,
    var2: Positive = 1,
    scale2: Positive = 1,
    noise2: Positive = 0.1,
):
    # Build layers:
    f1 = GP(var1 * EQ().stretch(scale1))
    f2 = GP(var2 * EQ().stretch(scale2))
    return (f1, noise1), (f2, noise2)
Example #4

import numpy as np

from stheno import Graph, GP, EQ, Delta


def model(vs):
    g = Graph()

    # Construct model for first layer:
    f1 = GP(vs.pos(1., name='f1/var') *
            EQ().stretch(vs.pos(1., name='f1/scale')),
            graph=g)
    e1 = GP(vs.pos(0.1, name='e1/var') * Delta(), graph=g)
    y1 = f1 + e1

    # Construct model for second layer:
    f2 = GP(vs.pos(1., name='f2/var') *
            EQ().stretch(vs.pos(np.array([1., .5]), name='f2/scale')),
            graph=g)
    e2 = GP(vs.pos(0.1, name='e2/var') * Delta(), graph=g)
    y2 = f2 + e2

    return f1, y1, f2, y2
Example #5

from varz.spec import parametrised, Positive

from stheno import Measure, GP, EQ, Delta


# The `Positive` annotations are resolved by varz's `parametrised` decorator.
@parametrised
def model(
    vs,
    var1: Positive = 1,
    scale1: Positive = 1,
    noise1: Positive = 0.1,
    var2: Positive = 1,
    scale2: Positive = 1,
    noise2: Positive = 0.1,
):
    # Construct model for first layer:
    prior1 = Measure()
    f1 = GP(var1 * EQ() > scale1, measure=prior1)
    e1 = GP(noise1 * Delta(), measure=prior1)
    y1 = f1 + e1

    # Construct model for second layer:
    prior2 = Measure()
    f2 = GP(var2 * EQ() > scale2, measure=prior2)
    e2 = GP(noise2 * Delta(), measure=prior2)
    y2 = f2 + e2

    return f1, y1, f2, y2
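
Because the two layers live in separate measures, each can be conditioned on its own data without affecting the other. A minimal sketch, with hypothetical observations `x1_obs`, `y1_obs` and prediction inputs `x_new`:

import tensorflow as tf
from varz.tensorflow import Vars

vs = Vars(tf.float32)
f1, y1, f2, y2 = model(vs)

# Condition the first layer's measure on its own observations; the second
# layer's measure is unaffected.
post1 = f1.measure | (y1(x1_obs), y1_obs)

# Posterior mean of the first layer's latent function at new inputs.
mean1 = post1(f1(x_new)).mean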
Example #6

from varz.spec import parametrised, Positive

from stheno import Measure, GP, EQ


# The `Positive` annotations are resolved by varz's `parametrised` decorator.
@parametrised
def model(
    vs,
    u_var: Positive = 0.5,
    u_scale: Positive = 0.5,
    noise: Positive = 0.5,
    alpha: Positive = 1.2,
):
    with Measure():
        # Random fluctuation:
        u = GP(u_var * EQ().stretch(u_scale))
        # Construct model.
        f = u + (lambda x: x**alpha)
    return f, noise
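
A model in this form is typically fitted by minimising the negative log-likelihood over the parameters in `vs`. A sketch, assuming varz's `minimise_l_bfgs_b` optimiser and hypothetical data `x_obs` and `y_obs`:

import tensorflow as tf
from varz.tensorflow import Vars, minimise_l_bfgs_b


def objective(vs):
    f, noise = model(vs)
    # Negative log-likelihood of the observations under the prior.
    return -f(x_obs, noise).logpdf(y_obs)


vs = Vars(tf.float32)
minimise_l_bfgs_b(objective, vs)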
Example #7

from stheno import Graph, GP, EQ, Delta


def model(vs):
    g = Graph()

    # Random fluctuation:
    u = GP(vs.pos(.5, name='u/var') *
           EQ().stretch(vs.pos(0.5, name='u/scale')), graph=g)

    # Noise:
    e = GP(vs.pos(0.5, name='e/var') * Delta(), graph=g)

    # Construct model:
    alpha = vs.pos(1.2, name='alpha')
    f = u + (lambda x: x ** alpha)
    y = f + e

    return f, y
Example #8

from varz.spec import parametrised, Positive

from stheno import Measure, GP, EQ, Delta


# The `Positive` annotations are resolved by varz's `parametrised` decorator.
@parametrised
def model(
    vs,
    u_var: Positive = 0.5,
    u_scale: Positive = 0.5,
    e_var: Positive = 0.5,
    alpha: Positive = 1.2,
):
    prior = Measure()

    # Random fluctuation:
    u = GP(u_var * EQ() > u_scale, measure=prior)

    # Noise:
    e = GP(e_var * Delta(), measure=prior)

    # Construct model:
    f = u + (lambda x: x**alpha)
    y = f + e

    return f, y
Example #9

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface as SOI
from wbml import vars64 as vs

from stheno.tensorflow import GP, EQ, Delta

s = tf.Session()

# Define points to predict at.
x = np.linspace(0, 5, 100)
x_obs = np.linspace(0, 3, 20)

# Construct the model.
u = GP(vs.pos(.5) * EQ().stretch(vs.pos(1.)))
e = GP(vs.pos(.5) * Delta())
alpha = vs.pos(1.2)
vs.init(s)

f = u + (lambda x: x**alpha)
y = f + e

# Sample a true, underlying function and observations.
f_true = x**1.8
y_obs = s.run((y | (f(x), f_true))(x_obs).sample())

# Learn.
lml = y(x_obs).logpdf(y_obs)
SOI(-lml).minimize(s)
Example #10

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import wbml.plot

from stheno.tensorflow import B, GP, EQ, Delta, Obs

# Define points to predict at.
x = B.linspace(tf.float64, 0, 10, 200)
x_obs = B.linspace(tf.float64, 0, 10, 10)

# Construct the model.
f = 0.7 * GP(EQ()).stretch(1.5)
e = 0.2 * GP(Delta())

# Construct derivatives.
df = f.diff()
ddf = df.diff()
dddf = ddf.diff() + e

# Fix the integration constants.
zero = tf.constant(0, dtype=tf.float64)
one = tf.constant(1, dtype=tf.float64)
f, df, ddf, dddf = (f, df, ddf, dddf) | Obs((f(zero), one), (df(zero), zero),
                                            (ddf(zero), -one))

# Sample observations.
y_obs = B.sin(x_obs) + 0.2 * B.randn(*x_obs.shape)

# Condition on the observations to make predictions.
f, df, ddf, dddf = (f, df, ddf, dddf) | Obs(dddf(x_obs), y_obs)
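
After conditioning, predictions can be read off each process. A sketch, assuming the `marginals()` helper of this older stheno API, which returns the posterior mean together with lower and upper credible bounds:

# Posterior marginals of `f` at the prediction inputs.
mean, lower, upper = f(x).marginals()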
Example #11

import numpy as np
import tensorflow as tf
from varz.tensorflow import Vars, minimise_adam
from wbml.net import rnn as rnn_constructor

from stheno.tensorflow import B, Graph, GP, Delta, EQ, Obs

# Increase regularisation because we are dealing with float32.
B.epsilon = 1e-6

# Construct points to predict at.
x = B.linspace(tf.float32, 0, 1, 100)[:, None]
inds_obs = np.arange(0, int(0.75 * len(x)))  # Train on the first 75% only.
x_obs = B.take(x, inds_obs)

# Construct function and observations.
#   Draw random modulation functions.
a_true = GP(1e-2 * EQ().stretch(0.1))(x).sample()
b_true = GP(1e-2 * EQ().stretch(0.1))(x).sample()
#   Construct the true, underlying function.
f_true = (1 + a_true) * B.sin(2 * np.pi * 7 * x) + b_true
#   Add noise.
y_true = f_true + 0.1 * B.randn(*f_true.shape)

# Normalise and split.
f_true = (f_true - B.mean(y_true)) / B.std(y_true)
y_true = (y_true - B.mean(y_true)) / B.std(y_true)
y_obs = B.take(y_true, inds_obs)


def model(vs):
    g = Graph()
Example #12

import numpy as np
import tensorflow as tf
from varz.tensorflow import Vars

from stheno.tensorflow import Graph, GP, EQ, Delta

s = tf.Session()

# Define points to predict at.
x = np.linspace(0, 10, 200)
x_obs1 = np.linspace(0, 10, 30)
inds2 = np.random.permutation(len(x_obs1))[:10]
x_obs2 = x_obs1[inds2]

# Construct variable storages.
vs1 = Vars(np.float64)
vs2 = Vars(np.float64)

# Construct a model for each output.
m1 = Graph()
m2 = Graph()
f1 = vs1.pos(1.) * GP(EQ(), graph=m1).stretch(vs1.pos(1.))
f2 = vs2.pos(1.) * GP(EQ(), graph=m2).stretch(vs2.pos([1., .5]))
sig1 = vs1.pos(0.1)
sig2 = vs2.pos(0.1)

# Initialise variables.
vs1.init(s)
vs2.init(s)

# Noise models:
e1 = sig1 * GP(Delta(), graph=m1)
e2 = sig2 * GP(Delta(), graph=m2)

# Observation models:
y1 = f1 + e1
y2 = f2 + e2
Example #13

import numpy as np

from stheno.tensorflow import GP, EQ, RQ, Delta

# This snippet assumes that the session `s`, the inputs `x`, `x_obs`, and
# `inds_obs`, the variable container `vs_gp`, and the RNN output `rnn` are
# defined earlier in the original script.

# Construct function and observations.
#   Draw a random fluctuation.
k_u = .2 * RQ(1e-1).stretch(0.05)
u = s.run(GP(k_u)(np.array(x, dtype=np.float64)).sample()).squeeze()
#   Construct the true, underlying function.
f_true = np.sin(2 * np.pi * 7 * x) + np.array(u, dtype=np.float32)
#   Add noise.
y_true = f_true + 0.2 * np.array(np.random.randn(*x.shape), dtype=np.float32)

# Normalise and split.
f_true = (f_true - np.mean(y_true)) / np.std(y_true)
y_true = (y_true - np.mean(y_true)) / np.std(y_true)
y_obs = y_true[inds_obs]

# Construct the model.
a = 0.1 * GP(EQ()).stretch(vs_gp.pos(0.1))
b = 0.1 * GP(EQ()).stretch(vs_gp.pos(0.1))
e = vs_gp.pos(0.1) * GP(Delta())

# RNN-only model:
y_rnn = rnn + e

# GP-RNN model:
f_gp_rnn = (1 + a) * rnn + b
y_gp_rnn = f_gp_rnn + e

# Construct evidences.
lml_rnn = y_rnn(x_obs).logpdf(y_obs)
lml_gp_rnn = y_gp_rnn(x_obs).logpdf(y_obs)

# Construct optimisers and initialise.
Example #14

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import wbml.plot

from stheno.tensorflow import B, Measure, GP, EQ, Delta

# Define points to predict at.
x = B.linspace(tf.float64, 0, 10, 200)
x_obs = B.linspace(tf.float64, 0, 10, 10)

# Construct the model.
prior = Measure()
f = 0.7 * GP(EQ(), measure=prior).stretch(1.5)
e = 0.2 * GP(Delta(), measure=prior)

# Construct derivatives.
df = f.diff()
ddf = df.diff()
dddf = ddf.diff() + e

# Fix the integration constants.
zero = B.cast(tf.float64, 0)
one = B.cast(tf.float64, 1)
prior = prior | ((f(zero), one), (df(zero), zero), (ddf(zero), -one))

# Sample observations.
y_obs = B.sin(x_obs) + 0.2 * B.randn(*x_obs.shape)

# Condition on the observations to make predictions.
post = prior | (dddf(x_obs), y_obs)
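
Predictions then come from applying the posterior measure to any of the processes; a minimal sketch:

# Posterior predictive for the function itself at the prediction inputs.
pred = post(f(x))
mean, var = pred.mean, pred.var

# Joint posterior samples, e.g. for plotting.
samples = pred.sample(5)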