示例#1
0
def model(vs):
    """Build the GP-RNN model from the variable container `vs`.

    Args:
        vs: Variable container supplying all learnable parameters.

    Returns:
        tuple: `(f_rnn, f_gp_rnn, y_gp_rnn, a, b)` — the raw RNN, the
            GP-modulated RNN, the noisy observed process, and the two
            modulating GPs.
    """
    graph = Graph()

    # Underlying RNN: one hidden layer of width 10 with a tanh
    # nonlinearity, followed by a final dense layer to a scalar output.
    f_rnn = rnn_constructor(output_size=1,
                            widths=(10, ),
                            nonlinearity=B.tanh,
                            final_dense=True)

    # Back the RNN weights by a named slice of `vs` so that they are
    # optimised alongside the GP hyperparameters.
    n_weights = f_rnn.num_weights(input_size=1)
    rnn_vars = Vars(tf.float32,
                    source=vs.get(shape=(n_weights, ), name='rnn'))
    f_rnn.initialise(input_size=1, vs=rnn_vars)

    # GPs that modulate the RNN: `a` scales it multiplicatively,
    # `b` shifts it additively, and `e` is the observation noise.
    a = GP(1e-2 * EQ().stretch(vs.pos(0.1, name='a/scale')), graph=graph)
    b = GP(1e-2 * EQ().stretch(vs.pos(0.1, name='b/scale')), graph=graph)
    e = GP(vs.pos(1e-2, name='e/var') * Delta(), graph=graph)

    # Assemble the GP-RNN model and its noisy observed version.
    f_gp_rnn = (1 + a) * (lambda x: f_rnn(x)) + b
    y_gp_rnn = f_gp_rnn + e

    return f_rnn, f_gp_rnn, y_gp_rnn, a, b
示例#2
0
def model(vs,
          a_scale: Positive = 0.1,
          b_scale: Positive = 0.1,
          noise: Positive = 0.01):
    """Build the GP-RNN model from the variable container `vs`.

    Args:
        vs: Variable container supplying the RNN weights.
        a_scale: Length scale of the multiplicative modulating GP.
        b_scale: Length scale of the additive modulating GP.
        noise: Observation noise variance.

    Returns:
        tuple: `(f_rnn, f_gp_rnn, noise, a, b)` — the raw RNN, the
            GP-modulated RNN, the noise variance, and the two
            modulating GPs.
    """
    # Underlying RNN: one hidden layer of width 10 with a tanh
    # nonlinearity, followed by a final dense layer to a scalar output.
    f_rnn = rnn_constructor(output_size=1,
                            widths=(10, ),
                            nonlinearity=B.tanh,
                            final_dense=True)

    # Back the RNN weights by a named slice of `vs` so that they are
    # optimised alongside the other parameters.
    n_weights = f_rnn.num_weights(input_size=1)
    rnn_vars = Vars(tf.float32,
                    source=vs.get(shape=(n_weights, ), name="rnn"))
    f_rnn.initialise(input_size=1, vs=rnn_vars)

    with Measure():
        # GPs that modulate the RNN: `a` multiplicatively, `b` additively.
        a = GP(1e-2 * EQ().stretch(a_scale))
        b = GP(1e-2 * EQ().stretch(b_scale))

        # Assemble the GP-RNN model inside the same measure.
        f_gp_rnn = (1 + a) * (lambda x: f_rnn(x)) + b

    return f_rnn, f_gp_rnn, noise, a, b
示例#3
0
    u = GP(vs.pos(.5, name='u/var') *
           EQ().stretch(vs.pos(0.5, name='u/scale')), graph=g)

    # Noise:
    e = GP(vs.pos(0.5, name='e/var') * Delta(), graph=g)

    # Construct model:
    alpha = vs.pos(1.2, name='alpha')
    f = u + (lambda x: x ** alpha)
    y = f + e

    return f, y


# Sample a true, underlying function and observations.
vs = Vars(tf.float64)
# Ground truth: a power law plus a sinusoid. NOTE(review): `x` and
# `x_obs` are defined elsewhere in the file — presumably input and
# observation locations; confirm against the full script.
f_true = x ** 1.8 + B.sin(2 * B.pi * x)
f, y = model(vs)
# Condition the noisy process `y` on the exact function values at `x`,
# then sample noisy observations at `x_obs`.
y_obs = (y | (f(x), f_true))(x_obs).sample()


def objective(vs):
    """Return the negative log-evidence of the observations.

    Args:
        vs: Variable container with the model parameters.

    Returns:
        Negative log-pdf of `y_obs` under the model at `x_obs`.
    """
    f, y = model(vs)
    return -y(x_obs).logpdf(y_obs)


# Learn hyperparameters.
minimise_l_bfgs_b(tf.function(objective, autograph=False), vs)
# Rebuild the model so `f` and `y` reflect the optimised parameters.
f, y = model(vs)
示例#4
0
    return f_rnn, f_gp_rnn, y_gp_rnn, a, b


def objective_rnn(vs):
    """Return the mean squared error of the raw RNN (pretraining loss).

    Args:
        vs: Variable container with the model parameters.

    Returns:
        Mean squared error of the RNN's predictions at `x_obs`
        against `y_obs`.
    """
    f_rnn = model(vs)[0]
    residuals = f_rnn(x_obs) - y_obs
    return B.mean(residuals ** 2)


def objective_gp_rnn(vs):
    """Return the negative log-evidence of the full GP-RNN model.

    Args:
        vs: Variable container with the model parameters.

    Returns:
        Negative log-pdf of `y_obs` under the GP-RNN model at `x_obs`.
    """
    y_gp_rnn = model(vs)[2]
    return -y_gp_rnn(x_obs).logpdf(y_obs)


# Pretrain the RNN.
vs = Vars(tf.float32)
# Minimise the plain MSE of the RNN with Adam before introducing the
# GPs. NOTE(review): autograph is disabled in `tf.function` —
# presumably the model code is not autograph-convertible; confirm.
minimise_adam(tf.function(objective_rnn, autograph=False),
              vs,
              rate=1e-2,
              iters=1000,
              trace=True)

# Jointly train the RNN and GPs.
minimise_adam(tf.function(objective_gp_rnn, autograph=False),
              vs,
              rate=1e-3,  # smaller rate than pretraining for fine-tuning
              iters=1000,
              trace=True)

# Rebuild the model so the returned processes use the trained parameters.
_, f_gp_rnn, y_gp_rnn, a, b = model(vs)
    # Random fluctuation:
    u = GP(u_var * EQ() > u_scale, measure=prior)

    # Noise:
    e = GP(e_var * Delta(), measure=prior)

    # Construct model:
    f = u + (lambda x: x**alpha)
    y = f + e

    return f, y


# Sample a true, underlying function and observations.
vs = Vars(tf.float64)
f_true = x**1.8 + B.sin(2 * B.pi * x)
f, y = model(vs)
post = f.measure | (f(x), f_true)
y_obs = post(f(x_obs)).sample()


def objective(vs):
    """Return the negative log-evidence of the observations.

    Args:
        vs: Variable container with the model parameters.

    Returns:
        Negative log-pdf of `y_obs` under the model at `x_obs`.
    """
    f, y = model(vs)
    return -y(x_obs).logpdf(y_obs)


# Learn hyperparameters.
minimise_l_bfgs_b(tf.function(objective, autograph=False), vs)
# Rebuild the model so `f` and `y` reflect the optimised parameters.
f, y = model(vs)