Example #1
def deep_gaussian_process(X, Y):
    """Deep Gaussian Process Regression."""
    lambda_ = 0.1  # Initial weight prior std. dev.; this is optimised later
    noise = tf.Variable(.01)  # Likelihood st. dev. initialisation
    lenscale = tf.Variable(1.)  # learn the length scale

    net = (
        ab.InputLayer(name="X", n_samples=n_samples_) >>
        ab.RandomFourier(n_features=20, kernel=ab.RBF(ab.pos(lenscale))) >>
        ab.DenseVariational(output_dim=5, std=lambda_, full=False) >>
        ab.RandomFourier(n_features=10, kernel=ab.RBF(1.)) >>
        ab.DenseVariational(output_dim=1, std=lambda_, full=False)
    )

    f, kl = net(X=X)
    lkhood = tf.distributions.Normal(loc=f, scale=ab.pos(noise))
    loss = ab.elbo(lkhood, Y, N, kl)

    return f, loss
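
A minimal usage sketch for builder functions like the one above (the names X_, Y_, X_batch, Y_batch, D and n_iterations are hypothetical and not part of the original demos; n_samples_ and N are assumed to be defined globally, as elsewhere in this listing):

X_ = tf.placeholder(tf.float32, shape=(None, D))
Y_ = tf.placeholder(tf.float32, shape=(None, 1))
f, loss = deep_gaussian_process(X_, Y_)
train = tf.train.AdamOptimizer().minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(n_iterations):
        sess.run(train, feed_dict={X_: X_batch, Y_: Y_batch})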
Example #2
def gaussian_process(X, Y):
    """Gaussian Process Regression."""
    lambda_ = 0.1  # Initial weight prior std. dev.; this is optimised later
    noise = tf.Variable(.5)  # Likelihood st. dev. initialisation, and learning
    lenscale = tf.Variable(1.)  # learn the length scale
    kern = ab.RBF(lenscale=ab.pos(lenscale))  # keep the length scale positive
    # kern = ab.RBFVariational(lenscale=ab.pos(lenscale))

    net = (
        ab.InputLayer(name="X", n_samples=n_samples_) >>
        ab.RandomFourier(n_features=50, kernel=kern) >>
        ab.DenseVariational(output_dim=1, std=lambda_, full=True)
    )

    f, kl = net(X=X)
    lkhood = tf.distributions.Normal(loc=f, scale=ab.pos(noise))
    # lkhood = tf.distributions.StudentT(df=1., loc=f, scale=ab.pos(noise))
    loss = ab.elbo(lkhood, Y, N, kl)

    return f, loss
Example #3
def bayesian_linear(X, Y):
    """Bayesian Linear Regression."""
    lambda_ = 100.
    std = (1 / lambda_)**.5  # Weight st. dev. prior
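    # i.e. lambda_ = 100. gives a prior weight std of (1 / 100.) ** 0.5 = 0.1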
    noise = tf.Variable(1.)  # Likelihood st. dev. initialisation, and learning

    net = (ab.InputLayer(name="X", n_samples=n_samples_) >>
           ab.DenseVariational(output_dim=1, std=std, full=True))

    f, kl = net(X=X)
    lkhood = tf.distributions.Normal(loc=f, scale=ab.pos(noise))
    loss = ab.elbo(lkhood, Y, N, kl)

    return f, loss
Example #4
def bayesian_linear(X, Y):
    """Bayesian Linear Regression."""
    reg = .01  # Initial weight prior std. dev.; this is optimised later
    noise = tf.Variable(.5)  # Likelihood st. dev. initialisation, and learning

    net = (
        ab.InputLayer(name="X", n_samples=n_samples) >>
        ab.DenseVariational(output_dim=1, std=reg, full=True)
    )

    phi, kl = net(X=X)
    lkhood = tf.distributions.Normal(loc=phi, scale=ab.pos(noise))
    loss = ab.elbo(lkhood, Y, N, kl)

    return phi, loss
Example #5
def nnet_bayesian(X, Y):
    """Bayesian neural net."""
    lambda_ = 1e-1  # Weight prior
    noise = tf.Variable(0.01)  # Likelihood st. dev. initialisation

    net = (
        ab.InputLayer(name="X", n_samples=n_samples_) >>
        ab.DenseVariational(output_dim=20, std=lambda_) >>
        ab.Activation(tf.nn.relu) >>
        ab.DenseVariational(output_dim=7, std=lambda_) >>
        ab.Activation(tf.nn.relu) >>
        ab.DenseVariational(output_dim=5, std=lambda_) >>
        ab.Activation(tf.tanh) >>
        ab.DenseVariational(output_dim=1, std=lambda_)
    )

    f, kl = net(X=X)
    lkhood = tf.distributions.Normal(loc=f, scale=ab.pos(noise))
    loss = ab.elbo(lkhood, Y, N, kl)
    return f, loss
Example #6
def main():
    """Run the demo."""
    data = fetch_gpml_sarcos_data()
    Xr = data.train.data.astype(np.float32)
    Yr = data.train.targets.astype(np.float32)[:, np.newaxis]
    Xs = data.test.data.astype(np.float32)
    Ys = data.test.targets.astype(np.float32)[:, np.newaxis]
    N, D = Xr.shape

    print("Iterations: {}".format(int(round(N * NEPOCHS / BATCH_SIZE))))

    # Scale and centre the data, as per the original experiment
    ss = StandardScaler()
    Xr = ss.fit_transform(Xr)
    Xs = ss.transform(Xs)
    ym = Yr.mean()
    Yr -= ym
    Ys -= ym

    # Training batches
    data_tr = Dataset.from_tensor_slices({'X': Xr, 'Y': Yr}) \
        .shuffle(buffer_size=1000) \
        .batch(BATCH_SIZE)

    # Testing iterators
    data_ts = Dataset.from_tensors({'X': Xs, 'Y': Ys}).repeat()

    with tf.name_scope("DataIterators"):
        iterator = Iterator.from_structure(data_tr.output_types,
                                           data_tr.output_shapes)
        data = iterator.get_next()
        training_init = iterator.make_initializer(data_tr)
        testing_init = iterator.make_initializer(data_ts)

    with tf.name_scope("Deepnet"):
        phi, kl = net(X=data['X'])
        std = tf.Variable(NOISE, name="noise")
        lkhood = tf.distributions.Normal(phi, scale=ab.pos(std))
        loss = ab.elbo(lkhood, data['Y'], N, kl)
        tf.summary.scalar('loss', loss)

    with tf.name_scope("Train"):
        optimizer = tf.train.AdamOptimizer()
        global_step = tf.train.create_global_step()
        train = optimizer.minimize(loss, global_step=global_step)

    with tf.name_scope("Test"):
        r2 = rsquare(data['Y'], phi)

    # Logging
    log = tf.train.LoggingTensorHook(
        {'step': global_step, 'loss': loss},
        every_n_iter=1000
    )

    with tf.train.MonitoredTrainingSession(
            config=CONFIG,
            scaffold=tf.train.Scaffold(local_init_op=training_init),
            checkpoint_dir="./sarcos/",
            save_summaries_steps=None,
            save_checkpoint_secs=20,
            save_summaries_secs=20,
            hooks=[log]
    ) as sess:
        # Pull the summary writer out of the session's hooks (relies on
        # private MonitoredTrainingSession attributes)
        summary_writer = sess._hooks[1]._summary_writer
        for i in range(NEPOCHS):

            # Train for one epoch
            try:
                while not sess.should_stop():
                    sess.run(train)
            except tf.errors.OutOfRangeError:
                pass

            # Init testing and assess and log R-square score on test set
            sess.run(testing_init)
            r2_score = sess.run(r2)
            score_sum = tf.Summary(value=[
                tf.Summary.Value(tag='r-square', simple_value=r2_score)
            ])
            summary_writer.add_summary(score_sum, sess.run(global_step))

            # Re-init training
            sess.run(training_init)

        # Prediction
        sess.run(testing_init)
        Ey = ab.predict_samples(phi, feed_dict=None, n_groups=NPREDICTSAMPLES,
                                session=sess)
        sigma = sess.run(std)
        r2_score = sess.run(r2)

    # Score mean standardised log likelihood
    Eymean = Ey.mean(axis=0)
    Eyvar = Ey.var(axis=0) + sigma**2  # add sigma^2 for observation noise
    snlp = msll(Ys.flatten(), Eymean, Eyvar, Yr.flatten())

    print("------------")
    print("r-square: {:.4f}, smse: {:.4f}, msll: {:.4f}."
          .format(r2_score, 1 - r2_score, snlp))
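
For reference, a sketch of what an msll helper like the one used above computes, assuming the standard mean standardised log loss from Rasmussen & Williams (an illustration only, not the demo's actual implementation):

def msll_sketch(Y_true, Ey_mean, Ey_var, Y_train):
    """Mean standardised log loss (lower is better)."""
    # Negative log predictive density under the model's Gaussian predictions
    nlp = 0.5 * np.log(2 * np.pi * Ey_var) \
        + 0.5 * (Y_true - Ey_mean) ** 2 / Ey_var
    # Negative log density under a trivial Gaussian fit to the training data
    mt, vt = Y_train.mean(), Y_train.var()
    nlp_triv = 0.5 * np.log(2 * np.pi * vt) + 0.5 * (Y_true - mt) ** 2 / vt
    return np.mean(nlp - nlp_triv)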
Example #7
import logging

import numpy as np
import tensorflow as tf

import aboleth as ab
from aboleth.datasets import fetch_gpml_sarcos_data


# Set up a python logger so we can see the output of MonitoredTrainingSession
logger = logging.getLogger()
logger.setLevel(logging.INFO)

NSAMPLES = 10  # Number of random samples to get from an Aboleth net
NFEATURES = 1500  # Number of random features/bases to use in the approximation
NOISE = 3.0  # Initial estimate of the observation noise

# Random Fourier Features, this is setting up an anisotropic length scale, or
# one length scale per dimension
LENSCALE = tf.Variable(5 * np.ones((21, 1), dtype=np.float32))
KERNEL = ab.RBF(ab.pos(LENSCALE))

# Variational Fourier Features -- length-scale setting here is the "prior"
# LENSCALE = 10.
# KERNEL = ab.RBFVariational(lenscale=LENSCALE, lenscale_posterior=LENSCALE)

# Build the approximate GP
net = ab.stack(
    ab.InputLayer(name='X', n_samples=NSAMPLES),
    ab.RandomFourier(n_features=NFEATURES, kernel=KERNEL),
    ab.DenseVariational(output_dim=1, full=True)
)

# Learning and prediction settings
BATCH_SIZE = 50  # number of observations per mini batch
NEPOCHS = 100  # Number of times to iterate through the dataset
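
The ab.stack call above is just a convenience for composing layers; assuming it chains them left to right, it builds the same network as the >> operator used in the other examples in this listing:

net_alt = (ab.InputLayer(name='X', n_samples=NSAMPLES) >>
           ab.RandomFourier(n_features=NFEATURES, kernel=KERNEL) >>
           ab.DenseVariational(output_dim=1, full=True))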
Example #8
def main():
    """Run the demo."""
    data = fetch_gpml_sarcos_data()
    Xr = data.train.data.astype(np.float32)
    Yr = data.train.targets.astype(np.float32)[:, np.newaxis]
    Xs = data.test.data.astype(np.float32)
    Ys = data.test.targets.astype(np.float32)[:, np.newaxis]
    N, D = Xr.shape

    print("Iterations: {}".format(int(round(N * NEPOCHS / BATCH_SIZE))))

    # Scale and centre the data, as per the original experiment
    ss = StandardScaler()
    Xr = ss.fit_transform(Xr)
    Xs = ss.transform(Xs)
    ym = Yr.mean()
    Yr -= ym
    Ys -= ym

    # Training batches
    data_tr = tf.data.Dataset.from_tensor_slices({'X': Xr, 'Y': Yr}) \
        .shuffle(buffer_size=1000) \
        .batch(BATCH_SIZE)

    # Testing iterators
    data_ts = tf.data.Dataset.from_tensors({'X': Xs, 'Y': Ys}).repeat()

    with tf.name_scope("DataIterators"):
        iterator = tf.data.Iterator.from_structure(data_tr.output_types,
                                                   data_tr.output_shapes)
        data = iterator.get_next()
        training_init = iterator.make_initializer(data_tr)
        testing_init = iterator.make_initializer(data_ts)

    with tf.name_scope("Deepnet"):
        phi, kl = net(X=data['X'])
        std = tf.Variable(NOISE, name="noise")
        lkhood = tf.distributions.Normal(phi, scale=ab.pos(std))
        loss = ab.elbo(lkhood, data['Y'], N, kl)
        tf.summary.scalar('loss', loss)

    with tf.name_scope("Train"):
        optimizer = tf.train.AdamOptimizer()
        global_step = tf.train.create_global_step()
        train = optimizer.minimize(loss, global_step=global_step)

    with tf.name_scope("Test"):
        Ey = ab.sample_mean(phi)

    # Logging
    log = tf.train.LoggingTensorHook(
        {'step': global_step, 'loss': loss},
        every_n_iter=1000
    )

    with tf.train.MonitoredTrainingSession(
            config=CONFIG,
            scaffold=tf.train.Scaffold(local_init_op=training_init),
            checkpoint_dir="./sarcos/",
            save_summaries_steps=None,
            save_checkpoint_secs=20,
            save_summaries_secs=20,
            hooks=[log]) as sess:
        for i in range(NEPOCHS):

            # Train for one epoch
            sess.run(training_init)
            try:
                while not sess.should_stop():
                    _, g = sess.run([train, global_step])
            except tf.errors.OutOfRangeError:
                pass

            # Init testing and assess and log R-square score on test set
            sess.run(testing_init)
            Eymean = sess.run(Ey, feed_dict={n_samples_: NPREDICTSAMPLES})
            r2 = r2_score(Ys, Eymean)
            print("Training epoch {}, r-square = {}".format(i, r2))
            rsquare_summary(r2, sess, g)

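    # Note: with errors normalised by the variance of the test targets, the
    # smse reported below is simply 1 - R^2.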
    print("------------")
    print("r-square: {:.4f}, smse: {:.4f}".format(r2, 1 - r2))
Example #9
# Optimization
NEPOCHS = 5  # Number of times to see the data in training
BSIZE = 100  # Mini batch size
CONFIG = tf.ConfigProto(device_count={'GPU': 0})  # No GPU devices, i.e. run on CPU
LSAMPLES = 5  # Number of samples the model returns
PSAMPLES = 10  # This will give LSAMPLES * PSAMPLES predictions

NCLASSES = 7  # Number of target classes
NFEATURES = 100  # Number of random features to use

# Network construction
data_input = ab.InputLayer(name='X', n_samples=LSAMPLES)  # Data input
mask_input = ab.MaskInputLayer(name='M')  # Missing data mask input

lenscale = ab.pos(tf.Variable(np.ones((54, 1), dtype=np.float32)))

layers = (ab.RandomArcCosine(n_features=NFEATURES, lenscale=lenscale) >>
          ab.DenseVariational(output_dim=NCLASSES))


def main():
    """Run the imputation demo."""
    # Fetch data, one-hot targets and standardise data
    data = fetch_covtype()
    X = data.data
    Y = (data.target - 1)
    X = StandardScaler().fit_transform(X)

    # Now fake some missing data with a mask
    rnd = np.random.RandomState(RSEED)
Example #10
def main():
    """Run the demo."""
    n_iters = int(round(n_epochs * N / batch_size))
    print("Iterations = {}".format(n_iters))

    # Get training and testing data
    Xr, Yr, Xs, Ys = gp_draws(N, Ns, kern=kernel, noise=true_noise)

    # Prediction points
    Xq = np.linspace(-20, 20, Ns).astype(np.float32)[:, np.newaxis]
    Yq = np.linspace(-4, 4, Ns).astype(np.float32)[:, np.newaxis]

    # Set up the probability image query points
    Xi, Yi = np.meshgrid(Xq, Yq)
    Xi = Xi.astype(np.float32).reshape(-1, 1)
    Yi = Yi.astype(np.float32).reshape(-1, 1)

    _, D = Xr.shape

    # Name the "data" parts of the graph
    with tf.name_scope("Input"):
        # This function will make a TensorFlow queue for shuffling and batching
        # the data, and will run through n_epochs of the data.
        Xb, Yb = batch_training(Xr,
                                Yr,
                                n_epochs=n_epochs,
                                batch_size=batch_size)
        X_ = tf.placeholder_with_default(Xb, shape=(None, D))
        Y_ = tf.placeholder_with_default(Yb, shape=(None, 1))

    # This is where we build the actual GP model
    with tf.name_scope("Deepnet"):
        phi, kl = net(X=X_)
        lkhood = tf.distributions.Normal(loc=phi, scale=ab.pos(noise))
        loss = ab.elbo(lkhood, Y_, N, kl)

    # Set up the training graph
    with tf.name_scope("Train"):
        optimizer = tf.train.AdamOptimizer()
        global_step = tf.train.create_global_step()
        train = optimizer.minimize(loss, global_step=global_step)

    # This is used for building the predictive density image
    with tf.name_scope("Predict"):
        logprob = tf.reduce_mean(lkhood.log_prob(Y_), axis=0)

    # Logging learning progress
    log = tf.train.LoggingTensorHook(
        {'step': global_step, 'loss': loss},
        every_n_iter=1000
    )

    # This is the main training "loop"
    with tf.train.MonitoredTrainingSession(config=config,
                                           save_summaries_steps=None,
                                           save_checkpoint_secs=None,
                                           hooks=[log]) as sess:
        try:
            while not sess.should_stop():
                sess.run(
                    train,
                    feed_dict={
                        n_samples_: n_samples,
                        # tf.keras.backend.learning_phase(): 1
                    })
        except tf.errors.OutOfRangeError:
            print('Input queues have been exhausted!')
            pass

        # Prediction. Feeding [[None]] for Y_ stops the default placeholder
        # from pulling from the training queue. The learning-phase flag is
        # left on even at prediction time so that we draw samples from the
        # predictive distribution.
        Ey = sess.run(phi,
                      feed_dict={
                          X_: Xq,
                          Y_: [[None]],
                          n_samples_: p_samples
                      })

        logPY = sess.run(logprob,
                         feed_dict={
                             Y_: Yi,
                             X_: Xi,
                             n_samples_: p_samples
                         })

    Eymean = Ey.mean(axis=0)  # Average samples to get the mean predicted function
    Py = np.exp(logPY.reshape(Ns, Ns))  # Turn log-prob into prob

    # Plot
    im_min = np.amin(Py)
    im_size = np.amax(Py) - im_min
    img = (Py - im_min) / im_size
    f = bk.figure(tools='pan,box_zoom,reset', sizing_mode='stretch_both')
    f.image(image=[img], x=-20., y=-4., dw=40., dh=8, palette=bp.Plasma256)
    f.circle(Xr.flatten(), Yr.flatten(), fill_color='blue', legend='Training')
    f.line(Xs.flatten(), Ys.flatten(), line_color='blue', legend='Truth')
    for y in Ey:
        f.line(Xq.flatten(),
               y.flatten(),
               line_color='red',
               legend='Samples',
               alpha=0.2)
    f.line(Xq.flatten(), Eymean.flatten(), line_color='green', legend='Mean')
    bk.show(f)