Example #1
# Assumed imports for this test module (module paths inferred from the
# other examples on this page; adjust to the actual package layout):
#   import numpy as np
#   import tensorflow as tf
#   import autogp
#   from autogp import likelihoods as lik, kernels as cov, inference as inf

@classmethod
def setUpClass(cls):
    super(TestMultiFull, cls).setUpClass()
    likelihood = lik.Softmax()
    kernel = cov.SquaredExponential(input_dim=2,
                                    output_dim=2,
                                    length_scale=1.,
                                    std_dev=1.,
                                    white=0.)
    inducing_inputs = np.array([[1.0, 2.0, 3.0, 4.0]])
    cls.inf = inf.VariationalInference(num_samples=10,
                                       lik_func=likelihood,
                                       cov_func=kernel,
                                       num_components=2)
    cls.model = autogp.GaussianProcess(lik_func=likelihood,
                                       cov_func=kernel,
                                       inducing_inputs=inducing_inputs,
                                       num_components=2,
                                       diag_post=False,
                                       inf_func=cls.inf)
    # cls.session is assumed to be created by the base class's setUpClass().
    cls.session.run(tf.global_variables_initializer())
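For orientation, a hypothetical test method this fixture could support; the
fit/predict calls mirror the training script in Example #2, and their exact
signatures are assumptions, not taken from the original test:

def test_predict_shape(self):
    # Hypothetical check: after a short fit on a tiny two-class dataset,
    # predictions should carry one probability column per class. The
    # fit/predict signatures are assumed, not from the original test.
    train = datasets.DataSet(np.array([[0.0, 0.0], [1.0, 1.0]]),
                             np.array([[1.0, 0.0], [0.0, 1.0]]))
    self.model.fit(train, tf.train.RMSPropOptimizer(0.01), epochs=1)
    pred_means, pred_vars = self.model.predict(np.array([[0.5, 0.5]]))
    self.assertEqual(pred_means.shape[1], 2)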
Example #2
# Assumed imports and helpers for this script (inferred from the calls
# below; load_cifar and init_z are defined elsewhere in the example):
#   import tensorflow as tf
#   import autogp
#   from autogp import datasets, losses, util
#   from autogp import likelihoods as lik, kernels as cov
if __name__ == '__main__':
    FLAGS = util.util.get_flags()
    BATCH_SIZE = FLAGS.batch_size
    LEARNING_RATE = FLAGS.learning_rate
    DISPLAY_STEP = FLAGS.display_step
    EPOCHS = FLAGS.n_epochs
    NUM_SAMPLES = FLAGS.mc_train
    NUM_INDUCING = FLAGS.n_inducing
    IS_ARD = FLAGS.is_ard

    train_X, train_Y, test_X, test_Y = load_cifar()
    data = datasets.DataSet(train_X, train_Y)
    test = datasets.DataSet(test_X, test_Y)

    # Setup initial values for the model.
    likelihood = lik.Softmax()
    kern = [
        cov.SquaredExponential(data.X.shape[1],
                               length_scale=10.0,
                               input_scaling=IS_ARD) for i in range(10)
    ]
    # Alternative kernels tried in the original script:
    # kern = [kernels.ArcCosine(X.shape[1], 2, 3, 5.0, 1.0,
    #                           input_scaling=True) for i in range(10)]
    # kern = [kernels.RadialBasis(X.shape[1], input_scaling=True)
    #         for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)

    # Set up the loss that will be reported during training.
    error_rate = losses.ZeroOneLoss(data.Dout)

    o = tf.train.RMSPropOptimizer(LEARNING_RATE)
    # The original snippet breaks off mid-call here; the remaining fit()
    # arguments are assumed from the flags read at the top of the script.
    m.fit(data, o,
          epochs=EPOCHS,
          batch_size=BATCH_SIZE,
          display_step=DISPLAY_STEP,
          test=test,
          loss=error_rate)
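A possible continuation once fit() returns, sketched under the assumption
that the model exposes a predict() method returning predictive means and
variances and that the loss object can score predictions; neither call
appears in the original snippet:

    # Hypothetical evaluation step; `predict` and `eval` are assumed APIs.
    pred_means, pred_vars = m.predict(test.X)
    print("test error rate:", error_rate.eval(test.Y, pred_means))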
Example #3
def predict(self, latent_means, latent_vars):
    # Map Gaussian latent-function moments to class probabilities by
    # evaluating the Softmax likelihood's predict() op in a TF1 session.
    softmax = lik.Softmax()
    # Use a context manager so the session is closed instead of leaked.
    with tf.Session() as sess:
        return sess.run(
            softmax.predict(tf.constant(latent_means, dtype=tf.float32),
                            tf.constant(latent_vars, dtype=tf.float32)))
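A small usage sketch for the helper above; the shapes (one row per test
point, one column per class) and the `helper` instance are illustrative
assumptions, not taken from the original code:

import numpy as np

# Two test points, three classes; tiny variances keep the result close
# to a plain softmax of the means. Shapes here are an assumption.
latent_means = np.array([[2.0, 0.5, -1.0],
                         [0.0, 0.0, 3.0]])
latent_vars = np.full_like(latent_means, 0.01)

probs = helper.predict(latent_means, latent_vars)  # `helper` is hypothetical
print(probs.sum(axis=1))  # each row should sum to roughly 1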
Example #4
def log_prob(self, outputs, latent):
    # Evaluate the Softmax log conditional probability log p(y | f) for
    # the given targets and latent-function values in a TF1 session.
    softmax = lik.Softmax()
    # Use a context manager so the session is closed instead of leaked.
    with tf.Session() as sess:
        return sess.run(
            softmax.log_cond_prob(tf.constant(outputs, dtype=tf.float32),
                                  tf.constant(latent, dtype=tf.float32)))
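As with Example #3, a usage sketch under assumed shapes: one-hot targets
and matching latent values, one row per point and one column per class;
`helper` is again a hypothetical instance:

import numpy as np

# The log-probability should be highest where the latent values favor
# the true (one-hot) class. Shapes are an assumption.
outputs = np.array([[1.0, 0.0, 0.0],
                    [0.0, 0.0, 1.0]])
latent = np.array([[3.0, 0.0, 0.0],
                   [0.0, 0.0, 3.0]])

log_p = helper.log_prob(outputs, latent)  # `helper` is hypothetical
print(log_p)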