Example #1
@classmethod
def setUpClass(cls):
    super(TestMultiFull, cls).setUpClass()
    # Softmax likelihood for multi-class classification.
    likelihood = likelihoods.Softmax()
    # One RBF kernel per latent function.
    kernel = [
        kernels.RadialBasis(input_dim=2,
                            lengthscale=1.0,
                            std_dev=1.0,
                            white=0.0) for i in range(2)
    ]
    inducing_locations = np.array([[1.0, 2.0, 3.0, 4.0]])
    # Full (non-diagonal) posterior covariance with a two-component mixture.
    cls.model = autogp.GaussianProcess(likelihood_func=likelihood,
                                       kernel_funcs=kernel,
                                       inducing_inputs=inducing_locations,
                                       num_components=2,
                                       diag_post=False,
                                       num_samples=1)
    cls.session.run(tf.global_variables_initializer())
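
Note that cls.session is not created in this snippet; it presumably comes from the parent fixture's setUpClass. A minimal sketch of such a base class for a TensorFlow 1.x test suite (the class name and details are assumptions, not taken from the example):

import unittest

import tensorflow as tf


class TestBase(unittest.TestCase):
    # Hypothetical base fixture: owns a shared session that subclasses use
    # to run ops after building their model graph.
    @classmethod
    def setUpClass(cls):
        cls.session = tf.Session()

    @classmethod
    def tearDownClass(cls):
        cls.session.close()
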
Example #2

if __name__ == '__main__':
    FLAGS = util.util.get_flags()
    BATCH_SIZE = FLAGS.batch_size
    LEARNING_RATE = FLAGS.learning_rate
    DISPLAY_STEP = FLAGS.display_step
    EPOCHS = FLAGS.n_epochs
    NUM_SAMPLES = FLAGS.mc_train
    NUM_INDUCING = FLAGS.n_inducing
    IS_ARD = FLAGS.is_ard

    data, test, _ = datasets.import_mnist()

    # Set up initial values for the model.
    likelihood = likelihoods.Softmax()
    kern = [
        kernels.RadialBasis(data.X.shape[1],
                            lengthscale=10.0,
                            input_scaling=IS_ARD) for i in range(10)
    ]
    # Alternative kernels, e.g.:
    # kern = [kernels.ArcCosine(data.X.shape[1], 2, 3, 5.0, 1.0,
    #                           input_scaling=True) for i in range(10)]
    # kern = [kernels.RadialBasis(data.X.shape[1], input_scaling=True)
    #         for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)

    # Set up the loss (classification error rate) reported during training.
    error_rate = losses.ZeroOneLoss(data.Dout)

    # Record the wall-clock start time for training.
    import time
    otime = time.time()
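
The script calls init_z(data.X, NUM_INDUCING) before building the model but does not show its definition. A minimal stand-in that seeds the inducing inputs from a random subset of the training data (one common initialization; the original script may use k-means instead):

import numpy as np


def init_z(train_inputs, num_inducing):
    # Choose a random subset of training points as initial inducing locations.
    indices = np.random.choice(train_inputs.shape[0], num_inducing, replace=False)
    return train_inputs[indices].copy()
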
Example #3
def predict(self, latent_means, latent_vars):
    # Run the Softmax likelihood's predict op in a throwaway session,
    # feeding the latent means and variances as float32 arrays.
    softmax = likelihoods.Softmax()
    return tf.Session().run(
        softmax.predict(np.array(latent_means, dtype=np.float32),
                        np.array(latent_vars, dtype=np.float32)))
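
The predictive quantity here is the expectation of the softmax over Gaussian latent values. A plain NumPy Monte Carlo sketch of that expectation (the library's predict may also return variances and use a different estimator; this is only a sanity-check version):

import numpy as np


def mc_softmax_predict(latent_means, latent_vars, num_samples=1000):
    # Sample f ~ N(mean, var) elementwise, apply the softmax over the last
    # axis (classes), and average the samples to approximate E[softmax(f)].
    means = np.asarray(latent_means, dtype=np.float64)
    stds = np.sqrt(np.asarray(latent_vars, dtype=np.float64))
    samples = means + stds * np.random.randn(num_samples, *means.shape)
    shifted = samples - samples.max(axis=-1, keepdims=True)
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=-1, keepdims=True)
    return probs.mean(axis=0)
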
Example #4
def log_prob(self, outputs, latent):
    # Evaluate the Softmax log conditional probability log p(outputs | latent)
    # in a throwaway session.
    softmax = likelihoods.Softmax()
    return tf.Session().run(
        softmax.log_cond_prob(np.array(outputs, dtype=np.float32),
                              np.array(latent, dtype=np.float32)))
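
For a softmax likelihood with one-hot targets, the log conditional probability is sum_k y_k * f_k - log(sum_k exp(f_k)). A small NumPy sanity check of that formula, assuming one-hot outputs over the last axis (not the library's implementation):

import numpy as np


def softmax_log_cond_prob(outputs, latent):
    # log p(y | f) = sum_k y_k * f_k - logsumexp_k(f_k), computed stably.
    y = np.asarray(outputs, dtype=np.float64)
    f = np.asarray(latent, dtype=np.float64)
    f_max = f.max(axis=-1, keepdims=True)
    log_norm = f_max.squeeze(-1) + np.log(np.exp(f - f_max).sum(axis=-1))
    return (y * f).sum(axis=-1) - log_norm
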