Example #1
 def setUpClass(cls):
     super(TestMultiFull, cls).setUpClass()
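     # Two-class model: a Softmax likelihood over two latent functions, one RBF kernel each.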
     likelihood = likelihoods.Softmax()
     kernel = [
         kernels.RadialBasis(input_dim=2,
                             lengthscale=1.0,
                             std_dev=1.0,
                             white=0.0) for i in range(2)
     ]
     inducing_locations = np.array([[1.0, 2.0, 3.0, 4.0]])
     cls.model = autogp.GaussianProcess(likelihood_func=likelihood,
                                        kernel_funcs=kernel,
                                        inducing_inputs=inducing_locations,
                                        num_components=2,
                                        diag_post=False,
                                        num_samples=1)
     cls.session.run(tf.global_variables_initializer())
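These setUpClass fixtures rely on a cls.session created by a shared base test class; a minimal sketch of such a base (class name and details assumed, not from the original) could look like:

import unittest
import tensorflow as tf

class GaussianProcessTestBase(unittest.TestCase):
    # Hypothetical base class: owns the shared session used by cls.session.run(...) above.
    @classmethod
    def setUpClass(cls):
        cls.session = tf.Session()

    @classmethod
    def tearDownClass(cls):
        cls.session.close()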
Example #2
 def setUpClass(cls):
     super(TestSimpleFull, cls).setUpClass()
     likelihood = likelihoods.Gaussian(1.0)
     kernel = [
         kernels.RadialBasis(input_dim=1,
                             lengthscale=1.0,
                             std_dev=1.0,
                             white=0.0)
     ]
     # In most of our unit tests, we will replace this value with something else.
     inducing_inputs = np.array([[1.0]])
     cls.model = autogp.GaussianProcess(likelihood_func=likelihood,
                                        kernel_funcs=kernel,
                                        inducing_inputs=inducing_inputs,
                                        num_components=1,
                                        diag_post=False,
                                        num_samples=10)
     cls.session.run(tf.global_variables_initializer())
Example #3
 def setUpClass(cls):
     super(TestSimpleFull, cls).setUpClass()
     likelihood = lik.Gaussian(1.0)
     kernel = cov.SquaredExponential(input_dim=1,
                                     length_scale=[1.0],
                                     std_dev=[1.0],
                                     white=[0.0])
     # In most of our unit tests, we will replace this value with something else.
     inducing_inputs = np.array([[1.0]])
     cls.inf = inf.VariationalInference(num_samples=10,
                                        lik_func=likelihood,
                                        cov_func=kernel,
                                        num_components=1)
     cls.model = autogp.GaussianProcess(lik_func=likelihood,
                                        cov_func=kernel,
                                        inducing_inputs=inducing_inputs,
                                        num_components=1,
                                        diag_post=False,
                                        inf_func=cls.inf)
     cls.session.run(tf.global_variables_initializer())
Example #4
 def setUpClass(cls):
     super(TestMultiFull, cls).setUpClass()
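     # Same multiclass setup as Example 1, written against the newer lik/cov/inf API:
     # a single SquaredExponential with output_dim=2 replaces the list of two kernels.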
     likelihood = lik.Softmax()
     kernel = cov.SquaredExponential(input_dim=2,
                                     output_dim=2,
                                     length_scale=1.,
                                     std_dev=1.,
                                     white=0.)
     inducing_inputs = np.array([[1.0, 2.0, 3.0, 4.0]])
     cls.inf = inf.VariationalInference(num_samples=10,
                                        lik_func=likelihood,
                                        cov_func=kernel,
                                        num_components=2)
     cls.model = autogp.GaussianProcess(lik_func=likelihood,
                                        cov_func=kernel,
                                        inducing_inputs=inducing_inputs,
                                        num_components=2,
                                        diag_post=False,
                                        inf_func=cls.inf)
     cls.session.run(tf.global_variables_initializer())
Example #5
    data = datasets.DataSet(d['train_inputs'].astype(np.float32),
                            d['train_outputs'].astype(np.float32))
    test = datasets.DataSet(d['test_inputs'].astype(np.float32),
                            d['test_outputs'].astype(np.float32))

    # Set up initial values for the model.
    likelihood = lik.RegressionNetwork(7, 0.1)
    kern = [
        cov.SquaredExponential(data.X.shape[1],
                               length_scale=8.0,
                               input_scaling=IS_ARD) for i in range(8)
    ]
    # kern = [kernels.ArcCosine(data.X.shape[1], 1, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)

    # Set up the loss to be reported during training.
    error_rate = None  # losses.StandardizedMeanSqError(d['train_outputs'].astype(np.float32), data.Dout)

    import time
    o = tf.train.RMSPropOptimizer(LEARNING_RATE)
    start = time.time()
    m.fit(data,
          o,
          loo_steps=0,
          var_steps=50,
          epochs=EPOCHS,
          batch_size=BATCH_SIZE,
          display_step=DISPLAY_STEP,
          test=test,
          loss=error_rate)
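Examples 5 and 7 call init_z to choose inducing-point locations, but its definition is not shown. A plausible sketch, assuming a k-means initialization over the training inputs (the sklearn dependency is an assumption, not from the original):

from sklearn.cluster import MiniBatchKMeans

def init_z(train_inputs, num_inducing):
    # Cluster the training inputs and use the centers as inducing points (assumed strategy).
    kmeans = MiniBatchKMeans(n_clusters=num_inducing)
    kmeans.fit(train_inputs)
    return kmeans.cluster_centers_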
Example #6
# Define the kernel (the head of this snippet is cut off; an RBF kernel is assumed,
# matching the commented-out Matern alternatives below).
kernel = [
    autogp.kernels.RadialBasis(1,
                               lengthscale=lengthscale_initial,
                               std_dev=sigma_initial)
]
# kernel = [autogp.kernels.Matern_3_2(1, lengthscale=lengthscale_initial, std_dev=sigma_initial)]
# kernel = [autogp.kernels.Matern_5_2(1, lengthscale=lengthscale_initial, std_dev=sigma_initial)]

# Define the sparse approximation
sparsity_factor = 1.
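# With a factor of 1.0, every training point becomes an inducing input (no actual sparsity).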
inducing_number = int(sparsity_factor * N)
id_sparse = np.arange(N)
np.random.shuffle(id_sparse)
inducing_inputs = xtrain[id_sparse[:inducing_number]]

# Define the model
model = autogp.GaussianProcess(likelihood,
                               kernel,
                               inducing_inputs,
                               num_components=1,
                               diag_post=False)

# Define the optimizer
optimizer = tf.train.RMSPropOptimizer(0.005)

# Train the model
start = time.time()
print("Start the training")
model.fit(data, optimizer, loo_steps=0, var_steps=60, epochs=300)
end = time.time()
time_elapsed = end - start
print("Execution time in seconds", time_elapsed)

# Predict new inputs.
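Based on the predict API in Example 10, a sketch of the prediction step (xtest is a hypothetical held-out input array, not shown in this snippet):

ypred, yvar = model.predict(xtest)  # xtest: hypothetical held-out inputs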
Example #7
# The first branch is truncated in the source; an ArcCosine kernel is assumed here,
# mirroring the commented-out ArcCosine kernels in Example 8.
if KERNEL == 'arc_cosine':
    kern = [
        cov.ArcCosine(data.X.shape[1],
                      lengthscale=LENGTHSCALE,
                      std_dev=1.0,
                      input_scaling=IS_ARD) for i in range(1)
    ]
else:
    kern = [
        cov.SquaredExponential(data.X.shape[1],
                               length_scale=LENGTHSCALE,
                               input_scaling=IS_ARD) for i in range(1)
    ]

print(f"Using Kernel {KERNEL}")

m = autogp.GaussianProcess(likelihood,
                           kern,
                           Z,
                           num_samples=NUM_SAMPLES,
                           num_components=NUM_COMPONENTS)
error_rate = losses.ZeroOneLoss(data.Dout)
o = tf.train.AdamOptimizer(LEARNING_RATE)
m.fit(data,
      o,
      loo_steps=LOOCV_STEPS,
      var_steps=VAR_STEPS,
      epochs=EPOCHS,
      batch_size=BATCH_SIZE,
      display_step=DISPLAY_STEP,
      test=test,
      loss=error_rate)

ypred = m.predict(test.X)[0]
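As Example 10 shows, predict returns a (means, variances) pair, so the [0] above keeps only the predictive means. A variant that keeps the uncertainty as well:

ypred_mean, ypred_var = m.predict(test.X)  # predictive means and variances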
Example #8
    data, test, _ = datasets.import_mnist()

    # Set up initial values for the model.
    likelihood = lik.Softmax()
    kern = cov.SquaredExponential(data.X.shape[1],
                                  output_dim=10,
                                  length_scale=10,
                                  std_dev=1,
                                  white=.01,
                                  input_scaling=IS_ARD)
    # kern = [kernels.ArcCosine(X.shape[1], 2, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]
    # RadialBasis(X.shape[1], input_scaling=True) for i in xrange(10)]

    Z = init_z(data.X, NUM_INDUCING)
    # Use a separate name so the inference object does not shadow the inf module.
    inference = inf.VariationalInference(kern, likelihood, num_samples=NUM_SAMPLES)
    m = autogp.GaussianProcess(Z, kern, inference, likelihood)

    # Set up the loss to be reported during training.
    error_rate = losses.ZeroOneLoss(data.Dout)

    import time
    otime = time.time()
    o = tf.train.RMSPropOptimizer(LEARNING_RATE)
    start = time.time()
    m.fit(data,
          o,
          loo_steps=50,
          var_steps=50,
          epochs=EPOCHS,
          batch_size=BATCH_SIZE,
          display_step=DISPLAY_STEP,
          test=test,
          loss=error_rate)
Example #9
np.random.shuffle(idx)
xtrain = inputs[idx[:N]]
ytrain = outputs[idx[:N]]
data = autogp.datasets.DataSet(xtrain, ytrain)
xtest = inputs[idx[N:]]
ytest = outputs[idx[N:]]

# Initialize the Gaussian process.
likelihood = autogp.lik.Gaussian(1.)
kernel = autogp.cov.SquaredExponential(1, white=1e-5)
# inference = autogp.inf.VariationalInference(kernel, likelihood)
inference = autogp.inf.Exact(kernel, likelihood)
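# Exact inference conditions on the full training set, so every training point acts as an inducing input.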
inducing_inputs = xtrain
model = autogp.GaussianProcess(inducing_inputs,
                               kernel,
                               inference,
                               likelihood,
                               inducing_outputs=ytrain)

# Train the model.
optimizer = tf.train.RMSPropOptimizer(0.005)
# model.fit(data, optimizer, batch_size=1, loo_steps=10, var_steps=10, epochs=100, optimize_inducing=False)
model.fit(data,
          optimizer,
          batch_size=None,
          loo_steps=0,
          var_steps=1,
          epochs=500,
          optimize_inducing=False,
          only_hyper=True)
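A minimal evaluation sketch for the held-out points, assuming only the predict API demonstrated in Example 10:

# Compare predictive means against the held-out outputs.
ypred, yvar = model.predict(xtest)
print("Test MSE:", np.mean((ypred - ytest) ** 2))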
Example #10
inputs = 5 * np.linspace(0, 1, num=N_all)[:, np.newaxis]
outputs = np.sin(inputs)

# Select the training and test sets.
idx = np.arange(N_all)
np.random.shuffle(idx)
xtrain = inputs[idx[:N]]
ytrain = outputs[idx[:N]]
data = autogp.datasets.DataSet(xtrain, ytrain)
xtest = inputs[idx[N:]]
ytest = outputs[idx[N:]]

# Initialize the Gaussian process.
likelihood = autogp.likelihoods.Gaussian()
kernel = [autogp.kernels.RadialBasis(1)]
inducing_inputs = xtrain
model = autogp.GaussianProcess(likelihood, kernel, inducing_inputs)

# Train the model.
optimizer = tf.train.RMSPropOptimizer(0.005)
model.fit(data, optimizer, loo_steps=50, var_steps=50, epochs=1000)

# Predict new inputs.
ypred, _ = model.predict(xtest)
plt.plot(xtrain, ytrain, '.', mew=2)
plt.plot(xtest, ytest, 'o', mew=2)
plt.plot(xtest, ypred, 'x', mew=2)
plt.show()