Example #1
 def setUpClass(cls):
     super(TestSimpleFull, cls).setUpClass()
     likelihood = lik.Gaussian(1.0)
     kernel = cov.SquaredExponential(input_dim=1,
                                     length_scale=[1.0],
                                     std_dev=[1.0],
                                     white=[0.0])
     # In most of our unit tests, we will replace this value with something else.
     inducing_inputs = np.array([[1.0]])
     cls.inf = inf.VariationalInference(num_samples=10,
                                        lik_func=likelihood,
                                        cov_func=kernel,
                                        num_components=1)
     cls.model = autogp.GaussianProcess(lik_func=likelihood,
                                        cov_func=kernel,
                                        inducing_inputs=inducing_inputs,
                                        num_components=1,
                                        diag_post=False,
                                        inf_func=cls.inf)
     cls.session.run(tf.global_variables_initializer())
Example #2
 def setUpClass(cls):
     super(TestMultiFull, cls).setUpClass()
     likelihood = lik.Softmax()
     kernel = cov.SquaredExponential(input_dim=2,
                                     output_dim=2,
                                     length_scale=1.,
                                     std_dev=1.,
                                     white=0.)
     inducing_inputs = np.array([[1.0, 2.0, 3.0, 4.0]])
     cls.inf = inf.VariationalInference(num_samples=10,
                                        lik_func=likelihood,
                                        cov_func=kernel,
                                        num_components=2)
     cls.model = autogp.GaussianProcess(lik_func=likelihood,
                                        cov_func=kernel,
                                        inducing_inputs=inducing_inputs,
                                        num_components=2,
                                        diag_post=False,
                                        inf_func=cls.inf)
     cls.session.run(tf.global_variables_initializer())
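Both setUpClass fixtures above call super(...).setUpClass() and then run an op through cls.session, so they assume a shared base test class that owns a TensorFlow session. A minimal sketch of such a base class (the class name and graph handling are assumptions; the real test module may differ):

 import unittest
 import tensorflow as tf

 class TestBase(unittest.TestCase):  # hypothetical base-class name
     @classmethod
     def setUpClass(cls):
         # Give each test class a fresh graph and a shared TF1 session,
         # matching the cls.session.run(...) calls in the fixtures above.
         tf.reset_default_graph()
         cls.session = tf.Session()

     @classmethod
     def tearDownClass(cls):
         cls.session.close()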
Example #3
    if not os.path.exists(TRAIN_PATH):  # directory does not exist, download the data
        get_sarcos_data()
        get_sarcos_data()

    d = sarcos_all_joints_data()
    data = datasets.DataSet(d['train_inputs'].astype(np.float32),
                            d['train_outputs'].astype(np.float32))
    test = datasets.DataSet(d['test_inputs'].astype(np.float32),
                            d['test_outputs'].astype(np.float32))

    # Set up initial values for the model.
    likelihood = lik.RegressionNetwork(7, 0.1)
    kern = [
        cov.SquaredExponential(data.X.shape[1],
                               length_scale=8.0,
                               input_scaling=IS_ARD) for i in range(8)
    ]
    # kern = [kernels.ArcCosine(data.X.shape[1], 1, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)

    # Set up the loss to be reported during training.
    error_rate = None  # losses.StandardizedMeanSqError(d['train_outputs'].astype(np.float32), data.Dout)

    import time
    o = tf.train.RMSPropOptimizer(LEARNING_RATE)
    start = time.time()
    m.fit(data,
          o,
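Several of the examples initialize the inducing inputs with init_z(data.X, NUM_INDUCING). A minimal sketch of one common initialization (a random subset of the training inputs; AutoGP's actual helper may instead use k-means centroids):

import numpy as np

def init_z(train_inputs, num_inducing):
    # Pick a random subset of the training points as the initial inducing inputs.
    idx = np.random.choice(train_inputs.shape[0], num_inducing, replace=False)
    return train_inputs[idx].copy()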
Example #4
Z = init_z(data.X, NUM_INDUCING)
likelihood = lik.Logistic()  # Set up initial values for the model.

if KERNEL == 'arccosine':
    kern = [
        cov.ArcCosine(data.X.shape[1],
                      degree=DEGREE,
                      depth=DEPTH,
                      lengthscale=LENGTHSCALE,
                      std_dev=1.0,
                      input_scaling=IS_ARD) for i in range(1)
    ]
else:
    kern = [
        cov.SquaredExponential(data.X.shape[1],
                               length_scale=LENGTHSCALE,
                               input_scaling=IS_ARD) for i in range(1)
    ]

print(f"Using Kernel {KERNEL}")

m = autogp.GaussianProcess(likelihood,
                           kern,
                           Z,
                           num_samples=NUM_SAMPLES,
                           num_components=NUM_COMPONENTS)
error_rate = losses.ZeroOneLoss(data.Dout)
o = tf.train.AdamOptimizer(LEARNING_RATE)
m.fit(data,
      o,
      loo_steps=LOOCV_STEPS,
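Example #4 reports losses.ZeroOneLoss(data.Dout) during training. A minimal sketch of what a zero-one loss computes (this function is an illustration, not AutoGP's class; the handling of binary outputs is an assumption):

import numpy as np

def zero_one_loss(y_true, y_pred):
    # Fraction of misclassified examples: arg-max class for one-hot
    # multi-class targets, a 0.5 threshold for a single binary column.
    if y_pred.ndim > 1 and y_pred.shape[1] > 1:
        return np.mean(np.argmax(y_pred, axis=1) != np.argmax(y_true, axis=1))
    return np.mean((np.ravel(y_pred) > 0.5) != (np.ravel(y_true) > 0.5))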
Example #5
    FLAGS = util.util.get_flags()
    BATCH_SIZE = FLAGS.batch_size
    LEARNING_RATE = FLAGS.learning_rate
    DISPLAY_STEP = FLAGS.display_step
    EPOCHS = FLAGS.n_epochs
    NUM_SAMPLES = FLAGS.mc_train
    NUM_INDUCING = FLAGS.n_inducing
    IS_ARD = FLAGS.is_ard

    data, test, _ = datasets.import_mnist()

    # Set up initial values for the model.
    likelihood = lik.Softmax()
    kern = cov.SquaredExponential(data.X.shape[1],
                                  output_dim=10,
                                  length_scale=10,
                                  std_dev=1,
                                  white=.01,
                                  input_scaling=IS_ARD)
    # kern = [kernels.ArcCosine(X.shape[1], 2, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]
    # kern = [kernels.RadialBasis(X.shape[1], input_scaling=True) for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    # Name the instance something other than `inf` so it does not shadow the module.
    inference = inf.VariationalInference(kern, likelihood, num_samples=NUM_SAMPLES)
    m = autogp.GaussianProcess(Z, kern, inference, likelihood)

    # Set up the loss to be reported during training.
    error_rate = losses.ZeroOneLoss(data.Dout)

    import time
    otime = time.time()
    o = tf.train.RMSPropOptimizer(LEARNING_RATE)
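Examples #5 and #6 read their hyperparameters from util.util.get_flags(). A sketch of what such a TF1-style flag helper can look like, covering exactly the FLAGS attributes the snippets read (the default values here are placeholders, not AutoGP's defaults):

import tensorflow as tf

def get_flags():
    flags = tf.app.flags
    flags.DEFINE_integer('batch_size', 512, 'Minibatch size.')
    flags.DEFINE_float('learning_rate', 0.003, 'Optimizer learning rate.')
    flags.DEFINE_integer('display_step', 1, 'Epochs between progress reports.')
    flags.DEFINE_integer('n_epochs', 100, 'Number of training epochs.')
    flags.DEFINE_integer('mc_train', 100, 'Monte Carlo samples used in training.')
    flags.DEFINE_integer('n_inducing', 100, 'Number of inducing points.')
    flags.DEFINE_boolean('is_ard', True, 'Use per-dimension (ARD) length scales.')
    return flags.FLAGS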
Example #6
    FLAGS = util.util.get_flags()
    BATCH_SIZE = FLAGS.batch_size
    LEARNING_RATE = FLAGS.learning_rate
    DISPLAY_STEP = FLAGS.display_step
    EPOCHS = FLAGS.n_epochs
    NUM_SAMPLES = FLAGS.mc_train
    NUM_INDUCING = FLAGS.n_inducing
    IS_ARD = FLAGS.is_ard

    data, test = import_mnist()

    # Set up initial values for the model.
    likelihood = lik.Softmax()
    kern = cov.SquaredExponential(data.X.shape[1],
                                  length_scale=[10] * 10,
                                  std_dev=[1] * 10,
                                  white=[.01] * 10,
                                  input_scaling=IS_ARD)
    # kern = [kernels.ArcCosine(X.shape[1], 2, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]
    # kern = [kernels.RadialBasis(X.shape[1], input_scaling=True) for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)

    # Set up the loss to be reported during training.
    error_rate = losses.ZeroOneLoss(data.Dout)

    o = tf.train.RMSPropOptimizer(LEARNING_RATE)
    m.fit(data,
          o,
          loo_steps=50,
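The MNIST snippets end mid-call to m.fit, so the remaining keyword arguments are not shown here. After training, evaluation typically looks like the hedged sketch below (a predict method returning predicted means and variances, and the X/Y attribute names on the test DataSet, are assumptions based on the other examples):

    import numpy as np

    # Predict on the held-out MNIST split and report the zero-one error rate.
    pred_means, pred_vars = m.predict(test.X)
    test_error = np.mean(np.argmax(pred_means, axis=1) != np.argmax(test.Y, axis=1))
    print("MNIST test error rate:", test_error)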