Example 1
import os

def import_mnist():
    # get_mnist8m_data, extract_images, process_mnist, extract_labels and
    # the DATA_DIR / *_INPUTS / *_OUTPUTS constants are defined elsewhere
    # in the source module.
    if not os.path.isdir(DATA_DIR):  # directory does not exist; download the data
        get_mnist8m_data()

    with open(TRAIN_INPUTS, 'rb') as f:  # binary mode: the files are raw bytes
        train_images = extract_images(f)
        train_images = process_mnist(train_images)

    with open(TRAIN_OUTPUTS, 'rb') as f:
        train_labels = extract_labels(f, one_hot=True)

    with open(TEST_INPUTS, 'rb') as f:
        test_images = extract_images(f)
        test_images = process_mnist(test_images)

    with open(TEST_OUTPUTS, 'rb') as f:
        test_labels = extract_labels(f, one_hot=True)

    train = datasets.DataSet(train_images, train_labels)
    test = datasets.DataSet(test_images, test_labels)
    return train, test
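
The returned pair can then be consumed in mini-batches. A minimal usage sketch, assuming datasets.DataSet exposes a TensorFlow-MNIST-style next_batch method (an assumption, not confirmed by this snippet):

train, test = import_mnist()
for step in range(100):
    # next_batch is assumed to return (images, one-hot labels)
    batch_xs, batch_ys = train.next_batch(128)
    # ... feed batch_xs / batch_ys to a training step ...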
Example 2
    FLAGS = util.util.get_flags()
    BATCH_SIZE = FLAGS.batch_size
    LEARNING_RATE = FLAGS.learning_rate
    DISPLAY_STEP = FLAGS.display_step
    EPOCHS = FLAGS.n_epochs
    NUM_SAMPLES = FLAGS.mc_train
    NUM_INDUCING = FLAGS.n_inducing
    IS_ARD = FLAGS.is_ard

    if not os.path.exists(TRAIN_PATH):  # directory does not exist; download the data
        get_sarcos_data()

    d = sarcos_all_joints_data()
    data = datasets.DataSet(d['train_inputs'].astype(np.float32),
                            d['train_outputs'].astype(np.float32))
    test = datasets.DataSet(d['test_inputs'].astype(np.float32),
                            d['test_outputs'].astype(np.float32))

    # Set up initial values for the model.
    likelihood = lik.RegressionNetwork(7, 0.1)
    kern = [
        cov.SquaredExponential(data.X.shape[1],
                               length_scale=8.0,
                               input_scaling=IS_ARD) for _ in range(8)
    ]
    # kern = [kernels.ArcCosine(data.X.shape[1], 1, 3, 5.0, 1.0, input_scaling=True) for i in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)
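
init_z is defined elsewhere in these scripts. A minimal sketch of one plausible implementation, assuming a random-subset initialization of the inducing points (the actual helper may use k-means instead):

import numpy as np

def init_z(train_inputs, num_inducing):
    # Hypothetical sketch: use a random subset of the training inputs
    # as the initial inducing-point locations Z.
    idx = np.random.permutation(train_inputs.shape[0])[:num_inducing]
    return train_inputs[idx].copy()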
Example 3
    # (end of load_cifar) one-hot encode the test labels
    test_Y = np.array(np.eye(10)[d["labels"]], dtype=np.float32)
    return train_X, train_Y, test_X, test_Y


if __name__ == '__main__':
    FLAGS = util.util.get_flags()
    BATCH_SIZE = FLAGS.batch_size
    LEARNING_RATE = FLAGS.learning_rate
    DISPLAY_STEP = FLAGS.display_step
    EPOCHS = FLAGS.n_epochs
    NUM_SAMPLES = FLAGS.mc_train
    NUM_INDUCING = FLAGS.n_inducing
    IS_ARD = FLAGS.is_ard

    train_X, train_Y, test_X, test_Y = load_cifar()
    data = datasets.DataSet(train_X, train_Y)
    test = datasets.DataSet(test_X, test_Y)

    # Set up initial values for the model.
    likelihood = likelihoods.Softmax()
    kern = [
        kernels.RadialBasis(data.X.shape[1],
                            lengthscale=10.0,
                            input_scaling=IS_ARD) for _ in range(10)
    ]
    # Alternative kernel:
    # kern = [kernels.ArcCosine(X.shape[1], 2, 3, 5.0, 1.0, input_scaling=True) for _ in range(10)]

    Z = init_z(data.X, NUM_INDUCING)
    m = autogp.GaussianProcess(likelihood, kern, Z, num_samples=NUM_SAMPLES)

    # Set up the loss to be reported during training.
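
The snippet ends here. A hedged sketch of how such a script typically continues; losses.ZeroOneLoss, data.Dout, m.fit, and m.predict are assumptions about this library's interface, not verified signatures:

# Hypothetical continuation (unverified API names):
error_rate = losses.ZeroOneLoss(data.Dout)  # assumed classification loss helper
optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE)
m.fit(data, optimizer, epochs=EPOCHS, batch_size=BATCH_SIZE,
      display_step=DISPLAY_STEP, test=test, loss=error_rate)
ypred = m.predict(test.X)  # assumed to return predictions for the test inputs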
Example 4
PRED_SAMPLES = FLAGS.mc_test
NLPDS = FLAGS.save_nlpds

MAXTIME = 300

# define GPRN P and Q
output_dim = 10  # P: number of outputs
node_dim = 10  # Q: number of latent node functions
lag_dim = 3  # number of lag features parsed out per latent function

# extract dataset
d, d_link = get_inputs()
Ytr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte']

data = datasets.DataSet(Xtr.astype(np.float32),
                        Ytr.astype(np.float32),
                        shuffle=False)
test = datasets.DataSet(Xte.astype(np.float32),
                        Yte.astype(np.float32),
                        shuffle=False)

print("dataset created")

# Model config: block by rows (where P == Q), grouping all weight functions
# w.1, w.2, etc. together and leaving the node functions f independent.
# block_struct is ordered as: weight-function rows first, then node functions.
# Required lists: block_struct, link_inputs, kern_link, kern.

# block_struct is a nested list giving the grouping order.
block_struct = [[] for _ in range(output_dim)]
for i in range(output_dim):
    # Latent-function indices i, i + P, i + 2P, ...: the weight functions
    # that share row i across all node functions.
    row = list(range(i, i + output_dim * (node_dim - 1) + 1, output_dim))
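
To make the indexing concrete: with output_dim = node_dim = 10, row i collects every tenth latent-function index starting at i, i.e. the ten weight functions that form row i across all node functions.

# Worked example for i = 0 with output_dim = node_dim = 10:
row0 = list(range(0, 0 + 10 * (10 - 1) + 1, 10))
print(row0)  # [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]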