# NEED TO REDUCE BATCH SIZE IF AL EXPERIMENT IS STARTING WITH 20 DATA POINTS
    # batch_size = 32

    # NOTE(review): this is the interior of a function whose `def` line is not
    # visible in this chunk. layer_sizes, n_samples, alpha, learning_rate,
    # v_prior, the train/test set variables, adam_epochs, X_pool, Queries and
    # acquisition_iterations are assumed to come from that enclosing scope —
    # confirm against the full file.
    # Batch of 16 instead of the commented-out 32, per the active-learning
    # note above: the initial labelled set may hold only 20 points.
    batch_size = 16

    # Python 2 print statement (the file mixes `print x` and `print(x)`).
    print '... building model'
    sys.stdout.flush()

    # Build the model — BB_alpha is project-local; presumably black-box
    # alpha-divergence minimisation. Verify the argument order against the
    # BB_alpha constructor definition.
    bb_alpha = BB_alpha(layer_sizes, n_samples, alpha, learning_rate, v_prior, batch_size, train_set_x, train_set_y, N_train, test_set_x, test_set_y, N_test, mean_y_train, std_y_train)

    print '... training'
    sys.stdout.flush()

    #test_error, test_ll = bb_alpha.train_ADAM(adam_epochs)
    # Train with ADAM for adam_epochs epochs; returns (test error, test
    # log-likelihood). The error is presumably an RMSE — it is stored into
    # all_rmse below — TODO confirm against train_ADAM.
    test_error, test_ll = bb_alpha.train_ADAM(adam_epochs)

    print('Test Error', test_error)
    print('Test Log Likelihood', test_ll)


    # Seed the running error record with the pre-acquisition test error.
    all_rmse = test_error


    # Active-learning acquisition loop: each iteration draws `Queries` pool
    # indices uniformly at random (a random-acquisition baseline).
    for i in range(acquisition_iterations):

        print('Acquisition Iteration: ', i)

        x_pool_index = np.asarray(random.sample(range(0, X_pool.shape[0]), Queries))

        # Rows of the unlabelled pool selected this round. The loop body is
        # cut off here by the next pasted fragment — the remainder (labels,
        # retraining, pool update) is not visible in this chunk.
        Pooled_X = X_pool[x_pool_index, :]
# batch_size = 32

# NOTE(review): module-level near-duplicate of the indented fragment above
# (same setup, reformatted). It relies on layer_sizes, n_samples, alpha,
# learning_rate, v_prior, the train/test set variables, adam_epochs, X_pool,
# y_pool, Queries and acquisition_iterations already being bound at module
# scope — confirm against the full file.
# Batch of 16 instead of 32: the AL experiment may start with only ~20 points.
batch_size = 16

# Python 2 print statement (the file mixes `print x` and `print(x)`).
print '... building model'
sys.stdout.flush()

# Build the model — BB_alpha is project-local; presumably black-box
# alpha-divergence minimisation. Verify argument order against its definition.
bb_alpha = BB_alpha(layer_sizes, n_samples, alpha, learning_rate, v_prior,
                    batch_size, train_set_x, train_set_y, N_train, test_set_x,
                    test_set_y, N_test, mean_y_train, std_y_train)

print '... training'
sys.stdout.flush()

#test_error, test_ll = bb_alpha.train_ADAM(adam_epochs)
# Train with ADAM for adam_epochs epochs; returns (test error, test
# log-likelihood). Error is presumably RMSE (stored into all_rmse below) —
# TODO confirm against train_ADAM.
test_error, test_ll = bb_alpha.train_ADAM(adam_epochs)

print('Test Error', test_error)
print('Test Log Likelihood', test_ll)

# Seed the running error record with the pre-acquisition test error.
all_rmse = test_error

# Active-learning acquisition loop: each round samples `Queries` pool indices
# uniformly at random (random-acquisition baseline).
for i in range(acquisition_iterations):

    print('Acquisition Iteration: ', i)

    x_pool_index = np.asarray(random.sample(range(0, X_pool.shape[0]),
                                            Queries))

    # Selected pool inputs and their labels. The loop body is truncated here
    # by the next pasted fragment — retraining / pool bookkeeping that should
    # follow is not visible in this chunk.
    Pooled_X = X_pool[x_pool_index, :]
    Pooled_Y = y_pool[x_pool_index, :]
# Example #3 — the original lines here were "예제 #3" (Korean for "Example #3")
# and a stray "0", non-Python residue from the code-example page this file was
# pasted from; commented out so they no longer break parsing.
    # NOTE(review): interior of a function whose `def` line is not visible in
    # this chunk; layer_sizes, n_samples, alpha, the train/test set variables
    # and BB_alpha are assumed to come from that enclosing scope — confirm
    # against the full file.
    learning_rate = 0.001
    # Prior variance for the Bayesian weights — presumably; verify against
    # how BB_alpha uses v_prior.
    v_prior = 1.0


    # NEED TO REDUCE BATCH SIZE IF AL EXPERIMENT IS STARTING WITH 20 DATA POINTS
    batch_size = 32


    # Python 2 print statement (the file mixes `print x` and `print(x)`).
    print '... building model'
    sys.stdout.flush()


    # Build the model — BB_alpha is project-local; same argument order as the
    # other fragments in this file.
    bb_alpha = BB_alpha(layer_sizes, n_samples, alpha, learning_rate, v_prior, batch_size, \
        train_set_x, train_set_y, N_train, test_set_x, test_set_y, N_test, mean_y_train, std_y_train)
    print '... training'
    sys.stdout.flush()

    #test_error, test_ll = bb_alpha.train_ADAM(500)

    # Short 20-epoch run (the 500-epoch call above was commented out —
    # presumably a quick/debug configuration).
    test_error, test_ll = bb_alpha.train_ADAM(20)

    print('Test Error', test_error)
    print('Test Log Likelihood', test_ll)


    # Disabled result logging — kept as-is; re-enable to append metrics to
    # the results/ text files.
    # with open("results/test_ll.txt", "a") as myfile:
    #     myfile.write(repr(test_ll) + '\n')

    # with open("results/test_error.txt", "a") as myfile:
    #     myfile.write(repr(test_error) + '\n')
    # NOTE(review): interior of a function whose `def` line is not visible in
    # this chunk; `datasets`, `d`, `mean_y_train`, `std_y_train` and BB_alpha
    # are assumed to come from that enclosing scope — confirm against the
    # full file.
    # Unpack (train, test) splits; get_value(borrow=True) indicates these are
    # Theano shared variables.
    train_set_x, train_set_y = datasets[ 0 ]
    test_set_x, test_set_y = datasets[ 1 ]

    N_train = train_set_x.get_value(borrow = True).shape[ 0 ]
    N_test = test_set_x.get_value(borrow = True).shape[ 0 ]
    # Network: d inputs -> one hidden layer of 100 units -> one output per
    # target dimension.
    layer_sizes = [ d, 100, len(mean_y_train) ]
    # Monte Carlo samples per forward pass — presumably; verify against
    # BB_alpha's use of n_samples.
    n_samples = 50
    # Alpha-divergence parameter (near 0, i.e. close to variational Bayes —
    # TODO confirm against BB_alpha).
    alpha = 0.0001
    learning_rate = 0.001
    v_prior = 1.0
    batch_size = 32
    # Python 2 print statement (the file mixes `print x` and `print(x)`).
    print '... building model'
    sys.stdout.flush()
    bb_alpha = BB_alpha(layer_sizes, n_samples, alpha, learning_rate, v_prior, batch_size, \
        train_set_x, train_set_y, N_train, test_set_x, test_set_y, N_test, mean_y_train, std_y_train)
    print '... training'
    sys.stdout.flush()

    # Full 500-epoch ADAM run; returns (test error, test log-likelihood).
    test_error, test_ll = bb_alpha.train_ADAM(500)

    print('Test Error', test_error)
    print('Test Log Likelihood', test_ll)


    # Disabled result logging — kept as-is; re-enable to append metrics to
    # the results/ text files.
    # with open("results/test_ll.txt", "a") as myfile:
    #     myfile.write(repr(test_ll) + '\n')

    # with open("results/test_error.txt", "a") as myfile:
    #     myfile.write(repr(test_error) + '\n')