Example #1

import numpy as np
import matplotlib.pyplot as plt

import rdml_graph as gr


if __name__ == '__main__':
    X_train = np.array([0, 1, 2, 3, 4.2, 6, 7])
    ratings = np.array([5, 5, 2, 1, 2, 3, 3])


    gp = gr.PreferenceGP(gr.RBF_kern(0.5, 0.7),
            other_probits={'ordinal': gr.OrdinalProbit(2.0, 1.0, n_ordinals=5)})
    #gp = gr.PreferenceGP(gr.periodic_kern(1.2,0.3,5))
    #gp = gr.PreferenceGP(gr.linear_kern(0.2, 0.2, 0.2))
    #gp = gr.PreferenceGP(gr.RBF_kern(0.2,1)+gr.periodic_kern(1,0.2,0)+gr.linear_kern(0.2,0.1,0.3))
    #gp = gr.PreferenceGP(gr.RBF_kern(0.1,1)*gr.linear_kern(0.3,0.2,0.3))

    gp.add(X_train, ratings, type='ordinal')

    #gp.optimize(optimize_hyperparameter=True)
    #print('gp.calc_ll()')
    #print(gp.calc_ll())


    X = np.arange(-0.5, 8, 0.1)
    mu, sigma = gp.predict(X)
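
    # Sketch only: the original snippet ends at the prediction. Plotting in
    # the style of Example #2 below, treating sigma as a variance (an
    # assumption carried over from that example).
    std = np.sqrt(sigma)
    plt.plot(X, mu)
    plt.gca().fill_between(X, mu - std, mu + std, color='#dddddd')
    plt.scatter(X_train, ratings)
    plt.title('PreferenceGP estimate with ordinal probit (1 sigma)')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()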
Example #2

import numpy as np
import matplotlib.pyplot as plt

import rdml_graph as gr


if __name__ == '__main__':
    X_train = np.array([0, 1, 2, 3, 6, 7])
    X = np.arange(-3, 12, 0.1)
    y_train = np.array([1, 0.5, 0, -1, 1, 2])
    #y_train = np.array([0.2, 0.3, 0.4, 0.52, 0.7, 0.76])

    #training_sigma = 0
    training_sigma = np.array([1, 0.5, 0.1, 0.1, 0.2, 0])

    #gp = gr.GP(gr.RBF_kern, {'rbf_sigma': 1, 'rbf_l': 1})
    #gp = gr.GP(gr.periodic_kern, {'periodic_sigma': 1, 'periodic_l': 1, 'periodic_p': 20})
    #gp = gr.GP(gr.linear_kern, {'linear_sigma': 5, 'linear_sigma_b': 5, 'linear_offset': 0.2})
    gp = gr.GP(gr.RBF_kern(1, 1) + gr.periodic_kern(1, 1, 10) + gr.linear_kern(3, 1, 0.3))
    gp.add(X_train, y_train, training_sigma=training_sigma)

    mu, sigma = gp.predict(X)
    std = np.sqrt(sigma)

    plt.plot(X, mu)
    sigma_to_plot = 1

    plt.gca().fill_between(X, mu-(sigma_to_plot*std), mu+(sigma_to_plot*std), color='#dddddd')
    plt.scatter(X_train, y_train)

    plt.title('Gaussian Process estimate (1 sigma)')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
Example #3
# TestGPKernels.py
# Written by Ian Rankin, October 2021
#
# A quick test of GP kernel objects: construction, evaluation,
# composition, and getting/setting hyperparameters.

import numpy as np
import rdml_graph as gr
import pdb

rbf = gr.RBF_kern(1, 1)
kern2 = gr.periodic_kern(1, 1, 3)
kern3 = gr.linear_kern(1, 1, 1)

print(rbf)
print(kern2)
print(kern3)
print(rbf(1, 2))
print(kern2(1, 2))
print(kern3(1, 2))

combined = rbf + (kern2 * kern3)
print(combined)
print(combined(1, 2))

print(combined.get_param())
print(combined.gradient(1, 2))
combined.set_param([2, 3, 5, 4, 7, 1, 1, 2])
print(combined.get_param())

x = np.array([0, 1, 4, 5, 6, 7])
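
# Sketch only: the snippet defines x but stops here. One natural next step,
# shown as an assumption, is to build the Gram matrix of the combined kernel
# by evaluating it pairwise, just like the scalar calls above.
K = np.array([[combined(xi, xj) for xj in x] for xi in x])
print(K)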
Example #4
# Written by Ian Rankin, September 2021
#
# An example of using the preference GP with the abs-bounded probit
# (AbsBoundProbit).

import numpy as np
import matplotlib.pyplot as plt

import rdml_graph as gr

if __name__ == '__main__':
    X_train = np.array([0, 1, 2, 3, 4.2, 6, 7])
    abs_values = np.array([0.8, 0.6, 0.3, 0.2, 0.22, 0.4, 0.5])
    #abs_values = np.array([0.4, 0.2, 0.2, 0.2, 0.1, 0.11, 0.3])


    gp = gr.PreferenceGP(gr.RBF_kern(0.5, 0.7), normalize_gp=False,
            normalize_positive=False,
            other_probits={'abs': gr.AbsBoundProbit(1.0, 5.0)})
    #gp = gr.PreferenceGP(gr.periodic_kern(1.2,0.3,5))
    #gp = gr.PreferenceGP(gr.linear_kern(0.2, 0.2, 0.2))
    #gp = gr.PreferenceGP(gr.RBF_kern(0.2,1)+gr.periodic_kern(1,0.2,0)+gr.linear_kern(0.2,0.1,0.3))
    #gp = gr.PreferenceGP(gr.RBF_kern(0.1,1)*gr.linear_kern(0.3,0.2,0.3))

    gp.add(X_train, abs_values, type='abs')

    #gp.optimize(optimize_hyperparameter=True)
    #print('gp.calc_ll()')
    #print(gp.calc_ll())

    X = np.arange(-0.5, 8, 0.1)
    mu, sigma = gp.predict(X)
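
    # Sketch only: the snippet ends at the prediction. A minimal visualization
    # against the absolute ratings (sigma treated as a variance, per Example #2).
    std = np.sqrt(sigma)
    plt.plot(X, mu)
    plt.gca().fill_between(X, mu - std, mu + std, color='#dddddd')
    plt.scatter(X_train, abs_values)
    plt.title('PreferenceGP estimate with AbsBoundProbit (1 sigma)')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()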
Example #5

import numpy as np
import tqdm

import rdml_graph as gr
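

# f_sq is used below but never defined in this snippet; this quadratic
# utility is purely an assumed placeholder, following the f_lin(x, data=None)
# signature from Example #6.
def f_sq(x, data=None):
    return x[:, 0]**2 + x[:, 1]**2
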
if __name__ == '__main__':
    num_side = 25
    bounds = [(0,7), (0,7)]

    num_train_pts = 40
    num_alts = 4

    utility_f = f_sq


    #gp = gr.PreferenceGP(gr.RBF_kern(0.2,0.5)*gr.linear_kern(0.2, 0.1, 0))
    #gp = gr.PreferenceGP(gr.linear_kern(0.3, 0.1, 0.0))
    gp = gr.PreferenceGP(gr.RBF_kern(1.0, 1.0), pareto_pairs=True,
                         use_hyper_optimization=False,
                         active_learner=gr.DetLearner(0.9))
    gp.add_prior(bounds=np.array(bounds), num_pts=20)

    for i in tqdm.tqdm(range(10)):
        train_X = np.random.random((num_train_pts, 2)) * np.array(
            [bounds[0][1] - bounds[0][0], bounds[1][1] - bounds[1][0]]) + np.array(
                [bounds[0][0], bounds[1][0]])
        train_Y = utility_f(train_X)  # f_lin(train_X)

        selected_idx, UCB, best_value = gp.select(train_X, num_alts)

        best_idx = np.argmax(train_Y[selected_idx])

        pairs = gr.ranked_pairs_from_fake(train_X[selected_idx], utility_f)
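
        # Sketch only: the snippet ends here. Presumably the ranked pairs are
        # fed back into the GP; the exact gp.add call below is an assumption
        # modeled on the add(...) usage in the other examples.
        gp.add(train_X[selected_idx], pairs)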
Example #6

import numpy as np
import tqdm

import rdml_graph as gr


def f_lin(x, data=None):
    return x[:, 0] * x[:, 1]
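

# f_sin is called below but not defined in this snippet; this sinusoidal
# utility is purely an assumed placeholder with the same signature as f_lin.
def f_sin(x, data=None):
    return np.sin(x[:, 0]) * np.sin(x[:, 1])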


if __name__ == '__main__':
    num_side = 25
    bounds = [(0, 7), (0, 7)]

    num_train_pts = 50

    # initial random sample (immediately regenerated inside the loop below)
    train_X = np.random.random((num_train_pts, 2)) * np.array(
        [bounds[0][1] - bounds[0][0], bounds[1][1] - bounds[1][0]]) + np.array(
            [bounds[0][0], bounds[1][0]])
    train_Y = f_sin(train_X)

    gp = gr.GP(gr.RBF_kern(1, 0.8))

    for i in tqdm.tqdm(range(20)):
        train_X = np.random.random((num_train_pts, 2)) * np.array([
            bounds[0][1] - bounds[0][0], bounds[1][1] - bounds[1][0]
        ]) + np.array([bounds[0][0], bounds[1][0]])
        train_Y = f_sin(train_X)

        selected_idx, UCB, best_value = gp.ucb_selection(train_X, 5)

        #print(selected_idx)
        #print(UCB)
        #print(train_X[selected_idx])
        #pdb.set_trace()
        gp.add(train_X[selected_idx], train_Y[selected_idx])
        #gp.add(train_X, train_Y)
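
    # Sketch only: the original snippet ends inside the loop. One assumed
    # follow-up is to evaluate the fitted GP on a num_side x num_side grid
    # and plot the posterior mean (matplotlib is not imported above).
    import matplotlib.pyplot as plt
    xv, yv = np.meshgrid(np.linspace(bounds[0][0], bounds[0][1], num_side),
                         np.linspace(bounds[1][0], bounds[1][1], num_side))
    grid = np.column_stack((xv.ravel(), yv.ravel()))
    mu, sigma = gp.predict(grid)
    plt.imshow(mu.reshape(num_side, num_side), origin='lower',
               extent=(bounds[0][0], bounds[0][1], bounds[1][0], bounds[1][1]))
    plt.colorbar()
    plt.title('GP posterior mean over the input bounds (sketch)')
    plt.show()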