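# The imports below are not part of the original snippet; they are a minimal
# sketch of what run_demo() appears to assume: the legacy Shogun "modshogun"
# bindings, scipy as SP, pylab as PL, logging as LG, and numpy's random module
# and transpose. The helpers create_toy_data(), plot_sausage() and
# plot_training_data() are assumed to be defined elsewhere in the demo file.
import logging as LG
import scipy as SP
import pylab as PL
from numpy import random, transpose
from modshogun import (RealFeatures, RegressionLabels, GaussianKernel, ZeroMean,
                       GaussianLikelihood, ExactInferenceMethod,
                       GaussianProcessRegression, ModelSelectionParameters,
                       R_LINEAR, GradientCriterion, GradientEvaluation,
                       GradientModelSelection)
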
def run_demo():
    LG.basicConfig(level=LG.INFO)
    random.seed(572)

    #1. create toy data
    [x, y] = create_toy_data()

    feat_train = RealFeatures(transpose(x))
    labels = RegressionLabels(y)

    n_dimensions = 1

    #2. locations of uniformly spaced prediction points (not used further in this snippet)
    X = SP.linspace(0, 10, 10)[:, SP.newaxis]

    #new interface with likelihood parameters decoupled from the covariance function
    likelihood = GaussianLikelihood()
    covar_parms = SP.log([2])
    hyperparams = {'covar': covar_parms, 'lik': SP.log([1])}

    #construct covariance function
    SECF = GaussianKernel(feat_train, feat_train, 2)
    covar = SECF
    zmean = ZeroMean()
    inf = ExactInferenceMethod(SECF, feat_train, zmean, labels, likelihood)

    gp = GaussianProcessRegression(inf, feat_train, labels)

    root = ModelSelectionParameters()
    c1 = ModelSelectionParameters("inference_method", inf)
    root.append_child(c1)

    c2 = ModelSelectionParameters("scale")
    c1.append_child(c2)
    c2.build_values(0.01, 4.0, R_LINEAR)
    c3 = ModelSelectionParameters("likelihood_model", likelihood)
    c1.append_child(c3)

    c4 = ModelSelectionParameters("sigma")
    c3.append_child(c4)
    c4.build_values(0.001, 4.0, R_LINEAR)
    c5 = ModelSelectionParameters("kernel", SECF)
    c1.append_child(c5)

    c6 = ModelSelectionParameters("width")
    c5.append_child(c6)
    c6.build_values(0.001, 4.0, R_LINEAR)
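
    # The parameter tree now mirrors the hierarchy of the inference method:
    # inference_method -> {scale, likelihood_model -> sigma, kernel -> width},
    # each leaf holding the range the gradient search is allowed to explore.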

    crit = GradientCriterion()

    grad = GradientEvaluation(gp, feat_train, labels, crit)

    grad.set_function(inf)

    gp.print_modsel_params()

    root.print_tree()

    grad_search = GradientModelSelection(root, grad)

    grad.set_autolock(0)

    best_combination = grad_search.select_model(1)

    gp.set_return_type(GaussianProcessRegression.GP_RETURN_COV)
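    # with GP_RETURN_COV set, apply_regression() returns the predictive
    # covariances (St below); the return type is switched to the means (M) later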

    St = gp.apply_regression(feat_train)

    St = St.get_labels()

    gp.set_return_type(GaussianProcessRegression.GP_RETURN_MEANS)

    M = gp.apply_regression()

    M = M.get_labels()

    #create plots
    plot_sausage(transpose(x), transpose(M), transpose(SP.sqrt(St)))
    plot_training_data(x, y)
    PL.show()
def regression_gaussian_process_modelselection(n=100, n_test=100, x_range=5, x_range_test=10, noise_var=0.4):

    from modshogun import RealFeatures, RegressionLabels
    from modshogun import GaussianKernel
    from modshogun import GradientModelSelection, ModelSelectionParameters
    from modshogun import (
        GaussianLikelihood,
        ZeroMean,
        ExactInferenceMethod,
        GaussianProcessRegression,
        GradientCriterion,
        GradientEvaluation,
    )
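
    # The original snippet appears to rely on numpy and pylab names being in
    # scope at module level (e.g. via star imports); importing them explicitly
    # here keeps the example self-contained.
    from numpy import array, random, sin, sqrt
    from pylab import figure, subplot, title, plot, fill_between, legend, show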

    # easy regression data: one dimensional noisy sine wave
    X_train = random.rand(1, n) * x_range
    X_test = array([[float(i) / n_test * x_range_test for i in range(n_test)]])

    y_test = sin(X_test)
    y_train = sin(X_train) + random.randn(n) * noise_var

    # shogun representation
    labels = RegressionLabels(y_train[0])
    feats_train = RealFeatures(X_train)
    feats_test = RealFeatures(X_test)

    # GP specification
    kernel = GaussianKernel(10, 0.05)

    mean = ZeroMean()

    likelihood = GaussianLikelihood(0.8)

    inf = ExactInferenceMethod(kernel, feats_train, mean, labels, likelihood)
    inf.set_scale(2.5)
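
    # these are only initial hyperparameter guesses (kernel width 0.05, Gaussian
    # noise parameter 0.8, scale 2.5); the gradient-based model selection below
    # tunes them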

    gp = GaussianProcessRegression(inf)

    means = gp.get_mean_vector(feats_test)
    variances = gp.get_variance_vector(feats_test)

    # plot results
    figure()

    subplot(2, 1, 1)
    title("Initial parameter's values")

    plot(X_train[0], y_train[0], "bx")  # training observations

    plot(X_test[0], y_test[0], "g-")  # ground truth of test
    plot(X_test[0], means, "r-")  # mean predictions of test

    # shade the pointwise ~95% predictive interval (mean +/- 1.96 standard deviations)
    fill_between(X_test[0], means - 1.96 * sqrt(variances), means + 1.96 * sqrt(variances), color="grey")

    legend(["training", "ground truth", "mean predictions"])

    # evaluate our inference method for its derivatives
    grad = GradientEvaluation(gp, feats_train, labels, GradientCriterion(), False)
    grad.set_function(inf)

    # gradient-based model selection, driven by the gradient evaluation above
    grad_search = GradientModelSelection(grad)

    # search for best parameters
    best_combination = grad_search.select_model(True)

    # apply the best parameter combination found to the GP machine
    best_combination.apply_to_machine(gp)

    means = gp.get_mean_vector(feats_test)
    variances = gp.get_variance_vector(feats_test)

    # plot results
    subplot(2, 1, 2)
    title("Selected by gradient search parameter's values")

    plot(X_train[0], y_train[0], "bx")  # training observations

    plot(X_test[0], y_test[0], "g-")  # ground truth of test
    plot(X_test[0], means, "r-")  # mean predictions of test

    fill_between(X_test[0], means - 1.96 * sqrt(variances), means + 1.96 * sqrt(variances), color="grey")

    legend(["training", "ground truth", "mean predictions"])

    show()
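
# A minimal entry point, assuming the example above lives in a script of its
# own; the defaults reproduce the noisy sine-wave setup described in it.
if __name__ == '__main__':
    regression_gaussian_process_modelselection()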