gp = GaussianProcess(kernel_dict=kdict, regularization=sdt1**2,
                     train_fp=train, train_target=target,
                     optimize_hyperparameters=False, gradients=gradients,
                     scale_optimizer=False, scale_data=True)

# Use a hyperparameter optimization algorithm other than the default.
gp.optimize_hyperparameters(algomin='TNC', global_opt=False)
print('Optimized kernel:', gp.kernel_dict)

# Do the optimized predictions.
pred = gp.predict(test_fp=test, uncertainty=True)
prediction = np.array(pred['prediction'][:, 0])

# Calculate the uncertainty of the predictions.
uncertainty = np.array(pred['uncertainty'])

# Get confidence interval on predictions.
upper = prediction + uncertainty
lower = prediction - uncertainty
interval = upper - prediction

# Plots.
plt.figure(figsize=(11.0, 5.0))
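# A minimal, standalone sketch (synthetic stand-in data, not this script's
# variables) of how a prediction and its +/- 1 sigma band, like `upper` and
# `lower` above, are commonly drawn with matplotlib's fill_between:
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0., 1., 50)
mean = np.sin(2. * np.pi * x)      # stand-in for `prediction`
sigma = 0.1 + 0.2 * x              # stand-in for `uncertainty`
plt.plot(x, mean, 'b-')
plt.fill_between(x, mean - sigma, mean + sigma, alpha=0.3, color='b')
plt.show()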
# Model example 2 - Gaussian linear kernel regression.
if True:
    # Define prediction parameters.
    kdict = {'k1': {'type': 'linear', 'scaling': 0.9},
             'c1': {'type': 'constant', 'const': 0.0}}
    # Starting guess for the noise parameter.
    sdt1 = noise_magnitude
    # Set up the Gaussian process.
    gp1 = GaussianProcess(kernel_dict=kdict, regularization=sdt1,
                          train_fp=std['train'],
                          train_target=train_targets['target'],
                          optimize_hyperparameters=True,
                          scale_optimizer=False)
    # Do predictions.
    linear = gp1.predict(test_fp=std['test'], get_validation_error=True,
                         test_target=afunc(test))
    prediction = np.array(linear['prediction']) * train_targets['std'] + \
        train_targets['mean']
    error = get_error(prediction, afunc(test))
    print('Gaussian linear regression prediction:',
          error['absolute_average'])
    # Plot the prediction.
    plt3d.plot_surface(test_x1, test_x2,
                       prediction.reshape(np.shape(test_x1)),
                       alpha=0.3, color='g')

# Model example 3 - Gaussian Process with squared-exponential (sqe) kernel.
if True:
    # Set up the prediction routine and optimize hyperparameters.
    kdict = {'k1': {'type': 'gaussian', 'width': [0.3, 3.], 'scaling': 0.9},
             'c1': {'type': 'constant', 'const': 0.0}}
    # Starting guess for the noise parameter.
                      },
             'c1': {'type': 'constant', 'const': 0.}}
    # Starting guess for the noise parameter.
    sdt1 = noise_magnitude
    # Set up the Gaussian process.
    gp1 = GaussianProcess(kernel_dict=kdict, regularization=sdt1,
                          train_fp=std['train'],
                          train_target=train_targets['target'],
                          optimize_hyperparameters=True)
    # Do predictions.
    linear = gp1.predict(test_fp=std['test'], uncertainty=True)
    # Put predictions back on real scale.
    prediction = np.vstack(linear['prediction']) * train_targets['std'] + \
        train_targets['mean']
    # Put uncertainties back on real scale.
    uncertainty = np.vstack(linear['uncertainty']) * train_targets['std']
    # Get confidence interval on predictions.
    over_upper = prediction + uncertainty * tstd
    over_lower = prediction - uncertainty * tstd
    # Plot the uncertainties upper and lower bounds.
    plt3d.plot_surface(test_x1, test_x2,
                       over_upper.reshape(np.shape(test_x1)),
                       alpha=0.3, color='r')
    plt3d.plot_surface(test_x1, test_x2,
                       over_lower.reshape(np.shape(test_x1)),
                       alpha=0.3, color='r')
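# `tstd` above scales the predictive standard deviation into a confidence
# interval; it is defined earlier in the full script. A minimal sketch of how
# such a factor is commonly chosen (an assumption, not this script's code):
# the two-sided 95% z-score from the standard normal distribution.
from scipy.stats import norm

tstd_example = norm.ppf(0.975)  # ~1.96 for a two-sided 95% interval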
# Set up the prediction routine.
kdict = {'k1': {'type': 'gaussian', 'width': w1}}  # 'scaling': 0.9}}
gp = GaussianProcess(kernel_dict=kdict, regularization=sdt1,
                     train_fp=std['train'], train_target=target[0],
                     optimize_hyperparameters=True)

# Do the optimized predictions.
optimized = gp.predict(test_fp=std['test'], test_target=actual[0],
                       uncertainty=True, get_validation_error=True)

opt_upper = np.array(optimized['prediction']) + \
    np.array(optimized['uncertainty'])
opt_lower = np.array(optimized['prediction']) - \
    np.array(optimized['uncertainty'])

tgp1 = gp.kernel_dict['k1']['width'][0]
tgp2 = gp.regularization
opte = optimized['validation_error']['rmse_average']

# Set up the prediction routine.
kdict = {'k1': {'type': 'gaussian', 'width': ga_w, 'scaling': ga_s}}
gp = GaussianProcess(kernel_dict=kdict, regularization=ga_r,
if True:
    # Model example 1 - biased model.
    # Define prediction parameters.
    sdt1 = 0.001
    # Too large a width results in a biased model.
    w1 = 3.0
    kdict = {'k1': {'type': 'gaussian', 'width': w1}}

    # Set up the prediction routine.
    gp = GaussianProcess(kernel_dict=kdict, regularization=sdt1**2,
                         train_fp=std['train'],
                         train_target=train_targets['target'],
                         optimize_hyperparameters=False)

    # Do predictions.
    under_fit = gp.predict(test_fp=std['test'], uncertainty=True)

    # Scale predictions back to the original scale.
    under_prediction = np.vstack(under_fit['prediction']) * \
        train_targets['std'] + train_targets['mean']
    under_uncertainty = np.vstack(under_fit['uncertainty']) * \
        train_targets['std']

    # Get average errors.
    error = get_error(under_prediction.reshape(-1), afunc(test).reshape(-1))
    print('Gaussian process (biased model) prediction:',
          error['absolute_average'])

    # Get confidence interval on predictions.
    upper = under_prediction + under_uncertainty * tstd
    lower = under_prediction - under_uncertainty * tstd

    # Plot example 1.
    ax = fig.add_subplot(221)
    ax.plot(linex, liney, '-', lw=1, color='black')
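# Why a large width biases the model: with a squared-exponential (Gaussian)
# kernel of the usual form k(x, x') = exp(-|x - x'|**2 / (2 * w**2)), a width
# of 3.0 leaves points one unit apart almost perfectly correlated, so the fit
# is over-smoothed. A standalone numeric sketch (illustrative assumption, not
# this script's kernel code):
import numpy as np

for w in (0.3, 3.0):
    k = np.exp(-1.0 / (2.0 * w ** 2))  # covariance between points 1 unit apart
    print('width={:.1f}: k(x, x+1)={:.3f}'.format(w, k))
# width=0.3 gives ~0.004 (nearly uncorrelated); width=3.0 gives ~0.946.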