def plot_gp_pred(sigma, **fillargs):
    """Fit a squared-exponential GP to the module-level series `d` and plot it.

    Plots the GP mean over `[0, n-1]` plus a shaded band of mean +/- MSE.
    Relies on module-level `d` (observations), `n` (series length), `np`,
    `pylab`, and the legacy sklearn `GaussianProcess` class.

    Parameters:
        sigma: noise scale used to build the per-point nugget term.
        **fillargs: keyword arguments forwarded to `pylab.fill_between`
            (e.g. color, alpha).
    """
    # Per-point nugget: noise shrinks as |d| grows; the 0.1 floor avoids
    # division by zero when an observation is exactly 0.
    noise = sigma ** 2 / (0.1 + d.astype('float') ** 2)
    model = GaussianProcess(corr='squared_exponential', nugget=noise)
    model.fit(np.atleast_2d(range(n)).T, np.atleast_2d(d).T)

    # Prediction grid: np.linspace's default 50 points across [0, n-1].
    grid = np.atleast_2d(np.linspace(0, n - 1)).T
    mean, mse = model.predict(grid, eval_MSE=True)

    pylab.plot(grid, mean)
    # NOTE(review): the band is mean +/- MSE (a variance), not +/- sqrt(MSE)
    # (a std. deviation) — confirm this is the intended visualization.
    pylab.fill_between(grid.T[0], mean + mse, mean - mse, **fillargs)
# --- Baseline: sklearn decision-tree regressor, timed end to end ---
print("Sklearn RT")
t0 = time.time()
rt_sklearn = DecisionTreeRegressor(max_depth=7, max_features="sqrt",
                                   random_state=2016).fit(X_train, y_train)
y_pred = rt_sklearn.predict(X_test)
print("Time taken: %0.3f" % (time.time() - t0))
score = mean_absolute_error(y_test, y_pred)
print("Error: %0.3f" % score)
print("")

# --- sklearn Gaussian process (legacy GaussianProcess API), timed separately ---
print("Sklearn GP")  # fixed user-facing typo: was "Skearn GP"
# BUG FIX: reset the timer here. Previously t0 was only set before the
# decision tree, so the GP's reported "Time taken" included the tree's
# fit + predict time as well.
t0 = time.time()
gp = GaussianProcess(regr="constant", corr='absolute_exponential',
                     beta0=None, storage_mode='full', verbose=False,
                     theta0=0.1, thetaL=None, thetaU=None,
                     optimizer='fmin_cobyla', random_start=1,
                     normalize=True, nugget=0.05,
                     random_state=2016).fit(X_train, y_train)
y_pred = gp.predict(X_test)
print("Time taken: %0.3f" % (time.time() - t0))
score = mean_absolute_error(y_test, y_pred)
print("Error: %0.3f" % score)
print("")