Code example #1
print __doc__

from scikits.learn import datasets, cross_val, metrics
from scikits.learn.gaussian_process import GaussianProcess
import pylab as pl

# Load the dataset from scikits' data sets
diabetes = datasets.load_diabetes()
X, y = diabetes['data'], diabetes['target']

# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch',
                     verbose=False)

# Fit the GP model to the data
gp.fit(X, y)
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta  # Given correlation parameter = MLE
gp.thetaL = None  # None bounds deactivate MLE
gp.thetaU = None
gp.verbose = False

# Estimate the leave-one-out predictions using the cross_val module
n_jobs = 2  # the number of parallel jobs available on the machine
y_pred = y + cross_val.cross_val_score(gp, X, y=y,
                                       cv=cross_val.LeaveOneOut(y.size),
                                       n_jobs=n_jobs).ravel()

# Compute the empirical explained variance
Q2 = metrics.explained_variance_score(y, y_pred)

# Goodness-of-fit plot
pl.figure()
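# (Sketch) The snippet stops at pl.figure(); a minimal goodness-of-fit plot,
# assuming pylab is imported as pl, could compare the observations to the
# leave-one-out predictions along the identity line:
pl.title('Goodness-of-fit plot (Q2 = %1.2e)' % Q2)
pl.plot(y, y_pred, 'r.', label='Leave-one-out predictions')
pl.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', label='Identity')
pl.xlabel('Observations')
pl.ylabel('Predictions')
pl.legend(loc='upper left')
pl.show()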
Code example #2
from scikits.learn import datasets
from scikits.learn.gaussian_process import GaussianProcess
from scikits.learn.cross_val import cross_val_score, KFold
from scikits.learn.metrics import r2_score

# Load the dataset from scikits' data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Instantiate a GP model
gp = GaussianProcess(regr='constant',
                     corr='absolute_exponential',
                     theta0=[1e-4] * 10,
                     thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10,
                     nugget=1e-2,
                     optimizer='Welch')

# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)

# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta  # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None  # None bounds deactivate MLE

# Perform a cross-validation estimate of the coefficient of determination with
# the cross_val module, using all CPUs available on the machine
K = 20  # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=-1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s" %
      (K, R2))
Code example #3
# Author: Vincent Dubourg <*****@*****.**>
# License: BSD style

from scikits.learn import datasets
from scikits.learn.gaussian_process import GaussianProcess
from scikits.learn.cross_val import cross_val_score, KFold

# Load the dataset from scikits' data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target

# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
                     theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
                     thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')

# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)

# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE

# Perform a cross-validation estimate of the coefficient of determination with
# the cross_val module, using all CPUs available on the machine
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=-1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
    % (K, R2))