Code example #1
0
# ElasticNet (enet and alpha are defined above): fit on the training split,
# predict on the held-out split, and report R^2.
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)

# Lasso with the same regularization strength, for comparison.
lasso = Lasso(alpha=alpha)

y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)

# Echo regression (project module `er`); alpha scaled up by 5 empirically.
echo = er.EchoRegression(alpha=5 * alpha)
y_pred_echo = echo.fit(X_train, y_train).predict(X_test)
np.set_printoptions(precision=3, suppress=True)
# FIX: Python 2 print statement -> Python 3 print() call, consistent with the
# other prints in this file.
print(np.sort(echo.a.ravel()))
r2_score_echo = r2_score(y_test, y_pred_echo)
print('echo')
print("r^2 on test data : %f" % r2_score_echo)

# Echo regression again, with the diagonal-covariance assumption enabled.
echo2 = er.EchoRegression(alpha=5 * alpha, assume_diagonal=True)
y_pred_echo = echo2.fit(X_train, y_train).predict(X_test)
# FIX: Python 2 print statement -> Python 3 print() call.
print(np.sort(echo2.a.ravel()))
r2_score_echo2 = r2_score(y_test, y_pred_echo)
print('echo')
print("r^2 on test data : %f" % r2_score_echo2)
Code example #2
0
                         color="navy", lw=lw)
        plt.legend(loc="best")
        plt.savefig('figures/syn_best_alpha_{}.png'.format(method_name))
        plt.clf()

# Fixed seed so the synthetic data (and hence the plots) are reproducible.
np.random.seed(0)
n_samples = 200
n_features = 500
e_rank = 30
# Build a low-rank design matrix with one extra column; that last column is
# split off below to serve as the regression target.
X = make_low_rank_matrix(n_samples=n_samples,
                         n_features=n_features + 1,
                         effective_rank=e_rank,
                         tail_strength=0.5)
X, y = X[:, :-1], X[:, -1]
print('data shape:', X.shape)



# Each entry: (display name, estimator, hyper-parameter to sweep, sweep values).
methods = [("Ridge", Ridge(), "alpha", np.logspace(-3, 0, 20)),
           ("LASSO", Lasso(), "alpha", np.logspace(-3, 0, 20)),
           ("Echo", er.EchoRegression(), 'alpha', np.logspace(-1, 4, 20)),
           ("OLS", LinearRegression(), "fit_intercept", [True])]

for method_name, method, param_name, param_range in methods:
    curve = validation_curve(method, X, y, cv=10,
                             scoring='neg_mean_squared_error',
                             param_name=param_name,
                             param_range=param_range)
    # validation_curve reports negated MSE; flip the sign to plot plain MSE.
    train_scores = -curve[0]
    test_scores = -curve[1]
    alpha_plot(train_scores, test_scores, param_range, method_name)
Code example #3
0
# Simulated measurements: project the ground-truth image through the sparse
# projection operator, then add Gaussian noise.
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)

# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
# Coefficients are the flattened image; reshape back to l x l pixels.
rec_l2 = rgr_ridge.coef_.reshape(l, l)

# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)

# Reconstruction with Echo regression (project module `er`).
# NOTE(review): .toarray() densifies the sparse operator — presumably
# EchoRegression cannot handle sparse input; confirm memory is acceptable.
rgr_echo = er.EchoRegression(alpha=0.05, assume_diagonal=True)
rgr_echo.fit(proj_operator.toarray(), proj.ravel())
rec_echo = rgr_echo.coef_.reshape(l, l)

# Side-by-side comparison of the original and the reconstructions.
plt.figure(figsize=(8, 3.3))
plt.subplot(141)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(142)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(143)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
Code example #4
0
File: alpha_choose.py  Project: brekelma/dsbox_corex
    X0 -= np.sum(X0, axis=0)
    X_train, X_test = X0[:n_samples, :n_features], X0[n_samples:, :n_features]
    y_train, y_test = X0[:n_samples, n_features], X0[n_samples:, n_features]
    return X_train, y_train, X_test, y_test


# Synthetic low-rank regression data; `make_regression` here is the local
# helper defined above (returns a train/test split, unlike sklearn's).
X_train, y_train, X_test, y_test = make_regression(n_samples=400, n_features=500, effective_rank=10, tail_strength=0.5)
alphas, coefs, mis = er.echo_path(X_train, y_train)

# MSE along the regularization path; coefs has one coefficient vector per column.
test_scores = [np.mean(np.square(y_test - X_test.dot(coef))) for coef in coefs.T]
train_scores = [np.mean(np.square(y_train - X_train.dot(coef))) for coef in coefs.T]
f, (ax1, ax3, ax2) = plt.subplots(1, 3, figsize=(10, 3))
# Drop the endpoints (degenerate alpha extremes) from the log-log curves.
ax1.loglog(alphas[1:-1], test_scores[1:-1], marker='.', label='Test')
ax1.loglog(alphas[1:-1], train_scores[1:-1], marker='.', label='Train')

e_model = er.EchoRegression().fit(X_train, y_train)
gams = np.sort(e_model.a[:, 0])
# Model-selection objective evaluated at each candidate gamma threshold.
objective = [np.mean(y_train**2) - np.sum((gams - gam).clip(0) + 0.5 * gam * np.log(np.clip(gam / gams, 0, 1)))  for gam in gams]
# FIX: Python 2 print statements -> Python 3 print() calls.
print('obj', objective)
print(len(gams), gams)
ax3.loglog(gams, objective, marker='.', label='Objective')
# Mark the gamma that minimizes the objective.
ax3.axvline(gams[np.argmin(objective)])

# FIX: Python 2 print statements -> Python 3 print() calls.
print('alpha', alphas)
print('test', test_scores)

ax2.plot(mis[1:-1], test_scores[1:-1], marker='.', label='Test')
ax2.plot(mis[1:-1], train_scores[1:-1], marker='.', label='Train')

# Finite-difference slope of the training curve w.r.t. alpha, rescaled so it
# fits on the same axes as the test curve.
dhy = (np.array(train_scores[1:]) - np.array(train_scores[:-1])) / (alphas[1:] - alphas[:-1])
dhy *= 0.7 * np.min(test_scores) / np.max(dhy)