visualizer.score(X_test, y_test)
visualizer.show()

visualizer = ResidualsPlot(svmReg, size=(1080, 720))
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()

visualizer = ResidualsPlot(adaReg, size=(1080, 720))
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()

visualizer = ResidualsPlot(rfReg, size=(1080, 720))
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()

visualizer = ResidualsPlot(mlpReg, size=(1080, 720))
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
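# The five blocks above repeat one fit/score/show pattern per regressor; a
# minimal loop sketch (assuming the same train/test splits and regressor
# objects, and that ResidualsPlot is imported from yellowbrick.regressor):
for reg in (svmReg, adaReg, rfReg, mlpReg):
    viz = ResidualsPlot(reg, size=(1080, 720))
    viz.fit(X_train, y_train)
    viz.score(X_test, y_test)
    viz.show()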


from yellowbrick.regressor import AlphaSelection
from sklearn.linear_model import RidgeCV
model = AlphaSelection(RidgeCV())
model.fit(X_train, y_train)
model.show()
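# A minimal follow-up sketch: the alpha chosen by RidgeCV can also be read
# off programmatically (assuming Yellowbrick exposes the wrapped estimator
# as `.estimator`; `alpha_` is standard on a fitted sklearn RidgeCV):
print("Selected alpha: ", model.estimator.alpha_)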

Example 2
# min_mse = min(mse_list)
# min_mse_index = mse_list.index(min_mse)
# optimal_alpha = alphas[min_mse_index]
# print("Optimal Alpha: ", optimal_alpha)
# print("Minimum MSE: ", min_mse)
#
# plt.scatter(alphas, mse_list)
# plt.ylim((min(mse_list) - 0.0001, max(mse_list) + 0.0001))
# plt.show()

# Yellowbrick Regressor - Predict optimal alpha
import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.metrics import mean_squared_error
from yellowbrick.regressor import AlphaSelection, PredictionError

ytrain = np.reshape(ytrain, (ytrain.shape[0]))  # flatten target to 1-D
alphas = np.logspace(-10, 1, 200)
visualizer = AlphaSelection(RidgeCV(alphas=alphas))
visualizer.fit(xtrain, ytrain)
visualizer.show()

# Optimal model (alpha read off the AlphaSelection plot above)
optimal_alpha = 4.103
ridge_reg = RidgeCV(alphas=np.array([optimal_alpha]))
ridge_reg.fit(xtrain, ytrain)
# print("Coefficients: ", ridge_reg.coef_)
y_pred = ridge_reg.predict(xtest)
err = mean_squared_error(ytest, y_pred)
print("MSE for optimal model: ", err)

# Yellowbrick Regressor - Plot error
visualizer = PredictionError(ridge_reg)
visualizer.fit(xtrain, ytrain)
visualizer.score(xtest, ytest)
visualizer.show()
Example 3
## Model Evaluation & Hyperparameter Tuning ##
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from yellowbrick.regressor import AlphaSelection

# lasso_reg and elastic_reg are assumed to be CV regressors (e.g. LassoCV /
# ElasticNetCV) defined earlier, along with X_scaled, X_standard and y
# CV Root Mean Squared Error on Training Set (Robust Scaled)
cv_rmse(lasso_reg, X_scaled, np.ravel(y)) # LASSO: 0.319
cv_rmse(elastic_reg, X_scaled, np.ravel(y)) # Elastic Net (ratio = 0.5): 0.317

# CV Root Mean Squared Error on Training Set (Standardised)
cv_rmse(lasso_reg, X_standard, np.ravel(y)) # LASSO: 0.2992
cv_rmse(elastic_reg, X_standard, np.ravel(y)) # Elastic Net (ratio = 0.5): 0.3012
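# `cv_rmse` is not defined in this snippet; a minimal sketch of such a
# helper, assuming it should return the mean cross-validated RMSE:
from sklearn.model_selection import cross_val_score

def cv_rmse(model, X, y, cv=5):
    # neg_mean_squared_error returns negated MSE, so flip the sign first
    mse = -cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=cv)
    return np.sqrt(mse).mean()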


# Alpha Selection
# Note: `alphas` is built here but never handed to the model; the grid only
# takes effect if the CV estimator itself receives it (see the sketch after
# the second block below)
alphas = np.logspace(-10, 1, 400)
visualizer = AlphaSelection(elastic_reg)
visualizer.fit(X_scaled, y)
visualizer.show()  # Optimal Alpha = 0.020

alphas = np.logspace(-10, 1, 400)
visualizer = AlphaSelection(elastic_reg)
visualizer.fit(X_standard, y)
visualizer.show() # Optimal Alpha = 0.020
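# A minimal sketch making the `alphas` grid above take effect (assuming
# elastic_reg is an ElasticNetCV instance; sklearn's ElasticNetCV accepts an
# `alphas` array and an `l1_ratio`):
from sklearn.linear_model import ElasticNetCV

elastic_reg = ElasticNetCV(alphas=alphas, l1_ratio=0.5, max_iter=15000)
visualizer = AlphaSelection(elastic_reg)
visualizer.fit(X_standard, y)
visualizer.show()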

# Search Algorithms to Further tune our Hyperparameters
# RandomizedSearchCV to narrow search space
rnd_params = {"l1_ratio": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
              "alphas": [[0.1], [0.2], [0.3], [0.4], [0.5]],
              "max_iter": [15000],
              "normalize": [False]}
rnd_src = RandomizedSearchCV(elastic_reg, param_distributions=rnd_params, n_iter=100, scoring="neg_mean_squared_error", n_jobs=-1)
rnd_src.fit(X_scaled, np.ravel(y))
rnd_src.best_params_  # {'normalize': False, 'max_iter': 15000, 'l1_ratio': 0.1, 'alphas': [0.1]}
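# The comment above says RandomizedSearchCV only narrows the search space; a
# minimal follow-up sketch refining around the best parameters found, using a
# hypothetical finer grid (GridSearchCV is standard sklearn):
from sklearn.model_selection import GridSearchCV

grid_params = {"l1_ratio": [0.05, 0.1, 0.15],
               "alphas": [[0.05], [0.1], [0.15]],
               "max_iter": [15000]}
grid_src = GridSearchCV(elastic_reg, param_grid=grid_params,
                        scoring="neg_mean_squared_error", n_jobs=-1)
grid_src.fit(X_scaled, np.ravel(y))
print(grid_src.best_params_)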