Example #1
import matplotlib.pyplot as plt
from mglearn.datasets import load_extended_boston
from sklearn.linear_model import LinearRegression, Ridge

def plot_ridge_n_samples():
    X, y = load_extended_boston()

    plot_learning_curve(Ridge(alpha=1), X, y)
    plot_learning_curve(LinearRegression(), X, y)
    plt.legend(loc=(0, 1.05), ncol=2, fontsize=11)
    plt.show()
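The plot_learning_curve helper this example relies on is not shown. A minimal sketch of what it might look like, built on sklearn.model_selection.learning_curve (the body is an assumption, not the source's implementation):

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve

def plot_learning_curve(est, X, y):
    # cross-validated train/test R^2 for growing training-set sizes
    sizes, train_scores, test_scores = learning_curve(
        est, X, y, train_sizes=np.linspace(0.1, 1.0, 10), cv=5)
    name = est.__class__.__name__
    line = plt.plot(sizes, test_scores.mean(axis=1), '--', label="test " + name)
    plt.plot(sizes, train_scores.mean(axis=1), '-',
             c=line[0].get_color(), label="train " + name)
    plt.xlabel("Training set size")
    plt.ylabel("R^2 score")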
Example #2
from mglearn.datasets import load_extended_boston
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split

def linear_regression_boston():
    X, y = load_extended_boston()
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    ridge = Ridge().fit(X_train, y_train)
    print("w[0]: %f  b: %f" % (ridge.coef_[0], ridge.intercept_))
    print("Training set score : {:.2f}".format(ridge.score(X_train, y_train)))
    print("Test set score ] {:.2f}".format(ridge.score(X_test, y_test)))
Example #3
import numpy as np
from mglearn.datasets import load_extended_boston
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split

def linear_regression_boston(alpha):
    X, y = load_extended_boston()
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    lasso = Lasso(alpha=alpha, max_iter=100000).fit(X_train, y_train)
    print("-------------------")
    print("[alpha : {}]Training set score: {:.2f}".format(alpha, lasso.score(X_train, y_train)))
    print("[alpha : {}]Test set score: {:.2f}".format(alpha, lasso.score(X_test, y_test)))
    print("[alpha : {}]Number of features used: {}".format(alpha, np.sum(lasso.coef_ != 0)))
Example #4
from mglearn.datasets import load_extended_boston
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

def linear_regression_housing():
    X, y = load_extended_boston()
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    lr = LinearRegression().fit(X_train, y_train)
    print("w[0]: %f  b: %f" % (lr.coef_[0], lr.intercept_))
    print("Training set score : {:.2f}". format(lr.score(X_train, y_train)))
    print("Test set score ] {:.2f}".format(lr.score(X_test, y_test)))
Example #5
import matplotlib.pyplot as plt
import numpy as np
from mglearn.datasets import load_extended_boston
from sklearn.model_selection import train_test_split

def linear_regression_boston():
    X, y = load_extended_boston()
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    limit = np.arange(0.01, 0.50, 0.01)

    training_scores = np.zeros(len(limit), dtype=float)
    test_scores = np.zeros(len(limit), dtype=float)

    for i, alpha in enumerate(limit):
        training_scores[i], test_scores[i] = ridge_fit(alpha, X_train, X_test,
                                                       y_train, y_test)

    plt.plot(limit, test_scores, label="Test Score")
    plt.plot(limit, training_scores, label="Training Score")
    plt.legend()
    plt.show()
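The ridge_fit helper called above is not shown. A minimal sketch of what it could be (an assumption, not the source's code):

from sklearn.linear_model import Ridge

def ridge_fit(alpha, X_train, X_test, y_train, y_test):
    # fit Ridge at the given alpha and return (train R^2, test R^2)
    ridge = Ridge(alpha=alpha).fit(X_train, y_train)
    return ridge.score(X_train, y_train), ridge.score(X_test, y_test)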
Example #6
import matplotlib.pyplot as plt
import numpy as np
from mglearn.datasets import load_extended_boston
from sklearn.model_selection import train_test_split

def linear_regression_boston():
    X, y = load_extended_boston()
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    limit = np.arange(0.0001, 1.0001, 0.001)

    training_scores = np.zeros(len(limit), dtype=float)
    test_scores = np.zeros(len(limit), dtype=float)

    best_score = 0.0
    best_alpha = 0.0
    for i, alpha in enumerate(limit):
        training_scores[i], test_scores[i] = lasso_fit(alpha, X_train, X_test,
                                                       y_train, y_test)
        if best_score < test_scores[i]:
            best_score = test_scores[i]
            best_alpha = alpha

    print("Best Alpha : {}, Best Score : {}".format(best_alpha, best_score))

    plt.plot(limit, test_scores, label="Test Score")
    plt.plot(limit, training_scores, label="Training Score")
    plt.legend()
    plt.show()
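Likewise, the lasso_fit helper is not shown; a minimal sketch under the same assumption:

from sklearn.linear_model import Lasso

def lasso_fit(alpha, X_train, X_test, y_train, y_test):
    # fit Lasso at the given alpha and return (train R^2, test R^2)
    lasso = Lasso(alpha=alpha, max_iter=100000).fit(X_train, y_train)
    return lasso.score(X_train, y_train), lasso.score(X_test, y_test)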
Example #7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 11:22:00 2019

@author: prawigya
"""

from mglearn.datasets import load_extended_boston
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np

X, y = load_extended_boston()
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
lr = LinearRegression().fit(x_train, y_train)
print("Accuracy of train data: ", lr.score(x_train, y_train))
print("Accuracy of test data: ", lr.score(x_test, y_test
lasso1 = Lasso(alpha = 0.1).fit(x_train, y_train)
print("Training score ", lasso1.score(x_train, y_train))
print("Test score ", lasso1.score(x_test, y_tetst))


Example #8
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# R^2 (coefficient of determination) as a Keras metric
def r_square(y_true, y_pred):
    from keras import backend as K
    SS_res =  K.sum(K.square(y_true - y_pred)) 
    SS_tot = K.sum(K.square(y_true - K.mean(y_true))) 
    return (1 - SS_res/(SS_tot + K.epsilon()))

# the part R^2 cannot explain (i.e., 1 - R^2)
def r_square_loss(y_true, y_pred):
    from keras import backend as K
    SS_res =  K.sum(K.square(y_true - y_pred)) 
    SS_tot = K.sum(K.square(y_true - K.mean(y_true))) 
    return 1 - ( 1 - SS_res/(SS_tot + K.epsilon()))

# 1) data loading
from mglearn.datasets import load_extended_boston
df_boston_et_x, df_boston_et_y = load_extended_boston()

# 2) scaling
m_sc = StandardScaler()  # the original used an undefined alias "standard"
m_sc.fit(df_boston_et_x)
df_boston_et_x = m_sc.transform(df_boston_et_x)

# 3) data split
train_x, test_x, train_y, test_y = train_test_split(df_boston_et_x,
                                                    df_boston_et_y,
                                                    random_state=0)

# 4) build the model
nx = df_boston_et_x.shape[1]

model = Sequential()
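The snippet breaks off after creating the model. A hedged sketch of how the r_square metric above would typically be wired in (layer sizes, optimizer, and epochs here are assumptions, not from the source):

from keras.layers import Dense

model.add(Dense(32, activation='relu', input_shape=(nx,)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse', metrics=[r_square])
model.fit(train_x, train_y, epochs=100, batch_size=32,
          validation_data=(test_x, test_y), verbose=0)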
Example #9
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 01:11:10 2018

@author: Nihat Allahverdiyev
"""
import mglearn.datasets as md
from sklearn.linear_model import Ridge
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression

X, y = md.load_extended_boston()

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

lr = LinearRegression().fit(X_train, y_train)

ridge = Ridge().fit(X_train, y_train)

print("Accuracy of predicting train data: {:.2f}".format(ridge.score(
        X_train, y_train)))

print("Accuracy of predicting test data: {:.2f}".format(ridge.score(
        X_test, y_test)))

ridge10 = Ridge(alpha=10).fit(X_train, y_train)
ridge01 = Ridge(alpha=0.1).fit(X_train, y_train)

plt.plot(ridge.coef_, 's', label="Ridge alpha = 1")
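The example stops after the first plot call. A hedged sketch of how the coefficient comparison usually continues, using only the models fitted above (markers and axis labels are assumptions):

plt.plot(ridge10.coef_, '^', label="Ridge alpha = 10")
plt.plot(ridge01.coef_, 'v', label="Ridge alpha = 0.1")
plt.plot(lr.coef_, 'o', label="LinearRegression")
plt.xlabel("Coefficient index")
plt.ylabel("Coefficient magnitude")
plt.legend()
plt.show()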
Example #10
import numpy as np
from mglearn.datasets import load_extended_boston
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import train_test_split

boston_data, boston_target = load_extended_boston()

x_train, x_test, y_train, y_test = train_test_split(boston_data,
                                                    boston_target,
                                                    random_state=0,
                                                    test_size=0.3)

lasso = Lasso().fit(x_train, y_train)

print('{:.3f}'.format(lasso.score(x_train, y_train)))
# 0.265

print('{:.3f}'.format(lasso.score(x_test, y_test)))
# 0.214

print(lasso.coef_)
# array([-0.        ,  0.        , -0.        ,  0.        , -0.        ,
#         0.        , -0.        ,  0.        , -0.        , -0.        ,
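The default alpha=1.0 clearly underfits here and zeroes out most coefficients. A hedged follow-up, with illustrative alpha and max_iter values, that usually recovers more features:

lasso001 = Lasso(alpha=0.01, max_iter=100000).fit(x_train, y_train)
print('{:.3f}'.format(lasso001.score(x_test, y_test)))
print(np.sum(lasso001.coef_ != 0))  # number of non-zero coefficients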
Example #11
from mglearn import datasets
from sklearn.datasets import load_boston

boston = load_boston()
print("데이터의 형태: {}".format(boston.data.shape))

X, y = datasets.load_extended_boston()
print("X.shape: {}".format(X.shape))