Example #1
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

# LinearRegression here is a project-local implementation exposing
# .theta and .bias (see the sketch after this function)

def linear_test():
    X, y = make_regression(n_features=1, noise=20, random_state=1234)
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.33,
                                                        random_state=1234)

    lr = LinearRegression()
    lr.fit(X_train, y_train)
    y_pred = lr.predict(X_test)

    plt.figure(1, figsize=(5, 4))
    plt.scatter(X_test, y_test, c="black")
    plt.plot(X_test, lr.theta * X_test + lr.bias, linewidth=1, c="red")
    plt.axhline(0.5, color=".5")

    plt.ylabel("y")
    plt.xlabel("X")
    plt.legend(
        ("Linear Regression Model", ),
        loc="lower right",
        fontsize="small",
    )
    plt.tight_layout()
    plt.show()
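Note that the `LinearRegression` used above is not scikit-learn's: it exposes `theta` and `bias` attributes rather than `coef_` and `intercept_`. A minimal sketch of such a from-scratch class, assuming a plain gradient-descent fit (the learning rate and iteration count below are illustrative, not from the source):

import numpy as np

class LinearRegression:
    """Minimal from-scratch linear regression via gradient descent (assumed API)."""

    def __init__(self, lr=0.01, n_iters=1000):
        self.lr = lr
        self.n_iters = n_iters
        self.theta = None   # weight vector
        self.bias = 0.0     # intercept

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.theta = np.zeros(n_features)
        for _ in range(self.n_iters):
            y_pred = X @ self.theta + self.bias
            # gradients of the mean squared error
            d_theta = (2 / n_samples) * X.T @ (y_pred - y)
            d_bias = (2 / n_samples) * np.sum(y_pred - y)
            self.theta -= self.lr * d_theta
            self.bias -= self.lr * d_bias

    def predict(self, X):
        return X @ self.theta + self.bias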
Example #2
def one_hot_regression(proc, data):
    """
    Linear regression using a one-hot representation.

    :param proc: processing object
    :param data: tuple containing the train/test splits (one-hot encoded space)
    :return: fitted linear regression object
    """
    print('one hot regression...')
    # ridge regression on the one-hot features
    # unpack the train and test splits
    trainOneHotX, trainY, testOneHotX, testY = data

    ######### ridge regression #########
    linear_model = 'ridge'
    print('ridge regression with one hot representation...')
    linReg = LinearRegression(model=linear_model)
    linReg.fit(trainOneHotX, trainY)
    preds = linReg.predict(testOneHotX)
    print('test r2 score: ', metrics.r2_score(testY, preds))
    print('test mse: ', metrics.mse(testY, preds))

    return linReg
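The `metrics` module in this example is project-local (scikit-learn spells the second helper `mean_squared_error`, not `mse`). A minimal sketch of the two functions, assuming the standard definitions:

import numpy as np

def r2_score(y_true, y_pred):
    # R^2 = 1 - SS_res / SS_tot
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    return 1.0 - ss_res / ss_tot

def mse(y_true, y_pred):
    # mean of the squared residuals
    return np.mean((y_true - y_pred) ** 2)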
Example #3
    # N, D_in, D_out and the imports (numpy as np, matplotlib.pyplot as plt,
    # pyDOE's lhs, and the project-local LinearRegression/SGD classes) come
    # from earlier in the original script; the sizes below are assumed values
    # so the snippet can run on its own.
    N, D_in, D_out = 100, 1, 1

    # Add some noise to the observations
    noise_var = 0.5

    # Create random input and output data (Latin hypercube samples in [0, 1])
    X = lhs(D_in, N)
    y = 5 * X + noise_var * np.random.randn(N, D_out)

    # Define the model
    model = LinearRegression(X, y)

    # Define an optimizer
    optimizer = SGD(model.num_params, lr=1e-3, momentum=0.9)
    #    optimizer = Adam(model.num_params, lr = 1e-3)
    #    optimizer = RMSprop(model.num_params, lr = 1e-3)

    # Train the model
    model.train(10000, optimizer)

    # Print the learned parameters; theta packs the weight(s) and the log
    # noise variance, so the last entry is exponentiated to recover sigma_sq
    print('w = %e, sigma_sq = %e' %
          (float(model.theta[:-1]), float(np.exp(model.theta[-1]))))

    # Make predictions
    y_pred = model.predict(X)

    # Plot (sort by X so the prediction line is drawn left to right;
    # Latin hypercube samples are not ordered)
    order = np.argsort(X[:, 0])
    plt.figure(1)
    plt.plot(X, y, 'o')
    plt.plot(X[order], y_pred[order])
    plt.show()
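Since the learned noise variance is read back as `np.exp(model.theta[-1])`, this model evidently packs the regression weights and the log noise variance into a single parameter vector and fits both by maximum likelihood. A sketch of the corresponding Gaussian negative log-likelihood (an assumption about the loss, not code from the source):

import numpy as np

def gaussian_nll(theta, X, y):
    # theta = [w, log(sigma_sq)]: optimizing the log variance keeps
    # sigma_sq positive without a constrained optimizer
    w, log_sigma_sq = theta[:-1], theta[-1]
    sigma_sq = np.exp(log_sigma_sq)
    residuals = y.ravel() - X @ w
    n = X.shape[0]
    return 0.5 * n * np.log(2.0 * np.pi * sigma_sq) \
        + 0.5 * np.sum(residuals ** 2) / sigma_sq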
Example #4
import numpy as np
import matplotlib.pyplot as plt

# scaleFeature, lm, plotCosts and wait come from the surrounding script;
# n = 100 is an assumed value so the snippet can run on its own.
n = 100

# Generate some noisy training data
noise = np.random.randn(n)
x = np.linspace(0, 20, n)
y = 32.3 + 5.2 * (x + noise)

x = scaleFeature(x)
y = scaleFeature(y)

x = x.reshape((-1, 1))

# Train the lm and print the learned parameters (the positional arguments
# are presumably learning rate, iteration count and a regularization term;
# see the lm class for the exact signature)
lm.train(x, y, 0.5, 100, 0)
print(lm.parameters)

# Compute and plot the fitted line against the training data
y_hat = lm.predict(x)

plt.plot(x, y, 'o')
plt.plot(x, y_hat)
plt.ylabel("y")
plt.xlabel("x")
plt.title("Plot of the Model's Fit")
plt.show()

wait()

# Plot the costs to check proper convergence of gradient descent
plotCosts(lm.costsOverTime)
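`scaleFeature` is not shown in the snippet; a minimal sketch, assuming it standardizes a feature to zero mean and unit variance (min-max scaling would be an equally plausible reading):

import numpy as np

def scaleFeature(v):
    # z-score standardization: zero mean, unit variance
    return (v - np.mean(v)) / np.std(v)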
Example #5
import numpy as np
import matplotlib.pyplot as plt
from dataset_income import Data
from metrics import MSE
from models import ConstantModel, LinearRegression, LinearRegressionWithB
from gradient_descent import stochastic_gradient_descent, gradient_descent, mini_batch_gradient_descent

if __name__ == '__main__':
    dataset = Data(
        r'C:\Users\Lautaro\PycharmProjects\ceia_intro_a_IA\clase_3\ejercicios\data\income.csv'
    )

    X_train, X_test, y_train, y_test = dataset.split(0.8)

    linear_regression = LinearRegression()
    linear_regression.fit(X_train, y_train)
    lr_y_hat = linear_regression.predict(X_test)

    linear_regression_b = LinearRegressionWithB()
    linear_regression_b.fit(X_train, y_train)
    lrb_y_hat = linear_regression_b.predict(X_test)

    constant_model = ConstantModel()
    constant_model.fit(X_train, y_train)
    ct_y_hat = constant_model.predict(X_test)

    mse = MSE()
    lr_mse = mse(y_test, lr_y_hat)
    lrb_mse = mse(y_test, lrb_y_hat)
    ct_mse = mse(y_test, ct_y_hat)

    x_plot = np.linspace(0, 10, 10)
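`MSE` from the project-local `metrics` module is used as a callable object, and the two regression models differ, judging by their names, in whether they fit an intercept. A minimal sketch of the three model/metric classes, assuming closed-form least-squares fits (the real classes in `models` may instead use the gradient-descent routines imported above):

import numpy as np

class MSE:
    def __call__(self, target, prediction):
        return np.mean((target - prediction) ** 2)

class ConstantModel:
    # baseline: always predict the training mean
    def fit(self, X, y):
        self.c = np.mean(y)

    def predict(self, X):
        return np.full(len(X), self.c)

class LinearRegression:
    # least squares without an intercept: W = (X^T X)^{-1} X^T y
    def fit(self, X, y):
        X = X.reshape(-1, 1) if X.ndim == 1 else X
        self.W = np.linalg.solve(X.T @ X, X.T @ y)

    def predict(self, X):
        X = X.reshape(-1, 1) if X.ndim == 1 else X
        return X @ self.W

class LinearRegressionWithB(LinearRegression):
    # same fit, with a column of ones appended for the bias term
    def fit(self, X, y):
        super().fit(np.column_stack((X, np.ones(len(X)))), y)

    def predict(self, X):
        return np.column_stack((X, np.ones(len(X)))) @ self.W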