Example #1
import numpy as np

# setup() and the LinearRegression class are defined elsewhere in the project
def run_linear_regression():
    print('Plotting data\n')
    features = setup()
    features.columns = ['Profits', 'CityPopulation']
    X = features.Profits
    y = features.CityPopulation
    m = len(y)
    iterations = 1500
    alpha = 0.01
    theta = np.zeros(2)  # Initial theta: one weight for the bias term, one for the feature
    lr = LinearRegression(X, y, iterations, alpha)

    lr.plot_data(X, y, 'Profits', 'City Population',
                 'Food Truck Profit v. City Pop')

    print('Testing gradient descent algorithm...\n')
    # The class is assumed to prepend the bias column of ones to X internally;
    # otherwise add it explicitly, e.g. X = np.column_stack((np.ones(m), X))

    print('Initial cost: {}'.format(lr.cost_function(X, y, theta)))

    # Run the gradient descent
    theta, cost_history = lr.gradient_descent(X, y, theta, alpha, iterations)

    print('Optimum theta found by gradient descent: {}'.format(theta))
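
The cost_function and gradient_descent methods belong to the project's LinearRegression class and are not shown in this snippet. As a point of reference, here is a minimal NumPy sketch of the squared-error cost and batch gradient descent update such a class typically implements, assuming X already carries a leading column of ones; the actual class internals may differ:

import numpy as np

def cost_function(X, y, theta):
    # Mean squared error: J(theta) = (1 / (2m)) * sum((X @ theta - y) ** 2)
    m = len(y)
    residuals = X @ theta - y
    return residuals @ residuals / (2 * m)

def gradient_descent(X, y, theta, alpha, iterations):
    # Batch update: theta := theta - (alpha / m) * X^T (X @ theta - y)
    m = len(y)
    cost_history = np.zeros(iterations)
    for i in range(iterations):
        theta = theta - (alpha / m) * (X.T @ (X @ theta - y))
        cost_history[i] = cost_function(X, y, theta)
    return theta, cost_history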
Example #2
import numpy as np
import pandas as pd

# `data` (the full DataFrame), `x` (the feature matrix), and `feature_count`
# (the index of the target column) are defined earlier in the script
y = data.iloc[:, feature_count].values
exp_y = y[int(y.size * 0.7):]  # unscaled targets of the test portion, kept for comparison

# feature scaling, so that all features are on the same scale
from FeatureScaling import FeatureScaling
fs = FeatureScaling(x, y)
fs_x = fs.fit_scaling_X()
fs_y = fs.fit_scaling_Y()

# split into training (first 70%) and testing (last 30%) examples
train_x = fs_x[0:int(fs_x.shape[0] * 0.7), :]
train_y = fs_y[:int(fs_y.size * 0.7), :]

test_x = fs_x[int(fs_x.shape[0] * 0.7):, :]
test_y = fs_y[int(fs_y.size * 0.7):, :]

# linear regression execution
from linear_regression import LinearRegression
ls = LinearRegression(train_x, train_y)
j = ls.cost_function()  # cost at the initial parameter values, before training

iters = 1000
alpha = 0.07  # learning rate

# run gradient descent, tracking the cost and theta at each iteration
theta, cost_history, theta_history = ls.gradient(iters, alpha)
y_pred, error_pred = ls.predict(test_x, theta, test_y)
y_pred = fs.inverse_fit_scaling_Y(y_pred)  # map predictions back to original units
print(
    pd.DataFrame(np.vstack([exp_y, y_pred[:, 0]]),
                 index=["Actual Values", 'Predicted Values']).T)