Example #1
def part2_2():
    data = np.loadtxt('ex1data1.txt', delimiter=',')
    x = data[:, 0]
    y = data[:, 1]
    m = len(y)
    X = np.c_[np.ones((m, 1)), x]
    theta = np.zeros((2, 1))
    iterations = 1500
    alpha = 0.01
    cost = compute_cost(X, y, theta)
    theta = gradient_descent(X, y, theta, alpha, iterations)
    print('cost: {0}'.format(cost))
    print('theta: {0}'.format(theta))

    predict1 = np.array([1, 3.5]).dot(theta)
    predict2 = np.array([1, 7]).dot(theta)
    print('predict1: {0}'.format(predict1))
    print('predict2: {0}'.format(predict2))

    x = np.arange(5, 22, 0.1)
    y = [theta[0] + theta[1] * xi for xi in x]
    plt.plot(x, y)
    plt.savefig('2-2.png')
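
The snippets on this page call compute_cost and gradient_descent without ever defining them; a minimal sketch consistent with how they are used here (X carrying a leading column of ones, theta as a column vector), purely for orientation, could look like the following. Note that return conventions vary between the snippets: some expect gradient_descent to return only theta, others also a cost history.

import numpy as np

def compute_cost(X, y, theta):
    # squared-error cost J(theta) = 1/(2m) * sum((X @ theta - y)^2)
    y = np.asarray(y).reshape(-1, 1)   # accept 1-D or column-vector y
    m = len(y)
    errors = X @ theta - y
    return np.sum(errors ** 2) / (2 * m)

def gradient_descent(X, y, theta, alpha, num_iters):
    # batch gradient descent: theta := theta - (alpha/m) * X^T (X @ theta - y)
    y = np.asarray(y).reshape(-1, 1)
    m = len(y)
    cost_history = np.zeros(num_iters)
    for i in range(num_iters):
        theta = theta - (alpha / m) * (X.T @ (X @ theta - y))
        cost_history[i] = compute_cost(X, y, theta)
    return theta, cost_history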
Example #2

# ===================== Part 2: Gradient descent =====================
print('Running Gradient Descent...')

X = np.c_[np.ones(m), X]  # Add a column of ones to X
theta = np.zeros(2)  # initialize fitting parameters

# Some gradient descent settings
iterations = 1500
alpha = 0.01

# Compute and display initial cost
print('Initial cost : ' + str(compute_cost(X, y, theta)) + ' (This value should be about 32.07)')

theta, J_history = gradient_descent(X, y, theta, alpha, iterations)

print('Theta found by gradient descent: ' + str(theta.reshape(2)))

# Plot the linear fit
plt.figure(0)
line1, = plt.plot(X[:, 1], np.dot(X, theta), label='Linear Regression')
plt.legend(handles=[line1])

input('Program paused. Press ENTER to continue')

# Predict values for population sizes of 35,000 and 70,000
predict1 = np.dot(np.array([1, 3.5]), theta)
print('For population = 35,000, we predict a profit of {:0.3f} (This value should be about 4519.77)'.format(predict1*10000))
predict2 = np.dot(np.array([1, 7]), theta)
print('For population = 70,000, we predict a profit of {:0.3f} (This value should be about 45342.45)'.format(predict2*10000))
Example #3
# inside the data-loading loop: line_tempt holds one comma-split line of the
# data file; the last field is the target y, the remaining fields are features
        if j != len(line_tempt) - 1:
            x_data[i, j + 1] = line_tempt[j]
        else:
            y_data[i, 0] = line_tempt[j]
# print(x_data)
# print(y_data)
file.close()
# description:x_data is a m*3 matrix, y_data is a vector
# loading end
# ignore the feature x0
x_data, mu, std = featureNormalize.feature_normalize(x_data)
#  trick!!: normalize data before adding x0
# =======================Part2. Gradient Descent ===========================
# initialize some params
alpha = 0.01
num_iters = 5000
theta = np.zeros((3, 1))
# my gradientDescent support multi-variables
theta, cost_history = gradientDescent.gradient_descent(x_data, y_data, theta,
                                                       alpha, num_iters)
# print(theta)
# =======================Part3. Predict ================
input_data = np.array([[1, 1650, 3]], dtype=np.float64)
input_data[0, 1:] -= mu[1:]
input_data[0, 1:] /= std[1:]
print("predict price is {}".format(np.dot(input_data, theta)))
# The normal equation is fairly simple; I already wrote it in
# 1_linear_regression_with_one_variable, so it is not repeated here.
# A few things to keep in mind:
# 1. Gradient descent uses feature scaling, so theta will differ from the normal-equation result.
# 2. Do not forget to apply the same feature scaling when predicting.
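
Neither the featureNormalize.feature_normalize helper nor the normal equation referred to above is shown in this fragment; a minimal sketch of both, assuming column-wise normalization with per-column mu and std, might look like:

import numpy as np

def feature_normalize(X):
    # scale each column to zero mean / unit standard deviation and return mu, std
    # so the identical scaling can be reapplied to new inputs at prediction time
    mu = X.mean(axis=0)
    std = X.std(axis=0)
    std = np.where(std == 0, 1, std)   # guard against constant columns
    return (X - mu) / std, mu, std

def normal_equation(X, y):
    # closed-form solution the comments above refer to: theta = pinv(X^T X) X^T y
    return np.linalg.pinv(X.T @ X) @ X.T @ y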
Example #4
# create the design matrix A (x plus a bias column) and the target vector B
B = np.zeros([len(X), 1])
A = np.zeros([len(X), 2])

for i in range(len(X)):
    B[i, 0] = Y[i]
    A[i, 0] = X[i]
    A[i, 1] = 1

# gradient descent optimization
iterations = 100
learning_rate = 0.0001

theta = np.array([[2], [9]])  # initial condition

theta, cost_history = grad.gradient_descent(A, B, theta, learning_rate,
                                            iterations)

print(theta)

# plot cost function
iters = []
for i in range(iterations):
    iters.append(i)

YP = np.dot(A, theta)

plt.figure()
plt.subplot(121)
plt.plot(iters, cost_history, 'r-')
plt.xlabel('iteration')
plt.ylabel('cost')
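
The snippet stops after the left subplot; a plausible continuation for the right-hand panel, plotting the data together with the fitted line YP (names taken from the snippet above), could be:

# right-hand panel: training data and the fitted line A @ theta
plt.subplot(122)
plt.plot(X, Y, 'b.', label='data')
plt.plot(X, YP, 'g-', label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()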
Example #5
m = len(x_data)
theta = np.zeros((2, 1))
x_data = np.array(x_data)
y_data = np.array(y_data)
x_data = x_data[:, np.newaxis]
y_data = y_data[:, np.newaxis]
x_data = np.column_stack((np.ones(m), x_data))
# some gradient_descent settings
iterations = 1500
alpha = 0.01
# compute cost
J = computeCost.compute_cost(x_data, y_data, theta)
print("with theta is {},cost is {}".format(theta, J))
print("expected cost is 32.07")
# begin gradient descent
theta, cost_history = gradientDescent.gradient_descent(x_data, y_data, theta,
                                                       alpha, iterations)
print("after {} iterations,theta is {}".format(iterations, theta))
print("expected theta is [-3.6303,1.1664]")
# plot the linear fit
plt.plot(x_data[:, 1], np.dot(x_data, theta))
plt.scatter(x_data[:, 1], y_data, marker="*", edgecolors="red")
plt.show()
# ============= Part 4: Visualizing J(theta_0, theta_1) =============
theta0_vals = np.linspace(-10, 10, 100)
theta1_vals = np.linspace(-1, 4, 100)
J_vals = np.zeros((theta0_vals.shape[0], theta1_vals.shape[0]))
for i in range(len(theta0_vals)):
    for j in range(len(theta1_vals)):
        # hint: each grid cell pairs one theta0 value with one theta1 value for the 3D plot
        t = np.vstack((theta0_vals[i], theta1_vals[j]))
        J_vals[i, j] = computeCost.compute_cost(x_data, y_data, t)
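
The grid above is only filled, never plotted; a sketch of the surface and contour views it is presumably meant to feed (matplotlib's 3-D projection assumed) might be:

# surface on the left, contour with the fitted theta marked on the right;
# J_vals is indexed as [i, j] = [theta0 index, theta1 index]
T0, T1 = np.meshgrid(theta0_vals, theta1_vals, indexing='ij')
fig = plt.figure()
ax = fig.add_subplot(121, projection='3d')
ax.plot_surface(T0, T1, J_vals)
ax.set_xlabel('theta_0')
ax.set_ylabel('theta_1')
plt.subplot(122)
plt.contour(T0, T1, J_vals, levels=np.logspace(-2, 3, 20))
plt.plot(theta[0], theta[1], 'rx')
plt.xlabel('theta_0')
plt.ylabel('theta_1')
plt.show()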
Example #6
    iterations = 1500
    alpha = 0.01
    print('\nTesting the cost function ...\n')
    # compute and display initial cost
    J = compute_cost(X, y, theta)
    print('With theta = [0 ; 0]\nCost computed = %f\n' % J[0])
    print('Expected cost value (approx) 32.07\n')
    J = compute_cost(X, y, np.array(([-1], [2])))
    print('\nWith theta = [-1 ; 2]\nCost computed = %f\n' % J[0])
    print('Expected cost value (approx) 54.24\n')
    print('Program paused. Press enter to continue.\n')
    # pause_func()

    print('\nRunning Gradient Descent ...\n')
    # run gradient descent
    theta = gradient_descent(X, y, theta, alpha, iterations)
    # print theta to screen
    print('Theta found by gradient descent:\n')
    print(theta)
    print('Expected theta values (approx)\n')
    print(' -3.6303\n  1.1664\n\n')

    plt.plot(X[:, 1], np.dot(X, theta), '-', label="Linear regression")
    plt.legend()
    plt.pause(3)
    plt.close()

    predict1 = np.dot(np.array(([1, 3.5])), theta)
    print('For population = 35,000, we predict a profit of ', predict1 * 10000, '\n')
    predict2 = np.dot(np.array(([1, 7])), theta)
    print('For population = 70,000, we predict a profit of ', predict2 * 10000, '\n')
Example #7
# Compute and display initial cost
J = compute_cost(x, y, theta)
print('With theta = [0  0]\nCost computed =', J[0][0], '\n')
print('Expected cost value (approx) 32.07\n')

# Further testing of the cost function
theta = np.array([[-1], [2]])
J = compute_cost(x, y, theta)
print('\nWith theta = [-1  2]\nCost computed =', J[0][0], '\n')
print('Expected cost value (approx) 54.24\n')

input('Program paused. Press enter to continue.\n')

print('\nRunning Gradient Descent ...\n')
# Run gradient descent
theta, j_hist = gradient_descent(x, y, theta, alpha, num_iters)

# Print theta to screen
print('Theta found by gradient descent:\n')
print(theta[0][0])
print(theta[1][0])
print('Expected theta values (approx)\n')
print(' -3.6303\n  1.1664\n\n')

# Plot the linear fit

plt.plot(x[:, [1]], np.dot(x, theta), '-')
plt.legend(('Training data', 'Linear regression'))

# Predict values for population sizes of 35,000 and 70,000
predict1 = np.dot(np.array([1, 3.5]), theta)
Example #8
    def find_weights(self, initial_weights, accuracy):
        # delegate to the gradient-descent helper, passing the objective Q and its gradient
        return grad.gradient_descent(self.Q, initial_weights, accuracy, self.gradQ)
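
This fragment calls a gradient_descent with a different signature (an objective Q, a starting point, an accuracy threshold, and the gradient of Q); a minimal, purely illustrative variant matching that call might be:

import numpy as np

def gradient_descent(Q, w, accuracy, gradQ, step=0.01, max_iters=100000):
    # follow -gradQ(w) with a fixed step until the update is smaller than `accuracy`;
    # Q itself is only needed if you add a line search or want to log the objective
    for _ in range(max_iters):
        w_next = w - step * gradQ(w)
        if np.linalg.norm(w_next - w) < accuracy:
            return w_next
        w = w_next
    return w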
Example #9
    plt.show()

    # Feature Normalize
    X_norm, mu, sigma = feature_normalize(X)

    # Append x-sub-0 (vector of 1's)
    X_norm = np.concatenate((np.ones((m, 1)), X_norm), axis=1)
    ''' ================ Part 2: Gradient Descent ================ '''

    # Set gradient descent parameters
    alpha = 0.01
    iterations = 1000
    thetas1 = np.zeros((3, 1))

    # Gradient descent
    thetas1, j_history = gradient_descent(X_norm, y, thetas1, alpha,
                                          iterations)

    # Prediction test
    test1 = np.array([1650, 3]).reshape(1, 2)
    test1 = (test1 - mu) / sigma  # normalize input values
    test1 = np.concatenate((np.ones((1, 1)), test1), axis=1)
    predict1 = np.dot(test1, thetas1)
    # print(thetas1)
    print('Predicted price of a 1650 sq-ft, 3 br house: \n', predict1[0][0])
    ''' ================ Part 3: Normal Equations ================ '''

    # Let's try another way...
    X_copy = X.copy()

    # Append x-sub-0 (vector of 1's)
    X_copy = np.concatenate((np.ones((m, 1)), X_copy), axis=1)
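
The snippet is cut off right after building X_copy; the normal-equation comparison it sets up would presumably finish along these lines (a sketch, not the author's code):

    # closed-form fit on the un-normalized data: theta = pinv(X^T X) X^T y
    thetas2 = np.linalg.pinv(X_copy.T @ X_copy) @ X_copy.T @ y

    # predict the same 1650 sq-ft, 3 br house -- no feature scaling needed here
    test2 = np.array([[1, 1650, 3]])
    predict2 = (test2 @ thetas2).item()
    print('Predicted price of a 1650 sq-ft, 3 br house (normal equations): \n', predict2)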
Example #10
#                different values of alpha and see which one gives
#                you the best result.
#
#                Finally, you should complete the code at the end
#                to predict the price of a 1650 sq-ft, 3 br house.
#
# Hint: At prediction, make sure you do the same feature normalization.
print('Running gradient descent ...')

# Choose some alpha value
alpha = 0.03
num_iters = 400

# Init theta and Run Gradient Descent
theta = np.zeros(3)
theta, J_history = gradient_descent(X, y, theta, alpha, num_iters)

# Plot the convergence graph
plt.figure()
plt.plot(np.arange(J_history.size), J_history)
plt.xlabel('Number of iterations')
plt.ylabel('Cost J')

# Display gradient descent's result
print('Theta computed from gradient descent : \n{}'.format(theta))

# Estimate the price of a 1650 sq-ft, 3 br house
# ===================== Your Code Here =====================
# Recall that the first column of X is all-ones. Thus, it does
# not need to be normalized.
price = 0  # You should change this
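
The starter code leaves `price` at 0; assuming the non-bias columns of X were normalized earlier with mu and sigma returned by a feature-normalization step (names not shown in this fragment), the estimate could be completed roughly as:

# hypothetical completion: scale the query with the training mu/sigma,
# prepend the bias term, then apply the theta learned above
house = (np.array([1650.0, 3.0]) - mu) / sigma
price = np.dot(np.concatenate(([1.0], house)), theta)
print('Predicted price of a 1650 sq-ft, 3 br house '
      '(using gradient descent): {:.2f}'.format(price))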