Example 1
train_len = len(y_train)
test_len = len(y_test)

(x_norm_train, mi, sigma) = ZNorm(x_train)
x_norm_test = (x_test - mi) / sigma

x_norm_train = np.column_stack((np.ones(train_len), x_norm_train))
x_norm_test = np.column_stack((np.ones(test_len), x_norm_test))

theta = (0.05 * np.random.randn(x_norm_train.shape[1], 1)).squeeze()
# =================== Cost and Gradient descent ===================
iterations = 10000
alpha = 0.001
alpha_increase = 5000

theta, history_train, history_test = gradientDescent(x_norm_train, x_norm_test,
                                                     y_train, y_test, theta,
                                                     alpha, iterations,
                                                     alpha_increase)
fig, ax = plt.subplots()
ax.set_xlabel('Iterations')
ax.set_ylabel('J(Θ)')
ax.set_facecolor('xkcd:charcoal')
ax.set_ylim(0, 100)
ax.plot(history_train, color="#DC143C", label='train')
ax.plot(history_test, color="blue", label='test')
ax.legend(loc="upper right")

print("^^^^^^^^^^^^")
print("train:", history_train[-1])
print("test", history_test[-1])
plt.show()
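
ZNorm and gradientDescent are not shown in this excerpt. A minimal sketch consistent with how they are called here (mean-squared-error linear regression, with the cost tracked on both the train and test sets; treating alpha_increase as the iteration at which the learning rate is bumped is an assumption):

import numpy as np

def ZNorm(x):
    # Z-score normalization: returns the scaled data plus per-column mean and std.
    mu = x.mean(axis=0)
    sigma = x.std(axis=0)
    return (x - mu) / sigma, mu, sigma

def gradientDescent(x_train, x_test, y_train, y_test, theta,
                    alpha, iterations, alpha_increase):
    # Batch gradient descent on the train set, recording the half-MSE cost on
    # both sets at every iteration. alpha_increase is assumed to be the
    # iteration at which alpha is raised (x10 here).
    m_train, m_test = len(y_train), len(y_test)
    history_train, history_test = [], []
    for it in range(iterations):
        if it == alpha_increase:
            alpha *= 10
        error = x_train @ theta - y_train
        theta = theta - (alpha / m_train) * (x_train.T @ error)
        history_train.append((error @ error) / (2 * m_train))
        test_error = x_test @ theta - y_test
        history_test.append((test_error @ test_error) / (2 * m_test))
    return theta, history_train, history_test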
Example 2
# Compute and display initial cost
print("With theta = [0 ; 0],  cost computed = %f" %
      computeCost(X, y, theta)[0])
print("Expected cost value (approx) 32.07")

# Further testing of the cost function
theta = np.array([[-1], [2]])
J = computeCost(X, y, theta)
print("With theta = [-1 ; 2], Cost computed = %f" % J[0])
print("Expected cost value (approx) 54.24.")

raw_input("Press Enter to continue...")

print("Running Gradient Descent ...")
# Run gradient descent
theta = gradientDescent(X, y, theta, alpha, iterations)

# Print theta to screen
print("Theta found by gradient descent:")
print("Theta_0: %f, Theta_1: %f" % (theta[0], theta[1]))

print('Expected theta values (approx)')
print(' -3.6303,  1.1664')

# Plot the linear fit
plt.plot(x, np.dot(X, theta), label='Linear regression')
plt.xlim(left=0)
plt.ylim(bottom=0)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Training data with Linear regression')
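
computeCost and gradientDescent are not defined in this excerpt. A minimal sketch consistent with the calls above, assuming theta and y are both kept as column vectors (which is why computeCost's result is indexed with [0]):

import numpy as np

def computeCost(X, y, theta):
    # J(theta) = 1/(2m) * sum((X @ theta - y)^2); with theta as an (n, 1)
    # column vector the result is a 1-element array, hence the [0] above.
    m = len(y)
    error = X @ theta - y
    return (error.T @ error).ravel() / (2 * m)

def gradientDescent(X, y, theta, alpha, iterations):
    # Batch gradient descent; returns the fitted theta.
    m = len(y)
    for _ in range(iterations):
        theta = theta - (alpha / m) * (X.T @ (X @ theta - y))
    return theta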
Example 3
train_pos = all_pos[:4000]
test_pos = all_pos[4000:]
train_neg = all_neg[:4000]
test_neg = all_neg[4000:]

train_x = train_pos+train_neg
test_x = test_pos+test_neg

train_y = np.append(np.ones((len(train_pos),1)), np.zeros((len(train_neg),1)), axis=0)
test_y = np.append(np.ones((len(test_pos),1)), np.zeros((len(test_neg),1)), axis=0)

freqs = build_freqs(train_x, train_y)

X = np.zeros((len(train_x), 3))
for i in range(len(train_x)):
    X[i, :]= extract_features(train_x[i], freqs)

# training labels corresponding to X
Y = train_y

# Apply gradient descent
J, theta = gradientDescent(X, Y, np.zeros((3, 1)), 1e-9, 1500)
print(f"The cost after training is {J:.8f}.")
print(f"The resulting vector of weights is {[round(t, 8) for t in np.squeeze(theta)]}")


tmp_accuracy = test_logistic_regression(test_x, test_y, freqs, theta)
print(f"Logistic regression model's accuracy = {tmp_accuracy:.4f}")



Example 4
plt.show()

# ============ Part 2: Compute Cost and Gradient ============

# Setup the data matrix appropriately, and add ones for the intercept term
n,m = X.shape

# Add intercept term to x and X_test
X = np.array([np.ones(m),X[0],X[1]])

# Initialize fitting parameters
initial_theta = np.zeros(n + 1)

# Compute and display initial cost and gradient
cost = costFunction(initial_theta, X, y)
grad = gradientDescent(initial_theta, X, y)

print('\nCost at initial theta (zeros): ', cost)
print('\nExpected cost (approx): 0.693')
print('\nGradient at initial theta (zeros): \n', grad)
print('\nExpected gradients (approx):\n [-0.1000 -12.0092 -11.2628]')

# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost = costFunction(test_theta, X, y)
grad = gradientDescent(test_theta, X, y)

print('\nCost at test theta: ', cost)
print('\nExpected cost (approx): 0.218')
print('\nGradient at test theta: \n', grad)
print('\nExpected gradients (approx):\n [0.043 2.566 2.647]\n')
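
costFunction and gradientDescent (which, as called here, returns only the gradient rather than iterating) are not shown. Note that the cost at theta = 0 is ln 2 ≈ 0.693 for any data, since sigmoid(0) = 0.5. A minimal sketch assuming the row-per-feature layout built above (X of shape (3, m)) and a flat label vector y:

import numpy as np

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def costFunction(theta, X, y):
    # Logistic-regression cost with X laid out as (n+1 features, m examples).
    m = y.size
    h = sigmoid(theta @ X)
    return -(y @ np.log(h) + (1 - y) @ np.log(1 - h)) / m

def gradientDescent(theta, X, y):
    # As used above, this returns the gradient of the cost at theta.
    m = y.size
    h = sigmoid(theta @ X)
    return X @ (h - y) / m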
Example 5
X, mu, sigma = featureNormalize(X)

X = np.array([np.ones(m), X[0], X[1]])
## ================ Part 1: Gradient Descent ================

print('Running gradient descent ...\n')

# Choose some alpha value
alpha = 0.1
num_iters = 50

# Init Theta and Run Gradient Descent
theta = np.zeros(3)

# Run gradient descent
theta, J_history = gradientDescent(X, y, theta, alpha, num_iters)

# Plot the convergence graph
plt.rcParams['figure.figsize'] = (11, 7)
plt.plot(range(J_history.size), J_history, c='b')
plt.xlabel('Number of iterations')
plt.ylabel('Cost J')
plt.show()

# Display gradient descent's result
print('Theta computed from gradient descent: \n')
print(theta)
print('\n')

# Estimate the price of a 1650 sq-ft, 3-bedroom house
# Recall that the first column of X is all-ones. Thus, it does
# not need to be normalized.
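
The excerpt breaks off before the actual prediction. A hypothetical continuation, assuming mu and sigma are the length-2 per-feature statistics returned by featureNormalize and that the two features are (area in sq-ft, number of bedrooms):

# Normalize the query point with the training statistics, prepend the
# intercept term, then take the dot product with the learned theta.
house = (np.array([1650, 3]) - mu) / sigma
price = np.dot(np.concatenate(([1], house)), theta)
print('Predicted price of a 1650 sq-ft, 3-bedroom house: $%.2f' % price)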
Example 6
#Plot the data
data = np.loadtxt(r'F:\ML\machine-learning-ex1\machine-learning-ex1\ex1\ex1data1.txt', delimiter=',')

X = np.c_[np.ones(data.shape[0]),data[:,0]]
y = np.c_[data[:,1]]

plt.scatter(X[:,1], y, s=30, c='r', marker='x', linewidths=1)
plt.xlim(4,24)
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s');

print ( functions.computeCost(X,y) )

# theta for minimized cost J
theta , Cost_J = functions.gradientDescent(X, y)
print('theta: ',theta.ravel())

plt.plot(Cost_J)
plt.ylabel('Cost J')
plt.xlabel('Iterations');

xx = np.arange(5,23)
yy = theta[0]+theta[1]*xx

# Plot gradient descent
plt.scatter(X[:,1], y, s=30, c='r', marker='x', linewidths=1)
plt.plot(xx,yy, label='Linear regression (Gradient descent)')

# Compare with Scikit-learn Linear regression 
regr = LinearRegression()
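
The excerpt ends right after constructing the estimator. A plausible continuation (assuming the LinearRegression import sits at the top of the original file, alongside numpy and matplotlib) fits scikit-learn's closed-form solution on the population feature and overlays it on the gradient-descent line:

# Assumed import, normally placed at the top of the script:
# from sklearn.linear_model import LinearRegression
regr.fit(X[:, 1].reshape(-1, 1), y.ravel())

# Overlay the scikit-learn fit for comparison with gradient descent
plt.plot(xx, regr.intercept_ + regr.coef_ * xx,
         label='Linear regression (Scikit-learn)')
plt.legend(loc=4)
plt.show()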