Example #1
def cost_function_reg(theta, X, y, lamb):
    # Regularized logistic-regression cost: reuses the unregularized
    # cost_function and adds the L2 penalty, skipping the intercept theta[0]
    m = y.size
    J, grad = cost_function(theta, X, y)
    theta_ex0 = theta[1:]                       # exclude the intercept from regularization
    J += lamb * np.sum(theta_ex0**2) / (2 * m)
    grad += np.r_[0, theta_ex0] * (lamb / m)    # no penalty term for the intercept gradient
    return J, grad
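Example #1 delegates to an unregularized cost_function that is not shown; a minimal sketch of what such a helper usually looks like for logistic regression (the sigmoid helper and the exact array shapes are assumptions, not taken from the source):

import numpy as np

def sigmoid(z):
    # Logistic function applied element-wise
    return 1.0 / (1.0 + np.exp(-z))

def cost_function(theta, X, y):
    # Unregularized cross-entropy cost and its gradient for logistic regression
    m = y.size
    h = sigmoid(X.dot(theta))
    J = -(y.dot(np.log(h)) + (1 - y).dot(np.log(1 - h))) / m
    grad = X.T.dot(h - y) / m
    return J, grad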
Example #2
def gradient_descent(X, y, theta, alpha, iterations):
    m = len(y)
    j_history = np.zeros((iterations, 1))
    X_transpose = np.transpose(X)      # constant, so compute it once outside the loop
    for index in range(iterations):
        hypothesis = np.dot(X, theta)
        # Vectorized update: theta := theta - (alpha/m) * X^T (X*theta - y)
        theta = theta - (alpha / m) * np.dot(X_transpose, hypothesis - y)
        j_history[index] = cost_function(X, y, theta)
    return theta, j_history
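Example #2 in turn assumes a cost_function(X, y, theta) helper (note the different argument order); a hedged, self-contained usage sketch with the usual squared-error cost and a tiny synthetic dataset follows — the helper body and the numbers are illustrative, not from the original source:

import numpy as np

def cost_function(X, y, theta):
    # Squared-error cost assumed for this linear hypothesis (hypothetical helper)
    m = len(y)
    residual = X.dot(theta) - y
    return float(residual.T.dot(residual)) / (2 * m)

X = np.c_[np.ones(5), np.arange(5.0)]          # intercept column plus one feature
y = (1 + 2 * np.arange(5.0)).reshape(-1, 1)    # exact line y = 1 + 2x
theta, j_history = gradient_descent(X, y, np.zeros((2, 1)), alpha=0.1, iterations=500)
print(theta.ravel())                           # converges toward [1, 2]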
Example #3
def gradient_descent(theta, x_data, y_data):
    # Plain batch gradient descent with a fixed learning rate
    alpha = 1e-3
    iters = 200000
    for i in range(iters):
        grad = costFunction.gradient(theta, x_data, y_data)  # grad is a vector
        theta -= alpha * grad
        if (i + 1) % 1000 == 0:
            print("with {} iterations, cost is {}".format(
                i + 1, costFunction.cost_function(theta, x_data, y_data)))
    return theta.flatten()
Example #4
def grad_func(t):
    return cf.cost_function(t, X, y)[1]
Example #5
def cost_func(t):
    return cf.cost_function(t, X, y)[0]
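Examples #4 and #5 are the kind of closures typically handed to a SciPy optimizer as the objective and its gradient; a hedged usage sketch (initial_theta and the iteration cap are illustrative, and X is assumed to already carry the intercept column):

import numpy as np
import scipy.optimize as opt

initial_theta = np.zeros(X.shape[1])
theta_opt = opt.fmin_bfgs(cost_func, initial_theta, fprime=grad_func, maxiter=400)
print(cost_func(theta_opt))   # final cost at the optimized parameters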
Example #6
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py

# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape

# Add intercept term
X = np.c_[np.ones(m), X]

# Initialize fitting parameters
initial_theta = np.zeros(n + 1)

# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)

np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})

print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')

# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)

print('Cost at test theta: {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
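Because cf.cost_function returns the (cost, grad) pair in a single call, it can also be passed straight to scipy.optimize.minimize with jac=True; a hedged sketch of that step (the choice of the TNC method is illustrative):

import scipy.optimize as opt

# cost_function returns (cost, grad), so jac=True lets minimize use both values
res = opt.minimize(cf.cost_function, initial_theta, args=(X, y),
                   jac=True, method='TNC')
print('Optimized cost: {:0.3f}'.format(res.fun))
print('Optimized theta: {}'.format(res.x))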
Example #7
def grad_func(t, X, y):
    t = t.reshape((len(t), 1))   # optimizers pass a flat array; restore the column shape expected by cost_function
    return cf.cost_function(t, X, y)[1]
Example #8
def gradient(t):
    return cost_function(t, X, y)[1]
Example #9
def cost_f(t):
    return cost_function(t, X, y)[0]
Example #10
import numpy as np
import pandas as pd
import costFunction as cf        # assumed name for the local module providing cost_function and gradient
from plotData import plot_data   # assumed local helper used for the Part 1 scatter plot
import plotDecisionBoundary as pdb
from sigmoid import *

data = pd.read_csv('ex2data1.txt', header=None, names=['Exam 1 score', 'Exam 2 score', 'Admission'])

# ===================== Part 1: Plotting =====================

plot_data(data)

# ===================== Part 2: Compute Cost and Gradient =====================

data.insert(0, 'ones', 1)
X = data.values[:,:-1]
y = data.values[:,-1]
theta = np.zeros(X.shape[1])
cost = cf.cost_function(theta, X, y)
grad = cf.gradient(theta, X, y)

# ===================== Part 3: Optimizing parameters theta using advanced algorithm =====================

import scipy.optimize as opt

result = opt.fmin_tnc(func=cf.cost_function, x0=theta, fprime=cf.gradient, args=(X, y))
theta = result[0]

# ===================== Part 4: Predict and Accuracies =====================

# Probability of admission for a student with scores 45 and 85
prob = sigmoid(np.dot(np.array([1, 45, 85]), theta))

def predict(theta, X):
    probability = sigmoid(X @ theta)
    return (probability >= 0.5).astype(int)   # classify as 1 when the predicted probability is at least 0.5
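A hedged follow-up showing how such a predict helper is usually wrapped up into a training-set accuracy report (the 0.5 threshold is already baked into predict above; the accuracy formula is the standard one, not taken from the source):

predictions = predict(theta, X)
accuracy = np.mean(predictions == y) * 100
print('Train accuracy: {:0.1f}%'.format(accuracy))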