Example #1
def predict(Theta1, Theta2, X):
    m = X.shape[0]
    # Input layer
    X = np.hstack((np.ones((m, 1)), X))
    z2 = np.dot(Theta1, X.T).T
    # Hidden layer
    a2 = np.hstack((np.ones((z2.shape[0], 1)), sigmoid(z2)))
    z3 = np.dot(Theta2, a2.T)
    # Output layer
    hypothesis = sigmoid(z3).T
    p = np.argmax(hypothesis, axis=1)
    return p + 1
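A quick way to sanity-check this feed-forward predict is to call it with randomly initialized weights. The layer sizes below (400 inputs, 25 hidden units, 10 labels) are only an assumption for illustration, and sigmoid must already be in scope as in the snippet above.
import numpy as np
np.random.seed(0)
Theta1 = np.random.randn(25, 401) * 0.01   # assumed shape: (hidden units, inputs + 1)
Theta2 = np.random.randn(10, 26) * 0.01    # assumed shape: (labels, hidden units + 1)
X = np.random.rand(5, 400)                  # made-up input, one row per example
p = predict(Theta1, Theta2, X)
print(p.shape, p.min() >= 1, p.max() <= 10)  # (5,) True True -- labels are mapped to 1..10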
def costFunction_2(theta, x, y):
    import numpy as np
    from ex2_logistic_regression.sigmoid import sigmoid
    m, n = x.shape
    theta = theta.reshape((n, 1))
    y = y.reshape((m, 1))
    term1 = np.log(sigmoid(x.dot(theta)))
    term2 = np.log(1 - sigmoid(x.dot(theta)))
    term1 = term1.reshape((m, 1))
    term2 = term2.reshape((m, 1))
    term = y * term1 + (1 - y) * term2
    return -((np.sum(term)) / m)
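Since costFunction_2 returns a plain scalar, it can be handed straight to a SciPy optimizer. The data below is made up purely for illustration, and the sketch assumes ex2_logistic_regression.sigmoid is importable as in the snippet.
import numpy as np
from scipy.optimize import minimize

x = np.hstack((np.ones((4, 1)), np.array([[1.0], [2.0], [3.0], [4.0]])))  # bias column + one feature
y = np.array([0.0, 1.0, 0.0, 1.0])                                         # hypothetical labels
res = minimize(costFunction_2, np.zeros(2), args=(x, y), method='Nelder-Mead')
print(res.fun)   # cost at the fitted theta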
Example #3
def predictOneVsAll(all_theta, X):
    m = X.shape[0]
    X = np.hstack((np.ones((m, 1)), X))
    z = np.dot(X, all_theta.T)
    p = np.argmax(sigmoid(z), axis=1)
    # Map from 1 to 10
    return p + 1
Example #4
def predict(theta, X):
    import numpy as np
    from ex2_logistic_regression.sigmoid import sigmoid
    sigValue = sigmoid(np.dot(X, theta))
    p = sigValue >= 0.5

    return p
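A tiny hand-made example of the 0.5 threshold (hypothetical values; the bias column is already included in X):
import numpy as np
theta = np.array([0.0, 1.0])                # hypothetical parameters
X = np.array([[1.0, -2.0], [1.0, 3.0]])     # two examples with the bias column prepended
print(predict(theta, X))                    # [False  True]: sigmoid(-2) < 0.5 <= sigmoid(3)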
Example #5
def plotDecisionBoundary(theta, X, y):
    x1_min, x1_max = X[:, 1].min(), X[:, 1].max()
    x2_min, x2_max = X[:, 2].min(), X[:, 2].max()
    x1_vec, x2_vec = np.meshgrid(np.linspace(x1_min, x1_max),
                                 np.linspace(x2_min, x2_max))
    new_X = np.column_stack(
        (np.ones(x1_vec.flatten().size), x1_vec.flatten(), x2_vec.flatten()))
    hypothesis = sigmoid(np.dot(new_X, theta)).reshape(x1_vec.shape)
    plt.contour(x1_vec, x2_vec, hypothesis, levels=[0.5], colors='b')
    return


# file = np.loadtxt("ex2data1.txt", delimiter=",")
# X = file[..., :-1]
# y = file[..., -1]
# X = np.insert(X, 0, 1, axis=1)
# num_features = np.size(X, 1)
# theta = np.zeros(num_features)
# # Optimization terminated successfully.
# #          Current function value: 0.204316
# #          Iterations: 25
# #          Function evaluations: 63
# #          Gradient evaluations: 207
# #          Hessian evaluations: 0
# # [-22.90706342   0.18820183   0.18323267]
# final_theta = optimize_J(theta, X, y)
# plotDecisionBoundary(final_theta, X, y)
# p = predict(final_theta, X)
# # 89
# print(np.mean(p == y) * 100)
Example #6
def plotDecisionBoundaryReg(theta, X, y):
    plt = plotData(X, y, ('y = 1', 'y = 0'))
    x1_vec, x2_vec = np.meshgrid(np.linspace(-1, 1.5), np.linspace(-1, 1.5))
    hypothesis = sigmoid(np.dot(mapFeature(x1_vec.flatten(), x2_vec.flatten()), theta)).reshape(x1_vec.shape)
    plt.contour(x1_vec, x2_vec, hypothesis, levels=[0.5], colors='g')
    plt.show()
    return
def costFunctionReg(theta, x, y, _lambda):
    import numpy as np
    from ex2_logistic_regression.sigmoid import sigmoid
    h = sigmoid(np.dot(x, theta.T))

    m, _ = np.shape(x)
    cost = (np.dot(-y, np.log(h)) - np.dot(1 - y, (np.log(1 - h))))
    reg_param = np.sum(np.power(theta[1:], 2)) * _lambda / 2  # excludes the bias term theta[0]
    return (cost + reg_param) / m
def compute_grad_2(theta, x, y):
    import numpy as np
    x_row, x_col = np.shape(x)
    theta = theta.reshape((x_col, 1))
    y = y.reshape((x_row, 1))
    from ex2_logistic_regression.sigmoid import sigmoid
    sigmoid_x_theta = sigmoid(x.dot(theta))
    grad = (x.T.dot(sigmoid_x_theta - y)) / x_row
    return grad.flatten()
def costFunction(theta, x, y):
    import numpy as np
    x_row, x_col = np.shape(x)
    sum_l = 0.0
    from ex2_logistic_regression.sigmoid import sigmoid
    for r in range(x_row):
        cost_row = sigmoid(theta.dot(x[r]))
        y_row = y[r]
        sum_l += (-y_row) * np.log(cost_row) - (1 - y_row) * np.log(1 - cost_row)
    return sum_l / x_row
def compute_grad_reg(theta, x, y, _lambda):
    import numpy as np
    m, n = np.shape(x)
    from ex2_logistic_regression.sigmoid import sigmoid

    h = sigmoid(np.dot(x, theta.T))
    t = h - y
    # Unregularized gradient: (1/m) * X^T (h - y)
    grad = np.dot(t, x) / m

    # Add the regularization term for every parameter except the bias theta[0]
    for c in range(1, n):
        grad[c] = grad[c] + _lambda / m * theta[c]
    return grad
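If costFunctionReg and compute_grad_reg above live in the same module, a quick finite-difference check can confirm that the analytic gradient matches the regularized cost (made-up data; assumes ex2_logistic_regression.sigmoid is importable):
import numpy as np
np.random.seed(0)
x = np.hstack((np.ones((5, 1)), np.random.randn(5, 2)))   # hypothetical design matrix with bias column
y = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
theta = np.random.randn(3)
g = compute_grad_reg(theta, x, y, 1.0)
eps = 1e-5
num = np.array([(costFunctionReg(theta + eps * e, x, y, 1.0) -
                 costFunctionReg(theta - eps * e, x, y, 1.0)) / (2 * eps)
                for e in np.eye(3)])
print(np.max(np.abs(g - num)))   # should be tiny if the two snippets agree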
Example #11
def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels,
                   X, y, L):
    m = X.shape[0]
    # Pulling original thetas out of nn_params
    theta1_shape = (hidden_layer_size, (input_layer_size + 1))
    theta1_size = theta1_shape[0] * theta1_shape[1]
    theta2_shape = (num_labels, (hidden_layer_size + 1))
    theta1, theta2 = (nn_params[:theta1_size].reshape(theta1_shape),
                      nn_params[theta1_size:].reshape(theta2_shape))
    # Input layer
    X = np.hstack((np.ones((m, 1)), X))
    z2 = np.dot(theta1, X.T).T
    # Hidden layer
    a2 = np.hstack((np.ones((z2.shape[0], 1)), sigmoid(z2)))
    z3 = np.dot(theta2, a2.T)
    # Output layer
    hypothesis = sigmoid(z3).T
    # Vectorize y for neural network
    new_y = np.zeros((m, num_labels))
    for i in range(m):
        new_y[i, y[i] - 1] = 1
    product = np.multiply(-new_y, np.log(hypothesis)) - np.multiply(
        1 - new_y, np.log(1 - hypothesis))
    J = np.sum(product) / m
    J_reg = (np.sum(np.square(theta1[:, 1:])) +
             np.sum(np.square(theta2[:, 1:]))) * (L / (2 * m))
    J_total = J + J_reg
    # Performing backpropagation
    d3 = hypothesis - new_y
    z2_deriv = sigmoidGradient(z2)
    d2 = np.dot(d3, theta2[:, 1:]) * z2_deriv
    delta1 = np.dot(d2.T, X)
    delta2 = np.dot(d3.T, a2)
    Theta1_grad = delta1 / m
    Theta2_grad = delta2 / m
    # Regularization of the gradient
    Theta1_grad[:, 1:] = Theta1_grad[:, 1:] + L / m * theta1[:, 1:]
    Theta2_grad[:, 1:] = Theta2_grad[:, 1:] + L / m * theta2[:, 1:]
    grad = np.concatenate((Theta1_grad.flatten(), Theta2_grad.flatten()))
    return (J_total, grad)
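A hypothetical smoke test with tiny layer sizes shows how the two weight matrices are unrolled into nn_params and that the returned gradient has matching length (assumes sigmoid and sigmoidGradient are in scope):
import numpy as np
np.random.seed(1)
theta1 = np.random.randn(4, 4) * 0.1     # (hidden_layer_size, input_layer_size + 1), assumed sizes
theta2 = np.random.randn(2, 5) * 0.1     # (num_labels, hidden_layer_size + 1)
nn_params = np.concatenate((theta1.flatten(), theta2.flatten()))
X = np.random.rand(6, 3)                  # made-up inputs
y = np.array([1, 2, 1, 2, 1, 2])          # labels in 1..num_labels
J, grad = nnCostFunction(nn_params, 3, 4, 2, X, y, 1.0)
print(J, grad.shape)                      # grad.shape == nn_params.shape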
def compute_grad(theta, x, y):
    import numpy as np
    x_row, x_col = np.shape(x)
    from ex2_logistic_regression.sigmoid import sigmoid
    grad = [0] * x_col
    for r in range(x_row):
        cost_row = sigmoid(theta.dot(x[r]))
        y_row = y[r]
        for c in range(x_col):
            grad[c] += ((cost_row - y_row) * x[r][c])

    grad = np.asarray(grad) / x_row
    return grad
Example #13
    def test_predict(self):
        from utils import file_utils
        x, y = file_utils.read_csv_split_last_col_and_add_one(data_file_path)
        from ex2_logistic_regression.ex2 import line_regression_by_fmin
        ret = line_regression_by_fmin(x, y)
        self.assertAlmostEqual(ret.fun, 0.203, delta=0.01)
        from ex2_logistic_regression.sigmoid import sigmoid
        ret_p = sigmoid(ret.x.dot([1, 45, 85]))
        self.assertAlmostEqual(ret_p, 0.776, delta=0.01)
        from ex2_logistic_regression.predict import predict
        p = predict(ret.x, x)
        count = 0
        for r in range(len(p)):
            if p[r] == y[r]:
                count = count + 1

        self.assertEqual(count, 89)
Example #14
def predict(theta, X):
    hypothesis = sigmoid(np.dot(theta, X.T))
    return (hypothesis >= 0.5).astype(int)
Example #15
def costFunction(theta, X, y):
    m = y.size
    hypothesis = sigmoid(np.dot(theta, X.T))
    return (np.dot(-y, np.log(hypothesis)) -
            np.dot(1 - y, np.log(1 - hypothesis))) / m
Example #16
def gradient(theta, X, y):
    m = y.size
    hypothesis = sigmoid(np.dot(theta, X.T))
    return (np.dot(X.T, hypothesis - y)) / m
    def test_sigmoid(self):
        from ex2_logistic_regression.sigmoid import sigmoid
        ret = sigmoid(-1)
        self.assertAlmostEqual(ret, 0.268, delta=0.01)
        ret = sigmoid(0)
        self.assertAlmostEqual(ret, 0.5, delta=0.01)
Example #18
def predict(Theta1, Theta2, X):
    m = X.shape[0]
    h1 = sigmoid((np.dot(Theta1, np.hstack((np.ones((m, 1)), X)).T))).T
    h2 = sigmoid((np.dot(Theta2, np.hstack((np.ones((m, 1)), h1)).T))).T
    # h2 is already the sigmoid of the output layer, so argmax is taken on it directly
    return np.argmax(h2, axis=1) + 1
Example #19
def sigmoidGradient(z):
    return sigmoid(z) * (1 - sigmoid(z))
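For completeness, a minimal sketch of the sigmoid helper that the snippets import as ex2_logistic_regression.sigmoid (the standard logistic function):
import numpy as np

def sigmoid(z):
    # 1 / (1 + e^-z), applied element-wise to scalars or arrays
    return 1.0 / (1.0 + np.exp(-z))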