Example #1
import numpy as np  # a sigmoid() helper is assumed to be in scope, as in the exercise


def costFunctionReg(theta, X, y, lmbda):
    # Initialize some useful values
    m = y.shape[0]  # number of training examples

    # You need to return the following variables correctly
    J = 0
    grad = np.zeros(theta.shape)

    # ====================== YOUR CODE HERE ======================

    def h(X, theta):
        # Linear part of the hypothesis; the caller applies the sigmoid.
        return X.dot(theta)

    # Vectorized cross-entropy cost; np.nan_to_num guards against log(0).
    J = float(-y.T * np.nan_to_num(np.log(sigmoid(h(X, theta))).T) -
              (1 - y).T * np.nan_to_num(np.log(1 - sigmoid(h(X, theta))).T)) / m
    # Regularization term; theta[0] (the intercept) is not penalized.
    reg_cost = theta.copy()
    reg_cost[0] = 0
    J += (lmbda * reg_cost.T.dot(reg_cost)) / (2 * m)

    # Gradient, with the regularization gradient added (again skipping
    # theta[0]); .A1 flattens the 1 x n matrix result to a 1-D array.
    grad = (sigmoid(h(X, theta)) - y.T).dot(X) / m
    reg_grad = theta * (float(lmbda) / m)
    reg_grad[0] = 0
    grad = grad.A1 + reg_grad

    # =============================================================

    return (J, grad)
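
A minimal calling sketch, not part of the original exercise: the .A1 flattening above implies that X and y are np.matrix objects, so the toy data below is built that way, and a local sigmoid is defined here only because the exercise normally supplies it. The values and shapes are illustrative assumptions.

import numpy as np

def sigmoid(z):
    # Standard logistic function, applied elementwise.
    return 1.0 / (1.0 + np.exp(-z))

# Toy data: 3 examples, an intercept column plus two features.
X = np.matrix([[1.0, 0.5, 1.5],
               [1.0, 2.0, 0.3],
               [1.0, 1.1, 2.2]])
y = np.matrix([[0.0], [1.0], [1.0]])
theta = np.zeros(3)  # flat 1-D array, as scipy.optimize would pass it

J, grad = costFunctionReg(theta, X, y, 1.0)
print(J)     # cost at theta = 0 is log(2), about 0.693
print(grad)  # 1-D gradient of shape (3,)

Returning a (cost, 1-D gradient) tuple matches what scipy.optimize.minimize expects when called with jac=True.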
Example #3
import numpy as np  # a sigmoid() helper is assumed to be in scope, as in the exercise


def costFunction(theta, X, y):
    # Initialize some useful values
    m = y.shape[0]  # number of training examples

    # You need to return the following variables correctly
    J = 0
    grad = np.zeros(theta.shape)

    # ====================== YOUR CODE HERE ======================

    def h(X, theta):
        # Linear part of the hypothesis; the caller applies the sigmoid.
        return X.dot(theta)

    # Vectorized cross-entropy cost; np.nan_to_num guards against log(0).
    J = float(-y.T * np.nan_to_num(np.log(sigmoid(h(X, theta))).T) -
              (1 - y).T * np.nan_to_num(np.log(1 - sigmoid(h(X, theta))).T)) / m
    # Gradient of the cost with respect to theta.
    grad = (sigmoid(h(X, theta)) - y.T).dot(X) / m

    # =============================================================

    # .A1 flattens the 1 x n matrix gradient to a 1-D array.
    return (J, grad.A1)
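
For reference, this snippet vectorizes the standard logistic regression cost and gradient:

J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Big[y^{(i)}\log h_\theta(x^{(i)}) + \big(1-y^{(i)}\big)\log\big(1-h_\theta(x^{(i)})\big)\Big],
\qquad h_\theta(x) = \mathrm{sigmoid}(\theta^{T}x)

\frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m}\sum_{i=1}^{m}\big(h_\theta(x^{(i)}) - y^{(i)}\big)\,x_j^{(i)}

Example #1 above computes the same quantities with \frac{\lambda}{2m}\sum_{j\ge 1}\theta_j^2 added to the cost and \frac{\lambda}{m}\theta_j added to each gradient component for j \ge 1 (the intercept term \theta_0 is left unregularized).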
Example #5
import numpy as np  # a sigmoid() helper is assumed to be in scope, as in the exercise


def predict(theta, X):
    m = X.shape[0]  # Number of training examples

    # You need to return the following variables correctly
    p = np.zeros(m)

    # ====================== YOUR CODE HERE ======================
    # Instructions: Complete the following code to make predictions using
    #               your learned logistic regression parameters.
    #               You should set p to a vector of 0's and 1's
    #

    # Threshold the predicted probabilities at 0.5 to get 0/1 labels.
    p = (sigmoid(X.dot(theta)) >= 0.5) * 1

    # =========================================================================

    return p
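
A minimal usage sketch for scoring training accuracy; the data and theta values here are made up for illustration, and sigmoid is again assumed to be the usual logistic helper. With a plain ndarray X, the function returns a flat 0/1 label vector.

import numpy as np

def sigmoid(z):
    # Standard logistic function, applied elementwise.
    return 1.0 / (1.0 + np.exp(-z))

# Made-up data: intercept column plus one feature, with known labels.
X = np.array([[1.0, 0.5],
              [1.0, -1.2],
              [1.0, 2.0]])
y = np.array([1, 0, 1])
theta = np.array([0.1, 1.5])  # stand-in for parameters from an optimizer

p = predict(theta, X)  # array([1, 0, 1])
print('Training accuracy: %.1f%%' % (np.mean(p == y) * 100))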