Example #1
def error(xi, yi, h_theta):
    '''
    Difference between predicted and observed value for a training example
    Parameters
        xi: x vector (length j+1) for training example i
        yi: y observation for training example i
        h_theta: vector of parameters (theta0...thetaj)
    Returns
        error (predicted - observed)
    '''
    return dot(h_theta, xi) - yi
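The dot helper is not shown in these examples; below is a minimal sketch of a plausible plain-Python implementation (an assumption, not part of the original snippet), followed by a small usage example.

def dot(v, w):
    # stand-in for the dot-product helper assumed by these examples
    return sum(vi * wi for vi, wi in zip(v, w))

xi = [1, 2.0]          # x0 = 1 for the intercept, x1 = 2.0
yi = 5.0
h_theta = [1.0, 1.5]   # theta0, theta1
print(error(xi, yi, h_theta))  # (1.0 + 2.0 * 1.5) - 5.0 = -1.0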
Example #3
def gradJ(X, y, h_theta):
    '''
    Gradient of the cost function for batch gradient descent in
    multiple linear regression
    Parameters
        X: matrix of independent variables (i rows of observations and j cols of variables). x0=1 for all i
        y: dependent variable (i rows)
        h_theta: coefficients (j cols)
    Returns
        Gradient of the cost function (j cols, one for each h_thetaj)
        Used to update h_theta in gradient descent
    '''
    return [dot(errors(X, y, h_theta), xj) / len(y) for xj in T(X)]
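The errors and T helpers are also assumed rather than shown; plausible sketches (assumptions, not the original code) that make gradJ runnable:

def errors(X, y, h_theta):
    # vector of per-example residuals, reusing error() from Example #1
    return [error(xi, yi, h_theta) for xi, yi in zip(X, y)]

def T(X):
    # transpose a row-major matrix so we can iterate over its columns
    return [list(col) for col in zip(*X)]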
Example #5
def grad_logistic(X, y, h_theta):
    # per-example errors: predicted probability minus observed label
    errors = [logistic(dot(h_theta, xi)) - yi for (xi, yi) in zip(X, y)]
    # one partial derivative per column of X, i.e., per parameter
    return [dot(errors, xj) for xj in T(X)]
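The logistic helper is assumed to be the standard sigmoid; a minimal sketch:

from math import exp

def logistic(z):
    # sigmoid: maps a linear score to a probability in (0, 1)
    return 1.0 / (1.0 + exp(-z))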
Example #6
def logistic_log_partial_ij(x_i, y_i, h_theta, j):
    """here i is the index of the data point,
    j the index of the derivative"""
    return (logistic(dot(x_i, h_theta)) - y_i) * x_i[j]
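A hypothetical companion (an assumption, not shown in the source) that assembles the per-coordinate partials into the full gradient for one data point:

def logistic_log_gradient_i(x_i, y_i, h_theta):
    # one partial derivative per parameter j
    return [logistic_log_partial_ij(x_i, y_i, h_theta, j)
            for j, _ in enumerate(h_theta)]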
Example #7
def logistic_log_likelihood_i(x_i, y_i, h_theta):
    # log-likelihood of one example; assumes log from the math module
    p = logistic(dot(x_i, h_theta))
    return y_i * log(p) + (1 - y_i) * log(1 - p)
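Summing the per-example terms gives the log-likelihood of the whole data set; a sketch of that aggregation (assumed, not in the source):

def logistic_log_likelihood(X, y, h_theta):
    # total log-likelihood: independent examples, so the terms add
    return sum(logistic_log_likelihood_i(x_i, y_i, h_theta)
               for x_i, y_i in zip(X, y))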
Example #8
def R(h_theta, alpha):
    # L2 (ridge) penalty; the intercept h_theta[0] is not penalized
    return 0.5 * alpha * dot(h_theta[1:], h_theta[1:])
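To use R in gradient descent, its gradient is added to the cost gradient; a sketch under the same assumptions (dR is a hypothetical name):

def dR(h_theta, alpha):
    # gradient of the ridge penalty; index 0 (intercept) stays unpenalized
    return [0] + [alpha * t for t in h_theta[1:]]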
Example #9
def predict(X, h_theta):
    # linear prediction dot(h_theta, xi) for each observation xi
    return [dot(h_theta, xi) for xi in X]
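A small usage example with hypothetical data:

X = [[1, 2.0], [1, 3.5]]   # x0 = 1 for the intercept
h_theta = [0.5, 1.0]
print(predict(X, h_theta))  # [2.5, 4.0]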
Example #12
def predict(f, X, h_theta):
    # generalized prediction: apply the link function f to each linear score
    return [f(dot(h_theta, xi)) for xi in X]
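For example, passing the sigmoid from Example #5 as f turns the linear scores into class probabilities (hypothetical data):

X = [[1, 0.5], [1, -1.2]]
h_theta = [0.1, 2.0]
probs = predict(logistic, X, h_theta)  # one probability per row of X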