def logistic_log_gradient(x, y, beta):
    """the gradient of the log likelihood
    summed over all training examples"""
    total_gradient = [0] * len(beta)

    for x_i, y_i in zip(x, y):
        partial_derivative = logistic_log_gradient_i(x_i, y_i, beta)
        total_gradient = vector_add(total_gradient, partial_derivative)

    return total_gradient
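The loop above leans on two helpers that this example never defines, logistic_log_gradient_i and vector_add. A minimal sketch of what they might look like, assuming vectors are plain Python lists and the model is standard logistic regression (the bodies here are assumptions, not the original module's code):

import math

def logistic(z):
    # standard sigmoid
    return 1.0 / (1 + math.exp(-z))

def vector_add(v, w):
    # elementwise sum of two equal-length vectors
    return [v_j + w_j for v_j, w_j in zip(v, w)]

def logistic_log_gradient_i(x_i, y_i, beta):
    # gradient of the log likelihood for one example:
    # (y_i - p_i) * x_i, where p_i = logistic(dot(x_i, beta))
    p_i = logistic(sum(x_ij * b_j for x_ij, b_j in zip(x_i, beta)))
    return [(y_i - p_i) * x_ij for x_ij in x_i]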
Example #3
def squared_error_ridge_gradient(x_i, y_i, beta, alpha):
    """the gradient corresponding to the ith squared error term
    including the ridge penalty"""
    return vector_add(squared_error_gradient(x_i, y_i, beta),
                      ridge_penalty_gradient(beta, alpha))
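squared_error_gradient and ridge_penalty_gradient are likewise assumed rather than shown. A sketch under the usual conventions (least-squares error, L2 penalty scaled by alpha, intercept beta[0] left unpenalized); the names mirror the call sites above but the bodies are assumptions:

def squared_error_gradient(x_i, y_i, beta):
    # gradient of (y_i - dot(x_i, beta)) ** 2 with respect to beta
    err = y_i - sum(x_ij * b_j for x_ij, b_j in zip(x_i, beta))
    return [-2 * err * x_ij for x_ij in x_i]

def ridge_penalty_gradient(beta, alpha):
    # gradient of alpha * sum(beta_j ** 2); by convention the
    # intercept term beta[0] is not penalized
    return [0] + [2 * alpha * beta_j for beta_j in beta[1:]]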
Example #5
#!/usr/bin/python
from pprint import pprint
import linear_algebra as la
import stats as st

A = [1, 3, 5, 7, 9]
B = [6, 4, 8, 2, 10]

print("*** Test Module <linear_algebra> ***")
print("*** vector ......")

print("vector A = ", A)
print("vector B = ", B)

C = la.vector_add(A, B)
print("A + B = ", C)

C = la.vector_subtract(A, B)
print("A - B = ", C)

C = la.vector_sum([A, B])
print("A and B summary = ", C)

C = la.scalar_multiply(10, A)
print("10 * A = ", C)

C = la.vector_mean([A, B])
print("A and B mean = ", C)

C = la.dot(A, B)
print("A dot B = ", C)
import unittest
from linear_algebra import vector_add

class LinearAlgebraTestCase(unittest.TestCase):
    def test_vector_add(self):
        self.assertEqual([3, 3, 3], vector_add([1, 3, 2], [2, 0, 1]))
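A test like this runs under Python's built-in runner, e.g. python -m unittest, from a directory where the linear_algebra module is importable (module layout assumed).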