def log_gradient(x: np.ndarray, y: np.ndarray, theta: np.ndarray) -> np.ndarray:
    """Computes a gradient vector from three non-empty numpy.ndarray, with a for-loop.

    The three arrays must have compatible dimensions.
    Args:
        x: has to be an numpy.ndarray, a matrix of dimension m * n.
        y: has to be an numpy.ndarray, a vector of dimension m * 1.
        theta: has to be an numpy.ndarray, a vector (n + 1) * 1.
    Returns:
        The gradient as a numpy.ndarray, a vector of dimensions (n + 1) * 1,
        containing the result of the formula for all j.
        None if x, y, or theta are empty numpy.ndarray.
        None if x, y and theta do not have compatible dimensions.
    Raises:
        This function should not raise any Exception.
    """
    # Guards added to honor the "should not raise" contract: without them a
    # non-ndarray argument raises AttributeError on `.size`, and a 1-D `x`
    # raises IndexError on `x.shape[1]`.
    if not all(isinstance(arr, np.ndarray) for arr in (x, y, theta)):
        return None
    if x.ndim == 1:
        x = x.reshape(-1, 1)
    if y.ndim == 1:
        y = y.reshape(-1, 1)
    if theta.ndim == 1:
        theta = theta.reshape(-1, 1)
    if (0 in [x.size, y.size, theta.size]
            or x.shape[0] != y.shape[0]
            or (x.shape[1] + 1) != theta.shape[0]):
        return None
    res = np.zeros(shape=theta.shape)
    m, n = x.shape
    y_hat = logistic_predict_(x, theta)
    for i in range(m):
        # theta_0 component: plain residual (intercept column is all ones).
        res[0][0] += (y_hat[i][0] - y[i][0])
        for j in range(n):
            # theta_{j+1} component: residual weighted by feature j.
            res[j + 1][0] += (y_hat[i][0] - y[i][0]) * (x[i][j])
    res = res / m
    return res
def log_gradient(x, y, theta):
    """
    Computes a gradient vector from three non-empty numpy.ndarray,
    with a for-loop. The three arrays must have compatible dimensions.
    Args:
        x: has to be an numpy.ndarray, a matrix of dimension m * n.
        y: has to be an numpy.ndarray, a vector of dimension m * 1.
        theta: has to be an numpy.ndarray, a vector (n + 1) * 1.
    Returns:
        The gradient as a numpy.ndarray, a vector of dimensions (n + 1) * 1,
        containing the result of the formula for all j.
        None if x, y, or theta are empty numpy.ndarray.
        None if x, y and theta do not have compatible dimensions.
    """
    # Empty-input guard is delegated to a project helper (not visible here).
    if __isEmpty(x, y) is True:
        return None
    # presumably normalizes x to a 2-D m * n matrix — verify against helper.
    x = reshape(x)
    if __dimensionsMatch(x, y) is False:
        return None
    # NOTE(review): predictions are computed BEFORE the intercept column is
    # added to x below — this only works if logistic_predict_ handles the
    # intercept itself; confirm against log_pred.
    y_hat = logistic_predict_(x, theta)
    x = __addIntercept(x)
    y = __parseData(y)
    m = x.shape[0]
    # Vectorized gradient formula: (1/m) * X^T (y_hat - y).
    return (1/m) * x.transpose().dot(y_hat - y)
def vec_log_gradient(x, y, theta):
    """Computes a gradient vector from three non-empty numpy.ndarray, without any for-loop.

    The three arrays must have compatible shapes.
    Args:
        x: has to be an numpy.ndarray, a matrix of shape m * n.
        y: has to be an numpy.ndarray, a vector of shape m * 1.
        theta: has to be an numpy.ndarray, a vector (n + 1) * 1.
    Returns:
        The gradient as a numpy.ndarray, a vector of shape (n + 1) * 1,
        containing the result of the formula for all j.
        None if x, y, or theta are empty numpy.ndarray.
        None if x, y and theta do not have compatible shapes.
    Raises:
        This function should not raise any Exception.
    """
    # The type check must run BEFORE any `.size` access: on a plain list the
    # original order raised AttributeError, breaking the "should not raise"
    # contract. `isinstance` replaces the non-idiomatic `type(x) != ...` test.
    if (not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray)
            or not isinstance(theta, np.ndarray)):
        return None
    if x.size == 0 or y.size == 0 or theta.size == 0:
        return None
    # Normalize 1-D vectors to column vectors so the shape checks below hold.
    if x.ndim == 1:
        x = x.reshape(x.shape[0], 1)
    if y.ndim == 1:
        y = y.reshape(y.shape[0], 1)
    if theta.ndim == 1:
        theta = theta.reshape(theta.shape[0], 1)
    if (x.shape[1] != theta.shape[0] - 1 or x.shape[0] != y.shape[0]
            or y.shape[1] != 1):
        return None
    y_hat = logistic_predict_(x, theta)
    # Prepend the intercept column of ones, then apply (1/m) * X^T (y_hat - y).
    return np.insert(x, 0, 1, axis=1).transpose() @ (y_hat - y) / y.shape[0]
def log_gradient(x, y, theta):
    """Computes the logistic-regression gradient with explicit for-loops.

    Args:
        x: numpy.ndarray, a matrix of dimension m * n.
        y: numpy.ndarray, a vector of dimension m * 1.
        theta: numpy.ndarray, a vector (n + 1) * 1.
    Returns:
        The gradient as a Python list of length n + 1; entry j holds the
        partial derivative of the loss with respect to theta_j.
    """
    y_hat = logistic_predict_(x, theta)
    # Hoist the loop-invariant sample count out of the loops.
    m = len(y)
    # `grad` replaces the original accumulator, which shadowed builtin `sum`.
    grad = [0] * theta.shape[0]
    for i in range(m):
        residual = y_hat[i] - y[i]
        for j in range(len(theta)):
            if j == 0:
                # theta_0: plain residual, averaged over the m samples.
                grad[j] += residual / m
            elif x[i].size > 1:
                # Multi-feature row: weight residual by feature j - 1.
                grad[j] += (residual * x[i][j - 1]) / m
            else:
                # Single-feature row stored as a scalar / 1-element array.
                grad[j] += (residual * x[i]) / m
    return grad
def vec_log_gradient(x: np.ndarray, y: np.ndarray, theta: np.ndarray) -> np.ndarray:
    """Computes a gradient vector from three non-empty numpy.ndarray, without any a for-loop.

    The three arrays must have compatible dimensions.
    Args:
        x: has to be an numpy.ndarray, a matrix of dimension m * n.
        y: has to be an numpy.ndarray, a vector of dimension m * 1.
        theta: has to be an numpy.ndarray, a vector (n + 1) * 1.
    Returns:
        The gradient as a numpy.ndarray, a vector of dimensions (n + 1) * 1,
        containing the result of the formula for all j.
        None if x, y, or theta are empty numpy.ndarray.
        None if x, y and theta do not have compatible dimensions.
    Raises:
        This function should not raise any Exception.
    """
    # Reject empty arrays and shape mismatches up front.
    incompatible = (
        0 in [x.size, y.size, theta.size]
        or x.shape[0] != y.shape[0]
        or theta.shape[0] != x.shape[1] + 1
    )
    if incompatible:
        return None
    sample_count = x.shape[0]
    predictions = logistic_predict_(x, theta)
    # Prepend the column of ones, then apply (1/m) * X^T (y_hat - y).
    design = add_intercept(x)
    return design.T.dot(predictions - y) / sample_count
import numpy as np
from log_pred import logistic_predict_

SEPARATOR = "--------------------------------------------"

# Example: single scalar feature.
# Expected:
# array([[0.98201379]])
features = np.array([4])
params = np.array([[2], [0.5]])
print(logistic_predict_(features, params))
print(SEPARATOR)

# Example: one feature, five samples.
# Expected:
# array([[0.98201379],
#        [0.99624161],
#        [0.97340301],
#        [0.99875204],
#        [0.90720705]])
features = np.array([[4], [7.16], [3.2], [9.37], [0.56]])
params = np.array([[2], [0.5]])
print(logistic_predict_(features, params))
print(SEPARATOR)

# Example: four features, three samples.
# Expected:
# array([[0.03916572],
#        [0.00045262],
#        [0.2890505 ]])
features = np.array([[0, 2, 3, 4], [2, 4, 5, 5], [1, 3, 2, 7]])
params = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]])
print(logistic_predict_(features, params))
print(SEPARATOR)
import numpy as np
from log_pred import logistic_predict_
from vec_log_loss import vec_log_loss_

# Example 1: single sample, scalar feature.
# Expected output:
# 0.01814992791780973
y_true = np.array([1])
features = np.array([4])
params = np.array([[2], [0.5]])
predicted = logistic_predict_(features, params)
print(vec_log_loss_(y_true, predicted))

# Example 2: one feature, five samples.
# Expected output:
# 2.4825011602474483
y_true = np.array([[1], [0], [1], [0], [1]])
features = np.array([[4], [7.16], [3.2], [9.37], [0.56]])
params = np.array([[2], [0.5]])
predicted = logistic_predict_(features, params)
print(vec_log_loss_(y_true, predicted))

# Example 3: four features, three samples.
y_true = np.array([[0], [1], [1]])
features = np.array([[0, 2, 3, 4], [2, 4, 5, 5], [1, 3, 2, 7]])
params = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]])
predicted = logistic_predict_(features, params)
print(vec_log_loss_(y_true, predicted))