Code example #1
def log_gradient(x, y, theta):
    """Computes a gradient vector from three non-empty numpy.ndarray, with a for-loop. The three
    ,→ arrays must have compatible dimensions.
    Args:
    x: has to be an numpy.ndarray, a matrix of dimension m * n.
    y: has to be an numpy.ndarray, a vector of dimension m * 1.
    theta: has to be an numpy.ndarray, a vector (n +1) * 1.
    Returns:
    The gradient as a numpy.ndarray, a vector of dimensions n * 1, containing the result of the
    ,→ formula for all j.
    None if x, y, or theta are empty numpy.ndarray.
    None if x, y and theta do not have compatible dimensions.
    Raises:
    This function should not raise any Exception.
    """
    if (type(x) is not np.ndarray or type(y) is not np.ndarray or
            type(theta) is not np.ndarray or len(x) == 0 or len(y) == 0 or
            len(theta) == 0 or len(x) != len(y)):
        return None
    if x.ndim == 1:
        x = x.reshape(len(x), 1)
    # Prepend a column of ones so the bias term theta[0] contributes to the gradient.
    intercept = np.ones((x.shape[0], 1))
    x_bias = np.append(intercept, x, axis=1)
    y_pred = logistic_predict(x, theta)
    y_pred = y_pred.reshape(len(y), 1)
    error = y_pred - y
    error_columns = error.reshape(len(y), 1)
    error_dot_x = np.dot(error_columns.T, x_bias)
    grad = 1 / len(x) * error_dot_x
    return grad.reshape(len(theta), 1)
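
All of these examples call two helpers, add_intercept and logistic_predict, that are not shown here, and they use numpy as np without importing it. The sketch below is a minimal assumption of what those helpers look like (an intercept column of ones prepended to x, and the sigmoid of the linear combination); it is not the authors' actual implementation.

import numpy as np

def add_intercept(x):
    # Assumed helper: prepend a column of ones so theta[0] acts as the bias term.
    if x.ndim == 1:
        x = x.reshape(-1, 1)
    return np.hstack((np.ones((x.shape[0], 1)), x))

def logistic_predict(x, theta):
    # Assumed helper: sigmoid of the linear combination, returned as an m * 1 column vector.
    return 1 / (1 + np.exp(-add_intercept(x) @ theta))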
Code example #2
def log_gradient(x, y, theta):
	"""Computes a gradient vector from three non-empty numpy.ndarray, with a for-loop. The three
	,→ arrays must have compatible dimensions.
	Args:
	x: has to be an numpy.ndarray, a matrix of dimension m * n.
	y: has to be an numpy.ndarray, a vector of dimension m * 1.
	theta: has to be an numpy.ndarray, a vector (n +1) * 1.
	Returns:
	The gradient as a numpy.ndarray, a vector of dimensions n * 1, containing the result of the
	,→ formula for all j.
	None if x, y, or theta are empty numpy.ndarray.
	None if x, y and theta do not have compatible dimensions.
	Raises:
	This function should not raise any Exception.
	"""
	if x is None or y is None or theta is None:
		return None
	if len(x) < 1 or len(y) < 1 or len(theta) < 1 or x.shape[0] != y.shape[0]:
		return None
	y_hat = logistic_predict(x, theta)
	gr_vec = (np.matmul(add_intercept(x).transpose(), (y_hat - y))) / y.shape[0]
	return gr_vec
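
For reference, the formula both versions of log_gradient implement, read directly off the code (X' denotes x with an intercept column of ones prepended, and m is the number of rows of x), is:

\[
\nabla J(\theta) = \frac{1}{m} \, X'^{\top} (\hat{y} - y),
\qquad \hat{y} = \mathrm{sigmoid}(X'\theta)
\]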
Code example #3
def vec_reg_logistic_grad(y, x, theta, lambda_):
	"""Computes the regularized logistic gradient of three non-empty numpy.ndarray, without any
	,→ for-loop. The three arrays must have compatible dimensions.
	Args:
	y: has to be a numpy.ndarray, a vector of dimension m * 1.
	x: has to be a numpy.ndarray, a matrix of dimesion m * n.
	theta: has to be a numpy.ndarray, a vector of dimension n * 1.
	lambda_: has to be a float.
	Returns:
	A numpy.ndarray, a vector of dimension n * 1, containing the results of the formula for all
	,→ j.
	None if y, x, or theta are empty numpy.ndarray.
	None if y, x or theta does not share compatibles dimensions.
	Raises:
	This function should not raise any Exception.
	"""
	if x is None or y is None or theta is None:
		return None
	if x.size == 0 or y.size == 0 or theta.size == 0 or x.shape[0] != y.shape[0]:
		return None
	y_hat = logistic_predict(x, theta)
	theta2 = np.copy(theta)
	theta2[0] = 0
	gr_vec = (np.matmul(add_intercept(x).transpose(), (y_hat - y)) + (lambda_ * theta2)) / y.shape[0]
	return gr_vec
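
The regularized formula this vectorized version implements, again read off the code (theta' is a copy of theta whose first component is set to 0, so the bias term is not penalized), is:

\[
\nabla J(\theta) = \frac{1}{m} \left( X'^{\top} (\hat{y} - y) + \lambda \, \theta' \right),
\qquad \theta'_0 = 0
\]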
Code example #4
def reg_logistic_grad(y, x, theta, lambda_):
	"""Computes the regularized logistic gradient of three non-empty numpy.ndarray, with two
	,→ for-loops. The three arrays must have compatible dimensions.
	Args:
	y: has to be a numpy.ndarray, a vector of dimension m * 1.
	x: has to be a numpy.ndarray, a matrix of dimesion m * n.
	theta: has to be a numpy.ndarray, a vector of dimension n * 1.
	lambda_: has to be a float.
	Returns:
	A numpy.ndarray, a vector of dimension n * 1, containing the results of the formula for all
	,→ j.
	None if y, x, or theta are empty numpy.ndarray.
	None if y, x or theta does not share compatibles dimensions.
	Raises:
	This function should not raise any Exception.
	"""
	if x is None or y is None or theta is None:
		return None
	if x.size == 0 or y.size == 0 or theta.size == 0 or x.shape[0] != y.shape[0]:
		return None
	gr_vec = np.zeros((theta.shape[0], 1))
	y_hat = logistic_predict(x, theta)
	# The bias term (j = 0) is not regularized.
	gr_vec[0] = np.sum(y_hat - y) / y.shape[0]
	for j in range(1, theta.shape[0]):
		gr_vec[j] = (np.sum((y_hat - y) * x[:, j - 1].reshape(-1, 1)) + (lambda_ * theta[j])) / y.shape[0]
	return gr_vec
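
As a quick sanity check, the loop version and the vectorized version above should produce the same gradient on the same inputs. A small usage sketch, reusing the sample data from code example #5 below, an arbitrary lambda_ of 0.5, and the helpers sketched after code example #1:

x = np.array([[0, 2, 3, 4], [2, 4, 5, 5], [1, 3, 2, 7]])
y = np.array([[0], [1], [1]])
theta = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]])

# Both implement the same regularized gradient formula, so this should print True.
print(np.allclose(reg_logistic_grad(y, x, theta, 0.5),
                  vec_reg_logistic_grad(y, x, theta, 0.5)))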
Code example #5
def log_loss_(y, y_hat, eps=1e-15):
    """Computes the logistic loss value.
    Args:
    y: has to be a numpy.ndarray, a vector of dimension m * 1.
    y_hat: has to be a numpy.ndarray, a vector of dimension m * 1.
    eps: has to be a float, epsilon (default=1e-15)
    Returns:
    The logistic loss value as a float.
    None on any error.
    Raises:
    This function should not raise any Exception.
    """
    return -(np.sum((y * np.log(y_hat + eps)) +
                    ((1 - y) * np.log(1 - y_hat + eps)))) * (1 / y.shape[0])


if __name__ == "__main__":
    y1 = np.array([1])
    x1 = np.array([4])

    theta1 = np.array([[2], [0.5]])
    y_hat1 = logistic_predict(x1, theta1)
    print(log_loss_(y1, y_hat1))

    y2 = np.array([[1], [0], [1], [0], [1]])
    x2 = np.array([[4], [7.16], [3.2], [9.37], [0.56]])
    theta2 = np.array([[2], [0.5]])
    y_hat2 = logistic_predict(x2, theta2)
    print(log_loss_(y2, y_hat2))

    y3 = np.array([[0], [1], [1]])
    x3 = np.array([[0, 2, 3, 4], [2, 4, 5, 5], [1, 3, 2, 7]])
    theta3 = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]])
    y_hat3 = logistic_predict(x3, theta3)
    print(log_loss_(y3, y_hat3))
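
Written out from the code, the loss that log_loss_ computes is the cross-entropy, with eps = 1e-15 guarding against log(0):

\[
J = -\frac{1}{m} \sum_{i=1}^{m} \Big[ y^{(i)} \log\big(\hat{y}^{(i)} + \varepsilon\big) + \big(1 - y^{(i)}\big) \log\big(1 - \hat{y}^{(i)} + \varepsilon\big) \Big]
\]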
Code example #6
def vec_log_gradient(x, y, theta):
    # Vectorized logistic gradient: (1 / m) * X'.T @ (h0 - y), where X' is x with an intercept column.
    h0 = logistic_predict(x, theta)
    J = 1 / len(y) * (add_intercept(x).transpose() @ (h0 - y))
    return J
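
A brief usage sketch for this last version, again assuming the helpers sketched after code example #1 and reusing the dataset from code example #5:

x = np.array([[0, 2, 3, 4], [2, 4, 5, 5], [1, 3, 2, 7]])
y = np.array([[0], [1], [1]])
theta = np.array([[-2.4], [-1.5], [0.3], [-1.4], [0.7]])

grad = vec_log_gradient(x, y, theta)
print(grad.shape)  # (5, 1): one partial derivative per component of theta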