def fit_(x, y, theta, alpha, max_iter):
    """
    Description:
        Fits the model to the training dataset contained in x and y.
    Args:
        x: has to be a numpy.ndarray, a vector of dimension m * 1:
            (number of training examples, 1).
        y: has to be a numpy.ndarray, a vector of dimension m * 1:
            (number of training examples, 1).
        theta: has to be a numpy.ndarray, a vector of dimension 2 * 1.
        alpha: has to be a float, the learning rate
        max_iter: has to be an int, the number of iterations done during
            the gradient descent
    Returns:
        new_theta: numpy.ndarray, a vector of dimension 2 * 1.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exception.
    """
    for i in range(max_iter):
        # Report progress every 100000 iterations.
        if not i % 100000:
            print(i * 100 / max_iter, "%")
            print(theta)
        # Collapse the per-example gradient contributions (this assumes
        # gradient returns one column per training example), scale by the
        # learning rate, and take one descent step.
        theta = theta - gradient(x, y, theta).sum(axis=1) * alpha
    return theta
def fit_(x, y, theta, alpha, max_iter):
    """
    Description:
        Fits the model to the training dataset contained in x and y.
    Args:
        x: has to be a np.ndarray, a vector of dim m * 1: (nb of training ex, 1).
        y: has to be a np.ndarray, a vector of dim m * 1: (nb of training ex, 1).
        theta: has to be a np.ndarray, a vector of dim 2 * 1.
        alpha: has to be a float, the learning rate
        max_iter: has to be an int, the nb of iter done during the gradient descent
    Returns:
        new_theta: np.ndarray, a vector of dim 2 * 1.
        None if there is a matching dim problem.
    Raises:
        This function should not raise any Exception.
    """
    step_tolerance = 0.00001
    thetatemp0 = theta[0]
    thetatemp1 = theta[1]
    for i in range(max_iter):
        g = gradient(x, y, np.array((thetatemp0, thetatemp1)))
        # Simultaneous update of both parameters from the same gradient.
        thetatemp0b = thetatemp0 - (alpha * g[0])
        thetatemp1b = thetatemp1 - (alpha * g[1])
        # Early stop once the step on theta0 becomes negligible.
        if -step_tolerance < thetatemp0b - thetatemp0 < step_tolerance:
            print(f"nb of steps = {i + 1}")
            break
        thetatemp0 = thetatemp0b
        thetatemp1 = thetatemp1b
    return np.array((thetatemp0, thetatemp1))
def fit_(x, y, theta, alpha, max_iter):
    # Validate the arguments before running gradient descent.
    if not check(x, y, theta, alpha, max_iter):
        return None
    for _ in range(max_iter):
        nabla = gradient(x, y, theta)
        if nabla is None:
            return None
        theta = theta - (nabla * alpha)
    return theta
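# The snippet above relies on a check() helper that is not shown. A minimal
# sketch of what such a validator could look like, assuming it only needs to
# confirm types and matching shapes (the exact rules are an assumption, not
# taken from the original solution):
import numpy as np

def check(x, y, theta, alpha, max_iter):
    # All three arrays must be non-empty numpy arrays.
    if not all(isinstance(a, np.ndarray) and a.size for a in (x, y, theta)):
        return False
    # x and y must hold one value per training example.
    if x.shape != y.shape:
        return False
    # alpha is a learning rate, max_iter an iteration count.
    return isinstance(alpha, float) and isinstance(max_iter, int) and max_iter >= 0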
def fit_(theta, X, Y, alpha, n_cycle):
    # Validate types first, then shapes, then the hyper-parameters.
    if isinstance(X, np.ndarray) and isinstance(theta, np.ndarray) \
            and isinstance(Y, np.ndarray):
        if len(X[0]) == len(theta) - 1 and len(X) == len(Y):
            if isinstance(alpha, float) and isinstance(n_cycle, int):
                # Prepend a column of ones so theta[0] acts as the intercept.
                ones = np.full((len(X), 1), 1.)
                X = np.hstack([ones, X])
                for _ in range(n_cycle):
                    theta = theta - alpha * gradient(X, Y, theta)
                return theta
            else:
                print("alpha is not a float or n_cycle is not an int")
        else:
            print("\nX's column count is not len(theta) - 1, "
                  "or X and Y have different dimensions\n")
    else:
        print("theta, X or Y is not a np.ndarray. Incompatible.\n")
def fit_(x, y, theta, alpha, max_iter):
    """
    Description:
        Fits the model to the training dataset contained in x and y.
    Args:
        x: has to be a numpy.ndarray, a vector of dimension m * 1:
            (number of training examples, 1).
        y: has to be a numpy.ndarray, a vector of dimension m * 1:
            (number of training examples, 1).
        theta: has to be a numpy.ndarray, a vector of dimension 2 * 1.
        alpha: has to be a float, the learning rate
        max_iter: has to be an int, the number of iterations done during
            the gradient descent
    Returns:
        new_theta: numpy.ndarray, a vector of dimension 2 * 1.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exception.
    """
    new_theta = theta
    for _ in range(max_iter):
        # One descent step: move against the gradient of the cost.
        nabJ = gradient(x, y, new_theta)
        new_theta = new_theta - (alpha * nabJ)
    return new_theta
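# A hypothetical usage sketch for the fit_ above, reusing the sample data that
# appears in the test scripts further down. The alpha and max_iter values are
# illustrative assumptions, not values from the original exercise, and a
# gradient function (e.g. from vec_gradient) must be in scope for this to run.
import numpy as np

x = np.array([12.4956442, 21.5007972, 31.5527382, 48.9145838, 57.5088733])
y = np.array([37.4013816, 36.1473236, 45.7655287, 46.6793434, 59.5585554])
theta = fit_(x, y, np.array([1.0, 1.0]), alpha=1e-4, max_iter=150000)
print(theta)  # learned [intercept, slope]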
def fit_(x, y, theta, alpha, max_iter):
    """
    Description:
        Fits the model to the training dataset contained in x and y.
    Args:
        x: has to be a numpy.ndarray, a vector of dimension m * 1:
            (number of training examples, 1).
        y: has to be a numpy.ndarray, a vector of dimension m * 1:
            (number of training examples, 1).
        theta: has to be a numpy.ndarray, a vector of dimension 2 * 1.
        alpha: has to be a float, the learning rate
        max_iter: has to be an int, the number of iterations done during
            the gradient descent
    Returns:
        new_theta: numpy.ndarray, a vector of dimension 2 * 1.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exception.
    """
    # Guard against None before touching len() or .shape, which would raise.
    if x is None or y is None or theta is None:
        return None
    if len(x) < 1 or len(y) < 1 or len(theta) < 1 or x.shape != y.shape:
        return None
    for _ in range(max_iter):
        # Reassign instead of -= so the caller's theta array is not mutated
        # and an integer theta does not break the float update.
        theta = theta - (gradient(x, y, theta) * alpha)
    return theta
#!/usr/bin/env python3
import numpy as np

from vec_gradient import gradient

x = np.array([12.4956442, 21.5007972, 31.5527382, 48.9145838, 57.5088733])
y = np.array([37.4013816, 36.1473236, 45.7655287, 46.6793434, 59.5585554])

# Example 0:
theta1 = np.array([2, 0.7])
output = gradient(x, y, theta1)
print(output)
# Compare with np.allclose rather than np.array_equal: exact equality is
# fragile for floating-point results.
assert np.allclose(output, [21.0342574, 587.36875564])

# Example 1:
theta2 = np.array([1, -0.4])
output = gradient(x, y, theta2)
print(output)
assert np.allclose(output, [58.86823748, 2229.72297889])
def fit_(x, y, theta, alpha, max_iter):
    # astype returns a float copy, so the in-place update below cannot fail
    # on an integer array and never mutates the caller's theta.
    theta = theta.astype(np.float32)
    for _ in range(max_iter):
        theta -= gradient(x, y, theta) * alpha
    return theta
def fit_(x, y, theta, alpha, max_iter):
    # Iterate until the cost reaches zero or the iteration budget runs out.
    while cost_(y, predict_(x, theta)) != 0 and max_iter != 0:
        # Compute the gradient once per iteration (the original called
        # gradient twice, updating theta[0] before theta[1]'s gradient was
        # taken, which makes the update non-simultaneous).
        g = gradient(x, y, theta)
        theta[0] = float(theta[0] - alpha * g[0])
        theta[1] = float(theta[1] - alpha * g[1])
        max_iter -= 1
    return np.array(theta)
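# The fit_ above depends on predict_ and cost_ helpers that are not included
# here. A minimal sketch under the usual simple-linear-regression assumptions
# (y_hat = theta0 + theta1 * x, cost = halved mean squared error); the real
# helpers in this exercise may differ:
import numpy as np

def predict_(x, theta):
    # Prediction for each training example.
    return theta[0] + theta[1] * x

def cost_(y, y_hat):
    # Half the mean squared error between predictions and targets.
    return float(((y_hat - y) ** 2).mean() / 2)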
import numpy as np

from vec_gradient import gradient

x = np.array([12.4956442, 21.5007972, 31.5527382, 48.9145838, 57.5088733])
y = np.array([37.4013816, 36.1473236, 45.7655287, 46.6793434, 59.5585554])

# Example 0:
theta1 = np.array([2, 0.7])
print(gradient(x, y, theta1))
# Output:
# array([21.0342574, 587.36875564])

# Example 1:
theta2 = np.array([1, -0.4])
print(gradient(x, y, theta2))
# Output:
# array([58.86823748, 2229.72297889])
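# None of the snippets above include the gradient function they all import
# from vec_gradient. A minimal sketch, assuming the textbook convention
# grad = (1/m) * X'.T @ (X' @ theta - y) with X' = [1, x]; the expected
# outputs printed above do not match this convention exactly, so the actual
# exercise may define gradient differently and this is illustrative only:
import numpy as np

def gradient(x, y, theta):
    # Build the design matrix with a leading column of ones for the intercept.
    m = x.shape[0]
    X = np.column_stack((np.ones(m), x))
    # Vectorized gradient of the mean-squared-error cost.
    return X.T @ (X @ theta - y) / m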