def fit(self, X: np.ndarray, y: np.ndarray):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
        y, self.Y_offset, self.Y_scale = scale(y)
    self.X_params = X
    self.Y_params = y

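# Sketch (assumption, not from the source): every fit in this section calls a
# shared `scale` helper that is never shown. A minimal standardizing version
# consistent with how its three return values are unpacked could be:
def scale(A):
    # column-wise standardization; the statistics are returned so that
    # predictions can later be mapped back to the original units
    offset = A.mean(axis=0)
    sd = A.std(axis=0)
    sd = np.where(sd == 0, 1, sd)  # guard against constant columns
    return (A - offset) / sd, offset, sd
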
def fit(self, X: np.array, y: np.array): """ 1 / (2 * n_samples) * ||y - Xw||^2_2 + l1_ratio * ||w||_1 + 0.5 * l2_ratio * ||w||^2_2 """ if self.scale: X, self.X_offset, self.X_scale = scale(X) y, self.y_offset, self.y_scale = scale(y) n_samples, n_features = X.shape if self.fit_intercept: ones = np.ones((n_samples, 1)) X = np.concatenate((ones, X), axis=1) n_samples, n_features = X.shape W = np.random.rand((n_features)) self.history = [] for _ in range(self.n_iters): preds = X.dot(W) dMSE = (1 / n_samples) * X.T.dot(preds - y) dl1 = np.array(np.sign(W)) dl2 = 2 * W W = W - self.lr * (dMSE + self.l1_ratio * dl1 + 0.5 * self.l2_ratio * dl2) self.history.append(mse(X.dot(W), y)) self.W = W
def fit(self, X: np.ndarray, y: np.ndarray):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
    n_samples, n_features = X.shape
    _, self.n_classes = y.shape
    self.p_classes = np.zeros(self.n_classes)
    self.mean_class = np.zeros((self.n_classes, n_features))
    self.cov_class = np.zeros((n_features, n_features))
    # Within-class scatter: sum of per-class covariances
    for i in range(self.n_classes):
        self.p_classes[i] = y[:, i].sum() / n_samples
        Xi = X[y[:, i] == 1]
        self.mean_class[i] = Xi.mean(0)
        self.cov_class += np.cov(Xi.T)
    self.S_within = self.cov_class
    # Between-class scatter: spread of the class means around the global mean
    mean_diff = self.mean_class - X.mean(0)
    self.S_between = mean_diff.T @ mean_diff
    pinv = np.linalg.pinv(self.S_within)
    if self.solver == "svd":
        u, s, v = np.linalg.svd(pinv @ self.S_between)
        self.proj = v.T
        # self.bias = TODO
    elif self.solver == "eig":
        xc = pinv @ self.S_between
        # xc is not symmetric in general, so use eig rather than eigh, then
        # sort the eigenvectors (columns) by decreasing eigenvalue
        evalue, evector = np.linalg.eig(xc)
        order = np.argsort(evalue.real)[::-1]
        self.proj = evector[:, order].real.T
        # self.bias = TODO
    else:
        raise NotImplementedError

def fit(self, X: np.ndarray, y: np.ndarray):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
    n_samples, n_features = X.shape
    _, n_classes = y.shape
    if self.fit_intercept:
        ones = np.ones((n_samples, 1))
        X = np.concatenate((ones, X), axis=1)
        self.n_samples, n_features = X.shape
    if self.multi_class == 'ovr':
        predFn = lambda X, W: X.dot(W)
        self.W = []
        self.history = []
        # One-vs-rest: fit one binary hinge-loss classifier per class
        for i in range(n_classes):
            W = np.random.rand(n_features)
            # Recode the one-hot column as +1 / -1 labels
            this_y = np.zeros(n_samples)
            this_y[y[:, i] == 1] = 2
            this_y -= 1
            W, history = LinearGradientDescent(self.dhinge_loss, X, this_y, W,
                                               self.n_iters, self.lr,
                                               l1_ratio=self.l1_ratio,
                                               l2_ratio=self.l2_ratio,
                                               metric=self.hinge_loss,
                                               predFn=predFn)
            self.W.append(W)
            self.history.append(history)
    elif self.multi_class == 'multi':
        # Softmax for the cross_entropy metric
        predFn = lambda X, W: Softmax(X.dot(W))
        W = np.random.rand(n_features, n_classes)
        self.W, self.history = LinearGradientDescent(self.dcrammer_singer_loss, X, y, W,
                                                     self.n_iters, self.lr,
                                                     l1_ratio=self.l1_ratio,
                                                     l2_ratio=self.l2_ratio,
                                                     metric=cross_entropy,
                                                     predFn=predFn)
    else:
        raise NotImplementedError

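# Sketch (assumption, not from the source): the `LinearGradientDescent`
# helper used here and in the SVR fit below is not shown. Judging from the
# call sites, a minimal version could be:
def LinearGradientDescent(dLoss, X, y, W, n_iters, lr,
                          l1_ratio=0.0, l2_ratio=0.0,
                          metric=None, predFn=None):
    # dLoss(X, y, W) is assumed to return the averaged loss gradient in W;
    # metric is assumed to take (y_true, y_pred), as cross_entropy does
    history = []
    for _ in range(n_iters):
        grad = dLoss(X, y, W)
        # same elastic-net penalty terms as in the ElasticNet fit above
        W = W - lr * (grad + l1_ratio * np.sign(W) + 0.5 * l2_ratio * 2 * W)
        if metric is not None and predFn is not None:
            history.append(metric(y, predFn(X, W)))
    return W, history
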
def fit(self, X, y):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
    n_samples, n_features = X.shape
    _, n_classes = y.shape
    if self.fit_intercept:
        ones = np.ones((n_samples, 1))
        X = np.concatenate((ones, X), axis=1)
        n_samples, n_features = X.shape
    W = np.zeros((n_features, n_classes))
    self.history = []
    for _ in range(self.n_iters):
        preds = self.softmax(X.dot(W))
        # Gradient of the cross-entropy, one class column at a time
        dCE = np.zeros((n_features, n_classes))
        for j in range(n_classes):
            ydiff = preds[:, j] - y[:, j]
            dCE[:, j] = (1 / n_samples) * X.T.dot(ydiff)
        dl1 = np.sign(W)
        dl2 = 2 * W
        W = W - self.lr * (dCE + self.l1_ratio * dl1 + 0.5 * self.l2_ratio * dl2)
        self.history.append(cross_entropy(y, preds))
    self.W = W

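# Sketch (assumption, not from the source): `self.softmax` is not shown; a
# numerically stable row-wise version could be:
def softmax(self, z):
    # z is assumed to have shape (n_samples, n_classes)
    z = z - z.max(axis=1, keepdims=True)  # shift rows for stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)
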
def fit(self, X: np.ndarray, y: np.ndarray):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
    self.X_params = X
    self.Y_params = y
    self.n_classes = y.shape[1]

def fit(self, X: np.ndarray, y: np.ndarray):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
        y, self.y_offset, self.y_scale = scale(y)
    n_samples, n_features = X.shape
    if self.fit_intercept:
        ones = np.ones((n_samples, 1))
        X = np.concatenate((ones, X), axis=1)
        self.n_samples, n_features = X.shape
    W = np.random.rand(n_features)
    predFn = lambda X, W: X.dot(W)
    # epsilon-insensitive loss; only the l2 penalty is used here
    self.W, self.history = LinearGradientDescent(self.epsilon_loss, X, y, W,
                                                 self.n_iters, self.lr,
                                                 l1_ratio=0,
                                                 l2_ratio=self.l2_ratio,
                                                 metric=mse,
                                                 predFn=predFn)

def fit(self, X: np.ndarray, y: np.ndarray):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
        y, self.y_offset, self.y_scale = scale(y)
    n_samples, n_features = X.shape
    if self.fit_intercept:
        ones = np.ones((n_samples, 1))
        X = np.concatenate((ones, X), axis=1)
        n_samples, n_features = X.shape
    if not self.gradient_descent:
        # Closed-form least squares
        self.invert(X, y)
    else:
        W = np.random.rand(n_features)
        self.history = []
        for _ in range(self.n_iters):
            preds = X.dot(W)
            dMSE = (1 / n_samples) * X.T.dot(preds - y)
            W = W - self.lr * dMSE
            self.history.append(mse(X.dot(W), y))
        self.W = W

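# Sketch (assumption, not from the source): the closed-form branch calls
# `self.invert`, which is not shown. The normal-equations solution it most
# plausibly computes is:
def invert(self, X, y):
    # pinv keeps this well defined even when X^T X is singular
    self.W = np.linalg.pinv(X.T @ X) @ X.T @ y
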
def fit(self, X: np.ndarray, y=None):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
    if self.solver == "eig":
        # eigh returns eigenvectors as columns in ascending eigenvalue
        # order, so reverse the columns and keep the top `dims`
        xc = np.cov(X.T)
        evalue, evector = np.linalg.eigh(xc)
        self.proj = evector[:, ::-1][:, :self.dims]
    elif self.solver == "svd":
        # Rows of v are the principal directions, ordered by singular value
        u, s, v = np.linalg.svd(X)
        self.proj = v[:self.dims].T
    else:
        raise NotImplementedError

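# Sketch (assumption, not from the source): with `proj` stored as an
# (n_features, dims) matrix, the matching transform would be a single
# projection, reusing the stored scaling statistics:
def transform(self, X):
    if self.scale:
        X = (X - self.X_offset) / self.X_scale
    return X @ self.proj
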
def fit(self, X, y):
    n_sample, n_features = X.shape
    if self.normalize_y:
        y, self.y_offset, self.y_scale = scale(y)
    self.X_dataset = X
    self.y_dataset = y
    # Observation noise added to the kernel's diagonal
    noise = self.alpha * np.eye(n_sample)
    # Prior covariance over the training targets
    w_cov = self.kernel(X, X) + noise
    self.K = w_cov
    if self.cholesky:
        # Factor K once; alpha_ = K^{-1} y via two triangular solves
        self.L_ = cholesky(w_cov, lower=True)
        self.alpha_ = cho_solve((self.L_, True), y)
    else:
        self.w_cov_i = np.linalg.pinv(w_cov)

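# Sketch (assumption, not from the source): the quantities stored by fit
# support the standard GP posterior mean; a minimal predict consistent with
# them could be:
def predict(self, X):
    K_s = self.kernel(self.X_dataset, X)  # train/test cross-covariance
    if self.cholesky:
        mean = K_s.T @ self.alpha_        # alpha_ = K^{-1} y
    else:
        mean = K_s.T @ self.w_cov_i @ self.y_dataset
    if self.normalize_y:
        mean = mean * self.y_scale + self.y_offset  # undo target scaling
    return mean
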
def fit(self, X, y):
    if self.scale:
        X, self.X_offset, self.X_scale = scale(X)
    n_samples, n_features = X.shape
    _, n_classes = y.shape
    # Build the hidden layers
    self.layers = []
    nbIn = n_features
    for nbOut in self.layerSizes:
        self.layers.append(
            NeuralLayer(
                nbIn,
                nbOut,
                self.activation,
                self.bias,
                l1_ratio=self.l1_ratio,
                l2_ratio=self.l2_ratio,
            ))
        nbIn = nbOut
    # Output layer: identity derivative, so the error signal passed to
    # backward() is used as-is
    derivFinal = lambda x: np.ones_like(x)
    self.layers.append(
        NeuralLayer(
            nbIn,
            n_classes,
            self.finalActivation,
            self.bias,
            derivative=derivFinal,
            l1_ratio=self.l1_ratio,
            l2_ratio=self.l2_ratio,
        ))
    self.history = []
    for i in range(self.n_iters):
        # Per-sample updates; the step is scaled by 1 / n_samples
        for j in range(n_samples):
            preds = self.forward(X[j, None])
            error = preds - y[j, None]
            self.backward(error)
            self.updateGrad(self.lr / n_samples)
        ce = cross_entropy(y, self.forward(X))
        self.history.append(ce)

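# Sketch (assumption, not from the source): forward, backward and updateGrad
# are not shown; given the NeuralLayer objects built above, they plausibly
# just chain the layers:
def forward(self, X):
    for layer in self.layers:
        X = layer.forward(X)
    return X

def backward(self, error):
    # propagate the error signal from the output layer back to the input
    for layer in reversed(self.layers):
        error = layer.backward(error)

def updateGrad(self, lr):
    for layer in self.layers:
        layer.updateGrad(lr)
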
def fit(self, X, y=None): """ KMeans training follows the Expectation-Maximization algorithm """ if self.scale: X, self.X_offset, self.X_scale = scale(X) n_samples, n_features = X.shape self.centers = np.zeros((self.K, n_features)) # Initialize centers for f in range(n_features): self.centers[:, f] = np.linspace( np.quantile(X[:, f], 1 / self.K), np.quantile(X[:, f], 1 - 1 / self.K), self.K, ) self.history = [] for _ in range(self.max_iters): # Expectation pointsToCluster = self.e_step(X) # Maximization centers = self.m_step(X, pointsToCluster) self.centers = centers # Logs and convergence check objective = 0 for s in range(n_samples): objective += (pointsToCluster[s] * ((self.centers - X[s])**2).sum(1)).sum() self.history.append(objective) if len(self.history) > 2: if np.abs(self.history[-2] - objective) < self.tol: break