def run(self, steps: int, alpha: float, xs: np.ndarray, ys: np.ndarray,
        thetas=None) -> tuple[np.ndarray, np.ndarray]:
    # Regularized gradient descent: returns the fitted thetas and the
    # cost recorded at every step.
    xs = add_ones_column(xs)
    n_features = np.size(xs, 1)
    n = len(ys)
    j_values: np.ndarray = np.zeros((steps, 1))
    grad: np.ndarray = np.zeros((1, n_features))
    if thetas is None:
        thetas = np.zeros((1, n_features))
    for i in range(steps):
        h = self.h(xs, thetas)
        # Feature-by-feature gradient, plus the regularization term.
        for j in range(n_features):
            grad[0, j] = alpha * ((1 / n) * np.sum((h - ys) * np.c_[xs[:, j]])
                                  + self.regularization_d(thetas) / n)
        thetas -= grad
        j_values[i] = self.cost_function(xs, ys, thetas) + self.regularization_fn(thetas)
    return thetas, j_values
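# `add_ones_column` is used throughout but not defined in this section.
# A minimal sketch, assuming samples are the rows of the design matrix —
# an assumption, not necessarily the repo's implementation:
import numpy as np

def add_ones_column(xs: np.ndarray) -> np.ndarray:
    # Prepend the bias column x_0 = 1: (n, m) -> (n, m + 1).
    return np.hstack((np.ones((xs.shape[0], 1)), xs))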
def test(self, test_xs, test_ys):
    # Classification accuracy on a held-out set, thresholding the
    # hypothesis at 0.5.
    n = len(test_ys)
    acc = 0
    test_xs = add_ones_column(test_xs)
    for i in range(n):
        h = self.predict(np.c_[test_xs[i, :]]) > 0.5
        acc += (h == test_ys[i].item())
    return acc / n
def plot(self):
    # Top panel: cost history; bottom panel: data and the fitted hypothesis.
    plt.subplot(2, 1, 1)
    plt.plot(self.j_values)
    plt.subplot(2, 1, 2)
    plt.plot(self.xs, self.ys, 'ro')
    plt.plot(self.xs, self.optimizer.h(add_ones_column(self.xs), self.thetas))
    plt.show()
def run(self, steps: int, alpha: float, xs: np.ndarray, ys: np.ndarray,
        thetas=None) -> tuple[np.ndarray, np.ndarray]:
    # Vectorized gradient descent: the whole gradient comes from
    # `cost_d_fn` in one call per step.
    xs = add_ones_column(xs)
    n_features = np.size(xs, 1)
    j_values: np.ndarray = np.zeros((steps, 1))
    if thetas is None:
        thetas = np.zeros((1, n_features))
    for i in range(steps):
        grad = self.cost_d_fn(xs, ys, thetas).T
        thetas -= alpha * grad
        j_values[i] = self.cost_fn(xs, ys, thetas)
    return thetas, j_values
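# `cost_fn` and `cost_d_fn` are injected elsewhere. A sketch of a
# compatible pair for linear regression (mean squared error and its
# gradient), matching the (1, n_features) theta convention above —
# assumed names, not the repo's API:
def mse_cost(xs: np.ndarray, ys: np.ndarray, thetas: np.ndarray) -> float:
    n = len(ys)
    residuals = xs @ thetas.T - ys              # (n, 1)
    return float((residuals.T @ residuals) / (2 * n))

def mse_cost_d(xs: np.ndarray, ys: np.ndarray, thetas: np.ndarray) -> np.ndarray:
    n = len(ys)
    # Shape (n_features, 1); the caller transposes it back to (1, n_features).
    return (xs.T @ (xs @ thetas.T - ys)) / n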
def predict(self, xs: np.ndarray, norms=None):
    # Optionally rescale the input with the norms supplied by the caller
    # before adding the bias column and scoring it.
    if norms is not None:
        xs = normalize_by(xs.T, norms=norms)
    xs = add_ones_column(xs)
    return linear_h(xs, self.thetas).item()
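# `normalize_by` is also external; a plausible sketch that rescales each
# feature by precomputed (mean, std) pairs. The signature and the shape
# of `norms` are assumptions:
def normalize_by(xs: np.ndarray, norms) -> np.ndarray:
    means, stds = norms
    return (xs - means) / stds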
def partial(xs, thetas):
    # Standalone helper: transpose the sample into a row, add the bias
    # column, and return the logistic score as a plain float.
    xs = add_ones_column(xs.T)
    return logistic_h(xs, thetas).item()
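# `linear_h` and `logistic_h` are the two hypothesis functions. A minimal
# sketch under the same theta layout — assumed, not confirmed by the source:
def linear_h(xs: np.ndarray, thetas: np.ndarray) -> np.ndarray:
    return xs @ thetas.T                             # (n, 1) scores

def logistic_h(xs: np.ndarray, thetas: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-(xs @ thetas.T)))    # sigmoid of the score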
def predict(self, xs: np.ndarray):
    xs = add_ones_column(xs)
    return self.optimizer.h(xs, self.thetas)
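# A tiny smoke test wiring the sketched helpers together with a bare
# gradient-descent loop — an illustration with made-up data, not the
# repo's entry point:
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    xs = rng.normal(size=(100, 1))
    ys = 3.0 * xs + 1.0 + 0.1 * rng.normal(size=(100, 1))
    xb = add_ones_column(xs)                # (100, 2) with bias column
    thetas = np.zeros((1, 2))
    for _ in range(500):
        thetas -= 0.1 * mse_cost_d(xb, ys, thetas).T
    print(thetas)                           # should approach [[1.0, 3.0]]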