Code Example #1
def _gradient_descent(self, X, y):
    # Batch gradient descent: append the bias column to X, start from
    # all-one weights, and take _max_iters steps.
    # Assumes `import numpy as np` and the add_intercept helper sketched below.
    X = add_intercept(X, 1)
    self.w = np.ones((1, self.n_features + 1))
    for it in range(self._max_iters):
        grad = self._cost(X, y, self.w)  # gradient of the cost at the current w
        # Step size: base rate _eta plus a 1 / (2 + it) decay term.
        self.w -= (self._eta + 1 / (2 + it)) * grad
        # print("iter =", it, "w =", self.w, "grad =", grad)
    self.w = self.w.flatten()
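Every snippet here calls an add_intercept helper that is not shown. The following is a minimal sketch of what it would need to do, assuming its second argument is the constant used to fill the appended column (1 for the bias column of X, 0 for the initial bias weight of w):

import numpy as np

def add_intercept(A, value):
    # Hypothetical reconstruction of the unshown helper: append one
    # constant column to A. add_intercept(X, 1) adds the constant-1
    # bias column; add_intercept(w, 0) extends the weight row with a
    # zero initial bias weight.
    col = np.full((A.shape[0], 1), float(value))
    return np.hstack([A, col])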
Code Example #2
def _stochastic_gradient_descent(self, X, y):
    # Perceptron-style training: repeatedly find a misclassified sample
    # and update w until none remain or _max_iters is reached
    # (a falsy _max_iters means no iteration limit).
    step = 0
    self.n_features = X.shape[1]
    self.n_samples = X.shape[0]
    self.w = np.zeros((1, self.n_features))
    self.w = add_intercept(self.w, 0)  # extend w with a zero bias weight
    X = add_intercept(X, 1)            # append the constant-1 bias column to X

    while not self._max_iters or step < self._max_iters:
        step += 1
        update_pos = self.classify(X, y)  # index of a misclassified sample, or -1
        if update_pos >= 0:
            self.update(X, y, update_pos)
            print("Iteration {0}, Update Pos: {1}, w: {2}".format(
                step, update_pos, self.w))
        else:
            break  # every sample is classified correctly
    return self.w
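The loop depends on classify and update methods that are not included in these snippets. A plausible sketch, assuming the standard perceptron rule (classify returns the index of one misclassified sample or -1, and update moves w toward that sample using the _eta rate seen in Example #1):

import numpy as np

def classify(self, X, y):
    # Hypothetical: return the index of the first sample whose label
    # disagrees with the sign of its linear score, or -1 if none does.
    scores = np.dot(X, self.w.T).flatten()
    misclassified = np.where(y * scores <= 0)[0]
    return misclassified[0] if misclassified.size else -1

def update(self, X, y, pos):
    # Standard perceptron step: w <- w + eta * y_i * x_i.
    self.w += self._eta * y[pos] * X[pos]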
Code Example #3
def predict(self, X):
    # Append the bias column, then return the sign of each sample's
    # linear score as its predicted class label.
    return np.sign(np.dot(add_intercept(X, 1), self.w.T)).flatten()
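For a concrete picture of what this one-liner computes, here is a standalone run with hypothetical weights and samples (the hstack line mimics add_intercept(X, 1)):

import numpy as np

w = np.array([0.5, -0.3, 0.2])           # hypothetical weights; last entry is the bias
X = np.array([[1.0, 2.0], [-3.0, 1.0]])  # two samples with two features each
X_aug = np.hstack([X, np.ones((2, 1))])  # append the constant-1 bias column
print(np.sign(np.dot(X_aug, w.T)))       # -> [ 1. -1.]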
Code Example #4
def predict(self, X):
    # Logistic-regression prediction: squash the linear scores through
    # the sigmoid to get probabilities ("sigmod" in the original is a typo).
    X = add_intercept(X, 1)
    s = np.dot(self.w, X.T)
    return self.sigmoid(s)
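The sigmoid method itself is not defined in these snippets; a minimal sketch of the logistic function it presumably implements:

import numpy as np

def sigmoid(self, s):
    # Logistic function: map raw scores to probabilities in (0, 1).
    return 1.0 / (1.0 + np.exp(-s))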