Code Example #1
File: logreg.py  Project: get9/ml-test
    def __init__(self, bias=False, learn_rate=None, convergence=None, regularization=None):
        # Assumes: import numpy as np, plus sigmoid and log_likelihood helpers.
        self.ws = np.array([])          # weight vector, filled in by training
        self.bias = bias                # whether to include a bias/intercept term
        self.learn_rate = learn_rate    # gradient-descent step size
        self.convergence = convergence  # convergence threshold for training
        self.l = regularization         # regularization strength
        self.hfunc = lambda x, y: sigmoid(x @ y)  # hypothesis: sigmoid of the linear score
        self.costfunc = log_likelihood            # cost function: log-likelihood
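Both snippets reference a sigmoid helper, and the constructor wires log_likelihood in as the cost function; neither is shown here. A minimal sketch of what these helpers might look like (the signatures are assumptions matching the identifiers above, not confirmed against the project):

    import numpy as np

    def sigmoid(z):
        # Logistic function; scipy.special.expit is a numerically
        # more robust drop-in replacement.
        return 1.0 / (1.0 + np.exp(-z))

    def log_likelihood(xs, ys, ws):
        # Bernoulli log-likelihood of labels ys in {0, 1} under a
        # logistic model with weights ws (signature assumed).
        scores = xs @ ws
        return np.sum(ys * scores - np.log(1.0 + np.exp(scores)))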
Code Example #2
File: gradient.py  Project: get9/ml-test
    def optimize(self, xs, ys):
        # Assumes: import numpy as np and from scipy.special import expit.
        m, n = xs.shape
        ws = np.zeros(n)  # start from the zero weight vector
        iters = 0

        # Run gradient descent for a fixed number of iterations; a convergence
        # test such as np.linalg.norm(expit(xs @ ws) - ys) > self.convergence
        # could be added to the loop condition.
        while iters < self.max_iters:
            # Gradient of the negative log-likelihood plus an L2 penalty term.
            grad = (expit(xs @ ws) - ys) @ xs + self.r * ws
            ws -= self.alpha * grad
            iters += 1

        print('Gradient descent finished: {} iters, residual norm = {}'.format(
            iters, np.linalg.norm(expit(xs @ ws) - ys)))
        return ws
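A quick usage sketch on synthetic data. The class name GradientDescent and its constructor arguments are assumptions for illustration; the method itself only requires that self.max_iters, self.alpha, and self.r be set:

    import numpy as np
    from scipy.special import expit
    from gradient import GradientDescent  # hypothetical class name

    rng = np.random.default_rng(0)
    xs = rng.normal(size=(200, 3))        # 200 samples, 3 features
    true_ws = np.array([1.5, -2.0, 0.5])
    # Draw Bernoulli labels from the logistic model with the true weights.
    ys = (rng.random(200) < expit(xs @ true_ws)).astype(float)

    opt = GradientDescent(max_iters=5000, alpha=0.01, r=0.1)  # assumed constructor
    ws = opt.optimize(xs, ys)  # learned weights, roughly aligned with true_ws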