Example #1
File: logreg.py Project: get9/ml-test
    def __init__(self, bias=False, learn_rate=None, convergence=None, regularization=None):
        # Requires `import numpy as np` at module level; sigmoid() and
        # log_likelihood() are helpers defined elsewhere in logreg.py.
        self.ws = np.array([])              # weight vector, filled in by training
        self.bias = bias                    # whether to include an intercept term
        self.learn_rate = learn_rate        # gradient-descent step size
        self.convergence = convergence      # stopping threshold for training
        self.l = regularization             # regularization coefficient
        self.hfunc = lambda x, y: sigmoid(x @ y)   # hypothesis: P(y=1 | x, w)
        self.costfunc = log_likelihood
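
The constructor references two module-level helpers that the snippet does not show. Here is a plausible sketch of what they compute, given how they are used above; these definitions (and the argument list of log_likelihood in particular) are assumptions, not the actual code from get9/ml-test:

import numpy as np

def sigmoid(z):
    # Logistic function; numerically equivalent to scipy.special.expit.
    return 1.0 / (1.0 + np.exp(-z))

def log_likelihood(xs, ys, ws):
    # Bernoulli log-likelihood of labels ys under predictions sigmoid(xs @ ws).
    # The signature here is a guess; only the name appears in the snippet.
    p = sigmoid(xs @ ws)
    return np.sum(ys * np.log(p) + (1 - ys) * np.log(1 - p))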
Example #2
File: gradient.py Project: get9/ml-test
    def optimize(self, xs, ys):
        # Requires `import numpy as np` and `from scipy.special import expit`
        # (scipy's sigmoid) at module level.
        _, n = xs.shape
        ws = np.zeros(n)
        iters = 0

        # The residual-norm convergence test is disabled; only the iteration
        # cap terminates the loop.
        while iters < self.max_iters:  # and np.linalg.norm(expit(xs @ ws) - ys) > self.convergence:
            # Gradient of the regularized cost: prediction error projected
            # onto the inputs, plus the L2 penalty term self.r * ws.
            grad = (expit(xs @ ws) - ys) @ xs + self.r * ws
            ws -= self.alpha * grad
            iters += 1

        print('Gradient descent finished: {} iters, cost = {}'.format(
            iters, np.linalg.norm(expit(xs @ ws) - ys)))
        return ws
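
To see optimize() run end to end, here is a minimal, self-contained harness. The class name GradientDescent and the constructor defaults are hypothetical; only the attribute names max_iters, alpha, and r are implied by the method body:

import numpy as np
from scipy.special import expit

class GradientDescent:
    # Hypothetical wrapper: only max_iters, alpha, and r are implied
    # by the optimize() method shown above.
    def __init__(self, max_iters=500, alpha=0.1, r=0.01):
        self.max_iters = max_iters   # hard iteration cap
        self.alpha = alpha           # learning rate (step size)
        self.r = r                   # regularization coefficient

    def optimize(self, xs, ys):
        _, n = xs.shape
        ws = np.zeros(n)
        for _ in range(self.max_iters):
            grad = (expit(xs @ ws) - ys) @ xs + self.r * ws
            ws -= self.alpha * grad
        return ws

# Two linearly separable clusters: the learned weights should assign
# low probabilities to the first pair and high ones to the second.
xs = np.array([[0.1, 0.9], [0.2, 1.0], [1.0, 0.2], [0.9, 0.1]])
ys = np.array([0.0, 0.0, 1.0, 1.0])
ws = GradientDescent().optimize(xs, ys)
print(expit(xs @ ws))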