Code example #1
    def fit(self, X_train, y_train, max_iter=100, tol=1e-3):
        X = np.mat(X_train.copy())  # convert to NumPy matrix
        y = np.mat(y_train.copy()).transpose()  # convert to NumPy matrix

        # relabel -1 as 0 if present
        y[y == -1] = 0

        m, n = np.shape(X)

        # add logitR to verify the correctness
        # from sklearn.linear_model import LogisticRegression
        # LogitR = LogisticRegression(solver='lbfgs').fit(X, np.array(y).ravel())
        # w1 = LogitR.coef_; b1 = LogitR.intercept_
        # w1 = w1.reshape(-1); b1 = b1[0]
        #
        X = np.column_stack((X, np.ones((m, 1))))

        # initialize weights, with the bias folded in as the last entry
        w = np.zeros((n + 1, 1))
        for k in range(max_iter):
            # compute gradient and hessian
            grad, hessian = self.delta(w, X, y)
            # compute newton direction
            d, _ = cg(hessian, grad)  # cg returns (x, info); keep only the solution
            d = d.reshape(-1, 1)
            # update w
            w = w - d
            if np.linalg.norm(grad) < tol:
                break

        #if k == max_iter - 1:
        #    print('convergence fail, the current norm of gradient is {}'.format(
        #        np.linalg.norm(grad)))

        w = np.array(w).flatten()
        b = w[-1]  # last entry is the bias term from the appended ones column
        w = w[:-1]

        # print(np.linalg.norm(w1-w), b, b1)

        clf = Clf(w, b)
        return clf
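
The `delta` helper that supplies the gradient and Hessian is defined elsewhere in the project. A minimal sketch of what it plausibly computes for the logistic loss, assuming X already carries the trailing ones column and y holds {0, 1} labels as in fit() above:

    import numpy as np

    def delta(w, X, y):
        X = np.asarray(X)
        y = np.asarray(y).reshape(-1, 1)
        # sigmoid predictions for the current weights, shape (m, 1)
        p = 1.0 / (1.0 + np.exp(-(X @ w)))
        # gradient of the negative log-likelihood: X^T (p - y)
        grad = X.T @ (p - y)
        # Hessian X^T diag(p * (1 - p)) X, built without forming the m x m diagonal
        s = (p * (1.0 - p)).ravel()
        hessian = X.T @ (X * s[:, None])
        return grad, hessian

Under this shape convention the CG solve above receives an (n+1) x (n+1) system with an (n+1) x 1 right-hand side.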
Code example #2
    def fit(self, X_train, y_train, step_size=0.01, max_iter=100, tol=1e-3):
        X = np.mat(X_train.copy())  # convert to NumPy matrix
        y = np.mat(y_train.copy()).transpose()  # convert to NumPy matrix

        # relabel -1 as 0 if present
        y[y == -1] = 0

        m, n = np.shape(X)
        
        # add logitR to verify the correctness
        # from sklearn.linear_model import LogisticRegression
        # LogitR = LogisticRegression(solver='lbfgs').fit(X, np.array(y).ravel())
        # w1 = LogitR.coef_; b1 = LogitR.intercept_
        # w1 = w1.reshape(-1); b1 = b1[0]

        # add bias term $b$
        X = np.column_stack((X, np.ones((m, 1))))

        # initialization for Nesterov accelerated gradient descent
        w = np.ones((n + 1, 1))
        l = 1
        for k in range(max_iter):  # heavy on matrix operations
            z = w  # remember the previous iterate for the stopping test
            w, l = backtracking(l, w, X, y)
            if np.linalg.norm(z - w) < tol:  # stop once the update stalls
                break
                
        #if k == max_iter - 1:
            #print('convergence fail, the current norm of gradient is {}'.format(
                #np.linalg.norm(z-w)))

        w = np.array(w).flatten()
        b = w[-1]
        w = w[:-1]

        # print(np.linalg.norm(w1-w), b, b1)

        clf = Clf(w, b)
        # w: length-n weight vector, b: scalar bias
        return clf
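
`backtracking` is another project helper not shown in this snippet. A plausible sketch, assuming it takes one gradient step with an Armijo backtracking line search over the project's `eval` objective and `gradient` (any Nesterov momentum bookkeeping would also live inside it):

    def backtracking(l, w, X, y, beta=0.5):
        g = gradient(w, X, y)          # current gradient
        f0 = eval(w, X, y)             # objective at the current iterate
        # shrink the step until the Armijo sufficient-decrease test passes
        while eval(w - l * g, X, y) > f0 - 0.5 * l * np.linalg.norm(g) ** 2:
            l *= beta
        return w - l * g, l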
Code example #3
    def fit(self, X_train, y_train, step_size=0.01, max_iter=100, tol=1e-3):
        X = np.mat(X_train.copy())  # convert to NumPy matrix
        y = np.mat(y_train.copy()).transpose()  # convert to NumPy matrix

        # relabel -1 as 0 if present
        y[y == -1] = 0

        m, n = np.shape(X)

        # add logitR to verify the correctness
        from sklearn.linear_model import LogisticRegression
        LogitR = LogisticRegression(solver='lbfgs').fit(X, np.array(y).ravel())
        w1 = LogitR.coef_
        b1 = LogitR.intercept_
        w1 = w1.reshape(-1)
        b1 = b1[0]

        # add bias term $b$
        X = np.column_stack((X, np.ones((m, 1))))

        # initial guess for BFGS
        w = np.ones((n + 1, 1))

        # objective and gradient closures over the training data; note that the
        # project's `eval` loss function shadows Python's builtin eval here
        fun = lambda w: eval(w, X, y)
        grad = lambda w: gradient(w, X, y)

        w = BFGS(fun, grad, w)


        w = np.array(w).flatten()
        b = w[-1]
        w = w[:-1]

        #print(np.linalg.norm(w1-w), b, b1)

        clf = Clf(w, b)
        # w: length-n weight vector, b: scalar bias
        return clf
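
`BFGS`, `eval`, and `gradient` are project helpers that do not appear in this snippet. A stand-in with the same call signature, built on scipy.optimize (the shapes are an assumption inferred from how fit() uses the result):

    from scipy.optimize import minimize

    def BFGS(fun, grad, w0):
        # flatten to the 1-D vector scipy expects, then restore the column shape
        w0 = np.asarray(w0).ravel()
        res = minimize(lambda v: float(fun(v.reshape(-1, 1))),
                       w0,
                       jac=lambda v: np.asarray(grad(v.reshape(-1, 1))).ravel(),
                       method='BFGS')
        return res.x.reshape(-1, 1)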
Code example #4
    def fit(self, X, y):
        y[y == 0] = -1  # SVM expects labels in {-1, +1}
        # add logitR to verify the correctness
        #from sklearn.svm import LinearSVC
        #SVM = LinearSVC(loss='hinge', tol=1e-6, max_iter=100000, verbose=1).fit(X, np.array(y).ravel())
        #w1 = SVM.coef_; b1 = SVM.intercept_
        #w1 = w1.reshape(-1); b1 = b1[0]

        #import time
        #t1 = time.time()
        w, b = admm(X, y)
        #t2 = time.time()
        #print('time:', t2-t1)

        #print('diff', np.linalg.norm(w1-w), b, b1)

        clf = Clf(w, b)
        return clf
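
`admm(X, y)` is the project's ADMM solver for the linear soft-margin SVM and is not shown here. A hedged usage sketch of the fitted classifier, assuming `Clf` exposes the weights and bias as `.w` and `.b` and `model` is an instance of the enclosing class:

    clf = model.fit(X, y)
    pred = np.sign(X @ clf.w + clf.b)   # linear SVM decision rule
    print('train accuracy:', np.mean(pred == y))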
Code example #5
    def fit(self, X, y):
        m, n = X.shape
        X = np.column_stack((X, np.ones((m, 1))))
        y = y.astype(np.float64)
        data_num = len(y)
        C = 1.0
        kernel = np.dot(X, np.transpose(X))  # linear kernel Gram matrix
        p = np.matrix(np.multiply(kernel, np.outer(y, y))) \
            + np.diag(np.ones(data_num, np.float64)) * .5 / C
        q = np.matrix(-np.ones([data_num, 1], np.float64))

        bounds = (0, np.inf)
        alpha_svs = projected_apg(p, q, bounds)

        # p = matrix(p)
        # q = matrix(q)

        # g = matrix(-np.eye(data_num))
        # h = matrix(np.zeros([data_num, 1], np.float64))

        # solvers.options['show_progress'] = False
        # sol = solvers.qp(p, q, g, h)
        # alpha_svs1 = np.array(sol['x'])

        # print(np.linalg.norm(alpha_svs1 - alpha_svs))
        # # alpha_svs = alpha_svs1

        # recover w = sum_i alpha_i * y_i * x_i from the dual solution
        y1 = np.reshape(y, (-1, 1))
        alpha1 = alpha_svs
        lambda1 = np.multiply(y1, alpha1)
        w = np.dot(X.T, lambda1)
        w = np.array(w).reshape(-1)
        # b = np.mean(y1-np.reshape(np.dot(w, np.transpose(X)), [-1, 1]))
        b = w[n]  # last entry is the bias (ones column appended above)
        w = w[0:n]

        clf = Clf(w, b)
        return clf
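
`projected_apg(p, q, bounds)` is assumed to minimize the dual objective 0.5 * a'Pa + q'a over the box `bounds`. A minimal sketch of such a solver, written here as FISTA-style accelerated projected gradient with a fixed 1/L step (an assumption, not the project's actual implementation):

    def projected_apg(p, q, bounds, max_iter=1000, tol=1e-6):
        p = np.asarray(p)
        q = np.asarray(q).reshape(-1, 1)
        lo, hi = bounds
        a = np.zeros_like(q)
        z, t = a.copy(), 1.0
        L = np.linalg.eigvalsh(p)[-1]   # Lipschitz constant of the gradient Pa + q
        for _ in range(max_iter):
            a_new = np.clip(z - (p @ z + q) / L, lo, hi)   # gradient step, then project
            t_new = (1.0 + np.sqrt(1.0 + 4.0 * t * t)) / 2.0
            z = a_new + ((t - 1.0) / t_new) * (a_new - a)  # Nesterov extrapolation
            if np.linalg.norm(a_new - a) < tol:
                return a_new
            a, t = a_new, t_new
        return a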
Code example #6
    def fit(self, X, y):
        y[y == 0] = -1
        # add logitR to verify the correctness
        #from sklearn.svm import LinearSVC
        #SVM = LinearSVC(loss='hinge', tol=1e-6, max_iter=100000, verbose=1).fit(X, np.array(y).ravel())
        #w1 = SVM.coef_; b1 = SVM.intercept_
        #w1 = w1.reshape(-1); b1 = b1[0] 
        #       
        m, n = X.shape
        #import time
        #t1 = time.time()
        w = inner_point(X, y)
        #t2 = time.time()
        #print(t2-t1, 's')
        w = np.array(w).reshape(-1)

        # b = np.mean(y1-np.reshape(np.dot(w, np.transpose(X)), [-1, 1]))
        b = w[n]
        w = w[0:n]

        #print('diff', np.linalg.norm(w1-w), b, b1)

        clf = Clf(w, b)
        return clf
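
`inner_point(X, y)` is the project's interior-point SVM solver; judging from the slicing above, it returns w with the bias appended as the last entry. A hedged sanity check that mirrors the commented-out verification block:

    from sklearn.svm import LinearSVC

    ref = LinearSVC(loss='hinge', tol=1e-6, max_iter=100000).fit(X, np.array(y).ravel())
    w1, b1 = ref.coef_.reshape(-1), ref.intercept_[0]
    print('diff:', np.linalg.norm(w1 - w), 'b:', b, 'b1:', b1)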
Code example #7
    def fit(self, dataMatIn, classLabels):
        # simplified SMO with C=0.6, tolerance 0.001, and at most 40 iterations
        b, alphas = smoSimple(dataMatIn, classLabels, 0.6, 0.001, 40)
        # recover the primal weight vector from the dual variables
        w = get_w(dataMatIn, classLabels, alphas)
        clf = Clf(w, b)
        return clf
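
`get_w` recovers the primal weights from the dual solution through the standard identity w = sum_i alpha_i * y_i * x_i. A minimal sketch, assuming row-wise samples and {-1, +1} labels:

    import numpy as np

    def get_w(dataMatIn, classLabels, alphas):
        X = np.asarray(dataMatIn, dtype=float)
        y = np.asarray(classLabels, dtype=float).reshape(-1, 1)
        a = np.asarray(alphas, dtype=float).reshape(-1, 1)
        # only support vectors (alpha_i > 0) contribute to the sum
        return (X * (a * y)).sum(axis=0)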
Code example #8
    def fit(self, X, y, w=None):
        y[y == 0] = -1
        # add logitR to verify the correctness
        from sklearn.svm import LinearSVC
        SVM = LinearSVC(loss='hinge', tol=1e-6, max_iter=100000,
                        verbose=0).fit(X,
                                       np.array(y).ravel())
        w1 = SVM.coef_
        b1 = SVM.intercept_
        w1 = w1.reshape(-1)
        b1 = b1[0]

        #### alternative: solve the dual with cvxopt's solvers.qp
        # m, n = X.shape
        # X = np.column_stack((X, np.ones((m, 1))))
        # y = y.astype(np.float64)
        # data_num = len(y)
        # C = 1.0
        # kernel = np.dot(X, np.transpose(X))
        # p = np.matrix(np.multiply(kernel, np.outer(y, y)), np.float64)
        # q = np.matrix(-np.ones([data_num, 1], np.float64))
        # p = p / np.linalg.norm(q); q = q / np.linalg.norm(q);
        # p = matrix(p); q = matrix(q);
        # g_1 = -np.eye(data_num)
        # h_1 = np.zeros([data_num, 1], np.float64)

        # g_2 = np.eye(data_num)
        # h_2 = np.zeros([data_num, 1], np.float64) + C

        # g = matrix(np.vstack((g_1, g_2)))
        # h = matrix(np.vstack((h_1, h_2)))
        # solvers.options['show_progress'] = False
        # solvers.options['abstol'] = 1e-10
        # solvers.options['reltol'] = 1e-10
        # solvers.options['feastol'] = 1e-10
        # solvers.options['maxiters'] = 1000
        # sol = solvers.qp(p, q, g, h)
        # alpha_svs = np.array(sol['x'])
        # x = np.mat(alpha_svs)
        # print(np.sum(alpha_svs>1e-5))

        # dual = -(0.5*x.T*(p*x) + q.T*x)
        # dual = dual.item()
        # y1 = np.reshape(y, (-1, 1))
        # lambda1 = np.multiply(x, y1)
        # w = np.dot(X.T, lambda1)
        # w = np.matrix(w).reshape(-1, 1)
        # tmp = np.maximum(1-np.multiply(y1, X*w),0)
        # primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        # primal = primal.item()
        # print('cvx:', dual, primal)
        # w = np.array(w).reshape(-1)
        # w = w[0:w.shape[0]-1]
        # b = w[-1]

        w, b = projected_apg(X, y)

        #print('diff', np.linalg.norm(w1-w), b, b1)

        clf = Clf(w, b)
        return clf
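
Note that this `projected_apg` differs from the one in code example #5: it takes the raw data and returns the primal pair (w, b) directly. A hypothetical objective consistent with such a gradient-based primal solver, using the squared hinge so the loss stays differentiable (the project's actual loss may be the plain hinge handled by a proximal step):

    def primal_objective(w, b, X, y, C=1.0):
        # 0.5 * ||w||^2 + C * sum_i max(0, 1 - y_i * (x_i . w + b))^2
        margins = np.maximum(0.0, 1.0 - np.asarray(y).ravel() * (X @ w + b))
        return 0.5 * float(np.dot(w, w)) + C * float(np.sum(margins ** 2))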