Code example #1
    def __init__(self, kernel=None, **kwargs):

        super(KernelFunction, self).__init__(**kwargs)

        # Fall back to a linear kernel when none is given. The import is
        # local to the method, presumably to avoid a circular import at
        # module load time.
        if kernel is None:
            import parsimony.algorithms.utils as alg_utils
            self.kernel = alg_utils.LinearKernel()
        else:
            self.kernel = kernel
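
The constructor falls back to a plain linear kernel when none is supplied. A minimal sketch of that default, assuming the kernel object can be called directly on two column vectors (the call form is an assumption here, not something shown in the excerpt):

    import numpy as np
    import parsimony.algorithms.utils as alg_utils

    # A linear kernel is just the inner product, K(x, y) = x.T y.
    # Calling the kernel on two explicit vectors is an assumption here.
    K = alg_utils.LinearKernel()
    x = np.array([[1.0], [2.0]])
    y = np.array([[3.0], [4.0]])
    print(K(x, y))  # expected: [[11.]]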
Code example #2
    def __init__(self,
                 C,
                 kernel=utils.LinearKernel(),
                 eps=1e-4,
                 max_iter=consts.MAX_ITER,
                 min_iter=1,
                 info=[]):
        # Note that the kernel and info defaults are evaluated once, at
        # class-definition time, so the same LinearKernel instance and
        # the same list object are shared by every call that uses them.

        super(SequentialMinimalOptimization, self).__init__(kernel=kernel,
                                                            info=info)

        # Out-of-range arguments are clamped rather than rejected: C must
        # be non-negative, eps at least machine epsilon, min_iter at least
        # one, and max_iter at least min_iter.
        self.C = max(0, float(C))
        self.eps = max(consts.FLOAT_EPSILON, float(eps))
        self.min_iter = max(1, int(min_iter))
        self.max_iter = max(self.min_iter, int(max_iter))
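
Every numeric argument is clamped into a valid range rather than rejected. A small sketch of the resulting behaviour, using the import path from code example #3 (the argument values are made up for illustration):

    import parsimony.algorithms.algorithms as alg

    smo = alg.SequentialMinimalOptimization(C=-1.0, eps=0.0,
                                            max_iter=5, min_iter=10)
    print(smo.C)         # 0.0: a negative C is clamped to zero
    print(smo.eps)       # machine epsilon, since eps=0.0 is too small
    print(smo.max_iter)  # 10: raised to min_iter when max_iter < min_iter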
Code example #3
    def test_SVM(self):

        np.random.seed(42)

        # Two Gaussian clusters of 50 points each in 2D, centred at
        # (0.25, 0.25) and (0.75, 0.75).
        n = 100
        X = np.vstack([
            0.2 * np.random.randn(int(n / 2), 2) + 0.25,
            0.2 * np.random.randn(int(n / 2), 2) + 0.75
        ])
        # X_1 prepends a constant -1 column that the logistic-regression
        # baseline below uses as an unpenalised intercept.
        X_1 = np.hstack((-np.ones((X.shape[0], 1)), X))
        # Labels: -1 for the first cluster, +1 for the second.
        y = np.vstack(
            [1 * np.ones((int(n / 2), 1)), 3 * np.ones((int(n / 2), 1))]) - 2

        import parsimony.algorithms.algorithms as alg

        # Run SMO with a linear kernel precomputed on X, recording the
        # objective value at every iteration.
        info = [utils.Info.func_val]
        K = utils.LinearKernel(X=X)
        smo = alg.SequentialMinimalOptimization(1.0,
                                                kernel=K,
                                                info=info,
                                                max_iter=100)
        w = smo.run(X, y)

        # Check that the objective value is never increasing from one
        # iteration to the next:
        f = smo.info_get("func_val")
        fdiff = np.array(f[1:]) - np.array(f[:-1])

        assert np.sum(fdiff > 0) == 0
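
The check relies on a property of SMO itself: each step solves the dual problem exactly over a single pair of multipliers while all others are held fixed, so the recorded objective value can only improve or stay flat between consecutive iterations.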

        from parsimony.estimators import RidgeLogisticRegression
        from parsimony.estimators import SupportVectorMachine
        from parsimony.algorithms.gradient import AcceleratedGradientDescent

        # Fit a ridge (L2-penalised) logistic regression as a baseline;
        # penalty_start=1 leaves the intercept column of X_1 unpenalised.
        lr = RidgeLogisticRegression(
            1.0,
            algorithm=AcceleratedGradientDescent(max_iter=100),
            mean=False,
            penalty_start=1)
        params = lr.fit(X_1, y).parameters()
        w_lr = params["beta"]

        # Both methods fit a linear decision boundary to well-separated
        # clusters, so their weight vectors should agree up to scale.
        # Compare the normalised directions, dropping the intercept
        # coefficient from the logistic-regression weights.
        n_w = w / np.linalg.norm(w)
        n_w_lr = w_lr[1:, :] / np.linalg.norm(w_lr[1:, :])
        assert np.linalg.norm(n_w - n_w_lr) < 0.065

        # Wrap the already-configured SMO algorithm in the high-level
        # SupportVectorMachine estimator and refit through that interface.
        smo = SupportVectorMachine(1.0, kernel=K, algorithm=smo)
        smo.fit(X, y)
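
Once fitted, the estimator should behave like the other parsimony estimators. A hedged sketch of prediction; the predict method is an assumption by analogy with those estimators, not something shown in the excerpt:

    # predict() is assumed here, not shown in the excerpt above.
    y_pred = smo.predict(X)
    print(np.mean(y_pred == y))  # fraction of correctly classified points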