Example #1
    def solve(self, sample, c_value, kernel):
        r"""
        Solve the variable-quality SVM classification optimization problem
        corresponding to the supplied sample, according to specified value for
        tradeoff constant `C` and kernel `k`.

        INPUT:

        - ``sample`` -- list or tuple of ``AccuracyExample`` instances whose
          labels are all set either to `1` or `-1`.

        - ``c_value`` -- value for the tradeoff constant `C`: a float selects
          the soft-margin version of the algorithm, while ``None`` selects the
          original hard-margin formulation.

        - ``kernel`` -- ``Kernel`` instance defining the kernel to be used.

        OUTPUT:

        list of float values -- optimal values for the optimization problem.

        EXAMPLES:

        Consider the following representation of the AND binary function, and a
        default instantiation for ``CVXOPTVQClassificationSolver``:

        ::

            >>> from yaplf.data import LabeledExample, AccuracyExample
            >>> and_sample = [AccuracyExample(LabeledExample((1, 1), 1), 0),
            ... AccuracyExample(LabeledExample((0, 0), -1), 0),
            ... AccuracyExample(LabeledExample((0, 1), -1), 1),
            ... AccuracyExample(LabeledExample((1, 0), -1), 0)]
            >>> from yaplf.algorithms.svm.solvers \
            ... import CVXOPTVQClassificationSolver
            >>> s = CVXOPTVQClassificationSolver()

        Once the solver instance is available, it is possible to invoke its
        ``solve`` function, specifying a labeled sample such as ``and_sample``,
        a positive value for the constant ``c_value`` and a kernel instance in
        order to get the solution of the corresponding SV classification
        optimization problem:

        ::

            >>> from yaplf.models.kernel import LinearKernel
            >>> s.solve(and_sample, 2, LinearKernel())
            [2, 0.0, 2, 0.0]

        The value for ``c_value`` can be set to ``None``, in order to build and
        solve the original optimization problem rather than the soft-margin
        formulation; analogously, a different kernel can be used as argument to
        the solver:

        ::

            >>> from yaplf.models.kernel import PolynomialKernel
            >>> s.solve(and_sample, None, PolynomialKernel(3))
            [0.15135135150351597, 0.0, 0.097297297016552056, 0.054054053943170456]

        Note however that this class should never be used directly. It is
        automatically used by ``SVMVQClassificationAlgorithm``.

        AUTHORS:

        - Dario Malchiodi (2010-04-12)

        """

        # cvxopt solves the problem
        # min 1/2 x' Q x + p' x
        # subject to G x <= h and A x = b
        # dict below is mapped to the above symbols as follows:
        # problem["obj_quad"] -> Q
        # problem["obj_lin"] -> p
        # problem["ineq_coeff"] -> G
        # problem["ineq_const"] -> h
        # problem["eq_coeff"] -> A
        # problem["eq_const"] -> b

        solvers.options['show_progress'] = self.verbose
        solvers.options['maxiters'] = self.max_iterations
        solvers.options['solver'] = self.solver

        # coercion to float in the following assignment is required
        # in order to work with the Sage notebook

        num_examples = len(sample)
        problem = {}

        problem["obj_quad"] = cvxopt_matrix([[ \
            float(elem_i.example.label * elem_j.example.label * \
            (kernel.compute(elem_i.example.pattern, elem_j.example.pattern) - \
            elem_i.example.label * elem_j.example.label * (elem_i.accuracy + \
            elem_j.accuracy))) for elem_i in sample] for elem_j in sample])
        problem["obj_lin"] = cvxopt_matrix([-1.0 for i in range(num_examples)])

        if c_value is None:
            problem["ineq_coeff"] = cvxopt_matrix([
                [float(-1.0 * kronecker_delta(i, j))
                for i in range(num_examples)] +
                [float(-1.0 * sample[j].accuracy)]
                for j in range(num_examples)])
            problem["ineq_const"] = cvxopt_matrix(
                [float(0.0)] * num_examples + [float(1.0 - self.epsilon)])
        else:
            problem["ineq_coeff"] = cvxopt_matrix([
                [float(-1.0 * kronecker_delta(i, j))
                for i in range(num_examples)] +
                [float(kronecker_delta(i, j))
                for i in range(num_examples)] +
                [float(-1.0 * sample[j].accuracy)]
                for j in range(num_examples)])
            problem["ineq_const"] = cvxopt_matrix([float(0.0)] * num_examples +
                [float(c_value)] * num_examples + [float(1.0 - self.epsilon)])

        problem["eq_coeff"] = cvxopt_matrix([float(elem.example.label)
            for elem in sample], (1, num_examples))
        problem["eq_const"] = cvxopt_matrix(0.0)
        sol = solvers.qp(problem["obj_quad"], problem["obj_lin"],
            problem["ineq_coeff"], problem["ineq_const"],
            problem["eq_coeff"], problem["eq_const"])

        return [chop(x, right=c_value) for x in list(sol['x'])]
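
The dict built above is handed to cvxopt's quadratic programming routine. As a
rough standalone sketch (not yaplf code; a toy two-variable problem rather than
the SVM dual), the same Q/p/G/h/A/b mapping can be exercised directly, assuming
cvxopt is installed:

    # Hedged sketch of the generic cvxopt call that solve() assembles:
    # minimize 1/2 x'Qx + p'x subject to Gx <= h and Ax = b.
    # Toy problem: minimize 1/2 (x1^2 + x2^2) with x1 + x2 = 1 and x >= 0,
    # whose solution is approximately (0.5, 0.5).
    from cvxopt import matrix as cvxopt_matrix
    from cvxopt import solvers

    quad = cvxopt_matrix([[1.0, 0.0], [0.0, 1.0]])           # Q
    lin = cvxopt_matrix([0.0, 0.0])                          # p
    ineq_coeff = cvxopt_matrix([[-1.0, 0.0], [0.0, -1.0]])   # G (encodes x >= 0)
    ineq_const = cvxopt_matrix([0.0, 0.0])                   # h
    eq_coeff = cvxopt_matrix([1.0, 1.0], (1, 2))             # A (x1 + x2 ...)
    eq_const = cvxopt_matrix(1.0)                            # b (... = 1)

    solvers.options['show_progress'] = False
    sol = solvers.qp(quad, lin, ineq_coeff, ineq_const, eq_coeff, eq_const)
    print([round(v, 4) for v in sol['x']])                   # ~[0.5, 0.5]
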
Example #2
    def solve(self, sample, c=float("inf"), kernel=LinearKernel()):
        r"""
        Solve the SVM classification optimization problem corresponding
        to the supplied sample, according to specified value for the tradeoff
        constant `C`.

        INPUT:

        - ``sample`` -- list or tuple of ``LabeledExample`` instances whose
          labels are all set either to `1` or `-1`.

        - ``c`` -- value for the tradeoff constant `C`: a finite float selects
          the soft-margin version of the algorithm, while ``float('inf')``
          selects the original hard-margin formulation (default value:
          ``float('inf')``)

        - ``kernel`` -- ``Kernel`` instance defining the kernel to be used
          (default value: ``LinearKernel()``, using a linear kernel)

        OUTPUT:

        list of float values -- optimal values for the optimization problem.

        EXAMPLES:

        Consider the following representation of the AND binary function, and a
        default instantiation for ``CVXOPTClassificationSolver``:

        ::

            >>> from yaplf.data import LabeledExample
            >>> and_sample = [LabeledExample((1, 1), 1),
            ... LabeledExample((0, 0), -1), LabeledExample((0, 1), -1),
            ... LabeledExample((1, 0), -1)]
            >>> from yaplf.algorithms.svm.classification.solvers \
            ... import CVXOPTClassificationSolver
            >>> s = CVXOPTClassificationSolver()

        Once the solver instance is available, it is possible to invoke its
        ``solve`` function, specifying a labeled sample such as ``and_sample``,
        a positive value for the constant `C` and a kernel instance in order to
        get the solution of the corresponding SV classification optimization
        problem:

        ::

            >>> from yaplf.models.kernel import LinearKernel
            >>> s.solve(and_sample, 2, LinearKernel())
            [2, 0, 0.9999998669645057, 0.9999998669645057]

        The value for `C` can be set to ``float('inf')``, in order to build
        and solve the original optimization problem rather than the
        soft-margin formulation:

        ::

            >>> s.solve(and_sample, float('inf'), LinearKernel())
            [4.000001003300218, 0, 2.000000364577095, 2.000000364577095]

        Note however that this class should never be used directly. It is
        automatically used by ``SVMClassificationAlgorithm``.

        AUTHORS:

        - Dario Malchiodi (2010-02-22)

        """

        solvers.options["show_progress"] = self.verbose
        solvers.options["maxiters"] = self.max_iterations
        solvers.options["solver"] = self.solver

        # cvxopt solves the problem
        # min 1/2 x' Q x + p' x
        # subject to G x <= h and A x = b
        # dict below is mapped to the above symbols as follows:
        # problem["obj_quad"] -> Q
        # problem["obj_lin"] -> p
        # problem["ineq_coeff"] -> G
        # problem["ineq_const"] -> h
        # problem["eq_coeff"] -> A
        # problem["eq_const"] -> b

        num_examples = len(sample)
        problem = {}

        problem["obj_quad"] = cvxopt_matrix(
            [
                [elem_i.label * elem_j.label * kernel.compute(elem_i.pattern, elem_j.pattern) for elem_i in sample]
                for elem_j in sample
            ]
        )
        problem["obj_lin"] = cvxopt_matrix([-1.0] * num_examples)
        if c == float("inf"):
            problem["ineq_coeff"] = cvxopt_matrix(-1.0 * eye(num_examples))
            problem["ineq_const"] = cvxopt_matrix([0.0] * num_examples)
        else:
            problem["ineq_coeff"] = cvxopt_matrix(
                [
                    [-1.0 * kronecker_delta(i, j) for i in range(num_examples)]
                    + [kronecker_delta(i, j) for i in range(num_examples)]
                    for j in range(num_examples)
                ]
            )
            problem["ineq_const"] = cvxopt_matrix([float(0.0)] * num_examples + [float(c)] * num_examples)

        # coercion to float in the following assignment is required
        # in order to work with Sage notebooks
        problem["eq_coeff"] = cvxopt_matrix([float(elem.label) for elem in sample], (1, num_examples))
        problem["eq_const"] = cvxopt_matrix(0.0)
        # was
        # sol = solvers.qp(quad_coeff, lin_coeff, ineq_coeff, ineq_const, \
        #     eq_coeff, eq_const)

        sol = solvers.qp(
            problem["obj_quad"],
            problem["obj_lin"],
            problem["ineq_coeff"],
            problem["ineq_const"],
            problem["eq_coeff"],
            problem["eq_const"],
        )

        if sol["status"] != "optimal":
            raise ValueError("cvxopt returned status " + sol["status"])

        # was
        # alpha = map(lambda x: chop(x, right = c), list(sol['x']))
        alpha = [chop(x, right=c) for x in list(sol["x"])]

        return alpha
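
The returned multipliers are the optimal dual variables; ``SVMClassificationAlgorithm``
normally turns them into a classifier, but as a rough standalone check (plain SVM
algebra, not a yaplf API) the hard-margin output for the AND sample above can be
turned into a linear decision function that reproduces the labels:

    # Hedged sketch: rebuild the linear decision function from the hard-margin
    # multipliers returned for the AND sample above (plain SVM algebra, not
    # yaplf code).
    patterns = [(1, 1), (0, 0), (0, 1), (1, 0)]
    labels = [1, -1, -1, -1]
    alphas = [4.000001003300218, 0, 2.000000364577095, 2.000000364577095]

    # linear kernel: w = sum_i alpha_i y_i x_i
    w = [sum(a * y * x[k] for a, y, x in zip(alphas, labels, patterns))
         for k in range(2)]

    # bias from any support vector x_s (alpha_s > 0): b = y_s - <w, x_s>
    s_idx = next(i for i, a in enumerate(alphas) if a > 0)
    b = labels[s_idx] - sum(wk * xk for wk, xk in zip(w, patterns[s_idx]))

    # sign(<w, x> + b) reproduces the AND labels
    for x, y in zip(patterns, labels):
        assert (sum(wk * xk for wk, xk in zip(w, x)) + b) * y > 0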