Example #1
    def remove_redundant_rows(data):
        """Remove redundant constraints from A and G.

        Parameters
        ----------
        data : dict
            All the problem data.

        Returns
        -------
        str
            A status indicating if infeasibility was detected.
        """
        # Extract data.
        dims = data[s.DIMS]
        A = data[s.A]
        G = data[s.G]
        b = data[s.B]
        h = data[s.H]
        # Remove redundant rows in A.
        if A is not None:
            # The pivoting improves robustness.
            Q, R, P = scipy.linalg.qr(A.todense(), pivoting=True)
            rows_to_keep = []
            for i in range(R.shape[0]):
                if np.linalg.norm(R[i, :]) > 1e-10:
                    rows_to_keep.append(i)
            R = R[rows_to_keep, :]
            Q = Q[:, rows_to_keep]
            # Invert P from col -> var to var -> col.
            Pinv = np.zeros(P.size, dtype='int')
            for i in range(P.size):
                Pinv[P[i]] = i
            # Rearrange R back to the original column order.
            R = R[:, Pinv]
            A = R
            b_old = b
            b = Q.T.dot(b)
            # If b is not in the range of Q,
            # the problem is infeasible.
            if not np.allclose(b_old, Q.dot(b)):
                return s.INFEASIBLE
            dims[s.EQ_DIM] = int(b.shape[0])
            data["Q"] = intf.dense2cvxopt(Q)
        # Remove obviously redundant rows in G's <= constraints.
        if G is not None:
            G = G.tocsr()
            G_leq = G[:dims[s.LEQ_DIM], :]
            h_leq = h[:dims[s.LEQ_DIM]].ravel()
            G_other = G[dims[s.LEQ_DIM]:, :]
            h_other = h[dims[s.LEQ_DIM]:].ravel()
            G_leq, h_leq, P_leq = compress_matrix(G_leq, h_leq)
            dims[s.LEQ_DIM] = int(h_leq.shape[0])
            data["P_leq"] = intf.sparse2cvxopt(P_leq)
            G = sp.vstack([G_leq, G_other])
            h = np.hstack([h_leq, h_other])
        # Record the modified A, b, G, h.
        data[s.A] = A
        data[s.G] = G
        data[s.B] = b
        data[s.H] = h
        return s.OPTIMAL
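
The core of Example #1 is the column-pivoted QR step. Below is a minimal standalone sketch of that idea, using a made-up 3x3 matrix A (whose third row is the sum of the first two) and a consistent right-hand side b; the 1e-10 tolerance mirrors the one used above. Because A = Q @ R[:, Pinv], the reduced system R[:, Pinv] x = Q.T b is equivalent to A x = b whenever b lies in the range of the retained columns of Q, which is exactly what the allclose check verifies.

    import numpy as np
    import scipy.linalg

    # Toy equality system A x = b whose third row equals row 0 + row 1.
    A = np.array([[1., 0., 1.],
                  [0., 1., 1.],
                  [1., 1., 2.]])
    b = np.array([1., 2., 3.])  # consistent with the redundancy (3 = 1 + 2)

    # Column-pivoted QR; pivoting improves robustness.
    Q, R, P = scipy.linalg.qr(A, pivoting=True)

    # Drop rows of R that are numerically zero.
    keep = [i for i in range(R.shape[0]) if np.linalg.norm(R[i, :]) > 1e-10]
    R, Q = R[keep, :], Q[:, keep]

    # Undo the column permutation so R acts on the original variable order.
    Pinv = np.empty(P.size, dtype=int)
    Pinv[P] = np.arange(P.size)
    R = R[:, Pinv]

    # Reduced right-hand side; b must lie in the range of the kept columns of Q.
    b_new = Q.T.dot(b)
    print(R.shape, np.allclose(b, Q.dot(b_new)))  # (2, 3) True
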
Example #2
    def remove_redundant_rows(data):
        """Check if A has redundant rows. If it does, remove redundant constraints
        from A, and apply a presolve procedure for G.

        Parameters
        ----------
        data : dict
            All the problem data.

        Returns
        -------
        str
            A status indicating if infeasibility was detected.
        """
        # Extract data.
        dims = data[s.DIMS]
        A = data[s.A]
        G = data[s.G]
        b = data[s.B]
        h = data[s.H]
        if A is None:
            return s.OPTIMAL
        TOL = 1e-10
        #
        # Use a gram matrix approach to skip dense QR factorization, if possible.
        #
        gram = A @ A.T
        if gram.shape[0] == 1:
            gram = gram.toarray().item()  # we only have one equality constraint.
            if gram > 0:
                return s.OPTIMAL
            elif not b.item() == 0.0:
                return s.INFEASIBLE
            else:
                data[s.A] = None
                data[s.B] = None
                return s.OPTIMAL
        eig = eigsh(gram, k=1, which='SM', return_eigenvectors=False)
        if eig > TOL:
            return s.OPTIMAL
        #
        # Redundant constraints exist, up to numerical tolerance;
        # reformulate equality constraints to remove this redundancy.
        #
        Q, R, P = scipy.linalg.qr(A.todense(), pivoting=True)  # pivoting helps robustness
        rows_to_keep = []
        for i in range(R.shape[0]):
            if np.linalg.norm(R[i, :]) > TOL:
                rows_to_keep.append(i)
        R = R[rows_to_keep, :]
        Q = Q[:, rows_to_keep]
        # Invert P from col -> var to var -> col.
        Pinv = np.zeros(P.size, dtype='int')
        for i in range(P.size):
            Pinv[P[i]] = i
        # Rearrange R back to the original column order.
        R = R[:, Pinv]
        A = R
        b_old = b
        b = Q.T.dot(b)
        # If b is not in the range of Q, the problem is infeasible.
        if not np.allclose(b_old, Q.dot(b)):
            return s.INFEASIBLE
        dims[s.EQ_DIM] = int(b.shape[0])
        data["Q"] = intf.dense2cvxopt(Q)
        #
        # Since we're applying nontrivial presolve to A, apply to G as well.
        #
        if G is not None:
            G = G.tocsr()
            G_leq = G[:dims[s.LEQ_DIM], :]
            h_leq = h[:dims[s.LEQ_DIM]].ravel()
            G_other = G[dims[s.LEQ_DIM]:, :]
            h_other = h[dims[s.LEQ_DIM]:].ravel()
            G_leq, h_leq, P_leq = compress_matrix(G_leq, h_leq)
            dims[s.LEQ_DIM] = int(h_leq.shape[0])
            data["P_leq"] = intf.sparse2cvxopt(P_leq)
            G = sp.vstack([G_leq, G_other])
            h = np.hstack([h_leq, h_other])
        # Record changes, and return.
        data[s.A] = A
        data[s.G] = G
        data[s.B] = b
        data[s.H] = h
        return s.OPTIMAL
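
Example #2 adds a cheap screening step: if the smallest eigenvalue of the Gram matrix A @ A.T is clearly positive, A already has full row rank and the dense QR can be skipped. The snippet below is a minimal sketch of that check with made-up sparse matrices; the tolerance and the eigsh call mirror the ones used above.

    import numpy as np
    import scipy.sparse as sp
    from scipy.sparse.linalg import eigsh

    TOL = 1e-10

    # Full row rank: A @ A.T is positive definite, so the smallest
    # eigenvalue is well above the tolerance and the dense QR is skipped.
    A = sp.csr_matrix(np.array([[1., 0., 0., 1.],
                                [0., 1., 0., 1.],
                                [0., 0., 1., 1.]]))
    gram = A @ A.T
    eig = eigsh(gram, k=1, which='SM', return_eigenvectors=False).item()
    print(eig > TOL)  # True -> no redundant rows, skip the QR path

    # Appending a dependent row (row 0 + row 1 + row 2) makes the Gram
    # matrix singular, so the smallest eigenvalue drops to roughly zero.
    A_bad = sp.csr_matrix(np.array([[1., 0., 0., 1.],
                                    [0., 1., 0., 1.],
                                    [0., 0., 1., 1.],
                                    [1., 1., 1., 3.]]))
    gram_bad = A_bad @ A_bad.T
    eig_bad = eigsh(gram_bad, k=1, which='SM', return_eigenvectors=False).item()
    print(eig_bad > TOL)  # False -> fall back to the QR-based reformulation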