Example #1
import numpy as np
from pykrylov.linop import LinearOperator, PysparseLinearOperator  # import path assumed


# The snippet is truncated at the function signature; the name below is a
# reconstruction. The function wraps a dense numpy array as a LinearOperator.
def linop_from_ndarray(A):
    return LinearOperator(A.shape[1], A.shape[0],
                          lambda v: np.dot(A, v),
                          matvec_transp=lambda u: np.dot(A.T, u),
                          symmetric=False)


if __name__ == '__main__':
    from pykrylov.tools import check_symmetric
    from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
    from nlpy.model import AmplModel
    import sys

    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    nlp = AmplModel(sys.argv[1])
    J = sp(matrix=nlp.jac(nlp.x0))
    e1 = np.ones(J.shape[0])
    e2 = np.ones(J.shape[1])
    print 'J.shape = ', J.getShape()

    print 'Testing PysparseLinearOperator:'
    op = PysparseLinearOperator(J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print "op.T * e1 = ", op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op.T.T.T * e1 = ', op.T.T.T * e1
    print 'With call:'
    print 'op(e2) = ', op(e2)
    print 'op.T(e1) = ', op.T(e1)
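A quick usage sketch of the dense-array wrapper above, using the reconstructed
name linop_from_ndarray (expected values worked out by hand for this 2x3 matrix):

A = np.array([[1., 2., 3.],
              [4., 5., 6.]])
op = linop_from_ndarray(A)
print op.shape           # (2, 3), i.e. nargout x nargin
print op * np.ones(3)    # A * e   -> [  6.  15.]
print op.T * np.ones(2)  # A.T * e -> [ 5.  7.  9.]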
Example #2
# Signature reconstructed; the snippet is truncated above the docstring.
def sqrt(op):
    """
    Return the square root of a linear operator, if defined. Note that
    this is not the elementwise square root. The result is a linear operator
    that, when composed with itself, yields the original operator.
    """
    return op._sqrt()

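As intuition for the composition property, here is a minimal numpy-only sketch
(this is not pykrylov's _sqrt; the symmetric positive definite test matrix is
an illustrative assumption):

import numpy as np

# Build a small symmetric positive definite matrix B = C C^T + I.
C = np.random.rand(4, 4)
B = np.dot(C, C.T) + np.eye(4)

# Square root via eigendecomposition: B = V diag(w) V^T, so
# R = V diag(sqrt(w)) V^T satisfies R R = B.
w, V = np.linalg.eigh(B)
R = np.dot(V * np.sqrt(w), V.T)

# Composing the square root with itself recovers the original operator.
v = np.ones(4)
print np.allclose(np.dot(R, np.dot(R, v)), np.dot(B, v))  # True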
Example #3
# Preamble reconstructed (assumed); the original snippet is truncated above
# this point. max_n and max_m cap how many entries are printed.
import sys
from nlpy.model import AmplModel

nlp = AmplModel(sys.argv[1])
x0 = nlp.x0
pi0 = nlp.pi0
max_n = min(nlp.n, 5)  # print at most 5 variables
max_m = min(nlp.m, 5)  # print at most 5 constraints

print 'Initial point: ', x0[:max_n]
print 'Lower bounds on x: ', nlp.Lvar[:max_n]
print 'Upper bounds on x: ', nlp.Uvar[:max_n]
print 'f( x0 ) = ', nlp.obj(x0)
g0 = nlp.grad(x0)
print 'grad f( x0 ) = ', g0[:max_n]

if max_m > 0:
    print 'Initial multipliers: ', pi0[:max_m]
    print 'Lower constraint bounds: ', nlp.Lcon[:max_m]
    print 'Upper constraint bounds: ', nlp.Ucon[:max_m]
    c0 = nlp.cons(x0)
    print 'c( x0 ) = ', c0[:max_m]

J = nlp.jac(x0)
H = nlp.hess(x0, pi0)
print
print ' nnzJ = ', J.nnz
print ' nnzH = ', H.nnz

print
print ' Printing at most first 5x5 principal submatrix'
print

print 'J( x0 ) = ', J[:max_m, :max_n]
print 'Hessian (lower triangle):', H[:max_n, :max_n]

print
print ' Evaluating constraints individually, sparse gradients'
print
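The snippet breaks off where the per-constraint loop would begin. A sketch of
that loop, assuming nlpy's icons (value of the i-th constraint at x0) and
sigrad (its sparse gradient):

for i in range(max_m):
    print 'c_%d( x0 ) = ' % i, nlp.icons(i, x0)
    print 'grad c_%d( x0 ) = ' % i, nlp.sigrad(i, x0)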
Example #4
File: slacks.py  Project: b45ch1/nlpy
    def _jac(self, x, lp=False):
        """
        Helper method to assemble the Jacobian matrix of the constraints of the
        transformed problems. See the documentation of :meth:`jac` for more
        information.

        The keyword argument `lp` should be set to `True` only if the problem
        is known to be a linear program. In this case, the evaluation of the
        constraint matrix is cheaper and the argument `x` is ignored.
        """
        n = self.original_n
        m = self.original_m

        # List() supports elementwise scalar arithmetic, e.g. 1 + List([2, 3]) -> [3, 4]
        lowerC = List(self.lowerC)
        nlowerC = self.nlowerC
        upperC = List(self.upperC)
        nupperC = self.nupperC
        rangeC = List(self.rangeC)
        nrangeC = self.nrangeC
        lowerB = List(self.lowerB)
        nlowerB = self.nlowerB
        upperB = List(self.upperB)
        nupperB = self.nupperB
        rangeB = List(self.rangeB)
        nrangeB = self.nrangeB
        nbnds = nlowerB + nupperB + 2 * nrangeB
        nSlacks = nlowerC + nupperC + 2 * nrangeC

        # Initialize sparse Jacobian
        nnzJ = 2 * self.nnzj + m + nrangeC + nbnds + nrangeB  # Overestimate
        J = sp(nrow=self.m, ncol=self.n, sizeHint=nnzJ)

        # Insert contribution of general constraints
        if lp:
            J[:m, :n] = AmplModel.A(self)
        else:
            J[:m, :n] = AmplModel.jac(self, x[:n])
        J[upperC, :n] *= -1.0  # Flip sign of 'upper' gradients
        J[m:m + nrangeC, :n] = J[rangeC, :n]  # Append 'upper' side of range const.
        J[m:m + nrangeC, :n] *= -1.0          # Flip sign of 'upper' range gradients.

        # Create a few index lists
        rlowerC = List(range(nlowerC))
        rlowerB = List(range(nlowerB))
        rupperC = List(range(nupperC))
        rupperB = List(range(nupperB))
        rrangeC = List(range(nrangeC))
        rrangeB = List(range(nrangeB))

        # Insert contribution of slacks on general constraints
        J.put(-1.0, lowerC, n + rlowerC)
        J.put(-1.0, upperC, n + nlowerC + rupperC)
        J.put(-1.0, rangeC, n + nlowerC + nupperC + rrangeC)
        J.put(-1.0, m + rrangeC, n + nlowerC + nupperC + nrangeC + rrangeC)

        # Insert contribution of bound constraints on the original problem
        bot = m + nrangeC
        J.put(1.0, bot + rlowerB, lowerB)
        bot += nlowerB
        J.put(1.0, bot + rrangeB, rangeB)
        bot += nrangeB
        J.put(-1.0, bot + rupperB, upperB)
        bot += nupperB
        J.put(-1.0, bot + rrangeB, rangeB)

        # Insert contribution of slacks on the bound constraints
        bot = m + nrangeC
        J.put(-1.0, bot + rlowerB, n + nSlacks + rlowerB)
        bot += nlowerB
        J.put(-1.0, bot + rrangeB, n + nSlacks + nlowerB + rrangeB)
        bot += nrangeB
        J.put(-1.0, bot + rupperB, n + nSlacks + nlowerB + nrangeB + rupperB)
        bot += nupperB
        J.put(-1.0, bot + rrangeB,
              n + nSlacks + nlowerB + nrangeB + nupperB + rrangeB)

        return J
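The List helper is not shown in this snippet. Below is a minimal sketch of the
elementwise behavior described in the comment above (an assumption, not nlpy's
actual implementation; only scalar-plus-list is sketched, since that is the
only form _jac uses):

class List(list):
    """A list supporting elementwise addition with a scalar (sketch)."""
    def __add__(self, other):
        return List([x + other for x in self])
    __radd__ = __add__

print 1 + List([2, 3])      # [3, 4]
print 100 + List(range(3))  # [100, 101, 102]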