Example #1
    def __init__(self, model, **kwargs):

        AmplModel.__init__(self, model, **kwargs)

        # Save number of variables and constraints prior to transformation
        self.original_n = self.n
        self.original_m = self.m
        self.original_nbounds = self.nbounds

        # Number of slacks for inequality constraints with a lower bound
        n_con_low = self.nlowerC + self.nrangeC ; self.n_con_low = n_con_low

        # Number of slacks for inequality constraints with an upper bound
        n_con_up = self.nupperC + self.nrangeC ; self.n_con_up = n_con_up

        # Number of slacks for variables with a lower bound
        n_var_low = self.nlowerB + self.nrangeB ; self.n_var_low = n_var_low

        # Number of slacks for variables with an upper bound
        n_var_up = self.nupperB + self.nrangeB ; self.n_var_up = n_var_up

        # Update effective number of variables and constraints
        self.n  = self.original_n + n_con_low + n_con_up + n_var_low + n_var_up
        self.m  = self.original_m + self.nrangeC + n_var_low + n_var_up

        # Redefine primal and dual initial guesses
        self.original_x0 = self.x0[:]
        self.x0 = numpy.zeros(self.n)
        self.x0[:self.original_n] = self.original_x0[:]

        self.original_pi0 = self.pi0[:]
        self.pi0 = numpy.zeros(self.m)
        self.pi0[:self.original_m] = self.original_pi0[:]

        return
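
To make the bookkeeping above concrete, here is a minimal sketch with hypothetical counts (not taken from any real model) showing how the effective dimensions grow in the slack formulation:

# Hypothetical counts: 3 variables, 2 general constraints, one constraint
# with only a lower bound, one range constraint, and one range-bounded variable.
original_n, original_m = 3, 2
nlowerC, nupperC, nrangeC = 1, 0, 1
nlowerB, nupperB, nrangeB = 0, 0, 1

n_con_low = nlowerC + nrangeC  # slacks for constraints with a lower bound: 2
n_con_up  = nupperC + nrangeC  # slacks for constraints with an upper bound: 1
n_var_low = nlowerB + nrangeB  # slacks for variables with a lower bound: 1
n_var_up  = nupperB + nrangeB  # slacks for variables with an upper bound: 1

n = original_n + n_con_low + n_con_up + n_var_low + n_var_up  # 3+2+1+1+1 = 8
m = original_m + nrangeC + n_var_low + n_var_up               # 2+1+1+1 = 5
print n, m  # prints: 8 5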
Example #2
 def obj(self, x):
     """
     Return the value of the objective function at `x`. This function is
     specialized since the original objective function only depends on a
     subvector of `x`.
     """
     return AmplModel.obj(self, x[:self.original_n])
Example #4
    def __init__(self, model, **kwargs):

        AmplModel.__init__(self, model, **kwargs)

        # Save number of variables and constraints prior to transformation
        self.original_n = self.n
        self.original_m = self.m
        self.original_nbounds = self.nbounds

        # Number of slacks for inequality constraints with a lower bound
        n_con_low = self.nlowerC + self.nrangeC
        self.n_con_low = n_con_low

        # Number of slacks for inequality constraints with an upper bound
        n_con_up = self.nupperC + self.nrangeC
        self.n_con_up = n_con_up

        # Number of slacks for variables with a lower bound
        n_var_low = self.nlowerB + self.nrangeB
        self.n_var_low = n_var_low

        # Number of slacks for variables with an upper bound
        n_var_up = self.nupperB + self.nrangeB
        self.n_var_up = n_var_up

        # Update effective number of variables and constraints
        self.n = self.original_n + n_con_low + n_con_up + n_var_low + n_var_up
        self.m = self.original_m + self.nrangeC + n_var_low + n_var_up

        # Redefine primal and dual initial guesses
        self.original_x0 = self.x0[:]
        self.x0 = numpy.zeros(self.n)
        self.x0[:self.original_n] = self.original_x0[:]

        self.original_pi0 = self.pi0[:]
        self.pi0 = numpy.zeros(self.m)
        self.pi0[:self.original_m] = self.original_pi0[:]

        return
Example #5
    try:
        options, fname = getopt.getopt(arglist, '')

    except getopt.error, e:
        commandline_err("%s" % str(e))
        return None

    return fname[0]


ProblemName = parse_cmdline(sys.argv[1:])

# Create a NLPy AmplModel

print 'Problem', ProblemName
nlp = AmplModel(ProblemName)  #amplpy.AmplModel( ProblemName )

# Translate this NLPy/AMPL problem into a pyOpt problem

nlp.Uvar = numpy.inf * numpy.ones(9)

opt_prob = PyOpt_From_NLPModel(nlp)
print opt_prob

# Call the imported solver SNOPT
snopt = SNOPT()

# Choose the sensitivity type for computing gradients:
#  'FD'               : finite differences
#  opt_prob.grad_func : NLPy gradients
#
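
The listing stops before the actual call. A hedged sketch of what typically follows, assuming pyOpt's usual `solver(problem, sens_type=...)` calling convention; check your pyOpt version before relying on this:

# Hedged sketch: pyOpt optimizers are callable on the problem object.
# sens_type='FD' would request finite differences; passing opt_prob.grad_func
# delegates gradient evaluations to NLPy, as the comment above describes.
fstr, xstr, inform = snopt(opt_prob, sens_type=opt_prob.grad_func)
print fstr, xstr, inform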
Example #6
    try:
        options, fname = getopt.getopt(arglist, '')

    except getopt.error, e:
        commandline_err("%s" % str(e))
        return None

    return fname[0]


ProblemName = parse_cmdline(sys.argv[1:])

# Create a model
print 'Problem', ProblemName
nlp = AmplModel(ProblemName)  #amplpy.AmplModel( ProblemName )

# Query the model
x0 = nlp.x0
pi0 = nlp.pi0
n = nlp.n
m = nlp.m
print 'There are %d variables and %d constraints' % (n, m)

max_n = min(n, 5)
max_m = min(m, 5)

print
print ' Printing at most 5 first components of vectors'
print
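
The banner announces the leading vector components; the listing is cut here, so what follows is a plausible continuation (an assumption) using attributes that appear elsewhere in these examples:

print 'Initial point  x0[:%d] = ' % max_n, x0[:max_n]
print 'Lower bounds Lvar[:%d] = ' % max_n, nlp.Lvar[:max_n]
print 'Upper bounds Uvar[:%d] = ' % max_n, nlp.Uvar[:max_n]
print 'Multipliers  pi0[:%d] = ' % max_m, pi0[:max_m]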
Example #7

if __name__ == '__main__':

    from nlpy.model import AmplModel
    from nlpy.krylov.linop import PysparseLinearOperator
    from nlpy.optimize.tr.trustregion import TrustRegionFramework, TrustRegionCG
    from nlpy.tools.dercheck import DerivativeChecker

    # Set printing standards for arrays.
    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    prob = sys.argv[1]

    # Initialize problem.
    nlp = AmplModel(prob)
    pdmerit = _meritfunction(nlp)
    (nx, nz) = (pdmerit.nlp.n, pdmerit.nz)

    # Initialize trust region framework.
    TR = TrustRegionFramework(Delta=1.0,
                              eta1=0.0001,
                              eta2=0.95,
                              gamma1=1.0 / 3,
                              gamma2=2.5)

    # Set up interior-point framework.
    TRIP = PrimalDualInteriorPointFramework(
        pdmerit,
        TR,
        TrustRegionCG,
Example #8
error = False

if multiple_problems:
    # Define formats for output table.
    hdrfmt = '%-10s %5s %5s %15s %7s %7s %6s %6s %5s'
    hdr = hdrfmt % ('Name','Iter','Feval','Objective','dResid','pResid',
                    'Setup','Solve','Opt')
    lhdr = len(hdr)
    fmt = '%-10s %5d %5d %15.8e %7.1e %7.1e %6.2f %6.2f %5s'
    log.info(hdr)
    log.info('-' * lhdr)

# Solve each problem in turn.
for ProblemName in args:

    nlp = AmplModel(ProblemName)

    # Check for equality-constrained problem.
    n_ineq = nlp.nlowerC + nlp.nupperC + nlp.nrangeC
    if nlp.nbounds > 0 or n_ineq > 0:
        msg = '%s has %d bounds and %d inequality constraints\n'
        log.error(msg % (nlp.name, nlp.nbounds, n_ineq))
        error = True
    else:
        ProblemName = os.path.basename(ProblemName)
        if ProblemName[-3:] == '.nl':
            ProblemName = ProblemName[:-3]
        t_setup, funn = pass_to_funnel(nlp, **opts)
        if multiple_problems:
            log.info(fmt % (ProblemName, funn.niter, funn.nlp.feval, funn.f,
                            funn.dResid, funn.pResid,
Example #9

if __name__ == '__main__':

    from nlpy.model import AmplModel
    from nlpy.krylov.linop import PysparseLinearOperator
    from nlpy.optimize.tr.trustregion import TrustRegionFramework, TrustRegionCG
    from nlpy.tools.dercheck import DerivativeChecker

    # Set printing standards for arrays.
    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    prob = sys.argv[1]

    # Initialize problem.
    nlp = AmplModel(prob)
    pdmerit = _meritfunction(nlp)
    (nx, nz) = (pdmerit.nlp.n, pdmerit.nz)

    # Initialize trust region framework.
    TR = TrustRegionFramework(Delta = 1.0,
                              eta1 = 0.0001,
                              eta2 = 0.95,
                              gamma1 = 1.0/3,
                              gamma2 = 2.5)

    # Set up interior-point framework.
    TRIP = PrimalDualInteriorPointFramework(
                pdmerit,
                TR,
                TrustRegionCG,
Example #10
                # 4. Update x using projected linesearch with initial step=1.
                (x, qval) = self.projected_linesearch(x, g, d, qval)
                g = qp.grad(x)
                pg = self.pgrad(x, g=g, active_set=(lower, upper))
                pgNorm = np.linalg.norm(pg)

        self.exitOptimal = exitOptimal
        self.exitIter = exitIter
        self.niter = iter
        self.x = x
        self.qval = qval
        self.lower = lower
        self.upper = upper
        return


if __name__ == '__main__':
    import sys
    from nlpy.model import AmplModel

    qp = AmplModel(sys.argv[1])
    bqp = BQP(qp)
    bqp.solve(maxiter=50, stoptol=1.0e-8)
    print 'optimal = ', repr(bqp.exitOptimal)
    print 'niter = ', bqp.niter
    print 'solution: ', bqp.x
    print 'objective value: ', bqp.qval
    print 'vars on lower bnd: ', bqp.lower
    print 'vars on upper bnd: ', bqp.upper
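
The stopping test in the solver relies on a projected gradient (`pgrad` above). For reference, here is a minimal self-contained sketch of the standard projected gradient for box constraints; it illustrates the definition and is not necessarily identical to `pgrad`:

import numpy as np

def projected_gradient(x, g, Lvar, Uvar):
    # For min f(x) s.t. Lvar <= x <= Uvar, zero the components of g that
    # point outside the box at active bounds.
    pg = g.copy()
    pg[(x <= Lvar) & (g > 0)] = 0.0  # at a lower bound, keep only min(g, 0)
    pg[(x >= Uvar) & (g < 0)] = 0.0  # at an upper bound, keep only max(g, 0)
    return pg

x = np.array([0.0, 0.5])   # x[0] sits on its lower bound
g = np.array([1.0, -2.0])
print projected_gradient(x, g, np.zeros(2), np.ones(2))  # [ 0. -2.]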
Example #11
        commandline_err( 'Specify file name (look in data directory)' )
        return None

    try: options, fname = getopt.getopt( arglist, '' )

    except getopt.error, e:
        commandline_err( "%s" % str( e ) )
        return None

    return fname[0]

ProblemName = parse_cmdline(sys.argv[1:]) 

# Create a model
print 'Problem', ProblemName
nlp = AmplModel( ProblemName ) #amplpy.AmplModel( ProblemName )

# Query the model
x0  = nlp.x0
pi0 = nlp.pi0
n = nlp.n
m = nlp.m
print 'There are %d variables and %d constraints' % ( n, m )

max_n = min( n, 5 )
max_m = min( m, 5 )

print
print ' Printing at most 5 first components of vectors'
print
Example #12
        return None

    try: options, fname = getopt.getopt( arglist, '' )

    except getopt.error, e:
        commandline_err( "%s" % str( e ) )
        return None

    return fname[0]

ProblemName = parse_cmdline(sys.argv[1:])

# Create an NLPy AmplModel

print 'Problem', ProblemName
nlp = AmplModel( ProblemName )

# Translate this NLPy/AMPL problem into a pyOpt problem
opt_prob = PyOpt_From_NLPModel(nlp)
print opt_prob

# Call the imported solver IPOPT
ipopt = IPOPT()


# Choose the sensitivity type for computing gradients:
#  'FD'               : finite differences
#  opt_prob.grad_func : NLPy gradients
#
# Since AMPL does not handle complex values, we cannot use the complex-step
# method to estimate the gradients.
Example #13
        commandline_err( 'Specify file name (look in data directory)' )
        return None

    try: options, fname = getopt.getopt( arglist, '' )

    except getopt.error, e:
        commandline_err( "%s" % str( e ) )
        return None

    return fname[0]

ProblemName = parse_cmdline(sys.argv[1:])

# Create a model
print 'Problem', ProblemName
nlp = AmplModel( ProblemName ) #amplpy.AmplModel( ProblemName )

# Query the model
x0  = nlp.x0
pi0 = nlp.pi0
n = nlp.n
m = nlp.m
print 'There are %d variables and %d constraints' % ( n, m )

max_n = min( n, 5 )
max_m = min( m, 5 )

print
print ' Printing at most 5 first components of vectors'
print
Example #14
    def _jac(self, x, lp=False):
        """
        Helper method to assemble the Jacobian matrix of the constraints of the
        transformed problem. See the documentation of :meth:`jac` for more
        information.

        The keyword argument `lp` should be set to `True` only if the problem
        is known to be a linear program. In this case, the evaluation of the
        constraint matrix is cheaper and the argument `x` is ignored.
        """
        n = self.original_n
        m = self.original_m

        # List() simply allows operations such as 1 + [2,3] -> [3,4]
        lowerC = List(self.lowerC) ; nlowerC = self.nlowerC
        upperC = List(self.upperC) ; nupperC = self.nupperC
        rangeC = List(self.rangeC) ; nrangeC = self.nrangeC
        lowerB = List(self.lowerB) ; nlowerB = self.nlowerB
        upperB = List(self.upperB) ; nupperB = self.nupperB
        rangeB = List(self.rangeB) ; nrangeB = self.nrangeB
        nbnds  = nlowerB + nupperB + 2*nrangeB
        nSlacks = nlowerC + nupperC + 2*nrangeC

        # Initialize sparse Jacobian
        nnzJ = 2 * self.nnzj + m + nrangeC + nbnds + nrangeB  # Overestimate
        J = sp(nrow=self.m, ncol=self.n, sizeHint=nnzJ)

        # Insert contribution of general constraints
        if lp:
            J[:m,:n] = AmplModel.A(self)
        else:
            J[:m,:n] = AmplModel.jac(self,x[:n])
        J[upperC,:n] *= -1.0               # Flip sign of 'upper' gradients
        J[m:m+nrangeC,:n] = J[rangeC,:n]  # Append 'upper' side of range const.
        J[m:m+nrangeC,:n] *= -1.0        # Flip sign of 'upper' range gradients.

        # Create a few index lists
        rlowerC = List(range(nlowerC)) ; rlowerB = List(range(nlowerB))
        rupperC = List(range(nupperC)) ; rupperB = List(range(nupperB))
        rrangeC = List(range(nrangeC)) ; rrangeB = List(range(nrangeB))

        # Insert contribution of slacks on general constraints
        J.put(-1.0,      lowerC, n + rlowerC)
        J.put(-1.0,      upperC, n + nlowerC + rupperC)
        J.put(-1.0,      rangeC, n + nlowerC + nupperC + rrangeC)
        J.put(-1.0, m + rrangeC, n + nlowerC + nupperC + nrangeC + rrangeC)

        # Insert contribution of bound constraints on the original problem
        bot  = m+nrangeC ; J.put( 1.0, bot + rlowerB, lowerB)
        bot += nlowerB   ; J.put( 1.0, bot + rrangeB, rangeB)
        bot += nrangeB   ; J.put(-1.0, bot + rupperB, upperB)
        bot += nupperB   ; J.put(-1.0, bot + rrangeB, rangeB)

        # Insert contribution of slacks on the bound constraints
        bot  = m+nrangeC
        J.put(-1.0, bot + rlowerB, n + nSlacks + rlowerB)
        bot += nlowerB
        J.put(-1.0, bot + rrangeB, n + nSlacks + nlowerB + rrangeB)
        bot += nrangeB
        J.put(-1.0, bot + rupperB, n + nSlacks + nlowerB + nrangeB + rupperB)
        bot += nupperB
        J.put(-1.0, bot + rrangeB, n+nSlacks+nlowerB+nrangeB+nupperB+rrangeB)

        return J
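
The column offsets in the `put` calls above follow a fixed layout: original variables first, then constraint slacks (lower, upper, both sides of ranges), then bound slacks. A small sketch with hypothetical counts makes the offsets explicit:

# Hypothetical counts: 4 original variables; 2 lower, 1 upper, 1 range constraint.
n, nlowerC, nupperC, nrangeC = 4, 2, 1, 1
cols_lowerC     = range(n, n + nlowerC)                      # [4, 5]
cols_upperC     = range(n + nlowerC, n + nlowerC + nupperC)  # [6]
cols_rangeC_low = range(n + nlowerC + nupperC,
                        n + nlowerC + nupperC + nrangeC)     # [7]
cols_rangeC_up  = range(n + nlowerC + nupperC + nrangeC,
                        n + nlowerC + nupperC + 2 * nrangeC) # [8]
print cols_lowerC, cols_upperC, cols_rangeC_low, cols_rangeC_up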
Example #15
    def cons(self, x):
        """
        Evaluate the vector of general constraints for the modified problem.
        Constraints are stored in the order in which they appear in the
        original problem. If constraint i is a range constraint, c[i] will
        be the constraint that has the slack on the lower bound on c[i].
        The constraint with the slack on the upper bound on c[i] will be stored
        in position m + k, where k is the position of index i in
        rangeC, i.e., k=0 iff constraint i is the range constraint that
        appears first, k=1 iff it appears second, etc.

        Constraints appear in the following order:

        1. [ c  ]   general constraints in original order
        2. [ cR ]   'upper' side of range constraints
        3. [ b  ]   linear constraints corresponding to bounds on original problem
        4. [ bR ]   linear constraints corresponding to 'upper' side of two-sided
                    bounds
        """
        n = self.n ; on = self.original_n
        m = self.m ; om = self.original_m
        equalC = self.equalC
        lowerC = self.lowerC ; nlowerC = self.nlowerC
        upperC = self.upperC ; nupperC = self.nupperC
        rangeC = self.rangeC ; nrangeC = self.nrangeC

        mslow = on + self.n_con_low
        msup  = mslow + self.n_con_up
        s_low = x[on:mslow]    # len(s_low) = n_con_low
        s_up  = x[mslow:msup]  # len(s_up)  = n_con_up

        c = numpy.empty(m)
        c[:om] = AmplModel.cons(self, x[:on])
        c[om:om+nrangeC] = c[rangeC]

        c[equalC] -= self.Lcon[equalC]
        c[lowerC] -= self.Lcon[lowerC] ; c[lowerC] -= s_low[:nlowerC]

        c[upperC] -= self.Ucon[upperC] ; c[upperC] *= -1
        c[upperC] -= s_up[:nupperC]

        c[rangeC] -= self.Lcon[rangeC] ; c[rangeC] -= s_low[nlowerC:]

        c[om:om+nrangeC] -= self.Ucon[rangeC]
        c[om:om+nrangeC] *= -1
        c[om:om+nrangeC] -= s_up[nupperC:]

        # Add linear constraints corresponding to bounds on original problem
        lowerB = self.lowerB ; nlowerB = self.nlowerB ; Lvar = self.Lvar
        upperB = self.upperB ; nupperB = self.nupperB ; Uvar = self.Uvar
        rangeB = self.rangeB ; nrangeB = self.nrangeB

        nt = on + self.n_con_low + self.n_con_up
        ntlow = nt + self.n_var_low
        t_low = x[nt:ntlow]
        t_up  = x[ntlow:]

        b = c[om+nrangeC:]

        b[:nlowerB] = x[lowerB] - Lvar[lowerB] - t_low[:nlowerB]
        b[nlowerB:nlowerB+nrangeB] = x[rangeB] - Lvar[rangeB] - t_low[nlowerB:]
        b[nlowerB+nrangeB:nlowerB+nrangeB+nupperB] = Uvar[upperB] - x[upperB] - t_up[:nupperB]
        b[nlowerB+nrangeB+nupperB:] = Uvar[rangeB] - x[rangeB] - t_up[nupperB:]

        return c
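
One subtlety worth noting: `b = c[om+nrangeC:]` is a NumPy view, not a copy, so filling `b` writes directly into the tail of `c`; that is why the method can return `c` alone. A minimal demonstration of the view semantics:

import numpy
c = numpy.zeros(5)
b = c[2:]    # basic slicing returns a view, not a copy
b[:] = 7.0   # writes through to c
print c      # [ 0.  0.  7.  7.  7.]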
Example #16
multiple_problems = len(args) > 1

if multiple_problems:
    # Define formats for output table.
    hdrfmt = '%-15s %5s %5s %15s %7s %7s %7s %5s %6s %6s %5s'
    hdr = hdrfmt % ('Name', 'Iter', 'Feval', 'Objective', 'dResid', 'pResid',
                    'Comp', 'LogPen', 'Setup', 'Solve', 'Opt')
    lhdr = len(hdr)
    fmt = '%-15s %5d %5d %15.8e %7.1e %7.1e %7.1e %5.1f %6.2f %6.2f %5s'
    log.info(hdr)
    log.info('-' * lhdr)

for problemName in args:

    nlp = AmplModel(problemName)
    problemName = os.path.splitext(os.path.basename(problemName))[0]

    # Create solution logger (if requested).
    if options.solution_requested:
        solution_logger = config_logger('elastic.solution',
                                        filename=problemName + '.sol',
                                        filemode='w',
                                        stream=None)

    t_setup, eif = pass_to_elastic(nlp, **opts)
    l1 = eif.l1bar.l1
    max_penalty = max(l1.get_penalty_parameters())

    if not multiple_problems:  # Output final statistics
        print
Example #17
        return None

    try: options, fname = getopt.getopt( arglist, '' )

    except getopt.error, e:
        commandline_err( "%s" % str( e ) )
        return None

    return fname[0]

ProblemName = parse_cmdline(sys.argv[1:])

# Create a NLPy AmplModel

print 'Problem', ProblemName
nlp = AmplModel( ProblemName ) #amplpy.AmplModel( ProblemName )


# Translate this NLPy - Ampl problem in a pyOpt problem

nlp.Uvar = numpy.inf * numpy.ones(9)

opt_prob = PyOpt_From_NLPModel(nlp)
print opt_prob

# Call the imported solver SNOPT
snopt = SNOPT()


# Choose the sensitivity type for computing gradients:
#  'FD'               : finite differences
Example #18
    """
    Return the square root of a linear operator, if defined. Note that
    this is not the elementwise square root. The result is a linear operator
    that, when composed with itself, yields the original operator.
    """
    return op._sqrt()

if __name__ == '__main__':
    from pykrylov.tools import check_symmetric
    from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
    from nlpy.model import AmplModel
    import sys

    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    nlp = AmplModel(sys.argv[1])
    J = sp(matrix=nlp.jac(nlp.x0))
    e1 = np.ones(J.shape[0])
    e2 = np.ones(J.shape[1])
    print 'J.shape = ', J.getShape()

    print 'Testing PysparseLinearOperator:'
    op = PysparseLinearOperator(J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print "op.T * e1 = ", op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op.T.T.T * e1 = ', op.T.T.T * e1
    print 'With call:'
    print 'op(e2) = ', op(e2)
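
For intuition about the `_sqrt` contract described above (an operator that composes with itself to give the original), here is a self-contained NumPy sketch that builds a symmetric square root via an eigendecomposition; this is a plain illustration, not NLPy's implementation:

import numpy as np

A = np.array([[4., 1.],
              [1., 3.]])            # symmetric positive definite
w, V = np.linalg.eigh(A)            # A = V diag(w) V^T
R = np.dot(V * np.sqrt(w), V.T)     # R = V diag(sqrt(w)) V^T
print np.allclose(np.dot(R, R), A)  # True: R composed with R recovers A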
Example #19
    def cons(self, x):
        """
        Evaluate the vector of general constraints for the modified problem.
        Constraints are stored in the order in which they appear in the
        original problem. If constraint i is a range constraint, c[i] will
        be the constraint that has the slack on the lower bound on c[i].
        The constraint with the slack on the upper bound on c[i] will be stored
        in position m + k, where k is the position of index i in
        rangeC, i.e., k=0 iff constraint i is the range constraint that
        appears first, k=1 iff it appears second, etc.

        Constraints appear in the following order:

        1. [ c  ]   general constraints in original order
        2. [ cR ]   'upper' side of range constraints
        3. [ b  ]   linear constraints corresponding to bounds on original problem
        4. [ bR ]   linear constraints corresponding to 'upper' side of two-sided
                    bounds
        """
        n = self.n
        on = self.original_n
        m = self.m
        om = self.original_m
        equalC = self.equalC
        lowerC = self.lowerC
        nlowerC = self.nlowerC
        upperC = self.upperC
        nupperC = self.nupperC
        rangeC = self.rangeC
        nrangeC = self.nrangeC

        mslow = on + self.n_con_low
        msup = mslow + self.n_con_up
        s_low = x[on:mslow]  # len(s_low) = n_con_low
        s_up = x[mslow:msup]  # len(s_up)  = n_con_up

        c = numpy.empty(m)
        c[:om] = AmplModel.cons(self, x[:on])
        c[om:om + nrangeC] = c[rangeC]

        c[equalC] -= self.Lcon[equalC]
        c[lowerC] -= self.Lcon[lowerC]
        c[lowerC] -= s_low[:nlowerC]

        c[upperC] -= self.Ucon[upperC]
        c[upperC] *= -1
        c[upperC] -= s_up[:nupperC]

        c[rangeC] -= self.Lcon[rangeC]
        c[rangeC] -= s_low[nlowerC:]

        c[om:om + nrangeC] -= self.Ucon[rangeC]
        c[om:om + nrangeC] *= -1
        c[om:om + nrangeC] -= s_up[nupperC:]

        # Add linear constraints corresponding to bounds on original problem
        lowerB = self.lowerB
        nlowerB = self.nlowerB
        Lvar = self.Lvar
        upperB = self.upperB
        nupperB = self.nupperB
        Uvar = self.Uvar
        rangeB = self.rangeB
        nrangeB = self.nrangeB

        nt = on + self.n_con_low + self.n_con_up
        ntlow = nt + self.n_var_low
        t_low = x[nt:ntlow]
        t_up = x[ntlow:]

        b = c[om + nrangeC:]

        b[:nlowerB] = x[lowerB] - Lvar[lowerB] - t_low[:nlowerB]
        b[nlowerB:nlowerB + nrangeB] = \
            x[rangeB] - Lvar[rangeB] - t_low[nlowerB:]
        b[nlowerB + nrangeB:nlowerB + nrangeB + nupperB] = \
            Uvar[upperB] - x[upperB] - t_up[:nupperB]
        b[nlowerB + nrangeB + nupperB:] = \
            Uvar[rangeB] - x[rangeB] - t_up[nupperB:]

        return c
Example #20
            # Check second partial derivatives in turn.
            for i in range(n):
                xph = self.x.copy()
                xph[i] += h
                xmh = self.x.copy()
                xmh[i] -= h
                dgdx = (nlp.igrad(k, xph) - nlp.igrad(k, xmh)) / (2 * h)
                for j in range(i + 1):
                    dgjdxi = dgdx[j]
                    err = abs(Hk[i, j] - dgjdxi) / (1 + abs(Hk[i, j]))

                    line = self.d2fmt % (k + 1, i, j, Hk[i, j], dgjdxi, err)
                    if verbose:
                        sys.stderr.write(line)

                    if err > self.tol:
                        errs.append(line)

        return errs


if __name__ == '__main__':

    import sys
    from nlpy.model import AmplModel
    nlp = AmplModel(sys.argv[1])
    print 'Checking at x = ', nlp.x0
    derchk = DerivativeChecker(nlp, nlp.x0)
    derchk.check(verbose=True)
    nlp.close()
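
The loop above approximates column `i` of the Hessian of constraint `k` by a central difference of the gradient, (g(x + h*e_i) - g(x - h*e_i)) / (2h). A self-contained sketch on a function with a known Hessian (pure NumPy, no NLPy required):

import numpy as np

grad = lambda x: np.array([2 * x[0] * x[1], x[0]**2])  # gradient of f = x^2 * y
x = np.array([1.0, 2.0])
h = 1.0e-5
i = 0
xph = x.copy(); xph[i] += h
xmh = x.copy(); xmh[i] -= h
dgdx = (grad(xph) - grad(xmh)) / (2 * h)  # column i of the Hessian
print dgdx  # approximately [4., 2.], the exact values at (1, 2)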
Example #21
    def _jac(self, x, lp=False):
        """
        Helper method to assemble the Jacobian matrix of the constraints of the
        transformed problem. See the documentation of :meth:`jac` for more
        information.

        The keyword argument `lp` should be set to `True` only if the problem
        is known to be a linear program. In this case, the evaluation of the
        constraint matrix is cheaper and the argument `x` is ignored.
        """
        n = self.original_n
        m = self.original_m

        # List() simply allows operations such as 1 + [2,3] -> [3,4]
        lowerC = List(self.lowerC)
        nlowerC = self.nlowerC
        upperC = List(self.upperC)
        nupperC = self.nupperC
        rangeC = List(self.rangeC)
        nrangeC = self.nrangeC
        lowerB = List(self.lowerB)
        nlowerB = self.nlowerB
        upperB = List(self.upperB)
        nupperB = self.nupperB
        rangeB = List(self.rangeB)
        nrangeB = self.nrangeB
        nbnds = nlowerB + nupperB + 2 * nrangeB
        nSlacks = nlowerC + nupperC + 2 * nrangeC

        # Initialize sparse Jacobian
        nnzJ = 2 * self.nnzj + m + nrangeC + nbnds + nrangeB  # Overestimate
        J = sp(nrow=self.m, ncol=self.n, sizeHint=nnzJ)

        # Insert contribution of general constraints
        if lp:
            J[:m, :n] = AmplModel.A(self)
        else:
            J[:m, :n] = AmplModel.jac(self, x[:n])
        J[upperC, :n] *= -1.0  # Flip sign of 'upper' gradients
        J[m:m + nrangeC, :n] = J[rangeC, :n]  # Append 'upper' side of range const.
        J[m:m + nrangeC, :n] *= -1.0  # Flip sign of 'upper' range gradients.

        # Create a few index lists
        rlowerC = List(range(nlowerC))
        rlowerB = List(range(nlowerB))
        rupperC = List(range(nupperC))
        rupperB = List(range(nupperB))
        rrangeC = List(range(nrangeC))
        rrangeB = List(range(nrangeB))

        # Insert contribution of slacks on general constraints
        J.put(-1.0, lowerC, n + rlowerC)
        J.put(-1.0, upperC, n + nlowerC + rupperC)
        J.put(-1.0, rangeC, n + nlowerC + nupperC + rrangeC)
        J.put(-1.0, m + rrangeC, n + nlowerC + nupperC + nrangeC + rrangeC)

        # Insert contribution of bound constraints on the original problem
        bot = m + nrangeC
        J.put(1.0, bot + rlowerB, lowerB)
        bot += nlowerB
        J.put(1.0, bot + rrangeB, rangeB)
        bot += nrangeB
        J.put(-1.0, bot + rupperB, upperB)
        bot += nupperB
        J.put(-1.0, bot + rrangeB, rangeB)

        # Insert contribution of slacks on the bound constraints
        bot = m + nrangeC
        J.put(-1.0, bot + rlowerB, n + nSlacks + rlowerB)
        bot += nlowerB
        J.put(-1.0, bot + rrangeB, n + nSlacks + nlowerB + rrangeB)
        bot += nrangeB
        J.put(-1.0, bot + rupperB, n + nSlacks + nlowerB + nrangeB + rupperB)
        bot += nupperB
        J.put(-1.0, bot + rrangeB,
              n + nSlacks + nlowerB + nrangeB + nupperB + rrangeB)

        return J
Example #22
    "Return a linear operator from a Numpy `ndarray`."
    return LinearOperator(A.shape[1], A.shape[0],
                          lambda v: np.dot(A, v),
                          matvec_transp=lambda u: np.dot(A.T, u),
                          symmetric=False)


if __name__ == '__main__':
    from pykrylov.tools import check_symmetric
    from pysparse.sparse.pysparseMatrix import PysparseMatrix as sp
    from nlpy.model import AmplModel
    import sys

    np.set_printoptions(precision=3, linewidth=80, threshold=10, edgeitems=3)

    nlp = AmplModel(sys.argv[1])
    J = sp(matrix=nlp.jac(nlp.x0))
    e1 = np.ones(J.shape[0])
    e2 = np.ones(J.shape[1])
    print 'J.shape = ', J.getShape()

    print 'Testing PysparseLinearOperator:'
    op = PysparseLinearOperator(J)
    print 'op.shape = ', op.shape
    print 'op.T.shape = ', op.T.shape
    print 'op * e2 = ', op * e2
    print "op.T * e1 = ", op.T * e1
    print 'op.T.T * e2 = ', op.T.T * e2
    print 'op.T.T.T * e1 = ', op.T.T.T * e1
    print 'With call:'
    print 'op(e2) = ', op(e2)
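
A quick sanity check for operators built this way is the adjoint identity <A v, u> = <v, A^T u>. A hedged, self-contained sketch, assuming `LinearOperator` is importable from the same module as `PysparseLinearOperator` above:

import numpy as np
from nlpy.krylov.linop import LinearOperator  # assumption: same module as above

A = np.array([[1., 2., 0.],
              [0., 3., 4.]])
op = LinearOperator(A.shape[1], A.shape[0],
                    lambda v: np.dot(A, v),
                    matvec_transp=lambda u: np.dot(A.T, u),
                    symmetric=False)
u = np.ones(A.shape[0])
v = np.ones(A.shape[1])
print abs(np.dot(op * v, u) - np.dot(v, op.T * u))  # should be ~0.0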
Example #23
try:
    from nlpy.model import AmplModel
except ImportError:
    msg = 'NLPy is required to run this demo. See http://nlpy.sf.net'
    raise RuntimeError, msg

from pyorder.tools import coord2csc
from pyorder.pymc60 import sloan, rcmk
from pyorder.tools.spy import FastSpy
import numpy as np
import matplotlib.pyplot as plt

nlp = AmplModel('truss18bars.nl')
x = np.random.random(nlp.n)
y = np.random.random(nlp.m)
H = nlp.hess(x,y)
(val,irow,jcol) = H.find()
(rowind, colptr, values) = coord2csc(nlp.n, irow, jcol, val)  # Convert to CSC

perm1, rinfo1 = rcmk(nlp.n, rowind, colptr)   # Reverse Cuthill-McKee
perm2, rinfo2 = sloan(nlp.n, rowind, colptr)  # Sloan's method

left = plt.subplot(131)
FastSpy(nlp.n, nlp.n, irow, jcol, sym=True,
        ax=left.get_axes(), title='Original')

# Apply permutation 1 and plot reordered matrix
middle = plt.subplot(132)
FastSpy(nlp.n, nlp.n, perm1[irow], perm1[jcol], sym=True,
        ax=middle.get_axes(), title='Rev. Cuthill-McKee (semibandwidth=%d)' % rinfo1[2])
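
The listing is cut off here. A plausible completion (an assumption, mirroring the middle panel) would plot the Sloan ordering in the third panel and display the figure:

# Apply permutation 2 and plot reordered matrix (hedged completion).
right = plt.subplot(133)
FastSpy(nlp.n, nlp.n, perm2[irow], perm2[jcol], sym=True,
        ax=right.get_axes(), title='Sloan (semibandwidth=%d)' % rinfo2[2])
plt.show()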