Code example #1
File: test_amplmodel.py Project: codeants2012/NLP.py
    def test_obj_scaling(self):
        model = AmplModel(self.model_name)
        log = config_logger("nlp.der",
                            "%(name)-10s %(levelname)-8s %(message)s",
                            level=logging.DEBUG)
        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.grad_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)

        model.compute_scaling_obj(g_max=1.)
        assert model.obj(model.x0) == -0.39056208756589972
        assert np.allclose(model.grad(model.x0), np.array([0.8, -1.]))
        assert model.scale_obj == 1.

        model.compute_scaling_obj(reset=True)
        assert model.scale_obj is None

        model.compute_scaling_obj(g_max=0.5)
        assert model.obj(model.x0) == -0.19528104378294986
        assert np.allclose(model.grad(model.x0), np.array([0.4, -0.5]))
        assert np.allclose(
            model.sgrad(model.x0).to_array(), np.array([0.4, -0.5]))
        assert model.scale_obj == 0.5

        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.grad_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)
Code example #2
    def test_derivatives(self):
        log = config_logger("nlp.der",
                            "%(name)-10s %(levelname)-8s %(message)s",
                            level=logging.DEBUG)
        dcheck = DerivativeChecker(self.model, self.x)
        dcheck.check(chess=False)
        assert len(dcheck.grad_errs) == 0
        assert len(dcheck.hess_errs) == 0
Code example #3
File: test_amplmodel.py Project: codeants2012/NLP.py
    def test_cons_scaling(self):
        model = self.model
        log = config_logger("nlp.der",
                            "%(name)-10s %(levelname)-8s %(message)s",
                            level=logging.DEBUG)
        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.jac_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)

        model.compute_scaling_cons(g_max=2.)
        assert np.allclose(model.scale_con, np.array([1.]))
        cons = np.array([0.])
        for j in range(model.ncon):
            assert np.allclose(model.icons(j, model.x0), cons[j])
        assert np.allclose(model.cons(model.x0), cons)
        assert np.allclose(
            model.jop(model.x0).to_array(), np.array([[1., 2.]]))
        assert np.allclose(
            ndarray_from_coord(model.ncon,
                               model.nvar,
                               *model.A(),
                               symmetric=False), np.array([[1., 2.]]))

        model.compute_scaling_cons(reset=True)
        assert model.scale_con is None

        model.compute_scaling_cons(g_max=1.)
        assert np.allclose(model.scale_con, np.array([0.5]))
        assert np.allclose(model.cons(model.x0), np.array([0.]))
        assert np.allclose(
            model.jop(model.x0).to_array(), np.array([[0.5, 1.]]))
        assert np.allclose(
            ndarray_from_coord(model.ncon,
                               model.nvar,
                               *model.A(),
                               symmetric=False), np.array([[0.5, 1.]]))
        assert np.allclose(model.igrad(0, model.x0), np.array([0.5, 1.]))
        assert np.allclose(
            model.sigrad(0, model.x0).to_array(), np.array([0.5, 1.]))

        v = np.ones(model.nvar)
        assert np.allclose(model.hiprod(model.x0, 0, v), np.zeros(model.n))

        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.jac_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)

        print model.display_basic_info()
Code example #4
File: test_amplmodel.py Project: codeants2012/NLP.py
    def test_cons_scaling(self):
        model = AmplModel(self.model_name)
        log = config_logger("nlp.der",
                            "%(name)-10s %(levelname)-8s %(message)s",
                            level=logging.DEBUG)
        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.jac_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)

        model.compute_scaling_cons(g_max=40.)
        assert np.allclose(model.scale_con, np.array([1.]))
        assert np.allclose(model.cons(model.x0), np.array([29.0]))
        assert np.allclose(
            model.jop(model.x0).to_array(), np.array([[40., 4.]]))
        model.compute_scaling_cons(reset=True)
        assert model.scale_con is None

        model.compute_scaling_cons(g_max=20.)
        assert np.allclose(model.cons(model.x0), np.array([14.5]))
        assert np.allclose(model.scale_con, np.array([0.5]))
        assert np.allclose(
            model.jop(model.x0).to_array(), np.array([[20., 2.]]))
        assert np.allclose(model.igrad(0, model.x0), np.array([20., 2.]))
        assert np.allclose(
            model.sigrad(0, model.x0).to_array(), np.array([20., 2.]))

        v = np.ones(model.nvar)
        assert np.allclose(model.hiprod(model.x0, 0, v), np.array([26., 1.]))

        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.jac_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)

        print model.display_basic_info()
Code example #5
File: test_amplmodel.py Project: codeants2012/NLP.py
    def test_obj_scaling(self):
        model = self.model
        model.x0 = np.zeros(model.nvar)

        log = config_logger("nlp.der",
                            "%(name)-10s %(levelname)-8s %(message)s",
                            level=logging.DEBUG)
        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.grad_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)

        model.compute_scaling_obj(g_max=1.)
        assert model.scale_obj == 1.
        assert model.obj(model.x0) == 1.
        assert np.allclose(model.grad(model.x0), np.array([1., 0.]))
        print model.cost()
        assert np.allclose(model.cost().to_array(), np.array([1., 0.]))

        model.compute_scaling_obj(reset=True)
        assert model.scale_obj is None

        model.compute_scaling_obj(g_max=0.5)
        assert model.scale_obj == 0.5
        assert model.obj(model.x0) == 0.5
        assert np.allclose(model.grad(model.x0), np.array([0.5, 0.]))
        assert np.allclose(model.cost().to_array(), np.array([0.5, 0.]))
        assert np.allclose(
            model.sgrad(model.x0).to_array(), np.array([0.5, 0.]))

        dcheck = DerivativeChecker(model, model.x0, tol=1e-5)
        dcheck.check(hess=True, chess=True)
        assert len(dcheck.grad_errs) == 0
        assert len(dcheck.hess_errs) == 0
        for j in xrange(model.ncon):
            assert (len(dcheck.chess_errs[j]) == 0)
Code example #6
    from nlp.optimize.funnel import QNFunnel as Funnel
    from pykrylov.linop import CompactLSR1Operator as QNOperator

    opts["H"] = QNOperator
    opts["npairs"] = args.npairs
    opts["scaling"] = True
else:
    from nlp.model.pysparsemodel import PySparseAmplModel as Model
    from nlp.optimize.funnel import Funnel

nprobs = len(other)
if nprobs == 0:
    raise ValueError("Please supply problem name as argument")

# Create root logger.
logger = config_logger("nlp", "%(name)-9s %(levelname)-5s %(message)s")

# Create Funnel logger.
funnel_logger = config_logger(
    "funnel",
    "%(name)-9s %(levelname)-5s %(message)s",
    level=logging.WARN if nprobs > 1 else logging.DEBUG)
qn_logger = config_logger("qn",
                          "%(name)-9s %(levelname)-5s %(message)s",
                          level=logging.WARN if nprobs > 1 else logging.DEBUG)

if nprobs > 1:
    logger.info("%12s %5s %5s %8s %8s %8s %6s %6s %6s %7s", "name", "nvar",
                "iter", "f", u"‖c‖", u"‖g+Jᵀy‖", "#f", u"#∇f", "stat", "time")

for problem in other:
Code example #7
File: demo_pyomo.py Project: codeants2012/NLP.py
except ImportError:
    import sys
    print "Pyomo is not installed."
    sys.exit(0)

from nlp.model.amplmodel import AmplModel
from nlp.optimize.tron import TRON
from nlp.optimize.pcg import TruncatedCG
from nlp.tools.logs import config_logger
from nlp.tools.utils import evaluate_model_methods_at_starting_point
import logging

import numpy as np

tron_logger = config_logger("nlp.tron",
                            "%(name)-8s %(levelname)-5s %(message)s",
                            level=logging.INFO)


if __name__ == "__main__":
    # Create a Pyomo Rosenbrock model
    pyomo_model = ConcreteModel()
    pyomo_model.x = Var()
    pyomo_model.y = Var(bounds=(-1.5, None))
    pyomo_model.o = Objective(expr=(pyomo_model.x - 1)**2 + \
                                    100 * (pyomo_model.y - pyomo_model.x**2)**2)
    pyomo_model.x.set_value(-2.)
    pyomo_model.y.set_value(1.)

    # Write the Pyomo model in NL file format.
    nl_filename = "rosenbrock.nl"
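
The demo stops here in this excerpt. As a hedged continuation sketch (an assumption, not the project's own code): Pyomo's ConcreteModel.write() can emit the model to the NL file named above, with the NL format inferred from the ".nl" extension, and the resulting file can then be loaded through AmplModel and checked with DerivativeChecker just as in the other examples on this page.

    # Hedged continuation sketch -- assumptions, not the original demo code.
    from nlp.tools.dercheck import DerivativeChecker

    # Write the Pyomo model to the NL file chosen above; the NL format is
    # assumed to be inferred from the ".nl" extension.
    pyomo_model.write(nl_filename)

    # Reload the NL file as an AmplModel and verify its derivatives, as the
    # other examples on this page do.
    model = AmplModel(nl_filename)
    dcheck = DerivativeChecker(model, model.x0)
    dcheck.check()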
Code example #8
                    help="maximum number of iterations")

# Parse command-line arguments.
(args, other) = parser.parse_known_args()

if args.armijo:
    from nlp.optimize.lbfgs import LBFGS
else:
    from nlp.optimize.lbfgs import WolfeLBFGS as LBFGS

nprobs = len(other)
if nprobs == 0:
    raise ValueError("Please supply problem name as argument")

# Create root logger.
logger = config_logger("nlp", "%(name)-3s %(levelname)-5s %(message)s")

# Create LBFGS logger.
slv_log = config_logger("nlp.lbfgs",
                        "%(name)-9s %(levelname)-5s %(message)s",
                        level=logging.WARN if nprobs > 1 else logging.INFO)

logger.info("%10s %5s %6s %8s %8s %6s %6s %5s %7s", "name", "nvar", "iter",
            "f", u"‖∇f‖", "#f", u"#∇f", "stat", "time")

for problem in other:
    model = QNAmplModel(problem,
                        H=InverseLBFGSOperator,
                        npairs=args.npairs,
                        scaling=True)
    model.compute_scaling_obj()
Code example #9
# Argument parsing, take list of names from the command line
parser = argparse.ArgumentParser()
parser.add_argument("name_list", nargs='+', help="list of SIF files to process")
parser.add_argument("--qn_type", type=str, default="bfgs", choices=["bfgs","sr1"],
    help="the type of quasi-Newton approximation to be used")
parser.add_argument("--approx_type", type=str, default=None, choices=[None,"full","struct"],
    help="choose exact Hessian (None), full approximate Hessian (full), or structured approximation (struct)")
args = parser.parse_args()

nprobs = len(args.name_list)

# Create root logger.
# logger = config_logger("nlp", "%(name)-3s %(levelname)-5s %(message)s")
logger = config_logger("nlp", "%(name)-3s %(levelname)-5s %(message)s",
                                stream=None,
                                filename="cutest_rough.txt", filemode="a")

# Create Auglag logger.
auglag_logger = config_logger("nlp.auglag",
                              "%(name)-8s %(levelname)-5s %(message)s",
                              level=logging.WARN if nprobs > 1 else logging.INFO)

# Create TRON logger.
tron_logger = config_logger("nlp.tron",
                            "%(name)-8s %(levelname)-5s %(message)s",
                            level=logging.WARN if nprobs > 1 else logging.INFO)

if nprobs > 1:
    logger.info("%9s %5s %5s %6s %8s %8s %6s %6s %5s %7s",
                "name", "nvar", "ncon", "iter", "f", u"‖P∇L‖", "#f", u"#∇f", "stat",
Code example #10
File: nlp_dercheck.py Project: codeants2012/NLP.py
#!/usr/bin/env python
"""Simple AMPL driver for the derivative checker."""

from nlp.model.pysparsemodel import PySparseAmplModel
from nlp.tools.dercheck import DerivativeChecker
from nlp.tools.logs import config_logger
import sys

if len(sys.argv) == 1:
    raise ValueError("Please supply problem name as argument")

# Create root logger.
log = config_logger("nlp.der", "%(name)-10s %(levelname)-8s %(message)s")

nlp = PySparseAmplModel(sys.argv[1])
dcheck = DerivativeChecker(nlp, nlp.x0)
dcheck.check()
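
The test examples above assert on the checker's error containers (grad_errs, jac_errs, hess_errs and the per-constraint chess_errs). Below is a hedged follow-up sketch for this driver: it requests the full set of checks explicitly, as the tests do, and logs a summary. The reporting lines are an addition built only from attributes shown on this page, not part of the original script.

# Hedged follow-up sketch: run the full check, as the tests above do,
# and log a summary of the error containers.
dcheck = DerivativeChecker(nlp, nlp.x0, tol=1e-5)
dcheck.check(hess=True, chess=True)
log.info("gradient errors: %d", len(dcheck.grad_errs))
log.info("Jacobian errors: %d", len(dcheck.jac_errs))
log.info("Hessian errors: %d", len(dcheck.hess_errs))
for j in xrange(nlp.ncon):
    log.info("constraint %d Hessian errors: %d", j, len(dcheck.chess_errs[j]))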
Code example #11
File: nlp_auglag.py Project: codeants2012/NLP.py
        pg = auglag.pgnorm
        ts = auglag.tsolve
    else:
        it = -auglag.iter
        fc, gc = -auglag.model.obj.ncalls, -auglag.model.grad.ncalls
        pg = -1.0 if auglag.pgnorm is None else -auglag.pgnorm
        ts = -1.0 if auglag.tsolve is None else -auglag.tsolve
    return (it, fc, gc, pg, ts)


nprobs = len(sys.argv) - 1
if nprobs == 0:
    raise ValueError("Please supply problem name as argument")

# Create root logger.
logger = config_logger("nlp", "%(name)-3s %(levelname)-5s %(message)s")

# Create Auglag logger.
auglag_logger = config_logger("nlp.auglag",
                              "%(name)-8s %(levelname)-5s %(message)s",
                              level=logging.WARN if nprobs > 1 else logging.INFO)

# Create TRON logger.
tron_logger = config_logger("nlp.tron",
                            "%(name)-8s %(levelname)-5s %(message)s",
                            level=logging.WARN if nprobs > 1 else logging.INFO)

if nprobs > 1:
    logger.info("%12s %5s %6s %8s %8s %6s %6s %5s %7s",
                "name", "nvar", "iter", "f", u"‖P∇f‖", "#f", u"#∇f", "stat",
                "time")
Code example #12
File: nlp_cqp.py Project: codeants2012/NLP.py
        pg = cqp.pgnorm
        ts = cqp.tsolve
    else:
        it = -cqp.iter
        fc, gc = -cqp.model.obj.ncalls, -cqp.model.grad.ncalls
        pg = -1.0 if cqp.pgnorm is None else -cqp.pgnorm
        ts = -1.0 if cqp.tsolve is None else -cqp.tsolve
    return (it, fc, gc, pg, ts)


nprobs = len(sys.argv) - 1
if nprobs == 0:
    raise ValueError("Please supply problem name as argument")

# Create root logger.
logger = config_logger("nlp", "%(name)-3s %(levelname)-5s %(message)s")

# Create CQP logger.
cqp_logger = config_logger("nlp.cqp",
                           "%(name)-8s %(levelname)-5s %(message)s",
                           level=logging.WARN if nprobs > 1 else logging.INFO)

if nprobs > 1:
    logger.info("%12s %5s %6s %8s %8s %6s %6s %5s %7s", "name", "nvar", "iter",
                "f", u"‖P∇f‖", "#f", u"#∇f", "stat", "time")

for problem in sys.argv[1:]:
    model = PySparseAmplModel(problem)
    model.compute_scaling_obj()

    # Check for inequality- or equality-constrained problem.