def setUp(self):
    # Assumes module-level imports of os, numpy as np and pytest, and a
    # this_path variable pointing at the directory holding the .nl files.
    pytest.importorskip("nlp.model.amplmodel")
    pytest.importorskip("scipy")
    model = os.path.join(this_path, 'hs010.nl')
    self.model = AugmentedLagrangian(SciPyAmplModel(model), prox=1.0)
    self.model.pi = np.array([3.])
    self.x = np.array([2, 2, 1], dtype=float)
def setUp(self):
    pytest.importorskip("nlp.model.amplmodel")
    pytest.importorskip("cysparse")
    model = os.path.join(this_path, 'hs007.nl')
    self.model = AugmentedLagrangian(CySparseAmplModel(model), prox=1.0)
    self.model.pi = 3
    self.x = np.array([2, 2], dtype=float)
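Both fixtures leave self.model holding an AugmentedLagrangian and self.x a candidate point, so a test method only needs the standard NLPModel interface (obj/grad). A minimal sketch of such a test; the method name and assertions are illustrative, not taken from the original suite:

def test_auglag_is_evaluable(self):
    # Illustrative check only: the AugmentedLagrangian object follows the
    # NLPModel interface, so obj() and grad() evaluate at the fixture point.
    val = self.model.obj(self.x)
    grad = self.model.grad(self.x)
    assert np.isfinite(val)
    assert grad.shape == self.x.shape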
    def __init__(self, model, bc_solver, **kwargs):
        u"""Instantiate an augmented Lagrangian solver for general constrained problem.

        The model should have the general form

            min f(x)  subject to   cₗ ≤ c(x) ≤ cᵤ,    l ≤ x ≤ u.

        The augmented Lagrangian is defined as:

            L(x, π; ρ) := f(x) - πᵀc(x) + ½ ρ ‖c(x)‖²,

        where π are the current Lagrange multiplier estimates and ρ is the
        current penalty parameter.

        The algorithm stops as soon as the infinity norm of the projected
        gradient of the Lagrangian falls below ``max(abstol, reltol * pg0)``
        where ``pg0`` is the infinity norm of the projected gradient of the
        Lagrangian at the initial point.

        :parameters:
            :model:            a :class:`NLPModel` object representing the
                               problem. For instance, model may arise from an
                               AMPL model
            :bc_solver:        a solver for solving the inner iteration
                               subproblem
        :keywords:
            :x0:               starting point                  (`model.x0`)
            :reltol:           relative stopping tolerance     (1.0e-5)
            :abstol:           absolute stopping tolerance     (1.0e-12)
            :maxiter:          maximum number of inner iterations (100*n)
            :maxupdate:        maximum number of penalty or multiplier
                               updates                         (100)
            :ny:               apply Nocedal/Yuan linesearch   (False)
            :nbk:              max number of backtracking steps in Nocedal/Yuan
                               linesearch                      (5)
            :monotone:         use monotone descent strategy   (False)
            :n_iter_non_mono:  number of iterations for which non-strict
                               descent can be tolerated if monotone = False
                                                               (25)
            :least_squares_pi: initialize with least squares multipliers (True)
            :full_qn:          use a full quasi-Newton model of the augmented
                               Lagrangian                      (False)
            :logger_name:      name of a logger object that can be used in the
                               post-iteration                  (nlp.auglag)

        :Exit codes:
            :opt:    Optimal solution found
            :iter:   Maximum iteration reached
            :feas:   Feasible, but not optimal, solution found
            :fail:   Cannot make further progress from current point
            :stal:   Problem converged to an infeasible point
            :time:   Time limit exceeded
        """
        full_qn = kwargs.get("full_qn", False)
        if full_qn:
            self.model = QuasiNewtonAugmentedLagrangian(model, **kwargs)
        else:
            self.model = AugmentedLagrangian(model, **kwargs)

        print(self.model)
        print(self.model.model)

        self.x = kwargs.get("x0", self.model.x0.copy())

        self.least_squares_pi = kwargs.get("least_squares_pi", True)

        self.bc_solver = bc_solver

        self.tau = kwargs.get("tau", 0.1)
        self.omega = None
        self.eta = None
        self.eta0 = 0.1258925
        self.omega0 = 1.
        self.omega_init = kwargs.get(
            "omega_init", self.omega0 * 0.1)  # penalty_init**-1
        self.eta_init = kwargs.get(
            "eta_init", self.eta0**0.1)  # penalty_init**-0.1
        self.a_omega = kwargs.get("a_omega", 1.)
        self.b_omega = kwargs.get("b_omega", 1.)
        self.a_eta = kwargs.get("a_eta", 0.1)
        self.b_eta = kwargs.get("b_eta", 0.9)
        self.omega_rel = kwargs.get("omega_rel", 1.e-5)
        self.omega_abs = kwargs.get("omega_abs", 1.e-7)
        self.eta_rel = kwargs.get("eta_rel", 1.e-5)
        self.eta_abs = kwargs.get("eta_abs", 1.e-7)

        self.f0 = self.f = None

        # Maximum number of inner iterations
        self.maxiter = kwargs.get("maxiter",
                                  100 * self.model.model.original_n)

        self.maxupdate = kwargs.get("maxupdate", 100)

        # Maximum run time
        self.maxtime = kwargs.get("maxtime", 3600.)

        self.update_on_rejected_step = False

        self.inner_fail_count = 0
        self.status = None

        self.hformat = "%-5s  %8s  %8s  %8s  %8s  %5s  %4s  %8s  %8s"
        self.header = self.hformat % ("iter", "f", u"‖P∇L‖", u"‖c‖", u"ρ",
                                      "inner", "stat", u"ω", u"η")
        self.format = "%-5d %8.1e %8.1e %8.1e %8.1e %5d %4s %8.1e %8.1e"
        self.format0 = "%-5d %8.1e %8.1e %8s %8s %5s %4s %8.1e %8.1e"

        # Initialize some counters for counting number of Hprod used in
        # BQP linesearch and CG.
        self.hprod_bqp_linesearch = 0
        self.hprod_bqp_linesearch_fail = 0
        self.nlinesearch = 0
        self.hprod_bqp_cg = 0
        self.tsolve = 0.0

        # Set up the logger. Install a NullHandler if no output is needed.
        logger_name = kwargs.get("logger_name", "nlp.auglag")
        self.log = logging.getLogger(logger_name)
        self.log.propagate = False
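For reference, the merit function and stopping rule described in the docstring above are easy to state directly. The sketch below mirrors L(x, π; ρ) = f(x) - πᵀc(x) + ½ ρ ‖c(x)‖² and the max(abstol, reltol * pg0) test; the function names are illustrative helpers, not part of the library's API:

import numpy as np

def augmented_lagrangian(f, c, x, pi, rho):
    # Direct transcription of the docstring formula
    # L(x, pi; rho) = f(x) - pi'c(x) + 0.5 * rho * ||c(x)||^2,
    # where f and c are callables returning the objective value and the
    # constraint vector (illustrative, not the library's implementation).
    cx = np.asarray(c(x))
    return f(x) - np.dot(pi, cx) + 0.5 * rho * np.dot(cx, cx)

def outer_converged(pg, pg0, abstol=1.0e-12, reltol=1.0e-5):
    # Stopping rule from the docstring: the infinity norm of the projected
    # gradient of the Lagrangian must fall below max(abstol, reltol * pg0),
    # where pg0 is that norm at the initial point.
    return pg <= max(abstol, reltol * pg0)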
def setUp(self):
    self.model = AugmentedLagrangian(SimpleQP(), prox=1.0)
    self.model.pi = 3
    self.x = np.array([1, 2], dtype=float)
@pytest.fixture(params=[2, 5])  # hypothetical parametrization; the original
                                # decorator is not shown in this snippet
def rosenbrock(request):
    return AugmentedLagrangian(Rosenbrock(request.param), prox=1.0)
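A test consuming this fixture receives one AugmentedLagrangian instance per parameter value. A minimal usage sketch, assuming only the NLPModel convention that the wrapped model exposes x0 and obj; the assertion is illustrative:

def test_rosenbrock_evaluates(rosenbrock):
    # Rosenbrock is unconstrained, so the augmented Lagrangian reduces to
    # the objective plus the proximal term; it should evaluate at x0.
    assert np.isfinite(rosenbrock.obj(rosenbrock.x0))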