Example #1
def c1boundedrosenbrock_restriction_feas(request):
    nvar = request.param
    # The original model has bounds 0 ≤ x ≤ 1.
    # We choose an x inside the bounds and a random d.
    return C1LineModel(BoundedRosenbrock(nvar, np.zeros(nvar), np.ones(nvar)),
                       np.random.random(nvar),
                       np.random.random(nvar) - 0.5)
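Throughout these examples, `C1LineModel` restricts a continuously differentiable model to the line t ↦ x + t·d, so every linesearch below works on the one-dimensional function φ(t) = f(x + t·d) with slope φ'(t) = ∇f(x + t·d)ᵀd. A minimal NumPy sketch of that restriction (the names `restrict_to_line`, `phi` and `dphi` are ours, not library API):

import numpy as np

def restrict_to_line(f, grad, x, d):
    """Restrict f to the line t -> x + t*d.

    Returns phi(t) = f(x + t*d) and dphi(t) = grad(x + t*d).dot(d),
    which is essentially the view a C1LineModel gives a linesearch.
    """
    def phi(t):
        return f(x + t * d)

    def dphi(t):
        return np.dot(grad(x + t * d), d)

    return phi, dphi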
Example #2
    def nyf(self, x, f, f_trial, g, step, bkmax=5, armijo=1.0e-4):
        """Perform a simple backtracking linesearch on the objective function.

        Linesearch is performed starting from `x` along direction `step`.
        Here, `f` and `f_trial` are the objective value at `x` and `x + step`,
        respectively, and `g` is the gradient of the objective at `x`.

        Return (x, f, steplength), where `x + steplength * step` satisfies
        the Armijo condition and `f` is the objective value at this new point.
        """
        ls_fmt = "nyf-ls: %7.1e  %8.1e"

        slope = np.dot(g, step)
        line_model = C1LineModel(self.model, x, step)
        ls = ArmijoLineSearch(line_model,
                              value=f,
                              trial_value=f_trial,
                              slope=slope,
                              bkmax=bkmax,
                              decr=1.2,
                              ftol=armijo)
        try:
            for step in ls:
                self.log.debug(ls_fmt, step, ls.trial_value)
        except LineSearchFailure:
            pass
        self.log.debug(ls_fmt, ls.step, ls.trial_value)

        return (ls.iterate, ls.trial_value, ls.step)
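For reference, the Armijo condition enforced above accepts a trial step t as soon as f(x + t·step) ≤ f(x) + armijo·t·∇f(x)ᵀstep. A standalone backtracking sketch of the same rule (a hypothetical helper, not the library's `ArmijoLineSearch`):

import numpy as np

def armijo_backtrack(f, x, fx, gx, d, t=1.0, ftol=1.0e-4, decr=1.5, bkmax=5):
    """Shrink t until f(x + t*d) <= f(x) + ftol*t*slope or bkmax is hit.

    The arguments mirror `value`, `slope`, `ftol`, `decr` and `bkmax`
    in the ArmijoLineSearch call above.
    """
    slope = np.dot(gx, d)  # directional derivative at x; must be negative
    for _ in range(bkmax + 1):
        if f(x + t * d) <= fx + ftol * t * slope:
            return t       # sufficient decrease achieved
        t /= decr          # backtrack and try a shorter step
    raise RuntimeError("no Armijo step found")  # plays the role of LineSearchFailure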
Example #3
def c1boundedrosenbrock_restriction_infeas(request):
    nvar = request.param
    # The original model has bounds 0 ≤ x ≤ 1.
    # We choose an x outside the bounds and a fixed d.
    x = np.zeros(nvar)
    x[0] = 2
    return C1LineModel(BoundedRosenbrock(nvar, np.zeros(nvar), np.ones(nvar)),
                       x, np.ones(nvar))
Example #4
    def test_ls_steepest(self):
        x = np.ones(self.model.n)
        g = self.model.grad(x)
        c1model = C1LineModel(self.model, x, -g)
        ls = QuadraticCubicLineSearch(c1model)
        with pytest.raises(StopIteration):
            next(ls)
        assert np.allclose(ls.iterate, np.zeros(self.model.n))
Example #5
    def test_ls(self):
        x = np.array([-4.])
        g = np.array([1.])
        c1model = C1LineModel(self.model, x, g)
        ls = QuadraticCubicLineSearch(c1model, step=8)
        next(ls)
        next(ls)
        with pytest.raises(StopIteration):
            next(ls)
        assert np.allclose(ls.iterate, np.array([1.69059892324]))
Example #6
    def nyc(self,
            x,
            theta,
            theta_trial,
            c,
            grad_theta,
            step,
            bkmax=5,
            armijo=1.0e-4):
        u"""Perform a backtracking linesearch on the infeasibility measure.

        Linesearch is performed on the infeasibility measure defined by
            Θ(x) = 1/2 ‖c(x)‖²
        starting from `x` along direction `step`.

        Here, `theta` and `theta_trial` are the infeasibility at `x` and
        `x + step`, respectively, `c` is the vector of constraints at `x`, and
        `grad_theta` is the gradient of the infeasibility measure at `x`.

        Return (x, c, theta, steplength), where `x + steplength * step`
        satisfies the Armijo condition, and `c` and `theta` are the vector of
        constraints and the infeasibility at this new point, respectively.
        """
        ls_fmt = "nyc-ls: %7.1e  %8.1e"

        slope = np.dot(grad_theta, step)
        line_model = C1LineModel(InfeasibilityModel(self.model), x, step)
        ls = ArmijoLineSearch(line_model,
                              value=theta,
                              trial_value=theta_trial,
                              slope=slope,
                              bkmax=bkmax,
                              decr=1.2,
                              ftol=armijo)

        try:
            for step in ls:
                self.log.debug(ls_fmt, step, ls.trial_value)
        except LineSearchFailure:
            pass

        c_trial = self.model.cons(ls.iterate)
        self.log.debug(ls_fmt, ls.step, ls.trial_value)

        return (ls.iterate, c_trial, ls.trial_value, ls.step)
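The model searched here encodes the infeasibility measure Θ(x) = ½‖c(x)‖², whose gradient by the chain rule is ∇Θ(x) = J(x)ᵀc(x), with J the constraint Jacobian. A minimal NumPy sketch of those two formulas (plain helpers; the library wraps them in `InfeasibilityModel` instead):

import numpy as np

def theta(c):
    """Infeasibility measure Theta(x) = 1/2 ||c(x)||^2 for c = c(x)."""
    return 0.5 * np.dot(c, c)

def grad_theta(J, c):
    """Gradient of Theta at x: J(x)^T c(x), J being the constraint Jacobian."""
    return J.T.dot(c)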
Example #7
def c1rosenbrock_restriction(request):
    return C1LineModel(Rosenbrock(request.param), np.zeros(request.param),
                       np.ones(request.param))
Example #8
def rosenbrock_wolfe_ascent(request):
    model = Rosenbrock(request.param)
    x = np.zeros(request.param)
    g = model.grad(x)
    c1model = C1LineModel(model, x, g)  # ascent direction!
    return c1model
Example #9
def rosenbrock_wolfe(request):
    model = Rosenbrock(request.param)
    x = np.zeros(request.param)
    g = model.grad(x)
    c1model = C1LineModel(model, x, -g)  # steepest descent direction
    return StrongWolfeLineSearch(c1model)
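`StrongWolfeLineSearch` looks for a step satisfying both sufficient decrease and a curvature condition, which is why the ascent fixture above is expected to make it fail. The two tests on φ(t) = f(x + t·d) can be written as follows (a sketch; the `ftol`/`gtol` defaults are the usual textbook values, assumed rather than taken from the library):

def strong_wolfe(phi0, dphi0, phi_t, dphi_t, t, ftol=1.0e-4, gtol=0.9):
    """Strong Wolfe test at step t for phi(t) = f(x + t*d).

    Requires dphi0 = phi'(0) < 0, i.e. a descent direction.
    """
    sufficient_decrease = phi_t <= phi0 + ftol * t * dphi0
    curvature = abs(dphi_t) <= gtol * abs(dphi0)
    return sufficient_decrease and curvature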
Example #10
    def test_ascent(self):
        x = np.ones(self.model.n)
        g = self.model.grad(x)
        c1model = C1LineModel(self.model, x, g)
        with pytest.raises(ValueError):
            QuadraticCubicLineSearch(c1model)
Example #11
def rosenbrock_armijo(request):
    model = Rosenbrock(request.param)
    x = np.zeros(request.param)
    g = model.grad(x)
    c1model = C1LineModel(model, x, -g)  # steepest descent direction
    return ArmijoLineSearch(c1model)
Example #12
    def solve(self):
        """Solve method.

        All keyword arguments are passed directly to the constructor of the
        trust-region solver.
        """
        self.log.debug("entering solve")
        model = self.model
        ls_fmt = "%7.1e  %8.1e"

        # Project the initial point into [l,u].
        self.x = project(self.x, model.Lvar, model.Uvar)

        # Gather initial information.
        self.f = model.obj(self.x)
        self.f0 = self.f
        self.g = model.grad(self.x)  # Current gradient
        self.g_old = self.g.copy()
        self.x_old = self.x.copy()
        pgnorm = projected_gradient_norm2(self.x, self.g, model.Lvar,
                                          model.Uvar)
        self.pg0 = pgnorm
        cgtol = self.cgtol
        cg_iter = 0
        cgitermax = model.n

        # Initialize the trust region radius
        self.tr.radius = min(max(0.1 * self.pg0, 1.0), 100)

        # Test for convergence or termination
        stoptol = max(self.gabstol, self.greltol * self.pg0)
        # stoptol = self.greltol * pgnorm
        exitUser = False
        exitOptimal = pgnorm <= stoptol
        exitIter = self.iter >= self.maxiter
        exitFunCall = model.obj.ncalls >= self.maxfuncall
        status = ""

        tick = cputime()

        # Print out header and initial log.
        if self.iter % 20 == 0:
            self.log.info(self.header)
            self.log.info(self.format0, self.iter, self.f, pgnorm, "", "", "",
                          self.tr.radius, "")

        while not (exitUser or exitOptimal or exitIter or exitFunCall):
            self.iter += 1

            self.step_accepted = False
            if self.save_g:
                self.g_old = self.g.copy()
                self.x_old = self.x.copy()

            # Wrap Hessian into an operator.
            H = model.hop(self.x.copy())

            # Compute the Cauchy step and store in s.
            (s, self.alphac) = self.cauchy(self.x, self.g, H, model.Lvar,
                                           model.Uvar, self.tr.radius,
                                           self.alphac)

            # Compute the projected Newton step.
            (x, s, cg_iter,
             _) = self.projected_newton_step(self.x, self.g, H, self.tr.radius,
                                             model.Lvar, model.Uvar, s, cgtol,
                                             cgitermax)

            snorm = norms.norm2(s)
            self.total_cgiter += cg_iter

            # Compute the predicted reduction.
            m = np.dot(s, self.g) + .5 * np.dot(s, H * s)

            # Evaluate actual objective.
            x_trial = project(self.x + s, model.Lvar, model.Uvar)
            f_trial = model.obj(x_trial)

            # Incorporate a magical step to further improve the trial
            # (if possible) and modify the predicted reduction to
            # take the extra improvement into account
            if "magical_step" in dir(model):
                (x_trial, s_magic) = model.magical_step(x_trial)
                if s_magic is not None:
                    s += s_magic
                    m -= f_trial
                    f_trial = model.obj(x_trial)
                    m += f_trial

            # Evaluate the step and determine if the step is successful.

            # Compute the actual reduction.
            rho = self.tr.ratio(self.f, f_trial, m)
            ared = self.f - f_trial

            # On the first iteration, adjust the initial step bound.
            snorm = norms.norm2(s)
            if self.iter == 1:
                self.tr.radius = min(self.tr.radius, snorm)

            # Update the trust region bound
            slope = np.dot(self.g, s)
            if f_trial - self.f - slope <= 0:
                alpha = self.tr.gamma3
            else:
                alpha = max(self.tr.gamma1,
                            -0.5 * (slope / (f_trial - self.f - slope)))

            # Update the trust region bound according to the ratio
            # of actual to predicted reduction
            self.tr.update_radius(rho, snorm, alpha)

            # Update the iterate.
            if rho > self.tr.eta0:
                # Successful iterate
                # Trust-region step is accepted.
                self.x = x_trial
                self.f = f_trial
                self.g = model.grad(self.x)
                step_status = "Acc"
                self.step_accepted = True
                self.dvars = s
                if self.save_g:
                    self.dgrad = self.g - self.g_old

            elif self.ny:
                try:
                    # Trust-region step is rejected; backtrack.
                    line_model = C1LineModel(model, self.x, s)
                    ls = ArmijoLineSearch(line_model, bkmax=5, decr=1.75)

                    for step in ls:
                        self.log.debug(ls_fmt, step, ls.trial_value)

                    ared = self.f - ls.trial_value
                    self.x = ls.iterate
                    self.f = ls.trial_value
                    self.g = model.grad(self.x)
                    snorm *= ls.step
                    self.tr.radius = snorm
                    step_status = "N-Y"
                    self.dvars = ls.step * s
                    self.step_accepted = True
                    if self.save_g:
                        self.dgrad = self.g - self.g_old

                except (LineSearchFailure, ValueError):
                    step_status = "Rej"

            else:
                # Fall back on trust-region rule.
                step_status = "Rej"

            self.step_status = step_status
            status = ""
            try:
                self.post_iteration()
            except UserExitRequest:
                status = "usr"

            # Print out header, say, every 20 iterations
            if self.iter % 20 == 0:
                self.log.info(self.header)

            pstatus = step_status if step_status != "Acc" else ""

            # Test for convergence.
            pgnorm = projected_gradient_norm2(self.x, self.g, model.Lvar,
                                              model.Uvar)
            if pstatus == "" or pstatus == "N-Y":
                if pgnorm <= stoptol:
                    exitOptimal = True
                    status = "gtol"
                elif abs(ared) <= self.abstol and -m <= self.abstol:
                    exitOptimal = True
                    status = "fatol"
                elif abs(ared) <= self.reltol * abs(self.f) and \
                   (-m <= self.reltol * abs(self.f)):
                    exitOptimal = True
                    status = "frtol"
            else:
                self.iter -= 1  # to match TRON iteration number

            exitIter = self.iter > self.maxiter
            exitFunCall = model.obj.ncalls >= self.maxfuncall
            exitUser = status == "usr"

            self.log.info(self.format, self.iter, self.f, pgnorm, cg_iter, rho,
                          snorm, self.tr.radius, pstatus)

        self.tsolve = cputime() - tick  # Solve time
        self.pgnorm = pgnorm
        # Set final solver status.
        if status == "usr":
            pass
        elif self.iter > self.maxiter:
            status = "itr"
        elif status == "":  # corner case; initial guess was optimal
            status = "gtol"
        self.status = status
        self.log.info("final status: %s", self.status)
Example #13
def run_demo(model, x, p, step0):
    """Create arrays necessary to display steps of linesearch."""
    c1model = C1LineModel(model, x, p)
    ls = QuadraticCubicLineSearch(c1model, step=step0)

    t = np.linspace(-0.2, 1.2 * ls._step0, 1000)

    y = np.empty(t.size)
    for k, ti in enumerate(t):  # avoid shadowing the starting point `x`
        y[k] = c1model.obj(ti)
    plt.figure()
    plt.ion()
    plt.plot(t, y)
    t = np.linspace(0, ls._step0, 1000)

    x_p = []
    y_p = []
    x_p.append(0.)
    y_p.append(ls._value)
    plt.annotate("$t=0$",
                 xy=(0, ls._value),
                 xytext=(-5, 5),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')

    x_p.append(ls.step)
    y_p.append(ls.trial_value)
    plt.scatter(x_p, y_p)

    plt.annotate("$t_0=" + str(ls.step) + "$",
                 xy=(ls.step, ls.trial_value),
                 xytext=(-5, 5),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')

    try:
        for k, step in enumerate(ls):
            print(k, step)
            if k == 0:
                phi = quadratic_interpolant(ls, t)
                curve, = plt.plot(t, phi)
                last_step = ls._last_step
                s = ls.step
            else:
                phi3 = cubic_interpolant(ls, t, last_step, s)
                curve, = plt.plot(t, phi3)
                last_step = ls._last_step
                s = ls.step
            x_p.append(ls.step)
            y_p.append(ls.trial_value)
            plt.annotate("$t_" + str(k + 1) + "=%3.1f" % ls.step + "$",
                         xy=(ls.step, ls.trial_value),
                         xytext=(-5, 5),
                         textcoords='offset points',
                         ha='right',
                         va='bottom')
            plt.scatter(x_p, y_p)
            plt.pause(3)
            curve.remove()
    except LineSearchFailure:
        pass
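`QuadraticCubicLineSearch` first minimizes the quadratic interpolating φ(0), φ'(0) and φ(t₀), then moves to cubic interpolants on subsequent trials, which is what the two branches of the loop above plot. The quadratic stage has a closed-form minimizer (the standard interpolation formula, not a copy of the library's internals):

def quadratic_step(phi0, dphi0, t, phi_t):
    """Minimizer of the quadratic fitting phi(0), phi'(0) and phi(t).

    q(s) = phi0 + dphi0*s + a*s**2 with a = (phi_t - phi0 - dphi0*t) / t**2;
    setting q'(s) = 0 yields the step below (valid when a > 0).
    """
    a = (phi_t - phi0 - dphi0 * t) / t**2
    return -dphi0 / (2.0 * a)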
Example #14
    def solve(self):
        """Solve model with the L-BFGS method."""
        model = self.model
        x = self.x
        self.logger.info(self.hdr)

        tstart = cputime()

        self.f0 = self.f = f = model.obj(x)
        self.g = g = model.grad(x)
        self.g_norm0 = g_norm = norms.norm2(g)
        stoptol = max(self.abstol, self.reltol * self.g_norm0)

        exitUser = False
        exitLS = False
        exitOptimal = g_norm <= stoptol
        exitIter = self.iter >= self.maxiter
        status = ""

        while not (exitUser or exitOptimal or exitIter or exitLS):

            # Obtain search direction
            H = model.hop(x)
            d = -(H * g)

            # Prepare for modified linesearch
            step0 = max(1.0e-3, 1.0 / g_norm) if self.iter == 0 else 1.0
            line_model = C1LineModel(self.model, x, d)
            ls = self.setup_linesearch(line_model, step0)
            try:
                for step in ls:
                    self.logger.debug(self.ls_fmt, step, ls.trial_value)
            except LineSearchFailure:
                exitLS = True
                continue

            self.logger.info(self.fmt, self.iter, f, g_norm, ls.slope, ls.step)

            # Prepare new pair {s,y} to be inserted into L-BFGS operator.
            self.s = ls.step * d
            x = ls.iterate
            g_next = line_model.gradval
            self.y = g_next - g
            status = ""
            try:
                self.post_iteration()
            except UserExitRequest:
                status = "usr"

            # Prepare for next round.
            g = g_next
            g_norm = norms.norm2(g)
            f = ls.trial_value
            self.iter += 1

            exitOptimal = g_norm <= stoptol
            exitIter = self.iter >= self.maxiter
            exitUser = status == "usr"

        self.tsolve = cputime() - tstart
        self.logger.info(self.fmt_short, self.iter, f, g_norm)

        self.x = x
        self.f = f
        self.g = g
        self.g_norm = g_norm

        # Set final solver status.
        if status == "usr":
            pass
        elif self.g_norm <= stoptol:
            status = "opt"
        elif exitLS:
            status = "lsf"
        else:  # self.iter > self.maxiter:
            status = "itr"
        self.status = status
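The direction `d = -(H * g)` above applies the L-BFGS inverse-Hessian approximation assembled from the stored `self.s`, `self.y` pairs. The classical way to apply such an operator is the two-loop recursion; the sketch below shows the standard algorithm (not necessarily how `model.hop` implements it):

import numpy as np

def lbfgs_two_loop(g, pairs):
    """Apply the L-BFGS inverse-Hessian approximation to g.

    pairs is a list of (s, y) tuples, s = x_{k+1} - x_k, y = g_{k+1} - g_k,
    ordered oldest first. Returns H*g for the implicit approximation H.
    """
    q = g.copy()
    cache = []
    for s, y in reversed(pairs):                 # first loop: newest to oldest
        rho = 1.0 / np.dot(y, s)
        alpha = rho * np.dot(s, q)
        q -= alpha * y
        cache.append((rho, alpha, s, y))
    if pairs:                                    # scaled initial matrix H0 = gamma*I
        s, y = pairs[-1]
        q *= np.dot(s, y) / np.dot(y, y)
    for rho, alpha, s, y in reversed(cache):     # second loop: oldest to newest
        beta = rho * np.dot(y, q)
        q += (alpha - beta) * s
    return q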