Ejemplo n.º 1
0
 def test_sanity(self):
     """Smoke-test SlackOptimizationProblem's constraint conversion.

     Builds a tiny problem with one equality and two inequality
     constraints and checks that the slack formulation exposes only
     equality constraints, and that objective/gradient/constraint
     evaluation through a SlackParam gives the expected values.
     """
     var = Variable(3)
     problem = OptimizationProblem(var[0],
                                   cons_eq=(var[1],),
                                   cons_ineq=(var[2] + 2, var[1] - 1))
     slack_problem = SlackOptimizationProblem(problem)

     # Both inequality constraints must have been turned into equality
     # constraints (via slack variables), so we expect 0 and 1 + 2 = 3.
     self.assertEqual(len(slack_problem.get_inequality_constraints()), 0)
     self.assertEqual(len(slack_problem.get_equality_constraints()), 3)

     # A SlackParam wrapping the underlying parametrization should still
     # evaluate the original objective and its gradient.
     params = slack_problem.build_param(DirectParam([3, 1, 2]))
     self.assertEqual(
         slack_problem.calculate_objective_function(params), 3)
     self.assertEqual(
         slack_problem.calculate_gradient(params).tolist(),
         [1, 0, 0, 0, 0])

     eq_cons, ineq_cons = slack_problem.calculate_constraints(params)
     self.assertEqual(eq_cons.tolist(), [1, 4, 0])
     self.assertEqual(ineq_cons.tolist(), [])

     eq_grad, ineq_grad = slack_problem.calculate_constraint_gradients(
         params)
     self.assertEqual(len(eq_grad), 3)
     self.assertEqual(len(ineq_grad), 0)
     expected_rows = [
         [0, 1, 0, 0, 0],
         [0, 0, 1, 1, 0],
         [0, 1, 0, 0, 1],
     ]
     for grad, row in zip(eq_grad, expected_rows):
         self.assertEqual(grad.tolist(), row)
Ejemplo n.º 2
0
    def __call__(self, opt, param, callback=None, cycle_callback=None):
        """ Run optimization.

        Solves `opt` with a penalty method: inequality constraints are
        first converted into equality constraints via slack variables,
        and each cycle minimizes the objective plus `mu` times the sum of
        the equality constraints raised to the power `self.pf`, with `mu`
        updated between cycles (`mu = mu**self.tau`).

        Args:
            opt: OptimizationProblem.
            param: Initial parametrization.
            callback: Optional callback, invoked after every inner-optimizer
                iteration with the current (unwrapped) parametrization.
            cycle_callback: Optional callback, invoked once at the end of
                every penalty cycle with the current (unwrapped)
                parametrization.
        Returns:
            Optimized parametrization.
        """
        # Convert inequality constraints into equalities with slack variables.
        opt = SlackOptimizationProblem(opt)
        slack_param = opt.build_param(param)
        mu = self.mu0

        # Start at self.cycle_num so a previously interrupted run can resume.
        for k in range(self.cycle_num, self.num_cycles):
            # Persist loop state on the instance so it survives interruption.
            self.cycle_num = k
            self.mu = mu
            logger.info('Running penalty objective cycle {} with mu {}'.format(
                k, mu))

            # Build penalty objective.
            penalty_obj = opt.get_objective() + mu * Sum(
                [eq**self.pf for eq in opt.get_equality_constraints()])
            penalty_opt = OptimizationProblem(penalty_obj)

            # Tighten the inner-solver tolerance by 100x each cycle, but
            # never below the user-specified floor self.ftol.
            ftol = max(1 / 100**(k + 1), self.ftol)

            if self.optimizer is None:
                optimizer = ScipyOptimizer(method='L-BFGS-B',
                                           options={
                                               'ftol': ftol,
                                               'gtol': self.gtol,
                                               'maxiter': self.maxiter
                                           })
            else:
                optimizer = self.optimizer

            def aug_callback(x):
                # Sync the parametrization with the optimizer's raw vector
                # before logging and forwarding to the user callback.
                slack_param.from_vector(x)
                logger.info('Current penalty objective: {}'.format(
                    penalty_opt.calculate_objective_function(slack_param)))
                if callback:
                    callback(slack_param.get_param())

            slack_param = optimizer(penalty_opt,
                                    slack_param,
                                    callback=aug_callback)

            # NOTE(review): result is currently unused — kept in case
            # get_results() has side effects; verify before removing.
            res = optimizer.get_results()

            mu = mu**self.tau
            if cycle_callback:
                cycle_callback(slack_param.get_param())
        # Strip slack variables and return the underlying parametrization.
        return slack_param.get_param()
Ejemplo n.º 3
0
    def fit2eps(self, eps_bg, S, eps):
        """Fit this parametrization to a target permittivity.

        Minimizes a Fit2Eps objective over this parametrization with
        L-BFGS-B (continuous optimization only). Presumably updates the
        parametrization in place via the optimizer — TODO confirm.

        Args:
            eps_bg: Background permittivity.
            S: Selection/projection operator used by Fit2Eps.
            eps: Target permittivity to fit to.
        """
        from spins.invdes.problem import Fit2Eps, OptimizationProblem
        import spins.invdes.optimization as optim

        # Wrap the fitting objective into an optimization problem.
        problem = OptimizationProblem(Fit2Eps(eps_bg, S, eps))

        # Continuous L-BFGS-B optimizer.
        optimizer = optim.ScipyOptimizer(
            method='L-BFGS-B', options={
                'maxiter': 200,
                'maxcor': 10
            })

        # Count optimizer iterations via the callback.
        iteration = 0

        def count_iteration(_):
            nonlocal iteration
            iteration += 1

        optimizer(problem, self, callback=count_iteration)
Ejemplo n.º 4
0
    def __call__(self, opt, param, callback=None, cycle_callback=None):
        """
        Optimize augmented Lagrangian using L-BFGS-b
        (Nocedal: p520 Algorithm 17.4 (Algorithm implemented in LANCELOT)
             Note however that optimization is done by L-BFGS-B, not by gradient
             projection method.)

        Args:
            opt: Optimization problem; wrapped in OptimizationProblem if it
                is not one already.
            param: Initial parametrization.
            callback: Optional callback, invoked after every inner-optimizer
                iteration with the current (unwrapped) parametrization.
            cycle_callback: Optional callback, invoked at the end of every
                augmented Lagrangian cycle.

        Returns:
            Optimized parametrization (slack variables stripped).
        """
        if not isinstance(opt, OptimizationProblem):
            opt = OptimizationProblem(opt)
        # Convert inequality constraints into equalities via slack variables.
        opt = SlackOptimizationProblem(opt)
        slack_param = opt.build_param(param)

        # Prepare tolerances and Lagrange variables.
        # Multipliers start at 1 for every equality constraint.
        mu = self.mu0
        lagr_var0 = np.ones(len(opt.get_equality_constraints()))
        lagr_var = lagr_var0.astype(float)
        lagrangian_tolerance = 1 / mu**self.mu_exp  # In Nocedal this is 1/mu.
        constraint_tolerance = 1 / mu**0.1

        # Run optimization.
        for k in range(self.num_cycles):
            logger.info('Running augmented Lagrangian cycle {}'.format(k))

            # The multiplier term enters with a minus sign (as in the
            # augmented Lagrangian of Nocedal Algorithm 17.4).
            sgn = -1
            # Build Lagrangian: objective - sum(lambda_i * c_i)
            # + (mu/2) * sum(c_i^2).
            lagrangian_obj = (
                opt.get_objective() + sgn * Sum(opt.get_equality_constraints(),
                                                weights=lagr_var,
                                                parallelize=False) + 0.5 * mu *
                Sum([eq**2 for eq in opt.get_equality_constraints()],
                    parallelize=False))
            lagrangian = OptimizationProblem(lagrangian_obj)

            def aug_callback(x):
                # Sync the parametrization with the optimizer's raw vector
                # before logging and forwarding to the user callback.
                slack_param.from_vector(x)
                logger.info(
                    'Current Augmented Lagrangian objective: {}'.format(
                        lagrangian.calculate_objective_function(slack_param)))

                if callback:
                    callback(slack_param.get_param())

            # Solve the subproblem to the current (cycle-dependent) tolerance.
            if self.optimizer.options is None:
                self.optimizer.options = {}
            self.optimizer.options['ftol'] = lagrangian_tolerance
            slack_param = self.optimizer(lagrangian,
                                         slack_param,
                                         callback=aug_callback)

            # Evaluate results and prepare parameters for the next iteration.
            constraints_val, ineq_cons = opt.calculate_constraints(slack_param)
            total_constraint_err = np.sum(np.abs(constraints_val))
            if total_constraint_err < constraint_tolerance:
                # Test if the lagrangian_tolerance reached.
                if (lagrangian_tolerance < self.lagrangian_tolerance_final
                        and total_constraint_err <
                        self.constraint_tolerance_final):
                    break
                # Constraints improved enough: update multipliers
                # (lambda <- lambda - mu * c) and tighten both tolerances.
                lagr_var += sgn * mu * constraints_val
                constraint_tolerance /= mu**0.9
                lagrangian_tolerance /= mu
            else:
                # Constraints not satisfied well enough: increase the penalty
                # and reset the tolerance schedule from the new mu.
                mu *= 100
                constraint_tolerance = 1 / mu**0.1
                lagrangian_tolerance = 1 / mu
            if cycle_callback:
                cycle_callback(slack_param.get_param())
        # Strip slack variables and return the underlying parametrization.
        return slack_param.get_param()