Example #1
 def runTest(self):
     # Simple problem with global minimum at the origin, for instance
     objfun = lambda x: np.array(
         [np.sin(x[0])**2,
          np.sin(x[1])**2,
          np.sum(np.sin(x[2:]**2))])
     jac = lambda x: np.array(
         [[2 * np.sin(x[0]) * np.cos(x[0]), 0, 0, 0, 0],
          [0, 2 * np.sin(x[1]) * np.cos(x[1]), 0, 0, 0],
          [0, 0, 2 * np.sin(x[2]) * np.cos(x[2]),
           2 * np.sin(x[3]) * np.cos(x[3]),
           2 * np.sin(x[4]) * np.cos(x[4])]])  # for n=5 only
     x0 = np.ones((5, ))
     np.random.seed(0)
     soln = dfols.solve(objfun,
                        x0,
                        user_params={'growing.ndirs_initial': 2})
     self.assertTrue(array_compare(soln.x, np.zeros((5, )), thresh=1e-3),
                     "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, objfun(soln.x), thresh=1e-10),
         "Wrong resid")
     print(soln.jacobian)
     print(jac(soln.x))
     self.assertTrue(array_compare(soln.jacobian, jac(soln.x), thresh=1e-2),
                     "Wrong Jacobian")
     self.assertTrue(abs(soln.f) < 1e-10, "Wrong fmin")
Example #2
def run_local_dfols(user_specs, comm_queue, x0, f0, child_can_read,
                    parent_can_read):

    # Define bound constraints (lower <= x <= upper)
    lb = np.zeros(len(x0))
    ub = np.ones(len(x0))

    # Set random seed (for reproducibility)
    np.random.seed(0)

    # Care must be taken here because a too-large initial step causes DFO-LS to move the starting point!
    dist_to_bound = min(min(ub - x0), min(x0 - lb))
    assert dist_to_bound > np.finfo(
        np.float32).eps, "The distance to the boundary is too small"

    # Call DFO-LS
    soln = dfols.solve(lambda x: scipy_dfols_callback_fun(
        x, comm_queue, child_can_read, parent_can_read, user_specs),
                       x0,
                       bounds=(lb, ub),
                       rhobeg=0.5 * dist_to_bound,
                       do_logging=False)

    x_opt = soln.x

    # FIXME: Need to do something with the exit codes.
    # print(exit_code)

    finish_queue(x_opt, comm_queue, parent_can_read)
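
# Worked illustration (hypothetical numbers, not from the source) of the rhobeg
# safeguard used above: with lb = 0, ub = 1 and x0 = [0.9, 0.2],
#   dist_to_bound = min(min(ub - x0), min(x0 - lb)) = min(0.1, 0.2) = 0.1,
# so rhobeg = 0.5 * dist_to_bound = 0.05 keeps the initial trust region strictly
# inside the bounds and DFO-LS does not need to shift the starting point.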
Example #3
    def fit(self):
        """
        Run problem with DFO.
        """
        if self.minimizer == 'dfogn':
            self._soln = dfogn.solve(self.problem.eval_r, self._pinit)
        elif self.minimizer == 'dfols':
            self._soln = dfols.solve(self.problem.eval_r, self._pinit)

        self._popt = self._soln.x
        self._status = self._soln.flag
Example #4
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2, 1.0])
     soln = dfols.solve(rosenbrock, x0, npt=5)
     self.assertTrue(
         array_compare(soln.x, np.array([1.0, 1.0]), thresh=1e-4),
         "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, rosenbrock(soln.x), thresh=1e-10),
         "Wrong resid")
     self.assertTrue(
         array_compare(soln.jacobian,
                       rosenbrock_jacobian(soln.x),
                       thresh=1.5e-1), "Wrong Jacobian")
     self.assertTrue(abs(soln.f) < 1e-10, "Wrong fmin")
Example #5
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2,
                    0.7])  # standard start point too close to upper bounds
     lower = np.array([-2.0, -2.0])
     upper = np.array([0.9, 0.9])
     xmin = np.array([0.9, 0.81])  # approximate
     fmin = np.dot(rosenbrock(xmin), rosenbrock(xmin))
     jacmin = np.array([[-18.0, 10.0], [-1.0, 0.0]])
     soln = dfols.solve(rosenbrock, x0, bounds=(lower, upper))
     print(soln.x)
     self.assertTrue(array_compare(soln.x, xmin, thresh=1e-2), "Wrong xmin")
     self.assertTrue(abs(soln.f - fmin) < 1e-4, "Wrong fmin")
     self.assertTrue(array_compare(soln.jacobian, jacmin, thresh=2e-2),
                     "Wrong jacobian")
Example #6
 def runTest(self):
     n, m = 2, 5
     np.random.seed(0)  # (fixing random seed)
     A = np.random.rand(m, n)
     b = np.random.rand(m)
     objfun = lambda x: np.dot(A, x) - b
     xmin = np.linalg.lstsq(A, b, rcond=None)[0]
     fmin = np.dot(objfun(xmin), objfun(xmin))
     x0 = np.zeros((n, ))
     soln = dfols.solve(objfun, x0)
     self.assertTrue(array_compare(soln.x, xmin, thresh=1e-2), "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, objfun(soln.x), thresh=1e-10),
         "Wrong resid")
     self.assertTrue(array_compare(soln.jacobian, A, thresh=1e-2),
                     "Wrong Jacobian")
     self.assertTrue(abs(soln.f - fmin) < 1e-4, "Wrong fmin")
Example #7
    def dfols_fit(self, model: Callable, **kwargs):
        """
        Method to convert easyCore styling to DFO-LS styling (yes, again)

        :param model: Model which accepts f(x[0])
        :type model: Callable
        :param kwargs: Any additional arguments for dfols.solver
        :type kwargs: dict
        :return: dfols fit results container
        """
        x0 = np.array(
            [par.raw_value for par in iter(self._cached_pars.values())])
        bounds = (np.array([
            par.min for par in iter(self._cached_pars.values())
        ]), np.array([par.max for par in iter(self._cached_pars.values())]))
        results = dfols.solve(model, x0, bounds=bounds, **kwargs)
        return results
Example #8
def run_local_dfols(user_specs, comm_queue, x0, f0, child_can_read,
                    parent_can_read):
    """
    Runs a DFOLS local optimization run starting at ``x0``, governed by the
    parameters in ``user_specs``.
    """

    # Define bound constraints (lower <= x <= upper)
    lb = np.zeros(len(x0))
    ub = np.ones(len(x0))

    # Set random seed (for reproducibility)
    np.random.seed(0)

    # Care must be taken here because a too-large initial step causes DFO-LS to move the starting point!
    dist_to_bound = min(min(ub - x0), min(x0 - lb))
    assert dist_to_bound > np.finfo(
        np.float32).eps, "The distance to the boundary is too small"
    assert 'bounds' not in user_specs.get(
        'dfols_kwargs', {}), "APOSMM must set the bounds for DFO-LS"
    assert 'rhobeg' not in user_specs.get(
        'dfols_kwargs', {}), "APOSMM must set rhobeg for DFO-LS"
    assert 'x0' not in user_specs.get('dfols_kwargs',
                                      {}), "APOSMM must set x0 for DFO-LS"

    # Call DFO-LS
    soln = dfols.solve(lambda x: scipy_dfols_callback_fun(
        x, comm_queue, child_can_read, parent_can_read, user_specs),
                       x0,
                       bounds=(lb, ub),
                       rhobeg=0.5 * dist_to_bound,
                       **user_specs.get('dfols_kwargs', {}))

    x_opt = soln.x

    if soln.flag == soln.EXIT_SUCCESS:
        opt_flag = 1
    else:
        print("[APOSMM] The DFO-LS run started from " + str(x0) +
              " stopped with an exit "
              "flag of " + str(soln.flag) + ". No point from this run will be "
              "ruled as a minimum! APOSMM may start a new run from some point "
              "in this run.")
        opt_flag = 0

    finish_queue(x_opt, opt_flag, comm_queue, parent_can_read, user_specs)
Example #9
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2, 1.0])
     np.random.seed(0)
     params = {"general.accept_any_decrease": False}
     soln = dfols.solve(rosenbrock, x0, user_params=params)
     # print(soln.x)
     self.assertTrue(
         array_compare(soln.x, np.array([1.0, 1.0]), thresh=1e-4),
         "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, rosenbrock(soln.x), thresh=1e-10),
         "Wrong resid")
     # print(soln.jacobian, rosenbrock_jacobian(soln.x))
     self.assertTrue(
         array_compare(soln.jacobian,
                       rosenbrock_jacobian(soln.x),
                       thresh=2e-1), "Wrong Jacobian")
     self.assertTrue(abs(soln.f) < 1e-10, "Wrong fmin")
Example #10
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2, 1.0])
     soln = dfols.solve(rosenbrock,
                        x0,
                        user_params={"growing.ndirs_initial": 1})
     self.assertTrue(
         array_compare(soln.x, np.array([1.0, 1.0]), thresh=1e-4),
         "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, rosenbrock(soln.x), thresh=1e-10),
         "Wrong resid")
     print(soln.jacobian)
     print(rosenbrock_jacobian(soln.x))
     self.assertTrue(
         array_compare(soln.jacobian,
                       rosenbrock_jacobian(soln.x),
                       thresh=3e-0), "Wrong Jacobian")
     self.assertTrue(abs(soln.f) < 1e-10, "Wrong fmin")
Example #11
 def runTest(self):
     # n, m = 2, 2
     x0 = np.array([-1.2,
                    0.7])  # standard start point does not satisfy bounds
     lower = np.array([-2.0, -2.0])
     upper = np.array([1.0, 0.9])
     xmin = np.array([0.9486, 0.9])  # approximate
     fmin = np.dot(rosenbrock(xmin), rosenbrock(xmin))
     soln = dfols.solve(rosenbrock, x0, bounds=(lower, upper))
     print(soln.x)
     self.assertTrue(array_compare(soln.x, xmin, thresh=1e-2), "Wrong xmin")
     self.assertTrue(
         array_compare(soln.resid, rosenbrock(soln.x), thresh=1e-10),
         "Wrong resid")
     self.assertTrue(
         array_compare(soln.jacobian,
                       rosenbrock_jacobian(soln.x),
                       thresh=1e-2), "Wrong Jacobian")
     self.assertTrue(abs(soln.f - fmin) < 1e-4, "Wrong fmin")
Example #12
 def simulate_npsolve(self,t):
     try:
         assert t>0
         pmeet0 = self.pmeet_exo[:t] 
         ppreg0 = self.ppreg_exo[:t]
     except:
         pmeet0 = None
         ppreg0 = None
     
     
     
     # this is horrible but works somehow...
     # the way k_m is defined makes it impossible to simulate with diff't
     # probabilities and the same t...
     
     km_save = self.k_m.copy()
     mk_save = self.m_k.copy()
     nmar_save  = self.nmar.copy()
     kmtrue_save = self.k_m_true.copy()
     
     
     
     s_dta = self.ss_val[t]
     kf_dta = self.kf_val[t]
 
     
     def get_residuals(x):
         pmeet_here = x[0]
         ppreg_here = x[1]
         
         
         self.k_m = km_save.copy()
         self.m_k = mk_save.copy()
         self.k_m_true = kmtrue_save.copy()
         self.nmar = nmar_save.copy()
         
         
         self.pmeet_exo = [pmeet_here] if pmeet0 is None else pmeet0 + [pmeet_here]
         self.ppreg_exo = [ppreg_here] if ppreg0 is None else ppreg0 + [ppreg_here]
         
         self.anext(t)
         self.iexonext(t) 
         self.statenext(t)
         
         
         s_mdl = (self.state[:,t+1] == self.state_codes['Female, single']).mean() #((self.i_singles())[:,t+1]).mean()
         
         
         kf_mdl = (self.k_m[:,t+1] & (self.nmar[:,t+1] == 1)).mean() #(self.i_km())[:,t+1].mean()
         
         
         resid = np.array([s_mdl - s_dta,kf_mdl-kf_dta])
         
         return resid
     
     
     
     if t == 0:
         xinit = np.array([0.1,0.1])
     else:
         xinit = np.array([self.pmeet_exo[t-1],self.ppreg_exo[t-1]])
     
 
     res = dfols.solve(get_residuals,xinit,rhoend=1e-3,bounds=(np.array([0.0,0.0]),np.array([1.0,1.0])))
 
     get_residuals(res.x) # this writes the probabilities into the object
Example #13
            'logging.save_diagnostic_info': True,
            'logging.save_xk': True,
            "noise.quit_on_noise_level": False,
            'init.run_in_parallel': True,
            'general.check_objfun_for_overflow': False
        }
        # merge in values from namedSetting into userParams
        namedSettings = optimise.get('DFOLS_namedSettings', {})
        for k in namedSettings.keys():  # loop over keys
            if not re.search(r'_comment\s*$', k):  # not a comment
                userParams[k] = namedSettings[k]
        solution = dfols.solve(optFunctionDFOLS,
                               start.values,
                               objfun_has_noise=False,
                               bounds=prange,
                               maxfun=optimise.get('maxfun', 100),
                               rhobeg=optimise.get('rhobeg', 1e-1),
                               rhoend=optimise.get('rhoend', 1e-3),
                               user_params=userParams,
                               scaling_within_bounds=True)
        if solution.flag == solution.EXIT_LINALG_ERROR:  # linear algebra error
            raise np.linalg.LinAlgError  # re-raise the linear algebra error which will trigger doing more runs..
        elif solution.flag not in (solution.EXIT_SUCCESS,
                                   solution.EXIT_MAXFUN_WARNING):
            print("dfols failed with flag %i error : %s" %
                  (solution.flag, solution.msg))
            raise Exception("Problem with dfols")
        ## code here will be run when DFOLS has completed. It mostly is to put stuff in the final JSON file
        ## so can easily be looked at for subsequent analysis.
        ## some of it could be done even if DFOLS did not complete.
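# Hedged illustration (hypothetical values, not from the source) of the merge loop
# above: keys ending in "_comment" are treated as documentation and skipped, while
# everything else is forwarded to DFO-LS via user_params.
#   DFOLS_namedSettings = {
#       'noise.quit_on_noise_level': True,
#       'noise.quit_on_noise_level_comment': 'documentation only, skipped by the loop',
#   }
# After the loop, userParams contains 'noise.quit_on_noise_level' but not the
# '_comment' entry.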
Example #14
# DFO-LS example: minimize the Rosenbrock function
from __future__ import print_function
import numpy as np
import dfols


# Define the objective function
def rosenbrock(x):
    return np.array([10.0 * (x[1] - x[0]**2), 1.0 - x[0]])


# Define the starting point
x0 = np.array([-1.2, 1.0])

# For optional extra output details
# import logging
# logging.basicConfig(level=logging.INFO, format='%(message)s')

# Call DFO-LS
soln = dfols.solve(rosenbrock, x0)

# Display output
print(soln)
Example #15

# Define the starting point
x0 = np.array([-1.2, 1.0])

# Set random seed (for reproducibility)
np.random.seed(0)

print("Demonstrate noise in function evaluation:")
for i in range(5):
    print("objfun(x0) = %s" % str(rosenbrock_noisy(x0)))
print("")

# Call DFO-LS
#soln = dfols.solve(rosenbrock_noisy, x0)
soln = dfols.solve(rosenbrock_noisy, x0, objfun_has_noise=True)

# Display output
print(soln)
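
# Optional variant (a sketch, not part of the original example): DFO-LS can also
# average several evaluations per point via its `nsamples` callback, documented as
# nsamples(delta, rho, iter, nrestarts) -> int (the estimagic wrapper further below
# passes noise_n_evals_per_point through this argument). The fixed value of 5 here
# is an arbitrary illustration.
soln_avg = dfols.solve(rosenbrock_noisy, x0, objfun_has_noise=True,
                       nsamples=lambda delta, rho, iter, nrestarts: 5)
print(soln_avg)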

# Compare with a derivative-based solver
import scipy.optimize as opt
soln = opt.least_squares(rosenbrock_noisy, x0)

print("")
print("** SciPy results **")
print("Solution xmin = %s" % str(soln.x))
print("Objective value f(xmin) = %.10g" % (2.0 * soln.cost))
print("Needed %g objective evaluations" % soln.nfev)
print("Exit flag = %g" % soln.status)
print(soln.message)
Example #16
def nag_dfols(
    criterion_and_derivative,
    x,
    lower_bounds,
    upper_bounds,
    *,
    clip_criterion_if_overflowing=CLIP_CRITERION_IF_OVERFLOWING,
    convergence_minimal_trustregion_radius_tolerance=CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE,  # noqa: E501
    convergence_noise_corrected_criterion_tolerance=CONVERGENCE_NOISE_CORRECTED_CRITERION_TOLERANCE,  # noqa: E501
    convergence_scaled_criterion_tolerance=0.0,
    convergence_slow_progress=None,
    initial_directions=INITIAL_DIRECTIONS,
    interpolation_rounding_error=INTERPOLATION_ROUNDING_ERROR,
    noise_additive_level=None,
    noise_multiplicative_level=None,
    noise_n_evals_per_point=None,
    random_directions_orthogonal=RANDOM_DIRECTIONS_ORTHOGONAL,
    stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,
    threshold_for_safety_step=THRESHOLD_FOR_SAFETY_STEP,
    trustregion_expansion_factor_successful=TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL,
    trustregion_expansion_factor_very_successful=TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL,  # noqa: E501
    trustregion_fast_start_options=None,
    trustregion_initial_radius=None,
    trustregion_method_to_replace_extra_points="geometry_improving",
    trustregion_n_extra_points_to_replace_successful=0,
    trustregion_n_interpolation_points=None,
    trustregion_precondition_interpolation=TRUSTREGION_PRECONDITION_INTERPOLATION,
    trustregion_reset_options=None,
    trustregion_shrinking_factor_not_successful=TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL,  # noqa: E501
    trustregion_shrinking_factor_lower_radius=TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS,
    trustregion_shrinking_factor_upper_radius=TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS,
    trustregion_threshold_successful=TRUSTREGION_THRESHOLD_SUCCESSFUL,
    trustregion_threshold_very_successful=TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL,
):
    r"""Minimize a function with least squares structure using DFO-LS.

    Do not call this function directly but pass its name "nag_dfols" to estimagic's
    maximize or minimize function as the `algorithm` argument. Specify your desired
    arguments as a dictionary and pass them as `algo_options` to minimize or
    maximize.

    The DFO-LS algorithm :cite:`Cartis2018b` is designed to solve the nonlinear
    least-squares minimization problem (with optional bound constraints).
    Remember to cite :cite:`Cartis2018b` when using DFO-LS in addition to estimagic.

    .. math::

        \min_{x\in\mathbb{R}^n}  &\quad  f(x) := \sum_{i=1}^{m}r_{i}(x)^2 \\
        \text{s.t.} &\quad  \text{lower_bounds} \leq x \leq \text{upper_bounds}

    The :math:`r_{i}` are called root contributions in estimagic.

    DFO-LS is a derivative-free optimization algorithm, which means it does not require
    the user to provide the derivatives of f(x) or :math:`r_{i}(x)`, nor does it
    attempt to estimate them internally (by using finite differencing, for instance).

    There are two main situations when using a derivative-free algorithm
    (such as DFO-LS) is preferable to a derivative-based algorithm (which is the vast
    majority of least-squares solvers):

    1. If the residuals are noisy, then calculating or even estimating their derivatives
       may be impossible (or at least very inaccurate). By noisy, we mean that if we
       evaluate :math:`r_{i}(x)` multiple times at the same value of x, we get different
       results. This may happen when a Monte Carlo simulation is used, for instance.

    2. If the residuals are expensive to evaluate, then estimating derivatives
       (which requires n evaluations of each :math:`r_{i}(x)` for every point of
       interest x) may be prohibitively expensive. Derivative-free methods are
       designed to solve the problem with as few evaluations of the criterion as
       possible.

    To read the detailed documentation of the algorithm `click here
    <https://numericalalgorithmsgroup.github.io/dfols/>`_.

    There are four possible convergence criteria:

    1. when the lower trust region radius is shrunk below a minimum
       (``convergence_minimal_trustregion_radius_tolerance``).

    2. when the improvements of iterations become very small
       (``convergence_slow_progress``). This is very similar to
       ``relative_criterion_tolerance`` but ``convergence_slow_progress`` is more
       general, allowing you to specify not only the threshold for convergence but
       also the period over which the improvements must have been very small.

    3. when a sufficient reduction to the criterion value at the start parameters
       has been reached, i.e. when
       :math:`\frac{f(x)}{f(x_0)} \leq
       \text{convergence_scaled_criterion_tolerance}`

    4. when all evaluations on the interpolation points fall within a scaled version of
       the noise level of the criterion function. This is only applicable if the
       criterion function is noisy. You can specify this criterion with
       ``convergence_noise_corrected_criterion_tolerance``.

    DFO-LS supports resetting the optimization and doing a fast start by
    starting with a smaller interpolation set and growing it dynamically.
    For more information see `their detailed documentation
    <https://numericalalgorithmsgroup.github.io/dfols/>`_ and :cite:`Cartis2018b`.

    Args:
        clip_criterion_if_overflowing (bool): see :ref:`algo_options`.
        convergence_minimal_trustregion_radius_tolerance (float): see
            :ref:`algo_options`.
        convergence_noise_corrected_criterion_tolerance (float): Stop when the
            evaluations on the set of interpolation points all fall within this factor
            of the noise level.
            The default is 1, i.e. when all evaluations are within the noise level.
            If you want to not use this criterion but still flag your
            criterion function as noisy, set this tolerance to 0.0.

            .. warning::
                Unlike most other tolerances, very small values do not make sense here.

        convergence_scaled_criterion_tolerance (float):
            Terminate if a point is reached where the ratio of the criterion value
            to the criterion value at the start params is below this value, i.e. if
            :math:`f(x_k)/f(x_0) \leq
            \text{convergence_scaled_criterion_tolerance}`. Note this is
            deactivated unless the lowest mathematically possible criterion value (0.0)
            is actually achieved.
        convergence_slow_progress (dict): Arguments for converging when the evaluations
            over several iterations only yield small improvements on average, see
            see :ref:`algo_options` for details.
        initial_directions (str): see :ref:`algo_options`.
        interpolation_rounding_error (float): see :ref:`algo_options`.
        noise_additive_level (float): Used for determining the presence of noise
            and the convergence by all interpolation points being within noise level.
            0 means no additive noise. Only multiplicative or additive is supported.
        noise_multiplicative_level (float): Used for determining the presence of noise
            and the convergence by all interpolation points being within noise level.
            0 means no multiplicative noise. Only multiplicative or additive is
            supported.
        noise_n_evals_per_point (callable): How often to evaluate the criterion
            function at each point.
            This is only applicable for criterion functions with noise,
            when averaging multiple evaluations at the same point produces a more
            accurate value.
            The input parameters are the ``upper_trustregion_radius`` (:math:`\Delta`),
            the ``lower_trustregion_radius`` (:math:`\rho`),
            how many iterations the algorithm has been running for, ``n_iterations``
            and how many resets have been performed, ``n_resets``.
            The function must return an integer.
            Default is no averaging (i.e.
            ``noise_n_evals_per_point(...) = 1``).
        random_directions_orthogonal (bool): see :ref:`algo_options`.
        stopping_max_criterion_evaluations (int): see :ref:`algo_options`.
        threshold_for_safety_step (float): see :ref:`algo_options`.
        trustregion_expansion_factor_successful (float): see :ref:`algo_options`.
        trustregion_expansion_factor_very_successful (float): see :ref:`algo_options`.
        trustregion_fast_start_options (dict): see :ref:`algo_options`.
        trustregion_initial_radius (float): Initial value of the trust region radius.
        trustregion_method_to_replace_extra_points (str): If replacing extra points in
            successful iterations, whether to use geometry improving steps or the
            momentum method. Can be "geometry_improving" or "momentum".
        trustregion_n_extra_points_to_replace_successful (int): The number of extra
            points (other than accepting the trust region step) to replace. Useful when
            ``trustregion_n_interpolation_points > len(x) + 1``.
        trustregion_n_interpolation_points (int): The number of interpolation points to
            use. The default is :code:`len(x) + 1`. If using resets, this is the
            number of points to use in the first run of the solver, before any resets.
        trustregion_precondition_interpolation (bool): see :ref:`algo_options`.
        trustregion_shrinking_factor_not_successful (float): see :ref:`algo_options`.
        trustregion_shrinking_factor_lower_radius (float): see :ref:`algo_options`.
        trustregion_shrinking_factor_upper_radius (float): see :ref:`algo_options`.
        trustregion_threshold_successful (float): Share of the predicted improvement
            that has to be achieved for a trust region iteration to count as successful.
        trustregion_threshold_very_successful (float): Share of the predicted
            improvement that has to be achieved for a trust region iteration to count
            as very successful.

    Returns:
        results (dict): See :ref:`internal_optimizer_output` for details.

    """
    if not IS_DFOLS_INSTALLED:
        raise NotImplementedError(
            "The dfols package is not installed and required for 'nag_dfols'. "
            "You can install it with 'pip install DFO-LS'. "
            "For additional installation instructions visit: ",
            r"https://numericalalgorithmsgroup.github.io/dfols/build/html/install.html",
        )
    if trustregion_method_to_replace_extra_points == "momentum":
        trustregion_use_momentum = True
    elif trustregion_method_to_replace_extra_points in ["geometry_improving", None]:
        trustregion_use_momentum = False
    else:
        raise ValueError(
            "trustregion_method_to_replace_extra_points must be "
            "'geometry_improving', 'momentum' or None."
        )

    algo_info = {
        "name": "nag_dfols",
        "primary_criterion_entry": "root_contributions",
        "parallelizes": False,
        "needs_scaling": False,
    }

    advanced_options, trustregion_reset_options = _create_nag_advanced_options(
        x=x,
        noise_multiplicative_level=noise_multiplicative_level,
        noise_additive_level=noise_additive_level,
        noise_n_evals_per_point=noise_n_evals_per_point,
        convergence_noise_corrected_criterion_tolerance=convergence_noise_corrected_criterion_tolerance,  # noqa: E501
        trustregion_initial_radius=trustregion_initial_radius,
        trustregion_reset_options=trustregion_reset_options,
        convergence_slow_progress=convergence_slow_progress,
        interpolation_rounding_error=interpolation_rounding_error,
        threshold_for_safety_step=threshold_for_safety_step,
        clip_criterion_if_overflowing=clip_criterion_if_overflowing,
        initial_directions=initial_directions,
        random_directions_orthogonal=random_directions_orthogonal,
        trustregion_precondition_interpolation=trustregion_precondition_interpolation,
        trustregion_threshold_successful=trustregion_threshold_successful,
        trustregion_threshold_very_successful=trustregion_threshold_very_successful,
        trustregion_shrinking_factor_not_successful=trustregion_shrinking_factor_not_successful,  # noqa: E501
        trustregion_expansion_factor_successful=trustregion_expansion_factor_successful,
        trustregion_expansion_factor_very_successful=trustregion_expansion_factor_very_successful,  # noqa:E501
        trustregion_shrinking_factor_lower_radius=trustregion_shrinking_factor_lower_radius,  # noqa: E501
        trustregion_shrinking_factor_upper_radius=trustregion_shrinking_factor_upper_radius,  # noqa: E501
    )

    fast_start = _build_options_dict(
        user_input=trustregion_fast_start_options,
        default_options=TRUSTREGION_FAST_START_OPTIONS,
    )
    if fast_start["floor_of_jacobian_singular_values"] != 1:
        warnings.warn(
            "Setting the `floor_of_jacobian_singular_values` is not supported by "
            "DF-OLS as of version 1.2.1."
        )
    if (
        fast_start["shrink_upper_radius_in_safety_steps"]
        and fast_start["full_geometry_improving_step"]
    ):
        raise ValueError(
            "full_geometry_improving_step of the trustregion_fast_start_options can "
            "only be True if shrink_upper_radius_in_safety_steps is False."
        )

    (
        faststart_jac,
        faststart_step,
    ) = _get_fast_start_method(fast_start["method"])

    if (
        trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"]
        < trustregion_reset_options["n_extra_interpolation_points_per_hard_reset"]
    ):
        raise ValueError(
            "In the trustregion_reset_options "
            "'n_extra_interpolation_points_per_soft_reset' must "
            "be larger or the same as 'n_extra_interpolation_points_per_hard_reset'."
        )

    dfols_options = {
        "growing.full_rank.use_full_rank_interp": faststart_jac,
        "growing.perturb_trust_region_step": faststart_step,
        "restarts.hard.use_old_rk": trustregion_reset_options[
            "reuse_criterion_value_at_hard_reset"
        ],
        "restarts.auto_detect.min_chgJ_slope": trustregion_reset_options[
            "auto_detect_min_jacobian_increase"
        ],  # noqa: E501
        "restarts.max_npt": trustregion_reset_options["max_interpolation_points"],
        "restarts.increase_npt": trustregion_reset_options[
            "n_extra_interpolation_points_per_soft_reset"
        ]
        > 0,
        "restarts.increase_npt_amt": trustregion_reset_options[
            "n_extra_interpolation_points_per_soft_reset"
        ],
        "restarts.hard.increase_ndirs_initial_amt": trustregion_reset_options[
            "n_extra_interpolation_points_per_hard_reset"
        ]
        - trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"],
        "model.rel_tol": convergence_scaled_criterion_tolerance,
        "regression.num_extra_steps": trustregion_n_extra_points_to_replace_successful,
        "regression.momentum_extra_steps": trustregion_use_momentum,
        "regression.increase_num_extra_steps_with_restart": trustregion_reset_options[
            "n_additional_extra_points_to_replace_per_reset"
        ],
        "growing.ndirs_initial": fast_start["min_inital_points"],
        "growing.delta_scale_new_dirns": fast_start[
            "scale_of_trustregion_step_perturbation"
        ],
        "growing.full_rank.scale_factor": fast_start[
            "scale_of_jacobian_components_perturbation"
        ],
        "growing.full_rank.svd_max_jac_cond": fast_start[
            "jacobian_max_condition_number"
        ],
        "growing.do_geom_steps": fast_start["geometry_improving_steps"],
        "growing.safety.do_safety_step": fast_start["safety_steps"],
        "growing.safety.reduce_delta": fast_start[
            "shrink_upper_radius_in_safety_steps"
        ],
        "growing.safety.full_geom_step": fast_start["full_geometry_improving_step"],
        "growing.reset_delta": fast_start["reset_trustregion_radius_after_fast_start"],
        "growing.reset_rho": fast_start[
            "reset_min_trustregion_radius_after_fast_start"
        ],
        "growing.gamma_dec": fast_start["shrinking_factor_not_successful"],
        "growing.num_new_dirns_each_iter": fast_start[
            "n_extra_search_directions_per_iteration"
        ],
    }

    advanced_options.update(dfols_options)

    criterion = partial(
        criterion_and_derivative, task="criterion", algorithm_info=algo_info
    )

    res = dfols.solve(
        criterion,
        x0=x,
        bounds=(lower_bounds, upper_bounds),
        maxfun=stopping_max_criterion_evaluations,
        rhobeg=trustregion_initial_radius,
        npt=trustregion_n_interpolation_points,
        rhoend=convergence_minimal_trustregion_radius_tolerance,
        nsamples=noise_n_evals_per_point,
        objfun_has_noise=noise_additive_level or noise_multiplicative_level,
        scaling_within_bounds=False,
        do_logging=False,
        print_progress=False,
        user_params=advanced_options,
    )

    return _process_nag_result(res, len(x))
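
# Hedged usage sketch (not from the source): the docstring above says not to call
# nag_dfols directly but to pass its name to estimagic's minimize/maximize together
# with algo_options. The params format (a DataFrame with a "value" column) and the
# "root_contributions"/"value" keys in the criterion output are assumptions about
# the estimagic release in use; check its documentation before relying on this.
import numpy as np
import pandas as pd
from estimagic import minimize


def least_squares_criterion(params):
    x = params["value"].to_numpy()
    residuals = np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])
    # least-squares algorithms such as nag_dfols read the residual vector
    # from the "root_contributions" entry
    return {"root_contributions": residuals, "value": residuals @ residuals}


start_params = pd.DataFrame({"value": [-1.2, 1.0]}, index=["x_0", "x_1"])
res = minimize(
    least_squares_criterion,
    params=start_params,
    algorithm="nag_dfols",
    algo_options={"stopping_max_criterion_evaluations": 200},
)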
Example #17
    [455.2, 428.6, 124.1, 67.3, 43.2, 28.1, 13.1, -0.4, -1.3, -1.5])


# Model is y(t) = x[0] * exp(x[1] * t)
def prediction_error(x):
    return ydata - x[0] * np.exp(x[1] * tdata)


# Define the starting point
x0 = np.array([100.0, -1.0])

# We expect exponential decay: set upper bound x[1] <= 0
upper = np.array([1e20, 0.0])

# Call DFO-LS
soln = dfols.solve(prediction_error, x0, bounds=(None, upper))

# Display output
print(soln)
exit()

# Plot calibrated model vs. observations
ts = np.linspace(0.0, 90.0)
ys = soln.x[0] * np.exp(soln.x[1] * ts)

import matplotlib.pyplot as plt
plt.figure(1)
ax = plt.gca()  # current axes
ax.plot(ts, ys, 'k-', label='Model')
ax.plot(tdata, ydata, 'bo', label='Data')
ax.set_xlabel('t')
Example #18
def do_parameter_fit(x0, datas, lsq_solver, model_solver, LARGE_V=50):
    """
    Function to fit parameters given a starting point and datas.
    Choice of which optimisation algorithm and which model to use.
    """
    def prediction_error(x,
                         show_plots=True,
                         show_stars=True,
                         save_plot_data=False):
        """
        Compare model to data and return array of differences
        """
        # Rescale x
        x = lower + (upper - lower) * x
        # Initial SOCs
        q0s = np.append([1], x[4:])
        # Prepare dict
        modelVs = {}
        for i, name in enumerate(datas.keys()):
            data = datas[name]

            # Make dict of fitting parameters from x
            pars = {
                "epsnmax": x[0],
                "epssmax": x[1],
                "epspmax": x[0],
                "cmax": 5.65,
                "jref_n": x[2],
                "jref_p": x[2] / 10,
                "qinit": q0s[i],
            }

            # Shape preserving interpolation of current: do the hard work offline
            interpclass = interp.PchipInterpolator(data["time"],
                                                   data["current"])

            def Icircuit(t):
                return interpclass(t)

            # Load parameters
            pars = Parameters(fit=pars,
                              Icircuit=Icircuit,
                              Ibar=np.max(Icircuit(data["time"])))

            # Nondimensionalise time
            t = data["time"] / pars.scales.time

            # Run model
            model = model_solver(t, pars, grid, Vonly=True)
            model.dimensionalise(pars, grid)

            # Take away resistance of the wires
            model.Vcircuit -= x[3] * data["current"][:, np.newaxis]

            # Remove NaNs
            model.Vcircuit[np.isnan(model.Vcircuit)] = LARGE_V

            # Store voltages
            modelVs[name] = model.Vcircuit

        # Plot data vs model
        if show_plots:
            plt.ion()
            fig = plt.figure(1)
            plt.clf()
            for i, name in enumerate(datas.keys()):
                plt.plot(
                    datas[name]["time"],
                    datas[name]["voltage"],
                    "o",
                    markersize=1,
                    color="C" + str(i),
                )
                plt.plot(datas[name]["time"],
                         modelVs[name],
                         color="C" + str(i),
                         label=name)
            # plt.title('params={}'.format(x))
            plt.xlabel("Time [h]")
            plt.ylabel("Voltage [V]")
            legend = plt.legend()
            fig.canvas.flush_events()
            time.sleep(0.01)

        # Make vector of differences (including weights)
        diffs = np.concatenate([
            (data["voltage"] - modelVs[name][:, 0]) * data["weights"]
            for name, data in datas.items()
        ])

        # Show progress
        # if show_stars:
        #     plt.figure(2)
        #     plt.plot(x, np.dot(diffs, diffs), 'rx')
        #     plt.pause(.01)

        # Save plot data
        if save_plot_data:
            # Set linestyles
            linestyles = {
                "3A": "",
                "2.5A": "",
                "2A": "",
                "1.5A": "",
                "1A": "",
                "0.5A": "",
            }
            with open(
                    "out/plot_calls/fits/{}_{}.txt".format(
                        lsq_solver, model_solver.__name__),
                    "w",
            ) as fit_plot_calls:
                # Save data and model from each current
                for i, name in enumerate(datas.keys()):
                    save_data(
                        "fits/{}/{}/{}_model".format(lsq_solver,
                                                     model_solver.__name__,
                                                     name),
                        datas[name]["time"],
                        modelVs[name],
                        fit_plot_calls,
                        linestyles[name],
                        n_coarse_points=100,
                    )
                    save_data(
                        "fits/{}/{}/{}_data".format(lsq_solver,
                                                    model_solver.__name__,
                                                    name),
                        datas[name]["time"],
                        datas[name]["voltage"],
                        fit_plot_calls,
                        linestyles[name],
                        n_coarse_points=100,
                    )
                # Add legend (two commas to ignore the data entries)
                fit_plot_calls.write(
                    ("\\legend{{{!s}" + ", ,{!s}" * (len(datas.keys()) - 1) +
                     "}}\n").format(*tuple(datas.keys())))
        return diffs

    # Compute grid (doesn't depend on base parameters)
    grid = Grid(10)

    fit_currents = datas.keys()

    # Set the bounds
    lower = np.concatenate(
        [np.array([0.0, 0.0, 0.01, 0.0]),
         np.zeros(len(datas) - 1)])
    upper = np.concatenate(
        [np.array([1.0, 1.0, 1.0, 1.0]),
         np.ones(len(datas) - 1)])

    # Rescale x0 so that all fitting parameters go from 0 to 1
    x0 = (x0 - lower) / (upper - lower)
    # errs = np.array([])
    # js = np.linspace(0.01,1,10)
    # for j in js:
    #     diffs = prediction_error(j, show_plots=True, show_stars=False)
    #     errs = np.append(errs, np.dot(diffs, diffs))
    # plt.figure(2)
    # plt.plot(js, errs)
    # plt.pause(0.01)
    # Do curve fitting
    print("Fit using {} on the {} model".format(lsq_solver,
                                                model_solver.__name__))
    print("-" * 60)
    if lsq_solver in ["dfogn", "dfols"]:
        # Set logging to INFO to view progress
        logging.basicConfig(level=logging.INFO, format="%(message)s")

        # Call and time DFO-GN or DFO-LS
        start = time.time()
        if lsq_solver == "dfogn":
            soln = dfogn.solve(
                prediction_error,
                x0,
                lower=np.zeros(len(x0)),
                upper=np.ones(len(x0)),
                rhoend=1e-5,
            )
        elif lsq_solver == "dfols":
            soln = dfols.solve(
                prediction_error,
                x0,
                bounds=(np.zeros(len(x0)), np.ones(len(x0))),
                rhoend=1e-5,
            )
        soln_time = time.time() - start

        # Scale x back to original scale
        x = lower + (upper - lower) * soln.x

        # Display output
        print(" *** DFO-GN results *** ")
        print("Solution xmin = %s" % str(x))
        print("Objective value f(xmin) = %.10g" % soln.f)
        print("Needed %g objective evaluations" % soln.nf)
        print("Exit flag = %g" % soln.flag)
        print(soln.msg)

        # Save solution parameters
        # save_output = {'y': True, 'n': False}[input('Save fitted params? (y/n): ')]
        # if save_output:
        #     filename = "out/fits/dfogn_{}.txt".format(model_solver.__name__)
        #     np.savetxt(filename, lower+(upper-lower)*soln.x)

        return (x, soln.f, soln_time)

    elif lsq_solver == "scipy":
        # Call and time scipy least squares
        start = time.time()
        soln = opt.least_squares(
            prediction_error,
            x0,
            bounds=(np.zeros(len(x0)), np.ones(len(x0))),
            method="trf",
            jac="2-point",
            diff_step=1e-5,
            ftol=1e-4,
            verbose=2,
        )
        soln_time = time.time() - start

        # Scale x back to original scale
        x = lower + (upper - lower) * soln.x

        # Display output
        print(" *** SciPy results *** ")
        print("Solution xmin = %s" % str(x))
        print("Objective value f(xmin) = %.10g" % (soln.cost * 2))
        print("Needed %g objective evaluations" % soln.nfev)
        print("Exit flag = %g" % soln.status)
        print(soln.message)

        # Save solution parameters
        # save_output = {'y': True, 'n': False}[input('Save fitted params? (y/n): ')]
        # if save_output:
        #     filename = "out/fits/scipy_{}.txt".format(model_solver.__name__)
        #     np.savetxt(filename, x)

        return (x, soln.cost * 2, soln_time)

    elif lsq_solver is None:
        # Do nothing
        diffs = prediction_error(x0)
        return (x0, np.dot(diffs, diffs), 0)

    # save plots (hacky!)
    prediction_error(x, save_plot_data=True)
Example #19
def run(resume=False, high_e=True):

    xinit, targ_mode = get_point(high_e, read_wisdom=False)
    tar = target_values(targ_mode)

    if resume:
        x_load = filer('wisdom_refined.pkl', 0, 0)
        prob_meet_load = x_load['pmeet_exo']
        prob_preg_load = x_load['ppreg_exo']
        xinit = x_load

    out, mdl, agents, res, mom = mdl_resid(x=xinit,
                                           targets=tar,
                                           return_format=[
                                               'distance', 'models', 'agents',
                                               'scaled residuals', 'moments'
                                           ],
                                           verbose=False)

    print('initial distance is {}'.format(out))

    if resume:
        prob_meet_init = prob_meet_load
        prob_preg_init = prob_preg_load
    else:
        prob_meet_init = np.array(
            mdl[0].setup.pars['pmeet_t'][:mdl[0].setup.pars['Tmeet']])
        prob_preg_init = np.array([
            mdl[0].setup.upp_precomputed_fem[t][3]
            for t in range(mdl[0].setup.pars['Tmeet'])
        ])

    nopt = 10
    yfactor = 1.5

    for iopt in range(nopt):

        print('running estimation round {}'.format(iopt))

        print('estimating probabilities:')

        prob_meet_est = 0.0
        prob_preg_est = 0.0
        nrep = 4 if iopt > 0 else 1
        np.random.seed(12)

        for rep in range(nrep):
            o = AgentsEst(mdl, T=30, verbose=False, fix_seed=False)

            prob_meet_est += (1 / nrep) * o.pmeet_exo.copy()
            prob_preg_est += (1 / nrep) * o.ppreg_exo.copy()

        print('estimated pmeet = {}'.format(prob_meet_est))
        print('estimated ppreg = {}'.format(prob_preg_est))

        # this does binary search

        w = 1.0

        factor = 0.5

        ne = prob_meet_est.size
        nw = 10
        print('reference value is {}'.format(out))

        y_previous = out

        for i in range(nw):
            prob_meet_w = w * prob_meet_est + (1 - w) * prob_meet_init[:ne]
            prob_preg_w = w * prob_preg_est + (1 - w) * prob_preg_init[:ne]

            xsearch = xinit.copy()
            xsearch.update({
                'pmeet_exo': prob_meet_w,
                'ppreg_exo': prob_preg_w
            })

            out_w = mdl_resid(x=xsearch,
                              targets=tar,
                              return_format=['distance'],
                              verbose=False)

            print('with weight = {}, distance is {}'.format(w, out_w))

            if out_w < yfactor * out:
                print('found a potentially improving weight for yfactor = {}'.
                      format(yfactor))
                break
            else:
                w = factor * w
                if i < nw - 1:
                    print('trying new weight = {}'.format(w))
                else:
                    print('no luck...')

        xfix = {
            k: xinit[k]
            for k in [
                'pmeet_21', 'pmeet_30', 'pmeet_40', 'preg_21', 'preg_28',
                'preg_35'
            ]
        }

        lb, ub, _, keys, translator = calibration_params(xfix=xfix)

        def tr(x):
            xx = translator(x)
            xx.update({'pmeet_exo': prob_meet_w, 'ppreg_exo': prob_preg_w})
            return xx

        x0 = [xinit[key] for key in keys]

        x0, lb, ub = np.array(x0), np.array(lb), np.array(ub)

        print('starting from {}'.format(tr(x0)))

        tar = target_values('high education')

        def q(pt):
            #print('computing at point {}'.format(translator(pt)))
            try:
                ans = mdl_resid(tr(pt), return_format=['scaled residuals'])
            except BaseException as a:
                print('During optimization function evaluation failed at {}'.
                      format(pt))
                print(a)
                ans = np.array([1e6])
            finally:
                gc.collect()
            return ans

        res = dfols.solve(q,
                          x0,
                          rhobeg=0.02,
                          rhoend=1e-5,
                          maxfun=60,
                          bounds=(lb, ub),
                          scaling_within_bounds=True,
                          objfun_has_noise=False,
                          npt=len(x0) + 5,
                          user_params={
                              'restarts.use_restarts': True,
                              'restarts.rhoend_scale': 0.5,
                              'restarts.increase_npt': True
                          })

        print(res)
        print('Result is {}'.format(tr(res.x)))
        filer('wisdom_refined.pkl', tr(res.x), True)
        print('wrote to the file!')

        xinit = tr(res.x)
        out, mdl, agents, res, mom = mdl_resid(x=xinit,
                                               return_format=[
                                                   'distance', 'models',
                                                   'agents',
                                                   'scaled residuals',
                                                   'moments'
                                               ])
        if out > y_previous:
            print('no reduction in function value obtained')
            yfactor = 0.5 * yfactor + 0.5

        y_previous = out
Example #20
        return 1e5 * np.ones_like(data_plus_noise)
    prediction = objective(solution)
    # Crude way of making sure we always get an answer
    if len(prediction) != len(data_plus_noise):
        return 1e5 * np.ones_like(data_plus_noise)
    else:
        out = prediction - data_plus_noise
        # print(x, np.linalg.norm(out))
        return out


# Do parameter fitting to find solution (using derivative-free library)
x0 = np.array([0.7, 0.5])

soln_lsq = least_squares(prediction_error, x0, verbose=2)
soln_dfols = dfols.solve(prediction_error, x0)  # , bounds=(np.array([0]), None))

for algorithm, soln in [("scipy.least_squares", soln_lsq), ("DFO-LS", soln_dfols)]:
    print(algorithm)
    print("-" * 20)
    print(soln)
    print("-" * 20)
    found_solution = solver.solve(
        model,
        t_eval,
        inputs={
            "Cation transference number": soln.x[0],
            "Electrolyte conductivity [S.m-1]": soln.x[1],
        },
    )
    plt.plot(
Example #21
# DFO-LS example: Solving a nonlinear system of equations
# Originally from:
# http://support.sas.com/documentation/cdl/en/imlug/66112/HTML/default/viewer.htm#imlug_genstatexpls_sect004.htm

from __future__ import print_function
from math import exp
import numpy as np
import dfols


# Want to solve:
#   x1 + x2 - x1*x2 + 2 = 0
#   x1 * exp(-x2) - 1   = 0
def nonlinear_system(x):
    return np.array([x[0] + x[1] - x[0] * x[1] + 2, x[0] * exp(-x[1]) - 1.0])


# Warning: if there are multiple solutions, which one
#          DFO-LS returns will likely depend on x0!
x0 = np.array([0.1, -2.0])

# Set random seed (for reproducibility)
np.random.seed(0)

# Call DFO-LS
soln = dfols.solve(nonlinear_system, x0)

# Display output
print(soln)
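
# Small follow-on sketch (not part of the original example): re-run the solver
# from a second, arbitrarily chosen starting point to illustrate the warning above
# that the returned root can depend on x0 when the system has several solutions.
soln_alt = dfols.solve(nonlinear_system, np.array([2.0, 1.0]))
print(soln_alt.x)  # may differ from the root found from the first x0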
Example #22
def fun(x):
    assert type(x) is tuple, 'x must be a tuple!'

    action = x[0]
    args = x[1]

    assert type(action) is str, 'x[0] should be string for action'
    assert len(x) <= 2, 'too many things in x! x is (action, args)'

    if action == 'test':
        return mdl_resid()
    elif action == 'compute':
        return mdl_resid(args)
    elif action == 'minimize':

        import dfols
        import pybobyqa

        i, N_st, xfix = args

        xl, xu, x0, keys, translator = calibration_params(xfix=xfix)

        #Sort lists
        def sortFirst(val):
            return val[0]

        #Get the starting point for local minimization

        #Open File with best solution so far
        param = filer('wisdom.pkl', 0, False)

        param.sort(key=sortFirst)
        print('f best so far is {} and x is {}'.format(param[0][0],
                                                       param[0][1]))
        xm = param[0][1]

        #Get right sobol sequence point
        xt = filer('sobol.pkl', None, False)

        #Determine the initial position
        dump = min(max(0.1, ((i + 1) / N_st)**(0.5)), 0.995)

        xc = dump * xm + (1 - dump) * xt[:, i]
        xc = xc.squeeze()

        print('The initial position is {}'.format(xc))

        #Standard Way
        def q(pt):
            try:
                ans = mdl_resid(translator(pt),
                                return_format=['scaled residuals'])[0]

            except:
                print('During optimization function evaluation failed at {}'.
                      format(pt))
                ans = np.array([1e6])
            finally:
                gc.collect()
                return ans

        res = dfols.solve(q,
                          xc,
                          rhobeg=0.01,
                          rhoend=1e-4,
                          maxfun=100,
                          bounds=(xl, xu),
                          scaling_within_bounds=True,
                          objfun_has_noise=False,
                          print_progress=True)

        #res=pybobyqa.solve(q, xc, rhobeg = 0.001, rhoend=1e-6, maxfun=80, bounds=(xl,xu),
        #               scaling_within_bounds=True,objfun_has_noise=False,print_progress=True)

        print(res)

        if res.flag == -1:
            raise Exception('solver returned something creepy...')

        fbest = mdl_resid(translator(
            res.x))[0]  # in principle, this can be inconsistent with
        # squared sum of residuals

        print('fbest is {} and res.f is {}'.format(fbest, res.f))

        print('Final value is {}'.format(fbest))

        param_new = filer('wisdom.pkl', None, False)

        param_write = param_new + [(fbest, res.x)]

        #Save Updated File
        param_write.sort(key=sortFirst)
        filer('wisdom.pkl', param_write, True)

        return fbest

    else:
        raise Exception('unsupported action or format')
Example #23
# DFO-LS example: minimize the Rosenbrock function
from __future__ import print_function
import numpy as np
import dfols

# Define the objective function
def rosenbrock(x):
    return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])

# Define the starting point
x0 = np.array([-1.2, 1.0])

# Define bound constraints (lower <= x <= upper)
lower = np.array([-10.0, -10.0])
upper = np.array([0.9, 0.85])

# Set random seed (for reproducibility)
np.random.seed(0)

# For optional extra output details
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')

# Call DFO-LS
soln = dfols.solve(rosenbrock, x0, bounds=(lower, upper))

# Display output
print(soln)
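
# Optional follow-up (a sketch, not in the original example): check the exit flag
# programmatically, as the APOSMM and DFOLS-driver examples above do, before
# trusting soln.x.
if soln.flag == soln.EXIT_SUCCESS:
    print("Converged to", soln.x)
else:
    print("DFO-LS did not report success: flag =", soln.flag, "-", soln.msg)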

Example #24
def fun(x):
    assert type(x) is tuple, 'x must be a tuple!'

    action = x[0]
    args = x[1]

    assert type(action) is str, 'x[0] should be string for action'
    assert len(x) <= 2, 'too many things in x! x is (action, args)'

    if action == 'test':
        return mdl_resid()
    elif action == 'compute':
        return mdl_resid(args)
    elif action == 'moments':
        agents = mdl_resid(args, return_format=['agents'])
        mom = agents.compute_moments()
        return mom
    elif action == 'minimize':

        import dfols

        i, N_st, xfix = args

        xl, xu, x0, keys, translator = calibration_params(xfix=xfix)

        #Sort lists
        def sortFirst(val):
            return val[0]

        #Get the starting point for local minimization

        #Open File with best solution so far
        param = filer('wisdom.pkl', 0, False)

        param.sort(key=sortFirst)
        print('f best so far is {} and x is {}'.format(param[0][0],
                                                       param[0][1]))
        xm = param[0][1]

        #Get right sobol sequence point
        xt = filer('sobol.pkl', None, False)

        #Determine the initial position
        dump = min(max(0.1, ((i + 1) / N_st)**(0.5)), 0.995)

        xc = dump * xm + (1 - dump) * xt[:, i]
        xc = xc.squeeze()

        def q(pt):
            try:
                ans = mdl_resid(translator(pt),
                                moments_repeat=3,
                                return_format=['scaled residuals'])
            except BaseException as a:
                print('During optimization function evaluation failed at {}'.
                      format(pt))
                print(a)
                ans = np.array([1e6])
            finally:
                gc.collect()
                return ans

        res = dfols.solve(
            q,
            xc,
            rhobeg=0.15,
            rhoend=1e-6,
            maxfun=npt,
            bounds=(xl, xu),
            #npt=len(xc)+5,
            scaling_within_bounds=True,
            #user_params={'tr_radius.gamma_dec':0.75,'tr_radius.gamma_inc':1.5,
            #             'tr_radius.alpha1':0.5,'tr_radius.alpha2':0.75,
            #             'regression.momentum_extra_steps':True,
            #'restarts.use_restarts':True},
            objfun_has_noise=True)

        print(res)

        if res.flag == -1:
            raise Exception('solver returned something creepy...')

        fbest = mdl_resid(translator(
            res.x))  # in principle, this can be inconsistent with
        # squared sum of residuals

        print('fbest is {} and res.f is {}'.format(fbest, res.f))

        print('Final value is {}'.format(fbest))

        param_new = filer('wisdom.pkl', None, False)

        param_write = param_new + [(fbest, res.x)]

        #Save Updated File
        param_write.sort(key=sortFirst)
        filer('wisdom.pkl', param_write, True)

        return fbest

    else:
        raise Exception('unsupported action or format')
Example #25
def nag_dfols(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    clip_criterion_if_overflowing=CLIP_CRITERION_IF_OVERFLOWING,
    convergence_minimal_trustregion_radius_tolerance=CONVERGENCE_MINIMAL_TRUSTREGION_RADIUS_TOLERANCE,  # noqa: E501
    convergence_noise_corrected_criterion_tolerance=CONVERGENCE_NOISE_CORRECTED_CRITERION_TOLERANCE,  # noqa: E501
    convergence_scaled_criterion_tolerance=0.0,
    convergence_slow_progress=None,
    initial_directions=INITIAL_DIRECTIONS,
    interpolation_rounding_error=INTERPOLATION_ROUNDING_ERROR,
    noise_additive_level=None,
    noise_multiplicative_level=None,
    noise_n_evals_per_point=None,
    random_directions_orthogonal=RANDOM_DIRECTIONS_ORTHOGONAL,
    stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS,
    threshold_for_safety_step=THRESHOLD_FOR_SAFETY_STEP,
    trustregion_expansion_factor_successful=TRUSTREGION_EXPANSION_FACTOR_SUCCESSFUL,
    trustregion_expansion_factor_very_successful=TRUSTREGION_EXPANSION_FACTOR_VERY_SUCCESSFUL,  # noqa: E501
    trustregion_fast_start_options=None,
    trustregion_initial_radius=None,
    trustregion_method_to_replace_extra_points="geometry_improving",
    trustregion_n_extra_points_to_replace_successful=0,
    trustregion_n_interpolation_points=None,
    trustregion_precondition_interpolation=TRUSTREGION_PRECONDITION_INTERPOLATION,
    trustregion_reset_options=None,
    trustregion_shrinking_factor_not_successful=TRUSTREGION_SHRINKING_FACTOR_NOT_SUCCESSFUL,  # noqa: E501
    trustregion_shrinking_factor_lower_radius=TRUSTREGION_SHRINKING_FACTOR_LOWER_RADIUS,
    trustregion_shrinking_factor_upper_radius=TRUSTREGION_SHRINKING_FACTOR_UPPER_RADIUS,
    trustregion_threshold_successful=TRUSTREGION_THRESHOLD_SUCCESSFUL,
    trustregion_threshold_very_successful=TRUSTREGION_THRESHOLD_VERY_SUCCESSFUL,
):
    r"""Minimize a function with least squares structure using DFO-LS.

    For details see :ref:`list_of_nag_algorithms`.

    """
    if not IS_DFOLS_INSTALLED:
        raise NotInstalledError(
            "The 'nag_dfols' algorithm requires the DFO-LS package to be installed. "
            "You can install it with 'pip install DFO-LS'. "
            "For additional installation instructions visit: "
            r"https://numericalalgorithmsgroup.github.io/dfols/build/html/install.html",
        )
    if trustregion_method_to_replace_extra_points == "momentum":
        trustregion_use_momentum = True
    elif trustregion_method_to_replace_extra_points in [
            "geometry_improving", None
    ]:
        trustregion_use_momentum = False
    else:
        raise ValueError("trustregion_method_to_replace_extra_points must be "
                         "'geometry_improving', 'momentum' or None.")

    advanced_options, trustregion_reset_options = _create_nag_advanced_options(
        x=x,
        noise_multiplicative_level=noise_multiplicative_level,
        noise_additive_level=noise_additive_level,
        noise_n_evals_per_point=noise_n_evals_per_point,
        convergence_noise_corrected_criterion_tolerance=convergence_noise_corrected_criterion_tolerance,  # noqa: E501
        trustregion_initial_radius=trustregion_initial_radius,
        trustregion_reset_options=trustregion_reset_options,
        convergence_slow_progress=convergence_slow_progress,
        interpolation_rounding_error=interpolation_rounding_error,
        threshold_for_safety_step=threshold_for_safety_step,
        clip_criterion_if_overflowing=clip_criterion_if_overflowing,
        initial_directions=initial_directions,
        random_directions_orthogonal=random_directions_orthogonal,
        trustregion_precondition_interpolation=trustregion_precondition_interpolation,  # noqa: E501
        trustregion_threshold_successful=trustregion_threshold_successful,
        trustregion_threshold_very_successful=trustregion_threshold_very_successful,  # noqa: E501
        trustregion_shrinking_factor_not_successful=trustregion_shrinking_factor_not_successful,  # noqa: E501
        trustregion_expansion_factor_successful=trustregion_expansion_factor_successful,  # noqa: E501
        trustregion_expansion_factor_very_successful=trustregion_expansion_factor_very_successful,  # noqa: E501
        trustregion_shrinking_factor_lower_radius=trustregion_shrinking_factor_lower_radius,  # noqa: E501
        trustregion_shrinking_factor_upper_radius=trustregion_shrinking_factor_upper_radius,  # noqa: E501
    )

    fast_start = _build_options_dict(
        user_input=trustregion_fast_start_options,
        default_options=TRUSTREGION_FAST_START_OPTIONS,
    )
    if fast_start["floor_of_jacobian_singular_values"] != 1:
        warnings.warn(
            "Setting the `floor_of_jacobian_singular_values` is not supported by "
            "DF-OLS as of version 1.2.1.")
    if (fast_start["shrink_upper_radius_in_safety_steps"]
            and fast_start["full_geometry_improving_step"]):
        raise ValueError(
            "full_geometry_improving_step of the trustregion_fast_start_options can "
            "only be True if shrink_upper_radius_in_safety_steps is False.")

    (
        faststart_jac,
        faststart_step,
    ) = _get_fast_start_method(fast_start["method"])

    if (trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"]
            < trustregion_reset_options[
                "n_extra_interpolation_points_per_soft_reset"]):
        raise ValueError(
            "In the trustregion_reset_options "
            "'n_extra_interpolation_points_per_soft_reset' must "
            "be larger or the same as 'n_extra_interpolation_points_per_hard_reset'."
        )

    dfols_options = {
        "growing.full_rank.use_full_rank_interp": faststart_jac,
        "growing.perturb_trust_region_step": faststart_step,
        "restarts.hard.use_old_rk": trustregion_reset_options[
            "reuse_criterion_value_at_hard_reset"
        ],
        "restarts.auto_detect.min_chgJ_slope": trustregion_reset_options[
            "auto_detect_min_jacobian_increase"
        ],
        "restarts.max_npt": trustregion_reset_options["max_interpolation_points"],
        "restarts.increase_npt": trustregion_reset_options[
            "n_extra_interpolation_points_per_soft_reset"
        ]
        > 0,
        "restarts.increase_npt_amt": trustregion_reset_options[
            "n_extra_interpolation_points_per_soft_reset"
        ],
        "restarts.hard.increase_ndirs_initial_amt": trustregion_reset_options[
            "n_extra_interpolation_points_per_hard_reset"
        ]
        - trustregion_reset_options["n_extra_interpolation_points_per_soft_reset"],
        "model.rel_tol": convergence_scaled_criterion_tolerance,
        "regression.num_extra_steps": trustregion_n_extra_points_to_replace_successful,
        "regression.momentum_extra_steps": trustregion_use_momentum,
        "regression.increase_num_extra_steps_with_restart": trustregion_reset_options[
            "n_additional_extra_points_to_replace_per_reset"
        ],
        "growing.ndirs_initial": fast_start["min_inital_points"],
        "growing.delta_scale_new_dirns": fast_start[
            "scale_of_trustregion_step_perturbation"
        ],
        "growing.full_rank.scale_factor": fast_start[
            "scale_of_jacobian_components_perturbation"
        ],
        "growing.full_rank.svd_max_jac_cond": fast_start[
            "jacobian_max_condition_number"
        ],
        "growing.do_geom_steps": fast_start["geometry_improving_steps"],
        "growing.safety.do_safety_step": fast_start["safety_steps"],
        "growing.safety.reduce_delta": fast_start[
            "shrink_upper_radius_in_safety_steps"
        ],
        "growing.safety.full_geom_step": fast_start["full_geometry_improving_step"],
        "growing.reset_delta": fast_start["reset_trustregion_radius_after_fast_start"],
        "growing.reset_rho": fast_start[
            "reset_min_trustregion_radius_after_fast_start"
        ],
        "growing.gamma_dec": fast_start["shrinking_factor_not_successful"],
        "growing.num_new_dirns_each_iter": fast_start[
            "n_extra_search_directions_per_iteration"
        ],
    }

    advanced_options.update(dfols_options)

    res = dfols.solve(
        criterion,
        x0=x,
        bounds=(lower_bounds, upper_bounds),
        maxfun=stopping_max_criterion_evaluations,
        rhobeg=trustregion_initial_radius,
        npt=trustregion_n_interpolation_points,
        rhoend=convergence_minimal_trustregion_radius_tolerance,
        nsamples=noise_n_evals_per_point,
        objfun_has_noise=noise_additive_level or noise_multiplicative_level,
        scaling_within_bounds=False,
        do_logging=False,
        print_progress=False,
        user_params=advanced_options,
    )

    return _process_nag_result(res, len(x))
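# A minimal usage sketch for the wrapper above. It assumes `nag_dfols` and its
# module-level defaults (e.g. STOPPING_MAX_CRITERION_EVALUATIONS) are available
# from the surrounding module and that DFO-LS is installed; the criterion and
# starting values are purely illustrative. The criterion returns the vector of
# residuals, which is the least-squares interface DFO-LS expects.

import numpy as np


def least_squares_criterion(params):
    # Residuals of a toy linear model y = 2*x + 1 with params = (slope, intercept).
    x_data = np.arange(5.0)
    y_data = 2.0 * x_data + 1.0
    return params[0] * x_data + params[1] - y_data


result = nag_dfols(
    least_squares_criterion,
    np.array([0.5, 0.0]),
    lower_bounds=np.full(2, -10.0),
    upper_bounds=np.full(2, 10.0),
)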
Example #26
0
    def q(pt):
        # print('computing at point {}'.format(translator(pt)))
        try:
            ans = mdl_resid(translator(pt), return_format=['scaled residuals'])
        except BaseException as e:
            print('Function evaluation failed during optimization '
                  'at {}'.format(pt))
            print(e)
            ans = np.array([1e6])
        finally:
            gc.collect()
            return ans

    res = dfols.solve(q,
                      x_init,
                      rhobeg=0.02,
                      rhoend=1e-5,
                      maxfun=120,
                      bounds=(lb, ub),
                      scaling_within_bounds=True,
                      objfun_has_noise=False,
                      npt=len(x_init) + 5,
                      user_params={
                          'restarts.use_restarts': True,
                          'restarts.rhoend_scale': 0.5,
                          'restarts.increase_npt': True
                      })

    print(res)
    print('Result is {}'.format(translator(res.x)))
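# Both `q`-based examples rely on `translator` and `mdl_resid`, which are not
# shown in this excerpt. A rough sketch of the pattern is given below, under the
# assumption that `translator` maps the optimizer's flat vector to named model
# parameters and that `mdl_resid` returns a vector of (scaled) residuals; the
# parameter names and target values are purely illustrative.

import numpy as np


def translator(pt):
    # Hypothetical mapping from the flat optimization vector to named parameters.
    return {'beta': pt[0], 'sigma': pt[1]}


def mdl_resid(params, return_format=('scaled residuals',)):
    # Hypothetical residual function: difference between simulated and target moments.
    target_moments = np.array([0.3, 1.2])
    simulated_moments = np.array([params['beta'], params['sigma']])
    return simulated_moments - target_moments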