Code example #1
def _check_dimensions_hessian(hessian, f_tree, params_tree):
    extended_registry = get_registry(extended=True)
    flat_f = tree_leaves(f_tree, registry=extended_registry)
    flat_p = tree_leaves(params_tree, registry=extended_registry)

    if len(flat_f) == 1:
        if np.squeeze(hessian).ndim == 0:
            if len(flat_p) != 1:
                raise ValueError(
                    "Hessian dimension does not match those of params.")
        elif np.squeeze(hessian).ndim == 2:
            if np.squeeze(hessian).shape != (len(flat_p), len(flat_p)):
                raise ValueError(
                    "Hessian dimension does not match those of params.")
        else:
            raise ValueError(
                "Hessian must be 0- or 2-d if f is scalar-valued.")
    else:
        if hessian.ndim != 3:
            raise ValueError("Hessian must be 3d if f is multidimensional.")
        if hessian.shape[0] != len(flat_f):
            raise ValueError(
                "First Hessian dimension does not match that of f.")
        if hessian.shape[1:] != (len(flat_p), len(flat_p)):
            raise ValueError(
                "Last two Hessian dimensions do not match those of params.")
Code example #2
def _check_dimensions_matrix(matrix, outer_tree, inner_tree):
    extended_registry = get_registry(extended=True)
    flat_outer = tree_leaves(outer_tree, registry=extended_registry)
    flat_inner = tree_leaves(inner_tree, registry=extended_registry)

    if matrix.shape[0] != len(flat_outer):
        raise ValueError(
            "First dimension of matrix does not match that of outer_tree.")
    if matrix.shape[1] != len(flat_inner):
        raise ValueError(
            "Second dimension of matrix does not match that of inner_tree.")
Code example #3
def test_block_tree_to_hessian_bijection():
    params = {"a": np.arange(4), "b": [{"c": (1, 2), "d": np.array([5, 6])}]}
    f_tree = {"e": np.arange(3), "f": (5, 6, [7, 8, {"g": 1.0}])}

    registry = get_registry(extended=True)
    n_p = len(tree_leaves(params, registry=registry))
    n_f = len(tree_leaves(f_tree, registry=registry))

    expected = np.arange(n_f * n_p**2).reshape(n_f, n_p, n_p)
    block_hessian = hessian_to_block_tree(expected, f_tree, params)
    got = block_tree_to_hessian(block_hessian, f_tree, params)
    assert_array_equal(expected, got)
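For reference, the extended registry counts array elements individually: params flattens to 4 + 2 + 2 = 8 leaves and f_tree to 3 + 5 = 8, so expected has shape (8, 8, 8) and the assertion checks that the round trip reproduces all 512 entries.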
Code example #4
def block_tree_to_matrix(block_tree, outer_tree, inner_tree):
    """Convert a block tree to a matrix.

    A block tree most often arises when one applies an operation to a function that maps
    between two trees. Two main examples are the Jacobian of a function f : inner_tree
    -> outer_tree, which results in a block-tree structure, and the covariance matrix of
    a tree, in which case outer_tree = inner_tree.

    Args:
        block_tree (pytree): A (block) pytree; must match dimensions of outer_tree and
            inner_tree.
        outer_tree (pytree): A pytree.
        inner_tree (pytree): A pytree.

    Returns:
        matrix (np.ndarray): 2d array containing information stored in block_tree.

    """
    flat_outer = tree_leaves(outer_tree)
    flat_inner = tree_leaves(inner_tree)
    flat_block_tree = tree_leaves(block_tree)

    flat_outer_np = [
        _convert_to_numpy(leaf, only_pandas=True) for leaf in flat_outer
    ]
    flat_inner_np = [
        _convert_to_numpy(leaf, only_pandas=True) for leaf in flat_inner
    ]

    size_outer = [np.size(a) for a in flat_outer_np]
    size_inner = [np.size(a) for a in flat_inner_np]

    n_blocks_outer = len(size_outer)
    n_blocks_inner = len(size_inner)

    block_rows_raw = [
        flat_block_tree[n_blocks_inner * i:n_blocks_inner * (i + 1)]
        for i in range(n_blocks_outer)
    ]

    block_rows = []
    for s1, row in zip(size_outer, block_rows_raw):
        shapes = [(s1, s2) for s2 in size_inner]
        row_np = [_convert_to_numpy(leaf, only_pandas=False) for leaf in row]
        row_reshaped = _reshape_list(row_np, shapes)
        row_concatenated = np.concatenate(row_reshaped, axis=1)
        block_rows.append(row_concatenated)

    matrix = np.concatenate(block_rows, axis=0)

    _check_dimensions_matrix(matrix, flat_outer, flat_inner)
    return matrix
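A minimal sketch with scalar leaves, assuming block_tree_to_matrix and the helpers it uses (_convert_to_numpy, _reshape_list) are importable from the surrounding module; the block tree is hypothetical and mirrors the outer structure, with one block per (outer leaf, inner leaf) pair:

import numpy as np

block_tree = {"u": (1.0, 2.0), "v": (3.0, 4.0)}
matrix = block_tree_to_matrix(
    block_tree,
    outer_tree={"u": 1.0, "v": 2.0},
    inner_tree=(3.0, 4.0),
)
print(matrix)  # [[1. 2.]
               #  [3. 4.]]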
Code example #5
def _update_bounds_and_flatten(nan_tree, bounds, direction):
    registry = get_registry(extended=True, data_col=direction)
    flat_nan_tree = tree_leaves(nan_tree, registry=registry)

    if bounds is not None:

        registry = get_registry(extended=True)
        flat_bounds = tree_leaves(bounds, registry=registry)

        separator = 10 * "$"
        params_names = leaf_names(nan_tree,
                                  registry=registry,
                                  separator=separator)
        bounds_names = leaf_names(bounds,
                                  registry=registry,
                                  separator=separator)

        flat_nan_dict = dict(zip(params_names, flat_nan_tree))

        invalid = {"names": [], "bounds": []}
        for bounds_name, bounds_leaf in zip(bounds_names, flat_bounds):

            # if a bounds leaf is None, we treat it as saying that the corresponding
            # subtree of params has no bounds.
            if bounds_leaf is not None:
                if bounds_name in flat_nan_dict:
                    flat_nan_dict[bounds_name] = bounds_leaf
                else:
                    invalid["names"].append(bounds_name)
                    invalid["bounds"].append(bounds_leaf)

        if invalid["bounds"]:
            msg = (
                f"{direction} could not be matched to params pytree. The bounds "
                f"{invalid['bounds']} with names {invalid['names']} are not part of "
                "params.")
            raise InvalidBoundsError(msg)

        flat_nan_tree = list(flat_nan_dict.values())

    updated = np.array(flat_nan_tree, dtype=np.float64)
    return updated
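A minimal sketch, assuming the helper above is in scope together with pybaum's tree_map and estimagic's get_registry: bounds supplied for a subset of params overwrite the corresponding NaN entries, and everything else stays NaN.

import numpy as np
from pybaum import tree_map

params = {"a": 1.0, "b": 2.0}
registry = get_registry(extended=True)
nan_tree = tree_map(lambda leaf: np.nan, params, registry=registry)

flat = _update_bounds_and_flatten(nan_tree, {"a": 0.0}, direction="lower_bound")
print(flat)  # [ 0. nan]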
Code example #6
def _convert_evals_to_numpy(raw_evals,
                            key,
                            registry,
                            is_scalar_out=False,
                            is_vector_out=False):
    """harmonize the output of the function evaluations.

    The raw_evals might contain dictionaries of which we only need one entry, scalar
    np.nan where we need arrays filled with np.nan or pandas objects. The processed
    evals only contain numpy arrays.

    """
    # get rid of dictionaries
    evals = [
        val[key] if isinstance(val, dict) and key is not None else val
        for val in raw_evals
    ]

    # convert pytrees to arrays
    if is_scalar_out:
        evals = [
            np.array([val], dtype=float) if not _is_scalar_nan(val) else val
            for val in evals
        ]

    elif is_vector_out:
        evals = [
            val.astype(float) if not _is_scalar_nan(val) else val
            for val in evals
        ]
    else:
        evals = [
            np.array(tree_leaves(val, registry=registry), dtype=np.float64)
            if not _is_scalar_nan(val) else val for val in evals
        ]

    # find out the correct output shape
    try:
        array = next(x for x in evals
                     if hasattr(x, "shape") or isinstance(x, dict))
        out_shape = array.shape
    except StopIteration:
        out_shape = "scalar"

    # convert to correct output shape
    if out_shape == "scalar":
        evals = [np.atleast_1d(val) for val in evals]
    else:
        for i in range(len(evals)):
            if isinstance(evals[i], float) and np.isnan(evals[i]):
                evals[i] = np.full(out_shape, np.nan)

    return evals
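A sketch of the NaN broadcasting, assuming the helper above and estimagic's extended registry are in scope: a failed evaluation (scalar np.nan) is expanded to the common output shape.

import numpy as np

registry = get_registry(extended=True)
raw = [{"value": np.array([1.0, 2.0])}, np.nan]

evals = _convert_evals_to_numpy(raw, key="value", registry=registry)
print(evals[0])  # [1. 2.]
print(evals[1])  # [nan nan]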
Code example #7
def get_bounds(
    params,
    lower_bounds=None,
    upper_bounds=None,
    soft_lower_bounds=None,
    soft_upper_bounds=None,
    registry=None,
    add_soft_bounds=False,
):
    """Consolidate lower/upper bounds with bounds available in params.

    Updates bounds defined in params. If no bounds are available, the entry is set to
    -np.inf for the lower bound and np.inf for the upper bound. If a bound is defined
    both in params and in lower_bounds or upper_bounds, the bound from lower_bounds or
    upper_bounds takes precedence.

    Args:
        params (pytree): The parameter pytree.
        lower_bounds (pytree): Must be a subtree of params.
        upper_bounds (pytree): Must be a subtree of params.
        soft_lower_bounds (pytree): Must be a subtree of params.
        soft_upper_bounds (pytree): Must be a subtree of params.
        registry (dict): pybaum registry.
        add_soft_bounds (bool): Whether the soft bounds should be consolidated into the
            returned bounds. Default False.

    Returns:
        np.ndarray: Consolidated and flattened lower_bounds.
        np.ndarray: Consolidated and flattened upper_bounds.

    """
    fast_path = _is_fast_path(
        params=params,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        add_soft_bounds=add_soft_bounds,
    )
    if fast_path:
        return _get_fast_path_bounds(
            params=params,
            lower_bounds=lower_bounds,
            upper_bounds=upper_bounds,
        )

    registry = get_registry(extended=True) if registry is None else registry
    n_params = len(tree_leaves(params, registry=registry))

    # Fill leaves with np.nan. If params contains a data frame with bounds as a column,
    # that column is NOT overwritten (as long as an extended registry is used).
    nan_tree = tree_map(lambda leaf: np.nan, params, registry=registry)

    lower_flat = _update_bounds_and_flatten(nan_tree,
                                            lower_bounds,
                                            direction="lower_bound")
    upper_flat = _update_bounds_and_flatten(nan_tree,
                                            upper_bounds,
                                            direction="upper_bound")

    if len(lower_flat) != n_params:
        raise InvalidBoundsError(
            "lower_bounds do not match dimension of params.")
    if len(upper_flat) != n_params:
        raise InvalidBoundsError(
            "upper_bounds do not match dimension of params.")

    lower_flat[np.isnan(lower_flat)] = -np.inf
    upper_flat[np.isnan(upper_flat)] = np.inf

    if add_soft_bounds:
        lower_flat_soft = _update_bounds_and_flatten(
            nan_tree, soft_lower_bounds, direction="soft_lower_bound")
        lower_flat_soft[np.isnan(lower_flat_soft)] = -np.inf
        lower_flat = np.maximum(lower_flat, lower_flat_soft)

        upper_flat_soft = _update_bounds_and_flatten(
            nan_tree, soft_upper_bounds, direction="soft_upper_bound")
        upper_flat_soft[np.isnan(upper_flat_soft)] = np.inf
        upper_flat = np.minimum(upper_flat, upper_flat_soft)

    if (lower_flat > upper_flat).any():
        msg = "Invalid bounds. Some lower bounds are larger than upper bounds."
        raise InvalidBoundsError(msg)

    return lower_flat, upper_flat
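A minimal usage sketch, assuming get_bounds is importable: entries without bounds fall back to -inf and inf, while the lower bound supplied for "b" is picked up.

import numpy as np

params = {"a": np.array([0.5, 1.5]), "b": 2.0}
lower, upper = get_bounds(params, lower_bounds={"b": 0.0})
print(lower)  # [-inf -inf   0.]
print(upper)  # [inf inf inf]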
Code example #8
def second_derivative(
    func,
    params,
    *,
    func_kwargs=None,
    method="central_cross",
    n_steps=1,
    base_steps=None,
    scaling_factor=1,
    lower_bounds=None,
    upper_bounds=None,
    step_ratio=2,
    min_steps=None,
    f0=None,
    n_cores=DEFAULT_N_CORES,
    error_handling="continue",
    batch_evaluator="joblib",
    return_func_value=False,
    return_info=False,
    key=None,
):
    """Evaluate second derivative of func at params according to method and step options

    Internally, the function is converted such that it maps from a 1d array to a 1d
    array. Then the Hessians of that function are calculated. The resulting derivative
    estimate is always a :class:`numpy.ndarray`.

    The parameters and the function output can be pandas objects (Series or DataFrames
    with a value column). In that case the output of second_derivative is also a pandas
    object with appropriate index and columns.

    For a detailed description of all options that influence the step size, as well as
    an explanation of how steps are adjusted to bounds in case of a conflict, see
    :func:`~estimagic.differentiation.generate_steps.generate_steps`.

    Args:
        func (callable): Function of which the derivative is calculated.
        params (numpy.ndarray, pandas.Series or pandas.DataFrame): 1d numpy array or
            :class:`pandas.DataFrame` with parameters at which the derivative is
            calculated. If it is a DataFrame, it can contain the columns "lower_bound"
            and "upper_bound" for bounds. See :ref:`params`.
        func_kwargs (dict): Additional keyword arguments for func, optional.
        method (str): One of {"forward", "backward", "central_average", "central_cross"}
            These correspond to the finite difference approximations defined in
            equations [7, x, 8, 9] in Rideout [2009], where ("backward", x) is not found
            in Rideout [2009] but is the natural extension of equation 7 to the backward
            case. Default "central_cross".
        n_steps (int): Number of steps needed. For central methods, this is
            the number of steps per direction. It is 1 if no Richardson extrapolation
            is used.
        base_steps (numpy.ndarray, optional): 1d array of the same length as params.
            base_steps * scaling_factor is the absolute value of the first (and possibly
            only) step used in the finite differences approximation of the derivative.
            If base_steps * scaling_factor conflicts with bounds, the actual steps will
            be adjusted. If base_steps is not provided, it will be determined according
            to a rule of thumb as long as this does not conflict with min_steps.
        scaling_factor (numpy.ndarray or float): Scaling factor which is applied to
            base_steps. If it is a numpy.ndarray, it needs to be as long as params.
            scaling_factor is useful if you want to increase or decrease the base_step
            relative to the rule-of-thumb or user provided base_step, for example to
            benchmark the effect of the step size. Default 1.
        lower_bounds (numpy.ndarray): 1d array with lower bounds for each parameter. If
            params is a DataFrame and has the column "lower_bound", this will be taken
            as lower_bounds if no lower_bounds have been provided explicitly.
        upper_bounds (numpy.ndarray): 1d array with upper bounds for each parameter. If
            params is a DataFrame and has the column "upper_bound", this will be taken
            as upper_bounds if no upper_bounds have been provided explicitly.
        step_ratio (float, numpy.array): Ratio between two consecutive Richardson
            extrapolation steps in the same direction. Default 2.0. Has to be larger
            than one. The step ratio is only used if n_steps > 1.
        min_steps (numpy.ndarray): Minimal possible step sizes that can be chosen to
            accommodate bounds. Must have same length as params. By default min_steps is
            equal to base_steps, i.e. the step size is not decreased beyond what is optimal
            according to the rule of thumb.
        f0 (numpy.ndarray): 1d numpy array with func(x), optional.
        n_cores (int): Number of processes used to parallelize the function
            evaluations. Default 1.
        error_handling (str): One of "continue" (catch errors and continue to calculate
            derivative estimates. In this case, some derivative estimates can be
            missing but no errors are raised), "raise" (catch errors and continue
            to calculate derivative estimates at first but raise an error if all
            evaluations for one parameter failed) and "raise_strict" (raise an error
            as soon as a function evaluation fails).
        batch_evaluator (str or callable): Name of a pre-implemented batch evaluator
            (currently 'joblib' and 'pathos_mp') or Callable with the same interface
            as the estimagic batch_evaluators.
        return_func_value (bool): If True, return function value at params, stored in
            output dict under "func_value". Default False. This is useful when using
            second_derivative during optimization.
        return_info (bool): If True, return additional information on function
            evaluations and internal derivative candidates, stored in output dict under
            "func_evals" and "derivative_candidates". Derivative candidates are only
            returned if n_steps > 1. Default False.
        key (str): If func returns a dictionary, take the derivative of
            func(params)[key].

    Returns:
        result (dict): Result dictionary with keys:
            - "derivative" (numpy.ndarray, pandas.Series or pandas.DataFrame): The
                estimated second derivative of func at params. The shape of the output
                depends on the dimension of params and func(params):

                - f: R -> R leads to shape (1,), usually called second derivative
                - f: R^m -> R leads to shape (m, m), usually called Hessian
                - f: R -> R^n leads to shape (n,), usually called Hessian
                - f: R^m -> R^n leads to shape (n, m, m), usually called Hessian tensor

            - "func_value" (numpy.ndarray, pandas.Series or pandas.DataFrame): Function
                value at params, returned if return_func_value is True.

            - "func_evals_one_step" (pandas.DataFrame): Function evaluations produced by
                internal derivative method when altering the params vector at one
                dimension, returned if return_info is True.

            - "func_evals_two_step" (pandas.DataFrame): This features is not implemented
                yet and therefore set to None. Once implemented it will contain
                function evaluations produced by internal derivative method when
                altering the params vector at two dimensions, returned if return_info is
                True.

            - "func_evals_cross_step" (pandas.DataFrame): This features is not
                implemented yet and therefore set to None. Once implemented it will
                contain function evaluations produced by internal derivative method when
                altering the params vector at two dimensions in different directions,
                returned if return_info is True.

    """
    lower_bounds, upper_bounds = get_bounds(params, lower_bounds, upper_bounds)

    # handle keyword arguments
    func_kwargs = {} if func_kwargs is None else func_kwargs
    partialed_func = functools.partial(func, **func_kwargs)

    # convert params to numpy
    registry = get_registry(extended=True)
    x, params_treedef = tree_flatten(params, registry=registry)
    x = np.atleast_1d(x).astype(np.float64)

    if np.isnan(x).any():
        raise ValueError("The parameter vector must not contain NaNs.")

    implemented_methods = {
        "forward", "backward", "central_average", "central_cross"
    }
    if method not in implemented_methods:
        raise ValueError(f"Method has to be in {implemented_methods}.")

    # generate the step array
    steps = generate_steps(
        x=x,
        method=("central" if "central" in method else method),
        n_steps=n_steps,
        target="second_derivative",
        base_steps=base_steps,
        scaling_factor=scaling_factor,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        step_ratio=step_ratio,
        min_steps=min_steps,
    )

    # generate parameter vectors at which func has to be evaluated as numpy arrays
    evaluation_points = {"one_step": [], "two_step": [], "cross_step": []}
    for step_arr in steps:
        # single direction steps
        for i, j in product(range(n_steps), range(len(x))):
            if np.isnan(step_arr[i, j]):
                evaluation_points["one_step"].append(np.nan)
            else:
                point = x.copy()
                point[j] += step_arr[i, j]
                evaluation_points["one_step"].append(point)
        # two and cross direction steps
        for i, j, k in product(range(n_steps), range(len(x)), range(len(x))):
            if j > k or np.isnan(step_arr[i, j]) or np.isnan(step_arr[i, k]):
                evaluation_points["two_step"].append(np.nan)
                evaluation_points["cross_step"].append(np.nan)
            else:
                point = x.copy()
                point[j] += step_arr[i, j]
                point[k] += step_arr[i, k]
                evaluation_points["two_step"].append(point)
                if j == k:
                    evaluation_points["cross_step"].append(np.nan)
                else:
                    point = x.copy()
                    point[j] += step_arr[i, j]
                    point[k] -= step_arr[i, k]
                    evaluation_points["cross_step"].append(point)

    # convert the numpy arrays to whatever is needed by func
    evaluation_points = {
        # entries are either a numpy.ndarray or np.nan; we unflatten only non-nan entries
        step_type:
        [_unflatten_if_not_nan(p, params_treedef, registry) for p in points]
        for step_type, points in evaluation_points.items()
    }

    # we always evaluate f0, so we can fall back to one-sided derivatives if
    # two-sided derivatives fail. The extra cost is negligible in most cases.
    if f0 is None:
        evaluation_points["one_step"].append(params)

    # do the function evaluations for one and two step, including error handling
    batch_error_handling = "raise" if error_handling == "raise_strict" else "continue"
    raw_evals = _nan_skipping_batch_evaluator(
        func=partialed_func,
        arguments=list(
            itertools.chain.from_iterable(evaluation_points.values())),
        n_cores=n_cores,
        error_handling=batch_error_handling,
        batch_evaluator=batch_evaluator,
    )

    # extract information on exceptions that occurred during function evaluations
    exc_info = "\n\n".join([val for val in raw_evals if isinstance(val, str)])
    raw_evals = [
        val if not isinstance(val, str) else np.nan for val in raw_evals
    ]

    n_one_step, n_two_step, n_cross_step = map(len, evaluation_points.values())
    raw_evals = {
        "one_step": raw_evals[:n_one_step],
        "two_step": raw_evals[n_one_step:n_two_step + n_one_step],
        "cross_step": raw_evals[n_two_step + n_one_step:],
    }

    # store full function value at params as func_value and a processed version of it
    # that we need to calculate derivatives as f0
    if f0 is None:
        f0 = raw_evals["one_step"][-1]
        raw_evals["one_step"] = raw_evals["one_step"][:-1]
    func_value = f0

    f0_tree = f0[key] if key is not None and isinstance(f0, dict) else f0
    f0 = tree_leaves(f0_tree, registry=registry)
    f0 = np.array(f0, dtype=np.float64)

    # convert the raw evaluations to numpy arrays
    raw_evals = {
        step_type: _convert_evals_to_numpy(evals, key, registry)
        for step_type, evals in raw_evals.items()
    }

    # reshape arrays into dimension (n_steps, dim_f, dim_x) or (n_steps, dim_f, dim_x,
    # dim_x) for finite differences
    evals = {}
    evals["one_step"] = _reshape_one_step_evals(raw_evals["one_step"], n_steps,
                                                len(x))
    evals["two_step"] = _reshape_two_step_evals(raw_evals["two_step"], n_steps,
                                                len(x))
    evals["cross_step"] = _reshape_cross_step_evals(raw_evals["cross_step"],
                                                    n_steps, len(x), f0)

    # apply finite difference formulae
    hess_candidates = {}
    for m in ["forward", "backward", "central_average", "central_cross"]:
        hess_candidates[m] = finite_differences.hessian(evals, steps, f0, m)

    # get the best derivative estimate out of all derivative estimates that could be
    # calculated, given the function evaluations.
    orders = {
        "central_cross":
        ["central_cross", "central_average", "forward", "backward"],
        "central_average":
        ["central_average", "central_cross", "forward", "backward"],
        "forward": ["forward", "backward", "central_average", "central_cross"],
        "backward":
        ["backward", "forward", "central_average", "central_cross"],
    }

    if n_steps == 1:
        hess = _consolidate_one_step_derivatives(hess_candidates,
                                                 orders[method])
        updated_candidates = None
    else:
        raise ValueError(
            "Richardson extrapolation is not implemented for the second derivative yet."
        )

    # raise error if necessary
    if error_handling in ("raise", "raise_strict") and np.isnan(hess).any():
        raise Exception(exc_info)

    # results processing
    derivative = hessian_to_block_tree(hess, f0_tree, params)

    result = {"derivative": derivative}
    if return_func_value:
        result["func_value"] = func_value
    if return_info:
        info = _collect_additional_info(steps,
                                        evals,
                                        updated_candidates,
                                        target="second_derivative")
        result = {**result, **info}
    return result
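A minimal usage sketch, assuming second_derivative is importable from estimagic: for a quadratic form the default central_cross method recovers the exact Hessian up to floating-point noise.

import numpy as np

def sphere(x):
    return x @ x  # f: R^3 -> R, exact Hessian is 2 * np.eye(3)

res = second_derivative(sphere, params=np.array([1.0, 2.0, 3.0]))
print(np.round(res["derivative"], 4))  # approximately 2 * np.eye(3)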
Code example #9
def first_derivative(
    func,
    params,
    *,
    func_kwargs=None,
    method="central",
    n_steps=1,
    base_steps=None,
    scaling_factor=1,
    lower_bounds=None,
    upper_bounds=None,
    step_ratio=2,
    min_steps=None,
    f0=None,
    n_cores=DEFAULT_N_CORES,
    error_handling="continue",
    batch_evaluator="joblib",
    return_func_value=False,
    return_info=False,
    key=None,
):
    """Evaluate first derivative of func at params according to method and step options.

    Internally, the function is converted such that it maps from a 1d array to a 1d
    array. Then the Jacobian of that function is calculated.

    The parameters and the function output can be estimagic-pytrees; for more details
    on estimagic-pytrees see :ref:`eeppytrees`. By default the resulting Jacobian will
    be returned as a block-pytree.

    For a detailed description of all options that influence the step size as well as an
    explanation of how steps are adjusted to bounds in case of a conflict, see
    :func:`~estimagic.differentiation.generate_steps.generate_steps`.

    Args:
        func (callable): Function of which the derivative is calculated.
        params (pytree): A pytree. See :ref:`params`.
        func_kwargs (dict): Additional keyword arguments for func, optional.
        method (str): One of ["central", "forward", "backward"], default "central".
        n_steps (int): Number of steps needed. For central methods, this is
            the number of steps per direction. It is 1 if no Richardson extrapolation
            is used.
        base_steps (numpy.ndarray, optional): 1d array of the same length as params.
            base_steps * scaling_factor is the absolute value of the first (and possibly
            only) step used in the finite differences approximation of the derivative.
            If base_steps * scaling_factor conflicts with bounds, the actual steps will
            be adjusted. If base_steps is not provided, it will be determined according
            to a rule of thumb as long as this does not conflict with min_steps.
        scaling_factor (numpy.ndarray or float): Scaling factor which is applied to
            base_steps. If it is a numpy.ndarray, it needs to be as long as params.
            scaling_factor is useful if you want to increase or decrease the base_step
            relative to the rule-of-thumb or user provided base_step, for example to
            benchmark the effect of the step size. Default 1.
        lower_bounds (pytree): Lower bounds for params. Must be a subtree of params.
        upper_bounds (pytree): Upper bounds for params. Must be a subtree of params.
        step_ratio (float, numpy.array): Ratio between two consecutive Richardson
            extrapolation steps in the same direction. Default 2.0. Has to be larger
            than one. The step ratio is only used if n_steps > 1.
        min_steps (numpy.ndarray): Minimal possible step sizes that can be chosen to
            accommodate bounds. Must have same length as params. By default min_steps is
            equal to base_steps, i.e. the step size is not decreased beyond what is optimal
            according to the rule of thumb.
        f0 (numpy.ndarray): 1d numpy array with func(x), optional.
        n_cores (int): Number of processes used to parallelize the function
            evaluations. Default 1.
        error_handling (str): One of "continue" (catch errors and continue to calculate
            derivative estimates. In this case, some derivative estimates can be
            missing but no errors are raised), "raise" (catch errors and continue
            to calculate derivative estimates at first but raise an error if all
            evaluations for one parameter failed) and "raise_strict" (raise an error
            as soon as a function evaluation fails).
        batch_evaluator (str or callable): Name of a pre-implemented batch evaluator
            (currently 'joblib' and 'pathos_mp') or Callable with the same interface
            as the estimagic batch_evaluators.
        return_func_value (bool): If True, return function value at params, stored in
            output dict under "func_value". Default False. This is useful when using
            first_derivative during optimization.
        return_info (bool): If True, return additional information on function
            evaluations and internal derivative candidates, stored in output dict under
            "func_evals" and "derivative_candidates". Derivative candidates are only
            returned if n_steps > 1. Default False.
        key (str): If func returns a dictionary, take the derivative of
            func(params)[key].

    Returns:
        result (dict): Result dictionary with keys:
            - "derivative" (numpy.ndarray, pandas.Series or pandas.DataFrame): The
                estimated first derivative of func at params. The shape of the output
                depends on the dimension of params and func(params):

                - f: R -> R leads to shape (1,), usually called derivative
                - f: R^m -> R leads to shape (m,), usually called gradient
                - f: R -> R^n leads to shape (n, 1), usually called Jacobian
                - f: R^m -> R^n leads to shape (n, m), usually called Jacobian

            - "func_value" (numpy.ndarray, pandas.Series or pandas.DataFrame): Function
                value at params, returned if return_func_value is True.

            - "func_evals" (pandas.DataFrame): Function evaluations produced by internal
                derivative method, returned if return_info is True.

            - "derivative_candidates" (pandas.DataFrame): Derivative candidates from
                Richardson extrapolation, returned if return_info is True and n_steps >
                1.

    """
    _is_fast_params = isinstance(params, np.ndarray) and params.ndim == 1
    registry = get_registry(extended=True)

    lower_bounds, upper_bounds = get_bounds(params, lower_bounds, upper_bounds)

    # handle keyword arguments
    func_kwargs = {} if func_kwargs is None else func_kwargs
    partialed_func = functools.partial(func, **func_kwargs)

    # convert params to numpy
    if not _is_fast_params:
        x, params_treedef = tree_flatten(params, registry=registry)
        x = np.array(x, dtype=np.float64)
    else:
        x = params.astype(float)

    if np.isnan(x).any():
        raise ValueError("The parameter vector must not contain NaNs.")

    # generate the step array
    steps = generate_steps(
        x=x,
        method=method,
        n_steps=n_steps,
        target="first_derivative",
        base_steps=base_steps,
        scaling_factor=scaling_factor,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        step_ratio=step_ratio,
        min_steps=min_steps,
    )

    # generate parameter vectors at which func has to be evaluated as numpy arrays
    evaluation_points = []
    for step_arr in steps:
        for i, j in product(range(n_steps), range(len(x))):
            if np.isnan(step_arr[i, j]):
                evaluation_points.append(np.nan)
            else:
                point = x.copy()
                point[j] += step_arr[i, j]
                evaluation_points.append(point)

    # convert the numpy arrays to whatever is needed by func
    if not _is_fast_params:
        evaluation_points = [
            # entries are either a numpy.ndarray or np.nan
            _unflatten_if_not_nan(p, params_treedef, registry)
            for p in evaluation_points
        ]

    # we always evaluate f0, so we can fall back to one-sided derivatives if
    # two-sided derivatives fail. The extra cost is negligible in most cases.
    if f0 is None:
        evaluation_points.append(params)

    # do the function evaluations, including error handling
    batch_error_handling = "raise" if error_handling == "raise_strict" else "continue"
    raw_evals = _nan_skipping_batch_evaluator(
        func=partialed_func,
        arguments=evaluation_points,
        n_cores=n_cores,
        error_handling=batch_error_handling,
        batch_evaluator=batch_evaluator,
    )

    # extract information on exceptions that occurred during function evaluations
    exc_info = "\n\n".join([val for val in raw_evals if isinstance(val, str)])
    raw_evals = [
        val if not isinstance(val, str) else np.nan for val in raw_evals
    ]

    # store full function value at params as func_value and a processed version of it
    # that we need to calculate derivatives as f0
    if f0 is None:
        f0 = raw_evals[-1]
        raw_evals = raw_evals[:-1]
    func_value = f0

    use_key = key is not None and isinstance(f0, dict)
    f0_tree = f0[key] if use_key else f0
    scalar_out = np.isscalar(f0_tree)
    vector_out = isinstance(f0_tree, np.ndarray) and f0_tree.ndim == 1

    if scalar_out:
        f0 = np.array([f0_tree], dtype=float)
    elif vector_out:
        f0 = f0_tree.astype(float)
    else:
        f0 = tree_leaves(f0_tree, registry=registry)
        f0 = np.array(f0, dtype=np.float64)

    # convert the raw evaluations to numpy arrays
    raw_evals = _convert_evals_to_numpy(
        raw_evals=raw_evals,
        key=key,
        registry=registry,
        is_scalar_out=scalar_out,
        is_vector_out=vector_out,
    )

    # apply finite difference formulae
    evals = np.array(raw_evals).reshape(2, n_steps, len(x), -1)
    evals = np.transpose(evals, axes=(0, 1, 3, 2))
    evals = Evals(pos=evals[0], neg=evals[1])

    jac_candidates = {}
    for m in ["forward", "backward", "central"]:
        jac_candidates[m] = finite_differences.jacobian(evals, steps, f0, m)

    # get the best derivative estimate out of all derivative estimates that could be
    # calculated, given the function evaluations.
    orders = {
        "central": ["central", "forward", "backward"],
        "forward": ["forward", "backward"],
        "backward": ["backward", "forward"],
    }

    if n_steps == 1:
        jac = _consolidate_one_step_derivatives(jac_candidates, orders[method])
        updated_candidates = None
    else:
        richardson_candidates = _compute_richardson_candidates(
            jac_candidates, steps, n_steps)
        jac, updated_candidates = _consolidate_extrapolated(
            richardson_candidates)

    # raise error if necessary
    if error_handling in ("raise", "raise_strict") and np.isnan(jac).any():
        raise Exception(exc_info)

    # results processing
    if _is_fast_params and vector_out:
        derivative = jac
    elif _is_fast_params and scalar_out:
        derivative = jac.flatten()
    else:
        derivative = matrix_to_block_tree(jac, f0_tree, params)

    result = {"derivative": derivative}
    if return_func_value:
        result["func_value"] = func_value
    if return_info:
        info = _collect_additional_info(steps,
                                        evals,
                                        updated_candidates,
                                        target="first_derivative")
        result = {**result, **info}
    return result
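A minimal usage sketch, assuming first_derivative is importable from estimagic: with 1d numpy params the fast path is taken and the gradient comes back as a flat array.

import numpy as np

def sum_of_squares(x):
    return (x**2).sum()  # gradient is 2 * x

res = first_derivative(sum_of_squares, params=np.array([1.0, 2.0]))
print(np.round(res["derivative"], 6))  # approximately [2. 4.]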
Code example #10
def block_tree_to_hessian(block_hessian, f_tree, params_tree):
    """Convert a block tree to a Hessian array.

    Remark: In comparison to JAX we need this formatting function because we calculate
    the second derivative using second-order finite differences. JAX computes the
    second derivative by applying its jacobian function twice, which produces the
    desired block-tree shape of the Hessian automatically. If we applied our first
    derivative function twice, we would get the same block-tree shape.

    Args:
        block_hessian (pytree): A (block) pytree; must match dimensions of f_tree and
            params_tree.
        f_tree (pytree): The function evaluated at params_tree.
        params_tree (pytree): The parameter pytree.

    Returns:
        hessian (np.ndarray): 3d array containing information stored in block_hessian.

    """
    flat_f = tree_leaves(f_tree)
    flat_p = tree_leaves(params_tree)
    flat_block_tree = tree_leaves(block_hessian)

    flat_f_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_f]
    flat_p_np = [_convert_to_numpy(leaf, only_pandas=True) for leaf in flat_p]

    size_f = [np.size(a) for a in flat_f_np]
    size_p = [np.size(a) for a in flat_p_np]

    n_blocks_f = len(size_f)
    n_blocks_p = len(size_p)

    outer_blocks = [
        flat_block_tree[(n_blocks_p**2) * i:(n_blocks_p**2) * (i + 1)]
        for i in range(n_blocks_f)
    ]

    inner_matrices = []
    for outer_block_dim, list_inner_blocks in zip(size_f, outer_blocks):

        block_rows_raw = [
            list_inner_blocks[n_blocks_p * i:n_blocks_p * (i + 1)]
            for i in range(n_blocks_p)
        ]
        block_rows = []
        for s1, row in zip(size_p, block_rows_raw):
            shapes = [(outer_block_dim, s1, s2) for s2 in size_p]
            row_np = [
                _convert_to_numpy(leaf, only_pandas=False) for leaf in row
            ]
            row_np_3d = [
                leaf[np.newaxis] if leaf.ndim < 3 else leaf for leaf in row_np
            ]
            row_reshaped = _reshape_list(row_np_3d, shapes)
            row_concatenated = np.concatenate(row_reshaped, axis=2)
            block_rows.append(row_concatenated)

        inner_matrix = np.concatenate(block_rows, axis=1)
        inner_matrices.append(inner_matrix)

    hessian = np.concatenate(inner_matrices, axis=0)
    _check_dimensions_hessian(hessian, f_tree, params_tree)
    return hessian
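A minimal sketch with scalar leaves, assuming the function and the helpers it uses are in scope: f_tree and params each flatten to two leaves, so the hypothetical block tree holds one scalar per (f leaf, params leaf, params leaf) triple and the result is a (2, 2, 2) array with hess[i, j, k] = d2 f_i / (dp_j dp_k).

import numpy as np

f_tree = {"u": 1.0, "v": 2.0}
params = (3.0, 4.0)
block_hessian = {
    "u": ((0.0, 1.0), (2.0, 3.0)),
    "v": ((4.0, 5.0), (6.0, 7.0)),
}

hess = block_tree_to_hessian(block_hessian, f_tree, params)
print(hess.shape)  # (2, 2, 2)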