Example #1
 def test_vector_vector(self):
     x0 = np.array([-100.0, 0.2])
     jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
                                    method='2-point')
     jac_diff_3 = approx_derivative(self.fun_vector_vector, x0)
     jac_true = self.jac_vector_vector(x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-5)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-6)
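For orientation: these snippets appear to come from SciPy's test suite for approx_derivative and reference helpers defined on the test class (fun_vector_vector, jac_vector_vector, and so on). A minimal self-contained sketch, with fun as an illustrative stand-in rather than the suite's actual helper:

import numpy as np
from scipy.optimize._numdiff import approx_derivative

def fun(x):
    # stand-in for fun_vector_vector: a small R^2 -> R^2 map
    return np.array([x[0] * np.sin(x[1]), x[1] * np.cos(x[0])])

x0 = np.array([-100.0, 0.2])
J_2 = approx_derivative(fun, x0, method='2-point')  # one-sided differences
J_3 = approx_derivative(fun, x0)                    # default '3-point' (central)
print(J_2.shape)  # (2, 2): rows are outputs of fun, columns are inputs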
Example #2
 def test_scalar_vector(self):
     x0 = 0.5
     jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
                                    method='2-point')
     jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0)
     jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
Example #3
 def test_non_numpy(self):
     x0 = 1.0
     jac_true = self.jac_non_numpy(x0)
     jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
                                    method='2-point')
     jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-8)
Example #4
 def test_scalar_scalar(self):
     x0 = 1.0
     jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
                                    method='2-point')
     jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0)
     jac_true = self.jac_scalar_scalar(x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
Example #5
 def test_equivalence(self):
     structure = np.ones((self.n, self.n), dtype=int)
     groups = np.arange(self.n)
     for method in ['2-point', '3-point', 'cs']:
         J_dense = approx_derivative(self.fun, self.x0, method=method)
         J_sparse = approx_derivative(
             self.fun, self.x0, sparsity=(structure, groups), method=method)
         assert_equal(J_dense, J_sparse.toarray())
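A rough self-contained sketch of the sparsity machinery used above, assuming a tridiagonal Jacobian (structure and fun below are illustrative):

import numpy as np
from scipy.optimize._numdiff import approx_derivative, group_columns

n = 5
# tridiagonal sparsity pattern: f_i depends on x_{i-1}, x_i, x_{i+1}
structure = (np.eye(n, dtype=int) + np.eye(n, k=1, dtype=int)
             + np.eye(n, k=-1, dtype=int))
groups = group_columns(structure)  # columns that may be perturbed together

def fun(x):
    f = x ** 2
    f[:-1] += x[1:]
    f[1:] += x[:-1]
    return f

J = approx_derivative(fun, np.arange(1.0, n + 1),
                      sparsity=(structure, groups))
# the result is sparse; J.toarray() recovers the dense Jacobian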
Example #6
 def test_custom_rel_step(self):
     x0 = np.array([-0.1, 0.1])
     jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
                                    method='2-point', rel_step=1e-4)
     jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
                                    rel_step=1e-4)
     jac_true = self.jac_vector_vector(x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-2)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-4)
Example #7
 def test_vector_scalar(self):
     x0 = np.array([100.0, -0.5])
     jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                    method='2-point')
     jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0)
     jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                    method='cs')
     jac_true = self.jac_vector_scalar(x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-7)
     assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
Example #8
    def test_non_numpy(self):
        x0 = 1.0
        jac_true = self.jac_non_numpy(x0)
        jac_diff_2 = approx_derivative(self.jac_non_numpy, x0,
                                       method='2-point')
        jac_diff_3 = approx_derivative(self.jac_non_numpy, x0)
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-8)

        # math.exp cannot handle complex arguments, hence this raises
        assert_raises(TypeError, approx_derivative, self.jac_non_numpy, x0,
                      **dict(method='cs'))
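The TypeError above arises because the complex-step ('cs') method feeds complex perturbations into the function, and math.exp, unlike np.exp, rejects complex arguments. A minimal sketch of the distinction:

import math
import numpy as np
from scipy.optimize._numdiff import approx_derivative

def f_math(x):
    return math.exp(x[0])  # raises TypeError for complex x[0]

def f_np(x):
    return np.exp(x[0])    # complex-aware, so method='cs' works

x0 = np.array([1.0])
print(approx_derivative(f_np, x0, method='cs'))  # ~[2.71828...]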
Example #10
 def test_equivalence(self):
     structure = np.ones((self.n, self.n), dtype=int)
     groups = np.arange(self.n)
     for method in ['2-point', '3-point', 'cs']:
         J_dense = approx_derivative(self.fun, self.x0, method=method)
         J_sparse = approx_derivative(self.fun,
                                      self.x0,
                                      sparsity=(structure, groups),
                                      method=method)
         assert_allclose(J_dense,
                         J_sparse.toarray(),
                         rtol=5e-16,
                         atol=7e-15)
Example #11
 def cjac(x, *args):
     if jac in ['2-point', '3-point', 'cs']:
         return approx_derivative(fun,
                                  x,
                                  method=jac,
                                  args=args,
                                  rel_step=finite_diff_rel_step)
     else:
         return approx_derivative(fun,
                                  x,
                                  method='2-point',
                                  abs_step=epsilon,
                                  args=args)
Example #12
 def test_scalar_scalar_abs_step(self):
     # can approx_derivative use abs_step?
     x0 = 1.0
     jac_diff_2 = approx_derivative(self.fun_scalar_scalar, x0,
                                    method='2-point', abs_step=1.49e-8)
     jac_diff_3 = approx_derivative(self.fun_scalar_scalar, x0,
                                    abs_step=1.49e-8)
     jac_diff_4 = approx_derivative(self.fun_scalar_scalar, x0,
                                    method='cs', abs_step=1.49e-8)
     jac_true = self.jac_scalar_scalar(x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
     assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
Example #13
 def test_vector_scalar_abs_step(self):
     # can approx_derivative use abs_step?
     x0 = np.array([100.0, -0.5])
     jac_diff_2 = approx_derivative(self.fun_vector_scalar, x0,
                                    method='2-point', abs_step=1.49e-8)
     jac_diff_3 = approx_derivative(self.fun_vector_scalar, x0,
                                    abs_step=1.49e-8, rel_step=np.inf)
     jac_diff_4 = approx_derivative(self.fun_vector_scalar, x0,
                                    method='cs', abs_step=1.49e-8)
     jac_true = self.jac_vector_scalar(x0)
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=3e-9)
     assert_allclose(jac_diff_4, jac_true, rtol=1e-12)
Example #14
    def _covar_from_residuals(self):
        _pvals = np.array(self.varying_parameters())

        used_residuals_scaler = False

        def fn_scaler(vals):
            return np.squeeze(self.residuals(_pvals * vals))

        try:
            # we should be able to calculate a Jacobian for a parameter whose
            # value is zero. However, the scaling approach won't work, so
            # force the Jacobian calculation to use unscaled parameters.
            if np.any(_pvals == 0):
                raise FloatingPointError()

            with np.errstate(invalid="raise"):
                jac = approx_derivative(fn_scaler, np.ones_like(_pvals))
            used_residuals_scaler = True
        except FloatingPointError:
            jac = approx_derivative(self.residuals, _pvals)
        finally:
            # using approx_derivative changes the state of the objective's
            # parameters, so make sure they're restored at the end
            self.setp(_pvals)

        # need to create this because GlobalObjective may not have
        # access to all the datapoints being fitted.
        n_datapoints = np.size(jac, 0)

        # covar = inv(J.T @ J), computed via SVD

        # from scipy.optimize.minpack.py
        # eliminates singular parameters
        _, s, VT = np.linalg.svd(jac, full_matrices=False)
        threshold = np.finfo(float).eps * max(jac.shape) * s[0]
        s = s[s > threshold]
        VT = VT[:s.size]
        covar = np.dot(VT.T / s**2, VT)

        if used_residuals_scaler:
            # unwind the scaling.
            covar = covar * np.atleast_2d(_pvals) * np.atleast_2d(_pvals).T

        scale = 1.0
        # scale by reduced chi2 if experimental uncertainties weren't used.
        if not (self.weighted):
            scale = self.chisqr() / (n_datapoints -
                                     len(self.varying_parameters()))

        return covar * scale
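For reference, the SVD step above computes covar = inv(J.T @ J): writing J = U S Vᵀ gives JᵀJ = V S² Vᵀ, so (JᵀJ)⁻¹ = V S⁻² Vᵀ, which is exactly what np.dot(VT.T / s**2, VT) evaluates. Dropping singular values below the threshold regularizes parameters that the data do not constrain.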
Example #15
def approx_jacobian(x, func, epsilon, *args):
    """
    Approximate the Jacobian matrix of a callable function.

    Parameters
    ----------
    x : array_like
        The state vector at which to compute the Jacobian matrix.
    func : callable f(x,*args)
        The vector-valued function.
    epsilon : float
        The perturbation used to determine the partial derivatives.
    args : sequence
        Additional arguments passed to func.

    Returns
    -------
    An array of dimensions ``(lenf, lenx)`` where ``lenf`` is the length
    of the outputs of `func`, and ``lenx`` is the number of elements in
    `x`.

    Notes
    -----
    The approximation is done using forward differences.

    """
    # approx_derivative returns (m, n) == (lenf, lenx)
    jac = approx_derivative(func,
                            x,
                            method='2-point',
                            abs_step=epsilon,
                            args=args)
    # if func returns a scalar, jac.shape will be (lenx,). Make sure
    # it's at least a 2-D array.
    return np.atleast_2d(jac)
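Usage of the wrapper above is straightforward; a small sketch, with an illustrative residual function:

import numpy as np

def residuals(x):
    # Rosenbrock-style residuals, purely for illustration
    return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])

J = approx_jacobian(np.array([0.5, 0.5]), residuals, 1.49e-8)
print(J.shape)  # (2, 2); the result is always at least 2-D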
Example #16
    def __jacobian(self, param, *args, bounds=None, segments="all", **_):
        """
        Approximate the jacobian numerically.
        The calculation is the same as "3-point",
        but we can tell residuals that we are within a jacobian.
        """
        self.progressbar_jacobian.reset()
        g = approx_derivative(
            self.__residuals,
            param,
            method="3-point",
            # This feels pretty bad, passing the latest synthetic spectrum
            # by reference as a parameter of the residuals function object
            f0=self._latest_residual,
            bounds=bounds,
            args=args,
            kwargs={
                "isJacobian": True,
                "segments": segments
            },
        )

        if not np.all(np.isfinite(g)):
            g[~np.isfinite(g)] = 0
            logger.warning(
                "Some derivatives are non-finite, setting them to zero. "
                "Final uncertainties will be inaccurate. "
                "You might be running into the boundary of the grid")

        self._last_jac = np.copy(g)

        return g
Example #17
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
    n = x0.size

    if diff_step is None:
        epsfcn = EPS
    else:
        epsfcn = diff_step**2

    # Compute MINPACK's `diag`, which is the inverse of our `x_scale`;
    # ``x_scale='jac'`` corresponds to ``diag=None``.
    if isinstance(x_scale, string_types) and x_scale == 'jac':
        diag = None
    else:
        diag = 1 / x_scale

    full_output = True
    col_deriv = False
    factor = 100.0

    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(fun, x0, (), full_output, ftol, xtol,
                                          gtol, max_nfev, epsfcn, factor, diag)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(fun, jac, x0, (), full_output,
                                          col_deriv, ftol, xtol, gtol,
                                          max_nfev, factor, diag)

    f = info['fvec']

    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))

    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info['nfev']
    njev = info.get('njev', None)

    status = FROM_MINPACK_TO_COMMON[status]
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(x=x,
                          cost=cost,
                          fun=f,
                          jac=J,
                          grad=g,
                          optimality=g_norm,
                          active_mask=active_mask,
                          nfev=nfev,
                          njev=njev,
                          status=status)
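The diagnostics assembled at the end follow the usual least-squares conventions: cost = ½‖f‖², grad = Jᵀf, and optimality = ‖Jᵀf‖∞, with approx_derivative filling in J only when MINPACK estimated the Jacobian internally (jac is None).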
Example #18
 def test_options(self):
     x0 = np.array([1.0, 1.0])
     c0 = -1.0
     c1 = 1.0
     lb = 0.0
     ub = 2.0
     f0 = self.fun_parametrized(x0, c0, c1=c1)
     rel_step = np.array([-1e-6, 1e-7])
     jac_true = self.jac_parametrized(x0, c0, c1)
     jac_diff_2 = approx_derivative(
         self.fun_parametrized, x0, method='2-point', rel_step=rel_step,
         f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
     jac_diff_3 = approx_derivative(
         self.fun_parametrized, x0, rel_step=rel_step,
         f0=f0, args=(c0,), kwargs=dict(c1=c1), bounds=(lb, ub))
     assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
     assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
Example #19
 def test_vector_vector(self):
     x0 = np.array([-100.0, 0.2])
     jac_diff_2 = approx_derivative(self.fun_vector_vector, x0,
                                    method='2-point',
                                    as_linear_operator=True)
     jac_diff_3 = approx_derivative(self.fun_vector_vector, x0,
                                    as_linear_operator=True)
     jac_diff_4 = approx_derivative(self.fun_vector_vector, x0,
                                    method='cs',
                                    as_linear_operator=True)
     jac_true = self.jac_vector_vector(x0)
     np.random.seed(1)
     for i in range(10):
         p = np.random.uniform(-10, 10, size=x0.shape)
         assert_allclose(jac_diff_2.dot(p), jac_true.dot(p), rtol=1e-5)
         assert_allclose(jac_diff_3.dot(p), jac_true.dot(p), rtol=1e-6)
         assert_allclose(jac_diff_4.dot(p), jac_true.dot(p), rtol=1e-7)
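With as_linear_operator=True the return value is a scipy.sparse.linalg.LinearOperator: each product evaluates a directional derivative with fresh function calls instead of forming the full matrix, which is why the test probes it with random vectors. A short sketch (fun is again an illustrative stand-in):

import numpy as np
from scipy.optimize._numdiff import approx_derivative

def fun(x):
    return np.array([x[0] * np.sin(x[1]), x[1] * np.cos(x[0])])

x0 = np.array([-100.0, 0.2])
J_op = approx_derivative(fun, x0, as_linear_operator=True)
p = np.array([1.0, -2.0])
print(J_op.dot(p))  # approximates J(x0) @ p without building J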
Example #21
 def jac_wrapped(x, f):
     J = approx_derivative(
         fun, x, rel_step=diff_step, method=jac, f0=f,
         bounds=bounds, args=args, kwargs=kwargs)
     J = np.atleast_2d(J)
     if J.ndim > 2:
         raise RuntimeError("`jac` must return at most 2-d array_like.")
     return J
Example #22
            def jac_wrapped(x, f):
                J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
                                      f0=f, bounds=bounds, args=args,
                                      kwargs=kwargs, sparsity=jac_sparsity)
                if J.ndim != 2:  # J is guaranteed not sparse.
                    J = np.atleast_2d(J)

                return J
Example #25
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, scaling, diff_step):
    n = x0.size

    if diff_step is None:
        epsfcn = EPS
    else:
        epsfcn = diff_step ** 2

    if isinstance(scaling, string_types) and scaling == "jac":
        scaling = None

    full_output = True
    col_deriv = False
    factor = 100.0

    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(fun, x0, (), full_output, ftol, xtol, gtol, max_nfev, epsfcn, factor, scaling)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv, ftol, xtol, gtol, max_nfev, factor, scaling
        )

    f = info["fvec"]

    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))

    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info["nfev"]
    njev = info.get("njev", None)

    status = FROM_MINPACK_TO_COMMON[status]
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(
        x=x,
        cost=cost,
        fun=f,
        jac=J,
        grad=g,
        optimality=g_norm,
        active_mask=active_mask,
        nfev=nfev,
        njev=njev,
        status=status,
    )
Example #26
def test_first_derivative_jacobian_richardson(example_function_jacobian_fixtures):
    f = example_function_jacobian_fixtures["func"]
    fprime = example_function_jacobian_fixtures["func_prime"]

    true_fprime = fprime(np.ones(3))
    scipy_fprime = approx_derivative(f, np.ones(3))
    our_fprime = first_derivative(f, np.ones(3), n_steps=3, method="central", n_cores=1)

    aaae(scipy_fprime, our_fprime)
    aaae(true_fprime, our_fprime)
Example #27
    def test_tight_bounds(self):
        x0 = np.array([10.0, 10.0])
        lb = x0 - 3e-9
        ub = x0 + 2e-9
        jac_true = self.jac_vector_vector(x0)
        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, method='2-point', bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-6)
        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, method='2-point',
            rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-6)

        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-6)
        jac_diff = approx_derivative(
            self.fun_vector_vector, x0, rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_true, jac_diff, rtol=1e-6)
Example #29
    def perform(self, node, inputs, outputs):
        (theta, ) = inputs

        # define version of likelihood function to pass to derivative function
        def logl(values):
            return self.likelihood(values)

        # calculate gradients
        grads = approx_derivative(logl, theta, method="2-point")

        outputs[0][0] = grads
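Using method="2-point" here costs one extra likelihood evaluation per parameter on top of the base point, which is usually the right trade-off for expensive likelihoods; the default '3-point' scheme roughly doubles that cost in exchange for higher accuracy.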
Example #30
 def test_scalar_vector(self):
     x0 = 0.5
     jac_diff_2 = approx_derivative(self.fun_scalar_vector, x0,
                                    method='2-point',
                                    as_linear_operator=True)
     jac_diff_3 = approx_derivative(self.fun_scalar_vector, x0,
                                    as_linear_operator=True)
     jac_diff_4 = approx_derivative(self.fun_scalar_vector, x0,
                                    method='cs',
                                    as_linear_operator=True)
     jac_true = self.jac_scalar_vector(np.atleast_1d(x0))
     np.random.seed(1)
     for i in range(10):
         p = np.random.uniform(-10, 10, size=(1,))
         assert_allclose(jac_diff_2.dot(p), jac_true.dot(p),
                         rtol=1e-5)
         assert_allclose(jac_diff_3.dot(p), jac_true.dot(p),
                         rtol=5e-6)
         assert_allclose(jac_diff_4.dot(p), jac_true.dot(p),
                         rtol=5e-6)
Example #31
    def test_all(self):
        A = self.structure(self.n)
        order = np.arange(self.n)
        groups_1 = group_columns(A, order)
        np.random.shuffle(order)
        groups_2 = group_columns(A, order)

        for method, groups, l, u in product(
                ['2-point', '3-point', 'cs'], [groups_1, groups_2],
                [-np.inf, self.lb], [np.inf, self.ub]):
            J = approx_derivative(self.fun, self.x0, method=method,
                                  bounds=(l, u), sparsity=(A, groups))
            assert_(isinstance(J, csr_matrix))
            assert_allclose(J.toarray(), self.J_true, rtol=1e-6)

            rel_step = 1e-8 * np.ones_like(self.x0)
            rel_step[::2] *= -1
            J = approx_derivative(self.fun, self.x0, method=method,
                                  rel_step=rel_step, sparsity=(A, groups))
            assert_allclose(J.toarray(), self.J_true, rtol=1e-5)
Example #34
    def test_with_bounds_3_point(self):
        lb = np.array([1.0, 1.0])
        ub = np.array([2.0, 2.0])

        x0 = np.array([1.0, 2.0])
        jac_true = self.jac_vector_vector(x0)

        jac_diff = approx_derivative(self.fun_vector_vector, x0)
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     bounds=(lb, np.inf))
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     bounds=(-np.inf, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-9)

        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     bounds=(lb, ub))
        assert_allclose(jac_diff, jac_true, rtol=1e-9)
Example #37
    def test_with_bounds_2_point(self):
        lb = -np.ones(2)
        ub = np.ones(2)

        x0 = np.array([-2.0, 0.2])
        assert_raises(ValueError, approx_derivative,
                      self.fun_vector_vector, x0, bounds=(lb, ub))

        x0 = np.array([-1.0, 1.0])
        jac_diff = approx_derivative(self.fun_vector_vector, x0,
                                     method='2-point', bounds=(lb, ub))
        jac_true = self.jac_vector_vector(x0)
        assert_allclose(jac_diff, jac_true, rtol=1e-6)
Example #38
    def _jac_chi_squared(self, p, data=None):
        """
        Utility function to get jacobian for problem.eval_r_norm

        :param p: parameters
        :type p: list
        :param data: x data; discarded, as the defaults can be used.
        :type data: N/A
        :return: jacobian approximation for problem.eval_r_norm
        :rtype: numpy array
        """
        j = approx_derivative(self.problem.eval_r_norm, p)
        return j
Example #40
    def test_bound_switches(self):
        lb = -1e-8
        ub = 1e-8
        x0 = 0.0
        jac_true = self.jac_with_nan(x0)
        jac_diff_2 = approx_derivative(
            self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
            bounds=(lb, ub))
        jac_diff_3 = approx_derivative(
            self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)

        x0 = 1e-8
        jac_true = self.jac_with_nan(x0)
        jac_diff_2 = approx_derivative(
            self.fun_with_nan, x0, method='2-point', rel_step=1e-6,
            bounds=(lb, ub))
        jac_diff_3 = approx_derivative(
            self.fun_with_nan, x0, rel_step=1e-6, bounds=(lb, ub))
        assert_allclose(jac_diff_2, jac_true, rtol=1e-6)
        assert_allclose(jac_diff_3, jac_true, rtol=1e-9)
Example #42
def droot_dpolynomial_fd(p, x, rel_step=None):
    '''
    Finite-differences version for testing purposes.
    '''
    def nearest_root(p):
        rts = np.roots(p)
        if np.imag(x) == 0:
            # keep root real if it was previously so (good idea?)
            rts = np.real(rts[(np.imag(rts) == 0)])
        if rts.size == 0:
            return np.nan
        return rts[np.argmin(np.abs(rts - x))]

    return approx_derivative(nearest_root, p, rel_step=rel_step)
Example #43
def test_absolute_step():
    # test for gh12487
    # if an absolute step is specified for 2-point differences, make sure
    # that the side corresponds to the sign of the step: a positive step
    # uses forward differences, a negative step uses backward differences

    # function has double discontinuity at x = [-1, -1]
    # first component is \/, second component is /\
    def f(x):
        return -np.abs(x[0] + 1) + np.abs(x[1] + 1)

    # check that the forward difference is used
    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=1e-8)
    assert_allclose(grad, [-1.0, 1.0])

    # check that the backwards difference is used
    grad = approx_derivative(f, [-1, -1], method='2-point', abs_step=-1e-8)
    assert_allclose(grad, [1.0, -1.0])

    # check that the forwards difference is used with a step for both
    # parameters
    grad = approx_derivative(f, [-1, -1],
                             method='2-point',
                             abs_step=[1e-8, 1e-8])
    assert_allclose(grad, [-1.0, 1.0])

    # check that we can mix forward/backwards steps.
    grad = approx_derivative(f, [-1, -1],
                             method='2-point',
                             abs_step=[1e-8, -1e-8])
    assert_allclose(grad, [-1.0, -1.0])
    grad = approx_derivative(f, [-1, -1],
                             method='2-point',
                             abs_step=[-1e-8, 1e-8])
    assert_allclose(grad, [1.0, 1.0])

    # the forward step should reverse to a backwards step if it runs into
    # a bound. This is kind of tested in TestAdjustSchemeToBounds, but only
    # for a lower-level function.
    grad = approx_derivative(f, [-1, -1],
                             method='2-point',
                             abs_step=1e-8,
                             bounds=(-np.inf, -1))
    assert_allclose(grad, [1.0, -1.0])

    grad = approx_derivative(f, [-1, -1],
                             method='2-point',
                             abs_step=-1e-8,
                             bounds=(-1, np.inf))
    assert_allclose(grad, [-1.0, 1.0])
Example #44
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, scaling, diff_step):
    n = x0.size

    if diff_step is None:
        epsfcn = EPS
    else:
        epsfcn = diff_step**2

    if scaling == 'jac':
        scaling = None

    full_output = True
    col_deriv = False
    factor = 100.0

    if jac is None:
        if max_nfev is None:
            # n squared to account for Jacobian evaluations.
            max_nfev = 100 * n * (n + 1)
        x, info, status = _minpack._lmdif(
            fun, x0, (), full_output, ftol, xtol, gtol,
            max_nfev, epsfcn, factor, scaling)
    else:
        if max_nfev is None:
            max_nfev = 100 * n
        x, info, status = _minpack._lmder(
            fun, jac, x0, (), full_output, col_deriv,
            ftol, xtol, gtol, max_nfev, factor, scaling)

    f = info['fvec']

    if callable(jac):
        J = jac(x)
    else:
        J = np.atleast_2d(approx_derivative(fun, x))

    cost = 0.5 * np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info['nfev']
    njev = info.get('njev', None)

    status = FROM_MINPACK_TO_COMMON[status]
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(
        x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status)
Example #45
    def _jacobian(
        self,
        param,
        *args,
        bounds=None,
        segments="all",
        step_sizes=None,
        method="2-point",
        **_,
    ):
        """
        Approximate the jacobian numerically.
        The calculation is the same as "2-point",
        but we can tell residuals that we are within a jacobian.

        Note that when we reuse the wavelength grid, the results differ
        slightly for reasons that are not fully understood. The step size
        should therefore be larger than those differences, which is why we
        specify the step size for each parameter.
        """
        self.progressbar_jacobian.reset()

        # Here we replace the scipy version of approx_derivative with our own.
        # The only difference is that we use multiprocessing for the jacobian.
        g = approx_derivative(
            self._residuals,
            param,
            method=method,
            # This feels pretty bad, passing the latest synthetic spectrum
            # by reference as a parameter of the residuals function object
            f0=self._latest_residual,
            abs_step=step_sizes,
            bounds=bounds,
            args=args,
            kwargs={
                "isJacobian": True,
                "segments": segments
            },
        )

        if not np.all(np.isfinite(g)):
            g[~np.isfinite(g)] = 0
            logger.warning(
                "Some derivatives are non-finite, setting them to zero. "
                "Final uncertainties will be inaccurate. "
                "You might be running into the boundary of the grid")
        self._latest_jacobian = np.copy(g)
        return g
Example #46
    def __call__(self, x, *args, **kwds):
        x = np.atleast_1d(x)
        method = dict(complex='cs',
                      central='3-point',
                      forward='2-point',
                      backward='2-point')[self.method]
        options = dict(method=method,
                       rel_step=self.step,
                       args=args,
                       kwargs=kwds,
                       bounds=self.bounds,
                       sparsity=self.sparsity)

        grad = approx_derivative(self.fun, x, **options)

        return grad
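One caveat: scipy's '2-point' scheme is one-sided, and whether it steps forward or backward depends on the sign of the step (compare Examples #31 and #43), so mapping both 'forward' and 'backward' to '2-point' only behaves as advertised if self.step carries the appropriate sign.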
Example #47
    def eval_j(self, params, func=None, **kwargs):
        """
        Approximate the Jacobian using scipy for a given function at a given
        point.

        :param params: The parameter values to find the Jacobian at
        :type params: list
        :param func: Function to find the Jacobian for, defaults to self.eval_r
        :type func: Callable, optional

        :return: Approximation of the Jacobian
        :rtype: numpy array
        """
        if func is None:
            func = self.eval_r

        return approx_derivative(func, params, kwargs=kwargs)
Example #48
def jacobian(param, *args, bounds=None, segments="all", **_):
    """
    Approximate the jacobian numerically.
    The calculation is the same as "3-point",
    but we can tell residuals that we are within a jacobian.
    """
    return approx_derivative(
        residuals,
        param,
        method="3-point",
        # This feels pretty bad, passing the latest synthetic spectrum
        # by reference as a parameter of the residuals function object
        f0=residuals.resid,
        bounds=bounds,
        args=args,
        kwargs={"isJacobian": True, "segments": segments},
    )
Example #49
def call_leastsq(fun, x0, jac, ftol, xtol, gtol, max_nfev, scaling,
                 diff_step, args, options):
    if jac == '3-point':
        warn("jac='3-point' works equivalently to '2-point' "
             "for 'lm' method.")

    if jac in ['2-point', '3-point']:
        jac = None

    if max_nfev is None:
        max_nfev = 0

    if diff_step is None:
        epsfcn = None
    else:
        epsfcn = diff_step**2

    if scaling == 'jac':
        scaling = None

    x, cov_x, info, message, status = leastsq(
        fun, x0, args=args, Dfun=jac, full_output=True, ftol=ftol, xtol=xtol,
        gtol=gtol, maxfev=max_nfev, epsfcn=epsfcn, diag=scaling, **options)

    f = info['fvec']

    if callable(jac):
        J = jac(x, *args)
    else:
        J = approx_derivative(fun, x, args=args)
    J = np.atleast_2d(J)

    obj_value = np.dot(f, f)
    g = J.T.dot(f)
    g_norm = norm(g, ord=np.inf)

    nfev = info['nfev']
    njev = info.get('njev', None)

    status = FROM_MINPACK_TO_COMMON[status]
    active_mask = np.zeros_like(x0, dtype=int)

    return OptimizeResult(
        x=x, fun=f, jac=J, obj_value=obj_value, optimality=g_norm,
        active_mask=active_mask, nfev=nfev, njev=njev, status=status,
        message=message, success=status > 0, x_covariance=cov_x)
Example #50
 def test_no_precomputed_groups(self):
     A = self.structure(self.n)
     J = approx_derivative(self.fun, self.x0, sparsity=A)
     assert_allclose(J.toarray(), self.J_true, rtol=1e-6)
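When only the sparsity structure is supplied, approx_derivative computes the column grouping itself (equivalent to calling group_columns on the structure), and the result is still returned as a sparse matrix, hence the .toarray() in the assertion.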