Example #1
    >>> c, info = ndf.derivative(fun, z0=0, n=6, full_output=True)
    >>> np.allclose(c, [1, 1, 2, 6, 24, 120, 720, 5040])
    True
    >>> np.all(info.error_estimate < 1e-9*c.real)
    True
    >>> (info.function_count, info.iterations, info.failed) == (144, 18, False)
    True


    References
    ----------
    [1] Fornberg, B. (1981).
        Numerical Differentiation of Analytic Functions.
        ACM Transactions on Mathematical Software (TOMS),
        7(4), 512-526. http://doi.org/10.1145/355972.355979
    """
    result = taylor(fun, z0, n=n, **kwds)
    # Convert Taylor coefficients into derivatives: f^(k)(z0) = k! * c_k.
    m = _num_taylor_coefficients(n)
    fact = factorial(np.arange(m))
    if kwds.get('full_output'):
        coefs, info_ = result
        # Scale the error estimates by the same factorials as the coefficients.
        info = _INFO(info_.error_estimate * fact, *info_[1:])
        return coefs * fact, info
    return result * fact


if __name__ == '__main__':
    from numdifftools.testing import test_docstrings
    test_docstrings()
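
# Sketch: why the factorial scaling above recovers derivatives. The Taylor
# series f(z) = sum_k c_k*(z - z0)**k has c_k = f^(k)(z0)/k!, so multiplying
# each coefficient c_k by k! yields the k-th derivative. Quick check with
# exp(z) at z0 = 0, where c_k = 1/k! and every derivative equals 1:
import numpy as np
from scipy.special import factorial

k = np.arange(8)
taylor_coefs = 1.0 / factorial(k)          # Taylor coefficients of exp at 0
derivatives = taylor_coefs * factorial(k)  # f^(k)(0) = k! * c_k
print(np.allclose(derivatives, 1.0))       # True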
Example #2
    True
    '''
    kwargs = {} if kwargs is None else kwargs
    n = len(x)
    # TODO: add scaled stepsize
    f0 = f(*(x,) + args, **kwargs)
    dim = np.atleast_1d(f0).shape  # f0 may be a scalar or an array
    grad = np.zeros((n,) + dim, float)
    ei = np.zeros(np.shape(x), float)  # perturbation, one coordinate at a time
    if not centered:
        # Forward differences: one extra evaluation per coordinate, O(eps) error.
        epsilon = _get_epsilon(x, 2, epsilon, n)
        for k in range(n):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*(x + ei,) + args, **kwargs) - f0) / epsilon[k]
            ei[k] = 0.0
    else:
        # Centered differences: two evaluations per coordinate, O(eps**2) error.
        epsilon = _get_epsilon(x, 3, epsilon, n) / 2.
        for k in range(n):
            ei[k] = epsilon[k]
            grad[k, :] = (f(*(x + ei,) + args, **kwargs) -
                          f(*(x - ei,) + args, **kwargs)) / (2 * epsilon[k])
            ei[k] = 0.0
    grad = grad.squeeze()
    # Swap the first two axes so rows follow the output of f, columns follow x.
    axes = [0, 1, 2][:grad.ndim]
    axes[:2] = axes[1::-1]
    return np.transpose(grad, axes=axes).squeeze()

if __name__ == '__main__':
    from numdifftools.testing import test_docstrings
    test_docstrings()
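
# Self-contained sketch of the forward vs. centered differences computed
# above. The step helper _get_epsilon is not shown in this snippet, so a
# fixed step is used here purely for illustration:
import numpy as np

def fd_gradient(f, x, eps=1e-6, centered=False):
    x = np.asarray(x, dtype=float)
    grad = np.zeros_like(x)
    ei = np.zeros_like(x)
    for k in range(x.size):
        ei[k] = eps
        if centered:
            # centered difference: error O(eps**2)
            grad[k] = (f(x + ei) - f(x - ei)) / (2 * eps)
        else:
            # forward difference: error O(eps)
            grad[k] = (f(x + ei) - f(x)) / eps
        ei[k] = 0.0
    return grad

print(fd_gradient(lambda v: v[0]**2 + 3.0 * v[1], [1.0, 2.0], centered=True))
# approximately [2. 3.]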
Example #3
    >>> z = lambda xy: sin(xy[0]-xy[1]) + xy[1]*exp(xy[0])
    >>> dz = nd.Gradient(z)
    >>> grad2 = dz([1, 1])
    >>> np.allclose(grad2, [ 3.71828183,  1.71828183])
    True

    # At the global minimizer (1,1) of the Rosenbrock function,
    # compute the gradient. It should be essentially zero.

    >>> rosen = lambda x: (1-x[0])**2 + 105.*(x[1]-x[0]**2)**2
    >>> rd = nd.Gradient(rosen)
    >>> grad3 = rd([1,1])
    >>> np.allclose(grad3,[0, 0])
    True

    See also
    --------
    Hessian, Jacobian
    """
    def __call__(self, x, *args, **kwds):
        return super(Gradient, self).__call__(
            np.atleast_1d(x).ravel(), *args, **kwds).squeeze()


if __name__ == '__main__':
    from numdifftools.testing import test_docstrings
    test_docstrings(__file__)
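
# Usage sketch for the __call__ above (assumes numdifftools is installed and
# imported as nd): the input is raveled to 1-D before differentiating and the
# result is squeezed, so array-shaped points like [[1., 1.]] work transparently.
import numpy as np
import numdifftools as nd

rosen = lambda x: (1 - x[0])**2 + 105. * (x[1] - x[0]**2)**2
grad = nd.Gradient(rosen)(np.array([[1.0, 1.0]]))  # raveled to [1., 1.]
print(np.allclose(grad, [0, 0]))                   # True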
Example #4
    >>> def h(z): return 1.0/np.sin(z)**2
    >>> res_h, info = Residue(h, full_output=True, pole_order=2)([0, np.pi])
    >>> np.allclose(res_h, 1)
    True
    >>> (info.error_estimate < 1e-10).all()
    True

    """

    def __init__(self, f, step=None, method="above", order=None,
                 pole_order=1, full_output=False, **options):
        if order is None:
            # Default: the method order is pole_order + 2.
            order = pole_order + 2

        _assert(pole_order < order, "order must be at least pole_order + 1.")
        self.pole_order = pole_order

        super(Residue, self).__init__(f, step=step, method=method,
                                      order=order, full_output=full_output,
                                      **options)

    def _fun(self, z, dz, *args, **kwds):
        # Multiplying by dz**pole_order cancels the pole, so the limit exists.
        return self.fun(z + dz, *args, **kwds) * (dz ** self.pole_order)

    def __call__(self, x, *args, **kwds):
        return self.limit(x, *args, **kwds)


if __name__ == "__main__":
    from numdifftools.testing import test_docstrings

    test_docstrings(__file__)
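
# Sketch of why _fun multiplies by dz**pole_order: for a pole of order m at
# z0, (z - z0)**m * f(z) is analytic near z0, and for m = 1 its limit at z0
# is the residue itself. Quick check on f(z) = 1/z, whose residue at 0 is 1
# (assumes numdifftools.limits is importable, as in this module):
import numpy as np
from numdifftools.limits import Residue

res = Residue(lambda z: 1.0 / z)(0)
print(np.allclose(res, 1))  # True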