Example #1
    def transform_nodes(self, nodes, a, b, verbose=False):
        r"""
        Rescale standardized nodes (for `[-1, 1]`) to a general
        interval `[a, b]`. For a finite interval, a simple linear
        change of variables is used. Otherwise, the following
        transformations are used:

        .. math ::

            [a, \infty] : t = \frac{2}{x+1} + (a-1)

            [-\infty, b] : t = (b+1) - \frac{2}{x+1}

            [-\infty, \infty] : t = \frac{x}{\sqrt{1-x^2}}

        """
        a = mpmathify(a)
        b = mpmathify(b)
        one = mpf(1)
        if (a, b) == (-one, one):
            return nodes
        half = mpf(0.5)
        new_nodes = []
        if (a, b) == (-inf, inf):
            p05 = mpf(-0.5)
            for x, w in nodes:
                x2 = x*x
                px1 = one-x2
                spx1 = px1**p05              # 1/sqrt(1-x^2)
                x = x*spx1                   # t = x/sqrt(1-x^2)
                w *= spx1/px1                # dt/dx = (1-x^2)^(-3/2)
                new_nodes.append((x, w))
        elif a == -inf:
            b1 = b+1
            for x, w in nodes:
                u = 2/(x+one)
                x = b1-u
                w *= half*u**2
                new_nodes.append((x, w))
        elif b == inf:
            a1 = a-1
            for x, w in nodes:
                u = 2/(x+one)
                x = a1+u
                w *= half*u**2
                new_nodes.append((x, w))
        else:
            # Simple linear change of variables
            C = (b-a)/2
            D = (b+a)/2
            for x, w in nodes:
                new_nodes.append((D+C*x, C*w))
        return new_nodes
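
As a sanity check on the finite-interval branch, the linear map above can be applied by hand to the standard three-point Gauss-Legendre nodes; the rescaled weights should then sum to the length of the target interval. This is an illustrative sketch, not part of the original class; the node values are the standard ones for `[-1, 1]`.

from mpmath import mp, mpf, sqrt

mp.dps = 15
# three-point Gauss-Legendre nodes and weights on [-1, 1]
nodes = [(-sqrt(mpf(3)/5), mpf(5)/9),
         (mpf(0), mpf(8)/9),
         (sqrt(mpf(3)/5), mpf(5)/9)]
a, b = mpf(0), mpf(2)
C = (b-a)/2                      # same linear change of variables
D = (b+a)/2                      # as in the else-branch above
new_nodes = [(D + C*x, C*w) for x, w in nodes]
print sum(w for _, w in new_nodes)    # 2.0, the length of [0, 2]
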
Example #2
def calculate_nome(k):
    """
    Calculate the nome, q, from the value for k.

    Note that `k**2 = m`, where `m` is the parameter as used in
    Abramowitz & Stegun.
    """
    k = mpmathify(k)

    if abs(k) > one:             # range error
        raise ValueError("|k| must not exceed 1")

    if k == zero:
        return zero
    elif k == one:
        return one
    else:
        kprimesquared = one - k**2
        kprime = sqrt(kprimesquared)
        top = ellipk(kprimesquared)
        bottom = ellipk(k**2)

        argument = -pi*top/bottom

        nome = exp(argument)
        return nome
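
A hedged usage sketch: computing the nome directly from the defining formula `q = exp(-pi*K(1-m)/K(m))` with `m = k**2`, here for `k = 1/2`. This assumes, as the code above does, that ``ellipk`` takes the parameter `m` rather than the modulus `k`.

from mpmath import mp, mpf, ellipk, exp, pi

mp.dps = 15
k = mpf(1)/2
m = k**2
q = exp(-pi*ellipk(1 - m)/ellipk(m))
print q    # ~0.01797
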
Example #3
def mnorm(A, p=1):
    r"""
    Gives the matrix (operator) `p`-norm of A. Currently ``p=1`` and ``p=inf``
    are supported:

    ``p=1`` gives the 1-norm (maximal column sum)

    ``p=inf`` gives the `\infty`-norm (maximal row sum).
    You can use the string 'inf' as well as float('inf') or mpf('inf').

    ``p=2`` (not implemented) for a square matrix is the usual spectral
    matrix norm, i.e. the largest singular value.

    ``p='f'`` (or 'F', 'fro', 'Frobenius', 'frobenius') gives the
    Frobenius norm, which is the elementwise 2-norm. The Frobenius norm is an
    approximation of the spectral norm and satisfies

    .. math ::

        \frac{1}{\sqrt{\mathrm{rank}(A)}} \|A\|_F \le \|A\|_2 \le \|A\|_F

    The Frobenius norm lacks some mathematical properties that might
    be expected of a norm.

    For general elementwise `p`-norms, use :func:`norm` instead.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> A = matrix([[1, -1000], [100, 50]])
        >>> mnorm(A, 1)
        mpf('1050.0')
        >>> mnorm(A, inf)
        mpf('1001.0')
        >>> mnorm(A, 'F')
        mpf('1006.2310867787777')

    """
    A = matrix(A)
    if type(p) is not int:
        if type(p) is str and 'frobenius'.startswith(p.lower()):
            return norm(A, 2)
        p = mpmathify(p)
    m, n = A.rows, A.cols
    if p == 1:
        return max(
            fsum((A[i, j] for i in xrange(m)), absolute=1) for j in xrange(n))
    elif p == inf:
        return max(
            fsum((A[i, j] for j in xrange(n)), absolute=1) for i in xrange(m))
    else:
        raise NotImplementedError("matrix p-norm for arbitrary p")
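
A small cross-check of the 1-norm branch (maximal absolute column sum), computed by hand for the matrix from the docstring example:

from mpmath import matrix, mnorm

A = matrix([[1, -1000], [100, 50]])
# column sums of absolute values: |1|+|100| = 101 and |-1000|+|50| = 1050
cols = [abs(A[0, j]) + abs(A[1, j]) for j in range(A.cols)]
print max(cols), mnorm(A, 1)    # 1050.0 1050.0
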
Example #4
def calculate_k(q):
    """
    Calculates the value of k for a particular nome, q,
    using Jacobi theta functions.
    """

    q = mpmathify(q)

    v2 = jtheta(2, 0, q)
    v3 = jtheta(3, 0, q)
    k = v2**2/v3**2        # k = theta_2(0,q)^2 / theta_3(0,q)^2
    return k
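
A hedged round-trip sketch: starting from a nome `q`, recover `k` from the theta quotient used above, then map back to a nome with the `K'/K` formula from calculate_nome (Example #2); the two nomes should agree to working precision.

from mpmath import mp, mpf, jtheta, ellipk, exp, pi

mp.dps = 15
q = mpf('0.05')
k = jtheta(2, 0, q)**2 / jtheta(3, 0, q)**2
q2 = exp(-pi*ellipk(1 - k**2)/ellipk(k**2))
print q, q2    # both 0.05 to working precision
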
Example #5
def interpolant(x):
    # Evaluation closure extracted from odefun (see Example #10); it relies
    # on the enclosing scope for workprec, get_series, mpolyval and
    # return_vector.
    x = mpmathify(x)
    orig = mp.prec
    try:
        mp.prec = workprec          # evaluate at the solver's working precision
        ser, xa, xb = get_series(x)
        y = mpolyval(ser, x-xa)     # evaluate the local Taylor series at x
    finally:
        mp.prec = orig              # always restore the caller's precision
    if return_vector:
        return [+yk for yk in y]    # unary + rounds to the restored precision
    else:
        return +y[0]
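
The fragment illustrates a common mpmath pattern: temporarily raise the working precision, restore it in a ``finally`` block, and round the result back with unary plus. Below is a minimal self-contained version of the same pattern; the function name and its body are purely illustrative.

from mpmath import mp, mpf

def with_extra_prec(x):
    orig = mp.prec
    try:
        mp.prec = orig + 40      # work at higher precision
        y = mpf(x)**2 / 3        # some intermediate computation
    finally:
        mp.prec = orig           # always restore the caller's precision
    return +y                    # unary + rounds to the restored precision

print with_extra_prec('1.1')     # 0.403333333333333
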
Example #6
def norm(x, p=2):
    r"""
    Gives the entrywise `p`-norm of an iterable *x*, i.e. the vector norm
    `\left(\sum_k |x_k|^p\right)^{1/p}`, for any given `1 \le p \le \infty`.

    Special cases:

    If *x* is not iterable, this just returns ``absmax(x)``.

    ``p=1`` gives the sum of absolute values.

    ``p=2`` is the standard Euclidean vector norm.

    ``p=inf`` gives the magnitude of the largest element.

    For *x* a matrix, ``p=2`` is the Frobenius norm.
    For operator matrix norms, use :func:`mnorm` instead.

    You can use the string 'inf' as well as float('inf') or mpf('inf')
    to specify the infinity norm.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> x = matrix([-10, 2, 100])
        >>> norm(x, 1)
        mpf('112.0')
        >>> norm(x, 2)
        mpf('100.5186549850325')
        >>> norm(x, inf)
        mpf('100.0')

    """
    try:
        iter(x)
    except TypeError:
        return absmax(x)
    if type(p) is not int:
        p = mpmathify(p)
    if p == inf:
        return max(absmax(i) for i in x)
    elif p == 1:
        return fsum(x, absolute=1)
    elif p == 2:
        return sqrt(fsum(x, absolute=1, squared=1))
    elif p > 1:
        return nthroot(fsum(abs(i)**p for i in x), p)
    else:
        raise ValueError('p has to be >= 1')
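
For `p` other than 1, 2 and inf, the code falls through to the ``nthroot`` branch; a quick check that ``p=3`` agrees with the direct definition `\left(\sum_k |x_k|^3\right)^{1/3}`:

from mpmath import mp, mpf, norm, nthroot

mp.dps = 15
x = [-10, 2, 100]
print norm(x, 3)
print nthroot(sum(abs(mpf(t))**3 for t in x), 3)    # same value, ~100.0336
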
Example #7
def pslq(x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that

    .. math ::

        |c_0 x_0 + c_1 x_1 + \cdots + c_n x_n| < \mathrm{tol}

    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.

    **Examples**

    Find rational approximations for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> pslq([pi, 1], tol=0.01)
        [-7, 22]
        >>> pslq([pi, 1], tol=0.001)
        [113, -355]

    Pi is not a rational number with denominator less than 1000::

        >>> pslq([pi, 1])
        >>>

    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::

        >>> pslq([pi, 1], maxcoeff=10**12)
        [-75888275702L, 238410049439L]
        >>> print mpf(_[1])/_[0]
        -3.14159265358979

    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::

        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]

    **Machin formulas**

    A famous formula for `\pi` is Machin's,

    .. math ::

        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239

    There are actually infinitely many formulas of this type. Two
    others are

    .. math ::

        \frac{\pi}{4} = \operatorname{acot} 1

        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443

    We can easily verify the formulas using the PSLQ algorithm::

        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]

    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::

        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]

    We get better luck by removing linearly dependent terms::

        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]

    In other words, we found the following formula::

        >>> print 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> print pi
        3.14159265358979323846264338328

    **Algorithm**

    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html

    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """

    n = len(x)
    assert n >= 2

    # At too low precision, the algorithm becomes meaningless
    prec = mp.prec
    assert prec >= 53

    if verbose and prec // max(2, n) < 5:
        print "Warning: precision for PSLQ may be too low"

    target = int(prec * 0.75)

    if tol is None:
        tol = mpf(2)**(-target)
    else:
        tol = mpmathify(tol)

    extra = 60
    prec += extra

    if verbose:
        print "PSLQ using prec %i and tol %s" % (prec, nstr(tol))

    tol = to_fixed(tol._mpf_, prec)
    assert tol

    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [to_fixed(mpf(xk)._mpf_, prec) for xk in x]

    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol // 100:
        if verbose:
            print "STOPPING: (one number is too small)"
        return None

    g = sqrt_fixed((4 << prec) // 3, prec)
    A = {}
    B = {}
    H = {}
    # Initialization
    # step 1
    for i in xrange(1, n + 1):
        for j in xrange(1, n + 1):
            A[i, j] = B[i, j] = (i == j) << prec
            H[i, j] = 0
    # step 2
    s = [None] + [0] * n
    for k in xrange(1, n + 1):
        t = 0
        for j in xrange(k, n + 1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)
    t = s[1]
    y = x[:]
    for k in xrange(1, n + 1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t
    # step 3
    for i in xrange(1, n + 1):
        for j in xrange(i + 1, n):
            H[i, j] = 0
        if i <= n - 1:
            if s[i]:
                H[i, i] = (s[i + 1] << prec) // s[i]
            else:
                H[i, i] = 0
        for j in range(1, i):
            sjj1 = s[j] * s[j + 1]
            if sjj1:
                H[i, j] = ((-y[i] * y[j]) << prec) // sjj1
            else:
                H[i, j] = 0
    # step 4
    for i in xrange(2, n + 1):
        for j in xrange(i - 1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j, j]:
                t = round_fixed((H[i, j] << prec) // H[j, j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + (t * y[i] >> prec)
            for k in xrange(1, j + 1):
                H[i, k] = H[i, k] - (t * H[j, k] >> prec)
            for k in xrange(1, n + 1):
                A[i, k] = A[i, k] - (t * A[j, k] >> prec)
                B[k, j] = B[k, j] + (t * B[k, i] >> prec)
    # Main algorithm
    for REP in range(maxsteps):
        # Step 1
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i, i]
            # gamma^i * |H[i,i]|, using the precomputed g = sqrt(4/3)
            sz = (g**i * abs(h)) >> (prec * (i - 1))
            if sz > szmax:
                m = i
                szmax = sz
        # Step 2
        y[m], y[m + 1] = y[m + 1], y[m]
        for i in xrange(1, n + 1):
            H[m, i], H[m + 1, i] = H[m + 1, i], H[m, i]
        for i in xrange(1, n + 1):
            A[m, i], A[m + 1, i] = A[m + 1, i], A[m, i]
        for i in xrange(1, n + 1):
            B[i, m], B[i, m + 1] = B[i, m + 1], B[i, m]
        # Step 3
        if m <= n - 2:
            t0 = sqrt_fixed((H[m, m]**2 + H[m, m + 1]**2) >> prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m, m] << prec) // t0
            t2 = (H[m, m + 1] << prec) // t0
            for i in xrange(m, n + 1):
                t3 = H[i, m]
                t4 = H[i, m + 1]
                H[i, m] = (t1 * t3 + t2 * t4) >> prec
                H[i, m + 1] = (-t2 * t3 + t1 * t4) >> prec
        # Step 4
        for i in xrange(m + 1, n + 1):
            for j in xrange(min(i - 1, m + 1), 0, -1):
                try:
                    t = round_fixed((H[i, j] << prec) // H[j, j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t * y[i]) >> prec)
                for k in xrange(1, j + 1):
                    H[i, k] = H[i, k] - (t * H[j, k] >> prec)
                for k in xrange(1, n + 1):
                    A[i, k] = A[i, k] - (t * A[j, k] >> prec)
                    B[k, j] = B[k, j] + (t * B[k, i] >> prec)
        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.
        best_err = maxcoeff << prec
        for i in xrange(1, n + 1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
                range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print "FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, nstr(err / mpf(2)**prec, 1))
                    return vec
            best_err = min(err, best_err)
        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2 * prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = inf
        if verbose:
            print "%i/%i:  Error: %8s   Norm: %s" % \
                (REP, maxsteps, nstr(best_err / mpf(2)**prec, 1), norm)
        if norm >= maxcoeff:
            break
    if verbose:
        print "CANCELLING after step %i/%i." % (REP, maxsteps)
        print "Could not find an integer relation. Norm bound: %s" % norm
    return None
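
A further usage sketch: pslq can recognize algebraic numbers from their minimal polynomials. Searching for an integer relation among `1, \phi, \phi^2` recovers `x^2 - x - 1 = 0` for the golden ratio (the returned coefficients may differ by an overall sign):

from mpmath import mp, sqrt, pslq

mp.dps = 30
phi = (1 + sqrt(5))/2
print pslq([1, phi, phi**2])    # [1, 1, -1], i.e. 1 + phi - phi**2 = 0
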
Example #8
def jtheta(n, z, q):
    r"""
    Computes the Jacobi theta function `\vartheta_n(z, q)`, where
    `n = 1, 2, 3, 4`. The theta functions are functions of two
    variables:

    * `z` is the *argument*, an arbitrary real or complex number

    * `q` is the *nome*, which must be a real or complex number
      in the unit disk (i.e. `|q| < 1`)

    One also commonly encounters the notation `\vartheta_n(z, \tau)`
    in the literature. The variable `\tau` is called the *parameter*
    and can be converted to a nome using the formula
    `q = \exp(i \pi \tau)`. Note the condition `|q| < 1` requires
    `\Im(\tau) > 0`; i.e. Jacobi theta functions are defined for
    `\tau` in the upper half plane.

    Other notations are also in use. For example, some authors use
    the single-argument form `\vartheta_n(x)`. Depending on context,
    this can mean ``jtheta(n, 0, x)``, ``jtheta(n, x, q)``, or possibly
    something else. Needless to say, it is a good idea to cross-check
    the definitions when working with theta functions.

    **Definition**

    The four Jacobi theta functions as implemented by :func:`jtheta`
    are defined by the following infinite series:

    .. math ::

      \vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
        (-1)^n q^{n^2+n} \sin((2n+1)z)

      \vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
        q^{n^2+n} \cos((2n+1)z)

      \vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty}
        q^{n^2} \cos(2 n z)

      \vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty}
        (-q)^{n^2} \cos(2 n z)

    For `|q| \ll 1`, these series converge very quickly, so the
    Jacobi theta functions can efficiently be evaluated to high
    precision.

    **Examples and basic properties**

    Considered as functions of `z`, the Jacobi theta functions may be
    viewed as generalizations of the ordinary trigonometric functions
    cos and sin. They are periodic functions::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> print jtheta(1, 0.1, 1/5.)
        0.117756191842059
        >>> print jtheta(1, 0.1 + 2*pi, 1/5.)
        0.117756191842059

    Indeed, the series defining the theta functions are essentially
    trigonometric Fourier series. The coefficients can be retrieved
    using :func:`fourier`::

        >>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4))
        ([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0])

    The Jacobi theta functions are also so-called quasiperiodic
    functions of `z` and `\tau`, meaning that for fixed `\tau`,
    `\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same
    except for an exponential factor::

        >>> tau = 0.3*j
        >>> q = exp(pi*j*tau)
        >>> z = 10
        >>> print jtheta(4, z+tau*pi, q)
        (-0.682420280786035 + 1.5266839997214j)
        >>> print -exp(-2*j*z)/q * jtheta(4, z, q)
        (-0.682420280786035 + 1.5266839997214j)

    The Jacobi theta functions satisfy a huge number of other
    functional equations, such as the following identity (valid for
    any `q`)::

        >>> q = 0.3
        >>> print jtheta(3,0,q)**4
        6.82374408935276
        >>> print jtheta(2,0,q)**4 + jtheta(4,0,q)**4
        6.82374408935276

    Extensive listings of identities satisfied by the Jacobi theta
    functions can be found in standard reference works.

    The Jacobi theta functions are related to the gamma function
    for special arguments::

        >>> print jtheta(3, 0, exp(-pi))
        1.08643481121331
        >>> print pi**(1/4.) / gamma(3/4.)
        1.08643481121331

    :func:`jtheta` supports arbitrary precision evaluation and complex
    arguments::

        >>> mp.dps = 50
        >>> print jtheta(4, sqrt(2), 0.5)
        2.0549510717571539127004115835148878097035750653737
        >>> mp.dps = 25
        >>> print jtheta(4, 1+2j, (1+j)/5)
        (7.180331760146805926356634 - 1.634292858119162417301683j)

    **Possible issues**

    For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`jtheta` raises
    ``ValueError``. This exception is also raised for `|q|` extremely
    close to 1 (or equivalently `\tau` very close to 0), since the
    series would converge too slowly::

        >>> jtheta(1, 10, 0.99999999 * exp(0.5*j))
        Traceback (most recent call last):
          ...
        ValueError: abs(q) > Q_LIM = 1.000000

    """
    z = mpmathify(z)
    q = mpmathify(q)

    # Implementation note: if z.imag is close to zero, _jacobi_theta2 and
    # _jacobi_theta3 are used, which compute the series starting from n=0
    # using fixed-precision numbers; otherwise _jacobi_theta2a and
    # _jacobi_theta3a are used, which compute the series starting from
    # n=n0, the index of the largest term.

    # TODO: write _jacobi_theta2a and _jacobi_theta3a using fixed precision.

    if abs(q) > Q_LIM:
        raise ValueError('abs(q) > Q_LIM = %f' % Q_LIM)

    extra = 10
    cz = 0.5
    extra2 = 50
    prec0 = mp.prec
    try:
        mp.prec += extra
        if n == 1:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _jacobi_theta2(z - pi/2, q)
                else:
                    mp.dps += 10
                    res = _jacobi_theta2a(z - pi/2, q)
            else:
                res = _jacobi_theta2(z - pi/2, q)
        elif n == 2:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _jacobi_theta2(z, q)
                else:
                    mp.dps += 10
                    res = _jacobi_theta2a(z, q)
            else:
                res = _jacobi_theta2(z, q)
        elif n == 3:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _jacobi_theta3(z, q)
                else:
                    mp.dps += 10
                    res = _jacobi_theta3a(z, q)
            else:
                res = _jacobi_theta3(z, q)
        elif n == 4:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _jacobi_theta3(z, -q)
                else:
                    mp.dps += 10
                    res = _jacobi_theta3a(z, -q)
            else:
                res = _jacobi_theta3(z, -q)
        else:
            raise ValueError("n must be 1, 2, 3 or 4")
    finally:
        mp.prec = prec0
    return res
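
A short sketch of the parameter-to-nome conversion described in the docstring: given `\tau` in the upper half plane, `q = \exp(i \pi \tau)` lies in the unit disk and can be passed to jtheta.

from mpmath import mp, exp, pi, j, jtheta

mp.dps = 15
tau = 0.5*j                  # parameter; must have Im(tau) > 0
q = exp(pi*j*tau)            # nome: exp(i*pi*tau) = exp(-pi/2) ~ 0.2079
print jtheta(3, 0, q)
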
Example #9
def quadosc(f, interval, omega=None, period=None, zeros=None):
    r"""
    Calculates

    .. math ::

        I = \int_a^b f(x) dx

    where at least one of `a` and `b` is infinite and where
    `f(x) = g(x) \cos(\omega x  + \phi)` for some slowly
    decreasing function `g(x)`. With proper input, :func:`quadosc`
    can also handle oscillatory integrals where the oscillation
    rate is different from a pure sine or cosine wave.

    In the standard case when `|a| < \infty, b = \infty`,
    :func:`quadosc` works by evaluating the infinite series

    .. math ::

        I = \int_a^{x_1} f(x) dx +
        \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx

    where `x_k` are consecutive zeros (alternatively
    some other periodic reference point) of `f(x)`.
    Accordingly, :func:`quadosc` requires information about the
    zeros of `f(x)`. For a periodic function, you can specify
    the zeros by either providing the angular frequency `\omega`
    (*omega*) or the *period* `2 \pi/\omega`. In general, you can
    specify the `n`-th zero by providing the *zeros* arguments.
    Below is an example of each::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> f = lambda x: sin(3*x)/(x**2+1)
        >>> print quadosc(f, [0,inf], omega=3)
        0.37833007080198
        >>> print quadosc(f, [0,inf], period=2*pi/3)
        0.37833007080198
        >>> print quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
        0.37833007080198
        >>> print (ei(3)*exp(-3)-exp(3)*ei(-3))/2  # Computed by Mathematica
        0.37833007080198

    Note that *zeros* was specified to multiply `n` by the
    *half-period*, not the full period. In theory, it does not matter
    whether each partial integral is done over a half period or a full
    period. However, if done over half-periods, the infinite series
    passed to :func:`nsum` becomes an *alternating series* and this
    typically makes the extrapolation much more efficient.

    Here is an example of an integration over the entire real line,
    and a half-infinite integration starting at `-\infty`::

        >>> print quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
        1.15572734979092
        >>> print pi/e
        1.15572734979092
        >>> print quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
        -0.0844109505595739
        >>> print cos(1)+si(1)-pi/2
        -0.0844109505595738

    Of course, the integrand may contain a complex exponential just as
    well as a real sine or cosine::

        >>> print quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
        (0.156410688228254 + 0.0j)
        >>> print pi/e**3
        0.156410688228254
        >>> print quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
        (0.00317486988463794 - 0.0447701735209082j)
        >>> print 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
        (0.00317486988463794 - 0.0447701735209082j)

    **Non-periodic functions**

    If `f(x) = g(x) h(x)` for some function `h(x)` that is not
    strictly periodic, *omega* or *period* might not work, and it might
    be necessary to use *zeros*.

    A notable exception can be made for Bessel functions which, though not
    periodic, are "asymptotically periodic" in a sufficiently strong sense
    that the sum extrapolation will work out::

        >>> print quadosc(j0, [0, inf], period=2*pi)
        1.0
        >>> print quadosc(j1, [0, inf], period=2*pi)
        1.0

    More properly, one should provide the exact Bessel function zeros::

        >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
        >>> print quadosc(j0, [0, inf], zeros=j0zero)
        1.0

    For an example where *zeros* becomes necessary, consider the
    complete Fresnel integrals

    .. math ::

        \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
        = \sqrt{\frac{\pi}{8}}.

    Although the integrands do not decrease in magnitude as
    `x \to \infty`, the integrals are convergent since the oscillation
    rate increases (causing consecutive periods to asymptotically
    cancel out). These integrals are virtually impossible to calculate
    to any kind of accuracy using standard quadrature rules. However,
    if one provides the correct asymptotic distribution of zeros
    (`x_n \sim \sqrt{n}`), :func:`quadosc` works::

        >>> mp.dps = 30
        >>> f = lambda x: cos(x**2)
        >>> print quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> f = lambda x: sin(x**2)
        >>> print quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> print sqrt(pi/8)
        0.626657068657750125603941321203

    (Interestingly, these integrals can still be evaluated if one
    places some other constant than `\pi` in the square root sign.)

    In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
    the inverse-function distribution `h^{-1}(x)`::

        >>> mp.dps = 15
        >>> f = lambda x: sin(exp(x))
        >>> print quadosc(f, [1,inf], zeros=lambda n: log(n))
        -0.25024394235267
        >>> print pi/2-si(e)
        -0.250243942352671

    **Non-alternating functions**

    If the integrand oscillates around a positive value, without
    alternating signs, the extrapolation might fail. A simple trick
    that sometimes works is to multiply or divide the frequency by 2::

        >>> f = lambda x: 1/x**2+sin(x)/x**4
        >>> print quadosc(f, [1,inf], omega=1)  # Bad
        1.28642190869921
        >>> print quadosc(f, [1,inf], omega=0.5)  # Perfect
        1.28652953559617
        >>> print 1+(cos(1)+ci(1)+sin(1))/6
        1.28652953559617

    **Fast decay**

    :func:`quadosc` is primarily useful for slowly decaying
    integrands. If the integrand decreases exponentially or faster,
    :func:`quad` will likely handle it without trouble (and generally be
    much faster than :func:`quadosc`)::

        >>> print quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
        0.5
        >>> print quad(lambda x: cos(x)/exp(x), [0, inf])
        0.5

    """
    a, b = AS_POINTS(interval)
    a = mpmathify(a)
    b = mpmathify(b)
    if [omega, period, zeros].count(None) != 2:
        raise ValueError("must specify exactly one of omega, period, zeros")
    if a == -inf and b == inf:
        s1 = quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
        s2 = quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
        return s1 + s2
    if a == -inf:
        if zeros:
            return quadosc(lambda x: f(-x), [-b,-a], zeros=lambda n: zeros(-n))
        else:
            return quadosc(lambda x: f(-x), [-b,-a], omega=omega, period=period)
    if b != inf:
        raise ValueError("quadosc requires an infinite integration interval")
    if not zeros:
        if omega:
            period = 2*pi/omega
        zeros = lambda n: n*period/2
    #for n in range(1,10):
    #    p = zeros(n)
    #    if p > a:
    #        break
    #if n >= 9:
    #    raise ValueError("zeros do not appear to be correctly indexed")
    n = 1
    from calculus import nsum
    s = quadgl(f, [a, zeros(n)])
    s += nsum(lambda k: quadgl(f, [zeros(k), zeros(k+1)]), [n, inf])
    return s
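
One more hedged usage sketch: the classical Dirichlet integral `\int_0^{\infty} \frac{\sin x}{x}\,dx = \frac{\pi}{2}`, where only the period of the oscillation needs to be supplied:

from mpmath import mp, quadosc, sin, pi, inf

mp.dps = 15
print quadosc(lambda x: sin(x)/x, [0, inf], period=2*pi)
print pi/2    # 1.5707963267949
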
Example #10
def odefun(F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
    that is a numerical solution of the `n+1`-dimensional first-order
    ordinary differential equation (ODE) system

    .. math ::

        y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        \vdots

        y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])

    The derivatives are specified by the vector-valued function
    *F* that evaluates
    `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
    The initial point `x_0` is specified by the scalar argument *x0*,
    and the initial value `y(x_0) =  [y_0(x_0), \ldots, y_n(x_0)]` is
    specified by the vector argument *y0*.

    For convenience, if the system is one-dimensional, you may optionally
    provide just a scalar value for *y0*. In this case, *F* should accept
    a scalar *y* argument and return a scalar. The solution function
    *y* will return scalar values instead of length-1 vectors.

    Evaluation of the solution function `y(x)` is permitted
    for any `x \ge x_0`.

    A high-order ODE can be solved by transforming it into first-order
    vector form. This transformation is described in standard texts
    on ODEs. Examples will also be given below.

    **Options, speed and accuracy**

    By default, :func:`odefun` uses a high-order Taylor series
    method. For reasonably well-behaved problems, the solution will
    be fully accurate to within the working precision. Note that it
    must be possible to evaluate *F* to very high precision for the
    generation of Taylor series to work.

    To get a faster but less accurate solution, you can set a large
    value for *tol* (which defaults roughly to *eps*). If you just
    want to plot the solution or perform a basic simulation,
    *tol = 0.01* is likely sufficient.

    The *degree* argument controls the degree of the solver (with
    *method='taylor'*, this is the degree of the Taylor series
    expansion). A higher degree means that a longer step can be taken
    before a new local solution must be generated from *F*,
    meaning that fewer steps are required to get from `x_0` to a given
    `x_1`. On the other hand, a higher degree also means that each
    local solution becomes more expensive (i.e., more evaluations of
    *F* are required per step, and at higher precision).

    The optimal setting therefore involves a tradeoff. Generally,
    decreasing the *degree* for Taylor series is likely to give faster
    solution at low precision, while increasing is likely to be better
    at higher precision.

    The function
    object returned by :func:`odefun` caches the solutions at all step
    points and uses polynomial interpolation between step points.
    Therefore, once `y(x_1)` has been evaluated for some `x_1`,
    `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`,
    and continuing the evaluation up to `x_2 > x_1` is also fast.

    **Examples of first-order ODEs**

    We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
    which has explicit solution `y(x) = \exp(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print f(x), exp(x)
        ...
        1.0 1.0
        2.71828182845905 2.71828182845905
        12.1824939607035 12.1824939607035

    The solution with high precision::

        >>> mp.dps = 50
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> f(1)
        2.7182818284590452353602874713526624977572470937
        >>> exp(1)
        2.7182818284590452353602874713526624977572470937

    Using the more general vectorized form, the test problem
    can be input as (note that *f* returns a 1-element vector)::

        >>> mp.dps = 15
        >>> f = odefun(lambda x, y: [y[0]], 0, [1])
        >>> f(1)
        [2.71828182845905]

    :func:`odefun` can solve nonlinear ODEs, which are generally
    impossible (and at best difficult) to solve analytically. As
    an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
    for `y(0) = \pi/2`. An exact solution happens to be known
    for this problem, and is given by
    `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::

        >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
        >>> for x in [2, 5, 10]:
        ...     print f(x), 2*atan(exp(mpf(x)**2/2))
        ...
        2.87255666284091 2.87255666284091
        3.14158520028345 3.14158520028345
        3.14159265358979 3.14159265358979

    If `F` is independent of `y`, an ODE can be solved using direct
    integration. We can therefore obtain a reference solution with
    :func:`quad`::

        >>> f = lambda x: (1+x**2)/(1+x**3)
        >>> g = odefun(lambda x, y: f(x), pi, 0)
        >>> g(2*pi)
        0.72128263801696
        >>> quad(f, [pi, 2*pi])
        0.72128263801696

    **Examples of second-order ODEs**

    We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
    To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'`,
    whereby the original equation can be written as `y_1' = y_0`. Put
    together, we get the first-order, two-dimensional vector ODE

    .. math ::

        \begin{cases}
        y_0' = -y_1 \\
        y_1' = y_0
        \end{cases}

    To get a well-defined IVP, we need two initial values. With
    `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
    course be solved by `y(x) = y_0(x) = \cos(x)` and
    `-y'(x) = y_1(x) = \sin(x)`. We check this::

        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> for x in [0, 1, 2.5, 10]:
        ...     nprint(f(x), 15)
        ...     nprint([cos(x), sin(x)], 15)
        ...     print "---"
        ...
        [1.0, 0.0]
        [1.0, 0.0]
        ---
        [0.54030230586814, 0.841470984807897]
        [0.54030230586814, 0.841470984807897]
        ---
        [-0.801143615546934, 0.598472144103957]
        [-0.801143615546934, 0.598472144103957]
        ---
        [-0.839071529076452, -0.54402111088937]
        [-0.839071529076452, -0.54402111088937]
        ---

    Note that we get both the sine and the cosine solutions
    simultaneously.

    **TODO**

    * Better automatic choice of degree and step size
    * Make determination of Taylor series convergence radius
      more robust
    * Allow solution for `x < x_0`
    * Allow solution for complex `x`
    * Test for difficult (ill-conditioned) problems
    * Implement Runge-Kutta and other algorithms

    """
    if tol:
        tol_prec = int(-log(mpmathify(tol), 2))+10
    else:
        tol_prec = mp.prec+10
    degree = degree or (3 + int(3*mp.dps/2.))
    workprec = mp.prec + 40
    try:
        len(y0)
        return_vector = True
    except TypeError:
        F_ = F
        F = lambda x, y: [F_(x, y[0])]
        y0 = [y0]
        return_vector = False
    ser, xb = ode_taylor(F, x0, y0, tol_prec, degree)
    series_boundaries = [x0, xb]
    series_data = [(ser, x0, xb)]
    # We will be working with vectors of Taylor series
    def mpolyval(ser, a):
        return [polyval(s[::-1], a) for s in ser]
    # Find nearest expansion point; compute if necessary
    def get_series(x):
        if x < x0:
            raise ValueError("the solution is only defined for x >= x0")
        n = bisect(series_boundaries, x)
        if n < len(series_boundaries):
            return series_data[n-1]
        while 1:
            ser, xa, xb = series_data[-1]
            if verbose:
                print "Computing Taylor series for [%f, %f]" % (xa, xb)
            y = mpolyval(ser, xb-xa)
            xa = xb
            ser, xb = ode_taylor(F, xb, y, tol_prec, degree)
            series_boundaries.append(xb)
            series_data.append((ser, xa, xb))
            if x <= xb:
                return series_data[-1]
    # Evaluation function
    def interpolant(x):
        x = mpmathify(x)
        orig = mp.prec
        try:
            mp.prec = workprec
            ser, xa, xb = get_series(x)
            y = mpolyval(ser, x-xa)
        finally:
            mp.prec = orig
        if return_vector:
            return [+yk for yk in y]
        else:
            return +y[0]
    return interpolant
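
A small sketch of the caching behaviour described in the docstring: once the solution has been advanced to some `x_1`, any later evaluation in `[x_0, x_1]` only interpolates cached Taylor pieces.

from mpmath import mp, odefun, exp

mp.dps = 15
f = odefun(lambda x, y: y, 0, 1)
f(10)            # builds and caches local series up to x = 10
print f(7.5)     # fast: reuses a cached local series
print exp(7.5)   # agrees with f(7.5)
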
Exemple #16
0
def odefun(F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
    that is a numerical solution of the `n+1`-dimensional first-order
    ordinary differential equation (ODE) system

    .. math ::

        y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        \vdots

        y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])

    The derivatives are specified by the vector-valued function
    *F* that evaluates
    `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
    The initial point `x_0` is specified by the scalar argument *x0*,
    and the initial value `y(x_0) =  [y_0(x_0), \ldots, y_n(x_0)]` is
    specified by the vector argument *y0*.

    For convenience, if the system is one-dimensional, you may optionally
    provide just a scalar value for *y0*. In this case, *F* should accept
    a scalar *y* argument and return a scalar. The solution function
    *y* will return scalar values instead of length-1 vectors.

    Evaluation of the solution function `y(x)` is permitted
    for any `x \ge x_0`.

    A high-order ODE can be solved by transforming it into first-order
    vector form. This transformation is described in standard texts
    on ODEs. Examples will also be given below.

    **Options, speed and accuracy**

    By default, :func:`odefun` uses a high-order Taylor series
    method. For reasonably well-behaved problems, the solution will
    be fully accurate to within the working precision. Note that
    *F* must be possible to evaluate to very high precision
    for the generation of Taylor series to work.

    To get a faster but less accurate solution, you can set a large
    value for *tol* (which defaults roughly to *eps*). If you just
    want to plot the solution or perform a basic simulation,
    *tol = 0.01* is likely sufficient.

    The *degree* argument controls the degree of the solver (with
    *method='taylor'*, this is the degree of the Taylor series
    expansion). A higher degree means that a longer step can be taken
    before a new local solution must be generated from *F*,
    meaning that fewer steps are required to get from `x_0` to a given
    `x_1`. On the other hand, a higher degree also means that each
    local solution becomes more expensive (i.e., more evaluations of
    *F* are required per step, and at higher precision).

    The optimal setting therefore involves a tradeoff. Generally,
    decreasing the *degree* for Taylor series is likely to give faster
    solution at low precision, while increasing is likely to be better
    at higher precision.

    The function
    object returned by :func:`odefun` caches the solutions at all step
    points and uses polynomial interpolation between step points.
    Therefore, once `y(x_1)` has been evaluated for some `x_1`,
    `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`.
    and continuing the evaluation up to `x_2 > x_1` is also fast.

    **Examples of first-order ODEs**

    We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
    which has explicit solution `y(x) = \exp(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print f(x), exp(x)
        ...
        1.0 1.0
        2.71828182845905 2.71828182845905
        12.1824939607035 12.1824939607035

    The solution with high precision::

        >>> mp.dps = 50
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> f(1)
        2.7182818284590452353602874713526624977572470937
        >>> exp(1)
        2.7182818284590452353602874713526624977572470937

    Using the more general vectorized form, the test problem
    can be input as (note that *f* returns a 1-element vector)::

        >>> mp.dps = 15
        >>> f = odefun(lambda x, y: [y[0]], 0, [1])
        >>> f(1)
        [2.71828182845905]

    :func:`odefun` can solve nonlinear ODEs, which are generally
    impossible (and at best difficult) to solve analytically. As
    an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
    for `y(0) = \pi/2`. An exact solution happens to be known
    for this problem, and is given by
    `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::

        >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
        >>> for x in [2, 5, 10]:
        ...     print f(x), 2*atan(exp(mpf(x)**2/2))
        ...
        2.87255666284091 2.87255666284091
        3.14158520028345 3.14158520028345
        3.14159265358979 3.14159265358979

    If `F` is independent of `y`, an ODE can be solved using direct
    integration. We can therefore obtain a reference solution with
    :func:`quad`::

        >>> f = lambda x: (1+x**2)/(1+x**3)
        >>> g = odefun(lambda x, y: f(x), pi, 0)
        >>> g(2*pi)
        0.72128263801696
        >>> quad(f, [pi, 2*pi])
        0.72128263801696

    **Examples of second-order ODEs**

    We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
    To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'`,
    whereby the original equation can be written as `y_1' = y_0`. Put
    together, we get the first-order, two-dimensional vector ODE

    .. math ::

        \begin{cases}
        y_0' = -y_1 \\
        y_1' = y_0
        \end{cases}

    To get a well-defined IVP, we need two initial values. With
    `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
    course be solved by `y(x) = y_0(x) = \cos(x)` and
    `-y'(x) = y_1(x) = \sin(x)`. We check this::

        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> for x in [0, 1, 2.5, 10]:
        ...     nprint(f(x), 15)
        ...     nprint([cos(x), sin(x)], 15)
        ...     print "---"
        ...
        [1.0, 0.0]
        [1.0, 0.0]
        ---
        [0.54030230586814, 0.841470984807897]
        [0.54030230586814, 0.841470984807897]
        ---
        [-0.801143615546934, 0.598472144103957]
        [-0.801143615546934, 0.598472144103957]
        ---
        [-0.839071529076452, -0.54402111088937]
        [-0.839071529076452, -0.54402111088937]
        ---

    Note that we get both the sine and the cosine solutions
    simultaneously.

    **TODO**

    * Better automatic choice of degree and step size
    * Make determination of Taylor series convergence radius
      more robust
    * Allow solution for `x < x_0`
    * Allow solution for complex `x`
    * Test for difficult (ill-conditioned) problems
    * Implement Runge-Kutta and other algorithms

    """
    if tol:
        tol_prec = int(-log(mpmathify(tol), 2))+10
    else:
        tol_prec = mp.prec+10
    degree = degree or (3 + int(3*mp.dps/2.))
    workprec = mp.prec + 40
    try:
        len(y0)
        return_vector = True
    except TypeError:
        F_ = F
        F = lambda x, y: [F_(x, y[0])]
        y0 = [y0]
        return_vector = False
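    # Compute the initial local Taylor series, valid on [x0, xb]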
    ser, xb = ode_taylor(F, x0, y0, tol_prec, degree)
    series_boundaries = [x0, xb]
    series_data = [(ser, x0, xb)]
    # We will be working with vectors of Taylor series
    def mpolyval(ser, a):
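        # each series is a list of coefficients in ascending order;
        # polyval expects descending order, hence the reversal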
        return [polyval(s[::-1], a) for s in ser]
    # Find nearest expansion point; compute if necessary
    def get_series(x):
        if x < x0:
            raise ValueError
        n = bisect(series_boundaries, x)
        if n < len(series_boundaries):
            return series_data[n-1]
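        # x lies beyond the last computed series: step forward,
        # generating new local series, until x is covered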
        while 1:
            ser, xa, xb = series_data[-1]
            if verbose:
                print "Computing Taylor series for [%f, %f]" % (xa, xb)
            y = mpolyval(ser, xb-xa)
            xa = xb
            ser, xb = ode_taylor(F, xb, y, tol_prec, degree)
            series_boundaries.append(xb)
            series_data.append((ser, xa, xb))
            if x <= xb:
                return series_data[-1]
    # Evaluation function
    def interpolant(x):
        x = mpmathify(x)
        orig = mp.prec
        try:
            mp.prec = workprec
            ser, xa, xb = get_series(x)
            y = mpolyval(ser, x-xa)
        finally:
            mp.prec = orig
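        # unary + rounds each value to the caller's precision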
        if return_vector:
            return [+yk for yk in y]
        else:
            return +y[0]
    return interpolant
Example #17
0
def djtheta(n, z, q, nd=1):
    r"""
    For an integer `nd \ge 1`, computes the `nd`:th derivative with
    respect to `z` of the Jacobi theta function `\vartheta_n(z,q)`::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> print djtheta(3, 7, 0.2)
        -0.795947847483158
        >>> print diff(lambda x: jtheta(3, x, 0.2), 7)
        -0.795947847483158

    For additional details, see :func:`jtheta`.
    """

    z = mpmathify(z)
    q = mpmathify(q)

    if abs(q) > Q_LIM:
        raise ValueError('abs(q) > Q_LIM = %f' % Q_LIM)
    extra = 10 + mp.prec * nd // 10
    cz = 0.5
    extra2 = 50
    prec0 = mp.prec
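    # For z off the real axis, switch to the alternative expansions
    # (_djacobi_theta2a/_djacobi_theta3a) when |Im(z)| is large
    # relative to |log q|; otherwise raise the precision instead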
    try:
        mp.prec += extra
        if n == 1:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _djacobi_theta2(z - pi/2, q, nd)
                else:
                    mp.dps += 10
                    res = _djacobi_theta2a(z - pi/2, q, nd)
            else:
                res = _djacobi_theta2(z - pi/2, q, nd)
        elif n == 2:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _djacobi_theta2(z, q, nd)
                else:
                    mp.dps += 10
                    res = _djacobi_theta2a(z, q, nd)
            else:
                res = _djacobi_theta2(z, q, nd)
        elif n == 3:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _djacobi_theta3(z, q, nd)
                else:
                    mp.dps += 10
                    res = _djacobi_theta3a(z, q, nd)
            else:
                res = _djacobi_theta3(z, q, nd)
        elif n == 4:
            if abs(z.imag) != 0:
                if abs(z.imag) < cz * abs(log(q).real):
                    mp.dps += extra2
                    res = _djacobi_theta3(z, -q, nd)
                else:
                    mp.dps += 10
                    res = _djacobi_theta3a(z, -q, nd)
            else:
                res = _djacobi_theta3(z, -q, nd)
        else:
            raise ValueError
    finally:
        mp.prec = prec0
    return res
Example #18
0
def quadosc(f, interval, omega=None, period=None, zeros=None):
    r"""
    Calculates

    .. math ::

        I = \int_a^b f(x) dx

    where at least one of `a` and `b` is infinite and where
    `f(x) = g(x) \cos(\omega x  + \phi)` for some slowly
    decreasing function `g(x)`. With proper input, :func:`quadosc`
    can also handle oscillatory integrals where the oscillation
    rate is different from a pure sine or cosine wave.

    In the standard case when `|a| < \infty, b = \infty`,
    :func:`quadosc` works by evaluating the infinite series

    .. math ::

        I = \int_a^{x_1} f(x) dx +
        \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx

    where `x_k` are consecutive zeros (alternatively
    some other periodic reference point) of `f(x)`.
    Accordingly, :func:`quadosc` requires information about the
    zeros of `f(x)`. For a periodic function, you can specify
    the zeros by either providing the angular frequency `\omega`
    (*omega*) or the *period* `2 \pi/\omega`. In general, you can
    specify the `n`-th zero by providing the *zeros* argument.
    Below is an example of each::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> f = lambda x: sin(3*x)/(x**2+1)
        >>> print quadosc(f, [0,inf], omega=3)
        0.37833007080198
        >>> print quadosc(f, [0,inf], period=2*pi/3)
        0.37833007080198
        >>> print quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
        0.37833007080198
        >>> print (ei(3)*exp(-3)-exp(3)*ei(-3))/2  # Computed by Mathematica
        0.37833007080198

    Note that *zeros* was specified to multiply `n` by the
    *half-period*, not the full period. In theory, it does not matter
    whether each partial integral is done over a half period or a full
    period. However, if done over half-periods, the infinite series
    passed to :func:`nsum` becomes an *alternating series* and this
    typically makes the extrapolation much more efficient.

    Here is an example of an integration over the entire real line,
    and a half-infinite integration starting at `-\infty`::

        >>> print quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
        1.15572734979092
        >>> print pi/e
        1.15572734979092
        >>> print quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
        -0.0844109505595739
        >>> print cos(1)+si(1)-pi/2
        -0.0844109505595738

    Of course, the integrand may contain a complex exponential just as
    well as a real sine or cosine::

        >>> print quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
        (0.156410688228254 + 0.0j)
        >>> print pi/e**3
        0.156410688228254
        >>> print quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
        (0.00317486988463794 - 0.0447701735209082j)
        >>> print 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
        (0.00317486988463794 - 0.0447701735209082j)

    **Non-periodic functions**

    If `f(x) = g(x) h(x)` for some function `h(x)` that is not
    strictly periodic, *omega* or *period* might not work, and it might
    be necessary to use *zeros*.

    A notable exception can be made for Bessel functions which, though not
    periodic, are "asymptotically periodic" in a sufficiently strong sense
    that the sum extrapolation will work out::

        >>> print quadosc(j0, [0, inf], period=2*pi)
        1.0
        >>> print quadosc(j1, [0, inf], period=2*pi)
        1.0

    More properly, one should provide the exact Bessel function zeros::

        >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
        >>> print quadosc(j0, [0, inf], zeros=j0zero)
        1.0

    For an example where *zeros* becomes necessary, consider the
    complete Fresnel integrals

    .. math ::

        \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
        = \sqrt{\frac{\pi}{8}}.

    Although the integrands do not decrease in magnitude as
    `x \to \infty`, the integrals are convergent since the oscillation
    rate increases (causing consecutive periods to asymptotically
    cancel out). These integrals are virtually impossible to calculate
    to any kind of accuracy using standard quadrature rules. However,
    if one provides the correct asymptotic distribution of zeros
    (`x_n \sim \sqrt{n}`), :func:`quadosc` works::

        >>> mp.dps = 30
        >>> f = lambda x: cos(x**2)
        >>> print quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> f = lambda x: sin(x**2)
        >>> print quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> print sqrt(pi/8)
        0.626657068657750125603941321203

    (Interestingly, these integrals can still be evaluated if one
    replaces `\pi` with some other constant under the square root.)

    In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
    the inverse-function distribution `h^{-1}(x)`::

        >>> mp.dps = 15
        >>> f = lambda x: sin(exp(x))
        >>> print quadosc(f, [1,inf], zeros=lambda n: log(n))
        -0.25024394235267
        >>> print pi/2-si(e)
        -0.250243942352671

    **Non-alternating functions**

    If the integrand oscillates around a positive value, without
    alternating signs, the extrapolation might fail. A simple trick
    that sometimes works is to multiply or divide the frequency by 2::

        >>> f = lambda x: 1/x**2+sin(x)/x**4
        >>> print quadosc(f, [1,inf], omega=1)  # Bad
        1.28642190869921
        >>> print quadosc(f, [1,inf], omega=0.5)  # Perfect
        1.28652953559617
        >>> print 1+(cos(1)+ci(1)+sin(1))/6
        1.28652953559617

    **Fast decay**

    :func:`quadosc` is primarily useful for slowly decaying
    integrands. If the integrand decreases exponentially or faster,
    :func:`quad` will likely handle it without trouble (and generally be
    much faster than :func:`quadosc`)::

        >>> print quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
        0.5
        >>> print quad(lambda x: cos(x)/exp(x), [0, inf])
        0.5

    """
    a, b = AS_POINTS(interval)
    a = mpmathify(a)
    b = mpmathify(b)
    if [omega, period, zeros].count(None) != 2:
        raise ValueError(
            "must specify exactly one of omega, period, zeros")
    if a == -inf and b == inf:
        s1 = quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
        s2 = quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
        return s1 + s2
    if a == -inf:
        if zeros:
            return quadosc(lambda x:f(-x), [-b,-a], zeros=lambda n: zeros(-n))
        else:
            return quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
    if b != inf:
        raise ValueError("quadosc requires an infinite integration interval")
    if not zeros:
        if omega:
            period = 2*pi/omega
        zeros = lambda n: n*period/2
    n = 1
    from calculus import nsum
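    # Integrate from a to the first zero directly (assuming zeros(1)
    # lies beyond a), then let nsum extrapolate the (typically
    # alternating) series of integrals between consecutive zeros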
    s = quadgl(f, [a, zeros(n)])
    s += nsum(lambda k: quadgl(f, [zeros(k), zeros(k+1)]), [n, inf])
    return s