Example #1
 def summation(cls, f, points, prec, epsilon, max_level, verbose=False):
     """
     Main summation function
     """
     I = err = mpf(0)
     for i in xrange(len(points)-1):
         a, b = points[i], points[i+1]
         if a == b:
             continue
         g = transform(f, a, b)
         results = []
         for level in xrange(1, max_level+1):
             if verbose:
                 print "Integrating from %s to %s (level %s of %s)" % \
                     (nstr(a), nstr(b), level, max_level)
             results.append(cls.sum_next(prec, level, results, g, verbose))
             if level > 2:
                 err = cls.estimate_error(results, prec, epsilon)
                 if err <= epsilon:
                     break
                 if verbose:
                     print "Estimated error:", nstr(err)
         I += results[-1]
     if err > epsilon:
         if verbose:
             print "Failed to reach full accuracy. Estimated error:", nstr(err)
     return I, err
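A minimal usage sketch, assuming the public mpmath.quad front end rather than the internal class above: extra interior points in the interval list mirror the loop over points[i], points[i+1], and error=True returns an error estimate analogous to err.

    from mpmath import mp, quad, exp, inf

    mp.dps = 25
    # Splitting at 0 keeps the kink of exp(-|x|) at a subinterval endpoint,
    # just as the loop above integrates each points[i]..points[i+1] separately.
    I, err = quad(lambda x: exp(-abs(x)), [-inf, 0, inf], error=True)
    print(I)     # ~2.0
    print(err)   # estimated error, like the err returned by summation()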
Example #2
 def summation(cls, f, points, prec, epsilon, max_level, verbose=False):
     """
     Main summation function
     """
     I = err = mpf(0)
     for i in xrange(len(points) - 1):
         a, b = points[i], points[i + 1]
         if a == b:
             continue
         g = transform(f, a, b)
         results = []
         for level in xrange(1, max_level + 1):
             if verbose:
                 print "Integrating from %s to %s (level %s of %s)" % \
                     (nstr(a), nstr(b), level, max_level)
             results.append(cls.sum_next(prec, level, results, g, verbose))
             if level > 2:
                 err = cls.estimate_error(results, prec, epsilon)
                 if err <= epsilon:
                     break
                 if verbose:
                     print "Estimated error:", nstr(err)
         I += results[-1]
     if err > epsilon:
         if verbose:
             print "Failed to reach full accuracy. Estimated error:", nstr(
                 err)
     return I, err
Example #3
    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
        """
        Main integration function. Computes the 1D integral over
        the interval specified by *points*. For each subinterval,
        performs quadrature of degree from 1 up to *max_degree*
        until :func:`estimate_error` signals convergence.

        :func:`summation` transforms each subintegration to
        the standard interval and then calls :func:`sum_next`.
        """
        I = err = mpf(0)
        for i in xrange(len(points)-1):
            a, b = points[i], points[i+1]
            if a == b:
                continue
            # XXX: we could use a single variable transformation,
            # but this is not good in practice. We get better accuracy
            # by having 0 as an endpoint.
            if (a, b) == (-inf, inf):
                _f = f
                f = lambda x: _f(NEG(x)) + _f(x)
                a, b = (mpf(0), inf)
            results = []
            for degree in xrange(1, max_degree+1):
                nodes = self.get_nodes(a, b, degree, prec, verbose)
                if verbose:
                    print "Integrating from %s to %s (degree %s of %s)" % \
                        (nstr(a), nstr(b), degree, max_degree)
                results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
                if degree > 1:
                    err = self.estimate_error(results, prec, epsilon)
                    if err <= epsilon:
                        break
                    if verbose:
                        print "Estimated error:", nstr(err)
            I += results[-1]
        if err > epsilon:
            if verbose:
                print "Failed to reach full accuracy. Estimated error:", nstr(err)
        return I, err
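The (-inf, inf) branch above folds a doubly infinite integral onto [0, inf) by summing f(-x) + f(x). A short sketch of the same folding done by hand, assuming the public mpmath.quad interface:

    from mpmath import mp, quad, exp, sqrt, pi, inf

    mp.dps = 25
    f = lambda x: exp(-x**2)
    print(quad(f, [-inf, inf]))                    # handled internally by the folding
    print(quad(lambda x: f(-x) + f(x), [0, inf]))  # the same transformation by hand
    print(sqrt(pi))                                # exact value for comparison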
Example #4
    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
        """
        Main integration function. Computes the 1D integral over
        the interval specified by *points*. For each subinterval,
        performs quadrature of degree from 1 up to *max_degree*
        until :func:`estimate_error` signals convergence.

        :func:`summation` transforms each subintegration to
        the standard interval and then calls :func:`sum_next`.
        """
        I = err = mpf(0)
        for i in xrange(len(points)-1):
            a, b = points[i], points[i+1]
            if a == b:
                continue
            # XXX: we could use a single variable transformation,
            # but this is not good in practice. We get better accuracy
            # by having 0 as an endpoint.
            if (a, b) == (-inf, inf):
                _f = f
                f = lambda x: _f(NEG(x)) + _f(x)
                a, b = (mpf(0), inf)
            results = []
            for degree in xrange(1, max_degree+1):
                nodes = self.get_nodes(a, b, degree, prec, verbose)
                if verbose:
                    print "Integrating from %s to %s (degree %s of %s)" % \
                        (nstr(a), nstr(b), degree, max_degree)
                results.append(self.sum_next(f, nodes, degree, prec, results, verbose))
                if degree > 1:
                    err = self.estimate_error(results, prec, epsilon)
                    if err <= epsilon:
                        break
                    if verbose:
                        print "Estimated error:", nstr(err)
            I += results[-1]
        if err > epsilon:
            if verbose:
                print "Failed to reach full accuracy. Estimated error:", nstr(err)
        return I, err
Example #5
 def __nstr__(self, n=None):
     # Build table of string representations of the elements
     res = []
     # Track per-column max lengths for pretty alignment
     maxlen = [0] * self.cols
     for i in range(self.rows):
         res.append([])
         for j in range(self.cols):
             if n:
                 string = nstr(self[i,j], n)
             else:
                 string = str(self[i,j])
             res[-1].append(string)
             maxlen[j] = max(len(string), maxlen[j])
     # Patch strings together
     for i, row in enumerate(res):
         for j, elem in enumerate(row):
             # Pad each element up to maxlen so the columns line up
             row[j] = elem.rjust(maxlen[j])
         res[i] = "[" + colsep.join(row) + "]"
     return rowsep.join(res)
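A brief usage sketch, assuming the public mpmath.matrix and mpmath.nstr: calling nstr on a matrix goes through __nstr__, producing the right-justified columns built above.

    from mpmath import mp, matrix, nstr, pi

    mp.dps = 15
    A = matrix([[1, -1.5], [pi, 100]])
    print(nstr(A, 5))    # elements rounded to 5 significant digits, columns aligned
    print(nstr(A, 10))   # wider entries, same alignment logic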
Example #6
 def __nstr__(self, n=None):
     # Build table of string representations of the elements
     res = []
     # Track per-column max lengths for pretty alignment
     maxlen = [0] * self.cols
     for i in range(self.rows):
         res.append([])
         for j in range(self.cols):
             if n:
                 string = nstr(self[i,j], n)
             else:
                 string = str(self[i,j])
             res[-1].append(string)
             maxlen[j] = max(len(string), maxlen[j])
     # Patch strings together
     for i, row in enumerate(res):
         for j, elem in enumerate(row):
             # Pad each element up to maxlen so the columns line up
             row[j] = elem.rjust(maxlen[j])
         res[i] = "[" + colsep.join(row) + "]"
     return rowsep.join(res)
Example #7
def pslq(x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that

    .. math ::

        |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol}

    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.

    **Examples**

    Find rational approximations for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> pslq([pi, 1], tol=0.01)
        [-7, 22]
        >>> pslq([pi, 1], tol=0.001)
        [113, -355]

    Pi is not a rational number with denominator less than 1000::

        >>> pslq([pi, 1])
        >>>

    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::

        >>> pslq([pi, 1], maxcoeff=10**12)
        [-75888275702L, 238410049439L]
        >>> print mpf(_[1])/_[0]
        -3.14159265358979

    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::

        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]

    **Machin formulas**

    A famous formula for `\pi` is Machin's,

    .. math ::

        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239

    There are actually infinitely many formulas of this type. Two
    others are

    .. math ::

        \frac{\pi}{4} = \operatorname{acot} 1

        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443

    We can easily verify the formulas using the PSLQ algorithm::

        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]

    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::

        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]

    We get better luck by removing linearly dependent terms::

        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]

    In other words, we found the following formula::

        >>> print 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> print pi
        3.14159265358979323846264338328

    **Algorithm**

    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html

    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """

    n = len(x)
    assert n >= 2

    # At too low precision, the algorithm becomes meaningless
    prec = mp.prec
    assert prec >= 53

    if verbose and prec // max(2, n) < 5:
        print "Warning: precision for PSLQ may be too low"

    target = int(prec * 0.75)

    if tol is None:
        tol = mpf(2)**(-target)
    else:
        tol = mpmathify(tol)

    extra = 60
    prec += extra

    if verbose:
        print "PSLQ using prec %i and tol %s" % (prec, nstr(tol))

    tol = to_fixed(tol._mpf_, prec)
    assert tol

    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [to_fixed(mpf(xk)._mpf_, prec) for xk in x]

    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol // 100:
        if verbose:
            print "STOPPING: (one number is too small)"
        return None

    g = sqrt_fixed((4 << prec) // 3, prec)
    A = {}
    B = {}
    H = {}
    # Initialization
    # step 1
    for i in xrange(1, n + 1):
        for j in xrange(1, n + 1):
            A[i, j] = B[i, j] = (i == j) << prec
            H[i, j] = 0
    # step 2
    s = [None] + [0] * n
    for k in xrange(1, n + 1):
        t = 0
        for j in xrange(k, n + 1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)
    t = s[1]
    y = x[:]
    for k in xrange(1, n + 1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t
    # step 3
    for i in xrange(1, n + 1):
        for j in xrange(i + 1, n):
            H[i, j] = 0
        if i <= n - 1:
            if s[i]:
                H[i, i] = (s[i + 1] << prec) // s[i]
            else:
                H[i, i] = 0
        for j in range(1, i):
            sjj1 = s[j] * s[j + 1]
            if sjj1:
                H[i, j] = ((-y[i] * y[j]) << prec) // sjj1
            else:
                H[i, j] = 0
    # step 4
    for i in xrange(2, n + 1):
        for j in xrange(i - 1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j, j]:
                t = round_fixed((H[i, j] << prec) // H[j, j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + (t * y[i] >> prec)
            for k in xrange(1, j + 1):
                H[i, k] = H[i, k] - (t * H[j, k] >> prec)
            for k in xrange(1, n + 1):
                A[i, k] = A[i, k] - (t * A[j, k] >> prec)
                B[k, j] = B[k, j] + (t * B[k, i] >> prec)
    # Main algorithm
    for REP in range(maxsteps):
        # Step 1
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i, i]
            sz = (g**i * abs(h)) >> (prec * (i - 1))
            if sz > szmax:
                m = i
                szmax = sz
        # Step 2
        y[m], y[m + 1] = y[m + 1], y[m]
        tmp = {}
        for i in xrange(1, n + 1):
            H[m, i], H[m + 1, i] = H[m + 1, i], H[m, i]
        for i in xrange(1, n + 1):
            A[m, i], A[m + 1, i] = A[m + 1, i], A[m, i]
        for i in xrange(1, n + 1):
            B[i, m], B[i, m + 1] = B[i, m + 1], B[i, m]
        # Step 3
        if m <= n - 2:
            t0 = sqrt_fixed((H[m, m]**2 + H[m, m + 1]**2) >> prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m, m] << prec) // t0
            t2 = (H[m, m + 1] << prec) // t0
            for i in xrange(m, n + 1):
                t3 = H[i, m]
                t4 = H[i, m + 1]
                H[i, m] = (t1 * t3 + t2 * t4) >> prec
                H[i, m + 1] = (-t2 * t3 + t1 * t4) >> prec
        # Step 4
        for i in xrange(m + 1, n + 1):
            for j in xrange(min(i - 1, m + 1), 0, -1):
                try:
                    t = round_fixed((H[i, j] << prec) // H[j, j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t * y[i]) >> prec)
                for k in xrange(1, j + 1):
                    H[i, k] = H[i, k] - (t * H[j, k] >> prec)
                for k in xrange(1, n + 1):
                    A[i, k] = A[i, k] - (t * A[j, k] >> prec)
                    B[k, j] = B[k, j] + (t * B[k, i] >> prec)
        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step. TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.
        best_err = maxcoeff << prec
        for i in xrange(1, n + 1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
                range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print "FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, nstr(err / mpf(2)**prec, 1))
                    return vec
            best_err = min(err, best_err)
        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2 * prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = inf
        if verbose:
            print "%i/%i:  Error: %8s   Norm: %s" % \
                (REP, maxsteps, nstr(best_err / mpf(2)**prec, 1), norm)
        if norm >= maxcoeff:
            break
    if verbose:
        print "CANCELLING after step %i/%i." % (REP, maxsteps)
        print "Could not find an integer relation. Norm bound: %s" % norm
    return None
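Beyond the doctests above, a common use of pslq is guessing a minimal polynomial from an integer relation among the powers of an algebraic number. A hedged sketch; the printed expectations are illustrative, not guaranteed output:

    from mpmath import mp, pslq, sqrt

    mp.dps = 30
    a = sqrt(2) + sqrt(3)
    rel = pslq([a**k for k in range(5)])
    print(rel)                                      # expected [1, 0, -10, 0, 1] up to sign,
                                                    # i.e. a**4 - 10*a**2 + 1 = 0
    print(sum(c*a**k for k, c in enumerate(rel)))   # ~0 at the working precision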
Example #8
    def calc_nodes(cls, prec, level, verbose=False):
        """
        The abscissas and weights for tanh-sinh quadrature are given by

            x[k] = tanh(pi/2 * sinh(t))
            w[k] = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

        Here t varies uniformly with k: t0, t0+h, t0+2*h, ...

        The list of nodes is actually infinite, but the weights
        die off so rapidly that only a few are needed.
        """
        nodes = []

        extra = 20
        mp.prec += extra
        eps = ldexp(1, -prec-10)
        pi4 = pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous level

        # We define level 1 to include the "level 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ldexp(1, -level)
        if level == 1:
            nodes.append((mpf(0), pi4))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = exp(h)
        urdelta = 1/udelta

        for k in xrange(0, 20*2**level+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= eps:
                break

            nodes.append((x, w))
            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print "Calculating nodes:", nstr(-log(diff, 10) / prec)

        mp.prec -= extra
        return nodes
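The commented-out "reference implementation" can be spelled out as standalone code. In the sketch below the step length and truncation rule are simplifying assumptions rather than the class's exact bookkeeping; it builds tanh-sinh nodes directly from the defining formulas and checks them on the integral of 1/(1+x**2) over [-1, 1], which equals pi/2.

    from mpmath import mp, mpf, pi, sinh, cosh, tanh, ldexp, atan

    mp.dps = 30
    m = 7                              # analogue of the "level" above
    h = ldexp(mpf(1), -m)              # step length h = 2**(-m)
    eps = ldexp(mpf(1), -mp.prec - 10)

    nodes = []
    k = 0
    while True:
        t = k*h
        x = tanh(pi/2 * sinh(t))
        w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2
        if abs(x - 1) <= eps:          # abscissas have saturated at 1
            break
        nodes.append((x, w))
        if k:                          # mirror node for -t (t = 0 counted once)
            nodes.append((-x, w))
        k += 1

    f = lambda u: 1/(1 + u**2)
    print(h * sum(w*f(x) for x, w in nodes))   # ~pi/2
    print(2*atan(1))                           # exact value, for comparison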
Example #9
    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is actually infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        nodes = []

        extra = 20
        mp.prec += extra
        eps = ldexp(1, -prec-10)
        pi4 = pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous degree

        # We define degree 1 to include the "degree 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ldexp(1, -degree)
        if degree == 1:
            #nodes.append((mpf(0), pi4))
            #nodes.append((-mpf(0), pi4))
            nodes.append((mpf(0), pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = exp(h)
        urdelta = 1/udelta

        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= eps:
                break

            nodes.append((x, w))
            nodes.append((NEG(x), w))

            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print "Calculating nodes:", nstr(-log(diff, 10) / prec)

        mp.prec -= extra
        return nodes
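The "fast implementation" relies on the identity a - b = pi/2 * sinh(t) when a = pi/4 * exp(t) and b = pi/4 / exp(t). A quick numerical check of that identity, using only public mpmath functions and an arbitrary illustrative value of t:

    from mpmath import mp, mpf, pi, exp, sinh, cosh, tanh

    mp.dps = 25
    t = mpf('0.8125')
    a, b = pi/4 * exp(t), pi/4 / exp(t)
    c = exp(a - b)                     # equals exp(pi/2 * sinh(t))
    co, si = (c + 1/c)/2, (c - 1/c)/2
    print(si/co)                                     # abscissa x, fast form
    print(tanh(pi/2 * sinh(t)))                      # abscissa x, reference form
    print((a + b) / co**2)                           # weight w, fast form
    print(pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2)  # weight w, reference form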
Example #10
def pslq(x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that

    .. math ::

        |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol}

    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.

    **Examples**

    Find rational approximations for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> pslq([pi, 1], tol=0.01)
        [-7, 22]
        >>> pslq([pi, 1], tol=0.001)
        [113, -355]

    Pi is not a rational number with denominator less than 1000::

        >>> pslq([pi, 1])
        >>>

    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::

        >>> pslq([pi, 1], maxcoeff=10**12)
        [-75888275702L, 238410049439L]
        >>> mpf(_[1])/_[0]
        -3.14159265358979

    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::

        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]

    **Machin formulas**

    A famous formula for `\pi` is Machin's,

    .. math ::

        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239

    There are actually infinitely many formulas of this type. Two
    others are

    .. math ::

        \frac{\pi}{4} = \operatorname{acot} 1

        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443

    We can easily verify the formulas using the PSLQ algorithm::

        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]

    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::

        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]

    We get better luck by removing linearly dependent terms::

        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]

    In other words, we found the following formula::

        >>> 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> +pi
        3.14159265358979323846264338328

    **Algorithm**

    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html

    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """

    n = len(x)
    assert n >= 2

    # At too low precision, the algorithm becomes meaningless
    prec = mp.prec
    assert prec >= 53

    if verbose and prec // max(2,n) < 5:
        print "Warning: precision for PSLQ may be too low"

    target = int(prec * 0.75)

    if tol is None:
        tol = mpf(2)**(-target)
    else:
        tol = mpmathify(tol)

    extra = 60
    prec += extra

    if verbose:
        print "PSLQ using prec %i and tol %s" % (prec, nstr(tol))

    tol = to_fixed(tol._mpf_, prec)
    assert tol

    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [to_fixed(mpf(xk)._mpf_, prec) for xk in x]

    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol//100:
        if verbose:
            print "STOPPING: (one number is too small)"
        return None

    g = sqrt_fixed((4<<prec)//3, prec)
    A = {}
    B = {}
    H = {}
    # Initialization
    # step 1
    for i in xrange(1, n+1):
        for j in xrange(1, n+1):
            A[i,j] = B[i,j] = (i==j) << prec
            H[i,j] = 0
    # step 2
    s = [None] + [0] * n
    for k in xrange(1, n+1):
        t = 0
        for j in xrange(k, n+1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)
    t = s[1]
    y = x[:]
    for k in xrange(1, n+1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t
    # step 3
    for i in xrange(1, n+1):
        for j in xrange(i+1, n):
            H[i,j] = 0
        if i <= n-1:
            if s[i]:
                H[i,i] = (s[i+1] << prec) // s[i]
            else:
                H[i,i] = 0
        for j in range(1, i):
            sjj1 = s[j]*s[j+1]
            if sjj1:
                H[i,j] = ((-y[i]*y[j])<<prec)//sjj1
            else:
                H[i,j] = 0
    # step 4
    for i in xrange(2, n+1):
        for j in xrange(i-1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j,j]:
                t = round_fixed((H[i,j] << prec)//H[j,j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + (t*y[i] >> prec)
            for k in xrange(1, j+1):
                H[i,k] = H[i,k] - (t*H[j,k] >> prec)
            for k in xrange(1, n+1):
                A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                B[k,j] = B[k,j] + (t*B[k,i] >> prec)
    # Main algorithm
    for REP in range(maxsteps):
        # Step 1
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i,i]
            sz = (g**i * abs(h)) >> (prec*(i-1))
            if sz > szmax:
                m = i
                szmax = sz
        # Step 2
        y[m], y[m+1] = y[m+1], y[m]
        tmp = {}
        for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i]
        for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i]
        for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m]
        # Step 3
        if m <= n - 2:
            t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m,m] << prec) // t0
            t2 = (H[m,m+1] << prec) // t0
            for i in xrange(m, n+1):
                t3 = H[i,m]
                t4 = H[i,m+1]
                H[i,m] = (t1*t3+t2*t4) >> prec
                H[i,m+1] = (-t2*t3+t1*t4) >> prec
        # Step 4
        for i in xrange(m+1, n+1):
            for j in xrange(min(i-1, m+1), 0, -1):
                try:
                    t = round_fixed((H[i,j] << prec)//H[j,j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t*y[i]) >> prec)
                for k in xrange(1, j+1):
                    H[i,k] = H[i,k] - (t*H[j,k] >> prec)
                for k in xrange(1, n+1):
                    A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                    B[k,j] = B[k,j] + (t*B[k,i] >> prec)
        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step. TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.
        best_err = maxcoeff<<prec
        for i in xrange(1, n+1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
                range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print "FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, nstr(err / mpf(2)**prec, 1))
                    return vec
            best_err = min(err, best_err)
        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2*prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = inf
        if verbose:
            print "%i/%i:  Error: %8s   Norm: %s" % \
                (REP, maxsteps, nstr(best_err / mpf(2)**prec, 1), norm)
        if norm >= maxcoeff:
            break
    if verbose:
        print "CANCELLING after step %i/%i." % (REP, maxsteps)
        print "Could not find an integer relation. Norm bound: %s" % norm
    return None
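A relation returned by pslq is easy to sanity-check by dotting it with the input vector. This sketch assumes the public mpmath.fdot helper and reuses the Machin example from the docstring above:

    from mpmath import mp, pslq, acot, pi, fdot

    mp.dps = 30
    xs = [pi/4, acot(5), acot(239)]
    rel = pslq(xs)
    print(rel)             # [1, -4, 1], as in the doctest above
    print(fdot(rel, xs))   # ~0, i.e. pi/4 - 4*acot(5) + acot(239) = 0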
Example #11
def sumem(f, interval, N=None, integral=None, fderiv=None, error=False,
    verbose=False):
    """
    Sum f(k) for k = a, a+1, ..., b where [a, b] = interval,
    using Euler-Maclaurin summation. This algorithm is efficient
    for slowly convergent nonoscillatory sums; the essential condition
    is that f must be analytic. The method relies on approximating the
    sum by an integral, so f must be smooth and well-behaved enough
    to be integrated numerically.

    With error=True, a tuple (s, err) is returned where s is the
    calculated sum and err is the estimated magnitude of the error.
    With verbose=True, detailed information about progress and errors
    is printed.

        >>> mp.dps = 15
        >>> s, err = sumem(lambda n: 1/n**2, 1, inf, error=True)
        >>> print s
        1.64493406684823
        >>> print pi**2 / 6
        1.64493406684823
        >>> nprint(err)
        2.22045e-16

    N is the number of terms to compute directly before using the
    Euler-Maclaurin formula to approximate the tail. It must be set
    high enough; often roughly N ~ dps is the right size.

    High-order derivatives of f are also needed. By default, these
    are computed using numerical integration, which is the most
    expensive part of the calculation. The default method assumes
    that all poles of f are located close to the origin. A custom
    nth derivative function fderiv(x, n) can be provided as a
    keyword parameter.

    This is much more efficient:

        >>> f = lambda n: 1/n**2
        >>> fp = lambda x, n: (-1)**n * factorial(n+1) * x**(-2-n)
        >>> mp.dps = 50
        >>> print sumem(lambda n: 1/n**2, 1, inf, fderiv=fp)
        1.6449340668482264364724151666460251892189499012068
        >>> print pi**2 / 6
        1.6449340668482264364724151666460251892189499012068

    If b = inf, f and its derivatives are all assumed to vanish
    at infinity. It is assumed that a is finite, so doubly
    infinite sums cannot be evaluated directly.
    """
    a, b = AS_POINTS(interval)
    if N is None:
        N = 3*mp.dps + 20
    a, b, N = mpf(a), mpf(b), mpf(N)
    infinite = (b == inf)
    weps = eps * 2**8
    if verbose:
        print "Summing f(k) from k = %i to %i" % (a, a+N-1)
    S = sum(f(mpf(k)) for k in xrange(a, a+N))
    if integral is None:
        if verbose:
            print "Integrating f(x) from x = %i to %s" % (a+N, nstr(b))
        I, ierr = quadts(f, [a+N, b], error=1)
        # XXX: hack for relative error
        ierr /= abs(I)
    else:
        I, ierr = integral(a+N, b), mpf(0)
    # There is little hope if the tail cannot be integrated
    # accurately. Estimate magnitude of tail as the error.
    if ierr > weps:
        if verbose:
            print "Failed to converge to target accuracy (integration failed)"
        if error:
            return S+I, abs(I) + ierr
        else:
            return S+I
    if infinite:
        C = f(a+N) / 2
    else:
        C = (f(a+N) + f(b)) / 2
    # Default (inefficient) approach for derivatives
    if not fderiv:
        fderiv = lambda x, n: diffc(f, x, n, radius=N*0.75)
    k = 1
    prev = 0
    if verbose:
        print "Summing tail"
    fac = 2
    while 1:
        if infinite:
            D = fderiv(a+N, 2*k-1)
        else:
            D = fderiv(a+N, 2*k-1) - fderiv(b, 2*k-1)
        # B(2*k) / fac(2*k)
        term = bernoulli(2*k) / fac * D
        mag = abs(term)
        if verbose:
            print "term", k, "magnitude =", nstr(mag)
        # Error can be estimated as the magnitude of the smallest term
        if k >= 2:
            if mag < weps:
                if verbose:
                    print "Converged to target accuracy"
                res, err = I + C + S, eps * 2**15
                break
            if mag > abs(prev):
                if verbose:
                    print "Failed to converge to target accuracy (N too low)"
                res, err = I + C + S, abs(term)
                break
        S -= term
        k += 1
        fac *= (2*k) * (2*k-1)
        prev = term
    if isinstance(res, mpc) and not isinstance(I, mpc):
        res, err = res.real, err
    if error:
        return res, err
    else:
        return res
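A minimal usage sketch, assuming the current public mpmath.sumem and mpmath.nsum (both take the interval as a list): Euler-Maclaurin summation of zeta(2), checked against an independent summation and the closed form.

    from mpmath import mp, sumem, nsum, pi, inf

    mp.dps = 30
    print(sumem(lambda n: 1/n**2, [1, inf]))   # Euler-Maclaurin value
    print(nsum(lambda n: 1/n**2, [1, inf]))    # independent check
    print(pi**2/6)                             # exact closed form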
Example #12
    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is actually infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        nodes = []

        extra = 20
        mp.prec += extra
        eps = ldexp(1, -prec-10)
        pi4 = pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous degree

        # We define degree 1 to include the "degree 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ldexp(1, -degree)
        if degree == 1:
            #nodes.append((mpf(0), pi4))
            #nodes.append((-mpf(0), pi4))
            nodes.append((mpf(0), pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = exp(h)
        urdelta = 1/udelta

        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= eps:
                break

            nodes.append((x, w))
            nodes.append((NEG(x), w))

            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print "Calculating nodes:", nstr(-log(diff, 10) / prec)

        mp.prec -= extra
        return nodes
Example #13
    def calc_nodes(cls, prec, level, verbose=False):
        """
        The abscissas and weights for tanh-sinh quadrature are given by

            x[k] = tanh(pi/2 * sinh(t))
            w[k] = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

        Here t varies uniformly with k: t0, t0+h, t0+2*h, ...

        The list of nodes is actually infinite, but the weights
        die off so rapidly that only a few are needed.
        """
        nodes = []

        extra = 20
        mp.prec += extra
        eps = ldexp(1, -prec - 10)
        pi4 = pi / 4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous level

        # We define level 1 to include the "level 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ldexp(1, -level)
        if level == 1:
            nodes.append((mpf(0), pi4))
            h = t0
        else:
            h = t0 * 2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = exp(h)
        urdelta = 1 / udelta

        for k in xrange(0, 20 * 2**level + 1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = exp(a - b)
            d = 1 / c
            co = (c + d) / 2
            si = (c - d) / 2
            x = si / co
            w = (a + b) / co**2
            diff = abs(x - 1)
            if diff <= eps:
                break

            nodes.append((x, w))
            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print "Calculating nodes:", nstr(-log(diff, 10) / prec)

        mp.prec -= extra
        return nodes