def __gap_params__(self, data, beta, gamma, theta, verbose=False):
        # Meijer G-functions; the names encode the orders G^{m,n}_{p,q} as G<mnpq>.
        def G1112A(z): return mp.meijerg([[1 - 1 / gamma], []], [[0], [-1 / gamma]], (z ** gamma) / theta)
        def G1112B(z): return mp.meijerg([[0], []], [[0], [1]], (z ** gamma) / theta)
        def G1223(z): return mp.meijerg([[0, 1 - 1 / gamma], []], [[0], [-1 / gamma, 1]], (z ** gamma) / theta)
        def G1001(z): return mp.meijerg([[], []], [[0], []], (z ** gamma) / theta)
        def G1334(z): return mp.meijerg([[0, 1 - 1 / gamma, 1 - 1 / gamma], []], [[0], [-1 / gamma, -1 / gamma, 1]], (z ** gamma) / theta)
        def G1445(z): return mp.meijerg([[0, 1 - 1 / gamma, 1 - 1 / gamma, 1 - 1 / gamma], []], [[0], [-1 / gamma, -1 / gamma, -1 / gamma, 1]], (z ** gamma) / theta)

        # gap beta
        si = 0
        for Ti in data.censorTimes:
            si += Ti * (1 - G1112A(Ti) / gamma)
        gap_beta = -si + len(data.allFailures) / beta

        # gap gamma
        si = 0
        for Ti in data.censorTimes:
            si += Ti * (mp.ln(Ti) * G1334(Ti) - G1445(Ti) / gamma)
        sij = 0
        for tij in data.allFailures:
            sij += (mp.ln(tij) * G1112B(tij)) / (1 - G1001(tij))
        gap_gamma = (beta / gamma) * si - sij

        # gap theta
        si = 0
        for Ti in data.censorTimes:
            si += Ti * G1223(Ti)
        sij = 0
        for tij in data.allFailures:
            sij += G1112B(tij) / (1 - G1001(tij))
        gap_theta = (beta * theta / gamma) * si - theta * sij

        if verbose:
            print('  gap beta  = %g' % gap_beta)
            print('  gap gamma = %g' % gap_gamma)
            print('  gap theta = %g' % gap_theta)

        return np.array([gap_beta, gap_gamma, gap_theta], dtype=float)
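A minimal driver sketch (hypothetical, not part of the original class: `model` is assumed to be an instance of the class defining __gap_params__, `data` to expose `censorTimes` and `allFailures` as above, and the step size is purely illustrative) that ascends the likelihood along these gradient components:

import numpy as np

# `model` and `data` are assumed to exist as described above (hypothetical).
params = np.array([1.0, 1.0, 1.0])  # initial (beta, gamma, theta) guess
lr = 1e-4                           # hypothetical step size
for _ in range(200):
    grad = model.__gap_params__(data, *params)  # (d/dbeta, d/dgamma, d/dtheta)
    params = params + lr * grad                 # simple gradient ascent
    if np.linalg.norm(grad) < 1e-8:             # stop near a stationary point
        break
beta, gamma, theta = params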
Example #2

def __gap_params__(self, data, beta, gamma, theta, verbose=False):
        def G1112A(z): return mp.meijerg([[1-1/gamma,0], []], [[0], [-1/gamma]], (z ** gamma) / theta)
        def G1333(z): return mp.meijerg([[0,1-1/gamma,0], []], [[0], [-1/gamma,1]], (z ** gamma) / theta)
        def G1112B(z): return mp.meijerg([[0, 0], []], [[0], [1]], (z ** gamma) / theta)
        def G1111(z): return mp.meijerg([[0], []], [[0], []], (z ** gamma) / theta)
        def G1444(z): return mp.meijerg([[0,1-1/gamma,1-1/gamma,0], []], [[0], [-1/gamma,-1/gamma,1]], (z ** gamma) / theta)
        def G1555(z): return mp.meijerg([[0,1-1/gamma,1-1/gamma,1-1/gamma,0], []], [[0], [-1/gamma,-1/gamma,-1/gamma,1]], (z ** gamma) / theta)

        Tij = data.allFailures
        Ti = data.censorTimes

        # Precompute the shared ratio G1112B/(1 - G1111) once per failure time;
        # the arbitrary-precision Meijer G evaluations dominate the runtime.
        assert isinstance(Tij, np.ndarray)
        GTij = np.zeros(Tij.size, dtype=float)
        for k in range(Tij.size):
            GTij[k] = G1112B(Tij[k]) / (1 - G1111(Tij[k]))

        # gap beta
        si = 0
        for ti in Ti:
            si += ti * (1 - G1112A(ti) / gamma)
        gap_beta = -si + Tij.size / beta

        # gap gamma
        si = 0
        for ti in Ti:
            si += ti * (mp.ln(ti) * G1444(ti) - G1555(ti) / gamma)
        sij = np.sum(np.log(Tij) * GTij)
        gap_gamma = (beta / gamma) * si - sij

        # gap theta
        si = 0
        for ti in Ti:
            si += ti * G1333(ti)
        sij = np.sum(GTij)
        gap_theta = (beta * theta / gamma) * si - theta * sij

        if verbose:
            print('  gap beta  = %g' % gap_beta)
            print('  gap gamma = %g' % gap_gamma)
            print('  gap theta = %g' % gap_theta)

        return np.array([gap_beta, gap_gamma, gap_theta], dtype=float)
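Compared with the first variant above, this version evaluates the expensive ratio G1112B/(1 - G1111) only once per failure time and reuses it in both the gamma- and theta-components, rather than recomputing it inside each loop.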
Example #3
from mpmath import mp
import numpy


def tanh_sinh_lr(f_left, f_right, alpha, eps, max_steps=10):
    '''Integrate a function `f` between `a` and `b` with accuracy `eps`. The
    function `f` is given in terms of two functions

        * `f_left(s) = f(a + s)`, i.e., `f` shifted such that
          `f_left(0) = f(a)`, `f_left(b-a) = f(b)`,

        * `f_right(s) = f(b - s)`, i.e., `f` shifted and reflected such that
          `f_right(0) = f(b)`, `f_right(b-a) = f(a)`.

    Both are passed as dictionaries keyed by derivative order: key 0 holds the
    function itself; if keys 1 and 2 (first and second derivative) are also
    present, they are used for the derivative-based error estimate.

    Implemented are Bailey's enhancements plus a few more tricks.

    David H. Bailey, Karthik Jeyabalan, and Xiaoye S. Li,
    A comparison of three high-precision quadrature schemes,
    Experiment. Math., Volume 14, Issue 3 (2005), 317-329,
    <https://projecteuclid.org/euclid.em/1128371757>.

    David H. Bailey,
    Tanh-Sinh High-Precision Quadrature,
    2006,
    <http://www.davidhbailey.com/dhbpapers/dhb-tanh-sinh.pdf>.
    '''
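    # Example construction (hypothetical, assuming the derivatives are taken
    # with respect to `s`): to integrate f(x) = exp(x) over [a, b] with the
    # derivative-based error estimate, one could pass
    #
    #     f_left  = {k: (lambda s: mp.exp(a + s)) for k in (0, 1, 2)}
    #     f_right = {k: (lambda s, k=k: (-1)**k * mp.exp(b - s)) for k in (0, 1, 2)}
    #
    # since each s-derivative of f(b - s) flips the sign once.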
    num_digits = int(-mp.log10(eps) + 1)
    mp.dps = num_digits

    alpha2 = alpha / mp.mpf(2)

    # What's a good initial step size `h`?
    # The larger `h` is chosen, the fewer points will be part of the
    # evaluation. However, we don't want to choose the step size too large
    # since that means less accuracy for the quadrature overall. The idea would
    # then be to choose `h` such that it is just large enough for the first
    # tanh-sinh-step to contain only one point, the midpoint. The expression
    #
    #    j = mp.ln(-2/mp.pi * mp.lambertw(-tau/h/2, -1)) / h
    #
    # hence needs to be just smaller than 1. (Ideally, one would actually like to
    # get `j` from the full tanh-sinh formula, but the above approximation is
    # good enough.) One gets
    #
    #    0 = pi/2 * exp(h) - h - ln(h) - ln(pi/tau)
    #
    # for which there is no analytic solution. One can, however, approximate
    # it. Since pi/2 * exp(h) >> h >> ln(h) (for `h` large enough), one can
    # either forget about both h and ln(h) to get
    #
    #     h0 = ln(2/pi * ln(pi/tau))
    #
    # or just scratch ln(h) to get
    #
    #     h1 = ln(tau/pi) - W_{-1}(-tau/2).
    #
    # Both of these suggestions underestimate and `j` will be too large. An
    # approximation that overestimates is obtained by replacing `ln(h)` by `h`,
    #
    #     h2 = 1/2 - log(sqrt(pi/tau)) - W_{-1}(-sqrt(exp(1)*pi*tau) / 4).
    #
    # Application of Newton's method will improve all of these approximations
    # and will also always overestimate such that `j` won't exceed 1 in the
    # first step. Nice!
    # TODO since we're doing Newton iterations anyways, use a more accurate
    #      representation for j, and consequently for h
    h = _solve_expx_x_logx(eps**2, tol=1.0e-10)
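    # (A sketch of one possible Newton solve for this equation is given after
    # this function.)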

    last_error_estimate = None

    success = False
    for level in range(max_steps + 1):
        # We would like to calculate the weights until they are smaller than
        # tau, i.e.,
        #
        #     h * pi/2 * cosh(h*j) / cosh(pi/2 * sinh(h*j))**2 < tau.
        #
        # (TODO Newton on this expression to find j?)
        #
        # To streamline the computation, j is estimated in advance. The only
        # assumption we're making is that h*j>>1 such that exp(-h*j) can be
        # neglected. With this, the above becomes
        #
        #     tau > h * pi/2 * exp(h*j)/2 / cosh(pi/2 * exp(h*j)/2)**2
        #
        # and further
        #
        #     tau > h * pi * exp(h*j) / exp(pi/2 * exp(h*j)).
        #
        # Calling z = - pi/2 * exp(h*j), one gets
        #
        #     tau > -2*h*z * exp(z)
        #
        # This inequality is fulfilled exactly if z = W(-tau/h/2) with W being
        # the (-1)-branch of the Lambert-W function IF exp(1)*tau < 2*h (which
        # we can assume since `tau` will generally be small). We finally get
        #
        #     j > ln(-2/pi * W(-tau/h/2)) / h.
        #
        # We do require j to be positive, so -2/pi * W(-tau/h/2) > 1. This
        # translates to the slightly stricter requirement
        #
        #     tau * exp(pi/2) < pi * h,
        #
        # i.e., h needs to be about 1.531 times larger than tau (not only 1.359
        # times as the previous bound suggested).
        #
        # Note further that h*j is ever decreasing as h decreases.
        assert eps**2 * mp.exp(mp.pi / 2) < mp.pi * h
        j = int(mp.ln(-2 / mp.pi * mp.lambertw(-eps**2 / h / 2, -1)) / h)
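        # (By construction of the initial `h`, the right-hand side is close to
        # 1 on the first pass, consistent with the step-size derivation above;
        # each halving of `h` roughly doubles `j`.)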

        # At level 0, one only takes the midpoint; at all greater levels, every
        # other point. The value estimate is later combined with the estimate
        # from the previous level.
        if level == 0:
            t = [0]
        else:
            t = h * numpy.arange(1, j + 1, 2)

        sinh_t = mp.pi / 2 * numpy.array(list(map(mp.sinh, t)))
        cosh_t = mp.pi / 2 * numpy.array(list(map(mp.cosh, t)))
        cosh_sinh_t = numpy.array(list(map(mp.cosh, sinh_t)))

        # y = alpha/2 * (1 - x) with x = tanh(pi/2 * sinh(t)); rewriting
        # 1 - tanh(s) as exp(-s)/cosh(s) avoids cancellation for large s.
        exp_sinh_t = numpy.array(list(map(mp.exp, sinh_t)))

        y0 = alpha2 / exp_sinh_t / cosh_sinh_t
        y1 = -alpha2 * cosh_t / cosh_sinh_t**2

        weights = -h * y1

        fly = numpy.array([f_left[0](yy) for yy in y0])
        fry = numpy.array([f_right[0](yy) for yy in y0])

        lsummands = fly * weights
        rsummands = fry * weights

        # Perform the integration.
        if level == 0:
            # The root level only contains one node, the midpoint; function
            # values of f_left and f_right are equal here. Deliberately take
            # lsummands here.
            value_estimates = list(lsummands)
        else:
            value_estimates.append(
                # Take the estimate from the previous step with halved step
                # size and fill the gaps with the sum of the values of the
                # current step. (Halving `h` halves each old summand's factor
                # `h`, hence the division by 2.)
                value_estimates[-1] / 2 + mp.fsum(lsummands) +
                mp.fsum(rsummands))

        # error estimation
        if 1 in f_left and 2 in f_left:
            assert 1 in f_right and 2 in f_right
            error_estimate = _error_estimate1(h, sinh_t, cosh_t, cosh_sinh_t,
                                              y0, y1, fly, fry, f_left,
                                              f_right, alpha,
                                              last_error_estimate)
            last_error_estimate = error_estimate
        else:
            error_estimate = _error_estimate2(eps, value_estimates, lsummands,
                                              rsummands)

        if abs(error_estimate) < eps:
            success = True
            break

        h /= 2

    assert success
    return value_estimates[-1], error_estimate
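For reference, here is a minimal sketch of what `_solve_expx_x_logx` might look like (hypothetical; the actual helper is not shown in this example). It applies Newton's method to the step-size equation 0 = pi/2 * exp(h) - h - ln(h) - ln(pi/tau) derived in the comments above, with tau = eps**2 at the call site, starting from the underestimate h0 = ln(2/pi * ln(pi/tau)):

from mpmath import mp

def solve_expx_x_logx(tau, tol):
    # Newton iteration for 0 = pi/2 * exp(h) - h - ln(h) - ln(pi/tau).
    tau = mp.mpf(tau)
    c = mp.ln(mp.pi / tau)      # constant term ln(pi/tau)
    h = mp.ln(2 / mp.pi * c)    # initial guess h0 (an underestimate)
    while True:
        f = mp.pi / 2 * mp.exp(h) - h - mp.ln(h) - c
        fp = mp.pi / 2 * mp.exp(h) - 1 - 1 / h  # df/dh
        delta = f / fp
        h -= delta
        if abs(delta) < tol:
            return h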