Example #1
def qromberg(func, a, b, caller='caller', tolf=TWOMACHEPS, maxnsplits=16):
    """
    Romberg integration of a function of one variable over the interval [a, b]. 
    'caller' is the name of the program, method or function calling qromberg.
    'tolf' is the maximum allowed fractional difference between the last two 
    "tail" integrals on the T-table diagonal. 'maxnsplits' is the maximum 
    number of consecutive halvings of the subintervals.
    (cf. Davis-Rabinowitz and Dahlquist-Bjorck-Anderson).
    """

    span  =  b - a
    assert span >= 0.0, \
               "Integration limits must be in increasing order in qromberg!"
    assert is_nonneginteger(maxnsplits), \
           "Max number of splits must be a nonnegative integer in qromberg!"

    m  = 1; n  =  1
    summ       =  0.5 * (func(a)+func(b))
    intgrl     =  span * summ
    ttable     =  [[intgrl]]    # First entry into T table (a nested list)
    previntgrl =  (1.0 + 2.0*tolf)*intgrl  # Ensures intgrl != previntgrl initially
    adiff      = abs(intgrl-previntgrl)

    while (adiff > tolf*abs(intgrl)) and (m <= maxnsplits):
        n *= 2
        # We have made a computation for all lower powers-of-2 starting with 1. 
        # The below procedure sums up the new ordinates and then multiplies the 
        # sum with the width of the trapezoids.
        nhalved = n // 2   # Integer division
        h       = span / float(nhalved)
        x       = a + 0.5*h
        subsum  = 0.0
        for k in range(0, nhalved):
            subsum += func(x + k*h)
        summ += subsum
        ttable.append([])
        ttable[m].append(span * summ / n)

        # Interpolation Richardson style:
        for k in range(0, m):    
            aux = float(4**(k+1))
            y   = (aux*ttable[m][k] - ttable[m-1][k]) / (aux - 1.0)
            ttable[m].append(y)
        intgrl     = ttable[m][m]
        previntgrl = ttable[m-1][m-1]
        adiff      = abs(intgrl-previntgrl)
        m += 1

    if adiff > tolf*abs(intgrl):
        wtxt1 = "qromberg called by " + caller + " failed to converge.\n"
        wtxt2 = "abs(intgrl-previntgrl) = " + str(adiff)
        wtxt3 = " for " + str(maxnsplits) + " splits"
        warn(wtxt1+wtxt2+wtxt3)

    return intgrl
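
A minimal usage sketch, assuming qromberg and its module-level helpers (warn, is_nonneginteger, TWOMACHEPS) are in scope as above; the integrand and tolerance are illustrative only:

# Integrate sin(x) over [0, pi]; the exact value is 2.0.
from math import sin, pi

approx = qromberg(sin, 0.0, pi, caller='demo', tolf=1.0e-12)
print(approx)   # expected to agree with 2.0 to roughly the requested tolerance
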
Example #2
def boot_mednfrac(vector, centwin=0.90, confdeg=0.90, nsamples=None, \
                                            printns=False):
    """
    boot_mednfrac computes symmetric confidence limits for a median-fractile 
    estimate for confidence degree 'confdeg' using mednfrac and bootstrapping. 
    Lower and upper limits of 1) the median, 2) the lower fractile and 3) the 
    upper fractile are returned for confidence degree 'confdeg'. When the number 
    of bootstrap samples nsamples=None, a default of int(sqrt(_BOOTCARDINAL/length)) + 1
    will be used. For printns=True the number of bootstrap samples used is 
    printed to stdout.
        
    For more general bootstrap sampling needs the methods 'bootvector' and 
    'bootindexvector' of the RandomStructure class may be used as a basis.
    """

    assert 0.0 <= centwin and centwin <= 1.0, \
          "Fractile interval must be in [0.0, 1.0] in boot_mednfrac!"
    assert 0.0 <= confdeg and confdeg <= 1.0, \
          "Confidence degree must be in [0.0, 1.0] in boot_mednfrac!"

    length  = len(vector)

    if nsamples == None: nsamples = int(sqrt(_BOOTCARDINAL/length)) + 1

    if nsamples < 101:
        nsamples = 101
        wtxt1 = "Number of samples in the bootstrap in boot_mednfrac should\n"
        wtxt2 = "be at least 101 (101 samples will be used)"
        warn(wtxt1+wtxt2)


    median = []
    lower  = []
    upper  = []

    rstream = GeneralRandomStream()
    for k in range(0, nsamples):
        bootv = [vector[rstream.runif_int0N(length)] for v in vector]
        med, low, upp = mednfrac(bootv, centwin)
        median.append(med)
        lower.append(low)
        upper.append(upp)

    med, lowmed, uppmed  =  mednfrac(median, confdeg)
    med, lowlow, upplow  =  mednfrac(lower,  confdeg)
    med, lowupp, uppupp  =  mednfrac(upper,  confdeg)

    if printns and nsamples != None:
        print("(number of bootstrap samples used in boot_mednfrac = " + \
                                                     str(nsamples) + ")")

    return lowmed, uppmed, lowlow, upplow, lowupp, uppupp
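
An illustrative call, assuming boot_mednfrac and its library dependencies (mednfrac, GeneralRandomStream) are in scope as above; the data and the explicit nsamples are made up for the example:

data = [2.3, 4.1, 3.7, 5.0, 2.9, 4.4, 3.1, 3.8, 4.9, 2.5]

# 90 % central fractile window and 90 % confidence degree (the defaults):
lowmed, uppmed, lowlow, upplow, lowupp, uppupp = \
                boot_mednfrac(data, centwin=0.90, confdeg=0.90, nsamples=1000)
print("median conf. limits:        ", lowmed, uppmed)
print("lower fractile conf. limits:", lowlow, upplow)
print("upper fractile conf. limits:", lowupp, uppupp)
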
Example #3
    def _iterwarn(self, caller, t):

        wtext1 = "Newton iteration did not converge in " + caller + " for time"
        wtext2 = " = " + str(t) + ". Try changing tolerances or maxitn"
        warn(wtext1+wtext2)

    # end of _iterwarn

# ------------------------------------------------------------------------------

# end of StiffDynamics

# ------------------------------------------------------------------------------
Example #4
def boot_mednfrac(vector, centwin=0.90, confdeg=0.90, nsamples=None, \
                                            printns=False):
    """
    boot_mednfrac computes symmetric confidence limits for a median-fractile 
    estimate for confidence degree 'confdeg' using mednfrac and bootstrapping. 
    Lower and upper limits of 1) the median, 2) the lower fractile and 3) the 
    upper fractile are returned for confidence degree 'confdeg'. When the number 
    of bootstrap samples nsamples=None, a default of int(sqrt(_BOOTCARDINAL/length)) + 1
    will be used. For printns=True the number of bootstrap samples used is 
    printed to stdout.
        
    For more general bootstrap sampling needs the methods 'bootvector' and 
    'bootindexvector' of the RandomStructure class may be used as a basis.
    """

    assert 0.0 <= centwin and centwin <= 1.0, \
          "Fractile interval must be in [0.0, 1.0] in boot_mednfrac!"
    assert 0.0 <= confdeg and confdeg <= 1.0, \
          "Confidence degree must be in [0.0, 1.0] in boot_mednfrac!"

    length = len(vector)

    if nsamples == None: nsamples = int(sqrt(_BOOTCARDINAL / length)) + 1

    if nsamples < 101:
        nsamples = 101
        wtxt1 = "Number of samples in the bootstrap in boot_mednfrac should\n"
        wtxt2 = "be at least 101 (101 samples will be used)"
        warn(wtxt1 + wtxt2)

    median = []
    lower = []
    upper = []

    rstream = GeneralRandomStream()
    for k in range(0, nsamples):
        bootv = [vector[rstream.runif_int0N(length)] for v in vector]
        med, low, upp = mednfrac(bootv, centwin)
        median.append(med)
        lower.append(low)
        upper.append(upp)

    med, lowmed, uppmed = mednfrac(median, confdeg)
    med, lowlow, upplow = mednfrac(lower, confdeg)
    med, lowupp, uppupp = mednfrac(upper, confdeg)

    if printns and nsamples != None:
        print("(number of bootstrap samples used in boot_mednfrac = " + \
                                                     str(nsamples) + ")")

    return lowmed, uppmed, lowlow, upplow, lowupp, uppupp
Example #5
def boot_ratio(vnumer, vdenom, confdeg=0.90, nsamples=None, printns=False):
    """
    boot_ratio computes symmetric confidence limits for a ratio of a sum to 
    another sum for confidence degree 'confdeg' using bootstrapping. The 
    sums are defined by the two input sequences (lists/'d' arrays/tuples).
    Lower and upper limits are returned. When the number of bootstrap samples 
    nsamples=None, a default of int(sqrt(_BOOTCARDINAL/length)) + 1 
    will be used. For printns=True the number of bootstrap samples used 
    is printed to stdout.
        
    For more general bootstrap sampling needs the methods 'bootvector' and 
    'bootindexvector' of the RandomStructure class may be used as a basis.
    """

    length  = len(vnumer)
    assert len(vdenom) == length, \
           "lengths of numerator and denominator must be equal in boot_ratio!"

    if nsamples == None: nsamples = int(sqrt(_BOOTCARDINAL/length)) + 1

    if nsamples < 101:
        nsamples = 101
        wtxt1 = "Number of samples in the bootstrap in boot_ratio should be\n"
        wtxt2 = "at least 101 (101 samples will be used)"
        warn(wtxt1+wtxt2)

    ratio   = []
    rstream = GeneralRandomStream()
    for k in range(nsamples):
        sumd    = 0.0
        sumn    = 0.0
        indexv  = [rstream.runif_int0N(length) for n in vnumer]
        for i in indexv:
           sumn += vnumer[i]
           sumd += vdenom[i]

        rate = sumn / float(sumd)   # Safety first - input could be ranks...
        ratio.append(rate)

    median, conflow, confupp  =  mednfrac(ratio, confdeg)

    if length <= 1: confupp = conflow = None

    if printns and nsamples != None:
        print("(number of bootstrap samples used in boot_ratio = " +\
                                                 str(nsamples) + ")")

    return conflow, confupp
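
An illustrative call, assuming boot_ratio and its library dependencies (mednfrac, GeneralRandomStream) are in scope as above; the two sequences and nsamples are made up for the example:

numer = [12.0, 15.5,  9.8, 14.2, 11.1, 13.6, 10.4, 12.9]
denom = [20.0, 22.1, 18.5, 21.7, 19.3, 20.8, 18.9, 21.2]

conflow, confupp = boot_ratio(numer, denom, confdeg=0.90, nsamples=1000)
print("90 % confidence limits for sum(numer)/sum(denom):", conflow, confupp)
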
Example #6
def boot_ratio(vnumer, vdenom, confdeg=0.90, nsamples=None, printns=False):
    """
    boot_ratio computes symmetric confidence limits for a ratio of a sum to 
    another sum for confidence degree 'confdeg' using bootstrapping. The 
    sums are defined by the two input sequences (lists/'d' arrays/tuples).
    Lower and upper limits are returned. When the number of bootstrap samples 
    nsamples=None, a default of int(sqrt(_BOOTCARDINAL/length)) + 1 
    will be used. For printns=True the number of bootstrap samples used 
    is printed to stdout.
        
    For more general bootstrap sampling needs the methods 'bootvector' and 
    'bootindexvector' of the RandomStructure class may be used as a basis.
    """

    length = len(vnumer)
    assert len(vdenom) == length, \
           "lengths of numerator and denominator must be equal in boot_ratio!"

    if nsamples == None: nsamples = int(sqrt(_BOOTCARDINAL / length)) + 1

    if nsamples < 101:
        nsamples = 101
        wtxt1 = "Number of samples in the bootstrap in boot_ratio should be\n"
        wtxt2 = "at least 101 (101 samples will be used)"
        warn(wtxt1 + wtxt2)

    ratio = []
    rstream = GeneralRandomStream()
    for k in range(nsamples):
        sumd = 0.0
        sumn = 0.0
        indexv = [rstream.runif_int0N(length) for n in vnumer]
        for i in indexv:
            sumn += vnumer[i]
            sumd += vdenom[i]

        rate = sumn / float(sumd)  # Safety first - input could be ranks...
        ratio.append(rate)

    median, conflow, confupp = mednfrac(ratio, confdeg)

    if length <= 1: confupp = conflow = None

    if printns and nsamples != None:
        print("(number of bootstrap samples used in boot_ratio = " +\
                                                 str(nsamples) + ")")

    return conflow, confupp
Example #7
def _betaicf(a, b, y, tolf, itmax):
    # Auxiliary function with continued fractions expansion
    # for cbeta (cf Abramowitz & Stegun):

    apb = a + b
    ap1 = a + 1.0
    am1 = a - 1.0
    c = 1.0
    d = 1.0 - y * apb / ap1
    if abs(d) < MINFLOAT:
        d = MINFLOAT
    d = 1.0 / d
    h = d

    converged = False
    itmaxp1 = itmax + 1
    for k in range(1, itmaxp1):
        fk = float(k)
        tfk = fk + fk
        aa = fk * (b - fk) * y / ((am1 + tfk) * (a + tfk))
        d = 1.0 + aa * d
        if abs(d) < MINFLOAT:
            d = MINFLOAT
        c = 1.0 + aa / c
        if abs(c) < MINFLOAT:
            c = MINFLOAT
        d = 1.0 / d
        h *= d * c
        aa = -(a + fk) * (apb + fk) * y / ((a + tfk) * (ap1 + tfk))
        d = 1.0 + aa * d
        if abs(d) < MINFLOAT:
            d = MINFLOAT
        c = 1.0 + aa / c
        if abs(c) < MINFLOAT:
            c = MINFLOAT
        d = 1.0 / d
        dl = d * c
        h *= dl
        if abs(dl - 1.0) < tolf:
            converged = True
            break

    if not converged:
        warn("cbeta has not converged for itmax = " + str(itmax) + " and tolf = " + str(tolf))

    return h
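
A hedged sketch of how a continued-fraction helper like _betaicf is typically wrapped into the regularized incomplete beta function I_y(a, b) (cf. Abramowitz & Stegun 26.5.8 and the usual Lentz-style evaluation). The wrapper below is illustrative only and is NOT the library's cbeta; it merely shows the prefactor and the symmetry switch that keep the continued fraction in its fast-converging region:

from math import exp, log, lgamma

def incbeta_sketch(a, b, y, tolf=1.0e-12, itmax=128):
    # Regularized incomplete beta I_y(a, b) assembled around _betaicf
    if y <= 0.0: return 0.0
    if y >= 1.0: return 1.0
    lnfront = lgamma(a+b) - lgamma(a) - lgamma(b) + a*log(y) + b*log(1.0-y)
    front   = exp(lnfront)
    if y < (a + 1.0) / (a + b + 2.0):
        return front * _betaicf(a, b, y, tolf, itmax) / a
    else:   # use the symmetry I_y(a, b) = 1 - I_(1-y)(b, a)
        return 1.0 - front * _betaicf(b, a, 1.0-y, tolf, itmax) / b

print(incbeta_sketch(2.0, 3.0, 0.5))   # exact value is 0.6875
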
Example #8
def _betaicf(a, b, y, tolf, itmax):
    # Auxiliary function with continued fractions expansion
    # for cbeta (cf Abramowitz & Stegun):

    apb = a + b
    ap1 = a + 1.0
    am1 = a - 1.0
    c = 1.0
    d = 1.0 - y * apb / ap1
    if abs(d) < MINFLOAT: d = MINFLOAT
    d = 1.0 / d
    h = d

    converged = False
    itmaxp1 = itmax + 1
    for k in range(1, itmaxp1):
        fk = float(k)
        tfk = fk + fk
        aa = fk * (b - fk) * y / ((am1 + tfk) * (a + tfk))
        d = 1.0 + aa * d
        if abs(d) < MINFLOAT: d = MINFLOAT
        c = 1.0 + aa / c
        if abs(c) < MINFLOAT: c = MINFLOAT
        d = 1.0 / d
        h *= d * c
        aa = -(a + fk) * (apb + fk) * y / ((a + tfk) * (ap1 + tfk))
        d = 1.0 + aa * d
        if abs(d) < MINFLOAT: d = MINFLOAT
        c = 1.0 + aa / c
        if abs(c) < MINFLOAT: c = MINFLOAT
        d = 1.0 / d
        dl = d * c
        h *= dl
        if abs(dl - 1.0) < tolf:
            converged = True
            break

    if not converged:
        warn("cbeta has not converged for itmax = " + \
                     str(itmax) + " and tolf = " + str(tolf))

    return h
Example #9
def boot_aritmean(vector, confdeg=0.90, nsamples=None, printns=False):
    """
    boot_aritmean computes symmetric confidence limits around an arithmetic 
    mean estimate for confidence degree 'confdeg' using bootstrapping. Lower 
    and upper limits are returned. When the number of bootstrap samples 
    nsamples=None, a default of int(sqrt(_BOOTCARDINAL/length)) + 1 will 
    be used. For printns=True the number of bootstrap samples used is printed 
    to stdout.
    
    For more general bootstrap sampling needs the methods 'bootvector' and 
    'bootindexvector' of the RandomStructure class may be used as a basis.
    """

    assert 0.0 <= confdeg and confdeg <= 1.0, \
          "Confidence degree must be in [0.0, 1.0] in boot_aritmean!"

    length  = len(vector)

    if nsamples == None: nsamples = int(sqrt(_BOOTCARDINAL/length)) + 1

    if nsamples < 101:
        nsamples = 101
        wtxt1 = "Number of samples in the bootstrap in boot_aritmean should\n"
        wtxt2 = "be at least 101 (101 samples will be used)"
        warn(wtxt1+wtxt2)

    ratio   = []
    rstream = GeneralRandomStream()
    for k in range(0, nsamples):
        bootv = [vector[rstream.runif_int0N(length)] for v in vector]
        rate  = fsum(bootv) / length   # Will be a float...
        ratio.append(rate)

    median, conflow, confupp  =  mednfrac(ratio, confdeg)

    if length <= 1: confupp = conflow = None

    if printns and nsamples != None:
        print("(number of bootstrap samples used in boot_aritmean = " +\
                                                    str(nsamples) + ")")

    return conflow, confupp
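
An illustrative call, assuming boot_aritmean and its library dependencies (mednfrac, GeneralRandomStream, fsum) are in scope as above; the sample and nsamples are made up for the example:

sample = [1.2, 0.8, 1.5, 1.1, 0.9, 1.3, 1.0, 1.4, 0.7, 1.6]

conflow, confupp = boot_aritmean(sample, confdeg=0.95, nsamples=1000)
print("95 % bootstrap confidence limits for the mean:", conflow, confupp)
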
Example #10
def lusubs_imp(matrix, lower, upper, bvector, permlist, xvector, \
                                     tolf=SQRTMACHEPS, nitermax=4):
    """
    May be used to polish the result from lusubs (some of the necessary 
    checks are made in lusubs). 
    
    tolf is the maximum fractional difference between two consecutive sums
    of absolute values of the output vector, and nitermax is the maximum 
    number of improvement iterations (the procedure stops earlier if the 
    tolerance is met).
    """

    assert tolf >= 0.0, \
            "max fractional tolerance must not be negative in lusubs_imp!"

    assert is_posinteger(nitermax), \
            "max number of iterations must be a positive number in lusubs_imp!"

    ndim = len(bvector)

    sumx = fsum(abs(x) for x in xvector)
    converged = False
    for n in range(0, nitermax):
        resid = array('d', [])  # will get len = ndim
        for k in range(0, ndim):
            sdp = -bvector[k]
            for j in range(0, ndim):
                sdp += matrix[k][j] * xvector[j]
            resid.append(sdp)
        resid = lusubs(lower, upper, resid, permlist)
        for k in range(0, ndim):
            xvector[k] = xvector[k] - resid[k]
        sumn = fsum(abs(x) for x in xvector)
        if abs(sumn - sumx) < sumn * tolf:
            converged = True
            break
        sumx = sumn

    wtext = "lusubs_imp did not converge. Try changing tolerance or nitermax"
    if not converged: warn(wtext)

    return xvector
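
A hedged sketch of the intended workflow: decompose, solve, then polish with lusubs_imp. The decomposition routine name 'ludcmp' and its return convention (lower, upper, permlist) are assumptions made for illustration - neither it nor the exact calling convention of lusubs is documented in this listing:

matrix  = [[4.0, 1.0, 0.0],
           [1.0, 3.0, 1.0],
           [0.0, 1.0, 2.0]]
bvector = [5.0, 5.0, 3.0]

lower, upper, permlist = ludcmp(matrix)              # hypothetical companion routine
xvector = lusubs(lower, upper, bvector, permlist)    # first solution
xvector = lusubs_imp(matrix, lower, upper, bvector,  # iterative refinement
                     permlist, xvector)
print(list(xvector))
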
Example #11
def boot_aritmean(vector, confdeg=0.90, nsamples=None, printns=False):
    """
    boot_aritmean computes symmetric confidence limits around an arithmetic 
    mean estimate for confidence degree 'confdeg' using bootstrapping. Lower 
    and upper limits are returned. When the number of bootstrap samples 
    nsamples=None, a default of int(sqrt(_BOOTCARDINAL/length)) + 1 will 
    be used. For printns=True the number of bootstrap samples used is printed 
    to stdout.
    
    For more general bootstrap sampling needs the methods 'bootvector' and 
    'bootindexvector' of the RandomStructure class may be used as a basis.
    """

    assert 0.0 <= confdeg and confdeg <= 1.0, \
          "Confidence degree must be in [0.0, 1.0] in boot_aritmean!"

    length = len(vector)

    if nsamples == None: nsamples = int(sqrt(_BOOTCARDINAL / length)) + 1

    if nsamples < 101:
        nsamples = 101
        wtxt1 = "Number of samples in the bootstrap in boot_aritmean should\n"
        wtxt2 = "be at least 101 (101 samples will be used)"
        warn(wtxt1 + wtxt2)

    ratio = []
    rstream = GeneralRandomStream()
    for k in range(0, nsamples):
        bootv = [vector[rstream.runif_int0N(length)] for v in vector]
        rate = fsum(bootv) / length  # Will be a float...
        ratio.append(rate)

    median, conflow, confupp = mednfrac(ratio, confdeg)

    if length <= 1: confupp = conflow = None

    if printns and nsamples != None:
        print("(number of bootstrap samples used in boot_aritmean = " +\
                                                    str(nsamples) + ")")

    return conflow, confupp
Example #12
def lusubs_imp(matrix, lower, upper, bvector, permlist, xvector, \
                                     tolf=SQRTMACHEPS, nitermax=4):
    """
    May be used to polish the result from lusubs (some of the necessary 
    checks are made in lusubs). 
    
    tolf is the maximum fractional difference between two consecutive sums
    of absolute values of the output vector, and nitermax is the maximum 
    number of improvement iterations (the procedure stops earlier if the 
    tolerance is met).
    """

    assert tolf >= 0.0, \
            "max fractional tolerance must not be negative in lusubs_imp!"
    
    assert is_posinteger(nitermax), \
            "max number of iterations must be a positive number in lusubs_imp!"

    ndim = len(bvector)

    sumx      = fsum(abs(x) for x in xvector)
    converged = False
    for n in range(0, nitermax):
        resid  = array('d', [])   # will get len = ndim
        for k in range(0, ndim):
            sdp = -bvector[k]
            for j in range(0, ndim):
                sdp += matrix[k][j]*xvector[j]
            resid.append(sdp)
        resid = lusubs(lower, upper, resid, permlist)
        for k in range(0, ndim): xvector[k] = xvector[k] - resid[k]
        sumn = fsum(abs(x) for x in xvector)
        if abs(sumn-sumx) < sumn*tolf:
            converged = True
            break
        sumx = sumn

    wtext = "lusubs_imp did not converge. Try changing tolerance or nitermax"
    if not converged: warn(wtext)

    return xvector
Example #13
    def _checkinput(self, cll, t, tnext, tolf, tola, maxitn, imprv):
        """
        Used to check the values of the input parameters to the solver methods. 
        cll is the name of the present method (a string).
        """

        assert tnext > t, "time step must be positive in " + cll + "!"

        wtext1 = "tolerances smaller than machine epsilon are not recommended "
        wtext2 = "in " + cll + ". Machine epsilon is used instead"
        wtext  = wtext1 + wtext2
        if tolf < MACHEPS:
            tolf = MACHEPS
            warn(wtext)
        if tola < MACHEPS:
            tola = MACHEPS
            warn(wtext)
        assert is_posinteger(maxitn), \
                     "maxitn must be a positive integer in " + cll + "!"

        if not is_nonneginteger(imprv):
            imprv = 0
            wtext1  = "imprv must be a non-negative integer in "
            wtext2  = cll + "! imprv=0 is used instead"
            wtext   = wtext1 + wtext2
            warn(wtext)
Example #14
def ifactorial(n, integer=True):
    """
    Computation of a factorial, returning an integer. For integer=True a long, 
    exact integer is returned. For integer=False an integer obtained from 
    floating-point arithmetic based on the function lngamma is returned - 
    it is approximate but faster for large n. ERRCODE 
    (cf. misclib.numbers) is returned if a floating-point OverflowError occurs 
    and a warning is sent to stdout (no error can occur when integer=True).
    """

    assert is_nonneginteger(n), \
                 "the argument to ifactorial must be a non-negative integer!"

    if integer:
        fact = factorial(n)

    else:
        try:
            fact = safeint(round(exp(lngamma(n + 1.0))), 'ifactorial')
        except OverflowError:
            fact = ERRCODE
            warn("OverflowError in ifactorial - ERRCODE is returned")

    return fact
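
Illustrative calls, assuming ifactorial and its helpers (factorial, lngamma, safeint) are in scope as above:

print(ifactorial(10))                  # exact integer arithmetic: 3628800
print(ifactorial(10, integer=False))   # lngamma-based value; approximate but
                                       # normally identical for moderate n
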
Example #15
def ifactorial(n, integer=True):
    """
    Computation of a factorial, returning an integer. For integer=True a long, 
    exact integer is returned. For integer=False an integer obtained from 
    floating-point arithmetic based on the function lngamma is returned - 
    it is approximate but faster for large n. ERRCODE 
    (cf. misclib.numbers) is returned if a floating-point OverflowError occurs 
    and a warning is sent to stdout (no error can occur when integer=True).
    """

    assert is_nonneginteger(n), \
                 "the argument to ifactorial must be a non-negative integer!"

    if integer:
        fact = factorial(n)

    else:
        try:
            fact = safeint(round(exp(lngamma(n+1.0))), 'ifactorial')
        except OverflowError:
            fact = ERRCODE
            warn("OverflowError in ifactorial - ERRCODE is returned")

    return fact
Example #16
def zsteffen(func, x0, caller='caller', tolf=FOURMACHEPS, \
                         tola=SQRTTINY, maxniter=512):
    """
    Solves the equation func(x) = 0 using Steffensen's method. Steffensen's 
    method is a method with second order convergence (like Newton's method) 
    at the price of two function evaluations per iteration. It does NOT 
    require that the derivative be computed, as opposed to Newton's method.
        In practice zsteffen seems to require a rather clever initial guess 
    as to the root and/or a lot of iterations since it starts slowly when the 
    initial guess is too far away from the actual root, but it might anyway 
    converge in the end. It is clearly inferior to znewton and should be 
    avoided unless the derivative offers problems with numerics or speed. 
        If computation of the derivative is slow, a good idea is to use 
    znewton for a very small number of iterations to produce an initial guess 
    that can be used as an input to zsteffen (may at times be very efficient).
        And/or: Try the additional feature of taking the solution to the 
    practical limit (cf. below) rather than trying to guess the required 
    number of iterations! 

    For the theory behind Steffensen's method, consult Dahlquist-Bjorck-
    Anderson. 
    
    NB. The function always returns a value but a warning is printed 
    to stdout if the iteration procedure has not converged!

    Arguments:
    ----------
    func        Function having the proposed root as its argument

    x0          Initial guess as to the value of the root
   
    tolf        Desired fractional accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)

    tola        Desired max absolute difference of func(root) from zero 
                AND
                desired absolute accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)
                
    maxniter    Maximum number of iterations

    Additional feature: 
    -------------------
    For maxniter == 0 the solution will be taken beyond tolf and tola (if 
    possible) to the limit where either abs(func(x)) or abs(h) - where h is 
    the increment of the root estimate - has stopped shrinking. If convergence 
    is not reached after 2**16 (= 65,536) iterations, the procedure is halted 
    and the present estimate is returned anyway (a minimum of 16 iterations 
    will be carried out anyhow).
    
    Returns:
    ---------
    Final value of root
    """

    if tolf < MACHEPS:
        tolf  = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in zsteffen. Machine epsilon will be used instead!"
        warn(wtxt1+wtxt2)

    if tola < 0.0:
        tola  = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zsteffen. 0.0 (zero) will be used instead!"
        warn(wtxt1+wtxt2)

    assert is_nonneginteger(maxniter), \
      "Maximum number of iterations must be a non-negative integer in zsteffen!"

    MAXMAX   = 2**16
    MINNITER = 16

    if maxniter == 0:
        limit  = True
        maxnit = MAXMAX
    else:
        limit  = False
        maxnit = maxniter

    x  = x0
    f  = func(x)
    if f == 0.0: return x
    af = abs(f)
    if not limit:
        if af < tola: return x
    g  = (func(x+f) - f) / f
    h  = f/g
    x -= h
    ah = abs(h)
    if not limit:
        if ah < tolf*abs(x) + tola: return x

    niter = 0
    while True:
        niter +=  1
        afprev = af
        ahprev = ah
        f  = func(x)
        if f == 0.0: return x
        af = abs(f)
        if limit:
            if af < tola and niter >= MINNITER and af >= afprev: return x
        else:
            if af < tola: return x
        g  = (func(x+f) - f) / f
        h  = f/g
        x -= h
        ah = abs(h)
        if limit:
            if ah < tolf*abs(x) + tola and niter >= MINNITER and ah >= ahprev:
                return x
        else:
            if ah < tolf*abs(x) + tola: return x
        if niter >= maxnit:
            break
            

    wtxt1 = str(maxnit) + " iterations not sufficient in zsteffen called by "
    wtxt2 = caller + ". func(x) = " + str(f) + " for x = " + str(x)
    warn(wtxt1+wtxt2)
    return x
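
An illustrative call, assuming zsteffen and its module-level constants are in scope as above; the equation is made up for the example:

# Solve x**3 - x - 2 = 0, which has a single real root near x = 1.5214.
def f(x):
    return x**3 - x - 2.0

root = zsteffen(f, 1.5, caller='demo')
print(root, f(root))
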
Example #17
def znewton(fifi2fid, x0, caller='caller', tolf=FOURMACHEPS, \
                            tola=SQRTTINY, maxniter=64):
    """
    Solves the equation fi(x) = 0 using the Newton-Raphson algorithm. 
    
    NB. The function always returns a value but a warning is printed 
    to stdout if the iteration procedure has not converged!
    
    Convergence is fast for the Newton algorithm - at the price of having to 
    compute the derivative of the function. Convergence cannot be guaranteed 
    for all functions and/or initial guesses, either...

    Arguments:
    ----------
    fifi2fid    Function having the desired root as its argument and 
                1: the value of fi, AND
                2: the value of the ratio of its value to the 
                value of its derivative given that root as its 
                outputs, in that order

    x0          Initial guess as to the value of the root
   
    tolf        Desired fractional accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)

    tola        Desired max absolute difference of fi(root) from zero 
                AND
                desired absolute accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)
                
    maxniter    Maximum number of iterations

    Additional feature:
    -------------------
    For maxniter == 0 the solution will be taken beyond tolf and tola (if 
    possible) to the limit where either abs(fi(x)) or abs(fi(x)/fid(x)) has 
    stopped shrinking. If convergence is not reached after 2048 iterations, 
    the procedure is halted and the present estimate is returned anyway (a 
    minimum of 8 iterations will be carried out anyhow).

    Returns:
    ---------
    Final value of root
    """

    if tolf < MACHEPS:
        tolf  = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in znewton. Machine epsilon will be used instead!"
        warn(wtxt1+wtxt2)

    if tola < 0.0:
        tola  = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in znewton. 0.0 (zero) will be used instead!"
        warn(wtxt1+wtxt2)

    assert is_nonneginteger(maxniter), \
       "Maximum number of iterations must be a non-negative integer in znewton!"

    MAXMAX   = 2048
    MINNITER =    8

    if maxniter == 0:
        limit  = True
        maxnit = MAXMAX
    else:
        limit  = False
        maxnit = maxniter

    x  = x0
    fi, fi2fid  = fifi2fid(x)
    if fi == 0.0: return x
    af = abs(fi)
    if not limit: 
        if af < tola: return x
    x -= fi2fid
    ah = abs(fi2fid)
    if not limit:
        if ah < tolf*abs(x) + tola: return x

    niter = 0
    while True:
        niter +=  1
        afprev = af
        ahprev = ah
        fi, fi2fid  = fifi2fid(x)
        if fi == 0.0: return x
        af = abs(fi)
        if limit:
            if af < tola and niter >= MINNITER and af >= afprev: return x
        else:
            if af < tola: return x
        x -= fi2fid
        ah = abs(fi2fid)
        if limit:
            if ah < tolf*abs(x) + tola and niter >= MINNITER and ah >= ahprev:
                return x
        else:
            if ah < tolf*abs(x) + tola: return x
        if niter >= maxnit:
            break

    wtxt1 = str(maxnit) + " iterations not sufficient in znewton called by "
    wtxt2 = caller + ". fi(x) = " + str(fi) + " for x = " + str(x)
    warn(wtxt1+wtxt2)
    return x
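
An illustrative call, assuming znewton and its module-level constants are in scope as above. Note that fifi2fid must return both fi(x) and fi(x)/fi'(x); here fi(x) = x*x - 2.0:

def fifi2fid(x):
    fi  = x*x - 2.0
    fid = 2.0*x
    return fi, fi/fid

root = znewton(fifi2fid, 1.0, caller='demo')
print(root)   # expected close to sqrt(2) = 1.41421356...
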
Example #18
def zbrent(func, x1, x2, caller='caller', tolf=FOURMACHEPS, \
                         tola=SQRTTINY, maxniter=128, bracket=False):
    """
    Solves the equation func(x) = 0 on [x1, x2] using a variant of Richard 
    Brent's algorithm (more like the "ZEROIN" of Forsythe-Malcolm-Moler). 
    
    NB. The function always returns a value but a warning is printed to stdout 
    if the iteration procedure has not converged! Cf. comment below regarding 
    convergence! 
    
    Arguments:
    ----------
    func      Function having the proposed root as its argument

    x1        Lower search limit (root must be known to be >= x1 unless 
              prior bracketing is used)
    
    x2        Upper search limit (root must be known to be <= x2 unless 
              prior bracketing is used)

    tolf      Desired fractional accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola). tolf 
              should not be < 4.0*machine epsilon since this may inhibit 
              convergence!

    tola      Desired absolute accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola)

    maxniter  Maximum number of iterations

    bracket   If True, x1 and x2 are used in an initial bracketing before 
              solving

    Returns:
    ---------
    Final value of root

    This algorithm is claimed to guarantee convergence within about 
    (log2((b-a)/tol))**2 function evaluations, which is more demanding 
    than bisection. For instance: b-a = 1.0 and tol = 1.8e-12 is guaranteed 
    to converge with about 1,500 evaluations. It normally converges with 
    fewer ITERATIONS, however, and for reasonably "smooth and well-behaved" 
    functions it will be on the average more efficient and accurate than 
    bisection. For details on the algorithm see Forsythe-Malcolm-Moler, 
    as well as Brent, R.P.; "An algorithm with guaranteed convergence 
    for finding a zero of a function", The Computer Journal 14(4), 
    pp. 422-425, 1971.
    """

    if tolf < FOURMACHEPS:
        tolf  = FOURMACHEPS
        wtxt1 = "Fractional tol. less than 4.0*machine epsilon may prevent "
        wtxt2 = "convergence in zbrent. 4.0*macheps will be used instead!"
        warn(wtxt1+wtxt2)

    if tola < 0.0:
        tola  = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zbrent. 0.0 (zero) will be used instead!"
        warn(wtxt1+wtxt2)

    assert is_posinteger(maxniter), \
            "Maximum number of iterations must be a positive integer in zbrent!"

    if bracket: x1, x2 = bracketzero(func, x1, x2, caller, maxniter)

    assert x2 > x1, "Bounds must be given with the lower bound first in zbrent!"


    a       = x1
    b       = x2
    c       = x2 ###############################  NOT IN REFERENCES !!!!!
    fa      = func(x1)
    if fa == 0.0: return x1
    fb      = func(x2)
    if fb == 0.0: return x2
    if fsign(fa) == fsign(fb):
        x1, x2 = bracketzero(func, x1, x2, caller, maxniter)
        wtxt1 = "Starting points must be on opposite sides of the root in "
        wtxt2 = "zbrent. Bracketing will be used to find an appropriate span!"
        warn(wtxt1+wtxt2)
    fc      = fb

    niter = 0
    while niter <= maxniter:
        niter += 1

        if fsign(fb) == fsign(fc):
            c  = a
            fc = fa
            d  = b - a
            e  = d

        if abs(fc) < abs(fb):
            a  = b
            b  = c
            c  = a
            fa = fb
            fb = fc
            fc = fa

        tol  = tolf*abs(b) + tola
        tol1 = 0.5 * tol
        xm   = 0.5 * (c-b)

        if abs(xm) <= tol1 or fb == 0.0: return b

        if abs(e) >= tol1 and abs(fa) > abs(fb):
            s = fb / fa
            if a == c:
                p = 2.0 * xm * s
                q = 1.0 - s
            else:
                q = fa / fc
                r = fb / fc
                p = s * (2.0*xm*q*(q-r)-(b-a)*(r-1.0))
                q = (q-1.0) * (r-1.0) * (s-1.0)

            if p > 0.0: q = -q
            p = abs(p)
            if 2.0*p < min(3.0*xm*q-abs(tol1*q), abs(e*q)):
                e = d
                d = p / q
            else:
                d = xm
                e = d

        else:
            d = xm
            e = d

        a  = b
        fa = fb

        if abs(d) > tol1:
            b = b + d
        else:
            #b = b + sign(tol1, xm)
            if   xm < 0.0: b = b - tol1
            elif xm > 0.0: b = b + tol1
            else:          b = b

        fb = func(b)

    else:
        wtxt1 = str(maxniter) + " iterations not sufficient in zbrent called by"
        wtxt2 = " " + caller + ". func(x) = " + str(fb) + " for x = " + str(b)
        warn(wtxt1+wtxt2)
        return b
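
An illustrative call, assuming zbrent and its module-level helpers (fsign, bracketzero) are in scope as above; the function and bracket are made up for the example:

# Find the root of cos(x) - x on [0.0, 1.0] (approximately 0.739085).
from math import cos

root = zbrent(lambda x: cos(x) - x, 0.0, 1.0, caller='demo')
print(root)
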
Example #19
def zbisect(func, x1, x2, caller='caller', tolf=FOURMACHEPS, \
                          tola=SQRTTINY, maxniter=256, bracket=False):
    """
    Solves the equation func(x) = 0 on [x1, x2] using a bisection algorithm. 
    zbisect converges slower than zbrent in most cases, but it might be faster 
    in some cases!
    
    NB. The function always returns a value but a warning is printed to stdout 
    if the iteration procedure has not converged! Cf. comment below regarding 
    convergence!

    Arguments:
    ----------
    func      Function having the proposed root as its argument
    
    x1        Lower search limit (root must be known to be >= x1 unless 
              prior bracketing is used)
    
    x2        Upper search limit (root must be known to be <= x2 unless 
              prior bracketing is used)
    
    tolf      Desired fractional accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola)

    tola      Desired absolute accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola)
              AND
              desired max absolute difference of func(root) from zero

    maxniter  Maximum number of iterations

    bracket   If True, x1 and x2 are used in an initial bracketing before 
              solving 

    Returns:
    ---------
    Final value of root
    
    This algorithm needs on the average log2((b-a)/tol) function evaluations to 
    reach convergence. For instance: b-a = 1.0 and tol = 1.8e-12 will on the 
    average provide convergence in about 40 iterations. Bisection is "dead 
    certain" and will always converge if there is a root. It is likely to pass 
    the tolerances with no extra margin. If there is no root, it will converge 
    to a singularity if there is one...
    """

    if tolf < MACHEPS:
        tolf  = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in zbisect. Machine epsilon will be used instead!"
        warn(wtxt1+wtxt2)

    if tola < 0.0:
        tola  = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zbisect. 0.0 (zero) will be used instead!"
        warn(wtxt1+wtxt2)

    assert is_posinteger(maxniter), \
          "Maximum number of iterations must be a positive integer in zbisect!"

    if bracket: x1, x2 = bracketzero(func, x1, x2, caller, maxniter)

    assert x2 > x1, \
                  "Bounds must be given with the lower bound first in zbisect!"


    fmid = func(x2)
    if fmid == 0.0: return x2
    f    = func(x1)
    if f   ==  0.0: return x1
    if fsign(fmid) == fsign(f):
        x1, x2 = bracketzero(func, x1, x2, caller, maxniter)
        wtxt1 = "Starting points must be on opposite sides of the root in "
        wtxt2 = "zbisect. Bracketing will be used to find an appropriate span!"
        warn(wtxt1+wtxt2)

    if f < 0.0:
        root = x1
        h    = x2 - x1
    else:
        root = x2
        h    = x1 - x2
    
    niter = 0
    while niter <= maxniter:
        niter += 1
        h      = 0.5 * h
        xmid   = root + h
        fmid   = func(xmid)
        if abs(fmid) < tola: return xmid
        if fmid <= 0.0: root = xmid
        absh = abs(h)
        if absh < tolf*abs(root) + tola: return root
        
    else:
        wtxt1 = str(maxniter) + " it'ns not sufficient in zbisect called by "
        wtxt2 = caller + ".\nfunc(x) = " + str(fmid) + " for x = " + str(root)
        warn(wtxt1+wtxt2)
        return root
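
An illustrative call, assuming zbisect and its module-level helpers are in scope as above; the same equation as in the zbrent example, solved by plain bisection:

from math import cos

root = zbisect(lambda x: cos(x) - x, 0.0, 1.0, caller='demo')
print(root)   # approximately 0.739085
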
Example #20
def nelder_mead(objfunc, point0, spans, \
                trace=False, tolf=SQRTMACHEPS, tola=SQRTTINY, maxniter=256, \
                rho=1.0, xsi=2.0, gamma=0.5, sigma=0.5):
    """
    The Nelder & Mead downhill simplex method is designed to find the minimum 
    of an objective function that has a multi-dimensional input, (see for 
    instance Lagarias et al. (1998), "Convergence Properties of the Nelder-Mead 
    Simplex in Low Dimensions", SIAM J. Optim., Society for Industrial and 
    Applied Mathematics Vol. 9, No. 1, pp. 112-147 for details). The algorithm 
    is said to first have been presented by Nelder and Mead in Computer Journal,
    Vol. 7, pp. 308-313 (1965).

    The initial simplex is specified by an initial point (an array of 
    coordinates) plus an array of spans for the corresponding point 
    coordinates.

    For trace=True a trace is printed to stdout consisting of the present 
    number of iterations, the present lowest value of the objective function, 
    the absolute difference between the highest and lowest values of the 
    objective function, and the vertex (list of coordinates) that gives the 
    lowest value, i.e. the present "best" point.
    
    tolf is the fractional tolerance and tola the absolute tolerance on the 
    absolute difference between the highest and lowest values of the 
    objective function.

    maxniter is the maximum allowed number of iterations.

    rho, xsi, gamma and sigma are the parameters for reflection, expansion,
    contraction and shrinkage, respectively (cf. the references above).
    """

    # Check the input parameters
    assert is_nonneginteger(maxniter), \
       "max number of iterations must be a non-negative integer in nelder_mead!"
    if tolf < MACHEPS:
        tolf = MACHEPS
        wtext  = "fractional tolerance smaller than machine epsilon is not "
        wtext += "recommended in nelder_mead. Machine epsilon is used instead"
        warn(wtext)
    assert rho > 0.0, "rho must be positive in nelder_mead!"
    assert xsi > 1.0, "xsi must be > 1.0 in nelder_mead!"
    assert xsi > rho, "xsi must be > rho in nelder_mead!"
    assert 0.0 < gamma < 1.0, "gamma must be in (0.0, 1.0) in nelder_mead!"
    assert 0.0 < sigma < 1.0, "sigma must be in (0.0, 1.0) in nelder_mead!"
    assert tola >= 0.0, "absolute tolerance must be non-negative in nelder_mead!"

    # Prepare matrix of vertices
    ndim     = len(point0)
    assert len(spans) == ndim
    vertices = Matrix()
    vertices.append(array('d', list(point0)))
    ndimp1   = ndim + 1
    fndim    = float(ndim)
    for j in range(0, ndim): vertices.append(array('d', list(point0)))
    for j in range(0, ndim): vertices[j+1][j] += spans[j]

    # Prepare a few variants of parameters
    oneprho = 1.0 + rho

    # LOOP!!!!!!!!
    niter = 0
    while True:
        niter += 1
        if niter > maxniter:
            txt1 = "nelder_mead did not converge. Absolute error = "
            txt2 = str(abs(high-low)) + " for " + str(niter-1)
            txt3 = " iterations. Consider new tols or maxniter!"
            raise Error(txt1+txt2+txt3)
        # Compute the objective function values for the vertices
        flist = array('d', [])
        for k in range(0, ndimp1):
            fk = objfunc(vertices[k])
            flist.append(fk)

        # Establish the highest point, the next highest point and the lowest
        low   = flist[0]
        high  = nxhi = low
        ilow  = 0
        ihigh = 0
        for k in range(1, ndimp1):
            fk = flist[k]
            if fk > high:
                nxhi   = high
                high   = fk
                ihigh  = k
            elif fk < low:
                low  = fk
                ilow = k

        if trace: print(niter, low, abs(high-low), list(vertices[ilow]))
        if low < tola: tol = tola
        else:          tol = abs(low)*tolf
        if abs(high-low) < tol: return low, list(vertices[ilow])

        # Reflect the high point
        # First find a new vertex = the centroid of the non-max vertices
        cntr  = array('d', ndim*[float('nan')])
        newr  = array('d', ndim*[float('nan')])
        for j in range(0, ndim):
            xsum = 0.0
            for k in range(0, ndimp1):
                if k != ihigh:
                    xsum += vertices[k][j]
            cntr[j] = xsum/fndim
        # Then move from the centroid in an away-from-max direction
        for j in range(0, ndim):
            newr[j] = oneprho*cntr[j] - rho*vertices[ihigh][j]

        # Check the new vertex
        accepted = False
        phir = objfunc(newr)
        if low <= phir < nxhi:
            # Everything is OK!
            if trace: print("Reflection sufficient")
            vertices[ihigh] = newr
            phi             = phir
            accepted        = True
        elif phir < low:
            # Expand:
            if trace: print("Expansion")
            newe = array('d', ndim*[float('nan')])
            for j in range(0, ndim):
                newe[j] = cntr[j] + xsi*(newr[j]-cntr[j])
            phie = objfunc(newe)
            if phie < phir:
                vertices[ihigh] = newe
                phi             = phie
            else:
                vertices[ihigh] = newr
                phi             = phir
            accepted = True
        elif phir >= nxhi:
            # Contract
            if phir < high:
                # -outside:
                if trace: print("Outside contraction")
                newo = array('d', ndim*[float('nan')])
                for j in range(0, ndim):
                    newo[j] = cntr[j] + gamma*(newr[j]-cntr[j])
                phio = objfunc(newo)
                if phio <= phir:
                    vertices[ihigh] = newo
                    phi             = phio
                    accepted        = True
            else:
                # -inside:
                if trace: print("Inside contraction")
                newi = array('d', ndim*[float('nan')])
                for j in range(0, ndim):
                    newi[j] = cntr[j] - gamma*(cntr[j]-vertices[ihigh][j])
                phii = objfunc(newi)
                if phii <= high:
                    vertices[ihigh] = newi
                    phi             = phii
                    accepted        = True
        if not accepted:
            # Shrink:
            if trace: print("Shrinkage")
            for k in range(0, ndimp1):
                for j in range(0, ndim):
                    vertices[k][j] = vertices[ilow][j] + sigma*(vertices[k][j] -
                                                             vertices[ilow][j])

# end of nelder_mead

# ------------------------------------------------------------------------------
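
An illustrative call, assuming nelder_mead and its dependencies (Matrix, Error, the tolerance constants) are in scope as above; the objective function and starting simplex are made up for the example:

# Minimize a simple quadratic bowl with minimum value 3.0 at (1.0, -2.0).
def bowl(p):
    return (p[0] - 1.0)**2 + (p[1] + 2.0)**2 + 3.0

fmin, xmin = nelder_mead(bowl, [0.0, 0.0], [1.0, 1.0], maxniter=1024)
print(fmin, xmin)   # expected close to 3.0 at approximately [1.0, -2.0]
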
Example #21
def erfc1(x, tol=_EIGHTMACHEPS):
    """
    Computation of the complementary error function for real argument.
    Fractional error is estimated to be < 50*machine epsilon for abs(x) <= 1.5
    and < 1.e-8 elsewhere (erfc2 is called for abs(x) > 1.5 for numeric reasons).
    
    The function uses a power series expansion for arguments between -1.5 
    and +1.5 (cf. Abramowitz & Stegun) and continued fractions for all other 
    arguments (cf. A. Cuyt et al., "Continued Fractions for Special Functions: 
    Handbook and Software", Universiteit Antwerpen, where a slightly faster 
    converging expression than that of Abramowitz & Stegun's CF is presented. 
    Cuyt's "ER.20" is used here).
    """

    if tol < _EIGHTMACHEPS:
        tol = _EIGHTMACHEPS
        txt1 = "No use using tolerance < 8.0*machine epsilon in erfc1."
        txt2 = " 8.0*machine epsilon is used"
        warn(txt1+txt2)

    ax  = abs(x)
    xx  = x*x

    if ax <= _ERFC21:
        # Power series expansion (cf. Abramowitz & Stegun)
        k     = 0.0
        sign  = 1.0
        xpart = 1.0
        den1  = 1.0
        #den2  = 1.0
        #term  = sign*xpart/(den1*den2)
        #summ  = term
        summ   = 1.0
        c      = 0.0
        while True: # The Kahan summation proc. (cf. Dahlquist, Bjorck & Anderson)
            k     += 1.0
            summo  = summ
            sign   = -sign
            xpart *= xx
            den1  *= k
            den2   = 2.0*k + 1.0
            term   = sign*xpart/(den1*den2)
            y      = term + c
            t      = summ + y
            if fsign(y) == fsign(summ):
                f = (0.46*t-t) + t
                c = ((summ-f)-(t-f)) + y
            else:
                c = (summ-t) + y
            summ  = t
            if abs(summ-summo) < tol*abs(summ):
                summ += c
                break
        #r = 1.0 - (2.0*ax/SQRTPI)*summ
        r = 1.0 - (2.0*SQRTPIINV*ax)*summ

    else: return erfc2(x)
    """
        # Compute continued fractions:
        # Q = b0 + a1/(b1 + a2/(b2 + a3/(b3 + ......... where ak   
        # are numerator terms and where bk are denominator terms 
        # (and where a0 is always 0).
        # Here:
        # b0 = 0.0
        # a1 = 1.0
        # a2 = 0.5
        # a3 = 1.5
        # a4 = 2.0
        # b1 = b3 etc = x*x
        # b2 = b4 etx = 1.0
        # (cf. Cuyt et al.)

        #k   = 0.0
        bk  = 0.0
        Am1 = 1.0
        Bm1 = 0.0
        A0  = bk
        B0  = 1.0

        k   = 1.0
        bk  = xx
        ak  = 1.0
        Ap1 = bk*A0 + ak*Am1
        Bp1 = bk*B0 + ak*Bm1
        Q   = Ap1/Bp1
        Am1 = A0
        Bm1 = B0
        A0  = Ap1
        B0  = Bp1

        while True:
            k   += 1.0
            Qold = Q
            if is_eveninteger(k): bk = 1.0
            else:                 bk = xx
            ak   = 0.5 * (k-1.0)
            Ap1  = bk*A0 + ak*Am1
            Bp1  = bk*B0 + ak*Bm1
            Q    = Ap1/Bp1
            if abs(Q-Qold) < abs(Q)*tol:
                break
            Am1  = A0
            Bm1  = B0
            A0   = Ap1
            B0   = Bp1

        p  = exp(-xx)
        if p == 0.0: # Take a chance...
            #r = exp(-xx + log(ax*Q/SQRTPI))
            r = exp(-xx + log(SQRTPIINV*ax*Q))
        else:
            #r = ax * p * Q / SQRTPI
            r = SQRTPIINV * ax * p * Q"""

    if x < 0.0: r = 2.0 - r
    r = kept_within(0.0, r, 2.0)
    return r
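
An illustrative check, assuming erfc1 and its module-level constants and helpers (erfc2, fsign, kept_within) are in scope as above; the comparison against the standard library's math.erfc is only a sanity check:

import math

for x in (-2.0, -0.5, 0.0, 0.75, 1.5, 3.0):
    print(x, erfc1(x), math.erfc(x))   # the two value columns should agree closely
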
Example #22
def zsteffen(func, x0, caller='caller', tolf=FOURMACHEPS, \
                         tola=SQRTTINY, maxniter=512):
    """
    Solves the equation func(x) = 0 using Steffensen's method. Steffensen's 
    method is a method with second order convergence (like Newton's method) 
    at the price of two function evaluations per iteration. It does NOT 
    require that the derivative be computed, as opposed to Newton's method.
        In practice zsteffen seems to require a rather clever initial guess 
    as to the root and/or a lot of iterations since it starts slowly when the 
    initial guess is too far away from the actual root, but it might anyway 
    converge in the end. It is clearly inferior to znewton and should be 
    avoided unless the derivative offers problems with numerics or speed. 
        If computation of the derivative is slow, a good idea is to use 
    znewton for a very small number of iterations to produce an initial guess 
    that can be used as an input to zsteffen (may at times be very efficient).
        And/or: Try the additional feature of taking the solution to the 
    practical limit (cf. below) rather than trying to guess the required 
    number of iterations! 

    For the theory behind Steffensen's method, consult Dahlquist-Bjorck-
    Anderson. 
    
    NB. The function always returns a value but a warning is printed 
    to stdout if the iteration procedure has not converged!

    Arguments:
    ----------
    func        Function having the proposed root as its argument

    x0          Initial guess as to the value of the root
   
    tolf        Desired fractional accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)

    tola        Desired max absolute difference of func(root) from zero 
                AND
                desired absolute accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)
                
    maxniter    Maximum number of iterations

    Additional feature: 
    -------------------
    For maxniter == 0 the solution will be taken beyond tolf and tola (if 
    possible) to the limit where either abs(func(x)) or abs(h) - where h is 
    the increment of the root estimate - has stopped shrinking. If convergence 
    is not reached after 2**16 (= 65,536) iterations, the procedure is halted 
    and the present estimate is returned anyway (a minimum of 16 iterations 
    will be carried out anyhow).
    
    Returns:
    ---------
    Final value of root
    """

    if tolf < MACHEPS:
        tolf = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in zsteffen. Machine epsilon will be used instead!"
        warn(wtxt1 + wtxt2)

    if tola < 0.0:
        tola = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zsteffen. 0.0 (zero) will be used instead!"
        warn(wtxt1 + wtxt2)

    assert is_nonneginteger(maxniter), \
      "Maximum number of iterations must be a non-negative integer in zsteffen!"

    MAXMAX = 2**16
    MINNITER = 16

    if maxniter == 0:
        limit = True
        maxnit = MAXMAX
    else:
        limit = False
        maxnit = maxniter

    x = x0
    f = func(x)
    if f == 0.0: return x
    af = abs(f)
    if not limit:
        if af < tola: return x
    g = (func(x + f) - f) / f
    h = f / g
    x -= h
    ah = abs(h)
    if not limit:
        if ah < tolf * abs(x) + tola: return x

    niter = 0
    while True:
        niter += 1
        afprev = af
        ahprev = ah
        f = func(x)
        if f == 0.0: return x
        af = abs(f)
        if limit:
            if af < tola and niter >= MINNITER and af >= afprev: return x
        else:
            if af < tola: return x
        g = (func(x + f) - f) / f
        h = f / g
        x -= h
        ah = abs(h)
        if limit:
            if ah < tolf * abs(x) + tola and niter >= MINNITER and ah >= ahprev:
                return x
        else:
            if ah < tolf * abs(x) + tola: return x
        if niter >= maxnit:
            break

    wtxt1 = str(maxnit) + " iterations not sufficient in zsteffen called by "
    wtxt2 = caller + ". func(x) = " + str(f) + " for x = " + str(x)
    warn(wtxt1 + wtxt2)
    return x
Example #23
def znewton(fifi2fid, x0, caller='caller', tolf=FOURMACHEPS, \
                            tola=SQRTTINY, maxniter=64):
    """
    Solves the equation fi(x) = 0 using the Newton-Raphson algorithm. 
    
    NB. The function always returns a value but a warning is printed 
    to stdout if the iteration procedure has not converged!
    
    Convergence is fast for the Newton algorithm - at the price of having to 
    compute the derivative of the function. Convergence cannot be guaranteed 
    for all functions and/or initial guesses, either...

    Arguments:
    ----------
    fifi2fid    Function having the desired root as its argument and 
                1: the value of fi, AND
                2: the value of the ratio of its value to the 
                value of its derivative given that root as its 
                outputs, in that order

    x0          Initial guess as to the value of the root
   
    tolf        Desired fractional accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)

    tola        Desired max absolute difference of fi(root) from zero 
                AND
                desired absolute accuracy of root (a combination of absolute 
                and fractional will actually be used: tolf*abs(root) + tola)
                
    maxniter    Maximum number of iterations

    Additional feature:
    -------------------
    For maxniter == 0 the solution will be taken beyond tolf and tola (if 
    possible) to the limit where either abs(fi(x)) or abs(fi(x)/fid(x)) has 
    stopped shrinking. If convergence is not reached after 2048 iterations, 
    the procedure is halted and the present estimate is returned anyway (a 
    minimum of 8 iterations will be carried out anyhow).

    Returns:
    ---------
    Final value of root
    """

    if tolf < MACHEPS:
        tolf = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in znewton. Machine epsilon will be used instead!"
        warn(wtxt1 + wtxt2)

    if tola < 0.0:
        tola = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in znewton. 0.0 (zero) will be used instead!"
        warn(wtxt1 + wtxt2)

    assert is_nonneginteger(maxniter), \
       "Maximum number of iterations must be a non-negative integer in znewton!"

    MAXMAX = 2048
    MINNITER = 8

    if maxniter == 0:
        limit = True
        maxnit = MAXMAX
    else:
        limit = False
        maxnit = maxniter

    x = x0
    fi, fi2fid = fifi2fid(x)
    if fi == 0.0: return x
    af = abs(fi)
    if not limit:
        if af < tola: return x
    x -= fi2fid
    ah = abs(fi2fid)
    if not limit:
        if ah < tolf * abs(x) + tola: return x

    niter = 0
    while True:
        niter += 1
        afprev = af
        ahprev = ah
        fi, fi2fid = fifi2fid(x)
        if fi == 0.0: return x
        af = abs(fi)
        if limit:
            if af < tola and niter >= MINNITER and af >= afprev: return x
        else:
            if af < tola: return x
        x -= fi2fid
        ah = abs(fi2fid)
        if limit:
            if ah < tolf * abs(x) + tola and niter >= MINNITER and ah >= ahprev:
                return x
        else:
            if ah < tolf * abs(x) + tola: return x
        if niter >= maxnit:
            break

    wtxt1 = str(maxnit) + " iterations not sufficient in znewton called by "
    wtxt2 = caller + ". fi(x) = " + str(fi) + " for x = " + str(x)
    warn(wtxt1 + wtxt2)
    return x
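A minimal usage sketch (znewton above is assumed to be in scope; the helper 
name _fifi2fid and the caller tag 'demo' are illustrative only): the supplied 
callable returns both fi(x) and the Newton step fi(x)/fid(x), here for 
fi(x) = x**2 - 2, whose positive root is sqrt(2).

# Usage sketch: solve x**2 - 2 = 0 starting from x0 = 1.0
def _fifi2fid(x):
    fi  = x*x - 2.0           # function value fi(x)
    fid = 2.0*x               # derivative fid(x)
    return fi, fi/fid         # znewton expects (fi, fi/fid)

root = znewton(_fifi2fid, 1.0, caller='demo')
print(root)                   # approximately 1.4142135623731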
Example #24
0
def zbrent(func, x1, x2, caller='caller', tolf=FOURMACHEPS, \
                         tola=SQRTTINY, maxniter=128, bracket=False):
    """
    Solves the equation func(x) = 0 on [x1, x2] using a variant of Richard 
    Brent's algorithm (more like the "ZEROIN" of Forsythe-Malcolm-Moler). 
    
    NB. The function always returns a value but a warning is printed to stdout 
    if the iteration procedure has not converged! Cf. comment below regarding 
    convergence! 
    
    Arguments:
    ----------
    func      Function having the proposed root as its argument

    x1        Lower search limit (root must be known to be >= x1 unless 
              prior bracketing is used)
    
    x2        Upper search limit (root must be known to be <= x2 unless 
              prior bracketing is used)

    tolf      Desired fractional accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola). tolf 
              should not be < 4.0*machine epsilon since this may inhibit 
              convergence!

    tola      Desired absolute accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola)

    maxniter  Maximum number of iterations

    bracket   If True, x1 and x2 are used in an initial bracketing before 
              solving

    Returns:
    ---------
    Final value of root

    This algorithm is claimed to guarantee convergence within about 
    (log2((b-a)/tol))**2 function evaluations, which is more demanding 
    than bisection. For instance: b-a = 1.0 and tol = 1.8e-12 is guaranteed 
    to converge with about 1,500 evaluations. It normally converges with 
    fewer ITERATIONS, however, and for reasonably "smooth and well-behaved" 
    functions it will be on the average more efficient and accurate than 
    bisection. For details on the algorithm see Forsythe-Malcolm-Moler, 
    as well as Brent, R.P.; "An algorithm with guaranteed convergence 
    for finding a zero of a function", The Computer Journal 14(4), 
    pp. 422-425, 1971.
    """

    if tolf < FOURMACHEPS:
        tolf = FOURMACHEPS
        wtxt1 = "Fractional tol. less than 4.0*machine epsilon may prevent "
        wtxt2 = "convergence in zbrent. 4.0*macheps will be used instead!"
        warn(wtxt1 + wtxt2)

    if tola < 0.0:
        tola = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zbrent. 0.0 (zero) will be used instead!"
        warn(wtxt1 + wtxt2)

    assert is_posinteger(maxniter), \
            "Maximum number of iterations must be a positive integer in zbrent!"

    if bracket: x1, x2 = bracketzero(func, x1, x2, caller, maxniter)

    assert x2 > x1, "Bounds must be given with the lower bound first in zbrent!"

    a = x1
    b = x2
    c = x2  # NB: this initialization is not found in the cited references
    fa = func(x1)
    if fa == 0.0: return x1
    fb = func(x2)
    if fb == 0.0: return x2
    if fsign(fa) == fsign(fb):
        x1, x2 = bracketzero(func, x1, x2, caller, maxniter)
        wtxt1 = "Starting points must be on opposite sides of the root in "
        wtxt2 = "zbrent. Bracketing will be used to find an appropriate span!"
        warn(wtxt1 + wtxt2)
    fc = fb

    niter = 0
    while niter <= maxniter:
        niter += 1

        if fsign(fb) == fsign(fc):
            c = a
            fc = fa
            d = b - a
            e = d

        if abs(fc) < abs(fb):
            a = b
            b = c
            c = a
            fa = fb
            fb = fc
            fc = fa

        tol = tolf * abs(b) + tola
        tol1 = 0.5 * tol
        xm = 0.5 * (c - b)

        if abs(xm) <= tol1 or fb == 0.0: return b

        if abs(e) >= tol1 and abs(fa) > abs(fb):
            s = fb / fa
            if a == c:
                p = 2.0 * xm * s
                q = 1.0 - s
            else:
                q = fa / fc
                r = fb / fc
                p = s * (2.0 * xm * q * (q - r) - (b - a) * (r - 1.0))
                q = (q - 1.0) * (r - 1.0) * (s - 1.0)

            if p > 0.0: q = -q
            p = abs(p)
            if 2.0 * p < min(3.0 * xm * q - abs(tol1 * q), abs(e * q)):
                e = d
                d = p / q
            else:
                d = xm
                e = d

        else:
            d = xm
            e = d

        a = b
        fa = fb

        if abs(d) > tol1:
            b = b + d
        else:
            #b = b + sign(tol1, xm)
            if xm < 0.0: b = b - tol1
            elif xm > 0.0: b = b + tol1
            else: b = b

        fb = func(b)

    else:
        # Theoretical bound on the number of evaluations needed (cf. docstring)
        numb  = int(math.log((x2 - x1) / tol, 2)**2 + 0.5)
        wtxt1 = str(maxniter) + " iterations not sufficient in zbrent called"
        wtxt2 = " by " + caller + ". func(x) = " + str(fb) + " for x = " + str(b)
        wtxt3 = " (theoretical bound: about " + str(numb) + " evaluations)"
        warn(wtxt1 + wtxt2 + wtxt3)
        return b
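A minimal usage sketch (zbrent above is assumed to be in scope; the lambda and 
the caller tag 'demo' are illustrative): cos(x) - x changes sign on [0, 1], so 
no prior bracketing is needed.

# Usage sketch: the root of cos(x) - x lies in [0.0, 1.0]
from math import cos

root = zbrent(lambda x: cos(x) - x, 0.0, 1.0, caller='demo')
print(root)     # approximately 0.7390851332151607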
Example #25
0
def zbisect(func, x1, x2, caller='caller', tolf=FOURMACHEPS, \
                          tola=SQRTTINY, maxniter=256, bracket=False):
    """
    Solves the equation func(x) = 0 on [x1, x2] using a bisection algorithm. 
    zbisect converges slower than zbrent in most cases, but it might be faster 
    in some cases!
    
    NB. The function always returns a value but a warning is printed to stdout 
    if the iteration procedure has not converged! Cf. comment below regarding 
    convergence!

    Arguments:
    ----------
    func      Function having the proposed root as its argument
    
    x1        Lower search limit (root must be known to be >= x1 unless 
              prior bracketing is used)
    
    x2        Upper search limit (root must be known to be <= x2 unless 
              prior bracketing is used)
    
    tolf      Desired fractional accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola)

    tola      Desired absolute accuracy of root (a combination of fractional 
              and absolute will actually be used: tolf*abs(root) + tola)
              AND
              desired max absolute difference of func(root) from zero

    maxniter  Maximum number of iterations

    bracket   If True, x1 and x2 are used in an initial bracketing before 
              solving 

    Returns:
    ---------
    Final value of root
    
    This algorithm needs on the average log2((b-a)/tol) function evaluations to 
    reach convergence. For instance: b-a = 1.0 and tol = 1.8e-12 will on the 
    average provide convergence in about 40 iterations. Bisection is "dead 
    certain" and will always converge if there is a root. It is likely to pass 
    the tolerances with no extra margin. If there is no root, it will converge 
    to a singularity if there is one...
    """

    if tolf < MACHEPS:
        tolf = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in zbisect. Machine epsilon will be used instead!"
        warn(wtxt1 + wtxt2)

    if tola < 0.0:
        tola = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zbisect. 0.0 (zero) will be used instead!"
        warn(wtxt1 + wtxt2)

    assert is_posinteger(maxniter), \
          "Maximum number of iterations must be a positive integer in zbisect!"

    if bracket: x1, x2 = bracketzero(func, x1, x2, caller, maxniter)

    assert x2 > x1, \
                  "Bounds must be given with the lower bound first in zbisect!"

    fmid = func(x2)
    if fmid == 0.0: return x2
    f = func(x1)
    if f == 0.0: return x1
    if fsign(fmid) == fsign(f):
        x1, x2 = bracketzero(func, x1, x2, caller, maxniter)
        wtxt1 = "Starting points must be on opposite sides of the root in "
        wtxt2 = "zbisect. Bracketing will be used to find an appropriate span!"
        warn(wtxt1 + wtxt2)

    if f < 0.0:
        root = x1
        h = x2 - x1
    else:
        root = x2
        h = x1 - x2

    niter = 0
    while niter <= maxniter:
        niter += 1
        h = 0.5 * h
        xmid = root + h
        fmid = func(xmid)
        if abs(fmid) < tola: return xmid
        if fmid <= 0.0: root = xmid
        absh = abs(h)
        if absh < tolf * abs(root) + tola: return root

    else:
        wtxt1 = str(maxniter) + " it'ns not sufficient in zbisect called by "
        wtxt2 = caller + ".\nfunc(x) = " + str(fmid) + " for x = " + str(root)
        warn(wtxt1 + wtxt2)
        return root
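Since zbisect shares its call signature with zbrent, the same problem can be 
solved by bisection (sketch only; zbisect above is assumed to be in scope):

# Usage sketch: same root of cos(x) - x, now found by bisection
from math import cos

root = zbisect(lambda x: cos(x) - x, 0.0, 1.0, caller='demo')
print(root)     # approximately 0.7390851332151607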
Example #26
0
    def lhs_sample(self, nparams, nintervals, rcorrmatrix=None, checklevel=0):

        """
        Generates a full Latin Hypercube Sample of uniformly distributed 
        random variates in [0.0, 1.0] placed in a matrix with one realization 
        in each row. A target rank correlation matrix can be given (it must 
        have the dimension nparams*nparams).
        
        checklevel may be 0, 1 or 2 and is used to control trace printout. 
        0 produces no trace output, whereas 2 produces the most.

        NB. IN ORDER FOR LATIN HYPERCUBE SAMPLING TO BE MEANINGFUL THE OUTPUT 
        STREAM OF RANDOM VARIATES MUST BE HANDLED BY INVERSE METHODS !!!! 

        Latin Hypercube Sampling was first described by McKay, Conover & 
        Beckman in a Technometrics article 1979. The use of the LHS technique 
        to introduce rank correlations was first described by Iman & Conover 
        1982 in an issue of Communications of Statistics.
        """

        # lhs_sample uses the Matrix class to a great extent

        if nparams > nintervals:
            warn("nparams > nintervals in RandomStructure.lhs_sample")

        nsamples     = nintervals   # Just to remember
        rstreaminner = self.rstream
        rstreamouter = self.rstream2

        factor  =  1.0 / float(nintervals)

        tlhsmatrix1 = Matrix()  # tlhsmatrix1 belongs to the Matrix class
        if rcorrmatrix: tscorematrix = Matrix()
        for k in range(0, nparams):
            if rcorrmatrix:
                tnvector, tscorevector = \
                            self.scramble_range(nsamples, rstreamouter, True)
                rowk = array('d', tscorevector)
                tscorematrix.append(rowk)
            else:
                tnvector = self.scramble_range(nsamples, rstreamouter)
            pvector = array('d', [])
            for number in tnvector:
                p  =  factor * (float(number) + rstreaminner.runif01())
                p  =  max(p, 0.0) # Probabilities must be in [0.0, 1.0]
                p  =  min(p, 1.0)
                pvector.append(p)
            tlhsmatrix1.append(pvector)
                
        
        # tlhsmatrix1 (and tscorematrix) are now transposed to run with 
        # one subsample per row to fit with output as well as Iman-Conover 
        # formulation. tlhsmatrix1 and tscorematrix will be used anyway 
        # for some manipulations which are more simple when matrices run 
        # with one variable per row

        lhsmatrix1  = transposed(tlhsmatrix1)
        if rcorrmatrix: scorematrix = transposed(tscorematrix)

        if checklevel == 2:
            print("lhs_sample: Original LHS sample matrix")
            mxdisplay(lhsmatrix1)
            if rcorrmatrix: 
                print("lhs_sample: Target rank correlation matrix")
                mxdisplay(rcorrmatrix)
        if checklevel == 1 or checklevel == 2:
            print("lhs_sample: Rank correlation matrix of")
            print("            original LHS sample matrix")
            trankmatrix1 = Matrix()
            for k in range (0, nparams):
                rowk = array('d', extract_ranks(tlhsmatrix1[k]))
                trankmatrix1.append(rowk)
            mxdisplay(Matrix(corrmatrix(trankmatrix1)))

        if not rcorrmatrix:
            return lhsmatrix1

        else:
            scorecorr = Matrix(corrmatrix(tscorematrix))
            if checklevel == 2:
                print("lhs_sample: Score matrix of original LHS sample matrix")
                mxdisplay(scorematrix)
                print("lhs_sample: Correlation matrix of scores of")
                print("            original LHS sample")
                mxdisplay(scorecorr)

            slower, slowert = ludcmp_chol(scorecorr)
            slowerinverse   = inverted(slower)
            tslowerinverse  = transposed(slowerinverse)
            clower, clowert = ludcmp_chol(rcorrmatrix)
            scoresnostar    = scorematrix*tslowerinverse # Matrix multiplication
            if checklevel == 2:
                print("lhs_sample: Correlation matrix of scoresnostar")
                mxdisplay(corrmatrix(transposed(scoresnostar)))

            scoresstar  = scoresnostar*clowert    # Matrix multiplication
            tscoresstar = transposed(scoresstar)
            trankmatrix = Matrix()
            for k in range (0, nparams):
                trankmatrix.append(extract_ranks(tscoresstar[k]))
            if checklevel == 2:
                print("lhs_sample: scoresstar matrix")
                mxdisplay(scoresstar)
                print("lhs_sample: Correlation matrix of scoresstar")
                mxdisplay(corrmatrix(tscoresstar))
                print("lhs_sample: scoresstar matrix converted to rank")
                mxdisplay(transposed(trankmatrix))
                for k in range(0, nparams):
                    tlhsmatrix1[k] = array('d', sorted(list(tlhsmatrix1[k])))
                print("RandomStructure.lhs_sample: Sorted LHS sample matrix")
                mxdisplay(transposed(tlhsmatrix1))

            tlhsmatrix2 = Matrix()
            for k in range(0, nparams):
                # Sort each row in tlhsmatrix1 and reorder 
                # according to trankmatrix rows
                auxvec = reorder(tlhsmatrix1[k], trankmatrix[k], \
                                                 straighten=True)
                tlhsmatrix2.append(auxvec)
            lhsmatrix2 = transposed(tlhsmatrix2)
            if checklevel == 2:
                print("lhs_sample: Corrected/reordered LHS sample matrix")
                mxdisplay(transposed(tlhsmatrix2))

            if checklevel == 1 or checklevel == 2:
                trankmatrix2 = Matrix()
                auxmatrix2   = tlhsmatrix2
                for k in range (0, nparams):
                    trankmatrix2.append(extract_ranks(auxmatrix2[k]))
                print("lhs_sample: Rank correlation matrix of corrected/")
                print("            /reordered LHS sample matrix")
                mxdisplay(corrmatrix(trankmatrix2))


            return lhsmatrix2
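The core of the method is the stratification step: each parameter receives 
exactly one uniform variate per interval, with the interval order scrambled 
independently per parameter. Below is a standalone sketch of that step (plain 
LHS without the rank correlation machinery; lhs_plain is a hypothetical 
helper, not part of the library above):

# Plain LHS sketch using only the standard library
import random

def lhs_plain(nparams, nintervals, seed=None):
    rng = random.Random(seed)
    factor = 1.0 / nintervals
    columns = []
    for _ in range(nparams):
        strata = list(range(nintervals))
        rng.shuffle(strata)                        # scramble the interval order
        columns.append([factor*(k + rng.random()) for k in strata])
    return [list(row) for row in zip(*columns)]    # one realization per row

sample = lhs_plain(nparams=3, nintervals=5)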
Example #27
0
def cgamma(alpha, lam, x, lngamalpha=False, tolf=FOURMACHEPS, itmax=128):
    """
    The gamma distrib. f = lam * exp(-lam*x) * (lam*x)**(alpha-1) / gamma(alpha)
    F is the integral = the incomplete gamma or the incomplete gamma / complete 
    gamma depending on how the incomplete gamma function is defined.
    x, lam, alpha >= 0
    tolf  =  allowed fractional error in computation of the incomplete function
    itmax =  maximum number of iterations to obtain accuracy 

    NB It is possible to gain efficiency by providing the value of the 
    natural logarithm of the complete gamma function ln(gamma(alpha)) 
    as a pre-computed input (may be computed using numlib.specfunc.lngamma) 
    instead of the default 'False'.
    """

    assert alpha >= 0.0, "alpha must not be negative in cgamma!"
    assert lam >= 0.0, "lambda must not be negative in cgamma!"
    assert x >= 0.0, "variate must not be negative in cgamma!"
    assert tolf >= 0.0, "tolerance must not be negative in cgamma!"
    assert is_posinteger(itmax), \
           "maximum number of iterations must be a positive integer in cgamma!"

    if alpha == 1.0:
        return cexpo(1.0 / lam, x)

    lamx = lam * x
    if lamx == 0.0:
        return 0.0
    if lngamalpha:
        lnga = lngamalpha
    else:
        lnga = lngamma(alpha)

    # -------------------------------------------------------------------------
    def _gamser():
        # A series expansion is used for lamx < alpha + 1.0
        # (cf. Abramowitz & Stegun)
        apn = alpha
        summ = 1.0 / apn
        dela = summ
        converged = False
        for k in range(0, itmax):
            apn += 1.0
            dela = dela * lamx / apn
            summ += dela
            if abs(dela) < abs(summ) * tolf:
                converged = True
                return summ * exp(-lamx + alpha * log(lamx) - lnga), converged
        return summ * exp(-lamx + alpha * log(lamx) - lnga), converged

    # -------------------------------------------------------------------------
    def _gamcf():
        # A continued fraction expansion is used for
        # lamx >= alpha + 1.0 (cf. Abramowitz & Stegun):
        gold = 0.0
        a0 = 1.0
        a1 = lamx
        b0 = 0.0
        b1 = 1.0
        fac = 1.0
        converged = False
        for k in range(0, itmax):
            ak = float(k + 1)
            aka = ak - alpha
            a0 = (a1 + a0 * aka) * fac
            b0 = (b1 + b0 * aka) * fac
            akf = ak * fac
            a1 = lamx * a0 + akf * a1
            b1 = lamx * b0 + akf * b1
            if a1 != 0.0:
                fac = 1.0 / a1
                g = b1 * fac
                if abs(g - gold) < abs(g) * tolf:
                    converged = True
                    return 1.0 - exp(-lamx + alpha * log(lamx) - lnga) * g, converged
                gold = g
        return 1.0 - exp(-lamx + alpha * log(lamx) - lnga) * g, converged

    # -------------------------------------------------------------------------

    if lamx < alpha + 1.0:
        cdf, converged = _gamser()
    else:
        cdf, converged = _gamcf()

    if not converged:
        warn("cgamma has not converged for itmax = " + str(itmax) + " and tolf = " + str(tolf))

    cdf = kept_within(0.0, cdf, 1.0)

    return cdf
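cgamma(alpha, lam, x) is the regularized lower incomplete gamma function 
evaluated at lam*x, so it can be cross-checked against an external reference 
(a sketch only; SciPy is used here purely for the comparison and is not 
required by cgamma itself):

# Consistency check sketch (assumes cgamma above is in scope)
from scipy.special import gammainc     # regularized lower incomplete gamma P

alpha, lam, x = 2.5, 1.5, 3.0
print(cgamma(alpha, lam, x))           # should closely match...
print(gammainc(alpha, lam*x))          # ...P(alpha, lam*x)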
Example #28
0
def cgamma(alpha, lam, x, lngamalpha=False, tolf=FOURMACHEPS, itmax=128):
    """
    The gamma distrib. f = lam * exp(-lam*x) * (lam*x)**(alpha-1) / gamma(alpha)
    F is the integral = the incomplete gamma or the incomplete gamma / complete 
    gamma depending on how the incomplete gamma function is defined.
    x, lam, alpha >= 0
    tolf  =  allowed fractional error in computation of the incomplete function
    itmax =  maximum number of iterations to obtain accuracy 

    NB It is possible to gain efficiency by providing the value of the 
    natural logarithm of the complete gamma function ln(gamma(alpha)) 
    as a pre-computed input (may be computed using numlib.specfunc.lngamma) 
    instead of the default 'False'.
    """

    assert alpha >= 0.0, "alpha must not be negative in cgamma!"
    assert lam >= 0.0, "lambda must not be negative in cgamma!"
    assert x >= 0.0, "variate must not be negative in cgamma!"
    assert tolf >= 0.0, "tolerance must not be negative in cgamma!"
    assert is_posinteger(itmax), \
           "maximum number of iterations must be a positive integer in cgamma!"

    if alpha == 1.0: return cexpo(1.0 / lam, x)

    lamx = lam * x
    if lamx == 0.0: return 0.0
    if lngamalpha: lnga = lngamalpha
    else: lnga = lngamma(alpha)

    # -------------------------------------------------------------------------
    def _gamser():
        # A series expansion is used for lamx < alpha + 1.0
        # (cf. Abramowitz & Stegun)
        apn = alpha
        summ = 1.0 / apn
        dela = summ
        converged = False
        for k in range(0, itmax):
            apn += 1.0
            dela = dela * lamx / apn
            summ += dela
            if abs(dela) < abs(summ) * tolf:
                converged = True
                return summ * exp(-lamx + alpha * log(lamx) - lnga), converged
        return summ * exp(-lamx + alpha * log(lamx) - lnga), converged

    # -------------------------------------------------------------------------
    def _gamcf():
        # A continued fraction expansion is used for
        # lamx >= alpha + 1.0 (cf. Abramowitz & Stegun):
        gold = 0.0
        a0 = 1.0
        a1 = lamx
        b0 = 0.0
        b1 = 1.0
        fac = 1.0
        converged = False
        for k in range(0, itmax):
            ak = float(k + 1)
            aka = ak - alpha
            a0 = (a1 + a0 * aka) * fac
            b0 = (b1 + b0 * aka) * fac
            akf = ak * fac
            a1 = lamx * a0 + akf * a1
            b1 = lamx * b0 + akf * b1
            if a1 != 0.0:
                fac = 1.0 / a1
                g = b1 * fac
                if abs(g - gold) < abs(g) * tolf:
                    converged = True
                    return 1.0 - exp(-lamx + alpha * log(lamx) -
                                     lnga) * g, converged
                gold = g
        return 1.0 - exp(-lamx + alpha * log(lamx) - lnga) * g, converged

    # -------------------------------------------------------------------------

    if lamx < alpha + 1.0:
        cdf, converged = _gamser()
    else:
        cdf, converged = _gamcf()

    if not converged:
        warn("cgamma has not converged for itmax = " + \
                     str(itmax) + " and tolf = " + str(tolf))

    cdf = kept_within(0.0, cdf, 1.0)

    return cdf
Example #29
0
def erfc1(x, tol=_EIGHTMACHEPS):
    """
    Computation of the complementary error function for real argument.
    The fractional error is estimated to be < 50*machine epsilon for abs(x) <= 1.5
    and < 1.e-8 elsewhere (erfc2 is called for abs(x) > 1.5 for numeric reasons).
    
    The function uses a power series expansion for arguments between -1.5 
    and +1.5 (cf. Abramowitz & Stegun) and continued fractions for all other 
    arguments (cf. A. Cuyt et al., "Continued Fractions for Special Functions: 
    Handbook and Software", Universiteit Antwerpen, where a slightly faster 
    converging expression than that of Abramowitz & Stegun's CF is presented. 
    Cuyt's "ER.20" is used here).
    """

    if tol < _EIGHTMACHEPS:
        tol = _EIGHTMACHEPS
        txt1 = "No use using tolerance < 8.0*machine epsilon in erfc1."
        txt2 = " 8.0*machine epsilon is used"
        warn(txt)

    ax = abs(x)
    xx = x * x

    if ax <= _ERFC21:
        # Power series expansion (cf. Abramowitz & Stegun)
        k = 0.0
        sign = 1.0
        xpart = 1.0
        den1 = 1.0
        #den2  = 1.0
        #term  = sign*xpart/(den1*den2)
        #summ  = term
        summ = 1.0
        c = 0.0
        while True:  # The Kahan summation proc. (cf. Dahlquist, Bjorck & Anderson)
            k += 1.0
            summo = summ
            sign = -sign
            xpart *= xx
            den1 *= k
            den2 = 2.0 * k + 1.0
            term = sign * xpart / (den1 * den2)
            y = term + c
            t = summ + y
            if fsign(y) == fsign(summ):
                f = (0.46 * t - t) + t
                c = ((summ - f) - (t - f)) + y
            else:
                c = (summ - t) + y
            summ = t
            if abs(summ - summo) < tol * abs(summ):
                summ += c
                break
        #r = 1.0 - (2.0*ax/SQRTPI)*summ
        r = 1.0 - (2.0 * SQRTPIINV * ax) * summ

    else:
        return erfc2(x)
    """
        # Compute continued fractions:
        # Q = b0 + a1/(b1 + a2/(b2 + a3/(b3 + ......... where ak   
        # are numerator terms and where bk are denominator terms 
        # (and where a0 is always 0).
        # Here:
        # b0 = 0.0
        # a1 = 1.0
        # a2 = 0.5
        # a3 = 1.5
        # a4 = 2.0
        # b1 = b3 etc = x*x
        # b2 = b4 etx = 1.0
        # (cf. Cuyt et al.)

        #k   = 0.0
        bk  = 0.0
        Am1 = 1.0
        Bm1 = 0.0
        A0  = bk
        B0  = 1.0

        k   = 1.0
        bk  = xx
        ak  = 1.0
        Ap1 = bk*A0 + ak*Am1
        Bp1 = bk*B0 + ak*Bm1
        Q   = Ap1/Bp1
        Am1 = A0
        Bm1 = B0
        A0  = Ap1
        B0  = Bp1

        while True:
            k   += 1.0
            Qold = Q
            if is_eveninteger(k): bk = 1.0
            else:                 bk = xx
            ak   = 0.5 * (k-1.0)
            Ap1  = bk*A0 + ak*Am1
            Bp1  = bk*B0 + ak*Bm1
            Q    = Ap1/Bp1
            if abs(Q-Qold) < abs(Q)*tol:
                break
            Am1  = A0
            Bm1  = B0
            A0   = Ap1
            B0   = Bp1

        p  = exp(-xx)
        if p == 0.0: # Take a chance...
            #r = exp(-xx + log(ax*Q/SQRTPI))
            r = exp(-xx + log(SQRTPIINV*ax*Q))
        else:
            #r = ax * p * Q / SQRTPI
            r = SQRTPIINV * ax * p * Q"""

    if x < 0.0: r = 2.0 - r
    r = kept_within(0.0, r, 2.0)
    return r
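A quick sanity check sketch (erfc1 above is assumed to be in scope; the 
arguments are kept within [-1.5, 1.5] so only the power-series branch is 
exercised and erfc2 is not needed). math.erfc from the standard library 
serves as the reference:

# Sanity check sketch against the standard library
import math

for x in (-1.4, -0.5, 0.0, 0.75, 1.4):
    print(x, erfc1(x), math.erfc(x))   # the two values should agree closely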
Example #30
0
    def lhs_sample(self, nparams, nintervals, rcorrmatrix=None, checklevel=0):
        """
        Generates a full Latin Hypercube Sample of uniformly distributed 
        random variates in [0.0, 1.0] placed in a matrix with one realization 
        in each row. A target rank correlation matrix can be given (it must 
        have the dimension nparams*nparams).
        
        checklevel may be 0, 1 or 2 and is used to control trace printout. 
        0 produces no trace output, whereas 2 produces the most.

        NB. IN ORDER FOR LATIN HYPERCUBE SAMPLING TO BE MEANINGFUL THE OUTPUT 
        STREAM OF RANDOM VARIATES MUST BE HANDLED BY INVERSE METHODS !!!! 

        Latin Hypercube Sampling was first described by McKay, Conover & 
        Beckman in a Technometrics article 1979. The use of the LHS technique 
        to introduce rank correlations was first described by Iman & Conover 
        1982 in an issue of Communications of Statistics.
        """

        # lhs_sample uses the Matrix class to a great extent

        if nparams > nintervals:
            warn("nparams > nintervals in RandomStructure.lhs_sample")

        nsamples = nintervals  # Just to remember
        rstreaminner = self.rstream
        rstreamouter = self.rstream2

        factor = 1.0 / float(nintervals)

        tlhsmatrix1 = Matrix()  # tlhsmatrix1 belongs to the Matrix class
        if rcorrmatrix: tscorematrix = Matrix()
        for k in range(0, nparams):
            if rcorrmatrix:
                tnvector, tscorevector = \
                            self.scramble_range(nsamples, rstreamouter, True)
                rowk = array('d', tscorevector)
                tscorematrix.append(rowk)
            else:
                tnvector = self.scramble_range(nsamples, rstreamouter)
            pvector = array('d', [])
            for number in tnvector:
                p = factor * (float(number) + rstreaminner.runif01())
                p = max(p, 0.0)  # Probabilities must be in [0.0, 1.0]
                p = min(p, 1.0)
                pvector.append(p)
            tlhsmatrix1.append(pvector)

        # tlhsmatrix1 (and tscorematrix) are now transposed to run with
        # one subsample per row to fit with output as well as Iman-Conover
        # formulation. tlhsmatrix1 and tscorematrix will be used anyway
        # for some manipulations which are more simple when matrices run
        # with one variable per row

        lhsmatrix1 = transposed(tlhsmatrix1)
        if rcorrmatrix: scorematrix = transposed(tscorematrix)

        if checklevel == 2:
            print("lhs_sample: Original LHS sample matrix")
            mxdisplay(lhsmatrix1)
            if rcorrmatrix:
                print("lhs_sample: Target rank correlation matrix")
                mxdisplay(rcorrmatrix)
        if checklevel == 1 or checklevel == 2:
            print("lhs_sample: Rank correlation matrix of")
            print("            original LHS sample matrix")
            trankmatrix1 = Matrix()
            for k in range(0, nparams):
                rowk = array('d', extract_ranks(tlhsmatrix1[k]))
                trankmatrix1.append(rowk)
            mxdisplay(Matrix(corrmatrix(trankmatrix1)))

        if not rcorrmatrix:
            return lhsmatrix1

        else:
            scorecorr = Matrix(corrmatrix(tscorematrix))
            if checklevel == 2:
                print("lhs_sample: Score matrix of original LHS sample matrix")
                mxdisplay(scorematrix)
                print("lhs_sample: Correlation matrix of scores of")
                print("            original LHS sample")
                mxdisplay(scorecorr)

            slower, slowert = ludcmp_chol(scorecorr)
            slowerinverse = inverted(slower)
            tslowerinverse = transposed(slowerinverse)
            clower, clowert = ludcmp_chol(rcorrmatrix)
            scoresnostar = scorematrix * tslowerinverse  # Matrix multiplication
            if checklevel == 2:
                print("lhs_sample: Correlation matrix of scoresnostar")
                mxdisplay(corrmatrix(transposed(scoresnostar)))

            scoresstar = scoresnostar * clowert  # Matrix multiplication
            tscoresstar = transposed(scoresstar)
            trankmatrix = Matrix()
            for k in range(0, nparams):
                trankmatrix.append(extract_ranks(tscoresstar[k]))
            if checklevel == 2:
                print("lhs_sample: scoresstar matrix")
                mxdisplay(scoresstar)
                print("lhs_sample: Correlation matrix of scoresstar")
                mxdisplay(corrmatrix(tscoresstar))
                print("lhs_sample: scoresstar matrix converted to rank")
                mxdisplay(transposed(trankmatrix))
                for k in range(0, nparams):
                    tlhsmatrix1[k] = array('d', sorted(list(tlhsmatrix1[k])))
                print("RandomStructure.lhs_sample: Sorted LHS sample matrix")
                mxdisplay(transposed(tlhsmatrix1))

            tlhsmatrix2 = Matrix()
            for k in range(0, nparams):
                # Sort each row in tlhsmatrix1 and reorder
                # according to trankmatrix rows
                auxvec = reorder(tlhsmatrix1[k], trankmatrix[k], \
                                                 straighten=True)
                tlhsmatrix2.append(auxvec)
            lhsmatrix2 = transposed(tlhsmatrix2)
            if checklevel == 2:
                print("lhs_sample: Corrected/reordered LHS sample matrix")
                mxdisplay(transposed(tlhsmatrix2))

            if checklevel == 1 or checklevel == 2:
                trankmatrix2 = Matrix()
                auxmatrix2 = tlhsmatrix2
                for k in range(0, nparams):
                    trankmatrix2.append(extract_ranks(auxmatrix2[k]))
                print("lhs_sample: Rank correlation matrix of corrected/")
                print("            /reordered LHS sample matrix")
                mxdisplay(corrmatrix(trankmatrix2))

            return lhsmatrix2