Example #1
    def rwiener(self, tau, xmin=float('-inf'), xmax=float('inf'), \
                           pmin=0.0, pmax=1.0):
        """
        Generates random numbers corresponding to a Wiener process (the 
        integral of white noise, cf. the Langevin equation), inverse variant. 
        The Wiener process is W(t+tau) - W(t) = N(0, sqrt(tau)) 
        where tau is the time increment and N(0, sqrt(tau)) is a  
        normally distributed random variable having zero mean and 
        sqrt(tau) as its standard deviation.
        
        This method returns W(t+tau) - W(t) given tau and allows 
        tau to be negative.
        """

        self._checkminmax(xmin, xmax, pmin, pmax, 'rwiener')

        mu    = 0.0
        sigma = sqrt(abs(tau))   # abs(tau) is used

        pmn = pmin
        pmx = pmax
        if xmin > float('-inf'): pmn = max(pmin, cnormal(mu, sigma, xmin))
        if xmax < float('inf'):  pmx = min(pmax, cnormal(mu, sigma, xmax))

        p  =  pmn + (pmx-pmn)*self.runif01()
        w  =  inormal(p, mu, sigma)

        return w
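The relation being exploited is that each increment W(t+tau) - W(t) is N(0, sqrt(|tau|)), so one uniform draw pushed through the inverse normal CDF yields one increment. A minimal self-contained sketch of the same idea, using statistics.NormalDist.inv_cdf and random.random as stand-ins for the module's inormal and runif01 (both substitutions are assumptions made only so the snippet runs on its own, not the library's own API):

import random
from statistics import NormalDist

def wiener_increment(tau, rng=random):
    """Draw W(t+tau) - W(t) ~ N(0, sqrt(|tau|)) by inverting the normal CDF."""
    sigma = abs(tau) ** 0.5              # abs(tau) mirrors the negative-tau handling above
    p = rng.random()
    while p == 0.0:                      # inv_cdf is only defined on the open interval (0, 1)
        p = rng.random()
    return NormalDist(mu=0.0, sigma=sigma).inv_cdf(p)

# Accumulate increments into a short sample path W(0), W(tau), W(2*tau), ...
tau, w, path = 0.01, 0.0, [0.0]
for _ in range(10):
    w += wiener_increment(tau)
    path.append(w)
print(path)

If bounds were supplied, the draw of p would simply be confined to the interval between the CDF values at xmin and xmax, exactly as in the rnormal example below.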
Example #2
def normalscores(ivector, scheme="van_der_Waerden", bottominteger=0):
    """
    Creates a list of Blom, Tukey or van der Waerden normal scores given a list 
    of N integers in [0, N) or [1, N] depending on whether the bottom integer 
    for the ranking is set to 0 or 1 (0 is default). scheme is either 'Blom', 
    'Tukey' or 'van_der_Waerden'. 
    """

    assert bottominteger == 0 or bottominteger == 1, \
                       "Bottom rank must be set to 0 or 1 in normalscores!"

    # Plotting-position constants: p = (rank - constnumer) / (N + constdenom)
    if   scheme == 'Blom':
        constnumer = 0.375
        constdenom = 0.250
    elif scheme == 'Tukey':
        constnumer = 1.0/3.0
        constdenom = constnumer
    elif scheme == 'van_der_Waerden':
        constnumer = 0.0
        constdenom = 1.0
    else:
        assert False, "scheme must be 'Blom', 'Tukey' or 'van_der_Waerden' in normalscores!"

    length      = len(ivector)
    scorevector = []
    for integer in ivector:
        n = integer + 1 - bottominteger
        scorevector.append(inormal((n-constnumer)/(length+constdenom)))

    return scorevector
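All three schemes reduce to evaluating the inverse normal CDF at a plotting position (rank - c) / (N + d) with the standard scheme-specific constants (Blom: 3/8 and 1/4; Tukey: 1/3 and 1/3; van der Waerden: 0 and 1). A compact, self-contained sketch of that computation, with statistics.NormalDist().inv_cdf standing in for inormal (an assumption made only so the snippet runs on its own):

from statistics import NormalDist

_phi_inv = NormalDist().inv_cdf          # stand-in for inormal(p)

def normal_scores(n, scheme="van_der_Waerden"):
    """Normal scores for ranks 1..n using the usual plotting-position constants."""
    c, d = {"Blom":            (0.375, 0.25),
            "Tukey":           (1.0 / 3.0, 1.0 / 3.0),
            "van_der_Waerden": (0.0, 1.0)}[scheme]
    return [_phi_inv((rank - c) / (n + d)) for rank in range(1, n + 1)]

print(normal_scores(5, "Blom"))          # symmetric about zero; the middle score is 0.0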
Example #3
    def rnormal(self, mu, sigma, xmin=float('-inf'), xmax=float('inf'), \
                                 pmin=0.0, pmax=1.0):
        """
        Generator of normally distributed random variates using an inverse 
        method, claimed to give a maximum relative error less than 2.6e-9 
        (cf. statlib.invcdf.inormal for details). 
        """

        self._checkminmax(xmin, xmax, pmin, pmax, 'rnormal')

        pmn = pmin
        pmx = pmax
        if xmin > float('-inf'): pmn = max(pmin, cnormal(mu, sigma, xmin))
        if xmax < float('inf'):  pmx = min(pmax, cnormal(mu, sigma, xmax))

        p  =  pmn + (pmx-pmn)*self.runif01()
        x  =  inormal(p, mu, sigma)

        return x
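The pmn/pmx clamping is what restricts the variate to [xmin, xmax]: the uniform draw is confined to the CDF values of the bounds before being mapped back through the inverse CDF. A hedged sketch of the same truncated inverse-CDF sampling built only on the standard library (NormalDist.cdf/inv_cdf and random.random replace cnormal, inormal and runif01; those substitutions are assumptions, not the module's API):

import math
import random
from statistics import NormalDist

def rnormal_truncated(mu, sigma, xmin=-math.inf, xmax=math.inf, rng=random):
    """Inverse-CDF sampling of a normal variate restricted to [xmin, xmax]."""
    dist = NormalDist(mu, sigma)
    pmn = dist.cdf(xmin) if xmin > -math.inf else 0.0
    pmx = dist.cdf(xmax) if xmax < math.inf else 1.0
    p = pmn + (pmx - pmn) * rng.random()
    p = min(max(p, 1e-15), 1.0 - 1e-15)  # keep p strictly inside (0, 1) for inv_cdf
    return dist.inv_cdf(p)

# Example: standard normal variate confined to the right tail x >= 1.0
print(rnormal_truncated(0.0, 1.0, xmin=1.0))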
Example #4
def _studQ(ndf, confdeg=0.90):
    """
    Quantiles for Student's "t" distribution (one number is returned)
    ---------
    ndf       the number of degrees of freedom
    confdeg   the confidence level
    --------
    Accuracy in decimal digits of about:
      5 if ndf >= 8., machine precision if ndf = 1. or 2., 3 otherwise
    ----------
    Reference: G.W. Hill, Communications of the ACM, Vol. 13, no. 10, Oct. 1970
    """

    # Error handling
    assert 0.0 <= confdeg <= 1.0, \
          "Confidence degree must be in [0.0, 1.0] in _studQ!"
    assert ndf > 0.0, "Number of degrees of freedom must be positive in _studQ!"

    phi = 1.0 - confdeg

    # For ndf = 1 we can use the Cauchy distribution
    if ndf == 1.0:
        prob = 1.0 - 0.5*phi
        stud = icauchy(prob)

    # An exact method also exists for ndf = 2
    elif ndf == 2.0:
        if phi <= TINY: phi = TINY
        stud = sqrt(2.0 / (phi*(2.0-phi)) - 2.0)

    # Check to see if we're not too far out in the tails
    elif phi < TINY:
        t = 1.0 / TINY
        stud = t

    # General case
    else:
        a = 1.0 / (ndf-0.5)
        b = 48.0 / a**2
        c = ((20700.0*a/b - 98.0) * a - 16.0) * a + 96.36
        d = ((94.5/(b + c) - 3.0) / b + 1.0) * sqrt(a*PIHALF) * ndf
        x = d * phi
        y = x ** (2.0 / ndf)

        if y > 0.05 + a:  #     Asymptotic inverse expansion about normal
            x = inormal(0.5*phi)
            y = x**2
            if ndf < 5.0: c = c + 0.3 * (ndf-4.5) * (x+0.6)
            c = (((0.05 * d * x - 5.0) * x - 7.0) * x - 2.0) * x + b + c
            y = (((((0.4*y + 6.3) * y + 36.0) * y + 94.5) / c - y  - 3.0) \
                                                              / b + 1.0) * x
            y = a * y**2
            if y > 0.002: y = exp(y) - 1.0
            else:         y = 0.5 * y**2 + y
        else:

            y = ((1.0 / (((ndf + 6.0) / (ndf * y) - 0.089 * d -  \
                0.822) * (ndf + 2.0) * 30) + 0.5 / (ndf + 4.0))  \
                * y - 1.0) * (ndf + 1.0) / (ndf + 2.0) + 1.0 / y

        stud = sqrt(ndf*y)

    return stud
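The ndf == 1 branch works because Student's t with one degree of freedom is the standard Cauchy distribution, whose quantile function is tan(pi*(p - 1/2)) in closed form. A small self-contained check of that special case (the tan expression stands in for icauchy, which is an assumption about what that helper evaluates):

import math

def t_quantile_1df(confdeg=0.90):
    """Two-sided t quantile for 1 degree of freedom via the Cauchy inverse CDF."""
    phi = 1.0 - confdeg                        # total tail probability
    prob = 1.0 - 0.5 * phi                     # upper-tail quantile level
    return math.tan(math.pi * (prob - 0.5))    # closed-form Cauchy inverse CDF

print(round(t_quantile_1df(0.90), 4))          # 6.3138, the familiar t(0.95; 1 df) table value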