def lusubs(lower, upper, bvector, permlist):
    """
    Back substitution for LU decomposition.
    NB. bvector and permlist are just lists, not matrix type vectors.
    """

    # Check input matrices and vectors/lists for inconsistencies
    ndiml = squaredim(lower, 'lusubs')
    ndimu = squaredim(upper, 'lusubs')
    errortext1 = "lower and upper have different dimensions in lusubs!"
    assert ndimu == ndiml, errortext1
    nb = len(bvector)
    errortext2 = "inconsistent dimensions in matrices and vector in lusubs!"
    assert nb == ndiml, errortext2
    errortext3 = "inconsistent length of permutation list in lusubs!"
    assert len(permlist) == nb, errortext3

    cvector = reorder(bvector, permlist)

    # First do forward substitution with the lower matrix to
    # create the intermediate vector (yvector)
    yvector = array('d', nb*[0.0])
    divisor = float(lower[0][0])
    if abs(divisor) < TINY: divisor = fsign(divisor)*TINY
    yvector[0] = cvector[0] / divisor
    for i in range(1, nb):
        summ = 0.0
        for j in range(0, i):
            summ += lower[i][j]*yvector[j]
        divisor = float(lower[i][i])
        if abs(divisor) < TINY: divisor = fsign(divisor)*TINY
        yvector[i] = (cvector[i]-summ) / divisor

    # Then do backward substitution using the upper matrix and the
    # intermediate vector to achieve the final result
    xvector = array('d', nb*[0.0])
    nbm1 = nb - 1
    divisor = float(upper[nbm1][nbm1])
    if abs(divisor) < TINY: divisor = fsign(divisor)*TINY
    xvector[nbm1] = yvector[nbm1] / divisor
    nbm2 = nbm1 - 1
    for i in range(nbm2, -1, -1):
        summ = 0.0
        ip1 = i + 1
        for j in range(ip1, nb):
            summ += upper[i][j]*xvector[j]
        divisor = float(upper[i][i])
        if abs(divisor) < TINY: divisor = fsign(divisor)*TINY
        xvector[i] = (yvector[i]-summ) / divisor

    return xvector

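# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT part of the original library; all
# names below are illustrative). It spells out what lusubs does on a tiny
# 2x2 system using plain Python lists: forward substitution with the
# unit-lower-triangular factor, then backward substitution with the upper
# factor. No pivoting here, so permlist would be the identity.
def _demo_substitution():
    L = [[1.0, 0.0], [0.5, 1.0]]      # unit diagonal, as ludcmp_crout makes
    U = [[4.0, 6.0], [0.0, 1.0]]
    b = [10.0, 6.0]                   # so A = L*U = [[4,6],[2,4]]
    # Forward substitution: solve L*y = b
    y0 = b[0] / L[0][0]
    y1 = (b[1] - L[1][0]*y0) / L[1][1]
    # Backward substitution: solve U*x = y
    x1 = y1 / U[1][1]
    x0 = (y0 - U[0][1]*x1) / U[0][0]
    return x0, x1                     # (1.0, 1.0) solves (L*U)*x = b
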
def crossed(self, t, tnext):
    """
    The method used for actually finding a (possible) crossing point
    between the present time t and the proposed/scheduled next time point
    tnext. It does not require that the present time point be the tnext
    of the previous step - it is possible to let t > previous tnext (by a
    small amount, naturally) to allow for "tiptoeing" around
    discontinuities. It is also possible to let t < previous tnext (by a
    small amount) in order to come as close as possible to a
    discontinuity before passing it.

    NB1. mode = 'any' allows for multiple crossings, whereas 'pos' and
    'neg' allow for one crossing only - possible future crossings are not
    registered for 'pos' and 'neg'.

    NB2. A warning will be issued if the solver fails to converge for the
    tolerances and maximum number of iterations input at the initiation
    of the instance object, but a result is always returned.

    NB3. tcrosslo will always be output >= t and tcrosshi will always be
    output <= tnext.
    """

    assert tnext >= t, \
          "There is something strange with the times input to crossed!"

    # No more crossings
    if self.__has_crossed and self.__mode != 'any':
        return False

    # Check to see whether the present time has already caused a crossing
    # (in which case it should have been taken care of):
    diff = self.__diffunc(t)
    if (self.__mode == 'any' and fsign(diff) != fsign(self.__diffprev)) \
    or (self.__mode == 'pos' and diff >= 0.0) \
    or (self.__mode == 'neg' and diff <= 0.0):
        self.__has_crossed = True
        return False

    diff = self.__diffunc(tnext)
    if (self.__mode == 'any' and fsign(diff) != fsign(self.__diffprev)) \
    or (self.__mode == 'pos' and diff >= 0.0) \
    or (self.__mode == 'neg' and diff <= 0.0):
        self.__diffprev = diff
        tcross = self.__solver(self.__diffunc, t, tnext, 'crossed', \
                               self.__tolf, self.__tola, self.__maxniter)
        eps = self.__tolf*abs(tcross) + self.__tola
        tcrosslo = max(t, tcross-eps)
        tcrosshi = min(tcross+eps, tnext)
        return tcrosslo, tcross, tcrosshi
    else:
        self.__diffprev = diff
        return False

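# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT part of the original library; names
# are illustrative): the core idea behind crossed() - detect a sign change
# of a difference function over one candidate step [t, tnext], then refine
# the crossing time, here with plain bisection in place of the instance's
# configurable solver.
def _find_crossing(diffunc, t, tnext, tol=1e-12):
    flo, fhi = diffunc(t), diffunc(tnext)
    if flo == 0.0: return t
    if (flo < 0.0) == (fhi < 0.0):
        return None                    # no crossing within this step
    while tnext - t > tol:
        tmid = 0.5*(t + tnext)
        if (diffunc(tmid) < 0.0) == (flo < 0.0): t = tmid
        else:                                    tnext = tmid
    return 0.5*(t + tnext)
# Example: _find_crossing(lambda u: math.cos(u) - 0.5, 0.0, 2.0) ~ pi/3
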
def z2nddeg_real(a, b, c):
    """
    Finds and returns the real roots of the quadratic equation
    a*x^2 + b*x + c = 0, if there are any. Seems to handle the problems
    in Forsythe-Malcolm-Moler in a  l e s s  reliable manner than does
    z2nddeg_complex!

    Returns:
    ----------
    x1, x2    The two roots of the second degree equation
              (returns None, None if the roots are complex)
    """

    assert a != 0.0, "x^2 coefficient must not be zero in z2nddeg_real!"

    undersquareroot = b**2 - 4.0*a*c

    if undersquareroot >= 0.0:
        # Compute the larger-magnitude root with the cancellation-avoiding
        # sign choice, then get the other from the product x1*x2 = c/a:
        x1 = -(b + fsign(b)*math.sqrt(undersquareroot))
        x1 = 0.5 * x1 / a
        x2 = c / (a*x1)
        return x1, x2
    else:
        return None, None

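# ----------------------------------------------------------------------
# Hedged, self-contained illustration (NOT part of the original library):
# why the fsign(b) sign choice matters. For x**2 - 1e8*x + 1 = 0 the naive
# formula loses the small root to cancellation, while the product rule
# x2 = c/(a*x1) recovers it from the accurately computed large root.
def _quadratic_demo():
    import math
    a, b, c = 1.0, -1e8, 1.0
    d = math.sqrt(b*b - 4.0*a*c)
    x_naive = (-b - d) / (2.0*a)           # small root, cancellation-prone
    q = -0.5 * (b + math.copysign(d, b))
    x_big   = q / a                        # large root, no cancellation
    x_small = c / q                        # small root via x1*x2 = c/a
    return x_naive, x_small                # ~7.5e-09 (25% off) vs ~1e-08
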
def cexppower(loc, scale, alpha, x, lngam1oalpha=False,
              tolf=FOURMACHEPS, itmax=128):
    """
    The exponential power distribution:
    f  =  (a/s) * exp(-abs([x-l]/s)**a) / [2*gamma(1/a)]
    F  =  1/2 * [1 + sgn(x-l)*Fgamma(1/a, abs([x-l]/s)**a)],
                                              x in R;  s, a > 0
    where Fgamma is the gamma distribution cdf.

    NB It is possible to gain efficiency by providing the value of the
    natural logarithm of the complete gamma function ln(gamma(1.0/alpha))
    as a pre-computed input (may be computed using numlib.specfunc.lngamma)
    instead of the default 'False'.

    tolf and itmax are the numerical control parameters of cgamma.
    """

    assert scale > 0.0, \
                 "scale parameter must be a positive float in cexppower!"
    assert alpha > 0.0, \
           "shape parameter alpha must be a positive float in cexppower!"

    if alpha == 1.0: return claplace(loc, scale, x)

    ainv = 1.0 / alpha
    xml  = x - loc

    if not lngam1oalpha: lng1oa = lngamma(ainv)
    else:                lng1oa = lngam1oalpha
    cg  = cgamma(ainv, 1.0, abs(xml/scale)**alpha, lng1oa, tolf, itmax)
    cdf = 0.5 * (fsign(xml)*cg + 1.0)

    cdf = kept_within(0.0, cdf, 1.0)
    return cdf

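# ----------------------------------------------------------------------
# Hedged sanity-check sketch (NOT from the original source; relies only on
# the functions defined in this module). By symmetry the exponential power
# CDF at x = loc is exactly 1/2 for every shape alpha, and alpha = 1.0
# must reproduce the Laplace CDF it delegates to.
def _check_cexppower():
    for alpha in (0.5, 1.0, 2.0, 5.0):
        assert abs(cexppower(0.0, 1.0, alpha, 0.0) - 0.5) < 1e-12
    assert cexppower(0.0, 1.0, 1.0, 0.7) == claplace(0.0, 1.0, 0.7)
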
def _dstable_sym_small(alpha, x, tolr):
    """
    A series expansion for small x due to Bergstrom. Converges for
    x < 1.0 and in practice also for somewhat larger x. The function
    uses the Kahan summation procedure (cf. Dahlquist, Bjorck &
    Anderson).
    """

    summ  = 0.0
    c     = 0.0
    fact  = -1.0
    xx    = x*x
    xpart = 1.0
    k     = 0
    zero2 = zero1 = False
    while True:
        k += 1
        summo    = summ
        twokm1   = 2*k - 1
        twokm1oa = float(twokm1)/alpha
        r        = lngamma(twokm1oa) - lnfactorial(twokm1)
        term     = twokm1 * exp(r) * xpart
        fact     = - fact
        term    *= fact
        y = term + c
        t = summ + y
        if fsign(y) == fsign(summ):
            f = (0.46*t-t) + t
            c = ((summ-f)-(t-f)) + y
        else:
            c = (summ-t) + y
        summ = t
        if abs(summ-summo) < tolr*abs(summ) and abs(term) < tolr and zero2:
            break
        xpart *= xx
        if abs(term) < tolr:
            if zero1: zero2 = True
            else:     zero1 = True
    summ += c

    pdf = summ / (PI*alpha)

    pdf = kept_within(0.0, pdf)

    return pdf

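# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT from the original source): the plain
# Kahan compensated summation that the loop above elaborates on. The carry
# term c recaptures the low-order bits each naive addition rounds away;
# compare _kahan_sum([0.1]*10**7) with the built-in sum to see the drift.
def _kahan_sum(values):
    summ = 0.0
    c = 0.0                       # running compensation for lost low bits
    for v in values:
        y = v - c
        t = summ + y
        c = (t - summ) - y        # what this addition rounded away
        summ = t
    return summ
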
def _dstable_sym_big(alpha, x, tolr):
    """
    A series expansion for large x due to Bergstrom. Converges for
    x > 1.0. The function uses the Kahan summation procedure (cf.
    Dahlquist, Bjorck & Anderson).
    """

    summ  = 0.0
    c     = 0.0
    fact  = 1.0
    k     = 0
    zero2 = zero1 = False
    while True:
        k += 1
        summo = summ
        ak    = alpha * k
        r     = lngamma(ak) - lnfactorial(k)
        term  = - ak * exp(r) * sin(PIHALF*ak) / pow(x, ak+1)
        fact  = - fact
        term *= fact
        y = term + c
        t = summ + y
        if fsign(y) == fsign(summ):
            f = (0.46*t-t) + t
            c = ((summ-f)-(t-f)) + y
        else:
            c = (summ-t) + y
        summ = t
        if abs(summ-summo) < tolr*abs(summ) and abs(term) < tolr and zero2:
            break
        if abs(term) < tolr:
            if zero1: zero2 = True
            else:     zero1 = True
    summ += c

    #pdf = summ / PI
    pdf = PIINV * summ

    pdf = kept_within(0.0, pdf)

    return pdf

def bracketzero(func, x1, x2, caller='caller', factor=GOLDPHI1,
                maxniter=32):   # GOLDPHI1 is approx. 1.6
    """
    Bracket a root by expanding from the input "guesses" x1, x2.
    NB. It is not required that x2 > x1. Designed for use prior to any of
    the one-variable equation solvers. The function carries out a maximum
    of 'maxniter' iterations, each one expanding the original span by a
    factor of 'factor', until a span is reached in which there is a zero
    crossing.
    """

    assert factor > 1.0, "Expansion factor must be > 1.0 in bracketzero!"
    assert is_posinteger(maxniter), \
      "Maximum number of iterations must be a positive integer in bracketzero!"

    lo = min(x1, x2)
    up = max(x1, x2)

    flo = func(lo)
    fup = func(up)
    for k in range(0, maxniter):
        if fsign(flo) != fsign(fup): return lo, up
        if abs(flo) < abs(fup):
            lo += factor*(lo-up)
            flo = func(lo)
        else:
            up += factor*(up-lo)
            fup = func(up)

    errtxt1 = "Root bracketing failed after " + str(maxniter)
    errtxt2 = " iterations in bracketzero " + "(called from " + caller + ")"
    raise Error(errtxt1 + errtxt2)

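# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT from the original source; names are
# illustrative): the expansion idea of bracketzero on a concrete function.
# Starting from a span that does not straddle the root of f(x) = x*x - 2,
# the endpoint with the smaller |f| is pushed outward by the expansion
# factor until the signs at the two ends differ.
def _bracket_demo(factor=1.6):
    f = lambda x: x*x - 2.0
    lo, up = 0.1, 0.5                 # sqrt(2) lies outside [0.1, 0.5]
    flo, fup = f(lo), f(up)
    while (flo < 0.0) == (fup < 0.0):
        if abs(flo) < abs(fup):
            lo += factor*(lo - up);  flo = f(lo)
        else:
            up += factor*(up - lo);  fup = f(up)
    return lo, up                     # a span now known to contain sqrt(2)
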
def ludcmp_crout(matrix):
    """
    Decomposes/factorizes a square input matrix into a lower and an upper
    matrix using Crout's algorithm WITHOUT pivoting. NB. It only works
    for square matrices!!!
    """

    ndim = squaredim(matrix, 'ludcmp_crout')

    # Copy object instance to a new matrix in order for the original
    # instance not to be destroyed.
    # Create two new square matrices of the same size as the input matrix:
    # one unity matrix (to be the lower matrix), one zero matrix (to be
    # the upper matrix)
    copymx = deepcopy(matrix)
    lower = Matrix()
    lower.unity(ndim)
    upper = Matrix()
    upper.zero(ndim, ndim)
    permlist = list(range(0, ndim))

    # Perform the necessary manipulations:
    for j in range(0, ndim):
        iu = 0
        while iu <= j:
            k = 0
            summ = 0.0
            while k < iu:
                summ += lower[iu][k]*upper[k][j]
                k = k + 1
            upper[iu][j] = copymx[iu][j] - summ
            iu = iu + 1
        il = j + 1
        while il < ndim:
            k = 0
            summ = 0.0
            while k < j:
                summ += lower[il][k]*upper[k][j]
                k = k + 1
            divisor = float(upper[j][j])
            if abs(divisor) < TINY: divisor = fsign(divisor)*TINY
            lower[il][j] = (copymx[il][j]-summ) / divisor
            il = il + 1

    parity = 1.0

    return lower, upper, permlist, parity

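# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT from the original source; plain lists,
# illustrative names): the factorization computed above on a 2x2 example,
# verified by multiplying the factors back together. As in ludcmp_crout,
# L carries the unit diagonal and U is upper triangular.
def _crout_demo():
    A = [[4.0, 6.0], [2.0, 4.0]]
    U = [[A[0][0], A[0][1]],
         [0.0,     0.0]]
    L = [[1.0,             0.0],
         [A[1][0]/U[0][0], 1.0]]
    U[1][1] = A[1][1] - L[1][0]*U[0][1]
    # Reassemble: (L*U)[i][j] should reproduce A
    LU = [[sum(L[i][k]*U[k][j] for k in range(2)) for j in range(2)]
          for i in range(2)]
    return LU == A                     # True
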
def icauchy(prob, location=0.0, scale=1.0):
    """
    The inverse of a Cauchy distribution:
    f = 1 / [s*pi*(1 + [(x-l)/s]**2)]
    F = (1/pi)*arctan((x-l)/s) + 1/2
    (also known as the Lorentzian or Lorentz distribution)

    scale must be >= 0
    """

    _assertprob(prob, 'icauchy')
    assert scale >= 0.0, "scale parameter must not be negative in icauchy!"

    x = prob - 0.5
    try:
        r = tan(PI*x)
        r = scale*r + location
    except OverflowError:
        r = fsign(x) * float('inf')

    return r

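# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT from the original source; names are
# illustrative): inverse-transform sampling with the quantile above,
# F^-1(p) = l + s*tan(pi*(p - 1/2)). The sample median settles near
# 'location' even though the Cauchy mean never converges.
def _cauchy_sample_median(n, location=0.0, scale=1.0):
    import math, random
    draws = [location + scale*math.tan(math.pi*(random.random() - 0.5))
             for _ in range(n)]
    draws.sort()
    return draws[n//2]                 # a consistent estimator of location
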
def lngamma(alpha):
    """
    The natural logarithm of the gamma function for real, positive
    argument. Maximum fractional error can be estimated to < 1.e-13

    For alpha < 20.0 lngamma uses the Lanczos expansion with coefficients
    taken from http://home.att.net/~numericana/answer/info/godfrey.htm
    where the fractional error of the gamma function using these specific
    coefficients is claimed to be < 1.e-13

    For alpha >= 20.0 the Euler-Maclaurin series expansion for ln(gamma)
    is used (see for instance Dahlquist, Bjorck & Anderson). For
    Euler-Maclaurin the fractional  t r u n c a t i o n  error is less
    than 2.4e-14 (and always positive).
    """

    assert alpha > 0.0, "Argument must be real and positive in lngamma!"

    if alpha < 20.0:
        # The Lanczos expansion with coefficients taken from
        # http://home.att.net/~numericana/answer/info/godfrey.htm
        # where the fractional error of the gamma function using these
        # particular coefficients is claimed to be < 1.e-13:
        coeff = (  1.000000000000000174663,      5716.400188274341379136,
                 -14815.30426768413909044,      14291.49277657478554025,
                  -6348.160217641458813289,      1301.608286058321874105,
                   -108.1767053514369634679,        2.605696505611755827729,
                     -0.7423452510201416151527e-2,  0.5384136432509564062961e-7,
                     -0.4023533141268236372067e-8 )
        lm1  = len(coeff) - 1
        g    = 9.0
        arg1 = alpha + 0.5
        arg2 = arg1 + g
        summ = 0.0
        c    = 0.0
        a    = alpha + 11.0
        for k in range(lm1, 0, -1):
            # The Kahan summation procedure is used
            a   -= 1.0
            term = coeff[k]/a
            y = term + c
            t = summ + y
            if fsign(y) == fsign(summ):
                f = (0.46*t-t) + t
                c = ((summ-f)-(t-f)) + y
            else:
                c = (summ-t) + y
            summ = t
        summ += c
        summ += coeff[0]
        summ *= SQRTTWOPI
        lngam = arg1*log(arg2) - arg2 + log(summ/alpha)

    else:
        # The Euler-Maclaurin series expansion for ln(gamma)
        # (see for instance Dahlquist, Bjorck & Anderson).
        # For alpha >= 20.0 the fractional t_r_u_n_c_a_t_i_o_n
        # error is less than 2.4e-14 (and always positive):
        alfa = alpha - 1.0
        #const  =  0.9189385332046727417803296    # 0.5*log(2.0*pi)
        #coeff0 = -1.0
        #coeff1 =  0.0833333333333333333333333333  #  1.0/12.0
        #coeff2 = -0.0027777777777777777777777778  # -1.0/360.0
        #coeff3 =  0.0007936507936507936507936508  #  1.0/1260.0
        #coeff4 = -0.0005952380952380952380952381  # -1.0/1680.0
        #coeff5 =  0.0008417508417508417184175084  #  1.0/1188.0
        oneoa2 = 1.0 / alfa**2
        summ   = -1.0 + oneoa2*\
                 ( 0.0833333333333333333333333333 + oneoa2*\
                 (-0.0027777777777777777777777778 + oneoa2*\
                 ( 0.0007936507936507936507936508 + oneoa2*\
                  -0.0005952380952380952380952381)))   # coeff5 is not used
        summ  *= alfa
        lngam  = 0.9189385332046727417803296 + (alfa+0.5)*log(alfa) + summ
        #print coeff5 / (alfa**7 * lngam)   # Fractional truncation error -
                                            # coeff5 used here

    return lngam

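# ----------------------------------------------------------------------
# Hedged cross-check sketch (NOT from the original source): the standard
# library's math.lgamma computes the same quantity, so both branches of
# lngamma can be spot-checked against it. The 1e-12 bound is an assumption
# consistent with the accuracy claimed in the docstring.
def _check_lngamma():
    import math
    for a in (0.5, 1.0, 7.3, 19.99, 20.0, 150.0):
        ref = math.lgamma(a)
        assert abs(lngamma(a) - ref) <= 1e-12*(1.0 + abs(ref))
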
def cstable_sym(alpha, location, scale, x):
    """
    Cumulative distribution of a SYMMETRICAL stable distribution where
    alpha is the tail exponent. For numerical reasons alpha is restricted
    to [0.25, 0.9] and [1.125, 1.9] - but alpha = 1.0 (the Cauchy) and
    alpha = 2.0 (scaled normal) are also allowed!

    Numerics are somewhat crude but the fractional error is mostly
    < 0.001 - sometimes much less - and the absolute error is almost
    always < 0.001 - sometimes much less...

    NB This function is slow, particularly for small alpha !!!!!
    """

    # NB Do not change the numerical parameters - they are matched - they
    # are also matched with the parameters in the corresponding pdf
    # function dstable_sym on which this function is partly based!
    # Changes in dstable_sym are likely to require changes in this
    # function!

    assert 0.25 <= alpha <= 2.0, \
                             "alpha must be in [0.25, 2.0] in cstable_sym!"
    if alpha < 1.0:
        assert alpha <= 0.9,   "alpha < 1.0 must be <= 0.9 in cstable_sym!"
    if alpha > 1.0:
        assert alpha >= 1.125, "alpha > 1.0 must be >= 1.125 in cstable_sym!"
    if alpha > 1.9:
        assert alpha == 2.0,   "alpha > 1.9 must be = 2.0 in cstable_sym!"
    assert scale > 0.0,        "scale must be a positive float in cstable_sym!"

    if alpha == 1.0: return ccauchy(location, scale, x)
    if alpha == 2.0: return cnormal(location, SQRT2*scale, x)

    x = (x-location) / float(scale)
    s = fsign(x)
    x = abs(x)

    if x == 0.0: return 0.5

    if alpha < 1.0:
        if x <= 1.0:
            tolromb = 0.5**12 / (alpha*alpha)
            cdf = _stable_sym_int(alpha, x, tolromb, 10)
        else:
            cdf = _cstable_sym_big(alpha, x, MACHEPS)
    elif alpha > 1.0:
        y1 = -2.212502985 + alpha*(3.03077875081 - alpha*0.742811132)
        dy = 0.130*sqrt(alpha - 1.0)
        y2 = y1 + dy
        y1 = y1 - dy
        y1 = pow(10.0, y1)
        y2 = pow(10.0, y2)
        if x <= y1:
            cdf = _cstable_sym_small(alpha, x, MACHEPS)
        elif x <= y2:
            # Blend the small-x and tail approximations linearly across
            # the transition band [y1, y2]:
            c1  = (x-y1) / (y2-y1)
            c2  = 1.0 - c1
            cdf = c2*_cstable_sym_small(alpha, x, MACHEPS) + \
                  c1*_stable_sym_tail(alpha, x)
        else:
            cdf = _stable_sym_tail(alpha, x)

    if s < 0.0: cdf = 1.0 - cdf

    cdf = kept_within(0.0, cdf, 1.0)

    return cdf

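# ----------------------------------------------------------------------
# Hedged consistency sketch (NOT from the original source; uses only the
# routines in this module). The two closed-form corners give quick checks:
# alpha = 1 must match the Cauchy CDF and alpha = 2 the normal CDF with
# scale SQRT2*s.
def _check_cstable_sym():
    assert cstable_sym(1.0, 0.0, 1.0, 1.0) == ccauchy(0.0, 1.0, 1.0)  # 0.75
    assert cstable_sym(2.0, 0.0, 1.0, 0.0) == cnormal(0.0, SQRT2, 0.0)
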
def erfc1(x, tol=_EIGHTMACHEPS):
    """
    Computation of the complementary error function for real argument.
    Fractional error is estimated to < 50*machine epsilon for
    abs(x) <= 1.5 and < 1.e-8 elsewhere (erfc2 is called for
    abs(x) > 1.5 for numeric reasons).

    The function uses a power series expansion for arguments between
    -1.5 and +1.5 (cf. Abramowitz & Stegun) and continued fractions for
    all other arguments (cf. A. Cuyt et al., "Continued Fractions for
    Special Functions: Handbook and Software", Universiteit Antwerpen,
    where a slightly faster converging expression than that of
    Abramowitz & Stegun's CF is presented. Cuyt's "ER.20" is used here).
    """

    if tol < _EIGHTMACHEPS:
        tol  = _EIGHTMACHEPS
        txt1 = "No use using tolerance < 8.0*machine epsilon in erfc1."
        txt2 = " 8.0*machine epsilon is used"
        warn(txt1 + txt2)

    ax = abs(x)
    xx = x*x

    if ax <= _ERFC21:
        # Power series expansion (cf. Abramowitz & Stegun)
        k     = 0.0
        sign  = 1.0
        xpart = 1.0
        den1  = 1.0
        #den2  = 1.0
        #term  = sign*xpart/(den1*den2)
        #summ  = term
        summ  = 1.0
        c     = 0.0
        while True:
            # The Kahan summation proc. (cf. Dahlquist, Bjorck & Anderson)
            k     += 1.0
            summo  = summ
            sign   = -sign
            xpart *= xx
            den1  *= k
            den2   = 2.0*k + 1.0
            term   = sign*xpart/(den1*den2)
            y = term + c
            t = summ + y
            if fsign(y) == fsign(summ):
                f = (0.46*t-t) + t
                c = ((summ-f)-(t-f)) + y
            else:
                c = (summ-t) + y
            summ = t
            if abs(summ-summo) < tol*abs(summ):
                summ += c
                break
        #r = 1.0 - (2.0*ax/SQRTPI)*summ
        r = 1.0 - (2.0*SQRTPIINV*ax)*summ

    else:
        return erfc2(x)

    """
    # Compute continued fractions:
    # Q = b0 + a1/(b1 + a2/(b2 + a3/(b3 + ......... where ak
    # are numerator terms and where bk are denominator terms
    # (and where a0 is always 0).
    # Here:
    # b0 = 0.0
    # a1 = 1.0
    # a2 = 0.5
    # a3 = 1.5
    # a4 = 2.0
    # b1 = b3 etc = x*x
    # b2 = b4 etc = 1.0
    # (cf. Cuyt et al.)
    #k = 0.0
    bk  = 0.0
    Am1 = 1.0
    Bm1 = 0.0
    A0  = bk
    B0  = 1.0
    k   = 1.0
    bk  = xx
    ak  = 1.0
    Ap1 = bk*A0 + ak*Am1
    Bp1 = bk*B0 + ak*Bm1
    Q   = Ap1/Bp1
    Am1 = A0
    Bm1 = B0
    A0  = Ap1
    B0  = Bp1
    while True:
        k   += 1.0
        Qold = Q
        if is_eveninteger(k): bk = 1.0
        else:                 bk = xx
        ak  = 0.5 * (k-1.0)
        Ap1 = bk*A0 + ak*Am1
        Bp1 = bk*B0 + ak*Bm1
        Q   = Ap1/Bp1
        if abs(Q-Qold) < abs(Q)*tol: break
        Am1 = A0
        Bm1 = B0
        A0  = Ap1
        B0  = Bp1
    p = exp(-xx)
    if p == 0.0:   # Take a chance...
        #r = exp(-xx + log(ax*Q/SQRTPI))
        r = exp(-xx + log(SQRTPIINV*ax*Q))
    else:
        #r = ax * p * Q / SQRTPI
        r = SQRTPIINV * ax * p * Q
    """

    if x < 0.0: r = 2.0 - r

    r = kept_within(0.0, r, 2.0)

    return r

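# ----------------------------------------------------------------------
# Hedged cross-check sketch (NOT from the original source): math.erfc in
# the standard library provides a reference value; the tolerance below is
# an assumption based on the < 1.e-8 fractional error claimed above.
def _check_erfc1():
    import math
    for v in (-2.0, -0.5, 0.0, 0.7, 1.5, 3.0):
        ref = math.erfc(v)
        assert abs(erfc1(v) - ref) <= 1e-8*(1.0 + ref)
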
def ludcmp_crout_piv(matrix):
    """
    Decomposes/factorizes a square input matrix into a lower and an upper
    matrix using Crout's algorithm WITH pivoting. NB. It only works on
    square matrices!!!
    """

    ndim = squaredim(matrix, 'ludcmp_crout_piv')
    ndm1 = ndim - 1
    vv = array('d', ndim*[0.0])
    permlist = list(range(0, ndim))
    parity = 1.0
    imax = 0

    # Copy to matrix to be processed (maintains the original matrix intact)
    compactlu = deepcopy(matrix)

    for i in range(0, ndim):   # Copy and do some other stuff
        big = 0.0
        for j in range(0, ndim):
            temp = abs(compactlu[i][j])
            if temp > big: big = temp
        assert big > 0.0
        vv[i] = 1.0/big

    # Perform the necessary manipulations:
    for j in range(0, ndim):
        for i in range(0, j):
            summ = compactlu[i][j]
            for k in range(0, i):
                summ -= compactlu[i][k] * compactlu[k][j]
            compactlu[i][j] = summ
        big = 0.0
        for i in range(j, ndim):
            summ = compactlu[i][j]
            for k in range(0, j):
                summ -= compactlu[i][k] * compactlu[k][j]
            compactlu[i][j] = summ
            dum = vv[i] * abs(summ)
            if dum > big:
                big  = dum
                imax = i
        if j != imax:   # Swap row imax and row j
            imaxdum        = permlist[imax]   # NB in !!!!!!!!!!!!!!!!
            jdum           = permlist[j]      # NB in !!!!!!!!!!!!!!!!
            permlist[j]    = imaxdum          # NB in !!!!!!!!!!!!!!!!
            permlist[imax] = jdum             # NB in !!!!!!!!!!!!!!!!
            for k in range(0, ndim):
                dum = compactlu[imax][k]
                compactlu[imax][k] = compactlu[j][k]
                compactlu[j][k] = dum
            parity = - parity
            vv[imax] = vv[j]
        #permlist[j] = imax    # NB out !!!!!!!!!!!!!!!!!!!!!
        divisor = float(compactlu[j][j])
        if abs(divisor) < TINY: divisor = fsign(divisor)*TINY
        dum = 1.0 / divisor
        if j != ndm1:
            jp1 = j + 1
            for i in range(jp1, ndim):
                compactlu[i][j] *= dum

    # Unpack the compact LU representation into explicit lower and upper
    # triangular matrices:
    lower = Matrix()
    lower.zero(ndim, ndim)
    upper = Matrix()
    upper.zero(ndim, ndim)

    for i in range(0, ndim):
        for j in range(i, ndim):
            lower[j][i] = compactlu[j][i]
    for i in range(0, ndim):
        lower[i][i] = 1.0
    for i in range(0, ndim):
        for j in range(i, ndim):
            upper[i][j] = compactlu[i][j]

    del compactlu

    return lower, upper, permlist, parity

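# ----------------------------------------------------------------------
# Hedged usage sketch (NOT from the original source; the signatures are
# those defined in this module). Solving A*x = b with the pivoting
# factorization feeds the permutation list straight into lusubs:
#
#     lower, upper, permlist, parity = ludcmp_crout_piv(amatrix)
#     xvector = lusubs(lower, upper, bvector, permlist)
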
def zbisect(func, x1, x2, caller='caller', tolf=FOURMACHEPS,
            tola=SQRTTINY, maxniter=256, bracket=False):
    """
    Solves the equation func(x) = 0 on [x1, x2] using a bisection
    algorithm. zbisect converges more slowly than zbrent in most cases,
    but it might be faster in some cases!

    NB. The function always returns a value but a warning is printed to
    stdout if the iteration procedure has not converged! Cf. the comment
    below regarding convergence!

    Arguments:
    ----------
    func      Function having the proposed root as its argument

    x1        Lower search limit (root must be known to be >= x1
              unless prior bracketing is used)

    x2        Upper search limit (root must be known to be <= x2
              unless prior bracketing is used)

    tolf      Desired fractional accuracy of root (a combination of
              fractional and absolute will actually be used:
              tolf*abs(root) + tola)

    tola      Desired absolute accuracy of root (a combination of
              fractional and absolute will actually be used:
              tolf*abs(root) + tola) AND desired max absolute
              difference of func(root) from zero

    maxniter  Maximum number of iterations

    bracket   If True, x1 and x2 are used in an initial bracketing
              before solving

    Returns:
    ---------
    Final value of root

    This algorithm needs on the average log2((b-a)/tol) function
    evaluations to reach convergence. For instance: b-a = 1.0 and
    tol = 1.8e-12 will on the average provide convergence in about 40
    iterations. Bisection is "dead certain" and will always converge if
    there is a root. It is likely to pass the tolerances with no extra
    margin. If there is no root, it will converge to a singularity if
    there is one...
    """

    if tolf < MACHEPS:
        tolf  = MACHEPS
        wtxt1 = "Fractional tolerance less than machine epsilon is not a "
        wtxt2 = "good idea in zbisect. Machine epsilon will be used instead!"
        warn(wtxt1 + wtxt2)

    if tola < 0.0:
        tola  = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zbisect. 0.0 (zero) will be used instead!"
        warn(wtxt1 + wtxt2)

    assert is_posinteger(maxniter), \
       "Maximum number of iterations must be a positive integer in zbisect!"

    if bracket: x1, x2 = bracketzero(func, x1, x2, caller, maxniter)

    assert x2 > x1, \
              "Bounds must be given with the lower bound first in zbisect!"

    fmid = func(x2)
    if fmid == 0.0: return x2
    f = func(x1)
    if f == 0.0: return x1
    if fsign(fmid) == fsign(f):
        x1, x2 = bracketzero(func, x1, x2, caller, maxniter)
        wtxt1 = "Starting points must be on opposite sides of the root in "
        wtxt2 = "zbisect. Bracketing will be used to find an appropriate span!"
        warn(wtxt1 + wtxt2)
        f = func(x1)   # refresh the orientation test for the new span

    if f < 0.0:
        root = x1
        h    = x2 - x1
    else:
        root = x2
        h    = x1 - x2

    niter = 0
    while niter <= maxniter:
        niter += 1
        h    = 0.5 * h
        xmid = root + h
        fmid = func(xmid)
        if abs(fmid) < tola: return xmid
        if fmid <= 0.0: root = xmid
        absh = abs(h)
        if absh < tolf*abs(root) + tola: return root
    else:
        wtxt1 = str(maxniter) + " it'ns not sufficient in zbisect called by "
        wtxt2 = caller + ".\nfunc(x) = " + str(fmid) + " for x = " + str(root)
        warn(wtxt1 + wtxt2)
        return root

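# ----------------------------------------------------------------------
# Hedged, self-contained sketch (NOT from the original source): the
# docstring's iteration-count estimate on a concrete case. Halving a unit
# interval down to tol = 1.8e-12 takes ceil(log2(1.0/1.8e-12)) = 40 steps:
def _bisect_demo(tol=1.8e-12):
    import math
    f = math.cos                       # sign change at pi/2 in [1, 2]
    lo, up = 1.0, 2.0
    n = 0
    while up - lo > tol:
        n += 1
        mid = 0.5*(lo + up)
        if (f(mid) < 0.0) == (f(lo) < 0.0): lo = mid
        else:                               up = mid
    return 0.5*(lo + up), n            # (~pi/2, 40)
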
def zbrent(func, x1, x2, caller='caller', tolf=FOURMACHEPS,
           tola=SQRTTINY, maxniter=128, bracket=False):
    """
    Solves the equation func(x) = 0 on [x1, x2] using a variant of
    Richard Brent's algorithm (more like the "ZEROIN" of
    Forsythe-Malcolm-Moler).

    NB. The function always returns a value but a warning is printed to
    stdout if the iteration procedure has not converged! Cf. the comment
    below regarding convergence!

    Arguments:
    ----------
    func      Function having the proposed root as its argument

    x1        Lower search limit (root must be known to be >= x1
              unless prior bracketing is used)

    x2        Upper search limit (root must be known to be <= x2
              unless prior bracketing is used)

    tolf      Desired fractional accuracy of root (a combination of
              fractional and absolute will actually be used:
              tolf*abs(root) + tola). tolf should not be < 4.0*machine
              epsilon since this may inhibit convergence!

    tola      Desired absolute accuracy of root (a combination of
              fractional and absolute will actually be used:
              tolf*abs(root) + tola)

    maxniter  Maximum number of iterations

    bracket   If True, x1 and x2 are used in an initial bracketing
              before solving

    Returns:
    ---------
    Final value of root

    This algorithm is claimed to guarantee convergence within about
    (log2((b-a)/tol))**2 function evaluations, which is more demanding
    than bisection. For instance: b-a = 1.0 and tol = 1.8e-12 is
    guaranteed to converge with about 1,500 evaluations. It normally
    converges with fewer ITERATIONS, however, and for reasonably "smooth
    and well-behaved" functions it will on the average be more efficient
    and accurate than bisection.

    For details on the algorithm see Forsythe-Malcolm-Moler, as well as
    Brent, R.P.; "An algorithm with guaranteed convergence for finding a
    zero of a function", The Computer Journal 14(4), pp. 422-425, 1971.
    """

    if tolf < FOURMACHEPS:
        tolf  = FOURMACHEPS
        wtxt1 = "Fractional tol. less than 4.0*machine epsilon may prevent "
        wtxt2 = "convergence in zbrent. 4.0*macheps will be used instead!"
        warn(wtxt1 + wtxt2)

    if tola < 0.0:
        tola  = 0.0
        wtxt1 = "Negative absolute tolerance is not a good idea "
        wtxt2 = "in zbrent. 0.0 (zero) will be used instead!"
        warn(wtxt1 + wtxt2)

    assert is_posinteger(maxniter), \
        "Maximum number of iterations must be a positive integer in zbrent!"

    if bracket: x1, x2 = bracketzero(func, x1, x2, caller, maxniter)

    assert x2 > x1, "Bounds must be given with the lower bound first in zbrent!"

    a = x1
    b = x2
    c = x2    ############################### NOT IN REFERENCES !!!!!
    fa = func(x1)
    if fa == 0.0: return x1
    fb = func(x2)
    if fb == 0.0: return x2
    if fsign(fa) == fsign(fb):
        x1, x2 = bracketzero(func, x1, x2, caller, maxniter)
        wtxt1 = "Starting points must be on opposite sides of the root in "
        wtxt2 = "zbrent. Bracketing will be used to find an appropriate span!"
        warn(wtxt1 + wtxt2)
        a  = x1       # refresh the bookkeeping for the new span
        b  = x2
        c  = x2
        fa = func(x1)
        fb = func(x2)

    fc = fb
    niter = 0
    while niter <= maxniter:
        niter += 1
        if fsign(fb) == fsign(fc):
            c  = a
            fc = fa
            d  = b - a
            e  = d
        if abs(fc) < abs(fb):
            a  = b
            b  = c
            c  = a
            fa = fb
            fb = fc
            fc = fa
        tol  = tolf*abs(b) + tola
        tol1 = 0.5 * tol
        xm   = 0.5 * (c-b)
        if abs(xm) <= tol1 or fb == 0.0: return b
        if abs(e) >= tol1 and abs(fa) > abs(fb):
            # Attempt interpolation (secant if a == c, else inverse
            # quadratic):
            s = fb / fa
            if a == c:
                p = 2.0 * xm * s
                q = 1.0 - s
            else:
                q = fa / fc
                r = fb / fc
                p = s * (2.0*xm*q*(q-r) - (b-a)*(r-1.0))
                q = (q-1.0) * (r-1.0) * (s-1.0)
            if p > 0.0: q = -q
            p = abs(p)
            if 2.0*p < min(3.0*xm*q - abs(tol1*q), abs(e*q)):
                e = d     # accept the interpolation step
                d = p / q
            else:
                d = xm    # fall back on bisection
                e = d
        else:
            d = xm
            e = d
        a  = b
        fa = fb
        if abs(d) > tol1:
            b = b + d
        else:
            #b = b + sign(tol1, xm)
            if   xm < 0.0: b = b - tol1
            elif xm > 0.0: b = b + tol1
        fb = func(b)
    else:
        # Estimate of the number of evaluations needed for guaranteed
        # convergence (cf. the docstring):
        numb  = int(math.log((x2-x1)/tol, 2)**2 + 0.5)
        wtxt1 = str(maxniter) + " iterations not sufficient in zbrent called by"
        wtxt2 = " " + caller + ". func(x) = " + str(fb) + " for x = " + str(b)
        warn(wtxt1 + wtxt2)
        return b

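# ----------------------------------------------------------------------
# Hedged usage sketch (NOT from the original source; uses the solver just
# defined). Brent's classic test equation x**3 - 2*x - 5 = 0 has a root
# near 2.0945514815 in [2, 3]:
def _check_zbrent():
    root = zbrent(lambda x: x*x*x - 2.0*x - 5.0, 2.0, 3.0, '_check_zbrent')
    assert abs(root - 2.0945514815) < 1e-9
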
def inormal(prob, mu=0.0, sigma=1.0):
    """
    Returns the inverse of the cumulative normal distribution function.

    Reference: Boris Moro "The Full Monte", Risk Magazine, 8(2)
    (February): 57-58, 1995, where Moro improves on the Beasley-Springer
    algorithm (J. D. Beasley and S. G. Springer, Applied Statistics,
    vol. 26, 1977, pp. 118-121). This is further refined by Shaw,
    cf. below. Max relative error is claimed to be less than 2.6e-9
    """

    _assertprob(prob, 'inormal')
    assert sigma >= 0.0, "sigma must not be negative in inormal!"

    #a = (  2.50662823884, -18.61500062529,
    #      41.39119773534, -25.44106049637)                        # Moro
    #b = ( -8.47351093090,  23.08336743743,
    #     -21.06224101826,   3.13082909833)                        # Moro
    # The a and b below are claimed to be better by William Shaw in a
    # Mathematica working report: "Refinement of the Normal Quantile -
    # A benchmark Normal quantile based on recursion, and an appraisal
    # of the Beasley-Springer-Moro, Acklam, and Wichura (AS241) methods"
    # (William Shaw, Financial Mathematics Group, King's College, London;
    # [email protected]).
    # Max RELATIVE error is claimed to be reduced from 1.4e-8 to 2.6e-9
    # over the central region
    a = (  2.5066282682076065359, -18.515898959450185753,
          40.864622120467790785,  -24.820209533706798850)      # Moro/Shaw
    b = ( -8.4339736056039657294,  22.831834928541562628,
         -20.641301545177201274,    3.0154847661978822127)     # Moro/Shaw
    c = (0.3374754822726147, 0.9761690190917186, 0.1607979714918209,
         0.0276438810333863, 0.0038405729373609, 0.0003951896511919,
         0.0000321767881768, 0.0000002888167364, 0.0000003960315187)  # Moro

    x = prob - 0.5

    if abs(x) < 0.42:   # A rational approximation for the central region...
        r = x * x
        r = x * (((a[3]*r+a[2])*r+a[1])*r+a[0]) / \
                ((((b[3]*r+b[2])*r+b[1])*r+b[0])*r+1.0)
        r = sigma*r + mu

    else:               # ...and a polynomial for the tails
        r = prob
        if x > 0.0: r = 1.0 - prob
        try:
            r = safelog(-safelog(r))
            r = c[0] + r*(c[1] + r*(c[2] + r*(c[3] + r*(c[4] + r*(c[5] + \
                    r*(c[6] + r*(c[7] + r*c[8])))))))
            if x < 0.0: r = -r
            r = sigma*r + mu
        except ValueError:
            r = fsign(x) * float('inf')

    return r

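# ----------------------------------------------------------------------
# Hedged cross-check sketch (NOT from the original source): Python >= 3.8
# ships statistics.NormalDist, whose inv_cdf gives a reference quantile
# for spot-checking the Moro/Shaw approximation:
def _check_inormal():
    from statistics import NormalDist
    ref = NormalDist().inv_cdf(0.975)          # ~1.959963985
    assert abs(inormal(0.975) - ref) < 1e-8
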