def _softMax(self, Y):
    """softmax function used on outputs"""
    y = []
    sumExpY = 0.0
    for i in Y:
        sumExpY = sumExpY + _exp(i)
    for i in Y:
        t = _exp(i) / sumExpY
        y.append(t)
    return y
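# A minimal sketch of a numerically stable variant of the softmax above,
# assuming the same "_exp" alias for math.exp used throughout these
# snippets; "_softMaxStable" is an illustrative name, not from the
# original source. Subtracting max(Y) before exponentiating leaves the
# result unchanged mathematically but avoids OverflowError for large
# inputs.
from math import exp as _exp

def _softMaxStable(Y):
    m = max(Y)                       # shift so the largest exponent is 0
    expY = [_exp(v - m) for v in Y]
    s = sum(expY)
    return [v / s for v in expY]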
def gammavariate(self, alpha, beta):
    """Gamma distribution.  Not the gamma function!

    Conditions on the parameters are alpha > 0 and beta > 0.

    The probability distribution function is:

                x ** (alpha - 1) * math.exp(-x / beta)
      pdf(x) =  --------------------------------------
                  math.gamma(alpha) * beta ** alpha
    """
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError, "gammavariate: alpha and beta must be > 0.0"
    random = self.random
    if alpha > 1.0:
        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while 1:
            u1 = random()
            if not 1e-07 < u1 < 0.9999999:
                continue
            u2 = 1.0 - random()
            v = _log(u1 / (1.0 - u1)) / ainv
            x = alpha * _exp(v)
            z = u1 * u1 * u2
            r = bbb + ccc * v - x
            if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
                return x * beta
    else:
        if alpha == 1.0:
            u = random()
            while u <= 1e-07:
                u = random()
            return -_log(u) * beta
        while 1:
            u = random()
            b = (_e + alpha) / _e
            p = b * u
            if p <= 1.0:
                x = p ** (1.0 / alpha)
            else:
                x = -_log((b - p) / alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x ** (alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x * beta
def stdgamma(self, alpha, ainv, bbb, ccc):
    # ainv = sqrt(2 * alpha - 1)
    # bbb = alpha - log(4)
    # ccc = alpha + ainv
    random = self.random
    if alpha <= 0.0:
        raise ValueError, 'stdgamma: alpha must be > 0.0'
    if alpha > 1.0:
        # Uses R.C.H. Cheng, "The generation of Gamma
        # variables with non-integral shape parameters",
        # Applied Statistics, (1977), 26, No. 1, p71-74
        while 1:
            u1 = random()
            u2 = random()
            v = _log(u1/(1.0-u1))/ainv
            x = alpha*_exp(v)
            z = u1*u1*u2
            r = bbb+ccc*v-x
            if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                return x
    elif alpha == 1.0:
        # expovariate(1)
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u)
    else:
        # alpha is between 0 and 1 (exclusive)
        # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
        while 1:
            u = random()
            b = (_e + alpha)/_e
            p = b*u
            if p <= 1.0:
                x = pow(p, 1.0/alpha)
            else:  # p > 1
                x = -_log((b-p)/alpha)
            u1 = random()
            if not (((p <= 1.0) and (u1 > _exp(-x))) or
                    ((p > 1) and (u1 > pow(x, alpha - 1.0)))):
                break
        return x
def _calculate_weights(self, this_samples, N):
    """Calculate and save the weights of a run."""
    this_weights = self.weights.append(N)[:,0]
    if self.target_values is None:
        for i in range(N):
            tmp = self.target(this_samples[i]) - self.proposal.evaluate(this_samples[i])
            this_weights[i] = _exp(tmp)
    else:
        this_target_values = self.target_values.append(N)
        for i in range(N):
            this_target_values[i] = self.target(this_samples[i])
            tmp = this_target_values[i] - self.proposal.evaluate(this_samples[i])
            this_weights[i] = _exp(tmp)
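# A self-contained sketch of the log-weight computation above, outside
# the class: importance weights are exp(log target - log proposal). The
# Gaussian densities and names below are illustrative stand-ins, not
# from the original source.
from math import exp as _exp, log as _log, pi as _pi, sqrt as _sqrt

def log_normal_pdf(x, mu, sigma):
    return -0.5 * ((x - mu) / sigma) ** 2 - _log(sigma * _sqrt(2.0 * _pi))

def importance_weight(x):
    log_target = log_normal_pdf(x, 0.0, 1.0)    # target: N(0, 1)
    log_proposal = log_normal_pdf(x, 0.0, 2.0)  # proposal: N(0, 2)
    return _exp(log_target - log_proposal)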
def vonmisesvariate(self, mu, kappa):
    """Circular data distribution.

    mu is the mean angle, expressed in radians between 0 and 2*pi, and
    kappa is the concentration parameter, which must be greater than or
    equal to zero.  If kappa is equal to zero, this distribution reduces
    to a uniform random angle over the range 0 to 2*pi.
    """
    random = self.random
    if kappa <= 1e-06:
        return TWOPI * random()

    s = 0.5 / kappa
    r = s + _sqrt(1.0 + s * s)

    while 1:
        u1 = random()
        z = _cos(_pi * u1)

        d = z / (r + z)
        u2 = random()
        if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
            break

    q = 1.0 / r
    f = (q + z) / (1.0 + q * z)
    u3 = random()
    if u3 > 0.5:
        theta = (mu + _acos(f)) % TWOPI
    else:
        theta = (mu - _acos(f)) % TWOPI

    return theta
def vonmisesvariate(self, mu, kappa):
    """Circular data distribution.

    mu is the mean angle, expressed in radians between 0 and 2*pi, and
    kappa is the concentration parameter, which must be greater than or
    equal to zero.  If kappa is equal to zero, this distribution reduces
    to a uniform random angle over the range 0 to 2*pi.
    """
    random = self.random
    if kappa <= 1e-06:
        return TWOPI * random()

    a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
    b = (a - _sqrt(2.0 * a)) / (2.0 * kappa)
    r = (1.0 + b * b) / (2.0 * b)

    while 1:
        u1 = random()
        z = _cos(_pi * u1)
        f = (1.0 + r * z) / (r + z)
        c = kappa * (r - f)
        u2 = random()
        if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
            break

    u3 = random()
    if u3 > 0.5:
        theta = mu % TWOPI + _acos(f)
    else:
        theta = mu % TWOPI - _acos(f)

    return theta
def lognormvariate(self, mu, sigma):
    """Log normal distribution.

    If you take the natural logarithm of this distribution, you'll get a
    normal distribution with mean mu and standard deviation sigma.
    mu can have any value, and sigma must be greater than zero.
    """
    return _exp(self.normalvariate(mu, sigma))
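# A quick usage sketch: the snippet above mirrors the stdlib API, so
# random.lognormvariate can stand in for it. The check only illustrates
# the positive support; it is not part of the original sources.
import random
from math import log

x = random.lognormvariate(0.0, 1.0)
assert x > 0.0   # lognormal support is (0, inf)
_ = log(x)       # normally distributed with mean 0, std 1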
def _myExp(x):
    # Clamped exponential: values above 500 saturate at 1e200 to avoid
    # OverflowError. Note the asymmetric lower clamp: any x < 1e-08
    # (including 0 and all negative inputs) returns 0, not _exp(x).
    if x < 0.00000001:
        return 0
    elif x > 500:
        return 1e200
    else:
        return _exp(x)
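# A minimal sketch of an overflow-safe exponential, assuming only the
# stdlib; "_myExpSafe" is a hypothetical name, not from the original
# source. math.exp overflows for arguments above ~709.78 (the log of
# the largest double), so clamping at that bound keeps the full float
# range while never raising OverflowError.
from math import exp as _exp, log as _log
import sys

_MAX_EXP_ARG = _log(sys.float_info.max)  # about 709.78

def _myExpSafe(x):
    if x > _MAX_EXP_ARG:
        return sys.float_info.max  # saturate instead of raising
    return _exp(x)                 # exp underflows to 0.0 on its own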
def _sigmoid(x):
    # Clamp the tails to avoid overflow in _exp for large |x|.
    if x < -50:
        return 0.
    elif x > 20:
        return 1.
    else:
        return 1. / (1. + _exp(-x))
def vonmisesvariate(self, mu, kappa):
    random = self.random
    if kappa <= 1e-06:
        return TWOPI * random()

    a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
    # Parenthesize the divisors: the decompiled "/ 2.0 * kappa" computed
    # (a - sqrt(2a)) * kappa / 2, which is not Fisher's algorithm.
    b = (a - _sqrt(2.0 * a)) / (2.0 * kappa)
    r = (1.0 + b * b) / (2.0 * b)

    while 1:
        u1 = random()
        z = _cos(_pi * u1)
        f = (1.0 + r * z) / (r + z)
        c = kappa * (r - f)
        u2 = random()
        # Acceptance test restored from the decompiled "pass" chain.
        if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
            break

    u3 = random()
    if u3 > 0.5:
        theta = mu % TWOPI + _acos(f)
    else:
        theta = mu % TWOPI - _acos(f)

    return theta
def gammavariate(self, alpha, beta):
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError('gammavariate: alpha and beta must be > 0.0')
    random = self.random
    if alpha > 1.0:
        ainv = _sqrt(2.0*alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        # Restore the rejection loop the decompiler dropped; the stray
        # "continue" statements only make sense inside it.
        while True:
            u1 = random()
            if not 1e-07 < u1 < 0.9999999:
                continue
            u2 = 1.0 - random()
            v = _log(u1/(1.0 - u1))/ainv
            x = alpha*_exp(v)
            z = u1*u1*u2
            r = bbb + ccc*v - x
            if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                return x*beta
    elif alpha == 1.0:
        u = random()
        while u <= 1e-07:
            u = random()
        return -_log(u)*beta
    else:
        while True:
            u = random()
            b = (_e + alpha)/_e
            p = b*u
            if p <= 1.0:
                x = p**(1.0/alpha)
            else:
                x = -_log((b - p)/alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x**(alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x*beta
def gammavariate(self, alpha, beta):
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
    random = self.random
    if alpha > 1.0:
        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while 1:
            u1 = random()
            if not 1e-07 < u1 < 0.9999999:
                continue
            u2 = 1.0 - random()
            v = _log(u1 / (1.0 - u1)) / ainv
            x = alpha * _exp(v)
            z = u1 * u1 * u2
            r = bbb + ccc * v - x
            if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
                return x * beta
    else:
        if alpha == 1.0:
            u = random()
            while u <= 1e-07:
                u = random()
            return -_log(u) * beta
        while 1:
            u = random()
            b = (_e + alpha) / _e
            p = b * u
            if p <= 1.0:
                x = p ** (1.0 / alpha)
            else:
                x = -_log((b - p) / alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x ** (alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x * beta
def exp(S):
    """Convenience function for exponentiating PowerSeries.

    This can also replace the ``math.exp`` function, extending it to
    take a PowerSeries as an argument.
    """
    from math import exp as _exp
    if isinstance(S, PowerSeries):
        return S.exponential()
    return _exp(S)
def gammavariate(self, alpha, beta):
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
    random = self.random
    if alpha > 1.0:
        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while 1:
            u1 = random()
            u2 = random()
            v = _log(u1 / (1.0 - u1)) / ainv
            x = alpha * _exp(v)
            z = u1 * u1 * u2
            r = bbb + ccc * v - x
            if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
                return x * beta
    elif alpha == 1.0:
        u = random()
        while u <= 1e-07:
            u = random()
        return -_log(u) * beta
    else:
        while 1:
            u = random()
            b = (_e + alpha) / _e
            p = b * u
            if p <= 1.0:
                x = pow(p, 1.0 / alpha)
            else:
                x = -_log((b - p) / alpha)
            u1 = random()
            # Rejection test restored from the decompiled "pass" chain:
            # accept unless (p <= 1 and u1 > exp(-x)) or
            # (p > 1 and u1 > x**(alpha-1)).
            if not (((p <= 1.0) and (u1 > _exp(-x))) or
                    ((p > 1) and (u1 > pow(x, alpha - 1.0)))):
                break
        return x * beta
def _sigmoid(x):
    """Sigmoid function

    >>> _sigmoid(0)
    0.5
    >>> _sigmoid(1)
    0.7310585786300049
    >>> _sigmoid(-1)
    0.2689414213699951
    """
    e = 0
    try:
        e = _exp(-x)
    except OverflowError:
        # exp(-x) overflows only for very negative x, where the sigmoid
        # is effectively 0.
        return 0
    return 1./(1. + e)
def vonmisesvariate(self, mu, kappa):
    """Circular data distribution.

    mu is the mean angle, expressed in radians between 0 and 2*pi, and
    kappa is the concentration parameter, which must be greater than or
    equal to zero.  If kappa is equal to zero, this distribution reduces
    to a uniform random angle over the range 0 to 2*pi.
    """
    # mu:    mean angle (in radians between 0 and 2*pi)
    # kappa: concentration parameter kappa (>= 0)
    # if kappa = 0 generate uniform random angle

    # Based upon an algorithm published in: Fisher, N.I.,
    # "Statistical Analysis of Circular Data", Cambridge
    # University Press, 1993.

    # Thanks to Magnus Kessler for a correction to the
    # implementation of step 4.

    random = self.random
    if kappa <= 1e-6:
        return TWOPI * random()

    a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
    b = (a - _sqrt(2.0 * a)) / (2.0 * kappa)
    r = (1.0 + b * b) / (2.0 * b)

    while 1:
        u1 = random()
        z = _cos(_pi * u1)
        f = (1.0 + r * z) / (r + z)
        c = kappa * (r - f)
        u2 = random()
        if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)):
            break

    u3 = random()
    if u3 > 0.5:
        theta = (mu % TWOPI) + _acos(f)
    else:
        theta = (mu % TWOPI) - _acos(f)

    return theta
def vonmisesvariate(self, mu, kappa):
    """Circular data distribution.

    mu is the mean angle, expressed in radians between 0 and 2*pi, and
    kappa is the concentration parameter, which must be greater than or
    equal to zero.  If kappa is equal to zero, this distribution reduces
    to a uniform random angle over the range 0 to 2*pi.
    """
    # mu:    mean angle (in radians between 0 and 2*pi)
    # kappa: concentration parameter kappa (>= 0)
    # if kappa = 0 generate uniform random angle

    # Based upon an algorithm published in: Fisher, N.I.,
    # "Statistical Analysis of Circular Data", Cambridge
    # University Press, 1993.

    # Thanks to Magnus Kessler for a correction to the
    # implementation of step 4.

    random = self.random
    if kappa <= 1e-6:
        return TWOPI * random()

    s = 0.5 / kappa
    r = s + _sqrt(1.0 + s * s)

    while 1:
        u1 = random()
        z = _cos(_pi * u1)
        d = z / (r + z)
        u2 = random()
        if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
            break

    q = 1.0 / r
    f = (q + z) / (1.0 + q * z)
    u3 = random()
    if u3 > 0.5:
        theta = (mu + _acos(f)) % TWOPI
    else:
        theta = (mu - _acos(f)) % TWOPI

    return theta
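# A quick usage sketch: the snippets above mirror the stdlib API, so
# random.vonmisesvariate can stand in for them. This only illustrates
# the call signature and the 0..2*pi range of the result; it is not
# part of the original sources.
import random
from math import pi

samples = [random.vonmisesvariate(mu=pi, kappa=4.0) for _ in range(5)]
assert all(0.0 <= theta < 2 * pi for theta in samples)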
def _propagateInputClassification(self, input):
    """Same as _propagateInput; but applies softMax """
    Y, Z = self._propagateInputRegression(input)
    # apply softmax function
    try:
        expY = [_exp(y) for y in Y]
    except OverflowError:
        # if the exp of the outputs starts getting too big just
        # normalize the raw outputs instead
        expY = Y
    sumExpY = sum(expY)
    # divide the exponentiated outputs, not the raw ones
    Y = [y / sumExpY for y in expY]
    return Y, Z
def integer_nthroot(y, n):
    """
    Return a tuple containing x = floor(y**(1/n))
    and a boolean indicating whether the result is exact (that is,
    whether x**n == y).

    >>> integer_nthroot(16, 2)
    (4, True)
    >>> integer_nthroot(26, 2)
    (5, False)
    """
    if y < 0:
        raise ValueError("y must not be negative")
    if n < 1:
        raise ValueError("n must be positive")
    if y in (0, 1):
        return y, True
    if n == 1:
        return y, True
    if n > y:
        return 1, False
    # Get initial estimate for Newton's method. Care must be taken to
    # avoid overflow
    try:
        guess = int(y ** (1.0 / n) + 0.5)
    except OverflowError:
        try:
            guess = int(_exp(_log(y) / n) + 0.5)
        except OverflowError:
            guess = 1 << int(_log(y, 2) / n)
    # Newton iteration
    xprev, x = -1, guess
    while abs(x - xprev) > 1:
        t = x ** (n - 1)
        xprev, x = x, x - (t * x - y) // (n * t)
    # Compensate
    t = x ** n
    while t > y:
        x -= 1
        t = x ** n
    return x, t == y
def vonmisesvariate(self, mu, kappa):
    random = self.random
    if kappa <= 1e-06:
        return TWOPI*random()

    s = 0.5/kappa
    r = s + _sqrt(1.0 + s*s)

    while True:
        u1 = random()
        z = _cos(_pi*u1)
        d = z/(r + z)
        u2 = random()
        if u2 < 1.0 - d*d or u2 <= (1.0 - d)*_exp(d):
            break

    q = 1.0/r
    f = (q + z)/(1.0 + q*z)
    u3 = random()
    if u3 > 0.5:
        theta = (mu + _acos(f)) % TWOPI
    else:
        theta = (mu - _acos(f)) % TWOPI

    return theta
def vonmisesvariate(self, mu, kappa):
    random = self.random
    if kappa <= 1e-06:
        return TWOPI * random()

    a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
    b = (a - _sqrt(2.0 * a)) / (2.0 * kappa)
    r = (1.0 + b * b) / (2.0 * b)

    while 1:
        u1 = random()
        z = _cos(_pi * u1)
        f = (1.0 + r * z) / (r + z)
        c = kappa * (r - f)
        u2 = random()
        if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
            break

    u3 = random()
    if u3 > 0.5:
        theta = mu % TWOPI + _acos(f)
    else:
        theta = mu % TWOPI - _acos(f)

    return theta
"""Random variable generators.
def gammavariate(self, alpha, beta):
    """Gamma distribution.  Not the gamma function!

    Conditions on the parameters are alpha > 0 and beta > 0.

    The probability distribution function is:

                x ** (alpha - 1) * math.exp(-x / beta)
      pdf(x) =  --------------------------------------
                  math.gamma(alpha) * beta ** alpha
    """
    # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

    # Warning: a few older sources define the gamma distribution in terms
    # of alpha > -1.0
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError('gammavariate: alpha and beta must be > 0.0')

    random = self.random
    if alpha > 1.0:
        # Uses R.C.H. Cheng, "The generation of Gamma
        # variables with non-integral shape parameters",
        # Applied Statistics, (1977), 26, No. 1, p71-74
        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while 1:
            u1 = random()
            if not 1e-7 < u1 < 0.9999999:
                continue
            u2 = 1.0 - random()
            v = _log(u1 / (1.0 - u1)) / ainv
            x = alpha * _exp(v)
            z = u1 * u1 * u2
            r = bbb + ccc * v - x
            if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
                return x * beta
    elif alpha == 1.0:
        # expovariate(1)
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u) * beta
    else:
        # alpha is between 0 and 1 (exclusive)
        # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
        while 1:
            u = random()
            b = (_e + alpha) / _e
            p = b * u
            if p <= 1.0:
                x = p**(1.0 / alpha)
            else:
                x = -_log((b - p) / alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x**(alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x * beta
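# A usage sketch for the generator above. Since this is the stdlib
# implementation, random.gammavariate exhibits the documented moments:
# mean alpha*beta and variance alpha*beta**2. The sample size and
# tolerance below are illustrative choices, not from the original.
import random

alpha, beta = 2.0, 3.0
draws = [random.gammavariate(alpha, beta) for _ in range(100000)]
mean = sum(draws) / len(draws)
assert abs(mean - alpha * beta) < 0.2  # loose statistical tolerance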
def _f_Gauss(self, k_y, W_y):
    """Gaussian spectrum amplitude."""
    return _exp(-(k_y * W_y / 2)**2)
def sigmoidProb(y):
    # _random and invMaxY are defined elsewhere in the original module.
    return _random.random() < 1 / (1 + _exp(-y * invMaxY))
def profile(self, r):
    """..."""
    if self.x == 0 and self._m == 0:
        return self._norm * _exp(-((r.y**2 + r.z**2) / self._W_y**2))
    else:
        return super().profile(r)
def _f_Gauss_cartesian(self, k_y, k_z, W_y):
    """2d-Gaussian spectrum amplitude.

    Implementation for Cartesian coordinates.
    """
    return _exp(-W_y**2 * (k_y**2 + k_z**2)/4)
           'shuffle', 'normalvariate', 'lognormvariate', 'cunifvariate',
           'expovariate', 'vonmisesvariate', 'gammavariate', 'stdgamma',
           'gauss', 'betavariate', 'paretovariate', 'weibullvariate',
           'getstate', 'setstate', 'jumpahead', 'whseed']

def _verify(name, computed, expected):
    # for some reason, this failed on some machines, breaking some ages.
    # Original precision: 9.9999999999999995e-008
    if abs(computed - expected) > 1e-4:
        raise ValueError(
            'computed value for %s deviates too much (computed %g, expected %g)'
            % (name, computed, expected))

NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0)
_verify('NV_MAGICCONST', NV_MAGICCONST, 1.71552776992141)

TWOPI = 2.0 * _pi
_verify('TWOPI', TWOPI, 6.2831853071800001)

LOG4 = _log(4.0)
_verify('LOG4', LOG4, 1.3862943611198899)

SG_MAGICCONST = 1.0 + _log(4.5)
_verify('SG_MAGICCONST', SG_MAGICCONST, 2.5040773967762702)

del _verify

class Random:

    VERSION = 1

    def __init__(self, x=None):
def exp(x):
    return _exp(x)
def benchmark():
    """
    Run some benchmarks for clambdify and frange.

    NumPy and Psyco are used as reference if available.
    """
    from time import time
    from timeit import Timer

    def fbenchmark(f, var=[Symbol('x')]):
        """
        Do some benchmarks with f using clambdify, lambdify and psyco.
        """
        global cf, pf, psyf
        start = time()
        cf = clambdify(var, f)
        print('compile time (including sympy overhead): %f s' % (
            time() - start))
        pf = lambdify(var, f, 'math')
        psyf = None
        psyco = import_module('psyco')
        if psyco:
            psyf = lambdify(var, f, 'math')
            psyco.bind(psyf)
        code = '''for x in (i/1000. for i in range(1000)):
        f(%s)''' % ('x,'*len(var)).rstrip(',')
        t1 = Timer(code, 'from __main__ import cf as f')
        t2 = Timer(code, 'from __main__ import pf as f')
        if psyf:
            t3 = Timer(code, 'from __main__ import psyf as f')
        else:
            t3 = None
        print('for x = (0, 1, 2, ..., 999)/1000')
        print('20 times in 3 runs')
        print('compiled:      %.4f %.4f %.4f' % tuple(t1.repeat(3, 20)))
        print('Python lambda: %.4f %.4f %.4f' % tuple(t2.repeat(3, 20)))
        if t3:
            print('Psyco lambda:  %.4f %.4f %.4f' % tuple(t3.repeat(3, 20)))

    print('big function:')
    # "exp", "sin" and "cos" are the sympy names; the corpus had them
    # mangled to "_exp" etc., which sympy does not export.
    from sympy import exp, sin, cos, pi
    x = Symbol('x')
##    f1 = diff(exp(x)**2 - sin(x)**pi, x) \
##        * x**12-2*x**3+2*exp(x**2)-3*x**7+4*exp(123+x-x**5+2*x**4) \
##        * ((x + pi)**5).expand()
    f1 = 2*exp(x**2) + x**12*(-pi*sin(x)**((-1) + pi)*cos(x) + 2*exp(2*x)) \
        + 4*(10*pi**3*x**2 + 10*pi**2*x**3 + 5*pi*x**4 + 5*x*pi**4 + pi**5
             + x**5)*exp(123 + x + 2*x**4 - x**5) - 2*x**3 - 3*x**7
    fbenchmark(f1)
    print()
    print('simple function:')
    y = Symbol('y')
    f2 = sqrt(x*y) + x*5
    fbenchmark(f2, [x, y])
    times = 100000
    fstr = 'exp(sin(exp(-x**2)) + sqrt(pi)*cos(x**5/(x**3-x**2+pi*x)))'
    print()
    print('frange with f(x) =')
    print(fstr)
    print('for x=1, ..., %i' % times)
    print('in 3 runs including full compile time')
    t4 = Timer("frange('lambda x: %s', 0, %i)" % (fstr, times),
               'from __main__ import frange')

    numpy = import_module('numpy')

    print('frange: %.4f %.4f %.4f' % tuple(t4.repeat(3, 1)))
    if numpy:
        t5 = Timer('x = arange(%i); result = %s' % (times, fstr),
                   'from numpy import arange, sqrt, exp, sin, cos, pi')
        print('numpy: %.4f %.4f %.4f' % tuple(t5.repeat(3, 1)))
def lognormvariate(self, mu, sigma):
    return _exp(self.normalvariate(mu, sigma))
def _f_Gauss_spherical(self, sin_theta, W_y, k):
    """2d-Gaussian spectrum amplitude.

    Implementation for spherical coordinates.
    """
    return _exp(-(k*W_y*sin_theta/2)**2)
def _gauss(x, mu, sigma):
    # Normal pdf: the exponent needs the factor 2 that the original
    # omitted; without it the function is not normalized by
    # sqrt(2*pi*sigma**2).
    sigma2 = sigma**2
    return _exp(-(x - mu)**2 / (2 * sigma2)) / _sqrt(2 * _pi * sigma2)
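# A quick sanity check of the corrected density above, assuming the
# usual math aliases are in scope: the standard normal pdf at its mean
# is 1/sqrt(2*pi). This check is illustrative, not from the original.
from math import exp as _exp, sqrt as _sqrt, pi as _pi

assert abs(_gauss(0.0, 0.0, 1.0) - 1.0 / _sqrt(2.0 * _pi)) < 1e-12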
"vonmisesvariate", "gammavariate", "triangular", "gauss", "betavariate", "paretovariate", "weibullvariate", "getstate", "setstate", "jumpahead", "WichmannHill", "getrandbits", "SystemRandom", ] NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) TWOPI = 2.0 * _pi LOG4 = _log(4.0) SG_MAGICCONST = 1.0 + _log(4.5) BPF = 53 # Number of bits in a float RECIP_BPF = 2**-BPF # Translated by Guido van Rossum from C source provided by # Adrian Baddeley. Adapted by Raymond Hettinger for use with # the Mersenne Twister and os.urandom() core generators. import _random class Random(_random.Random): """Random number generator base class used by bound module functions.
def benchmark():
    """
    Run some benchmarks for clambdify and frange.

    NumPy and Psyco are used as reference if available.
    """
    from time import time
    from timeit import Timer

    def fbenchmark(f, var=[Symbol('x')]):
        """
        Do some benchmarks with f using clambdify, lambdify and psyco.
        """
        global cf, pf, psyf
        start = time()
        cf = clambdify(var, f)
        print('compile time (including sympy overhead): %f s' % (time() - start))
        pf = lambdify(var, f, 'math')
        psyf = None
        psyco = import_module('psyco')
        if psyco:
            psyf = lambdify(var, f, 'math')
            psyco.bind(psyf)
        code = '''for x in (i/1000. for i in range(1000)):
        f(%s)''' % ('x,' * len(var)).rstrip(',')
        t1 = Timer(code, 'from __main__ import cf as f')
        t2 = Timer(code, 'from __main__ import pf as f')
        if psyf:
            t3 = Timer(code, 'from __main__ import psyf as f')
        else:
            t3 = None
        print('for x = (0, 1, 2, ..., 999)/1000')
        print('20 times in 3 runs')
        print('compiled:      %.4f %.4f %.4f' % tuple(t1.repeat(3, 20)))
        print('Python lambda: %.4f %.4f %.4f' % tuple(t2.repeat(3, 20)))
        if t3:
            print('Psyco lambda:  %.4f %.4f %.4f' % tuple(t3.repeat(3, 20)))

    print('big function:')
    # "exp", "sin" and "cos" are the sympy names; the corpus had them
    # mangled to "_exp" etc., which sympy does not export.
    from sympy import exp, sin, cos, pi, lambdify
    x = Symbol('x')
##    f1 = diff(exp(x)**2 - sin(x)**pi, x) \
##        * x**12-2*x**3+2*exp(x**2)-3*x**7+4*exp(123+x-x**5+2*x**4) \
##        * ((x + pi)**5).expand()
    f1 = 2*exp(x**2) + x**12*(-pi*sin(x)**((-1) + pi)*cos(x) + 2*exp(2*x)) \
        + 4*(10*pi**3*x**2 + 10*pi**2*x**3 + 5*pi*x**4 + 5*x*pi**4 + pi**5
             + x**5)*exp(123 + x + 2*x**4 - x**5) - 2*x**3 - 3*x**7
    fbenchmark(f1)
    print()
    print('simple function:')
    y = Symbol('y')
    f2 = sqrt(x * y) + x * 5
    fbenchmark(f2, [x, y])
    times = 100000
    fstr = 'exp(sin(exp(-x**2)) + sqrt(pi)*cos(x**5/(x**3-x**2+pi*x)))'
    print()
    print('frange with f(x) =')
    print(fstr)
    print('for x=1, ..., %i' % times)
    print('in 3 runs including full compile time')
    t4 = Timer("frange('lambda x: %s', 0, %i)" % (fstr, times),
               'from __main__ import frange')

    numpy = import_module('numpy')

    print('frange: %.4f %.4f %.4f' % tuple(t4.repeat(3, 1)))
    if numpy:
        t5 = Timer('x = arange(%i); result = %s' % (times, fstr),
                   'from numpy import arange, sqrt, exp, sin, cos, pi')
        print('numpy: %.4f %.4f %.4f' % tuple(t5.repeat(3, 1)))
def sampling_weight(self):
    return _exp(-self.value)
"weibullvariate", "getstate", "setstate", "jumpahead", "whseed", ] def _verify(name, computed, expected): if abs(computed - expected) > 1e-7: raise ValueError( "computed value for %s deviates too much " "(computed %g, expected %g)" % (name, computed, expected) ) NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) _verify("NV_MAGICCONST", NV_MAGICCONST, 1.71552776992141) TWOPI = 2.0 * _pi _verify("TWOPI", TWOPI, 6.28318530718) LOG4 = _log(4.0) _verify("LOG4", LOG4, 1.38629436111989) SG_MAGICCONST = 1.0 + _log(4.5) _verify("SG_MAGICCONST", SG_MAGICCONST, 2.50407739677627) del _verify # Translated by Guido van Rossum from C source provided by # Adrian Baddeley.
def gammavariate(self, alpha: float, beta: float) -> float:
    """Gamma distribution.  Not the gamma function!

    Conditions on the parameters are alpha > 0 and beta > 0.

    The probability distribution function is:

                x ** (alpha - 1) * math.exp(-x / beta)
      pdf(x) =  --------------------------------------
                  math.gamma(alpha) * beta ** alpha
    """
    # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

    # Warning: a few older sources define the gamma distribution in terms
    # of alpha > -1.0
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError('gammavariate: alpha and beta must be > 0.0')

    random = self.random
    if alpha > 1.0:
        # Uses R.C.H. Cheng, "The generation of Gamma
        # variables with non-integral shape parameters",
        # Applied Statistics, (1977), 26, No. 1, p71-74
        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while 1:
            u1 = random()
            if not (1e-7 < u1 and u1 < .9999999):
                continue
            u2 = 1.0 - random()
            v = _log(u1/(1.0-u1))/ainv
            x = alpha*_exp(v)
            z = u1*u1*u2
            r = bbb+ccc*v-x
            if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                return x * beta
    elif alpha == 1.0:
        # expovariate(1)
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u) * beta
    else:
        # alpha is between 0 and 1 (exclusive)
        # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
        while 1:
            u = random()
            b = (_e + alpha)/_e
            p = b*u
            if p <= 1.0:
                x = p ** (1.0/alpha)
            else:
                x = -_log((b-p)/alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x ** (alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x * beta
def gammavariate(self, alpha, beta):
    """Gamma distribution.  Not the gamma function!

    Conditions on the parameters are alpha > 0 and beta > 0.
    """
    # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2

    # Warning: a few older sources define the gamma distribution in terms
    # of alpha > -1.0
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError, "gammavariate: alpha and beta must be > 0.0"

    random = self.random
    if alpha > 1.0:
        # Uses R.C.H. Cheng, "The generation of Gamma
        # variables with non-integral shape parameters",
        # Applied Statistics, (1977), 26, No. 1, p71-74
        ainv = _sqrt(2.0 * alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while 1:
            u1 = random()
            if not 1e-7 < u1 < 0.9999999:
                continue
            u2 = 1.0 - random()
            v = _log(u1 / (1.0 - u1)) / ainv
            x = alpha * _exp(v)
            z = u1 * u1 * u2
            r = bbb + ccc * v - x
            if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z):
                return x * beta
    elif alpha == 1.0:
        # expovariate(1)
        u = random()
        while u <= 1e-7:
            u = random()
        return -_log(u) * beta
    else:
        # alpha is between 0 and 1 (exclusive)
        # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
        while 1:
            u = random()
            b = (_e + alpha) / _e
            p = b * u
            if p <= 1.0:
                x = pow(p, 1.0 / alpha)
            else:  # p > 1
                x = -_log((b - p) / alpha)
            u1 = random()
            if not (((p <= 1.0) and (u1 > _exp(-x))) or
                    ((p > 1) and (u1 > pow(x, alpha - 1.0)))):
                break
        return x * beta
ifile_name, dummy, dummy, draws, separator, klasse_index = hdw.handle_commands(
    sys.argv, 'd:', ['separator=', 'classid='])
del dummy

sample_count = 5  ### this can be changed, though the cross-validation
                  ### tends to be too inaccurate with high values

universe = hdw.abstract_file(ifile_name, separator, klasse_index)
gtsigma = hdw.compute_maxvar(universe)  ### our limit
pieces = 20  ### how many intervals
graph = [None] * (pieces + 1)  ### statistics to plot

print 'file %s, iterating sigma from 0 to %.4f' % (
    ifile_name.split(os.sep)[-1], gtsigma)

for h in xrange(pieces + 1):
    #sigma = (_exp(float(h)/pieces) - 1) * gtsigma / (_e - 1)
    sigma = (_exp(float(h)/pieces)**4 - 1) * gtsigma / (_e**4 - 1)
    ### more small values than large ones
    print 'step %2d: sigma = %.4f' % (h + 1, sigma)
    overall_success = [0] * draws   ### per sigma
    overall_compress = [0] * draws  ### per sigma
    for i in xrange(draws):
        nsc.welt = {}  ### every draw we clear the world
        universe = hdw.abstract_file(ifile_name, separator, klasse_index)
        ### i can't understand this!!! this way we read the file at every draw
        ### instead of once per run. however, if we don't do this, strange
        ### things happen and the tests go f**k!!!
        ### probably the universe set() gets messy somewhere in the code but i
        ### can't find where.