def test_isqrt(): from math import sqrt as _sqrt limit = 17984395633462800708566937239551 assert int(_sqrt(limit)) == integer_nthroot(limit, 2)[0] assert int(_sqrt(limit + 1)) != integer_nthroot(limit + 1, 2)[0] assert isqrt(limit + 1) == integer_nthroot(limit + 1, 2)[0] assert isqrt(limit + 1 + S.Half) == integer_nthroot(limit + 1, 2)[0]
def vonmisesvariate(self, mu, kappa):
    random = self.random
    if kappa <= 1e-06:
        return TWOPI * random()
    a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
    b = (a - _sqrt(2.0 * a)) / (2.0 * kappa)
    r = (1.0 + b * b) / (2.0 * b)
    while 1:
        u1 = random()
        z = _cos(_pi * u1)
        f = (1.0 + r * z) / (r + z)
        c = kappa * (r - f)
        u2 = random()
        if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
            break
    u3 = random()
    if u3 > 0.5:
        theta = mu % TWOPI + _acos(f)
    else:
        theta = mu % TWOPI - _acos(f)
    return theta
def rgb_distance(r1, g1, b1, r2, g2, b2):
    """Calculates the numerical distance between two colors in RGB color
    space, using the CIE94 formula.

    :params: Two colors with ``r, g, b`` values in the ``0..1`` range
    :returns: A number in the ``0..100`` range; the smaller the value,
        the closer the colors are.
    """
    # Formulae from the Wikipedia article on CIE94.
    # Note: rgb_to_xyz expects channels in r, g, b order.
    L1, A1, B1 = xyz_to_lab(*rgb_to_xyz(r1, g1, b1))
    L2, A2, B2 = xyz_to_lab(*rgb_to_xyz(r2, g2, b2))
    dL = L1 - L2
    C1 = _sqrt(A1 * A1 + B1 * B1)
    C2 = _sqrt(A2 * A2 + B2 * B2)
    dCab = C1 - C2
    dA = A1 - A2
    dB = B1 - B2
    dEab = _sqrt(dL ** 2 + dA ** 2 + dB ** 2)
    dHab = _sqrt(max(dEab ** 2 - dL ** 2 - dCab ** 2, 0.0))
    dE = _sqrt((dL ** 2) + ((dCab / (1 + 0.045 * C1)) ** 2) +
               (dHab / (1 + 0.015 * C1)) ** 2)
    return dE
def vonmisesvariate(self, mu, kappa): """Circular data distribution. mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ random = self.random if kappa <= 1e-06: return TWOPI * random() a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa) b = (a - _sqrt(2.0 * a)) / (2.0 * kappa) r = (1.0 + b * b) / (2.0 * b) while 1: u1 = random() z = _cos(_pi * u1) f = (1.0 + r * z) / (r + z) c = kappa * (r - f) u2 = random() if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c): break u3 = random() if u3 > 0.5: theta = mu % TWOPI + _acos(f) else: theta = mu % TWOPI - _acos(f) return theta
def arcball(x, y): h2 = x*x+y*y if h2 > 1.: h = _sqrt(h2) v = x/h, y/h, 0. else: v = x, y, _sqrt(1.-h2) return 0., v
def __parse_word(word):
    """
    Read a string and determine whether it is a word, a number or a
    repeated list of values such as 3*4.5 = [4.5, 4.5, 4.5]

    Args:
        word: A string that should be parsed

    Returns:
        result: Value extracted (word, number or list)
        kind:   'word'  such as 'ntime', 'eV', '*1', 'angstrom', etc
                'int'   such as 1, 2, 3
                'float' such as 4.5, 6.7, etc
                'list'  such as [4.5, 4.5, 4.5]
    """
    result = None
    kind = None
    if word[0].isalpha() and word[:4] != 'sqrt' and word[:5] != '-sqrt':
        result = word
        kind = 'word'
    elif word[:4] == 'sqrt':
        result = _sqrt(float(word[5:-1]))
        kind = 'float'
    elif word[:5] == '-sqrt':
        result = -_sqrt(float(word[6:-1]))
        kind = 'float'
    elif word[0] == '*':
        result = word
        kind = 'word'
    elif word.isdigit():
        result = int(word)
        kind = "int"
    elif '*' in word:
        splt = word.split('*')
        if splt[0].isdigit():
            mult = int(splt[0])
            number, kind = string2number(splt[1])
            if number is not None:
                result = mult * [number]
                kind = 'list'
            else:
                result = None
                kind = None
    else:
        result, kind = string2number(word)
    return result, kind
def get_sphere_mapping(x, y, width, height): x = min([max([x, 0]), width]) y = min([max([y, 0]), height]) sr = _sqrt((width/2)**2 + (height/2)**2) sx = ((x - width / 2) / sr) sy = ((y - height / 2) / sr) sz = 1.0 - sx**2 - sy**2 if sz > 0.0: sz = _sqrt(sz) return (sx, sy, sz) else: sz = 0 return norm((sx, sy, sz))
def _facmod(self, n, q): res, N = 1, int(_sqrt(n)) # Exponent of prime p in n! is e_p(n) = [n/p] + [n/p**2] + ... # for p > sqrt(n), e_p(n) < sqrt(n), the primes with [n/p] = m, # occur consecutively and are grouped together in pw[m] for # simultaneous exponentiation at a later stage pw = [1]*N m = 2 # to initialize the if condition below for prime in sieve.primerange(2, n + 1): if m > 1: m, y = 0, n // prime while y: m += y y //= prime if m < N: pw[m] = pw[m]*prime % q else: res = res*pow(prime, m, q) % q for ex, bs in enumerate(pw): if ex == 0 or bs == 1: continue if bs == 0: return 0 res = res*pow(bs, ex, q) % q return res
def gauss(self, mu, sigma):
    """Gaussian distribution.

    mu is the mean, and sigma is the standard deviation.  This is
    slightly faster than the normalvariate() function.

    Not thread-safe without a lock around calls.
    """
    # When x and y are two variables from [0, 1), uniformly
    # distributed, then
    #
    #    cos(2*pi*x)*sqrt(-2*log(1-y))
    #    sin(2*pi*x)*sqrt(-2*log(1-y))
    #
    # are two *independent* variables with normal distribution
    # (mu = 0, sigma = 1).
    # (Lambert Meertens)
    # (corrected version; bug discovered by Mike Miller, fixed by LM)

    # Multithreading note: When two threads call this function
    # simultaneously, it is possible that they will receive the
    # same return value.  The window is very small though.  To
    # avoid this, you have to use a lock around all calls.  (I
    # didn't want to slow this down in the serial case by using a
    # lock here.)
    __random = self.random
    z = self.gauss_next
    self.gauss_next = None
    if z is None:
        x2pi = __random() * TWOPI
        g2rad = _sqrt(-2.0 * _log(1.0 - __random()))
        z = _cos(x2pi) * g2rad
        self.gauss_next = _sin(x2pi) * g2rad
    return mu + z*sigma
def vonmisesvariate(self, mu, kappa): """Circular data distribution. mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ random = self.random if kappa <= 1e-06: return TWOPI * random() s = 0.5 / kappa r = s + _sqrt(1.0 + s * s) while 1: u1 = random() z = _cos(_pi * u1) d = z / (r + z) u2 = random() if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): break q = 1.0 / r f = (q + z) / (1.0 + q * z) u3 = random() if u3 > 0.5: theta = (mu + _acos(f)) % TWOPI else: theta = (mu - _acos(f)) % TWOPI return theta
def _swing(cls, n): if n < 33: return cls._small_swing[n] else: N, primes = int(_sqrt(n)), [] for prime in sieve.primerange(3, N + 1): p, q = 1, n while True: q //= prime if q > 0: if q & 1 == 1: p *= prime else: break if p > 1: primes.append(p) for prime in sieve.primerange(N + 1, n//3 + 1): if (n // prime) & 1 == 1: primes.append(prime) L_product = R_product = 1 for prime in sieve.primerange(n//2 + 1, n + 1): L_product *= prime for prime in primes: R_product *= prime return L_product*R_product
def vonmisesvariate(self, mu, kappa): """Circular data distribution. mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ # mu: mean angle (in radians between 0 and 2*pi) # kappa: concentration parameter kappa (>= 0) # if kappa = 0 generate uniform random angle # Based upon an algorithm published in: Fisher, N.I., # "Statistical Analysis of Circular Data", Cambridge # University Press, 1993. # Thanks to Magnus Kessler for a correction to the # implementation of step 4. random = self.random if kappa <= 1e-6: return TWOPI * random() a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa) b = (a - _sqrt(2.0 * a)) / (2.0 * kappa) r = (1.0 + b * b) / (2.0 * b) while 1: u1 = random() z = _cos(_pi * u1) f = (1.0 + r * z) / (r + z) c = kappa * (r - f) u2 = random() if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)): break u3 = random() if u3 > 0.5: theta = (mu % TWOPI) + _acos(f) else: theta = (mu % TWOPI) - _acos(f) return theta
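# Illustrative use (added; not part of the original sources): for large kappa
# the von Mises distribution concentrates tightly around mu. The stdlib
# random.vonmisesvariate implements the same Fisher (1993) algorithm, so it
# stands in here for the method above.
import math
import random

mu = math.pi / 4
samples = [random.vonmisesvariate(mu, 50.0) for _ in range(10_000)]
# circular mean via the resultant vector
cmean = math.atan2(sum(math.sin(t) for t in samples),
                   sum(math.cos(t) for t in samples))
assert abs(cmean - mu) < 0.05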
def eval(cls, n, k): n, k = map(sympify, (n, k)) if k.is_Number: if k.is_Integer: if k < 0: return S.Zero elif k == 0 or n == k: return S.One elif n.is_Integer and n >= 0: n, k = int(n), int(k) if k > n: return S.Zero elif k > n // 2: k = n - k M, result = int(_sqrt(n)), 1 for prime in sieve.primerange(2, n+1): if prime > n - k: result *= prime elif prime > n // 2: continue elif prime > M: if n % prime < k % prime: result *= prime else: N, K = n, k exp = a = 0 while N > 0: a = int((N % prime) < (K % prime + a)) N, K = N // prime, K // prime exp = a + exp if exp > 0: result *= prime**exp return C.Integer(result) else: result = n - k + 1 for i in xrange(2, k+1): result *= n-k+i result /= i return result elif k.is_negative: return S.Zero elif (n - k).simplify().is_negative: return S.Zero else: d = n - k if d.is_Integer: return cls.eval(n, d)
def is_prime2(num): '''Tests if a given number is prime. Written with a map.''' if num == 2: return True elif num % 2 == 0 or num <= 1: return False root = _ceil(_sqrt(num)) return all(map(lambda div: False if num % div == 0 else True, range(3, root+1, 2)))
def is_prime(integer): """Returns True if ``integer`` is a prime, otherwise False.""" assert integer < primes[-1] ** 2 integer = -integer if integer < 0 else integer limit = _floor(_sqrt(integer)) + 1 for i in _takewhile(lambda elem: elem < limit, primes): if integer % i == 0: return False return integer > 1
def is_prime3(num): '''Tests if a given number is prime. Written with reduce.''' if num == 2: return True elif num % 2 == 0 or num <= 1: return False root = _ceil(_sqrt(num)) return _reduce(lambda acc, d: False if not acc or num % d == 0 else True, range(3, root+1, 2), True)
def sqrt(S): """Convenience function for taking square roots of PowerSeries. This can also replace the ``math.sqrt`` function, extending it to take a PowerSeries as an argument. """ from math import sqrt as _sqrt if isinstance(S, PowerSeries): return S.squareroot() return _sqrt(S)
def gauss(self, mu, sigma): random = self.random z = self.gauss_next self.gauss_next = None if z is None: x2pi = random() * TWOPI g2rad = _sqrt(-2.0 * _log(1.0 - random())) z = _cos(x2pi) * g2rad self.gauss_next = _sin(x2pi) * g2rad return mu + z * sigma
def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function! Conditions on the parameters are alpha > 0 and beta > 0. The probability distribution function is: x ** (alpha - 1) * math.exp(-x / beta) pdf(x) = -------------------------------------- math.gamma(alpha) * beta ** alpha """ if alpha <= 0.0 or beta <= 0.0: raise ValueError, "gammavariate: alpha and beta must be > 0.0" random = self.random if alpha > 1.0: ainv = _sqrt(2.0 * alpha - 1.0) bbb = alpha - LOG4 ccc = alpha + ainv while 1: u1 = random() if not 1e-07 < u1 < 0.9999999: continue u2 = 1.0 - random() v = _log(u1 / (1.0 - u1)) / ainv x = alpha * _exp(v) z = u1 * u1 * u2 r = bbb + ccc * v - x if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): return x * beta else: if alpha == 1.0: u = random() while u <= 1e-07: u = random() return -_log(u) * beta while 1: u = random() b = (_e + alpha) / _e p = b * u if p <= 1.0: x = p ** (1.0 / alpha) else: x = -_log((b - p) / alpha) u1 = random() if p > 1.0: if u1 <= x ** (alpha - 1.0): break elif u1 <= _exp(-x): break return x * beta
def _eval(self, n, k): # n.is_Number and k.is_Integer and k != 1 and n != k from sympy.functions.elementary.exponential import log from sympy.core import N if k.is_Integer: if n.is_Integer and n >= 0: n, k = int(n), int(k) if k > n: return S.Zero elif k > n // 2: k = n - k if HAS_GMPY: from sympy.core.compatibility import gmpy return Integer(gmpy.bincoef(n, k)) prime_count_estimate = N(n / log(n)) # if the number of primes less than n is less than k, use prime sieve method # otherwise it is more memory efficient to compute factorials explicitly if prime_count_estimate < k: M, result = int(_sqrt(n)), 1 for prime in sieve.primerange(2, n + 1): if prime > n - k: result *= prime elif prime > n // 2: continue elif prime > M: if n % prime < k % prime: result *= prime else: N, K = n, k exp = a = 0 while N > 0: a = int((N % prime) < (K % prime + a)) N, K = N // prime, K // prime exp = a + exp if exp > 0: result *= prime**exp else: result = ff(n, k) / factorial(k) return Integer(result) else: d = result = n - k + 1 for i in range(2, k + 1): d += 1 result *= d result /= i return result
def is_prime(num): '''Tests if a given number is prime. Written procedurally.''' if num == 2: return True elif num % 2 == 0 or num <= 1: return False count = 3 root = _sqrt(num) while count <= root: if num % count == 0: # If anything divides evenly, it isn't prime. return False count += 2 return True
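# Cross-check (added for illustration): the three primality tests above,
# is_prime (procedural), is_prime2 (map) and is_prime3 (reduce), should agree
# everywhere. The imports mirror the aliases those functions assume.
from math import sqrt as _sqrt, ceil as _ceil
from functools import reduce as _reduce

if __name__ == '__main__':
    for n in range(2, 500):
        assert is_prime(n) == is_prime2(n) == is_prime3(n), n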
def vonmisesvariate(self, mu, kappa): random = self.random if kappa <= 1e-06: return TWOPI * random() a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa) b = (a - _sqrt(2.0 * a)) / (2.0 * kappa) r = (1.0 + b * b) / (2.0 * b) while 1: u1 = random() z = _cos(_pi * u1) f = (1.0 + r * z) / (r + z) c = kappa * (r - f) u2 = random() if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c): break u3 = random() if u3 > 0.5: theta = mu % TWOPI + _acos(f) else: theta = mu % TWOPI - _acos(f) return theta
def vonmisesvariate(self, mu, kappa): """Circular data distribution. mu is the mean angle, expressed in radians between 0 and 2*pi, and kappa is the concentration parameter, which must be greater than or equal to zero. If kappa is equal to zero, this distribution reduces to a uniform random angle over the range 0 to 2*pi. """ # mu: mean angle (in radians between 0 and 2*pi) # kappa: concentration parameter kappa (>= 0) # if kappa = 0 generate uniform random angle # Based upon an algorithm published in: Fisher, N.I., # "Statistical Analysis of Circular Data", Cambridge # University Press, 1993. # Thanks to Magnus Kessler for a correction to the # implementation of step 4. random = self.random if kappa <= 1e-6: return TWOPI * random() s = 0.5 / kappa r = s + _sqrt(1.0 + s * s) while 1: u1 = random() z = _cos(_pi * u1) d = z / (r + z) u2 = random() if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d): break q = 1.0 / r f = (q + z) / (1.0 + q * z) u3 = random() if u3 > 0.5: theta = (mu + _acos(f)) % TWOPI else: theta = (mu - _acos(f)) % TWOPI return theta
def gammavariate(self, alpha, beta):
    if alpha <= 0.0 or beta <= 0.0:
        raise ValueError('gammavariate: alpha and beta must be > 0.0')
    random = self.random
    if alpha > 1.0:
        ainv = _sqrt(2.0*alpha - 1.0)
        bbb = alpha - LOG4
        ccc = alpha + ainv
        while True:
            u1 = random()
            if not 1e-07 < u1 < 0.9999999:
                continue
            u2 = 1.0 - random()
            v = _log(u1/(1.0 - u1))/ainv
            x = alpha*_exp(v)
            z = u1*u1*u2
            r = bbb + ccc*v - x
            if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                return x*beta
    else:
        if alpha == 1.0:
            u = random()
            while u <= 1e-07:
                u = random()
            return -_log(u)*beta
        while True:
            u = random()
            b = (_e + alpha)/_e
            p = b*u
            if p <= 1.0:
                x = p**(1.0/alpha)
            else:
                x = -_log((b - p)/alpha)
            u1 = random()
            if p > 1.0:
                if u1 <= x**(alpha - 1.0):
                    break
            elif u1 <= _exp(-x):
                break
        return x*beta
def _test_generator(n, funccall):
    import time
    print n, 'times', funccall
    code = compile(funccall, funccall, 'eval')
    sum = 0.0
    sqsum = 0.0
    smallest = 10000000000.0
    largest = -10000000000.0
    t0 = time.time()
    for i in range(n):
        x = eval(code)
        sum = sum + x
        sqsum = sqsum + x * x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1 - t0, 3), 'sec,',
    avg = sum / n
    stddev = _sqrt(sqsum / n - avg * avg)
    print 'avg %g, stddev %g, min %g, max %g' % (avg, stddev, smallest, largest)
def gauss(self, mu, sigma): """Gaussian distribution. mu is the mean, and sigma is the standard deviation. This is slightly faster than the normalvariate() function. Not thread-safe without a lock around calls. """ random = self.random z = self.gauss_next self.gauss_next = None if z is None: x2pi = random() * TWOPI g2rad = _sqrt(-2.0 * _log(1.0 - random())) z = _cos(x2pi) * g2rad self.gauss_next = _sin(x2pi) * g2rad return mu + z * sigma
def gammavariate(self, alpha, beta): if alpha <= 0.0 or beta <= 0.0: raise ValueError, 'gammavariate: alpha and beta must be > 0.0' random = self.random if alpha > 1.0: ainv = _sqrt(2.0 * alpha - 1.0) bbb = alpha - LOG4 ccc = alpha + ainv while 1: u1 = random() if not 1e-07 < u1 < 0.9999999: continue u2 = 1.0 - random() v = _log(u1 / (1.0 - u1)) / ainv x = alpha * _exp(v) z = u1 * u1 * u2 r = bbb + ccc * v - x if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): return x * beta else: if alpha == 1.0: u = random() while u <= 1e-07: u = random() return -_log(u) * beta while 1: u = random() b = (_e + alpha) / _e p = b * u if p <= 1.0: x = p ** (1.0 / alpha) else: x = -_log((b - p) / alpha) u1 = random() if p > 1.0: if u1 <= x ** (alpha - 1.0): break elif u1 <= _exp(-x): break return x * beta
def triangular(self, low=0.0, high=1.0, mode=None): """Triangular distribution. Continuous distribution bounded by given lower and upper limits, and having a given mode value in-between. http://en.wikipedia.org/wiki/Triangular_distribution """ u = self.random() try: c = 0.5 if mode is None else (mode - low) / (high - low) except ZeroDivisionError: return low if u > c: u = 1.0 - u c = 1.0 - c low, high = high, low return low + (high - low) * _sqrt(u * c)
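# Statistical sanity sketch (added; not from the source): the mean of a
# triangular distribution is (low + mode + high) / 3. The stdlib
# random.triangular uses the same inverse-CDF method as the code above.
import random

samples = [random.triangular(0.0, 10.0, 2.0) for _ in range(100_000)]
mean = sum(samples) / len(samples)
assert abs(mean - 4.0) < 0.1  # (0 + 10 + 2) / 3 = 4.0, loose tolerance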
def _test_generator(n, func, args): print(n, 'times', func.__name__) total = 0.0 sqsum = 0.0 smallest = 1e10 largest = -1e10 t0 = time.time() for i in range(n): x = func(*args) total += x sqsum = sqsum + x*x smallest = min(x, smallest) largest = max(x, largest) t1 = time.time() print(round(t1-t0, 3), 'sec,') avg = total/n stddev = _sqrt(sqsum/n - avg*avg) print('avg %g, stddev %g, min %g, max %g' % (avg, stddev, smallest, largest))
def _test_generator(n, func, args): import time print(n, "times", func.__name__) total = 0.0 sqsum = 0.0 smallest = 1e10 largest = -1e10 t0 = time.time() for i in range(n): x = func(*args) total += x sqsum = sqsum + x * x smallest = min(x, smallest) largest = max(x, largest) t1 = time.time() print(round(t1 - t0, 3), "sec,", end=" ") avg = total / n stddev = _sqrt(sqsum / n - avg * avg) print("avg %g, stddev %g, min %g, max %g\n" % (avg, stddev, smallest, largest))
def cdf(x): return (1.0 + _erf(x / _sqrt(2.0))) / 2.0
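# Minimal usage sketch (added): cdf above is the standard normal CDF,
# Phi(x) = (1 + erf(x / sqrt(2))) / 2. The imports mirror the aliases the
# one-liner assumes.
from math import erf as _erf, sqrt as _sqrt

assert abs(cdf(0.0) - 0.5) < 1e-12
assert abs(cdf(1.96) - 0.9750021) < 1e-6  # ~97.5th percentile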
def mag(a): return _sqrt(a[0]**2 + a[1]**2 + a[2]**2)
"""Random variable generators.
from math import sqrt as _sqrt from math import factorial as _fact # pi = {n*sk}^{-1} n = _sqrt(8) / (99**2) sk = 0 for k in range(5): sk += ((_fact(4 * k)) / (_fact(k)**4)) * ((26390 * k + 1103) / (396**(4 * k))) PI = 1 / (n * sk)
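# Hedged check (added for illustration): the Ramanujan series above gains
# roughly eight correct digits per term, so in float arithmetic five terms
# already pin PI down to machine precision.
import math

assert abs(PI - math.pi) < 1e-12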
def reset_parameters(self): self.weight.data.normal_(0, 1 * (_sqrt(1. / self.in_features))) if self.bias is not None: self.bias.data.zero_()
def gammavariate(self, alpha, beta): # beta times standard gamma ainv = _sqrt(2.0 * alpha - 1.0) return beta * self.stdgamma(alpha, ainv, alpha - LOG4, alpha + ainv)
def gammavariate(self, alpha, beta): """Gamma distribution. Not the gamma function! Conditions on the parameters are alpha > 0 and beta > 0. The probability distribution function is: x ** (alpha - 1) * math.exp(-x / beta) pdf(x) = -------------------------------------- math.gamma(alpha) * beta ** alpha """ # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2 # Warning: a few older sources define the gamma distribution in terms # of alpha > -1.0 if alpha <= 0.0 or beta <= 0.0: raise ValueError('gammavariate: alpha and beta must be > 0.0') random = self.random if alpha > 1.0: # Uses R.C.H. Cheng, "The generation of Gamma # variables with non-integral shape parameters", # Applied Statistics, (1977), 26, No. 1, p71-74 ainv = _sqrt(2.0 * alpha - 1.0) bbb = alpha - LOG4 ccc = alpha + ainv while 1: u1 = random() if not 1e-7 < u1 < .9999999: continue u2 = 1.0 - random() v = _log(u1 / (1.0 - u1)) / ainv x = alpha * _exp(v) z = u1 * u1 * u2 r = bbb + ccc * v - x if r + SG_MAGICCONST - 4.5 * z >= 0.0 or r >= _log(z): return x * beta elif alpha == 1.0: # expovariate(1) u = random() while u <= 1e-7: u = random() return -_log(u) * beta else: # alpha is between 0 and 1 (exclusive) # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle while 1: u = random() b = (_e + alpha) / _e p = b * u if p <= 1.0: x = p**(1.0 / alpha) else: x = -_log((b - p) / alpha) u1 = random() if p > 1.0: if u1 <= x**(alpha - 1.0): break elif u1 <= _exp(-x): break return x * beta
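# Statistical sanity sketch (added): per the docstring above the mean is
# alpha * beta, so sample means from the stdlib random.gammavariate (the same
# Cheng / ALGORITHM GS implementation) should land close to it.
import random

alpha, beta = 2.5, 2.0
samples = [random.gammavariate(alpha, beta) for _ in range(50_000)]
assert abs(sum(samples) / len(samples) - alpha * beta) < 0.1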
def mysqrt(x):
    if x < 0:
        raise ValueError("sqrt of negative number")
    return _sqrt(x)
def binomialvariate(self, n=1, p=0.5):
    """Binomial random variable.

    Gives the number of successes for *n* independent trials
    with the probability of success in each trial being *p*:

        sum(random() < p for i in range(n))

    Returns an integer in the range:   0 <= X <= n
    """
    # Error check inputs and handle edge cases
    if n < 0:
        raise ValueError("n must be non-negative")
    if p <= 0.0 or p >= 1.0:
        if p == 0.0:
            return 0
        if p == 1.0:
            return n
        raise ValueError("p must be in the range 0.0 <= p <= 1.0")

    random = self.random

    # Fast path for a common case
    if n == 1:
        return _index(random() < p)

    # Exploit symmetry to establish:  p <= 0.5
    if p > 0.5:
        return n - self.binomialvariate(n, 1.0 - p)

    if n * p < 10.0:
        # BG: Geometric method by Devroye with running time of O(np).
        # https://dl.acm.org/doi/pdf/10.1145/42372.42381
        x = y = 0
        c = _log(1.0 - p)
        if not c:
            return x
        while True:
            y += _floor(_log(random()) / c) + 1
            if y > n:
                return x
            x += 1

    # BTRS: Transformed rejection with squeeze method by Wolfgang Hörmann
    # https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.47.8407&rep=rep1&type=pdf
    assert n * p >= 10.0 and p <= 0.5
    setup_complete = False

    spq = _sqrt(n * p * (1.0 - p))  # Standard deviation of the distribution
    b = 1.15 + 2.53 * spq
    a = -0.0873 + 0.0248 * b + 0.01 * p
    c = n * p + 0.5
    vr = 0.92 - 4.2 / b

    while True:
        u = random()
        v = random()
        u -= 0.5
        us = 0.5 - _fabs(u)
        k = _floor((2.0 * a / us + b) * u + c)
        if k < 0 or k > n:
            continue

        # The early-out "squeeze" test substantially reduces
        # the number of acceptance condition evaluations.
        if us >= 0.07 and v <= vr:
            return k

        # Acceptance-rejection test.
        # Note, the original paper erroneously omits the call to log(v)
        # when comparing to the log of the rescaled binomial distribution.
        if not setup_complete:
            alpha = (2.83 + 5.1 / b) * spq
            lpq = _log(p / (1.0 - p))
            m = _floor((n + 1) * p)  # Mode of the distribution
            h = _lgamma(m + 1) + _lgamma(n - m + 1)
            setup_complete = True    # Only needs to be done once
        v *= alpha / (a / (us * us) + b)
        if _log(v) <= h - _lgamma(k + 1) - _lgamma(n - k + 1) + (k - m) * lpq:
            return k
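# Usage sketch (added; assumes Python 3.12+, where binomialvariate is part of
# the stdlib random API). The sample mean should sit near n * p.
import random

n, p = 100, 0.3
samples = [random.binomialvariate(n, p) for _ in range(20_000)]
assert abs(sum(samples) / len(samples) - n * p) < 0.5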
def gf_ddf_shoup(f, p, K): """ Kaltofen-Shoup: Deterministic Distinct Degree Factorization Given a monic square-free polynomial ``f`` in ``GF(p)[x]``, computes partial distinct degree factorization ``f_1,...,f_d`` of ``f`` where ``deg(f_i) != deg(f_j)`` for ``i != j``. The result is returned as a list of pairs ``(f_i, e_i)`` where ``deg(f_i) > 0`` and ``e_i > 0`` is an argument to the equal degree factorization routine. This algorithm is an improved version of Zassenhaus algorithm for large ``deg(f)`` and modulus ``p`` (especially for ``deg(f) ~ lg(p)``). Examples ======== >>> from sympy.polys.domains import ZZ >>> from sympy.polys.galoistools import gf_ddf_shoup, gf_from_dict >>> f = gf_from_dict({6: ZZ(1), 5: ZZ(-1), 4: ZZ(1), 3: ZZ(1), 1: ZZ(-1)}, 3, ZZ) >>> gf_ddf_shoup(f, 3, ZZ) [([1, 1, 0], 1), ([1, 1, 0, 1, 2], 2)] References ========== 1. [Kaltofen98]_ 2. [Shoup95]_ 3. [Gathen92]_ """ n = gf_degree(f) k = int(_ceil(_sqrt(n//2))) h = gf_pow_mod([K.one, K.zero], int(p), f, p, K) U = [[K.one, K.zero], h] + [K.zero]*(k - 1) for i in xrange(2, k + 1): U[i] = gf_compose_mod(U[i - 1], h, f, p, K) h, U = U[k], U[:k] V = [h] + [K.zero]*(k - 1) for i in xrange(1, k): V[i] = gf_compose_mod(V[i - 1], h, f, p, K) factors = [] for i, v in enumerate(V): h, j = [K.one], k - 1 for u in U: g = gf_sub(v, u, p, K) h = gf_mul(h, g, p, K) h = gf_rem(h, f, p, K) g = gf_gcd(f, h, p, K) f = gf_quo(f, g, p, K) for u in reversed(U): h = gf_sub(v, u, p, K) F = gf_gcd(g, h, p, K) if F != [K.one]: factors.append((F, k*(i + 1) - j)) g, j = gf_quo(g, F, p, K), j - 1 if f != [K.one]: factors.append((f, gf_degree(f))) return factors
TEXT_STYLE_POLYGONS = 1 TEXT_STYLE_LABELS = 2 PATH_TYPE_NORMAL = 0 PATH_TYPE_ROUNDED = 1 PATH_TYPE_EXTENDED = 2 PATH_TYPES = [PATH_TYPE_NORMAL, PATH_TYPE_ROUNDED, PATH_TYPE_EXTENDED] GDSII_MAX_COORDINATES = 200 NORTH = Coord2(0.0, 1.0) SOUTH = Coord2(0.0, -1.0) EAST = Coord2(1.0, 0.0) WEST = Coord2(-1.0, 0.0) _sqrt2_2 = _sqrt(0.5) NORTHEAST = Coord2( _sqrt2_2, _sqrt2_2, ) NORTHWEST = Coord2( -_sqrt2_2, _sqrt2_2, ) SOUTHEAST = Coord2( _sqrt2_2, -_sqrt2_2, ) SOUTHWEST = Coord2( -_sqrt2_2, -_sqrt2_2,
def _s(): s0 = Fraction.from_float(_sqrt(self.zero)) yield s0 for term in (self.tail * (s0 + S).reciprocal()): yield term
def _eval_Mod(self, q): n, k = self.args if any(x.is_integer is False for x in (n, k, q)): raise ValueError("Integers expected for binomial Mod") if all(x.is_Integer for x in (n, k, q)): n, k = map(int, (n, k)) aq, res = abs(q), 1 # handle negative integers k or n if k < 0: return 0 if n < 0: n = -n + k - 1 res = -1 if k % 2 else 1 # non negative integers k and n if k > n: return 0 isprime = aq.is_prime aq = int(aq) if isprime: if aq < n: # use Lucas Theorem N, K = n, k while N or K: res = res * binomial(N % aq, K % aq) % aq N, K = N // aq, K // aq else: # use Factorial Modulo d = n - k if k > d: k, d = d, k kf = 1 for i in range(2, k + 1): kf = kf * i % aq df = kf for i in range(k + 1, d + 1): df = df * i % aq res *= df for i in range(d + 1, n + 1): res = res * i % aq res *= pow(kf * df % aq, aq - 2, aq) res %= aq else: # Binomial Factorization is performed by calculating the # exponents of primes <= n in `n! /(k! (n - k)!)`, # for non-negative integers n and k. As the exponent of # prime in n! is e_p(n) = [n/p] + [n/p**2] + ... # the exponent of prime in binomial(n, k) would be # e_p(n) - e_p(k) - e_p(n - k) M = int(_sqrt(n)) for prime in sieve.primerange(2, n + 1): if prime > n - k: res = res * prime % aq elif prime > n // 2: continue elif prime > M: if n % prime < k % prime: res = res * prime % aq else: N, K = n, k exp = a = 0 while N > 0: a = int((N % prime) < (K % prime + a)) N, K = N // prime, K // prime exp += a if exp > 0: res *= pow(prime, exp, aq) res %= aq return Integer(res % q)
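# Standalone cross-check (added for illustration): for a prime modulus the
# code above uses Lucas' theorem, C(n, k) mod p = product over the base-p
# digits n_i, k_i of C(n_i, k_i) mod p. A minimal sketch verified against
# math.comb:
from math import comb

def _binom_mod_prime(n, k, p):
    res = 1
    while n or k:
        res = res * comb(n % p, k % p) % p
        n //= p
        k //= p
    return res

assert _binom_mod_prime(9170, 4070, 101) == comb(9170, 4070) % 101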
def _phase(self, k_y, x, y): """Phase function.""" return x * _sqrt(self._k**2 - k_y**2) + k_y * y
def __abs__(_self): 'Return the sqrt of the sum of the squares of all elements' return _sqrt(sum(e * e for e in _self))
from math import sqrt as _sqrt

# From the period formula for a pendulum, T = 2*pi*sqrt(L/g): a pendulum
# with period T = 2 s and length L = 1 m gives g = pi**2, so pi = sqrt(g).
_g = 9.81  # acceleration due to gravity on earth

PI = _sqrt(_g)
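# Worked check (added for illustration): with g = 9.81 m/s^2 rather than the
# exact pi**2 ~ 9.8696, the estimate is sqrt(9.81) ~ 3.1321, within about
# 0.3% of the true value.
import math

assert abs(PI - math.pi) / math.pi < 0.004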
def hypot(x, y): arg = x * x + y * y if arg.is_Rational: return _sqrt(arg) return sqrt(arg)
def _draw_arrow(self, x1, y1, x2, y2, Dx, Dy, label="", width=1.0,
                arrow_curvature=1.0, color="grey", patchA=None, patchB=None,
                shrinkA=0, shrinkB=0, arrow_label_size=None):
    """
    Draws a slightly curved arrow from (x1,y1) to (x2,y2).
    Will allow the given patches at start and end.
    """
    # set arrow properties
    dist = _sqrt(((x2 - x1) / float(Dx))**2 + ((y2 - y1) / float(Dy))**2)
    arrow_curvature *= 0.075  # standard scale
    rad = arrow_curvature / (dist)
    tail_width = width
    head_width = max(0.5, 2 * width)
    head_length = head_width
    self.ax.annotate(
        "", xy=(x2, y2), xycoords='data', xytext=(x1, y1), textcoords='data',
        arrowprops=dict(
            arrowstyle='simple,head_length=%f,head_width=%f,tail_width=%f' % (
                head_length, head_width, tail_width),
            color=color, shrinkA=shrinkA, shrinkB=shrinkB,
            patchA=patchA, patchB=patchB,
            connectionstyle="arc3,rad=%f" % -rad),
        zorder=0)
    # weighted center position
    center = _np.array([0.55 * x1 + 0.45 * x2, 0.55 * y1 + 0.45 * y2])
    v = _np.array([x2 - x1, y2 - y1])  # 1->2 vector
    vabs = _np.abs(v)
    vnorm = _np.array([v[1], -v[0]])  # orthogonal vector
    vnorm = _np.divide(vnorm, _np.linalg.norm(vnorm))  # normalize
    # cross product to determine the direction into which vnorm points
    z = _np.cross(v, vnorm)
    if z < 0:
        vnorm *= -1
    offset = 0.5 * arrow_curvature * \
        ((vabs[0] / (vabs[0] + vabs[1])) * Dx +
         (vabs[1] / (vabs[0] + vabs[1])) * Dy)
    ptext = center + offset * vnorm
    self.ax.text(ptext[0], ptext[1], label, size=arrow_label_size,
                 horizontalalignment='center', verticalalignment='center',
                 zorder=1)
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin from os import urandom as _urandom from _collections_abc import Set as _Set, Sequence as _Sequence from hashlib import sha512 as _sha512 __all__ = [ "Random", "seed", "random", "uniform", "randint", "choice", "sample", "randrange", "shuffle", "normalvariate", "lognormvariate", "expovariate", "vonmisesvariate", "gammavariate", "triangular", "gauss", "betavariate", "paretovariate", "weibullvariate", "getstate", "setstate", "getrandbits", "SystemRandom" ] NV_MAGICCONST = 4 * _exp(-0.5) / _sqrt(2.0) TWOPI = 2.0 * _pi LOG4 = _log(4.0) SG_MAGICCONST = 1.0 + _log(4.5) BPF = 53 # Number of bits in a float RECIP_BPF = 2**-BPF # Translated by Guido van Rossum from C source provided by # Adrian Baddeley. Adapted by Raymond Hettinger for use with # the Mersenne Twister and os.urandom() core generators. import _random class Random(_random.Random): """Random number generator base class used by bound module functions.
Can be used like: from parmed.constants import * """ from __future__ import division from math import pi as _pi, sqrt as _sqrt, log10 as _log10 __all__ = ['AMBER_ELECTROSTATIC', 'AMBER_POINTERS', 'NATOM', 'NTYPES', 'NBONH', 'MBONA', 'NTHETH', 'MTHETA', 'NPHIH', 'MPHIA', 'NHPARM', 'NPARM', 'NEXT', 'NRES', 'NBONA', 'NTHETA', 'NPHIA', 'NUMBND', 'NUMANG', 'NPTRA', 'NATYP', 'NPHB', 'IFPERT', 'NBPER', 'NGPER', 'NDPER', 'MBPER', 'MGPER', 'MDPER', 'IFBOX', 'NMXRS', 'IFCAP', 'NUMEXTRA', 'NCOPY', 'NNB', 'RAD_TO_DEG', 'DEG_TO_RAD'] AMBER_ELECTROSTATIC = 18.2223 CHARMM_ELECTROSTATIC = _sqrt(332.0716) AMBER_POINTERS = """ NATOM : total number of atoms NTYPES : total number of distinct atom types NBONH : number of bonds containing hydrogen MBONA : number of bonds not containing hydrogen NTHETH : number of angles containing hydrogen MTHETA : number of angles not containing hydrogen NPHIH : number of dihedrals containing hydrogen MPHIA : number of dihedrals not containing hydrogen NHPARM : currently not used NPARM : currently not used NEXT : number of excluded atoms NRES : number of residues NBONA : MBONA + number of constraint bonds
def distance(first, second):
    """Return the Euclidean distance between first and second."""
    dist = 0
    for i in xrange(dim):
        dist += (first.features[i] - second.features[i])**2
    return _sqrt(dist)
def __init__(self, x, params, called=False): """...""" self._W_y = params['W_y'] self._norm = 2 * _sqrt(math.pi) / self._W_y super().__init__(x, params, called)
def isqrt(n): """Return the largest integer less than or equal to sqrt(n).""" if n < 17984395633462800708566937239552: return int(_sqrt(n)) return integer_nthroot(int(n), 2)[0]
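# Illustrative check (added; mirrors test_isqrt above and assumes the same
# module context: sympy's integer_nthroot plus math.sqrt bound to _sqrt).
# Per that test, 17984395633462800708566937239551 is the last value before
# int(_sqrt(n)) drifts from the true integer square root, which is why
# isqrt() switches strategy just past it.
limit = 17984395633462800708566937239551
assert isqrt(limit) == int(_sqrt(limit))
assert isqrt(limit + 1) == integer_nthroot(limit + 1, 2)[0]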
def sqrt(x): if x > 0: return _sqrt(x) else: raise MathError(translate('MathErrors', _errors['mde']))
from math import sqrt as _sqrt, pi # noinspection PyUnresolvedReferences from scipy.constants import c as clight from scipy.constants import physical_constants as _cst from scipy.constants import e as qe e_mass = 1.0e+06 * _cst['electron mass energy equivalent in MeV'][0] # eV p_mass = 1.0e+06 * _cst['proton mass energy equivalent in MeV'][0] # eV _e_radius = _cst['classical electron radius'][0] _hbar_c = _cst['Planck constant over 2 pi times c in MeV fm'][0] Cgamma = 4.0 * pi * _e_radius / 3.0 / pow(e_mass, 3) # m/eV^3 Cq = 55 / 32 / _sqrt(3) * _hbar_c / e_mass * 1.0e-9 # m
def __isnumber(word):
    """
    Take a string and try to convert it into a number or a list of numbers.
    Integer values are returned whenever possible.

    Examples:
        '3'         =>  3
        '3.14'      =>  3.14
        '1.5e-5'    =>  0.000015
        '1.5d-5'    =>  0.000015
        '1.5E-5'    =>  0.000015
        '1.5D-5'    =>  0.000015
        '1/2'       =>  0.5
        'sqrt(3.0)' =>  1.732050

    Args:
        word: A string that should be converted into a number

    Returns:
        number: The value extracted
        kind: The kind of value
    """
    number = None
    kind = None
    try:
        number = int(word)
        kind = 'int'
    except ValueError:
        try:
            number = float(word)
            kind = 'float'
        except ValueError:
            if 'd' in word:
                word = word.replace('d', 'e')
            elif 'D' in word:
                word = word.replace('D', 'e')
            try:
                number = float(word)
                kind = 'float'
            except ValueError:
                if '/' in word:
                    splt = word.split('/')
                    if splt[0].isdigit() and splt[1].isdigit():
                        number = float(splt[0]) / float(splt[1])
                        kind = 'float'
                elif word[:4] == 'sqrt':
                    number = _sqrt(float(word[5:-1]))
                    kind = 'float'
                elif word[:5] == '-sqrt':
                    number = -_sqrt(float(word[6:-1]))
                    kind = 'float'
                else:
                    print('ERROR: "%s" is not a number\n' % word)
    return number, kind
def metrics(returns, benchmark=None, rf=0., display=True,
            mode='basic', sep=False, compounded=True,
            periods_per_year=252, prepare_returns=True,
            match_dates=False, **kwargs):

    win_year, _ = _get_trading_periods(periods_per_year)

    if benchmark is not None \
            and isinstance(benchmark, _pd.DataFrame) and len(benchmark.columns) > 1:
        raise ValueError("`benchmark` must be a pandas Series, "
                         "but a multi-column DataFrame was passed")

    blank = ['']

    if isinstance(returns, _pd.DataFrame):
        if len(returns.columns) > 1:
            raise ValueError("`returns` must be a pandas Series or a "
                             "one-column DataFrame, but a multi-column "
                             "DataFrame was passed")
        returns = returns[returns.columns[0]]

    if prepare_returns:
        returns = _utils._prepare_returns(returns)

    df = _pd.DataFrame({"returns": returns})

    if benchmark is not None:
        blank = ['', '']
        benchmark = _utils._prepare_benchmark(benchmark, returns.index, rf)
        if match_dates is True:
            returns, benchmark = _match_dates(returns, benchmark)
        df["returns"] = returns
        df["benchmark"] = benchmark

    df = df.fillna(0)

    # pct multiplier
    pct = 100 if display or "internal" in kwargs else 1
    if kwargs.get("as_pct", False):
        pct = 100

    # return df
    dd = _calc_dd(df, display=(display or "internal" in kwargs),
                  as_pct=kwargs.get("as_pct", False))

    metrics = _pd.DataFrame()
    s_start = {'returns': df['returns'].index.strftime('%Y-%m-%d')[0]}
    s_end = {'returns': df['returns'].index.strftime('%Y-%m-%d')[-1]}
    s_rf = {'returns': rf}

    if "benchmark" in df:
        s_start['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[0]
        s_end['benchmark'] = df['benchmark'].index.strftime('%Y-%m-%d')[-1]
        s_rf['benchmark'] = rf

    metrics['Start Period'] = _pd.Series(s_start)
    metrics['End Period'] = _pd.Series(s_end)
    metrics['Risk-Free Rate %'] = _pd.Series(s_rf)
    metrics['Time in Market %'] = _stats.exposure(df, prepare_returns=False) * pct

    metrics['~'] = blank

    if compounded:
        metrics['Cumulative Return %'] = (_stats.comp(df) * pct).map('{:,.2f}'.format)
    else:
        metrics['Total Return %'] = (df.sum() * pct).map('{:,.2f}'.format)

    metrics['CAGR﹪%'] = _stats.cagr(df, rf, compounded) * pct

    metrics['~~~~~~~~~~~~~~'] = blank

    metrics['Sharpe'] = _stats.sharpe(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sharpe'] = _stats.smart_sharpe(df, rf, win_year, True)

    metrics['Sortino'] = _stats.sortino(df, rf, win_year, True)
    if mode.lower() == 'full':
        metrics['Smart Sortino'] = _stats.smart_sortino(df, rf, win_year, True)

    metrics['Sortino/√2'] = metrics['Sortino'] / _sqrt(2)
    if mode.lower() == 'full':
        metrics['Smart Sortino/√2'] = metrics['Smart Sortino'] / _sqrt(2)
    metrics['Omega'] = _stats.omega(df, rf, 0., win_year)

    metrics['~~~~~~~~'] = blank
    metrics['Max Drawdown %'] = blank
    metrics['Longest DD Days'] = blank

    if mode.lower() == 'full':
        ret_vol = _stats.volatility(
            df['returns'], win_year, True, prepare_returns=False) * pct
        if "benchmark" in df:
            bench_vol = _stats.volatility(
                df['benchmark'], win_year, True, prepare_returns=False) * pct
            metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
            metrics['R^2'] = _stats.r_squared(
                df['returns'], df['benchmark'], prepare_returns=False)
        else:
            metrics['Volatility (ann.) %'] = [ret_vol]

        metrics['Calmar'] = _stats.calmar(df, prepare_returns=False)
        metrics['Skew'] = _stats.skew(df, prepare_returns=False)
        metrics['Kurtosis'] = _stats.kurtosis(df, prepare_returns=False)

        metrics['~~~~~~~~~~'] = blank

        metrics['Expected Daily %%'] = _stats.expected_return(
            df, prepare_returns=False) * pct
        metrics['Expected Monthly %%'] = _stats.expected_return(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Expected Yearly %%'] = _stats.expected_return(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Kelly Criterion %'] = _stats.kelly_criterion(
            df, prepare_returns=False) * pct
        metrics['Risk of Ruin %'] = _stats.risk_of_ruin(df, prepare_returns=False)

        metrics['Daily Value-at-Risk %'] = -abs(
            _stats.var(df, prepare_returns=False) * pct)
        metrics['Expected Shortfall (cVaR) %'] = -abs(
            _stats.cvar(df, prepare_returns=False) * pct)

    metrics['~~~~~~'] = blank

    metrics['Gain/Pain Ratio'] = _stats.gain_to_pain_ratio(df, rf)
    metrics['Gain/Pain (1M)'] = _stats.gain_to_pain_ratio(df, rf, "M")
    # if mode.lower() == 'full':
    #     metrics['GPR (3M)'] = _stats.gain_to_pain_ratio(df, rf, "Q")
    #     metrics['GPR (6M)'] = _stats.gain_to_pain_ratio(df, rf, "2Q")
    #     metrics['GPR (1Y)'] = _stats.gain_to_pain_ratio(df, rf, "A")
    metrics['~~~~~~~'] = blank

    metrics['Payoff Ratio'] = _stats.payoff_ratio(df, prepare_returns=False)
    metrics['Profit Factor'] = _stats.profit_factor(df, prepare_returns=False)
    metrics['Common Sense Ratio'] = _stats.common_sense_ratio(
        df, prepare_returns=False)
    metrics['CPC Index'] = _stats.cpc_index(df, prepare_returns=False)
    metrics['Tail Ratio'] = _stats.tail_ratio(df, prepare_returns=False)
    metrics['Outlier Win Ratio'] = _stats.outlier_win_ratio(
        df, prepare_returns=False)
    metrics['Outlier Loss Ratio'] = _stats.outlier_loss_ratio(
        df, prepare_returns=False)

    # returns
    metrics['~~'] = blank
    comp_func = _stats.comp if compounded else _np.sum

    today = df.index[-1]  # _dt.today()
    metrics['MTD %'] = comp_func(
        df[df.index >= _dt(today.year, today.month, 1)]) * pct

    d = today - _td(3 * 365 / 12)
    metrics['3M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(6 * 365 / 12)
    metrics['6M %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    metrics['YTD %'] = comp_func(df[df.index >= _dt(today.year, 1, 1)]) * pct

    d = today - _td(12 * 365 / 12)
    metrics['1Y %'] = comp_func(
        df[df.index >= _dt(d.year, d.month, d.day)]) * pct

    d = today - _td(3 * 365)
    metrics['3Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct

    d = today - _td(5 * 365)
    metrics['5Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct

    d = today - _td(10 * 365)
    metrics['10Y (ann.) %'] = _stats.cagr(
        df[df.index >= _dt(d.year, d.month, d.day)], 0., compounded) * pct

    metrics['All-time (ann.) %'] = _stats.cagr(df, 0., compounded) * pct

    # best/worst
    if mode.lower() == 'full':
        metrics['~~~'] = blank
        metrics['Best Day %'] = _stats.best(df, prepare_returns=False) * pct
        metrics['Worst Day %'] = _stats.worst(df, prepare_returns=False) * pct
        metrics['Best Month %'] = _stats.best(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Worst Month %'] = _stats.worst(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Best Year %'] = _stats.best(
            df, aggregate='A', prepare_returns=False) * pct
        metrics['Worst Year %'] = _stats.worst(
            df, aggregate='A', prepare_returns=False) * pct

    # dd
    metrics['~~~~'] = blank
    for ix, row in dd.iterrows():
        metrics[ix] = row
    metrics['Recovery Factor'] = _stats.recovery_factor(df)
    metrics['Ulcer Index'] = _stats.ulcer_index(df)
    metrics['Serenity Index'] = _stats.serenity_index(df, rf)

    # win rate
    if mode.lower() == 'full':
        metrics['~~~~~'] = blank
        metrics['Avg. Up Month %'] = _stats.avg_win(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Avg. Down Month %'] = _stats.avg_loss(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Days %%'] = _stats.win_rate(df, prepare_returns=False) * pct
        metrics['Win Month %%'] = _stats.win_rate(
            df, aggregate='M', prepare_returns=False) * pct
        metrics['Win Quarter %%'] = _stats.win_rate(
            df, aggregate='Q', prepare_returns=False) * pct
        metrics['Win Year %%'] = _stats.win_rate(
            df, aggregate='A', prepare_returns=False) * pct

    if "benchmark" in df:
        metrics['~~~~~~~'] = blank
        greeks = _stats.greeks(df['returns'], df['benchmark'], win_year,
                               prepare_returns=False)
        metrics['Beta'] = [str(round(greeks['beta'], 2)), '-']
        metrics['Alpha'] = [str(round(greeks['alpha'], 2)), '-']

    # prepare for display
    for col in metrics.columns:
        try:
            metrics[col] = metrics[col].astype(float).round(2)
            if display or "internal" in kwargs:
                metrics[col] = metrics[col].astype(str)
        except Exception:
            pass
        if (display or "internal" in kwargs) and "%" in col:
            metrics[col] = metrics[col] + '%'
    try:
        metrics['Longest DD Days'] = _pd.to_numeric(
            metrics['Longest DD Days']).astype('int')
        metrics['Avg. Drawdown Days'] = _pd.to_numeric(
            metrics['Avg. Drawdown Days']).astype('int')
        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = metrics['Longest DD Days'].astype(str)
            metrics['Avg. Drawdown Days'] = metrics[
                'Avg. Drawdown Days'].astype(str)
    except Exception:
        metrics['Longest DD Days'] = '-'
        metrics['Avg. Drawdown Days'] = '-'
        if display or "internal" in kwargs:
            metrics['Longest DD Days'] = '-'
            metrics['Avg. Drawdown Days'] = '-'

    metrics.columns = [
        col if '~' not in col else '' for col in metrics.columns
    ]
    metrics.columns = [
        col[:-1] if '%' in col else col for col in metrics.columns
    ]
    metrics = metrics.T

    if "benchmark" in df:
        metrics.columns = ['Strategy', 'Benchmark']
    else:
        metrics.columns = ['Strategy']

    if display:
        print(_tabulate(metrics, headers="keys", tablefmt='simple'))
        return None

    if not sep:
        metrics = metrics[metrics.index != '']
    return metrics
from math import sqrt as _sqrt _upper_limit = 100 PI = 0 for k in range(0, _upper_limit + 1): PI += ((-1)**k) / ((3**k) * (2 * k + 1)) PI *= _sqrt(12)
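# Hedged check (added): this is the Madhava series
# pi = sqrt(12) * sum((-1)**k / (3**k * (2*k + 1)) for k >= 0); the 3**-k
# factor makes it converge fast, so 100 terms is far beyond double precision.
import math

assert abs(PI - math.pi) < 1e-12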
def norm(v): return _sqrt(dot(v, v))
def plot_network(self, state_sizes=None, state_scale=1.0,
                 state_colors='#ff5500', state_labels='auto',
                 arrow_scale=1.0, arrow_curvature=1.0,
                 arrow_labels='weights', arrow_label_format='%10.2f',
                 max_width=12, max_height=12, figpadding=0.2, xticks=False,
                 yticks=False, show_frame=False, **textkwargs):
    """
    Draws a network using discs and curved arrows.

    The thicknesses and labels of the arrows are taken from the off-diagonal
    matrix elements in A.
    """
    # Set the default values for the text dictionary
    from matplotlib import pyplot as _plt
    textkwargs.setdefault('size', None)
    textkwargs.setdefault('horizontalalignment', 'center')
    textkwargs.setdefault('verticalalignment', 'center')
    textkwargs.setdefault('color', 'black')
    # remove the temporary key 'arrow_label_size' as it cannot be parsed by plt.text!
    arrow_label_size = textkwargs.pop('arrow_label_size', textkwargs['size'])
    if self.pos is None:
        self.layout_automatic()
    # number of nodes
    n = len(self.pos)
    # get bounds and pad figure
    xmin = _np.min(self.pos[:, 0])
    xmax = _np.max(self.pos[:, 0])
    Dx = xmax - xmin
    xmin -= Dx * figpadding
    xmax += Dx * figpadding
    Dx *= 1 + figpadding
    ymin = _np.min(self.pos[:, 1])
    ymax = _np.max(self.pos[:, 1])
    Dy = ymax - ymin
    ymin -= Dy * figpadding
    ymax += Dy * figpadding
    Dy *= 1 + figpadding
    # sizes of nodes
    if state_sizes is None:
        state_sizes = 0.5 * state_scale * \
            min(Dx, Dy)**2 * _np.ones(n) / float(n)
    else:
        state_sizes = 0.5 * state_scale * \
            min(Dx, Dy)**2 * state_sizes / (_np.max(state_sizes) * float(n))
    # automatic arrow rescaling
    arrow_scale *= 1.0 / \
        (_np.max(self.A - _np.diag(_np.diag(self.A))) * _sqrt(n))
    # size figure
    if (Dx / max_width > Dy / max_height):
        figsize = (max_width, Dy * (max_width / Dx))
    else:
        figsize = (Dx / Dy * max_height, max_height)
    if self.ax is None:
        logger.debug("creating new figure")
        fig = _plt.figure(None, figsize=figsize)
        self.ax = fig.add_subplot(111)
    else:
        fig = self.ax.figure
        window_extend = self.ax.get_window_extent()
        axes_ratio = window_extend.height / window_extend.width
        data_ratio = (ymax - ymin) / (xmax - xmin)
        q = axes_ratio / data_ratio
        if q > 1.0:
            ymin *= q
            ymax *= q
        else:
            xmin /= q
            xmax /= q
    if not xticks:
        self.ax.get_xaxis().set_ticks([])
    if not yticks:
        self.ax.get_yaxis().set_ticks([])
    # show or suppress frame
    self.ax.set_frame_on(show_frame)
    # set node labels
    if state_labels is None:
        pass
    elif isinstance(state_labels, str) and state_labels == 'auto':
        state_labels = [str(i) for i in _np.arange(n)]
    else:
        if len(state_labels) != n:
            raise ValueError(
                "length of state_labels({}) has to match length of states({})."
                .format(len(state_labels), n))
    # set node colors
    if state_colors is None:
        state_colors = '#ff5500'  # None is not acceptable
    if isinstance(state_colors, str):
        state_colors = [state_colors] * n
    if isinstance(state_colors, list) and not len(state_colors) == n:
        raise ValueError(
            "Mismatch between nstates and nr. state_colors (%u vs %u)"
            % (n, len(state_colors)))
    try:
        colorscales = _types.ensure_ndarray(state_colors, ndim=1, kind='numeric')
        colorscales /= colorscales.max()
        state_colors = [
            _plt.cm.binary(int(256.0 * colorscales[i])) for i in range(n)
        ]
    except AssertionError:
        # assume we have a list of strings now.
        logger.debug("could not cast 'state_colors' to numeric values.")

    # set arrow labels
    if isinstance(arrow_labels, _np.ndarray):
        L = arrow_labels
        if isinstance(arrow_labels[0, 0], str):
            arrow_label_format = '%s'
    elif isinstance(arrow_labels, str) and arrow_labels.lower() == 'weights':
        L = self.A[:, :]
    elif arrow_labels is None:
        L = _np.empty(_np.shape(self.A), dtype=object)
        L[:, :] = ''
        arrow_label_format = '%s'
    else:
        raise ValueError('invalid arrow labels')
    # draw circles
    circles = []
    for i in range(n):
        # choose color
        c = _plt.Circle(self.pos[i],
                        radius=_sqrt(0.5 * state_sizes[i]) / 2.0,
                        color=state_colors[i], zorder=2)
        circles.append(c)
        self.ax.add_artist(c)
        # add annotation
        if state_labels is not None:
            self.ax.text(self.pos[i][0], self.pos[i][1], state_labels[i],
                         zorder=3, **textkwargs)
    assert len(circles) == n, "%i != %i" % (len(circles), n)
    # draw arrows
    for i in range(n):
        for j in range(i + 1, n):
            if (abs(self.A[i, j]) > 0):
                self._draw_arrow(
                    self.pos[i, 0], self.pos[i, 1],
                    self.pos[j, 0], self.pos[j, 1], Dx, Dy,
                    label=arrow_label_format % L[i, j],
                    width=arrow_scale * self.A[i, j],
                    arrow_curvature=arrow_curvature,
                    patchA=circles[i], patchB=circles[j],
                    shrinkA=3, shrinkB=0,
                    arrow_label_size=arrow_label_size)
            if (abs(self.A[j, i]) > 0):
                self._draw_arrow(
                    self.pos[j, 0], self.pos[j, 1],
                    self.pos[i, 0], self.pos[i, 1], Dx, Dy,
                    label=arrow_label_format % L[j, i],
                    width=arrow_scale * self.A[j, i],
                    arrow_curvature=arrow_curvature,
                    patchA=circles[j], patchB=circles[i],
                    shrinkA=3, shrinkB=0,
                    arrow_label_size=arrow_label_size)
    # plot
    self.ax.set_xlim(xmin, xmax)
    self.ax.set_ylim(ymin, ymax)
    return fig