def long_interval(w, n, t):
    """
    Return the interval of integration for longitudinal impedance,
    which is of the form [0, root, inf], where root satisfies
    d_l(root, w, n, t) = 0.
    """
    if w < mp.sqrt(1+n):
        return [0, mp.inf]
    if n == 0:
        guesses = [4, 6, 8, 10]
    else:
        guesses = [4, 6, 8, 10, 12, 14]
    int_range = [0, mp.inf]
    for guess in guesses:
        try:
            root = mp.findroot(lambda z: BiMax.d_l(z, w, n, t), guess)
        except ValueError:
            continue
        unique = True
        for z in int_range:
            if mp.fabs(z - mp.fabs(root)) < 1e-4:
                unique = False
        if unique:
            int_range += [mp.fabs(root)]
    int_range = np.sort(int_range)
    return int_range
def int_interval(wrel, n, t, k):
    """
    Find the almost-singular point and split the integration
    interval there.
    """
    if wrel < 1 or wrel > 1.3:
        return [0, mp.inf]
    else:
        guesses = [4, 6, 8, 10, 12, 14]
        wc = wrel * mp.sqrt(1+n)
        int_range = [0, mp.inf]
        for guess in guesses:
            try:
                root = mp.findroot(lambda zc: MaxKappa.e_l(zc, wc, n, t, k), guess)
            except ValueError:
                continue
            unique = True
            for z in int_range:
                if mp.fabs(z - mp.fabs(root)) < 1e-4:
                    unique = False
            if unique:
                int_range += [mp.fabs(root)]
        int_range = np.sort(int_range)
        return int_range
def gamma_shot(self, wrel, l, n, t, tc):
    """
    Calculate (1) the transfer gain and (2) the electron shot noise.
    """
    if 1.0 < wrel < 1.2:
        mp.mp.dps = 80
    else:
        mp.mp.dps = 40
    wc = wrel * mp.sqrt(1+n)
    za = self.za_l(wc, l, n, t, tc)
    mp.mp.dps = 15
    zr = self.zr_mp(wc, l, tc)

    # below: calculate the shot noise
    ldc = self.ant_len / l
    nc = permittivity * boltzmann * tc / ldc**2 / echarge**2
    vtc = np.sqrt(2 * boltzmann * tc / emass)
    ne = nc * vtc * (1 + n * mp.sqrt(t)) * 2 * np.pi * self.ant_rad * self.ant_len / np.sqrt(4 * np.pi)

    # a: coefficient in shot noise; see Issautier et al. 1999
    a = 1 + echarge * 3.6 / boltzmann / tc
    shot_noise = 2 * a * echarge**2 * mp.fabs(za)**2 * ne
    return [mp.fabs((zr + za) / zr)**2, shot_noise]
def gamma_shot(self, wrel, l, n, t, k, tc):
    """
    Calculate (1) the transfer gain and (2) the electron shot noise.
    """
    if 1.0 < wrel < 1.1:
        mp.mp.dps = 40
    else:
        mp.mp.dps = 20
    wc = wrel * mp.sqrt(1+n)
    za_val = self.za(wrel, l, n, t, k, tc)
    mp.mp.dps = 15
    zr_val = self.zr(wc, l, tc)

    # below: calculate the shot noise
    ldc = self.ant_len / l
    nc = permittivity * boltzmann * tc / ldc**2 / echarge**2
    vtc = np.sqrt(2 * boltzmann * tc / emass)
    ne = nc * vtc * (1 + n * mp.sqrt(t)) * 2 * np.pi * self.ant_rad * self.ant_len / np.sqrt(4 * np.pi)

    # a: coefficient in shot noise.
    scpot = 4
    a = 1 + echarge * scpot / boltzmann / tc
    shot_noise = 2 * a * echarge**2 * mp.fabs(za_val)**2 * ne
    return [mp.fabs((zr_val + za_val) / zr_val)**2, shot_noise]
def mpf_assert_allclose(res, std, atol=0, rtol=1e-17):
    try:
        len(res)
    except TypeError:
        res = list(res)

    n = len(std)
    if len(res) != n:
        raise AssertionError("Lengths of inputs not equal.")

    failures = []
    for k in range(n):
        try:
            assert_(mp.fabs(res[k] - std[k]) <= atol + rtol*mp.fabs(std[k]))
        except AssertionError:
            failures.append(k)

    ndigits = int(abs(np.log10(rtol)))
    msg = [""]
    msg.append("Bad results ({} out of {}) for the following points:".format(len(failures), n))
    for k in failures:
        resrep = mp.nstr(res[k], ndigits, min_fixed=0, max_fixed=0)
        stdrep = mp.nstr(std[k], ndigits, min_fixed=0, max_fixed=0)
        if std[k] == 0:
            rdiff = "inf"
        else:
            rdiff = mp.fabs((res[k] - std[k])/std[k])
            rdiff = mp.nstr(rdiff, 3)
        msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
    if failures:
        assert_(False, "\n".join(msg))
def __dip__(self, p: int, kmax: int, f: LambdaType, x: list, eps: float) -> tuple:
    """
    Calculates df(s) = the (s-1)-th derivative of f at 0, for s = 1 .. p+1.
    Uses a k-th degree polynomial for some k satisfying p <= k <= kmax.
    If the relative change in df(s) from k - 1 to k is less than eps then
    this determines k and success is true. Otherwise k = kmax and success is false.

    :param p: maximum derivative to calculate; p <= length(x) <= kmax
    :param kmax: maximum degree of the interpolating polynomial
    :param f: function to differentiate
    :param x: array of values around 0
    :param eps: desired tolerance
    :return: array of derivatives at 0
    """
    self.C = mp.zeros(1, int(kmax * (kmax + 3) / 2) + 1)
    df = mp.zeros(1, p + 1)
    for k in range(0, kmax + 1):
        self.__update__(int(k), int(p), x, f(x[int(k)]))
        if k < p:
            continue
        self.r = mp.mpf(1)
        for s in range(0, p + 1):
            if self.r:
                self.r = mp.mpf(
                    mp.fabs(self.C[k - s] - df[s]) <= eps * mp.fabs(self.C[k - s]))
            df[s] = self.C[k - s]
        if self.r:
            break
    for s in range(1, p + 1):
        df[s] = mp.factorial(s) * df[s]
    return df
def find_cutoff(self, thresh): # Find a cutoff to do finite integration # Evaluate the modulus of the characteristic function domain = np.linspace(0, 5, 500) char_fn = list(map(lambda t: mp.fabs(self.char_fn(t)), domain)) # thresh 1e-40 thresh = 1e-30 thresh_check = [ domain[i] for i in range(500) if char_fn[i] < mp.mpf(thresh) ] # Need to extend the domain if len(list(thresh_check)) == 0: j = 1 while len(list(thresh_check)) == 0: domain = np.linspace(5 * j, 5 * (j + 1), 500) char_fn = list(map(lambda t: mp.fabs(self.char_fn(t)), domain)) # thresh 1e-40 thresh_check = [ domain[i] for i in range(500) if char_fn[i] < mp.mpf(thresh) ] j += 1 cutoff = thresh_check[0] return cutoff
def newton(f, f_x, x0, erro, max_iter, valor=mpf(0.0)):
    u'''
    Implements Newton's method.

    Parameters:
    - f: function(x)
    - f_x: function(x), the derivative of f
    - x0: num => initial guess
    - erro: num => error value at which the method stops
    - max_iter: num => maximum number of iterations
    - valor: num => value the method expects the function to reach

    Returns (valor, numero_iter), where:
    - valor: value obtained by the method
    - numero_iter: number of iterations needed
    '''
    x0, erro, valor = mpf(x0), fabs(erro), mpf(valor)
    for i in range(max_iter):
        try:
            x1 = x0 - (f(x0) - valor) / f_x(x0)
        except TypeError:
            x1 = x0 + 300.0
            #raise ValueError('The method hit an aberration')
        erro_iter = fabs(x1 - x0)
        x0 = x1
        if erro_iter < erro:
            break
    else:
        raise ValueError('The method did not converge; the last value was: %f' % x0)
    return (x0, i)
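# A minimal usage sketch for the newton() helper above (an addition, not part of the
# original module). It assumes `from mpmath import mpf, fabs` at module level, as the
# body implies, and simply recovers sqrt(2).
raiz, n_iter = newton(lambda x: x**2 - 2,   # f(x)
                      lambda x: 2 * x,      # f'(x), the analytic derivative
                      1.0,                  # initial guess
                      1e-12,                # stopping tolerance
                      100)                  # iteration cap
print(raiz)  # ~1.4142135623731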
def update_vardict(vardict): # expand vardict into component mapping vardict = expand_vardict(vardict) # extract every free symbol present in vardict symdict = { symbol: None for var in vardict if isinstance(vardict[var], sp.Basic) for symbol in vardict[var].free_symbols } for free_symbol in symdict: # seed random number generator with free_symbol hash value random.seed(hash(free_symbol)) # update symdict with mapping: free_symbol -> unique random number symdict[free_symbol] = mpf(random.random()) for var in vardict: # apply CSE to every expression in vardict replaced, reduced = sp.cse(vardict[var], order='none') # calculate value after substituting the unique random number # from each free symbol in symdict into every expression in vardict value = compute_value(symdict, replaced, reduced, factor=1) # double the precision (factor = 2) whenever value within range of zero if fabs(value) != mpf('0.0') and fabs(value) < 10**( (-2.0 / 3) * precision): _value = compute_value(symdict, replaced, reduced, factor=2) if fabs(_value) < 10**(-(4.0 / 3) * precision): value = mpf('0.0') # update vardict with mapping: variable -> (pseudo) unique number vardict[var] = value return vardict
def bissecao(func, a, b, erro, valor=mpf(0.0)):
    u'''
    Implements the bisection method to find the point where the function
    reaches the desired value.

    Returns (valor, numero_iter)
    '''
    f = lambda x: func(x) - valor
    a, b = mpf(a), mpf(b)
    erro = fabs(erro)
    i = mpf(0)
    while True:
        i += 1
        e = fabs(a - b) / 2
        x = (a + b) / 2
        if e <= erro:
            return (x, i)
        if f(a) * f(x) > 0:
            a, b = x, b
        elif f(x) * f(b) > 0:
            a, b = a, x
        else:
            raise ValueError('There is no sign change between the two points.')
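# A short usage sketch (an addition, assuming the bissecao() above is in scope):
# the cube root of 2 is bracketed by [1, 2], where the function changes sign.
raiz, n_iter = bissecao(lambda x: x**3 - 2, 1, 2, 1e-10)
print(raiz)    # ~1.2599210498949
print(n_iter)  # number of halvings performed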
def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
    """
    Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
    can be done to higher precision than double.
    """
    failures = []
    for k, (resval, stdval) in enumerate(zip_longest(res, std)):
        if resval is None or stdval is None:
            raise ValueError('Lengths of inputs res and std are not equal.')
        if mpmath.fabs(resval - stdval) > atol + rtol * mpmath.fabs(stdval):
            failures.append((k, resval, stdval))

    nfail = len(failures)
    if nfail > 0:
        ndigits = int(abs(np.log10(rtol)))
        msg = [""]
        msg.append("Bad results ({} out of {}) for the following points:".format(nfail, k + 1))
        for k, resval, stdval in failures:
            resrep = mpmath.nstr(resval, ndigits, min_fixed=0, max_fixed=0)
            stdrep = mpmath.nstr(stdval, ndigits, min_fixed=0, max_fixed=0)
            if stdval == 0:
                rdiff = "inf"
            else:
                rdiff = mpmath.fabs((resval - stdval) / stdval)
                rdiff = mpmath.nstr(rdiff, 3)
            msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep, rdiff))
        assert_(False, "\n".join(msg))
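# A quick usage sketch (an addition), assuming the mp_assert_allclose above is importable.
# Two routes to the same values agree to roughly working precision, so the call passes silently.
import mpmath

mpmath.mp.dps = 30
res = [mpmath.exp(2 * mpmath.log(mpmath.mpf(7))), mpmath.mpf(1) / 3]
std = [mpmath.mpf(49), mpmath.mpf('0.' + '3' * 40)]
mp_assert_allclose(res, std, rtol=1e-25)   # a looser std would raise with a per-point report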
def compose_result_string(self, mpf_x, mpf_m, n, mpf_value, output_dps): return ('{' + mp.nstr(mpf_x, output_dps * 2) + ',{' + mp.nstr(mpf_m.real, output_dps * 2) + ',' + mp.nstr(mpf_m.imag, output_dps * 2) + '},' + str(n) + ',{' + mp.nstr(mpf_value.real, output_dps) + ',' + mp.nstr(mpf_value.imag, output_dps) + '},' + mp.nstr(mp.fabs(mpf_value.real * 10**-output_dps), 2) + ',' + mp.nstr(mp.fabs(mpf_value.imag * 10**-output_dps), 2) + '},')
def _mp_round(n):
    ceil = mp.ceil(n)
    floor = mp.floor(n)
    ceil_diff = mp.fabs(n - ceil)
    floor_diff = mp.fabs(n - floor)
    if ceil_diff <= floor_diff:
        return ceil
    else:
        return floor
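# A tiny illustration (an addition) of the tie-breaking behaviour above:
# halfway cases go to the ceiling, since `ceil_diff <= floor_diff` wins ties.
from mpmath import mp

print(_mp_round(mp.mpf('2.4')))   # 2.0
print(_mp_round(mp.mpf('2.5')))   # 3.0 -- tie resolves toward the ceiling
print(_mp_round(mp.mpf('-2.5')))  # -2.0, the ceiling of -2.5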
def cdf_segment(alpha_1, beta_1, alpha_2, beta_2, n=200):
    '''The segments of the cdf of Gamma_1 - Gamma_2.

    Compute the z values of n(=200) segments of the cdf,
    i.e. cdf = 1/n, 2/n, 3/n, ...

    Args:
        alpha_1: An int. Alpha of Gamma1.
        beta_1: An int. Beta of Gamma1.
        alpha_2: An int. Alpha of Gamma2.
        beta_2: An int. Beta of Gamma2.
        n: An int. The number of segments.

    Returns:
        A list of z. The z corresponding to the segmented cumulative
        probability of Gamma1 - Gamma2.
    '''
    center = float(alpha_1) / float(beta_1) - float(alpha_2) / float(beta_2)
    standard_err = (float(alpha_1) / beta_1 ** 2 + float(alpha_2) / beta_2 ** 2) ** 0.5
    x = center - 300 * standard_err
    cdf_n = []
    cdf = 0
    pdf_1 = pdf_of_gamma_difference(x, alpha_1, beta_1, alpha_2, beta_2)
    pdf_2 = pdf_1
    delta = standard_err / 30.
    epsilon = standard_err / 10000.
    while pdf_1 * delta < 1e-10:
        x += delta
        pdf_2 = pdf_of_gamma_difference(x, alpha_1, beta_1, alpha_2, beta_2)
        pdf_1 = pdf_2
        if math.fabs(pdf_2 + pdf_1) * delta < 1e-8:
            pass
        elif math.fabs(pdf_2 - pdf_1) / math.fabs(pdf_2 + pdf_1) < 0.2:
            delta = delta * 2
        elif math.fabs(pdf_2 - pdf_1) / math.fabs(pdf_2 + pdf_1) > 0.5:
            delta = delta / 2.
        if delta < epsilon:
            print('Error')
            return None
    for i in range(1, n):
        while cdf < float(i) / n:
            pdf_1 = pdf_2
            pdf_2 = pdf_of_gamma_difference(x + delta, alpha_1, beta_1, alpha_2, beta_2)
            cdf += (pdf_1 + pdf_2) / 2 * delta
            x += delta
            if delta < epsilon:
                print('Error')
                return None
            if mpmath.fabs(pdf_2 + pdf_1) * delta < 1e-8:
                pass
            elif mpmath.fabs(pdf_2 - pdf_1) / mpmath.fabs(pdf_2 + pdf_1) < 0.001:
                delta = delta * 2
            elif mpmath.fabs(pdf_2 - pdf_1) / mpmath.fabs(pdf_2 + pdf_1) > 0.003:
                delta = delta / 2.
        cdf_n.append(float(x - delta * (cdf - float(i) / n) / ((pdf_1 + pdf_2) / 2 * delta)))
    return cdf_n
def inverse_cdf(y, alpha_1, beta_1, alpha_2, beta_2):
    '''The inverse function of the cdf of Gamma_1 - Gamma_2.

    Given a cumulative probability y, compute z.

    Args:
        y: A float. The cumulative probability.
        alpha_1: An int. Alpha of Gamma1.
        beta_1: An int. Beta of Gamma1.
        alpha_2: An int. Alpha of Gamma2.
        beta_2: An int. Beta of Gamma2.

    Returns:
        The z corresponding to the cumulative probability y of Gamma1 - Gamma2.
    '''
    center = float(alpha_1) / float(beta_1) - float(alpha_2) / float(beta_2)
    standard_err = (float(alpha_1) / beta_1 ** 2 + float(alpha_2) / beta_2 ** 2) ** 0.5
    x = center - 300 * standard_err
    cdf = 0
    pdf_1 = pdf_of_gamma_difference(x, alpha_1, beta_1, alpha_2, beta_2)
    pdf_2 = pdf_1
    delta = standard_err / 30.
    epsilon = standard_err / 10000.
    if y < 1e-8 or y >= 1 - 1e-8:
        return numpy.inf
    while pdf_1 * delta < 1e-10:
        x += delta
        pdf_2 = pdf_of_gamma_difference(x, alpha_1, beta_1, alpha_2, beta_2)
        pdf_1 = pdf_2
        if math.fabs(pdf_2 + pdf_1) < 1e-8:
            pass
        elif math.fabs(pdf_2 - pdf_1) / math.fabs(pdf_2 + pdf_1) < 0.2:
            delta = delta * 2
        elif math.fabs(pdf_2 - pdf_1) / math.fabs(pdf_2 + pdf_1) > 0.5:
            delta = delta / 2.
        if delta < epsilon:
            print('Error')
            return None
    while cdf < y:
        pdf_1 = pdf_2
        pdf_2 = pdf_of_gamma_difference(x + delta, alpha_1, beta_1, alpha_2, beta_2)
        cdf += (pdf_1 + pdf_2) / 2 * delta
        x += delta
        if delta < epsilon:
            print('Error')
            return None
        if mpmath.fabs(pdf_2 + pdf_1) * delta < 1e-8:
            pass
        elif mpmath.fabs(pdf_2 - pdf_1) / mpmath.fabs(pdf_2 + pdf_1) < 0.001:
            delta = delta * 2.
        elif mpmath.fabs(pdf_2 - pdf_1) / mpmath.fabs(pdf_2 + pdf_1) > 0.003:
            delta = delta / 2.
    x = x - delta * (cdf - y) / ((pdf_1 + pdf_2) / 2 * delta)
    return x
def calc_hyper_volume(paretos, ref_point): sum = 0 for idx, item in enumerate(paretos): if idx == len(paretos) - 1: sum += mpmath.fabs(paretos[idx][0] - ref_point[0]) * mpmath.fabs(paretos[idx][1] - ref_point[1]) else: sum += mpmath.fabs(paretos[idx][0] - paretos[idx + 1][0] ) * mpmath.fabs(paretos[idx][0] - ref_point[1]) return sum
def getPrimeFactors( n, verbose = False ): cutoff = primes[ -1 ] * primes[ -1 ] smallFactors = [ ] largeFactors = [ ] qPrimes = [ ] if verbose: print( "\nFactoring", n, '...' ) remaining, smallFactors = getSmallFactors( n, verbose ) if remaining > 1 and g.factorCache is not None: if remaining in g.factorCache: if verbose: print( 'cache hit:', remaining ) largeFactors.extend( g.factorCache[ remaining ] ) remaining = 1 while remaining > 1: exponent = 0 P = getLargeFactors( remaining, verbose ) while True: Z = remaining % P if Z != 0: break exponent += 1 remaining //= P if verbose: if P < cutoff: print( P, 'is a prime factor of', n ) if fabs( P ) > fabs( cutoff ): if verbose: print( P, 'is a q-prime factor of', n ) qPrimes.append( ( P, 1 ) ) largeFactors.append( ( P, exponent ) ) if verbose: print( ' exponent:', exponent ) print( '--' ) if verbose: print( 'factorization into primes and q-primes completed for', n ) return smallFactors, largeFactors, qPrimes
def eigen(M,dic,order): M1 = M.xreplace(dic) with mp.workdps(int(mp.mp.dps*2)): M1 = M1.evalf(mp.mp.dps) det =(M1).det(method = 'berkowitz') detp = Poly(det,kz) co = detp.all_coeffs() co = [mp.mpc(str(re(k)),str(im(k))) for k in co] maxsteps = 3000 extraprec = 500 ok =0 while ok == 0: try: sol,err = mp.polyroots(co,maxsteps =maxsteps,extraprec = extraprec,error =True) sol = np.array(sol) print("Error on polyroots =", err) ok=1 except: maxsteps = int(maxsteps*2) extraprec = int(extraprec*1.5) print("Poly roots fail precision increased: ",maxsteps,extraprec) te = np.array([mp.fabs(m) < mp.mpf(10**mp.mp.dps) for m in sol]) solr = sol[te] if Bound_nb == 1: solr = solr[[mp.im(m) < 0 for m in solr]] eigen1 = np.empty((len(solr),np.shape(M1)[0]),dtype = object) with mp.workdps(int(mp.mp.dps*2)): for i in range(len(solr)): M2 = mpmathM(M1.xreplace({kz:solr[i]})) eigen1[i] = null_space(M2) solr1 = solr div = [mp.fabs(x) for x in (order*kxl.xreplace(dic)*eigen1[:,4]+order*kyl.xreplace(dic)*eigen1[:,5]+solr1*eigen1[:,6])] testdivB = [mp.almosteq(x,0,10**(-(mp.mp.dps/2))) for x in div] eigen1 =eigen1[testdivB] solr1 = solr1[testdivB] if len(solr1) == 3: print("Inviscid semi infinite domain") elif len(solr1) == 6: print("Inviscid 2 boundaries") elif len(solr1) == 5: print("Viscous semi infinite domain") elif len(solr1) == 10: print("Viscous 2 boundaries") else: print("number of solution inconsistent,",len(solr1)) return(solr1,eigen1,M1)
def ReflectedLightIntensity(self): for i in range(self.numberOfLayers): try: self.reflectedLight[i] = 0 except IndexError: print("IndexError is i =" + str(i)) self.StructuralReflectionCoefficient(self.numberOfLayers) R = mpmath.fabs(self.reflectedLight[1]) #for i in range(len(self.reflectedLight)): #self.reflectedLight[i] = mpmath.fabs(self.reflectedLight[i])**2 #print("self.reflectedLight[%s] = %s"%(i,self.reflectedLight[i])) dep = mpmath.fabs(self.DetectionDepth(self.numberOfLayers)) return R**2, dep
def alpha(z, x, beta):
    """
    Eq. (A4) from Ref[1].

    Note that 'x' here corresponds to 'chi = x/rho',
    and 'z' here corresponds to 'xi = z/2/rho' in the paper.
    """
    arg1 = sqrt(2 * fabs(m(z, x, beta)))
    arg2 = -2 * (m(z, x, beta) + nu(x, beta))
    arg3 = 2 * eta(z, x, beta) / arg1

    if z < 0:
        return re(1 / 2 * (-arg1 + sqrt(fabs(arg2 + arg3))))
    else:
        return re(1 / 2 * (arg1 + sqrt(fabs(arg2 - arg3))))
def brot(c, depth=200, eps=0.001):
    z = c
    dz = 1
    epsSq = eps * eps
    for i in range(depth):
        dz = dz * z * 2
        z = mpmath.power(z, 2) + c
        if mpmath.power(mpmath.fabs(z), 2) > 4:
            return i
        if mpmath.power(mpmath.fabs(dz), 2) < epsSq:
            return -1
    return -1
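# A short usage sketch (an addition), assuming the brot() above is in scope: it returns
# the escape iteration for points outside the Mandelbrot set and -1 once the derivative
# test flags a point as interior.
import mpmath

print(brot(mpmath.mpc(0, 0)))   # -1: dz collapses to 0 on the first iteration (interior)
print(brot(mpmath.mpc(1, 0)))   # 1: |z|^2 exceeds 4 on the second iteration (escapes)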
def logSpaceAdd(update, before):
    '''
    Assume b >= a (the body swaps the inputs so that b holds the larger value).
    We want to calculate ln(exp(ln(a)) + exp(ln(b))), thus
        ln( exp(ln(b)) * (1 + exp(ln(a) - ln(b))) )
        -> ln(b) + ln(1 + exp(ln(a) - ln(b)))
        -> ln(b) + log1p(exp(ln(a) - ln(b)))
    '''
    # As there is no neutral element of addition in log space we only start
    # adding when two values are given.
    if before is None:
        return update
    else:
        a = None
        b = None
        # Check which value is larger; b gets the larger one.
        if update > before:
            b = update
            a = before
        else:
            b = before
            a = update
        x = mp.mpf(a) - mp.mpf(b)
        #print(update, before)
        xexp = memoexp(x)
        #print('Exp:', xexp)
        val = memolog1p(xexp)
        #print('Log:', val)
        val = val + b
        if val == 0:
            print('a:{} b:{} x:{} xexp:{} log1p:{}'.format(a, b, x, xexp, val))
            raise ValueError(
                'LogSpace Addition has resulted in a value of 0: a = {} b = {} (possible underflow)'
                .format(a, b))
        #elif val > 0:
            #print('a:{} b:{} x:{} xexp:{} log1p:{}'.format(a,b,x,xexp,val))
            #raise ValueError('Prior Update has resulted in a value > 0: a = {} b = {} val = {}'.format(a,b,val))
        if before == val:
            #print('a:{} b:{} x:{} xexp:{} log1p:{}'.format(a,b,x,xexp,val))
            logging.warning(
                'LogAddition had no effect: a = {} b = {} val = {}'.format(a, b, val))
            #raise ValueError('LogAddition had no effect: a = {} b = {} val = {}'.format(a,b,val))
            #raise ValueError('LogAddition had no effect!')
        if mp.isnan(val):
            raise ValueError(
                'LogSpace Addition has resulted in a value of nan: a = {} b = {}'.format(a, b))
        if mp.fabs(val) > 1000:
            # At this point who cares, let's round a bit
            val = mp.floor(val)
        return val
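# The identity behind logSpaceAdd can be checked independently of the memoised helpers.
# A self-contained sketch (an addition) with mpmath; plain log/exp stand in for
# memolog1p/memoexp here.
from mpmath import mp

# ln(e^a + e^b) = b + ln(1 + e^(a - b)), with b the larger log-value.
a, b = mp.mpf('-1050'), mp.mpf('-1000')     # both terms underflow ordinary floats
stable = b + mp.log(1 + mp.exp(a - b))      # the stable form used above
print(stable)                               # ~ -1000, since e^-1050 is negligible next to e^-1000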
def sig_figs_jy(jn, jn1, yn, yn1, z):
    """Check relation http://dlmf.nist.gov/10.50 .

    Parameters
    ----------
    jn : mpf
        The value of j_n(x).
    jn1 : mpf
        The value of j_{n + 1}(x).
    yn : mpf
        The value of y_n(x).
    yn1 : mpf
        The value of y_{n + 1}(x).

    Returns
    -------
    The estimated number of significant digits to which the computation
    of the passed Bessel functions is correct.
    """
    w = mpmath.fabs(z**2*(jn1*yn - jn*yn1) - 1)
    if not mpmath.isfinite(w):
        return w
    if w > 0:
        return 1 - mpmath.log10(w)
    else:
        return mpmath.mp.dps
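# A usage sketch (an addition), assuming sig_figs_jy above is in scope. mpmath has no
# spherical Bessel functions built in, so the hypothetical helpers below build them from
# half-integer-order besselj/bessely.
import mpmath

def sph_jn(n, z):
    # j_n(z) = sqrt(pi / (2 z)) * J_{n + 1/2}(z)
    return mpmath.sqrt(mpmath.pi / (2 * z)) * mpmath.besselj(n + mpmath.mpf('0.5'), z)

def sph_yn(n, z):
    # y_n(z) = sqrt(pi / (2 z)) * Y_{n + 1/2}(z)
    return mpmath.sqrt(mpmath.pi / (2 * z)) * mpmath.bessely(n + mpmath.mpf('0.5'), z)

n, z = 3, mpmath.mpf(7)
digits = sig_figs_jy(sph_jn(n, z), sph_jn(n + 1, z), sph_yn(n, z), sph_yn(n + 1, z), z)
print(digits)   # close to mpmath.mp.dps when the inputs are computed at full precision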
def daubechies(N): # p vanishing moments. p = int(N / 2) # make polynomial; see Mallat, 7.96 Py = [sm.binomial(p - 1 + k, k) for k in reversed(range(p))] # get polynomial roots y[k] Py_roots = sm.mp.polyroots(Py, maxsteps=200, extraprec=64) z = [] for yk in Py_roots: # substitute y = -1/4z + 1/2 - 1/4/z to factor f(y) = y - y[k] # We've found the roots of P(y). We need the roots of Q(z) = P((1-z-1/z)/4) f = [sm.mpf('-1/4'), sm.mpf('1/2') - yk, sm.mpf('-1/4')] # get polynomial roots z[k] z += sm.mp.polyroots(f) # make polynomial using the roots within unit circle h0z = sm.sqrt('2') for zk in z: if sm.fabs(zk) < 1: # This calculation is superior to Mallat, (equation between 7.96 and 7.97) h0z *= sympy.sympify('(z-zk)/(1-zk)').subs('zk', zk) # adapt vanishing moments hz = (sympy.sympify('(1+z)/2')**p * h0z).expand() # get scaling coefficients return [sympy.re(hz.coeff('z', k)) for k in reversed(range(p * 2))]
def run_stoch_eig(P, verbose=0):
    """
    stoch_eig returns a stochastic vector x such that x P = x
    for an irreducible stochastic matrix P.
    """
    if verbose > 1:
        print("original matrix (stoch_eig):\n", P)

    x = mp.stoch_eig(P)

    if verbose > 1:
        print("x\n", x)

    eps = mp.exp(0.8 * mp.log(mp.eps))  # From test_eigen.py

    # x is a left eigenvector of P with eigenvalue unity
    err0 = mp.norm(x*P-x, p=1)
    if verbose > 0:
        print("|xP - x| (stoch_eig):", err0)
    assert err0 < eps

    # x is a nonnegative vector
    if verbose > 0:
        print("min(x) (stoch_eig):", min(x))
    assert min(x) >= 0 - eps

    # 1-norm of x is one
    err1 = mp.fabs(mp.norm(x, p=1) - 1)
    if verbose > 0:
        print("||x| - 1| (stoch_eig):", err1)
    assert err1 < eps
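# A usage sketch for the check above (an addition). mp.stoch_eig is not part of stock
# mpmath, so this assumes the patched mpmath that the test targets is installed.
from mpmath import mp

# A 2x2 irreducible stochastic matrix; its stationary distribution is (2/3, 1/3).
P = mp.matrix([[0.9, 0.1],
               [0.2, 0.8]])
run_stoch_eig(P, verbose=1)   # prints the three residuals and asserts they are tiny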
def run_gth_solve(A, verbose=0):
    """
    gth_solve returns a stochastic vector x such that x A = 0
    for an irreducible transition rate matrix A.
    """
    if verbose > 1:
        print("original matrix (gth_solve):\n", A)

    x = mp.gth_solve(A)

    if verbose > 1:
        print("x\n", x)

    eps = mp.exp(0.8 * mp.log(mp.eps))  # test_eigen.py

    # x is a solution to x A = 0
    err0 = mp.norm(x*A, p=1)
    if verbose > 0:
        print("|xA| (gth_solve):", err0)
    assert err0 < eps

    # x is a nonnegative vector
    if verbose > 0:
        print("min(x) (gth_solve):", min(x))
    assert min(x) >= 0 - eps

    # 1-norm of x is one
    err1 = mp.fabs(mp.norm(x, p=1) - 1)
    if verbose > 0:
        print("||x| - 1| (gth_solve):", err1)
    assert err1 < eps
def my_assert_allclose_TFmp(H, b, a, tol):
    if max(H.num.shape) != b.rows or max(H.den.shape) != a.rows:
        raise ValueError('MP Transfer function is not the same size as the scipy transfer function!')

    for i in range(0, b.rows):
        if mpmath.fabs(b[i, 0] - H.num[0, i]) < tol:
            assert True
        else:
            raise ValueError("MP transfer function is not close to the dTF")

    for i in range(0, a.rows):
        if mpmath.fabs(a[i, 0] - H.den[0, i]) < tol:
            assert True
        else:
            raise ValueError("MP transfer function is not close to the dTF")
def daubechies(N):
    # make polynomial
    q_y = [mpmath.binomial(N - 1 + k, k) for k in reversed(range(N))]

    # get polynomial roots y[k]
    y = mpmath.mp.polyroots(q_y, maxsteps=200, extraprec=64)

    z = []
    for yk in y:
        # substitute y = -1/4z + 1/2 - 1/4/z to factor f(y) = y - y[k]
        f = [mpmath.mpf('-1/4'), mpmath.mpf('1/2') - yk, mpmath.mpf('-1/4')]
        # get polynomial roots z[k] within unit circle
        z += [zk for zk in mpmath.mp.polyroots(f) if mpmath.fabs(zk) < 1]

    # make polynomial using the roots
    h0z = mpmath.sqrt('2')
    for zk in z:
        h0z *= sympy.sympify('(z-zk)/(1-zk)').subs('zk', zk)

    # adapt vanishing moments
    hz = (sympy.sympify('(1+z)/2')**N * h0z).expand()

    # get scaling coefficients
    return [sympy.re(hz.coeff('z', k)) for k in reversed(range(N * 2))]
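# A quick sanity check (an addition), assuming the daubechies() above is importable:
# with N = 2 the construction should reproduce the classic D4 scaling filter, whose
# coefficients sum to sqrt(2).
coeffs = daubechies(2)
print([float(c) for c in coeffs])
# roughly [0.4829629, 0.8365163, 0.2241439, -0.1294095]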
def lognormal_minmax_log(kmin, kmax, mu, sigma): '''returns the sum( 1/k*exp(-0.5*((lnk - mu)/sigma)^2),{k,kmin,kmax} ) with kmax=None for infty ''' # print kmin,kmax,mu,sigma mpm.mp.dps = 15 if kmax == None: x = mpm.log( mpm.sumem( lambda k: 1.0 / (k - kmin + 1.0) * mpm.exp(-0.5 * (mpm.log( (k - kmin + 1.0)) - mu) * (mpm.log( (k - kmin + 1.0)) - mu) / sigma / sigma), [kmin, mpm.inf])) if mpm.im(x) != 0: logS = float(mpm.fabs(x)) else: logS = float(x) else: logS = (float( mpm.log( mpm.sumem( lambda k: 1.0 / (k - kmin + 1.0) * mpm.exp(-0.5 * (mpm.log( (k - kmin + 1.0)) - mu) * (mpm.log( (k - kmin + 1.0)) - mu) / sigma / sigma), [kmin, kmax])))) return logS
def __init__(self, theta, phi, r=None, is_polar=True): from mpmath import mpf, mp, norm, fabs, chop if not r: r = Point._scale else: r *= Point._scale Point._can_change_the_scale = False mp.dps = Point._precision if not is_polar: self.x = mpf(theta) self.y = mpf(phi) self.z = mpf(r) self.norm = norm(self.get_coordinate(), 2) self.set_polar_coordinate() else: self.norm = fabs(r) self.theta = theta self.phi = phi self.set_coordinate() # Setting float parameters self.floatx = float(self.x) self.floaty = float(self.y) self.floatz = float(self.z)
def bimax_integrand(self, z, wc, l, n, t):
    """
    Integrand of the electron-noise integral.
    """
    return f1(wc*l/z/mp.sqrt(2)) * z * \
        (mp.exp(-z**2) + n/mp.sqrt(t)*mp.exp(-z**2 / t)) / \
        (mp.fabs(BiMax.d_l(z, wc, n, t))**2 * wc**2)
def makeEulerBrick( _a, _b, _c ):
    a, b, c = sorted( [ real( _a ), real( _b ), real( _c ) ] )

    if fadd( power( a, 2 ), power( b, 2 ) ) != power( c, 2 ):
        raise ValueError( "'euler_brick' requires a Pythagorean triple" )

    result = [ ]

    a2 = fmul( a, a )
    b2 = fmul( b, b )
    c2 = fmul( c, c )

    result.append( fabs( fmul( a, fsub( fmul( 4, b2 ), c2 ) ) ) )
    result.append( fabs( fmul( b, fsub( fmul( 4, a2 ), c2 ) ) ) )
    result.append( fprod( [ 4, a, b, c ] ) )

    return sorted( result )
def assert_equal(vardict_1, vardict_2, suppress_message=False): """ Assert SymPy Expression Equality >>> from sympy import sin, cos >>> from sympy.abc import x >>> assert_equal(sin(2*x), 2*sin(x)*cos(x)) Assertion Passed! >>> assert_equal(cos(2*x), cos(x)**2 - sin(x)**2) Assertion Passed! >>> assert_equal(cos(2*x), 1 - 2*sin(x)**2) Assertion Passed! >>> assert_equal(cos(2*x), 1 + 2*sin(x)**2) Traceback (most recent call last): ... AssertionError >>> vardict_1 = {'A': sin(2*x), 'B': cos(2*x)} >>> vardict_2 = {'A': 2*sin(x)*cos(x), 'B': cos(x)**2 - sin(x)**2} >>> assert_equal(vardict_1, vardict_2) Assertion Passed! >>> assert_equal('(a^2 - b^2) - (a + b)*(a - b)', 0) Assertion Passed! """ if not isinstance(vardict_1, dict): vardict_1 = {'': vardict_1} if not isinstance(vardict_2, dict): vardict_2 = {'': vardict_2} for var_1, var_2 in zip(vardict_1, vardict_2): if not isinstance(vardict_1[var_1], sp.Basic): vardict_1[var_1] = sp.sympify(vardict_1[var_1]) if not isinstance(vardict_2[var_2], sp.Basic): vardict_2[var_2] = sp.sympify(vardict_2[var_2]) # update each vardict with mapping: variable -> (pseudo) unique number vardict_1, vardict_2 = update_vardict(vardict_1), update_vardict(vardict_2) # assert whether SDA >= (2/3) * precision, implying expression equality for var_1, var_2 in zip(vardict_1, vardict_2): n_1, n_2 = vardict_1[var_1], vardict_2[var_2] if n_1 == n_2: continue E_rel = 2 * fabs(n_1 - n_2)/(fabs(n_1) + fabs(n_2)) assert -log10(E_rel) + 1 >= (2.0/3) * precision if not suppress_message: print('Assertion Passed!')
def ponto_fixo(func, x0):
    x0 = float(x0)
    x = func(x0)
    while fabs(x0 - x) > 0.000000001:
        x0 = x
        x = func(x0)
    return x
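# A usage sketch (an addition), assuming `from mpmath import fabs` at module level as the
# body implies; iterating cos converges to its fixed point, the Dottie number.
from mpmath import cos

print(ponto_fixo(cos, 0.5))   # ~0.739085133, the fixed point of cos(x)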
def questao7():
    chute = chute_temp()
    t_star = 1319.7542885  # obtained in the previous item
    delta = fabs(t_star - chute) + 3

    print('Initial interval: [1100, 1300]')
    valor, n_iter = bissecao(temp, chute - delta, chute + delta, 0.000001, valor=50.0)
    print('Value found: %f' % valor)
    print('Number of iterations: %d' % n_iter)
def maxkappa_integrand(zc, wc, lc, n, t, k):
    """
    Integrand of the electron-noise integral.
    lc := l/ldc
    """
    num = f1(wc*lc/mp.sqrt(2)/zc) * MaxKappa.b(zc, n, t, k)
    denom = mp.fabs(MaxKappa.e_l(zc, wc, n, t, k))**2
    return num/denom
def error(coefs, progress=True):
    (a, b) = coefs
    xs = (x * mp.pi / mp.mpf(4096) for x in range(-4096, 4097))
    err = max(fabs(sin(x) - f(x, a, b)) for x in xs)
    if progress:
        print('(a, b, c): ({}, {}, {})'.format(a, b, c(a, b)))
        print('evaluated error: ', err)
        print()
    return float(err)
def test_fft():
    n = 2**10
    a = mp_rand_dist(n)
    b = mp_rand_dist(n)
    c = mp_conv(a, b)
    d = mp_conv2(a, b)
    eps = 0
    for i in range(n):
        eps = max(eps, mp.fabs(c[i] - d[i]))
    return eps
def achar_ponto_fixo(l, n_iter=50): l = float(l) x0 = 0.1 vec = [] fixos = [] for n in range(n_iter): for val in vec: if fabs(val - x0) < 0.0000001: for fixo in fixos: if fabs(fixo - x0) < 0.0000001: return fixos else: fixos.append(x0) vec.append(x0) x0 = iter_proc(x0, l) return None
def getDecimalDigitList(n, k): result = [] setAccuracy(k) digits = floor(log10(n)) if digits < 0: for _ in arange(fsub(fabs(digits), 1)): result.append(0) k = fsub(k, fsub(fabs(digits), 1)) value = fmul(n, power(10, fsub(k, fadd(digits, 1)))) for c in getMPFIntegerAsString(floor(value)): result.append(int(c)) return result
def psi(t, eigenvalues, coefficients, f0=mp.mpf("1.0")):
    '''The nearly periodic function'''
    f = None
    if len(eigenvalues) == len(coefficients):
        f = [mp.expj(E*t) for E in eigenvalues]
        f = mp.fdot(coefficients, f)
        f = mp.fabs(f - f0)
    return f
def _transformlng(lng, lat):
    ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
        0.1 * lng * lat + 0.1 * math.sqrt(math.fabs(lng))
    ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
            math.sin(2.0 * lng * pi)) * 2.0 / 3.0
    ret += (20.0 * math.sin(lng * pi) + 40.0 *
            math.sin(lng / 3.0 * pi)) * 2.0 / 3.0
    ret += (150.0 * math.sin(lng / 12.0 * pi) + 300.0 *
            math.sin(lng / 30.0 * pi)) * 2.0 / 3.0
    return ret
def _transformlat(lng, lat):
    ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
        0.1 * lng * lat + 0.2 * math.sqrt(math.fabs(lng))
    ret += (20.0 * math.sin(6.0 * lng * pi) + 20.0 *
            math.sin(2.0 * lng * pi)) * 2.0 / 3.0
    ret += (20.0 * math.sin(lat * pi) + 40.0 *
            math.sin(lat / 3.0 * pi)) * 2.0 / 3.0
    ret += (160.0 * math.sin(lat / 12.0 * pi) + 320 *
            math.sin(lat * pi / 30.0)) * 2.0 / 3.0
    return ret
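# These two perturbation helpers are the usual ingredients of the WGS-84 -> GCJ-02
# ("Mars coordinates") conversion. The wrapper below is an added sketch using the
# standard published constants (Krasovsky-1940 ellipsoid); neither the wrapper nor the
# constants come from this module, and it reuses the module-level `pi` the helpers
# already reference.
import math

A_AXIS = 6378245.0               # assumed semi-major axis
EE = 0.00669342162296594323      # assumed eccentricity squared

def wgs84_to_gcj02(lng, lat):
    """Shift a WGS-84 coordinate onto the GCJ-02 datum using the helpers above."""
    dlat = _transformlat(lng - 105.0, lat - 35.0)
    dlng = _transformlng(lng - 105.0, lat - 35.0)
    radlat = lat / 180.0 * pi
    magic = 1 - EE * math.sin(radlat) ** 2
    sqrtmagic = math.sqrt(magic)
    dlat = (dlat * 180.0) / ((A_AXIS * (1 - EE)) / (magic * sqrtmagic) * pi)
    dlng = (dlng * 180.0) / (A_AXIS / sqrtmagic * math.cos(radlat) * pi)
    return lng + dlng, lat + dlat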
def grandCountGN_UltraX1_Limited_wrapperMpmath(funcf, jacf, measdata: list, binit: list, bstart: list, bend: list,
                                               c, NSIG=10, NSIGGENERAL=10, implicit=False, verbose=False,
                                               verbose_wrapper=False, isBinitGood=True):
    """
    Wrapper around grandCountGN_UltraX1_Limited implementing the overall algorithm.

    :param funcf: callable, function with parameters in the format x, b, c
    :param jacf: callable, function with parameters in the format x, b, c, y
    :param measdata: list of dicts with experimental data [{'x': [], 'y': []}, {'x': [], 'y': []}]
    :param binit: list, initial approximation of b
    :param bstart: list, lower bound of b
    :param bend: list, upper bound of b
    :param c: dict of additional constants
    :param A: matrix of coefficients a
    :param NSIG=3: precision (number of digits after the decimal point)
    :param implicit: True if the function is implicit, otherwise False
    :param verbose: if True, print the results of each iteration in detail
    :returns: b, numiter, log - vector of estimated coefficients, number of iterations, messages
    """
    maxiter = 10
    b, bpriv = binit, binit
    gknux = None
    gknuxlist = list()
    Skinit = makeSkInitMpmath(funcf, measdata, binit, c)
    A = makeAinitMpmath(bstart, bend, Skinit, binit, isBinitGood)
    log = ''

    if verbose_wrapper:
        print('==grandCountGN_UltraX1_Limited_wrapperMpmath is launched==\n\n')

    for numiter in range(maxiter):
        bpriv = copy.copy(b)
        # compute b
        gknux = grandCountGN_UltraX1_mpmath_Limited(funcf, jacf, measdata, b, bstart, bend, c, A,
                                                    NSIG, implicit, verbose)
        if gknux is None:
            print("grandCountGN_UltraX1_Limited_wrapper crashed on some iteration")
            continue
        gknuxlist.append(gknux)

        if verbose_wrapper:
            print('Iteration \n', numiter, '\n', gknux)

        b = gknux[0]

        if not gknux[2] == '':
            #log += "On gknux iteration " + numiter + ": " + gknux[2] + "\n"
            log += "On gknux iteration {0}: {1}\n".format(numiter, gknux[2])

        for j in range(len(binit)):
            # halve the step coefficients
            A[j, 0] *= mpm.mpf('0.5')
            A[j, 1] *= mpm.mpf('0.5')

        condition = False
        for i in range(len(b)):
            # the comparison is done in ordinary float precision
            if mpm.fabs((b[i] - bpriv[i]) / bpriv[i]) > math.pow(10, -1 * NSIGGENERAL):
                condition = True
        if not condition:
            break
        # i.e. if at least one component of b changed significantly, keep iterating;
        # otherwise the loop reaches break and stops

    # print('grandCountGN_UltraX1_Limited_wrapper iterations number:', numiter)
    # print(gknux[0], gknux[1], log, gknux[3], gknux[4])
    return gknux[0], gknux[1], log, gknux[3], gknux[4]
def norm(self):
    """
    >>> import mpmapy
    >>> import mpmath
    >>> x = mpmapy.mpArray([1, -1])
    >>> print x.norm()
    1.4142135623731
    >>> print x.norm() == mpmath.sqrt("2")
    True
    """
    return mpmath.sqrt(mpmath.fabs(self.inner()))
def __test_getEigen(self): dim = random.randint(1,20) qmin = -mpmath.rand() qmax = mpmath.rand() pmin = -mpmath.rand() pmax = mpmath.rand() domain = [[qmin,qmax],[pmin,pmax]] qmapsys = QmapSystem(map=self.map, type='U', dim=dim, domain=domain) evals, evecs = qmapsys.getEigen() for evec in evecs: self.assertTrue(mpmath.fabs(evec.norm() -mpmath.mpf(1) ) < 1e-32) for i in range(len(evecs)): vals = [ evecs[i].inner(evecs[j]) for j in range(len(evecs)) ] self.assertTrue(mpmath.fabs(mpmath.fabs(vals[i]) - 1.0) < 1e-32) vals.pop(i) index = [x < 1e-32 for x in vals] self.assertFalse(numpy.all(index)) for evec in evecs: evec.hsmrep(10,10)
def str_var(v):
    l = len(v)
    var = mp.mpf(0)
    eps = 1.0
    for i in range(-l // 2, l // 2):
        var += v[i] * (i * i)
        eps -= v[i]
    if mp.fabs(eps) > prq_eps:
        return "!!! eps=" + str_log2(eps)
    return mp.nstr(var).ljust(12)
def psi_x(z, x, beta): """ Eq.(24) from Ref[1] with argument zeta=0 and no constant factor e*beta**2/2/rho**2. Note that 'x' here corresponds to 'chi = x/rho', and 'z' here corresponds to 'xi = z/2/rho' in the paper. """ kap = kappa(z, x, beta) alp = alpha(z, x, beta) arg2 = -4 * (1 + x) / x**2 T1 = (1 / fabs(x) / (1 + x) * ((2 + 2 * x + x**2) * ellipf(alp, arg2) - x**2 * ellipe(alp, arg2))) D = kap**2 - beta**2 * (1 + x)**2 * sin(2 * alp)**2 T2 = ((kap**2 - 2 * beta**2 * (1 + x)**2 + beta**2 * (1 + x) * (2 + 2 * x + x**2) * cos(2 * alp)) / beta / (1 + x) / D) T3 = -kap * sin(2 * alp) / D T4 = kap * beta**2 * (1 + x) * sin(2 * alp) * cos(2 * alp) / D T5 = 1 / fabs(x) * ellipf(alp, arg2) # psi_phi without e/rho**2 factor out = re((T1 + T2 + T3 + T4) - 2 / beta**2 * T5) return out
def asymptotic_expansion(self, omega): # Evaluate the modulus of the characteristic function domain = np.linspace(0, 5, 500) char_fn = list(map(lambda t: mp.fabs(self.char_fn(t)), domain)) # thresh 1e-40 thresh_check = [ domain[i] for i in range(500) if char_fn[i] < mp.mpf(1e-50) ] # Need to extend the domain if len(list(thresh_check)) == 0: j = 1 while len(list(thresh_check)) == 0: domain = np.linspace(5 * j, 5 * (j + 1), 500) char_fn = list(map(lambda t: mp.fabs(self.char_fn(t)), domain)) # thresh 1e-40 thresh_check = [ domain[i] for i in range(500) if char_fn[i] < mp.mpf(1e-50) ] j += 1 cutoff = thresh_check[0] # Generate the derivatives for the asymptotic expansion order = 6 if not hasattr(self, 'diffs'): self.gen_diffs(order) # Evaluate the expansion asym_series = mp.matrix(order, 1) for i in range(1, order + 1): asym_series[i - 1] = self.series_term(i, omega, cutoff) # Sum up and take the real part # We do not multiply by (-1) because the fact that we have g(x) = -x cancels this return mp.re(mp.fsum(asym_series))
def compute_l2_norm(machine, initial_spin, steps): _old_dps = mpmath.mp.dps mpmath.mp.dps = 50 wave_function = {} chain = MetropolisMC(machine, initial_spin) for state in islice(chain, *steps): spin = CompactSpin(state.spin) wave_function[spin] = mpmath.exp(mpmath.mpc(state.log_wf())) l2_norm = mpmath.sqrt( sum(map(lambda x: mpmath.fabs(x)**2, wave_function.values()), mpmath.mpf(0)) / len(wave_function)) mpmath.mp.dps = _old_dps return float(l2_norm)
def getGCD( a, b = 0 ):
    if real( b ) == 0:
        a = list( a )
    else:
        a, b = fabs( a ), fabs( b )

        while a:
            b, a = a, fmod( b, a )

        return b

    if isinstance( a[ 0 ], ( list, RPNGenerator ) ):
        return [ getGCD( real( arg ) ) for arg in a ]
    else:
        result = max( a )

        for pair in itertools.combinations( a, 2 ):
            gcd = getGCD( *pair )

            if gcd < result:
                result = gcd

        return result
def makePythagoreanTriple( n, k ):
    if real( n ) < 0 or real( k ) < 0:
        raise ValueError( "'make_pyth_3' requires positive arguments" )

    if n == k:
        raise ValueError( "'make_pyth_3' requires unequal arguments" )

    result = [ ]

    result.append( fprod( [ 2, n, k ] ) )
    result.append( fabs( fsub( fmul( n, n ), fmul( k, k ) ) ) )
    result.append( fadd( fmul( n, n ), fmul( k, k ) ) )

    return sorted( result )
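# A usage sketch (an addition), assuming rpn's real() validator and the mpmath f* helpers
# are imported as in the function above; results come back as sorted mpf triples.
print( makePythagoreanTriple( 2, 1 ) )   # the 3-4-5 triple
print( makePythagoreanTriple( 3, 2 ) )   # the 5-12-13 triple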
def smallest_largest_elements(_matrix):
    l = len(_matrix) - 1
    smallest_element = mp.mpf(1e100)  # initialize to a very large number
    largest_element = mp.mpf(0)
    for i in range(l):
        for j in range(l):
            num = _matrix[i, j]
            if num != 0:
                abs_num = fabs(num)
                if abs_num < smallest_element:
                    smallest_element = abs_num
                if abs_num > largest_element:
                    largest_element = abs_num
    return largest_element, smallest_element
def test_Matrix01(self): a = mpmath.mpc(mpmath.pi,"0") b = mpmath.mpc("0","1") m = mpArray.mpArray([[a,b],[b,a]]) mat = mpArray.mpMatrix(m) evals, evecs = mat.eigen(False, 'qeispack', verbose=True) self.assertTrue(mpmath.fabs(mpmath.mpc(mpmath.pi,"1") - evals[0]) < 1e-32) self.assertTrue(mpmath.fabs(mpmath.mpc(mpmath.pi,"-1") - evals[1]) < 1e-32) self.assertTrue(numpy.all([mpmath.fabs(mpmath.mpf(1)/mpmath.sqrt("2") - mpmath.fabs(x)) < 1e-32 for x in evecs[0]])) self.assertTrue(numpy.all([mpmath.fabs(mpmath.mpf(1)/mpmath.sqrt("2") - mpmath.fabs(x)) < 1e-32 for x in evecs[1]])) self.assertTrue(mpmath.fabs(mpmath.fsum(evecs[0]*evecs[0].conj())) - mpmath.mpf(1) < 1e-32) self.assertTrue(numpy.all([vec.abs2() -mpmath.mpf(1) < 1e-32 for vec in evecs])) self.assertTrue(mpmath.fabs(evecs[0].inner(evecs[1])) < 1e-32)
# coefficients are computed for an infinite domain). Notice as well
# that this syntax only captures the cosine coefficients, which is
# correct for the standard initial condition.
def agnesi(x):
    return dtheta/((x/aa)**2+r1)*mpm.sin(l*z)

THk0 = Fourier(Nk, agnesi)
T1 = time.clock(); print('Fourier time: %f' % (T1-T0))

# Preliminary check: compare the initial datum with its Fourier
# series.
TH0 = invFourier(x, THk0)
print('Inverse Fourier time: %f' % (time.clock()-T1))
err0 = [mpm.fabs(agnesi(x[i]) - TH0[i]) for i in range(len(x))]
T1 = time.clock(); print('Total setup time: %f' % (T1-T0))
print('Maximum initial error: %e' % max(float(ei) for ei in err0))

import pylab as py
py.figure(1)
py.plot([float(xi) for xi in x], [float(ei) for ei in err0], 'go-')
py.title("Initial error")
py.figure(2)
py.title("Initial datum and Fourier series")
py.plot(
    [float(xi) for xi in x],
    [float(ti) for ti in TH0],
    'mo-', label='Fourier series')
py.plot(
def _distance(num): return mp.fabs(mp.fsub(num, mp.nint(num)))
def getNumberName( n, ordinal = False ): units = '' if isinstance( n, RPNMeasurement ): value = n.value if value == 1 or value == -1: units = n.getUnitName( ) else: units = n.getPluralUnitName( ) n = real_int( value ) if n == 0: if ordinal: name = 'zeroth' else: name = 'zero' if units: name += ' ' name += units return name current = fabs( n ) if current >= power( 10, 3003 ): raise ValueError( 'value out of range for converting to an English name' ) group = 0 name = '' firstTime = True while current > 0: section = getSmallNumberName( int( fmod( current, 1000 ) ), ordinal if firstTime else False ) firstTime = False if section != '': groupName = getNumberGroupName( group ) if groupName != '': section += ' ' + groupName if ordinal and name == '': section += 'th' if name == '': name = section else: name = section + ' ' + name current = floor( fdiv( current, 1000 ) ) group += 1 if n < 0: name = 'negative ' + name if units: name += ' ' name += units return name