def _loglikelihood(self, alpha_delta, xmin, freq, N, Nmax):
    """Negative log-likelihood of the shifted discrete power-law model.

    ``alpha_delta`` packs the exponent and a relative shift; the shift is
    rescaled by ``xmin`` before use.  ``freq`` is a 2-column array of
    (value, count) rows whose final row is excluded from the weighted log
    sum.  Returns the negated log-likelihood so the value can be handed
    directly to a minimizer.
    """
    exponent, shift = alpha_delta
    shift = shift * xmin
    # Count-weighted sum of log(x - shift) over all but the last row.
    weighted_logsum = np.sum(freq[:-1, -1] * np.log(freq[:-1, 0] - shift))
    # Hurwitz-zeta normalization at the upper and lower cutoffs.
    ll = Nmax * np.log(float(mp.zeta(exponent, self.xmax - shift)))
    ll -= exponent * weighted_logsum
    ll -= N * np.log(float(mp.zeta(exponent, xmin - shift)))
    return -ll
def extend(self, t):
    """Advance the stored zeta samples up to ``t``.

    Points are appended at ``self.interval`` spacing; each point holds
    ``mp.zeta(re + height*1j)`` for every real part in ``self.res``.
    Points older than ``t - self.limit`` are dropped from the front.
    """
    while self.next <= t:
        height = self.next
        row = [mp.zeta(sigma + height * 1j) for sigma in self.res]
        self.pts.append((height, row))
        self.next += self.interval
    cutoff = t - self.limit
    while self.pts and self.pts[0][0] < cutoff:
        self.pts.pop(0)
def _loglikelihood(self, alpha_beta_dtrans, xmin, freq, N):
    """Negative log-likelihood of a two-regime (alpha/beta) power law.

    ``alpha_beta_dtrans`` packs the two exponents and the transition
    point expressed as a fraction of the [xmin, self.xmax] interval.
    ``freq`` is a 2-column array of (value, count) rows.
    """
    exp_low, exp_high, frac = alpha_beta_dtrans
    # Map the unit-interval transition parameter onto [xmin, self.xmax].
    transition = xmin + (self.xmax - xmin) * frac
    boundary = np.ceil(transition)
    # Normalization: zeta mass below the boundary under alpha plus the
    # rescaled zeta mass above it under beta.
    norm = (float(mp.zeta(exp_low, xmin))
            - float(mp.zeta(exp_low, boundary))
            + transition ** (exp_high - exp_low)
            * float(mp.zeta(exp_high, boundary)))
    logc = -np.log(norm)
    below = freq[freq[:, 0] < boundary]
    above = freq[freq[:, 0] >= boundary]
    ll = logc * N
    ll -= exp_low * np.sum(np.log(below[:, 0]) * below[:, -1])
    ll -= exp_high * np.sum(np.log(above[:, 0]) * above[:, -1])
    ll += (exp_high - exp_low) * np.sum(above[:, -1]) * np.log(transition)
    return -ll
def _get_ccdf(self, xmin):
    """Tabulate the fitted power-law CCDF on [xmin, self.xmax)."""
    exponent = self.fitting_res[xmin][1]['alpha']
    norm = 1. / float(mp.zeta(exponent, xmin))
    remaining = 1.
    rows = []
    for value in range(xmin, self.xmax):
        # Subtract the pmf at ``value`` to walk the survival function down.
        remaining -= value ** (-exponent) * norm
        rows.append([value, remaining])
    return np.asarray(rows)
def _get_ccdf(self, xmin):
    """Tabulate the CCDF of the fitted two-regime power law on [xmin, self.xmax)."""
    params = self.fitting_res[xmin][1]
    exp_low = params['alpha']
    exp_high = params['beta']
    transition = params['dtrans']
    boundary = int(np.ceil(transition))
    # Normalization constant of the piecewise model.
    norm = 1. / (float(mp.zeta(exp_low, xmin))
                 - float(mp.zeta(exp_low, boundary))
                 + transition ** (exp_high - exp_low)
                 * float(mp.zeta(exp_high, boundary)))
    remaining = 1.
    rows = []
    # Below the transition the pmf follows x^(-alpha)...
    for value in range(xmin, boundary):
        remaining -= value ** (-exp_low) * norm
        rows.append([value, remaining])
    # ...above it x^(-beta), rescaled by dtrans^(beta - alpha).
    scale = norm * transition ** (exp_high - exp_low)
    for value in range(boundary, self.xmax):
        remaining -= value ** (-exp_high) * scale
        rows.append([value, remaining])
    return np.asarray(rows)
def eps_err(s, t):
    """Error/bound term for a zeta-related estimate at the complex point ``s``.

    ``s`` is complex (sigma + iT); ``t`` is a real tuning parameter.
    Depends on the module-level helper ``alpha1`` — its semantics are not
    visible from this block.
    """
    sigma = s.real
    T = s.imag
    # NOTE(review): mp.pi() is *called* here.  That works when ``mp`` is the
    # mpmath module (constants are callable) but not on the mp context
    # object, whose ``pi`` is a plain attribute — confirm the import style.
    N = int((mp.sqrt((T - (t * mp.pi() / 8.0)) / (2 * mp.pi()))).real)
    alph = alpha1(s)
    term1 = sigma + (t / 2.0) * (alph.real) - (t / 4.0) * mp.log(N)
    neg_term1 = -1 * term1
    term2 = (t * t / 4.0) * (abs(alph)**2) + (1 / 3.0) + t
    if term1.real > 1:
        # Exponent large enough for the series to converge: use zeta itself.
        zsum = mp.zeta(term1)
    else:
        # Otherwise fall back to the truncated sum of n^(-term1), n = 1..N.
        zsum = 0.0
        for n in range(1, N + 1):
            zsum += mp.power(n, neg_term1)
    return 0.5 * zsum * mp.exp(term2 / (2 * (T - 3.33))) * term2
def test_levin_1():
    """Levin ``v``-variant acceleration of sum n^(2+3i) vs zeta(-2-3i)."""
    mp.dps = 17
    eps = mp.mpf(mp.eps)
    with mp.extraprec(2 * mp.prec):
        transform = mp.levin(method="levin", variant="v")
        terms = []
        n = 1
        while True:
            terms.append(mp.mpf(n) ** (2 + 3j))
            n += 1
            v, e = transform.update(terms)
            if e < eps:
                break
            if n > 1000:
                raise RuntimeError("iteration limit exceeded")
    # Loosen the tolerance slightly before comparing against zeta.
    eps = mp.exp(0.9 * mp.log(eps))
    assert abs(v - mp.zeta(-2 - 3j)) < eps
    w = mp.nsum(lambda k: k ** (2 + 3j), [1, mp.inf],
                method="levin", levin_variant="v")
    assert abs(v - w) < eps
def test_levin_1():
    """Check Levin-type sequence acceleration against mp.zeta."""
    mp.dps = 17
    eps = mp.mpf(mp.eps)
    with mp.extraprec(2 * mp.prec):
        accel = mp.levin(method="levin", variant="v")
        partials = []
        converged = False
        # At most 1000 terms before giving up.
        for n in range(1, 1001):
            partials.append(mp.mpf(n) ** (2 + 3j))
            v, e = accel.update(partials)
            if e < eps:
                converged = True
                break
        if not converged:
            raise RuntimeError("iteration limit exceeded")
    # Loosen the tolerance slightly before comparing against zeta.
    eps = mp.exp(0.9 * mp.log(eps))
    err = abs(v - mp.zeta(-2 - 3j))
    assert err < eps
    w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf],
                method="levin", levin_variant="v")
    err = abs(v - w)
    assert err < eps
def S_Kummer(beta0, kc, L, order, qmax):
    """Compute even and odd coefficients at an order number.

    Kummer-accelerated lattice sums: slowly decaying tails are subtracted
    analytically (zeta / Bernoulli terms) and the large remaining terms are
    evaluated in extended precision with mpmath.  For ``order == 0`` only
    the single coefficient ``S0`` is returned; otherwise ``(SOdd, SEven)``.
    """
    K = 2 * pi / L                      # lattice wavenumber spacing
    beta_n = lambda n: beta0 + n * K
    gamma_n = lambda n: -1j * np.sqrt(kc ** 2 - beta_n(n) ** 2 + 0j)
    # np.spacing(1) nudges the argument off the branch cut of arcsin.
    theta_n = lambda n: np.arcsin(beta_n(n) / kc - 1j * np.sign(n) * np.spacing(1))
    if order == 0:
        # Symmetric sum over nonzero indices with the leading asymptotic
        # terms removed (they are restored analytically via zeta(3) below).
        qs = np.hstack([np.arange(-qmax, 0), np.arange(1, qmax + 1)])
        S0sum = 1 / gamma_n(qs) - 1 / (K * np.abs(qs)) \
            - (kc ** 2 + 2 * beta0 ** 2) / (2 * K ** 3 * np.abs(qs) ** 3)
        S0sum = np.sum(S0sum)
        S0 = -1 - (2j / pi) * (np.euler_gamma + np.log(kc / (2 * K))) \
            - 2j / (gamma_n(0) * L) \
            - 2j * (kc ** 2 + 2 * beta0 ** 2) * zeta(3) / (K ** 3 * L) \
            - (2j / L) * S0sum
        return S0
    qs = np.arange(1, qmax + 1)
    oeven = 2 * order
    oodd = 2 * order - 1
    # Even sum with wavenumber terms, large terms excluded
    SEsum0 = np.exp(-1j * oeven * theta_n(qs)) / gamma_n(qs) \
        + np.exp(1j * oeven * theta_n(-qs)) / gamma_n(-qs)
    SEsum0 = SEsum0.sum()
    SEsum0 += np.exp(-1j * oeven * theta_n(0)) / gamma_n(0)
    SEsum0 *= -2j / L
    # Odd sum with wavenumber terms, large terms excluded
    SOsum0 = np.exp(-1j * oodd * theta_n(qs)) / gamma_n(qs) \
        - np.exp(1j * oodd * theta_n(-qs)) / gamma_n(-qs)
    SOsum0 = SOsum0.sum()
    SOsum0 += np.exp(-1j * oodd * theta_n(0)) / gamma_n(0)
    SOsum0 *= 2j / L
    # Even sum with factorial terms (Bernoulli polynomials via mpmath)
    ms = np.arange(1., order + 1)
    b = np.array([float(mp.bernpoly(2 * m, beta0 / K)) for m in ms])
    SEsumF = (-1) ** ms * 2 ** (2 * ms) * factorial(order + ms - 1) \
        * (K / kc) ** (2 * ms) * b \
        / (factorial(2 * ms) * factorial(order - ms))
    SEsumF = np.sum(SEsumF)
    SEsumF *= 1j / pi
    # Odd sum with factorial terms
    ms = np.arange(0, order)
    b = np.array([float(mp.bernpoly(2 * m + 1, beta0 / K)) for m in ms])
    SOsumF = (-1) ** ms * 2 ** (2 * ms) * factorial(order + ms - 1) \
        * (K / kc) ** (2 * ms + 1) * b \
        / (factorial(2 * ms + 1) * factorial(order - ms - 1))
    SOsumF = np.sum(SOsumF)
    SOsumF *= -2 / pi
    # extended precision calculations for large sum terms
    t1 = (1 / pi) * (kc / (2 * K)) ** oeven
    t2 = (beta0 * L * order / pi ** 2) * (kc / (2 * K)) ** oodd
    # assume we need ~15 digits of precision at a magnitude of 1
    dps = np.max([int(np.ceil(np.log10(np.abs(t1)))) + 15,
                  int(np.ceil(np.log10(np.abs(t2)))) + 15,
                  15])
    mp.dps = dps
    arg_ = mp.mpf(kc / (2 * K))
    # Closed-form value of the full tail sums (via zeta), from which the
    # leading terms are removed one by one in the loop below.
    SEinf = -(-1) ** order * arg_ ** (2 * order) \
        * mp.zeta(2 * order + 1) / mp.pi
    SOinf = (-1) ** order * beta0 * L * order * arg_ ** (2 * order - 1) \
        * mp.zeta(2 * order + 1) / mp.pi ** 2
    for i, m in enumerate(mp.arange(1, qmax + 1)):
        even_term = ((-1) ** order / (m * mp.pi)) * (arg_ / m) ** (2 * order)
        odd_term = ((-1) ** order * beta0 * L * order / (m ** 2 * mp.pi ** 2)) \
            * (arg_ / m) ** (2 * order - 1)
        SEinf += even_term
        SOinf -= odd_term
        # break condition, where we should be OK moving back to double precision
        if mp.fabs(even_term) < 1 and mp.fabs(odd_term) < 1:
            break
    mp.dps = 15  # restore mpmath's default working precision
    SEinf = complex(SEinf)
    SOinf = complex(SOinf)
    if i + 1 < qmax:
        # Remaining small terms can safely be accumulated in double precision.
        ms = np.arange(i + 2, qmax)
        even_terms = ((-1) ** order / (ms * pi)) \
            * (kc / (2 * K * ms)) ** (2 * order)
        odd_terms = ((-1) ** order * beta0 * L * order / (ms ** 2 * pi ** 2)) \
            * (kc / (2 * K * ms)) ** (2 * order - 1)
        SEinf += even_terms.sum()
        SOinf -= odd_terms.sum()
    # NOTE(review): the source formatting was ambiguous about whether these
    # final scalings sit inside the ``if`` above; they are placed outside on
    # the assumption the tail sums always need the factor — confirm.
    SEinf *= 2j
    SOinf *= 2
    SEven = SEsum0 + SEinf + 1j / (pi * order) + SEsumF
    SOdd = SOsum0 + SOinf + SOsumF
    return SOdd, SEven
def extend(self, t):
    """Append the zeta sample at height ``t`` and drop expired points."""
    self.pts.append((t, mp.zeta(self.re + t * 1j)))
    cutoff = t - self.limit
    while self.pts and self.pts[0][0] < cutoff:
        del self.pts[0]
# NOTE: This file implements a slightly different version of the Li criterion.
# It finds the Taylor expansion coefficients of log(xi(z/(z-1))) instead of
# its derivative, which appears in the original Li criterion.
#
# the following code is from
# http://fredrikj.net/blog/2013/03/testing-lis-criterion/
# It uses mpmath to calculate the Taylor expansion of the xi function
#
# It will produce the first 21 coefficients for the Li criterion:
# [-0.69315, 0.023096, 0.046173, 0.069213, 0.092198, 0.11511, 0.13793, 0.16064, 0.18322, 0.20566,
# 0.22793, 0.25003, 0.27194, 0.29363, 0.31511, 0.33634, 0.35732, 0.37803, 0.39847, 0.41862, 0.43846]
#
# More information about mpmath can be found at: mpmath.org
# http://mpmath.org/
from mpmath import mp

mp.dps = 5
mp.pretty = True

# Completed zeta (xi) function: xi(s) = (s-1) * pi^(-s/2) * Gamma(1+s/2) * zeta(s)
xi = lambda s: (s - 1) * mp.pi ** (-0.5 * s) * mp.gamma(1 + 0.5 * s) * mp.zeta(s)

# calculate 1st 21 coefficients of the Taylor expansion of log(xi(z/(z-1)))
tmp = mp.taylor(lambda z: mp.log(xi(z / (z - 1))), 0, 20)
# BUG FIX: the original used the Python-2-only statement ``print tmp``, which
# is a SyntaxError on Python 3; print() with one argument works on both.
print(tmp)
def zeta(z):
    """Evaluate the Riemann zeta function at ``z`` via mpmath."""
    value = mp.zeta(z)
    return value
def _loglikelihood(self, alpha_, xmin, logsum_N):
    """Negative log-likelihood of a pure discrete power law.

    ``alpha_`` is a 1-tuple holding the exponent; ``logsum_N`` packs the
    precomputed count-weighted sum of log(x) and the sample size N.
    """
    (exponent,) = alpha_
    logsum, n_obs = logsum_N
    # log L = -alpha * sum(log x) - N * log zeta(alpha, xmin); negate for a minimizer.
    ll = -exponent * logsum - n_obs * np.log(float(mp.zeta(exponent, xmin)))
    return -ll