def part_of_sum(k_on, k_off, mu, n, r, t):
    part_b1 = math.comb(n, r)
    logging.debug(part_b1)
    part_b2 = (-1) ** r * sc.poch(-k_off, r) * sc.poch(1 - k_off, n - r) * np.exp(-r * t)
    logging.debug(part_b2)
    part_b3 = 1 / sc.poch(1 + k_on + k_off, r)
    logging.debug(part_b3)
    part_b4 = 1 / sc.poch(2 - k_on - k_off, n - r)
    logging.debug(part_b4)
    part_b5 = sc.hyp1f1(k_off + r, 1 + k_on + k_off + r, mu * np.exp(-t))
    logging.debug(part_b5)
    # NB: some factors in the Pochhammer symbols reappear in the 1F1 terms
    # and may cancel each other, which may avoid dividing by zero.
    part_b6 = sc.hyp1f1(1 - k_off + n - r, 2 - k_on - k_off + n - r, -mu)
    logging.debug(part_b6)
    part_b = part_b1 * part_b2 * part_b3 * part_b4 * part_b5 * part_b6
    logging.debug(part_b)
    return part_b
def hansen_coefficient(s, r, x):
    a = poch(3 / 2, r / 2 - 1) * poch(3 / 2 + s, r / 2 - 1) / poch(s + 1, r / 2 - 1)
    b = np.zeros_like(x)
    for j in range(int(r / 2)):
        b = b + ((-1) ** j * poch(1 / 2 + s + r / 2, j) * x ** (2 * j)
                 / factorial(j) / factorial(r / 2 - 1 - j) / poch(3 / 2, j))
    return a * b
def generalized_assoc_laguerre(x, n, k):
    # Explicit series for the generalized (associated) Laguerre polynomial L_n^k(x).
    total = np.zeros(len(x), dtype=complex)
    for i in range(n + 1):
        numerator = special.poch(-n, i) * special.poch(i + k + 1, n - i)
        denominator = float(special.factorial(i))
        total += numerator * (x ** i) / denominator
    return total / special.factorial(n)
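# Added usage sketch (illustrative, assumes the numpy/scipy imports the snippet
# above relies on): for non-negative integer n and k, the explicit series should
# agree with SciPy's built-in generalized Laguerre evaluator.
def _demo_generalized_assoc_laguerre():
    import numpy as np
    from scipy import special
    x = np.linspace(0.0, 5.0, 11)
    series = generalized_assoc_laguerre(x, 4, 2)
    builtin = special.eval_genlaguerre(4, 2, x)
    print(np.allclose(series, builtin))  # expected: True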
def keval(self, x, k, order):
    return (
        (-1) ** (order - k)
        * special.poch(-order, k)
        * special.poch(-x, k)
        * self.alpha ** (order - k)
        / math.factorial(k)
    )
def _hyp0f2(b1, b2, z, eps=1e-6, nmax=10):
    # Accumulate the series from scratch; there are no convenient identities,
    # but 5 terms seems good enough, so use 10 to be safe.
    # mpmath provides this, but we don't want to introduce a dependency for one function.
    total = 0
    for k in range(nmax):
        total += 1 / (poch(b1, k) * poch(b2, k)) * z ** k / math.factorial(k)
    return total
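# Added convergence sketch (illustrative): for moderate |z| the truncated 0F2
# series above settles well before 10 terms, so comparing against a longer
# truncation is a cheap self-consistency check (assumes `poch` and `math` are
# imported as the snippet above expects).
def _demo_hyp0f2_convergence():
    b1, b2, z = 1.5, 2.5, 0.3
    print(abs(_hyp0f2(b1, b2, z, nmax=10) - _hyp0f2(b1, b2, z, nmax=30)))
    # expected: far below the nominal eps of 1e-6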
def p_stationary(n, k_on, k_off, k_syn, k_d):
    k_on = k_on / k_d
    k_off = k_off / k_d
    k_syn = k_syn / k_d

    part1 = k_syn ** n / fac(n)
    part2 = sc.poch(k_on, n) / sc.poch(k_on + k_off, n)
    part3 = sc.hyp1f1(k_on + n, k_on + k_off + n, -k_syn)

    ret_val = part1 * part2 * part3
    return ret_val
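# Added normalization sketch (illustrative assumption: p_stationary is a
# probability mass function over the copy number n, e.g. a telegraph-model
# steady state, so it should sum to ~1 over a wide enough range of n; assumes
# the `sc`, `fac` and numpy imports the snippet above relies on).
def _demo_p_stationary_normalization():
    k_on, k_off, k_syn, k_d = 0.5, 1.5, 5.0, 1.0
    total = sum(p_stationary(n, k_on, k_off, k_syn, k_d) for n in range(100))
    print(total)  # expected: close to 1.0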
def eval(self, x, order):
    mp.dps = 25
    mp.pretty = True
    return (
        special.poch(1 - self.N, order)
        * hyp3f2(-order, -x, 1 + order, 1, 1 - self.N, 1)
    )
def test_fht_exact(n):
    rng = np.random.RandomState(3491349965)

    # for a(r) a power law r^\gamma, the fast Hankel transform produces the
    # exact continuous Hankel transform if biased with q = \gamma
    mu = rng.uniform(0, 3)

    # convergence of HT: -1-mu < gamma < 1/2
    gamma = rng.uniform(-1 - mu, 1 / 2)

    r = np.logspace(-2, 2, n)
    a = r**gamma

    dln = np.log(r[1] / r[0])
    offset = fhtoffset(dln, mu, initial=0.0, bias=gamma)
    A = fht(a, dln, mu, offset=offset, bias=gamma)

    k = np.exp(offset) / r[::-1]

    # analytical result
    At = (2 / k)**gamma * poch((mu + 1 - gamma) / 2, gamma)

    assert_allclose(A, At)
def laplace_b(s, j, n, alpha):
    """Calculate the nth derivative with respect to a (alpha) of the Laplace
    coefficient b_s^j(a). Uses recursion and scipy special functions.

    Arguments
    ---------
    s : float
        Half-integer parameter of the Laplace coefficient.
    j : int
        Integer parameter of the Laplace coefficient.
    n : int
        Return the nth derivative with respect to a of b_s^j(a).
    alpha : float
        Semimajor axis ratio a1/a2 (alpha).
    """
    assert alpha >= 0 and alpha < 1, "alpha not in range [0,1): alpha={}".format(alpha)
    if j < 0:
        return laplace_b(s, -j, n, alpha)
    if n >= 2:
        return s * (laplace_b(s + 1, j - 1, n - 1, alpha)
                    - 2 * alpha * laplace_b(s + 1, j, n - 1, alpha)
                    + laplace_b(s + 1, j + 1, n - 1, alpha)
                    - 2 * (n - 1) * laplace_b(s + 1, j, n - 2, alpha))
    if n == 1:
        return s * (laplace_b(s + 1, j - 1, 0, alpha)
                    - 2 * alpha * laplace_b(s + 1, j, 0, alpha)
                    + laplace_b(s + 1, j + 1, 0, alpha))
    return 2 * poch(s, j) * alpha**j * hyp2f1(s, s + j, j + 1, alpha**2) / factorial(j)
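# Added cross-check sketch (illustrative): for n = 0 the closed form above
# should match the integral definition of the Laplace coefficient,
#   b_s^j(alpha) = (2/pi) * int_0^pi cos(j*psi) / (1 - 2*alpha*cos(psi) + alpha**2)**s dpsi,
# assuming the scipy imports (poch, hyp2f1, factorial) used by laplace_b are in place.
def _demo_laplace_b():
    import numpy as np
    from scipy.integrate import quad
    s, j, alpha = 0.5, 2, 0.4
    closed_form = laplace_b(s, j, 0, alpha)
    integrand = lambda psi: np.cos(j * psi) / (1.0 - 2.0 * alpha * np.cos(psi) + alpha ** 2) ** s
    numeric = 2.0 / np.pi * quad(integrand, 0.0, np.pi)[0]
    print(closed_form, numeric)  # expected: the two values agree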
def torontonian_analytical(l, nbar):
    r"""Return the value of the Torontonian of the O matrices generated by gen_omats.

    Args:
        l (int): number of modes
        nbar (float): mean photon number of the first mode (the only one not prepared in vacuum)

    Returns:
        float: value of the Torontonian of gen_omats(l, nbar)
    """
    if np.allclose(l, nbar, atol=1e-14, rtol=0.0):
        return 1.0
    beta = -(nbar / (l * (1 + nbar)))
    pref = factorial(l) / beta
    p1 = pref * l / poch(1 / beta, l + 2)
    p2 = pref * beta / poch(2 + 1 / beta, l)
    return (p1 + p2) * (-1) ** l
def check_sph_harm(l, m, theta, phi):
    print(sp.sph_harm(m, l, phi, theta))
    Clm = np.sqrt((2 * l + 1.0) / 4 / np.pi * sp.poch(l + abs(m) + 1, -2 * abs(m)))
    [Pmlv, Pmlvdiff] = sp.lpmn(abs(m), l, np.cos(theta))
    ans = Clm * Pmlv[abs(m), l] * np.exp(1j * m * phi)
    if m < 0:
        ans = (-1) ** m * np.conjugate(ans)
    print(ans)
def matel(x, i, j):
    n = min(i, j)
    m = max(i, j)
    factor = ((-0.5) ** ((m - n) / 2.)
              * 1. / np.sqrt(special.poch(n + 1, m - n))
              * x ** (m - n)
              * np.exp(-0.25 * x ** 2)
              * special.eval_genlaguerre(n, m - n, 0.5 * x ** 2))
    return factor
def spherical_harmonic_normalization(n, m, norm='full'):
    r"""The normalization factor for real valued spherical harmonics.

    .. math::

        N_n^m = \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}}

    Parameters
    ----------
    n : int
        The spherical harmonic order.
    m : int
        The spherical harmonic degree.
    norm : 'full', 'semi', optional
        Normalization to use. Can be either fully normalized on the sphere
        or semi-normalized.

    Returns
    -------
    norm : double
        The normalization factor.
    """
    if np.abs(m) > n:
        factor = 0.0
    else:
        if norm == 'full':
            z = n + m + 1
            factor = _spspecial.poch(z, -2 * m)
            factor *= (2 * n + 1) / (4 * np.pi)
            if int(m) != 0:
                factor *= 2
            factor = np.sqrt(factor)
        elif norm == 'semi':
            z = n + m + 1
            factor = _spspecial.poch(z, -2 * m)
            if int(m) != 0:
                factor *= 2
            factor = np.sqrt(factor)
        else:
            raise ValueError("Unknown normalization.")
    return factor
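# Added identity sketch (illustrative): the Pochhammer call above encodes the
# factorial ratio in the docstring formula, poch(n + m + 1, -2m) = (n-m)!/(n+m)!,
# without forming the two large factorials separately.
def _demo_normalization_identity():
    from math import factorial
    from scipy import special
    n, m = 7, 3
    print(special.poch(n + m + 1, -2 * m), factorial(n - m) / factorial(n + m))
    # expected: the two values agree up to floating-point rounding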
def paris_exponential_series(a, b, z, i, maxiters):
    """The exponentially small addition to the asymptotic expansion on the
    negative real axis. The argument `i` is the truncation index for the
    original series.

    This function does not reproduce all of the significant figures of Table 2
    in Paris (2013), suggesting a small implementation bug. Possibly fewer than
    all 5 terms should be summed for optimal performance. (There are of course
    infinitely many terms, but the first 5 are the only ones for which the
    polynomial coefficients were included in the paper.)
    """
    M = 5
    theta = a - b
    x = -z
    if i < maxiters:
        # The optimal truncation term has been determined.
        v = a + i + theta
    else:
        # We don't know exactly how many terms are optimal (it's more than the
        # number of terms summed), so we'll use an approximation.
        v = x
    A = np.arange(M)
    A = poch(1 - a, A) * poch(b - a, A) / gamma(A + 1)
    first_sum = np.sum((-1) ** np.arange(M) * A * x ** (-np.arange(M)))
    second_sum = 0
    for idx in range(M):
        B = sum((-2) ** k * poch(0.5, k) * A[idx - k]
                * np.polyval(PARIS_G[k, :], v - x - (idx - k)) * 6 ** (-2 * k)
                for k in range(idx + 1))
        second_sum += (-1) ** idx * B * x ** (-idx)
    if np.real(a) and np.real(b) and (b < 0 or a < 0):
        c = np.exp(gammaln(b + 0j) - gammaln(a + 0j) - x + theta * np.log(x))
        c = np.real(c)
    else:
        c = np.exp(gammaln(b) - gammaln(a) - x + theta * np.log(x))
    return c * (np.cos(np.pi * theta) * first_sum
                - 2 * np.sin(np.pi * theta) / np.sqrt(2 * np.pi * x) * second_sum)
def test_gegenbauer(self):
    a = 5 * np.random.random() - 0.5
    if np.any(a == 0):
        a = -0.2
    Ca0 = orth.gegenbauer(0, a)
    Ca1 = orth.gegenbauer(1, a)
    Ca2 = orth.gegenbauer(2, a)
    Ca3 = orth.gegenbauer(3, a)
    Ca4 = orth.gegenbauer(4, a)
    Ca5 = orth.gegenbauer(5, a)

    assert_array_almost_equal(Ca0.c, array([1]), 13)
    assert_array_almost_equal(Ca1.c, array([2 * a, 0]), 13)
    assert_array_almost_equal(Ca2.c, array([2 * a * (a + 1), 0, -a]), 13)
    assert_array_almost_equal(
        Ca3.c, array([4 * sc.poch(a, 3), 0, -6 * a * (a + 1), 0]) / 3.0, 11)
    assert_array_almost_equal(
        Ca4.c,
        array([4 * sc.poch(a, 4), 0, -12 * sc.poch(a, 3), 0,
               3 * a * (a + 1)]) / 6.0, 11)
    assert_array_almost_equal(
        Ca5.c,
        array([4 * sc.poch(a, 5), 0, -20 * sc.poch(a, 4), 0,
               15 * sc.poch(a, 3), 0]) / 15.0, 11)
def solution_me(k: int, alpha: float, m: int = 1) -> float:
    """Return the degree probability obtained analytically from the master equation.

    For alpha != 0, P(k) is a ratio of gamma functions (written here via
    Pochhammer symbols). That expression breaks down as alpha -> 0, so for
    alpha == 0 the limiting form m^(k-m) / (m+1)^(k-m+1) is used instead.

    :param k: vertex degree
    :param alpha: alpha value
    :param m: at this moment always equal to 1; originally this is yet another
        generalization of the BA graph
    :return: probability of the given vertex degree
    """
    if alpha > 0.001:
        m = 1
        ret = 2 / (2 * m + 2 - alpha * m)
        ret *= poch(2 * m / alpha - m, 1 + 2 / alpha)
        ret /= poch(2 * m / alpha - 2 * m + k, 1 + 2 / alpha)
    else:
        m = 1
        ret = m ** (k - m) / (m + 1) ** (k - m + 1)
    return ret
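# Added sanity-check sketch (illustrative): in the alpha == 0 branch with m = 1
# the expression reduces to P(k) = 1 / 2**k for k >= 1, a geometric distribution
# whose probabilities sum to 1.
def _demo_solution_me_normalization():
    total = sum(solution_me(k, alpha=0.0) for k in range(1, 200))
    print(total)  # expected: very close to 1.0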
def threeFtwo(a, b):
    """Hypergeometric 3_F_2([a1, a2, a3], [b1, b2], 1).

    Used in calculations of the KaulaF function.

    Arguments
    ---------
    a : list of ints
    b : list of ints

    Returns
    -------
    float
    """
    a1, a2, a3 = a
    b1, b2 = b
    kmax = min(1 - a1, 1 - a2, 1 - a3)
    tot = 0
    for k in range(0, kmax):
        tot += (poch(a1, k) * poch(a2, k) * poch(a3, k)
                / poch(b1, k) / poch(b2, k) / factorial(k))
    return tot
def get_vecsph(x, y, z, l, m):
    # calculate everything with |m|, then at the end conjugate and multiply (-1)^m if needed
    m_abs = abs(m)

    A1lm = np.zeros(3, dtype=complex)
    A2lm = np.zeros(3, dtype=complex)
    A3lm = np.zeros(3, dtype=complex)
    if m_abs > l:
        print("m>l encountered in get_vecsph")
        return A1lm, A2lm, A3lm

    r, theta, phi, rhat, thehat, phihat = Cart_to_sphere(x, y, z)
    costheta = np.cos(theta)
    # assembling spherical harmonics by hand since we also need their derivatives
    [Pmlv, Pmlvdiff] = sp.lpmn(m_abs, l, costheta)
    # prefactor for spherical harmonics
    Clm = np.sqrt((2 * l + 1.0) / 4 / np.pi * sp.poch(l + m_abs + 1, -2 * m_abs))
    phiphase = np.exp(1j * m_abs * phi)

    # A3lm = rhat*sp.sph_harm(m,l,phi,theta)  # scipy harmonics have azimuthal angle as first angle argument
    A3lm = rhat * Clm * Pmlv[m_abs, l] * phiphase

    if theta == 0.0 or costheta == -1.0:
        # special case, avoid 1/sin(theta) nan, see Kristensson appendix D
        if m_abs == 1:
            prefact = np.sqrt((2 * l + 1.0) / 16 / np.pi)
            A1lm = -prefact * np.array([1j, -1.0, 0.0])
            A2lm = -prefact * np.array([1.0, 1j, 0.0])
            if costheta == -1.0:
                A1lm = (-1) ** l * A1lm
                A2lm = (-1) ** l * A2lm
        # otherwise nothing is done to A1lm, A2lm, and we get back 0s
    elif l > 0:  # A100, A200 are all 0
        pdvYphi_over_sine = 1j * m_abs * Clm * Pmlv[m_abs, l] * phiphase / np.sin(theta)
        pdvYthe = -np.sin(theta) * Clm * phiphase
        pdvYthe *= Pmlvdiff[m_abs, l]
        A1lm = (thehat * pdvYphi_over_sine - phihat * pdvYthe) / np.sqrt(l * (l + 1.0))
        A2lm = (thehat * pdvYthe + phihat * pdvYphi_over_sine) / np.sqrt(l * (l + 1.0))

    if m < 0:
        A1lm = (-1) ** m * np.conjugate(A1lm)
        A2lm = (-1) ** m * np.conjugate(A2lm)
        A3lm = (-1) ** m * np.conjugate(A3lm)

    return A1lm, A2lm, A3lm
def poch_(z, m):
    return 1.0 / poch(z, m)
def poch_minus(z, m):
    return 1.0 / poch(z, -m)
def c_k_partial(k, immi_rate, birth_rate, death_rate, N, K, comp_overlap):
    value = 1.0
    if k == 0.0:
        return value
    else:
        """ previous calc.
        for i in np.arange(1, k+1):
            value = value*( ( immi_rate + birth_rate*(i-1))/( i*(death_rate
                    + (birth_rate-death_rate)*(i*(1-comp_overlap)+comp_overlap*N)/K) ) )
        """
        c = (death_rate * K / (birth_rate - death_rate) + comp_overlap * N) / (1 - comp_overlap)
        value = ((birth_rate * K / ((birth_rate - death_rate) * (1 - comp_overlap))) ** k
                 * poch(immi_rate / birth_rate, k)
                 / (factorial(k) * poch(c + 1, k)))
        return value
def R(i, immi_rate, birth_rate, death_rate, N, K, comp_overlap):
    if i == 1:
        return 1.0
    else:
        return ((birth_rate * K) ** (i - 1)
                * poch(immi_rate / birth_rate + 1, i - 1)
                / (fact(i - 1) * poch(death_rate * K + comp_overlap * N + 1, i - 1)))
def negative_binom(minus_q, l):
    # scipy.special.binom returns a NaN when called at a negative integer,
    # so this alternate Pochhammer formulation is used when the argument is
    # potentially a negative integer.
    return (-1) ** l * poch(-1 * minus_q, l) / factorial(l)
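# Added usage sketch (illustrative, assumes the `poch`/`factorial` imports the
# snippet above relies on): the Pochhammer form reproduces the generalized
# binomial coefficient even at a negative integer top argument, e.g.
# C(-2, 3) = (-2)(-3)(-4)/3! = -4, and matches scipy.special.binom for ordinary
# arguments.
def _demo_negative_binom():
    from scipy.special import binom
    print(negative_binom(-2, 3))               # expected: -4.0
    print(negative_binom(5, 2), binom(5, 2))   # expected: both 10.0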
def falling_factorial(n, p):
    return 1.0 / spspec.poch(n + 1, -p)
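# Added usage sketch (illustrative): the reciprocal-Pochhammer form above yields
# the falling factorial n*(n-1)*...*(n-p+1), e.g. 5*4*3 = 60.
def _demo_falling_factorial():
    print(falling_factorial(5, 3))  # expected: 60.0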
def arcsin_taylor_factor(p):
    """Returns the (1+2p)th Taylor expansion factor of arcsin(x) around x=0,
    which is a_p = (1/2)_p / ((1+2p) * p!)."""
    return poch(1 / 2, p) / ((1 + 2 * p) * math.factorial(p))
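# Added usage sketch (illustrative): summing a_p * x**(2p+1) over a modest number
# of terms should reproduce arcsin(x) for |x| well inside the unit interval.
def _demo_arcsin_taylor():
    import numpy as np
    x = 0.3
    series = sum(arcsin_taylor_factor(p) * x ** (2 * p + 1) for p in range(12))
    print(series, np.arcsin(x))  # expected: the two values agree closely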
def pdf_smalln_better_unnormalized(x, stoch, cap, delta=1.):
    # This creates numerical problems, as it effectively divides ~0 by ~0.
    P1 = 1.
    return (P1 * (1 - stoch) / (1 - 2 * stoch) / x
            * poch(1 + cap * (1 + delta) / (1 - 2 * stoch), x - 1)
            / poch(1 + (1 - stoch + cap * delta / 2.) / (1 - stoch), x - 1)
            * ((1 - 2 * stoch) / (1 - stoch)) ** x)
def pdf(a, b, n, k):
    return (sp.poch(n, k) * sp.poch(a, n) * sp.poch(b, k)
            / (math.factorial(k) * sp.poch(a + b, n) * sp.poch(n + a + b, k)))
def kurtosis(m, omega):
    return ((m * (4 * m + 1) - 2 * (2 * m + 1) * sp.poch(m, 1 / 2) ** 2)
            / (m - sp.poch(m, 1 / 2) ** 2) ** 2 - 3)
def skewness(m, omega):
    return ((sp.poch(m, 1 / 2) * (1 / 2 - 2 * (m - sp.poch(m, 1 / 2) ** 2)))
            / math.pow(m - sp.poch(m, 1 / 2) ** 2, 3 / 2))
def poch_sum_ln_15_4_1(a, b, c, z):
    m = -a
    n = np.arange(m + 1)
    return np.sum(poch(-m, n) * z ** n * np.exp(pochln(b, n) - gammaln(n + 1)) / poch(c, n))
def poch_sum_15_4_1(a, b, c, z):
    m = -a
    n = np.arange(m + 1)
    return np.sum(poch(-m, n) * poch(b, n) * z ** n / (poch(c, n) * gamma(n + 1)))
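# Added cross-check sketch (illustrative): Abramowitz & Stegun 15.4.1 is the
# terminating Gauss series for a negative-integer first parameter, so for such
# an `a` the sum above should agree with scipy.special.hyp2f1 (assumes the
# numpy/scipy imports the snippet above relies on).
def _demo_poch_sum_15_4_1():
    from scipy.special import hyp2f1
    a, b, c, z = -3, 2.5, 4.2, 0.7
    print(poch_sum_15_4_1(a, b, c, z), hyp2f1(a, b, c, z))  # expected: equal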