def _rhov2int(self, phi, r, ra):
    """Compute dimensionless pressure integral for phi, r """

    # Isotropic case first (equation 9, GZ15)
    rhov2r = exp(phi)*gammainc(self.g + 2.5, phi)
    rhov2 = 3*rhov2r
    rhov2t = 2*rhov2r

    # Add anisotropy, add parts depending explicitly on r
    # (see equations 12, 13, and 14 of GZ15)
    if (ra < self.ramax) and (r > 0) and (phi > 0):
        p, g = r/ra, self.g
        p2 = p**2
        p12 = 1 + p2
        g3, g5, g7, fp2 = g+1.5, g+2.5, g+3.5, phi*p2

        P1 = p2*phi**g5/gamma(g7)
        H1 = hyp1f1(1, g7, -fp2) if fp2 < self.max_arg_exp else g5/fp2
        H2 = hyp1f1(2, g7, -fp2) if fp2 < self.max_arg_exp else g5*g3/fp2**2

        rhov2r += P1*H1
        rhov2r /= p12

        rhov2t /= p12
        rhov2t += 2*P1*(H1/p12 + H2)
        rhov2t /= p12

        rhov2 = rhov2r + rhov2t
    return rhov2, rhov2r, rhov2t
def beta_poisson_pmf(x, lmbd, Phi, N):
    '''
    Evaluate the probability mass function for beta-Poisson distribution.

    Parameters
    ----------
    x : int or array
        point(s) at which to evaluate function
    lmbd : float
    Phi : float
    N : float

    Returns
    -------
    P : float or array
        probability of each point in x
    '''
    if type(x) == int:
        P = spsp.hyp1f1(x+Phi*lmbd, x+Phi*N, -N)
        for n in range(1, x+1):  # This loop gives us the N^x/gamma(x+1) term
            P = (N/n)*P
        for m in range(x):  # This loop gives us the term with the two gamma functions in numerator and denominator
            P = ((m+Phi*lmbd)/(m+Phi*N))*P
    else:
        P = []
        for i in range(0, len(x)):
            p = spsp.hyp1f1(x[i]+Phi*lmbd, x[i]+Phi*N, -N)
            for n in range(1, x[i]+1):  # This loop gives us the N^x/gamma(x+1) term
                p = (N/n)*p
            for m in range(x[i]):  # This loop gives us the term with the two gamma functions in numerator and denominator
                p = ((m+Phi*lmbd)/(m+Phi*N))*p
            P = P + [p]
    return P
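# Usage sketch (my addition, not part of the original snippet): for parameter
# values with lmbd < N the pmf above should sum to approximately one over the
# non-negative integers, which makes a handy sanity check. The parameter
# values here are arbitrary examples; this assumes beta_poisson_pmf above is
# in scope and scipy.special is imported as spsp, as in that snippet.
import numpy as np
import scipy.special as spsp

lmbd, Phi, N = 2.0, 0.5, 10.0
support = list(range(100))
probs = beta_poisson_pmf(support, lmbd, Phi, N)
assert np.isclose(sum(probs), 1.0)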
def deviate1(C0, sigma, beta, orig=False):
    """ Calculate deviate to solve for C0

    Args:
        C0 (float):
        sigma (float):
        beta (float):
        orig (bool, optional): use the original approach. Not recommended

    Returns:
        float: deviate
    """
    if orig:
        # Calculate <D>
        x = -1 * C0**2 / 18 / sigma**2
        num1 = const1_A * hyp1f1(2 / 3, 3 / 2, x)
        num2 = const2_A * sigma * hyp1f1(1 / 6, 1 / 2, x)
        cerf = erf(C0 / (3 * np.sqrt(2) * sigma))  # Used again below
        denom = const3_A * sigma**(4 / 3) * (cerf + 1)
        avgD = (num1 + num2) / denom
    else:
        PDF = DMcosmic_PDF(Delta_values, C0, sigma=sigma, beta=beta)
        avgD = np.sum(Delta_values * PDF) / np.sum(PDF)
    # Return
    return np.abs(avgD - 1)
def raw_gaussian_moments_bivar(indices, mu1, mu2, sig1, sig2):
    """
    This function returns raw 2D-Gaussian moments as a function of means
    (mu_1, mu_2) and standard deviations (sigma_1, sigma_2)
    """
    num_moments = len(indices)
    moments = np.zeros(num_moments)
    for i_moment in range(num_moments):
        i = indices[i_moment][0]
        j = indices[i_moment][1]
        moments[i_moment] = (
            (1.0 / math.pi) * 2**((-4.0 + i + j) / 2.0) *
            math.exp(-(mu1**2.0 / (2.0 * sig1**2)) - (mu2**2.0 / (2 * sig2**2.0))) *
            sig1**(-1.0 + i) * sig2**(-1 + j) *
            (-math.sqrt(2.0) * (-1.0 + (-1.0)**i) * mu1 * sc.gamma(1.0 + i / 2.0) *
             sc.hyp1f1(1 + i / 2.0, 3.0 / 2.0, mu1**2.0 / (2.0 * sig1**2.0)) +
             (1.0 + (-1.0)**i) * sig1 * sc.gamma((1.0 + i) / 2.0) *
             sc.hyp1f1((1.0 + i) / 2.0, 1.0 / 2.0, mu1**2.0 / (2.0 * sig1**2.0))) *
            (-math.sqrt(2.0) * (-1 + (-1)**j) * mu2 * sc.gamma(1.0 + j / 2.0) *
             sc.hyp1f1(1.0 + j / 2.0, 3.0 / 2.0, mu2**2.0 / (2.0 * sig2**2.0)) +
             (1.0 + (-1)**j) * sig2 * sc.gamma((1.0 + j) / 2.0) *
             sc.hyp1f1((1.0 + j) / 2.0, 1.0 / 2.0, mu2**2.0 / (2.0 * sig2**2.0))))
    return moments
def wake(self, z, maxi=25, L=None, bunlen=80e-6, convolved=True):
    gaus_to_si = 120*_c/4  # _Z0 _c / (4 pi)
    L = L or (2 * _np.pi * self.rho)

    # Free Space Term
    inds = z < 0
    W0 = _np.zeros(len(z))
    W0[inds] = (-2/3**(4/3)/self.rho**(2/3) /
                _np.power((-z[inds]), 4/3)*gaus_to_si*L)

    # Shielding Term
    W1 = _np.zeros(len(z))
    zshield = self.shielding / self.bl * z
    for i in range(1, maxi):
        uai = self._getY(3*zshield/i**(3/2))
        W1 += 8*_np.pi*(-1)**(i+1)/i/i*uai*(3 - uai)/(1 + uai)**3
    W1 *= -1/self.h**2 / (2 * _np.pi) * gaus_to_si * L

    # If the convolved values are wanted
    if convolved:
        # For free space, use the analytical formula for the convolution
        bl = bunlen
        C = _Z0*_c/2**(13/6)/_np.pi**(3/2)/(3*self.rho**2*bl**10)**(1/3)*L
        W0 = C*(2**(1/2)*_scyspe.gamma(5/6)*(
                bl**2*_scyspe.hyp1f1(-1/3, 1/2, -z*z/2/bl**2) -
                z**2*_scyspe.hyp1f1(2/3, 3/2, -z*z/2/bl**2)) +
                z*bl*_scyspe.gamma(4/3)*(
                3*_scyspe.hyp1f1(1/6, 1/2, -z*z/2/bl**2) -
                2*_scyspe.hyp1f1(1/6, 3/2, -z*z/2/bl**2)))
        # For shielding, perform the convolution numerically
        bunch = _np.exp(-(z*z/bl**2)/2)/_np.sqrt(2*_np.pi)/bl  # gaussian
        W1 = _scysig.fftconvolve(W1, bunch, mode='same') * (z[1]-z[0])
    return W0, W1
def _rhov2int(self, phi, r, ra):
    """Compute product of density and mean square velocity """

    # Isotropic case first
    rhov2r = exp(phi)*gammainc(self.g + 2.5, phi)
    rhov2 = 3*rhov2r
    rhov2t = 2*rhov2r

    # Add anisotropy
    if (ra < self.ramax) & (r > 0) & (phi > 0):
        p, g = r/ra, self.g
        p2 = p**2
        p12 = 1 + p2
        g3, g5, g7, fp2 = g+1.5, g+2.5, g+3.5, phi*p2

        P1 = p2*phi**g5/gamma(g7)
        H1, H2 = hyp1f1(1, g7, -fp2), hyp1f1(2, g7, -fp2)

        rhov2r += P1*H1
        rhov2r /= p12

        rhov2t /= p12
        rhov2t += 2*P1*(H1/p12 + H2)
        rhov2t /= p12

        rhov2 = rhov2r + rhov2t
    return rhov2, rhov2r, rhov2t
def part_of_sum(k_on, k_off, mu, n, r, t):
    part_b1 = math.comb(n, r)
    logging.debug(part_b1)
    part_b2 = (-1) ** r * sc.poch(-k_off, r) * sc.poch(1 - k_off, n - r) * np.exp(-r * t)
    logging.debug(part_b2)
    part_b3 = 1 / sc.poch(1 + k_on + k_off, r)
    logging.debug(part_b3)
    part_b4 = 1 / sc.poch(2 - k_on - k_off, n - r)
    logging.debug(part_b4)
    part_b5 = sc.hyp1f1(k_off + r, 1 + k_on + k_off + r, mu * np.exp(-t))
    logging.debug(part_b5)
    # NB: some terms in the Pochhammer symbols reappear in 1F1 and may cancel
    # each other, which might avoid dividing by zero
    part_b6 = sc.hyp1f1(1 - k_off + n - r, 2 - k_on - k_off + n - r, -mu)
    logging.debug(part_b6)
    part_b = part_b1 * part_b2 * part_b3 * part_b4 * part_b5 * part_b6
    logging.debug(part_b)
    return part_b
def P0(n):
    p = 0.0
    for m in range(n):
        p += sp.comb(n-1, m) * rb**(n-1-m) * (R/Sb)**m * fracrise(a, b, m) * \
             (Sb*(m+a)*sp.hyp1f1(a+m+1, b+m, w0)/(Sb-1) -
              (m+a+su*rb/R)*sp.hyp1f1(a+m, b+m, w0))
    return p / math.factorial(n)
def _Z1ba2(b, a, order=10):
    js = np.arange(0, order + 1)
    _arg = 0.25 * (2 * js + 1)
    expr1 = gamma(_arg) * hyp1f1(_arg, 0.5, 0.25 * b**2)
    expr2 = b * gamma(_arg + 0.5) * hyp1f1(_arg + 0.5, 1.5, 0.25 * b**2)
    expr3 = a**(2 * js) / factorial(2 * js)
    return 0.5 * sum((expr1 + expr2) * expr3)
def mSweepSel(a, mu, s, V, k, m):
    hyperg1 = hyp1f1(k + mu, m + 2.0 * mu, -(1.0 - a) * V + a * V)
    hyperg2 = hyp1f1(k + mu, m + 2.0 * mu, s - (1.0 - a) * V + a * V)
    ret = math.exp(gammaln(k + mu) + gammaln(m - k + mu) - gammaln(m + 2.0 * mu))
    ret *= binom(m, k) * math.exp(-(1.0 - a) * s - a * V) / zSel(a, mu, s)
    ret *= math.exp((1.0 - a) * s) * hyperg1 - hyperg2
    ret += hitchhikingFractionSel(a, 1, mu, s, V) if k == m else 0.0
    ret += hitchhikingFractionSel(a, 0, mu, s, V) if k == 0 else 0.0
    return ret
def hitchhikingFractionSel(a, b, mu, s, V):
    hyperg1 = hyp1f1(b + mu, 1.0 + 2.0 * mu, -(1.0 - a) * V + a * V)
    hyperg2 = hyp1f1(b + mu, 1.0 + 2.0 * mu, s - (1.0 - a) * V + a * V)
    ret = math.exp(gammaln(b + mu) + gammaln(1.0 - b + mu) - gammaln(1.0 + 2.0 * mu))
    ret *= math.exp(-(1.0 - a) * s - a * V)
    ret /= zSel(a, mu, s)
    ret *= math.exp((1.0 - a) * s) * hyperg1 - hyperg2
    diff = mSel(a, mu, s, b, 1)
    diff -= ret
    return diff
def mSweep(a, mu, V, k, m):
    ret = binom(m, k)
    hyperg1 = hyp1f1(k + mu, m + 2.0 * mu, (2.0 * a - 1.0) * V)
    hyperg2 = hyp1f1(k + mu, 1.0 + m + 2.0 * mu, (2.0 * a - 1.0) * V)
    ret /= zNeutral(mu)
    ret *= ((-a * math.exp(-V) + (1.0 - a)) *
            math.exp(gammaln(k + mu) + gammaln(-k + m + mu) - gammaln(m + 2.0 * mu)) *
            ((-2.0 * a) * hyperg1 + 2.0 * (-k + m + mu) / (m + 2.0 * mu) * hyperg2))
    ret += hitchhikingFraction(a, 1, mu, V) if k == m else 0.0
    ret += hitchhikingFraction(a, 0, mu, V) if k == 0 else 0.0
    return ret
def whittM(k, m, z):
    """Evaluates the Whittaker function M(k, m, z) as defined in
    Abramowitz & Stegun, Section 13.1.
    """
    from scipy.special import hyp1f1
    # Cast k and m to float so integer inputs cannot trigger integer
    # arithmetic in the 1F1 parameters (the original int/float branches all
    # reduced to this same expression).
    k, m = float(k), float(m)
    return np.exp(-0.5 * z) * np.power(z, 0.5 + m) * hyp1f1(0.5 + m - k, 1.0 + 2.0 * m, z)
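# Quick sanity check (my addition, not part of the original snippet): for
# k = 0 and m = 1/2 the Whittaker function reduces to M_{0,1/2}(z) = 2*sinh(z/2),
# which exercises the hyp1f1-based formula above. Assumes whittM above is in
# scope with numpy available as np.
import numpy as np

z = np.linspace(0.1, 5.0, 20)
assert np.allclose(whittM(0.0, 0.5, z), 2.0 * np.sinh(0.5 * z))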
def koay_next(t_n, N, r):
    """ returns the n+1 gaussian SNR value given an estimate
    @param: t_n estimate of the gaussian SNR
    @param: N the number of MRA channels
    @param: r the measured signal to noise ratio
    """
    g_n = g_theta(t_n, N, r)
    b_n = beta_N(N)
    f1_a = hyp1f1(-0.5, N, -0.5*t_n**2.0)
    f1_b = hyp1f1(0.5, N+1, -0.5*t_n**2.0)
    return t_n - (g_n*(g_n - t_n)) / \
        (t_n*(1.0+r**2.0)*(1.0 - (0.5*b_n**2.0 / N) * f1_a * f1_b) - g_n)
def hitchhikingFraction(a, b, mu, V):
    hyperg1 = hyp1f1(b + mu, 1.0 + 2.0 * mu, (2.0 * a - 1.0) * V)
    hyperg2 = hyp1f1(b + mu, 2.0 + 2.0 * mu, (2.0 * a - 1.0) * V)
    ret = -a * math.exp(-V) + 1.0 - a
    ret /= zNeutral(mu)
    ret *= math.exp(gammaln(b + mu) + gammaln(1.0 - b + mu) - gammaln(1.0 + 2.0 * mu))
    term = -2.0 * a * hyperg1
    term += 2.0 * (1.0 - b + mu) / (1.0 + 2.0 * mu) * hyperg2
    ret *= term
    diff = mNeutral(a, mu, b, 1) - ret
    return diff
def _get_spline(self):
    """Defines a cubic spline to fit the concentration parameter."""
    x = np.logspace(-3, np.log10(self.max_concentration),
                    self.spline_markers)
    y = (hyp1f1(2, self.dimension + 1, x) /
         (self.dimension * hyp1f1(1, self.dimension, x)))
    return interp1d(y, x, kind='quadratic', assume_sorted=True,
                    bounds_error=False,
                    fill_value=(0, self.max_concentration))
def HermiteIndefiniteIntegral(z, n):
    '''
    Integrate[HermiteH[n, z]/E^z^2, z]
    '''
    if n == 0:
        return 0.5 * np.sqrt(np.pi) * erf(z)
    elif n == 1:
        return -0.5 * np.exp(-z ** 2)
    else:
        return 2 ** n * np.sqrt(np.pi) * \
            (z * hyp1f1(n / 2. + 1 / 2., 3 / 2., -z ** 2) / gamma(1 / 2. - n / 2.) +
             hyp1f1(n / 2., 1 / 2., -z ** 2) / (n * gamma(-n / 2.)))
def WatsonMeanDirDensity(x, k, p):
    Coeff = gamma(p / 2.0) * (gamma((p - 1.0) / 2.0) * np.sqrt(np.pi) /
                              hyp1f1(1.0 / 2.0, p / 2.0, k))
    y = Coeff * np.exp(k * (np.power(x, 2.0))) * np.power(1.0 - x * x, (p - 3.0) / 2.0)
    return y
def dDphi_dz(self, r, phi, phi_q, wavelength):
    """differential contribution to the phase structure function """
    return 4.0 * (wavelength / self.wavelength_reference)**2 * \
        self.C_scatt_0 / self.scatt_alpha * \
        (sps.hyp1f1(-self.scatt_alpha / 2.0, 0.5,
                    -r**2 / (4.0 * self.r_in**2) * np.cos(phi - phi_q)**2) - 1.0)
def kummer_log(a, b, x):
    ## First try using the function in the library.
    ## If it is 0 or inf then we try to use our own implementation with logs.
    ## If it does not converge, then we return None !!
    f = hyp1f1(a, b, x)

    if np.isinf(f) == True:
        # warnings.warn("hyp1f1() is 'inf', trying log version, (a,b,x) = (%f,%f,%f)" %(a,b,x),UserWarning, stacklevel=2)
        f_log = kummer_own_log(a, b, x)
        # print f_log
    elif f == 0:
        # warnings.warn("hyp1f1() is '0', trying log version, (a,b,x) = (%f,%f,%f)" %(a,b,x),UserWarning, stacklevel=2)
        raise RuntimeError('Kummer function is 0. Kappa = %f', "Kummer_is_0", x)
        # f_log = kummer_own_log(a,b,x)
        # TODO: We cannot do negative x, the function is in log
    else:
        f_log = np.log(f)
    # print (a,b,x)
    # print f_log
    f_log = float(f_log)
    return f_log
def mSel(a, mu, s, k, m):
    hyperg = hyp1f1(k + mu, m + 2.0 * mu, s)
    ret = binom(m, k)
    ret /= zSel(a, mu, s)
    ret *= math.exp(gammaln(k + mu) + gammaln(m - k + mu) - gammaln(m + 2.0 * mu))
    ret *= (1.0 - math.exp(-(1.0 - a) * s) * hyperg)
    return ret
def _pdf(self, x, df, nc):
    n = df*1.0
    nc = nc*1.0
    x2 = x*x
    ncx2 = nc*nc*x2
    fac1 = n + x2
    trm1 = n/2.*np.log(n) + special.gammaln(n+1)
    trm1 -= n*np.log(2) + nc*nc/2. + (n/2.)*np.log(fac1) + special.gammaln(n/2.)
    Px = np.exp(trm1)
    valF = ncx2 / (2*fac1)
    trm1 = np.sqrt(2)*nc*x*special.hyp1f1(n/2+1, 1.5, valF)
    trm1 /= (fac1*special.gamma((n+1)/2))
    trm2 = special.hyp1f1((n+1)/2, 0.5, valF)
    trm2 /= (np.sqrt(fac1)*special.gamma(n/2+1))
    Px *= trm1 + trm2
    return Px
def P1(n):
    p = 0.0
    for m in range(n + 1):
        p += sp.comb(n, m) * rb**(n-m) * (R/Sb)**m * fracrise(a, b, m) * \
             sp.hyp1f1(a+m, b+m, w0)
    return p / math.factorial(n)
def curve_phantom(curve, direction, kappa, px=(20, 20, 20), vox_dim=(100, 100, 100),
                  cyl_rad=0.2, max_l=6, dtype=np.float32):
    # Setup grid
    xyz = np.array(np.meshgrid(
        np.linspace(-(px[0]/2)*vox_dim[0], (px[0]/2)*vox_dim[0], px[0]),
        np.linspace(-(px[1]/2)*vox_dim[1], (px[1]/2)*vox_dim[1], px[1]),
        np.linspace(-(px[2]/2)*vox_dim[2], (px[2]/2)*vox_dim[2], px[2])))
    xyz = np.moveaxis(xyz, [0, 1], [-1, 1])

    # Calculate directions and kappas
    diff = xyz[:, :, :, None, :] - curve
    dist = np.linalg.norm(diff, axis=-1)
    min_dist = np.min(dist, axis=-1)  # min dist between grid points and curve
    t_index = np.argmin(dist, axis=-1)
    min_dir = direction[t_index]  # directions for closest point on curve
    min_k = kappa[t_index]  # kappas

    # Calculate watson
    spang_shape = xyz.shape[0:-1] + (util.maxl2maxj(max_l),)
    spang1 = spang.Spang(np.zeros(spang_shape), vox_dim=vox_dim)
    dot = np.einsum('ijkl,ml->ijkm', min_dir, spang1.sphere.vertices)
    k = min_k[..., None]
    watson = np.exp(k*dot**2)/(4*np.pi*hyp1f1(0.5, 1.5, k))
    watson_sh = np.einsum('ijkl,lm', watson, spang1.B)
    watson_sh = watson_sh/watson_sh[..., None, 0]  # Normalize

    # Cylinder mask
    mask = min_dist < cyl_rad

    spang1.f = np.einsum('ijkl,ijk->ijkl', watson_sh, mask).astype(dtype)
    return spang1
def beta_poisson_loglh(data, lmbd, phi, N):
    '''
    Calculate log likelihood of beta-Poisson parameters given data.

    Parameters
    ----------
    data : list
        sample dataset
    lmbd : float
    phi : float
    N : float

    Returns
    -------
    llh : float
        log likelihood of parameters given data
    '''
    llh = 0
    for x in data:
        llh += x*np.log(N) - np.real(spsp.loggamma(x+1)) + np.real(spsp.loggamma(phi*N)) + \
               np.real(spsp.loggamma(x+phi*lmbd)) - np.real(spsp.loggamma(x+phi*N)) - \
               np.real(spsp.loggamma(phi*lmbd))
        if x+phi*N < 50:
            llh += np.log(spsp.hyp1f1(x+phi*lmbd, x+phi*N, -N))
        else:
            llh += np.log(float(hyp1f1_alt(x+phi*lmbd, x+phi*N, -N)))
    return llh
def watson_girdle_logp(x, lon_lat, kappa):
    if x[1] < -90. or x[1] > 90.:
        raise ZeroProbability
        return -np.inf
    if np.abs(kappa) < eps:
        return np.log(1. / 4. / np.pi)

    mu = np.array([np.cos(lon_lat[1] * d2r) * np.cos(lon_lat[0] * d2r),
                   np.cos(lon_lat[1] * d2r) * np.sin(lon_lat[0] * d2r),
                   np.sin(lon_lat[1] * d2r)])
    test_point = np.transpose(np.array([np.cos(x[1] * d2r) * np.cos(x[0] * d2r),
                                        np.cos(x[1] * d2r) * np.sin(x[0] * d2r),
                                        np.sin(x[1] * d2r)]))

    normalization = 1. / sp.hyp1f1(0.5, 1.5, kappa) / 4. / np.pi
    logp_elem = np.log(normalization) + \
        kappa * (np.dot(test_point, mu)**2.)
    logp = logp_elem.sum()
    return logp
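# Sanity check of the Watson normalization used above (my addition, not part
# of the original snippet): integrating exp(kappa*cos(theta)^2) over the unit
# sphere gives 4*pi*1F1(1/2, 3/2, kappa), so a density normalized by
# 1/(4*pi*1F1) integrates to one. The kappa value is an arbitrary example.
import numpy as np
from scipy.integrate import quad
from scipy.special import hyp1f1

kappa = -0.78  # girdle-type concentration for negative kappa
integral, _ = quad(lambda theta: np.exp(kappa * np.cos(theta)**2) * np.sin(theta) / 2.0,
                   0.0, np.pi)
assert np.isclose(integral, hyp1f1(0.5, 1.5, kappa))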
def test_geometric_convergence(self, a, b, x, result):
    # Test the region where we are relying on the ratio of
    #
    # (|a| + 1) * |x| / |b|
    #
    # being small. Desired answers computed using Mpmath.
    assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
def get_cp(Ndim, kappa):
    gammaValue = gamma(float(Ndim) / 2)
    M = hyp1f1(0.5, float(Ndim) / 2, kappa)  # Confluent hypergeometric function 1F1(a, b; x)
    cp = gammaValue / (np.power(2 * np.pi, float(Ndim) / 2) * M)
    return cp
def boys(n, t):
    """Boys function for the calculation of coulombic integrals.

    Parameters
    ----------
    n : int
        Order of boys function
    t : float
        Variable for boys function.

    Raises
    ------
    TypeError
        If boys function order is not an integer.
    ValueError
        If boys function order n is not a non-negative number.
    """
    if not isinstance(n, int):
        raise TypeError("Boys function order n must be an integer")
    if n < 0:
        raise ValueError(
            "Boys function order n must be a non-negative number")
    if not isinstance(t, float):
        raise TypeError("Boys function variable t must be integer or float")
    return sc.hyp1f1(n + 0.5, n + 1.5, -t) / (2.0 * n + 1.0)
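# Closed-form cross-check (my addition, not part of the original snippet):
# for n = 0 the Boys function reduces to F_0(t) = 0.5*sqrt(pi/t)*erf(sqrt(t)).
# Assumes the `boys` function above is in scope and scipy.special is imported
# as `sc`, as in that snippet.
import numpy as np
import scipy.special as sc

t = 0.8
assert np.isclose(boys(0, t), 0.5 * np.sqrt(np.pi / t) * sc.erf(np.sqrt(t)))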
def KLD2(x, mu_a_x, mu_a_y, d_r_a, sigma_d_r_a):
    #sigma_d_r_a = .01
    d = 0
    for i in np.arange(start=0, stop=mu_a_x.__len__(), step=1):
        d = d + (-2*d_r_a[i]*np.sqrt(x[2]**2*np.pi/2) *
                 hyp1f1(-1/2, 1, -np.linalg.norm(np.array([x[0]-mu_a_x[i], x[1]-mu_a_y[i]]))**2/(2*x[2]**2)) +
                 np.linalg.norm(np.array([x[0]-mu_a_x[i], x[1]-mu_a_y[i]]))**2 + 2*x[2]**2)/(2*sigma_d_r_a[i])
    return d
def KLD3(x, mu_m_x, mu_m_y, sigma_m, d_r_m, sigma_d_r_m):
    d = 0
    for i in np.arange(start=0, stop=mu_m_x.__len__(), step=1):
        d = d + (-2*d_r_m[i]*np.sqrt((x[2]**2+sigma_m[i])*np.pi/2) *
                 hyp1f1(-1/2, 1, -np.linalg.norm(np.array([x[0]-mu_m_x[i], x[1]-mu_m_y[i]]))**2/(2*(x[2]**2+sigma_m[i]))) +
                 np.linalg.norm(np.array([x[0]-mu_m_x[i], x[1]-mu_m_y[i]]))**2 + 2*x[2]**2)/(2*sigma_d_r_m[i])
    return d
def KLD3_1(x, mu_m_x, mu_m_y, sigma_m, d_r_m):
    sigma_d_r_m = .01
    return (-2 * d_r_m * np.sqrt((x[2]**2 + sigma_m) * np.pi / 2) *
            hyp1f1(-1 / 2, 1,
                   -np.linalg.norm(np.array([x[0] - mu_m_x, x[1] - mu_m_y]))**2 /
                   (2 * (x[2]**2 + sigma_m))) +
            np.linalg.norm(np.array([x[0] - mu_m_x, x[1] - mu_m_y]))**2 +
            2 * x[2]**2) / (2 * sigma_d_r_m)
def spline(self):
    """Defines a cubic spline to fit concentration parameter."""
    assert self.dimension is not None, (
        "You need to specify dimension. This can be done at object "
        "instantiation or it can be inferred when using the fit function.")
    x = np.logspace(-3, np.log10(self.max_concentration),
                    self.spline_markers)
    y = hyp1f1(2, self.dimension + 1, x) / (self.dimension *
                                            hyp1f1(1, self.dimension, x))
    return interp1d(
        y,
        x,
        kind="quadratic",
        assume_sorted=True,
        bounds_error=False,
        fill_value=(0, self.max_concentration),
    )
def check_Kummer(Ndim, kappa):
    # This function checks if the Kummer function will go to inf.
    # Returns 1 if Kummer is stable, 0 if unstable.
    f = hyp1f1(0.5, float(Ndim) / 2, kappa)
    if np.isinf(f) == False:
        return 1
    else:
        return 0
def compute_characteristic_function(self, t):
    """
    The characteristic function of the beta distribution is Kummer's
    confluent hypergeometric function, which is implemented by
    scipy.special.hyp1f1. See:
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.hyp1f1.html

    If Phi_X(t) is the characteristic function of X, then for any constant c,
    the characteristic function of cX is Phi_cX(t) = Phi_X(ct)
    """
    return hyp1f1(self.alpha, self.beta, self.c * t)
def test_hyp1f1_complex(self):
    assert_mpmath_equal(
        _inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)),
        _exception_to_nan(
            lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
        [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()],
        n=2000)
def boys_function(n, x):
    """
    the Boys Function

    :param n: order of the boys function
    :param x: argument
    :return:
    """
    return hyp1f1(n + 0.5, n + 1.5, -x) / (2.0 * n + 1.0)
def radial_function(r, n, l, zeta):
    result = 0.0
    for i in range(n + 1):
        result += \
            (- 1)**i * utils.binomial(n + 0.5, n - i) / \
            factorial(i) * 2**(0.5 * l + i - 0.5) * \
            special.gamma(0.5 * l + i + 1.5) * \
            hyp1f1((2 * i + l + 3) * 0.5, l + 1.5,
                   - 2 * np.pi**2 * r**2 * zeta)
    result = result * 4 * (-1)**(0.5 * l) * zeta**(0.5 * l + 1.5) * \
        np.pi**(l + 1.5) * r**l / special.gamma(l + 1.5) * spf.kappa(zeta, n)
    return result
def _rhoint(self, phi, r, ra):
    """ Dimensionless density as a function of phi and r (scalars only) """

    # Isotropic case first
    rho = exp(phi)*gammainc(self.g + 1.5, phi)

    # Add anisotropy
    if (self.ra < self.ramax) & (phi > 0) & (r > 0):
        p, g = r/ra, self.g
        p2 = p**2
        g3, g5, fp2 = g+1.5, g+2.5, phi*p2

        rho += p2*phi**(g+1.5)*hyp1f1(1, g5, -fp2)/gamma(g5)
        rho /= (1+p2)
    return rho
def _rhoint(self, phi, r, ra):
    """ Dimensionless density integral as a function of phi and r (scalar) """

    # Isotropic case first (equation 8, GZ15)
    rhoint = exp(phi)*gammainc(self.g + 1.5, phi)

    # Anisotropic case, add r-dependent part explicitly (equation 11, GZ15)
    if (self.ra < self.ramax) and (phi > 0) and (r > 0):
        p, g = r/ra, self.g
        p2 = p**2
        g3, g5, fp2 = g+1.5, g+2.5, phi*p2

        func = hyp1f1(1, g5, -fp2) if fp2 < self.max_arg_exp else g3/fp2
        rhoint += p2*phi**(g+1.5)*func/gamma(g5)
        rhoint /= (1+p2)
    return rhoint
def bingham_pdf(fit):
    """
    From the *Encyclopedia of Paleomagnetism*

    From Onstott, 1980:
    Vector resultant: R is analogous to eigenvectors of T.
    Eigenvalues are analogous to |R|/N.
    """
    # Uses eigenvectors of the covariance matrix
    e = fit.hyperbolic_axes  # singular_values
    #e = sampling_covariance(fit) # not sure
    e = e[2]**2/e

    kappa = (e-e[2])[:-1]
    kappa /= kappa[-1]
    F = N.sqrt(N.pi)*confluent_hypergeometric_function(*kappa)

    ax = fit.axes
    Z = 1/e
    M = ax
    F = 1/hyp1f1(*1/Z)

    def pdf(coords):
        lon, lat = coords
        I = lat
        D = lon  # + N.pi/2
        #D,I = _rotate(N.degrees(D),N.degrees(I),90)
        # Bingham is given in spherical coordinates of inclination
        # and declination in radians
        # From USGS bingham statistics reference
        xhat = N.array(sph2cart(lon, lat)).T
        #return F*expm(dot(xhat.T, M, N.diag(Z), M.T, xhat))
        return 1/(F*N.exp(dot(xhat.T, M, N.diag(Z), M.T, xhat)))
    return pdf
def R_nl(self, r):
    """ The radial part of the wavefunction, R_nl(r).

    Quantum Mechanics of one and two electron atoms,
    H. A. Bethe and E. E. Salpeter 1957

    r in units of the Bohr radius, a_0.
    quantum numbers: n, l
    """
    rho = 1.0 * r / self.n  # re-scaled rho by 1/2 from hydrogen
    epsilon = 1.0 / self.n

    c1 = np.sqrt(factorial(self.n + self.l) /
                 (2.0 * self.n * factorial(self.n - self.l - 1))) * \
        1.0 / (factorial(2.0 * self.l + 1.0))
    c2 = np.power(2.0, -3.0/2.0)  # re-normalise again for Ps

    return c1 * c2 * (2 * epsilon)**(3.0/2.0) * np.exp(-0.5 * rho) * rho**self.l * \
        hyp1f1(-(self.n - self.l - 1), 2 * self.l + 2, rho)
def watson_girdle_logp(x, lon_lat, kappa):
    if x[1] < -90. or x[1] > 90.:
        raise ZeroProbability
        return -np.inf
    if np.abs(kappa) < eps:
        return np.log(1. / 4. / np.pi)

    mu = np.array([np.cos(lon_lat[1] * d2r) * np.cos(lon_lat[0] * d2r),
                   np.cos(lon_lat[1] * d2r) * np.sin(lon_lat[0] * d2r),
                   np.sin(lon_lat[1] * d2r)])
    test_point = np.transpose(np.array([np.cos(x[1] * d2r) * np.cos(x[0] * d2r),
                                        np.cos(x[1] * d2r) * np.sin(x[0] * d2r),
                                        np.sin(x[1] * d2r)]))

    normalization = 1. / sp.hyp1f1(0.5, 1.5, kappa) / 4. / np.pi
    logp_elem = np.log(normalization) + \
        kappa * (np.dot(test_point, mu)**2.)
    logp = logp_elem.sum()
    return logp
def zSel(a, mu, s):
    hyperg = hyp1f1(mu, 2.0 * mu, s)
    ret = math.exp(2.0 * gammaln(mu) - gammaln(2.0 * mu)) * \
        (1.0 - math.exp(-(1.0 - a) * s) * hyperg)
    return ret
def exp_inc_gamma(m, z):
    return 1.0/(2*m+1) * hyp1f1(1, m+1.5, -z)
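# Closed-form spot check (my addition, not part of the original snippet):
# by Kummer's transformation 1F1(1; 3/2; -z) = exp(-z)*1F1(1/2; 3/2; z), so
# for m = 0 the expression above equals exp(-z)*sqrt(pi)*erfi(sqrt(z))/(2*sqrt(z)).
# Assumes exp_inc_gamma above is in scope with hyp1f1 from scipy.special.
import numpy as np
from scipy.special import erfi, hyp1f1

z = 1.0
lhs = exp_inc_gamma(0, z)
rhs = np.exp(-z) * np.sqrt(np.pi) * erfi(np.sqrt(z)) / (2.0 * np.sqrt(z))
assert np.isclose(lhs, rhs)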
def psi_l(l, b):
    n = l//2
    v = (-b)**n
    v *= gamma(n + 1./2) / gamma(2*n + 3./2)
    v *= hyp1f1(n + 1./2, 2*n + 3./2, -b)
    return v
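# Spot check for the l = 0 term (my addition, not part of the original
# snippet): with n = 0 the expression reduces to
# Gamma(1/2)/Gamma(3/2) * 1F1(1/2, 3/2, -b) = sqrt(pi)*erf(sqrt(b))/sqrt(b).
# Assumes psi_l above is in scope with gamma and hyp1f1 taken from scipy.special.
import numpy as np
from scipy.special import erf, gamma, hyp1f1

b = 2.0
assert np.isclose(psi_l(0, b), np.sqrt(np.pi) * erf(np.sqrt(b)) / np.sqrt(b))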
def test_hyp1f1_complex(self):
    assert_mpmath_equal(_inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)),
                        _exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
                        [Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()],
                        n=2000)
def boys(n, T):
    """Boys function F_n(T), expressed via the confluent hypergeometric function."""
    return hyp1f1(n+0.5, n+1.5, -T)/(2.0*n+1.0)
    db = pymc.database.pickle.load(dbname)
else:
    pymc.MAP(model).fit()
    mcmc.sample(10000)
    mcmc.db.close()
    db = pymc.database.pickle.load(dbname)

#pymc.Matplot.trace(db.trace('concentration'))
concentration_trace = db.trace('concentration')[:]
plt.hist( concentration_trace )
plt.show()

ax = plt.axes( projection=ccrs.Mollweide(0.) )
ax.set_global()
interval = 1
ax.scatter(lon_lat[::interval,0], lon_lat[::interval,1],
           transform=ccrs.PlateCarree())
plt.show()

n_samples = len(lon_lat[:,1])
uniform_x = np.linspace(0., np.pi, 100)
uniform_y = np.sin(uniform_x)/2.

kappa = -0.78  # hidden_concentration
watson_x = np.linspace(0., np.pi, 100)
watson_y = 1./sp.hyp1f1(0.5, 1.5, kappa) * \
    np.exp(kappa * np.cos(watson_x)**2.)*np.sin(watson_x)/2.

bins = np.linspace(-90., 90., 9)
plt.hist( lon_lat[:,1], bins=bins,
          weights=np.ones_like(lon_lat[:,1])/n_samples, normed=False)
plt.plot( 90.-uniform_x*180./np.pi, uniform_y, 'r', lw=3)
plt.plot( 90.-watson_x*180./np.pi, watson_y, 'g', lw=3)
plt.show()
def diallelic_d_helper(n0, n1, g):
    if not g:
        return n0 / float(n0 + n1)
    else:
        return (special.hyp1f1(n0, n0 + n1, g) - 1) / math.expm1(g)
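# Continuity check (my addition, not part of the original snippet): as g -> 0
# the hyp1f1-based branch tends to n0/(n0 + n1), so the two branches above
# agree near g = 0. Assumes diallelic_d_helper above is in scope with
# scipy.special imported as `special` and the math module available.
import math
import numpy as np
from scipy import special

n0, n1 = 3, 5
assert np.isclose(diallelic_d_helper(n0, n1, 1e-8),
                  diallelic_d_helper(n0, n1, 0.0), atol=1e-6)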
def WatsonMeanDirDensity(x, k, p):
    Coeff = special.gamma(p/2.0) * (special.gamma((p - 1.0) / 2.0) *
                                    math.sqrt(pi) * hyp1f1(1.0/2.0, p/2.0, k))**(-1.0)
    y = Coeff*np.exp(k*x**2.0)*(1.0-x**2.0)**((p-3.0)/2.0)
    return y
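# Normalization check for the variant directly above (my addition, not part
# of the original snippet): with the reciprocal 1F1-based constant, the
# density should integrate to one over x in [-1, 1]. The k and p values are
# arbitrary examples; assumes this WatsonMeanDirDensity is in scope with
# numpy, math, pi, hyp1f1 and scipy.special available as in that snippet.
import numpy as np
from scipy.integrate import quad

k, p = 2.0, 3.0
total, _ = quad(lambda x: WatsonMeanDirDensity(x, k, p), -1.0, 1.0)
assert np.isclose(total, 1.0)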
def WatsonMeanDirDensity(x, k, p):
    Coeff = gamma(p/2.0) * (gamma((p - 1.0) / 2.0) * np.sqrt(np.pi) /
                            hyp1f1(1.0/2.0, p/2.0, k))
    y = Coeff*np.exp(k*(np.power(x, 2.0)))*np.power(1.0-x*x, (p-3.0)/2.0)
    return y
def erfi(x):
    """ Why does scipy.special not include this? """
    return 2 * x * special.hyp1f1(0.5, 1.5, x*x) / math.sqrt(math.pi)
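# Cross-check (my addition, not part of the original snippet): current SciPy
# does ship scipy.special.erfi, so the hyp1f1-based expression above can be
# validated directly against it. Assumes the erfi function above is in scope.
import math
import numpy as np
from scipy import special

for x in (0.3, 1.0, 2.5):
    assert np.isclose(erfi(x), special.erfi(x))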
def pr(u, x):
    if u == 0:
        out = 1.0 * np.exp(-x)
    else:
        out = 1.0 * x * np.exp(2 * -x) * (2 ** -u) * spc.hyp1f1(u + 1, 2, x)
    return out
def inc_gamma(m, z):
    return 1.0/(2*m+1) * hyp1f1(m+0.5, m+1.5, -z)
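# Relation check (my addition, not part of the original snippet): since the
# lower incomplete gamma satisfies gamma_lower(a, z) = z**a/a * 1F1(a, a+1, -z),
# the expression above equals 0.5 * z**-(m+0.5) * gamma_lower(m+0.5, z).
# SciPy's gammainc is the regularized lower incomplete gamma, so
# gamma_lower = gammainc * gamma. Assumes inc_gamma above is in scope with
# hyp1f1 from scipy.special.
import numpy as np
from scipy.special import gamma, gammainc, hyp1f1

m, z = 2, 1.7
expected = 0.5 * z**(-(m + 0.5)) * gammainc(m + 0.5, z) * gamma(m + 0.5)
assert np.isclose(inc_gamma(m, z), expected)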
def runTest(self):
    self.assertAlmostEqual(hyp1f1(1, 2, 3), 6.361845641062556)
    self.assertAlmostEqual(hyp2f1(1, 2, 3, -4.), 0.2988202609457375)
    self.assertAlmostEqual(hyp2f1(1, 2, 3, -0.5), 0.7562791351346849)
    self.assertAlmostEqual(hyp2f1(1, 2, 3, 0.7), 2.0570318543915747)
    self.assertAlmostEqual(hyp2f1(1, 0.3, 1.3, -3.0), 0.7113010112875268)
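# Reference-value check (my addition, not part of the original snippet): the
# first expected value follows from the closed form 1F1(1; 2; z) = (exp(z) - 1)/z,
# so hyp1f1(1, 2, 3) should equal (e**3 - 1)/3.
import numpy as np
from scipy.special import hyp1f1

assert np.isclose(hyp1f1(1, 2, 3), (np.exp(3) - 1) / 3)
assert np.isclose((np.exp(3) - 1) / 3, 6.361845641062556)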