def pdf(a, b, d, l, m, x):
    g = math.sqrt(a**2 - b**2)
    ff = math.pow(g / d, l) * math.exp(b * (x - m)) / (math.sqrt(2 * math.pi) * sp.kv(l, d * g))
    ff *= sp.kv(l - 1 / 2, a * math.sqrt(d**2 + (x - m)**2)) / math.pow(
        math.sqrt(d**2 + (x - m)**2) / a, 1 / 2 - l)
    return ff
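# Example evaluation of pdf (a sketch; the assumption is that `math` and `sp` in the
# snippet above are the standard math module and scipy.special). Parameter values are
# illustrative only and chosen so that a > |b|, which keeps the square root real.
import math
import scipy.special as sp

val = pdf(a=2.0, b=0.5, d=1.0, l=1.0, m=0.0, x=0.3)  # a positive density value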
def dark_heff(self, td):
    """
    Compute the effective d.o.f. in entropy of the dark sector.

    Parameters
    ----------
    td: float
        The dark sector temperature.
    """
    xe = self.m_eta() / td
    xd = self.m_del() / td
    ge = 1.0
    gd = self.g_del()
    pre = 45.0 / (4.0 * np.pi ** 4)
    pree = pre * ge * xe ** 3
    pred = pre * gd * xd ** 3
    bess_sum_e = sum(
        1.0 / (1.0 + k) * kv(3, (1.0 + k) * xe) for k in range(5)
    )
    bess_sum_d = kv(3, xd)
    return pree * bess_sum_e + pred * bess_sum_d
def matern_full(d, params):
    '''Evaluate Matern covariance function.

    Inputs:
        d -- float or array, shape (N, N), distance matrix
        params -- array, shape (3,), hyperparameter vector -> [amplitude, lengthscale, order]
    Result:
        float or array, shape (N, N), matern function corresponding to each distance.
    '''
    s, rho, v = params
    mask = np.where(d == 0, False, True)  # Avoids warnings
    out = np.zeros_like(d)
    try:
        out[mask] = s**2 * (2**(1 - v)) * (
            np.sqrt(2 * v) * d[mask] / rho)**v * spec.kv(
                v, np.sqrt(2 * v) * d[mask] / rho) / spec.gamma(v)
        out[~mask] = s**2
    except TypeError:
        if d == 0:
            out = s**2
        else:
            out = s**2 * (2**(1 - v)) * (np.sqrt(2 * v) * d / rho)**v * spec.kv(
                v, np.sqrt(2 * v) * d / rho) / spec.gamma(v)
    return out
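# Minimal usage sketch for matern_full (assumption: the `np` and `spec` aliases used in
# the function refer to numpy and scipy.special). The zero-distance entries fall back
# to the amplitude**2 branch.
import numpy as np
import scipy.special as spec

dists = np.array([[0.0, 1.0], [1.0, 0.0]])
params = np.array([1.0, 2.0, 1.5])   # [amplitude, lengthscale, order]
cov = matern_full(dists, params)     # diagonal entries equal amplitude**2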
def test_bessel(self):
    n = 1000
    ds = math.pi/n
    ib0 = 0
    ib1 = 0
    x = 1.4
    for j in range(0, n):
        tt = (j - 0.5)*ds
        ib0 += math.exp(x*math.cos(tt))*ds
        ib1 += math.exp(x*math.cos(tt))*ds*math.cos(tt)
    ib0 = ib0/math.pi
    ib1 = ib1/math.pi
    kb0 = 0
    kb1 = 0
    ds = 2*math.pi/n
    for j in range(0, n):
        tt = (j - 0.5)*ds
        ch = (math.exp(tt) + math.exp(-tt))/2
        kb1 += math.exp(-x*ch)*ch*ds
        kb0 += math.exp(-x*ch)*ds
    i0 = sp.iv(0, x)
    i1 = sp.iv(1, x)
    k0 = sp.kv(0, x)
    k1 = sp.kv(1, x)
    self.assertTrue(
        (abs(ib0 - i0) < 0.005) & (abs(ib1 - i1) < 0.005)
        & (abs(kb0 - k0) < 0.005) & (abs(kb1 - k1) < 0.005),
        msg='Deltas I0, I1, K0, K1: ' + str(abs(i0 - ib0)) + ', '
            + str(abs(i1 - ib1)) + ', ' + str(abs(k0 - kb0)) + ', '
            + str(abs(k1 - kb1)))
def cond_int_var(self, vovn, zhat):
    m1 = self.condvar_m1(zhat, vovn)
    m2 = self.condvar_m2(zhat, vovn)
    m1m2_ratio = m2 / m1**2
    m1 *= np.exp(zhat * vovn)

    w2 = np.ones_like(zhat)

    if self.dist.lower() == 'm1':
        r_var = m1
        r_vol = np.sqrt(r_var)
    elif self.dist.lower() == 'ln':
        r_var = m1 / np.sqrt(np.sqrt(m1m2_ratio))
        r_vol = np.sqrt(r_var)
    elif self.dist.lower() == 'ig':  # inverse Gaussian
        lam = m1 / (m1m2_ratio - 1.0)
        r_var = 1 - 1 / (8 * lam) * (1 - 9 / (2 * 8 * lam) * (1 - 25 / (6 * 8 * lam)))
        r_var[lam < 100] = spsp.kv(0, lam[lam < 100]) / spsp.kv(-0.5, lam[lam < 100])
        r_var = m1 * r_var**2
        r_vol = np.sqrt(r_var)
    else:
        pass

    assert r_var.shape == w2.shape
    return r_var, r_vol, w2
def psynch(gamma, nu, B):
    """
    Equation 13 from Chiaberge & Ghisellini. This is the single particle
    synchrotron emissivity j_nu averaged over an isotropic distribution
    of pitch angles.

    Parameters:
        gamma   array-like
                Lorentz factors of electrons
        nu      float
                frequency in Hz
        B       float
                magnetic field in Gauss
    """
    nu_B = unit.e * B / 2.0 / np.pi / unit.melec / unit.c
    t = nu / (3.0 * gamma * gamma * nu_B)
    x = 3.0 * np.sqrt(3.0) / np.pi * unit.thomson * unit.c * B * B / 8.0 / np.pi
    x *= t * t / nu_B

    # get the modified Bessel functions
    K13 = special.kv(1.0 / 3.0, t)
    K43 = special.kv(4.0 / 3.0, t)
    K43sq = K43 * K43
    K13sq = K13 * K13
    kterm = (K13 * K43) - (0.6 * t * (K43sq - K13sq))
    return x * kterm
def constant_charge_single_energy(phi0, r1, kappa, epsilon):
    N = 20  # Number of terms in expansion

    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    K1 = special.kv(index2, kappa * r1)
    K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]

    k1 = special.kv(index, kappa * r1) * sqrt(pi / (2 * kappa * r1))
    k1p = -sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
        index, kappa * r1) + sqrt(pi / (2 * kappa * r1)) * K1p

    a0_inf = -phi0 / (epsilon * kappa * k1p[0])
    U1_inf = a0_inf * k1[0]

    C1 = 2 * pi * phi0 * r1 * r1
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E_inter = C0 * C1 * U1_inf

    return E_inter
def magnetic_field(self, xy, field="secondary"):
    """Magnetic field due to a magnetic dipole over a half space

    The analytic expression is only valid for a source and receiver at the
    surface of the earth. For arbitrary source and receiver locations above
    the earth, use the layered solution.

    Parameters
    ----------
    xy : numpy.ndarray
        receiver locations of shape (n_locations, 2)
    field : ("secondary", "total")
        Flag for the type of field to return.
    """
    sig = self.sigma_hat  # (n_freq, )
    f = self.frequency
    w = 2*np.pi*f
    k = np.sqrt(-1j*w*mu_0*sig)[:, None]  # This will get it to broadcast over locations
    dxy = xy[:, :2] - self.location[:2]
    r = np.linalg.norm(dxy, axis=-1)
    x = dxy[:, 0]
    y = dxy[:, 1]

    em_x = em_y = em_z = 0
    src_x, src_y, src_z = self.orientation
    # Z component of source
    alpha = 1j*k*r/2.
    IK1 = iv(1, alpha)*kv(1, alpha)
    IK2 = iv(2, alpha)*kv(2, alpha)
    if src_z != 0.0:
        em_z += src_z*2.0/(k**2*r**5)*(9 - (9 + 9*1j*k*r - 4*k**2*r**2 - 1j*k**3*r**3)*np.exp(-1j*k*r))
        Hr = (k**2/r)*(IK1 - IK2)
        angle = np.arctan2(y, x)
        em_x += src_z*np.cos(angle)*Hr
        em_y += src_z*np.sin(angle)*Hr

    if src_x != 0.0 or src_y != 0.0:
        # X component of source
        phi = 2/(k**2*r**4)*(3 + k**2*r**2 - (3 + 3j*k*r - k**2*r**2)*np.exp(-1j*k*r))
        dphi_dr = 2/(k**2*r**5)*(-2*k**2*r**2 - 12 + (-1j*k**3*r**3 - 5*k**2*r**2 + 12j*k*r + 12)*np.exp(-1j*k*r))
        if src_x != 0.0:
            em_x += src_x*(-1.0/r**3)*(y**2*phi + x**2*r*dphi_dr)
            em_y += src_x*(1.0/r**3)*x*y*(phi - r*dphi_dr)
            em_z -= src_x*(k**2*x/r**2)*(IK1 - IK2)
        # Y component of source
        if src_y != 0.0:
            em_x += src_y*(1.0/r**3)*x*y*(phi - r*dphi_dr)
            em_y += src_y*(-1.0/r**3)*(x**2*phi + y**2*r*dphi_dr)
            em_z -= src_y*(k**2*y/r**2)*(IK1 - IK2)

    if field == "secondary":
        # subtract out primary field from above
        mdotr = src_x*x + src_y*y  # + m[2]*(z=0)

        em_x -= 3*x*mdotr/r**5 - src_x/r**3
        em_y -= 3*y*mdotr/r**5 - src_y/r**3
        em_z -= -src_z/r**3  # + 3*(z=0)*mdotr/r**5

    return self.moment/(4*np.pi)*np.stack((em_x, em_y, em_z), axis=-1)
def __call__(self, X, Y=None, eval_gradient=False):
    from scipy.spatial.distance import pdist, cdist, squareform
    from scipy import special

    X = np.atleast_2d(X)
    if Y is None:
        dists = pdist(X, metric='mahalanobis', VI=self.invLam)
        Filter = (dists != 0.)
        K = np.zeros_like(dists)
        K[Filter] = dists[Filter]**(5./6.) * special.kv(5./6., 2*np.pi * dists[Filter])
        lim0 = special.gamma(5./6.) / (2 * ((np.pi)**(5./6.)))
        K = squareform(K)
        np.fill_diagonal(K, lim0)
        K /= lim0
    else:
        if eval_gradient:
            raise ValueError("Gradient can not be evaluated.")
        dists = cdist(X, Y, metric='mahalanobis', VI=self.invLam)
        Filter = (dists != 0.)
        K = np.zeros_like(dists)
        K[Filter] = dists[Filter]**(5./6.) * special.kv(5./6., 2*np.pi * dists[Filter])
        lim0 = special.gamma(5./6.) / (2 * ((np.pi)**(5./6.)))
        if np.sum(Filter) != len(K[0])*len(K[:, 0]):
            K[~Filter] = lim0
        K /= lim0

    if eval_gradient:
        raise ValueError("Gradient can not be evaluated.")
    else:
        return K
def Ep_clad(r, p, phase, pol, l, a, beta, u, w, s, A):
    return (
        1j * A * beta
        * ((a * jv(l, u)) / (w * kv(l, w)))
        * ((1 - s) / 2 * kv(l-1, w * r / a) - (1 + s) / 2 * kv(l+1, w * r / a))
        * np.sin(l * p + pol)
        * np.exp(1j * phase)
    )
def associateLPModeProfiles(modes, indexProfile):
    '''
    Associate the linearly polarized mode profile to the corresponding
    constants found solving the analytical dispersion relation.
    see: "Weakly Guiding Fibers" by D. Gloge in Applied Optics, 1971
    '''
    assert (not modes.profiles)
    R = indexProfile.R
    TH = indexProfile.TH
    a = indexProfile.a

    logger.info('Finding analytical LP mode profiles associated to the propagation constants.')

    for idx in range(modes.number):
        m = modes.m[idx]
        l = modes.l[idx]
        u = modes.u[idx]
        w = modes.w[idx]

        # two pi/2 rotated degenerate modes for m > 0
        if (m, l) in zip(modes.m[:idx], modes.l[:idx]):
            psi = np.pi / 2
        else:
            psi = 0

        # Avoid division by zero in the Bessel function
        R[R < np.finfo(np.float32).eps] = np.finfo(np.float32).eps

        # Non-zero transverse component
        Et = (jv(m, u / a * R) / jv(m, u) * np.cos(m * TH + psi) * (R <= a)
              + kv(m, w / a * R) / kv(m, w) * np.cos(m * TH + psi) * (R > a))

        modes.profiles.append(Et.ravel().astype(np.complex64))
        modes.profiles[-1] = modes.profiles[-1] / np.sqrt(
            np.sum(np.abs(modes.profiles[-1])**2))

    return modes
def T(chi_phot):
    coeff = 1. / (np.pi * np.sqrt(3.) * chi_phot * chi_phot)
    inner = lambda x: integ.quad(
        lambda s: np.sqrt(s) * spe.kv(1./3., 2./3. * s**(3./2.)), x, np.inf)[0]
    return integ.quad(
        lambda chi_ele: coeff * (
            inner(X(chi_phot, chi_ele))
            - (2.0 - chi_phot * np.power(X(chi_phot, chi_ele), 3./2.))
            * spe.kv(2./3., 2./3. * X(chi_phot, chi_ele)**(3./2.))
        ), 0, chi_phot)[0]
def dark_geff(self, td):
    """
    Compute the effective d.o.f. in energy of the dark sector.

    Parameters
    ----------
    td: float
        The dark sector temperature.
    """
    xe = self.m_eta() / td
    xd = self.m_del() / td
    ge = 1.0
    gd = self.g_del()
    pre = 30.0 / (2.0 * np.pi ** 4)
    pree = pre * ge * xe ** 2
    pred = pre * gd * xd ** 2
    bess_sum_e = sum(
        1.0 / (1.0 + k) ** 2
        * ((1.0 + k) * xe * kv(1, (1.0 + k) * xe) + 3.0 * kv(2, (1.0 + k) * xe))
        for k in range(5)
    )
    bess_sum_d = xd * kv(1, xd) + 3.0 * kv(2, xd)
    return pree * bess_sum_e + pred * bess_sum_d
def LPModeProfile(m, psi, u, w, a, npoints, areasize,
                  coordtype='cart', forFFT=0, inf_profile=False):
    '''
    Linearly polarized mode, see:
    "Weakly Guiding Fibers" by D. Gloge in Applied Optics, 1971
    '''
    if (forFFT):
        # If forFFT = 1, the center of the mode is half a pixel shifted
        # for an easier access of the Fourier transform
        # x = -1.*np.arange(npoints/2,-npoints/2,-1)*areasize/npoints+1e-9
        x = np.arange(-npoints / 2, npoints / 2, 1) * areasize / npoints + 1e-9
        # x = np.arange(-npoints/2*areasize/npoints,npoints/2*areasize/npoints,areasize/npoints)
    else:
        x = np.linspace(-areasize / 2, areasize / 2, npoints)

    [X, Y] = np.meshgrid(x, x)
    [TH, R] = cart2pol(X, Y)

    if inf_profile:
        # infinite profile
        Et = jv(m, u / a * R) / jv(m, u) * np.cos(m * TH + psi)
    else:
        # Non-zero transverse component
        Et = (jv(m, u / a * R) / jv(m, u) * np.cos(m * TH + psi) * (R <= a)
              + kv(m, w / a * R) / kv(m, w) * np.cos(m * TH + psi) * (R > a))

    Norm = np.sqrt(np.sum(np.abs(Et)**2))

    return Et / Norm * np.sign(Et[npoints // 2, npoints // 2]), [X, Y]
def DeltaPs(s, rw, constantes, n, qw):
    (k, fi, pin, re, h, u, ct, B0) = constantes
    sn = math.sqrt(s/n)
    a = qw*u/(2*math.pi*k*h)
    b = s*sn*rw*(bessel.iv(1, sn*re)*bessel.kv(1, sn*rw)
                 - bessel.iv(1, sn*rw)*bessel.kv(1, sn*re))
    c = bessel.iv(0, sn*rw)*bessel.kv(1, sn*re) + bessel.iv(1, sn*re)*bessel.kv(0, sn*rw)
    return a*c/b
def tester(r, zeta):
    rin = zeta*r
    A1 = kv(0, rin)/iv(0, rin)
    B2 = iv(0, zeta)/kv(0, zeta)
    temp = (iv(1, zeta) - B2*(A1*r*iv(1, rin) + kv(1, zeta) - r*kv(1, rin))) \
        / (iv(1, zeta) - B2*kv(1, zeta))
    return 1./(1 - temp)
def Hp_clad(r, p, phase, pol, l, omega, a, n_2, u, w, s_2, A):
    return (
        -1j * A * omega * epsilon_0 * n_2 ** 2
        * ((a * jv(l, u)) / (w * kv(l, w)))
        * ((1 - s_2) / 2 * kv(l-1, w * r / a) + (1 + s_2) / 2 * kv(l+1, w * r / a))
        * np.sin(l * p + pol)
        * np.exp(1j * phase)
    )
def constant_potential_twosphere_identical(phi01, phi02, r1, r2, R, kappa, epsilon):
    # From Carnie+Chan 1993
    N = 20  # Number of terms in expansion

    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index = arange(N, dtype=float) + 0.5

    k1 = special.kv(index, kappa*r1)*sqrt(pi/(2*kappa*r1))
    k2 = special.kv(index, kappa*r2)*sqrt(pi/(2*kappa*r2))

    i1 = special.iv(index, kappa*r1)*sqrt(pi/(2*kappa*r1))
    i2 = special.iv(index, kappa*r2)*sqrt(pi/(2*kappa*r2))

    B = zeros((N, N), dtype=float)

    for n in range(N):
        for m in range(N):
            for nu in range(N):
                if n >= nu and m >= nu:
                    g1 = gamma(n - nu + 0.5)
                    g2 = gamma(m - nu + 0.5)
                    g3 = gamma(nu + 0.5)
                    g4 = gamma(m + n - nu + 1.5)
                    f1 = factorial(n + m - nu)
                    f2 = factorial(n - nu)
                    f3 = factorial(m - nu)
                    f4 = factorial(nu)
                    Anm = g1*g2*g3*f1*(n + m - 2*nu + 0.5)/(pi*g4*f2*f3*f4)
                    kB = special.kv(n + m - 2*nu + 0.5, kappa*R)*sqrt(pi/(2*kappa*R))
                    B[n, m] += Anm*kB

    M = zeros((N, N), float)
    for i in range(N):
        for j in range(N):
            M[i, j] = (2*i + 1)*B[i, j]*i1[i]
            if i == j:
                M[i, j] += k1[i]

    RHS = zeros(N)
    RHS[0] = phi01

    a = solve(M, RHS)

    a0 = a[0]

    U = 4*pi*(-pi/2*a0/phi01*1/sinh(kappa*r1) + kappa*r1 + kappa*r1/tanh(kappa*r1))

    # print 'E: %f'%U
    C0 = qe**2*Na*1e-3*1e10/(cal2J*E_0)
    C1 = r1*epsilon*phi01*phi01
    E_inter = U*C1*C0

    return E_inter
def E_zeta(self, r, theta):
    r0_ind = np.where(r <= self.a)
    r1_ind = np.where(r > self.a)
    temp = np.zeros(r.shape, dtype=np.complex128)
    r0, r1 = r[r0_ind], r[r1_ind]
    temp[r0_ind] = jv(self.n, self.u*r0/self.a)
    temp[r1_ind] = jv(self.n, self.u) * \
        kv(self.n, self.w*r1/self.a)/kv(self.n, self.w)
    return temp*np.cos(self.n*theta), temp*np.cos(self.n*theta + pi/2)
def calc_P_clad(l, omega, a, n_2, beta, u, w, s, s_2, A):
    # integrate.quad returns a (value, abserr) tuple; keep only the integral value
    return (
        (np.pi / 4) * omega * epsilon_0 * n_2 ** 2 * beta * np.abs(A) ** 2
        * ((a * jv(l, u)) / (w * kv(l, w))) ** 2
        * ((1 - s) * (1 - s_2)
           * integrate.quad(lambda r: r * kv(l-1, r) ** 2, a, np.inf)[0]
           + (1 + s) * (1 + s_2)
           * integrate.quad(lambda r: r * kv(l+1, r) ** 2, a, np.inf)[0])
    )
def ansInteg():
    a = sp.jv(3, 2.7)**2 - sp.jv(4, 2.7)*sp.jv(2, 2.7)
    j3 = sp.jv(3, 2.7)
    j2 = sp.jv(2, 2.7)
    j4 = sp.jv(4, 2.7)
    k2 = sp.kv(2, 1.2)
    k3 = sp.kv(3, 1.2)
    k4 = sp.kv(4, 1.2)
    b = (j3/k3)**2 * (k4*k2 - k3**2)
def system(vec, V, Delta):
    ru, iu, rw, iw = vec
    u = ru + 1j*iu
    w = rw + 1j*iw
    first = u**2 + w**2 - V**2
    second = jv(0, u)/u/jv(1, u) - (1 - Delta)*kv(0, w)/w/kv(1, w)
    return np.real(first), np.imag(first), np.real(second), np.imag(second)
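# One possible way to drive `system` with a root finder (a sketch; the assumption is
# that jv/kv above come from scipy.special and that fsolve from scipy.optimize is an
# acceptable solver here). The starting guess [Re u, Im u, Re w, Im w] is illustrative.
import numpy as np
from scipy.special import jv, kv
from scipy.optimize import fsolve

V, Delta = 5.0, 0.01
sol = fsolve(system, x0=[2.0, 0.0, 4.0, 0.0], args=(V, Delta))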
def yukawa(self, n_g, l=0, gamma=1e-6):
    """Calculates the radial grid yukawa integral.

    The integral kernel for the Yukawa interaction,

        exp(-gamma |r - r'|) / |r - r'| ,

    is expanded as

        4 pi sum_lm I_(l+0.5)(gamma r_<) K_(l+0.5)(gamma r_>)
                    Y_lm(r^) Y_lm(r'^) / (r r')^0.5 ,

    where I and K are the modified Bessel functions of the first and second
    kind (K is also known as Macdonald function), r_< = min(r, r') and
    r_> = max(r, r').

    We now calculate the integral

        v_l(r) Y_lm(r^) = int dr' n_l(r') Y_lm(r'^)

    with the Yukawa kernel mentioned above. And the output array is 'vr'
    as it is within the Hartree / radial Poisson solver.
    """
    from scipy.special import iv, kv
    vr_g = self.zeros()
    nrdr_g = n_g * self.r_g**1.5 * self.dr_g
    p = 0
    q = 0
    k_rgamma = kv(l + 0.5, self.r_g * gamma)  # K(>)
    i_rgamma = iv(l + 0.5, self.r_g * gamma)  # I(<)
    k_rgamma[0] = kv(l + 0.5, self.r_g[1] * gamma * 1e-5)
    # We have two integrals: one for r< and one for r>
    # This loop-technique helps calculate them at once
    for g_ind in range(len(nrdr_g) - 1, -1, -1):
        dp = k_rgamma[g_ind] * nrdr_g[g_ind]  # r' is r>
        dq = i_rgamma[g_ind] * nrdr_g[g_ind]  # r' is r<
        vr_g[g_ind] = (p + 0.5 * dp) * i_rgamma[g_ind] - \
            (q + 0.5 * dq) * k_rgamma[g_ind]
        p += dp
        q += dq
    vr_g[:] += q * k_rgamma[:]
    vr_g *= 4 * pi
    vr_g[:] *= self.r_g[:]**0.5
    return vr_g
def plot4(V=10, nguess=100, wavelength=10**-6, a=5*10**-6, l=0, eps=10**-4):
    lguess = [0]*nguess
    for i in range(nguess):
        lguess[i] = i*V*1.1/nguess
    lguess.append(V)
    functy = lambda x: (V**2 - x*x)**(1/2)
    functj = lambda x: x*jv(l+1, x)/jv(l, x)
    functk = lambda x: functy(x)*kv(l+1, functy(x))/kv(l, functy(x))
    funct = lambda x: functj(x) - functk(x)
    lroots = [0]*len(lguess)
    for i in range(len(lroots)):
        lroots[i] = fsolve(funct, lguess[i])
    # print(lroots)
    lx = np.linspace(0, V+2, 10000)
    plt.scatter(lx, functj(lx), s=.5)
    plt.plot(lx, functk(lx))
    # plt.plot(lx, funct(lx))
    plt.ylim([0, max(functk(lx))])

    # sorry this bit isn't particularly clean. Had to change strategies. Lazy. Etc.
    def floatequals(a, b, eps=10**-6):
        if type(b) == float:
            b = [b]
        for number in b:
            if abs(a - number) < eps:
                return True
        return False

    roots = []
    for i in range(len(lroots)):
        if lroots[i] < 0:
            continue
        if lroots[i] == lguess[i]:
            continue
        if floatequals(lroots[i], roots, eps=eps):
            continue
        roots.extend(lroots[i])
    print(roots)
    plt.scatter(roots, functj(roots), c='red')
    # plt.show()
    plt.scatter(roots, [functk(x) for x in roots], c='red')
    title = 'Characteristic equation for a fiber optic, recall that Y^2=V^2-X^2'
    plt.title(title, size='small')
    plt.xlabel('X')
    plt.ylabel('X*J_(l+1)(X)/J_l(X) and Y*K_(l+1)(Y)/K_l(Y)')
    plt.savefig('hw9-4.png')
    print('the roots are ' + str(roots))
    print(min(roots))
    print(functk(0))
    plt.show()
    plt.close()
def fdem_hy(self, freq, m=1.):
    r = self.r
    sigma = 1 / self.res
    omega = freq * 2 * np.pi
    k = np.sqrt(-1j * mu_0 * sigma * omega)
    arg = 1.j * k * r / 2
    h_r = -m * k * k / (4 * np.pi * r)
    h_r *= (iv(1, arg) * kv(1, arg) - iv(2, arg) * kv(2, arg))
    h_y = h_r * self.sin_phi
    return h_y
def calc_vert_dist(e_relative):
    G = (e_relative / 2.0) * (gamma_psi**(1.5))
    K13_G = kv(1.0 / 3.0, G)
    K23_G = kv(2.0 / 3.0, G)
    dN_dOmega = (1.33e13) * (E**2) * I * (e_relative**2) * (gamma_psi**2)
    dN_dOmega *= ((K23_G**2) + (((gamma**2) * (psi**2)) / (gamma_psi)) * (K13_G**2))
    return dN_dOmega
def coupling(a1, a2, n1_1, n1_2, n2, V1, V2, u1, u2, d):
    delta_1 = (n1_1**2 - n2**2) / (2 * n1_1**2)
    delta_2 = (n1_2**2 - n2**2) / (2 * n1_2**2)
    w1 = np.sqrt(V1**2 - u1**2)
    w2 = np.sqrt(V2**2 - u2**2)
    first = np.sqrt(2 / (a1 * a2))
    second = (delta_1 * delta_2 / (V1**6 * V2**6))**(1 / 4)
    third = u1 * u2 * kv(0, w1 * d / a1) / (kv(1, w1) * kv(1, w2))
    return first * second * third
def gammaCirc(ni, Re):
    # Hydrodynamic function for a cantilever with circular cross-section
    Re = np.sqrt(Re/2) - 1j*np.sqrt(Re/2)
    num = 4*sx.kv(1, Re)
    den = Re*sx.kv(0, Re)
    return (1 + (num/den))
def radiation_theoric(self, omega, observation_angle):
    gamma = self.Lorentz_factor()
    X = gamma * observation_angle
    y = omega / self.critical_frequency()
    xi = y * 0.5 * np.sqrt((1. + X**2)**3)
    cst = (3. * codata.alpha * (gamma**2) * 1e-3 * 1e-6 * self.I_current()
           * y**2) / (codata.e * 4. * np.pi**2)
    rad = ((1. + X**2)**2) * ((special.kv((2. / 3.), xi))**2
                              + ((X**2) / (1. + X**2)) * (special.kv((2. / 3.), xi))**2)
    return rad * cst
def v_circ_exp(xkpc, param, arrsize=1500.):
    '''
    function for a rotation curve that turns over and declines at large radii
    '''
    # exponential disk model velocity curve (Freeman 1970; Equation 10)
    # v^2 = R^2*!PI*G*nu0*a*(I0K0-I1K1)
    # param = [r0,s0,v0,roff,theta]
    # r0 = 1/a = disk radii
    # R = radial distance
    # roff = offset of velocity curve
    #        from 0 -> might want to set to fixed at 0?)
    # s0 = nu0 = surface density constant (nu(R) = nu0*exp(-aR))
    # v0 is the overall velocity offset

    # G
    G = 6.67408e-11      # m*kg^-1*(m/s)^2
    G = G * 1.989e30     # m*Msol^-1*(m/s)^2
    G = G / 3.0857e19    # kpc*Msol^-1(m/s)^2
    G = G / 1000. / 1000.

    # parameters
    r0 = param[0]
    s0 = 10**np.double(param[1])
    v0 = param[2]
    roff = param[3]
    theta = param[4]

    # evaluate bessel functions (evaluated at 0.5aR; see Freeman70)
    rr = 0.025 * np.arange(arrsize) + 0.001  # set up an array
    # rr = 0.025*np.arange(15000.)+0.001
    temp = (0.5 * (rr) / r0)
    temp[temp > 709.] = 709.
    I0K0 = iv(0, temp) * kv(0, temp)
    I1K1 = iv(1, temp) * kv(1, temp)
    bsl = I0K0 - I1K1

    # velocity curve
    v2a = rr * ((np.pi * G * s0 * bsl) / r0)**0.5
    v2a = v2a * np.sin(np.pi * theta / 180.)

    # reflect the rotation curve
    rr_r = np.append(-np.array(rr), np.array(rr))
    v2_r = np.append(-np.array(v2a), np.array(v2a))
    rrb = np.sort(rr_r)            # rr_r(sort(rr_r))
    v2b = v2_r[np.argsort(rr_r)]   # v2_r(sort(rr_r))

    # regrid back onto kpc scale and velocity offset
    f = scipy.interpolate.interp1d(rrb, v2b, bounds_error=False)
    v2 = f(xkpc - roff) + v0

    return v2
def get_u(u, w, l, V):
    with np.errstate(divide='ignore', invalid='ignore'):
        # Left-hand and right-hand sides of the LP-mode equation
        left = jv(l, u) / (u * jv(l - 1, u))
        right = -kv(l, w) / (w * kv(l - 1, w))

    # Find the indices where the left- and right-hand sides of the LP-mode equation cross
    idx = np.argwhere(np.diff(np.sign(left - right))).flatten()

    # Compute the u values corresponding to those indices
    u_values = get_intersects(u, idx, V)

    return u_values, [left, right], f'l = {l}; {len(u_values)} intersects: {u_values}'
def fdem_hz(self, freq, m=1.):
    r = self.r
    x = self.x
    y = self.y
    sigma = 1 / self.res
    omega = freq * 2 * np.pi
    k = np.sqrt(-1j * mu_0 * sigma * omega)
    arg = 1.j * k * r / 2
    h_z = m * k ** 2 * x / (4 * np.pi * r ** 2)
    h_z *= iv(1, arg) * kv(1, arg) - iv(2, arg) * kv(2, arg)
    return h_z
def AnalyticalSolution(nu, l, c, R):
    # Modified Bessel function of the second kind of real order v:
    from scipy.special import kv
    F = 8.0*(1.0 - nu)*((l**2)/(c**2)) * \
        1.0 / ((4.0 + ((R**2)/(c**2)) +
                ((2.0*R)/c) * kv(0, R/c)/kv(1, R/c)))
    SCF = (3.0 + F) / (1.0 + F)  # stress concentration factor
    return SCF
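# Hypothetical call of AnalyticalSolution (the parameter values below are illustrative
# only, not taken from the original source): returns the stress concentration factor
# for a given Poisson ratio nu, length-scale parameters l and c, and radius R.
scf = AnalyticalSolution(nu=0.3, l=0.1, c=0.1, R=1.0)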
def Psy(a, b, g):
    c = np.abs(a) * np.sqrt(2 + b**2)
    u = b / np.sqrt(2 + b**2)

    value = (c**(g+0.5) * np.exp(np.sign(a)*c) * (1+u)**g) / (np.sqrt(2*np.pi) * g * scps.gamma(g)) * \
        scps.kv(g+0.5, c) * Phi(g, 1-g, 1+g, (1+u)/2, -np.sign(a)*c*(1+u)) - \
        np.sign(a) * (c**(g+0.5) * np.exp(np.sign(a)*c) * (1+u)**(1+g)) / (np.sqrt(2*np.pi) * (g+1) * scps.gamma(g)) * \
        scps.kv(g-0.5, c) * Phi(g+1, 1-g, 2+g, (1+u)/2, -np.sign(a)*c*(1+u)) + \
        np.sign(a) * (c**(g+0.5) * np.exp(np.sign(a)*c) * (1+u)**(1+g)) / (np.sqrt(2*np.pi) * (g+1) * scps.gamma(g)) * \
        scps.kv(g-0.5, c) * Phi(g, 1-g, 1+g, (1+u)/2, -np.sign(a)*c*(1+u))

    return value
def inLaplace_mode2(s, r, ri, p0, pi, c, alpha, G, M11):
    xi = r * (s / c)**0.5
    beta = ri * (s / c)**0.5
    p_2 = -(p0 - pi) / s * kv(0., xi) / kv(0., beta)
    sig_rr_2 = -(p0 - pi) / s * (2. * G * alpha) / M11 * (
        ri / r * kv(1., xi) - ri**2. / r**2 * kv(1., beta)) / (beta * kv(0., beta))
    sig_tt_2 = (p0 - pi) / s * (2. * G * alpha) / M11 * (
        (ri / r * kv(1., xi) - ri**2. / r**2 * kv(1., beta)) / (beta * kv(0., beta))
        + kv(0., xi) / kv(0., beta))
    return [p_2, sig_rr_2, sig_tt_2]
def C(k):
    """@brief Theodorsen's function.
    @param k Reduced frequency.
    @return C Function value, complex type."""
    # Theodorsen's Function
    if k < 0.0:
        raise Exception('Reduced frequency should not be negative.')
    elif k == 0.0:
        return complex(1.0, 0.0)
    else:
        return kv(1, complex(0, k)) / (kv(0, complex(0, k)) + kv(1, complex(0, k)))
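# A minimal sanity check for C(k), assuming `kv` above is scipy.special.kv:
# Theodorsen's function equals 1 at k = 0 and approaches 0.5 as k grows large.
from scipy.special import kv

assert C(0.0) == complex(1.0, 0.0)
print(abs(C(50.0)))  # expect a value close to 0.5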
def atmSF(model, D, m, wlum, zen, r0inmRef):
    """
    create the atmosphere phase structure function
    model = 'Kolm'
          = 'vonK'
    """
    r0a = r0Wz(r0inmRef, zen, wlum)
    L0 = 30  # outer scale in meter, only used when model=vonK

    m0 = np.rint(0.5 * (m + 1) + 1e-5)
    aa = np.arange(1, m + 1)
    x, y = np.meshgrid(aa, aa)

    dr = D / (m - 1)  # frequency resolution in 1/rad
    r = dr * np.sqrt((x - m0)**2 + (y - m0)**2)

    if model == 'Kolm':
        sfa = 6.88 * (r / r0a)**(5 / 3)
    elif model == 'vonK':
        sfa_c = 2 * sp.gamma(11 / 6) / 2**(5 / 6) / np.pi**(8 / 3) * \
            (24 / 5 * sp.gamma(6 / 5))**(5 / 6) * (r0a / L0)**(-5 / 3)
        # modified bessel of 2nd/3rd kind
        sfa_k = sp.kv(5 / 6, (2 * np.pi / L0 * r))
        sfa = sfa_c * (2**(-1 / 6) * sp.gamma(5 / 6) -
                       (2 * np.pi / L0 * r)**(5 / 6) * sfa_k)

        # if we don't do below, everything will be nan after ifft2
        # midp = r.shape[0]/2+1
        # 1e-2 is to avoid x.49999 be rounded to x; cast to int for array indexing
        midp = int(np.rint(0.5 * (r.shape[0] - 1) + 1e-2))
        sfa[midp, midp] = 0  # at this single point, sfa_k=Inf, 0*Inf=Nan;

    return sfa
def MED2(p, T, m, n, dim):
    """ p,T,m,n,dim """
    de = T/50
    ef = 4.5
    normconst = (2./m*k*T)**((dim-1)/2.)/sqrt(pi) * sp.kv((dim+1)/2., (1.*m)/(k*T))*Gamma((dim/2.))
    energy = zeros([1, n])
    enrange = arange(m+0.1, ef, de)
    Juttner = zeros([1, len(enrange)])
    m2 = m**2
    dim = double(dim)
    for i in range(n):
        energy[0, i] = sqrt((p[i].dot(p[i])) + m2)
    g = 0
    for E in enrange:
        Juttner[0, g] = (n*de)*(1/normconst)*((E**2 - m**2)/(m**2))**(dim/2.)*(E/(E**2 - m**2))*e**(-E/(k*T))
        g += 1
    weights = ones([1, n])
    y, binEdges = np.histogram(energy[0], bins=len(enrange), range=(m+0.1, ef), weights=weights[0])
    menStd = sqrt((y - (y/n)))
    errorbar(enrange, y, yerr=menStd, fmt='ro')
    plot(enrange, Juttner[0])
    numwithin = 0
    for i in range(len(enrange)):
        if (y[i] + menStd[i] >= Juttner[0, i]) and (y[i] - menStd[i] <= Juttner[0, i]):
            numwithin += 1
    print((numwithin*100)/len(enrange), "% are within 1 standard dev")
def fundamental3D(cot):
    '''
    plots fundamental solution in 3D
    '''
    x = dic[cot.mesh_name].source
    V = cot.V
    mesh_obj = cot.mesh_obj
    kappa = cot.kappa

    y = cot.mesh_obj.coordinates()
    x_y = x - y
    ra = x_y * x_y
    ra = np.sum(ra, axis=1)
    ra = np.sqrt(ra) + 1e-13
    kappara = kappa * ra

    phi_arr = cot.factor * np.power(kappara, 0.5) * sp.kv(0.5, kappara)
    phi = Function(V)
    phi.vector().set_local(phi_arr[dof_to_vertex_map(V)])

    helper.save_plots(phi, ["Free Space", "Greens Function"], cot)
def SteMat(h, r, kappa):
    # Matern isotropic covariance function (Stein's parameterization)
    n = numpy.size(h)
    corelation = numpy.zeros(n, dtype=numpy.double)
    for i in range(n):
        if h[i] == 0.0:
            corelation[i] = 1.0
        else:
            hr = numpy.double(h[i])/r
            bes = kv(kappa, 2.0*numpy.sqrt(kappa)*hr)
            if not numpy.isfinite(bes):
                corelation[i] = 1.0
            elif bes == 0.0:
                corelation[i] = 0.0
            else:
                mult = 2.0**(1.0 - kappa)/gamma(kappa)*(2.0*numpy.sqrt(kappa)*hr)**kappa
                if not numpy.isfinite(mult):
                    corelation[i] = 0.0
                else:
                    corelation[i] = bes * mult
    return corelation
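# Minimal usage sketch for SteMat (assumption: `numpy`, `kv` and `gamma` in the
# function above are the numpy module and the scipy.special functions).
import numpy
from scipy.special import kv, gamma

h = numpy.array([0.0, 0.5, 1.0, 2.0])   # lags
rho = SteMat(h, r=1.0, kappa=1.5)       # rho[0] == 1.0; values decay with lag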
def MaternARD(X, Y, theta, white_noise=False):
    """
    Matern covariance kernel - not fully tested!
    different length scales in all inputs
    theta[0] - overall scale param - ie prior covariance
    theta[1] - shape parameter
    theta[2:-1] - length scales
    theta[-1] - white noise
    """
    # Calculate distance matrix with scaling
    D = EuclideanDist(X, Y, v=theta[2:-1])

    # Calculate covariance matrix from matern function
    v = theta[1]
    K = 2**(1.-v) / gamma(v) * (np.sqrt(2*v)*D)**v * kv(v, np.sqrt(2*v)*D)

    # diagonal terms should be set to one (when D2 = 0, kv diverges but full function = 1)
    # this only works for square 'covariance' matrix...
    # ie fails for blocks..;
    # K[np.where(np.identity(X[:,0].size)==1)] = 1.
    # this should work, but again needs tested properly...
    K[np.where(D == 0.)] = 1.

    # now multiply by an overall scale function
    K = K * theta[0]

    # Add white noise
    if white_noise == True:
        K += np.identity(X[:, 0].size) * (theta[-1]**2)

    return np.matrix(K)
def _F(x):
    """
    This is F(x) defined in equation 6.31c in R&L.

        F(x) = x*int(K_5/3(x)dx)

    where the integral goes from x to infinity.

    for some reason, special.kv(5/3,1e10) is NaN, not 0 ???
    for now, just clip the function above 1e5 to be 0.

    This function can be evaluated in mathematica using the following command

        F[x_] := N[x*Integrate[BesselK[5/3, y], {y, x, Infinity}]]

    From mathematica, we find that

        x       F(x)
        -----   ------------
        0.1     0.818186
        1       0.651423
        10      0.000192238
        100     0

    Comparing our function to the Mathematica integral, we find

    >>> np.allclose(Synchrotron.F([0.1,1,10,100]), [0.818186, 0.651423, 0.000192238,0], rtol=1e-4, atol=1e-4)
    True

    Note, this function is _F so that the docstring will get executed.
    """
    if x > 1e5:
        return 0
    return x*integrate.quad(lambda j: special.kv(5./3, j), x, inf)[0]
def flux_distrib(self):
    """
    :return: flux in ph/sec/mrad**2/0.1%BW
    """
    C_om = 1.3255e22  # ph/(sec * rad**2 * GeV**2 * A)
    g = self.gamma
    # self.eph_c = 1.
    ksi = lambda w, t: 1./2.*w * (1. + g*g*t*t)**(3./2.)
    F = lambda w, t: (1. + g*g*t*t)**2 * (1. + g*g*t*t/(1. + g*g*t*t)
                                          * (kv(1./3., ksi(w, t))/kv(2./3., ksi(w, t)))**2)
    dw_over_w = 0.001  # 0.1% BW
    mrad2 = 1e-6  # transform rad to mrad
    I = lambda eph, theta: mrad2*C_om * self.energy**2*self.I * dw_over_w \
        * (eph/self.eph_c)**2 * kv(2./3., ksi(eph/self.eph_c, theta))**2 \
        * F(eph/self.eph_c, theta)
    return I
def MaternRad(X, Y, theta, white_noise=False):
    """
    Matern covariance kernel - not properly tested!
    Radial - ie same length scales in all inputs
    """
    # Calculate distance matrix with (global) scaling
    D = EuclideanDist(X, Y) / theta[2]

    # Calculate covariance matrix from matern function
    v = theta[1]
    K = 2.**(1.-v) / gamma(v) * (np.sqrt(2.*v)*D)**v * kv(v, np.sqrt(2.*v)*D)

    # diagonal terms should be set to one (when D2 = 0, kv diverges but full function = 1)
    # this only works for square 'covariance' matrix...
    # ie fails for blocks..;
    # K[np.where(np.identity(X[:,0].size)==1)] = 1.
    # this should work, but again needs tested properly...
    K[np.where(D == 0.)] = 1.

    # now multiply by an overall scale function
    K = K * theta[0]**2

    # Add white noise
    if white_noise == True:
        K += np.identity(X[:, 0].size) * (theta[3]**2)

    return np.matrix(K)
def phase_covariance(r, r0, L0):
    """
    Calculate the phase covariance between two points separated by `r`,
    in turbulence with a given `r0` and `L0`.
    Uses equation 5 from Assemat and Wilson, 2006.

    Parameters:
        r (float, ndarray): Separation between points in metres (can be ndarray)
        r0 (float): Fried parameter of turbulence in metres
        L0 (float): Outer scale of turbulence in metres
    """
    # Make sure everything is a float to avoid nasty surprises in division!
    r = numpy.float32(r)
    r0 = float(r0)
    L0 = float(L0)

    # Get rid of any zeros
    r += 1e-40

    A = (L0 / r0) ** (5. / 3)

    B1 = (2 ** (-5. / 6)) * gamma(11. / 6) / (numpy.pi ** (8. / 3))
    B2 = ((24. / 5) * gamma(6. / 5)) ** (5. / 6)

    C = (((2 * numpy.pi * r) / L0) ** (5. / 6)) * kv(5. / 6, (2 * numpy.pi * r) / L0)

    cov = A * B1 * B2 * C

    return cov
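# Minimal usage sketch for phase_covariance (assumption: the bare `gamma` and `kv`
# names above come from scipy.special, and `numpy` is the module import). The r0 and
# L0 values are illustrative only.
import numpy
from scipy.special import gamma, kv

seps = numpy.array([0.01, 0.1, 1.0])            # separations in metres
cov = phase_covariance(seps, r0=0.16, L0=25.0)  # covariance decreases with separation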
def pdf_one_point(x=0.0, c=0.0, sigma=1.0, theta=0.0, nu=1.0):
    ''' VarGamma probability density function at a point x '''
    temp1 = 2.0 / (sigma*(2.0*pi)**0.5 * nu**(1/nu) * special.gamma(1/nu))
    temp2 = ((2*sigma**2/nu + theta**2)**0.5)**(0.5 - 1/nu)
    temp3 = exp(theta*(x - c)/sigma**2) * abs(x - c)**(1/nu - 0.5)
    temp4 = special.kv(1/nu - 0.5, abs(x - c)*(2*sigma**2/nu + theta**2)**0.5/sigma**2)
    return temp1*temp2*temp3*temp4
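# A quick sanity check (assumption: `special`, `exp` and `pi` in the snippet above come
# from scipy.special and the math module): away from the centre c the VarGamma density
# should be positive and decreasing.
from math import exp, pi
from scipy import special

vals = [pdf_one_point(x, c=0.0, sigma=1.0, theta=0.0, nu=1.0) for x in (0.5, 1.0, 3.0)]
print(vals)  # expect positive, decreasing values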
def FrankelStoppingPower(E0, T):
    # Frankel (PRA 1979) - formula 5.10.
    # Only ee collisions.
    # E0 : initial electron energy (MeV)
    # T  : background plasma temperature (MeV)
    E0_ = np.double(E0)
    n = E0_.size
    v1 = np.double(np.sqrt(1. - (E0_/.511 + 1.)**-2))
    g1 = np.double((1. - v1**2)**(-0.5))
    a = .511/T
    y = np.zeros(n)
    for k in range(n):
        if E0_[k] < 0.1:
            pmin = g1[k]*v1[k]*(1. - g1[k]**2/4.)
            pmax = g1[k]*v1[k]*(1. + g1[k]**2/4.)
            dp = (pmax - pmin)/1000000.
            y[k] = quad(lambda p: tot1(p, v1[k], a), 0., pmin, epsrel=3.e-14)[0]
            y[k] += quad(lambda p: tot1(p, v1[k], a), pmin, pmax - dp, epsrel=3.e-14)[0]
            y[k] += quad(lambda p: tot1(p, v1[k], a), pmax, np.inf, epsrel=3.e-14)[0]
            y[k] += quad(lambda p: tot(p, v1[k], a), 0., np.inf, epsrel=3.e-14)[0]
        else:
            y[k] = quad(lambda p: tot(p, v1[k], a), 0., np.inf, epsrel=3.e-14)[0]
            y[k] += quad(lambda p: tot1(p, v1[k], a), 0., np.inf, epsrel=3.e-14)[0]
    y *= (a/(4.*np.pi*kv(2, a)))/v1
    y *= 3.204e-24  # 8*pi^2*me*c^2*re^2 in MeV*cm^2
    return y
def th_roc_glq(mod_order, snr_db, n_samples, n_thresh, n_terms, fading, *args):
    """
    Computes the theoretical CROC using the Gauss-Laguerre quadrature.

    Parameters
    ----------
    mod_order : int
        Modulation order.
    snr_db : float
        Signal-to-noise ratio in dB.
    n_samples : int
        Number of transmitted symbols.
    n_thresh : int
        Number of thresholds to be evaluated.
    n_terms : int
        Number of terms for the Gauss-Laguerre quadrature.
    fading : str
        Name of the fading.
    args : array-like
        Fading parameters.
    """
    if fading not in FADINGS:
        raise NotImplementedError('the formulations for this fading are not'
                                  ' implemented yet.')

    thresholds = np.linspace(.0, 100.0, n_thresh)

    # symbol energy
    Es = 1./mod_order
    # noise variance
    var_w = Es*sps.exp10(-snr_db/10.)

    Pf = 1 - sps.gammainc(n_samples/2., thresholds/(2*var_w))

    Pm = 0.0
    printProgress(0, n_terms, prefix='Progress', suffix='Complete', barLength=50)
    if fading == 'exp_weibull':
        beta, alpha, eta = args[0:3]
        roots, weights = sps.orthogonal.la_roots(n_terms, 0.0)

        for k in range(n_terms):
            Pm = Pm + (weights[k] * (1 - math.exp(-roots[k]))**(alpha - 1)) * \
                (1 - marcumQ(math.sqrt(n_samples * Es * (eta * roots[k]**(1./beta))**2 / var_w),
                             np.sqrt(thresholds / var_w),
                             n_samples / 2.0))
            printProgress(k, n_terms-1, prefix='Progress', suffix='Complete', barLength=50)

        Pm = alpha*Pm

    elif fading == 'gamma_gamma':
        beta, alpha = args[0:2]
        roots, weights = sps.orthogonal.la_roots(n_terms, 0.5*(alpha + beta))

        for k in range(n_terms):
            Pm = Pm + weights[k] * math.exp(roots[k]) * \
                kv(alpha - beta, 2 * math.sqrt(alpha * beta * roots[k])) * \
                (1 - marcumQ(roots[k] * math.sqrt(n_samples * Es / var_w),
                             np.sqrt(thresholds / var_w),
                             n_samples / 2.0))
            printProgress(k, n_terms-1, prefix='Progress', suffix='Complete', barLength=50)

        Pm = Pm * 2 * (alpha * beta)**(0.5 * (alpha + beta)) / (gamma(alpha) * gamma(beta))

    return Pf, Pm
def cvm_unif_inf(statistic):
    """
    Calculates the limiting distribution of the Cramer-von Mises statistic.

    After the second line of equation 1.3 from the Csorgo and Faraway paper.
    """
    args = inf_args / statistic
    return (inf_cs * exp(-args) * kv(.25, args)).sum() / statistic ** .5
def P1(self):
    U = self.U
    W = self.W
    return W**2/U**2 * kv(1, W)**2/jv(1, U)**2 * (
        (1 - self.s)**2 * (jv(0, U)**2 + jv(1, U)**2)
        + (1 + self.s)**2 * (jv(2, U)**2 - jv(1, U)*jv(3, U))
        + 2*U**2/self.rb**2 * (jv(1, U)**2 - jv(0, U)*jv(2, U))
    )
def denom3D(x0, x1, x2, kappa, n):
    ra = np.sqrt(x0 * x0 + x1 * x1 + x2 * x2) + 1e-9
    kappara = kappa * ra
    Khalf = sp.kv(0.5, kappara)
    expon = np.exp(-kappara)
    tmp = Khalf * expon / np.sqrt(ra)
    return 2.0 * np.sum(tmp)
def integrate_yukawa(self, n1, n2, l, gamma):
    """Integrate two densities n1 and n2 with yukawa interaction."""
    from scipy.special import iv, kv
    r = self.r_g
    dr = self.dr_g
    k_rgamma = kv(l + 0.5, r * gamma)  # K(>)
    i_rgamma = iv(l + 0.5, r * gamma)  # I(<)
    k_rgamma[0] = kv(l + 0.5, r[1] * gamma * 1e-5)
    matrix_ik = np.outer(n1 * dr, n2 * dr)
    len_vec = len(k_rgamma)
    for i in range(len_vec):
        k_rgi = k_rgamma[i]
        for k in range(i):
            modified_bessels = i_rgamma[k] * k_rgi
            matrix_ik[i, k] *= modified_bessels
            matrix_ik[k, i] *= modified_bessels
        matrix_ik[i, i] *= i_rgamma[i] * k_rgi
    return matrix_ik.sum()
def Cgw_reg_year(alphaab, times_f, alpha=-2/3, fL=1.0/500, fH=None, decompose=False):
    t1, t2 = N.meshgrid(times_f, times_f)

    x = 2 * math.pi * (day/year) * fL * N.abs(t1 - t2)
    # print N.min(x), N.max(x), N.max(t2 - t1)

    year100ns = 1.0  # was year100ns = year/1e-7 for Ggw_reg_year
    norm = (year100ns**2 * fL**(2*alpha - 2)) * 2**(alpha - 3) / (3 * math.pi**1.5 * SS.gamma(1.5 - alpha))

    if fH is not None:
        # introduce a high-frequency cutoff
        xi = fH/fL

        # avoid the gamma singularity at alpha = 1
        if abs(alpha - 1) < 1e-6:
            diag = math.log(xi) + (EulerGamma + math.log(0.5 * xi)) * math.log(xi) * (alpha - 1)
        else:
            diag = 2**(-alpha) * SS.gamma(1 - alpha) * (1 - xi**(2*alpha - 2))

        with numpy_seterr(divide='ignore'):
            bessel = N.where(xi*x > 1e3, 0.0, SS.kv(1 - alpha, xi * x))

        if decompose:
            corr = N.where(x == 0, 0.0,
                           x**(1 - alpha) * (SS.kv(1 - alpha, x) - xi**(alpha - 1) * bessel) - diag)
        else:
            corr = N.where(x == 0, norm * diag,
                           norm * x**(1 - alpha) * (SS.kv(1 - alpha, x) - xi**(alpha - 1) * bessel))
    else:
        if decompose:
            diag = 2**(-alpha) * SS.gamma(1 - alpha)
            corr = N.where(x == 0, 0, x**(1 - alpha) * SS.kv(1 - alpha, x) - diag)
        else:
            # testing for zero is dangerous, but kv seems to behave OK for arbitrarily small arguments
            corr = N.where(x == 0, norm * 2**(-alpha) * SS.gamma(1 - alpha),
                           norm * x**(1 - alpha) * SS.kv(1 - alpha, x))

    # integer division so the block slices below get integer bounds
    ps, ts = len(alphaab), len(times_f) // len(alphaab)
    for i in range(ps):
        for j in range(ps):
            corr[i*ts:(i+1)*ts, j*ts:(j+1)*ts] *= alphaab[i, j]

    if decompose:
        return norm, diag, corr
    else:
        return corr
def I1RK1r(self, rin, iaq, ipint):
    r = rin / self.aq.lab2[iaq, ipint]
    R = self.R / self.aq.lab2[iaq, ipint]
    if np.isinf(self.i1R[iaq, ipint]).any():
        rv = np.sqrt(1 / (4 * r * R)) * np.exp(R - r) * \
            (1 - 3 / (8 * R) - 15 / (128 * R ** 2) - 315 / (3072 * R ** 3)) * \
            (1 + 3 / (8 * r) - 15 / (128 * r ** 2) + 315 / (3072 * r ** 3))
    else:
        rv = self.i1R[iaq, ipint] * kv(1, r)
    return rv
def th_roc_num(mod_order, snr_db, n_samples, n_thresh, fading, *args):
    """
    Computes the theoretical CROC using the scipy numerical integration library.

    Parameters
    ----------
    mod_order : int
        Modulation order.
    snr_db : float
        Signal-to-noise ratio in dB.
    n_samples : int
        Number of transmitted symbols.
    n_thresh : int
        Number of thresholds to be evaluated.
    fading : str
        Name of the fading.
    args : array-like
        Fading parameters.
    """
    if fading not in FADINGS:
        raise NotImplementedError('the formulations for this fading are not'
                                  ' implemented yet.')

    thresholds = np.linspace(.0, 100.0, n_thresh)

    # symbol energy
    Es = 1./mod_order
    # noise variance
    var_w = Es*sps.exp10(-snr_db/10.)

    Pf = 1 - sps.gammainc(n_samples/2., thresholds/(2*var_w))

    Pm = np.zeros(n_thresh)
    printProgress(0, n_thresh, prefix='Progress', suffix='Complete', barLength=50)
    if fading == 'exp_weibull':
        beta, alpha, eta = args[0:3]

        for k in range(n_thresh):
            integrand = lambda u: (alpha*math.exp(-u)*(1 - math.exp(-u))**(alpha - 1)) * \
                (1 - marcumQ(math.sqrt(n_samples*Es*(eta*u**(1./beta))**2/var_w),
                             math.sqrt(thresholds[k]/var_w),
                             n_samples/2.0))
            Pm[k] = quad(integrand, 0.0, np.inf, epsrel=1e-9, epsabs=0)[0]
            printProgress(k, n_thresh-1, prefix='Progress', suffix='Complete', barLength=50)

    elif fading == 'gamma_gamma':
        beta, alpha = args[0:2]

        for k in range(n_thresh):
            integrand = lambda r: r**(0.5 * (alpha + beta)) * \
                kv(alpha - beta, 2 * math.sqrt(alpha * beta * r)) * \
                (1 - marcumQ(r * math.sqrt(n_samples * Es / var_w),
                             np.sqrt(thresholds[k] / var_w),
                             n_samples / 2.0))
            Pm[k] = quad(integrand, 0.0, np.inf, epsrel=1e-9, epsabs=0)[0] * \
                2 * (alpha * beta)**(0.5 * (alpha + beta)) / (gamma(alpha) * gamma(beta))
            printProgress(k, n_thresh-1, prefix='Progress', suffix='Complete', barLength=50)

    return Pf, Pm
def constant_charge_single_energy(sigma0, r1, kappa, epsilon):
    """
    It computes the total energy of a single sphere at constant charge,
    immersed in water.

    Arguments
    ----------
    sigma0 : float, constant charge on the surface of the sphere.
    r1     : float, radius of sphere.
    kappa  : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.

    Returns
    --------
    E : float, total energy.
    """
    N = 20  # Number of terms in expansion

    qe = 1.60217646e-19
    Na = 6.0221415e23
    E_0 = 8.854187818e-12
    cal2J = 4.184

    index2 = numpy.arange(N + 1, dtype=float) + 0.5
    index = index2[0:-1]

    K1 = special.kv(index2, kappa * r1)
    K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]

    k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
    k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
        index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p

    a0_inf = -sigma0 / (epsilon * kappa * k1p[0])
    U1_inf = a0_inf * k1[0]

    C1 = 2 * pi * sigma0 * r1 * r1
    C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
    E = C0 * C1 * U1_inf

    return E
def WL_Q0_bT(self, bT):
    return 1.0 * (
        self.A / 2.0 / self.C * np.exp(-bT ** 2 / 4 / self.C)
        - self.D * 2 ** (1 - self.nu) * self.M * (bT * self.M) ** self.nu
        * kv(1 - self.nu, bT * self.M) / bT / gamma(self.nu)
    )
def _anisotropic_vonkarman_kernel(x, sigma, corr_length, g1, g2):
    L = get_correlation_length_matrix(corr_length, g1, g2)
    invL = np.linalg.inv(L)
    dists = pdist(x, metric='mahalanobis', VI=invL)
    K = dists**(5./6.) * special.kv(5./6., 2*np.pi * dists)
    lim0 = special.gamma(5./6.) / (2 * ((np.pi)**(5./6.)))
    K = squareform(K)
    np.fill_diagonal(K, lim0)
    K /= lim0
    K *= sigma**2
    return K