def PotDisk(n, r):
    "n: order of differentiation"
    rratio = 0.5 * r / r0
    if n == 1:
        dPhidr_disk = (G * Md * r / (2. * r0 * r0 * r0)) * (i0(rratio) * k0(rratio) - i1(rratio) * k1(rratio))
        return dPhidr_disk
    elif n == 2:
        d2Phidr2_disk = (G * Md / (4. * r0 * r0 * r0)) * (
            -rratio * iv(2, rratio) * k1(rratio)
            + i0(rratio) * (2 * k0(rratio) - 3. * rratio * k1(rratio))
            + i1(rratio) * (3. * rratio * k0(rratio) - 2. * k1(rratio) + rratio * kn(2, rratio)))
        return d2Phidr2_disk
    elif n == 'v':
        dPhidr_disk = (G * Md * r / (2. * r0 * r0 * r0)) * (i0(rratio) * k0(rratio) - i1(rratio) * k1(rratio))
        Vc = np.sqrt(r * dPhidr_disk)
        return Vc
    else:
        Phi_disk = (-G * Md * r / (2. * r0 * r0)) * (i0(rratio) * k1(rratio) - i1(rratio) * k0(rratio))
        return Phi_disk
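# A minimal usage sketch (not from the original source): PotDisk relies on
# module-level G, Md, r0 and the Bessel functions, so illustrative values and
# imports are assumed here purely to show how the branches are called.
import numpy as np
from scipy.special import i0, i1, k0, k1, iv, kn

G = 4.30091e-6    # assumed units: kpc (km/s)^2 / Msun
Md = 5.0e10       # assumed disk mass [Msun]
r0 = 3.0          # assumed disk scale length [kpc]

r = 8.0
print(PotDisk(0, r))    # potential Phi(r) (default branch)
print(PotDisk(1, r))    # dPhi/dr
print(PotDisk('v', r))  # circular velocity from sqrt(r * dPhi/dr)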
def T6fun(i, x):
    betah = (locs['a'])[i] * (locs['k'])[i] * locs['alpha'][i]
    betam = par['am'] * par['km'] * par['alpha_m']
    a = numpy.zeros([2, 2])
    a[0, 0] = i0(locs['alpha'][i] * locs['rad'][i])
    a[1, 0] = betah * i1(locs['alpha'][i] * locs['rad'][i])
    a[0, 1] = -k0(par['alpha_m'] * locs['rad'][i])
    a[1, 1] = betam * k1(par['alpha_m'] * locs['rad'][i])
    ## b is markedly different in zheng2009 and confirm.nb
    ## this is now the confirm.nb version
    b = numpy.zeros([2, 1])
    b[0] = (1.0 / par['cm']) - (locs['rad'][i] * k0(locs['alpha'][i] * locs['rad'][i])
                                * i1(locs['alpha'][i] * locs['rad'][i]) / ((locs['a'])[i] * locs['alpha'][i]))
    b[1] = ((locs['k'])[i] * locs['rad'][i] * k1(locs['alpha'][i] * locs['rad'][i])
            * i1(locs['alpha'][i] * locs['rad'][i]))
    const = (inv(a)).dot(b)
    qh = ((1.0 / (locs['c'])[i])
          * (1.0 - locs['alpha'][i] * locs['rad'][i] * i0(locs['alpha'][i] * x) * k1(locs['alpha'][i] * locs['rad'][i])))
    if x < locs['rad'][i]:
        return const[0] * i0(locs['alpha'][i] * x) + qh
    else:
        return const[1] * k0(par['alpha_m'] * x) + (1.0 / par['cm'])
def integr_kaiser1(x, *arg):
    c, alpha, gamma, a, rabi_frequency, rotation_angle = arg
    integral, err = quad(
        lambda t: i1(gamma * sqrt(1 - (t / x)**2)) / (i1(gamma) * sqrt(1 - (t / x)**2)),
        0, x)
    return rotation_angle / rabi_frequency - integral
def test_bessel_i1(self):
    x_single = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float32)
    x_double = np.arange(-3, 3).reshape(1, 3, 2).astype(np.float64)
    try:
        from scipy import special  # pylint: disable=g-import-not-at-top
        self.assertAllClose(special.i1(x_single),
                            self.evaluate(special_math_ops.bessel_i1(x_single)))
        self.assertAllClose(special.i1(x_double),
                            self.evaluate(special_math_ops.bessel_i1(x_double)))
    except ImportError as e:
        tf_logging.warn('Cannot test special functions: %s' % str(e))
def __init__(self, model='simple', scatt_alpha=5.0/3.0, observer_screen_distance=8.023*10**21,
             source_screen_distance=1.790*10**22, theta_maj_mas_ref=1.309, theta_min_mas_ref=0.64,
             POS_ANG=78, wavelength_reference_cm=1.0, r_in=10000*10**5, r_out=10**20):
    self.model = model
    self.POS_ANG = POS_ANG  # Major axis position angle [degrees, east of north]
    self.observer_screen_distance = observer_screen_distance  # cm
    self.source_screen_distance = source_screen_distance  # cm
    M = observer_screen_distance / source_screen_distance
    self.wavelength_reference = wavelength_reference_cm  # Reference wavelength [cm]
    self.r_in = r_in    # inner scale [cm]
    self.r_out = r_out  # outer scale [cm]
    self.scatt_alpha = scatt_alpha

    if model == 'simple':
        if r_in == 0.0:
            print("Error! The 'simple' scattering model requires a finite inner scale.")
        # Now, we need to solve for the effective parameters accounting for an inner scale
        # By default, we will match the fitted Gaussian kernel as the wavelength goes to infinity
        axial_ratio = theta_min_mas_ref / theta_maj_mas_ref  # axial ratio of the scattering disk at long wavelengths (minor/major size < 1)
        #self.C_scatt = (1.20488e-15*axial_ratio*self.r_in**(2.0 - self.scatt_alpha)*self.wavelength_reference**2)/self.scatt_alpha
        # Note: the prefactor is exactly equal to 1/209952000000000000
        self.C_scatt = (4.76299e-18 * (1.0 + M)**2 * np.pi**4 * r_in**(2.0 - scatt_alpha)
                        * theta_maj_mas_ref * theta_min_mas_ref
                        / (scatt_alpha * wavelength_reference_cm**2 * np.log(4.)))
        geometric_mean = (2.0 * self.r_in**(2.0 - self.scatt_alpha) / self.scatt_alpha / self.C_scatt)**0.5
        self.r0_maj = geometric_mean * axial_ratio**0.5  # Phase coherence length at the reference wavelength [cm]
        self.r0_min = geometric_mean / axial_ratio**0.5  # Phase coherence length at the reference wavelength [cm]
        self.Qprefactor = self.C_scatt * (self.r0_maj * self.r0_min)**(self.scatt_alpha / 2.0)  # This accounts for the effects of a finite inner scale
    elif model == 'power-law':
        self.r0_maj = (2.0 * np.log(2.0))**0.5 / np.pi * wavelength_reference_cm / (theta_maj_mas_ref / 1000.0 / 3600.0 * np.pi / 180.0)  # Phase coherence length at the reference wavelength [cm]
        self.r0_min = (2.0 * np.log(2.0))**0.5 / np.pi * wavelength_reference_cm / (theta_min_mas_ref / 1000.0 / 3600.0 * np.pi / 180.0)  # Phase coherence length at the reference wavelength [cm]
    elif model == 'amph_von_Misses':
        axial_ratio = theta_min_mas_ref / theta_maj_mas_ref  # axial ratio of the scattering disk at long wavelengths (minor/major size < 1)
        # Note: the prefactor is exactly equal to 1/209952000000000000
        self.C_scatt = (4.76299e-18 * (1.0 + M)**2 * np.pi**4 * r_in**(2.0 - scatt_alpha)
                        * theta_maj_mas_ref * theta_min_mas_ref
                        / (scatt_alpha * wavelength_reference_cm**2 * np.log(4.)))
        geometric_mean = (2.0 * self.r_in**(2.0 - self.scatt_alpha) / self.scatt_alpha / self.C_scatt)**0.5
        self.r0_maj = geometric_mean * axial_ratio**0.5  # Phase coherence length at the reference wavelength [cm]
        self.r0_min = geometric_mean / axial_ratio**0.5  # Phase coherence length at the reference wavelength [cm]
        self.r_in_p = (1.0 / (sps.gamma(0.5 - self.scatt_alpha / 2.0) * sps.gamma(1.0 + self.scatt_alpha))
                       * ((2.0**(self.scatt_alpha + 4.0) * np.pi) / (1.0 + np.cos(np.pi * self.scatt_alpha)))**0.5 * self.r_in)
        A = theta_maj_mas_ref / theta_min_mas_ref
        self.kzeta = -0.17370 + 0.38067 * A + 0.944246 * A**2  # This is an approximate solution
        self.zeta = 1.0 - 2.0 * sps.i1(self.kzeta) / (self.kzeta * sps.i0(self.kzeta))
    elif model == 'boxcar':
        axial_ratio = theta_min_mas_ref / theta_maj_mas_ref  # axial ratio of the scattering disk at long wavelengths (minor/major size < 1)
        # Note: the prefactor is exactly equal to 1/209952000000000000
        self.C_scatt = (4.76299e-18 * (1.0 + M)**2 * np.pi**4 * r_in**(2.0 - scatt_alpha)
                        * theta_maj_mas_ref * theta_min_mas_ref
                        / (scatt_alpha * wavelength_reference_cm**2 * np.log(4.)))
        geometric_mean = (2.0 * self.r_in**(2.0 - self.scatt_alpha) / self.scatt_alpha / self.C_scatt)**0.5
        self.r0_maj = geometric_mean * axial_ratio**0.5  # Phase coherence length at the reference wavelength [cm]
        self.r0_min = geometric_mean / axial_ratio**0.5  # Phase coherence length at the reference wavelength [cm]
        self.r_in_p = (1.0 / (sps.gamma(0.5 - self.scatt_alpha / 2.0) * sps.gamma(1.0 + self.scatt_alpha))
                       * ((2.0**(self.scatt_alpha + 4.0) * np.pi) / (1.0 + np.cos(np.pi * self.scatt_alpha)))**0.5 * self.r_in)
        A = theta_maj_mas_ref / theta_min_mas_ref
        self.kzeta = -0.17370 + 0.38067 * A + 0.944246 * A**2  # This is an approximate solution
        self.kzeta_3 = 0.02987 + 0.28626 * A                   # This is an approximate solution
        self.zeta = 1.0 - 2.0 * sps.i1(self.kzeta) / (self.kzeta * sps.i0(self.kzeta))
    else:
        print("Scattering Model Not Recognized!")
        return
def region2_pressure(self):
    self.kappa = np.sqrt(12 * self.eps0 * (1 + self.nu) * (self.a / self.h)**2)
    self.delta = self.gamma - self.alpha
    self.g = -self.eps0 / self.eps**2 / self.kappa**2 * (self.r - i1(self.kappa * self.r) / i1(self.kappa))
    #self.beta = -6*(1-self.nu**2)/self.kappa**2*(self.p*self.a**3)/(self.E*self.h**3)*(self.r-i1(self.k*self.r)/i1(self.k))
    self.beta = -6 * (1 - self.nu**2) / self.kappa**2 * (self.p * self.a**3) / (self.E * self.h**3) \
        * (self.r - i1(self.kappa * self.r) / i1(self.kappa))
    self.numIntegrateBeta()
    # Making ends meet: compare calculated beta from the relation to g with the direct formula.
    # Is this appropriate??
    if 0:
        self.beta = self.eps**self.delta * self.g
        self.numIntegrateBeta()
def Gamma(nw_rad, lay_ox, L_d, L_tf, eps_1, eps_2, eps_3):
    fact1 = (nw_rad + lay_ox) / L_d
    fact2 = nw_rad / L_tf
    fact3 = fact1**(-1)
    fact4 = (nw_rad + lay_ox) / nw_rad
    num = eps_1 * k0(fact1) * (L_d / L_tf) * i1(fact2)
    denom1 = k0(fact1) * fact3
    denom2 = log(fact4) * k1(fact1) * (eps_3 / eps_2)
    denom3 = (denom1 + denom2) * eps_1 * fact2 * i1(fact2)
    denom = denom3 + eps_3 * k1(fact1) * i0(fact2)
    gamma = num / denom
    return gamma
def ZTransDSC(self):
    if self.beam.gammarel == float('inf'):
        ZTrans_DSC = np.zeros(len(self.f)) + 1.j * np.zeros(len(self.f))
        return ZTrans_DSC
    kbess = 2 * const.pi * self.f / (self.beam.betarel * const.c)
    argbess0 = kbess * self.beam.test_beam_shift / self.beam.gammarel
    argbess1 = kbess * self.chamber.pipe_rad_m / self.beam.gammarel
    BessBeamT = (i1(argbess0) / self.beam.test_beam_shift)**2
    BessBeamTDSC = k1(argbess0) / i1(argbess0)
    ZTrans_DSC = -(1.j * Z0 * self.chamber.pipe_len_m * BessBeamT * BessBeamTDSC
                   / (const.pi * self.beam.gammarel**2 * self.beam.betarel))
    return ZTrans_DSC
def testDerivativeKappa(self):
    "Test vonMisesKappaConjugate derivative by changing kappa"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    c = 10
    R0 = 1
    self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.m, self.kappa, c, R0)
    for i in range(100):
        no = uniform(0.1, 100)
        self.kappa.set_scale(no)
        self.J.evaluate(True)
        ratio = i1(no) / i0(no)
        self.assertAlmostEqual(self.kappa.get_scale_derivative(),
                               -R0 + c * i1(no) / i0(no),
                               delta=0.001)
def Vd(R, Rd, sigma_0):
    y = R / (2 * Rd)
    return np.sqrt(4 * np.pi * G * sigma_0 * Rd * y**2 *
                   (special.i0(y) * special.k0(y) - special.i1(y) * special.k1(y)))
def B2B1(s, H_g, kappa, r_Db):
    numerator = (kappa * math.sqrt(H_g * s / kappa) * sp.i1(r_Db * math.sqrt(H_g * s / kappa)) * sp.k0(r_Db * math.sqrt(s))
                 + math.sqrt(s) * sp.i0(r_Db * math.sqrt(H_g * s / kappa)) * sp.k1(r_Db * math.sqrt(s)))
    denominator = (kappa * math.sqrt(H_g * s / kappa) * sp.k1(r_Db * math.sqrt(H_g * s / kappa)) * sp.k0(r_Db * math.sqrt(s))
                   - math.sqrt(s) * sp.k0(r_Db * math.sqrt(H_g * s / kappa)) * sp.k1(r_Db * math.sqrt(s)))
    rt = numerator / denominator
    return rt
def invmievonmises_pdf(theta, kappa, nu, lambda_, loc):
    alpha1 = i1(kappa) / i0(kappa)
    # inverse transformation by Newton's method
    inv_theta = inv_trans_APF(theta, loc, lambda_, nu)
    C = (1 - nu * alpha1)
    p = vonmises.pdf(inv_theta, loc=0, kappa=kappa) / C
    return p
def exponential_velocity(r, rd, vmax):
    """
    Velocity function for an exponential profile

    r and rd must be in the same units

    Parameters
    ----------
    r : array
        radial positions where the model is to be computed
    rd : float
        radius at which the maximum velocity is reached
    vmax : float
        Maximum velocity of the model

    Returns
    -------
    Array with the same shape as r, containing the model velocity curve/map
    """
    # disk scale length
    rd2 = rd / 2.15
    vr = np.zeros(np.shape(r))
    # to prevent any problem in the center
    q = np.where(r != 0)
    vr[q] = r[q] / rd2 * vmax / 0.88 * np.sqrt(
        i0(0.5 * r[q] / rd2) * k0(0.5 * r[q] / rd2) - i1(0.5 * r[q] / rd2) * k1(0.5 * r[q] / rd2))
    return vr
def estimate_distribution(hours):
    """
    Estimate the parameters of the von Mises distribution for the data.

    Arguments:
        hours: A NumPy array holding incident times as floats between 0 and 24.

    Returns:
        mu: The distribution's center as a float between -pi and pi.
        kappa: The distribution's measure of concentration as a float.

    More information about the von Mises distribution:
    https://en.wikipedia.org/wiki/Von_Mises_distribution
    """
    theta = hours_to_radians(hours)
    z = np.vectorize(complex)(np.cos(theta), np.sin(theta))
    n = len(z)
    z_mean = np.mean(z)
    mu = np.arctan2(z_mean.imag, z_mean.real)
    r2 = np.mean(z.real)**2 + np.mean(z.imag)**2
    re = np.sqrt(n / (n - 1) * (r2 - 1 / n))
    x = np.arange(0, 10, 1e-4)
    y = np.abs(i1(x) / i0(x) - re)
    kappa = x[np.argmin(y)]
    return mu, kappa
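# A self-contained sketch (not part of the original module) showing the same
# kappa-recovery idea on synthetic data: draw von Mises samples, compute the
# mean resultant length, and invert A(kappa) = I1(kappa)/I0(kappa) by a grid
# search, just as estimate_distribution does above.
import numpy as np
from scipy.special import i0, i1

rng = np.random.default_rng(0)
theta = rng.vonmises(mu=1.0, kappa=3.0, size=5000)

z = np.exp(1j * theta)
mu_hat = np.angle(np.mean(z))
r_bar = np.abs(np.mean(z))            # mean resultant length

grid = np.arange(1e-3, 10, 1e-3)
kappa_hat = grid[np.argmin(np.abs(i1(grid) / i0(grid) - r_bar))]
print(mu_hat, kappa_hat)              # roughly (1.0, 3.0)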
def _evaluate(self, R, z, phi=0., t=0.):
    """
    NAME:
       _evaluate
    PURPOSE:
       evaluate the potential at (R,z)
    INPUT:
       R - Cylindrical Galactocentric radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       potential at (R,z)
    HISTORY:
       2012-12-26 - Written - Bovy (IAS)
    """
    if self._new:
        #if R > 6.: return self._kp(R,z)
        if nu.fabs(z) < 10.**-6.:
            y = 0.5 * self._alpha * R
            return -nu.pi * R * (special.i0(y) * special.k1(y) - special.i1(y) * special.k0(y))
        kalphamax = 10.
        ks = kalphamax * 0.5 * (self._glx + 1.)
        weights = kalphamax * self._glw
        sqrtp = nu.sqrt(z**2. + (ks + R)**2.)
        sqrtm = nu.sqrt(z**2. + (ks - R)**2.)
        evalInt = nu.arcsin(2. * ks / (sqrtp + sqrtm)) * ks * special.k0(self._alpha * ks)
        return -2. * self._alpha * nu.sum(weights * evalInt)
    raise NotImplementedError(
        "Not new=True not implemented for RazorThinExponentialDiskPotential")
def tetmConstants(self, ri, ro, neff, wl, EH, c, idx):
    a = numpy.empty((2, 2))
    n = self.maxIndex(wl)
    u = self.u(ro, neff, wl)
    urp = self.u(ri, neff, wl)
    if neff < n:
        B1 = j0(u)
        B2 = y0(u)
        F1 = j0(urp) / B1
        F2 = y0(urp) / B2
        F3 = -j1(urp) / B1
        F4 = -y1(urp) / B2
        c1 = wl.k0 * ro / u
    else:
        B1 = i0(u)
        B2 = k0(u)
        F1 = i0(urp) / B1
        F2 = k0(urp) / B2
        F3 = i1(urp) / B1
        F4 = -k1(urp) / B2
        c1 = -wl.k0 * ro / u
    c3 = c * c1
    a[0, 0] = F1
    a[0, 1] = F2
    a[1, 0] = F3 * c3
    a[1, 1] = F4 * c3
    return numpy.linalg.solve(a, EH.take(idx))
def testContinuedFraction(self):
    # Check that the simplest continued fraction returns the golden ratio.
    self.assertAllClose(
        self.evaluate(
            _compute_general_continued_fraction(
                100, [], partial_numerator_fn=lambda _: 1.)),
        scipy_constants.golden - 1.)

    # Check the continued fraction constant is returned.
    cf_constant_denominators = scipy_special.i1(2.) / scipy_special.i0(2.)

    self.assertAllClose(
        self.evaluate(
            _compute_general_continued_fraction(
                100, [], partial_denominator_fn=lambda i: i, tolerance=1e-5)),
        cf_constant_denominators, rtol=1e-5)

    cf_constant_numerators = np.sqrt(
        2 / (np.e * np.pi)) / (scipy_special.erfc(np.sqrt(0.5))) - 1.

    # Check that we can specify dtype and tolerance.
    self.assertAllClose(
        self.evaluate(
            _compute_general_continued_fraction(
                100, [], partial_numerator_fn=lambda i: i,
                tolerance=1e-5, dtype=tf.float64)),
        cf_constant_numerators, rtol=1e-5)
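# A small pure-scipy cross-check (a sketch, independent of the TensorFlow helper
# above): the continued fraction with unit numerators and partial denominators
# 1, 2, 3, ... converges to the "continued fraction constant" I1(2)/I0(2),
# which is exactly what cf_constant_denominators encodes.
from scipy import special as scipy_special

def simple_cf(depth=30):
    value = 0.0
    for d in range(depth, 0, -1):   # evaluate 1/(1 + 1/(2 + 1/(3 + ...))) from the bottom up
        value = 1.0 / (d + value)
    return value

print(simple_cf(), scipy_special.i1(2.) / scipy_special.i0(2.))  # both ~0.697774657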
def plotting_arai(file1):
    """ Main plotting function """
    freqs, _, control, _, _, _, _ = np.loadtxt(file1, delimiter=',', unpack=True)
    k = np.linspace(0, 3000, 10000)
    sigma = 0.07
    a = (2e-3) / 2
    rho = 1000
    w_squared = ((sigma * k) / (rho * a**2)) * (1 - k**2 * a**2) * (i1(k * a) / i0(k * a))
    sqrt_w = np.sqrt(w_squared)
    v = arai_velocity(1551)
    wavelength = v / freqs
    wavenumber = 2 * np.pi / wavelength
    savgol_control = savgol_filter(control, 1001, 2)
    fig, ax = plt.subplots()
    ax.plot(k * a, sqrt_w, label='Rayleigh')
    ax.plot(wavenumber * a, savgol_control, label='Experimental (average velocity)')
    ax.set_xlim(0, 7)
    ax.set_ylim(0, 100)
    ax.legend()
    ax.set_xlabel('ka', fontsize=16)
    ax.set_ylabel('$\\omega$', fontsize=16)
def compute_by_noise_pow(signal, n_pow):
    global _window
    global _G
    global _prevGamma
    global _alpha
    global _prevAmp
    global _ratio
    global _constant
    global _gamma15
    s_spec = np.fft.fftpack.fft(signal * _window)
    s_amp = np.absolute(s_spec)
    s_phase = np.angle(s_spec)
    #for idx in xrange(len(s_phase)):
    #    print(s_phase[idx])
    gamma = _calc_aposteriori_snr(s_amp, n_pow)
    xi = _calc_apriori_snr(gamma)
    _prevGamma = gamma
    nu = gamma * xi / (1.0 + xi)
    _G = (_gamma15 * np.sqrt(nu) / gamma) * np.exp(-nu / 2.0) * \
        ((1.0 + nu) * spc.i0(nu / 2.0) + nu * spc.i1(nu / 2.0))
    idx = np.less(s_amp**2.0, n_pow)
    _G[idx] = _constant
    idx = np.isnan(_G) + np.isinf(_G)
    _G[idx] = xi[idx] / (xi[idx] + 1.0)
    idx = np.isnan(_G) + np.isinf(_G)
    _G[idx] = _constant
    _G = np.maximum(_G, 0.0)
    amp = _G * s_amp
    amp = np.maximum(amp, 0.0)
    amp2 = _ratio * amp + (1.0 - _ratio) * s_amp
    _prevAmp = amp
    spec = amp2 * np.exp(s_phase * 1j)
    return np.real(np.fft.fftpack.ifft(spec))
def update_gradients_full(self, dL_dK, X, X2=None):
    if X2 is None:
        X2 = X
    trig_arg = (2 * np.pi / self.period) * (X - X2.T)
    cos_term = np.cos(trig_arg)
    sin_term = np.sin(trig_arg)
    invL2 = 1 / self.lengthscale**2

    if np.any(self.lengthscale > 1e4):
        # Limit for l -> infinity
        dK_dV = cos_term  # K / V
        dK_dp = (self.variance / self.period) * trig_arg * sin_term
        # This is 0 in the limit, but best to set it to a small non-0 value
        dK_dl = 1e-4 / self.lengthscale
    elif np.any(invL2 < 3.75):
        bessel0 = i0(invL2)
        bessel1 = i1(invL2)
        eInvL2 = np.exp(invL2)
        dInvL2_dl = -2 * invL2 / self.lengthscale  # == -2 / l^3
        denom = eInvL2 - bessel0
        exp_term = np.exp(cos_term * invL2)
        K_no_Var = (exp_term - bessel0) / denom  # == K / V; here just for clarity of further expressions
        dK_dV = K_no_Var
        dK_dp = (self.variance / self.period) * invL2 * trig_arg * sin_term * exp_term / denom
        dK_dl = dInvL2_dl * self.variance * (
            (cos_term * exp_term - bessel1) - K_no_Var * (eInvL2 - bessel1)) / denom
    else:
        embi0 = self.embi0(invL2)
        # embi1 = self.embi1(invL2)
        # embi0min1 = embi0 - embi1
        embi0min1 = self.embi0min1(invL2)
        dInvL2_dl = -2 * invL2 / self.lengthscale  # == -2 / l^3
        denom = 1 - embi0
        exp_term = np.exp((cos_term - 1) * invL2)
        K_no_Var = (exp_term - embi0) / denom  # == K / V; here just for clarity of further expressions
        dK_dV = K_no_Var
        # I.e. SAME as the above case at this abstraction level
        dK_dp = (self.variance / self.period) * invL2 * trig_arg * sin_term * exp_term / denom
        dK_dl = dInvL2_dl * self.variance * (
            (cos_term - 1) * exp_term + embi0min1 - K_no_Var * embi0min1) / denom

    self.variance.gradient = np.sum(dL_dK * dK_dV)
    self.period.gradient = np.sum(dL_dK * dK_dp)
    self.lengthscale.gradient = np.sum(dL_dK * dK_dl)
def kappa_to_stddev(kappa):
    '''
    Convert kappa to the circular standard deviation of a wrapped Gaussian:
    std = sqrt(-2 ln(I_1(kappa)/I_0(kappa)))
    '''
    # return 1.0 - scsp.i1(kappa)/scsp.i0(kappa)
    return np.sqrt(-2. * np.log(scsp.i1(kappa) / scsp.i0(kappa)))
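# Quick numeric check (a sketch with the same aliases assumed as above): for
# large kappa the circular standard deviation approaches 1/sqrt(kappa), while
# for small kappa it grows without bound as the distribution approaches uniform.
import numpy as np
import scipy.special as scsp

for kappa in (0.5, 2.0, 10.0, 100.0):
    std = np.sqrt(-2. * np.log(scsp.i1(kappa) / scsp.i0(kappa)))
    print(kappa, std, 1. / np.sqrt(kappa))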
def C0(s, N_s, N_w, H_g, kappa, r_Db):
    rt = 1.0 - 1.0 / (
        1.0 - kappa * N_s / 2 / N_w * math.sqrt(H_g * s / kappa)
        * (sp.i1(math.sqrt(H_g * s / kappa)) - B2B1(s, H_g, kappa, r_Db) * sp.k1(math.sqrt(H_g * s / kappa)))
        / (sp.i0(math.sqrt(H_g * s / kappa)) + B2B1(s, H_g, kappa, r_Db) * sp.k0(math.sqrt(H_g * s / kappa))))
    return rt
def w_minus(self, r):
    if self.r0 == 1:
        return r * 0
    return (special.i1(np.sqrt(self.Pi) * r) * self.nu_1(r) / r
            + special.k1(np.sqrt(self.Pi) * r) * self.nu_2(r) / r)
def _tmcoeq(self, v0, nu):
    u1r1, u2r1, u2r2, s1, s2, n1sq, n2sq, n3sq = self.__params(v0)
    if s1 == 0:  # e
        f11a, f11b = 2, 1
    elif s1 > 0:  # a, b, d
        f11a, f11b = j0(u1r1) * u1r1, j1(u1r1)
    else:  # c
        f11a, f11b = i0(u1r1) * u1r1, i1(u1r1)
    if s2 > 0:
        f22a, f22b = j0(u2r2), y0(u2r2)
        f2a = j1(u2r1) * f22b - y1(u2r1) * f22a
        f2b = j0(u2r1) * f22b - y0(u2r1) * f22a
    else:  # a
        f22a, f22b = i0(u2r2), k0(u2r2)
        f2a = i1(u2r1) * f22b + k1(u2r1) * f22a
        f2b = i0(u2r1) * f22b - k0(u2r1) * f22a
    return f11a * n2sq * f2a - f11b * n1sq * f2b * u2r1
def rotation_velocity(pos):
    rho = (pos[0]**2 + pos[1]**2)**0.5
    phi = np.arctan2(pos[1], pos[0])
    y = rho / (2 * Rd)
    sigma0 = M_dm / (2 * pi * Rd**2)
    speed = (4 * pi * G * sigma0 * y**2 * (i0(y) * k0(y) - i1(y) * k1(y))
             + (G * M_dm * rho) / (rho + a_dm)**2
             + (G * M_bulge * rho) / (rho + a_bulge)**2)**0.5
    return (-speed * sin(phi), speed * cos(phi), 0)
def gendata(X):
    l = '%5s%23s%23s%23s%23s%23s%23s%23s%23s\n' % (
        'x', 'I0', 'I1', 'I2', 'I3', 'K0', 'K1', 'K2', 'K3')
    for i, x in enumerate(X):
        l += '%5.2f%23.15e%23.15e%23.15e%23.15e%23.15e%23.15e%23.15e%23.15e\n' % (
            x, sp.i0(x), sp.i1(x), sp.iv(2, x), sp.iv(3, x),
            sp.k0(x), sp.k1(x), sp.kn(2, x), sp.kn(3, x))
    return l
def testVonMisesVariance(self):
    locs_v = np.array([-3., -2., -1., 0.3, 2.3])
    concentrations_v = np.array([0.0, 0.1, 1.0, 2.0, 10.0])
    von_mises = tfd.VonMises(self.make_tensor(locs_v),
                             self.make_tensor(concentrations_v))
    expected_vars = 1.0 - sp_special.i1(concentrations_v) / sp_special.i0(concentrations_v)
    self.assertAllClose(expected_vars, self.evaluate(von_mises.variance()))
def gradient(self, phases, log10_ens=3, free=False):
    e, width, loc = self._make_p(log10_ens)
    my_i0 = i0(1. / width)
    my_i1 = i1(1. / width)
    z = TWOPI * (phases - loc)
    cz = np.cos(z)
    sz = np.sin(z)
    f = (np.exp(cz) / width) / my_i0
    return np.asarray([-cz / width**2 * f, TWOPI * (sz / width + my_i1 / my_i0) * f])
def mmse_stsa(xi, gamma):
    nu = np.multiply(xi, np.divide(gamma, np.add(1, xi)))
    # MMSE-STSA gain function.
    G = np.multiply(
        np.multiply(np.multiply(np.divide(np.sqrt(np.pi), 2), np.divide(np.sqrt(nu), gamma)),
                    np.exp(np.divide(-nu, 2))),
        np.add(np.multiply(np.add(1, nu), spsp.i0(np.divide(nu, 2))),
               np.multiply(nu, spsp.i1(np.divide(nu, 2)))))
    idx = np.isnan(G) | np.isinf(G)  # replace by Wiener gain.
    G[idx] = np.divide(xi[idx], np.add(1, xi[idx]))  # Wiener gain.
    return G
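# Illustrative call (a sketch, not from the original codebase): xi and gamma
# would normally be the a-priori and a-posteriori SNR per frequency bin; random
# positive values are used here just to exercise the gain function.
import numpy as np
import scipy.special as spsp

xi = np.abs(np.random.randn(8)) + 0.1      # a-priori SNR (assumed placeholder values)
gamma = np.abs(np.random.randn(8)) + 0.1   # a-posteriori SNR (assumed placeholder values)
gain = mmse_stsa(xi, gamma)
print(gain)                                # per-bin spectral amplitude gains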
def get_vcirc_expo(R, Mgas=3e10, Rd=4.0):
    sigma0 = Mgas / (2.0 * np.pi * Rd**2)
    sigma = sigma0 * np.exp(-R / Rd)
    y = R / (2.0 * Rd)
    I0 = special.i0(y)
    K0 = special.k0(y)
    I1 = special.i1(y)
    K1 = special.k1(y)
    return np.sqrt(4.0 * np.pi * G * sigma0 * Rd * y**2 * (I0 * K0 - I1 * K1))
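# Example rotation curve for the exponential disk above (a sketch; the value of
# G below is an assumption chosen so that R is in kpc and Mgas in Msun, since
# the original snippet leaves G as a module-level constant).
import numpy as np
from scipy import special

G = 4.30091e-6                          # assumed units: kpc (km/s)^2 / Msun
R = np.linspace(0.1, 30.0, 8)           # radii [kpc]
print(get_vcirc_expo(R))                # circular velocity [km/s] at each radius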
def giddings(t, w, x):
    print(w, x)  # w != 0
    # Direct evaluation overflows in i1 when 2*sqrt(x*t)/w is large:
    #   y = (1/w) * sqrt(x/t) * exp(-(t + x)/w) * i1(2*sqrt(x*t)/w)
    # Using the exponentially scaled Bessel function i1e (i1(z) = i1e(z)*exp(z)),
    # the exponentials combine into exp(-(sqrt(t) - sqrt(x))**2 / w), which stays finite.
    from scipy.special import i1e
    y = np.zeros(len(t))
    tp = t[t > 0]
    y[t > 0] = (1. / w) * sqrt(x / tp) * i1e(2. * sqrt(x * tp) / w) \
        * exp(-(sqrt(tp) - sqrt(x))**2 / w)
    return y
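# Sanity check for the rescaled form used above (a sketch): for large arguments
# plain i1 overflows while the exponentially scaled i1e stays finite.
import numpy as np
from scipy.special import i1, i1e

z = 800.0
print(i1(z))    # overflows to inf for arguments this large
print(i1e(z))   # i1(z)*exp(-z), a finite, well-scaled value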
def cutoffHE1(b):
    a = rho * b
    i = ivp(1, u1 * a) / (u1 * a * i1(u1 * a))
    X = (1 / (u1 * a)**2 + 1 / (u2 * a)**2)
    P = j1(u2 * a) * y1(u2 * b) - y1(u2 * a) * j1(u2 * b)
    Ps = (jvp(1, u2 * a) * y1(u2 * b) - yvp(1, u2 * a) * j1(u2 * b)) / (u2 * a)
    return (i * P + Ps) * (n12 * i * P + n22 * Ps) - n32 * X * X * P * P
def testValueP(self):
    "Test vonMisesKappaJeffreys probability"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    for i in range(100):
        no = uniform(0.1, 100)
        self.kappa.set_scale(no)
        ratio = i1(no) / i0(no)
        self.assertAlmostEqual(
            self.J.get_probability(),
            sqrt(ratio * (no - ratio - no * ratio * ratio)),
            delta=0.001)
def testEvaluateDKappa(self):
    "tests vonMises.evaluate_derivative_kappa"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    for i in xrange(100):
        randno = [uniform(-4 * pi, 4 * pi), uniform(-pi, pi), uniform(0.1, 100)]
        fn = vonMises(*randno)
        self.assertAlmostEqual(fn.evaluate_derivative_kappa(),
                               i1(randno[2]) / i0(randno[2]) - cos(randno[0] - randno[1]),
                               delta=0.001)
def testVonMisesVariance(self):
    locs_v = np.array([-3., -2., -1., 0.3, 2.3])
    concentrations_v = np.array([0.0, 0.1, 1.0, 2.0, 10.0])
    von_mises = tfd.VonMises(
        self.make_tensor(locs_v), self.make_tensor(concentrations_v))
    try:
        from scipy import special  # pylint:disable=g-import-not-at-top
    except ImportError:
        tf.logging.warn("Skipping scipy-dependent tests")
        return
    expected_vars = 1.0 - special.i1(concentrations_v) / special.i0(concentrations_v)
    self.assertAllClose(expected_vars, self.evaluate(von_mises.variance()))
def _R2deriv(self, R, z, phi=0., t=0.):
    """
    NAME:
       R2deriv
    PURPOSE:
       evaluate R2 derivative
    INPUT:
       R - Cylindrical Galactocentric radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       -d K_R (R,z) d R
    HISTORY:
       2012-12-27 - Written - Bovy (IAS)
    """
    if self._new:
        if nu.fabs(z) < 10.**-6.:
            y = 0.5 * self._alpha * R
            return nu.pi * self._alpha * (special.i0(y) * special.k0(y) - special.i1(y) * special.k1(y)) \
                + nu.pi / 4. * self._alpha**2. * R * (special.i1(y) * (3. * special.k0(y) + special.kn(2, y))
                                                      - special.k1(y) * (3. * special.i0(y) + special.iv(2, y)))
        raise AttributeError("'R2deriv' for RazorThinExponentialDisk not implemented for z =/= 0")
def testValueE(self):
    "Test if vonMisesKappaJeffreys score is log(scale)"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    for i in range(100):
        no = uniform(0.1, 100)
        self.kappa.set_scale(no)
        ratio = i1(no) / i0(no)
        self.assertAlmostEqual(
            self.J.unprotected_evaluate(None),
            -0.5 * log(ratio * (no - ratio - no * ratio * ratio)),
            delta=0.001)
def testDerivative(self):
    "test the derivative of the restraint"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    for i in xrange(100):
        no = uniform(0.1, 100)
        self.kappa.set_scale(no)
        self.m.evaluate(self.DA)
        ratio = i1(no) / i0(no)
        self.assertAlmostEqual(self.kappa.get_scale_derivative(),
                               0.5 * (-1 / ratio + 3 * ratio + 1 / no
                                      + 1 / (no - no**2 / ratio + ratio * no**2)),
                               delta=0.001)
def testVonMisesStddev(self):
    locs_v = np.array([-3., -2., -1., 0.3, 2.3]).reshape([1, -1])
    concentrations_v = np.array([0.0, 0.1, 1.0, 2.0, 10.0]).reshape([-1, 1])
    von_mises = tfd.VonMises(
        self.make_tensor(locs_v), self.make_tensor(concentrations_v))
    try:
        from scipy import special  # pylint:disable=g-import-not-at-top
    except ImportError:
        tf.logging.warn("Skipping scipy-dependent tests")
        return
    expected_stddevs = (np.sqrt(1.0 - special.i1(concentrations_v)
                                / special.i0(concentrations_v))
                        + np.zeros_like(locs_v))
    self.assertAllClose(expected_stddevs, self.evaluate(von_mises.stddev()))
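# Monte Carlo cross-check of the same identity (a sketch independent of the
# TensorFlow tests): the circular variance 1 - E[cos(theta - mu)] of von Mises
# samples should match 1 - I1(kappa)/I0(kappa).
import numpy as np
from scipy import special

kappa, mu = 2.0, 0.3
samples = np.random.default_rng(1).vonmises(mu, kappa, size=200000)
print(1.0 - np.mean(np.cos(samples - mu)))           # sample circular variance
print(1.0 - special.i1(kappa) / special.i0(kappa))   # analytic value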
def testEvaluateDKappa(self):
    "Test vonMisesSufficient.evaluate_derivative_kappa"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    for i in xrange(100):
        x = uniform(-4 * pi, 4 * pi)
        N = randint(1, 20)
        R = randint(1, N)
        chiexp = uniform(-pi, pi)
        kappa = uniform(0.1, 100)
        fn = vonMisesSufficient(x, N, R, chiexp, kappa)
        self.assertAlmostEqual(
            fn.evaluate_derivative_kappa(),
            N * i1(kappa) / i0(kappa) - R * cos(x - chiexp),
            delta=0.001)
def testValuePR0(self):
    "Test vonMisesKappaConjugate probability by changing R0"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    c = 10.0
    no = 1.0
    self.kappa.set_scale(no)
    for i in range(100):
        R0 = uniform(0.0, 10.0)
        self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.m, self.kappa, c, R0)
        ratio = i1(no) / i0(no)
        py = exp(no * R0) / i0(no) ** c
        cpp = self.J.get_probability()
        self.assertAlmostEqual(cpp, py, delta=0.001)
def testValueEc(self):
    "Test vonMisesKappaConjugate energy by changing c"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    R0 = 1
    no = 1.0
    self.kappa.set_scale(no)
    for i in range(100):
        c = uniform(1.0, 100)
        self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.m, self.kappa, c, R0)
        ratio = i1(no) / i0(no)
        py = -no * R0 + c * log(i0(no))
        cpp = self.J.evaluate(False)
        self.assertAlmostEqual(cpp, py, delta=0.001)
def K1RI1r(self, rin):
    rv = np.zeros(len(self.lab))
    if self.islarge.any():
        index = (self.R - rin) / self.labbig < 10
        if index.any():
            r = rin / self.labbig[index]
            R = self.R / self.labbig[index]
            rv[self.islarge * index] = np.sqrt(1 / (4 * r * R)) * np.exp(r - R) * \
                (1 + 3 / (8 * R) - 15 / (128 * R ** 2) + 315 / (3072 * R ** 3)) * \
                (1 - 3 / (8 * r) - 15 / (128 * r ** 2) - 315 / (3072 * r ** 3))
    if ~self.islarge.any():
        index = (self.R - rin) / self.labsmall < 10
        if index.any():
            r = rin / self.labsmall[index]
            rv[~self.islarge * index] = self.k1Roverlab[index] * i1(r)
    return rv
def testValueEKappa(self):
    "Test vonMisesKappaConjugate energy by changing kappa"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    c = 10
    R0 = 1
    self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.kappa, c, R0)
    self.m.add_restraint(self.J)
    for i in xrange(100):
        no = uniform(0.1, 100)
        self.kappa.set_scale(no)
        ratio = i1(no) / i0(no)
        py = -no * R0 + c * log(i0(no))
        cpp = self.J.evaluate(None)
        self.assertAlmostEqual(cpp, py, delta=0.001)
def testDerivative(self):
    "Test the derivative of vonMisesKappaJeffreysRestraint"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    sf = IMP.core.RestraintsScoringFunction([self.J])
    for i in range(100):
        no = uniform(0.1, 100)
        self.kappa.set_scale(no)
        sf.evaluate(True)
        ratio = i1(no) / i0(no)
        self.assertAlmostEqual(
            self.kappa.get_scale_derivative(),
            0.5 * (-1 / ratio + 3 * ratio + 1 / no
                   + 1 / (no - no ** 2 / ratio + ratio * no ** 2)),
            delta=0.001)
def testValueDKappa1(self):
    """test derivatives for kappa by varying kappa"""
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    self.setup_restraint()
    self.p3.set_coordinates(IMP.algebra.Vector3D(0, 1, -1))
    for i in xrange(100):
        kappa = uniform(0.1, 10)
        self.kappa.set_scale(kappa)
        self.talos.evaluate(self.DA)
        py = self.N * i1(kappa) / i0(kappa) - self.R * cos(pi / 2 - self.chiexp)
        cpp = self.kappa.get_scale_derivative()
        if py == 0:
            self.assertEqual(cpp, 0)
        else:
            self.assertAlmostEqual(cpp / py, 1.0, delta=1e-6)
def testValuePc(self):
    "test probability by changing c"
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    R0 = 1.0
    no = 1.0
    self.kappa.set_scale(no)
    for i in xrange(100):
        c = uniform(2.0, 75)
        self.J = IMP.isd.vonMisesKappaConjugateRestraint(self.kappa, c, R0)
        self.m.add_restraint(self.J)
        ratio = i1(no) / i0(no)
        py = exp(no * R0) / i0(no) ** c
        cpp = self.J.get_probability()
        self.assertAlmostEqual(cpp, py, delta=0.001)
        self.m.remove_restraint(self.J)
def testValueDKappa2(self):
    """Test TALOS derivatives for kappa by varying the angle"""
    try:
        from scipy.special import i0, i1
    except ImportError:
        self.skipTest("this test requires the scipy Python module")
    self.setup_restraint()
    for i in xrange(100):
        x = i / (2 * pi)
        self.p3.set_coordinates(IMP.algebra.Vector3D(cos(2 * pi - x), 1, sin(2 * pi - x)))
        kappa = self.kappa.get_scale()
        self.talos.evaluate(self.DA)
        py = self.N * i1(kappa) / i0(kappa) - self.R * cos(x - self.chiexp)
        cpp = self.kappa.get_scale_derivative()
        if py == 0:
            self.assertEqual(cpp, 0)
        else:
            self.assertAlmostEqual(cpp / py, 1.0, delta=1e-6)