def GetHeadForRadiusTime(self, radius, time):
    '''Compute head using the Theis solution
    radius  distance to injection well [L]
    time    time since injection started [T]'''
    # check if inputs are scalars or lists
    RadiusList, RadIsList = self.ListOrSingle(radius)
    TimeList, TimIsList = self.ListOrSingle(time)
    if not RadIsList and not TimIsList:
        ListOut = False
    else:
        ListOut = True
    # initialize output list
    Heads = []
    for tim in TimeList:
        for rad in RadiusList:
            u = self.uFactor * rad**2 / tim
            head = self.HeadFactor * sci.expn(1, u)
            if self.InjectionEnd != -1 and tim > self.InjectionEnd:
                u_new = self.uFactor * rad**2 \
                    / (tim - self.InjectionEnd)
                head = head - self.HeadFactor * sci.expn(1, u_new)
            if head < 0.0:
                head = 0.0
            Heads.append([tim, rad, head])
    if ListOut:
        return Heads
    else:
        return Heads[0][2]
def posterior(x, n, p1, p2):
    """
    Calculates the posterior probability that the probability of developing
    severe side effects falls within a specific range, given the data.

    :param x: number of patients with severe side effects
    :param n: total number of patients observed
    :param p1: lower bound of the range
    :param p2: upper bound of the range
    :return: posterior probability that p is within the range [p1, p2]
        given x and n
    """
    if not isinstance(n, int) or n < 1:
        raise ValueError("n must be a positive integer")
    if not isinstance(x, int) or x < 0:
        m = "x must be an integer that is greater than or equal to 0"
        raise ValueError(m)
    if x > n:
        raise ValueError("x cannot be greater than n")
    if not isinstance(p1, float) or p1 < 0 or p1 > 1:
        raise ValueError("p1 must be a float in the range [0, 1]")
    if not isinstance(p2, float) or p2 < 0 or p2 > 1:
        raise ValueError("p2 must be a float in the range [0, 1]")
    if p2 <= p1:
        raise ValueError("p2 must be greater than p1")
    inter = intersection(x, n, p1, p2)
    return special.expn(inter, p2) / special.expn(inter, p1)
def dtint(gamma, xs, cthfun, beta=None):
    '''
    calculates the (normalized) time for a sound wave to travel
    from the surface to the shock front (or back)
    input: BS gamma, BS beta, position of the shock in rstar units
    '''
    nxs = size(xs)
    if nxs <= 1:
        nx = 10000
        x = (xs - 1.) * arange(nx) / double(nx - 1) + 1.
        if beta is None:
            beta = 1. - gamma * exp(gamma) * (expn(1, gamma) - expn(1, gamma * xs))
        csq = 1. / 3. * exp(gamma * x) * (
            expn(2, gamma * x) / x + beta * exp(-gamma) - expn(2, gamma)
        )  # / x**3
        cth = cthfun(x)
        # print("mean cos = "+str(cth.mean()))
        w = where(csq > 0.)
        dt = simps((sqrt((3. * cth**2 + 1.) / csq) / cth)[w], x=x[w]) / 2.
    else:
        dt = zeros(nxs)
        for k in arange(nxs):
            dt[k] = dtint(gamma, xs[k], cthfun, beta=beta)
    return dt
def rhs(theta):
    src = np.zeros(N_g - 2)
    for i in range(0, len(src)):
        for j in range(1, N_g):
            src[i] += (delta_tau/(2.*N))*(theta[j]**4 - theta[j-1]**4) \
                *(expn(3, abs(i+1-j)*delta_tau) - expn(3, abs(i+2-j)*delta_tau))
    return src
def integrated_flux(self, phi0, gamma, e_cutoff, emin, emax):
    r''' '''
    rmax = -emax * (emax/self._E0)**(-gamma) * expn(gamma, emax/e_cutoff)
    rmin = -emin * (emin/self._E0)**(-gamma) * expn(gamma, emin/e_cutoff)
    return phi0 * (rmax - rmin)
def J_over_JUV_outside_slab(tau, tau_SF):
    """
    Compute the mean intensity at height |z| > Lz/2 with tau(z) = tau > tau_SF/2
    """
    # if not np.all(np.abs(tau) >= 0.5*tau_SF):
    #     raise ValueError("optical depth must be larger than or equal to tau_SF/2")
    return 0.5/tau_SF*(expn(2, tau - 0.5*tau_SF) - expn(2, tau + 0.5*tau_SF))
def TP(Teq, Teeff, g00, kv1, kv2, kth, alpha):
    """
    This function takes stellar, planetary, and atmospheric parameters
    and returns the temperature-pressure profile.

    Parameters
    ----------
    Teq
    Teeff
    g00
    kv1
    kv2
    kth
    alpha

    Returns
    -------
    T: np.ndarray
        The Temperature in Kelvin
    P: np.ndarray
        The Pressure in bar
    """
    Teff = Teeff
    f = 1.0  # solar re-radiation factor
    A = 0.0  # planetary albedo
    g0 = g00

    # Compute equilibrium temperature and set up gamma's
    T0 = Teq
    gamma1 = kv1 / kth
    gamma2 = kv2 / kth

    # Initialize arrays
    logtau = np.arange(-10, 20, .1)
    tau = 10**logtau

    # computing temperature
    T4ir = 0.75 * (Teff**(4.)) * (tau + (2.0 / 3.0))
    f1 = 2.0 / 3.0 + 2.0 / (3.0 * gamma1) * (
        1. + (gamma1 * tau / 2.0 - 1.0) * sp.exp(-gamma1 * tau)
    ) + 2.0 * gamma1 / 3.0 * (1.0 - tau**2.0 / 2.0) * special.expn(2.0, gamma1 * tau)
    f2 = 2.0 / 3.0 + 2.0 / (3.0 * gamma2) * (
        1. + (gamma2 * tau / 2.0 - 1.0) * sp.exp(-gamma2 * tau)
    ) + 2.0 * gamma2 / 3.0 * (1.0 - tau**2.0 / 2.0) * special.expn(2.0, gamma2 * tau)
    T4v1 = f * 0.75 * T0**4.0 * (1.0 - alpha) * f1
    T4v2 = f * 0.75 * T0**4.0 * alpha * f2
    T = (T4ir + T4v1 + T4v2)**(0.25)
    P = tau * g0 / (kth * 0.1) / 1.E5

    # Return TP profile
    return T, P
def J_over_JUV_inside_slab(tau, tau_SF):
    """
    Compute the mean intensity at tau(z) (optical depth from the midplane),
    assuming that the source distribution lies in a layer of optical depth tau_SF,
    with 0 < zz := tau(z)/(tau_SF/2) < 1.0
    """
    # if not np.all(np.abs(tau) <= 0.5*tau_SF):
    #     raise ValueError("tau must be smaller than or equal to tau_SF/2")
    return 0.5/tau_SF*(2.0 - expn(2, 0.5*tau_SF - tau) - expn(2, 0.5*tau_SF + tau))
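# A minimal consistency check, not part of the original code: at the slab
# surface, tau = tau_SF/2, the inside and outside expressions above should
# agree. The helper name is hypothetical; it only assumes numpy and scipy.
import numpy as np
from scipy.special import expn

def _check_slab_boundary(tau_SF=1.0):
    # Inside form at the surface: 0.5/tau_SF * (2 - E2(0) - E2(tau_SF))
    inside = 0.5/tau_SF*(2.0 - expn(2, 0.0) - expn(2, tau_SF))
    # Outside form at the same point: 0.5/tau_SF * (E2(0) - E2(tau_SF))
    outside = 0.5/tau_SF*(expn(2, 0.0) - expn(2, tau_SF))
    assert np.isclose(inside, outside)  # both reduce to 0.5/tau_SF*(1 - E2(tau_SF))
    return inside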
def F(self, rho, tau):
    # tau = tau(:);
    # tau(find(tau > 100)) = 100;
    if tau > 100:
        tau = 100
    # h_inf = besselk(0,rho);
    h_inf = kn(0, rho)
    expintrho = expn(1, rho)
    w = (expintrho - h_inf) / (expintrho - expn(1, rho / 2))
    I = h_inf - w * expn(1, rho / 2 * np.exp(abs(tau))) \
        + (w - 1) * expn(1, rho * np.cosh(tau))
    h = h_inf + self.sign(tau) * I
    return h
def F(self, rho, tau):
    # tau = tau(:);
    # tau(find(tau > 100)) = 100;
    if tau > 100:
        tau = 100
    # h_inf = besselk(0,rho);
    h_inf = kn(0, rho)
    expintrho = expn(1, rho)
    w = (expintrho - h_inf) / (expintrho - expn(1, rho / 2))
    I = h_inf - w * expn(1, rho / 2 * np.exp(abs(tau))) \
        + (w - 1) * expn(1, rho * np.cosh(tau))
    h = h_inf + self.sign(tau) * I
    return h
def curly_F_tau(Teff, tau):
    """
    Function for problem 10
    calculates the eddington flux at a given tau
    NOTE this is not a F_\nu, assumes a planckian source function
    """
    return 2 * np.pi * (
        trapezoidal(
            lambda t: integrated_planck(Teff * (0.5 + 3 / 4 * t)**(1 / 4))
            * sc.expn(2, t - tau),
            tau, 20, 5000)
        - trapezoidal(
            lambda t: integrated_planck(Teff * (0.5 + 3 / 4 * t)**(1 / 4))
            * sc.expn(2, tau - t),
            0, tau, 5000))
def BSsolution(gamma, eta):
    nx = 1000
    xs, beta = xis(gamma, eta, n=3, x0=20., ifbeta=True)
    x = xs**(arange(nx) / double(nx - 1))
    u = (1. - exp(gamma) / beta * (expn(2, gamma) - expn(2, gamma * x) / x))**4  # u/u[0]
    v = exp(gamma * x) / x**3 * (expn(1, gamma * x) + beta * exp(-gamma)
                                 - expn(2, gamma)) / u
    v = v / v[-1] * 1. / sqrt(xs) / 7.  # normalisation
    return x, v, u
def exp_int(s, x): r"""Calculate the exponential integral :math:`E_s(x)`. Given by: :math:`E_s(x) = \int_1^\infty \frac{e^{-xt}}{t^s}\,\mathrm dt` Parameters ---------- s : :class:`float` exponent in the integral (should be > -100) x : :class:`numpy.ndarray` input values """ if np.isclose(s, 1): return sps.exp1(x) if np.isclose(s, np.around(s)) and s > -0.5: return sps.expn(int(np.around(s)), x) x = np.array(x, dtype=np.double) x_neg = x < 0 x = np.abs(x) x_compare = x**min((10, max(((1 - s), 1)))) res = np.empty_like(x) # use asymptotic behavior for zeros x_zero = np.isclose(x_compare, 0, atol=1e-20) x_inf = x > max(30, -s / 2) # function is like exp(-x)*(1/x + s/x^2) x_fin = np.logical_not(np.logical_or(x_zero, x_inf)) x_fin_pos = np.logical_and(x_fin, np.logical_not(x_neg)) if s > 1.0: # limit at x=+0 res[x_zero] = 1.0 / (s - 1.0) else: res[x_zero] = np.inf res[x_inf] = np.exp(-x[x_inf]) * (x[x_inf]**-1 - s * x[x_inf]**-2) res[x_fin_pos] = inc_gamma(1 - s, x[x_fin_pos]) * x[x_fin_pos]**(s - 1) res[x_neg] = np.nan # nan for x < 0 return res
def scale_bt_rate(inDict, ip, f=1.7):
    """
    Apply ionization descaling of [7]_, a Burgess-Tully type scaling to
    ionization rates and temperatures. The result of the scaling is to return
    a scaled temperature between 0 and 1 and a slowly varying scaled rate as
    a function of scaled temperature. In addition, the scaled rates vary
    slowly along an iso-electronic sequence.

    Parameters
    ----------
    inDict : `dict`
        the input dictionary should have the following key pairs:
        `temperature`, array-like and `rate`, array-like
    ip : `float`
        the ionization potential in eV.
    f : `float` (optional)
        the scaling parameter, 1.7 generally works well

    Notes
    -----
    `btTemperature` and `btRate` keys are added to `inDict`
    """
    if 'temperature' in inDict and 'rate' in inDict:
        # note: the original test `('temperature' and 'rate') in inDict.keys()`
        # only checked for the 'rate' key
        rT = inDict['temperature'] * const.boltzmannEv / ip
        btTemperature = 1. - np.log(f) / np.log(rT + f)
        btRate = np.sqrt(rT) * inDict['rate'] * ip**1.5 / (expn(1, 1. / rT))
        inDict['btTemperature'] = btTemperature
        inDict['btRate'] = btRate
        inDict['ip'] = ip
    else:
        print(' input dict does not have the correct keys')
    return
def star_f(z, rho_mean, m_x, r_x, k_x, r_t0):
    m_x = np.float64(m_x)
    r_x = np.float64(r_x)
    k_x = np.float64(k_x)

    if len(m_x.shape) == 0:
        m_x = np.array([m_x])
        r_x = np.array([r_x])

    alpha = 1.0
    nu_alpha = 1.0 - (2.0/alpha)
    G_alpha = sp.gamma(1.0 - nu_alpha)

    u_k = np.zeros((len(k_x), len(m_x)))

    for i in range(len(m_x)):
        r_t = r_t0[i] * r_x[i]
        x = r_x[i]/r_t
        x_delta = r_x[i]/r_t
        K = r_t * k_x
        E_alpha = sp.expn(np.absolute(nu_alpha), (x_delta ** alpha))
        rho_t = (m_x[i]*f_stars(m_x, np.array([m_x[i]]), 1.0)*alpha) / \
            (4.0*np.pi*(r_t**3.0)*(G_alpha - (x_delta**2.0)*E_alpha))
        u_k[:, i] = (4.0 * np.pi * (r_t**3.0) * rho_t) / (m_x[i] * (1.0 + (K**2.0)))

    return u_k
def scale_bt_rate(inDict, ip, f=1.7):
    """
    Apply ionization descaling of [1]_, a Burgess-Tully type scaling to
    ionization rates and temperatures. The result of the scaling is to return
    a scaled temperature between 0 and 1 and a slowly varying scaled rate as
    a function of scaled temperature. In addition, the scaled rates vary
    slowly along an iso-electronic sequence.

    Parameters
    ----------
    inDict : `dict`
        the input dictionary should have the following key pairs:
        `temperature`, array-like and `rate`, array-like
    ip : `float`
        the ionization potential in eV.
    f : `float` (optional)
        the scaling parameter, 1.7 generally works well

    Notes
    -----
    `btTemperature` and `btRate` keys are added to `inDict`

    References
    ----------
    .. [1] Dere, K. P., 2007, A&A, `466, 771
        <http://adsabs.harvard.edu/abs/2007A%26A...466..771D>`_
    """
    if 'temperature' in inDict and 'rate' in inDict:
        # note: the original test `('temperature' and 'rate') in inDict.keys()`
        # only checked for the 'rate' key
        rT = inDict['temperature']*const.boltzmannEv/ip
        btTemperature = 1. - np.log(f)/np.log(rT + f)
        btRate = np.sqrt(rT)*inDict['rate']*ip**1.5/(expn(1, 1./rT))
        inDict['btTemperature'] = btTemperature
        inDict['btRate'] = btRate
        inDict['ip'] = ip
    else:
        print(' input dict does not have the correct keys')
    return
def exp_integral(x):
    """Returns truncated iterated logarithm y = log( -log(x) )
    where if x<delta, x = delta and if 1-delta < x, x = 1-delta.
    """
    gamma = 0.577215665  # Euler-Mascheroni constant
    # scipy's signature is expn(n, x); the original call expn(x, 1)
    # appears to have the arguments swapped
    return -gamma - expn(1, x) - np.log(x)
def in_plane_thin_film(kn):
    """
    The mfp shrinkage calculation for in-plane thin film.

    .. math::

        B(Kn) = 1 - (3/8) Kn (1 - 4 E_3(Kn^{-1}) + 4 E_5(Kn^{-1})),

    where E_3 and E_5 are exponential integrals.

    Args:
        kn (float): The Knudsen number.
    """
    e3 = sc.expn(3, 1 / kn)  # exponential integral E_3(1/kn)
    e5 = sc.expn(5, 1 / kn)  # exponential integral E_5(1/kn)
    return 1 - 3. / 8. * kn * (1 - 4 * e3 + 4 * e5)
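# Illustrative usage, not from the original source: the in-plane suppression
# factor is close to 1 in the diffusive limit (small Kn) and decreases toward
# 0 as Kn grows. The helper name is hypothetical; only scipy/numpy are assumed.
import scipy.special as sc

def _demo_in_plane_suppression():
    for kn in (0.01, 0.1, 1.0, 10.0):
        e3 = sc.expn(3, 1 / kn)
        e5 = sc.expn(5, 1 / kn)
        b = 1 - 3. / 8. * kn * (1 - 4 * e3 + 4 * e5)
        print(f"Kn = {kn:5.2f} -> B(Kn) = {b:.4f}")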
def matchlogtoexpn(p0=[30, 2e2, -20]):
    import scipy.optimize as opt
    p = opt.fmin(matchlogtoexpn_score, p0, xtol=1e-9, ftol=1e-9)
    m, r0, off = p
    plot(funcs.expn(2, rr/20.))
    plot(m*log(r0/rr) - off)
    return p
def J_over_JUV_avg_slab(tau_SF):
    """
    Compute the mean intensity averaged over the entire volume of the slab,
    from -Lz/2 < z < Lz/2 or from -tau_SF/2 < tau < tau_SF/2
    """
    return 1.0 / tau_SF * (1.0 - (0.5 - expn(3, tau_SF)) / tau_SF)
def logmmse_gain(parameters=None):
    """ calculate suppression gain by MMSE log spectral amplitude method """
    gamma = parameters['gamma']
    ksi = parameters['ksi']
    A = ksi / (1 + ksi)
    vk = A * gamma
    ei_vk = 0.5 * sp.expn(1, vk)
    gain = A * np.exp(ei_vk)
    return gain
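# Hypothetical usage sketch, not from the original source: the gain depends on
# the a-posteriori SNR `gamma` and the a-priori SNR `ksi`, matching the
# dictionary keys used above. Only numpy and scipy are assumed.
import numpy as np
import scipy.special as sp

def _demo_logmmse_gain():
    params = {
        'gamma': np.array([0.5, 1.0, 5.0, 20.0]),  # a-posteriori SNR
        'ksi': np.array([0.1, 0.5, 2.0, 10.0]),    # a-priori SNR
    }
    A = params['ksi'] / (1 + params['ksi'])
    vk = A * params['gamma']
    gain = A * np.exp(0.5 * sp.expn(1, vk))
    print(gain)  # values between 0 and ~1 for these SNRs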
def xis(gamma, eta, n=3, x0=20., ifbeta=False):
    '''
    solves equation (34) from Basko & Sunyaev (1976)
    arguments:
    gamma = c R_{NS}^3/(kappa dot{M} A_\perp * afac**2)
            \simeq (RNS/Rsph) * (RNS**2/across)
    eta = (8/21 * u_0 d_0 kappa / \sqrt{2GMR} c)**0.25, u_0 = B^2/8/pi
    (gamma = rstar**2/mdot/across[0]/afac**2)
    (eta = (8/21/sqrt(2))**0.25 * (umag*sqrt(rstar)*d0**2)**0.25,
     where d0 = (across/4./pi/rstar/sin(theta))[0])
    '''
    if ((eta * gamma**0.25) < 1.) | (gamma > 1000.):
        return nan
    x = fsolve(fxis, x0, args=(gamma, eta, n), maxfev=1000, xtol=1e-10)
    # print(fxis(x, gamma, eta, n))
    if ifbeta:
        print("beta")
        beta = 1. - gamma * exp(gamma) * (expn(1, gamma) - expn(1, gamma * x))
        return x, beta
    else:
        return x
def eta(gamma, tau):
    import scipy.special as spe
    part1 = 2.0 / 3.0 + 2.0 / (3.0 * gamma) * (
        1.0 + (gamma * tau / 2.0 - 1.0) * np.exp(-1.0 * gamma * tau))
    part2 = 2.0 * gamma / 3.0 * (1.0 - tau**2/2.0) * \
        spe.expn(2, (gamma * tau))
    return part1 + part2
def calc_Jrad(z, S4pi, zstar, fp, fm, dz_pc):
    J = 0.0
    for S4pi_, zstar_ in zip(S4pi, zstar):
        Dz = 0.2*dz_pc
        tau_SF = fp(zstar_ + Dz) - fp(zstar_ - Dz)
        # print(tau_SF)
        if z >= zstar_ + Dz:
            tau = fp(z) - fp(zstar_)
            J += S4pi_*0.5*expn(1, tau)
            # J += SFUV4pi_*J_over_JUV_outside_slab(tau, tau_SF)
        elif z <= zstar_ - Dz:
            tau = fm(z) - fm(zstar_)
            J += S4pi_*0.5*expn(1, tau)
            # J += SFUV4pi_*J_over_JUV_outside_slab(tau, tau_SF)
        else:
            J += S4pi_*J_over_JUV_avg_slab(tau_SF)
            # J += SFUV4pi_*J_over_JUV_inside_slab(0.0, tau_SF)
            pass
    return J
def denoise(wav, noise_profile: NoiseProfile, eta=0.15):
    wav, dtype = to_float(wav)
    wav += np.finfo(np.float64).eps
    p = noise_profile
    nframes = int(math.floor(len(wav) / p.len2) - math.floor(p.window_size / p.len2))
    x_final = np.zeros(nframes * p.len2)

    aa = 0.98
    mu = 0.98
    ksi_min = 10**(-25 / 10)

    x_old = np.zeros(p.len1)
    xk_prev = np.zeros(p.len1)
    noise_mu2 = p.noise_mu2

    for k in range(0, nframes * p.len2, p.len2):
        insign = p.win * wav[k:k + p.window_size]

        spec = np.fft.fft(insign, p.n_fft, axis=0)
        sig = np.absolute(spec)
        sig2 = sig**2

        gammak = np.minimum(sig2 / noise_mu2, 40)

        if xk_prev.all() == 0:
            ksi = aa + (1 - aa) * np.maximum(gammak - 1, 0)
        else:
            ksi = aa * xk_prev / noise_mu2 + (1 - aa) * np.maximum(gammak - 1, 0)
            ksi = np.maximum(ksi_min, ksi)

        log_sigma_k = gammak * ksi / (1 + ksi) - np.log(1 + ksi)
        vad_decision = np.sum(log_sigma_k) / p.window_size
        if vad_decision < eta:
            noise_mu2 = mu * noise_mu2 + (1 - mu) * sig2

        a = ksi / (1 + ksi)
        vk = a * gammak
        ei_vk = 0.5 * expn(1, np.maximum(vk, 1e-8))
        hw = a * np.exp(ei_vk)
        sig = sig * hw
        xk_prev = sig**2

        xi_w = np.fft.ifft(hw * spec, p.n_fft, axis=0)
        xi_w = np.real(xi_w)

        x_final[k:k + p.len2] = x_old + xi_w[0:p.len1]
        x_old = xi_w[p.len1:p.window_size]

    output = from_float(x_final, dtype)
    output = np.pad(output, (0, len(wav) - len(output)), mode="constant")
    return output
def cross_plane_thin_film(kn):
    """The mfp shrinkage calculation for cross-plane thin film.

    .. math::

        B(Kn) = 1 + 3 Kn (E_5(Kn^{-1}) - 1/4),

    where E_5 is an exponential integral.

    Args:
        kn (float): The Knudsen number.
    """
    e5 = sc.expn(5, 1 / kn)
    return 1 + 3. * kn * (e5 - 1. / 4.)
def calculate_yg_van_regemorter(atomic_data, t_electrons, continuum_interaction_species):
    """
    Calculate collision strengths in the van Regemorter approximation.

    This function calculates thermally averaged effective collision strengths
    (divided by the statistical weight of the lower level) Y_ij / g_i
    using the van Regemorter approximation.

    Parameters
    ----------
    atomic_data : tardis.io.atom_data.AtomData
    t_electrons : numpy.ndarray
    continuum_interaction_species : pandas.MultiIndex

    Returns
    -------
    pandas.DataFrame
        Thermally averaged effective collision strengths
        (divided by the statistical weight of the lower level) Y_ij / g_i

    Notes
    -----
    See Eq. 9.58 in [2].

    References
    ----------
    .. [1] van Regemorter, H., “Rate of Collisional Excitation in Stellar
       Atmospheres.”, The Astrophysical Journal, vol. 136, p. 906, 1962.
       doi:10.1086/147445.
    .. [2] Hubeny, I. and Mihalas, D., "Theory of Stellar Atmospheres". 2014.
    """
    I_H = atomic_data.ionization_data.loc[(1, 1)]

    mask_selected_species = atomic_data.lines.index.droplevel(
        ["level_number_lower", "level_number_upper"]
    ).isin(continuum_interaction_species)
    lines_filtered = atomic_data.lines[mask_selected_species]
    f_lu = lines_filtered.f_lu.values
    nu_lines = lines_filtered.nu.values

    yg = f_lu * (I_H / (H * nu_lines))**2
    coll_const = A0**2 * np.pi * np.sqrt(8 * K_B / (np.pi * M_E))
    yg = 14.5 * coll_const * t_electrons * yg[:, np.newaxis]

    u0 = nu_lines[np.newaxis].T / t_electrons * (H / K_B)
    gamma = 0.276 * np.exp(u0) * expn(1, u0)
    gamma[gamma < 0.2] = 0.2
    yg *= u0 * gamma / BETA_COLL
    yg = pd.DataFrame(yg, index=lines_filtered.index, columns=t_electrons)
    return yg
def custom_structure(kn):
    """
    The mfp shrinkage calculation for your own structure.
    You should implement this function on your own.

    Args:
        kn (float): The Knudsen number.
    """
    e5 = sc.expn(5, 1 / kn)
    return 1 + 3. * kn * (e5 - 1. / 4.)
def GetSpec(specType):
    """
    Given a 2FGL Spectral type return lambdas for the spectrum and integrated spectrum

    :param specType: Can be 'PowerLaw', 'PLExpCutoff', or 'LogParabola'
    :returns Spec, IntegratedSpec: the spectrum and integrated spectrum.
        See function def for param ordering.
    """
    if specType == 'PowerLaw':
        Spec = lambda e, gamma: e**-gamma
        IntegratedSpec = lambda e1, e2, gamma: \
            (e1*e2)**-gamma * (e1*e2**gamma - e1**gamma*e2)/(gamma-1)
    elif specType == 'PLExpCutoff':
        Spec = lambda e, gamma, cutoff: e**-gamma * np.exp(-e/cutoff)
        IntegratedSpec = lambda e1, e2, gamma, cutoff: \
            (e1**(1-gamma)*expn(gamma, e1/cutoff) - e2**(1-gamma)*expn(gamma, e2/cutoff))
    elif specType == 'LogParabola':
        Spec = lambda e, alpha, beta, pivot: (e/pivot)**-(alpha+beta*np.log(e/pivot))
        IntegratedSpec = lambda e1, e2, alpha, beta, pivot: \
            quad(Spec, e1, e2, args=(alpha, beta, pivot))[0]
    else:
        raise Exception("Spectral type not supported.")
    return Spec, IntegratedSpec
def EvalWellFunction(self, u, rB):
    '''Evaluates the Hantush-Jacob well function.
    u   dimensionless time
    rB  dimensionless radius'''
    # return zero for large u to avoid numerical issues
    if u > 14:
        return 0.0
    # set number of terms used in summation. as u increases more
    # terms are needed
    if u <= 2:
        endmember = 10
    elif u <= 7:
        endmember = 20
    else:
        endmember = 30
    # compute temporary variable
    r4B = rB**2 / 4.0
    # compute summation term
    last_term = 0.0
    for i in range(1, endmember):
        for j in range(1, i+1):
            last_term += (-1)**(i+j) * spy.factorial(i - j + 1) / \
                (spy.factorial(i+2))**2 * u**(i-j) * r4B**j
    # for rB values close to 0 one term is dropped to avoid numerical
    # problems
    if abs(sci.iv(0.0, rB) - 1.0) < 1e-15:
        WellFuncHJ = 2.0 * sci.kv(0.0, rB) \
            - sci.iv(0.0, rB) * sci.expn(1, r4B / u) \
            + mth.exp(-1.0 * r4B / u) * (0.5772156649015328606
                                         + mth.log(u) + sci.expn(1, u) - u
                                         - u**2 * last_term)
    else:
        # full expression
        WellFuncHJ = 2.0 * sci.kv(0.0, rB) \
            - sci.iv(0.0, rB) * sci.expn(1, r4B / u) \
            + mth.exp(-1.0 * r4B / u) * (0.5772156649015328606
                                         + mth.log(u) + sci.expn(1, u) - u
                                         + u * (sci.iv(0.0, rB) - 1.0) / r4B
                                         - u**2 * last_term)
    return WellFuncHJ
def exp_expn(n, x):
    """
    Returns :math:`e^x E_n(x)`.

    The exponential integral :math:`E_n(x)` is defined as

    .. math::
        E_n(x) \\equiv \\int_1^\\infty dt\\, \\frac{e^{-xt}}{t^n}

    Circumvents overflow error in ``np.exp`` by expanding the exponential
    integral in a series to the 5th or 6th order.

    Parameters
    ----------
    n : {1,2}
        The order of the exponential integral.
    x : ndarray
        The argument of the function.

    Returns
    -------
    ndarray
        The value of :math:`e^x E_n(x)`.
    """
    import scipy.special as sp

    x_flt64 = np.array(x, dtype='float64')

    low = x < 700
    high = ~low
    expr = np.zeros_like(x)

    if np.any(low):
        expr[low] = np.exp(x[low]) * sp.expn(n, x_flt64[low])
    if np.any(high):
        if n == 1:
            # The relative error is roughly 1e-15 for 700,
            # smaller for larger arguments.
            expr[high] = (1 / x[high] - 1 / x[high]**2 + 2 / x[high]**3
                          - 6 / x[high]**4 + 24 / x[high]**5)
        elif n == 2:
            # The relative error is roughly 6e-17 for 700,
            # smaller for larger arguments.
            expr[high] = (1 / x[high] - 2 / x[high]**2 + 6 / x[high]**3
                          - 24 / x[high]**4 + 120 / x[high]**5
                          - 720 / x[high]**6)
        else:
            raise TypeError('only supports n = 1 or 2 for x > 700.')
    return expr
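# A small sanity check, not part of the original module: at a moderate
# argument, where np.exp does not overflow, the large-x asymptotic series used
# above (shown here for n = 1) already agrees closely with the direct product
# exp(x) * E_n(x). The helper name is hypothetical.
import numpy as np
import scipy.special as sp

def _check_asymptotic_series(n=1, x=50.0):
    direct = np.exp(x) * sp.expn(n, x)
    series = 1 / x - 1 / x**2 + 2 / x**3 - 6 / x**4 + 24 / x**5  # n = 1 series
    print(direct, series, abs(direct - series) / direct)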
def xi(gamma, tau):
    """
    Calculate Equation (14) of Line et al. (2013), ApJ 775, 137.

    Parameters:
    -----------
    gamma: Float
        Visible-to-thermal stream Planck mean opacity ratio.
    tau: 1D float ndarray
        Gray IR optical depth.

    Modification History:
    ---------------------
    2014-12-10  patricio  Initial implementation.
    """
    return (2.0/3) * (1 + (1/gamma) * (1 + (0.5*gamma*tau - 1)*np.exp(-gamma*tau))
                      + gamma*(1 - 0.5*tau**2) * sp.expn(2, gamma*tau))
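# Quick illustrative evaluation, not from the original source: the expression
# above tends to (2/3)*(1 + gamma) as tau -> 0 and to (2/3)*(1 + 1/gamma) at
# large tau, which is a convenient sanity check. The helper name is
# hypothetical; only numpy and scipy are assumed.
import numpy as np
import scipy.special as sp

def _demo_xi(gamma=0.5):
    tau = np.array([1e-6, 1e-2, 1.0, 10.0, 100.0])
    val = (2.0/3) * (1 + (1/gamma) * (1 + (0.5*gamma*tau - 1)*np.exp(-gamma*tau))
                     + gamma*(1 - 0.5*tau**2) * sp.expn(2, gamma*tau))
    print(val)  # first entry ~ (2/3)*(1 + gamma), last ~ (2/3)*(1 + 1/gamma)
    print((2.0/3)*(1 + gamma), (2.0/3)*(1 + 1/gamma))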
def exp_int(s, x): r"""The exponential integral :math:`E_s(x)` Given by: :math:`E_s(x) = \int_1^\infty \frac{e^{-xt}}{t^s}\,\mathrm dt` Parameters ---------- s : :class:`float` exponent in the integral x : :class:`numpy.ndarray` input values """ if np.isclose(s, 1): return sps.exp1(x) if np.isclose(s, np.around(s)) and s > -1: return sps.expn(int(np.around(s)), x) return inc_gamma(1 - s, x) * x**(s - 1)
def H_0_src(src_func, a_array, tau_array):
    """
    Given an array of optical depths, the emergent H is calculated.
    This is calculated assuming a given source function and using the
    exponential integral. Also assumes no incident radiation.

    src_func: a function for the source function
    a_array: an array containing the a_n coefficients for the source function
    tau_array: an array of tau values to use for the integration (in reality,
        only the max and min values and the number of points)
    """
    min_tau = min(tau_array)
    sampling = len(tau_array)
    max_tau = max(tau_array)
    return 0.5 * trap_log(lambda t: src_func(t, a_array) * sc.expn(2, t),
                          min_tau, max_tau, sampling)
def inc_gamma(s, x):
    r"""Calculate the (upper) incomplete gamma function.

    Given by: :math:`\Gamma(s,x) = \int_x^{\infty} t^{s-1}\,e^{-t}\,{\rm d}t`

    Parameters
    ----------
    s : :class:`float`
        exponent in the integral
    x : :class:`numpy.ndarray`
        input values
    """
    if np.isclose(s, 0):
        return sps.exp1(x)
    if np.isclose(s, np.around(s)) and s < -0.5:
        # Gamma(s, x) = x**s * E_{1-s}(x) for integer s <= -1
        # (the original used x**(s - 1), which is off by a factor of x)
        return x**s * sps.expn(int(1 - np.around(s)), x)
    if s < 0:
        return (inc_gamma(s + 1, x) - x**s * np.exp(-x)) / s
    return sps.gamma(s) * sps.gammaincc(s, x)
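# Illustrative check, not from the original source: for a negative integer
# order, the identity Gamma(s, x) = x**s * E_{1-s}(x) used above can be
# verified against direct quadrature of the defining integral. The helper
# name is hypothetical.
import numpy as np
from scipy import special
from scipy.integrate import quad

def _check_neg_int_inc_gamma(s=-1, x=2.0):
    via_expn = x**s * special.expn(int(1 - s), x)
    via_quad, _ = quad(lambda t: t**(s - 1) * np.exp(-t), x, np.inf)
    assert np.isclose(via_expn, via_quad, rtol=1e-8)
    return via_expn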
def G_function_ILS(Fo):
    """
    Infinite Line Source solution

    Fo = Fourier number

    Reference:
    ----------
    Lamarche and Beauchamp (2007). A new contribution to the finite
    line-source model for geothermal boreholes. Energy and Buildings,
    39:188-198.
    """
    if Fo == 0:
        G_ILS = 0
    else:
        G_ILS = 1 / (4 * pi) * special.expn(1, 1 / (4 * Fo))  # Exponential integral
    return G_ILS
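# Minimal usage sketch, not from the original source: the ILS response grows
# monotonically with the Fourier number, and for large Fo it approaches
# (ln(4*Fo) - 0.5772)/(4*pi), a handy sanity check. The helper name is
# hypothetical; only numpy and scipy are assumed.
import numpy as np
from scipy import special

def _demo_ils():
    for Fo in (0.1, 1.0, 10.0, 100.0):
        g = 1 / (4 * np.pi) * special.expn(1, 1 / (4 * Fo))
        approx = (np.log(4 * Fo) - 0.5772156649) / (4 * np.pi)
        print(Fo, g, approx)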
def u_s(r_x, alpha, r_t0, m, M, sigma, alpha_hod, A, M_1, gamma_1, gamma_2, b_0, b_1, b_2):
    """
    NFW for smaller scales, more to the center, exponential decline for the
    outer regions, which can be varied with alpha parameter!
    r_t can be changed! Fedeli -> 0.03
    """
    r_t = r_t0 * r_x[-1]
    x = r_x/r_t
    x_delta = r_x[-1]/r_t
    nu_alpha = 1.0 - (2.0/alpha)
    # absolute value, yes / no? - still not completely solved! BE AWARE!
    E_alpha = sp.expn(np.absolute(nu_alpha), (x_delta ** alpha))
    G_alpha = sp.gamma(1.0 - nu_alpha)
    rho_t = (M*f_stars(m, M, sigma, alpha_hod, A, M_1, gamma_1, gamma_2, b_0, b_1, b_2)*alpha) / \
        (4.0*np.pi*(r_t**3.0)*(G_alpha - (x_delta**2.0)*E_alpha))
    profile = (rho_t/x) * np.exp(-(x**alpha))
    return profile
def one_source(x0, y0, mag, rad, core, grid, type=0):
    r = fromfunction(lambda x, y: sqrt((x-x0)**2 + (y-y0)**2), grid)
    if type == 0:
        # log sources
        # [ 1.60121182e-06 7.82761030e+03 -6.36886579e-01]
        s = mag*log(rad/r) + core  # mag*log(1/(rad+r))+core
    elif type == 1:
        # from the 3d diffusion green's function
        # [ 3.24852149e-05 8.97514165e+00 -6.11534255e-02]
        s = mag*funcs.erf(r/rad)/r + core
    elif type == 2:
        # just a plain 'ol gaussian
        s = mag*exp(-r**2/rad) + core
    elif type == 3:
        # from the 2d green's function
        # [ 5.99160034e-06 2.39560167e+01 9.78734951e-03]
        s = mag*funcs.expn(2, r/rad) + core
    else:
        s = mag/(r+rad) + core
    return s
def single(self, freqs, atm, b, alpha, orientation=None, taulimit=20.0, plot=None,
           verbose=None, discAverage=False, normW4plot=True):
    """This computes the brightness temperature along one ray path"""

    if verbose is None:
        verbose = self.verbose
    if plot is None:
        plot = self.plot

    # get path lengths (ds_layer) vs layer number (num_layer) - currently frequency independent refractivity
    self.path = ray.compute_ds(atm, b, orientation, gtype=None, verbose=verbose, plot=plot)
    if self.path.ds == None:
        print 'Off planet'
        self.Tb = []
        for j in range(len(freqs)):
            self.Tb.append(utils.T_cmb)
        return self.Tb

    # these profiles are saved
    self.tau = []
    self.W = []
    self.Tb_lyr = []

    # temporary arrays
    taus = []
    Tbs = []
    Ws = []

    # initialize
    for j in range(len(freqs)):
        taus.append(0.0)
        Tbs.append(0.0)
        Ws.append(0.0)
    self.tau.append(taus)
    self.W.append(Ws)
    self.Tb_lyr.append(Tbs)

    if alpha.config.Doppler:
        P = atm.gas[atm.config.C['P']]
        T = atm.gas[atm.config.C['T']]
        alphaUnits = 'invcm'
        #--debug--#self.debugDoppler = []
        print ''

    for i in range(len(self.path.ds) - 1):
        ds = self.path.ds[i] * utils.Units[utils.processingAtmLayerUnit] / utils.Units['cm']
        taus = []
        Ws = []
        Tbs = []
        ii = self.path.layer4ds[i]
        ii1 = self.path.layer4ds[i + 1]

        for j, f in enumerate(freqs):
            if not alpha.config.Doppler:
                a1 = self.layerAlpha[j][ii1]
                a0 = self.layerAlpha[j][ii]
            else:
                fshifted = [[f / self.path.doppler[i]], [f / self.path.doppler[i + 1]]]
                #--debug--#self.debugDoppler.append(fshifted[0][0])
                print '\rdoppler corrected frequency at layer', i,
                a1 = alpha.getAlpha(fshifted[0], T[ii1], P[ii1], atm.gas[:, ii1], atm.config.C,
                                    atm.cloud[:, ii1], atm.config.Cl, units=alphaUnits, verbose=False)
                a0 = alpha.getAlpha(fshifted[1], T[ii], P[ii], atm.gas[:, ii], atm.config.C,
                                    atm.cloud[:, ii], atm.config.Cl, units=alphaUnits, verbose=False)
            dtau = (a0 + a1) * ds / 2.0
            taus.append(self.tau[i][j] + dtau)  # this is tau_(i+1)

            T1 = atm.gas[atm.config.C['T']][ii1]
            T0 = atm.gas[atm.config.C['T']][ii]
            if discAverage == True:
                Ws.append(2.0 * a1 * ss.expn(2, taus[j]))  # this is W_(i+1) for disc average
                # dTb = ( T1*ss.expn(2,taus[j])/scriptR(T1,freqs[j]) + T0*ss.expn(2,self.tau[ii][j])/scriptR(T0,freqs[j]) )*dtau
                # Tbs.append( self.Tb_lyr[i][j] + dTb )
            else:
                Ws.append(a1 * math.exp(-taus[j]))  # this is W_(i+1) for non disc average
            dTb = (T1 * Ws[j] / scriptR(T1, freqs[j]) + T0 * self.W[i][j] / scriptR(T0, freqs[j])) * ds / 2.0
            Tbs.append(self.Tb_lyr[i][j] + dTb)
        self.tau.append(taus)
        self.W.append(Ws)
        self.Tb_lyr.append(Tbs)
    print ''

    # final spectrum
    self.Tb = []
    for j in range(len(freqs)):
        top_Tb_lyr = self.Tb_lyr[-1][j]
        if top_Tb_lyr < utils.T_cmb:
            top_Tb_lyr = utils.T_cmb
        self.Tb.append(top_Tb_lyr)
    self.tau = np.array(self.tau).transpose()
    self.W = np.array(self.W).transpose()
    self.Tb_lyr = np.array(self.Tb_lyr).transpose()

    try:
        if plot:
            # save a local copy of
            self.P = atm.gas[atm.config.C['P']][0:len(self.W[0])]
            self.z = atm.gas[atm.config.C['Z']][0:len(self.W[0])]

            #####-----Weighting functions
            plt.figure('radtran')
            plt.subplot(121)
            for i, f in enumerate(freqs):
                # label=r'$\tau$: %.1f GHz' % (f)
                # plt.semilogy(self.tau[i],self.P,label=label)
                if normW4plot:
                    wplot = self.W[i] / np.max(self.W[i])
                else:
                    wplot = self.W[i]
                label = r'$W$: %.1f GHz' % (f)
                label = r'%.1f cm' % (30.0 / f)
                # label=r'%.0f$^o$' % ((180.0/math.pi)*math.asin(b[0]))
                plt.semilogy(wplot, self.P, label=label, linewidth=3)
                # label=r'Tlyr$_b$: %.1f GHz' % (f)
                # plt.semilogy(self.Tb_lyr[i],self.P,label=label)
            plt.legend()
            plt.axis(ymin=100.0 * math.ceil(self.P[-1] / 100.0), ymax=1.0E-7 * math.ceil(self.P[0] / 1E-7))
            # plt.xlabel('units')
            plt.ylabel('P [bars]')

            #####-----Alpha
            plt.figure('alpha')
            for i, f in enumerate(freqs):
                label = r'$\alpha$: %.1f GHz' % (f)
                label = r'%.1f cm' % (30.0 / f)
                pl = list(self.layerAlpha[i])
                del pl[0]  # delete because alpha is at the layer boundaries, so there are n+1 of them
                plt.loglog(pl, self.P, label=label)
            plt.legend()
            v = list(plt.axis())
            v[2] = 100.0 * math.ceil(self.P[-1] / 100.0)
            v[3] = 1.0E-7 * math.ceil(self.P[0] / 1E-7)
            plt.axis(v)
            # plt.legend()
            # plt.xlabel('units')
            plt.ylabel('P [bars]')

            #####-----Brightness temperature
            plt.figure('brightness')
            lt = '-'
            if (len(self.Tb) == 1):
                lt = 'o'
            plt.plot(freqs, self.Tb, lt)
            plt.xlabel('Frequency [GHz]')
            plt.ylabel('Brightness temperature [K]')
    except:
        print 'Plotting broke'

    del taus, Tbs, Ws
    return self.Tb
###-----------------------------------
### p11
from scipy.integrate import quad

def integrand(t, n, x):
    return exp(-x*t) / t**n

def expint(n, x):
    return quad(integrand, 1, Inf, args=(n, x))[0]

vec_expint = vectorize(expint)
vec_expint(3, arange(1.0, 4.0, 0.5))
special.expn(3, arange(1.0, 4.0, 0.5))

result = quad(lambda x: expint(3, x), 0, inf)
print result
I3 = 1.0/3.0
print I3
print I3 - result[0]

###-----------------------------------
### 11
from scipy.integrate import quad, dblquad

def I(n):
    return dblquad(lambda t, x: exp(-x*t) / t**n, 0, Inf, lambda x: 1, lambda x: Inf)
def matchlogtoexpn_score(p):
    m, r0, off = p
    return ((funcs.expn(2, rr/20.) - m*log(r0/rr) + off)**2).sum()
tau_start = 0.
tau_end = 1.0
sigma = 100
x_start = tau_start/sigma
x_end = tau_end/sigma
dx = x_end - x_start
psi_inc = 500
print("In MFP: ", sigma*(x_end - x_start))

# Compute values for multiple cells
for i in range(4):
    print(x_start, x_end)
    psi_x2 = lambda x: 6.*psi_inc*(expn(3, sigma*x) - 0.5*expn(2, sigma*x))
    phi_avg = psi_inc/(dx)*quadrature(lambda x: expn(2, sigma*x), x_start, x_end,
                                      tol=1.E-12, maxiter=500)[0]
    phi_eval = lambda x: 1/(sigma*(dx))*6.*psi_inc*(0.5*expn(3, sigma*x) - expn(4, sigma*x))
    phi_mu = phi_eval(x_end) - phi_eval(x_start)
    phi_x_int = lambda x: 6.*psi_inc/(dx*dx)*(x - 0.5*(x_start + x_end))*expn(2, sigma*x)
    phi_x = quadrature(lambda x: phi_x_int(x), x_start, x_end, tol=1.E-12, maxiter=500)[0]
    print("---Results for %f < x < %f ---" % (x_start, x_end))
    print("Moments: ", phi_avg, phi_x, phi_mu)
    print("Corner values = ", phi_avg + phi_x - phi_mu, phi_avg - phi_x - phi_mu)
    print("Phi_avg outflow = ", phi_avg + phi_x)
    print("")
    x_start += dx
    x_end += dx
def time_expn_large_n(self):
    expn(self.n, self.x)
def baz(r):
    from scipy.special import expn
    x1 = (R - r)/L
    x2 = (r + R)/L
    return r*(-expn(1, x1) + expn(1, x2))
def single(self, freqs, atm, b, alpha, orientation=None, taulimit=20.0,
           discAverage=False, normW4plot=True):
    """This computes the brightness temperature along one ray path"""
    if self.layerAlpha is None:
        self.layerAbsorption(freqs, atm, alpha)
    # get path lengths (ds_layer) vs layer number (num_layer) - currently frequency independent refractivity
    print_meta = (self.verbose == 'loud')
    travel = ray.compute_ds(atm, b, orientation, gtype=None, verbose=print_meta, plot=self.plot)
    self.travel = travel
    if travel.ds is None:
        print('Off planet')
        self.Tb = []
        for j in range(len(freqs)):
            self.Tb.append(utils.T_cmb)
        return self.Tb

    # set and initialize arrays
    integrated_W = [0.0 for f in freqs]
    self.tau = [[0.0 for f in freqs]]
    self.Tb_lyr = [[0.0 for f in freqs]]
    self.W = [[0.0 for f in freqs]]

    P_layers = atm.gas[atm.config.C['P']]
    T_layers = atm.gas[atm.config.C['T']]
    z_layers = atm.gas[atm.config.C['Z']]
    self.P = [P_layers[travel.layer4ds[0]]]
    self.z = [z_layers[travel.layer4ds[0]]]

    for i in range(len(travel.ds) - 1):
        ds = travel.ds[i] * utils.Units[utils.atmLayerUnit] / utils.Units['cm']
        taus = []
        Ws = []
        Tbs = []
        ii = travel.layer4ds[i]
        ii1 = travel.layer4ds[i + 1]
        T1 = T_layers[ii1]
        T0 = T_layers[ii]
        self.P.append((P_layers[ii] + P_layers[ii1]) / 2.0)
        self.z.append((z_layers[ii] + z_layers[ii1]) / 2.0)
        if self.layerAlpha is None:
            print("is None at ", i)
        for j, f in enumerate(freqs):
            if not alpha.config.Doppler:
                a1 = self.layerAlpha[j][ii1]
                a0 = self.layerAlpha[j][ii]
            else:
                print("\n\nDoppler currently broken since the getAlpha call is different.")
                fshifted = [[f / travel.doppler[i]], [f / travel.doppler[i + 1]]]
                print('\rdoppler corrected frequency at layer', i, end='')
                a1 = alpha.getAlpha(fshifted[0], T_layers[ii1], P_layers[ii1], atm.gas[:, ii1],
                                    atm.config.C, atm.cloud[:, ii1], atm.config.Cl,
                                    units=utils.alphaUnit)
                a0 = alpha.getAlpha(fshifted[1], T_layers[ii], P_layers[ii], atm.gas[:, ii],
                                    atm.config.C, atm.cloud[:, ii], atm.config.Cl,
                                    units=utils.alphaUnit)
            dtau = (a0 + a1) * ds / 2.0
            taus.append(self.tau[i][j] + dtau)  # this is tau_(i+1)
            if discAverage is True:
                Ws.append(2.0 * a1 * ss.expn(2, taus[j]))  # this is W_(i+1) for disc average
            else:
                Ws.append(a1 * math.exp(-taus[j]))  # this is W_(i+1) for non disc average
            integrated_W[j] += (Ws[j] + self.W[i][j]) * ds / 2.0
            dTb = (T1 * Ws[j] + T0 * self.W[i][j]) * ds / 2.0
            Tbs.append(self.Tb_lyr[i][j] + dTb)
        self.tau.append(taus)
        self.W.append(Ws)
        self.Tb_lyr.append(Tbs)

    # final spectrum
    self.Tb = []
    for j in range(len(freqs)):
        top_Tb_lyr = self.Tb_lyr[-1][j]
        if top_Tb_lyr < utils.T_cmb:
            top_Tb_lyr = utils.T_cmb
        else:
            top_Tb_lyr /= integrated_W[j]  # Normalize by integrated weights (makes assumptions)
            if integrated_W[j] < 0.96 and self.verbose:
                print("Weight correction at {:.2f} is {:.4f} (showing below 0.96)"
                      .format(freqs[j], integrated_W[j]))
        self.Tb.append(top_Tb_lyr)
    self.tau = np.array(self.tau).transpose()
    self.W = np.array(self.W).transpose()
    self.Tb_lyr = np.array(self.Tb_lyr).transpose()
    self.P = np.array(self.P)
    self.z = np.array(self.z)

    if self.plot:
        # ####-----Weighting functions
        plt.figure('INT_W')
        plt.plot(freqs, integrated_W)
        plt.title('Integrated weighting function')
        plt.xlabel('Frequency [GHz]')
        plt.figure('radtran')
        plt.subplot(121)
        for i, f in enumerate(freqs):
            if normW4plot:
                wplot = self.W[i] / np.max(self.W[i])
            else:
                wplot = self.W[i]
            if self.output_type == 'frequency':
                label = (r'{:.1f} GHz').format(f)
            else:
                label = (r'{:.1f} cm').format(30.0 / f)
            plt.semilogy(wplot, self.P, label=label, linewidth=3)
        plt.legend()
        plt.axis(ymin=100.0 * math.ceil(np.max(self.P) / 100.0),
                 ymax=1.0E-7 * math.ceil(np.min(self.P) / 1E-7))
        plt.ylabel('P [bars]')

        # ####-----Alpha
        plt.figure('alpha')
        for i, f in enumerate(freqs):
            if self.output_type == 'frequency':
                label = (r'$\alpha$: {:.1f} GHz').format(f)
            else:
                label = (r'{:.1f} cm').format(30.0 / f)
            pl = list(self.layerAlpha[i])
            del pl[0]
            plt.loglog(pl, self.P, label=label)
        plt.legend()
        v = list(plt.axis())
        v[2] = 100.0 * math.ceil(np.max(self.P) / 100.0)
        v[3] = 1.0E-7 * math.ceil(np.min(self.P) / 1E-7)
        plt.axis(v)
        plt.ylabel('P [bars]')

        # ####-----Brightness temperature
        plt.figure('brightness')
        lt = '-'
        if (len(self.Tb) == 1):
            lt = 'o'
        plt.plot(freqs, self.Tb, lt)
        plt.xlabel('Frequency [GHz]')
        plt.ylabel('Brightness temperature [K]')

    del taus, Tbs, Ws
    return self.Tb
from scipy.special import expn
import numpy as np

## http://docs.scipy.org/doc/scipy/reference/tutorial/integrate.html
## http://docs.scipy.org/doc/scipy/reference/generated/scipy.special.expn.html
## The first argument is always 1; the second is the value you would pass in Matlab
res = expn(1, 1.0)
print "valor: ", res