def custom_incomplete_gamma(a, x):
    """ Incomplete gamma function.

    For the case covered by scipy, a > 0, scipy is called. Otherwise the
    gamma function recurrence relations are called, extending the scipy
    behavior.

    Parameters
    ----------
    a : array_like
    x : array_like

    Returns
    -------
    gamma : array_like

    Examples
    --------
    >>> a, x = 1, np.linspace(1, 10, 100)
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = 0
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = -1
    >>> g = custom_incomplete_gamma(a, x)
    """
    if isinstance(a, np.ndarray):
        if not isinstance(x, np.ndarray):
            x = np.repeat(x, len(a))
        if len(a) != len(x):
            msg = ("The ``a`` and ``x`` arguments of the "
                   "``custom_incomplete_gamma`` function must have the same "
                   "length.\n")
            raise HalotoolsError(msg)

        result = np.zeros(len(a))

        mask = (a < 0)
        if np.any(mask):
            result[mask] = ((custom_incomplete_gamma(a[mask] + 1, x[mask])
                             - x[mask] ** a[mask] * np.exp(-x[mask])) / a[mask])
        mask = (a == 0)
        if np.any(mask):
            result[mask] = -expi(-x[mask])
        mask = (a > 0)
        if np.any(mask):
            result[mask] = gammaincc(a[mask], x[mask]) * gamma(a[mask])

        return result
    else:
        if a < 0:
            return (custom_incomplete_gamma(a + 1, x) - x ** a * np.exp(-x)) / a
        elif a == 0:
            return -expi(-x)
        else:
            return gammaincc(a, x) * gamma(a)
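One way to sanity-check the `a <= 0` extension above is against mpmath, whose unregularized upper incomplete gamma `mpmath.gammainc(a, x)` accepts negative orders. This is a small sketch, not part of the original snippet, and it assumes numpy, scipy, and mpmath are importable alongside the function above.

import numpy as np
import mpmath
from scipy.special import expi, gammaincc, gamma

# Reference values Gamma(a, x) from mpmath for a mix of negative and positive orders.
a = np.array([-1.5, -0.5, 2.0])
x = np.array([2.0, 1.0, 3.0])
reference = np.array([float(mpmath.gammainc(ai, xi)) for ai, xi in zip(a, x)])

print(np.allclose(custom_incomplete_gamma(a, x), reference))  # expected: True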
def analytic_unit0(t, T0, dH, dS):
    R = 8.314472
    kB = 1.3806504e-23
    h = 6.62606896e-34
    A = kB/h*np.exp(dS/R)
    B = dH/R
    return np.exp(A*(
        (-B**2*np.exp(B/T0)*expi(-B/T0) - T0*(B - T0))*np.exp(-B/T0) +
        (B**2*np.exp(B/(t + T0))*expi(-B/(t + T0)) -
         (t + T0)*(-B + t + T0))*np.exp(-B/(t + T0))
    )/2)
def analytic_unit0(t, k, m, dH, dS):
    R = 8.314472
    kB = 1.3806504e-23
    h = 6.62606896e-34
    A = kB/h*np.exp(dS/R)
    B = dH/R
    return k*np.exp(B*(k*t + 2*m)/(m*(k*t + m)))/(
        A*(-B**2*np.exp(B/(k*t + m))*expi(-B/(k*t + m)) -
           B*k*t - B*m + k**2*t**2 + 2*k*m*t + m**2)*np.exp(B/m) +
        (A*B**2*np.exp(B/m)*expi(-B/m) - A*m*(-B + m) +
         k*np.exp(B/m))*np.exp(B/(k*t + m))
    )
def custom_incomplete_gamma(a, x):
    """ Incomplete gamma function.

    For the case covered by scipy, a > 0, scipy is called. Otherwise the
    gamma function recurrence relations are called, extending the scipy
    behavior.

    The only other difference from the scipy function is that
    `custom_incomplete_gamma` only supports the case where the input ``a``
    is a scalar.

    Parameters
    ----------
    a : float
    x : array_like

    Returns
    -------
    gamma : array_like

    Examples
    --------
    >>> a, x = 1, np.linspace(1, 10, 100)
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = 0
    >>> g = custom_incomplete_gamma(a, x)
    >>> a = -1
    >>> g = custom_incomplete_gamma(a, x)
    """
    if a < 0:
        return (custom_incomplete_gamma(a + 1, x) - x ** a * np.exp(-x)) / a
    elif a == 0:
        return -expi(-x)
    else:
        return gammaincc(a, x) * gamma(a)
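The `a == 0` branch rests on the identity Γ(0, x) = E1(x) = -Ei(-x). scipy exposes E1 directly as `scipy.special.exp1`, so that branch can be cross-checked in one line (a small sketch, assuming the function above is in scope):

import numpy as np
from scipy.special import expi, exp1

x = np.linspace(0.5, 10.0, 20)
print(np.allclose(custom_incomplete_gamma(0, x), exp1(x)))  # Gamma(0, x) = E1(x); expected: True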
def delaytau_logarithmic(logages, logt, tau=None, tage=None, **extras):
    r"""SFR = (tage-t) * e^{(tage-t)/\tau}
    """
    t = 10**logt
    tprime = t / tau
    a = (t - tage - tau) * (logt - logages) - tau * loge
    b = (tage + tau) * loge
    return a * np.exp(tprime) + b * expi(tprime)
def pdf(self, t, l):
    r, c, tau = self.r, self.c, self.tau
    t = np.atleast_1d(t)
    tcut = (l+r)/c
    tmin = (r-l)/c
    x = t.copy()
    x[t > tcut] = tcut
    y = c*x*tau*np.exp(tmin/tau)*(r+l-c*tau)
    y += np.exp(x/tau)*(l**2-r**2+c**2*x*tau)*tau
    y += x*(l**2-r**2)*(expi(tmin/tau) - expi(x/tau))
    y /= 4*c*l*x*tau
    y *= np.exp(-t/tau)/tau
    y[t < tmin] = 0
    return y if y.size > 1 else y.item()
def sojourn_indefinite(x, g):
    """
    Integral of equation (9) of McVean and Charlesworth 1999.
    integral of 2*expm1(-a*(1-x)) / (expm1(-a)*x*(1-x))
    limit x -> 1 of
    (-2/expm1(a))*(-exp(a)*Ei(a*(x-1)) + Ei(a*x) + exp(a)*log((1-x)/x))
    """
    if not 0 < x <= 1:
        raise ValueError('x should be in the half open interval (0, 1]')
    if g:
        prefix = 2 / math.expm1(g)
        suffix = -special.expi(g*x)
        if x == 1:
            eulergamma = -special.digamma(1)
            suffix += math.exp(g)*(math.log(g) + eulergamma)
        else:
            suffix += math.exp(g)*(special.expi(g*(x-1)) - math.log((1-x)/x))
        return prefix * suffix
    else:
        return 2*math.log(x)
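Because `sojourn_indefinite` is an antiderivative, differences of its values should reproduce a direct numerical quadrature of the integrand quoted in the docstring. The check below is a sketch, not part of the original code; it assumes the snippet's `math` and `scipy.special` imports are in place and the function is in scope.

import math
from scipy import integrate

g = 1.5
a, b = 0.2, 0.8

def integrand(x):
    return 2 * math.expm1(-g * (1 - x)) / (math.expm1(-g) * x * (1 - x))

numeric, _ = integrate.quad(integrand, a, b)
analytic = sojourn_indefinite(b, g) - sojourn_indefinite(a, g)
print(numeric, analytic)  # the two values should agree closely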
def gammainc_fun(a, z):
    if np.any(z < 0):
        print('ERROR: z must be >= 0')
        return
    if a == 0:
        return -expi(-z)
    elif a < 0:
        return (gammainc_fun(a+1, z) - np.power(z, a) * np.exp(-z)) / a
    else:
        return gammaincc(a, z) * gamma(a)
def prospect_d(N, cab, car, cbrown, cw, cm, ant,
               nr, kab, kcar, kbrown, kw, km, kant,
               alpha=40.):
    lambdas = np.arange(400, 2501)  # wavelengths
    n_lambdas = len(lambdas)
    n_elems_list = [len(spectrum) for spectrum in
                    [nr, kab, kcar, kbrown, kw, km, kant]]
    if not all(n_elems == n_lambdas for n_elems in n_elems_list):
        raise ValueError("Leaf spectra don't have the right shape!")

    kall = (cab*kab + car*kcar + ant*kant + cbrown*kbrown +
            cw*kw + cm*km)/N
    j = kall > 0
    t1 = (1-kall)*np.exp(-kall)
    t2 = kall**2*(-expi(-kall))
    tau = np.ones_like(t1)
    tau[j] = t1[j] + t2[j]

    r, t, Ra, Ta, denom = refl_trans_one_layer(alpha, nr, tau)

    # ***********************************************************************
    # reflectance and transmittance of N layers
    # Stokes equations to compute properties of next N-1 layers (N real)
    # Normal case
    # ***********************************************************************
    # Stokes G.G. (1862), On the intensity of the light reflected from
    # or transmitted through a pile of plates, Proc. Roy. Soc. Lond.,
    # 11:545-556.
    # ***********************************************************************
    D = np.sqrt((1+r+t)*(1+r-t)*(1.-r+t)*(1.-r-t))
    rq = r*r
    tq = t*t
    a = (1+rq-tq+D)/(2*r)
    b = (1-rq+tq+D)/(2*t)

    bNm1 = np.power(b, N-1)
    bN2 = bNm1*bNm1
    a2 = a*a
    denom = a2*bN2-1
    Rsub = a*(bN2-1)/denom
    Tsub = bNm1*(a2-1)/denom

    # Case of zero absorption
    j = r+t >= 1.
    Tsub[j] = t[j]/(t[j]+(1-t[j])*(N-1))
    Rsub[j] = 1-Tsub[j]

    # Reflectance and transmittance of the leaf:
    # combine top layer with next N-1 layers
    denom = 1-Rsub*r
    tran = Ta*Tsub/denom
    refl = Ra+Ta*Rsub*t/denom

    return lambdas, refl, tran
def theis_expected(r, t):
    P0 = 20.0E6
    q = 0.16E-3
    viscosity = 0.001
    fluid_bulk = 2.0E9
    porosity = 0.05
    permeability = 1.0E-14
    reciprocal_biot_modulus = porosity / fluid_bulk  # Biot coefficient is assumed to be unity
    conductivity = permeability / viscosity
    alpha = conductivity / reciprocal_biot_modulus
    return P0 + q / (4 * np.pi * conductivity) * expi(-r * r / (4 * alpha * t))
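The `expi` term above is the Theis well function W(u) = -Ei(-u); at late times (small u) it is commonly replaced by the Cooper-Jacob logarithmic approximation W(u) ≈ -γ - ln u. A small sketch comparing the two, using only numpy/scipy (not part of the original snippet):

import numpy as np
from scipy.special import expi

euler_gamma = 0.5772156649015329
u = 1.0e-4                        # r*r / (4*alpha*t) at late time / small radius
well_function = -expi(-u)         # Theis well function W(u)
cooper_jacob = -euler_gamma - np.log(u)
print(well_function, cooper_jacob)  # the two should agree to roughly O(u)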
def GammaInc(a, z):
    if np.any(z < 0):
        print('ERROR: z must be >= 0')
        return
    if a == 0:
        return -expi(-z)
    elif a < 0:
        return (GammaInc(a+1, z) - np.power(z, a) * np.exp(-z)) / a
    else:
        return gammaincc(a, z) * gamma(a)
def pdf(self, t, l):
    """
    Returns the pdf for a hit at time `t` from an event at radius `l`.
    """
    r, c, tau = self.r, self.c, self.tau
    t = np.atleast_1d(t)
    tcut = (l+r)/c
    tmin = (r-l)/c
    x = t.copy()
    x[t > tcut] = tcut
    y = c*x*tau*np.exp(tmin/tau)*(r+l-c*tau)
    y += np.exp(x/tau)*(l**2-r**2+c**2*x*tau)*tau
    y += x*(l**2-r**2)*(expi(tmin/tau) - expi(x/tau))
    y /= 4*c*l*x*tau
    y *= np.exp(-t/tau)/tau
    y[t < tmin] = 0
    return y if y.size > 1 else y.item()
def J2_indefinite_integral(x, a):
    """
    limit x->0+ of
    Ei(ax) + Ei(-ax) - exp(a)*Ei(ax-a) - exp(-a)*Ei(a-ax) - 2*log(x/(1-x))
    """
    eulergamma = -special.digamma(1)
    if x == 0:
        if a == 0:
            # limit a->0 of
            # 2*(log|a| + eulergamma) - exp(a)*Ei(-a) - exp(-a)*Ei(a)
            return 0
        else:
            x1 = math.log(abs(a)) + eulergamma
            x2 = math.exp(a)*special.expi(-a)
            x3 = math.exp(-a)*special.expi(a)
            return 2*x1 - x2 - x3
    else:
        x1 = special.expi(a*x) + special.expi(-a*x)
        x2 = math.exp(a)*special.expi(a*(x-1))
        x3 = math.exp(-a)*special.expi(-a*(x-1))
        x4 = 2*math.log(x/(1-x))
        return x1 - x2 - x3 - x4
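One way to exercise the special-cased `x == 0` branch above is to check that it agrees with the general branch evaluated just off the limit. This is a sketch, not part of the original code; it assumes the snippet's `math`/`scipy.special` imports are available, and the tolerance is loose because the logarithmic terms cancel numerically.

a = 1.3
at_limit = J2_indefinite_integral(0.0, a)     # closed-form limit branch
near_limit = J2_indefinite_integral(1e-6, a)  # general branch just off the limit
print(abs(at_limit - near_limit) < 1e-4)      # expected: True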
def J1_indefinite_integral(x, a):
    """
    Here is a wolfram alpha command that finds a limit of the indefinite
    integral of the integrand in J1 in equation (17) of Kimura and Ohta.
    limit x->1- of
    Ei(ax-a) - Ei(-ax) + log(x/(1-x))
    - exp(-a)*( Ei(ax) - Ei(a-ax) + log((1-x)/x) )
    """
    eulergamma = -special.digamma(1)
    if x == 1:
        if a == 0:
            return 0
        else:
            x1 = math.exp(-a)
            x2 = math.log(abs(a)) + eulergamma
            x3 = math.exp(a) + 1
            x4 = special.expi(-a) + math.exp(-a) * special.expi(a)
            return x1 * x2 * x3 - x4
    else:
        x1 = special.expi(a*(x-1)) - special.expi(-a*x) + math.log(x/(1-x))
        x2 = special.expi(-a*(x-1)) - special.expi(a*x) + math.log(x/(1-x))
        return x1 + math.exp(-a)*x2
# They both take about the same time, but the second implementation seems
# (slightly) faster.

# ## Comparison with [`scipy.special.expi`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.expi.html#scipy.special.expi)
#
# The $\mathrm{Ei}$ function is also implemented as
# [`scipy.special.expi`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.expi.html#scipy.special.expi):

# In[35]:

from scipy.special import expi

# In[36]:

Y_3 = expi(X)

# In[37]:

np.allclose(Y, Y_3)

# The difference is not too large:

# In[38]:

np.max(np.abs(Y - Y_3))
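# As one more quick check (not in the original notebook): since
# $\mathrm{Ei}'(x) = e^x / x$, a central finite difference of `expi`
# should match that closed form.

# In[39]:

x0, h = 2.0, 1e-6
np.isclose((expi(x0 + h) - expi(x0 - h)) / (2 * h), np.exp(x0) / x0)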
def getLogUniformDecay(self, fieldType, times, chi0, dchi, tau1, tau2):
    """
    Decay function for a step-off waveform for log-uniform distribution of
    time-relaxation constants. The output of this function is the
    magnetization at each time for each cell, normalized by the inducing
    field.

    REQUIRED ARGUMENTS:

    fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.

    times -- Observation times

    chi0 -- DC (zero-frequency) magnetic susceptibility for all cells

    dchi -- DC (zero-frequency) magnetic susceptibility attributed to VRM
    for all cells

    tau1 -- Lower-bound for log-uniform distribution of time-relaxation
    constants for all cells

    tau2 -- Upper-bound for log-uniform distribution of time-relaxation
    constants for all cells

    OUTPUTS:

    eta -- characteristic decay function evaluated at all specified times.
    """

    if fieldType not in ["dhdt", "dbdt"]:
        raise NameError(
            'For step-off, fieldType must be one of "dhdt" or "dbdt". '
            'Cannot be "h" or "b".')

    nT = len(times)
    nC = len(dchi)
    t0 = self.t0

    times = np.kron(np.ones((nC, 1)), times)
    chi0 = np.kron(np.reshape(chi0, newshape=(nC, 1)), np.ones((1, nT)))
    dchi = np.kron(np.reshape(dchi, newshape=(nC, 1)), np.ones((1, nT)))
    tau1 = np.kron(np.reshape(tau1, newshape=(nC, 1)), np.ones((1, nT)))
    tau2 = np.kron(np.reshape(tau2, newshape=(nC, 1)), np.ones((1, nT)))

    if fieldType == "h":
        eta = (
            0.5*(1-np.sign(times-t0))*chi0 +
            0.5*(1+np.sign(times-t0))*(dchi/np.log(tau2/tau1)) *
            (spec.expi(-(times-t0)/tau2) - spec.expi(-(times-t0)/tau1))
        )
    elif fieldType == "b":
        mu0 = 4*np.pi*1e-7
        eta = (
            0.5*(1-np.sign(times-t0))*chi0 +
            0.5*(1+np.sign(times-t0))*(dchi/np.log(tau2/tau1)) *
            (spec.expi(-(times-t0)/tau2) - spec.expi(-(times-t0)/tau1))
        )
        eta = mu0*eta
    elif fieldType == "dhdt":
        eta = (
            0. +
            0.5*(1+np.sign(times-t0))*(dchi/np.log(tau2/tau1)) *
            (np.exp(-(times-t0)/tau1) - np.exp(-(times-t0)/tau2))/(times-t0)
        )
    elif fieldType == "dbdt":
        mu0 = 4*np.pi*1e-7
        eta = (
            0. +
            0.5*(1+np.sign(times-t0))*(dchi/np.log(tau2/tau1)) *
            (np.exp(-(times-t0)/tau1) - np.exp(-(times-t0)/tau2))/(times-t0)
        )
        eta = mu0*eta

    return eta
def Optimality(PAR, TA, VPD, CO2, PA, LAI, ALB, CC, CI, kn, npast):
    '''
    The optimality model to calculate daily Vcmax25 of top leaves.
    PAR: time series of incident photosynthetically active radiation (mol m-2 d-1).
    TA: time series of surface air temperature (K).
    VPD: time series of vapor pressure deficit (Pa).
    CO2: time series of ambient CO2 concentration (umol mol-1).
    PA: time series of surface air pressure (Pa).
    LAI: time series of leaf area index (LAI).
    ALB: time series of albedo in visible range (a.u.).
    CC: time series of crown cover (a.u.).
    CI: time series of clumping index (a.u.).
    kn: model parameter: nitrogen distribution coefficient accounting for
        vertical variation in nitrogen within the plant canopy (a.u.).
    npast: model parameter: the past n days that are considered for the lag
        effect (day).
    '''
    # antecedent environment
    PARLag = np.array(
        [np.nanmean(PAR[max(0, i - npast):i + 1]) for i in range(PAR.size)])
    TALag = np.array(
        [np.nanmean(TA[max(0, i - npast):i + 1]) for i in range(TA.size)])
    VPDLag = np.array(
        [np.nanmean(VPD[max(0, i - npast):i + 1]) for i in range(VPD.size)])
    CO2Lag = np.array(
        [np.nanmean(CO2[max(0, i - npast):i + 1]) for i in range(CO2.size)])
    PALag = np.array(
        [np.nanmean(PA[max(0, i - npast):i + 1]) for i in range(PA.size)])
    # growth temperature, defined as mean air temperature over the past 30 days.
    tgrowth = np.array(
        [np.nanmean(TA[max(0, i - 30):i + 1]) for i in range(TA.size)]) - 273.16
    # the maximum quantum yield of photosystem II for a light-adapted leaf (Eq (3))
    alf = 0.352 + 0.022 * (TALag - 273.16) - 0.00034 * (TALag - 273.16)**2
    # (Eq (2)) but leaf absorptance is set to 1 as it is already embedded in FPAR.
    alf = alf * 1 * 0.5 / 4
    # gas constant
    R = 8.314e-3  # [kJ mol-1 K-1]
    # partial pressure of O2
    O = PALag * 0.21  # [Pa]
    # Michaelis–Menten constants of carboxylation (Supplementary Table 1)
    KC = np.exp(38.05 - 79.43 / (R * TALag)) * 1e-6 * PALag  # [Pa]
    # Michaelis–Menten constants of oxygenation (Supplementary Table 1)
    KO = np.exp(20.30 - 36.38 / (R * TALag)) * 1000 * 1e-6 * PALag  # [Pa]
    # CO2 compensation point (Supplementary Table 1)
    GammaS = np.exp(19.02 - 37.83 / (R * TALag)) * 1e-6 * PALag  # [Pa]
    # Michaelis–Menten coefficient of Rubisco (Supplementary Table 1)
    K = KC * (1 + O / KO)  # [Pa]
    # viscosity of water relative to its value at 25 °C (Supplementary Table 1)
    etaS = np.exp(-580 / (-138 + TALag)**2 * (TALag - 298))  # [-]
    # the ratio of intercellular CO2 to ambient CO2 (Eq (5))
    ksi = np.sqrt(240 * (K + GammaS) / (1.6 * etaS * 2.4))
    chi = (ksi + GammaS * np.sqrt(VPDLag) / CO2Lag) / (ksi + np.sqrt(VPDLag))  # [-]
    # Landscape-level leaf area index
    LAI0 = LAI.copy()
    # Plant-level leaf area index (Eq (7))
    LAI = LAI0 / CC  # [-]
    # extinction coefficient under diffuse sky radiation (Eq (9))
    c = -0.5 * CI * LAI
    I = 0.5 * (np.exp(c) * (c + 1) - c**2 * expi(c))
    kd = -np.log(2 * I) / (CI * LAI)  # [-]
    # plant-level fraction of absorbed PAR
    FPAR = (1 - ALB) * (1 - np.exp(-kd * CI * LAI))
    # antecedent FPAR
    FPARLag = np.array(
        [np.nanmean(FPAR[max(0, i - npast):i + 1]) for i in range(FPAR.size)])
    # intercellular CO2 concentration
    Ci = CO2Lag * chi * 1e-6 * PALag  # [Pa]
    # plant-level averaged Vcmax (Eq (6))
    m = (Ci - GammaS) / (Ci + 2 * GammaS)
    c = np.sqrt(1 - (0.41 / m)**(2. / 3))
    c = np.real(c)
    m_ = m * c
    A = alf * 12 * PARLag * FPARLag * m_  # [gC m-2 d-1]
    V = A / ((Ci - GammaS) / (Ci + K)) / (60 * 60 * 24 * 1e-6 * 12)  # [umol m-2 s-1]
    # top-leaf Vcmax (Eq (10))
    fC = (1 - np.exp(-kn * CI)) / (kn * CI)
    Vtoc = V / fC  # [umol m-2 s-1]
    # entropy factor of Vcmax (Eq (12))
    dS = (668.39 + tgrowth * -1.07) / 1000
    # temperature correction function (Eq (11))
    fT = np.exp(72 * (TALag - 298.15) / (R * TALag * 298.15)) * (1 + np.exp(
        (dS * 298.15 - 200) / (R * 298.15))) / (1 + np.exp(
            (dS * TALag - 200) / (R * TALag)))
    fT[fT < 0.08] = 0.08
    # top-leaf Vcmax at 25 °C (Eq (13))
    Vtoc25 = Vtoc / fT
    # constraints
    Vtoc25[LAI0 <= 1e-5] = np.nanmin(Vtoc25)
    data = savgol_filter(Vtoc25, 31, 1)
    data[LAI0 <= 1e-5] = np.nan
    return data
def tau_logarithmic(logages, logt, tau=None, **extras):
    r"""SFR = e^{(tage-t)/\tau}
    """
    tprime = 10**logt / tau
    return (logages - logt) * np.exp(tprime) + loge * expi(tprime)
def prob_fixation_select(s, p_iter, N, B=1):
    s_range = np.linspace(0, s, num=p_iter)
    g = np.vectorize(lambda s: np.log10(
        ((2*B*N) / (s * (np.exp(2*B*s) - 1)))
        * ((np.exp(2*B*s) + 1) * math.e - expi(2*B*s) + np.log(2*B*s)
           + np.exp(2*B*s) * (-expi(-2*B*s) + np.log(2*B*s)))))
    return g(s_range)
def p11(self, s, x):
    return s**(2.*self.rho) * np.exp(
        self.lambd * s**(self.rho + 1)
        - 2.*self.lambd * (0.577215664901532 + np.log(s**(self.rho+1)/2.)
                           - expi(-s**(self.rho+1)/2.))
        - s**self.rho * x)
def prob_fixation_select(s, p_iter, N, B=1):
    s_range = np.linspace(0, s, num=p_iter)
    g = np.vectorize(lambda s: np.log10(
        ((2*B*N) / (s * (np.exp(2*B*s) - 1)))
        * ((np.exp(2*B*s) + 1) * math.e - expi(2*B*s) + np.log(2*B*s)
           + np.exp(2*B*s) * (-expi(-2*B*s) + np.log(2*B*s)))))
    return g(s_range)
def f(x, b):
    t = 27
    return (1 / t * np.exp(-x / t) - b / (t * (b + x))
            + b / t**2 * np.exp(-(x + b) / t) * (expi((x + b) / t) - expi(b / t)))
def getLogUniformDecay(self, fieldType, times, chi0, dchi, tau1, tau2):
    """
    Decay function for a step-off waveform for log-uniform distribution of
    time-relaxation constants. The output of this function is the
    magnetization at each time for each cell, normalized by the inducing
    field.

    REQUIRED ARGUMENTS:

    fieldType -- must be 'h', 'b', 'dhdt' or 'dbdt'.

    times -- Observation times

    chi0 -- DC (zero-frequency) magnetic susceptibility for all cells

    dchi -- DC (zero-frequency) magnetic susceptibility attributed to VRM
    for all cells

    tau1 -- Lower-bound for log-uniform distribution of time-relaxation
    constants for all cells

    tau2 -- Upper-bound for log-uniform distribution of time-relaxation
    constants for all cells

    OUTPUTS:

    eta -- characteristic decay function evaluated at all specified times.
    """

    assert fieldType in [
        "h", "dhdt", "b", "dbdt"
    ], "For step-off, fieldType must be one of 'h', 'dhdt', 'b' or 'dbdt'"

    nT = len(times)
    nC = len(dchi)
    t0 = self.t0

    times = np.kron(np.ones((nC, 1)), times)
    chi0 = np.kron(np.reshape(chi0, newshape=(nC, 1)), np.ones((1, nT)))
    dchi = np.kron(np.reshape(dchi, newshape=(nC, 1)), np.ones((1, nT)))
    tau1 = np.kron(np.reshape(tau1, newshape=(nC, 1)), np.ones((1, nT)))
    tau2 = np.kron(np.reshape(tau2, newshape=(nC, 1)), np.ones((1, nT)))

    if fieldType == "h":
        eta = (0.5 * (1 - np.sign(times - t0)) * chi0 + 0.5 *
               (1 + np.sign(times - t0)) * (dchi / np.log(tau2 / tau1)) *
               (spec.expi(-(times - t0) / tau2) -
                spec.expi(-(times - t0) / tau1)))
    elif fieldType == "b":
        mu0 = 4 * np.pi * 1e-7
        eta = (0.5 * (1 - np.sign(times - t0)) * chi0 + 0.5 *
               (1 + np.sign(times - t0)) * (dchi / np.log(tau2 / tau1)) *
               (spec.expi(-(times - t0) / tau2) -
                spec.expi(-(times - t0) / tau1)))
        eta = mu0 * eta
    elif fieldType == "dhdt":
        eta = (0. + 0.5 * (1 + np.sign(times - t0)) *
               (dchi / np.log(tau2 / tau1)) *
               (np.exp(-(times - t0) / tau1) - np.exp(-(times - t0) / tau2)) /
               (times - t0))
    elif fieldType == "dbdt":
        mu0 = 4 * np.pi * 1e-7
        eta = (0. + 0.5 * (1 + np.sign(times - t0)) *
               (dchi / np.log(tau2 / tau1)) *
               (np.exp(-(times - t0) / tau1) - np.exp(-(times - t0) / tau2)) /
               (times - t0))
        eta = mu0 * eta

    return eta
def analytic_solution_plane(tau):
    return 0.5 / tau * (1. + np.exp(-tau) *
                        (tau - 1. + tau**2 * np.exp(tau) * sp.expi(-tau)))
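Two limiting cases give a cheap sanity check on this expression (a numerical observation, not taken from the original source): it tends to 1 as ``tau -> 0`` and behaves like ``0.5/tau`` for large ``tau``. A minimal sketch, assuming ``numpy as np`` and ``scipy.special as sp`` as in the snippet:

import numpy as np
import scipy.special as sp

print(analytic_solution_plane(1e-4))              # approaches 1 for small optical depth
print(analytic_solution_plane(50.0), 0.5 / 50.0)  # ~0.5/tau for large optical depth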
def Phi(x):
    return self.lambd * (0.577215664901532 + np.log(x) - expi(-x))
def analytical_derivative(x):
    dIUG = np.log(x) * IUG(1.0, x) - expi(-x)
    return dIUG
def integrant(t):
    return t**(2.*self.rho) * np.exp(
        self.lambd * t**(self.rho + 1)
        - 2.*self.lambd * (0.577215664901532 + np.log(t**(self.rho+1)/2.)
                           - expi(-t**(self.rho+1)/2.))
        - t**self.rho * (x + t/2.)) / t
def p11(self, s, x):
    ''' density of number of fragments of length L > s '''
    return s**(2.*self.rho) * np.exp(
        self.lambd * s**(self.rho + 1)
        - 2.*self.lambd * (0.577215664901532 + np.log(s**(self.rho + 1) / 2.)
                           - expi(-s**(self.rho + 1) / 2.))
        - s**self.rho * x)
fe = File("pressure_exact.pvd") # Analytic solution p_e = Function(P) n = P.dim() d = mesh.geometry().dim() p_e_values = np.zeros(n) dof_coords = P.dofmap().tabulate_all_coordinates(mesh) dof_coords.resize((n,d)) dof_x = dof_coords[:,0] dof_y = dof_coords[:,1] while(t < T): # Update analytic solution p_e_values = -(1./(4.*np.pi))*expi(-c*(dof_x**2 + dof_y**2)/(4.*t)) p_e.vector()[:] = p_e_values # Define stress tensor epsilon = sym(grad(u)) epsilond = epsilon - tr(epsilon)/3.*Identity(2) sigma = 2.*G*epsilond + K*tr(epsilon)*Identity(2) epsilon_t = sym(grad(u_t)) # Define facet normal and radius n = FacetNormal(mesh) r = Expression("pow(x[0]*x[0] + x[1]*x[1], 0.5)") # Weak form F = inner(sym(grad(u_t)), sigma)*dx - B*div(u_t)*p*dx + inner(u_t, n)*p*ds(3) + inner(u_t, n)*p*ds(4) F += p_t*B*div(u - u_prev)*dx + p_t*(p - p_prev)*dx + dt*inner(grad(p_t), grad(p))*dx - dt*p_t/2./3.1415926538/r*ds(4)
def test_continuity_on_positive_real_axis(self):
    assert_allclose(sc.expi(complex(1, 0)),
                    sc.expi(complex(1, -0.0)),
                    atol=0, rtol=1e-15)
def kernel(jv, X=None, Y=None, m=0, n=0):
    # computes \partial_x^m \partial_y^n G_\delta( Z_i - z_j)
    # output should be of shape [size(X), size(jv.x)]
    if X is None:
        X = jv.x
    if Y is None:
        Y = jv.y
    dx = jv.dx(X)
    dy = jv.dy(Y)
    r2 = dx**2 + dy**2
    r = np.sqrt(r2)
    rho = r2 / (delta**2)
    store = np.zeros([np.size(X), jv.N])
    near = (r < delta/100.)
    far = (r >= delta/100.)
    r_far = r*far + 2.0*near
    r2_far = r_far**2
    dx_far = dx + near*1.0
    dx_near = near*dx
    dy_far = dy + near*1.0
    dy_near = near*dy
    if (m==0) & (n==0):
        t0 = time.time()
        kernel_far = (expi(-r2_far/((delta)**2))
                      - 2.0*np.log(r_far))/(4.0*np.pi)
        factorial_k = 1
        for k in range(1, 6):
            factorial_k = k*factorial_k
            store = store + ((-1)**k)*(rho**k) / (k*factorial_k)
        euler_mascheroni = 0.5772156649
        kernel_near = (1./(4*np.pi))*(euler_mascheroni
                                      - 2.*np.log(delta)
                                      + store)
    if (m==1) & (n==0):
        kernel_far = DxG(dx_far, dy_far)
        kernel_near = DxG_near(dx_near, dy_near)
    if (m==0) & (n==1):
        kernel_far = DyG(dx_far, dy_far)
        kernel_near = DyG_near(dx_near, dy_near)
    if (m==2) & (n==0):
        kernel_far = DxxG(dx_far, dy_far)
        kernel_near = DxxG_near(dx_near, dy_near)
    if (m==1) & (n==1):
        kernel_far = DxyG(dx_far, dy_far)
        kernel_near = DxyG_near(dx_near, dy_near)
    if (m==0) & (n==2):
        kernel_far = DyyG(dx_far, dy_far)
        kernel_near = DyyG_near(dx_near, dy_near)
    if (m==3) & (n==0):
        kernel_far = DxxxG(dx_far, dy_far)
        kernel_near = DxxxG_near(dx_near, dy_near)
    if (m==2) & (n==1):
        kernel_far = DxxyG(dx_far, dy_far)
        kernel_near = DxxyG_near(dx_near, dy_near)
    if (m==1) & (n==2):
        kernel_far = DxyyG(dx_far, dy_far)
        kernel_near = DxyyG_near(dx_near, dy_near)
    if (m==0) & (n==3):
        kernel_far = DyyyG(dx_far, dy_far)
        kernel_near = DyyyG_near(dx_near, dy_near)
    if (m==4) & (n==0):
        kernel_far = DxxxxG(dx_far, dy_far)
        kernel_near = DxxxxG_near(dx_near, dy_near)
    if (m==3) & (n==1):
        kernel_far = DxxxyG(dx_far, dy_far)
        kernel_near = DxxxyG_near(dx_near, dy_near)
    if (m==2) & (n==2):
        kernel_far = DxxyyG(dx_far, dy_far)
        kernel_near = DxxyyG_near(dx_near, dy_near)
    if (m==1) & (n==3):
        kernel_far = DxyyyG(dx_far, dy_far)
        kernel_near = DxyyyG_near(dx_near, dy_near)
    if (m==0) & (n==4):
        kernel_far = DyyyyG(dx_far, dy_far)
        kernel_near = DyyyyG_near(dx_near, dy_near)
    if (m==5) & (n==0):
        kernel_far = DxxxxxG(dx_far, dy_far)
        kernel_near = DxxxxxG_near(dx_near, dy_near)
    if (m==4) & (n==1):
        kernel_far = DxxxxyG(dx_far, dy_far)
        kernel_near = DxxxxyG_near(dx_near, dy_near)
    if (m==3) & (n==2):
        kernel_far = DxxxyyG(dx_far, dy_far)
        kernel_near = DxxxyyG_near(dx_near, dy_near)
    if (m==2) & (n==3):
        kernel_far = DxxyyyG(dx_far, dy_far)
        kernel_near = DxxyyyG_near(dx_near, dy_near)
    if (m==1) & (n==4):
        kernel_far = DxyyyyG(dx_far, dy_far)
        kernel_near = DxyyyyG_near(dx_near, dy_near)
    if (m==0) & (n==5):
        kernel_far = DyyyyyG(dx_far, dy_far)
        kernel_near = DyyyyyG_near(dx_near, dy_near)
    return far*kernel_far + near*kernel_near
def termino3(wc, t, n, m, k, g, nu1, nu2):
    u, v = nu1.real, nu1.imag
    x, y = nu2.real, nu2.imag
    ret = (1j/4)*(
        ((np.cos((2*n+u+1j*v)*t)+1j*np.sin((2*n+u+1j*v)*t))*(g-(2*1j)*(2*m+u+1j*v))*(np.sin((2*k+x+1j*y)*t)*(g-(2*1j)*m-1j*u+v)-np.cos((2*k+x+1j*y)*t)*(2*k+x+1j*y))*(expi(-(t*(g-(2*1j)*(2*m+u+1j*v)))/2)-expi(-(t*(g-(2*1j)*(2*m+u+1j*v-wc)))/2))*np.exp((t*(g-(2*1j)*(2*m+u+1j*v)))/2))/(g**2+4*k**2-4*m**2-4*m*u-u**2-(4*1j)*m*v-(2*1j)*u*v+v**2+2*g*((-2*1j)*m-1j*u+v)+x**2+4*k*(x+1j*y)+(2*1j)*x*y-y**2)
        - ((np.cos((2*n+u+1j*v)*t)-1j*np.sin((2*n+u+1j*v)*t))*(g+(2*1j)*(2*m+u+1j*v))*(np.sin((2*k+x+1j*y)*t)*(g+1j*(2*m+u+1j*v))-np.cos((2*k+x+1j*y)*t)*(2*k+x+1j*y))*(expi(-(t*(g+(2*1j)*(2*m+u+1j*v)))/2)-expi(-(t*(g+(2*1j)*(2*m+u+1j*v+wc)))/2))*np.exp((t*(g+(2*1j)*(2*m+u+1j*v)))/2))/(g**2+4*k**2-4*m**2-4*m*u-u**2+(2*1j)*g*(2*m+u+1j*v)-(4*1j)*m*v-(2*1j)*u*v+v**2+x**2+4*k*(x+1j*y)+(2*1j)*x*y-y**2)
        - ((np.cos((2*k+x+1j*y)*t)-1j*np.sin((2*k+x+1j*y)*t))*(g-(2*1j)*(2*k+x+1j*y))*(np.cos((2*n+u+1j*v)*t)*(2*m+u+1j*v)+np.sin((2*n+u+1j*v)*t)*(g-(2*1j)*k-1j*x+y))*(expi((t*(g-(2*1j)*(2*k+x+1j*y)))/2)-expi((t*(g-(2*1j)*(2*k+wc+x+1j*y)))/2))*np.exp(-(t*(g-(2*1j)*(2*k+x+1j*y)))/2))/(g**2-4*k**2+4*m**2+4*m*u+u**2+(4*1j)*m*v+(2*1j)*u*v-v**2-x**2-4*k*(x+1j*y)-(2*1j)*x*y+y**2+2*g*((-2*1j)*k-1j*x+y))
        + ((np.cos((2*k+x+1j*y)*t)+1j*np.sin((2*k+x+1j*y)*t))*(np.cos((2*n+u+1j*v)*t)*(2*m+u+1j*v)+np.sin((2*n+u+1j*v)*t)*(g+1j*(2*k+x+1j*y)))*(g+(2*1j)*(2*k+x+1j*y))*(expi((t*(g+(2*1j)*(2*k+x+1j*y)))/2)-expi((t*(g+(2*1j)*(2*k-wc+x+1j*y)))/2))*np.exp(-(t*(g+(2*1j)*(2*k+x+1j*y)))/2))/(g**2-4*k**2+4*m**2+4*m*u+u**2+(4*1j)*m*v+(2*1j)*u*v-v**2-x**2-4*k*(x+1j*y)+(2*1j)*g*(2*k+x+1j*y)-(2*1j)*x*y+y**2))
    return ret
def test_expi_complex128(self):
    z = np.asarray(np.random.rand(4, 4) + 1j*np.random.rand(4, 4),
                   np.complex128)
    z_gpu = gpuarray.to_gpu(z)
    e_gpu = special.expi(z_gpu)
    assert np.allclose(sp.special.expi(z), e_gpu.get())
import numpy as np
from scipy.special import expi

x = np.linspace(-50, 50., 10000)
ans = expi(x)
res = np.fromfile('res.dat')

for i, j, k in zip(x, ans, res):
    print(i, j, k)

if np.all(np.isclose(res, ans)):
    print("Pass")
else:
    print("Failed")

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(x, abs((res-ans)/ans), 'x')
ax.set_xlabel('$x$')
ax.set_ylabel('Relative Error')
ax.minorticks_on()
ax.set_yscale('log')
fig.tight_layout()
fig.savefig('result.png')