def calc_ff(self, Q_VAL):
    for q in Q_VAL:
        muz = np.cos(self.theta)
        mul = np.cos(self.phi)
        fz = integrate.quad(lambda x: np.cos(q*muz*x)*self.rho(x), -self.L/2, self.L/2)
        fzi = integrate.quad(lambda x: np.sin(q*muz*x)*self.rho(x), -self.L/2, self.L/2)
        fl = integrate.dblquad(lambda x, y: x*np.cos(q*(1-mul**2)**.5*np.cos(y)*x)*self.rho_theta(y)*self.rho_R(x),
                               0, 2*np.pi, lambda y: 0, lambda y: self.R)
        fli = integrate.dblquad(lambda x, y: x*np.sin(q*(1-mul**2)**.5*np.cos(y)*x),
                                0, 2*np.pi, lambda y: 0, lambda y: self.R)
        pq = (fl[0]-1j*fli[0])*(fz[0]-1j*fzi[0])*(1/(self.L*np.pi*self.R**2))
        yield pq
def funcbG(E, verbose, prereqs):
    """
    functional form of mathcalG
    relies on Ginterior
    returns mathcalG(E)
    """
    model, psigood, ggood = prereqs
    tolerance = 1.49e-8
    try:
        t = E.shape
        Gans = []
        problems = []
        for i in range(len(E)):
            print(i+1, 'of', len(E))
            rapoval = rapo(E[i], psigood)
            try:
                temp = intg.dblquad(bGinterior, 0, rapoval, lambda r: 1e-4, lambda r: 1,
                                    args=(E[i], prereqs), epsabs=tolerance, epsrel=tolerance)
            except UserWarning as e:
                if verbose:
                    print('G, E = ', E[i], 'message = ', e)
                problems.append(i)
            Gans.append(temp[0])
        return array(Gans), problems
    except AttributeError:
        rapoval = rapo(E, psigood)
        problem = []
        try:
            temp = intg.dblquad(bGinterior, 0, rapoval, lambda r: 0, lambda r: 1,
                                args=(E, prereqs))
        except UserWarning as e:
            if verbose:
                print('G, E = ', E, 'message = ', e)
            problem = [E]
        return temp[0], problem
def funcbG(E, verbose=False):
    tolerance = 1.49e-8
    try:
        t = E.shape
        Gans = []
        problems = []
        for i in range(len(E)):
            print(i+1, 'of', len(E))
            rapoval = rapo(E[i])
            try:
                temp = intg.dblquad(bGinterior, 0, rapoval, lambda r: 1e-4, lambda r: 1,
                                    args=(E[i],), epsabs=tolerance, epsrel=tolerance)
            except UserWarning as e:
                if verbose:
                    print('G, E = ', E[i], 'message = ', e)
                problems.append(i)
            Gans.append(temp[0])
        return array(Gans), problems
    except AttributeError:
        rapoval = rapo(E)
        problem = []
        try:
            temp = intg.dblquad(bGinterior, 0, rapoval, lambda r: 0, lambda r: 1,
                                args=(E,))
        except UserWarning as e:
            if verbose:
                print('G, E = ', E, 'message = ', e)
            problem = [E]
        return temp[0], problem
def test2():
    D = [[0, 2], [2, 4], [4, 6], [6, 8]]
    f = lambda x, y: np.exp(-x)*np.exp(-y)*(y-x)
    I1 = dblquad(f, 0, 8, lambda l: 0, lambda l: l)[0]

    I3 = 0
    for i in np.arange(1, 4):
        for j in np.arange(0, i):
            I3 += dblquad(f, D[i][0], D[i][1], lambda l: D[j][0], lambda l: D[j][1])[0]
    for i in range(4):
        I3 += dblquad(f, D[i][0], D[i][1], lambda l: D[i][0], lambda l: l)[0]

    I2 = 0
    for i in np.arange(1, 4):
        for j in np.arange(0, i):
            tmp1 = gp.gintegral_seg(1, 1, D[i][0], D[i][1])*gp.gintegral_seg(0, 1, D[j][0], D[j][1])
            tmp2 = gp.gintegral_seg(0, 1, D[i][0], D[i][1])*gp.gintegral_seg(1, 1, D[j][0], D[j][1])
            I2 += tmp1 - tmp2
    for i in range(4):
        I2 += dblquad(f, D[i][0], D[i][1], lambda l: D[i][0], lambda l: l)[0]

    print(I1)
    print(I2)
    print(I3)
    return
def over1(i, j, B, B_func, w_vec):
    """
    Calculates the first overlap integral. If the overlap is of a wave with
    itself, the inverse effective area is returned directly; otherwise the
    integrals are evaluated numerically. The modes are taken in the
    Hermite-Gaussian approximation. The calculation is done in microns^2 and
    is converted to m^2 in calc_overlaps.
    Inputs::
        i, j (int, int): Indices of the waves for which the overlap is calculated
        B (str vec, shape [4]): The mode of each wave (LP01 or LP11)
        B_func (function vec, shape [4]): The function used to calculate each mode (field0 or field1)
        w_vec (float vec, shape [2]): The widths of the LP01 and LP11 modes (calculated in another script)
    Local::
        fieldi, fieldj (function): The mode-field calculators of the ith and jth waves
        r (float): The radius of the fibre (so there is no need to integrate out to
                   infinity as the definition would suggest)
        int1, int2, int3, top, bottom (float vectors [4, 4]): The integrals themselves
                   (see Agrawal for their definitions)
    Returns::
        The first overlap integral
    """
    if i == j:
        if B[i] == 'LP01':
            return 1/161
        elif B[i] == 'LP11':
            return 1/170
    r = 62.45
    fieldi = B_func[i]
    fieldj = B_func[j]
    int1 = lambda y, x: np.abs(fieldi(y, x, w_vec))**2 * np.abs(fieldj(y, x, w_vec))**2
    top = dblquad(int1, -r, r, lambda x: -r, lambda x: r)[0]
    int2 = lambda y, x: np.abs(fieldi(y, x, w_vec))**2
    int3 = lambda y, x: np.abs(fieldj(y, x, w_vec))**2
    bottom = dblquad(int2, -r, r, lambda x: -r, lambda x: r)[0] * \
             dblquad(int3, -r, r, lambda x: -r, lambda x: r)[0]
    return top/bottom
def bunchlength(bunch, cavity, sigma_dz):

    print('Iterative evaluation of bunch length...')

    counter = 0
    eps = 1

    R = cavity.circumference / (2 * np.pi)
    eta = cavity.eta(bunch)
    Qs = cavity.Qs(bunch)

    zmax = np.pi * R / cavity.h
    Hmax = cavity.hamiltonian(zmax, 0, bunch)

    # Initial values
    z0 = sigma_dz
    p0 = z0 * Qs / eta / R  # Matching condition
    H0 = eta * bunch.beta * c * p0 ** 2

    z1 = z0
    while abs(eps) > 1e-6:
        # cf1 = 2 * Qs ** 2 / (eta * h) ** 2
        # dplim = lambda dz: np.sqrt(cf1 * (1 + np.cos(h / R * dz) + (h / R * dz - np.pi) * np.sin(cavity.phi_s)))
        # dplim = lambda dz: np.sqrt(2) * Qs / (eta * h) * np.sqrt(np.cos(h / R * dz) - np.cos(h / R * zmax))

        # Stationary distribution
        # psi = lambda dz, dp: np.exp(cavity.hamiltonian(dz, dp, bunch) / H0) - np.exp(Hmax / H0)
        # zs = zmax / 2.

        psi = stationary_exponential(cavity.hamiltonian, Hmax, H0, bunch)
        dplim = cavity.separatrix.__get__(cavity)
        N = dblquad(lambda dp, dz: psi(dz, dp), -zmax, zmax,
                    lambda dz: -dplim(dz, bunch), lambda dz: dplim(dz, bunch))
        I = dblquad(lambda dp, dz: dz ** 2 * psi(dz, dp), -zmax, zmax,
                    lambda dz: -dplim(dz, bunch), lambda dz: dplim(dz, bunch))

        # Second moment
        z2 = np.sqrt(I[0] / N[0])
        eps = z2 - z0

        # print z1, z2, eps
        z1 -= eps

        p0 = z1 * Qs / eta / R
        H0 = eta * bunch.beta * c * p0 ** 2

        counter += 1
        if counter > 100:
            print("\n*** WARNING: too many iteration steps! There are several possible reasons for that:")
            print("1. Is the Hamiltonian correct?")
            print("2. Is the stationary distribution function convex around zero?")
            print("3. Is the bunch too long to fit into the bucket?")
            print("4. Is this algorithm unsuitable for this case?")
            print("Aborting...")
            sys.exit(-1)

    return z1
def CavityIntegral(a,b,t): # Integrate over the top and bottom half of the hexagonal chunk we are # trying to remove. TopIntegral = dblquad(integrand, b, b + (np.sqrt(3.0)/2.0)*t, lambda y: y/np.sqrt(3.0) - b/np.sqrt(3.0) + a - t, lambda y: -y/np.sqrt(3.0) + b/np.sqrt(3.0) + a +t) BottomIntegral = dblquad(integrand, b - (np.sqrt(3.0)/2.0)*t, b, lambda y: -y/np.sqrt(3.0) + b/np.sqrt(3.0) + a - t, lambda y: y/np.sqrt(3.0) - b/np.sqrt(3.0) + a +t) return TopIntegral[0] + BottomIntegral[0]
def effective_area(self, lim):
    """
    Computes the effective area of the mode,
    Aeff = (int |E|^2 dA)^2 / int |E|^4 dA.
    """
    integral1 = dblquad(self.Eabs2, -lim, lim, lambda x: -lim, lambda x: lim)
    integral2 = dblquad(lambda y, x: self.Eabs2(y, x)**2, -lim, lim,
                        lambda x: -lim, lambda x: lim)
    self.Aeff = integral1[0]**2/integral2[0]
    return None
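# Hedged sanity check for the Aeff definition above (a standalone sketch, not part
# of the class): for a Gaussian mode E(x, y) = exp(-(x**2 + y**2)/w**2), the value
# (int |E|^2)^2 / int |E|^4 is analytically pi*w**2. The names w and lim here are
# illustrative choices, not taken from the original code.
import numpy as np
from scipy.integrate import dblquad

w = 5.0                                                # assumed 1/e field radius
Eabs2 = lambda y, x: np.exp(-2.0*(x**2 + y**2)/w**2)   # |E(x, y)|^2
lim = 10.0*w
num = dblquad(Eabs2, -lim, lim, lambda x: -lim, lambda x: lim)[0]
den = dblquad(lambda y, x: Eabs2(y, x)**2, -lim, lim, lambda x: -lim, lambda x: lim)[0]
print(num**2/den, np.pi*w**2)                          # both ~78.54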
def H(self): """Calculates the integrated beam just over the horizon (pi/2 - 10*pi/180 <theta<pi/2). Assumes Peak power is directly overhead""" [O1,err1] = integrate.dblquad(self.beam_pattern_integrand,0,2*np.pi,self.H_gfun,self.H_hfun,([0,0,1],0)) [O2,err2] = integrate.dblquad(self.beam_pattern_integrand,0,2*np.pi,self.H_gfun,self.H_hfun,([0,1,0],0)) Peak_power = self.beam_pattern(0,0,[0,0,1]) + self.beam_pattern(0,0,[0,1,0]) return (O1+O2)/Peak_power
def integrate_circle(func, R, args=None):
    if args is None:
        result, _ = dblquad(func, -R, R,
                            lambda x: -np.sqrt(R**2 - x**2),
                            lambda x: np.sqrt(R**2 - x**2))
    else:
        result, _ = dblquad(func, -R, R,
                            lambda x: -np.sqrt(R**2 - x**2),
                            lambda x: np.sqrt(R**2 - x**2), args=args)
    return result
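# Usage sketch for integrate_circle (assumes the numpy/scipy imports used above):
# integrating the constant 1 over a disk of radius R should recover its area,
# pi*R**2. Note that dblquad calls func(y, x) with the inner variable first.
R_test = 2.0
print(integrate_circle(lambda y, x: 1.0, R_test))   # ~12.566 = pi * R_test**2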
def prob_decay_KsKs(t1a, t1e, t2a, t2e):
    def f(t1, t2):
        return exp(-Gamma_S*t1 - Gamma_S*t2)

    Int = dblquad(f, 0, np.inf,          # limits t2
                  lambda x: 0,           # limits t1
                  lambda x: np.inf)[0]
    Part = dblquad(f, t1a, t1e,          # limits t2
                   lambda x: t2a,        # limits t1
                   lambda x: t2e)[0]
    return (BrKs)**2*Part/Int
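# Hedged note on prob_decay_KsKs: the normalisation integral factorises, so
#   Int = (int_0^inf e^{-Gamma_S t1} dt1) * (int_0^inf e^{-Gamma_S t2} dt2) = 1/Gamma_S**2,
# and the infinite-domain dblquad above could be replaced by that closed form.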
def Omega(self): """Calculates the integrated beam (0<theta<pi/2). Assumes Peak power is directly overhead""" [O1,err1] = integrate.dblquad(self.beam_pattern_integrand,0,2*np.pi,self.Omega_gfun,self.Omega_hfun,([0,0,1],0)) [O2,err2] = integrate.dblquad(self.beam_pattern_integrand,0,2*np.pi,self.Omega_gfun,self.Omega_hfun,([0,1,0],0)) Peak_power = self.beam_pattern(0,0,[0,0,1]) + self.beam_pattern(0,0,[0,1,0]) # print O1,err1 # print O2,err2 # print Peak_power return (O1+O2)/Peak_power
def integrate(self, Q): rDispersion = RealDispersion(self.distribution, self.detuning, Q) iDispersion = ImaginaryDispersion(self.distribution, self.detuning, Q) realPart, realErr = dblquad(rDispersion, self.minJx, self.maxJx, self.minJy, self.maxJy) imagPart, imagErr = dblquad(iDispersion, self.minJx, self.maxJx, self.minJy, self.maxJy) return -1.0/complex(realPart, imagPart)
def calc_theta_norm_2(theta_min, theta_max, dist_min, dist_max, dist_KDE_prior, region, alpha_1, alpha_2, s_crit): # args = dist_min, dist_max, dist_KDE_prior, region, alpha_1, alpha_2, s_crit args = dist_KDE_prior, region, alpha_1, alpha_2, s_crit if region == 1: norm = dblquad(calc_integrand, theta_min, theta_max, d_min_region_1, d_max_region_1, args=args) else: norm = dblquad(calc_integrand, theta_min, theta_max, d_min_region_2, d_max_region_2, args=args) # norm = quad(calc_integral, theta_min, theta_max, args=args) return norm[0]
def effective(omega, xmin, xmax, ymin, ymax, l, m):
    omega1, omega2 = omega
    n = 2
    a = dblquad(psi, xmin, xmax, lambda x: ymin, lambda x: ymax, args=(omega1, 0, 0, n))[0]
    n = 4
    eff1 = a**2 / dblquad(psi, xmin, xmax, lambda x: ymin, lambda x: ymax, args=(omega1, 0, 0, n))[0]
    n = 2
    a = dblquad(psi, xmin, xmax, lambda x: ymin, lambda x: ymax, args=(omega2, 0, 1, n))[0]
    n = 4
    eff2 = a**2 / dblquad(psi, xmin, xmax, lambda x: ymin, lambda x: ymax, args=(omega2, 0, 1, n))[0]
    return (eff1 - 1.61e-10, eff2 - 1.70e-10)
def checkdf(df, *args, **keywords):
    plt.clf()
    x = np.linspace(0, 2, 200)
    y = np.linspace(0, 2, 200)
    X, Y = np.meshgrid(x, y)
    plt.subplot(aspect='equal')
    plt.imshow(df(X, Y, (keywords)), origin='lower', extent=[0, 2, 0, 2])
    plt.plot(x, np.sqrt(2 - x**2), c='k', linewidth=3)
    plt.colorbar()
    if df == vel_func_benson:
        print('integral in bound region',
              dblquad(df, 0., np.sqrt(2), lambda x: 0, lambda x: np.sqrt(2. - x**2),
                      args=(keywords['redshift'],)))
    elif df == vel_func_gaussian:
        print('integral in bound region',
              dblquad(df, 0., np.sqrt(2), lambda x: 0, lambda x: np.sqrt(2. - x**2),
                      args=(keywords,)))
    else:
        print('integral in bound region',
              dblquad(df, 0., np.sqrt(2), lambda x: 0, lambda x: np.sqrt(2. - x**2)))
def prob_decay_signal(t1a, t1e, t2a, t2e):
    Constant = BrKl*BrKs  # propPiPiKs**2*eta**2

    def intensity(t1, t2):
        Part1 = exp(-Gamma_L*t1 - Gamma_S*t2)
        Part2 = exp(-Gamma_L*t2 - Gamma_S*t1)
        Part3 = -2*(1 - zeta)*exp(-.5*(Gamma_L + Gamma_S)*(t1 + t2))*cos(Delta_m*(t1 - t2))
        return Part1 + Part2 + Part3

    Int = dblquad(intensity, 0, np.inf,     # limits t2
                  lambda x: 0,              # limits t1
                  lambda x: np.inf)[0]
    Part = dblquad(intensity, t1a, t1e,     # limits t2
                   lambda x: t2a,           # limits t1
                   lambda x: t2e)[0]
    return Constant*Part/Int
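# Hedged note on prob_decay_signal: the normalisation integral also has a closed
# form. Writing Gbar = (Gamma_L + Gamma_S)/2, term-by-term integration over
# [0, inf)^2 gives
#   Int = 2/(Gamma_L*Gamma_S) - 2*(1 - zeta)/(Gbar**2 + Delta_m**2),
# which can serve as a cross-check on the numerical value returned by dblquad.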
def sca_xsect(scatterer, h_pol=True): """Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section. """ if scatterer.psd_integrator is not None: return scatterer.psd_integrator.get_angular_integrated( scatterer.psd, scatterer.get_geometry(), "sca_xsect") old_geom = scatterer.get_geometry() def d_xsect(thet, phi): (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg) Z = scatterer.get_Z() I = sca_intensity(scatterer, h_pol) return I * np.sin(thet) try: xsect = dblquad(d_xsect, 0.0, 2*np.pi, lambda x: 0.0, lambda x: np.pi)[0] finally: scatterer.set_geometry(old_geom) return xsect
def asym(scatterer, h_pol=True): """Asymmetry parameter for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The asymmetry parameter. """ if scatterer.psd_integrator is not None: return scatterer.psd_integrator.get_angular_integrated( scatterer.psd, scatterer.get_geometry(), "asym") old_geom = scatterer.get_geometry() cos_t0 = np.cos(scatterer.thet0 * deg_to_rad) sin_t0 = np.sin(scatterer.thet0 * deg_to_rad) p0 = scatterer.phi0 * deg_to_rad def integrand(thet, phi): (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg) cos_T_sin_t = 0.5 * (np.sin(2*thet)*cos_t0 + \ (1-np.cos(2*thet))*sin_t0*np.cos(p0-phi)) I = sca_intensity(scatterer, h_pol) return I * cos_T_sin_t try: cos_int = dblquad(integrand, 0.0, 2*np.pi, lambda x: 0.0, lambda x: np.pi)[0] finally: scatterer.set_geometry(old_geom) return cos_int/sca_xsect(scatterer, h_pol)
def water_area(d):
    waterline1 = fsolve(lambda x: hull_xz(x) - waterline_xz(x, d), -1)[0]
    waterline2 = fsolve(lambda x: hull_xz(x) - waterline_xz(x, d), 1)[0]
    if waterline2 <= 1 and waterline1 != waterline2:
        integral = integrate.dblquad(lambda x, y: 1, waterline1, waterline2,
                                     hull_xz, lambda x: waterline_xz(x, d))[0]
        return integral
    else:
        waterline3 = fsolve(lambda x: deck(x) - waterline_xz(x, d), 0)[0]
        if not -1 <= waterline1 <= 1:
            waterline1 = fsolve(lambda x: hull_xz(x) - waterline_xz(x, d), 0)[0]
        print('waterline1: ' + str(waterline1))
        print('waterline3: ' + str(waterline3))
        integral1 = integrate.dblquad(lambda x, y: 1, waterline1, waterline3,
                                      hull_xz, lambda x: waterline_xz(x, d))[0]
        integral2 = integrate.dblquad(lambda x, y: 1, waterline3, 1, hull_xz, deck)[0]
        print(integral1, integral2)
        return integral1 + integral2
def m_projection(graph_orig, members, prods, full_graph): logging.info('Projecting the graph on members') graph = graph_orig.subgraph(graph_orig.nodes()) #considering only favorable edges graph.remove_edges_from([e for e in graph.edges(data=True) if e[2]['starRating'] < 4]) assert set(graph) == (set(members) | set(prods)) mg = nx.Graph() mg.add_nodes_from(members) prods_len = float(len(prods)) last_pctg = 0 prod_names = dict() for p_i, p in enumerate(prods): # first check whether two favorable reviews within a WINDOW is significant enough (p-value < 0.5) ts = [e['date'] for e in full_graph[p].values()] # In order for gkde to work, there should be more than one point value if len(ts) >= MIN_TS_LEN and min(ts) < max(ts): gkde = gaussian_kde(ts) p_value, err = dblquad(lambda u, v: gkde(u)*gkde(v), min(ts) - WINDOW/2.0, max(ts) + WINDOW/2.0, lambda v: v - WINDOW/2.0, lambda v: v + WINDOW/2.0) if p_value - EPS >= SIGNF_LEVEL and err < EPS: continue for m1, m2 in itertools.combinations(nx.neighbors(graph, p), 2): # order m1,m2 so the key (m1,m2) for prod_names works regardless of edge direction if m1 > m2: m1, m2 = m2, m1 #assert m1 in members and m2 in members if abs(graph[p][m1]['date'] - graph[p][m2]['date']) < WINDOW: if mg.has_edge(m1, m2): c = mg[m1][m2]['weight'] else: c = 0 prod_names[(m1, m2)] = [] prod_names[(m1, m2)].append(p) mg.add_edge(m1, m2, weight=c + 1) pctg = int(p_i/prods_len*100) if pctg % 10 == 0 and pctg > last_pctg: last_pctg = pctg logging.info('%d%% Done' % pctg) logging.debug('Normalizing edge weights: meet/max') for e in mg.edges(): u, v = e if mg[u][v]['weight'] <= 1: mg.remove_edge(u, v) del prod_names[(min(u, v), max(u, v))] else: norm = max(len(nx.neighbors(graph, u)), len(nx.neighbors(graph, v))) mg.add_edge(u, v, weight=float(mg[u][v]['weight']) / norm, denom=norm) # remove isolated nodes degrees = mg.degree() mg.remove_nodes_from([n for n in mg if degrees[n] == 0]) # adding original graph metadata on nodes for m in mg: mg.node[m] = graph_orig.node[m] logging.debug(r'Projected Nodes = %d, Projected Edges = %d' % (mg.order(), len(mg.edges()))) return mg, prod_names
def _scipy_integrate(f, lb, ub):
    """
    Returns the integral of an n-dimensional function f from lb to ub
    (where n = 1, 2, or 3), using scipy.integrate

    Inputs:
        f -- a function that takes a list and returns a number.
        lb -- a list of lower bounds
        ub -- a list of upper bounds
    """
    from scipy.integrate import quad, dblquad, tplquad
    if len(lb) == 3:
        def func(z, y, x):
            return f([x, y, z])

        def qf(x, y):
            return lb[2]

        def rf(x, y):
            return ub[2]

        def gf(x):
            return lb[1]

        def hf(x):
            return ub[1]

        expectation, confidence = tplquad(func, lb[0], ub[0], gf, hf, qf, rf)
        return expectation
    if len(lb) == 2:
        def func(y, x):
            return f([x, y])

        def gf(x):
            return lb[1]

        def hf(x):
            return ub[1]

        expectation, confidence = dblquad(func, lb[0], ub[0], gf, hf)
        return expectation
    if len(lb) == 1:
        expectation, confidence = quad(f, lb[0], ub[0])
        return expectation
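# Usage sketch for _scipy_integrate (illustrative values, not from the original
# code): integrating f(x, y) = x*y over the unit square gives 1/4.
print(_scipy_integrate(lambda v: v[0]*v[1], [0.0, 0.0], [1.0, 1.0]))   # ~0.25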
def __init__(self, psfmodel, renormalize_psf=True, flux=flux.default, x_0=x_0.default, y_0=y_0.default, xname=None, yname=None, fluxname=None, **kwargs): self.psfmodel = psfmodel.copy() if renormalize_psf: from scipy.integrate import dblquad self._psf_scale_factor = 1. / dblquad(self.psfmodel, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] else: self._psf_scale_factor = 1 self.xname = xname self.yname = yname self.fluxname = fluxname # these can be used to adjust the integration behavior. Might be # used in the future to expose how the integration happens self._dblquadkwargs = {} super(PRFAdapter, self).__init__(n_models=1, x_0=x_0, y_0=y_0, flux=flux, **kwargs)
def pcs_numeric_quad_int_iso_cone_torsionless(theta_max=None, c=None, r_pivot_atom=None, r_ln_pivot=None, A=None, R_eigen=None, RT_eigen=None, Ri_prime=None): """Determine the averaged PCS value via numerical integration. @keyword theta_max: The half cone angle. @type theta_max: float @keyword c: The PCS constant (without the interatomic distance and in Angstrom units). @type c: float @keyword r_pivot_atom: The pivot point to atom vector. @type r_pivot_atom: numpy rank-1, 3D array @keyword r_ln_pivot: The lanthanide position to pivot point vector. @type r_ln_pivot: numpy rank-1, 3D array @keyword A: The full alignment tensor of the non-moving domain. @type A: numpy rank-2, 3D array @keyword R_eigen: The eigenframe rotation matrix. @type R_eigen: numpy rank-2, 3D array @keyword RT_eigen: The transpose of the eigenframe rotation matrix (for faster calculations). @type RT_eigen: numpy rank-2, 3D array @keyword Ri_prime: The empty rotation matrix for the in-frame isotropic cone motion, used to calculate the PCS for each state i in the numerical integration. @type Ri_prime: numpy rank-2, 3D array @return: The averaged PCS value. @rtype: float """ # Perform numerical integration. result = dblquad(pcs_pivot_motion_torsionless_quad_int, -pi, pi, lambda phi: 0.0, lambda phi: theta_max, args=(r_pivot_atom, r_ln_pivot, A, R_eigen, RT_eigen, Ri_prime)) # The surface area normalisation factor. SA = 2.0 * pi * (1.0 - cos(theta_max)) # Return the value. return c * result[0] / SA
def expect(self, func=lambda x: 1, lower=(-10,-10), upper=(10,10)): def fun(x, y): x = np.column_stack((x,y)) return func(x) * self.pdf(x) from scipy.integrate import dblquad return dblquad(fun, lower[0], upper[0], lambda y: lower[1], lambda y: upper[1])
def integrate_nd(f, domain, shape, dtype):
    if shape == () or shape == (1,):
        if dtype in continuous_types:
            return integrate.quad(f, domain.lower, domain.upper, epsabs=1e-8)[0]
        else:
            return np.sum(list(map(f, np.arange(domain.lower, domain.upper + 1))))
    elif shape == (2,):
        def f2(a, b):
            return f([a, b])

        return integrate.dblquad(f2, domain.lower[0], domain.upper[0],
                                 lambda a: domain.lower[1],
                                 lambda a: domain.upper[1])[0]
    elif shape == (3,):
        def f3(a, b, c):
            return f([a, b, c])

        return integrate.tplquad(f3, domain.lower[0], domain.upper[0],
                                 lambda a: domain.lower[1],
                                 lambda a: domain.upper[1],
                                 lambda a, b: domain.lower[2],
                                 lambda a, b: domain.upper[2])[0]
    else:
        raise ValueError("Don't know how to integrate shape: " + str(shape))
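# Hypothetical usage sketch for integrate_nd: the real Domain objects come from the
# surrounding test framework, so a minimal stand-in with lower/upper attributes is
# faked here (assumes `from scipy import integrate` as the function above does);
# the dtype argument is unused in the 2-D branch.
from collections import namedtuple

FakeDomain = namedtuple("FakeDomain", ["lower", "upper"])
dom = FakeDomain(lower=(0.0, 0.0), upper=(1.0, 1.0))
print(integrate_nd(lambda x: x[0]*x[1], dom, (2,), None))   # ~0.25 over the unit square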
def test():
    fr = 0.5
    rhor = 400
    qitot = 1e-3
    nitot = 100000
    qifrac = qitot/nitot
    s = p3lookup(rhor, fr, qifrac, 0, 0.01, 'p3lookup_new.npy')

    i1 = gp.gintegral_seg(s.mu, s.l, s.dgr, s.dcr)
    i2, err = quad(lambda x: x**s.mu*np.exp(-s.l*x), s.dgr, s.dcr)
    print('---------SINGLE INTEGRATION---------')
    print('result from gamma function:', i1)
    print('result from numerical int:', i2)
    print('difference i2-i1:', i2-i1)

    cr1 = collision_rate_pyt(s.dgr, s.dcr, s.rhog, fr, s.mu, s.l)
    cr2 = collision_rate(s.dgr, s.dcr, s.rhog, fr, s.mu, s.l)
    print('---------COLLISION RATE---------')
    print('result from gamma function:', cr1)
    print('result from numerical int:', cr2)
    print('difference cr2-cr1:', cr2-cr1)

    mu2 = 2
    di1 = gp.gintegral_seg(s.mu, s.l, s.dgr, s.dcr)*gp.gintegral_seg(s.mu+1, s.l, s.dgr, s.dcr)
    di2, err = dblquad(lambda x, y: x**s.mu*np.exp(-s.l*x)*y**(s.mu+1)*np.exp(-s.l*y),
                       s.dgr, s.dcr, lambda m: s.dgr, lambda m: s.dcr)
    print('---------DOUBLE INTEGRAL---------')
    print('result from gamma function:', di1)
    print('result from numerical int:', di2)
    print('difference di2-di1:', di2-di1)
    return
def prodRate(mDarkPhoton, epsilon, tmin = -0.5 * math.pi, tmax = 0.5 * math.pi): """ dNdPdTheta integrated over p and theta """ integral = dblquad( dNdPdTheta, # integrand tmin, tmax, # theta boundaries (2nd argument of integrand) lambda x: pMin(mDarkPhoton), lambda x: pMax(mDarkPhoton), # p boundaries (1st argument of integrand) args=(mDarkPhoton, epsilon) ) # extra parameters to pass to integrand return integral[0]
def test_double_integral2(self):
    def func(x0, x1, t0, t1):
        return x0 + x1 + t0 + t1

    g = lambda x: x
    h = lambda x: 2 * x
    args = 1, 2
    assert_quad(dblquad(func, 1, 2, g, h, args=args), 35./6 + 9*.5)
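# Hedged arithmetic check of the reference value used above: with args = (1, 2)
# the integrand is y + x + 3, and
#   int_1^2 int_x^{2x} (x + y + 3) dy dx = int_1^2 (2.5*x**2 + 3*x) dx = 35/6 + 9/2,
# which is exactly the 35./6 + 9*.5 expected by assert_quad.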
def test_call(self): from scipy.integrate import dblquad m = MultiGauss2D(sigmas=[1, 2], norms=[3, 4]) xy_max = 5 * m.max_sigma # integration range integral = dblquad(m, -xy_max, xy_max, lambda _: -xy_max, lambda _: xy_max)[0] assert_almost_equal(integral, 7, decimal=5)
def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None, renormalize_psf=True): """ Convert a 2D PSF model to one suitable for use with `BasicPSFPhotometry` or its subclasses. The resulting model may be a composite model, but should have only the x, y, and flux related parameters un-fixed. Parameters ---------- psfmodel : a 2D model The model to assume as representative of the PSF. xname : str or None The name of the ``psfmodel`` parameter that corresponds to the x-axis center of the PSF. If None, the model will be assumed to be centered at x=0, and a new parameter will be added for the offset. yname : str or None The name of the ``psfmodel`` parameter that corresponds to the y-axis center of the PSF. If None, the model will be assumed to be centered at y=0, and a new parameter will be added for the offset. fluxname : str or None The name of the ``psfmodel`` parameter that corresponds to the total flux of the star. If None, a scaling factor will be added to the model. renormalize_psf : bool If True, the model will be integrated from -inf to inf and re-scaled so that the total integrates to 1. Note that this renormalization only occurs *once*, so if the total flux of ``psfmodel`` depends on position, this will *not* be correct. Returns ------- outmod : a model A new model ready to be passed into `BasicPSFPhotometry` or its subclasses. """ if xname is None: xinmod = models.Shift(0, name='x_offset') xname = 'offset_0' else: xinmod = models.Identity(1) xname = xname + '_2' xinmod.fittable = True if yname is None: yinmod = models.Shift(0, name='y_offset') yname = 'offset_1' else: yinmod = models.Identity(1) yname = yname + '_2' yinmod.fittable = True outmod = (xinmod & yinmod) | psfmodel if fluxname is None: outmod = outmod * models.Const2D(1, name='flux_scaling') fluxname = 'amplitude_3' else: fluxname = fluxname + '_2' if renormalize_psf: # we do the import here because other machinery works w/o scipy from scipy import integrate integrand = integrate.dblquad(psfmodel, -np.inf, np.inf, lambda x: -np.inf, lambda x: np.inf)[0] normmod = models.Const2D(1. / integrand, name='renormalize_scaling') outmod = outmod * normmod # final setup of the output model - fix all the non-offset/scale # parameters for pnm in outmod.param_names: outmod.fixed[pnm] = pnm not in (xname, yname, fluxname) # and set the names so that BasicPSFPhotometry knows what to do outmod.xname = xname outmod.yname = yname outmod.fluxname = fluxname # now some convenience aliases if reasonable outmod.psfmodel = outmod[2] if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names: outmod.x_0 = getattr(outmod, xname) outmod.y_0 = getattr(outmod, yname) if 'flux' not in outmod.param_names: outmod.flux = getattr(outmod, fluxname) return outmod
def noise_denominator_integration(ell, j): return integrate.dblquad( noise_denominator_integrand, 0, l_max, 0, l_max, args=(ell, j))[0] / two_pi_squared
def N1(ell, j, N3_for_this_L_j): return Tz(redshift)**2 * j_max * mass_moment_3 * (integrate.dblquad( lambda l1, l2: 1, 0, l_max, lambda l2: 0, lambda l2: l_max)[0] / two_pi_squared) * N3_for_this_L_j / ( eta_D2_L**2 * mass_moment_1**3 * Cshot(redshift))
print(cg_coef(1, 0, 0.5, 0.5, 1.5, 0.5), np.sqrt(2 / 3)) print(cg_coef(1, 0, 0.5, 0.5, 0.5, 0.5), -np.sqrt(1 / 3)) print(cg_coef(1, -1, 0.5, 0.5, 0.5, -0.5), -np.sqrt(2 / 3)) print(cg_coef(1.5, -0.5, 1, -1, 2.5, -1.5), np.sqrt(3 / 5)) print(cg_coef(1, 0, 1, 0, 2, 0), np.sqrt(2 / 3)) print(cg_coef(30, 18, 21, -5, 19, 13)) print() print( integ.dblquad(lambda x, y: x * y, 0, 0.5, lambda y: 0, lambda y: 1 - 2 * y)) print( si.math.complex_dblquad(lambda x, y: x * y, 0, 0.5, lambda y: 0, lambda y: 1 - 2 * y)) print( "num", triple_y_integral(1, 0, 1, 0, 2, 0), 1.5 * np.sqrt(1 / (5 * pi)) * (cg_coef(1, 0, 1, 0, 2, 0)**2), ) print( "num", triple_y_integral(1, 1, 1, 0, 2, 1), 1.5 * np.sqrt(1 / (5 * pi)) * cg_coef(1, 0, 1, 0, 2, 0) * cg_coef(1, 1, 1, 0, 2, 1), )
#print "Loop " + str(iloop1)+" and " +str(iloop2) GAUSSINT = 0 dist, sameflag = diloops(LOOPCOORDS[iloop1][0], LOOPCOORDS[iloop2][0]), difloops( LOOPCOORDS[iloop1][0], LOOPCOORDS[iloop2][0]) if (dist and sameflag): #print "Loop " + str(iloop1)+","+str(LOOPCOORDS[iloop1][1])+" and " +str(iloop2)+" + "+str(LOOPCOORDS[iloop2][1])+" with "+str(dist)+" "+str(sameflag) for pa1 in xrange(0, len(LOOPCOORDS[iloop1][0]) - 1): for pa2 in xrange(0, len(LOOPCOORDS[iloop2][0]) - 1): p1, q1, p2, q2 = LOOPCOORDS[iloop1][0][ pa1 + 1], LOOPCOORDS[iloop1][0][pa1], LOOPCOORDS[ iloop2][0][pa2 + 1], LOOPCOORDS[iloop2][0][pa2] #print p1, q1, p2, q2 GAUSSINT = GAUSSINT + integrate.dblquad( SEGF, 0, 1, lambda x: 0, lambda y: 1)[0] GAUSSINT = GAUSSINT / (4.0 * pi) #print GAUSSINT if (fabs(GAUSSINT) > 0.5): type1, looi1 = LOOPCOORDS[iloop1][1][0], LOOPCOORDS[ iloop1][1][1] type2, looi2 = LOOPCOORDS[iloop2][1][0], LOOPCOORDS[ iloop2][1][1] indexes = [] if (type1 == "hairpin "): indexes.append([HAIRPINS[looi1], "hp"]) elif (type1 == "stem "): indexes.append([STEMS[looi1], "st"]) elif (type1 == "unstl "): indexes.append([FULLUNSTLS[looi1], "il"]) if (type2 == "hairpin "):
print(fun5) print('---------------------') def var1(x): return x**3 fun6 = integrate.quad(var1, 0, 6) print(fun6) def var2(y, x): return x * y**4 fun6 = integrate.dblquad(var2, 0, 6, lambda x: 0, lambda x: 1) print(fun6) print('---------------------') var3 = np.array([[2, 4, 6], [1, 3, 5]]) print(var3) trans1 = sp.fft(var3) print(trans1) print('---------------------') array1 = np.array(([1, 3], [2, 4]))
def F(x): res = np.zeros_like(x) for i, val in enumerate(x): y, err = integrate.dblquad(dE, 0, val, lambda x: 0, lambda x: val) res[i] = y return res
def N2(ell, j, redshift): return Tz(redshift)**2 * 0.21**2 * integrate.dblquad( lambda l1, l2: N3_integrand(ell, l1, l2, j, redshift), 0, current_l_max, lambda l2: 0, lambda l2: current_l_max)[0] / (eta_D2_L**2 * 0.3**4)
def N3(ell, j, redshift): return Cshot(redshift) * 2 * integrate.dblquad( lambda l1, l2: N3_integrand(ell, l1, l2, j, redshift), 0, current_l_max, lambda l2: 0, lambda l2: current_l_max)[0]
def half_sphere(x, y):
    return (1 - x**2 - y**2)**0.5

N = 1000000
x = np.linspace(-1, 1, N)

# 1) slicing (rectangle) method
dx = x[1] - x[0]
y = half_circle(x)  # y is an np.ndarray of values, not a function, so this is a vectorised computation
s = 2 * dx * np.sum(y)
print('%.100f' % s)

# 2) summation with the trapezoidal rule
s = np.trapz(y, x) * 2
print('%.100f' % s)

# 3) numerical quadrature
pi_half, err = integrate.quad(half_circle, -1, 1)
s = pi_half * 2
print('%.100f' % s)

# 4) volume of the half-sphere via a double integral:
# integrate x from -1 to 1 and, for each x, y from -half_circle(x) to half_circle(x)
v_half, err = integrate.dblquad(half_sphere, -1, 1,
                                lambda x: -half_circle(x),
                                lambda x: half_circle(x))
print('%.10f' % v_half)
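# Hedged reference values for the printouts above: the area of the unit circle is
# pi ~ 3.1415927 (the three estimates of s), and the volume of the unit half-sphere
# is 2*pi/3 ~ 2.0943951 (v_half), so each method can be checked against these.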
def N0(ell, j, redshift): return Tz(redshift)**4 * j_max**2 * 46.64 * ( integrate.dblquad(lambda l1, l2: 1, 0, current_l_max, lambda l2: 0, lambda l2: current_l_max)[0] / two_pi_squared)**2 / (eta_D2_L**3 * 0.3**4)
from scipy.integrate import dblquad

area = dblquad(lambda x, y: x*y, 0, 0.5, lambda x: 0, lambda x: 1-2*x)
print(area)
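# Hedged check: this is the standard dblquad example of integrating x*y with the
# outer variable on [0, 0.5] and the inner one on [0, 1 - 2*(outer)]; the analytic
# value is 1/96 ~ 0.0104167, and area[0] should match it to within area[1].
print(abs(area[0] - 1.0/96.0))   # essentially zero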
def compute_gaussian_teo_corr(rate, paramMap, quad_int=True, compute_matrices=True, d_w=0.01, fmax=6): """ Computes input correlation function, its hankel transform. Example usage: C_mean,C_mean_ft,corr_prof,corr_prof_ht,uran,fran,kran=compute_gaussian_teo_corr(rate_model,paramMap,fmax=6) # compute eigenvalues if rate_model is True: eigs=(C_mean_ft-a)*eta eigs_prof=(corr_prof_ht*density-a)*eta else: eigs=(C_mean_ft+k3)*eta eigs_prof=(corr_prof_ht+k3)*eta """ #print 'Computing theoretical correlations' p = get_params(paramMap) amp = p.input_mean * p.L**2 / (2 * np.pi * p.sigma**2) # do not allow naive integration with compute_matrices assert (quad_int is True or (quad_int is False and compute_matrices is False)) # kernel if not hasattr(p, 'filter_type') or p.filter_type == FilterType.FILTER_INPUT: K_t_fun = lambda t: K_t(p.b1, p.b2, p.b3, p.mu1, p.mu2, p.mu3, t) else: K_t_fun = lambda t: K_outeq_t(p.b_in, p.b_out, p.mu_out, t) corr_rate_fun = lambda tau, u: corr_rate(K_t_fun, p.speed, p.sigma, tau, u) # integration limits and steps d_u = 0.01 d_s = 0.005 d_tau = 0.001 u_min, u_max = 0, 2 tau_min, tau_max = 0, 2 w_min, w_max = 0, 2 * np.pi * fmax s_min, s_max = -.5, .5 if compute_matrices is True: # compute the distances of all place field centers ran, step = np.linspace(-p.L / 2., p.L / 2., p.n, endpoint=False, retstep=True) UX, UY = np.meshgrid(ran, ran) allu = np.sqrt(UX**2 + UY**2) # compute theoretical correlation matrix C_mean = np.zeros((p.n, p.n)) samp_uran = np.unique(allu.ravel()) # RATE BASED if rate is True: # quad integration if quad_int is True: # profile corr_prof = np.pi * (amp**2) * p.sigma**2 / (p.L**2) * np.array([ quad(corr_rate_fun, tau_min, tau_max, args=(u)) for u in np.arange(u_min, u_max, d_u) ])[:, 0] # matrix if compute_matrices is True: for u in samp_uran: c = np.pi * (amp**2) * p.sigma**2 / (p.L**2) * quad( corr_rate_fun, tau_min, tau_max, args=(u))[0] C_mean[allu == u] = c # naif integration else: U2, T2 = np.mgrid[u_min:u_max:d_u, tau_min:tau_max:d_tau] A2 = K_t_fun(T2) * np.exp(-(U2**2 + T2**2) / (4 * p.sigma**2)) * iv( 0, T2 * U2 / (2 * p.sigma**2)) corr_prof = np.pi * (amp**2) * p.sigma**2 / (p.L**2) * sum( A2, 1) * d_tau # SPIKE BASED else: # theoretical function step_pos = lambda t: (np.sign(t) + 1) * 0.5 step_neg = lambda t: (1 - np.sign(t)) * 0.5 W_fun_t = lambda t: step_neg(t) * p.Aplus * np.exp( t / p.tau_plus) + step_pos(t) * p.Aminus * np.exp(-t / p.tau_minus) corr_spike_fun = lambda s, tau, u: W_fun_t(s) * K_t_fun(tau) * np.exp( -(u**2 + (tau - s)**2) / (4 * p.sigma**2)) * iv(0, (tau - s) * u / (2 * p.sigma**2)) # quad integration if quad_int is True: # profile corr_prof = np.pi * (amp**2) * p.sigma**2 / (p.L**2) * np.array([ dblquad(corr_spike_fun, tau_min, tau_max, lambda s: s_min, lambda s: s_max, args=(u, )) for u in np.arange(u_min, u_max, d_u) ])[:, 0] # matrix if compute_matrices is True: for u in samp_uran: c = np.pi * (amp**2) * p.sigma**2 / (p.L**2) * dblquad( corr_spike_fun, tau_min, tau_max, lambda s: s_min, lambda s: s_max, args=(u, ))[0] C_mean[allu == u] = c # naive integration else: U3, T3, S3 = np.mgrid[u_min:u_max:d_u, tau_min:tau_max:d_tau, s_min:s_max:d_s] A3 = W_fun_t(S3) * K_t(T3) * np.exp( -(U3**2 + (T3 - S3)**2) / (4 * p.sigma**2)) * iv(0, (T3 - S3) * U3 / (2 * p.sigma**2)) corr_prof = np.pi * (amp**2) * p.sigma**2 / (p.L**2) * sum( sum(A3, 2), 1) * d_tau * d_s # FOURIER DOMAIN # hankel transform of the profile curve W2, U2 = np.mgrid[w_min:w_max:d_w, u_min:u_max:d_u] corr_prof_ht = 2 * np.pi * sum( corr_prof[np.newaxis, :] * 
U2 * jv(0, W2 * U2), 1) * d_u #*(p.n**2)/p.L**2 # compute matrix dft if compute_matrices is True: C_mean_ft = np.real( np.fft.fftshift(np.fft.fft2( np.fft.fftshift(C_mean)))).astype(float) uran = np.arange(u_min, u_max, d_u) fran = np.arange(w_min, w_max, d_w) / (2 * np.pi) kran = np.fft.fftshift(np.fft.fftfreq(int(p.n), d=p.L / float(p.n))) if compute_matrices is False: return corr_prof, corr_prof_ht, uran, fran, kran else: return C_mean, C_mean_ft, corr_prof, corr_prof_ht, uran, fran, kran
# %% def f(y, x): return x + y**2 def gfun(x): return 1 def hfun(x): return x print(dblquad(f, 1, 2, gfun, hfun)) # ### Ordinary differential equation (ODE) # # SciPy provides two ways for solving ODE: # 1) an API with `odeint`, # 2) an object-oriented API with `ode`. # # `odeint` is simpler, we will only use that one here. # # Import: from scipy.integrate import odeint # %% # An ODE system can be written : #
def rate_migdal(w, mw, sigma_nucleon, interaction='SI', m_med=float('inf'), include_approx_nr=False, t=None, halo_model=None, **kwargs): """Differential rate per unit detector mass and deposited ER energy of Migdal effect WIMP-nucleus scattering :param w: ER energy deposited in detector through Migdal effect :param mw: Mass of WIMP :param sigma_nucleon: WIMP/nucleon cross-section :param interaction: string describing DM-nucleus interaction. See sigma_erec for options :param m_med: Mediator mass. If not given, assumed very heavy. :param include_approx_nr: If True, instead return differential rate per *detected* energy, including the contribution of the simultaneous NR signal approximately, assuming q_{NR} = 0.15. This is how https://arxiv.org/abs/1707.07258 presented the Migdal spectra. :param t: A J2000.0 timestamp. If not given, conservative velocity distribution is used. :param halo_model: class (default to standard halo model) containing velocity distribution :param progress_bar: if True, show a progress bar during evaluation (if w is an array) Further kwargs are passed to scipy.integrate.quad numeric integrator (e.g. error tolerance). """ halo_model = wr.StandardHaloModel() if halo_model is None else halo_model include_approx_nr = 1 if include_approx_nr else 0 result = 0 for state, binding_e in binding_es_for_migdal.items(): binding_e *= nu.eV # Only consider n=3 and n=4 # n=5 is the valence band so unreliable in in liquid # n=1,2 contribute very little if state[0] not in ['3', '4']: continue # Lookup for differential probability (units of ev^-1) p = interp1d(df_migdal['E'].values * nu.eV, df_migdal[state].values / nu.eV, bounds_error=False, fill_value=0) def diff_rate(v, erec): # Observed energy = energy of emitted electron # + binding energy of state eelec = w - binding_e - include_approx_nr * erec * 0.15 if eelec < 0: return 0 return ( # Usual elastic differential rate, # common constants follow at end wr.sigma_erec(erec, v, mw, sigma_nucleon, interaction, m_med=m_med) * v * halo_model.velocity_dist(v, t) # Migdal effect |Z|^2 # TODO: ?? what is explicit (eV/c)**2 doing here? * (nu.me * (2 * erec / wr.mn())**0.5 / (nu.eV / nu.c0))**2 / (2 * np.pi) * p(eelec)) # Note dblquad expects the function to be f(y, x), not f(x, y)... r = dblquad( diff_rate, 0, wr.e_max(mw, wr.v_max(t, halo_model.v_esc)), lambda erec: vmin_migdal(w - include_approx_nr * erec * 0.15, erec, mw), lambda _: wr.v_max(t, halo_model.v_esc), **kwargs)[0] result += r return halo_model.rho_dm / mw * (1 / wr.mn()) * result
def approximate_Z(func):
    return integrate.dblquad(func, -2.5, 2.5, lambda x: -2.5, lambda x: 2.5)[0]
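# Usage sketch for approximate_Z (illustrative integrand, not from the original
# code; assumes the `from scipy import integrate` that approximate_Z itself relies
# on): for a standard 2-D Gaussian density the result should be close to 1, since
# [-2.5, 2.5] covers about 98.8% of the mass in each dimension (~0.975 jointly).
import numpy as np
gauss2d = lambda y, x: np.exp(-(x**2 + y**2)/2.0)/(2.0*np.pi)
print(approximate_Z(gauss2d))   # ~0.975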
def N3(ell, j): return Cshot(redshift) * 2 * integrate.dblquad( N3_integrand, 0, l_max, 0, l_max, args=(ell, j))[0]
return maxx #plt.plot(at,I(at),'b') #plt.plot(at,pol(at),'r') #plt.plot(at,np.cos(at),'k') #plt.plot(at,np.polyval(zp,at)*(1+np.tanh((0.33-at)*20))/2,'r') #plt.plot(at,pol(at)*(1+np.tanh((0.33-at)*20))/2,'g') #plt.show() #area = dblquad(lambda x, y: I_sqrt(a,x,y), 0, a/10, lambda x: 0, lambda x: a/10) for i in range(1): a = 0.15 + i / 20 area = dblquad(lambda x, y: I_sqrt(a, x, y), 0, a, lambda x: 0, lambda x: a) moy = area[0] / a / a sm = dblquad(lambda x, y: (I_sqrt(a, x, y) - moy)**2, 0, a, lambda x: 0, lambda x: a) #area = dblquad(lambda x, y: I_tst(a,x,y), 0, a, lambda x: 0, lambda x: a) #area = dblquad(lambda x, y: I_tst(a,x,y), 0, a, lambda x: 0, lambda x: a) print(a, sm) fig = plt.figure() ax = fig.gca(projection='3d') X = np.arange(-1, 1, 0.05) Y = np.arange(-1, 1, 0.05) X, Y = np.meshgrid(X, Y) #R = np.sqrt(X**2 + Y**2) Z = I_sqrt(a, X, Y) surf = ax.plot_surface(X,
def N0(ell): return Tz(redshift)**4 * j_max**2 * mass_moment_4 * ( integrate.dblquad(lambda l1, l2: 1, 0, l_max, lambda l2: 0, lambda l2: l_max)[0] / two_pi_squared)**2 / (eta_D2_L**3 * mass_moment_1**4)
def test_disentangled_gaussian(self): norm, err = dblquad( lambda x1, x2: normsq(disentangled_gaussian_wavefcn()(x1, x2)), -np.inf, np.inf, lambda x: -np.inf, lambda y: np.inf) self.assertAlmostEqual(norm, 1, delta=abs(err))
def N4(ell): return integrate.dblquad(N4_integrand, 0, l_max, 0, l_max, args=(ell, ))[0]
def test_correlated_bipartite_gaussian(self): covmatrix = np.array([[2., 0.5], [0.5, 1.]]) wavefcn = correlated_bipartite_gaussian_wavefcn(covmatrix) norm, err = dblquad(lambda x1, x2: normsq(wavefcn(x1, x2)), -np.inf, np.inf, lambda x: -np.inf, lambda y: np.inf) self.assertAlmostEqual(norm, 1, delta=abs(err))
for i in range(shock, column): if i < center_x + 0.5: far_p[0] = center_x - i + 1 near_p[0] = center_x - i elif i > center_x + 0.5: far_p[0] = i - center_x near_p[0] = i - center_x - 1 if j < center_y + 0.5: far_p[1] = center_y - j + 1 near_p[1] = center_y - j elif j > center_y + 0.5: far_p[1] = j - center_y near_p[1] = j - center_y - 1 if (np.linalg.norm(near_p) < R) & (np.linalg.norm(far_p) > R): Z_a[j, i], err[j, i] = dblquad(idx, i - 1.0, i - 0.0, lambda y: j - 1.0, lambda y: j - 0.0) Z_a[j, i] = Z_a[j, i] * Z_a_out + (1 - Z_a[j, i]) * Z_a_in elif np.linalg.norm(near_p) >= R: Z_a[j, i] = Z_a_out else: Z_a[j, i] = Z_a_in for j in range(0, line): for i in range(shock, column): RHO_a[j, i] = rho_a_2 U_a[j, i] = u_a_2 V_a[j, i] = 0.0 P_a[j, i] = p_a_2 for j in range(0, line): for i in range(0, column):
def test_excited_states(self): for n in range(3): wavefcn = coupled_excited_harmonics(n) norm, err = dblquad(lambda x1, x2: normsq(wavefcn(x1, x2)), -100, 100, lambda x2: -100, lambda x2: 100) self.assertAlmostEqual(norm, 1, delta=abs(err))
def hit(obstacle, sigma): f = partial(normal, mean=obstacle, sigma=sigma) mu, error = dblquad(f, 0, MAX_DIS, 0, MAX_DIS) mu = 1 / mu return lambda x, y: mu * normal(x, y, obstacle, sigma)
def noise_denominator_integration(ell, j, redshift): return integrate.dblquad( lambda l1, l2: noise_denominator_integrand(ell, j, l1, l2, redshift), 0, current_l_max, lambda l2: 0, lambda l2: current_l_max)[0]
def potential(self, i, sobo=False, sphere=False): """Get the potential between the electron and nucleus (Using Cartesian Coordinates, NO gaussian product rule, fft). Parameters ---------- i : int The atomic center index. sobo : Bool If True, use sobolev preconditioning. If False, no sobolev preconditioning. Returns ------- numpy array The potential matrix. """ if sphere is False: x_3d, y_3d, z_3d = np.meshgrid(self.r, self.r, self.r) V_mat = np.zeros((self.num_of_basis, self.num_of_basis), dtype=complex) coulomb_e_p = 1 / np.sqrt( (x_3d - self.atomic_position[i - 1][0])**2 + (y_3d - self.atomic_position[i - 1][1])**2 + (z_3d - self.atomic_position[i - 1][2])**2) if sobo is True: spacing = (self.max_r - self.min_r) / (self.num_of_div - 1) r_freq = spyfft.fftfreq(self.num_of_div, spacing) x_freq_3d, y_freq_3d, z_freq_3d, = np.meshgrid( r_freq, r_freq, r_freq) precond_in_k = (1 + (0.5) * ((2 * np.pi)**2) * (x_freq_3d**2 + y_freq_3d**2 + z_freq_3d**2)) for mu in range(self.num_of_basis): for nu in range(self.num_of_basis): potential_mu_nu_temp = np.zeros( (self.num_of_div, self.num_of_div, self.num_of_div), dtype=complex) for p in range(self.num_of_gauss): for q in range(self.num_of_gauss): int_func_1 = self.GF( self.a_overlap_mat[p][mu], np.sqrt( (x_3d - self.atomic_position[mu][0])**2 + (y_3d - self.atomic_position[mu][1])**2 + (z_3d - self.atomic_position[mu][2])**2)) int_func_2 = self.GF( self.a_overlap_mat[q][nu], np.sqrt( (x_3d - self.atomic_position[nu][0])**2 + (y_3d - self.atomic_position[nu][1])**2 + (z_3d - self.atomic_position[nu][2])**2)) if sobo is False: integrate_func = np.multiply( int_func_1, np.multiply(coulomb_e_p, int_func_2)) elif sobo is True: int_func_2_fft = spyfft.ifftn( spyfft.fftn( np.multiply(coulomb_e_p, int_func_2)) / precond_in_k) integrate_func = np.real( np.multiply(int_func_1, int_func_2_fft)) potential_mu_nu_temp -= self.d_overlap_mat[p][ mu] * self.d_overlap_mat[q][nu] * ( 2 * self.a_overlap_mat[p][mu] / m.pi )**(3 / 4) * (2 * self.a_overlap_mat[q][nu] / m.pi)**(3 / 4) * integrate_func # print(potential_mu_nu_temp) # print("Potential", potential_mu_nu_temp) V_mat[mu][nu] += spyint.simpson( spyint.simpson( spyint.simpson(potential_mu_nu_temp, self.r), self.r), self.r) return np.real(V_mat) elif sphere is True: theta = np.linspace(0, m.pi, 50) radius = np.linspace(0, self.max_r, self.num_of_div) r_2d, theta_2d = np.meshgrid(radius, theta) V_mat = np.zeros((self.num_of_basis, self.num_of_basis), dtype=complex) min_r = 0 max_r = 100 for mu in range(self.num_of_basis): for nu in range(self.num_of_basis): overlap_mu_nu_temp = 0 for p in range(self.num_of_gauss): for q in range(self.num_of_gauss): new_a, new_R, new_K = self.two_GF( self.a_overlap_mat[p][mu], self.a_overlap_mat[q][nu], self.atomic_position[mu], self.atomic_position[nu]) new_RR = new_R - self.atomic_position[0] new_RR_abs = m.sqrt(np.inner(new_RR, new_RR)) # Please use a function instead of a lambda function so that debugging is easier. int_func = lambda t, r: (2 * m.pi * (r)**2) * ( m.sin(t) / (m.sqrt(r**2 + new_RR_abs**2 - 2 * r * new_RR_abs * m.cos(t)) )) * new_K * self.GF(new_a, r) overlap_mu_nu_temp += self.d_overlap_mat[p][ mu] * self.d_overlap_mat[q][nu] * ( 2 * new_a / m.pi)**(3 / 4) * spyint.dblquad( int_func, min_r, max_r, 0, m.pi)[0] V_mat[mu][nu] += overlap_mu_nu_temp return V_mat
def main():
    # Integral: numerical integration
    from scipy.integrate import quad, dblquad, nquad
    # one-dimensional integral
    print(quad(lambda x: np.exp(-x), 0, np.inf))
    # double integral
    print(
        dblquad(lambda t, x: np.exp(-x * t) / t**3, 0, np.inf, lambda x: 1,
                lambda x: np.inf))

    # n-dimensional integral
    def f(x, y):
        return x * y

    def bound_y():
        return [0, 0.5]

    def bound_x(y):
        return [0, 1 - 2 * y]

    print(nquad(f, [bound_x, bound_y]))

    # Optimizer
    from scipy.optimize import minimize

    def rosen(x):
        return sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)

    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    res = minimize(rosen, x0, method="nelder-mead",
                   options={
                       "xtol": 1e-8,
                       "disp": True
                   })
    print("ROSE MINI:", res.x)

    # below: constrained minimisation with the Jacobian supplied
    def func(x):
        return -(2 * x[0] * x[1] + 2 * x[0] - x[0]**2 - 2 * x[1]**2)

    def func_deriv(x):
        dfdx0 = -(-2 * x[0] + 2 * x[1] + 2)
        dfdx1 = -(2 * x[0] - 4 * x[1])
        return np.array([dfdx0, dfdx1])

    cons = ({
        "type": "eq",
        "fun": lambda x: np.array([x[0]**3 - x[1]]),
        "jac": lambda x: np.array([3.0 * (x[0]**2.0), -1.0])
    }, {
        'type': 'ineq',
        'fun': lambda x: np.array(x[1] - 1),
        'jac': lambda x: np.array([0.0, 1.0])
    })
    res = minimize(func, [-1.0, 1.0],
                   jac=func_deriv,
                   constraints=cons,
                   method='SLSQP',
                   options={'disp': True})
    print("RESTRICT:", res)

    # root finding
    from scipy.optimize import root

    def fun(x):
        return x + 2 * np.cos(x)

    sol = root(fun, 0.1)
    print("ROOT:", sol.x, sol.fun)

    # Interpolation: an important method for approximating a discrete function
    # (note the difference from fitting)
    x = np.linspace(0, 1, 10)
    y = np.sin(2 * np.pi * x)
    from scipy.interpolate import interp1d
    li = interp1d(x, y, kind="cubic")
    x_new = np.linspace(0, 1, 50)
    y_new = li(x_new)
    figure()
    plot(x, y, "r")
    plot(x_new, y_new, "k")
    show()
    print(y_new)

    # Linear: linear algebra and matrix decompositions
    from scipy import linalg as lg
    arr = np.array([[1, 2], [3, 4]])
    print("Det:", lg.det(arr))
    print("Inv:", lg.inv(arr))
    # solve a linear system
    b = np.array([6, 14])
    print("Sol:", lg.solve(arr, b))
    # eigenvalues
    print("Eig:", lg.eig(arr))
    # matrix decompositions
    print("LU:", lg.lu(arr))
    print("QR:", lg.qr(arr))
    print("SVD:", lg.svd(arr))
    print("Schur:", lg.schur(arr))
def N4(ell, redshift): return integrate.dblquad( lambda l1, l2: N4_integrand(ell, l1, l2, redshift), 0, current_l_max, lambda l2: 0, lambda l2: current_l_max)[0]
def b_integral():
    # calculating with scipy.integrate.dblquad:
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.dblquad.html
    print("b)")
    value, error = dblquad(fun4, 0, 2, -2, 2)
    print("value: ", value, "error estimate: ", error)