Example #1
def GL(m, a, b):
    points = p_roots(m)[0]
    weights = p_roots(m)[1]
    return (b - a) * sum([
        weights[i] * f((b - a) * points[i] / 2 + (b + a) / 2)
        for i in range(len(points))
    ]) / 2
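A minimal way to exercise this helper, assuming p_roots comes from scipy.special and that the integrand f is defined at module level as the snippet expects (the Gaussian integrand below is only an illustration, not part of the original example):

import numpy as np
from scipy.special import p_roots

def f(x):
    # example integrand chosen only for illustration
    return np.exp(-x ** 2)

# 5-point Gauss-Legendre estimate of the integral of f over [0, 2];
# the exact value is (sqrt(pi)/2)*erf(2), roughly 0.8821
print(GL(5, 0.0, 2.0))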
Example #2
def source_local(V, e, f, quad_degree=4):
    B_e = np.zeros((V.N_loc))
    c_basis = V.c_basis
    vertices = V.mesh.vertices
    dofmap = V.dofmap()
    # Global coordinates for an element
    x_map = c_basis[0].subs([(V.x_[i], vertices[dofmap(e, i)][0])
                             for i in range(V.N_loc)])
    y_map = c_basis[1].subs([(V.y_[i], vertices[dofmap(e, i)][1])
                             for i in range(V.N_loc)])

    # Determinant of the local Jacobian
    detJac_loc = V.Jac.det().subs([(V.x_[i], vertices[dofmap(e, i)][0])
                                   for i in range(V.N_loc)])
    detJac_loc = detJac_loc.subs([(V.y_[i], vertices[dofmap(e, i)][1])
                                  for i in range(V.N_loc)])

    # Use Gauss-Legendre quadrature
    p, w = special.p_roots(quad_degree)
    for i in range(V.N_loc):
        for c_x in range(len(w)):
            for c_y in range(len(w)):
                B_e[i] += w[c_x] * w[c_y] * detJac_loc *\
                          (f(x_map, y_map) * V.basis[i])\
                          .subs([("xi", p[c_x]), ("eta", p[c_y])])
    return B_e
Example #3
def Gaussian_quadrature(n, lower_l, upper_l):
    m = (upper_l - lower_l) / 2
    c = (upper_l + lower_l) / 2
    [x, w] = p_roots(n + 1)
    weights = m * w
    time_train_integral = m * x + c
    return time_train_integral, weights
Example #4
    def __init__(self, *arg):
        super(Frame2D, self).__init__(*arg)
        self.n_intps = arg[3] if len(arg)>3 else 5
        self.dim = 2
        self.dof = 6

        self.local_dof = 3 # element local degrees of freedom
        self.strain = np.zeros(self.local_dof)

        # compute integration-point locations and weight coefficients
        xi,w = p_roots(self.n_intps)
        xi = xi.real
        self.loc_intps,self.wf_intps = 0.5*(xi+1.0),0.5*w
        
        # local coordinates of the integration points
        self.x = self.length*self.loc_intps
        # sequence of cross-section objects at the integration points
        self.intps = []
        # sequence of section strain-displacement transformation matrices at the integration points
        self.Bx = []
        # sequence of section axial strains at the integration points
        self.epslnx = np.zeros(self.n_intps)
        # sequence of section curvatures at the integration points
        self.kappax = np.zeros(self.n_intps)
        # loop over all integration points
        for i in xrange(self.n_intps):
            self.intps.append(deepcopy(self.section))
            xi = self.loc_intps[i]
            self.Bx.append(spl.block_diag([1.],[6.*xi-4.,6.*xi-2]))
        
        # compute the element stiffness
        self.get_stiffness()
        self.get_trans_matrix()
        self.get_stiffness_matrix()
Example #5
    def __init__(self, *arg):
        super(Frame2D, self).__init__(*arg)
        self.n_intps = arg[3] if len(arg) > 3 else 5
        self.dim = 2
        self.dof = 6

        self.local_dof = 3  # element local degrees of freedom
        self.strain = np.zeros(self.local_dof)

        x, w = p_roots(self.n_intps)
        x = x.real
        self.loc_intps, self.wf_intps = 0.5 * (x + 1.0), 0.5 * w

        self.x = self.length * self.loc_intps
        self.intps = []
        self.Dax = np.ones(self.n_intps)
        self.Dbx = np.ones(self.n_intps)
        for i in xrange(self.n_intps):
            self.intps.append(deepcopy(self.section))
            self.Dax[i] = self.intps[i].Da
            self.Dbx[i] = self.intps[i].Db

        self.kappax = np.zeros(self.n_intps)

        self.get_stiffness()
        self.get_trans_matrix()
        self.get_stiffness_matrix()
Example #6
def quest2b():
    "Routine for answering question 2b"
    E = 5.*np.arange(0,71) # in MeV
    a = E[:-1] / 20.
    b = E[1:]/20.
    nodes = 10

    F = 8*pi*np.power(20e6 * const.EV / (const.C*const.H),3)

    n_i = np.array([])
    [roots,weights] = spy.p_roots(nodes,0)
    
    for k in xrange(0,len(a)):
        n_i = np.append(n_i, 0.5*(b[k] - a[k])*sum(weights*funcb((b[k] - a[k])*roots/2. + (b[k] + a[k])/2.)))
    
    Ex = E[:-1] + 2.5 # in Mev, centered bins
    plt.clf()
    fig1 = plt.figure(1)
    plt.plot(Ex,F*n_i / 5.)
    plt.xlabel('E (MeV)')
    plt.ylabel('dn/dE ($cm^{-3} MeV^{-1}$)')
    pp = PdfPages('ws3_prob2b.pdf')
    pp.savefig(fig1)
    pp.close()
    
    return sum(n_i)*F
Example #7
def A_local(V, e, quad_degree=4):
    """
    Assemble the local stiffness matrix on element # e
    """
    vertices = V.mesh.vertices
    dofmap = V.dofmap()
    A_e = np.zeros((V.N_loc, V.N_loc))
    # Determinant of the local Jacobian
    detJac_loc = V.Jac.det().subs([(V.x_[i], vertices[dofmap(e, i)][0])
                                   for i in range(V.N_loc)])
    detJac_loc = detJac_loc.subs([(V.y_[i], vertices[dofmap(e, i)][1])
                                  for i in range(V.N_loc)])
    # Local Jacobian
    Jac_loc = V.Jac.subs([(V.x_[i], vertices[dofmap(e, i)][0])
                          for i in range(V.N_loc)])
    Jac_loc = Jac_loc.subs([(V.y_[i], vertices[dofmap(e, i)][1])
                            for i in range(V.N_loc)])

    p, w = special.p_roots(quad_degree)
    for i in range(V.N_loc):
        for j in range(V.N_loc):
            # Looping over quadrature points on ref element
            for c_x in range(len(w)):
                for c_y in range(len(w)):
                    # Stiffness Matrix
                    # Substitute the quadrature point without overwriting the
                    # symbolic Jac_loc, which is reused at the next point
                    Jac_quad = Jac_loc.subs([("xi", p[c_x]), ("eta", p[c_y])])
                    gradgrad = (sp.transpose(Jac_quad.inv() * V.grads[j]) *
                                Jac_quad.inv() * V.grads[i])
                    integrand = w[c_x] * w[c_y] * gradgrad * detJac_loc
                    A_e[i, j] += integrand.subs([("xi", p[c_x]),
                                                 ("eta", p[c_y])])[0, 0]
    return A_e
Example #8
def leg(n, a, b):
	[legroots, legweights] = sp.p_roots(n,0)
	integ = 0.0
	for i in range(0, n):
		troot = ((b-a)*legroots[i]+(a+b))/2.0
		integ += legweights[i]*f(troot)
	return integ*(b-a)/2.0
Example #9
    def get_trial_derivative_L2(self, expected):
        # Get the L2 norm error for the given expected function vs FEM results
        integral = 0
        for i in self.elements.keys():
            xi = sy.symbols('xi')
            expr_exp = sy.sympify(expected)
            expr_approx = self.elements[i].trial_prime

            expr_error = (expr_exp - expr_approx) ** 2

            domain = [self.elements[i].start, self.elements[i].end]
            order = sy.degree(expr_error, x)

            length = domain[-1] - domain[0]
            npg = ceil((order + 1) / 2)

            new_x = (0.5 * (domain[0] + domain[1])
                     + 0.5 * xi * (domain[1] - domain[0]))
            expr = expr_error.subs(x, new_x)

            [new_xi, w] = p_roots(npg)

            for j in range(len(new_xi)):
                integral = (integral
                            + (w[j] * length * 0.5 * expr.subs(xi,
                                                               new_xi[j]))
                            )

        print(integral)
        self.L2_error = integral
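The choice npg = ceil((order + 1) / 2) above relies on the standard fact that an n-point Gauss-Legendre rule integrates polynomials up to degree 2n - 1 exactly; a small stand-alone check of that rule (independent of the class above):

import numpy as np
from math import ceil
from scipy.special import p_roots

order = 5                            # degree of the integrand
npg = ceil((order + 1) / 2)          # 3 points suffice for degree 5
x, w = p_roots(npg)
print(np.sum(w * x ** order))        # integral of x^5 over [-1, 1] -> 0.0
print(np.sum(w * x ** (order - 1)))  # integral of x^4 over [-1, 1] -> 0.4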
Example #10
def twob():
	n=20 #matches the n=20 used for highest accuracy in part a)
	deltaE=5 #MeV
	kT=3.2*10**(-5) #cgs
	hc=3.16*10**(-17) #cgs
#use Python's built-in Gauss-Legendre roots and weights
	[le_r,le_w]=sp.p_roots(n)
#define the substitution I made
	x=lambda u,i:(5.*u+10.*i+5.)/(2.)
#and f(x)
	f=lambda u,i:(x(u,i))**2/(np.exp(x(u,i))+1)
	ne=np.zeros(150//5) #array of n_e for each energy bin
	Q=np.zeros(len(le_r))

#loop over each energy bin
	for i in range(len(ne)):
#and calculate n_e in each bin
		for j in range(len(Q)):
			Q[j]=le_w[j]*f(le_r[j],i)
		Qtot=np.sum(Q)
		ne[i]=5./2.*(Qtot*8.*np.pi*(kT)**3)/(2.*np.pi*hc)**3
	dndE=ne/deltaE
#print the result
	print dndE
#and check if the total n_e matches part a)
	print np.sum(dndE)*deltaE
Example #11
    def Gaussian_quadrature(self, n, lower_l, upper_l):
        m = (upper_l-lower_l)/2
        c = (upper_l+lower_l)/2
        [x,w] = p_roots(n+1)
        self.weights = w*m
        # self.weights = torch.tensor(self.weights).type('torch.FloatTensor')
        self.time_train_integral = m*x+c
        return [self.time_train_integral, self.weights]
Example #12
def _cached_roots_legendre(n):
    """
    Cache roots_legendre results to speed up calls of the fixed_quad
    function.
    """
    if n in _cached_roots_legendre.cache:
        return _cached_roots_legendre.cache[n]
    _cached_roots_legendre.cache[n] = p_roots(n)
    return _cached_roots_legendre.cache[n]
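For the function-attribute cache above to work, the dict has to be attached once right after the definition (this is how SciPy's integrate module sets it up); a minimal sketch of the intended usage:

_cached_roots_legendre.cache = dict()

x1, w1 = _cached_roots_legendre(5)   # first call: computed via p_roots and stored
x2, w2 = _cached_roots_legendre(5)   # second call: returned straight from the cache
assert x1 is x2 and w1 is w2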
Example #13
    def get_psi_sq(self):
        """

        Calculate the term <psii,psij>

        Returns
        -------
        psi_sq : array
            the term <psii,psij>

        """
        dim = self.my_experiment.dimension

        multindices = self.basis['multi-indices']

        n_terms = len(multindices)

        psi_sq = np.ones(n_terms, )

        for i in range(n_terms):
            for j in range(dim):
                deg = multindices[i][j]

                if self.my_experiment.polytypes[j] == 'Legendre':

                    x_i, w_i = special.p_roots(deg + 1)
                    '''
                    Integrate exactly the SQUARE
                    of the Legendre polynomial. For example,
                    if the Legendre polynomial is of order (deg),
                    the numerical integration must be exact
                    till order (deg**2). Thus, we need at least
                    (deg+1) abscissas' and weights.
                    '''
                    poly = special.legendre(deg)**2
                    psi_sq[i] *= 1.0 / 2 * sum(w_i * poly(x_i))

                elif self.my_experiment.polytypes[j] == 'Hermite':

                    x_i, w_i = special.he_roots(deg + 1)
                    '''
                    special.he_roots(deg) and
                    np.polynomial.hermite_e.hermegauss(deg)
                    returns the same abscissas'
                    but different weights (!). There is a factor 2
                    between the two. Given the fact that the integral of
                    the standard Gaussian must be 1,
                    np.polynomial.hermite_e.hermegauss(deg)
                    provides the right weights.
                    '''
                    poly = special.hermitenorm(deg)**2

                    # 2*w_i*poly(x_i)
                    psi_sq[i] *= 1.0 / np.sqrt(2 * np.pi) * sum(
                        w_i * poly(x_i))

        return psi_sq
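A stand-alone check of the Legendre branch above: half the integral of P_d(x)^2 over [-1, 1] is 1/(2d + 1), and (d + 1)-point Gauss-Legendre reproduces it to machine precision (deg = 3 is an arbitrary choice for illustration):

import numpy as np
from scipy import special

deg = 3
x_i, w_i = special.p_roots(deg + 1)       # exact for polynomials up to degree 2*deg + 1
poly = special.legendre(deg) ** 2
print(0.5 * np.sum(w_i * poly(x_i)))      # 0.142857... = 1 / (2*deg + 1)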
Example #14
def gle(n, a, b):
    [leg_roots, leg_weights] = sp.p_roots(n,0)
    # Once again, I abandon the for loop
   # total = 0
   # for i in range(n):
   #     total += leg_weights[i] * g(((b-a)/2)*leg_roots[i] + ((a+b)/2))
   #     return total
    func_vals = g(((b-a)/2)*leg_roots + ((a+b)/2))
    return np.dot(leg_weights, func_vals)
Example #15
def gausslegendre(f, a, b, n):
    # uses roots of legendre polynomials
    x, w = p_roots(n)
    psum = 0
    # valid only over interval [-1,1]
    # map input [a,b] to interval [-1,1]
    for i in range(n):
        # transform interval
        psum += w[i] * f(0.5 * (b - a) * x[i] + 0.5 * (b + a))
    gaus = 0.5 * (b - a) * psum
    return gaus
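A quick sanity check of gausslegendre() against scipy.integrate.fixed_quad, which is built on the same Legendre nodes (the sine integrand is only an illustration):

import numpy as np
from scipy.special import p_roots
from scipy.integrate import fixed_quad

approx = gausslegendre(np.sin, 0.0, np.pi, 8)
reference, _ = fixed_quad(np.sin, 0.0, np.pi, n=8)
print(approx, reference)   # both very close to 2.0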
Example #17
def _sk_integral_fixed_quad(k, y, Nquad):

    # Get numerical quadrature nodes and weight
    nodes, weights = p_roots(Nquad)

    # Rescale for integration interval from [-1,1] to [-pi,pi]
    nodes = nodes * np.pi
    weights = weights * 0.5
    arg1 = 2 * k * (1 + y * np.cos(nodes)) / 3
    arg2 = k * nodes + 4 * k * y * np.sin(nodes) / 3
    integrand = k0(arg1) * np.cos(arg2)
    return (2 / np.pi) * integrand @ weights
Example #18
def GaussianQuadrature():

    ########## PART A ##########
    # Array to carry integrands
    n = 30
    intArr = np.zeros(n,)

    # Loop over all numbers of nodes 
    intFact = FD_factor(20.)	# kT = 20MeV
    for i in range(1,n+1):
        # Find roots and approximate integral
        [xi,wi] = sp.l_roots(i)
        intArr[i-1] = intFact*np.sum(wi*fermi_dirac_mod(xi))

    # Save integral array to file
    np.savetxt('Q2_numdensity.dat',intArr,fmt='%10.10e')
    ############################

    ########## PART B ##########
    # nInt-1 intervals of width dE = 5MeV 
    # in range [0,150] MeV
    nInt = 30
    dE = 5.
    dndE = np.zeros(nInt,)

    # Find roots
    [xi,wi] = sp.p_roots(n)

    # Transformations for xi,wi in each interval
    xTemp = np.linspace(0.,150.,num=nInt+1)
    bMa = 2.5                                   # (b-a)/2
    bPa = 0.5*(xTemp[1:] + xTemp[:-1])          # (b+a)/2
 
    # Loop over all intervals
    for i in range(0,nInt):
        # Transform variables
        w = wi*bMa
        x = bMa*xi + bPa[i]
        # Evaluate integral and differential
        dndE[i] = (intFact/dE)*np.sum(w*fermi_dirac(x))
   
    # Save spectral information to file
    outArr1 = np.zeros((nInt,2),float)
    outArr1[:,0] = bPa
    outArr1[:,1] = dndE
    np.savetxt('Q2_energyspectrum.dat',outArr1,fmt='%10.10e')
	
    # Verify method and save to file
    outArr2 = [intArr[-1],np.sum(dndE*dE)]
    np.savetxt('Q2_verifymethod.dat',outArr2,fmt='%10.10e')
    ############################

    return
Example #19
def gaussLegendre(g, a, b, n):
    f = lambda x: eval(g)    # define function to evaluate

    # get abscissas (xi) and weights (wi) from p_roots(n)
    # scipy.special.p_roots(n) returns abscissas & weights of the Legendre polynomials
    abscissa, weight = p_roots(n)
    intSum = 0

    # Gauss-Legendre method is valid only over interval [-1,1]
    # map input [a,b] to interval [-1,1]
    for i in range(n):
        # evaluate integral with interval transformed by x--> 0.5(b-a)x + 0.5(b+a)
        intSum = intSum + weight[i]*f(0.5*(b-a)*abscissa[i] + 0.5*(b+a))

    return 0.5*(b-a)*intSum
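Because the integrand is passed as a string and evaluated with eval() against the local variable x, a call looks like the sketch below (the quadratic integrand is just an illustrative choice):

from scipy.special import p_roots

print(gaussLegendre("x**2", 0.0, 1.0, 5))   # close to 1/3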
Example #20
def volume_local(V, e, coeffs, quad_degree=4):
    dofmap = V.dofmap()
    # Determinant of the local Jacobian
    detJac_loc = V.Jac.det().subs([(V.x_[i], V.mesh.vertices[dofmap(e, i)][0])
                                   for i in range(V.N_loc)])
    detJac_loc = detJac_loc.subs([(V.y_[i], V.mesh.vertices[dofmap(e, i)][1])
                                  for i in range(V.N_loc)])

    p, w = special.p_roots(quad_degree)
    loc = 0
    for i in range(V.N_loc):
        for c_x in range(len(w)):
            for c_y in range(len(w)):
                loc += w[c_x] * w[c_y] *\
                       (detJac_loc * ((coeffs[dofmap(e, i)] * V.basis[i]))
                        .subs([("xi", p[c_x]), ("eta", p[c_y])]))
    return loc
Example #21
def gauss_legendre(f, a, b, n):
    [roots,weights] = sp.p_roots(n,0)
    return (b-a)/2.*sum([weights[i]*f((b-a)/2.*roots[i]+(a+b)/2.) for i in range(n)])
Example #22
def gauss_legendre(f,n,a,b):
    [legendre_roots, legendre_weights] = sp.p_roots(n,0)
    trans = float(b-a)/2
    return trans * sum( \
                f(legendre_roots * trans + float(a+b)/2) * legendre_weights )
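Unlike the looped variants above, this version evaluates f on the whole array of roots at once, so f must accept NumPy arrays; a small usage sketch under that assumption:

import numpy as np
from scipy import special as sp

f = np.exp                              # any vectorised integrand works
print(gauss_legendre(f, 6, 0.0, 1.0))   # close to e - 1, about 1.71828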
Example #23
# # E -- FFT # #
# Signal = 0
_, f, _, ftarg = utils.check_time(t, 0, 'fft', [0.0005, 2**20, '', 10], 0)
fEM = test_freq(res, off, f)
fft0 = {'fEM': fEM, 'f': f, 'ftarg': ftarg}

# # F -- QWE - FQWE # #
nquad = fqwe0['ftarg'][2]
maxint = fqwe0['ftarg'][3]
fEM = fqwe0['fEM']
freq = fqwe0['f']
# The following is a condensed version of transform.fqwe, without doqwe-part
xint = np.concatenate((np.array([1e-20]), np.arange(1, maxint + 1) * np.pi))
intervals = xint / t[:, None]
g_x, g_w = special.p_roots(nquad)
dx = np.repeat(np.diff(xint) / 2, nquad)
Bx = dx * (np.tile(g_x, maxint) + 1) + np.repeat(xint[:-1], nquad)
SS = np.sin(Bx) * np.tile(g_w, maxint)
tEM_iint = iuSpline(np.log(2 * np.pi * freq), fEM.imag)
sEM = tEM_iint(np.log(Bx / t[:, None])) * SS
fqwe0['sEM'] = sEM
fqwe0['intervals'] = intervals

# # G -- QWE - HQWE # #
# Model
model = utils.check_model([], 10, 2, 2, 5, 1, 10, True, 0)
depth, res, aniso, epermH, epermV, mpermH, mpermV, isfullspace = model
frequency = utils.check_frequency(1, res, aniso, epermH, epermV, mpermH,
                                  mpermV, 0)
freq, etaH, etaV, zetaH, zetaV = frequency
Example #24
from numpy import *
from scipy import special as sp

[legendre_roots,legendre_weights]=sp.p_roots(2,0)

#create intervals dE=5
t = linspace(0, 150, 31)

#from Wolfram Alpha
k=20#in 1/MeV
h=1.97327*10**-11 #in Mev cm


#define our integrand with the change of variables
def f(x,n):
    return 2.5*(2.5*x+.5*(t[int(n)]+t[int(n+1)]))**2./(exp(.05*(2.5*x+.5*(t[int(n)]+t[int(n+1)])))+1)

#build a 30 x n array whose ith row holds the integrand evaluated at the Legendre nodes for the ith bin in (0,150)
z=[]
for i in linspace(0,29,30):
    z.extend([f(legendre_roots,i)])    

#sum over rows and columns
b1=sum(z,axis=0)
c1=sum(b1,axis=0)

print sum(legendre_weights*b1)
Example #25
    def build_mesh(self):

        N, E = self.N, self.E
        L = self.L
        periodic = self.periodic

        n = N + 1
        n_dofs = N * E

        if not periodic:
            n_dofs += 1
        self.n_dofs = n_dofs

        semh = SEMhat(N)

        vertices = np.linspace(0, L, E + 1)
        etv = np.zeros((E, 2), dtype=np.int)
        etv[:, 0] = np.arange(E)
        etv[:, 1] = np.arange(E) + 1
        topo = Interval()
        xq = topo.ref_to_phys(vertices[etv], semh.xgll)
        if periodic:
            xq = xq[:, :-1]
        self.xq = xq
        jacb_det = topo.calc_jacb(vertices[etv])[0]
        self.jacb_det = jacb_det

        if periodic:
            etv[0, 0] = etv[-1, -1]

        # Make 1D elem to dof map
        etd = np.arange(E * (N + 1))
        etd = etd.reshape((E, -1))
        etd -= np.arange(E)[:, newaxis]
        if periodic:
            etd[-1, -1] = etd[0, 0]

        # Make Q
        cols = etd.ravel()
        rows = np.arange(len(cols))
        vals = np.ones(len(cols))
        Q0 = sps.coo_matrix((vals, (rows, cols)), dtype=np.int)
        Q = kron3(Q0, Q0, Q0)
        self.Q = Q
        self.Q0 = Q0

        # Build etd for full mesh
        nd = E * (N + 1)
        dofs = Q.dot(np.arange(Q.shape[1])).reshape((nd, nd, nd))
        etd = np.zeros((E**3, n**3), dtype=np.int)
        ind = 0
        for iz in range(E):
            for iy in range(E):
                for ix in range(E):
                    a = dofs[iz * n:iz * n + n, iy * n:iy * n + n,
                             ix * n:ix * n + n]
                    etd[ind, :] = a.ravel()
                    ind += 1
        self.etd = etd
        dofs = None

        # Q for assembly. NOT the tensor product Q.
        cols = etd.ravel()
        rows = np.arange(len(cols))
        vals = np.ones(len(cols))
        QA = sps.coo_matrix((vals, (rows, cols)), dtype=np.int)
        self.QA = QA

        # Make R
        if periodic:
            R0 = sps.eye(n_dofs)
        else:
            R0 = sps.dia_matrix((np.ones(n_dofs), 1),
                                shape=(n_dofs - 2, n_dofs))
        R = kron3(R0, R0, R0)
        self.R = R

        if (not periodic):
            dofs = np.arange(n_dofs**3)
            bndy_dofs = list(set(dofs) - set(R.dot(dofs)))
            bndy_dofs = np.array(bndy_dofs, dtype=np.int)
            self.bndy_dofs = bndy_dofs
            dofs = None

        ## Build A and B
        Al = sps.kron(sps.eye(E), semh.Ah / jacb_det)
        A0 = Q0.T.dot(Al.dot(Q0))
        A0 = R0.dot(A0.dot(R0.T))
        Ah0 = semh.Ah / jacb_det

        # Full local mass matrix
        xgll, wgll = gll_points(n)
        xg, wg = p_roots(n)
        L = eval_phi1d(xgll, xg).T
        Bf = sps.dia_matrix((wg, 0), shape=(n, n))
        Bf = L.T.dot(Bf.dot(L))

        # Bl0 = sps.kron(sps.eye(E), semh.Bh*jacb_det)
        Bl0 = sps.kron(sps.eye(E), Bf * jacb_det)
        B0 = Q0.T.dot(Bl0.dot(Q0))
        B0 = R0.dot(B0.dot(R0.T))
        Bh0 = Bf * jacb_det
        Bh = kron3(Bh0, Bh0, Bh0).toarray()
        self.Bh = Bh

        Ah = kron3(Bh0, Bh0, Ah0)
        Ah += kron3(Bh0, Ah0, Bh0)
        Ah += kron3(Ah0, Bh0, Bh0)
        self.Ah = Ah.toarray()
        # Bh = kron3(Bh0, Bh0, Bh0)
        # self.Bh = Bh

        eigvals, eigvecs = scipy.linalg.eigh(A0.toarray(), B0.toarray())
        self.eigvals, self.eigvecs = eigvals, eigvecs
        n_eigs = len(eigvals)

        L = sps.dia_matrix((eigvals, 0), shape=(n_eigs, n_eigs))
        eye = sps.eye(n_eigs)
        C1 = kron3(eye, eye, L) + kron3(eye, L, eye) + kron3(L, eye, eye)
        is_zero = np.abs(C1.data) < 1e-12
        C1.data[is_zero] = 1.0
        C1.data = 1.0 / C1.data
        C1.data[is_zero] = 0.0
        self.C1 = C1
Example #26
def check_legendre_roots(n):
    xs, ws = ([], []) if n == 0 else p_roots(n) # from SciPy
    xl, wl = libsharp.legendre_roots(n)
    assert_allclose(xs, xl)
    assert_allclose(ws, wl)
Example #27
def hqwe(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
         zetaV, xdirect, qweargs, use_ne_eval, msrc, mrec):
    r"""Hankel Transform using Quadrature-With-Extrapolation.

    *Quadrature-With-Extrapolation* was introduced to geophysics by
    [Key12]_. It is one of many so-called *ISE* methods to solve Hankel
    Transforms, where *ISE* stands for Integration, Summation, and
    Extrapolation.

    Following [Key12]_, but without going into the mathematical details here,
    the QWE method rewrites the Hankel transform of the form

    .. math:: F(r)   = \int^\infty_0 f(\lambda)J_v(\lambda r)\
            \mathrm{d}\lambda

    as a quadrature sum which form is similar to the DLF (equation 15),

    .. math::   F_i   \approx \sum^m_{j=1} f(x_j/r)w_j g(x_j) =
                \sum^m_{j=1} f(x_j/r)\hat{g}(x_j) \ ,

    but with various bells and whistles applied (using the so-called Shanks
    transformation in the form of a routine called :math:`\epsilon`-algorithm
    ([Shan55]_, [Wynn56]_; implemented with algorithms from [Tref00]_ and
    [Weni89]_).

    This function is based on ``get_CSEM1D_FD_QWE.m``, ``qwe.m``, and
    ``getBesselWeights.m`` from the source code distributed with [Key12]_.

    In the spline-version, ``hqwe`` checks how steep the decay of the
    wavenumber-domain result is, and calls QUAD for the very steep interval,
    for which QWE is not suited.

    The function is called from one of the modelling routines in :mod:`model`.
    Consult these modelling routines for a description of the input and output
    parameters.

    Returns
    -------
    fEM : array
        Returns frequency-domain EM response.

    kcount : int
        Kernel count.

    conv : bool
        If true, QWE/QUAD converged. If not, <htarg> might have to be adjusted.

    """
    # Input params have an additional dimension for frequency, reduce here
    etaH = etaH[0, :]
    etaV = etaV[0, :]
    zetaH = zetaH[0, :]
    zetaV = zetaV[0, :]

    # Get rtol, atol, nquad, maxint, and pts_per_dec
    rtol, atol, nquad, maxint, pts_per_dec = qweargs[:5]

    # 1. PRE-COMPUTE THE BESSEL FUNCTIONS
    # at fixed quadrature points for each interval and multiply by the
    # corresponding Gauss quadrature weights

    # Get Gauss quadrature weights
    g_x, g_w = special.p_roots(nquad)

    # Compute n zeros of the Bessel function of the first kind of order 1 using
    # the Newton-Raphson method, which is fast enough for our purposes.  Could
    # be done with a loop for (but it is slower):
    # b_zero[i] = optimize.newton(special.j1, b_zero[i])

    # Initial guess using asymptotic zeros
    b_zero = np.pi * np.arange(1.25, maxint + 1)

    # Newton-Raphson iterations
    for i in range(10):  # 10 is more than enough, usually stops in 5

        # Evaluate
        b_x0 = special.j1(b_zero)  # j0 and j1 have faster versions
        b_x1 = special.jv(2, b_zero)  # j2 does not have a faster version

        # The step length
        b_h = -b_x0 / (b_x0 / b_zero - b_x1)

        # Take the step
        b_zero += b_h

        # Check for convergence
        if all(np.abs(b_h) < 8 * np.finfo(float).eps * b_zero):
            break

    # 2. COMPUTE THE QUADRATURE INTERVALS AND BESSEL FUNCTION WEIGHTS

    # Lower limit of integrand, a small but non-zero value
    xint = np.concatenate((np.array([1e-20]), b_zero))

    # Assemble the output arrays
    dx = np.repeat(np.diff(xint) / 2, nquad)
    Bx = dx * (np.tile(g_x, maxint) + 1) + np.repeat(xint[:-1], nquad)
    BJ0 = special.j0(Bx) * np.tile(g_w, maxint)
    BJ1 = special.j1(Bx) * np.tile(g_w, maxint)

    # 3. START QWE

    # Intervals and lambdas for all offset
    intervals = xint / off[:, None]
    lambd = Bx / off[:, None]

    # The following lines until
    #       "Call and return QWE, depending if spline or not"
    # are part of the splined routine. However, we calculate it here to get
    # the non-zero kernels, `k_used`.

    # New lambda, from min to max required lambda with pts_per_dec
    start = np.log10(lambd.min())
    stop = np.log10(lambd.max())

    # If not spline, we just calculate three lambdas to check
    if pts_per_dec == 0:
        ilambd = np.logspace(start, stop, 3)
    else:
        ilambd = np.logspace(start, stop, (stop - start) * pts_per_dec + 1)

    # Call the kernel
    PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth,
                                       etaH[None, :], etaV[None, :],
                                       zetaH[None, :], zetaV[None, :],
                                       np.atleast_2d(ilambd), ab, xdirect,
                                       msrc, mrec, use_ne_eval)

    # Check which kernels have information
    k_used = [True, True, True]
    for i, val in enumerate((PJ0, PJ1, PJ0b)):
        if val is None:
            k_used[i] = False

    # Call and return QWE, depending if spline or not
    if pts_per_dec != 0:  # If spline, we calculate all kernels here

        # Interpolation : Has to be done separately on each PJ,
        # in order to work with multiple offsets which have different angles.
        if k_used[0]:
            sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
            sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
        else:
            sPJ0r = None
            sPJ0i = None

        if k_used[1]:
            sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
            sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
        else:
            sPJ1r = None
            sPJ1i = None

        if k_used[2]:
            sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
            sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
        else:
            sPJ0br = None
            sPJ0bi = None

        # Get quadargs: diff_quad, a, b, limit
        diff_quad, a, b, limit = qweargs[5:]

        # Set quadargs if not given:
        if not limit:
            limit = maxint
        if not a:
            a = intervals[:, 0]
        else:
            a = a * np.ones(off.shape)
        if not b:
            b = intervals[:, -1]
        else:
            b = b * np.ones(off.shape)

        # Check if we use QWE or SciPy's QUAD
        # If there are any steep decays within an interval we have to use QUAD,
        # as QWE is not designed for these intervals.
        check0 = np.log(intervals[:, :-1])
        check1 = np.log(intervals[:, 1:])
        numerator = np.zeros((off.size, maxint), dtype=complex)
        denominator = np.zeros((off.size, maxint), dtype=complex)

        if k_used[0]:
            numerator += sPJ0r(check0) + 1j * sPJ0i(check0)
            denominator += sPJ0r(check1) + 1j * sPJ0i(check1)

        if k_used[1]:
            numerator += sPJ1r(check0) + 1j * sPJ1i(check0)
            denominator += sPJ1r(check1) + 1j * sPJ1i(check1)

        if k_used[2]:
            numerator += sPJ0br(check0) + 1j * sPJ0bi(check0)
            denominator += sPJ0br(check1) + 1j * sPJ0bi(check1)

        doqwe = np.all((np.abs(numerator) / np.abs(denominator) < diff_quad),
                       1)

        # Pre-allocate output array
        fEM = np.zeros(off.size, dtype=complex)
        conv = True

        # Carry out SciPy's Quad if required
        if np.any(~doqwe):

            # Loop over offsets that require Quad
            for i in np.where(~doqwe)[0]:

                # Input-dictionary for quad
                iinp = {
                    'a': a[i],
                    'b': b[i],
                    'epsabs': atol,
                    'epsrel': rtol,
                    'limit': limit
                }

                fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi,
                                  ab, off[i], factAng[i], iinp)

                # Update conv
                conv *= tc

            # Return kcount=1 in case no QWE is calculated
            kcount = 1

        if np.any(doqwe):
            # Get EM-field at required offsets
            if k_used[0]:
                sPJ0 = sPJ0r(np.log(lambd)) + 1j * sPJ0i(np.log(lambd))
            if k_used[1]:
                sPJ1 = sPJ1r(np.log(lambd)) + 1j * sPJ1i(np.log(lambd))
            if k_used[2]:
                sPJ0b = sPJ0br(np.log(lambd)) + 1j * sPJ0bi(np.log(lambd))

            # Carry out and return the Hankel transform for this interval
            sEM = np.zeros_like(numerator, dtype=complex)
            if k_used[1]:
                sEM += np.sum(
                    np.reshape(sPJ1 * BJ1, (off.size, nquad, -1), order='F'),
                    1)
                if ab in [11, 12, 21, 22, 14, 24, 15, 25]:  # Because of J2
                    # J2(kr) = 2/(kr)*J1(kr) - J0(kr)
                    sEM /= np.atleast_1d(off[:, np.newaxis])
            if k_used[2]:
                sEM += np.sum(
                    np.reshape(sPJ0b * BJ0, (off.size, nquad, -1), order='F'),
                    1)
            if k_used[1] or k_used[2]:
                sEM *= factAng[:, np.newaxis]
            if k_used[0]:
                sEM += np.sum(
                    np.reshape(sPJ0 * BJ0, (off.size, nquad, -1), order='F'),
                    1)

            getkernel = sEM[doqwe, :]

            # Get QWE
            fEM[doqwe], kcount, tc = qwe(rtol, atol, maxint, getkernel,
                                         intervals[doqwe, :], None, None, None)
            conv *= tc

    else:  # If not spline, we define the wavenumber-kernel here

        def getkernel(i, inplambd, inpoff, inpfang):
            r"""Return wavenumber-domain-kernel as a fct of interval i."""

            # Indices and factor for this interval
            iB = i * nquad + np.arange(nquad)

            # PJ0 and PJ1 for this interval
            PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth,
                                               etaH[None, :], etaV[None, :],
                                               zetaH[None, :], zetaV[None, :],
                                               np.atleast_2d(inplambd)[:, iB],
                                               ab, xdirect, msrc, mrec,
                                               use_ne_eval)

            # Carry out and return the Hankel transform for this interval
            gEM = np.zeros_like(inpoff, dtype=complex)
            if k_used[1]:
                gEM += inpfang * np.dot(PJ1[0, :], BJ1[iB])
                if ab in [11, 12, 21, 22, 14, 24, 15, 25]:  # Because of J2
                    # J2(kr) = 2/(kr)*J1(kr) - J0(kr)
                    gEM /= np.atleast_1d(inpoff)
            if k_used[2]:
                gEM += inpfang * np.dot(PJ0b[0, :], BJ0[iB])
            if k_used[0]:
                gEM += np.dot(PJ0[0, :], BJ0[iB])

            return gEM

        # Get QWE
        fEM, kcount, conv = qwe(rtol, atol, maxint, getkernel, intervals,
                                lambd, off, factAng)

    return fEM, kcount, conv
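The xint / dx / Bx construction in steps 1-2 above maps the Gauss-Legendre nodes from [-1, 1] into every quadrature interval; an isolated illustration of that idiom (the small nquad and maxint values are arbitrary):

import numpy as np
from scipy import special

nquad, maxint = 3, 4
g_x, g_w = special.p_roots(nquad)
xint = np.concatenate((np.array([1e-20]), np.arange(1, maxint + 1) * np.pi))
dx = np.repeat(np.diff(xint) / 2, nquad)
Bx = dx * (np.tile(g_x, maxint) + 1) + np.repeat(xint[:-1], nquad)
print(Bx.reshape(maxint, nquad))   # row i holds the nodes inside [xint[i], xint[i+1]]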
Example #28
def fqwe(fEM, time, freq, qweargs):
    r"""Fourier Transform using Quadrature-With-Extrapolation.

    It follows the QWE methodology [Key12]_ for the Hankel transform, see
    ``hqwe`` for more information.

    The function is called from one of the modelling routines in :mod:`model`.
    Consult these modelling routines for a description of the input and output
    parameters.

    This function is based on ``get_CSEM1D_TD_QWE.m`` from the source code
    distributed with [Key12]_.

    ``fqwe`` checks how steep the decay of the frequency-domain result is, and
    calls QUAD for the very steep interval, for which QWE is not suited.

    Returns
    -------
    tEM : array
        Returns time-domain EM response of ``fEM`` for given ``time``.

    conv : bool
        If true, QWE/QUAD converged. If not, <ftarg> might have to be adjusted.

    """
    # Get rtol, atol, nquad, maxint, diff_quad, a, b, and limit
    rtol, atol, nquad, maxint, _, diff_quad, a, b, limit, sincos = qweargs

    # Calculate quadrature intervals for all offset
    xint = np.concatenate(
        (np.array([1e-20]), np.arange(1, maxint + 1) * np.pi))
    if sincos == np.cos:  # Adjust zero-crossings if cosine-transform
        xint[1:] -= np.pi / 2
    intervals = xint / time[:, None]

    # Get Gauss Quadrature Weights
    g_x, g_w = special.p_roots(nquad)

    # Pre-compute the Bessel functions at fixed quadrature points, multiplied
    # by the corresponding Gauss quadrature weight.
    dx = np.repeat(np.diff(xint) / 2, nquad)
    Bx = dx * (np.tile(g_x, maxint) + 1) + np.repeat(xint[:-1], nquad)
    SS = sincos(Bx) * np.tile(g_w, maxint)

    # Interpolate in frequency domain
    tEM_rint = iuSpline(np.log(2 * np.pi * freq), fEM.real)
    tEM_iint = iuSpline(np.log(2 * np.pi * freq), -fEM.imag)

    # Check if we use QWE or SciPy's QUAD
    # If there are any steep decays within an interval we have to use QUAD, as
    # QWE is not designed for these intervals.
    check0 = np.log(intervals[:, :-1])
    check1 = np.log(intervals[:, 1:])
    doqwe = np.all(
        (np.abs(tEM_rint(check0) + 1j * tEM_iint(check0)) /
         np.abs(tEM_rint(check1) + 1j * tEM_iint(check1)) < diff_quad), 1)

    # Choose imaginary part if sine-transform, else real part
    if sincos == np.sin:
        tEM_int = tEM_iint
    else:
        tEM_int = tEM_rint

    # Set quadargs if not given:
    if not limit:
        limit = maxint
    if not a:
        a = intervals[:, 0]
    else:
        a = a * np.ones(time.shape)
    if not b:
        b = intervals[:, -1]
    else:
        b = b * np.ones(time.shape)

    # Pre-allocate output array
    tEM = np.zeros(time.size)
    conv = True

    # Carry out SciPy's Quad if required
    if np.any(~doqwe):

        def sEMquad(w, t):
            r"""Return scaled, interpolated value of tEM_int for ``w``."""
            return tEM_int(np.log(w)) * sincos(w * t)

        # Loop over times that require QUAD
        for i in np.where(~doqwe)[0]:
            out = integrate.quad(sEMquad, a[i], b[i], (time[i], ), 1, atol,
                                 rtol, limit)
            tEM[i] = out[0]

            # If there is a fourth output from QUAD, it means it did not conv.
            if len(out) > 3:
                conv *= False

    # Carry out QWE for 'well-behaved' intervals
    if np.any(doqwe):
        sEM = tEM_int(np.log(Bx / time[doqwe, None])) * SS
        tEM[doqwe], _, tc = qwe(rtol, atol, maxint, sEM, intervals[doqwe, :])
        conv *= tc

    return tEM, conv
Example #29
# parameters
n = 100 # number of nodes in Legendre Quadrature
dE = 5 # energy bin size (MeV)
max_E = 155 # energy cutoff (MeV)

Es = np.arange(0, 155, dE) # energies
xs = Es / 20.0 # x parameter, energy / temperature (20 MeV)

def x(y, a, b):
    return 0.5 * (y + 1) * (b - a) + a

def f(y, a, b):
    x_ = x(y, a, b)
    return 0.5 * (b - a) * x_ * x_ / (np.exp(x_) + 1)

[ys, ws] = sp.p_roots(n, 0)

Qs = np.array([np.sum(ws * f(ys, xs[i], xs[i+1])) for i in range(len(xs) - 1)])

print("\nPart B\n")
print("Total number density: {}".format(number_density_coeff * np.sum(Qs)))

myfig = pl.figure(figsize=(10,8))
myfig.subplots_adjust(left=0.13)
myfig.subplots_adjust(bottom=0.14)
myfig.subplots_adjust(top=0.90)
myfig.subplots_adjust(right=0.95)
pl.bar(Es[:-1], number_density_coeff * Qs / 10**34, color='c', width=5)
pl.xlim(0, max_E - dE)
pl.xlabel("Energy bin [MeV]")
pl.ylabel(r"Number density [$\times 10^{34}$ cm$^{-3}$]")
Example #30
def GaussLegendre(func, a, b, n):
    [legendre_roots, legendre_weights] = sp.p_roots(n, 0.)
    
    return np.sum(legendre_weights*(b-a)*func((b-a)*legendre_roots/2. + (a+b)/2.)/2.)
Example #31
File: p2b.py  Project: xchma/ay190
import numpy as np
from scipy import special as sp

def g(x):
    return (x**2)/(1.0+np.exp(x/20.0))

energy=np.arange(0,155,5)
interval=len(energy)-1
integrate=np.zeros(interval)

for i in np.arange(interval):
    a=energy[i]; b=energy[i+1];
    roots,weights=sp.p_roots(10);
    x=a+0.5*(b-a)*(roots+1);
    integrate[i]=np.sum(weights*0.5*(b-a)*g(x))

print "i","E_i","n_i","n_i/dE"
for i in np.arange(interval):
    print i+1,energy[i],integrate[i],integrate[i]/5.
Example #32
def check_spectrum(pyrat):
    """
    Check that user input arguments make sense.
    """
    # Shortcuts:
    log = pyrat.log
    phy = pyrat.phy
    spec = pyrat.spec
    atm = pyrat.atm
    obs = pyrat.obs

    # Check that input files exist:
    if pyrat.mol.molfile is None:
        pyrat.mol.molfile = pc.ROOT + 'pyratbay/data/molecules.dat'

    with pt.log_error(log):
        pt.file_exists('atmfile', 'Atmospheric', pyrat.atm.atmfile)
        pt.file_exists('tlifile', 'TLI', pyrat.lt.tlifile)
        pt.file_exists('molfile', 'Molecular-data', pyrat.mol.molfile)

    if pyrat.runmode == 'spectrum' and spec.specfile is None:
        log.error('Undefined output spectrum file (specfile).')

    # Compute the Hill radius for the planet:
    if (phy.mstar is not None and phy.mplanet is not None
            and phy.smaxis is not None):
        phy.rhill = phy.smaxis * (phy.mplanet / (3 * phy.mstar))**(1.0 / 3.0)

    # Check Voigt-profile arguments:
    if (pyrat.voigt.dmin is not None and pyrat.voigt.dmax is not None
            and pyrat.voigt.dmax <= pyrat.voigt.dmin):
        log.error('dmax ({:g} cm-1) must be > dmin ({:g} cm-1).'.format(
            pyrat.voigt.dmax, pyrat.voigt.dmin))

    if (pyrat.voigt.lmin is not None and pyrat.voigt.lmax is not None
            and pyrat.voigt.lmax <= pyrat.voigt.lmin):
        log.error('lmax ({:g} cm-1) must be > lmin ({:g} cm-1).'.format(
            pyrat.voigt.lmax, pyrat.voigt.lmin))

    if pyrat.runmode == 'opacity' or pt.isfile(pyrat.ex.extfile) == 0:
        if pyrat.ex.tmin is None:
            log.error('Undefined lower temperature boundary (tmin) for '
                      'extinction-coefficient grid.')
        if pyrat.ex.tmax is None:
            log.error('Undefined upper temperature boundary (tmax) for '
                      'extinction-coefficient grid.')
        if pyrat.ex.tstep is None:
            log.error('Undefined temperature sampling step (tstep) for '
                      'extinction-coefficient grid.')
        if pyrat.lt.tlifile is None:
            log.error('Requested extinction-coefficient table, but there '
                      'are no input TLI files.')

    if pyrat.runmode == 'mcmc':
        if pyrat.od.rt_path in pc.emission_rt:
            if pyrat.phy.rplanet is None or pyrat.phy.rstar is None:
                log.error("Undefined radius ratio (need rplanet and rstar).")
        if pyrat.obs.data is None:
            log.error("Undefined transit/eclipse data (data).")
        if pyrat.obs.uncert is None:
            log.error("Undefined data uncertainties (uncert).")
        if pyrat.obs.filters is None:
            log.error("Undefined transmission filters (filters).")
        if pyrat.ret.retflag == []:
            log.error(
                'Undefined retrieval model flags.  Select from {}.'.format(
                    pc.retflags))
        if pyrat.ret.sampler is None:
            log.error('Undefined retrieval algorithm (sampler).  Select from '
                      '[snooker].')
        if pyrat.ret.nsamples is None:
            log.error('Undefined number of retrieval samples (nsamples).')
        if pyrat.ret.burnin is None:
            log.error(
                'Undefined number of retrieval burn-in samples (burnin).')
        if pyrat.ret.nchains is None:
            log.error(
                'Undefined number of retrieval parallel chains (nchains).')
        if pyrat.ret.params is None:
            log.error('Undefined retrieval fitting parameters (params).')

    # Check cloud models:
    if pyrat.cloud.model_names is not None:
        pyrat.cloud.models = []
        npars = 0
        for name in pyrat.cloud.model_names:
            model = pa.clouds.get_model(name)
            npars += model.npars
            pyrat.cloud.models.append(model)
        # Parse the cloud parameters:
        if pyrat.cloud.pars is not None:
            if npars != len(pyrat.cloud.pars):
                log.error(
                    'Number of input cloud parameters ({:d}) does not '
                    'match the number of required model parameters ({:d}).'.
                    format(len(pyrat.cloud.pars), npars))
            j = 0
            for model in pyrat.cloud.models:
                npars = model.npars
                model.pars = pyrat.cloud.pars[j:j + npars]
                j += npars

    # Check Rayleigh models:
    if pyrat.rayleigh.model_names is not None:
        pyrat.rayleigh.models = []
        npars = 0
        for name in pyrat.rayleigh.model_names:
            model = pa.rayleigh.get_model(name)
            npars += model.npars
            pyrat.rayleigh.models.append(model)
        # Process the Rayleigh parameters:
        if npars == 0 and pyrat.rayleigh.pars is None:
            pyrat.rayleigh.pars = []
        if pyrat.rayleigh.pars is not None:
            if npars != len(pyrat.rayleigh.pars):
                log.error(
                    'Number of input Rayleigh parameters ({:d}) does not '
                    'match the number of required model parameters ({:d}).'.
                    format(len(pyrat.rayleigh.pars), npars))
            j = 0
            for model in pyrat.rayleigh.models:
                npars = model.npars
                model.pars = pyrat.rayleigh.pars[j:j + npars]
                j += npars

    # Check alkali arguments:
    if pyrat.alkali.model_names is not None:
        pyrat.alkali.models = [
            pa.alkali.get_model(name, pyrat.alkali.cutoff)
            for name in pyrat.alkali.model_names
        ]

    # Accept ray-path argument:
    print(pyrat.od)
    if pyrat.runmode in ['spectrum', 'mcmc'] and pyrat.od.rt_path is None:
        log.error("Undefined radiative-transfer observing geometry (rt_path)."
                  f"  Select from {pc.rt_paths}.")

    if 'temp' in pyrat.ret.retflag and atm.tmodelname is None:
        log.error('Requested temp in retflag, but there is no tmodel.')
    if 'mol' in pyrat.ret.retflag:
        if atm.molmodel is None:
            log.error("Requested mol in retflag, but there is no 'molmodel'.")
        if atm.bulk is None:
            log.error(
                'Requested mol in retflag, but there are no bulk species.')
    if 'ray' in pyrat.ret.retflag and pyrat.rayleigh.models == []:
        log.error(
            'Requested ray in retflag, but there are no rayleigh models.')
    if 'cloud' in pyrat.ret.retflag and pyrat.cloud.models == []:
        log.error('Requested cloud in retflag, but there are no cloud models.')

    # Check system arguments:
    if pyrat.od.rt_path in pc.transmission_rt and phy.rstar is None:
        log.error(
            'Undefined stellar radius (rstar), required for transmission '
            'calculation.')

    # Check raygrid:
    if spec.raygrid[0] != 0:
        log.error('First angle in raygrid must be 0.0 (normal to surface).')
    if np.any(spec.raygrid < 0) or np.any(spec.raygrid > 90):
        log.error('raygrid angles must lie between 0 and 90 deg.')
    if np.any(np.ediff1d(spec.raygrid) <= 0):
        log.error('raygrid angles must be monotonically increasing.')
    # Store raygrid values in radians:
    spec.raygrid *= sc.degree

    # Gauss quadrature integration variables:
    if spec.quadrature is not None:
        qnodes, qweights = ss.p_roots(spec.quadrature)
        spec.qnodes = 0.5 * (qnodes + 1.0)
        spec.qweights = 0.5 * qweights

    # Number of datapoints and filters:
    if obs.data is not None:
        obs.ndata = len(obs.data)
    if obs.filters is not None:
        obs.nfilters = len(obs.filters)
    # Number checks:
    if pyrat.obs.uncert is not None and pyrat.obs.ndata != len(
            pyrat.obs.uncert):
        log.error('Number of data uncertainty values ({:d}) does not match '
                  'the number of data points ({:d}).'.format(
                      len(pyrat.obs.uncert), pyrat.obs.ndata))
    if (obs.filters is not None and obs.ndata > 0
            and obs.ndata != obs.nfilters):
        log.error('Number of filter bands ({:d}) does not match the '
                  'number of data points ({:d}).'.format(
                      obs.nfilters, obs.ndata))

    if pyrat.ncpu >= mp.cpu_count():
        log.warning('Number of requested CPUs ({:d}) is >= than the number '
                    'of available CPUs ({:d}).  Enforced ncpu to {:d}.'.format(
                        pyrat.ncpu, mp.cpu_count(),
                        mp.cpu_count() - 1))
        pyrat.ncpu = mp.cpu_count() - 1
    log.head('Check spectrum done.')
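The quadrature setup above remaps the p_roots output from [-1, 1] onto [0, 1]; a quick stand-alone check that the remapped weights still integrate correctly over the unit interval:

import numpy as np
import scipy.special as ss

qnodes, qweights = ss.p_roots(5)
qnodes = 0.5 * (qnodes + 1.0)       # nodes now lie in [0, 1]
qweights = 0.5 * qweights
print(np.sum(qweights))             # 1.0, the length of [0, 1]
print(np.sum(qweights * qnodes))    # 0.5, the integral of x over [0, 1]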
Example #33
MeV_to_ergs = 1.602177e-6 # factor to convert units of energy from MeV to ergs
T = 20*MeV_to_ergs/kB     # temperature (kT = 20MeV)
integrand = lambda x: x**2/(exp(x)+1)

# (a) Use Gauss-Laguerre Quadrature to determine the total
#     number density of electrons
[laguerre_roots,laguerre_weights] = special.l_roots(20)
weight   = lambda x: exp(-x)
integral = sum(laguerre_weights*integrand(laguerre_roots)/weight(laguerre_roots))

n_electrons = 8*pi*(kB*T)**3/(2*pi*hbar*c)**3 * integral
print 'The number density of electrons is %.4e/cm^3' % n_electrons

# (b) Use Gauss-Legendre Quadrature to determine the spectral
#     distribution of the electrons
[legendre_roots,legendre_weights] = special.p_roots(20)

dE = 5; Emax = 200
E_bins = linspace(0,Emax,Emax/dE+1) # MeV
x_bins = E_bins*MeV_to_ergs/(kB*T)
n_bins = zeros(len(E_bins)-1) # number density of electrons in each energy bin

for i in range(len(E_bins)-1):
    a = x_bins[i]; b = x_bins[i+1]
    transformed_integrand = lambda x: (b-a)/2*integrand((b-a)/2*x+(a+b)/2)
    integral = sum(legendre_weights*transformed_integrand(legendre_roots))
    n_bins[i] = 8*pi*(kB*T)**3/(2*pi*hbar*c)**3 * integral / (dE*MeV_to_ergs)
print 'The integral of the spectral distribution is %.4e/cm^3' % sum(n_bins*dE*MeV_to_ergs)

figure()
plot((E_bins[1:]+E_bins[:-1])/2,n_bins,'k-')
Example #34
def Cheby1(m, a, b):
    points = t_roots(m)[0]
    weights = t_roots(m)[1]
    return (b - a) * sum([
        weights[i] * np.sqrt(1 - (points[i])**2) * f((b - a) * points[i] / 2 +
                                                     (b + a) / 2)
        for i in range(len(points))
    ]) / 2


def Cheby2(m, a, b):
    points = u_roots(m)[0]
    weights = u_roots(m)[1]
    return (b - a) * sum([(weights[i] / np.sqrt(1 - (points[i])**2)) *
                          f((b - a) * points[i] / 2 + (b + a) / 2)
                          for i in range(len(points))]) / 2


a = [5, 10]

for m in a:
    # approximation results
    print(GL(m))
    print(Cheby1(m))
    print(Cheby2(m))

    # nodes
    print(p_roots(m)[0])
    print(t_roots(m)[0])
    print(u_roots(m)[0])

    # weights
    print(p_roots(m)[1])
    print(t_roots(m)[1])
    print(u_roots(m)[1])
Example #35
def check_legendre_roots(n):
    xs, ws = ([], []) if n == 0 else p_roots(n) # from SciPy
    xl, wl = libsharp.legendre_roots(n)
    assert_allclose(xs, xl, rtol=1e-14, atol=1e-14)
    assert_allclose(ws, wl, rtol=1e-14, atol=1e-14)