Example #1
	def LandaoLevelSpinor_GaugeX(self, B , n ,  Py ):
		def energy(n):
			return np.sqrt( (self.mass*self.c**2)**2 + 2*B*self.c*self.hBar*n  )

		K = B*(self.X - self.c*Py/B)**2/( 2.*self.c*self.hBar )
		
		psi1 = np.exp(-K)*(  self.mass*self.c**2 + energy(n) )* legendre(n)( K/np.sqrt(B*self.c*self.hBar) ) 

		psi3 = np.exp(-K)*(  self.mass*self.c**2 + energy(n) )* legendre(n)( K/np.sqrt(B*self.c*self.hBar) ) 

		if n>0:
			psi2 = np.exp(-K)*(  self.mass*self.c**2 + energy(n) )* legendre(n-1)( K/np.sqrt(B*self.c*self.hBar) ) 	
			psi2 = 2*1j*n*np.sqrt(B*self.c*self.hBar)

			psi4 = -psi2

		else: 
			psi2 = 0.*K
			psi4 = 0.*K	


		spinor = np.array([psi1 , psi2 , psi3 , psi4  ])

		norm = self.Norm(spinor)
		spinor /= norm

		return spinor
Example #2
def LGL_points(N):
    '''
    Calculates :math:`N` Legendre-Gauss-Lobatto (LGL) points.
    LGL points are the roots of the polynomial

    :math:`(1 - \\xi^2) P'_{N - 1}(\\xi) = 0`

    where :math:`P_{n}(\\xi)` is the Legendre polynomial of degree :math:`n`.
    This function finds the roots of the above polynomial.

    Parameters
    ----------

    N : int
        Number of LGL nodes required

    Returns
    -------

    lgl : arrayfire.Array [N 1 1 1]
          The Legendre-Gauss-Lobatto nodes.

    **See:** `document`_
    .. _document: https://goo.gl/KdG2Sv

    '''
    xi = np.poly1d([1, 0])
    legendre_N_minus_1 = N * (xi * sp.legendre(N - 1) - sp.legendre(N))
    lgl_points = legendre_N_minus_1.r
    lgl_points.sort()
    lgl_points = af.np_to_af_array(lgl_points)

    return lgl_points
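For reference, a minimal numpy/scipy-only sketch of the same construction (skipping the arrayfire conversion; lgl_points_np is a hypothetical helper introduced here): for N = 3 the LGL points are -1, 0 and 1.

import numpy as np
import scipy.special as sp

def lgl_points_np(N):
    # (1 - xi**2) * P'_{N-1}(xi) is proportional to N * (xi * P_{N-1} - P_N)
    xi = np.poly1d([1, 0])
    poly = N * (xi * sp.legendre(N - 1) - sp.legendre(N))
    return np.sort(poly.r.real)

print(lgl_points_np(3))   # approximately [-1.  0.  1.]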
Example #3
    def __init__(self, intervals, order):
        """
        :arg intervals: determines the boundaries of the subintervals to
            be used. If this is ``[a,b,c]``, then there are two subintervals
            :math:`(a,b)` and :math:`(b,c)`.
            (and the overall domain is :math:`(a,c)`)

        :arg order: highest polynomial degree being used
        """

        self.intervals = intervals
        self.nintervals = len(intervals) - 1
        self.npoints = order + 1
        self.mid_pts = (self.intervals[1:] + self.intervals[:-1]) / 2
        self.scales = (self.intervals[1:] - self.intervals[:-1]) / 2
        self.sample_nodes = sp.legendre(self.npoints).weights[:, 0].real
        self.sample_weights = sp.legendre(self.npoints).weights[:, 1].real
        # taking real part because sometimes sp.legendre is returning
        # complex numbers with zero imaginary part and displaying a warning

        nodes = (self.mid_pts + np.outer(self.sample_nodes, self.scales)).T
        self.nodes = np.reshape(nodes, -1)
        weights = np.outer(self.scales, self.sample_weights)
        self.weights = np.reshape(weights, -1)

        monos = np.array([self.sample_nodes**k for k in range(self.npoints)])
        integrals = np.array([
            (self.sample_nodes**(k + 1) - (-1)**(k + 1)) / (k + 1)
            for k in range(self.npoints)
        ])
        self.spec_int_mat = la.solve(monos, integrals)
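A standalone sketch of what this construction appears to do (assumed intent, using only numpy and scipy): for samples f(x_i) at the Gauss nodes, f_vals @ spec_int_mat returns the running integrals of f from -1 to each node, exactly for polynomials of degree below the node count.

import numpy as np
import numpy.linalg as la
import scipy.special as sp

npoints = 4
nodes = sp.legendre(npoints).weights[:, 0].real
monos = np.array([nodes**k for k in range(npoints)])
integrals = np.array([(nodes**(k + 1) - (-1)**(k + 1)) / (k + 1)
                      for k in range(npoints)])
spec_int_mat = la.solve(monos, integrals)

f_vals = 3 * nodes**2                                      # f(x) = 3 x**2
print(np.allclose(f_vals @ spec_int_mat, nodes**3 + 1))    # True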
Example #4
def compute_recon_power_spectrum(fishcast,z,b=-1.,b2=-1.,bs=-1.,N=None):
   '''
   Returns the reconstructed power spectrum, following Stephen's paper.
   '''
   if b == -1.: b = compute_b(fishcast,z)
   if b2 == -1: b2 = 8*(b-1)/21
   if bs == -1: bs = -2*(b-1)/7
   noise = 1/compute_n(fishcast,z)
   if fishcast.experiment.HI: noise = castorinaPn(z)
   if N is None: N = 1/compute_n(fishcast,z)
   f = fishcast.cosmo.scale_independent_growth_factor_f(z) 
    
   bL1 = b-1.
   bL2 = b2-8*(b-1)/21
   bLs = bs+2*(b-1)/7
    
   K,MU = fishcast.k,fishcast.mu
   h = fishcast.params['h']
   klin = np.logspace(np.log10(min(K)),np.log10(max(K)),fishcast.Nk)
   mulin = MU.reshape((fishcast.Nk,fishcast.Nmu))[0,:]
   plin = np.array([fishcast.cosmo.pk_cb_lin(k*h,z)*h**3. for k in klin])
    
   zelda = Zeldovich_Recon(klin,plin,R=15,N=2000,jn=5)

   kSparse,p0ktable,p2ktable,p4ktable = zelda.make_pltable(f,ngauss=3,kmin=min(K),kmax=max(K),nk=200,method='RecSym')
   bias_factors = np.array([1, bL1, bL1**2, bL2, bL1*bL2, bL2**2, bLs, bL1*bLs, bL2*bLs, bLs**2,0,0,0])
   p0Sparse = np.sum(p0ktable*bias_factors, axis=1)
   p2Sparse = np.sum(p2ktable*bias_factors, axis=1)
   p4Sparse = np.sum(p4ktable*bias_factors, axis=1)
   p0,p2,p4 = Spline(kSparse,p0Sparse)(klin),Spline(kSparse,p2Sparse)(klin),Spline(kSparse,p4Sparse)(klin)
   l0,l2,l4 = legendre(0),legendre(2),legendre(4)
   Pk = lambda mu: p0*l0(mu) + p2*l2(mu) + p4*l4(mu)
   result = np.array([Pk(mu) for mu in mulin]).T
   return result.flatten() + N
Example #5
    def to_poles(self, k, ells, Nmu=41, flatten=False):
        """
        Compute the multipoles by integrating over the extrapolated
        power spectrum
        """
        from scipy.special import legendre
        from scipy.integrate import simps

        scalar = np.isscalar(ells)
        if scalar: ells = [ells]

        mus = np.linspace(0., 1., Nmu)
        pkmu = self(k, mus)

        if len(ells) != len(k):
            toret = []
            for ell in ells:
                kern = (2 * ell + 1.) * legendre(ell)(mus)
                val = np.array([simps(kern * d, x=mus) for d in pkmu])
                toret.append(val)

            if scalar:
                return toret[0]
            else:
                toret = np.vstack(toret).T
                return toret if not flatten else np.ravel(toret, order='F')
        else:
            kern = np.asarray([(2 * ell + 1.) * legendre(ell)(mus)
                               for ell in ells])
            return np.array([simps(d, x=mus) for d in kern * pkmu])
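A toy check of this projection (a self-contained sketch with hypothetical inputs): build P(k, mu) from a known monopole and quadrupole and verify that (2*ell + 1) times the integral over mu of P(k, mu) * L_ell(mu) recovers them.

import numpy as np
from scipy.special import legendre

k = np.linspace(0.01, 0.3, 5)
mus = np.linspace(0., 1., 201)
p0, p2 = 1e4 / (1 + k), 2e3 / (1 + k)          # assumed toy multipoles
pkmu = p0[:, None] * legendre(0)(mus) + p2[:, None] * legendre(2)(mus)

for ell, truth in [(0, p0), (2, p2)]:
    kern = (2 * ell + 1.) * legendre(ell)(mus)
    recovered = np.trapz(kern * pkmu, x=mus, axis=-1)
    print(ell, np.allclose(recovered, truth, rtol=1e-3))   # True for both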
Example #6
    def set_grid(self, k, mu, ells, path_mu=None):

        assert ells[0] == 0  # for modelling

        for key in ['k', 'mu']:
            setattr(self, key,
                    scipy.array(self.params[key], dtype=self.TYPE_FLOAT))
        self.kk, self.mumu = scipy.meshgrid(self.k,
                                            self.mu,
                                            sparse=False,
                                            indexing='ij')
        murange = self.mu[-1] - self.mu[0]
        self.logger.info('Setting grid {:d} (k) x {:d} (mu).'.format(
            self.kk.shape[0], self.mumu.shape[-1]))

        self.kernel = scipy.asarray(
            [(2. * ell + 1.) * special.legendre(ell)(self.mumu)
             for ell in self.ells]) / murange

        if path_mu is not None:
            self.logger.info('Loading provided mu window function.')
            window = MuFunction.load(path_mu)
            self.logger.info('With resolution {:d} (k) x {:d} (mu).'.format(
                len(window.k), len(window.mu)))
            window = window(self.k, self.mu)
            norm = integrate.trapz(window, x=self.mu, axis=-1) / murange
            self.logger.info(
                'Renormalizing window(mu) by {:.4f} - {:.4f}.'.format(
                    norm.min(), norm.max()))
            window /= norm[:, None]
            self.kernelcorrmu = scipy.asarray(
                [(2. * ell + 1.) * window * special.legendre(ell)(self.mumu)
                 for ell in self.ells]) / murange
Example #7
 def covmat_N_l1l2(self, l1, l2):
     '''
     l1l2 term of the covariance matrix from N
     '''
     if self.smooth:
         window = 1.
     else:
         window = self.Wk
     if l1 == 0 and l2 == 0:
         return self.covmat_N_00
     elif l1 == 0 and l2 == 2:
         return self.covmat_N_02
     elif l1 == 0 and l2 == 4:
         return self.covmat_N_04
     elif l1 == 2 and l2 == 2:
         return self.covmat_N_22
     elif l1 == 2 and l2 == 4:
         return self.covmat_N_24
     elif l1 == 4 and l2 == 4:
         return self.covmat_N_44
     else:
         Ll1 = legendre(l1)(self.mui_grid)
         Ll2 = legendre(l2)(self.mui_grid)
         return (2. * l1 + 1.) * (2. * l2 + 1.) * np.trapz(
             self.Pnoise**2. * Ll1 * Ll2 / (self.Nmodes * window**2),
             self.mu,
             axis=0)
Example #8
        def harmonics(self):
            r"""
            Radial distributions of spherical harmonics
            (Legendre polynomials :math:`P_n(\cos \theta)`).

            Spherical harmonics are orthogonal with respect to integration over
            the full sphere:

            .. math::
                \iint P_n P_m \,d\Omega =
                \int_0^{2\pi} \int_0^\pi P_n(\cos \theta) P_m(\cos \theta)
                    \,\sin\theta d\theta \,d\varphi = 0

            for *n* ≠ *m*; and :math:`P_0(\cos \theta)` is the spherically
            averaged intensity.

            Returns
            -------
            Pn : (# terms) × (rmax + 1) numpy array
                radial dependences of the :math:`P_n(\cos \theta)` terms
            """
            terms = self.cn.shape[0]
            # conversion matrix (cos^k → P_n)
            CH = np.zeros((terms, terms))
            for i in range(terms):
                if self.odd:
                    c = legendre(i).c[::-1]
                else:
                    c = legendre(2 * i).c[::-2]
                CH[:len(c), i] = c
            CH = inv(CH)
            # apply to all radii
            harm = CH.dot(self.cn)
            return harm
Example #9
File: ifunc.py Project: taldcroft/example
def load_displ_legendre(n_ax, n_az, ord_ax=2, ord_az=0):
    x = np.linspace(-1, 1, n_az).reshape(1, n_az)
    y = np.linspace(-1, 1, n_ax).reshape(n_ax, 1)
    displ_x = (1 - legendre(ord_ax)(y)) * (1 - legendre(ord_az)(x))  # um
    displ_ry = np.gradient(displ_x, 0.5 * 1000)[0]  # radians

    return displ_x, displ_ry
Example #10
def aperture_vibrating_spherical_cap(
        n_max,
        rad_sphere,
        rad_cap):
    r"""
    Aperture function for a vibrating cap with radius :math:`r_c` in a rigid
    sphere with radius :math:`r_s` [5]_, [6]_

    .. math::

        a_n (r_{s}, \alpha) =
        \begin{cases}
            \displaystyle \cos\left(\alpha\right) P_n\left[ \cos\left(\alpha\right) \right] - P_{n-1}\left[ \cos\left(\alpha\right) \right],  & {n>0} \newline
            \displaystyle  1 - \cos(\alpha),  & {n=0}
        \end{cases}

    where :math:`\alpha = \arcsin \left(\frac{r_c}{r_s} \right)` is the
    aperture angle.


    References
    ----------
    .. [5]  E. G. Williams, Fourier Acoustics. Academic Press, 1999.
    .. [6]  F. Zotter, A. Sontacchi, and R. Höldrich, “Modeling a spherical
            loudspeaker system as multipole source,” in Proceedings of the 33rd
            DAGA German Annual Conference on Acoustics, 2007, pp. 221–222.


    Parameters
    ----------
    n_max : integer
        Maximal spherical harmonic order
    rad_sphere : double
        Radius of the sphere
    rad_cap : double
        Radius of the vibrating cap

    Returns
    -------
    A : double, ndarray
        Aperture function in diagonal matrix form with shape
        :math:`[(n_{max}+1)^2~\times~(n_{max}+1)^2]`

    """
    angle_cap = np.arcsin(rad_cap / rad_sphere)
    arg = np.cos(angle_cap)
    n_sh = (n_max+1)**2

    aperture = np.zeros((n_sh, n_sh), dtype=np.double)

    aperture[0, 0] = (1-arg)*2*np.pi**2
    for n in range(1, n_max+1):
        legendre_minus = special.legendre(n-1)(arg)
        legendre_plus = special.legendre(n+1)(arg)
        for m in range(-n, n+1):
            acn = nm2acn(n, m)
            aperture[acn, acn] = (legendre_minus - legendre_plus) * \
                    4 * np.pi**2 / (2*n+1)

    return aperture
Example #11
def covll_reduced(kvals,mu,Pkmu,ng,mlps=[0,2,4]):
	""" Gets Cov' in equation (91) from https://wwwmpa.mpa-garching.mpg.de/~komatsu/lecturenotes/Shun_Saito_on_RSD.pdf. """

	# Count things
	nk = len(kvals)
	nl = len(mlps)

	# Get the k-value separation - expecting to be linearly spaced, i.e. equally spaced!
	dk = float_unique(np.diff(kvals),-2) # EXTREMELY LOW TOLERANCE SET!!!
	if len(dk)>1:
		print(dk)
		raise Exception('Can only calculate the covariance matrix for linearly spaced k-values!')

	# Multiply final result by 2 if only have positive mu
	pref = 1.
	if not np.any(mu<0.): pref/=2.

	# Loop over both multipoles
	covmat = np.zeros((nk*nl,nk*nl))
	for i1,l1 in enumerate(mlps):
		for i2,l2 in enumerate(mlps):
			prefactor = pref * (2.*l1+1.)*(2.*l2+1.) / 2.
			
			# Loop over k-modes
			for i,k in enumerate(kvals):
				integrand = legendre(l1)(mu) * legendre(l2)(mu) * (Pkmu[i,:]+1./ng)**2
				covmat[i1*nk+i,i2*nk+i] = prefactor * np.trapz(integrand,x=mu)

	return covmat
Example #12
def roots_legendre(n):
    """
    Computes the sample points and weights for Gauss-Legendre quadrature.
    The sample points are the roots of the n-th degree Legendre polynomial
    `P_n(x)`.  These sample points and weights correctly integrate
    polynomials of degree `2n - 1` or less over the interval
    `[-1, 1]` with weight function `f(x) = 1.0`.

    Parameters
    ----------
    n : int
        quadrature order

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    """
    p = legendre(n + 1)
    x = legendre(n).roots
    w = 2 * (1 - x**2) / ((n + 1)**2 * p(x)**2)
    temp = list(zip(x, w))
    temp.sort()
    x, w = array([i for i, _ in temp]), array([i for _, i in temp])
    return x, w
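A quick sanity check of this routine against numpy's built-in Gauss-Legendre rule (a sketch; it assumes the bare array and legendre names used above come from numpy and scipy.special).

import numpy as np

x, w = roots_legendre(5)
x_ref, w_ref = np.polynomial.legendre.leggauss(5)
print(np.allclose(x, x_ref), np.allclose(w, w_ref))   # True True
# A 5-point rule integrates polynomials up to degree 9 exactly:
print(np.isclose(np.sum(w * x**8), 2.0 / 9.0))        # True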
Example #13
 def covmat_l1l2(self, l1, l2):
     '''
     l1l2 term of the total covariance matrix
     '''
     if self.smooth:
         window = 1.
     else:
         window = self.Wk
     if l1 == 0 and l2 == 0:
         return self.covmat_00
     elif l1 == 0 and l2 == 2:
         return self.covmat_02
     elif l1 == 0 and l2 == 4:
         return self.covmat_04
     elif l1 == 2 and l2 == 2:
         return self.covmat_22
     elif l1 == 2 and l2 == 4:
         return self.covmat_24
     elif l1 == 4 and l2 == 4:
         return self.covmat_44
     else:
         Ll1 = legendre(l1)(self.mui_grid)
         Ll2 = legendre(l2)(self.mui_grid)
         integrand = (self.Pk + self.Pnoise / window) / self.Nmodes**0.5
         return (2. * l1 + 1.) * (2. * l2 + 1.) * np.trapz(
             integrand**2 * Ll1 * Ll2, self.mu, axis=0)
Example #14
File: pktoxi.py Project: andreicuceu/vega
    def __init__(self, k_grid, muk_grid, ell_max=6, old_fftlog=False):
        """Initialize the FFTLog and the Legendre polynomials

        Parameters
        ----------
        k_grid : 1D Array
            Wavenumber grid of power spectrum
        muk_grid : ND Array
            k_parallel / k grid for input power spectrum
        ell_max : int, optional
            Maximum multipole to sum over, by default 6
        """
        self.k_grid = k_grid
        self.muk_grid = muk_grid
        self.dmuk = 1 / len(muk_grid)
        self.ell_max = ell_max
        self._old_fftlog = old_fftlog

        # Initialize the multipole values we will need (only even ells)
        self.ell_vals = np.arange(0, ell_max + 1, 2)

        # Initialize FFTLog objects and Legendre polynomials for each multipole
        self.fftlog_objects = {}
        self.legendre_pk = {}
        self.legendre_xi = {}
        for ell in self.ell_vals:
            if not self._old_fftlog:
                self.fftlog_objects[ell] = P2xi(k_grid, l=ell, lowring=True)
            # Precompute the Legendre polynomials used to decompose Pk into Pk_ell
            self.legendre_pk[ell] = special.legendre(ell)(self.muk_grid)
            # We don't know the mu grid for Xi in advance, so just initialize
            self.legendre_xi[ell] = special.legendre(ell)
Example #15
	def LandaoLevelSpinor_GaugeX(self, B , n ,  Py ):
		def energy(n):
			return np.sqrt( (self.mass*self.c**2)**2 + 2*B*self.c*self.hBar*n  )

		K = B*(self.X - self.c*Py/B)**2/( 2.*self.c*self.hBar )
		
		psi1 = np.exp(-K)*(  self.mass*self.c**2 + energy(n) )* legendre(n)( K/np.sqrt(B*self.c*self.hBar) ) 

		psi3 = np.exp(-K)*(  self.mass*self.c**2 + energy(n) )* legendre(n)( K/np.sqrt(B*self.c*self.hBar) ) 

		if n>0:
			psi2 = np.exp(-K)*(  self.mass*self.c**2 + energy(n) )* legendre(n-1)( K/np.sqrt(B*self.c*self.hBar) ) 	
			psi2 = 2*1j*n*np.sqrt(B*self.c*self.hBar)

			psi4 = -psi2

		else: 
			psi2 = 0.*K
			psi4 = 0.*K	


		spinor = np.array([psi1 , psi2 , psi3 , psi4  ])

		norm = self.Norm(spinor)
		spinor /= norm

		return spinor
Example #16
    def lgf_test_plot(self, pl="lgf_test", **kwargs):
        """test plot of legendre functions to legendre polynomials using Legendre class"""
        nu_max = 30
        v_arr = linspace(-1.0, nu_max, 1000)
        print "start plot"
        pl = line(v_arr,
                  self.Pv(v_arr, 0.0),
                  pl=pl,
                  color="blue",
                  linewidth=0.5,
                  label=r"$P_{\nu}(0)$")
        line(v_arr,
             self.Pv(v_arr, 0.25),
             pl=pl,
             color="red",
             linewidth=0.5,
             label=r"$P_{\nu}(0.25)$")
        line(v_arr,
             self.Pv(v_arr, 0.5),
             pl=pl,
             color="green",
             linewidth=0.5,
             label=r"$P_{\nu}(0.5)$")
        line(v_arr,
             self.Pv(v_arr, 0.75),
             pl=pl,
             color="purple",
             linewidth=0.5,
             label=r"$P_{\nu}(0.75)$")
        print "stop plot"
        if 1:
            for nu in range(nu_max):
                scatter(array([nu]),
                        array([legendre(nu)(0.0)]),
                        pl=pl,
                        color="blue",
                        marker_size=3.0)
                scatter(array([nu]),
                        array([legendre(nu)(0.25)]),
                        pl=pl,
                        color="red",
                        marker_size=3.0)
                scatter(array([nu]),
                        array([legendre(nu)(0.5)]),
                        pl=pl,
                        color="green",
                        marker_size=3.0)
                scatter(array([nu]),
                        array([legendre(nu)(0.75)]),
                        pl=pl,
                        color="purple",
                        marker_size=3.0)

            pl.xlabel = r"$\nu$"
            pl.ylabel = r"$P_{\nu}(x)$"
            pl.legend()
            pl.set_ylim(-0.75, 1.5)
        return pl
Example #17
def load_displ_legendre(ifuncs, ord_ax=2, ord_az=0, rms=None):
    n_ax, n_az = ifuncs.shape[2:4]
    x = np.linspace(-1, 1, n_az).reshape(1, n_az)
    y = np.linspace(-1, 1, n_ax).reshape(n_ax, 1)
    displ = (1 - legendre(ord_ax)(y)) * (1 - legendre(ord_az)(x))
    if rms:
        displ = displ / np.std(displ) * rms

    return displ
Example #18
File: Lambda.py Project: tt-nakamura/atom
def ComputeLambda(i, j, k):
    """ (1/4)\int_{-1}^1 P_i(x) P_j(x) P_k(x) dx
    where P_i is the Legendre polynomial of degree i
    """
    n = i + j + k
    if n & 1 or i + j < k or j + k < i or k + i < j: return 0

    y = legendre(i) * legendre(j) * legendre(k)
    return fixed_quad(y, 0, 1, n=(n >> 1) + 1)[0] / 2
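A quick cross-check against direct numerical integration (a sketch; brute_lambda is a hypothetical helper introduced only for the comparison, and ComputeLambda's own legendre/fixed_quad imports are assumed to be in scope).

from scipy.special import legendre
from scipy.integrate import quad

def brute_lambda(i, j, k):
    # Direct evaluation of (1/4) * integral_{-1}^{1} P_i P_j P_k dx
    f = lambda x: legendre(i)(x) * legendre(j)(x) * legendre(k)(x)
    return quad(f, -1, 1)[0] / 4

print(ComputeLambda(0, 0, 0), brute_lambda(0, 0, 0))   # both 0.5
print(ComputeLambda(1, 1, 2), brute_lambda(1, 1, 2))   # both 1/15 = 0.0666...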
Example #19
 def covmat_CV_24(self):
     '''
     24 term of the covariance matrix from CV
     (equal to the 42)
     '''
     L2 = legendre(2)(self.mui_grid)
     L4 = legendre(4)(self.mui_grid)
     return 45. / 2. * np.trapz(
         self.Pk**2 * L2 * L4 / self.Nmodes, self.mu, axis=0)
Example #20
def load_displ_legendre(ifuncs, ord_ax=2, ord_az=0, rms=None):
    n_ax, n_az = ifuncs.shape[2:4]
    x = np.linspace(-1, 1, n_az).reshape(1, n_az)
    y = np.linspace(-1, 1, n_ax).reshape(n_ax, 1)
    displ = (1 - legendre(ord_ax)(y)) * (1 - legendre(ord_az)(x))
    if rms:
        displ = displ / np.std(displ) * rms

    return displ
Example #21
    def legendre_basis(self):

        leg1 = legendre(0)
        phi_leg = leg1(self.x)

        for i in range(1, (self.M) + 1):
            leg1 = legendre(i)
            phi_leg = np.concatenate((phi_leg, leg1(self.x)), axis=1)

        return phi_leg
Example #22
    def __init__(self, intervals, order):
        """
        :arg intervals: determines the boundaries of the subintervals to
            be used. If this is ``[a,b,c]``, then there are two subintervals
            :math:`(a,b)` and :math:`(b,c)`.
            (and the overall domain is :math:`(a,c)`)

        :arg order: highest polynomial degree being used
        """
        self.intervals=intervals

        self.nintervals=len(intervals)-1

        self.npoints=order + 1

        #Initializing shifted_nodes
        shifted_nodes=np.zeros(self.npoints*self.nintervals) 

        #Calling the scipy function to obtain the unshifted nodes
        unshifted_nodes=sp.legendre(self.npoints).weights[:,0]

        #Initializing shifted weights
        shifted_weights=np.zeros(self.nintervals*self.npoints)

        #Calling the scipy function to obtain the unshifted weights
        unshifted_weights=sp.legendre(self.npoints).weights[:,1]

        #Linearly mapping the unshifted nodes and weights to get the shifted
        #nodes and weights
        for i in range(self.nintervals):
                shifted_nodes[i*self.npoints:(i+1)*self.npoints]=(self.intervals[i]+ self.intervals[i+1])/2 + (self.intervals[i+1]-self.intervals[i])*(unshifted_nodes[0:self.npoints])/2
                shifted_weights[i*self.npoints:(i+1)*self.npoints]=(self.intervals[i+1]-self.intervals[i])*(unshifted_weights[0:self.npoints])/2
       
        #Setting nodes and weights attributes
        self.nodes=np.reshape(shifted_nodes,(self.nintervals,self.npoints))
        self.weights=np.reshape(shifted_weights,(self.nintervals,self.npoints))
    
        #Obtaining Vandermonde and RHS matrices to get A
        def vandermonde_rhs(m,arr):
            X=np.zeros((m,m))
            RHS=np.zeros((m,m))
            for i in range(m):
                for j in range(m):
                    X[i][j]=arr[i]**j
                    RHS[i][j]=((arr[i]**(j+1))-((-1)**(j+1)))/(j+1)
            return X,RHS
        
        A=np.zeros((self.npoints,self.npoints))
        X,RHS=vandermonde_rhs(self.npoints,unshifted_nodes)

        #Solving for spectral integration matrix
        A=np.dot(RHS,la.inv(X))
        
        self.A=A
Example #23
 def getRadauRightPolyDeriv(self):
     from scipy.special import legendre
     from numpy.polynomial.polynomial import polyval
     from numpy import insert
     ###########################################################
     # Construct Right Radau Polynomial
     Temp1 = legendre(self.Order).deriv(1).coeffs
     Temp2 = legendre(self.Order+1).deriv(1).coeffs
     RadauRPolyDeriv_Vec = (-1)**self.Order / 2.0 * (insert(Temp1, 0, 0) - Temp2)
     RadauRPolyDerivValue_Vec = polyval(self.Nodes, RadauRPolyDeriv_Vec[::-1])
     return RadauRPolyDerivValue_Vec
Example #24
 def covmat_24(self):
     '''
     24 term of the total covariance matrix
     '''
     if self.smooth:
         window = 1.
     else:
         window = self.Wk
     L2 = legendre(2)(self.mui_grid)
     L4 = legendre(4)(self.mui_grid)
     integrand = (self.Pk + self.Pnoise / window) / self.Nmodes**0.5
     return 45. / 2. * np.trapz(integrand**2 * L2 * L4, self.mu, axis=0)
Example #25
def gauss_quad(p):
    # Chebyshev points as initial guess
    x_0 = np.cos(np.arange(1, p + 1) / (p + 1) * np.pi)
    nodal_pts = np.empty(p)
    for i, ch_pt in enumerate(x_0):
        leg = legendre(p)
        leg_p = partial(_legendre_prime, n=p)
        nodal_pts[i] = _newton_method(leg, leg_p, ch_pt, 100)

    weights = 2 / (p * legendre(p - 1)(nodal_pts) *
                   _legendre_prime(nodal_pts, p))
    return nodal_pts[::-1], weights
Example #26
def f_l_lp_integrand(mu, x, y, l, lp): 
    ''' Integrand of f_l_lp integration 
    '''
    Leg_l = legendre(l)     # Legendre polynomial of order l 
    Leg_lp = legendre(lp)   # Legendre polynomial of order l'
    
    if np.abs(mu) > np.abs(x/y): 
        raise ValueError

    theta = np.sqrt(x**2 - y**2 * mu**2)

    return Leg_l(mu) * Leg_lp(y * mu/x) * (W_2d(theta) + y**2 * (1. - mu**2) * W_secorder(theta))
Example #27
def test_modify_paget():
    qp = map_to(paget(10, 2), [0,1])
    qg = gaussxw(10)
    import ipdb
    ipdb.set_trace()
    from scipy.special import legendre

    lhs = [legendre(i)(qg[0]) for i in range(10)]
    rhs = [sum(legendre(i)(qg[0]) * qg[1]) for i in range(10)]
    wts = np.linalg.solve(lhs, rhs)
    import ipdb
    ipdb.set_trace()
Example #28
def mod_legendre(ell, xx):
    if (ell == 0):
        return special.legendre(ell)(xx)  #xx/xx
    elif (ell == 1):
        return special.legendre(ell)(xx)
    elif (ell == 2):
        return (2 * 2 + 1) * special.legendre(ell)(xx)
    elif (ell == -2):
        return (1 - xx)**2
    elif (ell == 4):
        return (2 * 4 + 1) * special.legendre(ell)(xx)
    else:
        raise Exception('Polynomial not yet defined')
Example #29
    def __init__(self, degX, xMin, xMax, degY, yMin, yMax):
        self.degX = degX
        self.xMin = xMin
        self.xMax = xMax
        self.xRange = xMax - xMin

        self.degY = degY
        self.yMin = yMin
        self.yMax = yMax
        self.yRange = yMax - yMin

        self.Xp = sps.legendre(self.degX)
        self.Yp = sps.legendre(self.degY)
Example #30
    def internal_flux(self):
        # Compute internal flux array
        up = np.zeros((self.order, self.order))
        for i in range(self.order):
            for j in range(self.order):
                up[i, j] = self.weights[j] * sum(
                    (2 * s + 1) / 2 * sp.legendre(s)(self.nodes[i]) *
                    sp.legendre(s).deriv()(self.nodes[j])
                    for s in range(self.order))

        # Clear machine errors
        up[np.abs(up) < 1.0e-10] = 0

        return cp.asarray(up)
Example #31
def load_displ_legendre(n_ax,
                        n_az,
                        ord_ax=2,
                        ord_az=0,
                        offset_ax=1,
                        offset_az=1,
                        norm=1.0):
    x = np.linspace(-1, 1, n_az).reshape(1, n_az)
    y = np.linspace(-1, 1, n_ax).reshape(n_ax, 1)
    displ_x = norm * (offset_ax - legendre(ord_ax)(y)) * (
        offset_az - legendre(ord_az)(x))  # um
    displ_ry = np.gradient(displ_x, 0.5 * 1000)[0]  # radians

    return displ_x, displ_ry
Example #32
    def add_poly(self, order=0, include_lower=True):
        """Add nth order Legendre polynomial terms as columns to design matrix. Good for adding constant/intercept to model (order = 0) and accounting for slow-frequency nuisance artifacts e.g. linear, quadratic, etc drifts. Care is recommended when using this with `.add_dct_basis()` as some columns will be highly correlated.

        Args:
            order (int): what order terms to add; 0 = constant/intercept
                        (default), 1 = linear, 2 = quadratic, etc
            include_lower: (bool) whether to add lower order terms if order > 0

        """
        if order < 0:
            raise ValueError("Order must be 0 or greater")

        if self.polys and any(elem.count("_") == 2 for elem in self.polys):
            raise AmbiguityError(
                "It appears that this Design Matrix contains polynomial terms that were kept seperate from a previous append operation. This makes it ambiguous for adding polynomials terms. Try calling .add_poly() on each separate Design Matrix before appending them instead."
            )

        polyDict = {}
        # Normal/canonical legendre polynomials on the range -1,1 but with size defined by number of observations; keeps all polynomials on similar scales (i.e. big polys don't blow up) and betas are better behaved
        norm_order = np.linspace(-1, 1, self.shape[0])

        if "poly_" + str(order) in self.polys:
            print(
                "Design Matrix already has {}th order polynomial...skipping".format(
                    order
                )
            )
            return self

        if include_lower:
            for i in range(order + 1):
                if "poly_" + str(i) in self.polys:
                    print(
                        "Design Matrix already has {}th order polynomial...skipping".format(
                            i
                        )
                    )
                else:
                    polyDict["poly_" + str(i)] = legendre(i)(norm_order)
        else:
            polyDict["poly_" + str(order)] = legendre(order)(norm_order)

        toAdd = Design_Matrix(polyDict, sampling_freq=self.sampling_freq)
        out = self.append(toAdd, axis=1)
        if out.polys:
            new_polys = out.polys + list(polyDict.keys())
            out.polys = new_polys
        else:
            out.polys = list(polyDict.keys())
        return out
Example #33
    def derivative_matrix(self):
        der = np.zeros((self.order, self.order))

        for i in range(self.order):
            for j in range(self.order):
                der[i, j] = self.weights[j] * sum(
                    self.eigenvalues[s] * sp.legendre(s).deriv()
                    (self.nodes[i]) * sp.legendre(s)(self.nodes[j])
                    for s in range(self.order))

        # Clear machine errors
        der[np.abs(der) < 1.0e-15] = 0

        return der
Example #34
    def advection_matrix(self):
        adv = np.zeros((self.order, self.order))

        # Fill matrix
        for i in range(self.order):
            for j in range(self.order):
                adv[i, j] = self.weights[i] * self.weights[j] * sum(
                    self.eigenvalues[s] * sp.legendre(s)(self.nodes[i]) *
                    sp.legendre(s).deriv()(self.nodes[j])
                    for s in range(self.order))

        # Clean machine error
        adv[np.abs(adv) < 1.0e-15] = 0

        return adv
Example #35
 def covmat_N_24(self):
     '''
     24 term of the covariance matrix from instrumental noise
     (equal to the 42)
     '''
     if self.smooth:
         window = 1.
     else:
         window = self.Wk
     L2 = legendre(2)(self.mui_grid)
     L4 = legendre(4)(self.mui_grid)
     return 45. / 2. * np.trapz(self.Pnoise**2. * L2 * L4 /
                                (self.Nmodes * window**2),
                                self.mu,
                                axis=0)
Example #36
def calculate_betas(ev):
    fit_position = np.array(ev.GetFitResult('scintFitter').GetVertex(0).GetPosition())
    hit_pmts = [ev.GetPMTUnCal(i) for i in range(ev.GetPMTUnCalCount())]
    npairs = len(hit_pmts) * (len(hit_pmts) - 1) / 2

    count = 0
    thetas = []
    for i, u in enumerate(hit_pmts[:-1]):
        if count % 200 == 0 and debug:
            print 'Pair', count, '/', npairs

        for v in hit_pmts[i+1:]:
            thetas.append(get_theta(pmtpos[u.GetID()], pmtpos[v.GetID()], fit_position))
            count += 1

    betas = {}
    beta14 = np.zeros(len(thetas))
    for l in range(5):
        ps = legendre(l)(thetas)
        betas[l] = np.mean(ps)

        if l == 1:
            beta14 += ps
        elif l == 4:
            beta14 += 4.0 * ps

    betas['14'] = np.mean(beta14)

    return betas
Example #37
File: misc.py Project: turingbirds/nipype
 def _run_interface(self, runtime):
     img = nb.load(self.inputs.in_file)
     data = img.get_data()
     if isdefined(self.inputs.regress_poly):
         timepoints = img.get_shape()[-1]
         X = np.ones((timepoints,1))
         for i in range(self.inputs.regress_poly):
             X = np.hstack((X,legendre(i+1)(np.linspace(-1, 1, timepoints))[:, None]))
         betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
         datahat = np.rollaxis(np.dot(X[:,1:],
                                      np.rollaxis(betas[1:, :, :, :], 0, 3)),
                               0, 4)
         data = data - datahat
         img = nb.Nifti1Image(data, img.get_affine(), img.get_header())
         nb.save(img,  self._gen_output_file_name('detrended'))
     meanimg = np.mean(data, axis=3)
     stddevimg = np.std(data, axis=3)
     tsnr = meanimg/stddevimg
     img = nb.Nifti1Image(tsnr, img.get_affine(), img.get_header())
     nb.save(img,  self._gen_output_file_name())
     img = nb.Nifti1Image(meanimg, img.get_affine(), img.get_header())
     nb.save(img,  self._gen_output_file_name('mean'))
     img = nb.Nifti1Image(stddevimg, img.get_affine(), img.get_header())
     nb.save(img,  self._gen_output_file_name('stddev'))
     return runtime
Example #38
def delPcorr_integrand_kmu_dmudq(mu, q, k, l=None, rc=None, Pkmu_interp=None, k_bin=None, 
        mu_bin=None): 
    ''' Del P^corr integrand calculated using P(k, mu) and dblquad: 

    Leg_l(mu) q P(q, (k mu)/q) W_1D(k rc sqrt(1-mu^2), q rc sqrt(1 - (k mu/q)^2))
    '''
    #integ_time = time.time()
    Leg_l = legendre(l)
    theta = k*mu/q  
    x = k * rc * np.sqrt(1. - mu**2)
    y = q * rc * np.sqrt(1. - theta**2)

    # if q is beyond the k bounds
    if q < k_bin[0]: 
        return 0.0
    elif q > k_bin[-1]:
        return 0.0
    # to prevent boundary issues
    if theta > mu_bin[-1] and theta <= 1.0: 
        theta = mu_bin[-1]
    elif theta < mu_bin[0] and theta >= -1.0: 
        theta = mu_bin[0]

    w1d = W_2d(y) + (J1(y)/(y**2) - J1(y)/(2*y)) * x**2

    integrand = Leg_l(mu) * q * Pkmu_interp(np.array([theta, q]))[0] * w1d 
    #print 'delPcorr integrand takes ', time.time()-integ_time
    return integrand
Example #39
def Gaussian_quad(fx, start, end, n=5):
    # Gauss-Legendre quadrature of fx over [start, end], using the nodes and
    # weights attached to scipy's legendre polynomial of degree n.
    nodes = legendre(n).weights[:, 0].real
    weights = legendre(n).weights[:, 1].real
    half, mid = 0.5 * (end - start), 0.5 * (end + start)
    return half * sum(w * fx(mid + half * x) for x, w in zip(nodes, weights))
Example #40
File: flegendre.py Project: bsipocz/pydl
def flegendre(x,m):
    """Compute the first `m` Legendre polynomials.

    Parameters
    ----------
    x : array-like
        Compute the Legendre polynomials at these abscissa values.
    m : :class:`int`
        The number of Legendre polynomials to compute.  For example, if
        :math:`m = 3`, :math:`P_0 (x)`, :math:`P_1 (x)` and :math:`P_2 (x)` will be computed.

    Returns
    -------
    flegendre : :class:`numpy.ndarray`
    """
    import numpy as np
    from scipy.special import legendre
    if isinstance(x,np.ndarray):
        n = x.size
    else:
        n = 1
    if m < 1:
        raise ValueError('Number of Legendre polynomials must be at least 1.')
    try:
        dt = x.dtype
    except AttributeError:
        dt = np.float64
    leg = np.ones((m,n),dtype=dt)
    if m >= 2:
        leg[1,:] = x
    if m >= 3:
        for k in range(2,m):
            leg[k,:] = np.polyval(legendre(k),x)
    return leg
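A small usage example (a sketch): each row of the result is one Legendre polynomial evaluated on the input grid.

import numpy as np

x = np.linspace(-1, 1, 5)
leg = flegendre(x, 3)                      # rows P_0(x), P_1(x), P_2(x)
print(leg.shape)                           # (3, 5)
print(np.allclose(leg[2], 0.5 * (3 * x**2 - 1)))   # True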
Example #41
File: spf.py Project: ecaruyer/qspace
    def odf_marginal(self):
        """Computes the marginal ODF from the q-space signal attenuation 
        expressed in the SPF basis, following [cheng-ghosh-etal:10].

        Returns
        -------
        spherical_harmonics : sh.SphericalHarmonics instance.
        """
        dim_sh = sh.dimension(self.angular_rank)
    
        sh_coefs = np.zeros(dim_sh)
        sh_coefs[0] = 1 / np.sqrt(4 * np.pi)
    
        for l in range(2, self.angular_rank + 1, 2):
            for m in range(-l, l + 1):
                j = sh.index_j(l, m)
                for n in range(1, self.radial_order):
                    partial_sum = 0.0
                    for i in range(1, n + 1):
                        partial_sum += (-1)**i * \
                          utils.binomial(n + 0.5, n - i) * 2**i / i
                    sh_coefs[j] += partial_sum * kappa(self.zeta, n) * \
                      self.coefficients[n, j] * \
                      legendre(l)(0) * l * (l + 1) / (8 * np.pi)
        return sh.SphericalHarmonics(sh_coefs)
Example #42
def outside_integrand(mu, k, l, k_data, P_data, extrap_params=[[3345.0, -1.6], [400.0, -4.]], rc = 0.4):

    Leg_l = legendre(l)

    integrand = Leg_l(mu) * inside_integral(mu, k, k_data, P_data, extrap_params=extrap_params, lower_bound=np.abs(k * mu), rc=rc)

    return integrand
Example #43
def flegendre(x,m):
    """Compute a Legendre polynomial.

    Parameters
    ----------
    x : array_like
    m : int

    Returns
    -------
    flegendre : array_like
    """
    import numpy as np
    from scipy.special import legendre
    if isinstance(x,np.ndarray):
        n = x.size
    else:
        n = 1
    if m < 1:
        raise ValueError('Order of Legendre polynomial must be at least 1.')
    leg = np.ones((m,n),dtype='d')
    if m >= 2:
        leg[1,:] = x
    if m >= 3:
        for k in range(2,m):
            leg[k,:] = np.polyval(legendre(k),x)
    return leg
Example #44
    def __init__(self, rbins, ells):
        from scipy.special import legendre

        Binning.__init__(self, ['r'], [rbins], ells)

        self.ells = numpy.array(ells)
        self.legendre = [legendre(l) for l in self.channels]
Example #45
def legendre_(l,m,x):
    """
    Legendre polynomial.
    
    Check equation (3) from Townsend, 2002:
    
    >>> ls,x = [0,1,2,3,4,5],cos(linspace(0,pi,100))
    >>> check = 0
    >>> for l in ls:
    ...     for m in range(-l,l+1,1):
    ...         Ppos = legendre_(l,m,x)
    ...         Pneg = legendre_(l,-m,x)
    ...         mycheck = Pneg,(-1)**m * factorial(l-m)/factorial(l+m) * Ppos
    ...         check += sum(abs(mycheck[0]-mycheck[1])>1e-10)
    >>> print check
    0
    """
    m_ = abs(m)
    legendre_poly = legendre(l)
    deriv_legpoly_ = legendre_poly.deriv(m=m_)
    deriv_legpoly = np.polyval(deriv_legpoly_,x)
    P_l_m = (-1)**m_ * (1-x**2)**(m_/2.) * deriv_legpoly
    if m<0:
        P_l_m = (-1)**m_ * factorial(l-m_)/factorial(l+m_) * P_l_m
    return P_l_m
Example #46
def chutes_iniciais(n=2, size=1024, mu=None):
    """Retorna os n primeiros polinomios de legendre modulados por uma
    gaussiana.

    Params
    ------

    n : int
        o numero de vetores
    size : int
        o tamanho dos vetores
    mu : float
        centro da gaussiana, entre 0 e 1

    Returns
    -------

    Um array com n arrays contendo os polinomios modulados
    """
    sg = np.linspace(-1, 1, size)  # short grid
    g = gaussian(size, std=int(size/100))  # gaussian
    if mu:
        sigma = np.ptp(sg)/100
        g = (1.0/np.sqrt(2*np.pi*sigma**2))*np.exp(-(sg-mu)**2 / (2*sigma**2))
    vls = [g*legendre(i)(sg) for i in range(n)]
    return np.array(vls, dtype=np.complex_)
Example #47
def check_legendre_transform(lmax, ntheta):
    l = np.arange(lmax + 1)
    if lmax >= 1:
        sigma = -np.log(1e-3) / lmax / (lmax + 1)
        bl = np.exp(-sigma*l*(l+1))
        bl *= (2 * l + 1)
    else:
        bl = np.asarray([1], dtype=np.double)

    theta = np.linspace(0, np.pi, ntheta, endpoint=True)
    x = np.cos(theta)

    # Compute truth using scipy.special.legendre
    P = np.zeros((ntheta, lmax + 1))
    for l in range(lmax + 1):
        P[:, l] = legendre(l)(x)
    y0 = np.dot(P, bl)


    # double-precision
    y = libsharp.legendre_transform(x, bl)

    assert_allclose(y, y0, rtol=1e-12, atol=1e-12)

    # single-precision
    y32 = libsharp.legendre_transform(x.astype(np.float32), bl)
    assert_allclose(y32, y0, rtol=1e-5, atol=1e-5)
Example #48
File: misc.py Project: cdla/nipype
 def _run_interface(self, runtime):
     img = nb.load(self.inputs.in_file[0])
     header = img.get_header().copy()
     vollist = [nb.load(filename) for filename in self.inputs.in_file]
     data = np.concatenate([vol.get_data().reshape(
         vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
     if data.dtype.kind == 'i':
         header.set_data_dtype(np.float32)
         data = data.astype(np.float32)
     if isdefined(self.inputs.regress_poly):
         timepoints = img.get_shape()[-1]
         X = np.ones((timepoints, 1))
         for i in range(self.inputs.regress_poly):
             X = np.hstack((X, legendre(
                 i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
         betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
         datahat = np.rollaxis(np.dot(X[:, 1:],
                                      np.rollaxis(
                                          betas[1:, :, :, :], 0, 3)),
                               0, 4)
         data = data - datahat
         img = nb.Nifti1Image(data, img.get_affine(), header)
         nb.save(img, self._gen_output_file_name('detrended'))
     meanimg = np.mean(data, axis=3)
     stddevimg = np.std(data, axis=3)
     tsnr = meanimg / stddevimg
     img = nb.Nifti1Image(tsnr, img.get_affine(), header)
     nb.save(img, self._gen_output_file_name())
     img = nb.Nifti1Image(meanimg, img.get_affine(), header)
     nb.save(img, self._gen_output_file_name('mean'))
     img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
     nb.save(img, self._gen_output_file_name('stddev'))
     return runtime
Example #49
File: confounds.py Project: Conxz/nipype
def regress_poly(degree, data):
    ''' returns data with degree polynomial regressed out.
    The last dimension (i.e. data.shape[-1]) should be time.
    '''
    datashape = data.shape
    timepoints = datashape[-1]

    # Rearrange all voxel-wise time-series in rows
    data = data.reshape((-1, timepoints))

    # Generate design matrix
    X = np.ones((timepoints, 1))
    for i in range(degree):
        polynomial_func = legendre(i+1)
        value_array = np.linspace(-1, 1, timepoints)
        X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis]))

    # Calculate coefficients
    betas = np.linalg.pinv(X).dot(data.T)

    # Estimation
    datahat = X[:, 1:].dot(betas[1:, ...]).T
    regressed_data = data - datahat

    # Back to original shape
    return regressed_data.reshape(datashape)
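A toy usage check (a sketch with hypothetical data; regress_poly's own numpy and legendre imports are assumed to be in scope): regressing out a first-order polynomial removes a linear drift and leaves each time series flat at its own offset, since the constant column is excluded from the subtraction.

import numpy as np

timepoints = 200
drift = np.linspace(0., 5., timepoints)
data = np.vstack([drift + 1.0, drift - 3.0])     # two "voxels", shape (2, T)
cleaned = regress_poly(1, data)
print(np.allclose(cleaned, cleaned.mean(axis=-1, keepdims=True)))   # True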
Example #50
 def __init__(self, rbins, ells, los, **kwargs):
     from scipy.special import legendre 
     
     Binning.__init__(self, ['r'], [rbins], **kwargs)
     
     self.los = los
     self.ells = numpy.array(ells)
     self.legendre = [legendre(l) for l in self.ells]
Example #51
File: sh.py Project: mdesco/qspace
def P(rank=_default_rank):
    "returns the Funk-Radon operator matrix"
    dim_sh = dimension(rank)
    P = np.zeros((dim_sh, dim_sh))
    for j in range(dim_sh):
        l =  index_l(j)
        P[j, j] = 2 * np.pi * legendre(l)(0)
    return P
Example #52
def panel_method_vectorized(a, b, N, Z, ng):

    cz = 0.5 * (Z[1:] + Z[:-1])  # complex midpoint
    cx = cz.real  # x-coor panel midpoint
    cy = cz.imag  # y-coor panel midpoint
    L = abs(Z[1:] - Z[:-1])  # Panel length
    n1 = (-Z.imag[1:] + Z.imag[:-1]) / L  # x-comp normal vector
    n2 = (Z.real[1:] - Z.real[:-1]) / L  # y-comp normal vector
    x = Z.real
    y = Z.imag

    # Matrix coefficients P and q
    P = np.zeros([N, N])
    q = np.zeros([N, N])

    #
    def TpG(x0, x1, xm, y0, y1, ym, t):
        r = np.sqrt((0.5 * (x0 * (1 - t) + x1 * (1 + t)) - xm)**2 +
                    (0.5 * (y0 * (1 - t) + y1 * (1 + t)) - ym)**2)
        v = 0.5 * np.sqrt((x1 - x0)**2 + (y1 - y0)**2)
        return np.log(r) * v

    # Weights and points for gauss quad
    # from readdata import readdata
    # w, p = readdata('weightsAndPoints%s.txt' % ng)

    # Importing weight and points for gauss quad int
    from scipy import special as sp
    p, w = sp.legendre(ng).weights[:, :-1].T
    p, w = p.reshape(ng, 1), w.reshape(ng, 1)

    # Opening angle given by the law of cosines
    for i in xrange(N):
        b = abs(cz[i] - Z[1:])
        c = abs(cz[i] - Z[:-1])
        P[i] = -np.arccos((b**2 + c**2 - L**2) / (2 * b * c))
        q[i] = np.sum(w * TpG(x[:-1], x[1:], cx[i], y[:-1], y[1:], cy[i], p),
                      0)

    P[np.isnan(P)] = 0  # Needed for rectangle
    np.fill_diagonal(P, -np.pi)  # phi = -pi, for i = j

    # RHS
    Q = np.transpose(
        [np.dot(q, n1),
         np.dot(q, n2),
         np.dot(q, (cx * n2 - cy * n1))])

    # Velocity potential for each panel
    phi_i = np.linalg.solve(P, Q)

    # Added mass
    m11 = np.sum(phi_i[:, 0] * n1 * L)  # m11 for the whole body
    m22 = np.sum(phi_i[:, 1] * n2 * L)
    m12 = np.sum(phi_i[:, 0] * n2 * L)  # Cross coupling
    m66 = np.sum(phi_i[:, 2] * (cx * n2 - cy * n1) * L)

    return m11, m22, m12, m66
Example #53
def load_file_legendre(ifuncs, filename='data/exemplar_021312.dat',
                       slope=False, rms=None):
    n_ax, n_az = ifuncs.shape[2:4]
    lines = (line.strip() for line in open(filename, 'rb')
             if not line.startswith('#'))
    D = np.array([[float(val) for val in line.split()]
                  for line in lines])
    nD_ax, nD_az = D.shape  # nD_m, nD_n

    x = np.linspace(-1, 1, n_az).reshape(1, n_az)
    y = np.linspace(-1, 1, n_ax).reshape(n_ax, 1)
    Pm_x = np.vstack([legendre(i)(x) for i in range(nD_ax)])
    Pn_y = np.hstack([legendre(i)(y) for i in range(nD_az)])
    Y_az_ax = np.zeros((n_ax, n_az), dtype=np.float)
    for n in range(nD_az):
        sum_Pm = np.zeros_like(x)
        for m in range(nD_ax):
            sum_Pm += D[m, n] * Pm_x[m, :]
        Y_az_ax += Pn_y[:, n].reshape(-1, 1) * sum_Pm

    # Unvectorized version for reference.
    #
    # xs = np.linspace(-1, 1, n_az)
    # ys = np.linspace(-1, 1, n_ax)
    # Pm_x = np.vstack([legendre(i)(xs) for i in range(nD_ax)])
    # Pn_y = np.vstack([legendre(i)(ys) for i in range(nD_az)])
    # Y_az_ax = np.zeros((n_ax, n_az), dtype=np.float)
    # for ix, x in enumerate(xs):
    #     for iy, y in enumerate(ys):
    #         for n in range(nD_az):
    #             sum_Pm = 0.0
    #             for m in range(nD_ax):
    #                 sum_Pm += D[m, n] * Pm_x[m, ix]
    #             Y_az_ax[iy, ix] += Pn_y[n, iy] * sum_Pm

    if slope:
        # 0.5 mm spacing * 1000 um / mm, then convert radians to arcsec
        displ = np.gradient(Y_az_ax, 0.5 * 1000)[0] * RAD2ARCSEC
    else:
        displ = Y_az_ax

    if rms:
        displ = displ / np.std(displ) * rms

    return displ
Example #54
File: ifunc.py Project: taldcroft/example
def load_file_legendre(n_ax, n_az, filename='data/exemplar_021312.dat',
                       apply_10_0=True):
    lines = (line.strip() for line in open(filename, 'rb')
             if not line.startswith('#'))
    D = np.array([[float(val) for val in line.split()]
                  for line in lines])
    nD_ax, nD_az = D.shape  # nD_m, nD_n

    x = np.linspace(-1, 1, n_az).reshape(1, n_az)
    y = np.linspace(-1, 1, n_ax).reshape(n_ax, 1)
    Pm_x = np.vstack([legendre(i)(x) for i in range(nD_ax)])
    Pn_y = np.hstack([legendre(i)(y) for i in range(nD_az)])
    Y_az_ax = np.zeros((n_ax, n_az), dtype=np.float)
    for n in range(nD_az):
        sum_Pm = np.zeros_like(x)
        for m in range(nD_ax):
            sum_Pm += D[m, n] * Pm_x[m, :]
        Y_az_ax += Pn_y[:, n].reshape(-1, 1) * sum_Pm

    # Unvectorized version for reference.
    #
    # xs = np.linspace(-1, 1, n_az)
    # ys = np.linspace(-1, 1, n_ax)
    # Pm_x = np.vstack([legendre(i)(xs) for i in range(nD_ax)])
    # Pn_y = np.vstack([legendre(i)(ys) for i in range(nD_az)])
    # Y_az_ax = np.zeros((n_ax, n_az), dtype=np.float)
    # for ix, x in enumerate(xs):
    #     for iy, y in enumerate(ys):
    #         for n in range(nD_az):
    #             sum_Pm = 0.0
    #             for m in range(nD_ax):
    #                 sum_Pm += D[m, n] * Pm_x[m, ix]
    #             Y_az_ax[iy, ix] += Pn_y[n, iy] * sum_Pm

    displ_x = Y_az_ax  # microns

    if apply_10_0:
        mount_map = get_mount_map(n_ax, n_az)
        displ_x = displ_x * mount_map

    # 0.5 mm spacing * 1000 um / mm
    displ_ry = np.gradient(Y_az_ax, 0.5 * 1000)[0]  # radians

    return displ_x, displ_ry
Example #55
def inside_integrand(mu, q, k, lp, f_interp, f_extrap, rc=0.4, q_min=0.002, q_max=0.3):
    
    Leg_lp = legendre(lp)   # Legendre polynomial of order l'

    if q < q_min:
        return 0.0
    elif q > q_max: 
        return f_extrap(q) * Leg_lp(k*mu/q) * W_2d(rc * np.sqrt(q**2 - k**2 * mu**2)) * q
    else: 
        return f_interp(q) * Leg_lp(k*mu/q) * W_2d(rc * np.sqrt(q**2 - k**2 * mu**2)) * q
Example #56
File: Utility.py Project: fukuball/fuku-ml
    def feature_transform(X, mode='polynomial', degree=1):

        poly = PolynomialFeatures(degree)
        process_X = poly.fit_transform(X)

        if mode == 'legendre':
            lege = legendre(degree)
            process_X = lege(process_X)

        return process_X
Example #57
def main():
    """
    Start with the poisson equation single layer potential
    Take normal derivative with respect to observation variable to get double
    layer potential.
    Take normal derivative with respect to source/integration variable to get
    hypersingular potential
    Normal derivatives are in the y direction because the element is along
    the x-axis
    """
    x1, x2, y1, y2 = sp.symbols('x1, x2, y1, y2')
    dist = sp.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
    slp = -1 / (2 * sp.pi) * sp.log(dist)
    dlp = sp.diff(slp, y1)
    print dlp
    hlp = sp.diff(dlp, y2)
    args = (x1, x2, y1, y2)
    single_layer = sp.utilities.lambdify(args, slp)
    double_layer = sp.utilities.lambdify(args, dlp)
    hypersing = sp.utilities.lambdify(args, hlp)

    settings['n'] = 8

    test_problems = dict()
    # Problem format: (kernel, singular_pt, basis, exact, include_pt, error_step)
    # include_pt indicates whether to include the nearest point on the element
    # in the integration. For some highly singular integrals, ignoring this
    # point does not hurt convergence and is much more numerically stable.
    # error_step is the step between the error terms in the taylor expansion
    # the true value
    #TODO: Figure out why the series
    test_problems['single1'] = (single_layer, 0.2, legendre(1), 0.0628062411975970, True, 2, 1)
    test_problems['single3'] = (single_layer, 0.2, legendre(3), -0.03908707208816243, True, 2, 3)
    test_problems['single16'] = (single_layer, 0.2, legendre(16), -0.00580747813511577, True, 1, 1)
    test_problems['single32'] = (single_layer, 0.2, legendre(32), 0.002061099155941667, True, 1, 1)
    test_problems['double1'] = (double_layer, 0.2, legendre(1), -0.1, True, 2, 0)
    test_problems['double3'] = (double_layer, 0.2, legendre(3), 0.14, True, 1, 1)
    test_problems['hyper1'] = (hypersing, 0.2, legendre(1), -0.1308463358283272, True, 2, 1)
    test_problems['hyper3'] = (hypersing, 0.2, legendre(3), 0.488588401102108, True, 2, 1)

    fig, ax = plt.subplots(1)
    run(test_problems['single1'], 'single1', fig, ax)
    run(test_problems['single3'], 'single3', fig, ax)
    run(test_problems['single16'], 'single16', fig, ax)
    run(test_problems['single32'], 'single32', fig, ax)
    run(test_problems['double1'], 'double1', fig, ax)
    run(test_problems['double3'], 'double3', fig, ax)
    run(test_problems['hyper1'], 'hyper1', fig, ax)
    run(test_problems['hyper3'], 'hyper3', fig, ax)
    ax.legend(loc = 'lower right')
    fig.savefig('all_errors.pdf')
    fig.savefig('all_errors.png')
Example #58
def checkLegendre(i=2):
    spvec = np.arange(-1.0, 1.0, 0.1)
    for sp in spvec:
        if i == 6:
            lin = P6(sp)
        elif i == 4:
            lin = P4(sp)
        else:
            lin = P2(sp)
        pyt = scisp.legendre(i)(sp)
        print lin, pyt
Example #59
    def legendre_(n, x):
        """Helper to avoid problems with scipy 0.8.0 returning inf for -1

        Scipy 0.8.0 (and possibly later) has regression of reporting
        'inf's for negative boundary. Lets guard against it for now
        """
        leg = legendre(n)
        r = leg(x)
        infs = np.isinf(r)
        if np.any(infs):
            r[infs] = leg(x[infs] + 1e-10) # offset to try to overcome problems
        return r
Example #60
def al0(xi,ell):
	"""
	For m=0
	Ylm = 1/2 sqrt((2l+1)/pi) LegendreP(l,x)
	xi: angle in radians
	Note: The integral over the other angle gives a factor of 2pi
	While healpix does the spherical harmonic decomposition w.r.t spherical 
	polar coordinates (theta,phi), this manual decomposition is w.r.t the 
	angle (xi,phi) and xi is not theta.
	"""
	LegPol = legendre(ell)
	return 2.*np.pi*Delta_T(xi)*np.sqrt((2.*ell+1.)/(4.*np.pi))*LegPol(np.cos(xi))*np.sin(xi)