def get_source(I, K, sigma_s, phi_old, phi_edge_old, q, mu, N):
    """Determine the new total source (scattering + q).

    Inputs:
        I:            number of zones
        K:            scattering order
        sigma_s:      array of scattering cross-sections
        phi_old:      scalar flux from previous time step in each zone
        phi_edge_old: edge flux from previous time step (unused here)
        q:            array of source within each zone
        mu:           quadrature angles
        N:            number of quadrature angles
    Outputs:
        source: scattering source for new time step + q
    """
    source = np.zeros((I, N + 1))
    # Column 0 holds the mu = -1 edge direction.
    for k in range(K + 1):
        source[:, 0] += 0.5 * (2 * k + 1) * (
            sigma_s[:, k] * phi_old[:, k]) * special.eval_legendre(k, -1.0)
    source[:, 0] += 0.5 * q
    # Columns 1..N hold the quadrature directions mu[0..N-1].
    for n in range(1, N + 1):
        for k in range(K + 1):
            source[:, n] += 0.5 * (2 * k + 1) * (
                sigma_s[:, k] * phi_old[:, k]) * special.eval_legendre(
                    k, mu[n - 1])
        source[:, n] += 0.5 * q
    return source
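# --- Added example (not from the original sources): a minimal smoke test for
# get_source above. It assumes numpy and scipy.special are imported as in the
# snippet and wires up consistently shaped random inputs; phi_edge_old is
# unused by the routine, so None is passed for it.
import numpy as np
from scipy import special

I, K, N = 4, 2, 8                            # zones, scattering order, angles
sigma_s = np.random.rand(I, K + 1)           # scattering moments per zone
phi_old = np.random.rand(I, K + 1)           # flux moments, previous iterate
q = np.ones(I)                               # flat external source
mu = np.polynomial.legendre.leggauss(N)[0]   # quadrature angles

src = get_source(I, K, sigma_s, phi_old, None, q, mu, N)
print(src.shape)  # (I, N + 1): column 0 is the mu = -1 edge direction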
def evaluate_basis(self, x, i=0, output_array=None):
    x = np.atleast_1d(x)
    if output_array is None:
        output_array = np.zeros(x.shape)
    output_array[:] = eval_legendre(i, x) \
        - i * (i + 1.) / (i + 2.) / (i + 3.) * eval_legendre(i + 2, x)
    return output_array
def evaluate_basis(self, x, i=0, output_array=None):
    x = np.atleast_1d(x)
    if output_array is None:
        output_array = np.zeros(x.shape)
    output_array[:] = eval_legendre(i, x) - eval_legendre(i + 2, x)
    if self.is_scaled():
        output_array /= np.sqrt(4 * i + 6)
    return output_array
def capacitance(self):
    """Return the electrical capacitance of the transducer."""
    return self.gamma() * self.transducer_width * self.nb_finger / 2. \
        * self.epsilon_inf() * np.sin(np.pi / self.Se) \
        * eval_legendre(-1. / self.Se, np.cos(self.delta())) \
        / eval_legendre(-1. / self.Se, -np.cos(self.delta()))
def evaluate_basis(self, x, i=0, output_array=None):
    x = np.atleast_1d(x)
    if output_array is None:
        output_array = np.zeros(x.shape)
    if i < self.N - 4:
        output_array[:] = eval_legendre(i, x) \
            - 2 * (2 * i + 5.) / (2 * i + 7.) * eval_legendre(i + 2, x) \
            + ((2 * i + 3.) / (2 * i + 7.)) * eval_legendre(i + 4, x)
    else:
        output_array[:] = sympy.lambdify(sympy.symbols('x'),
                                         self.sympy_basis(i))(x)
    return output_array
def test_legendre():
    allTrue = True
    for order in range(ORDER_MAX):
        x = np.random.rand(1).item()
        valuePy = sp.eval_legendre(order, x)
        valueCpp = NumCpp.legendre_p_Scaler1(order, x)
        if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp,
                                                         DECIMALS_ROUND):
            allTrue = False
    assert allTrue

    allTrue = True
    for order in range(ORDER_MAX):
        shapeInput = np.random.randint(10, 100, [2, ], dtype=np.uint32)
        shape = NumCpp.Shape(*shapeInput)
        cArray = NumCpp.NdArray(shape)
        x = np.random.rand(*shapeInput)
        cArray.setArray(x)
        valuePy = sp.eval_legendre(order, x)
        valueCpp = NumCpp.legendre_p_Array1(order, cArray)
        if not np.array_equal(np.round(valuePy, DECIMALS_ROUND),
                              np.round(valueCpp, DECIMALS_ROUND)):
            allTrue = False
    assert allTrue

    allTrue = True
    for order in range(ORDER_MAX):
        x = np.random.rand(1).item()
        degree = np.random.randint(order, ORDER_MAX)
        valuePy = sp.lpmn(order, degree, x)[0][order, degree]
        valueCpp = NumCpp.legendre_p_Scaler2(order, degree, x)
        if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp,
                                                         DECIMALS_ROUND):
            allTrue = False
    assert allTrue

    allTrue = True
    for order in range(ORDER_MAX):
        x = np.random.rand(1).item()
        valuePy = sp.lqn(order, x)[0][order]
        valueCpp = NumCpp.legendre_q_Scaler(order, x)
        if np.round(valuePy, DECIMALS_ROUND) != np.round(valueCpp,
                                                         DECIMALS_ROUND):
            allTrue = False
    assert allTrue
def _exact_kernel(self, x):
    amplitude = (sp.eval_legendre(self._N + 1, x)
                 - sp.eval_legendre(self._N, x))
    with warnings.catch_warnings():
        # The kernel is so condensed near 1 at high N that np.isclose()
        # does a terrible job at letting us manually treat values close to
        # the upper limit.
        # The best way to implement K_N(t) is to let the floating point
        # division fail and then replace NaNs.
        warnings.simplefilter(action='ignore', category=RuntimeWarning)
        amplitude /= x - 1
    amplitude[np.isnan(amplitude)] = self._N + 1
    return amplitude
def calc_a(self, multindices):
    """Build the matrix containing the basis functions evaluated at the
    sample locations, i.e. the matrix A in Au = b.

    Parameters
    ----------
    multindices : list
        the list with the multi-indices

    Returns
    -------
    A : array
        the matrix with the evaluated basis functions for the samples
    """
    dimension = self.my_experiment.dimension
    x_u_scaled = self.my_experiment.x_u_scaled
    a_matrix = np.ones([self.my_experiment.size, len(multindices)])
    # generate the A matrix
    for i, multiindex in enumerate(multindices):
        for j in range(dimension):
            deg = multiindex[j]
            if self.my_experiment.polytypes[j] == 'Legendre':
                a_matrix[:, i] *= special.eval_legendre(
                    deg, x_u_scaled[:, j])
            elif self.my_experiment.polytypes[j] == 'Hermite':
                a_matrix[:, i] *= special.eval_hermitenorm(
                    deg, x_u_scaled[:, j])
    return a_matrix
def __init__(self):
    self.lmax = 2
    self.odd = 0
    self.raw = 2. * np.random.normal(0.5, size=(256, 256))

    # Simulate a ring image
    x = np.arange(0, 500)
    y = np.arange(0, 500)
    X, Y = np.meshgrid(x, y)  # Create a 2d map
    r = np.sqrt(X ** 2 + Y ** 2)
    X -= 250  # Center the ring
    Y -= 250  # Center the ring
    theta = theta_f(X, Y)
    r = np.sqrt(X ** 2 + Y ** 2)
    self.datas = np.exp(-(r - 80) ** 2 / 50) * eval_legendre(2, np.cos(theta))
    self.datas /= self.datas.max()
    # self.datas[self.datas<0.]=0.
    self.raw = self.datas
    self.center = (0., 0.)
    self.get_com()
    self.r = 100.
    self.scale = ArrayInfos(self.datas)

    # Output
    self.normed_pes = np.zeros(Rbin)
    self.ang = np.zeros((Angbin, self.get_NumberPoly()))
    self.output = np.zeros_like(self.datas)
    self.pes_error = np.zeros(Rbin)
    self.ang_var = np.zeros((Angbin, self.get_NumberPoly()))
def image_for_display_alt(self):
    dim = int(1.1 * self.r)
    # Calculate new image in cartesian coordinates and return it for display
    X, Y = np.meshgrid(np.arange(-dim, dim + 1), np.arange(-dim, dim + 1))
    new_r = np.sqrt(X ** 2 + Y ** 2).ravel()
    new_t = theta_f(X, Y).ravel()
    del X, Y
    # List of Legendre polynomials
    listLegendre = np.arange(0, self.lmax + 1, (not self.odd) + 1)
    kvec, list = np.meshgrid(np.arange(Funcnumber), listLegendre)
    K, Rad = np.meshgrid(kvec.T.ravel(), new_r)
    K, Theta = np.meshgrid(kvec.T.ravel(), new_t)
    del listLegendre, kvec
    Rad *= Rbin / float(self.r)
    func = np.exp(-(Rad - K * Bspace) ** 2 / (2 * Bwidth ** 2))
    leg = eval_legendre(list.T.ravel(), np.cos(Theta)) * self.coefficients
    outdata = (func * leg).sum(axis=1) * new_r
    outdata[outdata < 0] = 0
    outdata = outdata.reshape((2 * dim + 1, 2 * dim + 1))
    self.output = np.zeros_like(self.raw)
    # Floor division so the slice bounds stay integers under Python 3.
    self.output[max(self.center[1] - dim, 0):
                min(self.center[1] + dim + 1, self.raw.shape[0] // 2),
                max(self.center[0] - dim, 0):
                min(self.center[0] + dim + 1, self.raw.shape[1] // 2)] = \
        outdata[max(0, dim // 2):min(dim // 2 + 1, self.raw.shape[1] // 2),
                max(0, dim // 2)::min(dim // 2 + 1, self.raw.shape[1] // 2)]
def get_rademacher_gaussian_simulation_data(p, n, split_list, sigma_squared, s):
    y, true_partition = [], []
    if len(split_list) == 0:
        true_partition = [range(p)]
    else:
        for i, r in enumerate(split_list):
            if i == 0:
                true_partition.append(range(int(math.floor(r * p))))
            else:
                true_partition.append(
                    range(max(true_partition[i - 1]) + 1,
                          int(math.floor(r * p)), 1))
        true_partition.append(range(max(true_partition[i]) + 1, p, 1))
    for q, P in enumerate(true_partition):
        for k in P:
            nu_list, nu = [], np.random.normal(0, 1)
            nu_list.append(nu)
            for i in range(1, n, 1):
                nu = (0.5 * (i + 1) / n - 0.2) * nu + np.random.normal(0, 1)
                nu_list.append(nu)
            y.append([
                eval_legendre(s + q, 2 * (i + 1) / n - 1)
                + (k + 1) - 3 - 5 * math.floor((k + 1) / 5)
                + 2 * np.sqrt(sigma_squared) * (((i + 1) / n - 0.5) ** 2)
                * nu_list[i] for i in range(n)
            ])
    y = np.array(y)
    return [y, true_partition]
def gravitational_potential_sph(self, lat, radius, n_max=4, degrees=True):
    """Return normal gravitational potential V, in m**2/s**2.

    Calculate normal gravitational potential from spherical approximation.

    Parameters
    ----------
    lat : float or array_like of floats
        Spherical (geocentric) latitude.
    radius : float or array_like of floats
        Radius, in metres.
    n_max : int
        Maximum degree.
    degrees : bool, optional
        If True, the input `lat` is given in degrees, otherwise radians.
    """
    if degrees:
        lat = np.radians(lat)
    out = 0
    for degree in range(1, n_max + 1):
        leg = special.eval_legendre(2 * degree, np.sin(lat))
        out += self.j2n(degree) * (self.a / radius) ** (2 * degree) * leg
    return self.gm / radius * (1 - out)
def mie_sphere(mesh, Params, name='mie-sphere.pos', field='sca'):
    count = 0
    N, params, kk = Params
    R, (ce, ci), jumps = params
    kk = Cartesian(kk)
    k = kk.norm()
    kk = kk.normalized()
    vals = [0 + 0j] * len(mesh.points)
    for ii, point in enumerate(mesh.points):
        _print_progress(ii, len(mesh.points), 'computed', mod=1)
        p = Cartesian(point)
        pnorm = p.norm()
        pn = p.normalized()
        costheta = pn.dot(kk)
        for n in myrange(N):
            if field == 'sca':
                cn = coeff_ext(n, params, k) * H1(n, k * pnorm)
            elif field == 'int':
                cn = coeff_int(n, params, k) * J(n, ci * k * pnorm)
            else:
                cn = ((1j) ** n) * (2 * n + 1) * J(n, k * pnorm)
            c = eval_legendre(n, costheta)
            count += 1
            vals[ii] += cn * c
    print(' --> {1} computations i.e. N={0}.'.format(N, count))
    mesh.write(vals, name)
    return vals
def gll_points(n):
    """GLL points and weights.

    :param n: Number of points
    :returns: (x, w)
    """
    assert n >= 2
    if n == 2:
        x = np.array([-1.0, 1.0])
        w = np.array([1.0, 1.0])
        return x, w
    # See Nodal Discontinuous Galerkin Methods Appendix A for x and
    # the Mathworld page on Lobatto Quadrature for w
    x = j_roots(n - 2, 1, 1)[0]
    L = eval_legendre(n - 1, x)
    w1 = 2.0 / (n * (n - 1))
    w = 2.0 / (n * (n - 1) * L * L)
    x = np.hstack([-1.0, x, 1.0])
    w = np.hstack([w1, w, w1])
    return x, w
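# --- Added check (not from the original source): n-point GLL quadrature is
# exact for polynomials up to degree 2n - 3, so with n = 5 the degree-6
# monomial below integrates to exactly 2/7 over [-1, 1].
import numpy as np

x, w = gll_points(5)
assert np.isclose(np.sum(w), 2.0)               # integrates 1 exactly
assert np.isclose(np.sum(w * x**6), 2.0 / 7.0)  # degree 6 <= 2*5 - 3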
def mie_sphere(mesh, Params, name="mie-sphere.pos", field="sca"): count = 0 N, params, kk = Params R, (ce, ci), jumps = params kk = Cartesian(kk) k = kk.norm() kk = kk.normalized() vals = [0 + 0j] * len(mesh.points) for ii, point in enumerate(mesh.points): _print_progress(ii, len(mesh.points), "computed", mod=1) p = Cartesian(point) pnorm = p.norm() pn = p.normalized() costheta = pn.dot(kk) for n in myrange(N): if field == "sca": cn = coeff_ext(n, params, k) * H1(n, k * pnorm) elif field == "int": cn = coeff_int(n, params, k) * J(n, ci * k * pnorm) else: cn = ((1j) ** n) * (2 * n + 1) * J(n, k * pnorm) c = eval_legendre(n, costheta) count += 1 vals[ii] += cn * c print(" --> {1} computations i.e. N={0}.".format(N, count)) mesh.write(vals, name) return vals
def legendre_list(order):
    # The n=n default argument freezes the loop variable, so each lambda
    # keeps its own degree instead of closing over the final value of n.
    l = []
    for n in range(order + 1):
        l.append(lambda x, n=n: eval_legendre(n, x))
    return l
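# --- Added usage sketch (not from the original source), showing the returned
# callables in action; without n=n above, every entry would evaluate the
# highest-degree polynomial.
import numpy as np

basis = legendre_list(3)
x = np.linspace(-1.0, 1.0, 5)
print(basis[0](x))    # P_0: all ones
print(basis[2](0.0))  # P_2(0) = -0.5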
def max_rE_weights(N):
    """Return max-rE modal weight coefficients for spherical harmonics
    order N.

    See Also
    --------
    :py:func:`spaudiopy.sph.unity_gain` : Unit amplitude compensation.

    References
    ----------
    Zotter, F., & Frank, M. (2012). All-Round Ambisonic Panning and
    Decoding. Journal of Audio Engineering Society, eq. (10).

    Examples
    --------
    .. plot::
        :context: close-figs

        dirac_azi = np.deg2rad(45)
        dirac_colat = np.deg2rad(45)
        N = 5

        # cross section
        azi = np.linspace(0, 2 * np.pi, 720, endpoint=True)

        # Bandlimited Dirac pulse, with max r_E tapering window
        w_n = spa.sph.max_rE_weights(N)
        w_n = spa.sph.unity_gain(w_n)
        dirac_tapered = spa.sph.bandlimited_dirac(N, azi - dirac_azi, w_n=w_n)

        spa.plots.polar(azi, dirac_tapered)
    """
    theta = np.deg2rad(137.9) / (N + 1.51)
    a_n = scyspecial.eval_legendre(np.arange(N + 1), np.cos(theta))
    return a_n
def sub_ortho_poly(vis, time, mask, n):
    logger.debug('sub. mean')
    window = mask
    x = time
    upbroad = (slice(None), slice(None)) + (None, ) * (window.ndim - 1)
    window = window[None, ...]
    x_mid = (x.max() + x.min()) / 2.
    x_range = (x.max() - x.min()) / 2.
    x = (x - x_mid) / x_range
    n = np.arange(n)[:, None]
    x = x[None, :]
    polys = special.eval_legendre(n, x, out=None)
    polys = polys[upbroad] * window
    # Gram-Schmidt: orthogonalize each polynomial against the lower-order
    # ones under the window weight, then normalize.
    for ii in range(n.shape[0]):
        for jj in range(ii):
            amp = np.sum(polys[ii, ...] * polys[jj, ...], axis=0)
            polys[ii, ...] -= amp[None, ...] * polys[jj, ...]
        norm = np.sqrt(np.sum(polys[ii] ** 2, axis=0))
        norm[norm == 0] = np.inf
        polys[ii] /= norm[None, ...]
    amp = np.sum(polys * vis[None, ...], 1)
    vis_fit = np.sum(amp[:, None, ...] * polys, 0)
    vis -= vis_fit
    # return vis
    return vis_fit
def legendre_function(indices, x, t=-1):
    output = 1
    unique_indices = set(indices)
    for i in unique_indices:
        order = indices.count(i)
        output *= eval_legendre(order, x[t - i])
    return output
def __init__(self, N, max_length=1024, measure='legs',
             discretization='bilinear'):
    """
    max_length: maximum sequence length
    """
    super().__init__()
    self.N = N
    A, B = transition(measure, N)
    B = B.squeeze(-1)
    A_stacked = np.empty((max_length, N, N), dtype=A.dtype)
    B_stacked = np.empty((max_length, N), dtype=B.dtype)
    for t in range(1, max_length + 1):
        At = A / t
        Bt = B / t
        if discretization == 'forward':
            A_stacked[t - 1] = np.eye(N) + At
            B_stacked[t - 1] = Bt
        elif discretization == 'backward':
            A_stacked[t - 1] = la.solve_triangular(
                np.eye(N) - At, np.eye(N), lower=True)
            B_stacked[t - 1] = la.solve_triangular(
                np.eye(N) - At, Bt, lower=True)
        elif discretization == 'bilinear':
            A_stacked[t - 1] = la.solve_triangular(
                np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
            B_stacked[t - 1] = la.solve_triangular(
                np.eye(N) - At / 2, Bt, lower=True)
        else:  # ZOH
            A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
            B_stacked[t - 1] = la.solve_triangular(
                A, A_stacked[t - 1] @ B - B, lower=True)
    self.A_stacked = torch.Tensor(A_stacked)  # (max_length, N, N)
    self.B_stacked = torch.Tensor(B_stacked)  # (max_length, N)
    # print("B_stacked shape", B_stacked.shape)
    vals = np.linspace(0.0, 1.0, max_length)
    self.eval_matrix = torch.Tensor(
        (B[:, None] * ss.eval_legendre(np.arange(N)[:, None],
                                       2 * vals - 1)).T)
def leg2tau(beta, legorder, taus):
    r"""Get transformation matrix from a Legendre expansion to a tau mesh.

    Computes the transformation matrix @f$ L @f$ from a finite set of
    Legendre coefficients @f$ G_l @f$ to an imaginary time mesh
    @f$ (\tau_i)_i @f$:
    @f[ G(\tau_i) = \sum_l L_{il} G(l) @f]
    given simply by the evaluation of the Legendre polynomials at the
    different tau points (see Boehnke et al., 2011, eq. 1):
    @f[ L_{il} = \frac{\sqrt{2l+1}}{\beta} P_l(x(\tau_i)) @f]

    This matrix is isometric (L*L = 1) in the limit of a dense tau mesh,
    co-isometric (LL* = 1) in the limit of infinite Legendre order and
    unitary (LL* = L*L = 1) in the combined limit.

    @see tau2leg()

    @param beta      Thermodynamic beta
    @param legorder  Number of Legendre coefficients (highest order plus 1)
    @param taus      Either array (tau mesh) or number of tau points as integer
    """
    # Get array
    beta = _ensurebeta(beta)
    taus = _tauarray(beta, taus)
    red_taus = 2 * taus / beta - 1
    legcoeffs = np.empty(shape=(taus.size, legorder))
    for l in range(0, legorder):
        legcoeffs[:, l] = np.sqrt(2 * l + 1) / beta * sps.eval_legendre(
            l, red_taus)
    return legcoeffs
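# --- Added usage sketch (not from the original source). It assumes the
# private helpers _ensurebeta and _tauarray accept an explicit tau mesh, as
# the docstring suggests; the point here is the shapes involved.
import numpy as np

beta, legorder = 10.0, 8
taus = np.linspace(0.0, beta, 101)
L = leg2tau(beta, legorder, taus)
gl = np.random.rand(legorder)   # stand-in Legendre coefficients G_l
g_tau = L @ gl                  # G(tau_i) on the mesh
print(L.shape, g_tau.shape)     # (101, 8) (101,)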
def plot_it(scatter_matrix):
    import matplotlib.pyplot as plt
    import scipy.special as ss
    groups = scatter_matrix.shape[0]
    orders = scatter_matrix.shape[2]
    Nmu = 50
    mu = np.linspace(-1, 1, Nmu)
    f = np.zeros(shape=(groups, groups, Nmu))
    for gin in range(groups):
        for gout in range(groups):
            data = scatter_matrix[gin, gout, :]
            data /= data[0]
            for l in range(orders):
                f[gin, gout, :] += ((float(l) + 0.5)
                                    * ss.eval_legendre(l, mu) * data[l])
    i = 0
    for gin in range(groups):
        for gout in range(groups):
            i += 1
            plt.subplot(groups, groups, i)
            plt.title(str(gin + 1) + ' to ' + str(gout + 1))
            plt.plot(mu, f[gin, gout, :])
    plt.show()
    plt.close()
def legendre_normalized_function(indices, x, t=-1):
    output = 1
    unique_indices = set(indices)
    for i in unique_indices:
        order = indices.count(i)
        norm_factor = np.sqrt(2 * order + 1)
        output *= norm_factor * eval_legendre(order, x[t - i])
    return output
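# --- Added example (not from the original source) for the two helpers above:
# `indices` lists time lags, and a lag repeated k times contributes the
# order-k Legendre polynomial evaluated at that lag of the series x.
import numpy as np

x = np.linspace(-1.0, 1.0, 10)
print(legendre_function([0, 0, 1], x))             # P_2(x[-1]) * P_1(x[-2])
print(legendre_normalized_function([0, 0, 1], x))  # same, with sqrt(2k+1) factors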
def _SL(i, x, y, Beta_convol, index, legendre_orders):
    """Calculate interpolated β(r), where r = radius."""
    r = np.sqrt(x**2 + y**2 + 0.1)  # + 0.1 to avoid division by zero.
    # normalize: division by circumference.
    BB = np.interp(r, index, Beta_convol[i, :], left=0) / (2 * np.pi * r)
    return BB * eval_legendre(legendre_orders[i], x / r)
def analytic_sol(R, a, theta, Ampli, k, N_terms):
    result = 0
    for n in range(N_terms):
        cn = -Ampli * (2 * n + 1) * (-1j)**n * (
            jn(n, k * a, derivative=True)
            / (jn(n, k * a, derivative=True)
               - 1j * yn(n, k * a, derivative=True)))
        tosum = cn * (jn(n, k * R, derivative=False)
                      - 1j * yn(n, k * R, derivative=False)) \
            * eval_legendre(n, np.cos(theta))
        result = result + tosum
    return result
def MaxRECoefficients(Nmax):
    """rE computation (largest zero of the degree Nmax+1 Legendre
    polynomial)."""
    # Sample the interval [0.5, 1].
    t = np.arange(0.5, 1.0, 0.05)
    # Search for the highest root of the degree Nmax+1 Legendre polynomial
    # in [0.5, 1]; this value is the highest rE reachable.
    rE = np.max(fsolve(legendre(Nmax + 1), t))
    # The coefficient applied to the order-n HOA signals is just the order-n
    # Legendre polynomial evaluated at rE.
    return eval_legendre(np.arange(Nmax + 1), rE)
def legendre_flux_mom(self, flux, degree):
    '''Return the degree'th Legendre flux moment for the corresponding
    angular flux vector.'''
    flux_leg_mom = 0
    for ang in range(self.n):
        flux_leg_mom += flux[ang] * self.w[ang] * special.eval_legendre(
            degree, self.ang_cell_centers[ang])
    return flux_leg_mom
def weavebasket(xinfo, npoly=0):
    """Make design matrix of the problem given information in the "xinfo"
    dictionary."""
    nscans = xinfo['scans'].size
    nix = xinfo['nx']
    nterms = npoly + 1
    # NB - swapping R&C relative to IDL->
    basket = np.zeros([nix, nscans * nterms])
    for i in np.arange(nix):
        for j in np.arange(nterms):
            basket[i, xinfo['rint'][i] * nterms + j] = \
                1.0 * sps.eval_legendre(j, xinfo['rtx'][i])
            basket[i, xinfo['cint'][i] * nterms + j] = \
                -1.0 * sps.eval_legendre(j, xinfo['ctx'][i])
    # translation:
    #   design_matrix[scan & term index, intersection]
    #       = +/- legendre_value(order j, time_at_crossing)
    # + or - determined by whether it's a row or a column; rows by fiat are
    # +ve in the model (that is determined by makedeltavec(), which does
    # rowdata - columndata).
    return basket
def ApproxMaxRECoefficients(Nmax):
    """Approximate maxRE coefficients for a given order, from [0].

    [0] Zotter, Franz, and Matthias Frank. "All-round ambisonic panning
    and decoding." Journal of the Audio Engineering Society 60.10 (2012).
    """
    rE = np.cos(np.radians(137.9 / (Nmax + 1.51)))
    return eval_legendre(np.arange(Nmax + 1), rE)
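# --- Added comparison (not from the original sources): the closed-form
# cosine in ApproxMaxRECoefficients approximates the largest root that
# MaxRECoefficients finds numerically, so the two coefficient sets should
# agree to a few decimals (assuming both helpers above are in scope).
import numpy as np

for Nmax in (1, 3, 5):
    diff = np.max(np.abs(MaxRECoefficients(Nmax)
                         - ApproxMaxRECoefficients(Nmax)))
    print(Nmax, diff)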
def cmpl_set_Rn_Legendre_at_R(self, a_n, a_R):
    """Brief: The function for the complete set in the radial direction
    (at r = R). ~~~ Legendre polynomial version ~~~

    return: Rn(R; R, Delta).
    """
    return sqrt((2 * a_n + 1) / (2 * self.delta)) \
        * eval_legendre(a_n, 0.0) / a_R
def _SL(i, x, y, Beta_convol, index, legendre_orders):
    """Calculate interpolated β(r), where r = radius."""
    r = np.sqrt(x**2 + y**2 + 0.1)  # + 0.1 to avoid division by zero.
    # normalize: division by circumference.
    # @stggh 2/r to correctly normalize intensity cf O2 PES
    BB = np.interp(r, index, Beta_convol[i, :], left=0) / (4 * np.pi * r * r)
    return BB * eval_legendre(legendre_orders[i], x / r)
def cmpl_set_Rn_Legendre(self, a_n, a_R, a_r):
    """Brief: The function for the complete set in the radial direction.
    ~~~ Legendre polynomial version ~~~

    return: Rn(r; R, Delta).
    """
    return (sqrt((2 * a_n + 1) / (2 * self.delta))
            * eval_legendre(a_n, (a_r - a_R) / self.delta) / a_r)
def bandlimited_dirac(N, d, w_n=None):
    r"""Order N spatially bandlimited Dirac pulse at central angle d.

    Parameters
    ----------
    N : int
        SH order.
    d : (Q,) array_like
        Central angle in rad.
    w_n : (N+1,) array_like, optional. Default is None.
        Tapering window w_n.

    Returns
    -------
    dirac : (Q,) array_like
        Amplitude at central angle d.

    Notes
    -----
    Normalize with

    .. math::  \sum^N \frac{2N + 1}{4 \pi} = \frac{(N+1)^2}{4 \pi} .

    References
    ----------
    Rafaely, B. (2015). Fundamentals of Spherical Array Processing.
    Springer., eq. (1.60).

    Examples
    --------
    .. plot::
        :context: close-figs

        dirac_azi = np.deg2rad(0)
        dirac_colat = np.deg2rad(90)
        N = 5

        # cross section
        azi = np.linspace(0, 2 * np.pi, 720, endpoint=True)

        # Bandlimited Dirac pulse
        dirac_bandlim = 4 * np.pi / (N + 1) ** 2 * \
            spa.sph.bandlimited_dirac(N, azi - dirac_azi)

        spa.plots.polar(azi, dirac_bandlim)
    """
    d = utils.asarray_1d(d)
    if w_n is None:
        w_n = np.ones(N + 1)
    g_n = np.zeros([(N + 1)**2, len(d)])
    for n, i in enumerate(range(N + 1)):
        g_n[i, :] = w_n[i] * (2 * n + 1) / (4 * np.pi) * \
            scyspecial.eval_legendre(n, np.cos(d))
    dirac = np.sum(g_n, axis=0)
    return dirac
def return_correction(self, x, coeff):
    y = np.zeros_like(x)
    if self.type == "polynomial":
        for i in range(self.degree + 1):
            y = y + coeff[i] * np.power(x, i)
    if self.type == "legendre":
        for i in range(self.degree + 1):
            y = y + coeff[i] * special.eval_legendre(i, x)
    return y
def return_source(self, i, n):
    '''Return the current RHS at iterate l if we are solving for iterate
    l+1, for cell i and direction n.'''
    Q = 0
    for k in range(self.scat_order + 1):
        Q += (((2 * k) + 1) / 2) \
            * (self.scat_mom[k][i] + self.source_mom[k][i]) \
            * special.eval_legendre(k, n)
    return Q
def _evalVecLegFlux(self, l, pos=0):
    """Vectorized version of the Legendre flux moment routine (much faster).

    Group Legendre flux:
        scalar_flux_lg = (1/2) * sum_n(w_n * P_l * flux_n)
    where l is the Legendre order and n is the ordinate index.
    """
    legsum = np.sum(spc.eval_legendre(l, self.sNmu) * self.wN *
                    (self.ordFlux[:, pos, :]), axis=1)
    return 0.5 * legsum
def test_scatt_positivity(self, dtype, num_mu_pts=201, order=None):
    # Pass through each Ein and outgoing group; return a flag if the data
    # set is negative, along with a list of the negative (iE, group) pairs.
    if dtype in ('elastic', 'el'):
        scatter = self.elastic
        NEin = self.NE_el
    elif dtype in ('inelastic', 'inel'):
        scatter = self.inelastic
        NEin = self.NE_inel
    elif dtype in ('nuinelastic', 'nuinel', 'nu'):
        if self.nuinelastic_present:
            scatter = self.nuinelastic
            NEin = self.NE_inel

    # Initialize the return values
    positivity = True
    negativity_list = []
    min_value = 1.0E50

    # Set maxL to the smaller of order and self.scatt_order.
    # Only used if scatt_type == SCATT_TYPE_LEGENDRE.
    if order is not None:
        maxL = min(order, self.scatt_order)
    else:
        maxL = self.scatt_order

    mu = np.linspace(-1.0, 1.0, num_mu_pts)

    if self.scatt_type == SCATT_TYPE_LEGENDRE:
        for iE in range(NEin):
            gmin = scatter[iE].gmin
            gmax = scatter[iE].gmax
            for g in range(gmin, gmax + 1):
                expanded = np.zeros(num_mu_pts)
                for l in range(maxL):
                    expanded[:] = expanded[:] + \
                        (float(l) + 0.5) * ss.eval_legendre(l, mu[:]) * \
                        scatter[iE].outgoing[g - gmin][l]
                minval = min(expanded)
                if minval < min_value:
                    min_value = minval
                if minval < 0.0:
                    positivity = False
                    negativity_list.append((iE, g + gmin))
    elif self.scatt_type == SCATT_TYPE_TABULAR:
        pass

    return (positivity, negativity_list, min_value)
def conductance_central(self, M):
    """Return the acoustic central conductance of the transducer.

    Parameters
    ----------
    M : int, float
        Harmonic number
    """
    return self.alpha(M) * M * self.center_angular_frequency() \
        * (self.nb_finger / 2.)**2. * self.transducer_width * self.gamma_s() \
        * (2. * self.epsilon_inf() * np.sin(np.pi * M / self.Se)
           / eval_legendre(-M / self.Se, -np.cos(self.delta())))**2.
def f(self, theta):
    """Computes the (complex) scattering amplitude f(theta).

    Arguments:
        theta: scattering angle (radians)

    Returns:
        f: the scattering amplitude
    """
    retval = 0
    for l, delta in enumerate(self.phase_shifts):
        retval += (2 * l + 1) * np.exp(1j * delta) * np.sin(delta) \
            * special.eval_legendre(l, np.cos(theta))
    return retval / self.ki
def calcFoxWolfram(objects, orders, weight_func):
    """Fox-Wolfram moments, http://arxiv.org/pdf/1212.4436v1.pdf"""
    lvecs = [lvec(o) for o in objects]
    h = np.zeros(len(orders))
    for i in range(len(lvecs)):
        for j in range(len(lvecs)):
            cos_omega_ij = (
                lvecs[i].CosTheta() * lvecs[j].CosTheta()
                + math.sqrt((1.0 - lvecs[i].CosTheta()**2)
                            * (1.0 - lvecs[j].CosTheta()**2))
                * math.cos(lvecs[i].Phi() - lvecs[j].Phi())
            )
            w_ij = weight_func(lvecs, lvecs[i], lvecs[j])
            vals = np.array([cos_omega_ij] * len(orders))
            p_l = np.array(eval_legendre(orders, vals))
            h += w_ij * p_l
    return h
def test_subtract_sing():
    degree = 20
    f = lambda x: eval_legendre(degree, x) * log(x + 1)
    almost_exact_x, almost_exact_w = telles_singular(51, -1)
    exact = sum(f(almost_exact_x) * almost_exact_w)
    # print(exact)
    gauss_x, gauss_w = gaussxw(degree + 1)
    est = sum(f(gauss_x) * gauss_w)
    error = abs(exact - est)
    f_singular_pt = lambda x: (-1.0) ** degree * log(x + 1)
    f_minus_singularity = lambda x: f(x) - f_singular_pt(x)
    addme = 0.6137056388801094 * (-1.0) ** (degree + 1)
    est2 = addme + sum(f_minus_singularity(gauss_x) * gauss_w)
    error2 = abs(exact - est2)
    # print(error / exact)
    # print(error2 / exact)
    # Subtracting out the singularity is super ineffective on this problem...
    assert abs(error2 / exact) < 0.3
def expand_scatt(self, outgoing, dtype, num_mu_pts=201, order=None):
    # `outgoing` is the set of Legendre moments vs incoming energies, for
    # one group (or an already condensed set of groups).
    # num_mu_pts is the number of points on the mu variable used to set up
    # the functional data. (Perhaps this could be moved to a sympy function
    # in the future instead of discrete points.)
    if dtype in ('elastic', 'el'):
        scatter = self.elastic
        NEin = self.NE_el
    elif dtype in ('inelastic', 'inel'):
        scatter = self.inelastic
        NEin = self.NE_inel
    elif dtype in ('nuinelastic', 'nuinel', 'nu'):
        if self.nuinelastic_present:
            scatter = self.nuinelastic
            NEin = self.NE_inel

    expanded = np.zeros((NEin, num_mu_pts))
    mu = np.linspace(-1.0, 1.0, num_mu_pts)

    # Set maxL to the smaller of order and self.scatt_order.
    # Only used if scatt_type == SCATT_TYPE_LEGENDRE.
    if order is not None:
        maxL = min(order, self.scatt_order)
    else:
        maxL = self.scatt_order

    if self.scatt_type == SCATT_TYPE_LEGENDRE:
        for iE in range(NEin):
            for l in range(maxL):
                expanded[iE][:] = expanded[iE][:] + \
                    (float(l) + 0.5) * ss.eval_legendre(l, mu[:]) * \
                    outgoing[iE][l]
    elif self.scatt_type == SCATT_TYPE_TABULAR:
        pass

    return (expanded, mu)
def mie_N4grid_slow(field, kk, R, C, ce, ci, jumpe, jumpi, N, point):
    """
    Requires:
        kk : numpy.array([kx, ky, kz])
        R : radius of the sphere
        C : center of the sphere
        ce, ci : contrast sqrt(epsExt), sqrt(epsInt)
        jumpe : coeff jump exterior (alpha_Dir, beta_Neu)
        jumpi : coeff jump interior (alpha_Dir, beta_Neu)
        N : number of modes
    """
    pt = point[:]
    kk = Cartesian(kk)
    k = kk.norm()
    kk = kk.normalized()
    # be careful with this test !!
    if sp.linalg.norm(sp.linalg.norm(pt - C) - R) > 0.3:
        return 0.0 + 0j
    else:
        jumps = (jumpe, jumpi)
        params = (R, (ce, ci), jumps)
        p = Cartesian((pt[0], pt[1], pt[2]))
        pnorm = p.norm()
        pn = p.normalized()
        costheta = pn.dot(kk)
        val = 0
        for n in myrange(N):
            if field == "sca":
                cn = k * coeff_ext(n, params, k) * H1p(n, k * pnorm)
            elif field == "int":
                cn = ci * k * coeff_int(n, params, k) * Jp(n, ci * k * pnorm)
            else:
                cn = k * ((1j) ** n) * (2 * n + 1) * Jp(n, k * pnorm)
            c = eval_legendre(n, costheta)
            val += cn * c
        return val
def eval_legendre_dd(n, x):
    # Cast the degree array to float64 ('d') before dispatching.
    return eval_legendre(n.astype('d'), x)
def eval_legendre_ld(n, x):
    # Cast the degree array to long ('l') before dispatching.
    return eval_legendre(n.astype('l'), x)
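# --- Added check (not from the original source). As I understand scipy's
# dispatch, integer degrees go through the polynomial recurrence while float
# degrees use the hypergeometric continuation; the wrappers above pin the
# dtype so the choice is explicit. Both paths agree on integer degrees:
import numpy as np

n = np.arange(5)
print(eval_legendre_ld(n, 0.3))  # integer-degree path
print(eval_legendre_dd(n, 0.3))  # float-degree path, same values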
def evalNormPoly(self, x, n):
    # Normalize by the L2 norm of P_n on [-1, 1]: ||P_n||^2 = 2/(2n+1).
    norm = 1.0 / np.sqrt(2.0 / (2.0 * float(n) + 1.0))
    return norm * polys.eval_legendre(n, x)
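# --- Added sanity check (not from the original source): the normalized
# polynomials are orthonormal under Gauss-Legendre quadrature on [-1, 1].
# `self` is unused by evalNormPoly, so the plain function can be called with
# None (assuming the snippet's `polys` alias for scipy.special is in scope).
import numpy as np

x, w = np.polynomial.legendre.leggauss(10)
p2 = evalNormPoly(None, x, 2)
p3 = evalNormPoly(None, x, 3)
print(np.sum(w * p2 * p2))  # ~1.0
print(np.sum(w * p2 * p3))  # ~0.0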
# In[43]:

# CODE BOTTLENECK!
#
# Evaluate Legendre from l=2 to l=lmax for each matrix entry:
#     [P_2(M) P_3(M) P_4(M) ... P_lmax(M)]
#
# WITHOUT BROADCASTING, one would do something like
#     PlMat = []
#     for i in ellval:
#         PlMat.append(eval_legendre(i, dotproductmatrix))
#
# With broadcasting, we use:
PlMat = eval_legendre(ellval[:, None, None], dotproductmatrix)

# PlMat = [P_2(M) P_3(M) P_4(M) ... P_lmax(M)]
# PlMat is an array (len 31) of 3072-by-3072 matrices:
# PlMat.shape = (31, 3072, 3072)

# In[44]:

# Multiply PlMat by (2*l+1)/4pi, i.e. the normalization:
# [5/4pi * P_2(M)   7/4pi * P_3(M)   ...   65/4pi * P_32(M)]
norm_matrix = norm[:, None, None] * PlMat

# In[ ]:
def integrand(s, theta, delta):
    return np.sin(np.pi * s) * np.cos((s - 1. / 2.) * theta) \
        / eval_legendre(-s, -np.cos(delta))
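# --- Added usage sketch (not from the original source). eval_legendre also
# accepts non-integer degree, evaluating the Legendre function P_s, which is
# what integrand relies on; a straightforward numerical integration:
import numpy as np
from scipy.integrate import quad

theta, delta = 0.3, 0.8
val, err = quad(integrand, 0.0, 1.0, args=(theta, delta))
print(val, err)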
savefig("output/Cl_Dz.pdf") print("--> 'output/Cl_Dz.pdf' created") #show() """ ------------------------- ----> Calculate xi(z-z'): ------------------------- """ print("Computing xi(Dz)...") cosTh=math.cos(theta*math.pi/180.) xi=[0]*N_corr_Dz for row in range(len(ell)): xi += (2.*ell[row]+1.)*Cl[:,row]*sp.eval_legendre(ell[row],cosTh) * math.exp(-ell[row]*(ell[row]+1.)/ls**2.) xi = xi/(4.*math.pi) Dz_axis=[2.*i*Dz for i in range(N_corr_Dz)] "#Export data file" Fout_xiDz = open('output/xi_Dz.dat','w') Fout_xiDz.write('# Dz xi\n') for i in range(N_corr_Dz): Fout_xiDz.write("%e\t %e\n"%(Dz_axis[i], xi[i])) Fout_xiDz.close() print("--> 'output/xi_Dz.dat' created") "Export plot" figure()
def ComputeCov(th_line):
    for th_column in range(Npixel):
        if th_column >= th_line:
            for row in range(Nell):
                cosTh = math.cos(
                    th_line * theta_max / Npixel * math.pi / 180.)
                cosTh_prime = math.cos(
                    th_column * theta_max / Npixel * math.pi / 180.)
                # The following matrix is not handled by multi.Manager(),
                # hence it will be forgotten outside this ComputeCov()
                # function when running in parallel.
                cov[th_column, th_line] += 2. / fsky \
                    * (2. * ell[row] + 1.) / (4. * math.pi)**2. \
                    * (Cl[Dz_bin, row] + shot_noise)**2. \
                    * sp.eval_legendre(ell[row], cosTh) \
                    * sp.eval_legendre(ell[row], cosTh_prime)
    # Fill the th_line element with all the columns in line number th_line.
    # Hence cov_line is a vector where every element is a vector containing
    # a line of cov.
    cov_line[th_line] = cov[:, th_line]
# In[29]:

from scipy.special import eval_legendre  # special scipy function

# In[30]:

# Begin calculating S_ij piece by piece, in order to do the summation
# correctly:
#     S_ij = sum((2*ell + 1) * C_l * P_l(dotproductmatrix))
# NOT QUICK!
summatrix = np.sum([eval_legendre(i, dotproductmatrix) for i in ell], axis=0)

# In[31]:

# matrix_total =
#     (1/(4*math.pi)) * sum((2*ll + 1) * cltemp) * eval_legendre(ll, matrix_dotprod)
#
# Begin with adding theoretical scalar C_l values
add_clvalues = np.sum([i * summatrix for i in newnewcls], axis=0)

# In[32]:
def mie_N4grid(field, kk, R, C, ce, ci, jumpe, jumpi, N, point):
    """
    Requires:
        kk : numpy.array([kx, ky, kz])
        R : radius of the sphere
        C : center of the sphere
        ce, ci : contrast sqrt(epsExt), sqrt(epsInt)
        jumpe : coeff jump exterior (alpha_Dir, beta_Neu)
        jumpi : coeff jump interior (alpha_Dir, beta_Neu)
        N : number of modes
    """
    pt = point[:]
    kk = Cartesian(kk)
    k = kk.norm()
    kk = kk.normalized()
    # be careful with this test !!
    if sp.linalg.norm(sp.linalg.norm(pt - C) - R) > 0.3:
        return 0.0 + 0j
    else:
        p = Cartesian((pt[0], pt[1], pt[2]))
        pnorm = p.norm()
        pn = p.normalized()
        costheta = pn.dot(kk)

        kpnorm = k * pnorm
        ke, ki = ce * k, ci * k
        keR, kiR = ke * R, ki * R

        ae, be = jumpe
        ai, bi = jumpi
        ke_aeai = ke * ae * ai
        ki_aebi = ki * ae * bi
        ke_aibe = ke * ai * be
        ke_beae = ke * be * ae
        ke_bebe = ke * be * be
        ki_bebe = ke * ae * bi  # kept as in the original; unused below

        sqrt_e = sp.sqrt(sp.pi / (2 * keR))
        sqrt_i = sp.sqrt(sp.pi / (2 * kiR))
        sqrt_n = sp.sqrt(sp.pi / (2 * kpnorm))
        sqrt_m = sp.sqrt(sp.pi / (2 * ci * kpnorm))

        val = 0
        for n in myrange(N):
            Je = sqrt_e * jv(n + 0.5, keR)
            Ji = sqrt_i * jv(n + 0.5, kiR)
            Jpe = (n / keR) * Je - sqrt_e * jv(n + 1.5, keR)
            Jpi = (n / kiR) * Ji - sqrt_i * jv(n + 1.5, kiR)
            Ye = sqrt_e * yv(n + 0.5, keR)
            Ype = (n / keR) * Ye - sqrt_e * yv(n + 1.5, keR)
            locH1 = Je + 1j * Ye
            locH1p = Jpe + 1j * Ype
            if field == "sca":
                a = ke_aeai * Ji * Jpe
                b = ki_aebi * Jpi * Je
                c = ki_aebi * locH1 * Jpi
                d = ke_aibe * locH1p * Ji
                v = (2 * n + 1) * ((1j) ** n) * (a - b) / (c - d)
                Jn = sqrt_n * jv(n + 0.5, kpnorm)
                Jpn = (n / kpnorm) * Jn - sqrt_n * jv(n + 1.5, kpnorm)
                Yn = sqrt_n * yv(n + 0.5, kpnorm)
                Ypn = (n / kpnorm) * Yn - sqrt_n * yv(n + 1.5, kpnorm)
                locH1p = Jpn + 1j * Ypn
                cn = k * v * locH1p
            elif field == "int":
                a = ke_beae * locH1 * Jpe
                b = ke_bebe * Je * locH1p
                c = ki_aebi * locH1 * Jpi
                d = ke_aibe * locH1p * Ji
                v = (2 * n + 1) * ((1j) ** n) * (a - b) / (c - d)
                Jn = sqrt_m * jv(n + 0.5, ci * kpnorm)
                Jpn = (n / (ci * kpnorm)) * Jn \
                    - sqrt_m * jv(n + 1.5, ci * kpnorm)
                cn = ci * k * v * Jpn
            else:
                cn = k * ((1j) ** n) * (2 * n + 1) * Jp(n, k * pnorm)
            c = eval_legendre(n, costheta)
            val += cn * c
        return val
# Calculate i for which theta_i <= alpha.
n = len([i for i in theta_i if i <= alpha])

# The total number of angle points defines the size of the system of linear
# equations to solve.
N = len(theta_i)
M = np.zeros([N, N])

from scipy.special import eval_legendre  # Legendre polynomials

# Set up LHS of matrix equation (see Eq. 6 of Lamberti & Prato).
for i in range(N):
    for j in range(N):
        if (i >= 0 and i <= n):
            # for angles inside the aperture (potential condition; these
            # rows pair with the V entries of the RHS below)
            M[i][j] = math.pow(a, j) * eval_legendre(j, math.cos(theta_i[i]))
        else:
            # for angles outside the aperture (charge-free condition)
            M[i][j] = (2.0 * j + 1.0) * math.pow(a, j - 1) \
                * eval_legendre(j, math.cos(theta_i[i]))

# Set up RHS vector.
V_rhs = np.zeros(N)
for i in range(N):
    if (i >= 0 and i <= n):
        V_rhs[i] = V

# Solve the linear system for the coefficients A_i in Eqs. 1-2 of
# Lamberti & Prato.
A = np.linalg.solve(M, V_rhs)

# Calculate the surface charge density on the conductor surface (see Eq. 7
# of Lamberti & Prato).
sigma = np.zeros(N)
dec = np.arcsin(2 * rand(n_frb) - 1)

n_dot_n = (np.sin(ra[:, None]) * np.sin(ra[None, :])
           + np.cos(ra[:, None]) * np.cos(ra[None, :])
           * np.cos(dec[:, None] - dec[None, :]))
# Correct numerical errors on diagonal.
n_dot_n.flat[::n_frb + 1] = 1

C_tilde_alpha = np.zeros((n_l_bin, n_frb, n_frb), dtype=DTYPE)
this_l_bin_min = l_min
for ii in range(n_l_bin):
    for ll in range(this_l_bin_min, l_bin_max[ii]):
        interp_domain = np.linspace(-1., 1., 4 * l_max, endpoint=True)
        legen_interp = interp1d(interp_domain,
                                eval_legendre(ll, interp_domain),
                                kind="linear")
        legendre = legen_interp(n_dot_n)
        C_tilde_alpha[ii, :, :] += ((2 * ll + 1) / 4. / np.pi) * legendre
    C_tilde_alpha[ii].flat[::n_frb + 1] = 0
    this_l_bin_min = l_bin_max[ii]
print("C-tilde-alpha computed.")

# auto_var = 200**2  # Pulled random number from Matt McQuinn's paper. pc/cm^3.
auto_var = 100**2  # Round number for testing.

Tr_ab = np.zeros((n_l_bin, n_l_bin), dtype=float)
Tr_gab = np.zeros((n_l_bin, n_l_bin, n_l_bin), dtype=float)
for ii in range(n_l_bin):
    for jj in range(n_l_bin):
def createLegArray(sNmu, lMax):
    # Precompute P_l at each quadrature angle; row l holds P_l(sNmu).
    legArray = np.zeros((lMax + 1, len(sNmu)))
    for l in range(lMax + 1):
        legArray[l, :] = spc.eval_legendre(l, sNmu)
    return legArray
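# --- Added usage note (not from the original source): precompute P_l at the
# quadrature angles once and reuse rows, instead of calling eval_legendre in
# every sweep. Assumes scipy.special is aliased as spc, as above.
import numpy as np
import scipy.special as spc

mu, w = np.polynomial.legendre.leggauss(8)
leg = createLegArray(mu, 3)
print(leg.shape)                                      # (4, 8)
print(np.allclose(leg[2], spc.eval_legendre(2, mu)))  # True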
def ortho_poly(x, n, window=1., axis=-1):
    """Generate orthonormal basis polynomials.

    Generate the first `n` orthonormal basis polynomials over the given
    domain and for the given window using the Gram-Schmidt process.

    Parameters
    ----------
    x : 1D array length m
        Functional domain.
    n : integer
        number of polynomials to generate. `n` - 1 is the maximum order of
        the polynomials.
    window : 1D array length m
        Window (weight) function for which the polynomials are orthogonal.

    Returns
    -------
    polys : n by m array
        The n polynomial basis functions. Normalization is such that
        np.sum(polys[i,:] * window * polys[j,:]) = delta_{ij}
    """
    if np.any(window < 0):
        raise ValueError("Window function must never be negative.")
    # Check scipy version. If there is a stable polynomial package, use it.
    s_ver = sp.__version__.split('.')
    major = int(s_ver[0])
    minor = int(s_ver[1])
    if major <= 0 and minor < 8:
        new_sp = False
        if n > 20:
            raise NotImplementedError("High order polynomials unstable.")
    else:
        new_sp = True
    # Get the broadcasted shape of `x` and `window`.
    # The following is the only way I know how to get the broadcast shape of
    # x and window.
    # Turns out I could use np.broadcast here. Fix this later.
    shape = np.broadcast(x, window).shape
    m = shape[axis]
    # Construct a slice tuple for up-broadcasting arrays.
    upbroad = [slice(sys.maxsize)] * len(shape)
    upbroad[axis] = None
    upbroad = tuple(upbroad)
    # Allocate memory for output.
    polys = np.empty((n,) + shape, dtype=float)
    # For stability, rescale the domain.
    x_range = np.amax(x, axis) - np.amin(x, axis)
    x_mid = (np.amax(x, axis) + np.amin(x, axis)) / 2.
    x = (x - x_mid[upbroad]) / x_range[upbroad] * 2
    # Reshape x to be the final shape.
    x = np.zeros(shape, dtype=float) + x
    # Now loop through the polynomials and construct them.
    # This array will be the starting polynomial, before orthogonalization
    # (only used for earlier versions of scipy).
    if not new_sp:
        basic_poly = np.ones(shape, dtype=float) / np.sqrt(m)
    for ii in range(n):
        # Start with the basic polynomial.
        # If we have an up-to-date scipy, start with nearly orthogonal
        # functions. Otherwise, just start with the next polynomial.
        if not new_sp:
            new_poly = basic_poly.copy()
        else:
            new_poly = special.eval_legendre(ii, x)
        # Orthogonalize against all lower order polynomials.
        for jj in range(ii):
            new_poly -= (np.sum(new_poly * window * polys[jj, :],
                                axis)[upbroad] * polys[jj, :])
        # Normalize, accounting for possibility that all data is masked.
        norm = np.array(np.sqrt(np.sum(new_poly**2 * window, axis)))
        if norm.shape == ():
            if norm == 0:
                bad_inds = np.array(True)
                norm = np.array(1.)
            else:
                bad_inds = np.array(False)
        else:
            bad_inds = norm == 0
            norm[bad_inds] = 1.
        new_poly /= norm[upbroad]
        new_poly[bad_inds[upbroad]] = 0
        # Copy into output.
        polys[ii, :] = new_poly
        # Increment the base polynomial with another power of the domain for
        # the next iteration.
        if not new_sp:
            basic_poly *= x
    return polys
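# --- Added orthonormality check (not from the original source), assuming the
# module-level imports ortho_poly relies on (sys, scipy as sp, and
# scipy.special as special) are present. With a flat window, the Gram matrix
# of the returned basis should be the identity.
import numpy as np

x = np.linspace(0.0, 1.0, 200)
polys = ortho_poly(x, 5, window=np.ones_like(x))
gram = polys @ polys.T  # sums over the domain axis
print(np.allclose(gram, np.eye(5), atol=1e-8))  # True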
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import legendre, eval_legendre, eval_sh_legendre

if __name__ == '__main__':
    # Legendre
    plt.figure(1)
    x = np.arange(-1, 1, 0.01)
    for n in range(6):
        p = eval_legendre(n, x)
        plt.plot(x, p)

    # Shifted to [0, 1] Legendre
    plt.figure(2)
    x = np.arange(0, 1, 0.01)
    for n in range(6):
        p = eval_sh_legendre(n, x)
        plt.plot(x, p)

    # Shifted to [0, A] Legendre
    plt.figure(3)
    A = 10
    f = lambda x: (2. / A) * x - 1
    x = np.arange(0, A, 0.001)
    for n in range(6):
        p = eval_legendre(n, f(x))
        plt.plot(x, p)

    plt.show()
def ortho_poly_2D(x, y, n, window=1):
    """Generate a 2D orthonormal polynomial basis up to order `n - 1`.

    Generate the first `n(n + 1)/2` orthonormal basis polynomials over the
    given domain and for the given window using the Gram-Schmidt process.

    Parameters
    ----------
    x : 2D array
        Functional domain, x coordinate.
    y : 2D array
        Functional domain, y coordinate. Shape must be the same as `x` or
        broadcastable to the same shape.
    n : integer
        number of polynomials to generate. `n - 1` is the maximum order of
        the polynomials.
    window : 2D array
        Window (weight) function for which the polynomials are orthogonal.
        Shape must be the same as `x` or broadcastable to the same shape.

    Returns
    -------
    polys : array of shape (n, n) + x.shape
        The `n(n + 1)/2` polynomial basis functions. Normalization is such
        that np.sum(polys[i, j,:] * window * polys[k, l,:])
        = delta_{ik} delta_{jl}. Note that half the array is not used and
        is left empty.
    """
    # Check input shapes
    b = np.broadcast(x, y, window)
    if b.nd != 2:
        raise ValueError("Inputs not broadcastable to a 2D array.")
    # This doesn't actually work and it might not be possible to make it
    # work.
    msg = "This function is not working and the theory behind it is dubious."
    raise NotImplementedError(msg)

    out = np.zeros((n, n) + b.shape, dtype=float)
    # Check scipy version. If there is a stable polynomial package, use it.
    s_ver = sp.__version__.split('.')
    major = int(s_ver[0])
    minor = int(s_ver[1])
    if major <= 0 and minor < 8:
        new_sp = False
        if n > 20:
            raise NotImplementedError("High order polynomials unstable.")
    else:
        new_sp = True
    # For stability, rescale the domain.
    x_range = np.amax(x) - np.amin(x)
    x_mid = (np.amax(x) + np.amin(x)) / 2.
    x = (x - x_mid) / x_range * 2
    y_range = np.amax(y) - np.amin(y)
    y_mid = (np.amax(y) + np.amin(y)) / 2.
    y = (y - y_mid) / y_range * 2
    # Initialize basic polynomials for x and y domains if using old scipy.
    if not new_sp:
        basic_x = np.ones_like(x)
    # Loop over the polynomial indices and generate them.
    for ii in range(n):
        if not new_sp:
            basic_y = np.ones_like(y)
        # If using the new scipy, start with stably evaluated Legendre
        # polynomials.
        if new_sp:
            basic_x = special.eval_legendre(ii, x)
        for jj in range(n - ii):
            if new_sp:
                basic_y = special.eval_legendre(jj, y)
            # This polynomial begins as the product of the basic polynomials.
            new_poly = basic_x * basic_y
            # Orthogonalize against all lower order polynomials.
            # for kk in range(ii):
            #     for mm in range(jj):
            #         new_poly -= out[kk, mm] * np.sum(out[kk, mm] * window
            #                                          * new_poly)
            # Normalize.
            new_poly /= np.sqrt(np.sum(new_poly**2))
            # Copy to output array.
            out[ii, jj, ...] = new_poly
            # If using old scipy, update the basic polynomial to the next
            # order.
            if not new_sp:
                basic_y *= y
        if not new_sp:
            basic_x *= x
    return out