# Shared imports assumed by the snippets in this section (names match how
# they are used below). The snippets come from separate modules, so note:
# `basis`, `enhance`, `auxf`, and `normal_eqn_vects` referenced further down
# are project-local helpers not shown here, and the project module `basis` is
# distinct from the `basis` function immediately below. The test_* functions
# are methods of unittest.TestCase classes that are likewise not shown.
import logging
import sys
from copy import copy

import numpy as np
import numpy.polynomial.legendre as leg
import numpy.testing as npt
import pandas as pd
import sympy as smp
from numpy.polynomial import Legendre, Polynomial, polynomial
from numpy.polynomial import Legendre as L, Polynomial as P
from scipy.sparse import csr_matrix

# Stand-ins for the original modules' loggers
IFLOG = IFLOGGER = logger = logging.getLogger(__name__)


def basis(self, type, M, X_star=None):
    if type == 'monomial':
        if X_star is None:
            Phi = np.zeros((np.shape(self.X)[0], M + 1))
            for c in range(0, np.shape(Phi)[1]):
                Phi[:, c] = self.X[:, 0].T**c
        else:
            Phi = np.zeros((np.shape(X_star)[0], M + 1))
            for c in range(0, np.shape(Phi)[1]):
                Phi[:, c] = X_star[:, 0].T**c
    elif type == 'fourier':
        if X_star is None:
            Phi = np.zeros((np.shape(self.X)[0], (M + 1) * 2))
            for c in range(0, np.shape(Phi)[1], 2):
                Phi[:, c] = np.sin(int(c / 2) * np.pi * self.X[:, 0].T)
                Phi[:, c + 1] = np.cos(int(c / 2) * np.pi * self.X[:, 0].T)
        else:
            Phi = np.zeros((np.shape(X_star)[0], (M + 1) * 2))
            for c in range(0, np.shape(Phi)[1], 2):
                Phi[:, c] = np.sin(int(c / 2) * np.pi * X_star[:, 0].T)
                Phi[:, c + 1] = np.cos(int(c / 2) * np.pi * X_star[:, 0].T)
    elif type == 'legendre':
        # Evaluates only the degree-M Legendre polynomial (not a full
        # Vandermonde matrix), as in the original
        if X_star is None:
            Phi = Legendre.basis(M)(self.X)
        else:
            Phi = Legendre.basis(M)(X_star)
    return Phi
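# Usage sketch for `basis` above. Assumption: it is a method of a model class
# that stores training inputs in `self.X` as an (N, 1) array; `_Model` below
# is a hypothetical stand-in for that class, not the original.
class _Model:
    def __init__(self, X):
        self.X = X
    basis = basis  # reuse the module-level function above as a method

_m = _Model(np.linspace(-1, 1, 50).reshape(-1, 1))
print(_m.basis('monomial', M=3).shape)  # (50, 4): columns are x**0 .. x**3
print(_m.basis('fourier', M=3).shape)   # (50, 8): sin/cos pairs, k = 0 .. 3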
def dg_interior_flux_matrix(p):
    """Calculate the interior flux matrix for the standard DG advection
    equations.

    There is an analytical expression I could use for this too. See my
    thesis page 74.
    """
    F = np.zeros((p + 1, p + 1))
    for i in range(0, p + 1):
        dli = L.basis(i).deriv()
        for j in range(0, p + 1):
            lj = L.basis(j)
            F[i, j] = basis.integrate_legendre_product(dli, lj)
    return F
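# Hedged sanity check for dg_interior_flux_matrix. From the Legendre
# derivative recurrence P_n' = sum over k = n-1, n-3, ... of (2k+1) P_k, the
# entries should reduce to F[i, j] = 2 when j < i and i + j is odd, and 0
# otherwise (my derivation; verify against the thesis expression before
# relying on it).
#
#   F = dg_interior_flux_matrix(2)
#   # expected: [[0, 0, 0],
#   #            [2, 0, 0],
#   #            [0, 2, 0]]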
def icb_interface_flux_matrix(p, K, T):
    """The interface flux matrices for the ICB schemes; we use upwinding to
    get the fluxes. Denote T the translation necessary to evaluate the flux
    from the other cell.
    """

    # Enhanced polynomial degree
    phat = p + len(K)

    G0 = smp.zeros(p + 1, phat + 1)
    G1 = smp.zeros(p + 1, phat + 1)
    for i in range(0, p + 1):
        for j in range(0, phat + 1):
            li = L.basis(i)
            lj = L.basis(j)
            G0[i, j] = leg.legval(-1, li.coef) * \
                leg.legval(1, lj.coef) * (T**-1)
            G1[i, j] = leg.legval(1, li.coef) * leg.legval(1, lj.coef)

    # Enhancement matrix
    A, Ainv, B, Binv = enhance.enhancement_matrices(p, K)

    # Using the enhanced function in the flux (see notes 21/4/15)
    BL = smp.zeros(phat + 1, phat + 1)
    BR = smp.zeros(phat + 1, phat + 1)
    for i in range(p + 1):
        li = L.basis(i)
        BL[i, i] = 1  # basis.integrate_legendre_product(li, li)
        BR[i, i] = 1  # BL[i, i]
    for i, k in enumerate(K):
        lk = L.basis(k)
        int_lklk = basis.integrate_legendre_product(lk, lk)
        BL[i + p + 1, i + p + 1] = T  # * int_lklk
        BR[i + p + 1, i + p + 1] = (T**(-1))  # * int_lklk

    # Reduction matrix
    R = smp.zeros(phat + 1, p + 1)
    for i in range(p + 1):
        R[i, i] = 1
    for i, k in enumerate(K):
        for j in range(p + 1):
            R[i + p + 1, j] = auxf.delta(k, j)

    # Convert to sympy matrices
    G0 = smp.Matrix(G0) * smp.Matrix(Ainv) * BL * R
    G1 = smp.Matrix(G1) * smp.Matrix(Ainv) * BL * R

    return G0, G1
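# Hypothetical usage of icb_interface_flux_matrix (assumes the project-local
# `enhance` and `auxf` modules are importable): a p = 1 ICB scheme that
# borrows mode 2 from the neighboring cell, with T the symbolic translation.
#
#   T = smp.Symbol('T')
#   G0, G1 = icb_interface_flux_matrix(p=1, K=[2], T=T)  # each (2, 2)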
def regress_poly(degree, data, remove_mean=True, axis=-1):
    '''
    Returns data with degree polynomial regressed out.
    By default it is calculated along the last axis (usually time).
    If remove_mean is True (default), the data is demeaned (i.e. degree 0).
    If remove_mean is False, the data is not demeaned.
    '''
    IFLOG.debug('Performing polynomial regression on data of shape ' +
                str(data.shape))

    datashape = data.shape
    timepoints = datashape[axis]

    # Rearrange all voxel-wise time-series in rows
    data = data.reshape((-1, timepoints))

    # Generate design matrix
    X = np.ones((timepoints, 1))  # quick way to calc degree 0
    for i in range(degree):
        polynomial_func = Legendre.basis(i + 1)
        value_array = np.linspace(-1, 1, timepoints)
        X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis]))

    # Calculate coefficients
    betas = np.linalg.pinv(X).dot(data.T)

    # Estimation
    if remove_mean:
        datahat = X.dot(betas).T
    else:  # disregard the first layer of X, which is degree 0
        datahat = X[:, 1:].dot(betas[1:, ...]).T
    regressed_data = data - datahat

    # Back to original shape
    return regressed_data.reshape(datashape)
def evaluate_basis_gauss(self):
    """Evaluate the basis at the Gaussian quadrature nodes.

    phi will be used to transform Legendre solution coefficients to the
    solution evaluated at the Gaussian quadrature nodes.

    dphi_w will be used for the interior flux integral.
    """
    phi = np.zeros((len(self.x), self.N_s))
    dphi_w = np.zeros((len(self.x), self.N_s))
    for n in range(self.N_s):
        # Get the Legendre polynomial of order n and its gradient
        l = L.basis(n)
        dl = l.deriv()

        # Evaluate the basis at the Gaussian nodes
        phi[:, n] = leg.legval(self.x, l.coef)

        # Evaluate the gradient at the Gaussian nodes and multiply by the
        # weights (stored row-wise, which requires len(self.x) == self.N_s)
        dphi_w[n, :] = leg.legval(self.x, dl.coef) * self.w

    return phi, dphi_w
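# Minimal usage sketch for evaluate_basis_gauss. Assumption: the surrounding
# class stores the Gauss-Legendre nodes/weights in self.x/self.w and the
# number of solution coefficients in self.N_s; `_Element` is a hypothetical
# stand-in built on numpy's leggauss rule, not the original class.
class _Element:
    def __init__(self, N_s):
        self.N_s = N_s
        self.x, self.w = leg.leggauss(N_s)  # N_s nodes and weights
    evaluate_basis_gauss = evaluate_basis_gauss

phi, dphi_w = _Element(3).evaluate_basis_gauss()  # both (3, 3)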
def enhancement_matrices(solution_order, modes):
    r"""Returns the enhancement matrices (and their inverse)

    Returns A and inv(A) where A \hat{u} = [uL; some_modes_of(uR)]
            B and inv(B) where B \hat{u} = [uR; some_modes_of(uL)]

    Note: this is slightly different than what I do in icb_functions.py
    (called by advection.py) where the right hand side contains the
    normalization factors (i.e. A x = b where b = uL_i \int \phi_i \phi_i dx).
    Here I put \int \phi_i \phi_i dx into A and B (denoted norm down in the
    code below).
    """

    # Enhanced solution order
    order = solution_order + len(modes)

    # Submatrices to build the main matrix later
    a = np.diag(np.ones(solution_order + 1))
    b = np.zeros((solution_order + 1, len(modes)))
    cl = np.zeros((len(modes), order + 1))
    cr = np.zeros((len(modes), order + 1))

    # Loop on the modes we are keeping in the neighboring cell
    # (the right cell)
    for i, mode in enumerate(modes):
        # Loop on the enhancement basis
        for j in range(order + 1):
            # Basis function in the right cell
            l1 = L.basis(mode)

            # Enhanced basis function extending into the right cell (or left
            # cell)
            ll = basis.shift_legendre_polynomial(L.basis(j), 2)
            lr = basis.shift_legendre_polynomial(L.basis(j), -2)

            # Inner product for the left and right enhancements
            norm = basis.integrate_legendre_product(l1, l1)
            cl[i, j] = basis.integrate_legendre_product(l1, ll) / norm
            cr[i, j] = basis.integrate_legendre_product(l1, lr) / norm

    # Put the matrices together
    A = np.vstack((np.hstack((a, b)), cl))
    B = np.vstack((np.hstack((a, b)), cr))

    return A, np.linalg.inv(A), B, np.linalg.inv(B)
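# Hypothetical usage (depends on the project-local `basis` helpers sketched
# later in this section): enhance a p = 1 solution with mode 2 from each
# neighbor. With solution_order = 1 and modes = [2] the enhanced order is 2,
# so all four returned matrices are (3, 3).
#
#   A, Ainv, B, Binv = enhancement_matrices(solution_order=1, modes=[2])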
def dg_interface_flux_matrix(p, T):
    """The interface flux matrices; we use upwinding to get the fluxes.

    Denote T the translation necessary to evaluate the flux from the other
    cell. There are also analytical expressions for these. See my thesis
    page 73.
    """
    G0 = smp.zeros(p + 1)
    G1 = smp.zeros(p + 1)
    for i in range(0, p + 1):
        for j in range(0, p + 1):
            li = L.basis(i)
            lj = L.basis(j)
            G0[i, j] = leg.legval(-1, li.coef) * \
                leg.legval(1, lj.coef) * (T**-1)
            G1[i, j] = leg.legval(1, li.coef) * leg.legval(1, lj.coef)
    return G0, G1
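# Usage sketch: T carries the symbolic translation between cells, so the
# upwind matrices stay exact until a value is substituted for it.
T_sym = smp.Symbol('T')
G0, G1 = dg_interface_flux_matrix(2, T_sym)  # (3, 3) sympy matrices in T_sym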
def test_integrate_legendre_product(self):
    """Is the integral of the product of two Legendre polynomials correct"""
    # Given a Legendre polynomial: L(x) = 0.5*(3x^2 - 1)
    l1 = L.basis(2)

    # Evaluate L(x+2)
    l2 = basis.shift_legendre_polynomial(l1, 2)

    # The integral of l1*l2 over [-1,1] = 0.4
    self.assertAlmostEqual(basis.integrate_legendre_product(l1, l2), 0.4)
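# The tests in this section rely on two helpers from the project-local
# `basis` module that are not shown here. A minimal sketch of what
# integrate_legendre_product could look like (an assumption, not the
# project's actual implementation): multiply the two numpy Legendre series
# and integrate the product over [-1, 1]. It reproduces the 0.4 expected
# above.
def _integrate_legendre_product(l1, l2):
    prod = l1 * l2            # product of two Legendre series
    antideriv = prod.integ()  # term-by-term antiderivative
    return antideriv(1.0) - antideriv(-1.0)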
def test_shift_legendre_polynomial(self):
    """Is the shifting of Legendre polynomials correct"""
    # Given a Legendre polynomial: L(x) = 0.5*(3x^2 - 1)
    l = L.basis(2)

    # Evaluate L(x+2)
    ls = basis.shift_legendre_polynomial(l, 2)

    # This should be equal to 5.5 + 6x + 1.5x^2
    npt.assert_array_almost_equal(ls.convert(kind=P).coef,
                                  np.array([5.5, 6, 1.5]), decimal=13)
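# A matching sketch for shift_legendre_polynomial (again an assumption):
# numpy polynomial series compose, so evaluating l at the polynomial
# x + shift and converting back gives the shifted Legendre series. For
# L.basis(2) and shift=2 this yields power coefficients [5.5, 6, 1.5],
# matching the test above.
def _shift_legendre_polynomial(l, shift):
    return l(P([shift, 1.0])).convert(kind=L)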
def genNoise(self, grid, maxNoiseOrder):
    """Noise is a matrix of Legendre polynomials of 0 < order < maxNoiseOrder.

    Additionally, 60Hz sine and cosine waves are added to account for the AC
    (line noise) component of EEG.

    grid          -- Grid to be used for timing information
    maxNoiseOrder -- Maximum order of noise to be considered
    """
    if self.grid() is not None and self.noiseOrders() is not None:
        logger.info('Generating noise matrix')
        legpoly = np.array([Legendre.basis(i)(np.arange(len(grid.times())))
                            for i in self.noiseOrders()]).T  # Polynomials
        sw = np.sin(60 * np.arange(len(grid.times())) * 2 * np.pi /
                    float(grid.fs()))  # Sine for AC component
        cw = np.cos(60 * np.arange(len(grid.times())) * 2 * np.pi /
                    float(grid.fs()))  # Cosine for AC component
        legpoly = np.column_stack((legpoly, sw, cw))
        return pd.DataFrame(legpoly, index=self.grid().times())
def regress_poly(degree, data, remove_mean=True, axis=-1):
    """
    Returns data with degree polynomial regressed out.

    :param bool remove_mean: whether or not to demean data (i.e. degree 0)
    :param int axis: numpy array axis along which regression is performed
    """
    timepoints = data.shape[0]

    # Generate design matrix
    X = np.ones((timepoints, 1))  # quick way to calc degree 0
    for i in range(degree):
        polynomial_func = Legendre.basis(i + 1)
        value_array = np.linspace(-1, 1, timepoints)
        X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis]))

    non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([])

    betas = np.linalg.pinv(X).dot(data)

    if remove_mean:
        datahat = X.dot(betas)
    else:  # disregard the first layer of X, which is degree 0
        datahat = X[:, 1:].dot(betas[1:, ...])
    regressed_data = data - datahat

    return regressed_data, non_constant_regressors
def approx_legendre_poly(Moments):
    n_moments = Moments.shape[0] - 1
    exp_coef = np.zeros(1)
    # For method description see, for instance:
    # Chapter 3 of "The Problem of Moments",
    # James Alexander Shohat, Jacob David Tamarkin
    for i in range(n_moments + 1):
        p = Legendre.basis(i).convert(window=[0.0, 1.0], kind=Polynomial)
        q = (2 * i + 1) * np.sum(Moments[0:(i + 1)] * p.coef)
        pq = p.coef * q
        exp_coef = polynomial.polyadd(exp_coef, pq)
    expansion = Polynomial(exp_coef)
    return expansion
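# Hedged sanity check for approx_legendre_poly: the moments of the uniform
# density on [0, 1] are 1 / (k + 1), and orthogonality of the shifted
# Legendre polynomials makes every coefficient beyond the constant vanish,
# so the recovered expansion should be ~1 everywhere on [0, 1].
moments = 1.0 / (np.arange(6) + 1.0)
f_hat = approx_legendre_poly(moments)
print(f_hat(0.25), f_hat(0.75))  # both ~= 1.0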
def regress_poly(degree, data, remove_mean=True, axis=-1):
    """
    Returns data with degree polynomial regressed out.

    :param bool remove_mean: whether or not to demean data (i.e. degree 0)
    :param int axis: numpy array axis along which regression is performed
    """
    IFLOGGER.debug('Performing polynomial regression on data of shape %s',
                   str(data.shape))

    datashape = data.shape
    timepoints = datashape[axis]

    # Rearrange all voxel-wise time-series in rows
    data = data.reshape((-1, timepoints))

    # Generate design matrix
    X = np.ones((timepoints, 1))  # quick way to calc degree 0
    for i in range(degree):
        polynomial_func = Legendre.basis(i + 1)
        value_array = np.linspace(-1, 1, timepoints)
        X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis]))

    non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([])

    # Calculate coefficients
    betas = np.linalg.pinv(X).dot(data.T)

    # Estimation
    if remove_mean:
        datahat = X.dot(betas).T
    else:  # disregard the first layer of X, which is degree 0
        datahat = X[:, 1:].dot(betas[1:, ...]).T
    regressed_data = data - datahat

    # Back to original shape
    return regressed_data.reshape(datashape), non_constant_regressors
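# Usage sketch for regress_poly: remove a quadratic trend from 10 voxel
# time-series of length 200 (regression runs along the last axis, per the
# docstring).
trend = np.linspace(0, 5, 200) ** 2
data = np.random.randn(10, 200) + trend
cleaned, regressors = regress_poly(2, data)
print(cleaned.shape, regressors.shape)  # (10, 200) (200, 2)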
def fit_legendres_images(images, centers, lg_inds, rad_inds, maxPixel,
                         rotate=0, image_stds=None, image_counts=None,
                         image_nanMaps=None, image_weights=None,
                         chiSq_fit=False, rad_range=None):
    """Fits legendre polynomials to an array of single images (3d) or a
    list/array of an array of scan images, possible dimensionality:
      1) [NtimeSteps, image_rows, image_cols]
      2) [NtimeSteps (list), Nscans, image_rows, image_cols]
    """

    if image_counts is None:
        image_counts = []
        for im in range(len(images)):
            image_counts.append(np.ones_like(images[im]))
            image_counts[im][np.isnan(images[im])] = 0

    if chiSq_fit and (image_stds is None):
        print("If using the chiSq fit you must supply image_stds")
        return None

    if image_stds is None:
        image_stds = []
        for im in range(len(images)):
            image_stds.append(np.ones_like(images[im]))
            image_stds[im][np.isnan(images[im])] = 0

    with_scans = len(images[0].shape) + 1 >= 4

    img_fits = [[] for x in range(len(images))]
    img_covs = [[] for x in range(len(images))]
    for rad in range(maxPixel):
        if rad_range is not None:
            if rad < rad_range[0] or rad >= rad_range[1]:
                continue
        if rad % 25 == 0:
            print("Fitting radius {}".format(rad))

        pixels, nans, angles = [], [], []
        all_angles = np.arctan2(rad_inds[rad][1].astype(float),
                                rad_inds[rad][0].astype(float))
        all_angles[all_angles < 0] += 2 * np.pi
        all_angles = np.mod(all_angles + rotate, 2 * np.pi)
        all_angles[all_angles > np.pi] -= 2 * np.pi
        if np.sum(np.mod(lg_inds, 2)) == 0:
            all_angles[np.abs(all_angles) > np.pi / 2.] -= np.pi * np.sign(
                all_angles[np.abs(all_angles) > np.pi / 2.])
        angles = np.unique(np.abs(all_angles))
        ang_sort_inds = np.argsort(angles)
        angles = angles[ang_sort_inds]
        Nangles = angles.shape[0]

        if len(angles) == len(all_angles):
            do_merge = False
        else:
            do_merge = True
            mi_rows, mi_cols, mi_data = [], [], []
            pr, pc, pv = [], [], []
            for ia, ang in enumerate(angles):
                inds = np.where(np.abs(all_angles) == ang)[0]
                mi_rows.append(np.ones_like(inds) * ia)
                mi_cols.append(inds)
            mi_rows, mi_cols = np.concatenate(mi_rows), np.concatenate(mi_cols)
            merge_indices = csr_matrix(
                (np.ones_like(mi_rows), (mi_rows, mi_cols)),
                shape=(len(angles), len(all_angles)))

        for im in range(len(images)):
            if with_scans:
                angs_tile = np.tile(angles, (images[im].shape[0], 1))
                scn_inds, row_inds, col_inds = [], [], []
                for isc in range(images[im].shape[0]):
                    scn_inds.append(
                        np.ones(rad_inds[rad][0].shape[0], dtype=int) * isc)
                    row_inds.append(rad_inds[rad][0] + centers[im][isc, 0])
                    col_inds.append(rad_inds[rad][1] + centers[im][isc, 1])
                scn_inds = np.concatenate(scn_inds)
                row_inds = np.concatenate(row_inds)
                col_inds = np.concatenate(col_inds)
                img_pixels = np.reshape(
                    copy(images[im][scn_inds, row_inds, col_inds]),
                    (images[im].shape[0], -1))
                img_counts = np.reshape(
                    copy(image_counts[im][scn_inds, row_inds, col_inds]),
                    (images[im].shape[0], -1))
                img_stds = np.reshape(
                    copy(image_stds[im][scn_inds, row_inds, col_inds]),
                    (images[im].shape[0], -1))
                if image_nanMaps is not None:
                    img_pixels[np.reshape(
                        image_nanMaps[im][scn_inds, row_inds, col_inds],
                        (images[im].shape[0], -1)).astype(bool)] = np.nan
                    img_counts[np.reshape(
                        image_nanMaps[im][scn_inds, row_inds, col_inds],
                        (images[im].shape[0], -1)).astype(bool)] = 0
                if image_weights is not None:
                    img_weights = np.reshape(
                        copy(image_weights[im][scn_inds, row_inds, col_inds]),
                        (images[im].shape[0], -1))
            else:
                angs_tile = np.expand_dims(angles, 0)
                row_inds = rad_inds[rad][0] + centers[im, 0]
                col_inds = rad_inds[rad][1] + centers[im, 1]
                img_pixels = np.reshape(copy(images[im][row_inds, col_inds]),
                                        (1, -1))
                img_counts = np.reshape(
                    copy(image_counts[im][row_inds, col_inds]), (1, -1))
                img_stds = np.reshape(
                    copy(image_stds[im][row_inds, col_inds]), (1, -1))
                if image_nanMaps is not None:
                    img_pixels[np.reshape(
                        image_nanMaps[im][row_inds, col_inds],
                        (1, -1)).astype(bool)] = np.nan
                    img_counts[np.reshape(
                        image_nanMaps[im][row_inds, col_inds],
                        (1, -1)).astype(bool)] = 0
                if image_weights is not None:
                    img_weights = np.reshape(
                        copy(image_weights[im][row_inds, col_inds]), (1, -1))

            img_pix = img_pixels * img_counts
            img_var = img_counts * (img_stds**2)
            img_pix[np.isnan(img_pixels)] = 0
            img_var[np.isnan(img_pixels)] = 0
            if do_merge:
                img_pixels[np.isnan(img_pixels)] = 0
                img_pix = np.transpose(merge_indices.dot(
                    np.transpose(img_pix)))
                img_var = np.transpose(merge_indices.dot(
                    np.transpose(img_var)))
                img_counts = np.transpose(
                    merge_indices.dot(np.transpose(img_counts)))
                if image_weights is not None:
                    print("Must fill this in, don't forget std option")
                    sys.exit(0)
            else:
                img_pix = img_pix[:, ang_sort_inds]
                img_var = img_var[:, ang_sort_inds]
                img_counts = img_counts[:, ang_sort_inds]

            img_pix /= img_counts
            img_var /= img_counts
            Nnans = np.sum(np.isnan(img_pix), axis=-1)
            ang_inds = np.where(img_counts > 0)
            arr_inds = np.concatenate(
                [np.arange(Nangles - Nn) for Nn in Nnans])

            img_pixels = np.zeros_like(img_pix)
            img_vars = np.zeros_like(img_var)
            img_angs = np.zeros_like(img_pix)
            img_dang = np.zeros_like(img_pix)
            img_pixels[ang_inds[0][:-1], arr_inds[:-1]] =\
                (img_pix[ang_inds[0][:-1], ang_inds[1][:-1]]
                 + img_pix[ang_inds[0][1:], ang_inds[1][1:]]) / 2.
            img_vars[ang_inds[0][:-1], arr_inds[:-1]] =\
                (img_var[ang_inds[0][:-1], ang_inds[1][:-1]]
                 + img_var[ang_inds[0][1:], ang_inds[1][1:]]) / 2.
            img_angs[ang_inds[0][:-1], arr_inds[:-1]] =\
                (angs_tile[ang_inds[0][:-1], ang_inds[1][:-1]]
                 + angs_tile[ang_inds[0][1:], ang_inds[1][1:]]) / 2.
            img_dang[ang_inds[0][:-1], arr_inds[:-1]] =\
                (angs_tile[ang_inds[0][1:], ang_inds[1][1:]]
                 - angs_tile[ang_inds[0][:-1], ang_inds[1][:-1]])
            for isc in range(Nnans.shape[0]):
                # Using angle midpoint => one less angle => Nnans[isc]+1
                img_pixels[isc, -1 * (Nnans[isc] + 1):] = 0
                img_vars[isc, -1 * (Nnans[isc] + 1):] = 0
                img_angs[isc, -1 * (Nnans[isc] + 1):] = 0
                img_dang[isc, -1 * (Nnans[isc] + 1):] = 0

            if image_weights is not None:
                print("Must fill this in and check below")
                sys.exit(0)
            elif chiSq_fit:
                img_weights = 1. / img_vars
                img_weights[img_vars == 0] = 0
            else:
                img_weights = np.ones_like(img_pixels)
            img_weights *= np.sin(img_angs) * img_dang

            lgndrs = []
            for lg in lg_inds:
                lgndrs.append(Legendre.basis(lg)(np.cos(img_angs)))
            lgndrs = np.transpose(np.array(lgndrs), (1, 0, 2))

            empty_scan = np.sum(img_weights.astype(bool), -1) < 2
            overlap = np.einsum('bai,bi,bci->bac',
                                lgndrs[np.invert(empty_scan)],
                                img_weights[np.invert(empty_scan)],
                                lgndrs[np.invert(empty_scan)],
                                optimize='greedy')
            empty_scan[np.invert(empty_scan)] = (np.linalg.det(overlap) == 0.0)
            if np.any(empty_scan):
                fit = np.ones((img_pixels.shape[0], len(lg_inds))) * np.nan
                cov = np.ones(
                    (img_pixels.shape[0], len(lg_inds), len(lg_inds))) * np.nan
                if np.any(np.invert(empty_scan)):
                    img_pixels = img_pixels[np.invert(empty_scan)]
                    img_weights = img_weights[np.invert(empty_scan)]
                    img_vars = img_vars[np.invert(empty_scan)]
                    lgndrs = lgndrs[np.invert(empty_scan)]
                    fit[np.invert(empty_scan)], cov[np.invert(empty_scan)] =\
                        normal_eqn_vects(lgndrs, img_pixels,
                                         img_weights, img_vars)
            else:
                fit, cov = normal_eqn_vects(lgndrs, img_pixels,
                                            img_weights, img_vars)

            img_fits[im].append(np.expand_dims(fit, 1))
            img_covs[im].append(np.expand_dims(cov, 1))

    Nscans = None
    for im in range(len(img_fits)):
        img_fits[im] = np.concatenate(img_fits[im], 1)
        img_covs[im] = np.concatenate(img_covs[im], 1)
        if Nscans is None:
            Nscans = img_fits[im].shape[0]
        elif Nscans != img_fits[im].shape[0]:
            Nscans = -1
    if Nscans > 0:
        img_fits = np.array(img_fits)
        img_covs = np.array(img_covs)

    if with_scans:
        return img_fits, img_covs
    else:
        return img_fits[:, 0, :, :], img_covs[:, 0, :, :]
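# Hypothetical call sketch for fit_legendres_images. Assumptions not shown in
# this section: rad_inds[r] holds the (row, col) pixel offsets at integer
# radius r relative to the image center, and normal_eqn_vects is the
# project's weighted least-squares solver.
#
#   fits, covs = fit_legendres_images(images, centers,
#                                     lg_inds=[0, 2, 4], rad_inds=rad_inds,
#                                     maxPixel=100, rad_range=(10, 90))
#   # no-scan case: fits has shape [NtimeSteps, Nfitted_radii, len(lg_inds)]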