def decode(self,
           symbol_stream: np.ndarray,
           channel_state: ChannelStateInformation,
           stream_noises: np.ndarray) -> Tuple[np.ndarray, ChannelStateInformation, np.ndarray]:

    for time_idx, (symbols, csi, noise) in enumerate(zip(symbol_stream.T,
                                                         channel_state.samples(),
                                                         stream_noises.T)):

        noise_variance = np.mean(noise)

        # Combine the responses of all superimposed transmit antennas for equalization
        transform = np.sum(csi.linear[:, :, 0, :], axis=2, keepdims=False)

        # Compute the pseudo-inverse from the singular-value-decomposition of the linear channel transform
        # noinspection PyTupleAssignmentBalance
        u, s, vh = svd(transform.todense(), full_matrices=False, check_finite=False)
        u *= s / (s**2 + noise_variance)

        equalizer = (u @ vh).T.conj()

        symbol_stream[:, time_idx] = equalizer @ symbols
        channel_state.state[:, :, time_idx, :] = np.tensordot(equalizer, csi.linear[:, :, 0, :], axes=(1, 0))
        stream_noises[:, time_idx] = noise * (s**2 + noise_variance)

    return symbol_stream, channel_state, stream_noises
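# A minimal, self-contained sketch of the MMSE equalization step used in decode() above,
# assuming a single dense channel matrix H, a received vector y and a scalar noise variance.
# All names here (H, x, y, noise_variance) are illustrative and not part of the original class.
import numpy as np
from scipy.linalg import svd

rng = np.random.default_rng(42)

# Build a well-conditioned 4x4 complex channel with known singular values.
q1, _ = np.linalg.qr(rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)))
q2, _ = np.linalg.qr(rng.normal(size=(4, 4)) + 1j * rng.normal(size=(4, 4)))
H = q1 @ np.diag([2.0, 1.5, 1.0, 0.5]) @ q2.conj().T

noise_variance = 1e-3
x = rng.normal(size=4) + 1j * rng.normal(size=4)                       # transmitted symbols
n = np.sqrt(noise_variance / 2) * (rng.normal(size=4) + 1j * rng.normal(size=4))
y = H @ x + n                                                          # received symbols

# MMSE pseudo-inverse assembled from the SVD, mirroring the loop body above.
u, s, vh = svd(H, full_matrices=False)
u = u * (s / (s**2 + noise_variance))
equalizer = (u @ vh).T.conj()

x_hat = equalizer @ y
print(np.linalg.norm(x_hat - x) / np.linalg.norm(x))   # small relative error at low noise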
def extract_noise_components(realigned_file, mask_file, num_components=5,
                             extra_regressors=None):
    """Derive components most reflective of physiological noise

    Parameters
    ----------
    realigned_file: a 4D Nifti file containing realigned volumes
    mask_file: a 3D Nifti file containing white matter + ventricular masks
    num_components: number of components to use for noise decomposition
    extra_regressors: additional regressors to add

    Returns
    -------
    components_file: a text file containing the noise components
    """
    from scipy.linalg.decomp_svd import svd
    import numpy as np
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    from nipype.utils.filemanip import filename_to_list
    import os

    imgseries = nb.load(realigned_file, mmap=NUMPY_MMAP)
    components = None
    for filename in filename_to_list(mask_file):
        mask = nb.load(filename, mmap=NUMPY_MMAP).get_data()
        if len(np.nonzero(mask > 0)[0]) == 0:
            continue
        voxel_timecourses = imgseries.get_data()[mask > 0]
        voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
        # remove mean and normalize by variance
        # voxel_timecourses.shape == [nvoxels, time]
        X = voxel_timecourses.T
        stdX = np.std(X, axis=0)
        stdX[stdX == 0] = 1.
        stdX[np.isnan(stdX)] = 1.
        stdX[np.isinf(stdX)] = 1.
        X = (X - np.mean(X, axis=0)) / stdX
        u, _, _ = svd(X, full_matrices=False)
        if components is None:
            components = u[:, :num_components]
        else:
            components = np.hstack((components, u[:, :num_components]))
    if extra_regressors:
        regressors = np.genfromtxt(extra_regressors)
        components = np.hstack((components, regressors))
    components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, components, fmt=b"%.10f")
    return components_file
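# A minimal sketch of the CompCor-style core used above, run on a synthetic matrix instead
# of NIfTI files: rows are time points, columns are voxel time courses, and the first
# num_components left-singular vectors of the variance-normalized data serve as nuisance
# regressors. The names (data, num_components) are illustrative only.
import numpy as np
from scipy.linalg import svd

rng = np.random.default_rng(0)
num_components = 5
data = rng.normal(size=(120, 500))        # time x voxels, stand-in for masked voxel data

X = data - data.mean(axis=0)              # remove the mean of every voxel time course
stdX = data.std(axis=0)
stdX[stdX == 0] = 1.0                     # guard against flat (zero-variance) voxels
X /= stdX

u, _, _ = svd(X, full_matrices=False)     # left-singular vectors live in the time domain
components = u[:, :num_components]        # one column per noise component
print(components.shape)                   # (120, 5)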
def extract_noise_components(realigned_file, mask_file, num_components=5,
                             extra_regressors=None):
    """Derive components most reflective of physiological noise

    Parameters
    ----------
    realigned_file: a 4D Nifti file containing realigned volumes
    mask_file: a 3D Nifti file containing white matter + ventricular masks
    num_components: number of components to use for noise decomposition
    extra_regressors: additional regressors to add

    Returns
    -------
    components_file: a text file containing the noise components
    """
    from scipy.linalg.decomp_svd import svd
    import numpy as np
    import nibabel as nb
    from nipype.utils import NUMPY_MMAP
    from nipype.utils.filemanip import filename_to_list
    import os

    imgseries = nb.load(realigned_file, mmap=NUMPY_MMAP)
    components = None
    for filename in filename_to_list(mask_file):
        mask = nb.load(filename, mmap=NUMPY_MMAP).get_data()
        if len(np.nonzero(mask > 0)[0]) == 0:
            continue
        voxel_timecourses = imgseries.get_data()[mask > 0]
        voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
        # remove mean and normalize by variance
        # voxel_timecourses.shape == [nvoxels, time]
        X = voxel_timecourses.T
        stdX = np.std(X, axis=0)
        stdX[stdX == 0] = 1.
        stdX[np.isnan(stdX)] = 1.
        stdX[np.isinf(stdX)] = 1.
        X = (X - np.mean(X, axis=0)) / stdX
        u, _, _ = svd(X, full_matrices=False)
        if components is None:
            components = u[:, :num_components]
        else:
            components = np.hstack((components, u[:, :num_components]))
    if extra_regressors:
        regressors = np.genfromtxt(extra_regressors)
        components = np.hstack((components, regressors))
    components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, components, fmt=b"%.10f")
    return components_file
def orthogonal_procrustes(A, ref_matrix, reflection=False):
    # Adaptation of scipy.linalg.orthogonal_procrustes ->
    #   https://github.com/scipy/scipy/blob/v0.16.0/scipy/linalg/_procrustes.py#L14
    # Info here:
    #   http://compgroups.net/comp.soft-sys.matlab/procrustes-analysis-without-reflection/896635
    # Goal is to find a unitary matrix R with det(R) > 0 such that
    # ||A*R - ref_matrix||^2 is minimized.
    from scipy.linalg.decomp_svd import svd  # Singular Value Decomposition, factors matrices
    from scipy.linalg import det
    import numpy as np

    A = np.asarray_chkfinite(A)
    ref_matrix = np.asarray_chkfinite(ref_matrix)

    if A.ndim != 2:
        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
    if A.shape != ref_matrix.shape:
        raise ValueError('the shapes of A and ref_matrix differ (%s vs %s)'
                         % (A.shape, ref_matrix.shape))

    u, w, vt = svd(ref_matrix.T.dot(A).T)

    # Goal: minimize ||A*R - ref||^2, switch to trace
    # trace((A*R-ref).T*(A*R-ref)), now we distribute
    # trace(R'*A'*A*R) + trace(ref.T*ref) - trace((A*R).T*ref) - trace(ref.T*(A*R)),
    #   trace doesn't care about order, so re-order
    # trace(R*R.T*A.T*A) + trace(ref.T*ref) - trace(R.T*A.T*ref) - trace(ref.T*A*R), simplify
    # trace(A.T*A) + trace(ref.T*ref) - 2*trace(ref.T*A*R)
    # Thus, to minimize we want to maximize trace(ref.T * A * R)
    # u*w*v.T = (ref.T*A).T
    # ref.T * A = w * u.T * v
    # trace(ref.T * A * R) = trace(w * u.T * v * R)
    # differences minimized when trace(ref.T * A * R) is maximized, thus when
    #   trace(u.T * v * R) is maximized
    # This occurs when u.T * v * R = I (as u, v and R are all unitary matrices so max is 1)
    # R is a rotation matrix so R.T = R^-1
    # u.T * v * I = R^-1 = R.T
    # R = u * v.T
    # Thus, R = u.dot(vt)
    R = u.dot(vt)  # Get the rotation matrix, including reflections

    if not reflection and det(R) < 0:  # If we don't want reflection
        # To remove reflection, we change the sign of the rightmost column of u
        # (or v) and the scalar associated with that column
        u[:, -1] *= -1
        w[-1] *= -1
        R = u.dot(vt)

    scale = w.sum()  # Get the scaled difference

    return R, scale
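# Quick usage sketch for the orthogonal_procrustes above: rotate a small point set by a
# known 2D rotation and check that the recovered R (applied by right multiplication)
# maps A back onto the reference. Purely illustrative.
import numpy as np

theta = 0.3
rotation = np.array([[np.cos(theta), -np.sin(theta)],
                     [np.sin(theta),  np.cos(theta)]])

ref = np.array([[1.0, 0.0], [0.0, 1.0], [2.0, 3.0], [-1.0, 0.5]])
A = ref.dot(rotation.T)                    # rotated copy of the reference points

R, scale = orthogonal_procrustes(A, ref)
print(np.allclose(A.dot(R), ref))          # True: R undoes the applied rotation
print(np.allclose(R, rotation))            # True: the original rotation is recovered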
def decode(self,
           symbol_stream: np.ndarray,
           channel_state: ChannelStateInformation,
           stream_noises: np.ndarray) -> Tuple[np.ndarray, ChannelStateInformation, np.ndarray]:

    equalized_symbols = np.empty((channel_state.num_receive_streams, channel_state.num_samples), dtype=complex)
    equalized_noises = np.empty((channel_state.num_receive_streams, channel_state.num_samples), dtype=float)
    equalized_channel_state = ChannelStateInformation(channel_state.state_format)

    # Equalize in space in a first step
    for idx, (symbols, stream_state, noise) in enumerate(zip(symbol_stream,
                                                             channel_state.received_streams(),
                                                             stream_noises)):

        noise_variance = np.mean(noise)

        # Combine the responses of all superimposed transmit antennas for equalization
        linear_state = stream_state.linear
        transform = np.sum(linear_state[0, ::], axis=0, keepdims=False)

        # Compute the pseudo-inverse from the singular-value-decomposition of the linear channel transform
        # noinspection PyTupleAssignmentBalance
        u, s, vh = svd(transform.todense(), full_matrices=False, check_finite=False)
        u *= s / (s**2 + noise_variance)

        equalizer = (u @ vh).T.conj()

        equalized_symbols[idx, :] = equalizer @ symbols
        equalized_csi_slice = tensordot(equalizer, linear_state, axes=(1, 2)).transpose((1, 2, 0, 3))
        equalized_channel_state.append_linear(equalized_csi_slice, 0)
        equalized_noises[idx, :] = noise[:stream_state.num_samples] * (s**2 + noise_variance)

    # Return the equalized channel state alongside the equalized symbols and noises
    return equalized_symbols, equalized_channel_state, equalized_noises
def orthogonal_procrustes(A, ref_matrix, reflection=False):
    '''
    Using the orthogonal procrustes method, we find the unitary matrix R with
    det(R) > 0 such that ||A*R - ref_matrix||^2 is minimized.  This varies
    from that within scipy by the addition of the reflection term, allowing
    and disallowing inversion.  NOTE - This means that the rotation matrix is
    used for right side multiplication!

    **Parameters**

        A: *list,* :class:`squid.structures.atom.Atom`
            A list of atoms for which R will minimize the frobenius norm
            ||A*R - ref_matrix||^2.
        ref_matrix: *list,* :class:`squid.structures.atom.Atom`
            A list of atoms for which *A* is being rotated towards.
        reflection: *bool, optional*
            Whether inversion is allowed (True) or not (False).

    **Returns**

        R: *list, list, float*
            Right multiplication rotation matrix to best overlay A onto the
            reference matrix.
        scale: *float*
            Scalar between the matrices.

    **Derivation**

        Goal: minimize ||A\*R - ref||^2, switch to trace

        trace((A\*R-ref).T\*(A\*R-ref)), now we distribute

        trace(R'\*A'\*A\*R) + trace(ref.T\*ref) - trace((A\*R).T\*ref) -
        trace(ref.T\*(A\*R)), trace doesn't care about order, so re-order

        trace(R\*R.T\*A.T\*A) + trace(ref.T\*ref) - trace(R.T\*A.T\*ref) -
        trace(ref.T\*A\*R), simplify

        trace(A.T\*A) + trace(ref.T\*ref) - 2\*trace(ref.T\*A\*R)

        Thus, to minimize we want to maximize trace(ref.T \* A \* R)

        u\*w\*v.T = (ref.T\*A).T

        ref.T \* A = w \* u.T \* v

        trace(ref.T \* A \* R) = trace(w \* u.T \* v \* R)

        differences minimized when trace(ref.T \* A \* R) is maximized, thus
        when trace(u.T \* v \* R) is maximized

        This occurs when u.T \* v \* R = I (as u, v and R are all unitary
        matrices so max is 1)

        R is a rotation matrix so R.T = R^-1

        u.T \* v \* I = R^-1 = R.T

        R = u \* v.T

        Thus, R = u.dot(vt)

    **References**

        * https://github.com/scipy/scipy/blob/v0.16.0/scipy/linalg/_procrustes.py#L14
        * http://compgroups.net/comp.soft-sys.matlab/procrustes-analysis-without-reflection/896635
    '''
    assert hasattr(A, "__len__") and hasattr(ref_matrix, "__len__"),\
        "Error - A and ref_matrix must be lists of atomic coordinates!"
    cast.assert_vec(A[0], length=3, numeric=True)
    cast.assert_vec(ref_matrix[0], length=3, numeric=True)

    A = np.asarray_chkfinite(A)
    ref_matrix = np.asarray_chkfinite(ref_matrix)

    if A.ndim != 2:
        raise ValueError('expected ndim to be 2, but observed %s' % A.ndim)
    if A.shape != ref_matrix.shape:
        raise ValueError('the shapes of A and ref_matrix differ (%s vs %s)'
                         % (A.shape, ref_matrix.shape))

    u, w, vt = svd(A.T.dot(ref_matrix))

    R = u.dot(vt)  # Get the rotation matrix, including reflections

    if not reflection and scipy.linalg.det(R) < 0:
        # To remove reflection, we change the sign of the rightmost column of
        # u (or v) and the scalar associated with that column
        u[:, -1] *= -1
        w[-1] *= -1
        R = u.dot(vt)

    scale = w.sum()  # Get the scaled difference

    return R, scale
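# Usage sketch for the reflection flag of the squid orthogonal_procrustes above, assuming
# the module-level imports of its source (numpy as np, scipy, svd, cast) are available.
# When the best orthogonal fit is a mirror image, reflection=False forces a proper rotation.
import numpy as np

ref = np.array([[1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0],
                [0.0, 0.0, 1.0],
                [1.0, 2.0, 3.0]])
mirror = np.diag([1.0, 1.0, -1.0])     # improper transform (det = -1)
A = ref.dot(mirror)                    # reflected copy of the reference points

R_rot, _ = orthogonal_procrustes(A, ref, reflection=False)
R_ref, _ = orthogonal_procrustes(A, ref, reflection=True)
print(np.isclose(np.linalg.det(R_rot), 1.0))   # True: result is forced to a proper rotation
print(np.allclose(A.dot(R_ref), ref))          # True: with reflections allowed, the fit is exact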
def pinv2(a, rank=None, cond=None, rcond=None, return_rank=False, check_finite=True):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate a generalized inverse of a matrix using its
    singular-value decomposition and including all 'large' singular
    values.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    rank : int, float('inf') or None, optional
        If an integer, keep exactly the ``rank`` largest singular values in
        the pseudo-inverse. If ``float('inf')``, keep all singular values but
        raise those below ``cond`` up to ``cond`` (``cond`` must then be
        given). If None, the effective rank is determined from
        ``cond``/``rcond``.
    cond, rcond : float or None
        Cutoff for 'small' singular values; singular values smaller than this
        value are considered as zero. If both are omitted, the default value
        ``max(M,N)*largest_singular_value*eps`` is used where ``eps`` is the
        machine precision value of the datatype of ``a``.

        .. versionchanged:: 1.3.0
            Previously the default cutoff value was just ``eps*f`` where ``f``
            was ``1e3`` for single precision and ``1e6`` for double precision.

    return_rank : bool, optional
        If True, return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or
        NaNs.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Examples
    --------
    >>> from scipy import linalg
    >>> a = np.random.randn(9, 6)
    >>> B = linalg.pinv2(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True

    """
    a = _asarray_validated(a, check_finite=check_finite)
    u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)

    if rank is None:
        if rcond is not None:
            cond = np.max(s) * rcond
        if cond in [None, -1]:
            t = u.dtype.char.lower()
            cond = np.max(s) * max(a.shape) * np.finfo(t).eps
        rank = np.sum(s > cond)
    elif rank == float('inf'):
        assert (cond is not None)
        rank = len(s)
        s[s < cond] = cond

    print(rank, flush=True)

    u = u[:, :rank]
    u /= s[:rank]
    B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))

    if return_rank:
        return B, rank
    else:
        return B
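# Usage sketch for the modified pinv2 above, assuming numpy is imported as np and the
# scipy-internal helpers it relies on (_asarray_validated, decomp_svd) are in scope as in
# scipy.linalg. With an explicit integer rank, only that many singular values enter the
# pseudo-inverse; with rank=None the behavior matches the ordinary pseudo-inverse.
a = np.vander(np.arange(1.0, 7.0), 4)     # a 6 x 4 matrix of full column rank
B_full = pinv2(a)                         # ordinary pseudo-inverse, rank inferred from cond
B_trunc = pinv2(a, rank=2)                # keep only the two largest singular values
print(np.allclose(a @ B_full @ a, a))     # True: Moore-Penrose identity holds
print(B_trunc.shape)                      # (4, 6): truncated pseudo-inverse, same shape as B_full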