def multi_tissue_basis(gtab, sh_order, iso_comp):
    """Construct the design matrix for the multi-shell multi-tissue CSD model.

    Parameters
    ----------
    gtab : GradientTable
    sh_order : int
    iso_comp: int
        Number of tissue compartments for running the MSMT-CSD. Minimum
        number of compartments required is 2.

    Returns
    -------
    B : ndarray
        Matrix of the spherical harmonics model used to fit the data
    m : int ``|m| <= n``
        The order of the harmonic.
    n : int ``>= 0``
        The degree of the harmonic.
    """
    if iso_comp < 2:
        raise ValueError(
            "Multi-tissue CSD requires at least 2 tissue compartments")

    _, theta, phi = geo.cart2sphere(*gtab.gradients.T)
    m, n = shm.sph_harm_ind_list(sh_order)
    B = shm.real_sph_harm(m, n, theta[:, None], phi[:, None])

    # Anisotropic terms carry no signal on b0 measurements: zero them out.
    B[np.ix_(gtab.b0s_mask, n > 0)] = 0.

    # Prepend one constant (isotropic) column per tissue compartment.
    iso = np.full((B.shape[0], iso_comp), SH_CONST)
    return np.concatenate([iso, B], axis=1), m, n
def test_sph_harm_ind_list():
    """sph_harm_ind_list(8) yields matching, even-degree index arrays."""
    orders, degrees = sph_harm_ind_list(8)
    assert_equal(orders.shape, degrees.shape)
    # An order-8 symmetric basis has (8 + 1) * (8 + 2) / 2 = 45 coefficients.
    assert_equal(orders.shape, (45,))
    # |m| <= n holds for every coefficient.
    assert_true(np.all(np.abs(orders) <= degrees))
    # Only even degrees appear in the symmetric basis.
    assert_array_equal(degrees % 2, 0)
    # Odd sh_order values are rejected.
    assert_raises(ValueError, sph_harm_ind_list, 1)
def sh_smooth(data, bvals, bvecs, sh_order=4, similarity_threshold=50,
              regul=0.006):
    """Smooth the raw diffusion signal with spherical harmonics.

    Parameters
    ----------
    data : ndarray
        The diffusion data to smooth.
    bvals : ndarray
        b-value of each volume in ``data``.
    bvecs : ndarray
        Gradient directions; indexed as ``bvecs[:, idx]`` below, so the
        expected shape is (3, number of volumes).
    sh_order : int, default 4
        Order of the spherical harmonics to fit.
    similarity_threshold : int, default 50
        All b-values such that |b_1 - b_2| < similarity_threshold
        will be considered as identical for smoothing purpose.
        Must not exceed 200.
    regul : float, default 0.006
        Amount of regularization to apply to sh coefficients computation.

    Returns
    -------
    pred_sig : ndarray
        The smoothed diffusion data, fitted through spherical harmonics.
    """
    if similarity_threshold > 200:
        raise ValueError("similarity_threshold = {}, which is higher than 200,"
                         " please use a lower value".format(similarity_threshold))

    m, n = sph_harm_ind_list(sh_order)
    # Laplace-Beltrami eigenvalues, used as smoothing weights below.
    L = -n * (n + 1)
    where_b0s = bvals == 0
    pred_sig = np.zeros_like(data, dtype=np.float32)

    # Round similar bvals together for identifying similar shells
    rounded_bvals = np.zeros_like(bvals)
    for unique_bval in np.unique(bvals):
        idx = np.abs(unique_bval - bvals) < similarity_threshold
        rounded_bvals[idx] = unique_bval

    # process each b-value separately
    for unique_bval in np.unique(rounded_bvals):
        idx = rounded_bvals == unique_bval

        # Just give back the signal for the b0s since we can't really
        # do anything about it
        if np.all(idx == where_b0s):
            if np.sum(where_b0s) > 1:
                pred_sig[..., idx] = np.mean(data[..., idx], axis=-1,
                                             keepdims=True)
            else:
                pred_sig[..., idx] = data[..., idx]
            continue

        x, y, z = bvecs[:, idx]
        r, theta, phi = cart2sphere(x, y, z)

        # Find the sh coefficients to smooth the signal
        B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])
        invB = smooth_pinv(B_dwi, np.sqrt(regul) * L)
        sh_coeff = np.dot(data[..., idx], invB.T)

        # Find the smoothed signal from the sh fit for the given gtab
        pred_sig[..., idx] = np.dot(sh_coeff, B_dwi.T)

    return pred_sig
def test_sph_harm_ind_list():
    """Index arrays of the order-8 symmetric basis are consistent."""
    m_arr, n_arr = sph_harm_ind_list(8)
    # Shapes must agree and match the expected coefficient count.
    assert_equal(m_arr.shape, n_arr.shape)
    assert_equal(m_arr.shape, (45,))
    # Every order is bounded by its degree and degrees are even.
    assert_true(np.all(np.abs(m_arr) <= n_arr))
    assert_array_equal(n_arr % 2, 0)
    # An odd sh_order must raise.
    assert_raises(ValueError, sph_harm_ind_list, 1)
def main():
    """Script entry point: compute RISH features from an SH image and save
    one NIfTI volume per SH order, named ``<out_prefix><order>.nii.gz``."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_sh, optional=args.mask)

    # Load data
    sh_img = nib.load(args.in_sh)
    sh = sh_img.get_fdata(dtype=np.float32)

    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    # Precompute output filenames to check if they exist
    sh_order = order_from_ncoef(sh.shape[-1], full_basis=args.full_basis)
    _, order_ids = sph_harm_ind_list(sh_order, full_basis=args.full_basis)
    orders = sorted(np.unique(order_ids))
    output_fnames = ["{}{}.nii.gz".format(args.out_prefix, i)
                     for i in orders]
    assert_outputs_exist(parser, args, output_fnames)

    # Compute RISH features
    rish, final_orders = compute_rish(sh, mask, full_basis=args.full_basis)

    # Make sure the precomputed orders match the orders returned
    assert np.all(orders == np.array(final_orders))

    # Save each RISH feature as a separate file
    for i, fname in enumerate(output_fnames):
        nib.save(nib.Nifti1Image(rish[..., i], sh_img.affine), fname)
def forward_sdeconv_mat(r_rh, sh_order):
    """ Build forward spherical deconvolution matrix

    Parameters
    ----------
    r_rh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
        ndarray of rotational harmonics coefficients for the single fiber
        response function
    sh_order : int
        maximal SH order

    Returns
    -------
    R : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,
        ``(sh_order + 1)*(sh_order + 2)/2``)
        Diagonal matrix pairing each SH coefficient of degree ``l`` with the
        response rotational-harmonic coefficient of that same degree.
    """
    m, n = sph_harm_ind_list(sh_order)
    # Only even degrees exist in the symmetric basis, so the response
    # coefficient of degree l lives at index l // 2.  The previous loop
    # indexed with ``l / 2``, which is a float under Python 3 and fails as
    # an array index.  Vectorizing on the degree array ``n`` uses integer
    # division and pairs every coefficient with its degree regardless of
    # how the basis is ordered (it also removes the inner loop that
    # shadowed ``m``).
    return np.diag(r_rh[n // 2])
def test_hat_and_lcr():
    """hat() reproduces fitted values; lcr_matrix() gives centered,
    standardized residuals in a single matrix product."""
    hemi = hemi_icosahedron.subdivide(3)
    m, n = sph_harm_ind_list(8)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        B = real_sh_descoteaux_from_index(m, n, hemi.theta[:, None],
                                          hemi.phi[:, None])

    # Projecting the basis through its own hat matrix must be a no-op.
    H = hat(B)
    assert_array_almost_equal(B, np.dot(H, B))

    # Build the reference residuals by hand.
    R = lcr_matrix(H)
    d = np.arange(len(hemi.theta))
    resid = d - np.dot(H, d)
    resid /= np.sqrt(1 - H.diagonal())
    resid -= resid.mean()

    assert_array_almost_equal(resid, np.dot(R, d))
    assert_array_almost_equal(resid, np.dot(d, R.T))
def gen_dirac(pol, azi, sh_order):
    """ Generate Dirac delta function orientated in (theta, phi) = (azi, pol)
    on the sphere. The spherical harmonics (SH) representation of this
    Dirac is returned.

    Parameters
    ----------
    pol : float [0, pi]
        The polar (colatitudinal) coordinate (phi)
    azi : float [0, 2*pi]
        The azimuthal (longitudinal) coordinate (theta)
    sh_order : int
        maximal SH order of the SH representation

    Returns
    -------
    dirac : ndarray (``(sh_order + 1)(sh_order + 2)/2``,)
        SH coefficients representing the Dirac function
    """
    m, n = sph_harm_ind_list(sh_order)
    dirac = np.zeros(m.shape)
    i = 0
    for l in np.arange(0, sh_order + 1, 2):
        # NOTE: the inner loop variable shadows the index array ``m``
        # computed above; harmless here since ``m`` is not used afterwards.
        for m in np.arange(-l, l + 1):
            # Only the m == 0 coefficients are populated; all other
            # entries of ``dirac`` stay zero.
            if m == 0:
                dirac[i] = real_sph_harm(0, l, azi, pol)
            i = i + 1
    return dirac
def get_deconv_matrix(gtab, response, sh_order):
    """Compute the deconvolution matrices for a single-fiber response.

    Attributes:
        gtab (dipy GradientTable): the gradient of the dwi in wich the
            response is represented.
        response (float[4]): first 3 elements: eigenvalues, 4th: mean b0 value
        sh_order (int): the sh_order of the DWI used for the deconvolution
    Returns:
        R: forward spherical deconvolution matrix
        r_rh: rotational harmonics of the single-fiber response
        B_dwi (np.array(nb_sh_coeff, nb_dwi_directions)): matrix to fit
            DWi->SH
    """
    m, n = sph_harm_ind_list(sh_order)
    B_dwi, _ = get_B_matrix(gtab, sh_order)

    # Simulate the single-fiber signal, then express it in SH and
    # rotational harmonics.
    S_r = estimate_response(gtab, response[0:3], response[3])
    r_sh = np.linalg.lstsq(B_dwi, S_r[~gtab.b0s_mask], rcond=-1)[0]
    r_rh = sh_to_rh(r_sh, m, n)

    R = forward_sdeconv_mat(r_rh, n)
    return R, r_rh, B_dwi
def test_smooth_pinv():
    """smooth_pinv matches the closed-form regularized pseudo-inverse."""
    hemi = hemi_icosahedron.subdivide(2)
    m, n = sph_harm_ind_list(4)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        B = real_sh_descoteaux_from_index(m, n, hemi.theta[:, None],
                                          hemi.phi[:, None])

    # Zero regularization: plain least-squares pseudo-inverse.
    L = np.zeros(len(m))
    assert_array_almost_equal(smooth_pinv(B, L),
                              np.dot(npl.inv(np.dot(B.T, B)), B.T))

    # Laplace-Beltrami style weights.
    L = n * (n + 1) * .05
    C = smooth_pinv(B, L)
    D = np.dot(npl.inv(np.dot(B.T, B) + np.diag(L) ** 2), B.T)
    assert_array_almost_equal(C, D)

    # Arbitrary per-coefficient weights.
    L = np.arange(len(n)) * .05
    C = smooth_pinv(B, L)
    D = np.dot(npl.inv(np.dot(B.T, B) + np.diag(L) ** 2), B.T)
    assert_array_almost_equal(C, D)
def mats_odfdeconv(sphere, basis=None, ratio=3 / 15., sh_order=8, lambda_=1.,
                   tau=0.1, r2=True):
    """Build the matrices used by ODF spherical deconvolution.

    Parameters
    ----------
    sphere : Sphere
        Sphere whose vertices are used to evaluate the SH basis.
    basis : {None, str}
        Key into ``sph_harm_lookup`` selecting the SH basis implementation.
    ratio : float
        Ratio of the smallest vs largest eigenvalue of the single-fiber
        response function, forwarded to ``forward_sdt_deconv_mat``.
    sh_order : int
        Maximal SH order.
    lambda_ : float
        Regularization weight.  NOTE(review): the rescaled ``lambda_``
        computed below is neither returned nor stored — confirm whether it
        should be part of the return value.
    tau : float
        NOTE(review): currently unused in this function; presumably kept
        for interface compatibility with the deconvolution routines.
    r2 : bool
        Forwarded as ``r2_term`` to ``forward_sdt_deconv_mat``.

    Returns
    -------
    R : ndarray
        SDT deconvolution matrix.
    B_reg : ndarray
        SH basis evaluated on the sphere directions (regularization matrix).
    """
    m, n = sph_harm_ind_list(sh_order)
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
    real_sym_sh = sph_harm_lookup[basis]
    B_reg, m, n = real_sym_sh(sh_order, theta[:, None], phi[:, None])
    R, P = forward_sdt_deconv_mat(ratio, sh_order, r2_term=r2)
    # Scale lambda to account for differences in the number of SH
    # coefficients and the number of mapped directions.
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]
    return R, B_reg
def test_forward_sdeconv_mat():
    """forward_sdeconv_mat maps each degree to its response coefficient."""
    # Small order-4 case with a hand-checked expected diagonal.
    _, degrees = sph_harm_ind_list(4)
    mat = forward_sdeconv_mat(np.array([0, 2, 4]), degrees)
    expected = np.diag([0, 2, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4])
    npt.assert_array_equal(mat, expected)

    # Order-8 case: with r_rh = [0, 2, 4, 6, 8] the diagonal equals the
    # degree array itself.
    sh_order = 8
    expected_size = (sh_order + 1) * (sh_order + 2) / 2
    r_rh = np.arange(0, sh_order + 1, 2)
    _, degrees = sph_harm_ind_list(sh_order)
    mat = forward_sdeconv_mat(r_rh, degrees)
    npt.assert_equal(mat.shape, (expected_size, expected_size))
    npt.assert_array_equal(mat.diagonal(), degrees)

    # Odd spherical harmonic degrees should raise a ValueError
    degrees[2] = 3
    npt.assert_raises(ValueError, forward_sdeconv_mat, r_rh, degrees)
def test_order_from_ncoeff():
    """order_from_ncoef inverts the coefficient count of each basis order."""
    for sh_order in [2, 4, 6, 8, 12, 24]:
        n_coef = sph_harm_ind_list(sh_order)[0].shape[0]
        npt.assert_equal(order_from_ncoef(n_coef), sh_order)
def test_order_from_ncoeff():
    """Round-trip: basis size back to its spherical-harmonic order."""
    for sh_order in [2, 4, 6, 8, 12, 24]:
        n_coef = sph_harm_ind_list(sh_order)[0].shape[0]
        assert_equal(order_from_ncoef(n_coef), sh_order)
def __init__(self, acquisition_scheme, model, x0_vector=None, sh_order=8,
             unity_constraint=True, lambda_lb=0.):
    """Set up the spherical-deconvolution optimizer state.

    Parameters
    ----------
    acquisition_scheme : object
        Acquisition scheme the model is fitted against (stored as-is).
    model : object
        Multi-compartment model; must expose ``models``,
        ``volume_fractions_fixed`` and (per sub-model) ``parameter_types``.
    x0_vector : ndarray, optional
        Initial parameter grid; only its last axis is inspected here.
        NOTE(review): ``np.reshape`` on ``None`` would fail — a real
        ``x0_vector`` appears to be required despite the default.
    sh_order : int
        Maximal (even) spherical-harmonic order of the FOD expansion.
    unity_constraint : bool
        Whether the volume fractions are constrained to sum to one
        (stored for use by the solver).
    lambda_lb : float
        Weight of the Laplace-Beltrami smoothness regularizer.
    """
    self.model = model
    self.acquisition_scheme = acquisition_scheme
    self.sh_order = sh_order
    # Number of SH coefficients of a symmetric basis of this order.
    self.Ncoef = int((sh_order + 2) * (sh_order + 1) // 2)
    self.Nmodels = len(self.model.models)
    self.lambda_lb = lambda_lb
    self.unity_constraint = unity_constraint
    self.sphere_jacobian = 2 * np.sqrt(np.pi)

    # Dense hemisphere on which FOD positivity is enforced.
    sphere = get_sphere('symmetric724')
    hemisphere = HemiSphere(phi=sphere.phi, theta=sphere.theta)
    self.L_positivity = real_sym_sh_mrtrix(self.sh_order,
                                           hemisphere.theta,
                                           hemisphere.phi)[0]

    # If the initial guess is all-NaN, the convolution kernel is the same
    # for every voxel and can be precomputed once.
    x0_single_voxel = np.reshape(x0_vector, (-1, x0_vector.shape[-1]))[0]
    if np.all(np.isnan(x0_single_voxel)):
        self.single_convolution_kernel = True
        self.A = self._construct_convolution_kernel(x0_single_voxel)
    else:
        self.single_convolution_kernel = False

    # Lay out the optimization vector: one SH block for the oriented
    # model, one scalar volume fraction per isotropic model.
    self.Ncoef_total = 0
    vf_array = []
    if self.model.volume_fractions_fixed:
        self.sh_start = 0
        self.Ncoef_total = self.Ncoef
        self.vf_indices = np.array([0])
    else:
        for model in self.model.models:
            if 'orientation' in model.parameter_types.values():
                self.sh_start = self.Ncoef_total
                sh_model = np.zeros(self.Ncoef)
                sh_model[0] = 1
                vf_array.append(sh_model)
                self.Ncoef_total += self.Ncoef
            else:
                vf_array.append(1)
                self.Ncoef_total += 1
        self.vf_indices = np.where(np.hstack(vf_array))[0]

    # Laplace-Beltrami smoothness weights applied to the SH block only.
    sh_l = sph_harm_ind_list(sh_order)[1]
    lb_weights = sh_l**2 * (sh_l + 1)**2  # laplace-beltrami
    if self.model.volume_fractions_fixed:
        self.R_smoothness = np.diag(lb_weights)
    else:
        diagonal = np.zeros(self.Ncoef_total)
        diagonal[self.sh_start:self.sh_start + self.Ncoef] = lb_weights
        self.R_smoothness = np.diag(diagonal)
def odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15., sh_order=8,
                    lambda_=1., tau=0.1):
    r""" Sharpen odfs using the spherical deconvolution transform [1]_

    This function can be used to sharpen any smooth ODF spherical function.
    In theory, this should only be used to sharpen QballModel ODFs, but in
    practice, one can play with the deconvolution ratio and sharpen almost
    any ODF-like spherical function. The constrained-regularization is
    stable and will not only sharp the ODF peaks but also regularize the
    noisy peaks.

    Parameters
    ----------
    odfs_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``, )
        array of odfs expressed as spherical harmonics coefficients
    sphere : Sphere
        sphere used to build the regularization matrix
    basis : {None, 'mrtrix', 'fibernav'}
        different spherical harmonic basis. None is the fibernav basis as
        well.
    ratio : float,
        ratio of the smallest vs the largest eigenvalue of the single
        prolate tensor response function
        (:math:`\frac{\lambda_2}{\lambda_1}`)
    sh_order : int
        maximal SH order of the SH representation
    lambda_ : float
        lambda parameter (see odfdeconv) (default 1.0)
    tau : float
        tau parameter in the L matrix construction (see odfdeconv)
        (default 0.1)

    Returns
    -------
    fodf_sh : ndarray
        sharpened odf expressed as spherical harmonics coefficients

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    """
    m, n = sph_harm_ind_list(sh_order)
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
    real_sym_sh = sph_harm_lookup[basis]
    # Basis evaluated on the regularization sphere; the returned m, n
    # overwrite the ones above (same order, basis-specific ordering).
    B_reg, m, n = real_sym_sh(sh_order, theta[:, None], phi[:, None])
    R, P = forward_sdt_deconv_mat(ratio, sh_order)

    # scale lambda to account for differences in the number of
    # SH coefficients and number of mapped directions
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]

    fodf_sh = np.zeros(odfs_sh.shape)
    # Deconvolve each voxel independently.
    for index in ndindex(odfs_sh.shape[:-1]):
        fodf_sh[index], num_it = odf_deconv(odfs_sh[index], sh_order, R,
                                            B_reg, lambda_=lambda_, tau=tau)
    return fodf_sh
def compute_odd_power_map(sh_coeffs, order, mask):
    """Voxel-wise ratio of the L2 norm of odd-degree SH coefficients to
    the L2 norm of all coefficients (0 outside the mask or where the
    signal is identically zero)."""
    _, degrees = sph_harm_ind_list(order, full_basis=True)
    is_odd = (degrees % 2 == 1).reshape((1, 1, 1, -1))

    # Norm of the odd part vs. norm of the whole coefficient vector.
    odd_norm = np.linalg.norm(sh_coeffs * is_odd, ord=2, axis=-1)
    total_norm = np.linalg.norm(sh_coeffs, ord=2, axis=-1)

    asym_map = np.zeros(sh_coeffs.shape[:-1])
    valid = np.logical_and(total_norm > 0, mask)
    asym_map[valid] = odd_norm[valid] / total_norm[valid]
    return asym_map
def test_order_from_ncoeff():
    """order_from_ncoef inverts the coefficient count, symmetric and full."""
    for sh_order in [2, 4, 6, 8, 12, 24]:
        # Symmetric (even-degree-only) basis.
        n_coef = sph_harm_ind_list(sh_order)[0].shape[0]
        assert_equal(order_from_ncoef(n_coef), sh_order)

        # Full basis, odd degrees included.
        n_coef_full = sph_harm_full_ind_list(sh_order)[0].shape[0]
        assert_equal(order_from_ncoef(n_coef_full, True), sh_order)
def __init__(self, acquisition_scheme, model, x0_vector=None, sh_order=8,
             lambda_pos=1., lambda_lb=5e-4, tau=0.1, max_iter=50,
             unity_constraint=True, init_sh_order=4):
    """Set up the CSD optimizer state.

    Parameters
    ----------
    acquisition_scheme : object
        Acquisition scheme the model is fitted against (stored as-is).
    model : object
        Multi-compartment model; ``volume_fractions_fixed`` must be True —
        this optimizer cannot estimate volume fractions.
    x0_vector : ndarray, optional
        Initial parameter grid; if its first voxel is all-NaN the
        convolution kernel is identical everywhere and precomputed once.
    sh_order : int
        Maximal (even) SH order of the FOD expansion.
    lambda_pos : float
        Weight of the positivity constraint.
    lambda_lb : float
        Weight of the Laplace-Beltrami smoothness regularizer.
    tau : float
        Threshold parameter for the positivity constraint.
    max_iter : int
        Maximum number of iterations of the solver.
    unity_constraint : bool
        Whether volume fractions must sum to one (stored for the solver).
    init_sh_order : int
        SH order used for the initial low-order estimate.
    """
    self.model = model
    self.acquisition_scheme = acquisition_scheme
    self.sh_order = sh_order
    # Coefficient counts for the final and the initial SH expansions.
    self.Ncoef = int((sh_order + 2) * (sh_order + 1) // 2)
    self.Ncoef4 = int((init_sh_order + 2) * (init_sh_order + 1) // 2)
    self.Nmodels = len(self.model.models)
    self.lambda_pos = lambda_pos
    self.lambda_lb = lambda_lb
    self.tau = tau
    self.max_iter = max_iter
    self.unity_constraint = unity_constraint
    self.sphere_jacobian = 1 / (2 * np.sqrt(np.pi))

    # step 1: prepare positivity grid on sphere
    sphere = get_sphere('symmetric724')
    hemisphere = HemiSphere(phi=sphere.phi, theta=sphere.theta)
    self.L_positivity = real_sym_sh_mrtrix(self.sh_order,
                                           hemisphere.theta,
                                           hemisphere.phi)[0]

    # Laplace-Beltrami smoothness weights on the SH coefficients.
    sh_l = sph_harm_ind_list(sh_order)[1]
    self.R_smoothness = np.diag(sh_l**2 * (sh_l + 1)**2)

    # check if there is only one model. If so, precompute rh array.
    if self.model.volume_fractions_fixed:
        x0_single_voxel = np.reshape(
            x0_vector, (-1, x0_vector.shape[-1]))[0]
        if np.all(np.isnan(x0_single_voxel)):
            self.single_convolution_kernel = True
            parameters_dict = self.model.parameter_vector_to_parameters(
                x0_single_voxel)
            self.A = self.model._construct_convolution_kernel(
                **parameters_dict)
            self.AT_A = np.dot(self.A.T, self.A)
        else:
            self.single_convolution_kernel = False
    else:
        msg = "This CSD optimizer cannot estimate volume fractions."
        raise ValueError(msg)
def compute_cos_asym_map(sh_coeffs, order, mask):
    """Voxel-wise cosine-based asymmetry map of a full-basis SH image.

    Uses the fact that point reflection multiplies a degree-l SH
    coefficient by (-1)^l, so the signed energy sum measures symmetry.
    """
    _, degrees = sph_harm_ind_list(order, full_basis=True)
    # (-1)^l flips the sign of odd-degree contributions.
    sign = np.power(-1.0, degrees).reshape((1, 1, 1, len(degrees)))

    sh_sq = sh_coeffs**2
    total_energy = sh_sq.sum(axis=-1)
    valid = np.logical_and(total_energy > 0., mask)

    cos_asym_map = np.zeros(sh_coeffs.shape[:-1])
    cos_asym_map[valid] = (np.sum(sh_sq * sign, axis=-1)[valid] /
                           total_energy[valid])
    return np.sqrt(1 - cos_asym_map**2) * mask
def compute_asymmetry_map(sh_coeffs):
    """Voxel-wise asymmetry map of a full-basis SH image.

    The SH order is inferred from the number of coefficients; voxels with
    zero total energy map to zero.
    """
    order = order_from_ncoef(sh_coeffs.shape[-1], full_basis=True)
    _, degrees = sph_harm_ind_list(order, full_basis=True)
    # (-1)^l flips the sign of odd-degree contributions.
    sign = np.power(-1.0, degrees).reshape((1, 1, 1, len(degrees)))

    sh_sq = sh_coeffs**2
    total_energy = sh_sq.sum(axis=-1)
    valid = total_energy > 0.

    asym_map = np.zeros(sh_coeffs.shape[:-1])
    asym_map[valid] = (np.sum(sh_sq * sign, axis=-1)[valid] /
                       total_energy[valid])
    return np.sqrt(1 - asym_map**2) * valid
def test_faster_sph_harm():
    """Check spherical_harmonics against scipy's implementation on a fixed
    set of 64 angles (order-8 symmetric basis), for both code paths."""
    sh_order = 8
    m, n = sph_harm_ind_list(sh_order)

    # Fixed polar angles.
    theta = np.array([1.61491146, 0.76661665, 0.11976141, 1.20198246,
                      1.74066314, 1.5925956, 2.13022055, 0.50332859,
                      1.19868988, 0.78440679, 0.50686938, 0.51739718,
                      1.80342999, 0.73778957, 2.28559395, 1.29569064,
                      1.86877091, 0.39239191, 0.54043037, 1.61263047,
                      0.72695314, 1.90527318, 1.58186125, 0.23130073,
                      2.51695237, 0.99835604, 1.2883426, 0.48114057,
                      1.50079318, 1.07978624, 1.9798903, 2.36616966,
                      2.49233299, 2.13116602, 1.36801518, 1.32932608,
                      0.95926683, 1.070349, 0.76355762, 2.07148422,
                      1.50113501, 1.49823314, 0.89248164, 0.22187079,
                      1.53805373, 1.9765295, 1.13361568, 1.04908355,
                      1.68737368, 1.91732452, 1.01937457, 1.45839,
                      0.49641525, 0.29087155, 0.52824641, 1.29875871,
                      1.81023541, 1.17030475, 2.24953206, 1.20280498,
                      0.76399964, 2.16109722, 0.79780421, 0.87154509])
    # Fixed azimuthal angles.
    phi = np.array([-1.5889514, -3.11092733, -0.61328674, -2.4485381,
                    2.88058822, 2.02165946, -1.99783366, 2.71235211,
                    1.41577992, -2.29413676, -2.24565773, -1.55548635,
                    2.59318232, -1.84672472, -2.33710739, 2.12111948,
                    1.87523722, -1.05206575, -2.85381987, -2.22808984,
                    2.3202034, -2.19004474, -1.90358372, 2.14818373,
                    3.1030696, -2.86620183, -2.19860123, -0.45468447,
                    -3.0034923, 1.73345011, -2.51716288, 2.49961525,
                    -2.68782986, 2.69699056, 1.78566133, -1.59119705,
                    -2.53378963, -2.02476738, 1.36924987, 2.17600517,
                    2.38117241, 2.99021511, -1.4218007, -2.44016802,
                    -2.52868164, 3.01531658, 2.50093627, -1.70745826,
                    -2.7863931, -2.97359741, 2.17039906, 2.68424643,
                    1.77896086, 0.45476215, 0.99734418, -2.73107896,
                    2.28815009, 2.86276506, 3.09450274, -3.09857384,
                    -1.06955885, -2.83826831, 1.81932195, 2.81296654])

    # scipy-backed path agrees with the scipy reference to 8 decimals.
    sh = spherical_harmonics(m, n, theta[:, None], phi[:, None])
    sh2 = sph_harm_sp(m, n, theta[:, None], phi[:, None])
    assert_array_almost_equal(sh, sh2, 8)

    # The non-scipy code path must produce the same values.
    sh = spherical_harmonics(m, n, theta[:, None], phi[:, None],
                             use_scipy=False)
    assert_array_almost_equal(sh, sh2, 8)
def __init__(self, gtab, response, sh_order, lambda_=1, tau=0.1, size=3,
             method='center'):
    """
    Attributes:
        gtab (dipy GradientTable): the gradient of the dwi in which the
            response is represented.
        response (float[4]): first 3 elements: eigenvalues,
            4th one: mean b0 value
        sh_order (int): the sh_order of the DWI used for the deconvolution
        lambda_ (float): the coefficient to rescale the loss;
            better to keep at 1. and change the 'coeff' param
            when defining the loss
        tau (float): the coefficient to rescale the threshold used
        size (int): the size of the subsample taken to compute the loss,
            takes size**3 voxels
        method (str): the method to take the subsample (random or center)
    """
    super(NegativefODFLoss, self).__init__()
    m, n = sph_harm_ind_list(sh_order)
    # Regularization sphere with 362 directions.
    self.sphere = get_sphere('symmetric362')
    r, theta, phi = cart2sphere(self.sphere.x, self.sphere.y,
                                self.sphere.z)
    # B_reg = real_sph_harm(m, n, theta[:, None], phi[:, None])
    B_reg = shm.get_B_matrix(theta=theta, phi=phi, sh_order=sh_order)
    R, r_rh, B_dwi = shm.get_deconv_matrix(gtab, response, sh_order)
    # scale lambda_ to account for differences in the number of
    # SH coefficients and number of mapped directions
    # This is exactly what is done in [4]_
    lambda_ = (lambda_ * R.shape[0] * r_rh[0] /
               (np.sqrt(B_reg.shape[0]) * np.sqrt(362.)))
    B_reg = torch.FloatTensor(B_reg * lambda_)
    # Non-trainable parameter: moves with the module across devices but
    # is excluded from gradient updates.
    self.B_reg = nn.Parameter(B_reg, requires_grad=False)
    self.tau = tau
    self.size = size
    self.method = method
def sh_smooth(data, gtab, sh_order=4):
    """Smooth the raw diffusion signal with spherical harmonics.

    Parameters
    ----------
    data : ndarray
        The diffusion data to smooth.
    gtab : gradient table object
        Corresponding gradients table object to data.
    sh_order : int, default 4
        Order of the spherical harmonics to fit.

    Returns
    -------
    pred_sig : ndarray
        The smoothed diffusion data, fitted through spherical harmonics.
    """
    m, n = sph_harm_ind_list(sh_order)
    where_b0s = lazy_index(gtab.b0s_mask)
    where_dwi = lazy_index(~gtab.b0s_mask)

    x, y, z = gtab.gradients[where_dwi].T
    r, theta, phi = cart2sphere(x, y, z)

    # Find the sh coefficients to smooth the signal.
    B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])
    sh_shape = (np.prod(data.shape[:-1]), -1)
    # rcond=-1 keeps the legacy lstsq cutoff, silences the numpy
    # FutureWarning, and matches the other lstsq calls in this project.
    sh_coeff = np.linalg.lstsq(B_dwi,
                               data[..., where_dwi].reshape(sh_shape).T,
                               rcond=-1)[0]

    # Find the smoothed signal from the sh fit for the given gtab.
    smoothed_signal = np.dot(B_dwi, sh_coeff).T.reshape(
        data.shape[:-1] + (-1, ))
    pred_sig = np.zeros(smoothed_signal.shape[:-1] + (gtab.bvals.shape[0], ))
    pred_sig[..., ~gtab.b0s_mask] = smoothed_signal

    # Just give back the signal for the b0s since we can't really do
    # anything about it.
    if np.sum(gtab.b0s_mask) > 1:
        # keepdims=True so the mean broadcasts across every b0 volume;
        # without it the assignment into a (..., nb0) slice raises a
        # shape mismatch whenever there is more than one b0 (the
        # bvals/bvecs variant of this function already does this).
        pred_sig[..., where_b0s] = np.mean(data[..., where_b0s], axis=-1,
                                           keepdims=True)
    else:
        pred_sig[..., where_b0s] = data[..., where_b0s]

    return pred_sig
def main():
    """Script entry point: apply angle-aware bilateral filtering to an SH
    image and save the filtered (asymmetric) SH, plus optionally its
    symmetric part."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    # Checking args
    outputs = [args.out_sh]
    if args.out_sym:
        outputs.append(args.out_sym)
    assert_outputs_exist(parser, args, outputs)
    assert_inputs_exist(parser, args.in_sh)

    nbr_processes = validate_nbr_processes(parser, args)

    # Prepare data
    sh_img = nib.load(args.in_sh)
    data = sh_img.get_fdata(dtype=np.float32)

    # Infer order and basis fullness from the coefficient count.
    sh_order, full_basis = get_sh_order_and_fullness(data.shape[-1])

    t0 = time.perf_counter()
    logging.info('Executing angle-aware bilateral filtering.')
    asym_sh = angle_aware_bilateral_filtering(
        data, sh_order=sh_order,
        sh_basis=args.sh_basis,
        in_full_basis=full_basis,
        sphere_str=args.sphere,
        sigma_spatial=args.sigma_spatial,
        sigma_angular=args.sigma_angular,
        sigma_range=args.sigma_range,
        use_gpu=args.use_gpu,
        nbr_processes=nbr_processes)
    t1 = time.perf_counter()
    logging.info('Elapsed time (s): {0}'.format(t1 - t0))

    logging.info('Saving filtered SH to file {0}.'.format(args.out_sh))
    nib.save(nib.Nifti1Image(asym_sh, sh_img.affine), args.out_sh)

    if args.out_sym:
        # The filter output is in the full basis; keeping only the
        # even-degree coefficients yields the symmetric part.
        _, orders = sph_harm_ind_list(sh_order, full_basis=True)
        logging.info('Saving symmetric SH to file {0}.'.format(args.out_sym))
        nib.save(nib.Nifti1Image(asym_sh[..., orders % 2 == 0],
                                 sh_img.affine), args.out_sym)
def test_faster_sph_harm():
    """Check spherical_harmonics against scipy's implementation on a fixed
    set of 64 angles (order-8 symmetric basis)."""
    sh_order = 8
    m, n = sph_harm_ind_list(sh_order)

    # Fixed polar angles.
    theta = np.array([1.61491146, 0.76661665, 0.11976141, 1.20198246,
                      1.74066314, 1.5925956, 2.13022055, 0.50332859,
                      1.19868988, 0.78440679, 0.50686938, 0.51739718,
                      1.80342999, 0.73778957, 2.28559395, 1.29569064,
                      1.86877091, 0.39239191, 0.54043037, 1.61263047,
                      0.72695314, 1.90527318, 1.58186125, 0.23130073,
                      2.51695237, 0.99835604, 1.2883426, 0.48114057,
                      1.50079318, 1.07978624, 1.9798903, 2.36616966,
                      2.49233299, 2.13116602, 1.36801518, 1.32932608,
                      0.95926683, 1.070349, 0.76355762, 2.07148422,
                      1.50113501, 1.49823314, 0.89248164, 0.22187079,
                      1.53805373, 1.9765295, 1.13361568, 1.04908355,
                      1.68737368, 1.91732452, 1.01937457, 1.45839,
                      0.49641525, 0.29087155, 0.52824641, 1.29875871,
                      1.81023541, 1.17030475, 2.24953206, 1.20280498,
                      0.76399964, 2.16109722, 0.79780421, 0.87154509])
    # Fixed azimuthal angles.
    phi = np.array([-1.5889514, -3.11092733, -0.61328674, -2.4485381,
                    2.88058822, 2.02165946, -1.99783366, 2.71235211,
                    1.41577992, -2.29413676, -2.24565773, -1.55548635,
                    2.59318232, -1.84672472, -2.33710739, 2.12111948,
                    1.87523722, -1.05206575, -2.85381987, -2.22808984,
                    2.3202034, -2.19004474, -1.90358372, 2.14818373,
                    3.1030696, -2.86620183, -2.19860123, -0.45468447,
                    -3.0034923, 1.73345011, -2.51716288, 2.49961525,
                    -2.68782986, 2.69699056, 1.78566133, -1.59119705,
                    -2.53378963, -2.02476738, 1.36924987, 2.17600517,
                    2.38117241, 2.99021511, -1.4218007, -2.44016802,
                    -2.52868164, 3.01531658, 2.50093627, -1.70745826,
                    -2.7863931, -2.97359741, 2.17039906, 2.68424643,
                    1.77896086, 0.45476215, 0.99734418, -2.73107896,
                    2.28815009, 2.86276506, 3.09450274, -3.09857384,
                    -1.06955885, -2.83826831, 1.81932195, 2.81296654])

    # Agreement with the scipy reference to 8 decimals.
    sh = spherical_harmonics(m, n, theta[:, None], phi[:, None])
    sh2 = sph_harm_sp(m, n, theta[:, None], phi[:, None])
    assert_array_almost_equal(sh, sh2, 8)
def forward_sdt_deconv_mat(ratio, sh_order):
    r""" Build forward sharpening deconvolution transform (SDT) matrix

    Parameters
    ----------
    ratio : float
        ratio = $\frac{\lambda_2}{\lambda_1}$ of the single fiber response
        function
    sh_order : int
        spherical harmonic order

    Returns
    -------
    R : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,
        ``(sh_order + 1)*(sh_order + 2)/2``)
        SDT deconvolution matrix
    P : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,
        ``(sh_order + 1)*(sh_order + 2)/2``)
        Funk-Radon Transform (FRT) matrix
    """
    from scipy.integrate import quad  # hoisted out of the loop

    m, n = sph_harm_ind_list(sh_order)
    sdt = np.zeros(m.shape)  # SDT matrix
    frt = np.zeros(m.shape)  # FRT (Funk-Radon transform) q-ball matrix
    b = np.zeros(m.shape)
    bb = np.zeros(m.shape)

    # One deconvolution / FRT coefficient per (even) degree l, stored at
    # index l // 2.  Integer division is required: ``l / 2`` is a float
    # under Python 3 and fails as an array index.
    for l in np.arange(0, sh_order + 1, 2):
        sharp = quad(lambda z: lpn(l, z)[0][-1] *
                     np.sqrt(1 / (1 - (1 - ratio) * z * z)), -1., 1.)
        sdt[l // 2] = sharp[0]
        frt[l // 2] = 2 * np.pi * lpn(l, 0)[0][-1]

    # Expand the per-degree values to every (l, m) coefficient.
    i = 0
    for l in np.arange(0, sh_order + 1, 2):
        for m in np.arange(-l, l + 1):
            b[i] = sdt[l // 2]
            bb[i] = frt[l // 2]
            i = i + 1

    return np.diag(b), np.diag(bb)
def norm_of_laplacian_fod(self):
    """ Estimates the squared norm of the Laplacian of the FOD. Similar to
    the anisotropy index, a higher norm means there are larger higher-order
    coefficients in the FOD spherical harmonics expansion. This indicates
    the FOD is more anisotropic overall. This kind of measure was explored
    in e.g. [1]_.

    References
    ----------
    .. [1] Descoteaux, Maxime, et al. "Regularized, fast, and robust
        analytical Q-ball imaging." Magnetic Resonance in Medicine: An
        Official Journal of the International Society for Magnetic
        Resonance in Medicine 58.3 (2007): 497-510.
    """
    sh_coef = self.fitted_parameters['sh_coeff']
    # Laplace-Beltrami eigenvalue squared for each coefficient's degree.
    degrees = sph_harm_ind_list(self.model.sh_order)[1]
    lb_weights = (degrees * (degrees + 1)) ** 2
    return np.sum(sh_coef ** 2 * lb_weights, axis=-1)
def test_hat_and_lcr():
    """hat() is idempotent on the basis; lcr_matrix() yields centered,
    standardized residuals."""
    hemi = hemi_icosahedron.subdivide(3)
    m, n = sph_harm_ind_list(8)
    B = real_sph_harm(m, n, hemi.theta[:, None], hemi.phi[:, None])

    # Projecting the basis through its own hat matrix is a no-op.
    H = hat(B)
    assert_array_almost_equal(B, np.dot(H, B))

    # Reference residuals computed by hand.
    R = lcr_matrix(H)
    d = np.arange(len(hemi.theta))
    resid = d - np.dot(H, d)
    resid /= np.sqrt(1 - H.diagonal())
    resid -= resid.mean()

    assert_array_almost_equal(resid, np.dot(R, d))
    assert_array_almost_equal(resid, np.dot(d, R.T))
def test_hat_and_lcr():
    """Same hat/lcr invariants on a half unit sphere built from scratch."""
    v, e, f = create_half_unit_sphere(6)
    m, n = sph_harm_ind_list(8)
    r, pol, azi = cart2sphere(*v.T)
    B = real_sph_harm(m, n, azi[:, None], pol[:, None])

    # Hat matrix reproduces fitted values.
    H = hat(B)
    assert_array_almost_equal(B, np.dot(H, B))

    # Residual operator against a manual computation.
    R = lcr_matrix(H)
    d = np.arange(len(azi))
    resid = d - np.dot(H, d)
    resid /= np.sqrt(1 - H.diagonal())
    resid -= resid.mean()

    assert_array_almost_equal(resid, np.dot(R, d))
    assert_array_almost_equal(resid, np.dot(d, R.T))
def forward_sdt_deconv_mat(ratio, sh_order):
    r""" Build forward sharpening deconvolution transform (SDT) matrix

    Parameters
    ----------
    ratio : float
        ratio = $\frac{\lambda_2}{\lambda_1}$ of the single fiber response
        function
    sh_order : int
        spherical harmonic order

    Returns
    -------
    R : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,
        ``(sh_order + 1)*(sh_order + 2)/2``)
        SDT deconvolution matrix
    P : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,
        ``(sh_order + 1)*(sh_order + 2)/2``)
        Funk-Radon Transform (FRT) matrix
    """
    from scipy.integrate import quad  # hoisted out of the loop

    m, n = sph_harm_ind_list(sh_order)
    sdt = np.zeros(m.shape)  # SDT matrix
    frt = np.zeros(m.shape)  # FRT (Funk-Radon transform) q-ball matrix
    b = np.zeros(m.shape)
    bb = np.zeros(m.shape)

    # One deconvolution / FRT coefficient per (even) degree l, stored at
    # index l // 2.  Integer division is required: ``l / 2`` is a float
    # under Python 3 and fails as an array index.
    for l in np.arange(0, sh_order + 1, 2):
        sharp = quad(lambda z: lpn(l, z)[0][-1] *
                     np.sqrt(1 / (1 - (1 - ratio) * z * z)), -1., 1.)
        sdt[l // 2] = sharp[0]
        frt[l // 2] = 2 * np.pi * lpn(l, 0)[0][-1]

    # Expand the per-degree values to every (l, m) coefficient.
    i = 0
    for l in np.arange(0, sh_order + 1, 2):
        for m in np.arange(-l, l + 1):
            b[i] = sdt[l // 2]
            bb[i] = frt[l // 2]
            i = i + 1

    return np.diag(b), np.diag(bb)
def test_smooth_pinv():
    """smooth_pinv matches the closed-form regularized pseudo-inverse."""
    hemi = hemi_icosahedron.subdivide(2)
    m, n = sph_harm_ind_list(4)
    B = real_sph_harm(m, n, hemi.theta[:, None], hemi.phi[:, None])

    # Zero regularization: plain least-squares pseudo-inverse.
    L = np.zeros(len(m))
    assert_array_almost_equal(smooth_pinv(B, L),
                              np.dot(npl.inv(np.dot(B.T, B)), B.T))

    # Laplace-Beltrami style weights.
    L = n * (n + 1) * .05
    C = smooth_pinv(B, L)
    D = np.dot(npl.inv(np.dot(B.T, B) + np.diag(L) ** 2), B.T)
    assert_array_almost_equal(C, D)

    # Arbitrary per-coefficient weights.
    L = np.arange(len(n)) * .05
    C = smooth_pinv(B, L)
    D = np.dot(npl.inv(np.dot(B.T, B) + np.diag(L) ** 2), B.T)
    assert_array_almost_equal(C, D)
def test_smooth_pinv():
    """Regularized pseudo-inverse on a half unit sphere matches the
    explicit formula for three weight choices."""
    v, e, f = create_half_unit_sphere(3)
    m, n = sph_harm_ind_list(4)
    r, pol, azi = cart2sphere(*v.T)
    B = real_sph_harm(m, n, azi[:, None], pol[:, None])

    # No regularization -> ordinary pseudo-inverse.
    weights = np.zeros(len(m))
    assert_array_almost_equal(
        smooth_pinv(B, weights),
        np.dot(npl.inv(np.dot(B.T, B)), B.T))

    # Laplace-Beltrami weights.
    weights = n * (n + 1) * 0.05
    expected = np.dot(npl.inv(np.dot(B.T, B) + np.diag(weights) ** 2), B.T)
    assert_array_almost_equal(smooth_pinv(B, weights), expected)

    # Arbitrary increasing weights.
    weights = np.arange(len(n)) * 0.05
    expected = np.dot(npl.inv(np.dot(B.T, B) + np.diag(weights) ** 2), B.T)
    assert_array_almost_equal(smooth_pinv(B, weights), expected)
def test_smooth_pinv():
    """smooth_pinv equals the explicitly-built Tikhonov pseudo-inverse."""
    v, e, f = create_half_unit_sphere(3)
    m, n = sph_harm_ind_list(4)
    r, pol, azi = cart2sphere(*v.T)
    B = real_sph_harm(m, n, azi[:, None], pol[:, None])

    for weights in (np.zeros(len(m)),          # no regularization
                    n * (n + 1) * .05,         # Laplace-Beltrami weights
                    np.arange(len(n)) * .05):  # arbitrary weights
        C = smooth_pinv(B, weights)
        W = np.diag(weights)
        D = np.dot(npl.inv(np.dot(B.T, B) + W * W), B.T)
        assert_array_almost_equal(C, D)
def reconst_flow_core(flow):
    """Exercise a reconstruction workflow (CSD or CSA) end to end: run it
    for several SH orders, validate every generated output against the
    input volume shape, then check frf parsing (CSD) / bad-gradient
    handling (CSA) and the parallel code path."""
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_64D')
        volume, affine = load_nifti(data_path)
        # Whole-volume mask saved next to the outputs.
        mask = np.ones_like(volume[:, :, :, 0])
        mask_path = pjoin(out_dir, 'tmp_mask.nii.gz')
        save_nifti(mask_path, mask.astype(np.uint8), affine)

        reconst_flow = flow()
        for sh_order in [4, 6, 8]:
            if flow.get_short_name() == 'csd':
                reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                                 sh_order=sh_order,
                                 out_dir=out_dir, extract_pam_values=True)
            elif flow.get_short_name() == 'csa':
                reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                                 sh_order=sh_order,
                                 odf_to_sh_order=sh_order,
                                 out_dir=out_dir, extract_pam_values=True)

            # GFA is a scalar map over the spatial dimensions.
            gfa_path = reconst_flow.last_generated_outputs['out_gfa']
            gfa_data = load_nifti_data(gfa_path)
            npt.assert_equal(gfa_data.shape, volume.shape[:-1])

            # 5 peaks x 3 components per voxel.
            peaks_dir_path = \
                reconst_flow.last_generated_outputs['out_peaks_dir']
            peaks_dir_data = load_nifti_data(peaks_dir_path)
            npt.assert_equal(peaks_dir_data.shape[-1], 15)
            npt.assert_equal(peaks_dir_data.shape[:-1], volume.shape[:-1])

            peaks_idx_path = \
                reconst_flow.last_generated_outputs['out_peaks_indices']
            peaks_idx_data = load_nifti_data(peaks_idx_path)
            npt.assert_equal(peaks_idx_data.shape[-1], 5)
            npt.assert_equal(peaks_idx_data.shape[:-1], volume.shape[:-1])

            peaks_vals_path = \
                reconst_flow.last_generated_outputs['out_peaks_values']
            peaks_vals_data = load_nifti_data(peaks_vals_path)
            npt.assert_equal(peaks_vals_data.shape[-1], 5)
            npt.assert_equal(peaks_vals_data.shape[:-1], volume.shape[:-1])

            shm_path = reconst_flow.last_generated_outputs['out_shm']
            shm_data = load_nifti_data(shm_path)
            # Test that the number of coefficients is what you would expect
            # given the order of the sh basis:
            npt.assert_equal(shm_data.shape[-1],
                             sph_harm_ind_list(sh_order)[0].shape[0])
            npt.assert_equal(shm_data.shape[:-1], volume.shape[:-1])

            # The PAM file must agree with every extracted map.
            pam = load_peaks(reconst_flow.last_generated_outputs['out_pam'])
            npt.assert_allclose(pam.peak_dirs.reshape(peaks_dir_data.shape),
                                peaks_dir_data)
            npt.assert_allclose(pam.peak_values, peaks_vals_data)
            npt.assert_allclose(pam.peak_indices, peaks_idx_data)
            npt.assert_allclose(pam.shm_coeff, shm_data)
            npt.assert_allclose(pam.gfa, gfa_data)

        # Corrupt the gradients: b0 slightly off zero, random bvecs.
        bvals, bvecs = read_bvals_bvecs(bval_path, bvec_path)
        bvals[0] = 5.
        bvecs = generate_bvecs(len(bvals))

        tmp_bval_path = pjoin(out_dir, "tmp.bval")
        tmp_bvec_path = pjoin(out_dir, "tmp.bvec")
        np.savetxt(tmp_bval_path, bvals)
        np.savetxt(tmp_bvec_path, bvecs.T)
        reconst_flow._force_overwrite = True

        if flow.get_short_name() == 'csd':
            # frf may be given as a list, a string, or left to estimation.
            reconst_flow = flow()
            reconst_flow._force_overwrite = True
            reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, frf=[15, 5, 5])
            reconst_flow = flow()
            reconst_flow._force_overwrite = True
            reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, frf='15, 5, 5')
            reconst_flow = flow()
            reconst_flow._force_overwrite = True
            reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, frf=None)
            reconst_flow2 = flow()
            reconst_flow2._force_overwrite = True
            reconst_flow2.run(data_path, bval_path, bvec_path, mask_path,
                              out_dir=out_dir, frf=None,
                              roi_center=[5, 5, 5])
        else:
            # CSA with the corrupted gradients must warn, then fail.
            with npt.assert_raises(BaseException):
                npt.assert_warns(UserWarning, reconst_flow.run, data_path,
                                 tmp_bval_path, tmp_bvec_path, mask_path,
                                 out_dir=out_dir, extract_pam_values=True)

        # test parallel implementation
        reconst_flow = flow()
        reconst_flow._force_overwrite = True
        reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                         out_dir=out_dir, parallel=True,
                         nbr_processes=None)
        reconst_flow = flow()
        reconst_flow._force_overwrite = True
        reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                         out_dir=out_dir, parallel=True,
                         nbr_processes=2)
def __init__(self, gtab, response, reg_sphere=None, sh_order=8,
             lambda_=1, tau=0.1, convergence=50):
    r""" Constrained Spherical Deconvolution (CSD) [1]_.

    Spherical deconvolution computes a fiber orientation distribution
    (FOD), also called fiber ODF (fODF) [2]_, as opposed to a diffusion ODF
    as the QballModel or the CsaOdfModel. This results in a sharper angular
    profile with better angular resolution that is the best object to be
    used for later deterministic and probabilistic tractography [3]_.

    A sharp fODF is obtained because a single fiber *response* function is
    injected as *a priori* knowledge. The response function is often
    data-driven and is thus provided as input to the
    ConstrainedSphericalDeconvModel. It will be used as deconvolution
    kernel, as described in [1]_.

    Parameters
    ----------
    gtab : GradientTable
    response : tuple or AxSymShResponse object
        A tuple with two elements. The first is the eigen-values as an (3,)
        ndarray and the second is the signal value for the response
        function without diffusion weighting. This is to be able to
        generate a single fiber synthetic signal. The response function
        will be used as deconvolution kernel ([1]_)
    reg_sphere : Sphere (optional)
        sphere used to build the regularization B matrix.
        Default: 'symmetric362'.
    sh_order : int (optional)
        maximal spherical harmonics order. Default: 8
    lambda_ : float (optional)
        weight given to the constrained-positivity regularization part of
        the deconvolution equation (see [1]_). Default: 1
    tau : float (optional)
        threshold controlling the amplitude below which the corresponding
        fODF is assumed to be zero.  Ideally, tau should be set to
        zero. However, to improve the stability of the algorithm, tau is
        set to tau*100 % of the mean fODF amplitude (here, 10% by default)
        (see [1]_). Default: 0.1
    convergence : int
        Maximum number of iterations to allow the deconvolution to
        converge.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of
           the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical
           deconvolution
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [3] Côté, M-A., et al. Medical Image Analysis 2013. Tractometer:
           Towards validation of tractography pipelines
    .. [4] Tournier, J.D, et al. Imaging Systems and Technology
           2012. MRtrix: Diffusion Tractography in Crossing Fiber Regions
    """
    # Initialize the parent class:
    SphHarmModel.__init__(self, gtab)
    m, n = sph_harm_ind_list(sh_order)
    self.m, self.n = m, n
    # Index helpers separating b0 volumes from diffusion-weighted ones.
    self._where_b0s = lazy_index(gtab.b0s_mask)
    self._where_dwi = lazy_index(~gtab.b0s_mask)

    # Warn when the SH basis has more coefficients than there are DWI
    # measurements available to fit them.
    no_params = ((sh_order + 1) * (sh_order + 2)) / 2
    if no_params > np.sum(~gtab.b0s_mask):
        msg = "Number of parameters required for the fit are more "
        msg += "than the actual data points"
        warnings.warn(msg, UserWarning)

    x, y, z = gtab.gradients[self._where_dwi].T
    r, theta, phi = cart2sphere(x, y, z)
    # for the gradient sphere
    self.B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])

    # for the sphere used in the regularization positivity constraint
    if reg_sphere is None:
        self.sphere = small_sphere
    else:
        self.sphere = reg_sphere

    r, theta, phi = cart2sphere(
        self.sphere.x,
        self.sphere.y,
        self.sphere.z
    )
    self.B_reg = real_sph_harm(m, n, theta[:, None], phi[:, None])

    # Default response: prolate tensor eigenvalues with unit S0.
    if response is None:
        response = (np.array([0.0015, 0.0003, 0.0003]), 1)

    self.response = response
    if isinstance(response, AxSymShResponse):
        # Response already given in SH coefficients.
        r_sh = response.dwi_response
        self.response_scaling = response.S0
        n_response = response.n
        m_response = response.m
    else:
        # Build a synthetic single-fiber signal and project it onto the
        # SH basis to obtain the deconvolution kernel coefficients.
        self.S_r = estimate_response(gtab, self.response[0],
                                     self.response[1])
        r_sh = np.linalg.lstsq(self.B_dwi, self.S_r[self._where_dwi],
                               rcond=-1)[0]
        n_response = n
        m_response = m
        self.response_scaling = response[1]
    r_rh = sh_to_rh(r_sh, m_response, n_response)
    self.R = forward_sdeconv_mat(r_rh, n)

    # scale lambda_ to account for differences in the number of
    # SH coefficients and number of mapped directions
    # This is exactly what is done in [4]_
    lambda_ = (lambda_ * self.R.shape[0] * r_rh[0] /
               (np.sqrt(self.B_reg.shape[0]) * np.sqrt(362.)))
    self.B_reg *= lambda_
    self.sh_order = sh_order
    self.tau = tau
    self.convergence = convergence
    # Precompute the forward operator and its normal matrix for the fit.
    self._X = X = self.R.diagonal() * self.B_dwi
    self._P = np.dot(X.T, X)
def __init__(self, gtab, ratio, reg_sphere=None, sh_order=8, lambda_=1.,
             tau=0.1):
    r""" Spherical Deconvolution Transform (SDT) [1]_.

    The SDT computes a fiber orientation distribution (FOD) as opposed to a
    diffusion ODF as the QballModel or the CsaOdfModel. This results in a
    sharper angular profile with better angular resolution. The Constrained
    SDTModel is similar to the Constrained CSDModel but mathematically it
    deconvolves the q-ball ODF as opposed to the HARDI signal (see [1]_
    for a comparison and a thorough discussion).

    A sharp fODF is obtained because a single fiber *response* function is
    injected as *a priori* knowledge. In the SDTModel, this response is a
    single fiber q-ball ODF as opposed to a single fiber signal function
    for the CSDModel. The response function will be used as deconvolution
    kernel.

    Parameters
    ----------
    gtab : GradientTable
    ratio : float
        ratio of the smallest vs the largest eigenvalue of the single
        prolate tensor response function
    reg_sphere : Sphere
        sphere used to build the regularization B matrix
    sh_order : int
        maximal spherical harmonics order
    lambda_ : float
        weight given to the constrained-positivity regularization part of
        the deconvolution equation
    tau : float
        threshold (tau *mean(fODF)) controlling the amplitude below
        which the corresponding fODF is assumed to be zero.

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions.
    """
    SphHarmModel.__init__(self, gtab)
    m, n = sph_harm_ind_list(sh_order)
    self.m, self.n = m, n
    # Index helpers separating b0 volumes from diffusion-weighted ones.
    self._where_b0s = lazy_index(gtab.b0s_mask)
    self._where_dwi = lazy_index(~gtab.b0s_mask)

    # Warn when the SH basis has more coefficients than there are DWI
    # measurements available to fit them.
    no_params = ((sh_order + 1) * (sh_order + 2)) / 2
    if no_params > np.sum(~gtab.b0s_mask):
        msg = "Number of parameters required for the fit are more "
        msg += "than the actual data points"
        warnings.warn(msg, UserWarning)

    x, y, z = gtab.gradients[self._where_dwi].T
    r, theta, phi = cart2sphere(x, y, z)
    # for the gradient sphere
    self.B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])

    # for the odf sphere
    if reg_sphere is None:
        self.sphere = get_sphere('symmetric362')
    else:
        self.sphere = reg_sphere

    r, theta, phi = cart2sphere(
        self.sphere.x,
        self.sphere.y,
        self.sphere.z
    )
    self.B_reg = real_sph_harm(m, n, theta[:, None], phi[:, None])

    self.R, self.P = forward_sdt_deconv_mat(ratio, n)

    # scale lambda_ to account for differences in the number of
    # SH coefficients and number of mapped directions
    self.lambda_ = (lambda_ * self.R.shape[0] * self.R[0, 0] /
                    self.B_reg.shape[0])
    self.tau = tau
    self.sh_order = sh_order
def reconst_flow_core(flow):
    """Exercise a reconstruction workflow end to end (nibabel-based variant).

    Runs the given workflow class over several spherical-harmonic orders,
    checks the shape of every generated output map, validates that the
    saved PAM file round-trips the peak data, and exercises the error and
    frf code paths.

    Parameters
    ----------
    flow : Workflow class
        Workflow under test; must expose ``get_short_name()`` returning
        'csd' or 'csa'.
    """
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_64D')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()

        # Whole-volume mask saved next to the data so the flow can load it.
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = pjoin(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        reconst_flow = flow()
        for sh_order in [4, 6, 8]:
            if flow.get_short_name() == 'csd':
                reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                                 sh_order=sh_order,
                                 out_dir=out_dir, extract_pam_values=True)
            elif flow.get_short_name() == 'csa':
                reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                                 sh_order=sh_order,
                                 odf_to_sh_order=sh_order,
                                 out_dir=out_dir, extract_pam_values=True)

            gfa_path = reconst_flow.last_generated_outputs['out_gfa']
            gfa_data = nib.load(gfa_path).get_data()
            npt.assert_equal(gfa_data.shape, volume.shape[:-1])

            # 5 peaks x 3 direction components per voxel.
            peaks_dir_path =\
                reconst_flow.last_generated_outputs['out_peaks_dir']
            peaks_dir_data = nib.load(peaks_dir_path).get_data()
            npt.assert_equal(peaks_dir_data.shape[-1], 15)
            npt.assert_equal(peaks_dir_data.shape[:-1], volume.shape[:-1])

            peaks_idx_path = \
                reconst_flow.last_generated_outputs['out_peaks_indices']
            peaks_idx_data = nib.load(peaks_idx_path).get_data()
            npt.assert_equal(peaks_idx_data.shape[-1], 5)
            npt.assert_equal(peaks_idx_data.shape[:-1], volume.shape[:-1])

            peaks_vals_path = \
                reconst_flow.last_generated_outputs['out_peaks_values']
            peaks_vals_data = nib.load(peaks_vals_path).get_data()
            npt.assert_equal(peaks_vals_data.shape[-1], 5)
            npt.assert_equal(peaks_vals_data.shape[:-1], volume.shape[:-1])

            shm_path = reconst_flow.last_generated_outputs['out_shm']
            shm_data = nib.load(shm_path).get_data()
            # Test that the number of coefficients is what you would expect
            # given the order of the sh basis:
            npt.assert_equal(shm_data.shape[-1],
                             sph_harm_ind_list(sh_order)[0].shape[0])
            npt.assert_equal(shm_data.shape[:-1], volume.shape[:-1])

            # The saved PAM file must round-trip every extracted array.
            pam = load_peaks(reconst_flow.last_generated_outputs['out_pam'])
            npt.assert_allclose(pam.peak_dirs.reshape(peaks_dir_data.shape),
                                peaks_dir_data)
            npt.assert_allclose(pam.peak_values, peaks_vals_data)
            npt.assert_allclose(pam.peak_indices, peaks_idx_data)
            npt.assert_allclose(pam.shm_coeff, shm_data)
            npt.assert_allclose(pam.gfa, gfa_data)

        # Build a deliberately inconsistent bval/bvec pair (b0 replaced by a
        # small non-zero value, fresh random bvecs) to exercise error paths.
        bvals, bvecs = read_bvals_bvecs(bval_path, bvec_path)
        bvals[0] = 5.
        bvecs = generate_bvecs(len(bvals))
        tmp_bval_path = pjoin(out_dir, "tmp.bval")
        tmp_bvec_path = pjoin(out_dir, "tmp.bvec")
        np.savetxt(tmp_bval_path, bvals)
        np.savetxt(tmp_bvec_path, bvecs.T)
        reconst_flow._force_overwrite = True
        # The flow must warn and then fail on the bad gradient table.
        with npt.assert_raises(BaseException):
            npt.assert_warns(UserWarning, reconst_flow.run, data_path,
                             tmp_bval_path, tmp_bvec_path, mask_path,
                             out_dir=out_dir, extract_pam_values=True)

        if flow.get_short_name() == 'csd':
            # CSD accepts the frf in several forms: list, string, and None
            # (auto-estimated), plus an explicit ROI center.
            reconst_flow = flow()
            reconst_flow._force_overwrite = True
            reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, frf=[15, 5, 5])
            reconst_flow = flow()
            reconst_flow._force_overwrite = True
            reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, frf='15, 5, 5')
            reconst_flow = flow()
            reconst_flow._force_overwrite = True
            reconst_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, frf=None)
            reconst_flow2 = flow()
            reconst_flow2._force_overwrite = True
            reconst_flow2.run(data_path, bval_path, bvec_path, mask_path,
                              out_dir=out_dir, frf=None,
                              roi_center=[10, 10, 10])
def sh_smooth(data, bvals, bvecs, sh_order=4, similarity_threshold=50,
              regul=0.006):
    """Smooth the raw diffusion signal with spherical harmonics.

    Parameters
    ----------
    data : ndarray
        The diffusion data to smooth.
    bvals : ndarray
        b-values for each volume of ``data``.
    bvecs : ndarray
        Gradient directions for each volume of ``data``.
        NOTE(review): indexed as ``bvecs[:, idx]``, so the expected layout
        is (3, n_volumes) — confirm against callers.
    sh_order : int, default 4
        Order of the spherical harmonics to fit.
    similarity_threshold : int, default 50
        All b-values such that |b_1 - b_2| < similarity_threshold
        will be considered as identical for smoothing purpose.
        Must not exceed 200.
    regul : float, default 0.006
        Amount of regularization to apply to sh coefficients computation.

    Returns
    -------
    pred_sig : ndarray
        The smoothed diffusion data, fitted through spherical harmonics.
    """
    if similarity_threshold > 200:
        raise ValueError(
            "similarity_threshold = {}, which is higher than 200,"
            " please use a lower value".format(similarity_threshold))

    m, n = sph_harm_ind_list(sh_order)
    # Laplace-Beltrami eigenvalues used as the smoothing penalty.
    L = -n * (n + 1)
    where_b0s = bvals == 0
    pred_sig = np.zeros_like(data, dtype=np.float32)

    # Round similar bvals together for identifying similar shells
    rounded_bvals = np.zeros_like(bvals)
    for unique_bval in np.unique(bvals):
        idx = np.abs(unique_bval - bvals) < similarity_threshold
        rounded_bvals[idx] = unique_bval

    # process each b-value separately
    for unique_bval in np.unique(rounded_bvals):
        idx = rounded_bvals == unique_bval

        # Just give back the signal for the b0s since we can't really do
        # anything about it
        if np.all(idx == where_b0s):
            if np.sum(where_b0s) > 1:
                # Multiple b0s: average them for a stable reference.
                pred_sig[..., idx] = np.mean(data[..., idx], axis=-1,
                                             keepdims=True)
            else:
                pred_sig[..., idx] = data[..., idx]
            continue

        x, y, z = bvecs[:, idx]
        r, theta, phi = cart2sphere(x, y, z)

        # Find the sh coefficients to smooth the signal
        B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])
        invB = smooth_pinv(B_dwi, np.sqrt(regul) * L)
        sh_coeff = np.dot(data[..., idx], invB.T)

        # Find the smoothed signal from the sh fit for the given gtab
        pred_sig[..., idx] = np.dot(sh_coeff, B_dwi.T)

    return pred_sig
def __init__(self, gtab, ratio, reg_sphere=None, sh_order=8, lambda_=1.,
             tau=0.1):
    r""" Spherical Deconvolution Transform (SDT) [1]_.

    The SDT computes a fiber orientation distribution (FOD) as opposed to a
    diffusion ODF as the QballModel or the CsaOdfModel. This results in a
    sharper angular profile with better angular resolution. The Constrained
    SDTModel is similar to the Constrained CSDModel but mathematically it
    deconvolves the q-ball ODF as opposed to the HARDI signal (see [1]_
    for a comparison and a thorough discussion).

    A sharp fODF is obtained because a single fiber *response* function is
    injected as *a priori* knowledge. In the SDTModel, this response is a
    single fiber q-ball ODF as opposed to a single fiber signal function
    for the CSDModel. The response function will be used as deconvolution
    kernel.

    Parameters
    ----------
    gtab : GradientTable
    ratio : float
        ratio of the smallest vs the largest eigenvalue of the single
        prolate tensor response function
    reg_sphere : Sphere
        sphere used to build the regularization B matrix
    sh_order : int
        maximal spherical harmonics order
    lambda_ : float
        weight given to the constrained-positivity regularization part of
        the deconvolution equation
    tau : float
        threshold (tau *mean(fODF)) controlling the amplitude below
        which the corresponding fODF is assumed to be zero.

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions.
    """
    m, n = sph_harm_ind_list(sh_order)
    self.m, self.n = m, n
    # Index helpers separating b0 volumes from diffusion-weighted ones.
    self._where_b0s = lazy_index(gtab.b0s_mask)
    self._where_dwi = lazy_index(~gtab.b0s_mask)

    # Warn when the SH basis has more coefficients than there are DWI
    # measurements available to fit them.  (`~mask` instead of the
    # un-idiomatic `mask == False` comparison; same count.)
    no_params = ((sh_order + 1) * (sh_order + 2)) / 2
    if no_params > np.sum(~gtab.b0s_mask):
        msg = "Number of parameters required for the fit are more "
        msg += "than the actual data points"
        warnings.warn(msg, UserWarning)

    x, y, z = gtab.gradients[self._where_dwi].T
    r, theta, phi = cart2sphere(x, y, z)
    # for the gradient sphere
    self.B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])

    # for the odf sphere
    if reg_sphere is None:
        self.sphere = get_sphere('symmetric362')
    else:
        self.sphere = reg_sphere

    r, theta, phi = cart2sphere(self.sphere.x, self.sphere.y,
                                self.sphere.z)
    self.B_reg = real_sph_harm(m, n, theta[:, None], phi[:, None])

    self.R, self.P = forward_sdt_deconv_mat(ratio, sh_order)

    # scale lambda_ to account for differences in the number of
    # SH coefficients and number of mapped directions
    self.lambda_ = (lambda_ * self.R.shape[0] * self.R[0, 0] /
                    self.B_reg.shape[0])
    self.tau = tau
    self.sh_order = sh_order
def odf_deconv(odf_sh, sh_order, R, B_reg, lambda_=1., tau=0.1):
    r""" ODF constrained-regularized spherical deconvolution using
    the Sharpening Deconvolution Transform (SDT) [1]_, [2]_.

    Parameters
    ----------
    odf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         ndarray of SH coefficients for the ODF spherical function to be
         deconvolved
    sh_order : int
         maximal SH order of the SH representation (kept for interface
         compatibility; the iteration itself is driven by the shapes of
         ``R`` and ``B_reg``)
    R : ndarray (``(sh_order + 1)(sh_order + 2)/2``,
         ``(sh_order + 1)(sh_order + 2)/2``)
         SDT matrix in SH basis
    B_reg : ndarray (``(sh_order + 1)(sh_order + 2)/2``,
         ``(sh_order + 1)(sh_order + 2)/2``)
         SH basis matrix used for deconvolution
    lambda_ : float
         lambda parameter in minimization equation (default 1.0)
    tau : float
         threshold (tau *max(fODF)) controlling the amplitude below
         which the corresponding fODF is assumed to be zero.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)(sh_order + 2)/2``,)
         Spherical harmonics coefficients of the constrained-regularized
         fiber ODF
    num_it : int
         Number of iterations in the constrained-regularization used for
         convergence

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [2] Descoteaux, M, PhD thesis, INRIA Sophia-Antipolis, 2008.
    """
    # Generate initial fODF estimate, which is the ODF truncated at SH
    # order 4 (the first 15 coefficients).  rcond=-1 keeps the legacy
    # lstsq cutoff and silences NumPy's FutureWarning.
    fodf_sh = np.linalg.lstsq(R, odf_sh, rcond=-1)[0]
    fodf_sh[15:] = 0

    fodf = np.dot(B_reg, fodf_sh)

    # Normalize so that the positivity threshold below is scale-invariant.
    Z = np.linalg.norm(fodf)
    fodf_sh /= Z
    fodf = np.dot(B_reg, fodf_sh)
    threshold = tau * np.max(np.dot(B_reg, fodf_sh))

    k = []
    convergence = 50
    for num_it in range(1, convergence + 1):
        A = np.dot(B_reg, fodf_sh)
        k2 = np.nonzero(A < threshold)[0]

        # The constrained least-squares system must stay over-determined.
        if (k2.shape[0] + R.shape[0]) < B_reg.shape[1]:
            warnings.warn(
                'too few negative directions identified - failed to converge')
            return fodf_sh, num_it

        # Converged once the active constraint set stops changing.
        if num_it > 1 and k.shape[0] == k2.shape[0]:
            if (k == k2).all():
                return fodf_sh, num_it

        k = k2
        M = np.concatenate((R, lambda_ * B_reg[k, :]))
        ODF = np.concatenate((odf_sh, np.zeros(k.shape)))
        fodf_sh = np.linalg.lstsq(M, ODF, rcond=-1)[0]

    warnings.warn('maximum number of iterations exceeded - failed to converge')
    return fodf_sh, num_it
def __init__(self, gtab, response, reg_sphere=None, sh_order=8, lambda_=1,
             tau=0.1):
    r""" Constrained Spherical Deconvolution (CSD) [1]_.

    Spherical deconvolution computes a fiber orientation distribution
    (FOD), also called fiber ODF (fODF) [2]_, as opposed to a diffusion ODF
    as the QballModel or the CsaOdfModel. This results in a sharper angular
    profile with better angular resolution that is the best object to be
    used for later deterministic and probabilistic tractography [3]_.

    A sharp fODF is obtained because a single fiber *response* function is
    injected as *a priori* knowledge. The response function is often
    data-driven and thus, comes as input to the
    ConstrainedSphericalDeconvModel. It will be used as deconvolution
    kernel, as described in [1]_.

    Parameters
    ----------
    gtab : GradientTable
    response : tuple or callable
        If tuple, then it should have two elements. The first is the
        eigen-values as an (3,) ndarray and the second is the signal value
        for the response function without diffusion weighting. This is to
        be able to generate a single fiber synthetic signal. If callable
        then the function should return an ndarray with the all the signal
        values for the response function. The response function will be
        used as deconvolution kernel ([1]_)
    reg_sphere : Sphere
        sphere used to build the regularization B matrix
    sh_order : int
        maximal spherical harmonics order
    lambda_ : float
        weight given to the constrained-positivity regularization part of
        the deconvolution equation (see [1]_)
    tau : float
        threshold controlling the amplitude below which the corresponding
        fODF is assumed to be zero.  Ideally, tau should be set to
        zero. However, to improve the stability of the algorithm, tau is
        set to tau*100 % of the mean fODF amplitude (here, 10% by default)
        (see [1]_)

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of
           the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical
           deconvolution
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [3] C\^ot\'e, M-A., et al. Medical Image Analysis 2013. Tractometer:
           Towards validation of tractography pipelines
    .. [4] Tournier, J.D, et al. Imaging Systems and Technology
           2012. MRtrix: Diffusion Tractography in Crossing Fiber Regions
    """
    m, n = sph_harm_ind_list(sh_order)
    self.m, self.n = m, n
    # Index helpers separating b0 volumes from diffusion-weighted ones.
    self._where_b0s = lazy_index(gtab.b0s_mask)
    self._where_dwi = lazy_index(~gtab.b0s_mask)

    # Warn when the SH basis has more coefficients than there are DWI
    # measurements available to fit them.  (`~mask` instead of the
    # un-idiomatic `mask == False` comparison; same count.)
    no_params = ((sh_order + 1) * (sh_order + 2)) / 2
    if no_params > np.sum(~gtab.b0s_mask):
        msg = "Number of parameters required for the fit are more "
        msg += "than the actual data points"
        warnings.warn(msg, UserWarning)

    x, y, z = gtab.gradients[self._where_dwi].T
    r, theta, phi = cart2sphere(x, y, z)
    # for the gradient sphere
    self.B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])

    # for the sphere used in the regularization positivity constraint
    if reg_sphere is None:
        self.sphere = get_sphere('symmetric362')
    else:
        self.sphere = reg_sphere

    r, theta, phi = cart2sphere(self.sphere.x, self.sphere.y,
                                self.sphere.z)
    self.B_reg = real_sph_harm(m, n, theta[:, None], phi[:, None])

    if callable(response):
        # NOTE(review): S_r is bound to the callable itself and is indexed
        # below as S_r[self._where_dwi]; per the docstring the callable
        # should yield the signal values — confirm callers actually pass
        # an indexable object here.
        S_r = response
    else:
        if response is None:
            S_r = estimate_response(gtab, np.array([0.0015, 0.0003,
                                                    0.0003]), 1)
        else:
            S_r = estimate_response(gtab, response[0], response[1])

    # Project the response signal onto the SH basis; rcond=-1 keeps the
    # legacy lstsq cutoff and silences NumPy's FutureWarning.
    r_sh = np.linalg.lstsq(self.B_dwi, S_r[self._where_dwi], rcond=-1)[0]
    r_rh = sh_to_rh(r_sh, sh_order)
    self.R = forward_sdeconv_mat(r_rh, sh_order)

    # scale lambda_ to account for differences in the number of
    # SH coefficients and number of mapped directions
    # This is exactly what is done in [4]_
    self.lambda_ = (lambda_ * self.R.shape[0] * r_rh[0] /
                    self.B_reg.shape[0])
    self.sh_order = sh_order
    self.tau = tau
def odf_deconv(odf_sh, sh_order, R, B_reg, lambda_=1., tau=0.1):
    r""" ODF constrained-regularized spherical deconvolution with the
    Sharpening Deconvolution Transform (SDT) [1]_, [2]_.

    Parameters
    ----------
    odf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         SH coefficients of the ODF spherical function to deconvolve
    sh_order : int
         maximal SH order of the SH representation
    R : ndarray (``(sh_order + 1)(sh_order + 2)/2``,
         ``(sh_order + 1)(sh_order + 2)/2``)
         SDT matrix in SH basis
    B_reg : ndarray (``(sh_order + 1)(sh_order + 2)/2``,
         ``(sh_order + 1)(sh_order + 2)/2``)
         SH basis matrix used for deconvolution
    lambda_ : float
         lambda parameter of the minimization equation (default 1.0)
    tau : float
         threshold (tau *max(fODF)) below which fODF amplitudes are
         treated as zero.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)(sh_order + 2)/2``,)
         SH coefficients of the constrained-regularized fiber ODF
    num_it : int
         number of constrained-regularization iterations performed

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [2] Descoteaux, M, PhD thesis, INRIA Sophia-Antipolis, 2008.
    """
    m, n = sph_harm_ind_list(sh_order)

    # Initial estimate: deconvolved ODF truncated at SH order 4
    # (only the first 15 coefficients are kept).
    fodf_sh = np.linalg.lstsq(R, odf_sh)[0]
    fodf_sh[15:] = 0
    sampled = np.dot(B_reg, fodf_sh)

    # Normalize so the positivity threshold is scale-invariant.
    fodf_sh /= np.linalg.norm(sampled)
    threshold = tau * np.max(np.dot(B_reg, fodf_sh))

    active = None
    max_iter = 50
    for num_it in range(1, max_iter + 1):
        amplitudes = np.dot(B_reg, fodf_sh)
        below = np.nonzero(amplitudes < threshold)[0]

        # The constrained system must stay over-determined.
        if below.shape[0] + R.shape[0] < B_reg.shape[1]:
            warnings.warn(
                'too few negative directions identified - failed to converge')
            return fodf_sh, num_it

        # Converged once the active constraint set stops changing.
        if active is not None and active.shape[0] == below.shape[0]:
            if (active == below).all():
                return fodf_sh, num_it

        active = below
        system = np.concatenate((R, lambda_ * B_reg[active, :]))
        target = np.concatenate((odf_sh, np.zeros(active.shape)))
        fodf_sh = np.linalg.lstsq(system, target)[0]

    warnings.warn('maximum number of iterations exceeded - failed to converge')
    return fodf_sh, num_it