def test_disperse_charges(): charges = np.array([[1., 0, 0], [0, 1., 0], [0, 0, 1.]]) d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 10) nt.assert_array_almost_equal(charges, d_sphere.vertices) a = np.sqrt(3) / 2 charges = np.array([[3. / 5, 4. / 5, 0], [4. / 5, 3. / 5, 0]]) expected_charges = np.array([[0, 1., 0], [1., 0, 0]]) d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 1000, .2) nt.assert_array_almost_equal(expected_charges, d_sphere.vertices) for ii in xrange(1, len(pot)): # check that the potential of the system is going down nt.assert_(pot[ii] - pot[ii - 1] <= 0) # Check that the disperse_charges does not blow up with a large constant d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 1000, 20.) nt.assert_array_almost_equal(expected_charges, d_sphere.vertices) for ii in xrange(1, len(pot)): # check that the potential of the system is going down nt.assert_(pot[ii] - pot[ii - 1] <= 0) # check that the function seems to work with a larger number of charges charges = np.arange(21).reshape(7, 3) norms = np.sqrt((charges * charges).sum(-1)) charges = charges / norms[:, None] d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 1000, .05) for ii in xrange(1, len(pot)): # check that the potential of the system is going down nt.assert_(pot[ii] - pot[ii - 1] <= 0) # check that the resulting charges all lie on the unit sphere d_charges = d_sphere.vertices norms = np.sqrt((d_charges * d_charges).sum(-1)) nt.assert_array_almost_equal(norms, 1)
def generate_combinations(items, n): """ Combine sets of size n from items Parameters ------------ items : sequence n : int Returns -------- ic : iterator Examples -------- >>> from dipy.tracking.metrics import generate_combinations >>> ic=generate_combinations(range(3),2) >>> for i in ic: print(i) [0, 1] [0, 2] [1, 2] """ if n == 0: yield [] elif n == 2: # if n=2 non_recursive for i in xrange(len(items)-1): for j in xrange(i+1, len(items)): yield [i, j] else: # if n>2 uses recursion for i in xrange(len(items)): for cc in generate_combinations(items[i+1:], n-1): yield [items[i]] + cc
def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) : if doMergeB0: nS = 1+self.scheme.dwi_count merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx)) else: nS = self.scheme.nS merge_idx = np.arange(nS) KERNELS = {} KERNELS['model'] = self.id KERNELS['wmr'] = np.zeros( (len(self.Rs),181,181,nS,), dtype=np.float32 ) KERNELS['wmh'] = np.zeros( (len(self.ICVFs),181,181,nS,), dtype=np.float32 ) KERNELS['iso'] = np.zeros( (len(self.d_ISOs),nS,), dtype=np.float32 ) nATOMS = len(self.Rs) + len(self.ICVFs) + len(self.d_ISOs) progress = ProgressBar( n=nATOMS, prefix=" ", erase=True ) # Cylinder(s) for i in xrange(len(self.Rs)) : lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) KERNELS['wmr'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx] progress.update() # Zeppelin(s) for i in xrange(len(self.ICVFs)) : lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) KERNELS['wmh'][i,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx] progress.update() # Ball(s) for i in xrange(len(self.d_ISOs)) : lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) KERNELS['iso'][i,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx] progress.update() return KERNELS
def resample_kernel(KRlm, nS, idx_out, Ylm_out, is_isotropic): """Project/resample a spherical function to signal space. Parameters ---------- KRlm : numpy.array Rotated spherical functions (in SH space) to project nS : integer Number of samples in the subject's acquisition scheme idx_out : list of list Index of samples in output kernel Ylm_out : numpy.array Matrix to project back all shells from SH space to signal space (of the subject) is_isotropic : boolean Indentifies whether Klm is an isotropic function or not Returns ------- KR = numpy.array Rotated spherical functions projected to signal space of the subject """ if is_isotropic == False: KR = np.ones((181, 181, nS), dtype=np.float32) for ox in xrange(181): for oy in xrange(181): KR[ox, oy, idx_out] = np.dot(Ylm_out, KRlm[ox, oy, :]).astype(np.float32) else: KR = np.ones(nS, dtype=np.float32) KR[idx_out] = np.dot(Ylm_out, KRlm).astype(np.float32) return KR
def resample_kernel( KRlm, nS, idx_out, Ylm_out, is_isotropic ) : """Project/resample a spherical function to signal space. Parameters ---------- KRlm : numpy.array Rotated spherical functions (in SH space) to project nS : integer Number of samples in the subject's acquisition scheme idx_out : list of list Index of samples in output kernel Ylm_out : numpy.array Matrix to project back all shells from SH space to signal space (of the subject) is_isotropic : boolean Indentifies whether Klm is an isotropic function or not Returns ------- KR = numpy.array Rotated spherical functions projected to signal space of the subject """ if is_isotropic == False : KR = np.ones( (181,181,nS), dtype=np.float32 ) for ox in xrange(181) : for oy in xrange(181) : KR[ox,oy,idx_out] = np.dot( Ylm_out, KRlm[ox,oy,:] ).astype(np.float32) else : KR = np.ones( nS, dtype=np.float32 ) KR[idx_out] = np.dot( Ylm_out, KRlm ).astype(np.float32) return KR
def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ) : if doMergeB0: nS = 1+self.scheme.dwi_count merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx)) else: nS = self.scheme.nS merge_idx = np.arange(nS) KERNELS = {} KERNELS['model'] = self.id KERNELS['D'] = np.zeros( (len(self.d_perps),181,181,nS), dtype=np.float32 ) KERNELS['CSF'] = np.zeros( (len(self.d_isos),nS), dtype=np.float32 ) nATOMS = len(self.d_perps) + len(self.d_isos) progress = ProgressBar( n=nATOMS, prefix=" ", erase=True ) # Tensor compartment(s) for i in xrange(len(self.d_perps)) : lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) KERNELS['D'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx] progress.update() # Isotropic compartment(s) for i in xrange(len(self.d_isos)) : lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) KERNELS['CSF'][i,...] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx] progress.update() return KERNELS
def partition(array): size = len(array) half_size = int(size / 2) x1 = [] x2 = [] for index in xrange(0, half_size): x1.append(array[index]) for index in xrange(half_size, size): x2.append(array[index]) return [x1, x2]
def test_voxel_ornt(): sh = (40, 40, 40) sz = (1, 2, 3) I4 = np.eye(4) ras = orientation_from_string('ras') sra = orientation_from_string('sra') lpi = orientation_from_string('lpi') srp = orientation_from_string('srp') affine = reorder_voxels_affine(ras, ras, sh, sz) assert_array_equal(affine, I4) affine = reorder_voxels_affine(sra, sra, sh, sz) assert_array_equal(affine, I4) affine = reorder_voxels_affine(lpi, lpi, sh, sz) assert_array_equal(affine, I4) affine = reorder_voxels_affine(srp, srp, sh, sz) assert_array_equal(affine, I4) streamlines = make_streamlines() box = np.array(sh) * sz sra_affine = reorder_voxels_affine(ras, sra, sh, sz) toras_affine = reorder_voxels_affine(sra, ras, sh, sz) assert_array_equal(np.dot(toras_affine, sra_affine), I4) expected_sl = (sl[:, [2, 0, 1]] for sl in streamlines) test_sl = move_streamlines(streamlines, sra_affine) for _ in xrange(len(streamlines)): assert_array_equal(next(test_sl), next(expected_sl)) lpi_affine = reorder_voxels_affine(ras, lpi, sh, sz) toras_affine = reorder_voxels_affine(lpi, ras, sh, sz) assert_array_equal(np.dot(toras_affine, lpi_affine), I4) expected_sl = (box - sl for sl in streamlines) test_sl = move_streamlines(streamlines, lpi_affine) for _ in xrange(len(streamlines)): assert_array_equal(next(test_sl), next(expected_sl)) srp_affine = reorder_voxels_affine(ras, srp, sh, sz) toras_affine = reorder_voxels_affine(srp, ras, (40, 40, 40), (3, 1, 2)) assert_array_equal(np.dot(toras_affine, srp_affine), I4) expected_sl = [sl.copy() for sl in streamlines] for sl in expected_sl: sl[:, 1] = box[1] - sl[:, 1] expected_sl = (sl[:, [2, 0, 1]] for sl in expected_sl) test_sl = move_streamlines(streamlines, srp_affine) for _ in xrange(len(streamlines)): assert_array_equal(next(test_sl), next(expected_sl))
def create_high_resolution_scheme(scheme, b_scale=1): """Create an high-resolution version of a scheme to be used for kernel rotation (500 directions per shell). All other parameters of the scheme remain the same. Parameters ---------- scheme : Scheme class Original acquisition scheme b_scale : float If needed, apply a scaling to the b-values (default : 1) """ n = len(scheme.shells) raw = np.zeros((500 * n, 4 if scheme.version == 0 else 7)) row = 0 for i in xrange(n): raw[row:row + 500, 0:3] = grad if scheme.version == 0: raw[row:row + 500, 3] = scheme.shells[i]['b'] * b_scale else: raw[row:row + 500, 3] = scheme.shells[i]['G'] raw[row:row + 500, 4] = scheme.shells[i]['Delta'] raw[row:row + 500, 5] = scheme.shells[i]['delta'] raw[row:row + 500, 6] = scheme.shells[i]['TE'] row += 500 return amico.scheme.Scheme(raw)
def test_TriLinearInterpolator(): # Place (0, 0, 0) at the bottom left of the image l, m, n, o = np.ogrid[.5:6.51, .5:6.51, .5:6.51, 0:4] data = l + m + n + o data = data.astype("float32") tli = TriLinearInterpolator(data, (1, 1, 1)) a, b, c = np.mgrid[.5:6.5:1.6, .5:6.5:2.7, .5:6.5:3.8] for ii in xrange(a.size): x = a.flat[ii] y = b.flat[ii] z = c.flat[ii] expected_result = x + y + z + o.ravel() assert_array_almost_equal(tli[x, y, z], expected_result, decimal=5) ind = np.array([x, y, z]) assert_array_almost_equal(tli[ind], expected_result) # Index at 0 expected_value = np.arange(4) + 1.5 assert_array_almost_equal(tli[0, 0, 0], expected_value) # Index at shape expected_value = np.arange(4) + (6.5 * 3) assert_array_almost_equal(tli[7, 7, 7], expected_value) assert_raises(OutsideImage, tli.__getitem__, (-.1, 0, 0)) assert_raises(OutsideImage, tli.__getitem__, (0, 7.01, 0))
def aux_structures_resample( scheme, lmax = 12 ) : """Compute the auxiliary data structures to resample the kernels to the original acquisition scheme. Parameters ---------- scheme : Scheme class Acquisition scheme of the acquired signal lmax : int Maximum SH order to use for the rotation phase (default : 12) Returns ------- idx_OUT : numpy array Indices of the samples belonging to each shell Ylm_OUT : numpy array Operator to transform each shell from Spherical harmonics to original signal space """ nSH = (lmax+1)*(lmax+2)//2 idx_OUT = np.zeros( scheme.dwi_count, dtype=np.int32 ) Ylm_OUT = np.zeros( (scheme.dwi_count,nSH*len(scheme.shells)), dtype=np.float32 ) # matrix from SH to real space idx = 0 for s in xrange( len(scheme.shells) ) : nS = len( scheme.shells[s]['idx'] ) idx_OUT[ idx:idx+nS ] = scheme.shells[s]['idx'] _, theta, phi = cart2sphere( scheme.shells[s]['grad'][:,0], scheme.shells[s]['grad'][:,1], scheme.shells[s]['grad'][:,2] ) tmp, _, _ = real_sym_sh_basis( lmax, theta, phi ) Ylm_OUT[ idx:idx+nS, nSH*s:nSH*(s+1) ] = tmp idx += nS return ( idx_OUT, Ylm_OUT )
def aux_structures_resample(scheme, lmax=12): """Compute the auxiliary data structures to resample the kernels to the original acquisition scheme. Parameters ---------- scheme : Scheme class Acquisition scheme of the acquired signal lmax : int Maximum SH order to use for the rotation phase (default : 12) Returns ------- idx_OUT : numpy array Indices of the samples belonging to each shell Ylm_OUT : numpy array Operator to transform each shell from Spherical harmonics to original signal space """ nSH = (lmax + 1) * (lmax + 2) // 2 idx_OUT = np.zeros(scheme.dwi_count, dtype=np.int32) Ylm_OUT = np.zeros((scheme.dwi_count, nSH * len(scheme.shells)), dtype=np.float32) # matrix from SH to real space idx = 0 for s in xrange(len(scheme.shells)): nS = len(scheme.shells[s]['idx']) idx_OUT[idx:idx + nS] = scheme.shells[s]['idx'] _, theta, phi = cart2sphere(scheme.shells[s]['grad'][:, 0], scheme.shells[s]['grad'][:, 1], scheme.shells[s]['grad'][:, 2]) tmp, _, _ = real_sym_sh_basis(lmax, theta, phi) Ylm_OUT[idx:idx + nS, nSH * s:nSH * (s + 1)] = tmp idx += nS return (idx_OUT, Ylm_OUT)
def create_high_resolution_scheme( scheme, b_scale = 1 ) : """Create an high-resolution version of a scheme to be used for kernel rotation (500 directions per shell). All other parameters of the scheme remain the same. Parameters ---------- scheme : Scheme class Original acquisition scheme b_scale : float If needed, apply a scaling to the b-values (default : 1) """ n = len( scheme.shells ) raw = np.zeros( (500*n, 4 if scheme.version==0 else 7) ) row = 0 for i in xrange(n) : raw[row:row+500,0:3] = grad if scheme.version == 0 : raw[row:row+500,3] = scheme.shells[i]['b'] * b_scale else : raw[row:row+500,3] = scheme.shells[i]['G'] raw[row:row+500,4] = scheme.shells[i]['Delta'] raw[row:row+500,5] = scheme.shells[i]['delta'] raw[row:row+500,6] = scheme.shells[i]['TE'] row += 500 return amico.scheme.Scheme( raw )
def precompute_rotation_matrices(lmax=12): """Precompute the rotation matrices to rotate the high-resolution kernels (500 directions/shell). Parameters ---------- lmax : int Maximum SH order to use for the rotation phase (default : 12) """ if not isdir(dipy_home): makedirs(dipy_home) filename = pjoin(dipy_home, 'AMICO_aux_matrices_lmax=%d.pickle' % lmax) if isfile(filename): return print('\n-> Precomputing rotation matrices for l_max=%d:' % lmax) AUX = {} AUX['lmax'] = lmax # matrix to fit the SH coefficients _, theta, phi = cart2sphere(grad[:, 0], grad[:, 1], grad[:, 2]) tmp, _, _ = real_sym_sh_basis(lmax, theta, phi) AUX['fit'] = np.dot(np.linalg.pinv(np.dot(tmp.T, tmp)), tmp.T) # matrices to rotate the functions in SH space AUX['Ylm_rot'] = np.zeros((181, 181), dtype=np.object) for ox in xrange(181): for oy in xrange(181): tmp, _, _ = real_sym_sh_basis(lmax, ox / 180.0 * np.pi, oy / 180.0 * np.pi) AUX['Ylm_rot'][ox, oy] = tmp.reshape(-1) # auxiliary data to perform rotations AUX['const'] = np.zeros(AUX['fit'].shape[0], dtype=np.float64) AUX['idx_m0'] = np.zeros(AUX['fit'].shape[0], dtype=np.int32) i = 0 for l in xrange(0, AUX['lmax'] + 1, 2): const = np.sqrt(4.0 * np.pi / (2.0 * l + 1.0)) idx_m0 = (l * l + l + 2.0) / 2.0 - 1 for m in xrange(-l, l + 1): AUX['const'][i] = const AUX['idx_m0'][i] = idx_m0 i += 1 with open(filename, 'wb+') as fid: pickle.dump(AUX, fid, protocol=2) print(' [ DONE ]')
def precompute_rotation_matrices( lmax = 12 ) : """Precompute the rotation matrices to rotate the high-resolution kernels (500 directions/shell). Parameters ---------- lmax : int Maximum SH order to use for the rotation phase (default : 12) """ if not isdir(dipy_home) : makedirs(dipy_home) filename = pjoin( dipy_home, 'AMICO_aux_matrices_lmax=%d.pickle'%lmax ) if isfile( filename ) : return print('\n-> Precomputing rotation matrices for l_max=%d:' % lmax) AUX = {} AUX['lmax'] = lmax # matrix to fit the SH coefficients _, theta, phi = cart2sphere( grad[:,0], grad[:,1], grad[:,2] ) tmp, _, _ = real_sym_sh_basis( lmax, theta, phi ) AUX['fit'] = np.dot( np.linalg.pinv( np.dot(tmp.T,tmp) ), tmp.T ) # matrices to rotate the functions in SH space AUX['Ylm_rot'] = np.zeros( (181,181), dtype=np.object ) for ox in xrange(181) : for oy in xrange(181) : tmp, _, _ = real_sym_sh_basis( lmax, ox/180.0*np.pi, oy/180.0*np.pi ) AUX['Ylm_rot'][ox,oy] = tmp.reshape(-1) # auxiliary data to perform rotations AUX['const'] = np.zeros( AUX['fit'].shape[0], dtype=np.float64 ) AUX['idx_m0'] = np.zeros( AUX['fit'].shape[0], dtype=np.int32 ) i = 0 for l in xrange(0,AUX['lmax']+1,2) : const = np.sqrt(4.0*np.pi/(2.0*l+1.0)) idx_m0 = (l*l + l + 2.0)/2.0 - 1 for m in xrange(-l,l+1) : AUX['const'][i] = const AUX['idx_m0'][i] = idx_m0 i += 1 with open( filename, 'wb+' ) as fid : pickle.dump( AUX, fid, protocol=2 ) print(' [ DONE ]')
def rotate_kernel(K, AUX, idx_IN, idx_OUT, is_isotropic): """Rotate a response function (symmetric about z-axis). Parameters ---------- K : numpy.ndarray Spherical function (in signal space) to rotate AUX : dictionary Auxiliary data structures needed to rotate functions in SH space idx_IN : list of list Index of samples in input kernel (K) belonging to each shell idx_OUT : list of list Index of samples in output kernel (K) belonging to each shell is_isotropic : boolean Indentifies whether K is an isotropic function or not Returns ------- KRlm = numpy.array Spherical function (in SH space) rotated to 181x181 directions distributed on a hemisphere """ # project kernel K to SH space Klm = [] for s in xrange(len(idx_IN)): Klm.append(np.dot(AUX['fit'], K[idx_IN[s]])) n = len(idx_IN) * AUX['fit'].shape[0] if is_isotropic == False: # fit SH and rotate kernel to 181*181 directions KRlm = np.zeros((181, 181, n), dtype=np.float32) for ox in xrange(181): for oy in xrange(181): Ylm_rot = AUX['Ylm_rot'][ox, oy] for s in xrange(len(idx_IN)): KRlm[ox, oy, idx_OUT[s]] = AUX['const'] * Klm[s][ AUX['idx_m0']] * Ylm_rot else: # simply fit SH KRlm = np.zeros(n, dtype=np.float32) for s in xrange(len(idx_IN)): KRlm[idx_OUT[s]] = Klm[s].astype(np.float32) return KRlm
def disperse_charges(hemi, iters, const=.2): """Models electrostatic repulsion on the unit sphere Places charges on a sphere and simulates the repulsive forces felt by each one. Allows the charges to move for some number of iterations and returns their final location as well as the total potential of the system at each step. Parameters ---------- hemi : HemiSphere Points on a unit sphere. iters : int Number of iterations to run. const : float Using a smaller const could provide a more accurate result, but will need more iterations to converge. Returns ------- hemi : HemiSphere Distributed points on a unit sphere. potential : ndarray The electrostatic potential at each iteration. This can be useful to check if the repulsion converged to a minimum. Note: ----- This function is meant to be used with diffusion imaging so antipodal symmetry is assumed. Therefor each charge must not only be unique, but if there is a charge at +x, there cannot be a charge at -x. These are treated as the same location and because the distance between the two charges will be zero, the result will be unstable. """ if not isinstance(hemi, HemiSphere): raise ValueError("expecting HemiSphere") charges = hemi.vertices forces, v = _get_forces(charges) force_mag = np.sqrt((forces*forces).sum()) const = const / force_mag.max() potential = np.empty(iters) v_min = v for ii in xrange(iters): new_charges = charges + forces * const norms = np.sqrt((new_charges**2).sum(-1)) new_charges /= norms[:, None] new_forces, v = _get_forces(new_charges) if v <= v_min: charges = new_charges forces = new_forces potential[ii] = v_min = v else: const /= 2. potential[ii] = v_min return HemiSphere(xyz=charges), potential
def disperse_charges(hemi, iters, const=.2): """Models electrostatic repulsion on the unit sphere Places charges on a sphere and simulates the repulsive forces felt by each one. Allows the charges to move for some number of iterations and returns their final location as well as the total potential of the system at each step. Parameters ---------- hemi : HemiSphere Points on a unit sphere. iters : int Number of iterations to run. const : float Using a smaller const could provide a more accurate result, but will need more iterations to converge. Returns ------- hemi : HemiSphere Distributed points on a unit sphere. potential : ndarray The electrostatic potential at each iteration. This can be useful to check if the repulsion converged to a minimum. Note: ----- This function is meant to be used with diffusion imaging so antipodal symmetry is assumed. Therefor each charge must not only be unique, but if there is a charge at +x, there cannot be a charge at -x. These are treated as the same location and because the distance between the two charges will be zero, the result will be unstable. """ if not isinstance(hemi, HemiSphere): raise ValueError("expecting HemiSphere") charges = hemi.vertices forces, v = _get_forces(charges) force_mag = np.sqrt((forces * forces).sum()) const = const / force_mag.max() potential = np.empty(iters) v_min = v for ii in xrange(iters): new_charges = charges + forces * const norms = np.sqrt((new_charges**2).sum(-1)) new_charges /= norms[:, None] new_forces, v = _get_forces(new_charges) if v <= v_min: charges = new_charges forces = new_forces potential[ii] = v_min = v else: const /= 2. potential[ii] = v_min return HemiSphere(xyz=charges), potential
def rotate_kernel( K, AUX, idx_IN, idx_OUT, is_isotropic ) : """Rotate a response function (symmetric about z-axis). Parameters ---------- K : numpy.ndarray Spherical function (in signal space) to rotate AUX : dictionary Auxiliary data structures needed to rotate functions in SH space idx_IN : list of list Index of samples in input kernel (K) belonging to each shell idx_OUT : list of list Index of samples in output kernel (K) belonging to each shell is_isotropic : boolean Indentifies whether K is an isotropic function or not Returns ------- KRlm = numpy.array Spherical function (in SH space) rotated to 181x181 directions distributed on a hemisphere """ # project kernel K to SH space Klm = [] for s in xrange(len(idx_IN)) : Klm.append( np.dot( AUX['fit'], K[ idx_IN[s] ] ) ) n = len(idx_IN)*AUX['fit'].shape[0] if is_isotropic == False : # fit SH and rotate kernel to 181*181 directions KRlm = np.zeros( (181,181,n), dtype=np.float32 ) for ox in xrange(181) : for oy in xrange(181) : Ylm_rot = AUX['Ylm_rot'][ox,oy] for s in xrange(len(idx_IN)) : KRlm[ox,oy,idx_OUT[s]] = AUX['const'] * Klm[s][AUX['idx_m0']] * Ylm_rot else : # simply fit SH KRlm = np.zeros( n, dtype=np.float32 ) for s in xrange(len(idx_IN)) : KRlm[idx_OUT[s]] = Klm[s].astype(np.float32) return KRlm
def resample( self, in_path, idx_out, Ylm_out, doMergeB0 ): nATOMS = len(self.IC_ODs)*len(self.IC_VFs) + 1 if doMergeB0: nS = 1+self.scheme.dwi_count merge_idx = np.hstack((self.scheme.b0_idx[0],self.scheme.dwi_idx)) else: nS = self.scheme.nS merge_idx = np.arange(nS) KERNELS = {} KERNELS['model'] = self.id KERNELS['wm'] = np.zeros( (nATOMS-1,181,181,nS), dtype=np.float32 ) KERNELS['iso'] = np.zeros( nS, dtype=np.float32 ) KERNELS['kappa'] = np.zeros( nATOMS-1, dtype=np.float32 ) KERNELS['icvf'] = np.zeros( nATOMS-1, dtype=np.float32 ) KERNELS['norms'] = np.zeros( (self.scheme.dwi_count, nATOMS-1) ) progress = ProgressBar( n=nATOMS, prefix=" ", erase=True ) # Coupled contributions for i in xrange( len(self.IC_ODs) ): for j in xrange( len(self.IC_VFs) ): lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) idx = progress.i - 1 KERNELS['wm'][idx,:,:,:] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, False )[:,:,merge_idx] KERNELS['kappa'][idx] = 1.0 / np.tan( self.IC_ODs[i]*np.pi/2.0 ) KERNELS['icvf'][idx] = self.IC_VFs[j] if doMergeB0: KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,0,1:] ) # norm of coupled atoms (for l1 minimization) else: KERNELS['norms'][:,idx] = 1 / np.linalg.norm( KERNELS['wm'][idx,0,0,self.scheme.dwi_idx] ) # norm of coupled atoms (for l1 minimization) progress.update() # Isotropic lm = np.load( pjoin( in_path, 'A_%03d.npy'%progress.i ) ) KERNELS['iso'] = amico.lut.resample_kernel( lm, self.scheme.nS, idx_out, Ylm_out, True )[merge_idx] progress.update() return KERNELS
def test_disperse_charges(): charges = np.array([[1., 0, 0], [0, 1., 0], [0, 0, 1.]]) d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 10) nt.assert_array_almost_equal(charges, d_sphere.vertices) a = np.sqrt(3)/2 charges = np.array([[3./5, 4./5, 0], [4./5, 3./5, 0]]) expected_charges = np.array([[0, 1., 0], [1., 0, 0]]) d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 1000, .2) nt.assert_array_almost_equal(expected_charges, d_sphere.vertices) for ii in xrange(1, len(pot)): # check that the potential of the system is going down nt.assert_(pot[ii] - pot[ii-1] <= 0) # Check that the disperse_charges does not blow up with a large constant d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 1000, 20.) nt.assert_array_almost_equal(expected_charges, d_sphere.vertices) for ii in xrange(1, len(pot)): # check that the potential of the system is going down nt.assert_(pot[ii] - pot[ii-1] <= 0) # check that the function seems to work with a larger number of charges charges = np.arange(21).reshape(7, 3) norms = np.sqrt((charges*charges).sum(-1)) charges = charges / norms[:, None] d_sphere, pot = disperse_charges(HemiSphere(xyz=charges), 1000, .05) for ii in xrange(1, len(pot)): # check that the potential of the system is going down nt.assert_(pot[ii] - pot[ii-1] <= 0) # check that the resulting charges all lie on the unit sphere d_charges = d_sphere.vertices norms = np.sqrt((d_charges*d_charges).sum(-1)) nt.assert_array_almost_equal(norms, 1)
def fit( self, y, dirs, KERNELS, params ) : nD = dirs.shape[0] n1 = len(self.Rs) n2 = len(self.ICVFs) n3 = len(self.d_ISOs) if self.isExvivo: nATOMS = nD*(n1+n2)+n3+1 else: nATOMS = nD*(n1+n2)+n3 # prepare DICTIONARY from dirs and lookup tables A = np.ones( (len(y), nATOMS ), dtype=np.float64, order='F' ) o = 0 for i in xrange(nD) : i1, i2 = amico.lut.dir_TO_lut_idx( dirs[i] ) A[:,o:(o+n1)] = KERNELS['wmr'][:,i1,i2,:].T o += n1 for i in xrange(nD) : i1, i2 = amico.lut.dir_TO_lut_idx( dirs[i] ) A[:,o:(o+n2)] = KERNELS['wmh'][:,i1,i2,:].T o += n2 A[:,o:] = KERNELS['iso'].T # empty dictionary if A.shape[1] == 0 : return [0, 0, 0], None, None, None # fit x = spams.lasso( np.asfortranarray( y.reshape(-1,1) ), D=A, **params ).todense().A1 # return estimates f1 = x[ :(nD*n1) ].sum() f2 = x[ (nD*n1):(nD*(n1+n2)) ].sum() v = f1 / ( f1 + f2 + 1e-16 ) xIC = x[:nD*n1].reshape(-1,n1).sum(axis=0) a = 1E6 * 2.0 * np.dot(self.Rs,xIC) / ( f1 + 1e-16 ) d = (4.0*v) / ( np.pi*a**2 + 1e-16 ) return [v, a, d], dirs, x, A
def subdivide(self, n=1): """Subdivides each face of the sphere into four new faces. New vertices are created at a, b, and c. Then each face [x, y, z] is divided into faces [x, a, c], [y, a, b], [z, b, c], and [a, b, c]. :: y /\ / \ a/____\b /\ /\ / \ / \ /____\/____\ x c z Parameters ---------- n : int, optional The number of subdivisions to preform. Returns ------- new_sphere : Sphere The subdivided sphere. """ vertices = self.vertices faces = self.faces for _ in xrange(n): edges, mapping = unique_edges(faces, return_mapping=True) new_vertices = vertices[edges].sum(1) new_vertices /= vector_norm(new_vertices, keepdims=True) mapping += len(vertices) vertices = np.vstack([vertices, new_vertices]) x, y, z = faces.T a, b, c = mapping.T face1 = np.column_stack([x, a, c]) face2 = np.column_stack([y, b, a]) face3 = np.column_stack([z, c, b]) face4 = mapping faces = np.concatenate([face1, face2, face3, face4]) if len(vertices) < 2**16: faces = np.asarray(faces, dtype='uint16') return Sphere(xyz=vertices, faces=faces)
def subdivide(self, n=1): """Subdivides each face of the sphere into four new faces. New vertices are created at a, b, and c. Then each face [x, y, z] is divided into faces [x, a, c], [y, a, b], [z, b, c], and [a, b, c]. :: y /\ / \ a/____\b /\ /\ / \ / \ /____\/____\ x c z Parameters ---------- n : int, optional The number of subdivisions to preform. Returns ------- new_sphere : Sphere The subdivided sphere. """ vertices = self.vertices faces = self.faces for i in xrange(n): edges, mapping = unique_edges(faces, return_mapping=True) new_vertices = vertices[edges].sum(1) new_vertices /= vector_norm(new_vertices, keepdims=True) mapping += len(vertices) vertices = np.vstack([vertices, new_vertices]) x, y, z = faces.T a, b, c = mapping.T face1 = np.column_stack([x, a, c]) face2 = np.column_stack([y, b, a]) face3 = np.column_stack([z, c, b]) face4 = mapping faces = np.concatenate([face1, face2, face3, face4]) if len(vertices) < 2**16: faces = np.asarray(faces, dtype='uint16') return Sphere(xyz=vertices, faces=faces)
def test_NearestNeighborInterpolator(): # Place integers values at the center of every voxel l, m, n, o = np.ogrid[0:6.01, 0:6.01, 0:6.01, 0:4] data = l + m + n + o nni = NearestNeighborInterpolator(data, (1, 1, 1)) a, b, c = np.mgrid[.5:6.5:1.6, .5:6.5:2.7, .5:6.5:3.8] for ii in xrange(a.size): x = a.flat[ii] y = b.flat[ii] z = c.flat[ii] expected_result = int(x) + int(y) + int(z) + o.ravel() assert_array_equal(nni[x, y, z], expected_result) ind = np.array([x, y, z]) assert_array_equal(nni[ind], expected_result) assert_raises(OutsideImage, nni.__getitem__, (-.1, 0, 0)) assert_raises(OutsideImage, nni.__getitem__, (0, 8.2, 0))
def aux_structures_generate( scheme, lmax = 12 ) : """Compute the auxiliary data structures to generate the high-resolution kernels. Parameters ---------- scheme : Scheme class Acquisition scheme of the acquired signal lmax : int Maximum SH order to use for the rotation phase (default : 12) Returns ------- idx_IN : numpy array Indices of the samples belonging to each shell idx_OUT : numpy array Indices of the SH corresponding to each shell """ nSH = (lmax+1)*(lmax+2)//2 idx_IN = [] idx_OUT = [] for s in xrange( len(scheme.shells) ) : idx_IN.append( range(500*s,500*(s+1)) ) idx_OUT.append( range(nSH*s,nSH*(s+1)) ) return ( idx_IN, idx_OUT )
def aux_structures_generate(scheme, lmax=12): """Compute the auxiliary data structures to generate the high-resolution kernels. Parameters ---------- scheme : Scheme class Acquisition scheme of the acquired signal lmax : int Maximum SH order to use for the rotation phase (default : 12) Returns ------- idx_IN : numpy array Indices of the samples belonging to each shell idx_OUT : numpy array Indices of the SH corresponding to each shell """ nSH = (lmax + 1) * (lmax + 2) // 2 idx_IN = [] idx_OUT = [] for s in xrange(len(scheme.shells)): idx_IN.append(range(500 * s, 500 * (s + 1))) idx_OUT.append(range(nSH * s, nSH * (s + 1))) return (idx_IN, idx_OUT)
def peak_directions_nl(sphere_eval, relative_peak_threshold=.25, min_separation_angle=25, sphere=default_sphere, xtol=1e-7): """Non Linear Direction Finder. Parameters ---------- sphere_eval : callable A function which can be evaluated on a sphere. relative_peak_threshold : float Only return peaks greater than ``relative_peak_threshold * m`` where m is the largest peak. min_separation_angle : float in [0, 90] The minimum distance between directions. If two peaks are too close only the larger of the two is returned. sphere : Sphere A discrete Sphere. The points on the sphere will be used for initial estimate of maximums. xtol : float Relative tolerance for optimization. Returns ------- directions : array (N, 3) Points on the sphere corresponding to N local maxima on the sphere. values : array (N,) Value of sphere_eval at each point on directions. """ # Find discrete peaks for use as seeds in non-linear search discrete_values = sphere_eval(sphere) values, indices = local_maxima(discrete_values, sphere.edges) seeds = np.column_stack([sphere.theta[indices], sphere.phi[indices]]) # Helper function def _helper(x): sphere = Sphere(theta=x[0], phi=x[1]) return -sphere_eval(sphere) # Non-linear search num_seeds = len(seeds) theta = np.empty(num_seeds) phi = np.empty(num_seeds) for i in xrange(num_seeds): peak = opt.fmin(_helper, seeds[i], xtol=xtol, disp=False) theta[i], phi[i] = peak # Evaluate on new-found peaks small_sphere = Sphere(theta=theta, phi=phi) values = sphere_eval(small_sphere) # Sort in descending order order = values.argsort()[::-1] values = values[order] directions = small_sphere.vertices[order] # Remove directions that are too small n = search_descending(values, relative_peak_threshold) directions = directions[:n] # Remove peaks too close to each-other directions, idx = remove_similar_vertices(directions, min_separation_angle, return_index=True) values = values[idx] return directions, values
def load_data(self, dwi_filename='DWI.nii', scheme_filename='DWI.scheme', mask_filename=None, b0_thr=0): """Load the diffusion signal and its corresponding acquisition scheme. Parameters ---------- dwi_filename : string The file name of the DWI data, relative to the subject folder (default : 'DWI.nii') scheme_filename : string The file name of the corresponding acquisition scheme (default : 'DWI.scheme') mask_filename : string The file name of the (optional) binary mask (default : None) b0_thr : float The threshold below which a b-value is considered a b0 (default : 0) """ # Loading data, acquisition scheme and mask (optional) tic = time.time() print('\n-> Loading data:') print('\t* DWI signal...') self.set_config('dwi_filename', dwi_filename) self.niiDWI = nibabel.load( pjoin(self.get_config('DATA_path'), dwi_filename)) self.niiDWI_img = self.niiDWI.get_data().astype(np.float32) hdr = self.niiDWI.header if nibabel.__version__ >= '2.0.0' else self.niiDWI.get_header( ) self.set_config('dim', self.niiDWI_img.shape[:3]) self.set_config('pixdim', tuple(hdr.get_zooms()[:3])) print('\t\t- dim = %d x %d x %d x %d' % self.niiDWI_img.shape) print('\t\t- pixdim = %.3f x %.3f x %.3f' % self.get_config('pixdim')) # Scale signal intensities (if necessary) if (np.isfinite(hdr['scl_slope']) and np.isfinite(hdr['scl_inter']) and hdr['scl_slope'] != 0 and (hdr['scl_slope'] != 1 or hdr['scl_inter'] != 0)): print('\t\t- rescaling data', end=' ') self.niiDWI_img = self.niiDWI_img * hdr['scl_slope'] + hdr[ 'scl_inter'] print("[OK]") print('\t* Acquisition scheme...') self.set_config('scheme_filename', scheme_filename) self.set_config('b0_thr', b0_thr) self.scheme = amico.scheme.Scheme( pjoin(self.get_config('DATA_path'), scheme_filename), b0_thr) print('\t\t- %d samples, %d shells' % (self.scheme.nS, len(self.scheme.shells))) print('\t\t- %d @ b=0' % (self.scheme.b0_count), end=' ') for i in xrange(len(self.scheme.shells)): print(', %d @ b=%.1f' % (len( self.scheme.shells[i]['idx']), self.scheme.shells[i]['b']), end=' ') print() if self.scheme.nS != self.niiDWI_img.shape[3]: raise ValueError('Scheme does not match with DWI data') print('\t* Binary mask...') if mask_filename is not None: self.niiMASK = nibabel.load( pjoin(self.get_config('DATA_path'), mask_filename)) self.niiMASK_img = self.niiMASK.get_data().astype(np.uint8) niiMASK_hdr = self.niiMASK.header if nibabel.__version__ >= '2.0.0' else self.niiMASK.get_header( ) print('\t\t- dim = %d x %d x %d' % self.niiMASK_img.shape[:3]) print('\t\t- pixdim = %.3f x %.3f x %.3f' % niiMASK_hdr.get_zooms()[:3]) if self.get_config('dim') != self.niiMASK_img.shape[:3]: raise ValueError('MASK geometry does not match with DWI data') else: self.niiMASK = None self.niiMASK_img = np.ones(self.get_config('dim')) print('\t\t- not specified') print('\t\t- voxels = %d' % np.count_nonzero(self.niiMASK_img)) # Preprocessing print('\n-> Preprocessing:') if self.get_config('doDebiasSignal'): print('\t* Debiasing signal...\n') sys.stdout.flush() if self.get_config('DWI-SNR') == None: raise ValueError( "Set noise variance for debiasing (eg. 
ae.set_config('RicianNoiseSigma', sigma))" ) self.niiDWI_img = debiasRician(self.niiDWI_img, self.get_config('DWI-SNR'), self.niiMASK_img, self.scheme) if self.get_config('doNormalizeSignal'): print('\t* Normalizing to b0...', end=' ') sys.stdout.flush() if self.scheme.b0_count > 0: self.mean_b0s = np.mean(self.niiDWI_img[:, :, :, self.scheme.b0_idx], axis=3) else: raise ValueError('No b0 volume to normalize signal with') norm_factor = self.mean_b0s.copy() idx = self.mean_b0s <= 0 norm_factor[idx] = 1 norm_factor = 1 / norm_factor norm_factor[idx] = 0 for i in xrange(self.scheme.nS): self.niiDWI_img[:, :, :, i] *= norm_factor print('[ min=%.2f, mean=%.2f, max=%.2f ]' % (self.niiDWI_img.min(), self.niiDWI_img.mean(), self.niiDWI_img.max())) if self.get_config('doMergeB0'): print('\t* Merging multiple b0 volume(s)...', end=' ') mean = np.expand_dims(np.mean(self.niiDWI_img[:, :, :, self.scheme.b0_idx], axis=3), axis=3) self.niiDWI_img = np.concatenate( (mean, self.niiDWI_img[:, :, :, self.scheme.dwi_idx]), axis=3) else: print('\t* Keeping all b0 volume(s)...') print(' [ %.1f seconds ]' % (time.time() - tic))
def save_results(self, path_suffix=None): """Save the output (directions, maps etc). Parameters ---------- path_suffix : string Text to be appended to the output path (default : None) """ if self.RESULTS is None: raise RuntimeError( 'Model not fitted to the data; call "fit()" first.') if self.get_config('OUTPUT_path') is None: RESULTS_path = pjoin('AMICO', self.model.id) if path_suffix: RESULTS_path = RESULTS_path + '_' + path_suffix self.RESULTS['RESULTS_path'] = RESULTS_path print('\n-> Saving output to "%s/*":' % RESULTS_path) # delete previous output RESULTS_path = pjoin(self.get_config('DATA_path'), RESULTS_path) else: RESULTS_path = self.get_config('OUTPUT_path') if path_suffix: RESULTS_path = RESULTS_path + '_' + path_suffix self.RESULTS['RESULTS_path'] = RESULTS_path print('\n-> Saving output to "%s/*":' % RESULTS_path) if not exists(RESULTS_path): makedirs(RESULTS_path) else: for f in glob.glob(pjoin(RESULTS_path, '*')): remove(f) # configuration print('\t- configuration', end=' ') with open(pjoin(RESULTS_path, 'config.pickle'), 'wb+') as fid: pickle.dump(self.CONFIG, fid, protocol=2) print(' [OK]') # estimated orientations print('\t- FIT_dir.nii.gz', end=' ') niiMAP_img = self.RESULTS['DIRs'] affine = self.niiDWI.affine if nibabel.__version__ >= '2.0.0' else self.niiDWI.get_affine( ) niiMAP = nibabel.Nifti1Image(niiMAP_img, affine) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header( ) niiMAP_hdr['cal_min'] = -1 niiMAP_hdr['cal_max'] = 1 niiMAP_hdr['scl_slope'] = 1 niiMAP_hdr['scl_inter'] = 0 nibabel.save(niiMAP, pjoin(RESULTS_path, 'FIT_dir.nii.gz')) print(' [OK]') # fitting error if self.get_config('doComputeNRMSE'): print('\t- FIT_nrmse.nii.gz', end=' ') niiMAP_img = self.RESULTS['NRMSE'] niiMAP = nibabel.Nifti1Image(niiMAP_img, affine) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header( ) niiMAP_hdr['cal_min'] = 0 niiMAP_hdr['cal_max'] = 1 niiMAP_hdr['scl_slope'] = 1 niiMAP_hdr['scl_inter'] = 0 nibabel.save(niiMAP, pjoin(RESULTS_path, 'FIT_nrmse.nii.gz')) print(' [OK]') if self.get_config('doSaveCorrectedDWI'): if self.model.name == 'Free-Water': print('\t- dwi_fw_corrected.nii.gz', end=' ') niiMAP_img = self.RESULTS['DWI_corrected'] niiMAP = nibabel.Nifti1Image(niiMAP_img, affine) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header( ) niiMAP_hdr['cal_min'] = 0 niiMAP_hdr['cal_max'] = 1 nibabel.save(niiMAP, pjoin(RESULTS_path, 'dwi_fw_corrected.nii.gz')) print(' [OK]') else: print( ' doSaveCorrectedDWI option not supported for %s model' % self.model.name) # voxelwise maps for i in xrange(len(self.model.maps_name)): print('\t- FIT_%s.nii.gz' % self.model.maps_name[i], end=' ') niiMAP_img = self.RESULTS['MAPs'][:, :, :, i] niiMAP = nibabel.Nifti1Image(niiMAP_img, affine) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header( ) niiMAP_hdr['descrip'] = self.model.maps_descr[i] niiMAP_hdr['cal_min'] = niiMAP_img.min() niiMAP_hdr['cal_max'] = niiMAP_img.max() niiMAP_hdr['scl_slope'] = 1 niiMAP_hdr['scl_inter'] = 0 nibabel.save( niiMAP, pjoin(RESULTS_path, 'FIT_%s.nii.gz' % self.model.maps_name[i])) print(' [OK]') print(' [ DONE ]')
def fit(self): """Fit the model to the data iterating over all voxels (in the mask) one after the other. Call the appropriate fit() method of the actual model used. """ if self.niiDWI is None: raise RuntimeError('Data not loaded; call "load_data()" first.') if self.model is None: raise RuntimeError('Model not set; call "set_model()" first.') if self.KERNELS is None: raise RuntimeError( 'Response functions not generated; call "generate_kernels()" and "load_kernels()" first.' ) if self.KERNELS['model'] != self.model.id: raise RuntimeError( 'Response functions were not created with the same model.') self.set_config('fit_time', None) totVoxels = np.count_nonzero(self.niiMASK_img) print('\n-> Fitting "%s" model to %d voxels:' % (self.model.name, totVoxels)) # setup fitting directions peaks_filename = self.get_config('peaks_filename') if peaks_filename is None: DIRs = np.zeros([ self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], 3 ], dtype=np.float32) nDIR = 1 if self.get_config('doMergeB0'): gtab = gradient_table( np.hstack((0, self.scheme.b[self.scheme.dwi_idx])), np.vstack((np.zeros( (1, 3)), self.scheme.raw[self.scheme.dwi_idx, :3]))) else: gtab = gradient_table(self.scheme.b, self.scheme.raw[:, :3]) DTI = dti.TensorModel(gtab) else: niiPEAKS = nibabel.load( pjoin(self.get_config('DATA_path'), peaks_filename)) DIRs = niiPEAKS.get_data().astype(np.float32) nDIR = np.floor(DIRs.shape[3] / 3) print('\t* peaks dim = %d x %d x %d x %d' % DIRs.shape[:4]) if DIRs.shape[:3] != self.niiMASK_img.shape[:3]: raise ValueError('PEAKS geometry does not match with DWI data') # setup other output files MAPs = np.zeros([ self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], len(self.model.maps_name) ], dtype=np.float32) if self.get_config('doComputeNRMSE'): NRMSE = np.zeros([ self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2] ], dtype=np.float32) if self.get_config('doSaveCorrectedDWI'): DWI_corrected = np.zeros(self.niiDWI.shape, dtype=np.float32) # fit the model to the data # ========================= t = time.time() progress = ProgressBar(n=totVoxels, prefix=" ", erase=True) for iz in xrange(self.niiMASK_img.shape[2]): for iy in xrange(self.niiMASK_img.shape[1]): for ix in xrange(self.niiMASK_img.shape[0]): if self.niiMASK_img[ix, iy, iz] == 0: continue # prepare the signal y = self.niiDWI_img[ix, iy, iz, :].astype(np.float64) y[y < 0] = 0 # [NOTE] this should not happen! # fitting directions if peaks_filename is None: dirs = DTI.fit(y).directions[0] else: dirs = DIRs[ix, iy, iz, :] # dispatch to the right handler for each model MAPs[ix, iy, iz, :], DIRs[ix, iy, iz, :], x, A = self.model.fit( y, dirs.reshape(-1, 3), self.KERNELS, self.get_config('solver_params')) # compute fitting error if self.get_config('doComputeNRMSE'): y_est = np.dot(A, x) den = np.sum(y**2) NRMSE[ix, iy, iz] = np.sqrt(np.sum( (y - y_est)**2) / den) if den > 1e-16 else 0 if self.get_config('doSaveCorrectedDWI'): if self.model.name == 'Free-Water': n_iso = len(self.model.d_isos) x[-1 * n_iso:] = 0 #print(y, x, b0, A.shape) if self.get_config('doNormalizeSignal' ) and self.scheme.b0_count > 0: y_fw_corrected = np.dot( A, x) * self.mean_b0s[ix, iy, iz] else: y_fw_corrected = np.dot(A, x) if self.get_config('doKeepb0Intact' ) and self.scheme.b0_count > 0: # put original b0 data back in. 
y_fw_corrected[self.scheme.b0_idx] = y[ self.scheme.b0_idx] * self.mean_b0s[ix, iy, iz] DWI_corrected[ix, iy, iz, :] = y_fw_corrected progress.update() self.set_config('fit_time', time.time() - t) print(' [ %s ]' % (time.strftime( "%Hh %Mm %Ss", time.gmtime(self.get_config('fit_time'))))) # store results self.RESULTS = {} self.RESULTS['DIRs'] = DIRs self.RESULTS['MAPs'] = MAPs if self.get_config('doComputeNRMSE'): self.RESULTS['NRMSE'] = NRMSE if self.get_config('doSaveCorrectedDWI'): self.RESULTS['DWI_corrected'] = DWI_corrected
def save_results( self, path_suffix = None ) : """Save the output (directions, maps etc). Parameters ---------- path_suffix : string Text to be appended to the output path (default : None) """ if self.RESULTS is None : raise RuntimeError( 'Model not fitted to the data; call "fit()" first.' ) if self.get_config('OUTPUT_path') is None: RESULTS_path = pjoin( 'AMICO', self.model.id ) if path_suffix : RESULTS_path = RESULTS_path +'_'+ path_suffix self.RESULTS['RESULTS_path'] = RESULTS_path print('\n-> Saving output to "%s/*":' % RESULTS_path) # delete previous output RESULTS_path = pjoin( self.get_config('DATA_path'), RESULTS_path ) else: RESULTS_path = self.get_config('OUTPUT_path') if path_suffix : RESULTS_path = RESULTS_path +'_'+ path_suffix self.RESULTS['RESULTS_path'] = RESULTS_path print('\n-> Saving output to "%s/*":' % RESULTS_path) if not exists( RESULTS_path ) : makedirs( RESULTS_path ) else : for f in glob.glob( pjoin(RESULTS_path,'*') ) : remove( f ) # configuration print('\t- configuration', end=' ') with open( pjoin(RESULTS_path,'config.pickle'), 'wb+' ) as fid : pickle.dump( self.CONFIG, fid, protocol=2 ) print(' [OK]') # estimated orientations print('\t- FIT_dir.nii.gz', end=' ') niiMAP_img = self.RESULTS['DIRs'] affine = self.niiDWI.affine if nibabel.__version__ >= '2.0.0' else self.niiDWI.get_affine() niiMAP = nibabel.Nifti1Image( niiMAP_img, affine ) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header() niiMAP_hdr['cal_min'] = -1 niiMAP_hdr['cal_max'] = 1 niiMAP_hdr['scl_slope'] = 1 niiMAP_hdr['scl_inter'] = 0 nibabel.save( niiMAP, pjoin(RESULTS_path, 'FIT_dir.nii.gz') ) print(' [OK]') # fitting error if self.get_config('doComputeNRMSE') : print('\t- FIT_nrmse.nii.gz', end=' ') niiMAP_img = self.RESULTS['NRMSE'] niiMAP = nibabel.Nifti1Image( niiMAP_img, affine ) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header() niiMAP_hdr['cal_min'] = 0 niiMAP_hdr['cal_max'] = 1 niiMAP_hdr['scl_slope'] = 1 niiMAP_hdr['scl_inter'] = 0 nibabel.save( niiMAP, pjoin(RESULTS_path, 'FIT_nrmse.nii.gz') ) print(' [OK]') if self.get_config('doSaveCorrectedDWI') : if self.model.name == 'Free-Water' : print('\t- dwi_fw_corrected.nii.gz', end=' ') niiMAP_img = self.RESULTS['DWI_corrected'] niiMAP = nibabel.Nifti1Image( niiMAP_img, affine ) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header() niiMAP_hdr['cal_min'] = 0 niiMAP_hdr['cal_max'] = 1 nibabel.save( niiMAP, pjoin(RESULTS_path, 'dwi_fw_corrected.nii.gz') ) print(' [OK]') else : print(' doSaveCorrectedDWI option not supported for %s model' % self.model.name) # voxelwise maps for i in xrange( len(self.model.maps_name) ) : print('\t- FIT_%s.nii.gz' % self.model.maps_name[i], end=' ') niiMAP_img = self.RESULTS['MAPs'][:,:,:,i] niiMAP = nibabel.Nifti1Image( niiMAP_img, affine ) niiMAP_hdr = niiMAP.header if nibabel.__version__ >= '2.0.0' else niiMAP.get_header() niiMAP_hdr['descrip'] = self.model.maps_descr[i] niiMAP_hdr['cal_min'] = niiMAP_img.min() niiMAP_hdr['cal_max'] = niiMAP_img.max() niiMAP_hdr['scl_slope'] = 1 niiMAP_hdr['scl_inter'] = 0 nibabel.save( niiMAP, pjoin(RESULTS_path, 'FIT_%s.nii.gz' % self.model.maps_name[i] ) ) print(' [OK]') print(' [ DONE ]')
def sphere_funcs(sphere_values, sphere, image=None, colormap='jet', scale=2.2, norm=True, radial_scale=True): """Plot many morphed spherical functions simultaneously. Parameters ---------- sphere_values : (M,) or (X, M) or (X, Y, M) or (X, Y, Z, M) ndarray Values on the sphere. sphere : Sphere image : None, Not yet supported. colormap : None or 'jet' If None then no color is used. scale : float, Distance between spheres. norm : bool, Normalize `sphere_values`. radial_scale : bool, Scale sphere points according to odf values. Returns ------- actor : vtkActor Spheres. Examples -------- >>> from dipy.viz import fvtk >>> r = fvtk.ren() >>> odfs = np.ones((5, 5, 724)) >>> odfs[..., 0] = 2. >>> from dipy.data import get_sphere >>> sphere = get_sphere('symmetric724') >>> fvtk.add(r, fvtk.sphere_funcs(odfs, sphere)) >>> #fvtk.show(r) """ sphere_values = np.asarray(sphere_values) if sphere_values.ndim > 4: raise ValueError("Wrong shape") sphere_values = _makeNd(sphere_values, 4) grid_shape = np.array(sphere_values.shape[:3]) faces = np.asarray(sphere.faces, dtype=int) vertices = sphere.vertices if sphere_values.shape[-1] != sphere.vertices.shape[0]: msg = 'Sphere.vertices.shape[0] should be the same as the ' msg += 'last dimensions of sphere_values i.e. sphere_values.shape[-1]' raise ValueError(msg) list_sq = [] list_cols = [] for ijk in np.ndindex(*grid_shape): m = sphere_values[ijk].copy() if norm: m /= abs(m).max() if radial_scale: xyz = vertices.T * m else: xyz = vertices.T.copy() xyz += scale * (ijk - grid_shape / 2.)[:, None] xyz = xyz.T list_sq.append(xyz) if colormap is not None: cols = create_colormap(m, colormap) cols = np.interp(cols, [0, 1], [0, 255]).astype('ubyte') list_cols.append(cols) points = vtk.vtkPoints() triangles = vtk.vtkCellArray() if colormap is not None: colors = vtk.vtkUnsignedCharArray() colors.SetNumberOfComponents(3) colors.SetName("Colors") for k in xrange(len(list_sq)): xyz = list_sq[k] if colormap is not None: cols = list_cols[k] for i in xrange(xyz.shape[0]): points.InsertNextPoint(*xyz[i]) if colormap is not None: colors.InsertNextTuple3(*cols[i]) for j in xrange(faces.shape[0]): triangle = vtk.vtkTriangle() triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0]) triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0]) triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0]) triangles.InsertNextCell(triangle) del triangle polydata = vtk.vtkPolyData() polydata.SetPoints(points) polydata.SetPolys(triangles) if colormap is not None: polydata.GetPointData().SetScalars(colors) polydata.Modified() mapper = vtk.vtkPolyDataMapper() if major_version <= 5: mapper.SetInput(polydata) else: mapper.SetInputData(polydata) actor = vtk.vtkActor() actor.SetMapper(mapper) return actor
def peak_directions_nl(sphere_eval, relative_peak_threshold=.25, min_separation_angle=25, sphere=default_sphere, xtol=1e-7): """Non Linear Direction Finder Parameters ---------- sphere_eval : callable A function which can be evaluated on a sphere. relative_peak_threshold : float Only return peaks greater than ``relative_peak_threshold * m`` where m is the largest peak. min_separation_angle : float in [0, 90] The minimum distance between directions. If two peaks are too close only the larger of the two is returned. sphere : Sphere A discrete Sphere. The points on the sphere will be used for initial estimate of maximums. xtol : float Relative tolerance for optimization. Returns ------- directions : array (N, 3) Points on the sphere corresponding to N local maxima on the sphere. values : array (N,) Value of sphere_eval at each point on directions. """ # Find discrete peaks for use as seeds in non-linear search discrete_values = sphere_eval(sphere) values, indices = local_maxima(discrete_values, sphere.edges) seeds = np.column_stack([sphere.theta[indices], sphere.phi[indices]]) # Helper function def _helper(x): sphere = Sphere(theta=x[0], phi=x[1]) return -sphere_eval(sphere) # Non-linear search num_seeds = len(seeds) theta = np.empty(num_seeds) phi = np.empty(num_seeds) for i in xrange(num_seeds): peak = opt.fmin(_helper, seeds[i], xtol=xtol, disp=False) theta[i], phi[i] = peak # Evaluate on new-found peaks small_sphere = Sphere(theta=theta, phi=phi) values = sphere_eval(small_sphere) # Sort in descending order order = values.argsort()[::-1] values = values[order] directions = small_sphere.vertices[order] # Remove directions that are too small n = search_descending(values, relative_peak_threshold) directions = directions[:n] # Remove peaks too close to each-other directions, idx = remove_similar_vertices(directions, min_separation_angle, return_index=True) values = values[idx] return directions, values
def tensor(evals, evecs, scalar_colors=None, sphere=None, scale=2.2, norm=True): """Plot many tensors as ellipsoids simultaneously. Parameters ---------- evals : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray eigenvalues evecs : (3, 3) or (X, 3, 3) or (X, Y, 3, 3) or (X, Y, Z, 3, 3) ndarray eigenvectors scalar_colors : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray RGB colors used to show the tensors Default None, color the ellipsoids using ``color_fa`` sphere : Sphere, this sphere will be transformed to the tensor ellipsoid Default is None which uses a symmetric sphere with 724 points. scale : float, distance between ellipsoids. norm : boolean, Normalize `evals`. Returns ------- actor : vtkActor Ellipsoids Examples -------- >>> from dipy.viz import fvtk >>> r = fvtk.ren() >>> evals = np.array([1.4, .35, .35]) * 10 ** (-3) >>> evecs = np.eye(3) >>> from dipy.data import get_sphere >>> sphere = get_sphere('symmetric724') >>> fvtk.add(r, fvtk.tensor(evals, evecs, sphere=sphere)) >>> #fvtk.show(r) """ evals = np.asarray(evals) if evals.ndim > 4: raise ValueError("Wrong shape") evals = _makeNd(evals, 4) evecs = _makeNd(evecs, 5) grid_shape = np.array(evals.shape[:3]) if sphere is None: from dipy.data import get_sphere sphere = get_sphere('symmetric724') faces = np.asarray(sphere.faces, dtype=int) vertices = sphere.vertices colors = vtk.vtkUnsignedCharArray() colors.SetNumberOfComponents(3) colors.SetName("Colors") if scalar_colors is None: from dipy.reconst.dti import color_fa, fractional_anisotropy cfa = color_fa(fractional_anisotropy(evals), evecs) else: cfa = _makeNd(scalar_colors, 4) list_sq = [] list_cols = [] for ijk in ndindex(grid_shape): ea = evals[ijk] if norm: ea /= ea.max() ea = np.diag(ea.copy()) ev = evecs[ijk].copy() xyz = np.dot(ev, np.dot(ea, vertices.T)) xyz += scale * (ijk - grid_shape / 2.)[:, None] xyz = xyz.T list_sq.append(xyz) acolor = np.zeros(xyz.shape) acolor[:, :] = np.interp(cfa[ijk], [0, 1], [0, 255]) list_cols.append(acolor.astype('ubyte')) points = vtk.vtkPoints() triangles = vtk.vtkCellArray() for k in xrange(len(list_sq)): xyz = list_sq[k] cols = list_cols[k] for i in xrange(xyz.shape[0]): points.InsertNextPoint(*xyz[i]) colors.InsertNextTuple3(*cols[i]) for j in xrange(faces.shape[0]): triangle = vtk.vtkTriangle() triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0]) triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0]) triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0]) triangles.InsertNextCell(triangle) del triangle polydata = vtk.vtkPolyData() polydata.SetPoints(points) polydata.SetPolys(triangles) polydata.GetPointData().SetScalars(colors) polydata.Modified() mapper = vtk.vtkPolyDataMapper() if major_version <= 5: mapper.SetInput(polydata) else: mapper.SetInputData(polydata) actor = vtk.vtkActor() actor.SetMapper(mapper) return actor
def load_from_table( self, data, b0_thr = 0 ) : """Build the structure from an input matrix. The first three columns represent the gradient directions. Then, we accept two formats to describe each gradient: - if the shape of data is Nx4, the 4^ column is the b-value; - if the shape of data is Nx7, the last 4 columns are, respectively, the gradient strength, big delta, small delta and TE. Parameters ---------- data : numpy.ndarray Matrix containing tall the values. b0_thr : float The threshold on the b-values to identify the b0 images (default: 0) """ if data.ndim == 1 : data = np.expand_dims( data, axis=0 ) self.raw = data # number of samples # self.nS = self.raw.shape[0] JL: incomplete getter/setter incompatible with 3.6; this is never used any as getter always returns derived value # set/calculate the b-values if self.raw.shape[1] == 4 : self.version = 0 self.b = self.raw[:,3] elif self.raw.shape[1] == 7 : self.version = 1 self.b = ( 267.513e6 * self.raw[:,3] * self.raw[:,5] )**2 * (self.raw[:,4] - self.raw[:,5]/3.0) * 1e-6 # in mm^2/s else : raise ValueError( 'Unrecognized scheme format' ) # store information about the volumes self.b0_thr = b0_thr self.b0_idx = np.where( self.b <= b0_thr )[0] self.b0_count = len( self.b0_idx ) self.dwi_idx = np.where( self.b > b0_thr )[0] self.dwi_count = len( self.dwi_idx ) # ensure the directions are in the spherical range [0,180]x[0,180] idx = np.where( self.raw[:,1] < 0 )[0] self.raw[idx,0:3] = -self.raw[idx,0:3] # store information about each shell in a dictionary self.shells = [] tmp = np.ascontiguousarray( self.raw[:,3:] ) schemeUnique, schemeUniqueInd = np.unique( tmp.view([('', tmp.dtype)]*tmp.shape[1]), return_index=True ) schemeUnique = schemeUnique.view(tmp.dtype).reshape((schemeUnique.shape[0], tmp.shape[1])) schemeUnique = [tmp[index] for index in sorted(schemeUniqueInd)] bUnique = [self.b[index] for index in sorted(schemeUniqueInd)] for i in xrange(len(schemeUnique)) : if bUnique[i] <= b0_thr : continue shell = {} shell['b'] = bUnique[i] if self.version == 0 : shell['G'] = None shell['Delta'] = None shell['delta'] = None shell['TE'] = None else : shell['G'] = schemeUnique[i][0] shell['Delta'] = schemeUnique[i][1] shell['delta'] = schemeUnique[i][2] shell['TE'] = schemeUnique[i][3] shell['idx'] = np.where((tmp == schemeUnique[i]).all(axis=1))[0] shell['grad'] = self.raw[shell['idx'],0:3] self.shells.append( shell )
def intersect_sphere(xyz, center, radius): """ If any segment of the track is intersecting with a sphere of specific center and radius return True otherwise False Parameters ---------- xyz : array, shape (N,3) representing x,y,z of the N points of the track center : array, shape (3,) center of the sphere radius : float radius of the sphere Returns ------- tf : {True, False} True if track `xyz` intersects sphere >>> from dipy.tracking.metrics import intersect_sphere >>> line=np.array(([0,0,0],[1,1,1],[2,2,2])) >>> sph_cent=np.array([1,1,1]) >>> sph_radius = 1 >>> intersect_sphere(line,sph_cent,sph_radius) True Notes ----- The ray to sphere intersection method used here is similar with http://local.wasp.uwa.edu.au/~pbourke/geometry/sphereline/ http://local.wasp.uwa.edu.au/~pbourke/geometry/sphereline/source.cpp we just applied it for every segment neglecting the intersections where the intersecting points are not inside the segment """ center = np.array(center) # print center lt = xyz.shape[0] for i in xrange(lt-1): # first point x1 = xyz[i] # second point x2 = xyz[i+1] # do the calculations as given in the Notes x = x2-x1 a = np.inner(x, x) x1c = x1-center b = 2*np.inner(x, x1c) c = (np.inner(center, center)+np.inner(x1, x1)-2*np.inner(center, x1) - radius**2) bb4ac = b*b-4*a*c # print 'bb4ac',bb4ac if abs(a) < np.finfo(float).eps or bb4ac < 0: # too small segment or # no intersection continue if bb4ac == 0: # one intersection point p mu = -b/2*a p = x1+mu*x # check if point is inside the segment # print 'p',p if np.inner(p-x1, p-x1) <= a: return True if bb4ac > 0: # two intersection points p1 and p2 mu = (-b+np.sqrt(bb4ac))/(2*a) p1 = x1+mu*x mu = (-b-np.sqrt(bb4ac))/(2*a) p2 = x1+mu*x # check if points are inside the line segment # print 'p1,p2',p1,p2 if np.inner(p1-x1, p1-x1) <= a or np.inner(p2-x1, p2-x1) <= a: return True return False
def fit( self ) :
    """Fit the model to the data, iterating over all voxels (in the mask) one
    after the other and calling the fit() method of the actual model used.
    """
    if self.niiDWI is None :
        raise RuntimeError( 'Data not loaded; call "load_data()" first.' )
    if self.model is None :
        raise RuntimeError( 'Model not set; call "set_model()" first.' )
    if self.KERNELS is None :
        raise RuntimeError( 'Response functions not generated; call "generate_kernels()" and "load_kernels()" first.' )
    if self.KERNELS['model'] != self.model.id :
        raise RuntimeError( 'Response functions were not created with the same model.' )

    self.set_config('fit_time', None)
    totVoxels = np.count_nonzero(self.niiMASK_img)
    print('\n-> Fitting "%s" model to %d voxels:' % ( self.model.name, totVoxels ))

    # setup fitting directions
    peaks_filename = self.get_config('peaks_filename')
    if peaks_filename is None :
        DIRs = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], 3], dtype=np.float32 )
        nDIR = 1
        if self.get_config('doMergeB0'):
            gtab = gradient_table( np.hstack((0,self.scheme.b[self.scheme.dwi_idx])), np.vstack((np.zeros((1,3)),self.scheme.raw[self.scheme.dwi_idx,:3])) )
        else:
            gtab = gradient_table( self.scheme.b, self.scheme.raw[:,:3] )
        DTI = dti.TensorModel( gtab )
    else :
        niiPEAKS = nibabel.load( pjoin( self.get_config('DATA_path'), peaks_filename) )
        DIRs = niiPEAKS.get_data().astype(np.float32)
        nDIR = np.floor( DIRs.shape[3]/3 )
        print('\t* peaks dim = %d x %d x %d x %d' % DIRs.shape[:4])
        if DIRs.shape[:3] != self.niiMASK_img.shape[:3] :
            raise ValueError( 'PEAKS geometry does not match with DWI data' )

    # setup other output files
    MAPs = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2], len(self.model.maps_name)], dtype=np.float32 )
    if self.get_config('doComputeNRMSE') :
        NRMSE = np.zeros( [self.get_config('dim')[0], self.get_config('dim')[1], self.get_config('dim')[2]], dtype=np.float32 )
    if self.get_config('doSaveCorrectedDWI') :
        DWI_corrected = np.zeros(self.niiDWI.shape, dtype=np.float32)

    # fit the model to the data
    # =========================
    t = time.time()
    progress = ProgressBar( n=totVoxels, prefix="   ", erase=True )
    for iz in xrange(self.niiMASK_img.shape[2]) :
        for iy in xrange(self.niiMASK_img.shape[1]) :
            for ix in xrange(self.niiMASK_img.shape[0]) :
                if self.niiMASK_img[ix,iy,iz]==0 :
                    continue

                # prepare the signal
                y = self.niiDWI_img[ix,iy,iz,:].astype(np.float64)
                y[ y < 0 ] = 0 # [NOTE] this should not happen!

                # fitting directions
                if peaks_filename is None :
                    dirs = DTI.fit( y ).directions[0]
                else :
                    dirs = DIRs[ix,iy,iz,:]

                # dispatch to the right handler for each model
                MAPs[ix,iy,iz,:], DIRs[ix,iy,iz,:], x, A = self.model.fit( y, dirs.reshape(-1,3), self.KERNELS, self.get_config('solver_params') )

                # compute fitting error
                if self.get_config('doComputeNRMSE') :
                    y_est = np.dot( A, x )
                    den = np.sum(y**2)
                    NRMSE[ix,iy,iz] = np.sqrt( np.sum((y-y_est)**2) / den ) if den > 1e-16 else 0

                if self.get_config('doSaveCorrectedDWI') :
                    if self.model.name == 'Free-Water' :
                        n_iso = len(self.model.d_isos)
                        x[-1*n_iso:] = 0

                        if self.get_config('doNormalizeSignal') and self.scheme.b0_count > 0 :
                            y_fw_corrected = np.dot( A, x ) * self.mean_b0s[ix,iy,iz]
                        else :
                            y_fw_corrected = np.dot( A, x )

                        if self.get_config('doKeepb0Intact') and self.scheme.b0_count > 0 :
                            # put original b0 data back in
                            y_fw_corrected[self.scheme.b0_idx] = y[self.scheme.b0_idx]*self.mean_b0s[ix,iy,iz]

                        DWI_corrected[ix,iy,iz,:] = y_fw_corrected

                progress.update()

    self.set_config('fit_time', time.time()-t)
    print('   [ %s ]' % ( time.strftime("%Hh %Mm %Ss", time.gmtime(self.get_config('fit_time')) ) ))

    # store results
    self.RESULTS = {}
    self.RESULTS['DIRs'] = DIRs
    self.RESULTS['MAPs'] = MAPs
    if self.get_config('doComputeNRMSE') :
        self.RESULTS['NRMSE'] = NRMSE
    if self.get_config('doSaveCorrectedDWI') :
        self.RESULTS['DWI_corrected'] = DWI_corrected
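# A typical end-to-end use of fit() follows the usual AMICO workflow. The sketch
# below is a hedged example, not the only way to drive the library: the study and
# subject folder names and the chosen model are illustrative placeholders.
import amico

amico.core.setup()                                   # precompute auxiliary data structures (one-time)

ae = amico.Evaluation('Study01', 'Subject01')        # illustrative study/subject folders
ae.load_data(dwi_filename='DWI.nii', scheme_filename='DWI.scheme',
             mask_filename='mask.nii', b0_thr=0)
ae.set_model('NODDI')                                # any registered model id works here
ae.generate_kernels()                                # compute the response functions once
ae.load_kernels()                                    # resample them to the subject's scheme
ae.fit()                                             # the method shown above
ae.save_results()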
def load_data( self, dwi_filename = 'DWI.nii', scheme_filename = 'DWI.scheme', mask_filename = None, b0_thr = 0 ) :
    """Load the diffusion signal and its corresponding acquisition scheme.

    Parameters
    ----------
    dwi_filename : string
        The file name of the DWI data, relative to the subject folder (default : 'DWI.nii')
    scheme_filename : string
        The file name of the corresponding acquisition scheme (default : 'DWI.scheme')
    mask_filename : string
        The file name of the (optional) binary mask (default : None)
    b0_thr : float
        The threshold below which a b-value is considered a b0 (default : 0)
    """
    # Loading data, acquisition scheme and mask (optional)
    tic = time.time()
    print('\n-> Loading data:')

    print('\t* DWI signal...')
    self.set_config('dwi_filename', dwi_filename)
    self.niiDWI = nibabel.load( pjoin( self.get_config('DATA_path'), dwi_filename) )
    self.niiDWI_img = self.niiDWI.get_data().astype(np.float32)
    hdr = self.niiDWI.header if nibabel.__version__ >= '2.0.0' else self.niiDWI.get_header()
    self.set_config('dim', self.niiDWI_img.shape[:3])
    self.set_config('pixdim', tuple( hdr.get_zooms()[:3] ))
    print('\t\t- dim    = %d x %d x %d x %d' % self.niiDWI_img.shape)
    print('\t\t- pixdim = %.3f x %.3f x %.3f' % self.get_config('pixdim'))
    # Scale signal intensities (if necessary)
    if ( np.isfinite(hdr['scl_slope']) and np.isfinite(hdr['scl_inter']) and hdr['scl_slope'] != 0 and
         ( hdr['scl_slope'] != 1 or hdr['scl_inter'] != 0 ) ):
        print('\t\t- rescaling data', end=' ')
        self.niiDWI_img = self.niiDWI_img * hdr['scl_slope'] + hdr['scl_inter']
        print("[OK]")

    print('\t* Acquisition scheme...')
    self.set_config('scheme_filename', scheme_filename)
    self.set_config('b0_thr', b0_thr)
    self.scheme = amico.scheme.Scheme( pjoin( self.get_config('DATA_path'), scheme_filename), b0_thr )
    print('\t\t- %d samples, %d shells' % ( self.scheme.nS, len(self.scheme.shells) ))
    print('\t\t- %d @ b=0' % ( self.scheme.b0_count ), end=' ')
    for i in xrange(len(self.scheme.shells)) :
        print(', %d @ b=%.1f' % ( len(self.scheme.shells[i]['idx']), self.scheme.shells[i]['b'] ), end=' ')
    print()

    if self.scheme.nS != self.niiDWI_img.shape[3] :
        raise ValueError( 'Scheme does not match with DWI data' )

    print('\t* Binary mask...')
    if mask_filename is not None :
        self.niiMASK = nibabel.load( pjoin( self.get_config('DATA_path'), mask_filename) )
        self.niiMASK_img = self.niiMASK.get_data().astype(np.uint8)
        niiMASK_hdr = self.niiMASK.header if nibabel.__version__ >= '2.0.0' else self.niiMASK.get_header()
        print('\t\t- dim    = %d x %d x %d' % self.niiMASK_img.shape[:3])
        print('\t\t- pixdim = %.3f x %.3f x %.3f' % niiMASK_hdr.get_zooms()[:3])
        if self.get_config('dim') != self.niiMASK_img.shape[:3] :
            raise ValueError( 'MASK geometry does not match with DWI data' )
    else :
        self.niiMASK = None
        self.niiMASK_img = np.ones( self.get_config('dim') )
        print('\t\t- not specified')
    print('\t\t- voxels = %d' % np.count_nonzero(self.niiMASK_img))

    # Preprocessing
    print('\n-> Preprocessing:')

    if self.get_config('doDebiasSignal') :
        print('\t* Debiasing signal...\n')
        sys.stdout.flush()
        if self.get_config('DWI-SNR') is None:
            raise ValueError( "Set noise variance for debiasing (e.g. ae.set_config('RicianNoiseSigma', sigma))" )
        self.niiDWI_img = debiasRician(self.niiDWI_img,self.get_config('DWI-SNR'),self.niiMASK_img,self.scheme)

    if self.get_config('doNormalizeSignal') :
        print('\t* Normalizing to b0...', end=' ')
        sys.stdout.flush()
        if self.scheme.b0_count > 0 :
            self.mean_b0s = np.mean( self.niiDWI_img[:,:,:,self.scheme.b0_idx], axis=3 )
        else:
            raise ValueError( 'No b0 volume to normalize signal with' )
        norm_factor = self.mean_b0s.copy()
        idx = self.mean_b0s <= 0
        norm_factor[ idx ] = 1
        norm_factor = 1 / norm_factor
        norm_factor[ idx ] = 0
        for i in xrange(self.scheme.nS) :
            self.niiDWI_img[:,:,:,i] *= norm_factor
        print('[ min=%.2f, mean=%.2f, max=%.2f ]' % ( self.niiDWI_img.min(), self.niiDWI_img.mean(), self.niiDWI_img.max() ))

    if self.get_config('doMergeB0') :
        print('\t* Merging multiple b0 volume(s)...', end=' ')
        mean = np.expand_dims( np.mean( self.niiDWI_img[:,:,:,self.scheme.b0_idx], axis=3 ), axis=3 )
        self.niiDWI_img = np.concatenate( (mean, self.niiDWI_img[:,:,:,self.scheme.dwi_idx]), axis=3 )
    else :
        print('\t* Keeping all b0 volume(s)...')

    print('   [ %.1f seconds ]' % ( time.time() - tic ))
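# The b0 normalization above guards against division by zero by substituting a
# factor of 0 wherever the mean b0 is non-positive. A standalone numpy sketch of
# that safe-reciprocal pattern (the array below is illustrative, not subject data):
import numpy as np

mean_b0s = np.array([120.0, 0.0, 95.0, -3.0])   # illustrative per-voxel mean b0 signal

norm_factor = mean_b0s.copy()
idx = mean_b0s <= 0
norm_factor[idx] = 1           # avoid dividing by zero (or by a negative value)
norm_factor = 1 / norm_factor
norm_factor[idx] = 0           # voxels without a valid b0 end up normalized to 0

print(norm_factor)             # [ 0.00833...  0.          0.01052...  0.        ]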
def subsegment(streamlines, max_segment_length):
    """Splits the segments of the streamlines into small segments.

    Replaces each segment of each of the streamlines with the smallest possible
    number of equally sized smaller segments such that no segment is longer than
    max_segment_length. Among other things, this can be useful for getting
    streamline counts on a grid that is smaller than the length of the
    streamline segments.

    Parameters
    ----------
    streamlines : sequence of ndarrays
        The streamlines to be subsegmented.
    max_segment_length : float
        The longest allowable segment length.

    Returns
    -------
    output_streamlines : generator
        A set of streamlines.

    Notes
    -----
    Segments of 0 length are removed. Segments that are already no longer than
    max_segment_length are left unchanged.

    Examples
    --------
    >>> streamlines = [np.array([[0,0,0],[2,0,0],[5,0,0]])]
    >>> list(subsegment(streamlines, 3.))
    [array([[ 0.,  0.,  0.],
           [ 2.,  0.,  0.],
           [ 5.,  0.,  0.]])]
    >>> list(subsegment(streamlines, 1))
    [array([[ 0.,  0.,  0.],
           [ 1.,  0.,  0.],
           [ 2.,  0.,  0.],
           [ 3.,  0.,  0.],
           [ 4.,  0.,  0.],
           [ 5.,  0.,  0.]])]
    >>> list(subsegment(streamlines, 1.6))
    [array([[ 0. ,  0. ,  0. ],
           [ 1. ,  0. ,  0. ],
           [ 2. ,  0. ,  0. ],
           [ 3.5,  0. ,  0. ],
           [ 5. ,  0. ,  0. ]])]
    """
    # sqrt, ceil and empty are expected to be the numpy functions imported at module level
    for sl in streamlines:
        diff = (sl[1:] - sl[:-1])
        length = sqrt((diff * diff).sum(-1))
        num_segments = ceil(length / max_segment_length).astype('int')

        output_sl = empty((num_segments.sum() + 1, 3), 'float')
        output_sl[0] = sl[0]

        count = 1
        for ii in xrange(len(num_segments)):
            ns = num_segments[ii]
            if ns == 1:
                output_sl[count] = sl[ii + 1]
                count += 1
            elif ns > 1:
                small_d = diff[ii] / ns
                point = sl[ii]
                for _ in xrange(ns):
                    point = point + small_d
                    output_sl[count] = point
                    count += 1
            elif ns == 0:
                pass  # repeated point
            else:
                # this should never happen because ns should be a positive int
                assert(ns >= 0)
        yield output_sl