def transform_streamlines(streamlines, mat, in_place=False):
    """ Apply affine transformation to streamlines

    Parameters
    ----------
    streamlines : Streamlines
        Streamlines object
    mat : array, (4, 4)
        transformation matrix
    in_place : bool
        If True, transform the data in place. Be careful: this modifies
        the input streamlines.

    Returns
    -------
    new_streamlines : Streamlines
        Sequence of transformed 2D ndarrays of shape[-1]==3
    """
    # using new Streamlines API
    if isinstance(streamlines, Streamlines):
        if in_place:
            streamlines._data = apply_affine(mat, streamlines._data)
            return streamlines
        new_streamlines = streamlines.copy()
        new_streamlines._data = apply_affine(mat, new_streamlines._data)
        return new_streamlines
    # supporting old data structure of streamlines
    return [apply_affine(mat, s) for s in streamlines]

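# Usage sketch for transform_streamlines (hypothetical data; assumes the
# imports the function itself relies on):
import numpy as np
from nibabel.affines import apply_affine
from dipy.tracking.streamline import Streamlines

bundle = Streamlines([np.array([[0., 0., 0.], [1., 0., 0.]]),
                      np.array([[0., 1., 0.], [0., 2., 0.]])])
translation = np.eye(4)
translation[:3, 3] = [10., 0., 0.]  # shift everything 10 mm along x
moved = transform_streamlines(bundle, translation)
# `bundle` itself is untouched unless in_place=True is passed.
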
def slice_volume(self, projection: str=SAGITTAL,
                 ras: Union[numpy.ndarray, list]=ORIGIN) \
        -> (numpy.ndarray, numpy.ndarray, numpy.ndarray):
    """
    This determines slice colors and axes coordinates for the slice.
    :param projection: one of sagittal, axial or coronal
    :param ras: 3D point where to do the slicing
    :return: X, Y, 2D data matrix
    """
    affine_inverse = numpy.linalg.inv(self.affine_matrix)
    ijk_ras = numpy.round(apply_affine(affine_inverse, ras)).astype('i')

    slice_index_1, slice_index_2 = X_Y_INDEX[projection]
    slice_data = numpy.zeros(
        (self.dimensions[slice_index_1], self.dimensions[slice_index_2]))
    x_axis_coords = numpy.zeros_like(slice_data)
    y_axis_coords = numpy.zeros_like(slice_data)

    for i in range(self.dimensions[slice_index_1]):
        for j in range(self.dimensions[slice_index_2]):
            ijk_ras[slice_index_1] = i
            ijk_ras[slice_index_2] = j

            ras_coordinates = apply_affine(self.affine_matrix, ijk_ras)
            x_axis_coords[i, j] = ras_coordinates[slice_index_1]
            y_axis_coords[i, j] = ras_coordinates[slice_index_2]

            color = self.data[ijk_ras[0], ijk_ras[1], ijk_ras[2]]
            if isinstance(color, (list, numpy.ndarray)):
                color = color[0]
            slice_data[i, j] = color

    return x_axis_coords, y_axis_coords, slice_data

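# The per-voxel double loop above can be vectorized, since apply_affine
# broadcasts over any leading dimensions. A minimal standalone sketch with
# hypothetical names (not part of the class above):
import numpy
from nibabel.affines import apply_affine

def slice_axes_vectorized(affine_matrix, ijk_fixed, dims, idx1, idx2):
    ii, jj = numpy.meshgrid(numpy.arange(dims[idx1]),
                            numpy.arange(dims[idx2]), indexing='ij')
    ijk = numpy.tile(numpy.asarray(ijk_fixed, dtype=float), ii.shape + (1,))
    ijk[..., idx1] = ii
    ijk[..., idx2] = jj
    ras = apply_affine(affine_matrix, ijk)  # shape (n1, n2, 3)
    return ras[..., idx1], ras[..., idx2]
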
def get_MRI_values(path2mri_file, function_space, mesh, offset=None):
    """
    ==============   =========================================================
    Argument         Explanation
    ==============   =========================================================
    path2mri_file    The path to the MRI file with extension mgz,
                     typically orig.mgz
    function_space   The function space of the mesh.
    mesh             The mesh of the brain
    offset           The translation of the origin of the mesh,
                     needs to be np.array
    ==============   =========================================================
    """
    import nibabel
    from nibabel.affines import apply_affine
    import numpy.linalg as npl
    import numpy as np

    img = nibabel.load(path2mri_file)
    inv_aff = npl.inv(img.header.get_vox2ras_tkr())
    data = np.asanyarray(img.dataobj)
    if offset is None:
        offset = np.array([0, 0, 0])
    xyz = function_space.dofmap().tabulate_all_coordinates(mesh).reshape(
        (function_space.dim(), -1)) - offset
    i, j, k = apply_affine(inv_aff, xyz).T
    # round to the nearest voxel: 4.7 -> 5, not 4
    i = np.rint(i).astype(int)
    j = np.rint(j).astype(int)
    k = np.rint(k).astype(int)
    return np.array([data.item(ii, jj, kk) for ii, jj, kk in zip(i, j, k)],
                    dtype=float)

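# Standalone round-trip sketch of the sampling logic above, without FEniCS.
# The file name is hypothetical; get_vox2ras_tkr() is the FreeSurfer
# "surface RAS" affine carried by .mgz headers.
import numpy as np
import nibabel
from nibabel.affines import apply_affine

img = nibabel.load('orig.mgz')  # hypothetical file
data = np.asanyarray(img.dataobj)
inv_aff = np.linalg.inv(img.header.get_vox2ras_tkr())
pts_tkr = np.array([[0.0, 0.0, 0.0], [1.5, -2.0, 3.0]])
ijk = np.rint(apply_affine(inv_aff, pts_tkr)).astype(int)
values = data[ijk[:, 0], ijk[:, 1], ijk[:, 2]]
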
def _get_cut_slices(stat_map_img, cut_coords=None, threshold=None):
    """For internal use. Find slice numbers for the cut.
    Based on find_xyz_cut_coords
    """
    # Select coordinates for the cut
    if cut_coords is None:
        cut_coords = find_xyz_cut_coords(
            stat_map_img, activation_threshold=threshold)

    # Convert cut coordinates into cut slices
    try:
        cut_slices = apply_affine(np.linalg.inv(stat_map_img.affine),
                                  cut_coords)
    except ValueError:
        raise ValueError(
            "The input given for display_mode='ortho' needs to be "
            "a list of 3d world coordinates in (x, y, z). "
            "You provided cut_coords={0}".format(cut_coords))
    except IndexError:
        raise ValueError(
            "The input given for display_mode='ortho' needs to be "
            "a list of 3d world coordinates in (x, y, z). "
            "You provided single cut, cut_coords={0}".format(cut_coords))

    return cut_slices

def display_extent(self, x1, x2, y1, y2, z1, z2):
    # grid_shape, mask, affine, peaks_dirs, peaks_values, colors, opacity,
    # linewidth and the lod* settings are captured from the enclosing scope
    # (this is a closure attached to an actor).
    tmp_mask = np.zeros(grid_shape, dtype=bool)
    tmp_mask[x1:x2 + 1, y1:y2 + 1, z1:z2 + 1] = True
    tmp_mask = np.bitwise_and(tmp_mask, mask)

    ijk = np.ascontiguousarray(np.array(np.nonzero(tmp_mask)).T)
    if len(ijk) == 0:
        self.SetMapper(None)
        return
    if affine is not None:
        ijk_trans = np.ascontiguousarray(apply_affine(affine, ijk))
    list_dirs = []
    for index, center in enumerate(ijk):
        # center = tuple(center)
        if affine is None:
            xyz = center[:, None]
        else:
            xyz = ijk_trans[index][:, None]
        xyz = xyz.T
        for i in range(peaks_dirs[tuple(center)].shape[-2]):
            if peaks_values is not None:
                pv = peaks_values[tuple(center)][i]
            else:
                pv = 1.
            symm = np.vstack((-peaks_dirs[tuple(center)][i] * pv + xyz,
                              peaks_dirs[tuple(center)][i] * pv + xyz))
            list_dirs.append(symm)

    self.mapper = line(list_dirs, colors=colors,
                       opacity=opacity, linewidth=linewidth,
                       lod=lod, lod_points=lod_points,
                       lod_points_size=lod_points_size).GetMapper()
    self.SetMapper(self.mapper)

def _register_neighb_to_model(self, model_bundle, neighb_streamlines,
                              metric=None, x0=None, bounds=None,
                              select_model=400, select_target=600,
                              method='L-BFGS-B',
                              nb_pts=20, num_threads=None):
    if self.verbose:
        print('# Local SLR of neighb_streamlines to model')
        t = time()

    if metric is None or metric == 'symmetric':
        metric = BundleMinDistanceMetric(num_threads=num_threads)
    if metric == 'asymmetric':
        metric = BundleMinDistanceAsymmetricMetric()
    if metric == 'diagonal':
        metric = BundleSumDistanceMatrixMetric()

    if x0 is None:
        x0 = 'similarity'

    if bounds is None:
        bounds = [(-30, 30), (-30, 30), (-30, 30),
                  (-45, 45), (-45, 45), (-45, 45), (0.8, 1.2)]

    # TODO: this can be sped up by using the centroids directly
    static = select_random_set_of_streamlines(model_bundle,
                                              select_model, rng=self.rng)
    moving = select_random_set_of_streamlines(neighb_streamlines,
                                              select_target, rng=self.rng)

    static = set_number_of_points(static, nb_pts)
    moving = set_number_of_points(moving, nb_pts)

    slr = StreamlineLinearRegistration(metric=metric, x0=x0,
                                       bounds=bounds, method=method)
    slm = slr.optimize(static, moving)

    transf_streamlines = neighb_streamlines.copy()
    transf_streamlines._data = apply_affine(
        slm.matrix, transf_streamlines._data)

    transf_matrix = slm.matrix
    slr_bmd = slm.fopt
    slr_iterations = slm.iterations

    if self.verbose:
        print(' Square-root of BMD is %.3f' % (np.sqrt(slr_bmd),))
        if slr_iterations is not None:
            print(' Number of iterations %d' % (slr_iterations,))
        print(' Matrix size {}'.format(slm.matrix.shape))
        original = np.get_printoptions()
        np.set_printoptions(3, suppress=True)
        print(transf_matrix)
        print(slm.xopt)
        np.set_printoptions(**original)
        print(' Duration %0.3f sec. \n' % (time() - t,))

    return transf_streamlines, slr_bmd

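# A self-contained sketch of the same SLR step through dipy's public API,
# with random bundles standing in for real data (names local to the example):
import numpy as np
from dipy.align.streamlinear import StreamlineLinearRegistration
from dipy.tracking.streamline import set_number_of_points

rng = np.random.RandomState(42)
static = [np.cumsum(rng.rand(20, 3), axis=0) for _ in range(10)]
moving = [s + np.array([5.0, 0.0, 0.0]) for s in static]  # 5 mm x-shift
static = set_number_of_points(static, 20)
moving = set_number_of_points(moving, 20)
slr = StreamlineLinearRegistration(x0='rigid')
slm = slr.optimize(static, moving)
print(slm.matrix)  # 4x4 affine; should roughly undo the 5 mm shift
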
def get_data():
    # dname = '/home/eleftherios/Data/Test_data_Jasmeen/Elef_Test_RecoBundles/'
    dname = '/home/eleftherios/Data/Elef_Test_RecoBundles/'
    fname = dname + 'tracts.trk'
    fname_npz = dname + 'tracts.npz'
    streamlines = nib.streamlines.compact_list.load_compact_list(fname_npz)
    streamlines = streamlines[::10].copy()
    streamlines._data -= np.mean(streamlines._data, axis=0)

    # Rotate brain to see a sagittal view.
    R1 = np.eye(4)
    R1[:3, :3] = rodrigues_axis_rotation((0, 1, 0), theta=90)
    R2 = np.eye(4)
    R2[:3, :3] = rodrigues_axis_rotation((0, 0, 1), theta=90)
    R = np.dot(R2, R1)
    streamlines._data = apply_affine(R, streamlines._data)

    # renderer = window.Renderer()
    # bundle_actor = actor.line(streamlines)
    # renderer.add(bundle_actor)
    # window.show(renderer)

    return streamlines

def main(self, X, affine):
    inv_affine = numpy.linalg.inv(affine)
    shift05 = numpy.array([0.5, 0.5, 0.5])
    points = []
    for line in X:
        points.append(
            (apply_affine(inv_affine, line) + shift05).astype(int).tolist())
    return FancyDict(points=points)

def set_space_pos(self, new_space_coord):
    """Set current cursor position based on RAS coordinates."""
    new_coord = apply_affine(npl.inv(self._affine), new_space_coord)
    new_coord = np.floor(new_coord)
    new_coord = [int(item) for item in new_coord]
    if self.is_valid_coord(new_coord):
        self.set_cross_pos(new_coord)

def get_ref_net(x, y, z):
    aff = bna_img.affine
    aff_inv = npl.inv(aff)
    # apply_affine(aff, (x, y, z)) would map voxel -> MNI;
    # here we need the inverse mapping:
    vox_coord = apply_affine(aff_inv, (x, y, z))  # MNI -> voxel
    return ref_dict[int(par_data[int(vox_coord[0]),
                                 int(vox_coord[1]),
                                 int(vox_coord[2])])]

def extract_cube(vol_target, vol_tdata, affine_ref, initial_position,
                 epi_coord, cube_size):
    # Calculate the transformation from the reference to the target volume:
    # the affine matrix from EPI voxels to anat voxels
    epi_vox2anat_vox = npl.inv(vol_target.affine).dot(affine_ref)
    # x, y, z position in the anat volume
    target_anat = apply_affine(epi_vox2anat_vox, epi_coord)

    # get the cube
    side_size = int(np.floor(cube_size / 2.))
    # side size in target spacing
    side_size_targ = (apply_affine(epi_vox2anat_vox,
                                   np.array(epi_coord) + side_size)
                      - target_anat)
    side_size_targ = side_size_targ[0]
    x_s, y_s, z_s = vol_target.shape
    x_interval = np.arange(target_anat[0] - side_size_targ,
                           target_anat[0] + side_size_targ + 1, dtype=int)
    y_interval = np.arange(target_anat[1] - side_size_targ,
                           target_anat[1] + side_size_targ + 1, dtype=int)
    z_interval = np.arange(target_anat[2] - side_size_targ,
                           target_anat[2] + side_size_targ + 1, dtype=int)

    # normalize the data between 0 and 1
    # norm_vol_target = vol_target.get_fdata()
    norm_vol_target = vol_tdata  # avg_frame_norm(vol_target.get_fdata())
    # norm_vol_target = rescale(norm_vol_target)

    # check if we need padding
    if ((x_interval >= 0).all() & (x_interval < x_s).all()
            & (y_interval >= 0).all() & (y_interval < y_s).all()
            & (z_interval >= 0).all() & (z_interval < z_s).all()):
        small_cube = norm_vol_target[x_interval, ...][:, y_interval, ...][:, :, z_interval, ...]
    else:
        padded_target = np.lib.pad(norm_vol_target, (side_size, side_size),
                                   'constant', constant_values=(0))
        x_interval = np.arange(target_anat[0] - side_size_targ,
                               target_anat[0] + side_size_targ + 1, dtype=int)
        y_interval = np.arange(target_anat[1] - side_size_targ,
                               target_anat[1] + side_size_targ + 1, dtype=int)
        z_interval = np.arange(target_anat[2] - side_size_targ,
                               target_anat[2] + side_size_targ + 1, dtype=int)
        small_cube = padded_target[x_interval + side_size, ...][:, y_interval + side_size, ...][:, :, z_interval + side_size, ...]

    # Earlier experiments with padded volumes and position volumes, kept for
    # reference:
    # padded_target = np.lib.pad(vol_target.get_fdata(), (side_size, side_size),
    #                            'constant', constant_values=(0))
    # small_cube = padded_target[x_interval + side_size, ...][:, y_interval + side_size, ...][:, :, z_interval + side_size, ...]
    # x_interval = np.arange(target_anat[0] - side_size, target_anat[0] + side_size, dtype=int)
    # y_interval = np.arange(target_anat[1] - side_size, target_anat[1] + side_size, dtype=int)
    # z_interval = np.arange(target_anat[2] - side_size, target_anat[2] + side_size, dtype=int)
    # padded_pos_vol = np.lib.pad(initial_position, (side_size, side_size),
    #                             'constant', constant_values=(0))[..., side_size:-side_size]
    # init_pos = padded_pos_vol[x_interval + side_size, ...][:, y_interval + side_size, ...][:, :, z_interval + side_size, ...]
    # init_pos = initial_position[target_anat[0], target_anat[1], target_anat[2], ...]
    init_pos = initial_position[epi_coord[0], epi_coord[1], epi_coord[2], ...]
    return small_cube, init_pos

def getOrigin(fname):
    hdr, img = opennii(fname)
    print(hdr.affine)
    from nibabel.affines import apply_affine
    import numpy.linalg as npl
    res = apply_affine(npl.inv(hdr.affine), [0, 0, 0])
    # M = hdr.affine[:3, :3]
    # res = M.dot([0, 0, 0]) + hdr.affine[:3, 3]
    return res

def real2vox(self, xcoord, ycoord, zcoord, time):
    x, y, z = apply_affine(self.invaffine, [xcoord, ycoord, zcoord])
    t = self.real2tr(time)
    return (
        int(np.round(x, 0)),
        int(np.round(y, 0)),
        int(np.round(z, 0)),
        int(np.round(t, 0)),
    )

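# Round-trip sketch of the world -> voxel conversion used above (standalone;
# the 2 mm isotropic affine and its origin are made up):
import numpy as np
from nibabel.affines import apply_affine

affine = np.diag([2.0, 2.0, 2.0, 1.0])
affine[:3, 3] = [-90.0, -126.0, -72.0]
invaffine = np.linalg.inv(affine)

world = np.array([10.0, -20.0, 30.0])
ijk = np.round(apply_affine(invaffine, world)).astype(int)
back = apply_affine(affine, ijk)  # within half a voxel of `world`
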
def cluster_endpoints_by_vol_rois(self, rois_path, streamlines=None,
                                  mode='lh'):
    """
    Cluster fiber endpoints by volume ROIs, according to the endpoints of
    the streamlines passing through the ROIs.

    Parameters
    ----------
    rois_path: volume rois path
    streamlines: streamline data
    mode: 'lh', 'rh'

    Return
    ------
    labels: label of each streamline
    """
    img = nib.load(rois_path)
    rois = img.get_fdata()
    if streamlines is None:
        streamlines = self._fasciculus.get_data()
    streamlines = self._fasciculus.sort_streamlines(streamlines)
    labels = np.array(len(streamlines) * [None])
    if mode == 'lh':
        endpoints = [apply_affine(npl.inv(img.affine), fib[0]).astype(int)
                     for fib in streamlines]
    elif mode == 'rh':
        endpoints = [apply_affine(npl.inv(img.affine), fib[-1]).astype(int)
                     for fib in streamlines]
    for i in range(len(endpoints)):
        if rois[endpoints[i][0], endpoints[i][1], endpoints[i][2]] != 0:
            labels[i] = int(rois[endpoints[i][0], endpoints[i][1],
                                 endpoints[i][2]])
    return labels

def test_creation():
    # This is the simplest possible example, where there is a thing we are
    # optimizing, and an optional pre and post transform
    # Reset the aff2 object
    aff2_obj = Affine(AFF2.copy())
    ct = ChainTransform(aff2_obj)
    # Check apply gives expected result
    assert_array_equal(ct.apply(POINTS), apply_affine(AFF2, POINTS))
    # Check that result is changed by setting params
    assert_array_equal(ct.param, aff2_obj.param)
    ct.param = np.zeros((12,))
    assert_array_almost_equal(ct.apply(POINTS), POINTS)
    # Does changing params in chain object change components passed in?
    assert_array_almost_equal(aff2_obj.param, np.zeros((12,)))
    # Reset the aff2 object
    aff2_obj = Affine(AFF2.copy())
    # Check apply gives the expected results
    ct = ChainTransform(aff2_obj, pre=AFF1_OBJ)
    assert_array_almost_equal(AFF1_OBJ.as_affine(), AFF1)
    assert_array_almost_equal(aff2_obj.as_affine(), AFF2)
    tmp = np.dot(AFF2, AFF1)
    assert_array_almost_equal(ct.apply(POINTS), apply_affine(tmp, POINTS))
    # Check that result is changed by setting params
    assert_array_almost_equal(ct.param, aff2_obj.param)
    ct.param = np.zeros((12,))
    assert_array_almost_equal(ct.apply(POINTS), apply_affine(AFF1, POINTS))
    # Does changing params in chain object change components passed in?
    assert_array_almost_equal(aff2_obj.param, np.zeros((12,)))
    # Reset the aff2 object
    aff2_obj = Affine(AFF2.copy())
    ct = ChainTransform(aff2_obj, pre=AFF1_OBJ, post=AFF3_OBJ)
    assert_array_almost_equal(ct.apply(POINTS),
                              apply_affine(np.dot(AFF3, np.dot(AFF2, AFF1)),
                                           POINTS))
    # Check that result is changed by setting params
    assert_array_equal(ct.param, aff2_obj.param)
    ct.param = np.zeros((12,))
    assert_array_almost_equal(ct.apply(POINTS),
                              apply_affine(np.dot(AFF3, AFF1), POINTS))
    # Does changing params in chain object change components passed in?
    assert_array_equal(aff2_obj.param, np.zeros((12,)))

def grid_bspline_weights(target_nii, ctrl_nii):
    """Fast, gridded evaluation."""
    from scipy.sparse import csr_matrix, vstack

    if isinstance(target_nii, (str, bytes, Path)):
        target_nii = nb.load(target_nii)
    if isinstance(ctrl_nii, (str, bytes, Path)):
        ctrl_nii = nb.load(ctrl_nii)

    shape = target_nii.shape[:3]
    ctrl_sp = ctrl_nii.header.get_zooms()[:3]
    ras2ijk = np.linalg.inv(ctrl_nii.affine)
    origin = apply_affine(ras2ijk, [tuple(target_nii.affine[:3, 3])])[0]

    wd = []
    for i, (o, n, sp) in enumerate(
            zip(origin, shape, target_nii.header.get_zooms()[:3])):
        locations = np.arange(0, n, dtype="float32") * sp / ctrl_sp[i] + o
        knots = np.arange(0, ctrl_nii.shape[i], dtype="float32")
        distance = (locations[np.newaxis, ...]
                    - knots[..., np.newaxis]).astype("float32")

        weights = np.zeros_like(distance, dtype="float32")
        within_support = np.abs(distance) < 2.0
        d = np.abs(distance[within_support])
        weights[within_support] = np.piecewise(
            d,
            [d < 1.0, d >= 1.0],
            [
                lambda d: (4.0 - 6.0 * d**2 + 3.0 * d**3) / 6.0,
                lambda d: (2.0 - d)**3 / 6.0,
            ],
        )
        wd.append(weights)

    ctrl_shape = ctrl_nii.shape[:3]
    data_size = np.prod(shape)
    wmat = None
    for i in range(ctrl_shape[0]):
        sparse_mat = (
            wd[0][i, np.newaxis, np.newaxis, :, np.newaxis, np.newaxis]
            * wd[1][np.newaxis, :, np.newaxis, np.newaxis, :, np.newaxis]
            * wd[2][np.newaxis, np.newaxis, :, np.newaxis, np.newaxis, :]
        ).reshape((-1, data_size))
        sparse_mat[sparse_mat < 1e-9] = 0
        if wmat is None:
            wmat = csr_matrix(sparse_mat)
        else:
            wmat = vstack((wmat, csr_matrix(sparse_mat)))

    return wmat

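# Quick check of the cubic B-spline kernel implemented by np.piecewise above:
# B3(d) = (4 - 6 d^2 + 3 d^3) / 6 for d < 1, (2 - d)^3 / 6 for 1 <= d < 2,
# and 0 elsewhere. Over unit-spaced knots the weights sum to 1 (partition of
# unity), which is a cheap sanity test of the two branches.
import numpy as np

def bspline3(d):
    d = np.abs(np.asarray(d, dtype="float32"))
    w = np.zeros_like(d)
    inner = d < 1.0
    outer = (d >= 1.0) & (d < 2.0)
    w[inner] = (4.0 - 6.0 * d[inner] ** 2 + 3.0 * d[inner] ** 3) / 6.0
    w[outer] = (2.0 - d[outer]) ** 3 / 6.0
    return w

x = 0.3  # arbitrary location between knots
print(bspline3(x - np.arange(-2, 3)).sum())  # ~1.0
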
def transform_streamlines(streamlines, mat):
    """ Apply affine transformation to streamlines

    Parameters
    ----------
    streamlines : list
        List of 2D ndarrays of shape[-1]==3
    mat : array, (4, 4)
        Transformation matrix.

    Returns
    -------
    new_streamlines : list
        List of the transformed 2D ndarrays of shape[-1]==3
    """
    return [apply_affine(mat, s) for s in streamlines]

def draw_volume(plotter, t_map, affine):
    data = t_map.get_fdata()[:, :, :, 0]
    data -= np.min(data)
    data /= np.max(data)
    cutoff = 0.50 * np.max(data)
    coords = np.transpose(np.nonzero(data > cutoff))
    xL, yL, zL = t_map.header.get_zooms()[:3]
    for coord in coords:
        cube = apply_affine(affine, coord)
        color = cold_hot(data[tuple(coord)])[:3]
        draw.draw_cube(plotter, cube, xL, yL, zL, color, 0.5)

def cuts_from_bbox(mask_nii, cuts=3):
    """Finds equi-spaced cuts for presenting images"""
    from nibabel.affines import apply_affine

    mask_data = np.asanyarray(mask_nii.dataobj) > 0.0

    # First, project the number of masked voxels on each axis
    ijk_counts = [
        mask_data.sum(2).sum(1),  # project sagittal planes to transverse (i) axis
        mask_data.sum(2).sum(0),  # project coronal planes to longitudinal (j) axis
        mask_data.sum(1).sum(0),  # project axial planes to vertical (k) axis
    ]

    # If all voxels are masked in a slice (say that happens at k=10),
    # then the value for ijk_counts for the projection to k (i.e. ijk_counts[2])
    # at that element of the orthogonal axes (ijk_counts[2][10]) is
    # the total number of voxels in that slice (i.e. Ni x Nj).
    # Here we define some thresholds to consider the plane as "masked".
    # The thresholds vary because of the shape of the brain.
    # I have manually found that for the axial view requiring 30%
    # of the slice elements to be masked drops almost empty boxes
    # in the mosaic of axial planes (and also addresses #281)
    ijk_th = [
        int((mask_data.shape[1] * mask_data.shape[2]) * 0.2),  # sagittal
        int((mask_data.shape[0] * mask_data.shape[2]) * 0.0),  # coronal
        int((mask_data.shape[0] * mask_data.shape[1]) * 0.3),  # axial
    ]

    vox_coords = []
    for ax, (c, th) in enumerate(zip(ijk_counts, ijk_th)):
        B = np.argwhere(c > th)
        if B.size:
            smin, smax = B.min(), B.max()

        # Avoid too narrow selections of cuts (very small masks)
        if not B.size or (th > 0 and (smin + cuts + 1) >= smax):
            B = np.argwhere(c > 0)

        # Resort to full plane if mask is seemingly empty
        smin, smax = ((B.min(), B.max()) if B.size
                      else (0, mask_data.shape[ax]))

        inc = (smax - smin) / (cuts + 1)
        vox_coords.append([smin + (i + 1) * inc for i in range(cuts)])

    ras_coords = []
    for cross in np.array(vox_coords).T:
        ras_coords.append(apply_affine(mask_nii.affine, cross).tolist())

    ras_cuts = [list(coords) for coords in np.transpose(ras_coords)]
    return {k: v for k, v in zip(['x', 'y', 'z'], ras_cuts)}

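# Usage sketch for cuts_from_bbox with a synthetic mask image (all names
# local to this example):
import numpy as np
import nibabel as nb

mask = np.zeros((40, 40, 40), dtype=np.uint8)
mask[10:30, 12:28, 8:32] = 1  # a box standing in for a brain mask
mask_nii = nb.Nifti1Image(mask, np.diag([2.0, 2.0, 2.0, 1.0]))
print(cuts_from_bbox(mask_nii, cuts=3))
# -> {'x': [three cuts], 'y': [...], 'z': [...]} in world (RAS) mm
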
def get_motion_deriv(aff_transforms, tmp_deriv_init):
    world_motion = []
    tmp_deriv_old = np.copy(tmp_deriv_init)
    for ii in range(aff_transforms.shape[2]):
        world_motion.append(apply_affine(aff_transforms[..., ii],
                                         tmp_deriv_old))
    # compute the delta
    world_motion = np.array(world_motion)
    deriv_values = world_motion[1:] - world_motion[0:-1]
    if len(deriv_values.shape) > 3:
        deriv_values = np.squeeze(
            np.swapaxes(deriv_values[..., np.newaxis], 0, 5))
    else:
        deriv_values = np.squeeze(
            np.swapaxes(deriv_values[..., np.newaxis], 0, 2))
    return deriv_values

def apply_affine(self, affine, lazy=False):
    """ Applies an affine transformation on the points of each streamline.

    If `lazy` is not specified, this is performed *in-place*.

    Parameters
    ----------
    affine : ndarray of shape (4, 4)
        Transformation that will be applied to every streamline.
    lazy : {False, True}, optional
        If True, streamlines are *not* transformed in-place and a
        :class:`LazyTractogram` object is returned. Otherwise, streamlines
        are modified in-place.

    Returns
    -------
    tractogram : :class:`Tractogram` or :class:`LazyTractogram` object
        Tractogram where the streamlines have been transformed according
        to the given affine transformation. If the `lazy` option is true,
        it returns a :class:`LazyTractogram` object, otherwise it returns
        a reference to this :class:`Tractogram` object with updated
        streamlines.
    """
    if lazy:
        lazy_tractogram = LazyTractogram.from_tractogram(self)
        return lazy_tractogram.apply_affine(affine)

    if len(self.streamlines) == 0:
        return self

    if np.all(affine == np.eye(4)):
        return self  # No transformation.

    BUFFER_SIZE = 10000000  # About 128 Mb since pts shape is 3.
    for start in range(0, len(self.streamlines.data), BUFFER_SIZE):
        end = start + BUFFER_SIZE
        pts = self.streamlines._data[start:end]
        self.streamlines.data[start:end] = apply_affine(affine, pts)

    if self.affine_to_rasmm is not None:
        # Update the affine that brings back the streamlines to RASmm.
        self.affine_to_rasmm = np.dot(self.affine_to_rasmm,
                                      np.linalg.inv(affine))

    return self

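# Sketch of the affine_to_rasmm bookkeeping above, using nibabel's public
# Tractogram API: apply_affine accumulates the inverse transform, so
# to_world() can always bring the points back to RAS+ mm.
import numpy as np
import nibabel as nib

t = nib.streamlines.Tractogram([np.array([[1.0, 2.0, 3.0]])],
                               affine_to_rasmm=np.eye(4))
vox2mm = np.diag([2.0, 2.0, 2.0, 1.0])
t.apply_affine(np.linalg.inv(vox2mm))  # points now in "voxel" units
t.to_world()                           # undone via affine_to_rasmm
print(t.streamlines[0])                # back to [[1. 2. 3.]]
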
def assert_spm_resampling_close(from_img, our_resampled, spm_resampled):
    """ Assert our resampling is close to SPM's, allowing for edge effects """
    # To allow for differences in the way SPM and scipy.ndimage handle
    # off-edge interpolation, mask out voxels off edge
    to_img_shape = spm_resampled.shape
    to_img_affine = spm_resampled.affine
    to_vox_coords = np.indices(to_img_shape).transpose((1, 2, 3, 0))
    # Coordinates of to_img mapped to from_img
    to_to_from = npl.inv(from_img.affine).dot(to_img_affine)
    resamp_coords = apply_affine(to_to_from, to_vox_coords)
    # Places where SPM may not return default value but scipy.ndimage will
    # (SPM does not return zeros <0.05 from image edges).
    # See: https://github.com/nipy/nibabel/pull/255#issuecomment-186774173
    outside_vol = np.any((resamp_coords < 0) |
                         (np.subtract(resamp_coords, from_img.shape) > -1),
                         axis=-1)
    spm_res = np.where(outside_vol, np.nan, np.array(spm_resampled.dataobj))
    assert_allclose_safely(our_resampled.dataobj, spm_res)
    assert_almost_equal(our_resampled.affine, spm_resampled.affine, 5)

def get_cord_from_file(header, cord_filepath, image_affine):
    """Return all coordinates from the txt or csv file."""
    shape = header.get_data_shape()
    voxel_size = header.get_zooms()
    cord_file = open(cord_filepath, 'r')
    all_cords = []
    all_roi_id = []
    all_roi_radius = []
    line = cord_file.readline()
    while line:
        try:
            cord = line.replace('\r\n', '').split('\t')
            if len(cord) != 5:
                raise ValueError('The coordinate ' + line.rstrip('\t\n') +
                                 ' must be three-dimensional!')
            roi_id = int(cord[0])
            new_cord = list(float(i) for i in cord[1:4])
            new_cord = apply_affine(np.linalg.inv(image_affine), new_cord)
            new_cord = list(int(i) for i in new_cord)
            radius = int(cord[4])
            all_roi_radius.append([int(radius * 1. / voxel_size[0]),
                                   int(radius * 1. / voxel_size[1]),
                                   int(radius * 1. / voxel_size[2])])
            all_cords.append(new_cord)
            if (new_cord[0] < 0 or new_cord[0] >= shape[0]) or \
               (new_cord[1] < 0 or new_cord[1] >= shape[1]) or \
               (new_cord[2] < 0 or new_cord[2] >= shape[2]):
                raise ValueError('The coordinate ' + line.rstrip('\t\n') +
                                 ' is out of bounds.')
            else:
                all_roi_id.append(roi_id)
        except Exception:
            raise ValueError('The coordinate ' + line.rstrip('\t\n') +
                             ' could not be parsed!')
        line = cord_file.readline()
    cord_file.close()
    return all_cords, all_roi_radius, all_roi_id

def cluster_stats(source_data, cluster_data, image_affine):
    """Get the cluster size, peak value and peak coordinate based on the
    source_data."""
    if not source_data.shape == cluster_data.shape:
        print("Inconsistent data shape.")
        return
    cluster_info = []
    cluster_idx = np.unique(cluster_data)
    for idx in cluster_idx:
        if idx:
            mask = cluster_data.copy()
            mask[mask != idx] = 0
            mask[mask == idx] = 1
            extent = mask.sum()
            masked_src = source_data * mask
            max_val = masked_src.max()
            max_coord = np.unravel_index(masked_src.argmax(),
                                         masked_src.shape)
            max_coord = apply_affine(image_affine, np.array(max_coord))
            cluster_info.append([idx, max_val, max_coord[0], max_coord[1],
                                 max_coord[2], extent])
    cluster_info = np.array(cluster_info)
    cluster_extent = cluster_info[..., -1]
    cluster_info = cluster_info[np.argsort(cluster_extent)[::-1]]
    return cluster_info

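# Toy run of cluster_stats above (synthetic arrays, identity affine):
import numpy as np

src = np.zeros((5, 5, 5))
labels = np.zeros((5, 5, 5))
labels[1:3, 1:3, 1:3] = 1  # one 8-voxel cluster
src[2, 2, 2] = 7.5         # its peak
stats = cluster_stats(src, labels, np.eye(4))
# stats[0] -> [label, peak value, x, y, z, extent] == [1., 7.5, 2., 2., 2., 8.]
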
def __init__(self, stack, init_reg, *args, **kwargs):
    self.stack_idx = dict([[s[1][0], i] for i, s in enumerate(stack._slabs)])
    bounds = [[0, stack._shape[0]], [0, stack._shape[1]]]
    points = np.asarray([(x, y, 0) for x in bounds[0] for y in bounds[1]])
    zz = np.asarray([0, 0, 1])
    tris_plane = np.asarray([[0, 1, 2], [1, 2, 3]])
    self.points = np.array([[points + zz * s for s in slab[1]]
                            for slab in stack._slabs])
    self.coords = apply_affine(init_reg, self.points)
    tris = np.vstack([tris_plane + 4 * (i + si * len(slab[1]))
                      for si, slab in enumerate(stack._slabs)
                      for i, s in enumerate(slab[1])])
    colors = np.hstack([np.zeros(len(slab[1]) * 4) + i
                        for i, slab in enumerate(stack._slabs)])

    # self.slices = mlab.triangular_mesh(
    #     self.coords[..., 0].ravel(), self.coords[..., 1].ravel(),
    #     self.coords[..., 2].ravel(), tris,
    #     scalars=colors, scale_mode='none')
    # self.ipw = mlab.pipeline.image_plane_widget(
    #     self.data_src,
    # )

    plt.ion()
    self.ntimes = 0
    self.slab_nslices = len(stack._slabs[0][1])
    self.nslabs = len(stack._slabs)
    fig, ax = plt.subplots(1, self.slab_nslices, squeeze=False)
    self.slab_ims = []
    for si in range(self.slab_nslices):
        self.slab_ims.append(
            ax[0, si].matshow(np.zeros(stack._shape[:2]),
                              cmap=plt.get_cmap('gray'),
                              vmin=0, vmax=2000))

    self.motion_fig, self.motion_plot = plt.subplots()
    huge_num = 1000
    self.motion_plot.bar(
        np.arange(0, huge_num * self.nslabs, self.nslabs) - .5,
        height=200 * np.ones(huge_num),
        bottom=-100,
        width=self.nslabs, color=['b', 'g'],
        linewidth=0, alpha=.2)
    # self.motion_plot_bg = fig.canvas.copy_from_bbox(self.motion_plot.bbox)
    self.motion_plot_lines = self.motion_plot.plot([0], [[0] * 6], '+-')
    self.motion_plot.set_ylim(-.1, .1)
    self.motion_plot.set_xlim(-.5, 100)

def main(self, input_nii, origin_override, index2rgb_json,
         rgbcenters_json, rgbvolumes_json):
    nii = nibabel.load(input_nii)
    hdr = nii.header
    q = hdr.get_best_affine()
    if origin_override:
        q[0:3, 3] = -q[0:3, 0:3].dot(origin_override)
    voxVol = numpy.linalg.det(q)
    img = numpy.asanyarray(nii.dataobj).squeeze()
    labels = numpy.unique(img)
    rgbcenters = {}
    rgbvolumes = {}
    unmapped = []
    for b in labels:
        tmp = numpy.argwhere(img == b)
        xyz = apply_affine(q, tmp.mean(axis=0)).round(3).tolist()
        vol = numpy.round(tmp.shape[0] * voxVol, 3)
        rgbcenters[b] = xyz
        rgbvolumes[b] = vol
    if index2rgb_json:
        with open(index2rgb_json, 'r') as fp:
            index2rgb = json.load(fp)
        # JSON object keys are strings, so look labels up by string form
        rgbcenters = {index2rgb[str(b)]: ctr for b, ctr in rgbcenters.items()}
        rgbvolumes = {index2rgb[str(b)]: vol for b, vol in rgbvolumes.items()}
    with open(rgbcenters_json, 'w') as fp:
        json.dump(rgbcenters, fp)
    with open(rgbvolumes_json, 'w') as fp:
        json.dump(rgbvolumes, fp)
    return FancyDict(
        rgbcenters_json=rgbcenters_json,
        rgbvolumes_json=rgbvolumes_json,
        unmapped=unmapped
    )

def scanner_coords(xyz, affine, from_world, to_world):
    Tv = np.dot(from_world, np.dot(affine, to_world))
    XYZ = apply_affine(Tv, xyz)
    return XYZ[:, 0], XYZ[:, 1], XYZ[:, 2]

def apply(self, xyz):
    return apply_affine(self.as_affine(), xyz)

def near_roi(streamlines, region_of_interest, affine=None, tol=None,
             mode="any"):
    """Provide filtering criteria for a set of streamlines based on whether
    they fall within a tolerance distance from an ROI

    Parameters
    ----------
    streamlines : list or generator
        A sequence of streamlines. Each streamline should be a (N, 3) array,
        where N is the length of the streamline.
    region_of_interest : ndarray
        A mask used as a target. Non-zero values are considered to be within
        the target region.
    affine : ndarray
        Affine transformation from voxels to streamlines. Default: identity.
    tol : float
        Distance (in the units of the streamlines, usually mm). If any
        coordinate in the streamline is within this distance from the center
        of any voxel in the ROI, the filtering criterion is set to True for
        this streamline, otherwise False. Defaults to the distance between
        the center of each voxel and the corner of the voxel.
    mode : string, optional
        One of {"any", "all", "either_end", "both_end"}; the criterion
        returns True if:

        "any" : any point is within tol from ROI. Default.

        "all" : all points are within tol from ROI.

        "either_end" : either of the end-points is within tol from ROI

        "both_end" : both end points are within tol from ROI.

    Returns
    -------
    1D array of boolean dtype, shape (len(streamlines), )
        This contains `True` for indices corresponding to each streamline
        that passes within a tolerance distance from the target ROI, `False`
        otherwise.
    """
    if affine is None:
        affine = np.eye(4)
    dtc = dist_to_corner(affine)
    if tol is None:
        tol = dtc
    elif tol < dtc:
        w_s = "Tolerance input provided would create gaps in your"
        w_s += " inclusion ROI. Setting to: %s" % dtc
        warn(w_s)
        tol = dtc

    roi_coords = np.array(np.where(region_of_interest)).T
    x_roi_coords = apply_affine(affine, roi_coords)

    # If it's already a list, we can save time by preallocating the output
    if isinstance(streamlines, list):
        out = np.zeros(len(streamlines), dtype=bool)
        for ii, sl in enumerate(streamlines):
            out[ii] = streamline_near_roi(sl, x_roi_coords, tol=tol,
                                          mode=mode)
        return out

    # If it's a generator, we'll need to generate the output into a list
    else:
        out = []
        for sl in streamlines:
            out.append(streamline_near_roi(sl, x_roi_coords, tol=tol,
                                           mode=mode))
        return np.array(out, dtype=bool)

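# Usage sketch for near_roi with a toy ROI and two streamlines (assumes
# dipy's dist_to_corner and streamline_near_roi helpers are in scope,
# as in the function above):
import numpy as np

roi = np.zeros((4, 4, 4), dtype=bool)
roi[0, 0, 0] = True
streamlines = [np.array([[0., 0., 0.], [1., 1., 1.]]),  # touches the ROI
               np.array([[3., 3., 3.], [3., 3., 2.]])]  # far from it
print(near_roi(streamlines, roi))  # -> [ True False]
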
def orient_by_rois(streamlines, roi1, roi2, in_place=False,
                   as_generator=False, affine=None):
    """Orient a set of streamlines according to a pair of ROIs

    Parameters
    ----------
    streamlines : list or generator
        List or generator of 2d arrays of 3d coordinates. Each array
        contains the xyz coordinates of a single streamline.
    roi1, roi2 : ndarray
        Binary masks designating the location of the regions of interest,
        or coordinate arrays (n-by-3 array with ROI coordinate in each row).
    in_place : bool
        Whether to make the change in-place in the original list
        (and return a reference to the list), or to make a copy of the list
        and return this copy, with the relevant streamlines reoriented.
        Default: False.
    as_generator : bool
        Whether to return a generator as output. Default: False
    affine : ndarray
        Affine transformation from voxels to streamlines. Default: identity.

    Returns
    -------
    streamlines : list or generator
        The same 3D arrays as a list or generator, but reoriented with
        respect to the ROIs

    Examples
    --------
    >>> streamlines = [np.array([[0, 0., 0],
    ...                          [1, 0., 0.],
    ...                          [2, 0., 0.]]),
    ...                np.array([[2, 0., 0.],
    ...                          [1, 0., 0],
    ...                          [0, 0,  0.]])]
    >>> roi1 = np.zeros((4, 4, 4), dtype=bool)
    >>> roi2 = np.zeros_like(roi1)
    >>> roi1[0, 0, 0] = True
    >>> roi2[1, 0, 0] = True
    >>> orient_by_rois(streamlines, roi1, roi2)
    [array([[ 0.,  0.,  0.],
           [ 1.,  0.,  0.],
           [ 2.,  0.,  0.]]), array([[ 0.,  0.,  0.],
           [ 1.,  0.,  0.],
           [ 2.,  0.,  0.]])]
    """
    # If we don't already have coordinates on our hands:
    if len(roi1.shape) == 3:
        roi1 = np.asarray(np.where(roi1.astype(bool))).T
    if len(roi2.shape) == 3:
        roi2 = np.asarray(np.where(roi2.astype(bool))).T

    if affine is not None:
        roi1 = apply_affine(affine, roi1)
        roi2 = apply_affine(affine, roi2)

    if as_generator:
        if in_place:
            w_s = "Cannot return a generator when in_place is set to True"
            raise ValueError(w_s)
        return _orient_generator(streamlines, roi1, roi2)

    # If it's a generator on input, we may as well generate it
    # here and now:
    if isinstance(streamlines, types.GeneratorType):
        out = list(streamlines)
    elif in_place:
        out = streamlines
    else:
        # Make a copy, so you don't change the output in place:
        out = deepcopy(streamlines)

    return _orient_list(out, roi1, roi2)

def cluster_stats(zimg, mask, height_th, height_control='fpr',
                  cluster_th=0, nulls={}):
    """
    Return a list of clusters, each cluster being represented by a
    dictionary. Clusters are sorted by descending size order. Within each
    cluster, local maxima are sorted by descending depth order.

    Parameters
    ----------
    zimg: z-score image
    mask: mask image
    height_th: cluster forming threshold
    height_control: string
        false positive control meaning of cluster forming
        threshold: 'fpr'|'fdr'|'bonferroni'|'none'
    cluster_th: cluster size threshold
    nulls : cluster-level calibration method: None|'rft'|array

    Notes
    -----
    This works only with three dimensional data
    """
    # Masking
    if len(mask.shape) > 3:
        xyz = np.where((mask.get_fdata() > 0).squeeze())
        zmap = zimg.get_fdata().squeeze()[xyz]
    else:
        xyz = np.where(mask.get_fdata() > 0)
        zmap = zimg.get_fdata()[xyz]

    xyz = np.array(xyz).T
    nvoxels = np.size(xyz, 0)

    # Thresholding
    if height_control == 'fpr':
        zth = sp_stats.norm.isf(height_th)
    elif height_control == 'fdr':
        zth = empirical_pvalue.gaussian_fdr_threshold(zmap, height_th)
    elif height_control == 'bonferroni':
        zth = sp_stats.norm.isf(height_th / nvoxels)
    else:  # Brute-force thresholding
        zth = height_th
    pth = sp_stats.norm.sf(zth)
    above_th = zmap > zth
    if len(np.where(above_th)[0]) == 0:
        return None, None  # FIXME
    zmap_th = zmap[above_th]
    xyz_th = xyz[above_th]

    # Clustering
    # Extract local maxima and connected components above the threshold
    ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz_th, k=18),
                                   zmap_th)
    maxima, depth = ff.get_local_maxima(th=zth)
    labels = ff.cc()

    # Make a list of clusters, each cluster being a dictionary
    clusters = []
    for k in range(labels.max() + 1):
        s = np.sum(labels == k)
        if s >= cluster_th:
            in_cluster = labels[maxima] == k
            m = maxima[in_cluster]
            d = depth[in_cluster]
            sorted_ = d.argsort()[::-1]
            clusters.append({'size': s, 'maxima': m[sorted_],
                             'depth': d[sorted_]})

    # Sort clusters by descending size order
    clusters.sort(key=lambda c: c['size'], reverse=True)

    # FDR-corrected p-values
    fdr_pvalue = empirical_pvalue.gaussian_fdr(zmap)[above_th]

    # Default "nulls"
    if 'zmax' not in nulls:
        nulls['zmax'] = 'bonferroni'
    if 'smax' not in nulls:
        nulls['smax'] = None
    if 's' not in nulls:
        nulls['s'] = None

    # Report significance levels in each cluster
    for c in clusters:
        maxima = c['maxima']
        zscore = zmap_th[maxima]
        pval = sp_stats.norm.sf(zscore)
        # Replace array indices with real coordinates
        c['maxima'] = apply_affine(zimg.affine, xyz_th[maxima])
        c['zscore'] = zscore
        c['pvalue'] = pval
        c['fdr_pvalue'] = fdr_pvalue[maxima]

        # Voxel-level corrected p-values
        p = None
        if nulls['zmax'] == 'bonferroni':
            p = bonferroni(pval, nvoxels)
        elif isinstance(nulls['zmax'], np.ndarray):
            p = simulated_pvalue(zscore, nulls['zmax'])
        c['fwer_pvalue'] = p

        # Cluster-level p-values (corrected)
        p = None
        if isinstance(nulls['smax'], np.ndarray):
            p = simulated_pvalue(c['size'], nulls['smax'])
        c['cluster_fwer_pvalue'] = p

        # Cluster-level p-values (uncorrected)
        p = None
        if isinstance(nulls['s'], np.ndarray):
            p = simulated_pvalue(c['size'], nulls['s'])
        c['cluster_pvalue'] = p

    # General info
    info = {'nvoxels': nvoxels,
            'threshold_z': zth,
            'threshold_p': pth,
            'threshold_pcorr': bonferroni(pth, nvoxels)}

    return clusters, info

def __init__(self, aff):
    self.func = lambda pts: apply_affine(aff, pts)

def select_by_rois(streamlines, rois, include, mode=None, affine=None,
                   tol=None):
    """Select streamlines based on logical relations with several regions of
    interest (ROIs). For example, select streamlines that pass near ROI1,
    but only if they do not pass near ROI2.

    Parameters
    ----------
    streamlines : list
        A list of candidate streamlines for selection
    rois : list or ndarray
        A list of 3D arrays, each with shape (x, y, z) corresponding to the
        shape of the brain volume, or a 4D array with shape (n_rois, x, y,
        z). Non-zeros in each volume are considered to be within the region
    include : array or list
        A list or 1D array of boolean values marking inclusion or exclusion
        criteria. If a streamline is near any of the inclusion ROIs, it
        should evaluate to True, unless it is also near any of the exclusion
        ROIs.
    mode : string, optional
        One of {"any", "all", "either_end", "both_end"}, where a streamline
        is associated with an ROI if:

        "any" : any point is within tol from ROI. Default.

        "all" : all points are within tol from ROI.

        "either_end" : either of the end-points is within tol from ROI

        "both_end" : both end points are within tol from ROI.
    affine : ndarray
        Affine transformation from voxels to streamlines. Default: identity.
    tol : float
        Distance (in the units of the streamlines, usually mm). If any
        coordinate in the streamline is within this distance from the center
        of any voxel in the ROI, the filtering criterion is set to True for
        this streamline, otherwise False. Defaults to the distance between
        the center of each voxel and the corner of the voxel.

    Notes
    -----
    The only operation currently possible is "(A or B or ...) and not (X or
    Y or ...)", where A, B are inclusion regions and X, Y are exclusion
    regions.

    Returns
    -------
    generator
        Generates the streamlines to be included based on these criteria.

    See also
    --------
    :func:`dipy.tracking.utils.near_roi`
    :func:`dipy.tracking.utils.reduce_rois`

    Examples
    --------
    >>> streamlines = [np.array([[0, 0., 0.9],
    ...                          [1.9, 0., 0.]]),
    ...                np.array([[0., 0., 0],
    ...                          [0, 1., 1.],
    ...                          [0, 2., 2.]]),
    ...                np.array([[2, 2, 2],
    ...                          [3, 3, 3]])]
    >>> mask1 = np.zeros((4, 4, 4), dtype=bool)
    >>> mask2 = np.zeros_like(mask1)
    >>> mask1[0, 0, 0] = True
    >>> mask2[1, 0, 0] = True
    >>> selection = select_by_rois(streamlines, [mask1, mask2],
    ...                            [True, True],
    ...                            tol=1)
    >>> list(selection) # The result is a generator
    [array([[ 0. ,  0. ,  0.9],
           [ 1.9,  0. ,  0. ]]), array([[ 0.,  0.,  0.],
           [ 0.,  1.,  1.],
           [ 0.,  2.,  2.]])]
    >>> selection = select_by_rois(streamlines, [mask1, mask2],
    ...                            [True, False],
    ...                            tol=0.87)
    >>> list(selection)
    [array([[ 0.,  0.,  0.],
           [ 0.,  1.,  1.],
           [ 0.,  2.,  2.]])]
    >>> selection = select_by_rois(streamlines, [mask1, mask2],
    ...                            [True, True],
    ...                            mode="both_end",
    ...                            tol=1.0)
    >>> list(selection)
    [array([[ 0. ,  0. ,  0.9],
           [ 1.9,  0. ,  0. ]])]
    >>> mask2[0, 2, 2] = True
    >>> selection = select_by_rois(streamlines, [mask1, mask2],
    ...                            [True, True],
    ...                            mode="both_end",
    ...                            tol=1.0)
    >>> list(selection)
    [array([[ 0. ,  0. ,  0.9],
           [ 1.9,  0. ,  0. ]]), array([[ 0.,  0.,  0.],
           [ 0.,  1.,  1.],
           [ 0.,  2.,  2.]])]
    """
    if affine is None:
        affine = np.eye(4)
    # This calculates the maximal distance to a corner of the voxel:
    dtc = dist_to_corner(affine)
    if tol is None:
        tol = dtc
    elif tol < dtc:
        w_s = "Tolerance input provided would create gaps in your"
        w_s += " inclusion ROI. Setting to: %s" % dtc
        warn(w_s)
        tol = dtc
    include_roi, exclude_roi = ut.reduce_rois(rois, include)
    include_roi_coords = np.array(np.where(include_roi)).T
    x_include_roi_coords = apply_affine(affine, include_roi_coords)
    exclude_roi_coords = np.array(np.where(exclude_roi)).T
    x_exclude_roi_coords = apply_affine(affine, exclude_roi_coords)

    if mode is None:
        mode = "any"
    for sl in streamlines:
        include = streamline_near_roi(sl, x_include_roi_coords,
                                      tol=tol, mode=mode)
        exclude = streamline_near_roi(sl, x_exclude_roi_coords,
                                      tol=tol, mode=mode)
        if include & ~exclude:
            yield sl

def _apply_affine():
    # streamlines_gen and self._affine_to_apply are captured from the
    # enclosing scope.
    for s in streamlines_gen:
        yield apply_affine(self._affine_to_apply, s)

def get_space_pos(self):
    """Get current cursor position in RAS space."""
    return apply_affine(self._affine, self._cross_pos)

def proc_file(infile, opts):
    # figure out the output filename, and see if it exists
    basefilename = splitext_addext(os.path.basename(infile))[0]
    if opts.outdir is not None:
        # set output path
        basefilename = os.path.join(opts.outdir, basefilename)

    # prep a file
    if opts.compressed:
        verbose("Using gzip compression")
        outfilename = basefilename + ".nii.gz"
    else:
        outfilename = basefilename + ".nii"
    if os.path.isfile(outfilename) and not opts.overwrite:
        raise IOError('Output file "%s" exists, use --overwrite to '
                      "overwrite it" % outfilename)

    # load the PAR header and data
    scaling = "dv" if opts.scaling == "off" else opts.scaling
    infile = fname_ext_ul_case(infile)
    pr_img = pr.load(infile,
                     permit_truncated=opts.permit_truncated,
                     scaling=scaling,
                     strict_sort=opts.strict_sort)
    pr_hdr = pr_img.header
    affine = pr_hdr.get_affine(origin=opts.origin)
    slope, intercept = pr_hdr.get_data_scaling(scaling)
    if opts.scaling != "off":
        verbose('Using data scaling "%s"' % opts.scaling)
    # get original scaling, and decide if we scale in-place or not
    if opts.scaling == "off":
        slope = np.array([1.0])
        intercept = np.array([0.0])
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    elif not np.any(np.diff(slope)) and not np.any(np.diff(intercept)):
        # Single scalefactor case
        slope = slope.ravel()[0]
        intercept = intercept.ravel()[0]
        in_data = pr_img.dataobj.get_unscaled()
        out_dtype = pr_hdr.get_data_dtype()
    else:
        # Multi scalefactor case
        slope = np.array([1.0])
        intercept = np.array([0.0])
        in_data = np.array(pr_img.dataobj)
        out_dtype = np.float64
    # Reorient data block to LAS+ if necessary
    ornt = io_orientation(np.diag([-1, 1, 1, 1]).dot(affine))
    if np.all(ornt == [[0, 1], [1, 1], [2, 1]]):  # already in LAS+
        t_aff = np.eye(4)
    else:  # Not in LAS+
        t_aff = inv_ornt_aff(ornt, pr_img.shape)
        affine = np.dot(affine, t_aff)
        in_data = apply_orientation(in_data, ornt)

    bvals, bvecs = pr_hdr.get_bvals_bvecs()
    if not opts.keep_trace:  # discard Philips DTI trace if present
        if bvecs is not None:
            bad_mask = np.logical_and(bvals != 0, (bvecs == 0).all(axis=1))
            if bad_mask.sum() > 0:
                pl = "s" if bad_mask.sum() != 1 else ""
                verbose("Removing %s DTI trace volume%s"
                        % (bad_mask.sum(), pl))
                good_mask = ~bad_mask
                in_data = in_data[..., good_mask]
                bvals = bvals[good_mask]
                bvecs = bvecs[good_mask]

    # Make corresponding NIfTI image
    nimg = nifti1.Nifti1Image(in_data, affine, pr_hdr)
    nhdr = nimg.header
    nhdr.set_data_dtype(out_dtype)
    nhdr.set_slope_inter(slope, intercept)
    nhdr.set_sform(affine, code=1)
    nhdr.set_qform(affine, code=1)

    if "parse" in opts.minmax:
        # need to get the scaled data
        verbose("Loading (and scaling) the data to determine value range")
    if opts.minmax[0] == "parse":
        nhdr["cal_min"] = in_data.min() * slope + intercept
    else:
        nhdr["cal_min"] = float(opts.minmax[0])
    if opts.minmax[1] == "parse":
        nhdr["cal_max"] = in_data.max() * slope + intercept
    else:
        nhdr["cal_max"] = float(opts.minmax[1])

    # container for potential NIfTI1 header extensions
    if opts.store_header:
        # dump the full PAR header content into an extension
        with open(infile, "rb") as fobj:  # contents must be bytes
            hdr_dump = fobj.read()
            dump_ext = nifti1.Nifti1Extension("comment", hdr_dump)
        nhdr.extensions.append(dump_ext)

    verbose("Writing %s" % outfilename)
    nibabel.save(nimg, outfilename)

    # write out bvals/bvecs if requested
    if opts.bvs:
        if bvals is None and bvecs is None:
            verbose("No DTI volumes detected, bvals and bvecs not written")
        elif bvecs is None:
            verbose("DTI volumes detected, but no diffusion direction info "
                    "was found. Writing .bvals file only.")
            with open(basefilename + ".bvals", "w") as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write("%s " % val)
                fid.write("\n")
        else:
            verbose("Writing .bvals and .bvecs files")
            # Transform bvecs with reorientation affine
            orig2new = npl.inv(t_aff)
            bv_reorient = from_matvec(to_matvec(orig2new)[0], [0, 0, 0])
            bvecs = apply_affine(bv_reorient, bvecs)
            with open(basefilename + ".bvals", "w") as fid:
                # np.savetxt could do this, but it's just a loop anyway
                for val in bvals:
                    fid.write("%s " % val)
                fid.write("\n")
            with open(basefilename + ".bvecs", "w") as fid:
                for row in bvecs.T:
                    for val in row:
                        fid.write("%s " % val)
                    fid.write("\n")

    # export data labels varying along the 4th dimension if requested
    if opts.vol_info:
        labels = pr_img.header.get_volume_labels()
        if len(labels) > 0:
            vol_keys = list(labels.keys())
            with open(basefilename + ".ordering.csv", "w") as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=",")
                csvwriter.writerow(vol_keys)
                for vals in zip(*[labels[k] for k in vol_keys]):
                    csvwriter.writerow(vals)

    # write out dwell time if requested
    if opts.dwell_time:
        try:
            dwell_time = calculate_dwell_time(
                pr_hdr.get_water_fat_shift(),
                pr_hdr.get_echo_train_length(),
                opts.field_strength)
        except MRIError:
            verbose("No EPI factors, dwell time not written")
        else:
            verbose("Writing dwell time (%r sec) calculated assuming %sT "
                    "magnet" % (dwell_time, opts.field_strength))
            with open(basefilename + ".dwell_time", "w") as fid:
                fid.write("%r\n" % dwell_time)