def ReadOthers(dir_):
    """
    Read the given Analyze, NIfTI, compressed NIfTI or PAR/REC file,
    remove singleton image dimensions and convert the image orientation to
    the RAS+ canonical coordinate system. The Analyze header does not store
    an affine transformation matrix, so Analyze images cannot be converted
    automatically to the canonical orientation.

    :param dir_: file path
    :return: imagedata object
    """
    if not const.VTK_WARNING:
        log_path = os.path.join(const.USER_LOG_DIR, 'vtkoutput.txt')
        fow = vtk.vtkFileOutputWindow()
        fow.SetFileName(log_path.encode(const.FS_ENCODE))
        ow = vtk.vtkOutputWindow()
        ow.SetInstance(fow)

    try:
        imagedata = nib.squeeze_image(nib.load(dir_))
        imagedata = nib.as_closest_canonical(imagedata)
        imagedata.update_header()
    except nib.filebasedimages.ImageFileError:
        return False

    return imagedata
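# A minimal, hedged usage sketch for the reader above: it builds a synthetic,
# non-RAS-oriented NIfTI in memory and applies the same squeeze + canonical
# reorientation steps. Only nibabel/numpy are assumed; ReadOthers itself also
# needs the surrounding `const`, `os`, and `vtk` context of its module.
import numpy as np
import nibabel as nib

data = np.zeros((16, 16, 16, 1), dtype=np.uint8)   # 4D with a singleton axis
affine = np.diag([-1.0, 1.0, -1.0, 1.0])           # LAI orientation, not RAS+
img = nib.Nifti1Image(data, affine)

img = nib.squeeze_image(img)                       # drop the singleton dim
img = nib.as_closest_canonical(img)                # reorient to RAS+
print(img.shape)                                   # (16, 16, 16)
print(nib.aff2axcodes(img.affine))                 # ('R', 'A', 'S')
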
def _load(self, in_file, mask_file=None):
    stem, ext = split_ext(in_file)
    self.stem, self.ext = stem, ext

    if mask_file is None:
        mask_file = self.inputs.mask
    self.mask = None

    if ext in [".nii", ".nii.gz"]:
        in_img = nib.load(in_file)
        self.in_img = in_img

        ndim = np.asanyarray(in_img.dataobj).ndim
        if ndim == 3:
            volumes = [in_img]
        elif ndim == 4:
            volumes = nib.four_to_three(in_img)
        else:
            raise ValueError(
                f'Unexpected number of dimensions {ndim:d} in "{in_file}"')

        volume_shape = volumes[0].shape
        n_voxels = np.prod(volume_shape)

        if (isdefined(mask_file) and isinstance(mask_file, str)
                and Path(mask_file).is_file()):
            mask_img = nib.squeeze_image(nib.load(mask_file))
            assert nvol(mask_img) == 1
            assert np.allclose(mask_img.affine, in_img.affine)

            mask_fdata = mask_img.get_fdata(dtype=np.float64)
            mask_bin = np.logical_not(
                np.logical_or(mask_fdata <= 0,
                              np.isclose(mask_fdata, 0, atol=1e-2)))
            self.mask = mask_bin
            n_voxels = np.count_nonzero(mask_bin)

        n_volumes = len(volumes)
        array = np.zeros((n_volumes, n_voxels))
        for i, volume in enumerate(volumes):
            volume_data = volume.get_fdata()
            if self.mask is not None:
                array[i, :] = volume_data[self.mask]
            else:
                array[i, :] = np.ravel(volume_data)

    else:  # a text file
        in_df = read_spreadsheet(in_file)
        self.in_df = in_df
        array = in_df.to_numpy().astype(np.float64)

    return array
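# Hedged sketch of the core of _load above: flattening a 4D image into an
# (n_volumes, n_voxels) array restricted to a binary mask. Synthetic arrays
# stand in for real files; only nibabel/numpy are assumed.
import numpy as np
import nibabel as nib

img = nib.Nifti1Image(np.random.rand(4, 4, 4, 10).astype(np.float32), np.eye(4))
mask = np.zeros((4, 4, 4), dtype=bool)
mask[1:3, 1:3, 1:3] = True                          # 8 voxels inside the mask

volumes = nib.four_to_three(img)                    # list of 10 3D images
array = np.stack([vol.get_fdata()[mask] for vol in volumes])
print(array.shape)                                  # (10, 8)
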
def sanitize(input_fname):
    im = nb.as_closest_canonical(nb.squeeze_image(nb.load(str(input_fname))))
    hdr = im.header.copy()
    dtype = 'int16'
    data = None
    if str(input_fname).endswith('_mask.nii.gz'):
        dtype = 'uint8'
        data = im.get_fdata() > 0
    if str(input_fname).endswith('_probseg.nii.gz'):
        dtype = 'float32'
        hdr['cal_max'] = 1.0
        hdr['cal_min'] = 0.0
        data = im.get_fdata()
        data[data < 0] = 0
    if input_fname.name.split('_')[-1].split('.')[0] in ('T1w', 'T2w', 'PD'):
        data = im.get_fdata()
        data[data < 0] = 0

    hdr.set_data_dtype(dtype)
    nii = nb.Nifti1Image(
        data if data is not None else im.get_fdata().astype(dtype),
        im.affine, hdr)

    sform = nii.header.get_sform()
    nii.header.set_sform(sform, 4)
    nii.header.set_qform(sform, 4)
    nii.header.set_xyzt_units(xyz='mm')
    nii.to_filename(str(input_fname))
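# Hedged sketch of the header bookkeeping sanitize() performs above: copy the
# sform into the qform and mark both with xform code 4 (MNI space), via the
# nibabel set_sform/set_qform API. Synthetic image; nibabel/numpy assumed.
import numpy as np
import nibabel as nb

nii = nb.Nifti1Image(np.zeros((4, 4, 4), dtype=np.int16), np.eye(4))
sform = nii.header.get_sform()
nii.header.set_sform(sform, 4)
nii.header.set_qform(sform, 4)
nii.header.set_xyzt_units(xyz='mm')
print(int(nii.header['sform_code']), int(nii.header['qform_code']))  # 4 4
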
def _run_interface(self, runtime):
    ref_name = self.inputs.in_file
    ref_nii = nb.load(ref_name)
    n_volumes_to_discard = _get_vols_to_discard(ref_nii)
    self._results["n_volumes_to_discard"] = n_volumes_to_discard

    out_ref_fname = os.path.join(runtime.cwd, "ref_bold.nii.gz")
    if isdefined(self.inputs.sbref_file):
        out_ref_fname = os.path.join(runtime.cwd, "ref_sbref.nii.gz")
        ref_name = self.inputs.sbref_file
        ref_nii = nb.squeeze_image(nb.load(ref_name))

        # If reference is only 1 volume, return it directly
        if len(ref_nii.shape) == 3:
            ref_nii.header.extensions.clear()
            ref_nii.to_filename(out_ref_fname)
            self._results['ref_image'] = out_ref_fname
            return runtime
        else:
            # Reset this variable as it no longer applies
            # and value for the output is stored in self._results
            n_volumes_to_discard = 0

    # Slicing may induce inconsistencies with shape-dependent values in extensions.
    # For now, remove all. If this turns out to be a mistake, we can select extensions
    # that don't break pipeline stages.
    ref_nii.header.extensions.clear()

    if n_volumes_to_discard == 0:
        if ref_nii.shape[-1] > 40:
            ref_name = os.path.join(runtime.cwd, "slice.nii.gz")
            nb.Nifti1Image(ref_nii.dataobj[:, :, :, 20:40], ref_nii.affine,
                           ref_nii.header).to_filename(ref_name)

        if self.inputs.mc_method == "AFNI":
            res = afni.Volreg(in_file=ref_name, args='-Fourier -twopass',
                              zpad=4, outputtype='NIFTI_GZ').run()
        elif self.inputs.mc_method == "FSL":
            res = fsl.MCFLIRT(in_file=ref_name, ref_vol=0,
                              interpolation='sinc').run()
        mc_slice_nii = nb.load(res.outputs.out_file)

        median_image_data = np.median(mc_slice_nii.get_fdata(), axis=3)
    else:
        median_image_data = np.median(
            ref_nii.dataobj[:, :, :, :n_volumes_to_discard], axis=3)

    nb.Nifti1Image(median_image_data, ref_nii.affine,
                   ref_nii.header).to_filename(out_ref_fname)

    self._results["ref_image"] = out_ref_fname
    return runtime
def _run_interface(self, runtime):
    in_files = self.inputs.in_files
    if not isinstance(in_files, list):
        in_files = [self.inputs.in_files]

    # Generate output average name early
    self._results['out_avg'] = fname_presuffix(self.inputs.in_files[0],
                                               suffix='_avg',
                                               newpath=runtime.cwd)

    if self.inputs.to_ras:
        in_files = [reorient(inf, newpath=runtime.cwd) for inf in in_files]

    if len(in_files) == 1:
        filenii = nb.load(in_files[0])

        # magnitude files can have an empty extra dimension
        if len(filenii.shape) == 5:
            filenii = nb.squeeze_image(filenii)
            if len(filenii.shape) == 5:
                raise RuntimeError('Input image (%s) is 5D' % in_files[0])

            in_files = [fname_presuffix(in_files[0], suffix='_squeezed',
                                        newpath=runtime.cwd)]
            filenii.to_filename(in_files[0])

        if filenii.dataobj.ndim < 4:
            self._results['out_file'] = in_files[0]
            self._results['out_avg'] = in_files[0]
            # TODO: generate identity out_mats and zero-filled out_movpar
            return runtime

        in_files = in_files[0]
    else:
        magmrg = fsl.Merge(dimension='t', in_files=self.inputs.in_files)
        in_files = magmrg.run().outputs.merged_file

    mcflirt = fsl.MCFLIRT(cost='normcorr', save_mats=True, save_plots=True,
                          ref_vol=0, in_file=in_files)
    mcres = mcflirt.run()
    self._results['out_mats'] = mcres.outputs.mat_file
    self._results['out_movpar'] = mcres.outputs.par_file
    self._results['out_file'] = mcres.outputs.out_file

    hmcnii = nb.load(mcres.outputs.out_file)
    hmcdat = hmcnii.get_fdata().mean(axis=3)
    if self.inputs.zero_based_avg:
        hmcdat -= hmcdat.min()

    nb.Nifti1Image(hmcdat, hmcnii.affine,
                   hmcnii.header).to_filename(self._results['out_avg'])

    return runtime
def median(in_file):
    """Average a 4D dataset across the last dimension using median."""
    out_file = fname_presuffix(in_file, suffix="_mean.nii.gz", use_ext=False)

    img = nib.load(in_file)
    if img.dataobj.ndim == 3:
        return in_file
    if img.shape[-1] == 1:
        nib.squeeze_image(img).to_filename(out_file)
        return out_file

    median_data = np.median(img.get_fdata(dtype="float32"), axis=-1)

    hdr = img.header.copy()
    hdr.set_xyzt_units("mm")
    hdr.set_data_dtype(np.float32)
    nib.Nifti1Image(median_data, img.affine, hdr).to_filename(out_file)
    return out_file
def median(in_file, newpath=None):
    """Average a 4D dataset across the last dimension using median."""
    out_file = fname_presuffix(in_file, suffix='_b0ref', newpath=newpath)

    img = nb.load(in_file)
    if img.dataobj.ndim == 3:
        return in_file
    if img.shape[-1] == 1:
        nb.squeeze_image(img).to_filename(out_file)
        return out_file

    median_data = np.median(img.get_fdata(dtype='float32'), axis=-1)

    hdr = img.header.copy()
    hdr.set_xyzt_units('mm')
    hdr.set_data_dtype(np.float32)
    nb.Nifti1Image(median_data, img.affine, hdr).to_filename(out_file)
    return out_file
def median(in_file, out_path=None):
    """Average a 4D dataset across the last dimension using median."""
    if out_path is None:
        out_path = fname_presuffix(in_file, suffix='_b0ref', use_ext=True)

    img = nb.load(in_file)
    if img.dataobj.ndim == 3:
        return in_file
    if img.shape[-1] == 1:
        nb.squeeze_image(img).to_filename(out_path)
        return out_path

    dtype = img.get_data_dtype()
    median_data = np.median(img.get_fdata(), axis=-1)
    nb.Nifti1Image(median_data.astype(dtype), img.affine,
                   img.header).to_filename(out_path)
    return out_path
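# Hedged check of what the three median() variants above compute: the
# voxelwise median across the last (time) axis of a 4D series. Synthetic
# data only; nibabel/numpy assumed.
import numpy as np
import nibabel as nb

series = nb.Nifti1Image(np.random.rand(8, 8, 8, 5).astype(np.float32), np.eye(4))
median_data = np.median(series.get_fdata(dtype=np.float32), axis=-1)
median_img = nb.Nifti1Image(median_data, series.affine, series.header)
print(median_img.shape)                             # (8, 8, 8)
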
def fit(
    cope_files: List[Path],
    var_cope_files: Optional[List[Path]],
    mask_files: List[Path],
    regressors: Dict[str, List[float]],
    contrasts: List[Tuple],
    algorithms_to_run: List[str],
    num_threads: int,
) -> Dict:
    voxel_data, cmatdict = load_data(
        cope_files,
        var_cope_files,
        mask_files,
        regressors,
        contrasts,
        algorithms_to_run,
    )

    # setup run
    if num_threads < 2:
        pool: Optional[Pool] = None
        it: Iterator = map(voxel_calc, voxel_data)
        cm: ContextManager = nullcontext()
    else:
        pool = Pool(processes=num_threads)
        it = pool.imap_unordered(voxel_calc, voxel_data)
        cm = pool

    # run
    voxel_results: Dict = defaultdict(lambda: defaultdict(dict))
    with cm:
        for x in tqdm(it, unit="voxels"):
            if x is None:
                continue
            for a, d in x.items():  # transpose
                if d is None:
                    continue
                for k, v in d.items():
                    if v is None:
                        continue
                    voxel_results[a][k].update(v)

    ref_image = nib.squeeze_image(nib.load(cope_files[0]))

    output_files = dict()
    for a, v in voxel_results.items():
        output_files.update(algorithms[a].write_outputs(ref_image, cmatdict, v))

    return output_files
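# Hedged sketch of the serial/parallel dispatch pattern used in fit() above:
# below a thread threshold, iterate with map() and a nullcontext; otherwise
# fan out with Pool.imap_unordered, using the pool itself as the context
# manager so workers are cleaned up. voxel_calc here is a stand-in kernel.
from contextlib import nullcontext
from multiprocessing import Pool

def voxel_calc(x):
    return x * x

def run(data, num_threads=1):
    if num_threads < 2:
        it, cm = map(voxel_calc, data), nullcontext()
    else:
        pool = Pool(processes=num_threads)
        it, cm = pool.imap_unordered(voxel_calc, data), pool
    with cm:
        return sorted(it)  # imap_unordered does not guarantee order

if __name__ == "__main__":
    print(run(range(5), num_threads=2))  # [0, 1, 4, 9, 16]
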
def _run_interface(self, runtime):
    # Squeeze 4th dimension if possible (#660)
    nii = nb.squeeze_image(nb.load(self.inputs.in_file))
    hdr = nii.header.copy()

    if self.inputs.check_ras:
        nii = nb.as_closest_canonical(nii)

    if self.inputs.check_dtype:
        changed = True
        datatype = int(hdr["datatype"])

        if datatype == 1:
            config.loggers.interface.warning(
                'Input image %s has a suspicious data type "%s"',
                self.inputs.in_file,
                hdr.get_data_dtype(),
            )

        # signed char and bool to uint8
        if datatype == 1 or datatype == 2 or datatype == 256:
            dtype = np.uint8
        # int16 to uint16
        elif datatype == 4:
            dtype = np.uint16
        # Signed long, long long, etc to uint32
        elif datatype == 8 or datatype == 1024 or datatype == 1280:
            dtype = np.uint32
        # Floats over 32 bits
        elif datatype == 64 or datatype == 1536:
            dtype = np.float32
        else:
            changed = False

        if changed:
            hdr.set_data_dtype(dtype)
            nii = nb.Nifti1Image(np.asanyarray(nii.dataobj).astype(dtype),
                                 nii.affine, hdr)

    # Generate name
    out_file, ext = op.splitext(op.basename(self.inputs.in_file))
    if ext == ".gz":
        out_file, ext2 = op.splitext(out_file)
        ext = ext2 + ext

    self._results["out_file"] = op.abspath("{}_conformed{}".format(out_file, ext))
    nii.to_filename(self._results["out_file"])

    return runtime
def _merge(in_file):
    import nibabel as nb
    import numpy as np

    img = nb.squeeze_image(nb.load(in_file))
    data = np.asanyarray(img.dataobj)
    if data.ndim == 3:
        return in_file

    from pathlib import Path

    data = data.mean(-1)
    out_file = (Path() / "merged.nii.gz").absolute()
    img.__class__(data, img.affine, img.header).to_filename(out_file)
    return str(out_file)
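# Hedged usage sketch for _merge above: a 4D input collapses to its mean
# volume (written to merged.nii.gz in the working directory), while a 3D
# input is passed through untouched. tempfile keeps the example
# self-contained; nibabel/numpy assumed.
import os
import tempfile
import numpy as np
import nibabel as nb

fname = os.path.join(tempfile.mkdtemp(), "bold.nii.gz")
nb.Nifti1Image(np.random.rand(4, 4, 4, 6).astype(np.float32),
               np.eye(4)).to_filename(fname)
out = _merge(fname)
print(nb.load(out).shape)                           # (4, 4, 4)
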
def _run_interface(self, runtime):
    # Squeeze 4th dimension if possible (#660)
    nii = nb.squeeze_image(nb.load(self.inputs.in_file))
    hdr = nii.header.copy()

    if self.inputs.check_ras:
        nii = nb.as_closest_canonical(nii)

    if self.inputs.check_dtype:
        changed = True
        datatype = int(hdr['datatype'])

        if datatype == 1:
            IFLOGGER.warning('Input image %s has a suspicious data type "%s"',
                             self.inputs.in_file, hdr.get_data_dtype())

        # signed char and bool to uint8
        if datatype == 1 or datatype == 2 or datatype == 256:
            dtype = np.uint8
        # int16 to uint16
        elif datatype == 4:
            dtype = np.uint16
        # Signed long, long long, etc to uint32
        elif datatype == 8 or datatype == 1024 or datatype == 1280:
            dtype = np.uint32
        # Floats over 32 bits
        elif datatype == 64 or datatype == 1536:
            dtype = np.float32
        else:
            changed = False

        if changed:
            hdr.set_data_dtype(dtype)
            nii = nb.Nifti1Image(np.asanyarray(nii.dataobj).astype(dtype),
                                 nii.affine, hdr)

    # Generate name
    out_file, ext = op.splitext(op.basename(self.inputs.in_file))
    if ext == '.gz':
        out_file, ext2 = op.splitext(out_file)
        ext = ext2 + ext

    self._results['out_file'] = op.abspath('{}_conformed{}'.format(out_file, ext))
    nii.to_filename(self._results['out_file'])

    return runtime
def _flatten_split_merge(in_files):
    if isinstance(in_files, str):
        in_files = [in_files]

    nfiles = len(in_files)

    all_nii = []
    for fname in in_files:
        nii = nb.squeeze_image(nb.load(fname))

        if nii.dataobj.ndim > 3:
            all_nii += nb.four_to_three(nii)
        else:
            all_nii.append(nii)

    if len(all_nii) == 1:
        LOGGER.warning('File %s cannot be split', all_nii[0])
        return in_files[0], in_files

    if len(all_nii) == nfiles:
        flat_split = in_files
    else:
        splitname = fname_presuffix(in_files[0], suffix='_split%04d',
                                    newpath=os.getcwd())
        flat_split = []
        for i, nii in enumerate(all_nii):
            flat_split.append(splitname % i)
            nii.to_filename(flat_split[-1])

    # Only one 4D file was supplied
    if nfiles == 1:
        merged = in_files[0]
    else:
        # More than one in_files - need merge
        merged = fname_presuffix(in_files[0], suffix='_merged',
                                 newpath=os.getcwd())
        nb.concat_images(all_nii).to_filename(merged)

    return merged, flat_split
def rescale_b0(in_file, mask_file, out_path=None):
    """Rescale the input volumes using the median signal intensity."""
    if out_path is None:
        out_path = fname_presuffix(in_file, suffix='_rescaled', use_ext=True)

    img = nb.squeeze_image(nb.load(in_file))
    if img.dataobj.ndim == 3:
        return in_file, [1.0]

    mask_data = nb.load(mask_file).get_fdata() > 0

    dtype = img.get_data_dtype()
    data = img.get_fdata()

    median_signal = np.median(data[mask_data, ...], axis=0)
    # Normalize to the first volume: multiplying volume i by
    # median_signal[0] / median_signal[i] equalizes the in-mask medians
    signal_drift = median_signal[0] / median_signal
    data *= signal_drift

    nb.Nifti1Image(data.astype(dtype), img.affine,
                   img.header).to_filename(out_path)
    return out_path, signal_drift.tolist()
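# Hedged numeric check of the drift correction in rescale_b0 above: with
# per-volume median signals m = [100, 110, 120], the factor m[0] / m makes
# every rescaled volume's median equal to m[0]. numpy only.
import numpy as np

median_signal = np.array([100.0, 110.0, 120.0])
signal_drift = median_signal[0] / median_signal     # [1.0, 0.909..., 0.833...]
rescaled = median_signal * signal_drift
print(rescaled)                                     # [100. 100. 100.]
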
def _flatten_split_merge(in_files):
    from builtins import bytes, str

    if isinstance(in_files, (bytes, str)):
        in_files = [in_files]

    nfiles = len(in_files)

    all_nii = []
    for fname in in_files:
        nii = nb.squeeze_image(nb.load(fname))

        if nii.dataobj.ndim > 3:
            all_nii += nb.four_to_three(nii)
        else:
            all_nii.append(nii)

    if len(all_nii) == 1:
        LOGGER.warning('File %s cannot be split', all_nii[0])
        return in_files[0], in_files

    if len(all_nii) == nfiles:
        flat_split = in_files
    else:
        splitname = genfname(in_files[0], suffix='split%04d')
        flat_split = []
        for i, nii in enumerate(all_nii):
            flat_split.append(splitname % i)
            nii.to_filename(flat_split[-1])

    # Only one 4D file was supplied
    if nfiles == 1:
        merged = in_files[0]
    else:
        # More than one in_files - need merge
        merged = genfname(in_files[0], suffix='merged')
        nb.concat_images(all_nii).to_filename(merged)

    return merged, flat_split
def _run_interface(self, runtime):
    """
    Execute this interface with the provided runtime.

    TODO: Is the *runtime* argument required? It doesn't seem to be used
    anywhere.

    Parameters
    ----------
    runtime : Any
        Execution runtime ?

    Returns
    -------
    Any
        Execution runtime ?
    """
    # Squeeze 4th dimension if possible (#660)
    nii = nib.squeeze_image(nib.load(self.inputs.in_file))

    if self.inputs.check_ras:
        nii = nib.as_closest_canonical(nii)

    if self.inputs.check_dtype:
        nii = self._check_dtype(nii)

    # Generate name
    out_file, ext = op.splitext(op.basename(self.inputs.in_file))
    if ext == ".gz":
        out_file, ext2 = op.splitext(out_file)
        ext = ext2 + ext

    out_file_name = OUT_FILE.format(prefix=out_file, ext=ext)
    self._results["out_file"] = op.abspath(out_file_name)
    nii.to_filename(self._results["out_file"])

    return runtime
def _run_interface(self, runtime):
    nii_list = []
    for f in self.inputs.in_files:
        filenii = nb.squeeze_image(nb.load(f))
        ndim = filenii.dataobj.ndim
        if ndim == 3:
            nii_list.append(filenii)
            continue
        elif self.inputs.allow_4D and ndim == 4:
            nii_list += nb.four_to_three(filenii)
            continue
        else:
            raise ValueError(
                "Input image has an incorrect number of dimensions"
                f" ({ndim}).")

    img_4d = nb.concat_images(nii_list)
    out_file = fname_presuffix(self.inputs.in_files[0], suffix="_merged",
                               newpath=runtime.cwd)
    img_4d.to_filename(out_file)

    self._results["out_file"] = out_file
    return runtime
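# Hedged sketch of the merge performed above: a list of 3D volumes (plus,
# with allow_4D, split 4D series) concatenated into one 4D image with
# nb.concat_images. Synthetic inputs; nibabel/numpy assumed.
import numpy as np
import nibabel as nb

vols = [nb.Nifti1Image(np.random.rand(4, 4, 4).astype(np.float32), np.eye(4))
        for _ in range(3)]
img_4d = nb.concat_images(vols)
print(img_4d.shape)                                 # (4, 4, 4, 3)
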
def main(): """ Visualize Freesurfer, SimNIBS headreco, and Nexstim coil locations in the scanner coordinate system. """ SHOW_AXES = True SHOW_SCENE_AXES = True SHOW_COIL_AXES = True SHOW_SKIN = True SHOW_BRAIN = True SHOW_FREESURFER = True SHOW_COIL = True SHOW_MARKERS = True TRANSF_COIL = True SHOW_PLANE = False SELECT_LANDMARKS = 'scalp' # 'all', 'mri' 'scalp' SAVE_ID = False AFFINE_IMG = True NO_SCALE = True SCREENSHOT = False reorder = [0, 2, 1] flipx = [True, False, False] # reorder = [0, 1, 2] # flipx = [False, False, False] # default folder and subject # subj = 's03' subj = 'S5' id_extra = False # 8, 9, 10, 12, False data_dir = os.environ['OneDrive'] + r'\data\nexstim_coord' # data_dir = 'P:\\tms_eeg\\mTMS\\projects\\lateral ppTMS M1\\E-fields\\' # data_subj = data_dir + subj + '\\' simnibs_dir = data_dir + r'\simnibs\m2m_ppM1_{}_nc'.format(subj) fs_dir = data_dir + r'\freesurfer\ppM1_{}'.format(subj) if id_extra: nav_dir = data_dir + r'\nav_coordinates\ppM1_{}_{}'.format( subj, id_extra) else: nav_dir = data_dir + r'\nav_coordinates\ppM1_{}'.format(subj) # filenames # coil_file = data_dir + 'magstim_fig8_coil.stl' coil_file = os.environ[ 'OneDrive'] + r'\data\nexstim_coord\magstim_fig8_coil.stl' if id_extra: coord_file = nav_dir + r'\ppM1_eximia_{}_{}.txt'.format(subj, id_extra) else: coord_file = nav_dir + r'\ppM1_eximia_{}.txt'.format(subj) # img_file = data_subj + subj + '.nii' img_file = data_dir + r'\mri\ppM1_{}\ppM1_{}.nii'.format(subj, subj) brain_file = simnibs_dir + r'\wm.stl' skin_file = simnibs_dir + r'\skin.stl' fs_file = fs_dir + r'\lh.pial.stl' fs_t1 = fs_dir + r'\mri\T1.mgz' if id_extra: output_file = nav_dir + r'\transf_mat_{}_{}'.format(subj, id_extra) else: output_file = nav_dir + r'\transf_mat_{}'.format(subj) coords = lc.load_nexstim(coord_file) # red, green, blue, maroon (dark red), # olive (shitty green), teal (petrol blue), yellow, orange col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.], [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]] # extract image header shape and affine transformation from original nifti file imagedata = nb.squeeze_image(nb.load(img_file)) imagedata = nb.as_closest_canonical(imagedata) imagedata.update_header() pix_dim = imagedata.header.get_zooms() img_shape = imagedata.header.get_data_shape() print("Pixel size: \n") print(pix_dim) print("\nImage shape: \n") print(img_shape) if AFFINE_IMG: affine = imagedata.affine if NO_SCALE: scale, shear, angs, trans, persp = tf.decompose_matrix( imagedata.affine) affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp) print("\nAffine: \n") print(affine) else: affine = np.identity(4) # affine_I = np.identity(4) # create a camera, render window and renderer camera = vtk.vtkCamera() camera.SetPosition(0, 1000, 0) camera.SetFocalPoint(0, 0, 0) camera.SetViewUp(0, 0, 1) camera.ComputeViewPlaneNormal() camera.Azimuth(90.0) camera.Elevation(10.0) ren = vtk.vtkRenderer() ren.SetActiveCamera(camera) ren.ResetCamera() camera.Dolly(1.5) ren_win = vtk.vtkRenderWindow() ren_win.AddRenderer(ren) ren_win.SetSize(800, 800) # create a renderwindowinteractor iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(ren_win) if SELECT_LANDMARKS == 'mri': # MRI landmarks coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] pts_ref = [1, 2, 3, 7, 10] elif SELECT_LANDMARKS == 'all': # all coords coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF 
max']] pts_ref = [1, 2, 3, 5, 4, 6, 7, 10] elif SELECT_LANDMARKS == 'scalp': # scalp landmarks coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] hdr_mri = [ 'Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max' ] pts_ref = [5, 4, 6, 7, 10] coords_np = np.zeros([len(pts_ref), 3]) for n, pts_id in enumerate(pts_ref): # to keep in the MRI space use the identity as the affine # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder) # affine_trans = affine_I.copy() # affine_trans = affine.copy() # affine_trans[:3, -1] = affine[:3, -1] coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder) coords_np[n, :] = coord_aux [coord_mri[n].append(s) for s in coord_aux] if SHOW_MARKERS: marker_actor = add_marker(coord_aux, ren, col[n]) print('\nOriginal coordinates from Nexstim: \n') [print(s) for s in coords] print('\nTransformed coordinates to MRI space: \n') [print(s) for s in coord_mri] # coil location, normal vector and direction vector coil_loc = coord_mri[-2][1:] coil_norm = coords[8][1:] coil_dir = coords[9][1:] # creating the coil coordinate system by adding a point in the direction of each given coil vector # the additional vector is just the cross product from coil direction and coil normal vectors # origin of the coordinate system is the coil location given by Nexstim # the vec_length is to allow line creation with visible length in VTK scene vec_length = 75 p1 = coords[7][1:] p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)] p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder) p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)] p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder) coil_face = np.cross(coil_norm, coil_dir) p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())] p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder) # Coil face unit vector (X) u1 = np.asarray(p2_face) - np.asarray(coil_loc) u1_n = u1 / np.linalg.norm(u1) # Coil direction unit vector (Y) u2 = np.asarray(p2_dir) - np.asarray(coil_loc) u2_n = u2 / np.linalg.norm(u2) # Coil normal unit vector (Z) u3 = np.asarray(p2_norm) - np.asarray(coil_loc) u3_n = u3 / np.linalg.norm(u3) transf_matrix = np.identity(4) if TRANSF_COIL: transf_matrix[:3, 0] = u1_n transf_matrix[:3, 1] = u2_n transf_matrix[:3, 2] = u3_n transf_matrix[:3, 3] = coil_loc[:] # the absolute value of the determinant indicates the scaling factor # the sign of the determinant indicates how it affects the orientation: if positive maintain the # original orientation and if negative inverts all the orientations (flip the object inside-out)' # the negative determinant is what makes objects in VTK scene to become # black print('Transformation matrix: \n', transf_matrix, '\n') print('Determinant: ', np.linalg.det(transf_matrix)) if SAVE_ID: coord_dict = { 'm_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np } io.savemat(output_file + '.mat', coord_dict) hdr_names = ';'.join( ['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)]) np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names) if SHOW_BRAIN: # brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=0.7, user_matrix=np.linalg.inv(affine)) brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=1.) 
if SHOW_SKIN: # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine)) skin_actor = load_stl(skin_file, ren, colour="SkinColor", opacity=.4) if SHOW_FREESURFER: img = fsio.MGHImage.load(fs_t1) #print("MGH Header: ", img) #print("MGH data: ", img.header['Pxyz_c']) # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine)) trans_fs = np.identity(4) trans_fs[:3, -1] = img.header['Pxyz_c'] fs_actor = load_stl(fs_file, ren, colour=[1., 0., 1.], opacity=0.5, user_matrix=trans_fs) if SHOW_COIL: # reposition STL object prior to transformation matrix # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z] # old translation when using Y as normal vector # repos = [0., -6., 0., 0., -90., 90.] # Translate coil loc coordinate to coil bottom # repos = [0., 0., 5.5, 0., 0., 180.] repos = [0., 0., 0., 0., 0., 180.] act_coil = load_stl(coil_file, ren, replace=repos, user_matrix=transf_matrix, opacity=.3) if SHOW_PLANE: act_plane = add_plane(ren, user_matrix=transf_matrix) # Add axes to scene origin if SHOW_AXES: add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0]) add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0]) add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0]) # Add axes to object origin if SHOW_COIL_AXES: add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0]) add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0]) add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0]) # Add interactive axes to scene if SHOW_SCENE_AXES: axes = vtk.vtkAxesActor() widget = vtk.vtkOrientationMarkerWidget() widget.SetOutlineColor(0.9300, 0.5700, 0.1300) widget.SetOrientationMarker(axes) widget.SetInteractor(iren) # widget.SetViewport(0.0, 0.0, 0.4, 0.4) widget.SetEnabled(1) widget.InteractiveOn() if SCREENSHOT: # screenshot of VTK scene w2if = vtk.vtkWindowToImageFilter() w2if.SetInput(ren_win) w2if.Update() writer = vtk.vtkPNGWriter() writer.SetFileName("screenshot.png") writer.SetInput(w2if.GetOutput()) writer.Write() # Enable user interface interactor # ren_win.Render() ren.ResetCameraClippingRange() iren.Initialize() iren.Start()
def main():
    SHOW_AXES = True
    SHOW_SCENE_AXES = True
    SHOW_COIL_AXES = True
    SHOW_SKIN = True
    SHOW_BRAIN = True
    SHOW_COIL = True
    SHOW_MARKERS = True
    TRANSF_COIL = True
    SHOW_PLANE = False
    SELECT_LANDMARKS = 'scalp'  # 'all', 'mri', 'scalp'
    SAVE_ID = True
    AFFINE_IMG = True
    NO_SCALE = True
    SCREENSHOT = False
    SHOW_OTHER = False

    reorder = [0, 2, 1]
    flipx = [True, False, False]

    # reorder = [0, 1, 2]
    # flipx = [False, False, False]

    # default folder and subject
    # for Bert image use the translation in the base_affine (fall-back)
    subj_list = ['VictorSouza', 'JaakkoNieminen', 'AinoTervo', 'JuusoKorhonen',
                 'BaranAydogan', 'AR', 'Bert']
    subj = 0
    data_dir = os.environ.get('OneDrive') + \
        r'\vh\eventos\sf 2019\mri_science_factory\{}'.format(subj_list[subj])

    # filenames
    img_file = data_dir + r'\{}.nii'.format(subj_list[subj])
    brain_file = data_dir + r'\gm.stl'
    skin_file = data_dir + r'\gm_sn.stl'
    if subj == 3:
        other_file = data_dir + r'\gm.ply'
    elif subj == 4:
        other_file = data_dir + r'\tracks.vtp'
    elif subj == 6:
        other_file = data_dir + r'\gm.ply'
    else:
        other_file = data_dir + r'\gm.stl'

    # coords = lc.load_nexstim(coord_file)
    # red, green, blue, maroon (dark red),
    # olive (shitty green), teal (petrol blue), yellow, orange
    col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],
           [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]

    # extract image header shape and affine transformation from original nifti file
    imagedata = nb.squeeze_image(nb.load(img_file))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()

    print("Pixel size: \n")
    print(pix_dim)
    print("\nImage shape: \n")
    print(img_shape)
    print("\nSform: \n")
    print(imagedata.get_sform(coded=True))
    print("\nQform: \n")
    print(imagedata.get_qform(coded=True))
    print("\nFall-back: \n")
    print(imagedata.header.get_base_affine())

    scale_back, shear_back, angs_back, trans_back, persp_back = \
        tf.decompose_matrix(imagedata.header.get_base_affine())

    if AFFINE_IMG:
        affine = imagedata.affine
        # affine = imagedata.header.get_base_affine()
        if NO_SCALE:
            scale, shear, angs, trans, persp = tf.decompose_matrix(affine)
            affine = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)
    else:
        affine = np.identity(4)
    # affine_I = np.identity(4)

    # create a camera, render window and renderer
    camera = vtk.vtkCamera()
    camera.SetPosition(0, 1000, 0)
    camera.SetFocalPoint(0, 0, 0)
    camera.SetViewUp(0, 0, 1)
    camera.ComputeViewPlaneNormal()
    camera.Azimuth(90.0)
    camera.Elevation(10.0)

    ren = vtk.vtkRenderer()
    ren.SetActiveCamera(camera)
    ren.ResetCamera()
    ren.SetUseDepthPeeling(1)
    ren.SetOcclusionRatio(0.1)
    ren.SetMaximumNumberOfPeels(100)
    camera.Dolly(1.5)

    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)
    ren_win.SetMultiSamples(0)
    ren_win.SetAlphaBitPlanes(1)

    # create a renderwindowinteractor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    # if SELECT_LANDMARKS == 'mri':
    #     # MRI landmarks
    #     coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']]
    #     pts_ref = [1, 2, 3, 7, 10]
    # elif SELECT_LANDMARKS == 'all':
    #     # all coords
    #     coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'],
    #                  ['Nose/Nasion'], ['Left ear'], ['Right ear'],
    #                  ['Coil Loc'], ['EF max']]
    #     pts_ref = [1, 2, 3, 5, 4, 6, 7, 10]
    # elif SELECT_LANDMARKS == 'scalp':
    #     # scalp landmarks
    #     coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']]
    #     hdr_mri = ['Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max']
    #     pts_ref = [5, 4, 6, 7, 10]
    #
    # coords_np = np.zeros([len(pts_ref), 3])
    # for n, pts_id in enumerate(pts_ref):
    #     # to keep in the MRI space use the identity as the affine
    #     # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder)
    #     # affine_trans = affine_I.copy()
    #     # affine_trans = affine.copy()
    #     # affine_trans[:3, -1] = affine[:3, -1]
    #     coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder)
    #     coords_np[n, :] = coord_aux
    #     [coord_mri[n].append(s) for s in coord_aux]
    #     if SHOW_MARKERS:
    #         marker_actor = add_marker(coord_aux, ren, col[n])
    #
    # print('\nOriginal coordinates from Nexstim: \n')
    # [print(s) for s in coords]
    # print('\nTransformed coordinates to MRI space: \n')
    # [print(s) for s in coord_mri]
    #
    # # coil location, normal vector and direction vector
    # coil_loc = coord_mri[-2][1:]
    # coil_norm = coords[8][1:]
    # coil_dir = coords[9][1:]
    #
    # # creating the coil coordinate system by adding a point in the direction of each given coil vector
    # # the additional vector is just the cross product from coil direction and coil normal vectors
    # # origin of the coordinate system is the coil location given by Nexstim
    # # the vec_length is to allow line creation with visible length in VTK scene
    # vec_length = 75
    # p1 = coords[7][1:]
    # p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)]
    # p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder)
    #
    # p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)]
    # p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder)
    #
    # coil_face = np.cross(coil_norm, coil_dir)
    # p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())]
    # p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder)
    #
    # # Coil face unit vector (X)
    # u1 = np.asarray(p2_face) - np.asarray(coil_loc)
    # u1_n = u1 / np.linalg.norm(u1)
    # # Coil direction unit vector (Y)
    # u2 = np.asarray(p2_dir) - np.asarray(coil_loc)
    # u2_n = u2 / np.linalg.norm(u2)
    # # Coil normal unit vector (Z)
    # u3 = np.asarray(p2_norm) - np.asarray(coil_loc)
    # u3_n = u3 / np.linalg.norm(u3)
    #
    # transf_matrix = np.identity(4)
    # if TRANSF_COIL:
    #     transf_matrix[:3, 0] = u1_n
    #     transf_matrix[:3, 1] = u2_n
    #     transf_matrix[:3, 2] = u3_n
    #     transf_matrix[:3, 3] = coil_loc[:]

    # the absolute value of the determinant indicates the scaling factor
    # the sign of the determinant indicates how it affects the orientation: if positive maintain the
    # original orientation and if negative inverts all the orientations (flip the object inside-out)
    # the negative determinant is what makes objects in VTK scene to become black
    # print('Transformation matrix: \n', transf_matrix, '\n')
    # print('Determinant: ', np.linalg.det(transf_matrix))

    # if SAVE_ID:
    #     coord_dict = {'m_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np}
    #     io.savemat(output_file + '.mat', coord_dict)
    #     hdr_names = ';'.join(['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)])
    #     np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names)

    if SHOW_BRAIN:
        # brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=0.7, user_matrix=np.linalg.inv(affine))
        affine_orig = np.identity(4)
        # affine_orig = affine.copy()
        # affine_orig[0, 3] = affine_orig[0, 3] + pix_dim[0]*img_shape[0]
        # affine_orig[1, 3] = affine_orig[1, 3] + pix_dim[1]*img_shape[1]
        # affine_orig[0, 3] = affine_orig[0, 3] + pix_dim[0]*img_shape[0]
        # affine_orig[0, 3] = affine_orig[0, 3] - 5
        # this partially works for DTI Baran
        # modified close to correct [-75.99139404 123.88291931 -148.19839478]
        # fall-back [87.50042766 -127.5 -127.5]
        # affine_orig[0, 3] = -trans_back[0]
        # affine_orig[1, 3] = -trans_back[1]
        # this works for the bert image
        # affine_orig[0, 3] = -127
        # affine_orig[1, 3] = 127
        # affine_orig[2, 3] = -127
        # affine_orig[:3, :3] = affine[:3, :3]
        # affine_orig[1, 3] = -affine_orig[1, 3]+27.5
        # victorsouza
        # affine_orig[1, 3] = -affine_orig[1, 3]+97.5
        # affine_orig[1, 3] = -affine_orig[1, 3]

        print('Affine original: \n', affine)
        scale, shear, angs, trans, persp = tf.decompose_matrix(affine)
        print('Angles: \n', np.rad2deg(angs))
        print('Translation: \n', trans)

        print('Affine modified: \n', affine_orig)
        scale, shear, angs, trans, persp = tf.decompose_matrix(affine_orig)
        print('Angles: \n', np.rad2deg(angs))
        print('Translation: \n', trans)

        # colour=[0., 1., 1.],
        brain_actor, brain_mesh = load_stl(brain_file, ren, replace=True,
                                           colour=[1., 0., 0.], opacity=.3,
                                           user_matrix=affine_orig)
        # print('Actor origin: \n', brain_actor.GetPosition())

    if SHOW_SKIN:
        # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine))
        # affine[0, 3] = affine[0, 3] + pix_dim[0] * img_shape[0]
        # this is working
        # affine[0, 3] = affine[0, 3] + 8.
        affine[1, 3] = affine[1, 3] + pix_dim[1] * img_shape[1]
        # affine[2, 3] = affine[2, 3] + pix_dim[2] * img_shape[2]
        affine_inv = np.linalg.inv(affine)
        # affine_inv[:3, 3] = -affine[:3, 3]
        # affine_inv[2, 3] = -affine_inv[2, 3]
        skin_actor, skin_mesh = load_stl(skin_file, ren, colour="SkinColor",
                                         opacity=1., user_matrix=affine_inv)
        # skin_actor, skin_mesh = load_stl(skin_file, ren, colour="SkinColor", opacity=1.)
        skino_actor, skino_mesh = load_stl(skin_file, ren, colour=[1., 0., 0.],
                                           opacity=1.)

    if SHOW_OTHER:
        # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine))
        affine[1, 3] = affine[1, 3] + pix_dim[1] * img_shape[1]
        affine_inv = np.linalg.inv(affine)
        # affine_inv[:3, 3] = -affine[:3, 3]
        affine_inv[1, 3] = affine_inv[1, 3]
        # other_actor, other_mesh = load_stl(other_file, ren, opacity=1., user_matrix=affine_inv)
        # other_actor, other_mesh = load_stl(other_file, ren, opacity=1.)

    # if SHOW_COIL:
    #     # reposition STL object prior to transformation matrix
    #     # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z]
    #     # old translation when using Y as normal vector
    #     # repos = [0., -6., 0., 0., -90., 90.]
    #     # Translate coil loc coordinate to coil bottom
    #     # repos = [0., 0., 5.5, 0., 0., 180.]
    #     repos = [0., 0., 0., 0., 0., 180.]
    #     act_coil = load_stl(coil_file, ren, replace=repos, user_matrix=transf_matrix, opacity=.3)
    #
    # if SHOW_PLANE:
    #     act_plane = add_plane(ren, user_matrix=transf_matrix)

    # Add axes to scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Add axes to object origin
    # if SHOW_COIL_AXES:
    #     add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0])
    #     add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0])
    #     add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0])

    # Add interactive axes to scene
    if SHOW_SCENE_AXES:
        axes = vtk.vtkAxesActor()
        widget = vtk.vtkOrientationMarkerWidget()
        widget.SetOutlineColor(0.9300, 0.5700, 0.1300)
        widget.SetOrientationMarker(axes)
        widget.SetInteractor(iren)
        # widget.SetViewport(0.0, 0.0, 0.4, 0.4)
        widget.SetEnabled(1)
        widget.InteractiveOn()

    # if SCREENSHOT:
    #     # screenshot of VTK scene
    #     w2if = vtk.vtkWindowToImageFilter()
    #     w2if.SetInput(ren_win)
    #     w2if.Update()
    #
    #     writer = vtk.vtkPNGWriter()
    #     writer.SetFileName("screenshot.png")
    #     writer.SetInputConnection(w2if.GetOutputPort())
    #     writer.Write()

    # Enable user interface interactor
    # ren_win.Render()
    ren.ResetCameraClippingRange()
    iren.Initialize()
    iren.Start()
def main():
    SHOW_AXES = True
    AFFINE_IMG = True
    NO_SCALE = True

    n_tracts = 240
    n_threads = 2 * psutil.cpu_count()

    data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\pilot_20200131'
    data_dir = data_dir.encode('utf-8')
    # FOD_path = 'Baran_FOD.nii'
    # trk_path = os.path.join(data_dir, FOD_path)

    # data_dir = b'C:\Users\deoliv1\OneDrive\data\dti'
    stl_path = b'wm_orig_smooth_world.stl'
    brain_path = os.path.join(data_dir, stl_path)

    # data_dir = b'C:\Users\deoliv1\OneDrive\data\dti'
    stl_path = b'gm.stl'
    brain_inv_path = os.path.join(data_dir, stl_path)

    nii_path = b'Baran_FOD.nii'
    trk_path = os.path.join(data_dir, nii_path)

    nii_path = b'Baran_T1_inFODspace.nii'
    img_path = os.path.join(data_dir, nii_path)

    imagedata = nb.squeeze_image(nb.load(img_path.decode('utf-8')))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()
    # print(imagedata.header)
    print("pix_dim: {}, img_shape: {}".format(pix_dim, img_shape))

    if AFFINE_IMG:
        affine = imagedata.affine
        if NO_SCALE:
            scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
            affine = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)
    else:
        affine = np.identity(4)
    print("affine: {0}\n".format(affine))

    # Create a rendering window and renderer
    ren = vtk.vtkRenderer()
    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)

    # Create a renderwindowinteractor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    start_time = time.time()
    tracker = Trekker.initialize(trk_path)
    tracker.seed_maxTrials(1)
    tracker.minFODamp(0.1)
    tracker.writeInterval(50)
    tracker.maxLength(200)
    tracker.minLength(20)
    tracker.maxSamplingPerStep(100)
    tracker.numberOfThreads(n_threads)
    duration = time.time() - start_time
    print("Initialize Trekker: {:.2f} ms".format(1e3 * duration))

    repos = [0., 0., 0., 0., 0., 0.]
    brain_actor = load_stl(brain_inv_path, ren, opacity=.1,
                           colour=[1.0, 1.0, 1.0], replace=repos,
                           user_matrix=np.identity(4))
    bds = brain_actor.GetBounds()
    print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    # repos = [0., 0., 0., 0., 0., 0.]
    # brain_actor_mri = load_stl(brain_path, ren, opacity=.1, colour=[0.0, 1.0, 0.0],
    #                            replace=repos, user_matrix=np.linalg.inv(affine))
    # bds = brain_actor_mri.GetBounds()
    # print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    repos = [0., 256., 0., 0., 0., 0.]
    # brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=0.5,
    #                            replace=repos, user_matrix=np.linalg.inv(affine))
    brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor",
                               opacity=.1, replace=repos)

    # Add axes to scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Show tracks
    repos_trk = [0., -256., 0., 0., 0., 0.]

    matrix_vtk = vtk.vtkMatrix4x4()
    trans = np.identity(4)
    trans[1, -1] = repos_trk[1]
    final_matrix = np.linalg.inv(affine) @ trans
    print("final_matrix: {}".format(final_matrix))

    for row in range(0, 4):
        for col in range(0, 4):
            matrix_vtk.SetElement(row, col, final_matrix[row, col])

    root = vtk.vtkMultiBlockDataSet()
    # for i in range(10):
    # seed = np.array([[-8.49, -8.39, 2.5]])
    seed = np.array([[27.53, -77.37, 46.42]])
    tracts_actor = dti.single_block(tracker, seed, n_tracts, root, matrix_vtk)

    # out_list = []
    count_tracts = 0
    start_time_all = time.time()
    for n in range(round(n_tracts / n_threads)):
        branch = dti.multi_block(tracker, seed, n_threads)
        count_tracts += branch.GetNumberOfBlocks()
        # start_time = time.time()
        # root = dti.tracts_root(out_list, root, n)
        root.SetBlock(n, branch)
        # duration = time.time() - start_time
        # print("Compute root {}: {:.2f} ms".format(n, 1e3*duration))

    duration = time.time() - start_time_all
    print("Compute multi {}: {:.2f} ms".format(n, 1e3 * duration))
    print("Number computed tracts {}".format(count_tracts))
    print("Number computed branches {}".format(root.GetNumberOfBlocks()))

    start_time = time.time()
    tracts_actor = dti.compute_actor(root, matrix_vtk)
    duration = time.time() - start_time
    print("Compute actor: {:.2f} ms".format(1e3 * duration))

    # Assign actor to the renderer
    ren.AddActor(brain_actor)
    ren.AddActor(brain_inv_actor)
    start_time = time.time()
    ren.AddActor(tracts_actor)
    duration = time.time() - start_time
    print("Add actor: {:.2f} ms".format(1e3 * duration))
    # ren.AddActor(brain_actor_mri)

    # Enable user interface interactor
    iren.Initialize()
    ren_win.Render()
    iren.Start()
def _run_interface(self, runtime):
    is_sbref = isdefined(self.inputs.sbref_file)
    ref_input = self.inputs.sbref_file if is_sbref else self.inputs.in_file

    if self.inputs.multiecho:
        if len(ref_input) < 2:
            input_name = "sbref_file" if is_sbref else "in_file"
            raise ValueError("Argument 'multiecho' is True but "
                             f"'{input_name}' has only one element.")
        else:
            # Select only the first echo (see LIMITATION above for SBRefs)
            ref_input = ref_input[:1]
    elif not is_sbref and len(ref_input) > 1:
        raise ValueError(
            "Input 'in_file' cannot point to more than one file "
            "for single-echo BOLD datasets.")

    # Build the nibabel spatial image we will work with
    ref_im = []
    for im_i in ref_input:
        nib_i = nb.squeeze_image(nb.load(im_i))
        if nib_i.dataobj.ndim == 3:
            ref_im.append(nib_i)
        elif nib_i.dataobj.ndim == 4:
            ref_im += nb.four_to_three(nib_i)
    ref_im = nb.squeeze_image(nb.concat_images(ref_im))

    # Volumes to discard only makes sense with BOLD inputs.
    if not is_sbref:
        n_volumes_to_discard = _get_vols_to_discard(ref_im)
        out_ref_fname = os.path.join(runtime.cwd, "ref_bold.nii.gz")
    else:
        n_volumes_to_discard = 0
        out_ref_fname = os.path.join(runtime.cwd, "ref_sbref.nii.gz")

    # Set interface outputs
    self._results["n_volumes_to_discard"] = n_volumes_to_discard
    self._results["ref_image"] = out_ref_fname

    # Slicing may induce inconsistencies with shape-dependent values in extensions.
    # For now, remove all. If this turns out to be a mistake, we can select extensions
    # that don't break pipeline stages.
    ref_im.header.extensions.clear()

    # If reference is only 1 volume, return it directly
    if ref_im.dataobj.ndim == 3:
        ref_im.to_filename(out_ref_fname)
        return runtime

    if n_volumes_to_discard == 0:
        if ref_im.shape[-1] > 40:
            ref_im = nb.Nifti1Image(ref_im.dataobj[:, :, :, 20:40],
                                    ref_im.affine, ref_im.header)

        ref_name = os.path.join(runtime.cwd, "slice.nii.gz")
        ref_im.to_filename(ref_name)
        if self.inputs.mc_method == "AFNI":
            res = afni.Volreg(
                in_file=ref_name,
                args="-Fourier -twopass",
                zpad=4,
                outputtype="NIFTI_GZ",
            ).run()
        elif self.inputs.mc_method == "FSL":
            res = fsl.MCFLIRT(in_file=ref_name, ref_vol=0,
                              interpolation="sinc").run()
        mc_slice_nii = nb.load(res.outputs.out_file)

        median_image_data = np.median(mc_slice_nii.get_fdata(), axis=3)
    else:
        median_image_data = np.median(
            ref_im.dataobj[:, :, :, :n_volumes_to_discard], axis=3)

    nb.Nifti1Image(median_image_data, ref_im.affine,
                   ref_im.header).to_filename(out_ref_fname)
    return runtime
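# Hedged sketch of the fallback reference estimation above: when no volumes
# are flagged for discarding and the series is long (more than 40 volumes),
# volumes 20:40 are motion-corrected and their voxelwise median becomes the
# reference. The motion-correction step is skipped here; numpy only.
import numpy as np

series = np.random.rand(8, 8, 8, 50).astype(np.float32)
ref = np.median(series[..., 20:40], axis=3)         # median of the middle slab
print(ref.shape)                                    # (8, 8, 8)
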
def conform_data(in_file, out_file=None, out_size=(256, 256, 256),
                 out_zooms=(1.0, 1.0, 1.0), order=3):
    """Conform the input dataset to the canonical orientation.

    Parameters
    ----------
    in_file : str or Path
        Path to the input MRI volume to conform.
    out_file : str or Path, optional
        Path to save the conformed volume. By default the volume is saved
        as <tmpdir>/conformed.nii.gz.
    out_size : tuple of size 3, optional
        The shape to conform the 3D volume to. Default: (256, 256, 256).
    out_zooms : tuple of size 3, optional
        Factors to normalize voxel size to. Default: (1.0, 1.0, 1.0).
    order : int, optional
        Order of the spline interpolation, in the range 0-5. Default: 3.

    Returns
    -------
    str or Path
        The path to where the conformed volume is saved.
    """
    if isinstance(in_file, (str, Path)):
        in_file = nb.load(in_file)

    # Drop axes with just 1 sample (typically, a 3D file stored as 4D)
    in_file = nb.squeeze_image(in_file)
    dtype = in_file.header.get_data_dtype()

    # Reorient to closest canonical
    in_file = nb.as_closest_canonical(in_file)
    data = np.asanyarray(in_file.dataobj)

    # Calculate the factors to normalize voxel size to out_zooms
    normed = np.array(out_zooms) / np.array(in_file.header.get_zooms()[:3])

    # Calculate the new indexes, sampling at 1mm^3 with out_size sizes.
    # center_ijk = 0.5 * (np.array(in_file.shape) - 1)
    new_ijk = normed[:, np.newaxis] * np.array(
        np.meshgrid(
            np.arange(out_size[0]),
            np.arange(out_size[1]),
            np.arange(out_size[2]),
            indexing="ij",
        )).reshape((3, -1))
    offset = 0.5 * (np.max(new_ijk, axis=1) - np.array(in_file.shape))

    # Align the centers of the two sampling extents
    new_ijk -= offset[:, np.newaxis]

    # Resample data in the new grid
    resampled = map_coordinates(
        data,
        new_ijk,
        output=dtype,
        order=order,
        mode="constant",
        cval=0,
        prefilter=True,
    ).reshape(out_size)
    resampled[resampled < 0] = 0

    # Create a new x-form affine, aligned with cardinal axes, 1mm3 and centered.
    newaffine = np.eye(4)
    newaffine[:3, 3] = -0.5 * (np.array(out_size) - 1)
    nii = nb.Nifti1Image(resampled, newaffine, None)

    if out_file is None:
        out_file = Path(mkdtemp()) / "conformed.nii.gz"
    out_file = Path(out_file).absolute()
    nii.to_filename(out_file)

    return out_file
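# Hedged usage sketch for conform_data above: conform a small synthetic
# anisotropic volume (2 mm slices on z) to a 64-cube at 1 mm isotropic,
# smaller than the 256-cube default to keep the run fast. Assumes
# conform_data and its module context (nb, np, map_coordinates, mkdtemp)
# are in scope; tempfile keeps the input path platform-neutral.
import os
import tempfile
import numpy as np
import nibabel as nb

fname = os.path.join(tempfile.mkdtemp(), "aniso.nii.gz")
nb.Nifti1Image(np.random.rand(32, 32, 16).astype(np.float32),
               np.diag([1.0, 1.0, 2.0, 1.0])).to_filename(fname)

out = conform_data(fname, out_size=(64, 64, 64))
print(nb.load(out).shape)                           # (64, 64, 64)
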
def ReadAnalyze(filename):
    anlz = squeeze_image(AnalyzeImage.from_filename(filename))
    return anlz
def ReadNifti(filename):
    nft = squeeze_image(Nifti1Image.from_filename(filename))
    return nft
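# Hedged usage sketch for the two thin readers above: write a tiny NIfTI with
# a trailing singleton dimension and check that ReadNifti drops it. The
# Analyze variant behaves analogously. tempfile keeps the example
# self-contained; numpy/nibabel assumed.
import os
import tempfile
import numpy as np
from nibabel import Nifti1Image

fname = os.path.join(tempfile.mkdtemp(), 'tiny.nii')
Nifti1Image(np.zeros((8, 8, 8, 1), dtype=np.int16),
            np.eye(4)).to_filename(fname)
img = ReadNifti(fname)
print(img.shape)                                    # (8, 8, 8) after squeeze
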
def main():
    SHOW_AXES = True
    SHOW_SCENE_AXES = True
    SHOW_COIL_AXES = True
    SHOW_SKIN = True
    SHOW_BRAIN = True
    SHOW_COIL = True
    SHOW_MARKERS = True
    TRANSF_COIL = True
    SHOW_PLANE = False
    SELECT_LANDMARKS = 'scalp'  # 'all', 'mri', 'scalp'
    SAVE_ID = False
    AFFINE_IMG = True
    NO_SCALE = True
    SCREENSHOT = False

    reorder = [0, 2, 1]
    flipx = [True, False, False]

    # reorder = [0, 1, 2]
    # flipx = [False, False, False]

    # default folder and subject
    # subj = 's03'
    subj = 'EEGTA04'
    id_extra = False  # 8, 9, 10, 12, False
    # data_dir = os.environ['OneDriveConsumer'] + '\\data\\nexstim_coord\\'
    data_dir = r'P:\tms_eeg\mTMS\projects\2019 EEG-based target automatization\Analysis\EEG electrode transformation'
    # NOTE: nav_dir and simnibs_dir are used below but never defined in this
    # script; they are assumed to come from the enclosing module in the
    # original source.

    # filenames
    # coil_file = data_dir + 'magstim_fig8_coil.stl'
    coil_file = os.environ['OneDrive'] + '\\data\\nexstim_coord\\magstim_fig8_coil.stl'
    if id_extra:
        coord_file = data_dir + 'ppM1_eximia_%s_%d.txt' % (subj, id_extra)
    else:
        coord_file = nav_dir + 'ppM1_eximia_%s.txt' % subj
    # img_file = data_subj + subj + '.nii'
    img_file = data_dir + 'mri\\ppM1_%s\\ppM1_%s.nii' % (subj, subj)
    brain_file = simnibs_dir + "wm.stl"
    skin_file = simnibs_dir + "skin.stl"
    if id_extra:
        output_file = nav_dir + 'transf_mat_%s_%d' % (subj, id_extra)
    else:
        output_file = nav_dir + 'transf_mat_%s' % subj

    coords = lc.load_nexstim(coord_file)
    # red, green, blue, maroon (dark red),
    # olive (shitty green), teal (petrol blue), yellow, orange
    col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],
           [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]

    # extract image header shape and affine transformation from original nifti file
    imagedata = nb.squeeze_image(nb.load(img_file))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()

    print("Pixel size: \n")
    print(pix_dim)
    print("\nImage shape: \n")
    print(img_shape)

    affine_aux = imagedata.affine.copy()
    if NO_SCALE:
        scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
        affine_aux = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)

    if AFFINE_IMG:
        affine = affine_aux
        # if NO_SCALE:
        #     scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
        #     affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp)
    else:
        affine = np.identity(4)
    # affine_I = np.identity(4)

    # create a camera, render window and renderer
    camera = vtk.vtkCamera()
    camera.SetPosition(0, 1000, 0)
    camera.SetFocalPoint(0, 0, 0)
    camera.SetViewUp(0, 0, 1)
    camera.ComputeViewPlaneNormal()
    camera.Azimuth(90.0)
    camera.Elevation(10.0)

    ren = vtk.vtkRenderer()
    ren.SetActiveCamera(camera)
    ren.ResetCamera()
    camera.Dolly(1.5)

    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)

    # create a renderwindowinteractor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    if SELECT_LANDMARKS == 'mri':
        # MRI landmarks
        coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'],
                     ['Coil Loc'], ['EF max']]
        pts_ref = [1, 2, 3, 7, 10]
    elif SELECT_LANDMARKS == 'all':
        # all coords
        coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'],
                     ['Nose/Nasion'], ['Left ear'], ['Right ear'],
                     ['Coil Loc'], ['EF max']]
        pts_ref = [1, 2, 3, 5, 4, 6, 7, 10]
    elif SELECT_LANDMARKS == 'scalp':
        # scalp landmarks
        coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'],
                     ['Coil Loc'], ['EF max']]
        hdr_mri = ['Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max']
        pts_ref = [5, 4, 6, 7, 10]

    coords_np = np.zeros([len(pts_ref), 3])

    for n, pts_id in enumerate(pts_ref):
        # to keep in the MRI space use the identity as the affine
        # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder)
        # affine_trans = affine_I.copy()
        # affine_trans = affine.copy()
        # affine_trans[:3, -1] = affine[:3, -1]
        coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine,
                                     flipx, reorder)
        coords_np[n, :] = coord_aux
        [coord_mri[n].append(s) for s in coord_aux]
        if SHOW_MARKERS:
            marker_actor = add_marker(coord_aux, ren, col[n])

    if id_extra:
        # compare coil locations in experiments with 8, 9, 10 and 12 mm shifts
        # MRI Nexstim space: 8, 9, 10, 12 mm coil locations
        # coord_others = [[122.2, 198.8, 99.7],
        #                 [121.1, 200.4, 100.1],
        #                 [120.5, 200.7, 98.2],
        #                 [117.7, 202.9, 96.6]]
        if AFFINE_IMG:
            # World space: 8, 9, 10, 12 mm coil locations
            coord_others = [
                [-42.60270233154297, 28.266497802734378, 81.02450256347657],
                [-41.50270233154296, 28.66649780273437, 82.62450256347657],
                [-40.90270233154297, 26.766497802734378, 82.92450256347655],
                [-38.10270233154297, 25.16649780273437, 85.12450256347657]]
        else:
            # MRI space reordered and flipped: 8, 9, 10, 12 mm coil locations
            coord_others = [[27.8, 99.7, 198.8],
                            [28.9, 100.1, 200.4],
                            [29.5, 98.2, 200.7],
                            [32.3, 96.6, 202.9]]

        col_others = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 0.]]
        for n, c in enumerate(coord_others):
            marker_actor = add_marker(c, ren, col_others[n])

    print('\nOriginal coordinates from Nexstim: \n')
    [print(s) for s in coords]
    print('\nMRI coordinates flipped and reordered: \n')
    [print(s) for s in coords_np]
    print('\nTransformed coordinates to MRI space: \n')
    [print(s) for s in coord_mri]

    # coil location, normal vector and direction vector
    coil_loc = coord_mri[-2][1:]
    coil_norm = coords[8][1:]
    coil_dir = coords[9][1:]

    # creating the coil coordinate system by adding a point in the direction of each given coil vector
    # the additional vector is just the cross product from coil direction and coil normal vectors
    # origin of the coordinate system is the coil location given by Nexstim
    # the vec_length is to allow line creation with visible length in VTK scene
    vec_length = 75
    p1 = coords[7][1:]
    p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)]
    p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder)

    p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)]
    p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder)

    coil_face = np.cross(coil_norm, coil_dir)
    p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())]
    p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder)

    # Coil face unit vector (X)
    u1 = np.asarray(p2_face) - np.asarray(coil_loc)
    u1_n = u1 / np.linalg.norm(u1)
    # Coil direction unit vector (Y)
    u2 = np.asarray(p2_dir) - np.asarray(coil_loc)
    u2_n = u2 / np.linalg.norm(u2)
    # Coil normal unit vector (Z)
    u3 = np.asarray(p2_norm) - np.asarray(coil_loc)
    u3_n = u3 / np.linalg.norm(u3)

    transf_matrix = np.identity(4)
    if TRANSF_COIL:
        transf_matrix[:3, 0] = u1_n
        transf_matrix[:3, 1] = u2_n
        transf_matrix[:3, 2] = u3_n
        transf_matrix[:3, 3] = coil_loc[:]

    # the absolute value of the determinant indicates the scaling factor
    # the sign of the determinant indicates how it affects the orientation: if positive maintain the
    # original orientation and if negative inverts all the orientations (flip the object inside-out)
    # the negative determinant is what makes objects in VTK scene to become black
    print('Transformation matrix: \n', transf_matrix, '\n')
    print('Determinant: ', np.linalg.det(transf_matrix))

    if SAVE_ID:
        coord_dict = {'m_affine': transf_matrix, 'coords_labels': hdr_mri,
                      'coords': coords_np}
        io.savemat(output_file + '.mat', coord_dict)
        hdr_names = ';'.join(['m' + str(i) + str(j)
                              for i in range(1, 5) for j in range(1, 5)])
        np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]),
                   delimiter=';', header=hdr_names)

    if SHOW_BRAIN:
        if AFFINE_IMG:
            brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.],
                                   opacity=1.)
        else:
            # to visualize brain in MRI space
            brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.],
                                   opacity=1.,
                                   user_matrix=np.linalg.inv(affine_aux))

    if SHOW_SKIN:
        if AFFINE_IMG:
            skin_actor = load_stl(skin_file, ren, colour="SkinColor",
                                  opacity=.4)
        else:
            # to visualize skin in MRI space
            skin_actor = load_stl(skin_file, ren, colour="SkinColor",
                                  opacity=.4,
                                  user_matrix=np.linalg.inv(affine_aux))

    if SHOW_COIL:
        # reposition STL object prior to transformation matrix
        # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z]
        # old translation when using Y as normal vector
        # repos = [0., -6., 0., 0., -90., 90.]
        # Translate coil loc coordinate to coil bottom
        # repos = [0., 0., 5.5, 0., 0., 180.]
        repos = [0., 0., 0., 0., 0., 180.]
        act_coil = load_stl(coil_file, ren, replace=repos,
                            user_matrix=transf_matrix, opacity=.3)

    if SHOW_PLANE:
        act_plane = add_plane(ren, user_matrix=transf_matrix)

    # Add axes to scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Add axes to object origin
    if SHOW_COIL_AXES:
        add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0])
        add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0])
        add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0])

    # Add interactive axes to scene
    if SHOW_SCENE_AXES:
        axes = vtk.vtkAxesActor()
        widget = vtk.vtkOrientationMarkerWidget()
        widget.SetOutlineColor(0.9300, 0.5700, 0.1300)
        widget.SetOrientationMarker(axes)
        widget.SetInteractor(iren)
        # widget.SetViewport(0.0, 0.0, 0.4, 0.4)
        widget.SetEnabled(1)
        widget.InteractiveOn()

    if SCREENSHOT:
        # screenshot of VTK scene
        w2if = vtk.vtkWindowToImageFilter()
        w2if.SetInput(ren_win)
        w2if.Update()

        writer = vtk.vtkPNGWriter()
        writer.SetFileName("screenshot.png")
        writer.SetInputConnection(w2if.GetOutputPort())
        writer.Write()

    # Enable user interface interactor
    # ren_win.Render()
    ren.ResetCameraClippingRange()
    iren.Initialize()
    iren.Start()
def main():
    SHOW_AXES = True
    AFFINE_IMG = True
    NO_SCALE = True

    n_tracts = 240
    # n_tracts = 24
    # n_threads = 2*psutil.cpu_count()
    img_shift = 256  # 255

    data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609'
    data_dir = data_dir.encode('utf-8')
    # FOD_path = 'Baran_FOD.nii'
    # trk_path = os.path.join(data_dir, FOD_path)

    # data_dir = b'C:\Users\deoliv1\OneDrive\data\dti'
    stl_path = b'wm_orig_smooth_world.stl'
    brain_path = os.path.join(data_dir, stl_path)

    # data_dir = b'C:\Users\deoliv1\OneDrive\data\dti'
    stl_path = b'wm_2.stl'
    brain_simnibs_path = os.path.join(data_dir, stl_path)

    stl_path = b'wm.stl'
    brain_inv_path = os.path.join(data_dir, stl_path)

    nii_path = b'Baran_FOD.nii'
    trk_path = os.path.join(data_dir, nii_path)

    nii_path = b'Baran_T1_inFODspace.nii'
    img_path = os.path.join(data_dir, nii_path)

    nii_path = b'Baran_trekkerACTlabels_inFODspace.nii'
    act_path = os.path.join(data_dir, nii_path)

    stl_path = b'magstim_fig8_coil.stl'
    coil_path = os.path.join(data_dir, stl_path)

    imagedata = nb.squeeze_image(nb.load(img_path.decode('utf-8')))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()

    act_data = nb.squeeze_image(nb.load(act_path.decode('utf-8')))
    act_data = nb.as_closest_canonical(act_data)
    act_data.update_header()
    act_data_arr = act_data.get_fdata()

    # print(imagedata.header)
    print("pix_dim: {}, img_shape: {}".format(pix_dim, img_shape))

    if AFFINE_IMG:
        affine = imagedata.affine
        if NO_SCALE:
            scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
            affine = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)
    else:
        affine = np.identity(4)

    print("affine: {0}\n".format(affine))

    # Create a rendering window and renderer
    ren = vtk.vtkRenderer()
    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)

    # Create a render window interactor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    minFODamp = np.arange(0.01, 0.11, 0.01)
    dataSupportExponent = np.arange(0.1, 1.1, 0.1)
    # COMBINATION 1
    # tracker.minFODamp(0.01)
    # tracker.dataSupportExponent(0.1)
    # COMBINATION "n"
    # tracker.minFODamp(0.01 * n)
    # tracker.dataSupportExponent(0.1 * n)

    start_time = time.time()
    trekker_cfg = {'seed_max': 1, 'step_size': 0.1, 'min_fod': 0.1, 'probe_quality': 3,
                   'max_interval': 1, 'min_radius_curv': 0.8, 'probe_length': 0.4,
                   'write_interval': 50, 'numb_threads': '', 'max_lenth': 200,
                   'min_lenth': 20, 'max_sampling_step': 100}
    tracker = Trekker.initialize(trk_path)
    tracker, n_threads = dti.set_trekker_parameters(tracker, trekker_cfg)
    duration = time.time() - start_time
    print("Initialize Trekker: {:.2f} ms".format(1e3*duration))

    repos = [0., -img_shift, 0., 0., 0., 0.]
    # brain_actor = load_stl(brain_inv_path, ren, opacity=1., colour=[1.0, 1.0, 1.0], replace=repos, user_matrix=np.identity(4))
    # the one that has always been used
    brain_actor = load_stl(brain_simnibs_path, ren, opacity=1., colour=[1.0, 1.0, 1.0],
                           replace=repos, user_matrix=np.linalg.inv(affine))
    # bds = brain_actor.GetBounds()
    # print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    # invesalius surface
    # repos = [0., 0., 0., 0., 0., 0.]
    # brain_actor = load_stl(brain_inv_path, ren, opacity=.5, colour=[1.0, .5, .5], replace=repos, user_matrix=np.identity(4))

    # repos = [0., 0., 0., 0., 0., 0.]
    # brain_actor_mri = load_stl(brain_path, ren, opacity=.1, colour=[0.0, 1.0, 0.0], replace=repos, user_matrix=np.linalg.inv(affine))
    # bds = brain_actor_mri.GetBounds()
    # print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    # repos = [0., 256., 0., 0., 0., 0.]
    # brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=0.5, replace=repos, user_matrix=np.linalg.inv(affine))
    # brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=.6, replace=repos)
    # bds = brain_inv_actor.GetBounds()
    # print("Reposed: Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    # Add axes to the scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Show tracts
    repos_trk = [0., -img_shift, 0., 0., 0., 0.]
    # repos_trk = [0., 0., 0., 0., 0., 0.]

    matrix_vtk = vtk.vtkMatrix4x4()
    trans = np.identity(4)
    trans[1, -1] = repos_trk[1]
    final_matrix = np.linalg.inv(affine) @ trans
    print("final_matrix: {}".format(final_matrix))

    for row in range(0, 4):
        for col in range(0, 4):
            matrix_vtk.SetElement(row, col, final_matrix[row, col])

    root = vtk.vtkMultiBlockDataSet()

    # for i in range(10):
    # seed = np.array([[-8.49, -8.39, 2.5]])
    # seed = np.array([[27.53, -77.37, 46.42]])

    # from the InVesalius-exported fiducial markers, multiply the Y coordinate by -1 to
    # transform to the regular 3D InVesalius space where the coil location is saved
    fids_inv = np.array([[168.300, -126.600, 97.000],
                         [9.000, -120.300, 93.700],
                         [90.100, -33.500, 150.000]])
    for n in range(3):
        fids_actor = add_marker(fids_inv[n, :], ren, [1., 0., 0.], radius=2)

    seed = np.array([[-25.66, -30.07, 54.91]])
    coil_pos = [40.17, 152.28, 235.78, -18.22, -25.27, 64.99]
    m_coil = coil_transform_pos(coil_pos)

    repos = [0., 0., 0., 0., 0., 90.]
    coil_actor = load_stl(coil_path, ren, opacity=.6, replace=repos,
                          colour=[1., 1., 1.], user_matrix=m_coil)
    # coil_actor = load_stl(coil_path, ren, opacity=.6, replace=repos, colour=[1., 1., 1.])

    # create coil vectors
    vec_length = 75
    print(m_coil.shape)
    p1 = m_coil[:-1, -1]
    print(p1)
    coil_dir = m_coil[:-1, 0]
    coil_face = m_coil[:-1, 1]

    p2_face = p1 + vec_length * coil_face
    p2_dir = p1 + vec_length * coil_dir

    coil_norm = np.cross(coil_dir, coil_face)
    p2_norm = p1 - vec_length * coil_norm

    add_line(ren, p1, p2_dir, color=[1.0, .0, .0])
    add_line(ren, p1, p2_face, color=[.0, 1.0, .0])
    add_line(ren, p1, p2_norm, color=[.0, .0, 1.0])

    colours = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],
               [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]

    marker_actor = add_marker(p1, ren, colours[0], radius=1)

    # p1_change = n2m.coord_change(p1)
    p1_change = p1.copy()
    p1_change[1] = -p1_change[1]
    # p1_change[1] += img_shift
    marker_actor2 = add_marker(p1_change, ren, colours[1], radius=1)

    offset = 40
    coil_norm = coil_norm / np.linalg.norm(coil_norm)
    coord_offset_nav = p1 - offset * coil_norm
    marker_actor_seed_nav = add_marker(coord_offset_nav, ren, colours[3], radius=1)

    coord_offset_mri = coord_offset_nav.copy()
    coord_offset_mri[1] += img_shift
    marker_actor_seed_mri = add_marker(coord_offset_mri, ren, colours[3], radius=1)

    coord_mri_label = [int(s) for s in coord_offset_mri]
    print("offset MRI: {}, and label: {}".format(coord_mri_label,
                                                 act_data_arr[tuple(coord_mri_label)]))

    offset_list = 10 + np.arange(0, 31, 3)
    coord_offset_list = p1 - np.outer(offset_list, coil_norm)
    coord_offset_list += [0, img_shift, 0]
    coord_offset_list = coord_offset_list.astype(int).tolist()

    # for pt in coord_offset_list:
    #     print(pt)
    #     if act_data_arr[tuple(pt)] == 2:
    #         cl = colours[5]
    #     else:
    #         cl = colours[4]
    #     _ = add_marker(pt, ren, cl)

    x = np.arange(-4, 5, 2)
    y = np.arange(-4, 5, 2)
    z = 10 + np.arange(0, 31, 3)
    xv, yv, zv = np.meshgrid(x, y, -z)
    coord_grid = np.array([xv, yv, zv])

    start_time = time.time()
    for p in range(coord_grid.shape[1]):
        for n in range(coord_grid.shape[2]):
            for m in range(coord_grid.shape[3]):
                pt = coord_grid[:, p, n, m]
                pt = np.append(pt, 1)
                pt_tr = m_coil @ pt[:, np.newaxis]
                pt_tr = np.squeeze(pt_tr[:3]).astype(int) + [0, img_shift, 0]
                pt_tr = tuple(pt_tr.tolist())
                if act_data_arr[pt_tr] == 2:
                    cl = colours[6]
                elif act_data_arr[pt_tr] == 1:
                    cl = colours[7]
                else:
                    cl = [1., 1., 1.]
                # print(act_data_arr[pt_tr])
                _ = add_marker(pt_tr, ren, cl, radius=1)

    duration = time.time() - start_time
    print("Compute coil grid: {:.2f} ms".format(1e3*duration))

    start_time = time.time()
    # create a grid of points
    grid_number = x.shape[0]*y.shape[0]*z.shape[0]
    coord_grid = coord_grid.reshape([3, grid_number]).T
    # sort the grid by distance to the origin/coil center
    coord_list = coord_grid[np.argsort(np.linalg.norm(coord_grid, axis=1)), :]
    # make the coordinates homogeneous
    coord_list_w = np.append(coord_list.T, np.ones([1, grid_number]), axis=0)
    # apply the coil transformation matrix
    coord_list_w_tr = m_coil @ coord_list_w
    # convert to int so the coordinates can be used as indices in the MRI image space
    coord_list_w_tr = coord_list_w_tr[:3, :].T.astype(int) + np.array([[0, img_shift, 0]])
    # extract the first occurrence of a specific label from the MRI image
    labs = act_data_arr[coord_list_w_tr[..., 0],
                        coord_list_w_tr[..., 1],
                        coord_list_w_tr[..., 2]]
    lab_first = np.argmax(labs == 1)
    if labs[lab_first] == 1:
        pt_found = coord_list_w_tr[lab_first, :]
        _ = add_marker(pt_found, ren, [0., 0., 1.], radius=1)

    # convert the coordinate back to the InVesalius 3D space
    pt_found_inv = pt_found - np.array([0., img_shift, 0.])
    # convert to world coordinate space to use as a seed for fiber tracking
    pt_found_tr = np.append(pt_found, 1)[np.newaxis, :].T
    pt_found_tr = affine @ pt_found_tr
    pt_found_tr = pt_found_tr[:3, 0, np.newaxis].T

    duration = time.time() - start_time
    print("Compute coil grid fast: {:.2f} ms".format(1e3*duration))

    # create tracts
    count_tracts = 0
    start_time_all = time.time()

    # uncertain_params = list(zip(dataSupportExponent, minFODamp))
    for n in range(0, round(n_tracts/n_threads)):
        # branch = dti.multi_block(tracker, seed, n_threads)
        # branch = dti.multi_block(tracker, pt_found_tr, n_threads)
        # rescale n so that there are no zero-opacity tracts
        n_param = (n % 10) + 1
        branch = dti.multi_block_uncertainty(tracker, pt_found_tr, n_threads, n_param)
        count_tracts += branch.GetNumberOfBlocks()

        # start_time = time.time()
        # root = dti.tracts_root(out_list, root, n)
        root.SetBlock(n, branch)
        # duration = time.time() - start_time
        # print("Compute root {}: {:.2f} ms".format(n, 1e3*duration))

    duration = time.time() - start_time_all
    print("Compute multi {}: {:.2f} ms".format(n, 1e3*duration))
    print("Number of computed tracts: {}".format(count_tracts))
    print("Number of computed branches: {}".format(root.GetNumberOfBlocks()))

    start_time = time.time()
    tracts_actor = dti.compute_actor(root, matrix_vtk)
    duration = time.time() - start_time
    print("Compute actor: {:.2f} ms".format(1e3*duration))

    # Assign actors to the renderer
    # ren.AddActor(brain_actor)
    # ren.AddActor(brain_inv_actor)
    # ren.AddActor(coil_actor)
    start_time = time.time()
    ren.AddActor(tracts_actor)
    duration = time.time() - start_time
    print("Add actor: {:.2f} ms".format(1e3*duration))
    # ren.AddActor(brain_actor_mri)

    planex, planey, planez = raw_image(act_path, ren)
    planex.SetInteractor(iren)
    planex.On()
    planey.SetInteractor(iren)
    planey.On()
    planez.SetInteractor(iren)
    planez.On()

    _ = add_marker(np.squeeze(seed).tolist(), ren, [0., 1., 0.], radius=1)
    _ = add_marker(np.squeeze(pt_found_tr).tolist(), ren, [1., 0., 0.], radius=1)
    _ = add_marker(pt_found_inv, ren, [1., 1., 0.], radius=1)

    # Enable the user interface interactor
    iren.Initialize()
    ren_win.Render()
    iren.Start()
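# A self-contained sketch (synthetic data, illustrative names) of the seed-search idea
# used above: sort candidate voxels by distance to the coil center, then take the first
# one whose tissue label matches the target.
import numpy as np

rng = np.random.default_rng(0)
labels = rng.integers(0, 3, size=(10, 10, 10))    # fake ACT label volume
candidates = rng.integers(0, 10, size=(20, 3))    # fake candidate voxel indices
order = np.argsort(np.linalg.norm(candidates, axis=1))
candidates = candidates[order]                    # nearest candidates first
labs = labels[candidates[:, 0], candidates[:, 1], candidates[:, 2]]
first = np.argmax(labs == 1)                      # argmax returns 0 when nothing matches,
if labs[first] == 1:                              # so the label must be re-checked
    print("first matching voxel:", candidates[first])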
def _run_interface(self, runtime):
    img = nb.load(self.inputs.in_file)

    # If reference is 3D, return it directly
    if img.dataobj.ndim == 3:
        self._results["out_file"] = self.inputs.in_file
        self._results["out_volumes"] = self.inputs.in_file
        self._results["out_drift"] = [1.0]
        return runtime

    fname = partial(fname_presuffix, self.inputs.in_file, newpath=runtime.cwd)

    # Slicing may induce inconsistencies with shape-dependent values in extensions.
    # For now, remove all. If this turns out to be a mistake, we can select extensions
    # that don't break pipeline stages.
    img.header.extensions.clear()
    img = nb.squeeze_image(img)

    # If reference was 4D, but single-volume - write out squeezed and return.
    if img.dataobj.ndim == 3:
        self._results["out_file"] = fname(suffix="_squeezed")
        img.to_filename(self._results["out_file"])
        self._results["out_volumes"] = self.inputs.in_file
        self._results["out_drift"] = [1.0]
        return runtime

    img_len = img.shape[3]
    t_mask = (self.inputs.t_mask if isdefined(self.inputs.t_mask)
              else [True] * img_len)

    if len(t_mask) != img_len:
        raise ValueError(
            f"Image length ({img_len} timepoints) unmatched by mask ({len(t_mask)})")

    n_volumes = sum(t_mask)
    if n_volumes < 1:
        raise ValueError("At least one volume should be selected for slicing")

    self._results["out_file"] = fname(suffix="_average")
    self._results["out_volumes"] = fname(suffix="_sliced")

    sliced = nb.concat_images(
        i for i, t in zip(nb.four_to_three(img), t_mask) if t)

    data = sliced.get_fdata(dtype="float32")
    # Data can come with outliers showing very high numbers - preemptively prune
    data = np.clip(
        data,
        a_min=0.0 if self.inputs.nonnegative else np.percentile(data, 0.2),
        a_max=np.percentile(data, 99.8),
    )

    gs_drift = np.mean(data, axis=(0, 1, 2))
    gs_drift /= gs_drift.max()
    self._results["out_drift"] = [float(i) for i in gs_drift]

    data /= gs_drift
    data = np.clip(
        data,
        a_min=0.0 if self.inputs.nonnegative else data.min(),
        a_max=data.max(),
    )
    sliced.__class__(data, sliced.affine, sliced.header).to_filename(
        self._results["out_volumes"])

    if n_volumes == 1:
        nb.squeeze_image(sliced).to_filename(self._results["out_file"])
        self._results["out_drift"] = [1.0]
        return runtime

    if self.inputs.mc_method == "AFNI":
        from nipype.interfaces.afni import Volreg
        res = Volreg(
            in_file=self._results["out_volumes"],
            args="-Fourier -twopass",
            zpad=4,
            outputtype="NIFTI_GZ",
        ).run()
        # self._results["out_hmc"] = res.outputs.oned_matrix_save
    elif self.inputs.mc_method == "FSL":
        from nipype.interfaces.fsl import MCFLIRT
        res = MCFLIRT(
            in_file=self._results["out_volumes"],
            ref_vol=0,
            interpolation="sinc",
        ).run()
        self._results["out_hmc"] = res.outputs.mat_file

    if self.inputs.mc_method:
        data = nb.load(res.outputs.out_file).get_fdata(dtype="float32")

    data = np.clip(
        data,
        a_min=0.0 if self.inputs.nonnegative else data.min(),
        a_max=data.max(),
    )

    sliced.__class__(np.median(data, axis=3), sliced.affine,
                     sliced.header).to_filename(self._results["out_file"])
    return runtime
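# A minimal sketch (synthetic data) of the global-signal drift correction performed above:
# each volume is divided by the max-normalized mean time series, so a slow multiplicative
# drift is removed before averaging.
import numpy as np

rng = np.random.default_rng(1)
data = rng.random((4, 4, 4, 20)).astype("float32")
data *= np.linspace(1.0, 1.5, 20)        # simulate a 50% intensity drift over time
gs_drift = data.mean(axis=(0, 1, 2))     # global signal, one value per volume
gs_drift /= gs_drift.max()
corrected = data / gs_drift              # drift-free volumes
print(corrected.mean(axis=(0, 1, 2)))    # approximately constant across time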
def main():
    SHOW_AXES = True
    AFFINE_IMG = True
    NO_SCALE = True

    data_dir = rb'C:\Users\deoliv1\OneDrive - Aalto University\data\dti_navigation\juuso'

    stl_path = b'wm_orig_smooth_world.stl'
    brain_path = os.path.join(data_dir, stl_path)

    stl_path = b'wm.stl'
    brain_inv_path = os.path.join(data_dir, stl_path)

    nii_path = b'sub-P0_dwi_FOD.nii'
    trk_path = os.path.join(data_dir, nii_path)

    nii_path = b'sub-P0_T1w_biascorrected.nii'
    img_path = os.path.join(data_dir, nii_path)

    imagedata = nb.squeeze_image(nb.load(img_path.decode('utf-8')))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()
    # print(imagedata.header)
    print("pix_dim: {}, img_shape: {}".format(pix_dim, img_shape))

    if AFFINE_IMG:
        affine = imagedata.affine
        if NO_SCALE:
            scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
            affine = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)
    else:
        affine = np.identity(4)

    print("affine: {0}\n".format(affine))

    # Create a rendering window and renderer
    ren = vtk.vtkRenderer()
    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)

    # Create a render window interactor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    tracker = Trekker.initialize(trk_path)
    tracker.seed_maxTrials(1)
    tracker.minFODamp(0.1)
    tracker.writeInterval(50)
    tracker.maxLength(200)
    tracker.minLength(20)
    tracker.maxSamplingPerStep(100)

    repos = [0., 0., 0., 0., 0., 0.]
    brain_actor = load_stl(brain_inv_path, ren, opacity=.1, colour=[1.0, 1.0, 1.0],
                           replace=repos, user_matrix=np.identity(4))
    bds = brain_actor.GetBounds()
    print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    repos = [0., 0., 0., 0., 0., 0.]
    brain_actor_mri = load_stl(brain_path, ren, opacity=.1, colour=[0.0, 1.0, 0.0],
                               replace=repos, user_matrix=np.linalg.inv(affine))
    bds = brain_actor_mri.GetBounds()
    print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds))

    repos = [0., 256., 0., 0., 0., 0.]
    # brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=0.5, replace=repos, user_matrix=np.linalg.inv(affine))
    brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=.1,
                               replace=repos)

    # Add axes to the scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Show tracts
    repos_trk = [0., -256., 0., 0., 0., 0.]

    matrix_vtk = vtk.vtkMatrix4x4()
    trans = np.identity(4)
    trans[1, -1] = repos_trk[1]
    final_matrix = np.linalg.inv(affine) @ trans
    print("final_matrix: {}".format(final_matrix))

    for row in range(0, 4):
        for col in range(0, 4):
            matrix_vtk.SetElement(row, col, final_matrix[row, col])

    for i in range(10):
        seed = np.array([[-8.49, -8.39, 2.5]])
        visualizeTracks(ren, ren_win, tracker, seed, user_matrix=matrix_vtk)

    # Assign actors to the renderer
    ren.AddActor(brain_actor)
    ren.AddActor(brain_inv_actor)
    ren.AddActor(brain_actor_mri)

    # Enable the user interface interactor
    iren.Initialize()
    ren_win.Render()
    iren.Start()
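# A minimal sketch of the NO_SCALE trick used in these scripts: decompose the image affine
# and rebuild it without the scale component, keeping rotation, shear, translation and
# perspective. The import name is an assumption; the scripts above only show it as "tf"
# (the decompose_matrix/compose_matrix API matches the transformations.py library).
import numpy as np
import transformations as tf  # assumed import behind the "tf" alias

affine_scaled = np.diag([2., 2., 2., 1.])  # toy affine with isotropic 2 mm scaling
scale, shear, angs, trans, persp = tf.decompose_matrix(affine_scaled)
affine_noscale = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                   translate=trans, perspective=persp)
print(np.allclose(affine_noscale, np.identity(4)))  # True: only the scaling was dropped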
def _advanced_clip(in_file, p_min=35, p_max=99.98, nonnegative=True,
                   dtype="int16", invert=False, newpath=None):
    """
    Remove outliers at both ends of the intensity distribution and fit into a given dtype.

    This interface tries to emulate ANTs workflows' massaging that truncates images into
    the 0-255 range, and applies percentiles for clipping images.
    For image registration, normalizing the intensity into a compact range (e.g., uint8)
    is generally advised.

    To determine the clipping thresholds more robustly, spikes are removed from the data
    with a median filter.
    Once the thresholds are calculated, the denoised data are thrown away and the
    thresholds are applied to the original image.

    """
    from pathlib import Path
    import nibabel as nb
    import numpy as np
    from scipy import ndimage
    from skimage.morphology import ball

    out_file = (Path(newpath or "") / "clipped.nii.gz").absolute()

    # Load data
    img = nb.squeeze_image(nb.load(in_file))
    if len(img.shape) != 3:
        raise RuntimeError(f"<{in_file}> is not a 3D file.")
    data = img.get_fdata(dtype="float32")

    # Calculate stats on the denoised version, to keep outliers from biasing them
    denoised = ndimage.median_filter(data, footprint=ball(3))

    a_min = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_min)
    a_max = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_max)

    # Clip and cast
    data = np.clip(data, a_min=a_min, a_max=a_max)
    data -= data.min()
    data /= data.max()

    if invert:
        data = 1.0 - data

    if dtype in ("uint8", "int16"):
        data = np.round(255 * data).astype(dtype)

    hdr = img.header.copy()
    hdr.set_data_dtype(dtype)
    img.__class__(data, img.affine, hdr).to_filename(out_file)

    return str(out_file)
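# A usage sketch for _advanced_clip, assuming a local T1-weighted volume named
# "t1w.nii.gz" (illustrative path): clip to the 35th-99.98th percentile range of the
# median-filtered foreground and write an int16 "clipped.nii.gz" in the working directory.
clipped_path = _advanced_clip("t1w.nii.gz", p_min=35, p_max=99.98,
                              nonnegative=True, dtype="int16")
print(clipped_path)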
import numpy as np
import nibabel as nib

filepath_image = "D:\\Programas\\Google Drive\\Lab\\Doutorado\\projetos\\DTI_coord_transf\\nexstim_coordinates\\OriginalImage\\5_mprage_mgh-variant.nii"
filepath_data = "D:\\Programas\\Google Drive\\Lab\\Doutorado\\projetos\\DTI_coord_transf\\nexstim_coordinates\\nexstim_coords.mks"

imagedata = nib.squeeze_image(nib.load(filepath_image))
imagedata = nib.as_closest_canonical(imagedata)
imagedata.update_header()
hdr = imagedata.header

data = np.loadtxt(filepath_data)

# reorder the columns from [x, y, z] to [x, z, y], then flip the coordinates
# against the first two image dimensions
i = np.argsort([0, 2, 1])
data_flip = data[:, i]
data_flip[:, 1] = hdr.get_data_shape()[1] - data[:, 1]
data_flip[:, 0] = hdr.get_data_shape()[0] - data[:, 0]

NBS2INV_markers = np.hstack((data_flip, data[:, 3:]))

np.savetxt(
    "D:\\Programas\\Google Drive\\Lab\\Doutorado\\projetos\\DTI_coord_transf\\nexstim_coordinates\\NBS2INV_markers.mks",
    NBS2INV_markers)
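# A small illustration (toy data, not the original marker file) of the swap-and-flip
# pattern above: column order [x, y, z] -> [x, z, y], followed by flipping coordinates
# against assumed image dimensions.
toy = np.array([[10., 20., 30.]])      # one marker at x=10, y=20, z=30
shape = (181, 217, 181)                # assumed image dimensions
i = np.argsort([0, 2, 1])              # evaluates to [0, 2, 1]: swap y and z
toy_flip = toy[:, i]
toy_flip[:, 1] = shape[1] - toy[:, 1]  # flip along the second image axis
toy_flip[:, 0] = shape[0] - toy[:, 0]  # flip along the first image axis
print(toy_flip)                        # [[171., 197., 20.]]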
def _run_interface(self, runtime):
    in_files = self.inputs.in_files
    if not isinstance(in_files, list):
        in_files = [self.inputs.in_files]

    if self.inputs.to_ras:
        in_files = [reorient(inf, newpath=runtime.cwd) for inf in in_files]

    run_hmc = self.inputs.hmc and len(in_files) > 1

    nii_list = []
    # Remove one-sized extra dimensions
    for i, f in enumerate(in_files):
        filenii = nb.load(f)
        filenii = nb.squeeze_image(filenii)
        if len(filenii.shape) == 5:
            raise RuntimeError("Input image (%s) is 5D." % f)
        if filenii.dataobj.ndim == 4:
            nii_list += nb.four_to_three(filenii)
        else:
            nii_list.append(filenii)

    if len(nii_list) > 1:
        filenii = nb.concat_images(nii_list)
    else:
        filenii = nii_list[0]

    merged_fname = fname_presuffix(self.inputs.in_files[0],
                                   suffix="_merged", newpath=runtime.cwd)
    filenii.to_filename(merged_fname)
    self._results["out_file"] = merged_fname
    self._results["out_avg"] = merged_fname

    if filenii.dataobj.ndim < 4:
        # TODO: generate identity out_mats and zero-filled out_movpar
        return runtime

    if run_hmc:
        mcflirt = fsl.MCFLIRT(
            cost="normcorr",
            save_mats=True,
            save_plots=True,
            ref_vol=0,
            in_file=merged_fname,
        )
        mcres = mcflirt.run()
        filenii = nb.load(mcres.outputs.out_file)
        self._results["out_file"] = mcres.outputs.out_file
        self._results["out_mats"] = mcres.outputs.mat_file
        self._results["out_movpar"] = mcres.outputs.par_file

    hmcdata = filenii.get_fdata(dtype="float32")
    if self.inputs.grand_mean_scaling:
        if not isdefined(self.inputs.in_mask):
            mean = np.median(hmcdata, axis=-1)
            thres = np.percentile(mean, 25)
            mask = mean > thres
        else:
            mask = nb.load(self.inputs.in_mask).get_fdata(dtype="float32") > 0.5

        nimgs = hmcdata.shape[-1]
        means = np.median(hmcdata[mask[..., np.newaxis]].reshape((-1, nimgs)).T,
                          axis=-1)
        max_mean = means.max()
        for i in range(nimgs):
            hmcdata[..., i] *= max_mean / means[i]

    hmcdata = hmcdata.mean(axis=3)
    if self.inputs.zero_based_avg:
        hmcdata -= hmcdata.min()

    self._results["out_avg"] = fname_presuffix(self.inputs.in_files[0],
                                               suffix="_avg", newpath=runtime.cwd)
    nb.Nifti1Image(hmcdata, filenii.affine,
                   filenii.header).to_filename(self._results["out_avg"])

    return runtime
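# A minimal sketch (synthetic data) of the grand-mean scaling step above: each volume is
# rescaled so that its median intensity inside the mask matches the largest per-volume
# median, equalizing global intensity across the series.
import numpy as np

rng = np.random.default_rng(2)
series = rng.random((5, 5, 5, 8)).astype("float32")
series *= rng.uniform(0.5, 1.5, 8)          # per-volume global intensity differences
mask = series.mean(axis=-1) > 0.25          # crude foreground mask
means = np.array([np.median(series[..., i][mask]) for i in range(series.shape[-1])])
series *= means.max() / means               # broadcast over the time axis
print(np.array([np.median(series[..., i][mask]) for i in range(8)]))  # now ~equal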
def main():
    SHOW_AXES = True
    AFFINE_IMG = True
    NO_SCALE = True
    COMPUTE_TRACTS = True

    n_tracts = 240
    # n_tracts = 24
    n_threads = 2 * psutil.cpu_count()
    img_shift = 0  # 255

    data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\joonas'

    filenames = {
        'T1': 'sub-S1_ses-S8741_T1w',
        'FOD': 'FOD_T1_space',
        'ACT': 'trekkerACTlabels',
        'COIL': 'magstim_fig8_coil',
        'HEAD': 'head_inv',
        'BRAIN': 'brain_inv',
        'BRAINSIM': 'gm',
        'WM': 'skin'
    }

    img_path = os.path.join(data_dir, filenames['T1'] + '.nii')
    trk_path = os.path.join(data_dir, filenames['FOD'] + '.nii')
    act_path = os.path.join(data_dir, filenames['ACT'] + '.nii')
    coil_path = os.path.join(data_dir, filenames['COIL'] + '.stl')
    head_inv_path = os.path.join(data_dir, filenames['HEAD'] + '.stl')
    brain_inv_path = os.path.join(data_dir, filenames['BRAIN'] + '.stl')
    brain_sim_path = os.path.join(data_dir, filenames['BRAINSIM'] + '.stl')
    wm_sim_path = os.path.join(data_dir, filenames['WM'] + '.stl')

    imagedata = nb.squeeze_image(nb.load(img_path))
    imagedata = nb.as_closest_canonical(imagedata)
    imagedata.update_header()
    pix_dim = imagedata.header.get_zooms()
    img_shape = imagedata.header.get_data_shape()

    act_data = nb.squeeze_image(nb.load(act_path))
    act_data = nb.as_closest_canonical(act_data)
    act_data.update_header()
    act_data_arr = act_data.get_fdata()

    # print(imagedata.header)
    # print("pix_dim: {}, img_shape: {}".format(pix_dim, img_shape))
    print("Pixel size: \n")
    print(pix_dim)
    print("\nImage shape: \n")
    print(img_shape)
    print("\nQform: \n")
    print(imagedata.get_qform(coded=True))
    print("\nSform: \n")
    print(imagedata.get_sform(coded=True))
    print("\nFall-back: \n")
    print(imagedata.header.get_base_affine())

    if AFFINE_IMG:
        affine = imagedata.affine
        if NO_SCALE:
            scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine)
            affine = tf.compose_matrix(scale=None, shear=shear, angles=angs,
                                       translate=trans, perspective=persp)
    else:
        affine = np.identity(4)

    print("affine: {0}\n".format(affine))

    # Create a rendering window and renderer
    ren = vtk.vtkRenderer()
    ren.SetUseDepthPeeling(1)
    ren.SetOcclusionRatio(0.1)
    ren.SetMaximumNumberOfPeels(100)
    ren_win = vtk.vtkRenderWindow()
    ren_win.AddRenderer(ren)
    ren_win.SetSize(800, 800)
    ren_win.SetMultiSamples(0)
    ren_win.SetAlphaBitPlanes(1)

    # Create a render window interactor
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(ren_win)

    repos = [0., 0., 0., 0., 0., 0.]
    # brain in InVesalius space (STL as exported by InVesalius)
    _ = load_stl(head_inv_path, ren, opacity=.7, colour=[1.0, 1.0, 1.0],
                 replace=repos, user_matrix=np.identity(4))
    _ = load_stl(wm_sim_path, ren, opacity=.7, colour=[1.0, 1.0, 1.0],
                 replace=repos, user_matrix=np.identity(4))

    # simnibs brain in RAS+ space
    # _ = load_stl(brain_sim_path, ren, opacity=1., colour=[1.0, 0., 0.], replace=repos, user_matrix=np.identity(4))

    # brain in RAS+ space
    inv2ras = affine.copy()
    inv2ras[1, 3] += pix_dim[1] * img_shape[1]
    inv2ras[0, 3] -= 12
    # _ = load_stl(brain_inv_path, ren, opacity=.6, colour="SkinColor", replace=repos, user_matrix=inv2ras)

    # brain in voxel space
    inv2voxel = np.identity(4)
    inv2voxel[1, 3] = inv2voxel[1, 3] + pix_dim[1] * img_shape[1]
    # _ = load_stl(brain_inv_path, ren, opacity=.6, colour=[0.482, 0.627, 0.698], replace=repos, user_matrix=inv2voxel)

    # simnibs brain brought from RAS+ into the InVesalius space
    ras2inv = np.linalg.inv(affine.copy())
    ras2inv[1, 3] -= pix_dim[1] * img_shape[1]
    _ = load_stl(wm_sim_path, ren, opacity=.7, colour=[0.482, 0.627, 0.698],
                 replace=repos, user_matrix=ras2inv)

    repos_1 = [0., 0., 0., 0., 0., 180.]
    # _ = load_stl(wm_sim_path, ren, opacity=.7, colour=[1., 0., 0.], replace=repos_1, user_matrix=np.linalg.inv(affine))

    # create fiducial markers
    # row-wise, the coordinates refer to: right ear, left ear, nasion
    # fids_inv = np.array([[168.300, 126.600, 97.000],
    #                      [9.000, 120.300, 93.700],
    #                      [90.100, 33.500, 150.000]])

    fids_inv = np.array([[167.7, 120.9, 96.0],
                         [8.2, 122.7, 91.0],
                         [89.0, 18.6, 129.0]])

    fids_inv_vtk = np.array([[167.7, 120.9, 96.0],
                             [8.2, 122.7, 91.0],
                             [89.0, 18.6, 129.0]])
    # from the InVesalius-exported fiducial markers, multiply the Y coordinate by -1 to
    # transform to the regular 3D InVesalius space where the coil location is saved
    fids_inv_vtk[:, 1] *= -1

    # the following code converts from the InVesalius 3D space to the MRI scanner coordinate system
    fids_inv_vtk_w = fids_inv_vtk.copy()
    fids_inv_vtk_w = np.hstack((fids_inv_vtk_w, np.ones((3, 1))))
    fids_scan = np.linalg.inv(ras2inv) @ fids_inv_vtk_w.T
    fids_vis = fids_scan.T[:3, :3]
    # --- fiducial markers

    seed = np.array([60.0, 147.0, 204.0])
    seed_inv = np.array([60.0, -147.0, 204.0])

    coil_pos = [43.00, 155.47, 225.22, -21.00, -37.45, 58.41]
    m_coil = coil_transform_pos(coil_pos)

    # show the coil
    repos_coil = [0., 0., 0., 0., 0., 90.]
    # _ = load_stl(coil_path, ren, opacity=.6, replace=repos_coil, colour=[1., 1., 1.], user_matrix=m_coil)

    # create coil vectors
    vec_length = 75
    p1 = m_coil[:-1, -1]
    coil_dir = m_coil[:-1, 0]
    coil_face = m_coil[:-1, 1]

    p2_face = p1 + vec_length * coil_face
    p2_dir = p1 + vec_length * coil_dir

    coil_norm = np.cross(coil_dir, coil_face)
    p2_norm = p1 - vec_length * coil_norm

    add_line(ren, p1, p2_dir, color=[1.0, .0, .0])
    add_line(ren, p1, p2_face, color=[.0, 1.0, .0])
    add_line(ren, p1, p2_norm, color=[.0, .0, 1.0])
    # --- coil vectors

    p1_change = p1.copy()
    p1_change[1] = -p1_change[1]

    # offset = 40
    # coil_norm = coil_norm/np.linalg.norm(coil_norm)
    # coord_offset_nav = p1 - offset * coil_norm

    # convert to world coordinate space to use as a seed for fiber tracking
    seed_world = np.append(seed, 1)[np.newaxis, :].T
    seed_world = affine @ seed_world
    seed_world = seed_world[:3, 0, np.newaxis].T

    # convert to world coordinate space to use as a seed for fiber tracking
    seed_world_true = np.append(seed_inv, 1)[np.newaxis, :].T
    seed_world_true = inv2ras @ seed_world_true
    seed_world_true = seed_world_true[:3, 0, np.newaxis].T

    # convert to voxel coordinate space
    seed_mri = np.append(seed_inv, 1)[np.newaxis, :].T
    seed_mri = inv2voxel @ seed_mri
    seed_mri = seed_mri[:3, 0, np.newaxis].T

    # 0: red, 1: green, 2: blue, 3: magenta,
    # 4: purple, 5: teal (petrol blue), 6: yellow, 7: orange
    colours = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],
               [0.45, 0., 0.5], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]

    # for n in range(3):
    #     _ = add_marker(fids_inv[n, :], ren, colours[n], radius=2)

    for n in range(3):
        _ = add_marker(fids_inv_vtk[n, :], ren, colours[n], radius=2)

    for n in range(3):
        _ = add_marker(fids_vis[n, :], ren, colours[n], radius=2)

    _ = add_marker(p1, ren, colours[4], radius=2)
    _ = add_marker(seed_inv, ren, colours[5], radius=2)
    _ = add_marker(np.squeeze(seed_world), ren, colours[6], radius=2)
    _ = add_marker(np.squeeze(seed_world_true), ren, colours[3], radius=2)
    _ = add_marker(seed, ren, colours[7], radius=2)
    _ = add_marker(np.squeeze(seed_mri), ren, colours[1], radius=2)

    # create tracts
    if COMPUTE_TRACTS:
        # Show tracts
        repos_trk = [0., -(pix_dim[1] * img_shape[1]), 0., 0., 0., 0.]
        matrix_vtk = vtk.vtkMatrix4x4()
        trans = np.identity(4)
        trans[1, -1] = repos_trk[1]
        final_matrix = np.linalg.inv(affine) @ trans
        print("final_matrix: {}".format(final_matrix))

        for row in range(0, 4):
            for col in range(0, 4):
                matrix_vtk.SetElement(row, col, final_matrix[row, col])

        root = vtk.vtkMultiBlockDataSet()

        start_time = time.time()
        tracker = Trekker.initialize(bytes(trk_path, 'utf-8'))
        tracker.seed_maxTrials(1)
        tracker.minFODamp(0.1)
        tracker.writeInterval(50)
        tracker.maxLength(200)
        tracker.minLength(20)
        tracker.maxSamplingPerStep(100)
        tracker.numberOfThreads(n_threads)
        duration = time.time() - start_time
        print("Initialize Trekker: {:.2f} ms".format(1e3 * duration))

        count_tracts = 0
        start_time_all = time.time()

        for n in range(round(n_tracts / n_threads)):
            # branch = dti.multi_block(tracker, seed, n_threads)
            branch = dti.multi_block(tracker, seed_world_true, n_threads)
            count_tracts += branch.GetNumberOfBlocks()

            # start_time = time.time()
            # root = dti.tracts_root(out_list, root, n)
            root.SetBlock(n, branch)
            # duration = time.time() - start_time
            # print("Compute root {}: {:.2f} ms".format(n, 1e3*duration))

        duration = time.time() - start_time_all
        print("Compute multi {}: {:.2f} ms".format(n, 1e3 * duration))
        print("Number of computed tracts: {}".format(count_tracts))
        print("Number of computed branches: {}".format(root.GetNumberOfBlocks()))

        start_time = time.time()
        tracts_actor = dti.compute_actor(root, matrix_vtk)
        duration = time.time() - start_time
        print("Compute actor: {:.2f} ms".format(1e3 * duration))

        ren.AddActor(tracts_actor)

    # Add axes to the scene origin
    if SHOW_AXES:
        add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0])
        add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0])

    # Enable the user interface interactor
    iren.Initialize()
    ren_win.Render()
    iren.Start()
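# The numpy-to-vtkMatrix4x4 copy loop appears in several of the scripts above; a small
# helper (illustrative, not part of the original code) keeps it in one place.
import numpy as np
import vtk

def np_to_vtk_matrix(m):
    """Copy a 4x4 numpy array into a vtkMatrix4x4, element by element."""
    matrix_vtk = vtk.vtkMatrix4x4()
    for row in range(4):
        for col in range(4):
            matrix_vtk.SetElement(row, col, float(m[row, col]))
    return matrix_vtk

# e.g. matrix_vtk = np_to_vtk_matrix(np.linalg.inv(affine) @ trans)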