import os
import numpy as np
import nibabel as nib

def nii_save(volume, path, filename):
    # Wrap the array in a NIfTI image with an identity affine and save it.
    output = nib.Nifti1Image(volume, np.eye(4))
    nib.save(output, os.path.join(path, filename))
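# Usage sketch (not from the source): the output directory and array shape
# below are hypothetical.
os.makedirs('out', exist_ok=True)
volume = np.random.rand(32, 32, 32).astype('float32')
nii_save(volume, 'out', 'example.nii.gz')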
def _run_interface(self, runtime):
    from scipy import ndimage as sim

    fmap_nii = nb.load(self.inputs.in_file)
    data = np.squeeze(fmap_nii.get_fdata(dtype='float32'))

    # Despike / denoise (no-mask)
    if self.inputs.despike:
        data = _despike2d(data, self.inputs.despike_threshold)

    mask = None
    if isdefined(self.inputs.in_mask):
        masknii = nb.load(self.inputs.in_mask)
        mask = np.asanyarray(masknii.dataobj).astype('uint8')

        # Erode mask
        if self.inputs.mask_erode > 0:
            struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 1)
            mask = sim.binary_erosion(
                mask, struc,
                iterations=self.inputs.mask_erode
            ).astype(np.uint8)  # pylint: disable=no-member

    self._results['out_file'] = fname_presuffix(
        self.inputs.in_file, suffix='_enh', newpath=runtime.cwd)
    datanii = nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header)

    if self.inputs.unwrap:
        data = _unwrap(data, self.inputs.in_magnitude, mask)
        self._results['out_unwrapped'] = fname_presuffix(
            self.inputs.in_file, suffix='_unwrap', newpath=runtime.cwd)
        nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header).to_filename(
            self._results['out_unwrapped'])

    if not self.inputs.bspline_smooth:
        datanii.to_filename(self._results['out_file'])
        return runtime
    else:
        from ..utils import bspline as fbsp
        from statsmodels.robust.scale import mad

        # Fit BSplines (coarse)
        bspobj = fbsp.BSplineFieldmap(datanii, weights=mask,
                                      njobs=self.inputs.num_threads)
        bspobj.fit()
        smoothed1 = bspobj.get_smoothed()

        # Manipulate the difference map
        diffmap = data - smoothed1.get_fdata(dtype='float32')
        sderror = mad(diffmap[mask > 0])
        LOGGER.info('SD of error after B-Spline fitting is %f', sderror)
        errormask = np.zeros_like(diffmap)
        errormask[np.abs(diffmap) > (10 * sderror)] = 1
        errormask *= mask

        nslices = 0
        try:
            errorslice = np.squeeze(np.argwhere(errormask.sum(0).sum(0) > 0))
            nslices = errorslice[-1] - errorslice[0]
        except IndexError:  # mask is empty, do not refine
            pass

        if nslices > 1:
            diffmapmsk = mask[..., errorslice[0]:errorslice[-1]]
            diffmapnii = nb.Nifti1Image(
                diffmap[..., errorslice[0]:errorslice[-1]] * diffmapmsk,
                datanii.affine, datanii.header)
            bspobj2 = fbsp.BSplineFieldmap(diffmapnii, knots_zooms=[24., 24., 4.],
                                           njobs=self.inputs.num_threads)
            bspobj2.fit()
            smoothed2 = bspobj2.get_smoothed().get_fdata(dtype='float32')

            final = smoothed1.get_fdata(dtype='float32').copy()
            final[..., errorslice[0]:errorslice[-1]] += smoothed2
        else:
            final = smoothed1.get_fdata(dtype='float32')

        nb.Nifti1Image(final, datanii.affine, datanii.header).to_filename(
            self._results['out_file'])

    return runtime
def plot_brainrsa_montage(img, threshold=None, slice=[6, 6, 6], background=get_bg_ch2bet()):
    """
    Plot the RSA result by different cuts

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting of more than
        n voxels will be visible. If it is None, the threshold correction
        will not be applied.
    slice : array
        The point where the cut is performed.
        If slice=[slice_x, slice_y, slice_z], then slice_x, slice_y and slice_z
        represent the coordinates of each cut in the x, y and z directions.
        If slice=[[slice_x1, slice_x2], [slice_y1, slice_y2], [slice_z1, slice_z2]],
        slice_x1 & slice_x2 represent the coordinates of each cut in the x direction,
        slice_y1 & slice_y2 represent the coordinates of each cut in the y direction,
        and slice_z1 & slice_z2 represent the coordinates of each cut in the z direction.
    background : Niimg-like object or string. Default is stuff.get_bg_ch2bet()
        The background image that the RSA results will be plotted on top of.
    """
    imgarray = nib.load(img).get_data()

    # NOTE: the original test `(imgarray == np.nan).all()` is always False;
    # np.isnan() is the correct check.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_data()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)

        slice_x = slice[0]
        slice_y = slice[1]
        slice_z = slice[2]

        if slice_x != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background,
                                   display_mode='x', cut_coords=slice_x,
                                   title="Similarity -sagittal", draw_cross=True, vmax=1)
        if slice_y != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background,
                                   display_mode='y', cut_coords=slice_y,
                                   title="Similarity -coronal", draw_cross=True, vmax=1)
        if slice_z != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background,
                                   display_mode='z', cut_coords=slice_z,
                                   title="Similarity -axial", draw_cross=True, vmax=1)

        plt.show()
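# Hypothetical usage sketch (the .nii path is illustrative): one sagittal,
# coronal and axial cut each, keeping only clusters larger than 10 voxels.
plot_brainrsa_montage('rsa_result.nii', threshold=10, slice=[6, 6, 6])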
objmask = np.zeros(numpypredict.shape, dtype='uint8')
scoreimg = np.zeros(numpypredict.shape, dtype='float16')

# In[ ]:

# FIXME - vectorize this
for iii in range(nslice):
    print(iii,)
    myimage = numpypredict[:, :, iii]
    results = model.detect([myimage[:, :, np.newaxis]], verbose=1)
    myoutput = results[0]
    for jjj, idclass in enumerate(myoutput['class_ids']):
        scoreimg[myoutput['rois'][jjj][0]:myoutput['rois'][jjj][2],
                 myoutput['rois'][jjj][1]:myoutput['rois'][jjj][3],
                 iii] = myoutput['scores'][jjj]
        objmask[myoutput['rois'][jjj][0]:myoutput['rois'][jjj][2],
                myoutput['rois'][jjj][1]:myoutput['rois'][jjj][3],
                iii] = 1
        objmask[:, :, iii] = objmask[:, :, iii] + idclass * myoutput['masks'][:, :, jjj].astype('uint8')

# write out
segout_img = nib.Nifti1Image(objmask, None, header=imageheader)
segout_img.to_filename(options.segmentation)
scrout_img = nib.Nifti1Image(scoreimg, None, header=imageheader)
scrout_img.to_filename('/'.join(options.segmentation.split('/')[:-1]) + '/objscore.nii.gz')

##########################
# print help
##########################
else:
    import keras
    import tensorflow as tf
    print("keras version: ", keras.__version__, 'TF version:', tf.__version__)
    print("debug: /opt/apps/miniconda/maskrcnn/lib/python3.6/site-packages/keras/engine/training.py(1450)train_on_batch()->[566.86456, 114.579956, 168.20625, 284.07834, 0.0, 0.0]")
    print("debug: /opt/apps/miniconda/maskrcnn/lib/python3.6/site-packages/keras/engine/training_generator.py(174)fit_generator()")
    dataset_test = LoadDataset()
    parser.print_help()
def colorize(self, col_out_dir, final_out_dir):
    """
    Transforms the individual channels from Pipeline.color_split using the
    affine/nonlinear transformation parameters from
    Pipeline.slice_by_slice_alignment() and the nonlinear volumetric
    transformation parameters from Pipeline.blockface_to_MRI_alignment().

    Because each transformation is independent of the others, the script
    will utilize all threads provided by the user to transform multiple
    slices simultaneously. See Transform_Wrapper for more information and
    the transformation code.
    """
    # Feed transformation information to sub-processes through Transform_Wrapper
    out_suf_list = ['Blue', 'Green', 'Red']
    skip_flag = False
    for i, col_vol in enumerate([self.hist_NIFTI.Blue_vol,
                                 self.hist_NIFTI.Green_vol,
                                 self.hist_NIFTI.Red_vol]):
        for j in range(len(self.hist_NIFTI.slices)):
            if not os.path.isfile(self.orig_slice_by_slice_loc + '/color/' +
                                  out_suf_list[i] + '/' + col_vol.slices[j].name):
                break
        else:
            continue
        break
    else:
        print(' - All Color Channel Split Transformed Files Exist. Utilizing currently existing data.')
        skip_flag = True

    if skip_flag == False:
        print('====================================ATTEMPTING TO MULTITHREAD====================================')
        pool = Pool(processes=self.threads)
        self.hist_NIFTI.Blue_vol.col = 'Blue'
        self.hist_NIFTI.Green_vol.col = 'Green'
        self.hist_NIFTI.Red_vol.col = 'Red'
        # NOTE: the original iterated over enumerate(...), passing (index, volume)
        # tuples into Transform_Wrapper; iterate over the volumes directly.
        for col_vol in [self.hist_NIFTI.Blue_vol,
                        self.hist_NIFTI.Green_vol,
                        self.hist_NIFTI.Red_vol]:
            pool.map(
                Transform_Wrapper(col_vol, self.hist_transform, self.BF_NIFTI,
                                  self.orig_slice_by_slice_loc + '/color/'),
                list(range(len(self.hist_transform.slices))))
        pool.close()
        pool.join()

    # Load output color channel split Stacks and convert to volumes
    tmp = self.BF_NIFTI
    r = Stacks.NIFTI_Stack(self.orig_slice_by_slice_loc + '/color/Red/')
    r.affine_3D = tmp.affine_3D
    r.volumize(self.orig_slice_by_slice_loc + '/color/volumes/r_vol.nii.gz')
    g = Stacks.NIFTI_Stack(self.orig_slice_by_slice_loc + '/color/Green/')
    g.affine_3D = tmp.affine_3D
    g.volumize(self.orig_slice_by_slice_loc + '/color/volumes/g_vol.nii.gz')
    b = Stacks.NIFTI_Stack(self.orig_slice_by_slice_loc + '/color/Blue/')
    b.affine_3D = tmp.affine_3D
    b.volumize(self.orig_slice_by_slice_loc + '/color/volumes/b_vol.nii.gz')

    # Transform color-split volumes to the MRI space
    self.final_apply_transform(
        self.orig_slice_by_slice_loc + '/color/volumes/b_vol.nii.gz',
        self.orig_slice_by_slice_loc + '/color/volumes/final_b_vol.nii.gz')
    self.final_apply_transform(
        self.orig_slice_by_slice_loc + '/color/volumes/g_vol.nii.gz',
        self.orig_slice_by_slice_loc + '/color/volumes/final_g_vol.nii.gz')
    self.final_apply_transform(
        self.orig_slice_by_slice_loc + '/color/volumes/r_vol.nii.gz',
        self.orig_slice_by_slice_loc + '/color/volumes/final_r_vol.nii.gz')

    # Load transformed and color-split volumes. Merge the channels to create an RGB volume.
    print('Loading RGB')
    r_data = nib.load(self.orig_slice_by_slice_loc + '/color/volumes/final_r_vol.nii.gz').get_data()
    g_data = nib.load(self.orig_slice_by_slice_loc + '/color/volumes/final_g_vol.nii.gz').get_data()
    b_data = nib.load(self.orig_slice_by_slice_loc + '/color/volumes/final_b_vol.nii.gz').get_data()

    print('Merging Channels')
    rgb = np.empty((r_data.shape[0], r_data.shape[1], r_data.shape[2], 3))
    rgb[:, :, :, 0] = b_data
    rgb[:, :, :, 1] = g_data
    rgb[:, :, :, 2] = r_data
    rgb = rgb.astype('u1')

    # Save the RGB Volume
    print('Saving Volume')
    shape_3d = rgb.shape[0:3]
    rgb_dtype = np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])
    rgb_typed = rgb.view(rgb_dtype).reshape(shape_3d)
    tmp = nib.load(self.MRI)
    volume = nib.Nifti1Image(rgb_typed, affine=tmp.affine)
    nib.save(volume, final_out_dir + '/RGB_aligned_histology_vol.nii.gz')
def calc_pve(self):
    # NOTE: the original compared against ("Operate" or "计算"), which always
    # evaluates to "Operate"; membership tests are the intended check.
    if self.bt_ok.text() in ("Operate", "计算"):
        try:
            if self.lan == 0:
                self.bt_ok.setText("Cancel")
            elif self.lan == 1:
                self.bt_ok.setText("取消")
            if self.index_method == 0:
                for idx in range(len(self.path_cbf)):
                    nii_cbf = ni.load(self.path_cbf[idx])
                    nii_gm = ni.load(self.path_gm[idx])
                    nii_wm = ni.load(self.path_wm[idx])
                    if nii_cbf.shape != nii_gm.shape:
                        if self.bul_autoreg == 1:
                            # Placeholder for automatic registration of the structural
                            # image to CBF; resampling is used instead for now.
                            nii_cbf = resample_to_img(nii_cbf, nii_gm)
                        else:
                            if self.lan == 0:
                                warn_msg(self, "Shape of CBF and T1 are different.")
                            elif self.lan == 1:
                                warn_msg(self, "CBF与结构像大小不一致")
                            continue
                    data_cbf = nii_cbf.get_fdata()
                    data_gm = nii_gm.get_fdata()
                    data_wm = nii_wm.get_fdata()
                    data_temp = (data_cbf * ((data_gm + data_wm) > 0.1)) / \
                                ((data_gm + 0.4 * data_wm) * ((data_gm + 0.4 * data_wm) > 0.1))
                    data_temp[np.isnan(data_temp)] = 0
                    data_temp[np.isinf(data_temp)] = 0
                    self.data_temp.append(ni.Nifti1Image(data_temp, nii_gm.affine, nii_gm.header))
                self.temp.set_nii(self.data_temp)
                if self.lan == 0:
                    self.bt_ok.setText("Operate")
                elif self.lan == 1:
                    self.bt_ok.setText("计算")
                self.temp.fname = [item.replace(".nii", "_pve.nii") for item in self.path_cbf]
                if self.checkbox_autosave == 1:
                    # Save each computed PVE map next to its CBF input (the original
                    # referenced the leaked comprehension variable `item`, which is
                    # undefined in Python 3).
                    for nii_out, fname_out in zip(self.data_temp, self.temp.fname):
                        ni.save(nii_out, fname_out)
                self.close()
            elif self.index_method == 1:
                data_temp = 0
                self.lr_thread = LRThread(self, self.lan)
                self.lr_thread.start()
                self.lr_thread.progress.connect(self.update_progress)
                self.lr_thread.trigger.connect(self.get_data_thread)
                self.lr_thread.file.connect(self.get_data_each)
            else:
                pass  # Reserved for other methods
        except FileNotFoundError:
            if self.lan == 0:
                warn_msg(self, "File does not exist.")
            elif self.lan == 1:
                warn_msg(self, "选择的文件不存在")
        except ni.filebasedimages.ImageFileError:
            if self.lan == 0:
                warn_msg(self, "Selected file is not an nii file.")
            elif self.lan == 1:
                warn_msg(self, "选择的文件格式错误")
    elif self.bt_ok.text() in ("Cancel", "取消"):
        self.lr_thread.stop()
        self.lr_thread.quit()
        self.lr_thread.wait()
        del self.lr_thread
        if self.lan == 0:
            self.bt_ok.setText("Operate")
        elif self.lan == 1:
            self.bt_ok.setText("计算")
def test(model, limit, save, bbox):
    """Test the model.
    model: the model to test.
    limit: the number of images to be used.
    save: whether to save the masks.
    bbox: whether to draw the bboxes.
    """
    per_class_ious = []
    info = json.load(open(args.data + "dataset.json"))
    info = list(info['train_and_test'])
    detect_time = 0
    for path in info[:limit]:
        path_image = path['image']
        path_label = path['label']
        image = nib.load(path_image).get_data().copy()
        label = nib.load(path_label)  # load the gt-masks
        affine = label.affine  # prepared to save the predicted mask later
        label = label.get_data().copy()
        image = np.expand_dims(image, -1)
        start_time = time.time()
        result = model.detect([image])[0]
        detect_time += time.time() - start_time
        print("detect_time:", time.time() - start_time)
        """The shape of result: a dict containing
        {
            "rois": final_rois,            [N, (y1, x1, z1, y2, x2, z2)] in real coordinates
            "class_ids": final_class_ids,  [N]
            "scores": final_scores,        [N]
            "mask": final_mask,            [mask_shape[0], mask_shape[1], mask_shape[2]]
        }"""
        rois = result["rois"]
        class_ids = result["class_ids"]
        scores = result["scores"]
        mask = result["mask"]
        # Prepare the gt-masks and pred-masks to calculate the ious.
        gt_masks = np.zeros((image.shape[0], image.shape[1], image.shape[2],
                             model.config.NUM_CLASSES - 1))
        pred_masks = np.zeros((image.shape[0], image.shape[1], image.shape[2],
                               model.config.NUM_CLASSES - 1))
        # Generate the per instance gt masks.
        for j in range(model.config.NUM_CLASSES - 1):
            gt_masks[:, :, :, j][label == j + 1] = 1
        # Generate the per instance predicted masks.
        for j in range(model.config.NUM_CLASSES - 1):
            pred_masks[:, :, :, j][mask == j + 1] = 1
        # calculate different kinds of ious
        per_class_iou = utils.compute_per_class_mask_iou(gt_masks, pred_masks)
        per_class_ious.append(per_class_iou)
        # Save the results
        if save == "true":
            # Draw bboxes
            if bbox == "true":
                y1, x1, z1, y2, x2, z2 = rois[0, :]
                mask[y1, x1:x2, z1] = 10
                mask[y1, x1:x2, z2] = 10
                mask[y2, x1:x2, z1] = 10
                mask[y2, x1:x2, z2] = 10
                mask[y1:y2, x1, z1] = 10
                mask[y1:y2, x2, z1] = 10
                mask[y1:y2, x1, z2] = 10
                mask[y1:y2, x2, z2] = 10
                mask[y1, x1, z1:z2] = 10
                mask[y1, x2, z1:z2] = 10
                mask[y2, x1, z1:z2] = 10
                mask[y2, x2, z1:z2] = 10
            vol = nib.Nifti1Image(mask.astype(np.int32), affine)
            if not os.path.exists("./results"):
                os.makedirs("./results")
            nib.save(vol, "./results/" + str(per_class_iou.mean()) + "_" + path_image[-17:])
        print(path_image[-17:] + " detected done. iou = " + str(per_class_iou))
    print("Test completed.")
    # Print the iou results.
    per_class_ious = np.array(per_class_ious)
    print("per class iou mean:", np.mean(per_class_ious, axis=0))
    print("std:", np.std(per_class_ious, axis=0))
    print("Total ious mean:", per_class_ious.mean())
    print("Total detect time:", detect_time)
])

#raw_img = '/gsfs0/data/poskanzc/Sherlock/preproc/fmriprep/sub-01/func/sub-01_task-sherlockPart1_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
mask = '/gsfs0/data/poskanzc/Sherlock/preproc/fmriprep/gm_bin_mask.nii.gz'
#raw = nib.load(raw_img)
#raw_hdr = raw.header
mfn = nib.load(mask)
#masker = NiftiMasker()
#masker.fit(mfn)

for sub in subjects:
    masker = NiftiMasker()
    masker.fit(mfn)
    run1 = np.load(data_dir + sub + '/' + sub + '_compcorr/' + sub + '_GM_run_1_compcorr_TRedit.npy')
    raw_img = '/gsfs0/data/poskanzc/Sherlock/preproc/fmriprep/' + sub + '/func/' + sub + '_task-sherlockPart1_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
    raw = nib.load(raw_img)
    #raw_hdr = raw.header
    localizer_data = run1[0:204, :]
    nifti = masker.inverse_transform(localizer_data)
    nifti_data = nifti.get_data()
    nifti_array = np.array(nifti_data)
    final_img = nib.Nifti1Image(nifti_array, mfn.affine, raw.header)
    #print(nifti.header.get_zooms())
    #nib.Nifti1Header.from_header(header = hdr, check = True)
    #final_img = nib.Nifti1Image(nifti,affine)
    nib.save(final_img,
             data_dir + sub + '/' + sub + '_compcorr/' + sub + '_GM_run_1_compcorr_localizer.nii.gz')
def __getitem__(self, index):
    item = self.data_list[index]
    input_data = self.data_file.root.data[item]    # data shape: (4, 128, 128, 128)
    label_data = self.data_file.root.truth[item]   # truth shape: (1, 128, 128, 128)
    seg_label = get_target_label(label_data, self.config)
    affine = self.data_file.root.affine[item]
    # dimensions of data
    n_dim = len(seg_label[0].shape)
    if self.phase == "train":
        if self.config["random_offset"] is not None:
            offset_factor = -0.25 + np.random.random(n_dim)
        else:
            offset_factor = None
        if self.config["random_flip"] is not None:
            flip_axis = random_flip_dimensions(n_dim)
        else:
            flip_axis = None
        # Apply random offset and flip to each channel according to the randomly
        # generated offset factor and flip axis respectively.
        data_list = list()
        for data_channel in range(input_data.shape[0]):
            # Transform ndarray data to Nifti1Image
            channel_image = nib.Nifti1Image(dataobj=input_data[data_channel], affine=affine)
            data_list.append(
                resample_to_img(augment_image(channel_image,
                                              flip_axis=flip_axis,
                                              offset_factor=offset_factor),
                                channel_image,
                                interpolation="continuous").get_data())
        input_data = np.asarray(data_list)
        # Transform ndarray segmentation label to Nifti1Image
        seg_image = nib.Nifti1Image(dataobj=seg_label[0], affine=affine)
        seg_label = resample_to_img(augment_image(seg_image,
                                                  flip_axis=flip_axis,
                                                  offset_factor=offset_factor),
                                    seg_image,
                                    interpolation="nearest").get_data()
        seg_label = seg_label[np.newaxis]
    elif self.phase == "validate":
        data_list = list()
        offset_factor = None
        flip_axis = None
        for data_channel in range(input_data.shape[0]):
            # Transform ndarray data to Nifti1Image
            channel_image = nib.Nifti1Image(dataobj=input_data[data_channel], affine=affine)
            data_list.append(
                resample_to_img(augment_image(channel_image,
                                              flip_axis=flip_axis,
                                              offset_factor=offset_factor),
                                channel_image,
                                interpolation="continuous").get_data())
        input_data = np.asarray(data_list)
        # Transform ndarray segmentation label to Nifti1Image
        seg_image = nib.Nifti1Image(dataobj=seg_label[0], affine=affine)
        seg_label = resample_to_img(augment_image(seg_image,
                                                  flip_axis=flip_axis,
                                                  offset_factor=offset_factor),
                                    seg_image,
                                    interpolation="nearest").get_data()
        if len(seg_label.shape) == 3:
            seg_label = seg_label[np.newaxis]
    elif self.phase == "test":
        pass
    if self.config["VAE_enable"]:
        # Concatenate to (5, 128, 128, 128) as network output
        final_label = np.concatenate((seg_label, input_data), axis=0)
    else:
        final_label = seg_label
    return input_data, final_label
def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None):
    """
    Create a mask to constrain registration.

    Parameters
    ----------
    in_file : str
        Path to an existing image (usually a mask).
        If global_mask = True, this is used as a size/dimension reference.
    out_path : str
        Path/filename for the new cost function mask.
    lesion_mask : str, optional
        Path to an existing binary lesion mask.
    global_mask : bool
        Create a whole-image mask (True) or limit to reference mask (False).
        A whole-image mask is 1 everywhere.

    Returns
    -------
    str
        Absolute path of the new cost function mask.

    Notes
    -----
    in_file and lesion_mask must be in the same image space and have the
    same dimensions.
    """
    import os
    import numpy as np
    import nibabel as nb
    from nipype.utils.filemanip import fname_presuffix

    if out_path is None:
        out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd())
    else:
        out_path = os.path.abspath(out_path)

    if not global_mask and not lesion_mask:
        NIWORKFLOWS_LOG.warning(
            'No lesion mask was provided and global_mask not requested, '
            'therefore the original mask will not be modified.')

    # Load the input image
    in_img = nb.load(in_file)

    # If we want a global mask, create one based on the input image.
    data = np.ones(in_img.shape, dtype=np.uint8) if global_mask else in_img.get_data()
    if set(np.unique(data)) - {0, 1}:
        raise ValueError("`global_mask` must be True if `in_file` is not a binary mask")

    # If a lesion mask was provided, combine it with the secondary mask.
    if lesion_mask is not None:
        # Reorient the lesion mask and get the data.
        lm_img = nb.as_closest_canonical(nb.load(lesion_mask))

        # Subtract lesion mask from secondary mask, set negatives to 0
        data = np.fmax(data - lm_img.get_data(), 0)

    # Cost function mask will be created from subtraction
    # Otherwise, CFM will be created from global mask
    cfm_img = nb.Nifti1Image(data, in_img.affine, in_img.header)

    # Save the cost function mask.
    cfm_img.set_data_dtype(np.uint8)
    cfm_img.to_filename(out_path)

    return out_path
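# Usage sketch (paths are hypothetical): build a whole-image cost function
# mask that excludes a lesion, for use as a registration weight.
cfm_path = create_cfm('sub-01_T1w_mask.nii.gz',
                      lesion_mask='sub-01_lesion.nii.gz',
                      global_mask=True,
                      out_path='sub-01_cfm.nii.gz')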
mri_data = {}
for runii, fname in enumerate(nifti_files):
    for ROI in rois:
        # Get all cortex data and task orders
        lh_mask = np.array(nib.load(os.path.join(ROI_dir, 'lh.%s_vol_dil.nii.gz' % ROI)).get_data(),
                           dtype=bool)
        rh_mask = np.array(nib.load(os.path.join(ROI_dir, 'rh.%s_vol_dil.nii.gz' % ROI)).get_data(),
                           dtype=bool)
        # mri_data[ROI] = np.array([np.vstack([nib.load(nf).get_data()[lh_mask,:], nib.load(nf).get_data()[rh_mask,:]]) for nf in nifti_files])
        old_img = nib.load(nifti_files[runii])
        new_img = nib.Nifti1Image(old_img.get_data()[lh_mask + rh_mask, :], old_img.affine)
        new_img.to_filename(os.path.join(deriv_dir, '%s-%s-%i.nii.gz' % (subid, ROI, runii)))

    # Load trial data
    task_data = {'trial_order': [], 'trial_stimuli': [], 'trial_params': []}
    # for ti, par in zip(trialinfo_files, params_files):
    [trial_array, trial_indices, trial_params, per_trial_parameters,
     per_trial_phase_durations, staircase] = pickle.load(open(trialinfo_files[runii], 'rb'))
    trial_times = np.vstack(per_trial_phase_durations)[:, 0] + \
        np.arange(len(per_trial_phase_durations)) * TR
    os.path.join(path_to_images, "OAS1_*_MR1/mwc1OAS1_*dim.nii")))
images = images[:39]

### Mask data
print "Nifti masker"
nifti_masker = NiftiMasker(
    smoothing_fwhm=FWHM,
    memory='nilearn_cache', memory_level=1)  # cache options

# remove NaNs from images
ref_affine = np.asarray(nibabel.load(images[0]).get_affine())
images_ = [np.asarray(nibabel.load(img).get_data()) for img in images]
nonnan_images = []
for img in images_:
    img[np.isnan(img)] = 0.
    nonnan_images.append(nibabel.Nifti1Image(img, ref_affine))

# remove features with zero between-subject variance
images_masked = nifti_masker.fit_transform(images)
images_masked[:, images_masked.var(0) < 0.01] = 0.

# final masking
new_images = nifti_masker.inverse_transform(images_masked)
images_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = images_masked.shape
print n_samples, "subjects, ", n_features, "features"

### Euclidean distance between subjects
print "Compute Euclidean distances"
dist = euclidean_distances(images_masked)
mahalanobis_dist = np.mean(dist, 0) - np.median(dist)
threshold = stats.chi2(n_samples).isf(0.1 / float(n_samples))
save_c = click.confirm('\nSave masks? ', default=True)

print('Finding MUs:')
for x in progressbar.progressbar(range(1, n_points)):
    dims = objs[x]
    roi = regs == x
    roi_sig = np.nansum(np.ma.masked_array(stir_slice, ~roi), axis=(0, 1))
    c = np.corrcoef(global_sig, roi_sig)[1, 0]
    corr_sig.append(c)
    map_2d = (np.sum(roi, axis=2) > 0) * x
    mu_map += map_2d
    if save_c:
        volumes.append(np.sum(np.sum(np.sum(roi, axis=0), axis=0), axis=0))
        # NOTE: use ** for exponentiation; the original used ^, which is
        # bitwise XOR in Python.
        spat_size.append(
            np.sqrt((dims[1].stop - dims[1].start) ** 2 +
                    (dims[0].stop - dims[0].start) ** 2))
        mask = nib.Nifti1Image(roi.astype(int), n1_img.affine)
        direc = "/".join(filename.split('/')[:-1])
        direc += '/mask_{}.nii'.format(x)
        nib.save(mask, direc)

masked = np.ma.masked_where(mu_map == 0, mu_map)
plt.imshow(stir_slice[:, :, 1].T, origin="upper", cmap="gray")
plt.imshow(masked.T, cmap="jet", origin="upper", alpha=0.9)
plt.suptitle('MU maps')
plt.show()
repeat = click.confirm('\nRedo analysis?', default=False)

sorted_indices = np.argsort(corr_sig)[::-1]


def save_to_video(filename):
    """If we want to export the variances as a video, this is how we save it out."""
if bvals[index] < 100:
    attenuatedBrainData = segmentedBrainData
else:
    # Convert bvecs to angles
    x = bvecRotated[0]
    y = bvecRotated[1]
    z = bvecRotated[2]
    r, theta, phi = shm.cart2sphere(x, y, z)

    # Make design matrix
    B, m, n = shm.real_sym_sh_basis(order, theta, phi)

    # Get attenuated data
    print('Attenuating volume ' + str(index))
    if bvals[index] < 1500:
        attenuatedBrainData = pl.attenuateImageSphericalHarmonics(
            segmentedBrainData, B, coefficientsb1000, bvals[index], 1000)
    elif bvals[index] > 1500 and bvals[index] < 2500:
        attenuatedBrainData = pl.attenuateImageSphericalHarmonics(
            segmentedBrainData, B, coefficientsb2000, bvals[index], 2000)

attenuatedBrainNii = nib.Nifti1Image(attenuatedBrainData,
                                     segmentedBrain.get_affine(),
                                     segmentedBrain.get_header())
attenuatedBrainNii.to_filename(os.path.join(codeDir, 'attenuatedBrainPy.nii.gz'))
shutil.move(codeDir + "/attenuatedBrainPy.nii.gz",
            outputDir + "/brain" + str(index) + ".nii.gz")

# Register to reference brain to get sizes right
print('Registering volume ' + str(index))
call(["flirt", "-in", outputDir + "/brain" + str(index) + ".nii.gz",
      "-ref", outputDir + "/brainref.nii.gz",
      "-applyxfm",
      "-out", outputDir + "/brain" + str(index) + ".nii.gz"])
import os
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import ensure_list  # assuming nipype's list helper

def nifti_image_files(outdir, filelist, shape):
    # Write one random-valued NIfTI file per requested filename.
    for f in ensure_list(filelist):
        img = np.random.random(shape)
        nb.Nifti1Image(img, np.eye(4), None).to_filename(os.path.join(outdir, f))
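# Usage sketch (assumed context: a test helper writing random NIfTI files);
# the output directory and filenames are illustrative.
import tempfile
outdir = tempfile.mkdtemp()
nifti_image_files(outdir, ['a.nii.gz', 'b.nii.gz'], shape=(5, 5, 5))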
if os.path.exists(clone_path):
    shutil.rmtree(clone_path)
os.makedirs(clone_path)

# create each clone channel
for j in range(len(channels)):
    channel_img = nib.load(channels[j])
    channel_data = channel_img.get_data().copy()
    # get data within roi (label)
    roi_data = channel_data[np.nonzero(label_data)]
    # new data follows gaussian distribution
    mean_value, std_value = [np.mean(roi_data), np.std(roi_data)]
    channel_data[np.nonzero(label_data)] = np.array([
        random.gauss(mean_value, std_value)
        for _ in range(roi_data.shape[0])
    ])
    # create modified channel for clone
    modified_channel = nib.Nifti1Image(channel_data, channel_img.affine)
    # TODO: Normalize image?

    # save clone channel
    channel_name = os.path.basename(channels[j])
    nib.save(modified_channel,
             os.path.join(clone_path, 'clone_V2_' + str(i) + '.' + channel_name))

# save unaltered label for clone
label_name = os.path.basename(label[0])
nib.save(label_img,
         os.path.join(clone_path, 'clone_V2_' + str(i) + '.' + label_name))

# save unaltered mask for clone
mask_img = nib.load(mask[0])
# make new_brainsuite_head_label_volume
# new_brainsuite_head_label_volume = np.zeros(brainsuite_head_label_volume.shape)
# for label_index in label_index_brainsuite_head:
#     mylabel_value = find_subdict_index(inputdict_brainsuite, label_index, mylabel_value=3)
#     boolwhich1 = brainsuite_head_label_volume == label_index
#     new_brainsuite_head_label_volume[boolwhich1] = mylabel_value
new_brainsuite_head_label_volume = label_index_replace(
    brainsuite_head_label_volume, inputdict_brainsuite, mylabel_value=3)

# combine freesurfer label and brainsuite label
boolwhich2 = new_freesurfer_label_volume > 0
# brainsuite_label_volume_flip[boolwhich2] = 0
# test for joint label !!
# brainsuite_head_label_volume_flip[boolwhich2] = 0
# label_joint = brainsuite_head_label_volume_flip + new_freesurfer_label_volume
new_brainsuite_head_label_volume[boolwhich2] = 0
label_joint = new_brainsuite_head_label_volume + new_freesurfer_label_volume

# combine joint label with lesions
for l in lesion_label_path:
    lesion_label = nib.load(l)
    lesion_label = lesion_label.get_fdata()
    lesion_type = sample_info.loc[sample_info['INDI Subject ID'] == int(subjectID[1:]),
                                  ['Stroke type']]
    label_index = 14 if lesion_type.values == 'Embolic' else 15
    boolwhich3 = lesion_label > 0
    label_joint[boolwhich3] = label_index

img = nib.Nifti1Image(label_joint, raw_affine)
img.header.get_xyzt_units()
nib.save(img, output_path + os.path.basename(one_sample).split('.')[0] + sample_ext)
vmax = image.max()

source_affine = np.eye(4)
# Use canonical vectors for affine
# Give the affine an offset
source_affine[:2, 3] = np.array([96, 64])
# Rotate it slightly
angle = np.pi / 180 * 15
rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)],
                            [np.sin(angle), np.cos(angle)]])
source_affine[:2, :2] = rotation_matrix * 2.0  # 2.0mm voxel size

# We need to turn this data into a nibabel image
import nibabel
img = nibabel.Nifti1Image(image[:, :, np.newaxis], affine=source_affine)

#############################################################################
# Now resample the image
from nilearn.image import resample_img
img_in_mm_space = resample_img(img, target_affine=np.eye(4),
                               target_shape=(512, 512, 1))

target_affine_3x3 = np.eye(3) * 2
target_affine_4x4 = np.eye(4) * 2
target_affine_4x4[3, 3] = 1.
img_3d_affine = resample_img(img, target_affine=target_affine_3x3)
img_4d_affine = resample_img(img, target_affine=target_affine_4x4)
target_affine_mm_space_offset_changed = np.eye(4)
target_affine_mm_space_offset_changed[:3, 3] = \
def get_data_each(self, int_file):
    self.idx_file = int_file
    data_temp = self.lr_thread.data_result
    self.data_temp.append(
        ni.Nifti1Image(data_temp,
                       ni.load(self.path_cbf[self.idx_file]).affine,
                       ni.load(self.path_cbf[self.idx_file]).header))
CCA_classes = np.zeros([pred.shape[0], pred.shape[1], pred.shape[2], 8])
error = np.zeros([pred.shape[0], pred.shape[1], pred.shape[2], 8])
for m in range(CCA_classes.shape[-1]):
    labelled_mask, num_labels = ndimage.label(sep_classes[:, :, :, m])
    largest_cc_mask = (labelled_mask == (np.bincount(labelled_mask.flat)[1:].argmax() + 1))
    CCA_classes[:, :, :, m] = largest_cc_mask
    labelled_mask[largest_cc_mask] = 0
    labelled_mask[labelled_mask > 0] = 1
    error[:, :, :, m] = labelled_mask

classes = CCA_classes.argmax(axis=3)
all_errors = error.argmax(axis=3)
all_errors[all_errors > 0] = 1
classes = classes.astype(np.float64)
all_errors = all_errors.astype(np.float64)
classes[all_errors == 1] = np.nan

invalid = np.isnan(classes)
ind = ndimage.distance_transform_edt(invalid,
                                     return_distances=False,
                                     return_indices=True)
postproc = classes[tuple(ind)].astype('uint32')
postproc[postproc == 1] = 500
postproc[postproc == 2] = 600
postproc[postproc == 3] = 420
postproc[postproc == 4] = 550
postproc[postproc == 5] = 205
postproc[postproc == 6] = 820
postproc[postproc == 7] = 850

labBox = nib.Nifti1Image(postproc, b0.affine, new_header_b)
index = n + 1
nib.save(labBox, 'WHS/Results/nii/ct_test_20{}_label.nii.gz'.format(index))
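# The distance_transform_edt call above fills each NaN voxel with its nearest
# valid label. A minimal toy demonstration of that idiom (not from the source):
import numpy as np
from scipy import ndimage

toy = np.array([[1.0, np.nan],
                [np.nan, 2.0]])
invalid = np.isnan(toy)
# indices of the nearest valid (non-NaN) element for every position
nearest = ndimage.distance_transform_edt(invalid, return_distances=False,
                                         return_indices=True)
filled = toy[tuple(nearest)]  # NaNs replaced by their nearest valid values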
def map_to_brain(image_3d, brain_mask, axis="axial", rotations=3):
    '''map_to_brain
    map 3d data matrix onto a brain_mask, specified by axis. If axis is not
    valid, will return False. If valid, will return nibabel.Nifti1Image

    :param image_3d: numpy array read in from read_png. Usually of shape
    (512, 512, N), where N is 3 for 3D image, and 4 for png with alpha. If an
    alpha channel is found, pixels with 0 alpha will be rendered as
    transparent (0).
    '''
    R = image_3d[:,:,0]
    G = image_3d[:,:,1]
    B = image_3d[:,:,2]
    axis = axis.lower()

    # Convert to integer value
    rgb = R
    rgb = (rgb << 8) + G
    rgb = (rgb << 8) + B

    # Normalize
    rgb = (rgb - rgb.mean()) / rgb.std()
    rgb = numpy.rot90(rgb, k=rotations)

    # Houston, we have alpha!
    alpha_channel = False
    if image_3d.shape[2] == 4:
        alpha_channel = True

    if axis == "axial":
        width = brain_mask.shape[0]
        height = brain_mask.shape[1]
    else:
        print "Invalid specification of atlas, %s. Currently only supported is axial." %axis
        return False

    # Only square images, sorry
    if rgb.shape[0] != rgb.shape[1]:
        print "Sorry, only square images are currently supported."
        return False

    # We will interpolate down the largest dimension of the image
    if rgb.shape[0] >= rgb.shape[1]:
        scale = float(width)/rgb.shape[0]
    else:
        scale = float(height)/rgb.shape[1]

    # order 0 means nearest interpolation
    scaled = scipy.ndimage.zoom(rgb, scale, order=0)
    if alpha_channel:
        scaled_alpha = scipy.ndimage.zoom(image_3d[:,:,3], scale, order=0)
        scaled[scaled_alpha==0] = 0

    # Calculate left and right padding, keep as ints to take floor
    padding_width = (width - scaled.shape[0]) / 2
    padding_height = (height - scaled.shape[1]) / 2
    padded = numpy.pad(scaled,
                       ((padding_width, padding_width),   # Default value is 0
                        (padding_height, padding_height)), mode="constant")

    # Create array of same size as brainmap, just in case we are off a bit
    array = numpy.zeros((width, height))

    # This is slow and stupid, but it will work
    for x in range(width):
        for y in range(height):
            try:
                array[x,y] = padded[x,y]
            except:
                pass

    # Let's write the image to all slices
    empty_brain = numpy.zeros(brain_mask.shape)
    mask = brain_mask.get_data()

    # Add support for other axis?
    if axis == "axial":
        for z in range(brain_mask.shape[2]):
            zslice = mask[:,:,z]
            empty_brain[zslice!=0,z] = array[zslice!=0]

    nii = nibabel.Nifti1Image(empty_brain, affine=brain_mask.get_affine())
    return nii
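# Hypothetical usage sketch (file names are illustrative): paint a square PNG
# across the axial slices of a standard-space mask. read_png is the loader
# referenced in the docstring above.
image_3d = read_png("logo.png")
brain_mask = nibabel.load("MNI152_T1_2mm_brain_mask.nii.gz")
nii = map_to_brain(image_3d, brain_mask, axis="axial")
if nii is not False:
    nii.to_filename("brain_logo.nii.gz")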
import numpy as np
import nibabel as nib

def _create_image(image_shape):
    # Deterministic ramp volume with an identity affine, useful in tests.
    data = np.asarray(np.arange(np.prod(image_shape)).reshape(image_shape),
                      dtype=float)
    affine = np.zeros((4, 4))
    np.fill_diagonal(affine, 1)
    return nib.Nifti1Image(data, affine)
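# Usage sketch (hypothetical shape): the ramp volume is handy for asserting
# resampling/IO round-trips in tests.
img = _create_image((4, 4, 4))
assert img.get_fdata().flatten()[-1] == 4 * 4 * 4 - 1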
def TrainODModel():
    # load database
    print('loading memory map db for large dataset')
    #npdatabase = np.load(options.globalnpfile,mmap_mode='r')
    npdatabase = np.load(options.globalnpfile)

    # Training dataset
    dataset_train = ShapesDataset()
    dataset_train.load_shapes('train', npdatabase)
    dataset_train.prepare()

    # Validation dataset
    dataset_val = ShapesDataset()
    dataset_val.load_shapes('validate', npdatabase)
    dataset_val.prepare()

    # ensure we get the same results each time we run the code
    np.random.seed(seed=0)
    #np.random.shuffle(dataset_train.dbsubset)
    #np.random.shuffle(dataset_val.dbsubset)

    ## # subset within bounding box that has liver
    ## totnslice = len(dataset_train.dbsubset) + len(dataset_val.dbsubset)
    ## slicesplit = len(dataset_train.dbsubset)
    ## print("nslice: ", totnslice, " split: ", slicesplit)
    ## # FIXME - Verify stacking indicies
    ## x_train = np.vstack((dataset_train.dbsubset['imagedata'], dataset_val.dbsubset['imagedata']))
    ## y_train = np.vstack((dataset_train.dbsubset['truthdata'], dataset_val.dbsubset['truthdata']))
    ## TRAINING_SLICES = slice(0, slicesplit)
    ## VALIDATION_SLICES = slice(slicesplit, totnslice)

    # In[6]:

    # Load and display random samples
    image_ids = np.random.choice(dataset_train.image_ids, 20)
    print(image_ids)
    import nibabel as nib
    for image2did in image_ids:
        image = dataset_train.load_image(image2did)
        imageinfo = dataset_train.image_reference(image2did)
        mask, class_ids = dataset_train.load_mask(image2did)
        print(imageinfo, image2did, image.shape, mask.shape, class_ids,
              dataset_train.class_names)
        #visualize.display_top_masks(np.squeeze(image), mask, class_ids, dataset_train.class_names, image2did)
        original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(
            dataset_train, config, image2did, use_mini_mask=False)
        #visualize.display_instances(np.repeat(original_image,3,axis=2), gt_bbox, gt_mask, gt_class_id, dataset_train.class_names, figsize=(8, 8))
        objmask = np.zeros(gt_mask.shape[0:2], dtype='uint8')
        for iii, idclass in enumerate(gt_class_id):
            objmask[gt_bbox[iii][0]:gt_bbox[iii][2],
                    gt_bbox[iii][1]:gt_bbox[iii][3]] = 1
            objmask = objmask + idclass * gt_mask[:, :, iii].astype('uint8')
        segnii = nib.Nifti1Image(objmask.astype('uint8'), None)
        segnii.to_filename('tmp/mask.%05d.nii.gz' % image2did)
        imgnii = nib.Nifti1Image(image, None)
        imgnii.to_filename('tmp/image.%05d.nii.gz' % image2did)

    # ## Create Model

    # In[ ]:

    # Create model in training mode
    model = modellib.MaskRCNN(mode="training", config=config, model_dir=MODEL_DIR)

    # In[7]:

    # Which weights to start with?
    init_with = "coco"  # imagenet, coco, or last
    init_with = "last"  # imagenet, coco, or last

    # ## Training
    #
    # Train in two stages:
    # 1. Only the heads. Here we're freezing all the backbone layers and training
    #    only the randomly initialized layers (i.e. the ones that we didn't use
    #    pre-trained weights from MS COCO). To train only the head layers, pass
    #    `layers='heads'` to the `train()` function.
    #
    # 2. Fine-tune all layers. For this simple example it's not necessary, but
    #    we're including it to show the process. Simply pass `layers="all"` to
    #    train all layers.
    if init_with == "imagenet":
        model.load_weights(model.get_imagenet_weights(), by_name=True)
        # NOTE: the original raised a bare string, which is invalid in Python 3.
        raise Exception("freeze backbone? input error")
    elif init_with == "coco":
        # Load weights trained on MS COCO, but skip layers that
        # are different due to the different number of classes
        # See README for instructions to download the COCO weights
        model.load_weights(COCO_MODEL_PATH, by_name=True,
                           exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                    "mrcnn_bbox", "mrcnn_mask", "conv1"])
        # Train the head branches
        # Passing layers="heads" freezes all layers except the head
        # layers. You can also pass a regular expression to select
        # which layers to train by name pattern.
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=100,
                    layers='heads')
    elif init_with == "last":
        # Load the last model you trained and continue training
        model.load_weights(model.find_last(), by_name=True)
        # Fine tune all layers
        # Passing layers="all" trains all layers. You can also
        # pass a regular expression to select which layers to
        # train by name pattern.
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE / 10.,
                    epochs=500,
                    layers="all")

    # Save weights
    # Typically not needed because callbacks save after every epoch
    # Uncomment to save manually
    model_path = os.path.join(MODEL_DIR, "mask_rcnn_tumor.h5")
    model.keras_model.save_weights(model_path)
import nibabel as nib

def save_nifti(fname, data, affine):
    result_img = nib.Nifti1Image(data, affine)
    result_img.to_filename(fname)
def fit(self, niimgs, y):
    """Fit the searchlight

    Parameters
    ----------
    niimg : niimg
        4D image.

    y : 1D array-like
        Target variable to predict. Must have exactly as many elements as
        3D images in niimg.

    Attributes
    ----------
    `scores_` : numpy.ndarray
        search_light scores. Same shape as input parameter
        process_mask_img.
    """
    # Compute world coordinates of all in-mask voxels.
    mask, mask_affine = masking._load_mask_img(self.mask_img)
    mask_coords = np.where(mask != 0)
    mask_coords = np.asarray(mask_coords
                             + (np.ones(len(mask_coords[0]), dtype=np.int),))
    mask_coords = np.dot(mask_affine, mask_coords)[:3].T

    # Compute world coordinates of all in-process mask voxels
    if self.process_mask_img is None:
        process_mask = mask
        process_mask_coords = mask_coords
    else:
        process_mask, process_mask_affine = \
            masking._load_mask_img(self.process_mask_img)
        process_mask_coords = np.where(process_mask != 0)
        process_mask_coords = \
            np.asarray(process_mask_coords
                       + (np.ones(len(process_mask_coords[0]), dtype=np.int),))
        process_mask_coords = np.dot(process_mask_affine,
                                     process_mask_coords)[:3].T

    clf = neighbors.NearestNeighbors(radius=self.radius)
    A = clf.fit(mask_coords).radius_neighbors_graph(process_mask_coords)
    del process_mask_coords, mask_coords
    A = A.tolil()

    # scores is a 1D array of CV scores with length equal to the number
    # of voxels in the processing mask (columns in process_mask)
    X = masking._apply_mask_fmri(
        niimgs,
        nibabel.Nifti1Image(as_ndarray(mask, dtype=np.int8), mask_affine))

    estimator = self.estimator
    if isinstance(estimator, basestring):
        estimator = ESTIMATOR_CATALOG[estimator]()

    scores = search_light(X, y, estimator, A, self.scoring,
                          self.cv, self.n_jobs, self.verbose)
    scores_3D = np.zeros(process_mask.shape)
    scores_3D[process_mask] = scores
    self.scores_ = scores_3D
    return self
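# Hedged usage sketch via nilearn's public SearchLight class; parameter values
# and input names (mask_img, fmri_niimgs, y) are illustrative, and the import
# path may differ in the older nilearn this snippet appears to come from.
from nilearn.decoding import SearchLight
searchlight = SearchLight(mask_img, process_mask_img=None,
                          radius=5.6, n_jobs=1, verbose=1, cv=3)
searchlight.fit(fmri_niimgs, y)
scores = searchlight.scores_  # 3D array, same shape as the process mask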
import numpy as np
import nibabel as nib

def saveNifti(dataIn, fileName, affMatrix=None):
    # Default to an identity affine when none is provided.
    if affMatrix is None:
        affMatrix = np.eye(4)
    img = nib.Nifti1Image(dataIn, affMatrix)
    nib.save(img, fileName)
def pred_to_nib(data_lst: List[np.ndarray], z_lst: List[int], fname_ref: str, fname_out: str,
                slice_axis: int, debug: bool = False, kernel_dim: str = '2d',
                bin_thr: float = 0.5, discard_noise: bool = True,
                postprocessing: dict = None) -> nib.Nifti1Image:
    """Save the network predictions as nibabel object.

    Based on the header of `fname_ref` image, it creates a nibabel object from the
    Network predictions (`data_lst`).

    Args:
        data_lst (list of np arrays): Predictions, either 2D slices or 3D patches.
        z_lst (list of ints): Slice indexes to reconstruct a 3D volume for 2D slices.
        fname_ref (str): Filename of the input image: its header is copied to the
            output nibabel object.
        fname_out (str): If not None, then the generated nibabel object is saved
            with this filename.
        slice_axis (int): Indicates the axis used for the 2D slice extraction:
            Sagittal: 0, Coronal: 1, Axial: 2.
        debug (bool): If True, extended verbosity and intermediate outputs.
        kernel_dim (str): Indicates whether the predictions were done on 2D or 3D
            patches. Choices: '2d', '3d'.
        bin_thr (float): If positive, then the segmentation is binarized with this
            given threshold. Otherwise, a soft segmentation is output.
        discard_noise (bool): If True, predictions that are lower than 0.01 are set
            to zero.
        postprocessing (dict): Contains postprocessing steps to be applied.

    Returns:
        nibabel.Nifti1Image: NiBabel object containing the Network prediction.
    """
    # Check fname_ref extension and update path if not NIfTI
    fname_ref = imed_loader_utils.update_filename_to_nifti(fname_ref)

    # Load reference nibabel object
    nib_ref = nib.load(fname_ref)
    nib_ref_can = nib.as_closest_canonical(nib_ref)

    if kernel_dim == '2d':
        # complete missing z with zeros
        tmp_lst = []
        for z in range(nib_ref_can.header.get_data_shape()[slice_axis]):
            if z not in z_lst:
                tmp_lst.append(np.zeros(data_lst[0].shape))
            else:
                tmp_lst.append(data_lst[z_lst.index(z)])

        if debug:
            logger.debug(f"Len {len(tmp_lst)}")
            for arr in tmp_lst:
                logger.debug(f"Shape element lst {arr.shape}")

        # create data and stack on depth dimension
        arr_pred_ref_space = np.stack(tmp_lst, axis=-1)
    else:
        arr_pred_ref_space = data_lst[0]

    n_channel = arr_pred_ref_space.shape[0]
    oriented_volumes = []
    if len(arr_pred_ref_space.shape) == 4:
        for i in range(n_channel):
            oriented_volumes.append(
                imed_loader_utils.reorient_image(arr_pred_ref_space[i, ], slice_axis,
                                                 nib_ref, nib_ref_can))
        # transpose to locate the channel dimension at the end to properly see image on viewer
        arr_pred_ref_space = np.asarray(oriented_volumes).transpose((1, 2, 3, 0))
    else:
        arr_pred_ref_space = imed_loader_utils.reorient_image(arr_pred_ref_space, slice_axis,
                                                              nib_ref, nib_ref_can)

    if bin_thr >= 0:
        arr_pred_ref_space = imed_postpro.threshold_predictions(arr_pred_ref_space, thr=bin_thr)
    elif discard_noise:
        # discard noise
        arr_pred_ref_space[arr_pred_ref_space <= 1e-2] = 0

    # create nibabel object
    if postprocessing:
        fname_prefix = fname_out.split("_pred.nii.gz")[0] if fname_out is not None else None
        postpro = imed_postpro.Postprocessing(postprocessing, arr_pred_ref_space,
                                              nib_ref.header['pixdim'][1:4], fname_prefix)
        arr_pred_ref_space = postpro.apply()

    # Here we prefer to copy the header (rather than just the affine matrix), in order to
    # preserve the qform_code.
    # See: https://github.com/ivadomed/ivadomed/issues/711
    nib_pred = nib.Nifti1Image(dataobj=arr_pred_ref_space,
                               affine=nib_ref.header.get_best_affine(),
                               header=nib_ref.header.copy())

    # save as NifTI file
    if fname_out is not None:
        nib.save(nib_pred, fname_out)

    return nib_pred
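# Hypothetical call sketch for pred_to_nib: reconstruct ten 64x64 axial
# prediction slices into a volume aligned with a reference image (filenames
# and array contents are illustrative).
import numpy as np
slices = [np.random.rand(64, 64) for _ in range(10)]
nib_pred = pred_to_nib(data_lst=slices, z_lst=list(range(10)),
                       fname_ref='sub-01_T2w.nii.gz',
                       fname_out='sub-01_pred.nii.gz',
                       slice_axis=2, kernel_dim='2d', bin_thr=0.5)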
def main():
    cleanup = True
    verbose = False
    scriptdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))  # script directory

    template = resource_filename(Requirement.parse("pydeface"), 'data/mean_reg2mean.nii.gz')
    facemask = resource_filename(Requirement.parse("pydeface"), "data/facemask.nii.gz")

    try:
        assert os.path.exists(facemask)
    except:
        raise Exception('missing facemask: %s' % facemask)
    try:
        assert os.path.exists(template)
    except:
        raise Exception('missing template: %s' % template)

    if len(sys.argv) < 2:
        usage()
        sys.exit(2)
    else:
        infile = sys.argv[1]

    if len(sys.argv) > 2:
        outfile = sys.argv[2]
    else:
        outfile = infile.replace('.nii.gz', '_defaced.nii.gz')

    try:
        assert not os.path.exists(outfile)
    except:
        raise Exception('%s already exists, remove it first' % outfile)

    if os.environ.has_key('FSLDIR'):
        FSLDIR = os.environ['FSLDIR']
    else:
        print 'FSL must be installed and FSLDIR environment variable must be defined'
        sys.exit(2)

    foo, tmpmat = tempfile.mkstemp()
    tmpmat = tmpmat + '.mat'
    foo, tmpfile = tempfile.mkstemp()
    tmpfile = tmpfile + '.nii.gz'
    if verbose:
        print tmpmat
        print tmpfile
    foo, tmpfile2 = tempfile.mkstemp()
    foo, tmpmat2 = tempfile.mkstemp()

    print 'defacing', infile

    # register template to infile
    flirt = fsl.FLIRT()
    flirt.inputs.cost_func = 'mutualinfo'
    flirt.inputs.in_file = template
    flirt.inputs.out_matrix_file = tmpmat
    flirt.inputs.out_file = tmpfile2
    flirt.inputs.reference = infile
    flirt.run()

    # warp facemask to infile
    flirt = fsl.FLIRT()
    flirt.inputs.in_file = facemask
    flirt.inputs.in_matrix_file = tmpmat
    flirt.inputs.apply_xfm = True
    flirt.inputs.reference = infile
    flirt.inputs.out_file = tmpfile
    flirt.inputs.out_matrix_file = tmpmat2
    flirt.run()

    # multiply mask by infile and save
    infile_img = nibabel.load(infile)
    tmpfile_img = nibabel.load(tmpfile)
    outdata = infile_img.get_data() * tmpfile_img.get_data()
    outfile_img = nibabel.Nifti1Image(outdata, infile_img.get_affine(), infile_img.get_header())
    outfile_img.to_filename(outfile)

    if cleanup:
        os.remove(tmpfile)
        os.remove(tmpfile2)
        os.remove(tmpmat)
def plot_brainrsa_surface(img, threshold=None):
    """
    Plot the RSA result onto a brain surface

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting of more than
        n voxels will be visible. If it is None, the threshold correction
        will not be applied.
    """
    imgarray = nib.load(img).get_data()

    # NOTE: the original test `(imgarray == np.nan).all()` is always False;
    # np.isnan() is the correct check.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_data()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)

        fsaverage = datasets.fetch_surf_fsaverage(mesh='fsaverage')
        texture_left = surface.vol_to_surf(img, fsaverage.pial_left)
        texture_right = surface.vol_to_surf(img, fsaverage.pial_right)
        plotting.plot_surf_stat_map(fsaverage.pial_left, texture_left,
                                    hemi='left', threshold=0.1,
                                    bg_map=fsaverage.sulc_right, colorbar=False,
                                    vmax=0.8, darkness=0.7)
        plotting.plot_surf_stat_map(fsaverage.pial_right, texture_right,
                                    hemi='right', threshold=0.1,
                                    bg_map=fsaverage.sulc_right, colorbar=True,
                                    vmax=0.8, darkness=0.7)
        plotting.plot_surf_stat_map(fsaverage.pial_right, texture_left,
                                    hemi='left', threshold=0.1,
                                    bg_map=fsaverage.sulc_right, colorbar=False,
                                    vmax=0.8, darkness=0.7)
        plotting.plot_surf_stat_map(fsaverage.pial_left, texture_right,
                                    hemi='right', threshold=0.1,
                                    bg_map=fsaverage.sulc_right, colorbar=True,
                                    vmax=0.8, darkness=0.7)
        plt.show()
# Load data
img = nib.load('./data.nii.gz')
data = img.get_data()

# Build mask
gtab = dpg.gradient_table('./bvals', './bvecs', b0_threshold=10)
mean_b0 = np.mean(data[..., gtab.b0s_mask], -1)
_, mask = median_otsu(mean_b0, 4, 2, False,
                      vol_idx=np.where(gtab.b0s_mask), dilate=1)
if not op.exists('./mask.nii.gz'):
    nib.save(nib.Nifti1Image(mask.astype(int), img.affine), 'mask.nii.gz')

# Denoise
from dipy.denoise import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
sigma = estimate_sigma(data)
denoised_data = nlmeans.nlmeans(data, sigma=sigma, mask=mask)
if not op.exists('./denoised_data.nii.gz'):
    nib.save(nib.Nifti1Image(denoised_data, img.affine), 'denoised_data.nii.gz')

# Tensor model
import dipy.reconst.dti as dti
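# A minimal continuation sketch (an assumption, using the standard dipy API,
# not code from the source): fit the tensor model to the denoised data and
# save an FA map.
ten_model = dti.TensorModel(gtab)
ten_fit = ten_model.fit(denoised_data, mask=mask)
nib.save(nib.Nifti1Image(ten_fit.fa.astype(np.float32), img.affine), 'fa.nii.gz')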