def __init__(self, data, Y, algorithm=None, cv_dict=None, mask=None,
             output_dir='.', **kwargs):
    """ Initialize Predict.

    Args:
        data: nibabel data instance
        Y: vector of training labels
        algorithm: Algorithm to use for prediction. Must be one of 'svm',
            'svr', 'linear', 'logistic', 'lasso', 'ridge',
            'ridgeClassifier', 'randomforest', or 'randomforestClassifier'
        cv_dict: Type of cross_validation to use. A dictionary of
            {'type': 'kfolds', 'n_folds': n},
            {'type': 'kfolds', 'n_folds': n, 'subject_id': holdout}, or
            {'type': 'loso', 'subject_id': holdout},
            where n = number of folds and holdout = vector of subject ids
            that corresponds to self.Y
        mask: binary nibabel mask
        output_dir: Directory to use for writing all outputs
        **kwargs: Additional keyword arguments to pass to the prediction
            algorithm

    """
    self.output_dir = output_dir

    if mask is not None:
        if type(mask) is not nib.nifti1.Nifti1Image:
            raise ValueError("mask is not a nibabel instance")
        self.mask = mask
    else:
        self.mask = nib.load(os.path.join(get_resource_path(),
                                          'MNI152_T1_2mm_brain_mask.nii.gz'))

    if type(data) is list:
        data = nib.concat_images(data)

    if not isinstance(data, (nib.nifti1.Nifti1Image, nib.nifti1.Nifti1Pair)):
        raise ValueError("data is not a nibabel instance")

    # Use self.mask so the default mask is applied when mask=None
    self.nifti_masker = NiftiMasker(mask_img=self.mask)
    self.data = self.nifti_masker.fit_transform(data)

    if type(Y) is list:
        Y = np.array(Y)
    if self.data.shape[0] != len(Y):
        raise ValueError("Y does not match the correct size of data")
    self.Y = Y

    if algorithm is not None:
        self.set_algorithm(algorithm, **kwargs)

    if cv_dict is not None:
        self.cv = set_cv(cv_dict)
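# A minimal usage sketch for the initializer above, assuming it belongs to
# the Predict class and that `func_img` (a 4D nibabel image) and `labels`
# (a list of training values) are supplied by the caller; both names are
# hypothetical:
#
# model = Predict(func_img, labels, algorithm='ridge',
#                 cv_dict={'type': 'kfolds', 'n_folds': 5},
#                 output_dir='./predict_out')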
def __init__(self, data=None, Y=None, X=None, mask=None, output_file=None,
             **kwargs):
    if mask is not None:
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str and os.path.isfile(mask):
                mask = nib.load(mask)
            else:
                raise ValueError("mask is not a nibabel instance")
        self.mask = mask
    else:
        self.mask = nib.load(os.path.join(get_resource_path(),
                                          'MNI152_T1_2mm_brain_mask.nii.gz'))
    self.nifti_masker = NiftiMasker(mask_img=self.mask)

    if data is not None:
        if type(data) is str:
            data = nib.load(data)
            self.data = self.nifti_masker.fit_transform(data)
        elif type(data) is list:
            # Load and transform each image in the list separately
            # (nib.concat_images(data) can't handle images of different
            # sizes)
            self.data = []
            for i in data:
                if isinstance(i, six.string_types):
                    self.data.append(
                        self.nifti_masker.fit_transform(nib.load(i)))
                elif isinstance(i, nib.Nifti1Image):
                    self.data.append(self.nifti_masker.fit_transform(i))
            self.data = np.array(self.data)
        elif isinstance(data, nib.Nifti1Image):
            self.data = self.nifti_masker.fit_transform(data)
        else:
            raise ValueError("data is not a nibabel instance")
        # Collapse any extra dimension
        if any([x == 1 for x in self.data.shape]):
            self.data = self.data.squeeze()
    else:
        self.data = np.array([])

    if Y is not None:
        if type(Y) is str and os.path.isfile(Y):
            Y = pd.read_csv(Y, header=None, index_col=None)
        if isinstance(Y, pd.DataFrame):
            if self.data.shape[0] != len(Y):
                raise ValueError("Y does not match the correct size of data")
            self.Y = Y
        else:
            raise ValueError("Make sure Y is a pandas data frame.")
    else:
        self.Y = pd.DataFrame()

    if X is not None:
        if self.data.shape[0] != X.shape[0]:
            raise ValueError("X does not match the correct size of data")
        self.X = X
    else:
        self.X = pd.DataFrame()

    if output_file is not None:
        self.file_name = output_file
    else:
        self.file_name = []
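# A minimal usage sketch for the initializer above, assuming it is the
# Brain_Data constructor from nltools.data; the image and csv file names
# are hypothetical:
#
# dat = Brain_Data(data='sub01_beta.nii.gz', Y='sub01_labels.csv')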
def create_sphere(coordinates, radius=5, mask=None):
    """ Generate a set of spheres in the brain mask space

    Args:
        coordinates: a vector of sphere centers of the form [px, py, pz] or
            [[px1, py1, pz1], ..., [pxn, pyn, pzn]]
        radius: vector of radius. Will create multiple spheres if
            len(radius) > 1
        mask: binary nibabel mask (defaults to the 2mm MNI brain mask)

    """
    if mask is not None:
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str and os.path.isfile(mask):
                mask = nib.load(mask)
            else:
                raise ValueError("mask is not a nibabel instance or a valid "
                                 "file name")
    else:
        mask = nib.load(os.path.join(get_resource_path(),
                                     'MNI152_T1_2mm_brain_mask.nii.gz'))
    dims = mask.get_data().shape

    def sphere(r, p, mask):
        """ create a sphere of given radius at some point p in the brain mask

        Args:
            r: radius of the sphere
            p: point (in coordinates of the brain mask) of the center of the
                sphere

        """
        dims = mask.shape
        x, y, z = np.ogrid[-p[0]:dims[0] - p[0],
                           -p[1]:dims[1] - p[1],
                           -p[2]:dims[2] - p[2]]
        mask_r = x * x + y * y + z * z <= r * r
        activation = np.zeros(dims)
        activation[mask_r] = 1
        activation = np.multiply(activation, mask.get_data())
        activation = nib.Nifti1Image(activation, affine=np.eye(4))
        # return the 3D numpy matrix of zeros containing the sphere as a
        # region of ones
        return activation.get_data()

    # Initialize Spheres with options for multiple radii and centers of the
    # spheres (or just an int and a 3D list)
    if type(radius) is int:
        radius = [radius]
    if coordinates is None:
        # default value for centers: one center of the volume per radius
        coordinates = [[dims[0] // 2, dims[1] // 2,
                        dims[2] // 2]] * len(radius)
    elif type(coordinates) is list and type(coordinates[0]) is int \
            and len(radius) == 1:
        coordinates = [coordinates]
    if type(radius) is list and type(coordinates) is list \
            and len(radius) == len(coordinates):
        A = np.zeros_like(mask.get_data())
        for i in range(len(radius)):
            A = np.add(A, sphere(radius[i], coordinates[i], mask))
        nifti_sphere = nib.Nifti1Image(A.astype(np.float32),
                                       affine=mask.affine)
        return nifti_sphere
    else:
        raise ValueError("Data type for sphere or radius(ii) or center(s) "
                         "not recognized.")
def resolve_mni_path(MNI_Template):
    """Helper function to resolve MNI path based on MNI_Template prefs setting."""

    res = MNI_Template["resolution"]
    m = MNI_Template["mask_type"]
    if not isinstance(res, str):
        raise ValueError("resolution must be provided as a string!")
    if not isinstance(m, str):
        raise ValueError("mask_type must be provided as a string!")
    if res == "3mm":
        if m == "with_ventricles":
            MNI_Template["mask"] = os.path.join(
                get_resource_path(), "MNI152_T1_3mm_brain_mask.nii.gz")
        elif m == "no_ventricles":
            MNI_Template["mask"] = os.path.join(
                get_resource_path(),
                "MNI152_T1_3mm_brain_mask_no_ventricles.nii.gz")
        else:
            raise ValueError(
                "Available mask_types are 'with_ventricles' or "
                "'no_ventricles'")
        MNI_Template["plot"] = os.path.join(get_resource_path(),
                                            "MNI152_T1_3mm.nii.gz")
        MNI_Template["brain"] = os.path.join(get_resource_path(),
                                             "MNI152_T1_3mm_brain.nii.gz")
    elif res == "2mm":
        if m == "with_ventricles":
            MNI_Template["mask"] = os.path.join(
                get_resource_path(), "MNI152_T1_2mm_brain_mask.nii.gz")
        elif m == "no_ventricles":
            MNI_Template["mask"] = os.path.join(
                get_resource_path(),
                "MNI152_T1_2mm_brain_mask_no_ventricles.nii.gz")
        else:
            raise ValueError(
                "Available mask_types are 'with_ventricles' or "
                "'no_ventricles'")
        MNI_Template["plot"] = os.path.join(get_resource_path(),
                                            "MNI152_T1_2mm.nii.gz")
        MNI_Template["brain"] = os.path.join(get_resource_path(),
                                             "MNI152_T1_2mm_brain.nii.gz")
    else:
        raise ValueError("Available templates are '2mm' or '3mm'")
    return MNI_Template
def __init__(self, brain_mask=None, output_dir=None):  # no scoring param
    # self.resource_folder = os.path.join(os.getcwd(),'resources')
    if output_dir is None:
        self.output_dir = os.getcwd()
    else:
        self.output_dir = output_dir

    if type(brain_mask) is str:
        brain_mask = nib.load(brain_mask)
    elif brain_mask is None:
        brain_mask = nib.load(os.path.join(get_resource_path(),
                                           'MNI152_T1_2mm_brain_mask.nii.gz'))
    elif type(brain_mask) is not nib.nifti1.Nifti1Image:
        print(brain_mask)
        print(type(brain_mask))
        raise ValueError("brain_mask is not a string or a nibabel instance")
    self.brain_mask = brain_mask
    self.nifti_masker = NiftiMasker(mask_img=self.brain_mask)
def _save_plot(self, predictor):
    """ Save Plots.

    Args:
        predictor: predictor instance

    Returns:
        predictor_weightmap_montage.png: Will output a montage of axial
            slices of weightmap
        predictor_prediction.png: Will output a plot of prediction

    """
    if not os.path.isdir(self.output_dir):
        os.makedirs(self.output_dir)

    if self.algorithm == 'lassopcr':
        coef = np.dot(self._pca.components_.T, self._lasso.coef_)
        coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
    elif self.algorithm == 'pcr':
        coef = np.dot(self._pca.components_.T, self._regress.coef_)
        coef_img = self.nifti_masker.inverse_transform(np.transpose(coef))
    else:
        coef_img = self.nifti_masker.inverse_transform(predictor.coef_)

    overlay_img = nib.load(os.path.join(get_resource_path(),
                                        'MNI152_T1_2mm_brain.nii.gz'))

    fig1 = plot_stat_map(coef_img, overlay_img,
                         title=self.algorithm + " weights",
                         cut_coords=range(-40, 40, 10), display_mode='z')
    fig1.savefig(os.path.join(self.output_dir,
                              self.algorithm + '_weightmap_axial.png'))

    if self.prediction_type == 'classification':
        if self.algorithm not in ['svm', 'ridgeClassifier',
                                  'ridgeClassifierCV']:
            fig2 = probability_plot(self.stats_output)
            fig2.savefig(os.path.join(self.output_dir,
                                      self.algorithm + '_prob_plot.png'))
        else:
            fig2 = dist_from_hyperplane_plot(self.stats_output)
            fig2.savefig(os.path.join(
                self.output_dir,
                self.algorithm + '_Distance_from_Hyperplane_xval.png'))
            if self.algorithm == 'svm' and self.predictor.probability:
                fig3 = probability_plot(self.stats_output)
                fig3.savefig(os.path.join(self.output_dir,
                                          self.algorithm + '_prob_plot.png'))
    elif self.prediction_type == 'prediction':
        fig2 = scatterplot(self.stats_output)
        fig2.savefig(os.path.join(self.output_dir,
                                  self.algorithm + '_scatterplot.png'))
def __init__(self, data, parallel_out=None, process_mask=None, radius=4,
             kwargs=None):  # no scoring param
    '''The __init__ function gives the PBS_Job object access to brain data,
    brain mask, and all the other fundamental parameters it needs to run the
    task. Note: the PBS_Job runs on every core in a distributed task and
    each of those cores gets an identical copy of these class variables'''
    self.data = data

    # set up parallel_out
    if parallel_out is None:
        os.system("mkdir parallel_out")
        self.parallel_out = os.path.join(os.getcwd(), 'parallel_out')
    elif type(parallel_out) is str:
        os.system("mkdir " + parallel_out)  # make directory if it does not exist
        self.parallel_out = parallel_out
    else:
        print(type(parallel_out))
        raise ValueError("parallel_out should be a string")

    os.system('mkdir ' + os.path.join(self.parallel_out, 'core_out'))
    self.core_out = os.path.join(self.parallel_out, 'core_out')

    # set up process_mask
    if type(process_mask) is str:
        process_mask = nib.load(process_mask)
    elif process_mask is None:
        process_mask = nib.load(
            os.path.join(get_resource_path(), "FSL_RIns_thr0.nii.gz"))
    elif type(process_mask) is not nib.nifti1.Nifti1Image:
        print(process_mask)
        print(type(process_mask))
        raise ValueError("process_mask is not a nibabel instance")
    self.process_mask = process_mask

    # set up other parameters
    self.radius = radius
    self.kwargs = kwargs
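# A minimal usage sketch, assuming this initializer belongs to the PBS_Job
# class used to run distributed searchlight jobs; `brain_data` is a
# hypothetical 4D nibabel image supplied by the caller:
#
# job = PBS_Job(brain_data, parallel_out='/scratch/parallel_out', radius=6)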
#########################################################################
# Load and Manipulate an Onsets File
# ------------------------------------
#
# Nltools provides basic file-reading support for 2 or 3 column formatted
# onset files. Users can look at the onsets_to_dm() function as a template
# to build more complex file readers if desired, or to see additional
# features. Here we simply point to an onsets file where each event lasted
# exactly 1 TR, provide some basic experiment metadata, add an intercept,
# and get back a basic design matrix.

from nltools.utils import get_resource_path
from nltools.file_reader import onsets_to_dm
from nltools.data import Design_Matrix
import os

onsetsFile = os.path.join(get_resource_path(), 'onsets_example.txt')
dm = onsets_to_dm(onsetsFile, TR=2.0, runLength=160, sort=True,
                  addIntercept=True)

#########################################################################
# The class stores basic metadata, including convolution functions (the
# default is the glover HRF), whether convolution has been performed, and
# whether the model contains a constant term.

print(dm.info())

#########################################################################
# We can easily visualize the design matrix too

dm.heatmap()

#########################################################################
# We can also add nth-order polynomial terms. In this case we'll add a
# linear term to capture linear trends, as shown below. By default the
# class will add all lower-order polynomials, but it is smart enough to
# realize we already have a constant, so that won't be duplicated.
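#########################################################################
# A sketch of that step, assuming this release of Design_Matrix exposes an
# add_poly(order) method (the exact signature may vary across nltools
# versions):

dm_with_lin = dm.add_poly(1)
dm_with_lin.heatmap()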
def Couple_Preproc_Pipeline(base_dir=None, output_dir=None, subject_id=None,
                            spm_path=None):
    """ Create a preprocessing workflow for the Couples Conflict Study using
    nipype

    Args:
        base_dir: path to data folder where raw subject folder is located
        output_dir: path to where key output files should be saved
        subject_id: subject_id (str)
        spm_path: path to spm folder

    Returns:
        workflow: a nipype workflow that can be run

    """

    from nipype.interfaces.dcm2nii import Dcm2nii
    from nipype.interfaces.fsl import Merge, TOPUP, ApplyTOPUP
    import nipype.interfaces.io as nio
    import nipype.interfaces.utility as util
    from nipype.interfaces.utility import Merge as Merge_List
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.fsl.maths import UnaryMaths
    from nipype.interfaces.nipy.preprocess import Trim
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces import spm
    from nipype.interfaces.spm import Normalize12
    from nipype.algorithms.misc import Gunzip
    from nipype.interfaces.nipy.preprocess import ComputeMask
    import nipype.interfaces.matlab as mlab
    from nltools.utils import get_resource_path, get_vox_dims, get_n_volumes
    from nltools.interfaces import (Plot_Coregistration_Montage,
                                    PlotRealignmentParameters,
                                    Create_Covariates)
    import os
    import glob

    ########################################
    ## Setup Paths and Nodes
    ########################################

    # Specify Paths
    canonical_file = os.path.join(spm_path, 'canonical', 'single_subj_T1.nii')
    template_file = os.path.join(spm_path, 'tpm', 'TPM.nii')

    # Set the way matlab should be called
    mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
    mlab.MatlabCommand.set_default_paths(spm_path)

    # Get File Names for different types of scans. Parse into separate
    # processing streams
    datasource = Node(interface=nio.DataGrabber(
        infields=['subject_id'], outfields=['struct', 'ap', 'pa']),
        name='datasource')
    datasource.inputs.base_directory = base_dir
    datasource.inputs.template = '*'
    datasource.inputs.field_template = {
        'struct': '%s/Study*/t1w_32ch_mpr_08mm*',
        'ap': '%s/Study*/distortion_corr_32ch_ap*',
        'pa': '%s/Study*/distortion_corr_32ch_pa*'}
    datasource.inputs.template_args = {
        'struct': [['subject_id']],
        'ap': [['subject_id']],
        'pa': [['subject_id']]}
    datasource.inputs.subject_id = subject_id
    datasource.inputs.sort_filelist = True

    # iterate over functional scans to define paths
    scan_file_list = glob.glob(
        os.path.join(base_dir, subject_id, 'Study*', '*'))
    func_list = [s for s in scan_file_list if "romcon_ap_32ch_mb8" in s]
    func_list = [s for s in func_list if "SBRef" not in s]  # Exclude sbref for now.
    func_source = Node(interface=util.IdentityInterface(fields=['scan']),
                       name="func_source")
    func_source.iterables = ('scan', func_list)

    # Create Separate Converter Nodes for each different type of file.
    # (dist corr scans need to be done before functional)
    ap_dcm2nii = Node(interface=Dcm2nii(), name='ap_dcm2nii')
    ap_dcm2nii.inputs.gzip_output = True
    ap_dcm2nii.inputs.output_dir = '.'
    ap_dcm2nii.inputs.date_in_filename = False

    pa_dcm2nii = Node(interface=Dcm2nii(), name='pa_dcm2nii')
    pa_dcm2nii.inputs.gzip_output = True
    pa_dcm2nii.inputs.output_dir = '.'
    pa_dcm2nii.inputs.date_in_filename = False

    f_dcm2nii = Node(interface=Dcm2nii(), name='f_dcm2nii')
    f_dcm2nii.inputs.gzip_output = True
    f_dcm2nii.inputs.output_dir = '.'
    f_dcm2nii.inputs.date_in_filename = False

    s_dcm2nii = Node(interface=Dcm2nii(), name='s_dcm2nii')
    s_dcm2nii.inputs.gzip_output = True
    s_dcm2nii.inputs.output_dir = '.'
    s_dcm2nii.inputs.date_in_filename = False

    ########################################
    ## Setup Nodes for distortion correction
    ########################################

    # merge output files into list
    merge_to_file_list = Node(interface=Merge_List(2),
                              infields=['in1', 'in2'],
                              name='merge_to_file_list')

    # fsl merge AP + PA files (depends on direction)
    merger = Node(interface=Merge(dimension='t'), name='merger')
    merger.inputs.output_type = 'NIFTI_GZ'

    # use topup to create distortion correction map
    topup = Node(interface=TOPUP(), name='topup')
    topup.inputs.encoding_file = os.path.join(get_resource_path(),
                                              'epi_params_APPA_MB8.txt')
    topup.inputs.output_type = "NIFTI_GZ"
    topup.inputs.config = 'b02b0.cnf'

    # apply topup to all functional images
    apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
    apply_topup.inputs.in_index = [1]
    apply_topup.inputs.encoding_file = os.path.join(
        get_resource_path(), 'epi_params_APPA_MB8.txt')
    apply_topup.inputs.output_type = "NIFTI_GZ"
    apply_topup.inputs.method = 'jac'
    apply_topup.inputs.interp = 'spline'

    # Clear out Zeros from spline interpolation using absolute value.
    abs_maths = Node(interface=UnaryMaths(), name='abs_maths')
    abs_maths.inputs.operation = 'abs'

    ########################################
    ## Preprocessing
    ########################################

    # Trim - remove first 10 TRs
    n_vols = 10
    trim = Node(interface=Trim(), name='trim')
    trim.inputs.begin_index = n_vols

    # Realignment - 6 parameters - realign to first image of very first
    # series.
    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.register_to_mean = True

    # Coregister - 12 parameters
    coregister = Node(interface=spm.Coregister(), name="coregister")
    coregister.inputs.jobtype = 'estwrite'

    # Plot Realignment
    plot_realign = Node(interface=PlotRealignmentParameters(),
                        name="plot_realign")

    # Artifact Detection
    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'SPM'

    # Gunzip - unzip the functional and structural images
    gunzip_struc = Node(Gunzip(), name="gunzip_struc")
    gunzip_func = Node(Gunzip(), name="gunzip_func")

    # Normalize - normalizes functional and structural images to the MNI
    # template
    normalize = Node(interface=Normalize12(jobtype='estwrite',
                                           tpm=template_file),
                     name="normalize")

    # Plot normalization Check
    plot_normalization_check = Node(interface=Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = canonical_file

    # Create Mask
    compute_mask = Node(interface=ComputeMask(), name="compute_mask")
    # remove lower 5% of histogram of mean image
    compute_mask.inputs.m = .05

    # Smooth
    # implicit masking (.im) = 0, dtype = 0
    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = 6

    # Create Covariate matrix
    make_cov = Node(interface=Create_Covariates(), name="make_cov")

    # Create a datasink to clean up output files
    datasink = Node(interface=nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = output_dir
    datasink.inputs.container = subject_id

    ########################################
    # Create Workflow
    ########################################

    workflow = Workflow(name='Preprocessed')
    workflow.base_dir = os.path.join(base_dir, subject_id)
    workflow.connect([
        (datasource, ap_dcm2nii, [('ap', 'source_dir')]),
        (datasource, pa_dcm2nii, [('pa', 'source_dir')]),
        (datasource, s_dcm2nii, [('struct', 'source_dir')]),
        (func_source, f_dcm2nii, [('scan', 'source_dir')]),
        (ap_dcm2nii, merge_to_file_list, [('converted_files', 'in1')]),
        (pa_dcm2nii, merge_to_file_list, [('converted_files', 'in2')]),
        (merge_to_file_list, merger, [('out', 'in_files')]),
        (merger, topup, [('merged_file', 'in_file')]),
        (topup, apply_topup, [('out_fieldcoef', 'in_topup_fieldcoef'),
                              ('out_movpar', 'in_topup_movpar')]),
        (f_dcm2nii, trim, [('converted_files', 'in_file')]),
        (trim, apply_topup, [('out_file', 'in_files')]),
        (apply_topup, abs_maths, [('out_corrected', 'in_file')]),
        (abs_maths, gunzip_func, [('out_file', 'in_file')]),
        (gunzip_func, realign, [('out_file', 'in_files')]),
        (s_dcm2nii, gunzip_struc, [('converted_files', 'in_file')]),
        (gunzip_struc, coregister, [('out_file', 'source')]),
        (coregister, normalize, [('coregistered_source', 'image_to_align')]),
        (realign, coregister, [('mean_image', 'target'),
                               ('realigned_files', 'apply_to_files')]),
        (realign, normalize, [(('mean_image', get_vox_dims),
                               'write_voxel_sizes')]),
        (coregister, normalize, [('coregistered_files', 'apply_to_files')]),
        (normalize, smooth, [('normalized_files', 'in_files')]),
        (realign, compute_mask, [('mean_image', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters'),
                        ('realigned_files', 'realigned_files')]),
        (realign, plot_realign, [('realignment_parameters',
                                  'realignment_parameters')]),
        (normalize, plot_normalization_check, [('normalized_files',
                                                'wra_img')]),
        (realign, make_cov, [('realignment_parameters',
                              'realignment_parameters')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (normalize, datasink, [('normalized_files', 'structural.@normalize')]),
        (coregister, datasink, [('coregistered_source', 'structural.@struct')]),
        (topup, datasink, [('out_fieldcoef', 'distortion.@fieldcoef')]),
        (topup, datasink, [('out_movpar', 'distortion.@movpar')]),
        (smooth, datasink, [('smoothed_files', 'functional.@smooth')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')])
    ])
    return workflow
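# A minimal usage sketch for the pipeline above; the directory and SPM paths
# are hypothetical, and Workflow.run() is the standard nipype entry point:
#
# wf = Couple_Preproc_Pipeline(base_dir='/data/couples',
#                              output_dir='/data/couples/preproc',
#                              subject_id='sub001', spm_path='/opt/spm12')
# wf.run()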
"""
NeuroLearn Preferences
======================

"""

__all__ = ["MNI_Template", "resolve_mni_path"]
__author__ = ["Luke Chang"]
__license__ = "MIT"

import os
from nltools.utils import get_resource_path

MNI_Template = dict(
    resolution="2mm",
    mask_type="with_ventricles",
    mask=os.path.join(get_resource_path(), "MNI152_T1_2mm_brain_mask.nii.gz"),
    plot=os.path.join(get_resource_path(), "MNI152_T1_2mm.nii.gz"),
    brain=os.path.join(get_resource_path(), "MNI152_T1_2mm_brain.nii.gz"),
)
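# A short usage sketch: switch preferences to the 3mm template and resolve
# the concrete file paths with resolve_mni_path (defined earlier in this
# section); this assumes the 3mm resources ship with the package:
#
# MNI_Template["resolution"] = "3mm"
# paths = resolve_mni_path(MNI_Template)
# print(paths["brain"])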
def apply_mask(data=None, weight_map=None, mask=None, method='dot_product',
               save_output=False, output_dir='.'):
    """ Apply Nifti weight map to Nifti Images.

    Args:
        data: nibabel instance of data to be applied
        weight_map: nibabel instance of weight map
        mask: binary nibabel mask
        method: type of pattern expression (e.g., 'dot_product',
            'correlation')
        save_output: Boolean indicating whether or not to save output to
            csv file.
        output_dir: Directory to use for writing all outputs

    Returns:
        pexp: Outputs a vector of pattern expression values

    """

    if mask is not None:
        if type(mask) is not nib.nifti1.Nifti1Image:
            raise ValueError("Mask is not a nibabel instance")
    else:
        mask = nib.load(os.path.join(get_resource_path(),
                                     'MNI152_T1_2mm_brain_mask.nii.gz'))

    if type(data) is not nib.nifti1.Nifti1Image:
        if type(data) is str:
            if os.path.isfile(data):
                data = nib.load(data)
        elif type(data) is list:
            data = nib.funcs.concat_images(data)
        else:
            raise ValueError("Data is not a nibabel instance, list of "
                             "files, or a valid file name.")

    nifti_masker = NiftiMasker(mask_img=mask)
    data_masked = nifti_masker.fit_transform(data)
    if len(data_masked.shape) > 2:
        data_masked = data_masked.squeeze()

    if type(weight_map) is not nib.nifti1.Nifti1Image:
        if type(weight_map) is str:
            if os.path.isfile(weight_map):
                weight_map = nib.load(weight_map)
        elif type(weight_map) is list:
            weight_map = nib.funcs.concat_images(weight_map)
        else:
            raise ValueError("Weight_map is not a nibabel instance, list "
                             "of files, or a valid file name.")
    weight_map_masked = nifti_masker.fit_transform(weight_map)
    if len(weight_map_masked.shape) > 2:
        weight_map_masked = weight_map_masked.squeeze()

    # Calculate pattern expression
    pexp = pd.DataFrame()
    for w in range(0, weight_map_masked.shape[0]):
        if method == 'dot_product':
            pexp = pexp.append(pd.Series(np.dot(
                data_masked, np.transpose(weight_map_masked[w, :]))),
                ignore_index=True)
        elif method == 'correlation':
            pexp = pexp.append(pd.Series(pearson(
                data_masked, weight_map_masked[w, :])), ignore_index=True)
    pexp = pexp.T

    if save_output:
        pexp.to_csv(os.path.join(output_dir,
                                 "Pattern_Expression_" + method + ".csv"))

    return pexp
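# A minimal usage sketch for apply_mask, assuming `beta_imgs` is a list of
# NIfTI file paths and 'weights.nii.gz' is a weight map on disk (both names
# hypothetical):
#
# pexp = apply_mask(data=beta_imgs, weight_map='weights.nii.gz',
#                   method='correlation', save_output=True)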
def test_onsets_to_dm():
    fpath = os.path.join(get_resource_path(), "onsets_example.txt")
    data = pd.read_csv(os.path.join(get_resource_path(),
                                    "onsets_example.txt"))
    sampling_freq = 0.5
    run_length = 1364
    Duration = 10
    TR = 1 / sampling_freq

    # Two-column
    # Test loading from a file
    dm = onsets_to_dm(fpath, sampling_freq, run_length)
    assert isinstance(dm, Design_Matrix)

    # Check it has run_length rows and nStim columns
    assert dm.shape == (run_length, data.Stim.nunique())

    # Get the unique number of presentations of each Stim from the original
    # file
    stim_counts = data.Stim.value_counts(sort=False)[dm.columns]

    # Check there are only as many onsets as occurrences of each Stim
    assert np.allclose(stim_counts.values, dm.sum().values)

    # Three-column with loading from dataframe
    data["Duration"] = Duration
    dm = onsets_to_dm(data, sampling_freq, run_length)

    # Check it has run_length rows and nStim columns
    assert dm.shape == (run_length, data.Stim.nunique())

    # Because timing varies in seconds and isn't TR-locked, each stimulus
    # should last at least Duration/TR TRs and at most Duration/TR + 1 TRs.
    # Check that the total number of TRs for each stimulus lies between
    # count * (Duration/TR) and count * ((Duration/TR) + 1)
    onsets = dm.sum().values
    durations = data.groupby("Stim").Duration.mean().values
    for o, c, d in zip(onsets, stim_counts, durations):
        assert c * (d / TR) <= o <= c * ((d / TR) + 1)

    # Multiple onsets
    dm = onsets_to_dm([data, data], sampling_freq, run_length)

    # Check it has run_length rows and nStim columns
    assert dm.shape == (run_length * 2, data.Stim.nunique())

    # Multiple onsets with polynomials auto-added
    dm = onsets_to_dm([data, data], sampling_freq, run_length, add_poly=2)
    assert dm.shape == (run_length * 2, data.Stim.nunique() + (3 * 2))

    dm = onsets_to_dm([data, data], sampling_freq, run_length, add_poly=2,
                      keep_separate=False)
    assert dm.shape == (run_length * 2, data.Stim.nunique() + 3)

    # Three-column from file with variable durations
    data = pd.read_csv(os.path.join(get_resource_path(),
                                    "onsets_example_with_dur.txt"))
    run_length = 472
    dm = onsets_to_dm(data, sampling_freq, run_length)
    assert dm.shape == (run_length, data.Stim.nunique())

    onsets = dm.sum().values
    stim_counts = data.Stim.value_counts().values
    durations = data.groupby("Stim").Duration.mean().values
    for o, c, d in zip(onsets, stim_counts, durations):
        assert c * (d / TR) <= o <= c * ((d / TR) + 1)
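# This test can be run in isolation with pytest, e.g.:
#   pytest -k test_onsets_to_dm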
def create_sphere(coordinates, radius=5, mask=None):
    """ Generate a set of spheres in the brain mask space

    Args:
        coordinates: a vector of sphere centers of the form [px, py, pz] or
            [[px1, py1, pz1], ..., [pxn, pyn, pzn]]
        radius: vector of radius. Will create multiple spheres if
            len(radius) > 1
        mask: binary nibabel mask (defaults to the 2mm MNI brain mask)

    """
    from nltools.data import Brain_Data

    if mask is not None:
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str and os.path.isfile(mask):
                mask = nib.load(mask)
            else:
                raise ValueError("mask is not a nibabel instance or a valid "
                                 "file name")
    else:
        mask = nib.load(os.path.join(get_resource_path(),
                                     'MNI152_T1_2mm_brain_mask.nii.gz'))
    dims = mask.get_data().shape

    def sphere(r, p, mask):
        """ create a sphere of given radius at some point p in the brain mask

        Args:
            r: radius of the sphere
            p: point (in coordinates of the brain mask) of the center of the
                sphere

        """
        dims = mask.shape
        m = [dims[0] // 2, dims[1] // 2, dims[2] // 2]
        # Build the sphere around the center of the volume...
        x, y, z = np.ogrid[-m[0]:dims[0] - m[0],
                           -m[1]:dims[1] - m[1],
                           -m[2]:dims[2] - m[2]]
        mask_r = x * x + y * y + z * z <= r * r
        activation = np.zeros(dims)
        activation[mask_r] = 1
        # ...then shift it to the requested location via the affine
        translation_affine = np.array([[1, 0, 0, p[0] - m[0]],
                                       [0, 1, 0, p[1] - m[1]],
                                       [0, 0, 1, p[2] - m[2]],
                                       [0, 0, 0, 1]])
        return nib.Nifti1Image(activation, affine=translation_affine)

    # Initialize Spheres with options for multiple radii and centers of the
    # spheres (or just an int and a 3D list)
    if type(radius) is int:
        radius = [radius]
    if coordinates is None:
        # default value for centers: one center of the volume per radius
        coordinates = [[dims[0] // 2, dims[1] // 2,
                        dims[2] // 2]] * len(radius)
    elif type(coordinates) is list and type(coordinates[0]) is int \
            and len(radius) == 1:
        coordinates = [coordinates]
    if type(radius) is list and type(coordinates) is list \
            and len(radius) == len(coordinates):
        A = np.zeros_like(mask.get_data())
        A = Brain_Data(nib.Nifti1Image(A, affine=mask.affine), mask=mask)
        for i in range(len(radius)):
            A = A + Brain_Data(sphere(radius[i], coordinates[i], mask),
                               mask=mask)
        A = A.to_nifti()
        # Binarize the summed spheres
        A.get_data()[A.get_data() > 0.5] = 1
        A.get_data()[A.get_data() < 0.5] = 0
        return A
    else:
        raise ValueError("Data type for sphere or radius(ii) or center(s) "
                         "not recognized.")
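# A short usage sketch for the function above: two 10-voxel-radius spheres
# at hypothetical voxel coordinates in the default 2mm MNI mask, returned
# as one binarized NIfTI image:
#
# roi = create_sphere([[45, 60, 40], [30, 60, 40]], radius=[10, 10])
# roi.to_filename('sphere_roi.nii.gz')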