Example #1
    def similarity(self, image, method='correlation'):
        """ Calculate similarity of Brain_Data() instance with single Brain_Data or Nibabel image

            Args:
                self: Brain_Data instance of data to be applied
                image: Brain_Data or Nibabel instance of weight map

            Returns:
                pexp: Outputs a vector of pattern expression values

        """

        if not isinstance(image, Brain_Data):
            if isinstance(image, nib.Nifti1Image):
                image = Brain_Data(image)
            else:
                raise ValueError("Image is not a Brain_Data or nibabel instance")
        dim = image.shape()

        # Check to make sure masks are the same for each dataset and if not create an intersection mask
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data()==1)!=np.sum(image.nifti_masker.mask_img.get_data()==1):
            new_mask = intersect_masks([self.nifti_masker.mask_img, image.nifti_masker.mask_img], threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(image.to_nifti())
        else:
            data2 = self.data
            image2 = image.data


        # Calculate pattern expression
        if method == 'dot_product':
            if len(image2.shape) > 1:
                if image2.shape[0]>1:
                    pexp = []
                    for i in range(image2.shape[0]):
                        pexp.append(np.dot(data2, image2[i,:]))
                    pexp = np.array(pexp)
                else:
                    pexp = np.dot(data2, image2)
            else:
                pexp = np.dot(data2, image2)
        elif method == 'correlation':
            if len(image2.shape) > 1:
                if image2.shape[0]>1:
                    pexp = []
                    for i in range(image2.shape[0]):
                        pexp.append(pearson(image2[i,:], data2))
                    pexp = np.array(pexp)
                else:
                    pexp = pearson(image2, data2)
            else:
                pexp = pearson(image2, data2)
        return pexp
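
A minimal usage sketch for the method above; "dat" and the weight map file name are hypothetical, not part of the original example:

import nibabel as nib
# dat is assumed to be an existing Brain_Data instance; the file name is hypothetical
weight_map = nib.load('weight_map.nii.gz')
pexp = dat.similarity(weight_map, method='correlation')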
Example #2
def MaskFlatten(concat_dict, mask, iter_n):
    '''Mask image data, convert to 2D feature matrix'''
    nifti_masker = NiftiMasker(mask_img=mask)
    masked_dict = {}
    for i in range(iter_n):
        masked_dict[i] = nifti_masker.fit_transform(concat_dict[i])
    return masked_dict
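
A short usage sketch; runs_dict and the mask file name are hypothetical, with runs_dict assumed to be a dict of 4D images keyed 0..n-1:

# Mask each run and get back a dict of (n_timepoints, n_voxels) arrays
masked_runs = MaskFlatten(runs_dict, 'mask.nii.gz', iter_n=len(runs_dict))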
Example #3
    def transform(self, imgs, confounds=None):
        """

        Parameters
        ----------
        imgs: list of Niimg-like objects
        """
        self._check_fitted()

        if self.smoothing_fwhm:
            imgs = smooth_img(imgs, self.smoothing_fwhm)

        imgs = [_utils.check_niimg_3d(img) for img in imgs]

        for i, roi in enumerate(self.mask_img_):
            masker = NiftiMasker(mask_img=roi)
            x = masker.fit_transform(imgs)
            if self.extract_funcs is not None:
                x = np.array([FDICT[f][0](x, **FDICT[f][1]) for f in self.extract_funcs])
            if i == 0:
                X = x
            else:
                X = np.concatenate((X, x), axis=0)

        return X.swapaxes(0, 1)
	def _run_interface(self, runtime):
		from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
		from nipype.utils.filemanip import split_filename
		import nibabel as nib
		import os

		functional_filename = self.inputs.in_file
		atlas_filename = self.inputs.atlas_filename
		mask_filename = self.inputs.mask_filename

		# Extracting the ROI signals
		masker = NiftiLabelsMasker(labels_img=atlas_filename,
                           background_label = 0,
                           standardize=True,
                           detrend = True,
                           verbose = 1
                           )
		time_series = masker.fit_transform(functional_filename)

		# Removing the ROI signal from the time series
		nifti_masker = NiftiMasker(mask_img=mask_filename)
		masked_data = nifti_masker.fit_transform(functional_filename, confounds=time_series[...,0])
		masked_img = nifti_masker.inverse_transform(masked_data)

		# Saving the result to disk
		outputs = self._outputs().get()
		fname = self.inputs.in_file
		_, base, _ = split_filename(fname)
		nib.save(masked_img, os.path.abspath(base + '_regressed.nii.gz'))
		return runtime
Example #5
class signal_extractor():

    def __init__(self, dataset = None):
        self.dataset = dataset
        if 'mask' in dataset:
            self.masker = NiftiMasker(mask_img = self.dataset.mask,
                                low_pass = .1,
                                high_pass = .01,
                                smoothing_fwhm =6.,
                                t_r = 1.05,
                                detrend = True,
                                standardize = False,
                                memory_level = 0,
                                verbose=5)
        else:
            self.masker = NiftiMasker(
                                low_pass = .1,
                                high_pass = .01,
                                smoothing_fwhm =6.,
                                t_r = 1.05,
                                detrend = True,
                                standardize = False,
                                memory_level = 0,
                                verbose=5)
    def extract(self):
        for idx, func in enumerate([self.dataset.func1]):
            #add mask, smoothing, filter and detrending


            for i in range(len(self.dataset.subjects)):
                tic = time.clock()
                #extract signal to x
                x = self.masker.fit_transform(func[i])
                print "loading time : "+ str(time.clock() - tic)
                yield x, self.masker
Example #6
def nilearn_denoise(in_file, brain_mask, wm_mask, csf_mask,
                      motreg_file, outlier_file,
                      bandpass, tr ):
    """Clean time series using Nilearn high_variance_confounds to extract 
    CompCor regressors and NiftiMasker for regression of all nuissance regressors,
    detrending, normalziation and bandpass filtering.
    """
    import numpy as np
    import nibabel as nb
    import os
    from nilearn.image import high_variance_confounds
    from nilearn.input_data import NiftiMasker
    from nipype.utils.filemanip import split_filename

    # reload niftis to round affines so that nilearn doesn't complain
    wm_nii=nb.Nifti1Image(nb.load(wm_mask).get_data(), np.around(nb.load(wm_mask).get_affine(), 2), nb.load(wm_mask).get_header())
    csf_nii=nb.Nifti1Image(nb.load(csf_mask).get_data(), np.around(nb.load(csf_mask).get_affine(), 2), nb.load(csf_mask).get_header())
    time_nii=nb.Nifti1Image(nb.load(in_file).get_data(),np.around(nb.load(in_file).get_affine(), 2), nb.load(in_file).get_header())
        
    # infer shape of confound array
    # not ideal
    confound_len = nb.load(in_file).get_data().shape[3]
    
    # create outlier regressors
    outlier_regressor = np.empty((confound_len,1))
    try:
        outlier_val = np.genfromtxt(outlier_file)
    except IOError:
        outlier_val = np.empty((0))
    for index in np.atleast_1d(outlier_val):
        outlier_vector = np.zeros((confound_len, 1))
        outlier_vector[index] = 1
        outlier_regressor = np.hstack((outlier_regressor, outlier_vector))
    
    outlier_regressor = outlier_regressor[:,1::]
        
    # load motion regressors
    motion_regressor=np.genfromtxt(motreg_file)
    
    # extract high variance confounds in wm/csf masks from motion corrected data
    wm_regressor=high_variance_confounds(time_nii, mask_img=wm_nii, detrend=True)
    csf_regressor=high_variance_confounds(time_nii, mask_img=csf_nii, detrend=True)
    
    # create Nifti Masker for denoising
    denoiser=NiftiMasker(mask_img=brain_mask, standardize=True, detrend=True, high_pass=bandpass[1], low_pass=bandpass[0], t_r=tr)
    
    # denoise and return denoise data to img
    confounds=np.hstack((outlier_regressor,wm_regressor, csf_regressor, motion_regressor))
    denoised_data=denoiser.fit_transform(in_file, confounds=confounds)
    denoised_img=denoiser.inverse_transform(denoised_data)
        
    # save  
    _, base, _ = split_filename(in_file)
    img_fname = base + '_denoised.nii.gz'
    nb.save(denoised_img, img_fname)
    
    confound_fname = os.path.join(os.getcwd(), "all_confounds.txt")
    np.savetxt(confound_fname, confounds, fmt="%.10f")
    
    return os.path.abspath(img_fname), confound_fname
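
A usage sketch for the function above; every file name below is hypothetical, and bandpass is passed as [low_pass, high_pass] to match how the function indexes it:

denoised_file, confounds_file = nilearn_denoise(
    in_file='func_moco.nii.gz', brain_mask='brain_mask.nii.gz',
    wm_mask='wm_mask.nii.gz', csf_mask='csf_mask.nii.gz',
    motreg_file='motion_regressors.txt', outlier_file='outliers.txt',
    bandpass=[0.1, 0.01], tr=2.0)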
Example #7
    def apply_mask(self, mask):
        """ Mask Brain_Data instance

        Args:
            mask: mask (Brain_Data or nifti object)
            
        """

        if isinstance(mask,Brain_Data):
            mask = mask.to_nifti() # convert to nibabel
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str:
                if os.path.isfile(mask):
                    mask = nib.load(mask)
                # Check if mask needs to be resampled into Brain_Data mask space
                if not ((self.mask.get_affine()==mask.get_affine()).all()) & (self.mask.shape[0:3]==mask.shape[0:3]):
                    mask = resample_img(mask,target_affine=self.mask.get_affine(),target_shape=self.mask.shape)
            else:
                raise ValueError("Mask is not a nibabel instance, Brain_Data instance, or a valid file name.")

        masked = deepcopy(self)
        nifti_masker = NiftiMasker(mask_img=mask)
        masked.data = nifti_masker.fit_transform(self.to_nifti())
        if len(self.data.shape) > 2:
            masked.data = masked.data.squeeze()
        masked.nifti_masker = nifti_masker
        return masked
def extract_brain_rad(db, rad_column, rad_dir, stat, include_chim=False):
    """Replaces radiation presence by stat on whole brain ROI.

    Assumes the brain mask and radiation nifti file are in rad_dir."""
    brain_mask_file = 'BrainMask_to_rd.nii.gz'
    extracted_rad_stat = {}  # Memoization of radiation statistic
    for idx, row in db.iterrows():
        if row[rad_column] == 1:
            sub_id = row['patient']
            if sub_id in extracted_rad_stat:
                db.loc[idx, rad_column] = extracted_rad_stat[sub_id]
            else:
                mask_path = os.path.join(rad_dir, sub_id, brain_mask_file)
                mask_check = os.path.isfile(mask_path)
                rad_path = os.path.join(rad_dir, sub_id, sub_id + '.nii')
                rad_check = os.path.isfile(rad_path)
                if mask_check and rad_check:
                    masker = NiftiMasker(mask_path)
                    rad_stat = stat(masker.fit_transform(rad_path))
                    extracted_rad_stat[sub_id] = rad_stat
                    db.loc[idx, rad_column] = rad_stat
                else:
                    db.loc[idx, rad_column] = None
        elif not include_chim:
            db.loc[idx, rad_column] = None

    db = db[db[rad_column].notnull()]
    return db
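
A usage sketch; the DataFrame, column name, directory path, and statistic below are hypothetical:

import numpy as np
# db is assumed to be a pandas DataFrame with a 'patient' column and a binary radiation column
db = extract_brain_rad(db, rad_column='radiation', rad_dir='/data/radiation_maps',
                       stat=np.mean, include_chim=False)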
Example #9
def ts(img_path,
	mask=False,
	substitution={},
	):
	"""
	Return the mean and median of a Region of Interest (ROI) time course.

	Parameters
	----------

	img_path : str
		Path to NIfTI file from which the ROI is to be extracted.
	mask : nilearn.NiftiMasker or str, optional
		Nilearn NiftiMasker object to use for masking the desired ROI, or a string specifying the path of a mask file.
	substitution : dict, optional
		A dictionary with keys which include 'subject' and 'session'.
	"""
	if substitution:
		img_path = img_path.format(**substitution)
	img_path = path.abspath(path.expanduser(img_path))
	img = nib.load(img_path)
	try:
		masked_data = mask.fit_transform(img).T
	except:
		mask = path.abspath(path.expanduser(mask))
		mask = NiftiMasker(mask_img=mask)
		masked_data = mask.fit_transform(img).T
	ts_means = np.mean(masked_data, axis=0)
	ts_medians = np.median(masked_data, axis=0)
	return ts_means, ts_medians
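
A usage sketch; the path template, mask file, and substitution values are hypothetical:

means, medians = ts('~/data/{subject}/{session}/func.nii.gz',
                    mask='roi_mask.nii.gz',
                    substitution={'subject': 'sub-01', 'session': 'ses-01'})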
Example #10
    def multivariate_similarity(self, images, method='ols'):
        """ Predict spatial distribution of Brain_Data() instance from linear combination of other Brain_Data() instances or Nibabel images

            Args:
                self: Brain_Data instance of data to be applied
                images: Brain_Data instance of weight map

            Returns:
                out: dictionary of regression statistics {'beta','t','p','df','sigma','residual'}

        """
        ## Notes: Should add ridge, lasso, and elastic net options

        if len(self.shape()) > 1:
            raise ValueError("This method can only decompose a single brain image.")

        if not isinstance(images, Brain_Data):
            raise ValueError("Images are not a Brain_Data instance")
        dim = images.shape()

        # Check to make sure masks are the same for each dataset and if not create an intersection mask
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data()==1)!=np.sum(images.nifti_masker.mask_img.get_data()==1):
            new_mask = intersect_masks([self.nifti_masker.mask_img, images.nifti_masker.mask_img], threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(images.to_nifti())
        else:
            data2 = self.data
            image2 = images.data

        # Add intercept and transpose
        image2 = np.vstack((np.ones(image2.shape[1]),image2)).T

        # Calculate pattern expression
        if method == 'ols':
            b = np.dot(np.linalg.pinv(image2), data2)
            res = data2 - np.dot(image2,b)
            sigma = np.std(res,axis=0)
            stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(image2.T,image2)))**.5).T,np.matrix(sigma))
            t_out = b /stderr
            df = image2.shape[0]-image2.shape[1]
            p = 2*(1-t.cdf(np.abs(t_out),df))

        return {'beta':b, 't':t_out, 'p':p, 'df':df, 'sigma':sigma, 'residual':res}
Example #11
def apply_mask(data=None, weight_map=None, mask=None, method='dot_product', save_output=False, output_dir='.'):
    """ Apply Nifti weight map to Nifti Images. 
 
        Args:
            data: nibabel instance of data to be applied
            weight_map: nibabel instance of weight map
            mask: binary nibabel mask
            method: type of pattern expression (e.g., 'dot_product', 'correlation')
            save_output: Boolean indicating whether or not to save output to csv file.
            output_dir: Directory to use for writing all outputs
            **kwargs: Additional parameters to pass

        Returns:
            pexp: Outputs a vector of pattern expression values

    """ 

    if mask is not None:
        if type(mask) is not nib.nifti1.Nifti1Image:
            raise ValueError("Mask is not a nibabel instance")
    else:
        mask = nib.load(os.path.join(resource_dir,'MNI152_T1_2mm_brain_mask_dil.nii.gz'))
    
    if type(data) is not nib.nifti1.Nifti1Image:
        raise ValueError("Data is not a nibabel instance")
    
    nifti_masker = NiftiMasker(mask_img=mask)
    data_masked = nifti_masker.fit_transform(data)

    if type(weight_map) is not nib.nifti1.Nifti1Image:
        raise ValueError("Weight_map is not a nibabel instance")
    
    weight_map_masked = nifti_masker.fit_transform(weight_map)

    # Calculate pattern expression
    if method == 'dot_product':
        pexp = np.dot(data_masked,np.transpose(weight_map_masked)).squeeze()
    elif method == 'correlation':
        pexp = pearson(data_masked,weight_map_masked)

    if save_output:
        np.savetxt(os.path.join(output_dir,"Pattern_Expression_" + method + ".csv"), pexp, delimiter=",")

    return pexp
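
A usage sketch for the standalone function above; the file names are hypothetical, and the mask argument is optional since the function falls back to the MNI brain mask bundled with its resource directory:

import nibabel as nib
pexp = apply_mask(data=nib.load('beta_map.nii.gz'),
                  weight_map=nib.load('weight_map.nii.gz'),
                  method='correlation')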
Example #12
def significant_signal(data_path,
	substitution={},
	mask_path='',
	exclude_ones=False,
	):
	"""Return the mean and median inverse logarithm of a p-value map.

	Parameters
	----------

	data_path : str
		Path to a p-value map in NIfTI format.
	mask_path : str
		Path to a region of interest map in NIfTI format.
		THIS IS ALMOST ALWAYS REQUIRED, as NIfTI statistic images populate the whole 3D circumscribed space around your structure of interest,
		and commonly assign null values to the background.
		In an inverse logarithm computation, null corresponds to infinity, which can considerably bias the evaluation.
	substitution : dict
		Dictionary whose keys are format identifiers present in `data_path` and whose values are strings.

	Returns
	-------

	mean : float
	median : float
	"""

	if substitution:
		data_path = data_path.format(**substitution)
	data_path = path.abspath(path.expanduser(data_path))
	try:
		img = nib.load(data_path)
	except FileNotFoundError:
		return float('NaN'), float('NaN')
	if mask_path:
		mask_path = path.abspath(path.expanduser(mask_path))
		masker = NiftiMasker(mask_img=mask_path)
		masked_data = masker.fit_transform(img).T
		data = masked_data[~np.isnan(masked_data)]
	else:
		data = img.get_data()
		data = data[~np.isnan(data)]
	# We interpret zero as the lowest p-value, and conservatively estimate it to be equal to just under half of the smallest value in the defined range
	nonzero = data[np.nonzero(data)]
	data_min = np.min(nonzero)
	data_min = data_min*0.49
	data[data == 0] = data_min
	if exclude_ones:
		data = data[data!=1]
	data = -np.log10(data)
	# We use np.ma.median() because life is complicated:
	# https://github.com/numpy/numpy/issues/7330
	median = np.ma.median(data, axis=None)
	mean = np.mean(data)

	return mean, median
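
A usage sketch; the p-value map template, mask file, and substitution values are hypothetical:

mean, median = significant_signal('~/stats/{subject}_pFWE.nii.gz',
                                  substitution={'subject': 'sub-01'},
                                  mask_path='roi_mask.nii.gz')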
Example #13
def _vectorize_nii(in_data_file, mask_file, parcellation_path, fwhm):
    from nilearn.input_data import NiftiMasker, NiftiLabelsMasker
    import nibabel as nib

    if parcellation_path is None:
        masker = NiftiMasker(mask_img=mask_file, smoothing_fwhm=fwhm)
    else:
        masker = NiftiLabelsMasker(labels_img=parcellation_path, smoothing_fwhm=fwhm)

    vectorized_data = masker.fit_transform(in_data_file)
    return vectorized_data, masker
def map_threshold(stat_img, mask_img, threshold, height_control='fpr',
                  cluster_threshold=0):
    """ Threshold the provvided map

    Parameters
    ----------
    stat_img : Niimg-like object,
       statistical image (presumably in z scale)

    mask_img : Niimg-like object,
        mask image

    threshold: float,
        cluster forming threshold (either a p-value or z-scale value)

    height_control: string
        false positive control meaning of cluster forming
        threshold: 'fpr'|'fdr'|'bonferroni'|'none'

    cluster_threshold : float, optional
        cluster size threshold

    Returns
    -------
    thresholded_map : Nifti1Image,
        the stat_map thresholded at the prescribed voxel- and cluster-level
    """
    # Masking
    masker = NiftiMasker(mask_img=mask_img)
    stats = np.ravel(masker.fit_transform(stat_img))
    n_voxels = np.size(stats)

    # Thresholding
    if height_control == 'fpr':
        z_th = norm.isf(threshold)
    elif height_control == 'fdr':
        z_th = fdr_threshold(stats, threshold)
    elif height_control == 'bonferroni':
        z_th = norm.isf(threshold / n_voxels)
    else:  # Brute-force thresholding
        z_th = threshold
    stats *= (stats > z_th)

    stat_map = masker.inverse_transform(stats).get_data()

    # Extract connected components above threshold
    label_map, n_labels = label(stat_map > z_th)
    labels = label_map[(masker.mask_img_.get_data() > 0)]
    for label_ in range(1, n_labels + 1):
        if np.sum(labels == label_) < cluster_threshold:
            stats[labels == label_] = 0

    return masker.inverse_transform(stats)
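
A usage sketch with hypothetical file names: a z-map thresholded at p < 0.001 (FPR control) with a 10-voxel cluster extent:

thresholded_img = map_threshold('zmap.nii.gz', 'brain_mask.nii.gz',
                                threshold=0.001, height_control='fpr',
                                cluster_threshold=10)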
def preprocess_varpar(num, subj, subj_dir, **kwargs):
    from nistats.design_matrix import make_design_matrix
    from nistats.first_level_model import run_glm
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    mask = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'brain_mask.nii.gz')
    bold = load(bold_path)
    masker = NiftiMasker(mask)
    data = masker.fit_transform(bold)
    dmat = make_design_matrix(np.arange(data.shape[0])*TR, hrf_model='fir', drift_order=5,
                              **kwargs)
    labels, results = run_glm(data, dmat, noise_model='ols', verbose=1)
    img = masker.inverse_transform(StandardScaler().fit_transform(results[0.0].resid))
#    return StandardScaler().fit_transform(results[0.0].resid)
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
Example #16
def loader(anat, downsample, target_affine, dataroot, subject, maskpath, nrun,
           niifilename, labels, **kwargs):
    ''' 
    All parameters are submitted as cfg dictionary.
    Given parameters in cfg, return masked and concatenated over runs data 
    
    Input
    anat: MNI template
    downsample: 1 or 0
    target_affine: downsampling matrix
    dataroot: element of path to data
    subject: folder in dataroot with subject data
    maskpath: path to mask
    nrun: number of runs
    niifilename: name of the data file
    labels: labels from load_labels function
    
    Output
    dict(nii_func=nii_func,nii_mean=nii_mean,masker=masker,nii_mask=nii_mask)
    nii_func: 4D data
    nii_mean: mean over 4th dimension
    masker: masker object from nilearn
    nii_mask: 3D mask
    '''
    nii_func = list()
    for r in range(nrun):
        fname = '{0}/{1}/run{2}/{3}'.format(dataroot, subject, r+1, niifilename) # Assumption about file location
        nii_img = load(fname, mmap=False)
        nii_img.set_sform(anat.get_sform())
        # Get mean over 4D
        nii_mean = mean_img(nii_img)
        # Masking
        nii_mask = load(maskpath)
        nii_mask.set_sform(anat.get_sform())
        # Binarize the mask
        nii_mask = check_binary(nii_mask)
        if downsample:
            nii_img = resample_img(nii_img, target_affine=target_affine)
            nii_mask = resample_img(nii_mask, target_affine=target_affine, interpolation='nearest')
        masker = NiftiMasker(nii_mask, standardize=True)
        nii_img = masker.fit_transform(nii_img)
        # Drop zero timepoints, zscore
        nii_img = drop_labels(nii_img, labels.get('to_drop_zeros')[r])
        nii_func.append(stats.zscore(nii_img, axis=0)) # zscore over time
    # throw data together
    nii_func = np.concatenate(nii_func)
    return dict(nii_func=nii_func, nii_mean=nii_mean, masker=masker, nii_mask=nii_mask)
def residualize_imgs(in_file, mask_file, confounds_file):
    '''
    * takes 4d file, mask file & confounds as np.array
    * regresses out confounds (only within mask)
    * writes residualized nii
    '''
    from nilearn.input_data import NiftiMasker
    import os
    import numpy as np

    confounds = np.loadtxt(confounds_file)
    masker = NiftiMasker(mask_img=mask_file)
    brain_data_2d = masker.fit_transform(in_file, confounds=confounds)
    out_file = os.path.join(os.getcwd(), 'residualized_data.nii.gz')
    out_img = masker.inverse_transform(brain_data_2d)
    out_img.to_filename(out_file)
    return out_file
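
A usage sketch; the file names are hypothetical, and the confounds file is expected to be a plain-text array readable by np.loadtxt:

residualized_file = residualize_imgs('func.nii.gz', 'brain_mask.nii.gz', 'confounds.txt')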
Example #18
def read_data_haxby(subject, tr=2.5, masker=False):
    haxby_dataset = fetch_haxby(subjects=[subject])

    # Load fmri data
    fmri_filename = haxby_dataset.func[0]
    fmri = load_img(fmri_filename)
    # mask = haxby_dataset.mask_vt[0]
    masker = NiftiMasker(mask_strategy='epi', standardize=True, detrend=True,
                         high_pass=0.01, t_r=tr, smoothing_fwhm=5)
    fmri = masker.fit_transform(fmri)
    fmri = fmri.reshape(12, -1, fmri.shape[-1])

    # Load stimuli data
    classes = np.array(['rest', 'face', 'house', 'bottle', 'cat', 'chair',
                        'scissors', 'shoe', 'scrambledpix'])
    labels = np.recfromcsv(
        haxby_dataset.session_target[0], delimiter=" ")['labels'].reshape(
            12, -1)
    stimuli, onsets, conditions = (np.zeros((
        12, len(labels[0]), len(classes))), [], [])
    stimuli[:, 0, 0] = 1
    for session in range(12):
        onsets.append([])
        conditions.append([])
        for scan in range(1, len(fmri[session])):
            if (labels[session][scan - 1] == 'rest' and
                labels[session][scan] != 'rest'):
                label = labels[session][scan]
                stimuli[session, scan, np.where(classes == label)[0][0]] = 1
                conditions[session].append(label)
                onsets[session].append(scan * tr)
            else:
                stimuli[session, scan, 0] = 1

    if subject == 5:
        fmri = np.vstack((fmri[:8], fmri[9:]))
        stimuli = np.vstack((stimuli[:8], stimuli[9:]))
        onsets = np.vstack((onsets[:8], onsets[9:]))
        conditions = np.vstack((conditions[:8], conditions[9:]))

    if masker:
        return fmri, stimuli, onsets, conditions, masker

    return fmri, stimuli, onsets, conditions
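
A usage sketch for the loader above; the subject number is only an illustration, and the call fetches the Haxby dataset through nilearn (downloaded on first use):

fmri, stimuli, onsets, conditions = read_data_haxby(subject=2, tr=2.5)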
def preprocess(num, subj, subj_dir, subj_warp_dir, force_warp=False, group_mode=False):
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    template_path = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    warp_path = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'in_grpbold7Tp1', 'subj2tmpl_warp.nii.gz')

    output_path = os.path.join(subj_warp_dir, 'run00%i.nii.gz' % num)
    if group_mode:
        if force_warp or not os.path.exists(output_path):
            print('Warping image #%i...' % num)
            subprocess.call(['fsl5.0-applywarp', '-i', bold_path, '-o', output_path, '-r', template_path, '-w', warp_path, '-d', 'float'])
        else:
            print('Reusing cached warp image #%i' % num)
        mask = None
        bold = output_path
    else:
        mask = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'brain_mask.nii.gz')
        bold = bold_path
    masker = NiftiMasker(mask, standardize=True, detrend=True)
    img = masker.inverse_transform(masker.fit_transform(bold))
    print('Saving image #%i...' % num)
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
    print('Finished with image #%i' % num)
def extract_data(nifti_file, mask_file, out_file, zscore, detrend, smoothing_fwmw):
    if mask_file is None:
        #whole brain, get coordinate info from nifti_file itself
        mask = nib.load(nifti_file.name)
    else:
        mask = nib.load(mask_file.name)
    affine = mask.get_affine()
    if mask_file is None:
        mask_data = mask.get_data()
        if mask_data.ndim == 4:
            #get mask in 3D
            img_data_type = mask.header.get_data_dtype()
            n_tr = mask_data.shape[3]
            mask_data = mask_data[:,:,:,n_tr//2].astype(bool)
            mask = nib.Nifti1Image(mask_data.astype(img_data_type), affine)
        else:
            mask_data = mask_data.astype(bool)
    else:
        mask_data = mask.get_data().astype(bool)

    #get voxel coordinates
    R = np.float64(np.argwhere(mask_data))

    #get scanner RAS coordinates based on voxel coordinates
    if affine is not None:
        R = (np.dot(affine[:3,:3], R.T) + affine[:3,3:4]).T

    #get ROI data, and run preprocessing
    nifti_masker = NiftiMasker(mask_img=mask, standardize=zscore, detrend=detrend, smoothing_fwhm=smoothing_fwmw)
    img = nib.load(nifti_file.name)
    all_images = np.float64(nifti_masker.fit_transform(img))
    data = all_images.T.copy()

    #save data
    subj_data = {'data': data, 'R': R}
    scipy.io.savemat(out_file.name, subj_data)
Example #21
def extract_one_vpv_signal(dataset):
    for idx, func in enumerate([dataset.func1, dataset.func2]):
        for i in range(len(dataset.subjects)):
            tic = time.clock()

            #maps_img = dict_to_list(func)
            #add mask, smoothing, filter and detrending
            masker = NiftiMasker(mask_img=dataset.mask,
                                 low_pass=.1,
                                 high_pass=.01,
                                 smoothing_fwhm=6.,
                                 t_r=1.05,
                                 detrend=True,
                                 standardize=False,
                                 memory_level=0,
                                 verbose=5)

            #extract signal to x
            x = masker.fit_transform(func[i])

            print("loading time : " + str(time.clock() - tic))
            return x, masker
Example #22
# text labels.
# With scikit-learn >= 0.14, replace this line by: target = labels
_, target = sklearn.utils.fixes.unique(labels, return_inverse=True)

### Keep only data corresponding to faces or houses ###########################
condition_mask = np.logical_or(labels == 'face', labels == 'house')
target = target[condition_mask]

### Load the mask #############################################################

from nilearn.input_data import NiftiMasker
nifti_masker = NiftiMasker(mask=dataset.mask_vt[0])

# We give the nifti_masker a filename and retrieve a 2D array ready
# for machine learning with scikit-learn
fmri_masked = nifti_masker.fit_transform(dataset.func[0])

### Prediction function #######################################################

# First, we narrow to the face vs house classification
fmri_masked = fmri_masked[condition_mask]

# Here we use a Support Vector Classification, with a linear kernel and C=1
from sklearn.svm import SVC
svc = SVC(kernel='linear', C=1.)

# And we run it
svc.fit(fmri_masked, target)
y_pred = svc.predict(fmri_masked)

### Unmasking #################################################################
Example #23
n_samples = 94
dataset_files = datasets.fetch_localizer_contrasts(
    ['left button press (auditory cue)'], n_subjects=n_samples)
tested_var = dataset_files.ext_vars['pseudo']
# Quality check / Remove subjects with bad tested variate
mask_quality_check = np.where(tested_var != 'None')[0]
n_samples = mask_quality_check.size
contrast_maps = [dataset_files.cmaps[i] for i in mask_quality_check]
tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1))
print("Actual number of subjects after quality check: %d" % n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(
    smoothing_fwhm=5,
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(contrast_maps)

### Anova (parametric F-scores) ###############################################
from nilearn._utils.fixes import f_regression
_, pvals_anova = f_regression(fmri_masked, tested_var, center=True)
pvals_anova *= fmri_masked.shape[1]
pvals_anova[np.isnan(pvals_anova)] = 1
pvals_anova[pvals_anova > 1] = 1
neg_log_pvals_anova = - np.log10(pvals_anova)
neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_anova)

### Perform massively univariate analysis with permuted OLS ###################
neg_log_pvals_permuted_ols, _, _ = permuted_ols(
    tested_var, fmri_masked,
    model_intercept=True,
Example #24
from nilearn.input_data import NiftiMasker

n_subjects = 100  # more subjects requires more memory

### Load Oasis dataset ########################################################
oasis_dataset = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
gray_matter_map_filenames = oasis_dataset.gray_matter_maps
age = oasis_dataset.ext_vars['age'].astype(float)

### Preprocess data ###########################################################
nifti_masker = NiftiMasker(
    standardize=False,
    smoothing_fwhm=2,
    memory='nilearn_cache')  # cache options
# remove features with too low between-subject variance
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(gm_maps_masked)
gm_maps_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = gm_maps_masked.shape
print(n_samples, "subjects, ", n_features, "features")

### Prediction with SVR #######################################################
print("ANOVA + SVR")
### Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVR
svr = SVR(kernel='linear')

### Dimension reduction
Example #25
hv_array = image.high_variance_confounds(fmri_filename)
print('Computed {0} confounds array.'.format(hv_array.shape))

###############################################################################
# Do my confounds model noise properly? Voxel-to-voxel connectivity tells!
# -------------------------------------------------------------------------
#
# Check the relevance of chosen confounds: The distribution of voxel-to-voxel
# correlations should be tight and approximately centered to zero.
#
# Compute voxel-wise time series with and without confounds removal,
# using NiftiMasker.
from nilearn.input_data import NiftiMasker

brain_masker = NiftiMasker(detrend=True, memory='nilearn_cache', verbose=1)
timeseries_raw = brain_masker.fit_transform(fmri_filename)

###############################################################################
# Next, compute the voxel-to-voxel correlations. We use only 1% of voxels,
# to save computation time.
import numpy as np

selected_voxels = range(0, timeseries_raw.shape[1], 100)
correlations_raw = np.corrcoef(timeseries_raw[:, selected_voxels].T)

###############################################################################
# Same thing, with confounds removed: compute voxelwise time-series
timeseries_cleaned = brain_masker.fit_transform(fmri_filename,
                                                confounds=[hv_array])
correlations_cleaned = np.corrcoef(timeseries_cleaned[:, selected_voxels].T)
y_test = target[condition_mask_test]

y_train[y_train=='scissors']=1
y_train[y_train=='scrambledpix']=-1
y_train=np.array(y_train.astype('double'))

y_test[y_test=='scissors']=1
y_test[y_test=='scrambledpix']=-1
y_test=np.array(y_test.astype('double'))



masker = NiftiMasker(mask_strategy='epi',standardize=True)
                        
X_train = masker.fit_transform(X_train)
X_test  = masker.transform(X_test)

mask = masker.mask_img_.get_data().astype(np.bool)
mask= _crop_mask(mask)
background_img = mean_img(data_files.func[0])

X_train, y_train, _, y_train_mean, _ = center_data(X_train, y_train, fit_intercept=True, normalize=False,copy=False)
X_test-=X_train.mean(axis=0)
X_test/=np.std(X_train,axis=0)
alpha=1
ratio=0.5
k=200


solver_params = dict(tol=1e-6, max_iter=5000,prox_max_iter=100)
Example #27
def roi_based(substitutions,
	beta_file_template=None,
	events_file_template=None,
	ts_file_template=None,
	design_file_template=None,
	plot_design_regressors=[0,1,2],
	roi=None,
	melodic_hit=None,
	flip=False,
	design_len=None,
	color="r",
	scale_design=1,
	):
	"""Plot timecourses and design for measurements. should be deprecated in favour of multi"""

	fig, ax = plt.subplots(figsize=(6,4) , facecolor='#eeeeee', tight_layout=True)

	if roi:
		if isinstance(roi, str):
			roi = path.abspath(path.expanduser(roi))
			roi = nib.load(roi)
		masker = NiftiMasker(mask_img=roi)
		if ts_file_template:
			ts_file = path.expanduser(ts_file_template.format(**substitutions))
			final_time_series = masker.fit_transform(ts_file).T
			final_time_series = np.mean(final_time_series, axis=0)
			if flip:
				ax.plot(final_time_series, np.arange(len(final_time_series)))
				ax.set_ylim([0,len(final_time_series)])
			else:
				ax.plot(final_time_series)
				ax.set_xlim([0,len(final_time_series)])

	if design_file_template:
		design_file = path.expanduser(design_file_template.format(**substitutions))
		design_df = pd.read_csv(design_file, skiprows=5, sep="\t", header=None, index_col=False)
		if beta_file_template and roi:
			beta_file = path.expanduser(beta_file_template.format(**substitutions))
			roi_betas = masker.fit_transform(beta_file).T
			design_df = design_df*np.mean(roi_betas)
		for i in plot_design_regressors:
			regressor = design_df[[i]].values.flatten()
			if flip:
				ax.plot(regressor.T*scale_design, np.arange(len(regressor)), lw=rcParams['lines.linewidth']*2, color=color)
			else:
				ax.plot(regressor*scale_design, lw=rcParams['lines.linewidth']*2, color=color)
		if flip:
			ax.set_ylim([0,len(regressor)])
		else:
			ax.set_xlim([0,len(regressor)])

	if events_file_template:
		events_file = path.expanduser(events_file_template.format(**substitutions))
		events_df = pd.read_csv(events_file, sep="\t")
		for d, o in zip(events_df["duration"], events_df["onset"]):
			d = round(d)
			o = round(o)
			if flip:
				ax.axhspan(o,o+d, facecolor="cyan", alpha=0.15)
			else:
				ax.axvspan(o,o+d, facecolor="cyan", alpha=0.15)
		if design_len:
			if flip:
				ax.set_ylim([0,design_len])
			else:
				ax.set_xlim([0,design_len])

	if melodic_hit:
		melodic_file = "~/ni_data/ofM.dr/20151208_182500_4007_1_4/melo10/report/t4.txt"
		melodic = np.loadtxt(melodic_file)
		if flip:
			melodic = melodic.T
		ax.plot(melodic)

	if flip:
		ax.invert_yaxis()
		plt.xticks(rotation=90)
		ax.locator_params(nbins=5, axis='x')
		ax.set_ylabel('Time [TR]', rotation=270, fontsize="smaller", va="center")
	else:
		ax.set_xlabel('Time [TR]')

	return ax
# original data directory where haxby2001 directory resides
dataDir = '/tmp/Data'
# By default 2nd subject will be fetched
haxby_dataset = datasets.fetch_haxby(data_dir=dataDir)
imgfMRI = haxby_dataset.func[0]   # fMRI data file
imgAnat = haxby_dataset.anat[0]   # structural data file
imgMask = haxby_dataset.mask   # brain mask
tableTarget = haxby_dataset.session_target[0]  # session target table file

# Masking the image data with mask, extracting voxels
masker = NiftiMasker(mask_img=imgMask,
                     standardize=True,
                     detrend=True,
                     high_pass=0.008, t_r=TR)
# Extracting the voxel time series within the mask
X_fMRI = masker.fit_transform(imgfMRI)



###### LOADING BEHAVIORAL DATA
# loading the behavior data into a dataframe
targetData = pd.read_csv(tableTarget, sep=' ')
# stimulus types
targetNames = sorted(targetData.labels.unique())
# Creating numerical labels
targetData['labelInd'] = 0
for i,iCat in enumerate(targetNames):
    targetData.loc[targetData.labels==iCat, 'labelInd'] = i


### Mask data
print "Resample images"
nifti_masker = NiftiMasker(
    smoothing_fwhm=FWHM,
    memory='nilearn_cache',
    memory_level=1)  # cache options
# remove NaNs from images
ref_affine = np.asarray(nibabel.load(images[0]).get_affine())
images_ = [np.asarray(nibabel.load(img).get_data()) for img in images]
nonnan_images = []
for img in images_:
    img[np.isnan(img)] = 0.
    nonnan_images.append(nibabel.Nifti1Image(img, ref_affine))
print "Nifti masker"
# remove features with zero between-subject variance
images_masked = nifti_masker.fit_transform(images)
images_masked[:, images_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(images_masked)
images_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = images_masked.shape
print(n_samples, "subjects, ", n_features, "features")

### Perform massively univariate analysis with permuted OLS ###################
print("Massively univariate model")
neg_log_pvals, all_scores, _ = permuted_ols(
    cdr, images_masked, age,  # + intercept as a covariate by default
    n_perm=1000,
    n_jobs=-1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()[..., 0]
Example #30
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI automatic mask')

# Generate mask with strong opening
masker = NiftiMasker(mask_strategy='epi', mask_args=dict(opening=10))
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask with strong opening')

# Generate mask with a high lower cutoff
masker = NiftiMasker(mask_strategy='epi',
                     mask_args=dict(upper_cutoff=.9,
                                    lower_cutoff=.8,
                                    opening=False))
masker.fit(nyu_img)
plot_roi(masker.mask_img_, nyu_mean_img, title='EPI Mask: high lower_cutoff')

################################################################################
# Extract time series

# trended vs detrended
trended = NiftiMasker(mask_strategy='epi')
detrended = NiftiMasker(mask_strategy='epi', detrend=True)
trended_data = trended.fit_transform(nyu_img)
detrended_data = detrended.fit_transform(nyu_img)

print("Trended: mean %.2f, std %.2f" %
      (np.mean(trended_data), np.std(trended_data)))
print("Detrended: mean %.2f, std %.2f" %
      (np.mean(detrended_data), np.std(detrended_data)))

plt.show()
Example #31
session = session[condition_mask]
conditions = conditions[condition_mask]

# We have 2 conditions
n_conditions = np.size(np.unique(y))

### Loading step ##############################################################
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask=mask,
                           sessions=session,
                           smoothing_fwhm=4,
                           standardize=True,
                           memory="nilearn_cache",
                           memory_level=1)
X = nifti_masker.fit_transform(dataset_files.func)
# Apply our condition_mask
X = X[condition_mask]

### Prediction function #######################################################

### Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
svc = SVC(kernel='linear')

### Dimension reduction #######################################################

from sklearn.feature_selection import SelectKBest, f_classif

### Define the dimension reduction to be used.
    "625",
    "626",
    "627",
    "629",
    "630",
    "631",
    "633",
    "634",
]
# subjects = ['101', '103']

sessions = ["pre", "post"]

df = pd.DataFrame(index=subjects, columns=["average_head_size"])

masker = NiftiMasker()

for subject in subjects:
    head_size = 0
    for i in np.arange(0, len(sessions)):
        bet_mask = join(
            data_dir,
            subject,
            "session-{0}".format(i),
            "anatomical/anatomical-0/fsl/anatomical-bet_mask.nii.gz",
        )
        mask_array = masker.fit_transform(bet_mask)
        head_size += np.sum(mask_array)
    df.at[subject, "average_head_size"] = head_size / 2
df.to_csv(join(sink_dir, "head-size_{0}.csv".format(str(datetime.now()))))
Example #33
#y = y_mask[condition_mask]
y = y_mask[condition_mask]
print(y.shape)
n_conditions = np.size(np.unique(y))
print(n_conditions)
#n_conditions = np.size(np.unique(y))
print(y.unique())

session = func_df[condition_mask].to_records(index=False)
print(session.dtype.name)
#prepare the fxnl data.
nifti_masker = NiftiMasker(mask_img=imag_mask,
                           smoothing_fwhm=4,
                           standardize=True,
                           memory_level=0)
fmri_trans = nifti_masker.fit_transform(fmri_subjs)
print(fmri_trans)
X = fmri_trans[condition_mask]
subs = subs[condition_mask]

svc = SVC(kernel='linear', verbose=False)
print(svc)

from sklearn.feature_selection import SelectPercentile, f_classif
feature_selection = SelectPercentile(f_classif, percentile=10)

anova_svc = Pipeline([('anova', feature_selection), ('svc', svc)])
#fit the decoder and predict
anova_svc.fit(X, y)
y_pred = anova_svc.predict(X)
Example #34
      localizer_dataset.cmaps[0])

tested_var = localizer_dataset.ext_vars['pseudo']
# Quality check / Remove subjects with bad tested variate
mask_quality_check = np.where(tested_var != b'None')[0]
n_samples = mask_quality_check.size
contrast_map_filenames = [localizer_dataset.cmaps[i]
                          for i in mask_quality_check]
tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1))
print("Actual number of subjects after quality check: %d" % n_samples)

# Mask the data
nifti_masker = NiftiMasker(
    smoothing_fwhm=5,
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(contrast_map_filenames)

# Perform Anova (parametric F-scores)
from sklearn.feature_selection import f_regression
_, pvals_anova = f_regression(fmri_masked, tested_var, center=True)
pvals_anova *= fmri_masked.shape[1]
pvals_anova[np.isnan(pvals_anova)] = 1
pvals_anova[pvals_anova > 1] = 1
neg_log_pvals_anova = - np.log10(pvals_anova)
neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_anova)

# Perform massively univariate analysis with permuted OLS
neg_log_pvals_permuted_ols, _, _ = permuted_ols(
    tested_var, fmri_masked,
    model_intercept=True,
from nilearn import plotting
plotting.plot_roi(mask_filename, bg_img=haxby_dataset.anat[0],
                 cmap='Paired')

###########################################################################
# Now we use the NiftiMasker.
#
# We first create a masker, giving it the options that we care
# about. Here we use standardizing of the data, as it is often important
# for decoding
from nilearn.input_data import NiftiMasker
masker = NiftiMasker(mask_img=mask_filename, standardize=True)

# We give the masker a filename and retrieve a 2D array ready
# for machine learning with scikit-learn
fmri_masked = masker.fit_transform(fmri_filename)

###########################################################################
# The variable "fmri_masked" is a numpy array:
print(fmri_masked)

###########################################################################
# Its shape corresponds to the number of time-points times the number of
# voxels in the mask
print(fmri_masked.shape)

###########################################################################
# Load the behavioral labels
# ..........................
#
# The behavioral labels are stored in a CSV file, separated by spaces.
Example #36
# Keep only data corresponding to shoes or bottles
condition_mask = np.logical_or(y == b'shoe', y == b'bottle')
y = y[condition_mask]

###########################################################################
# Prepare the data with the NiftiMasker
from nilearn.input_data import NiftiMasker

mask_filename = haxby_dataset.mask
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask_img=mask_filename, sessions=session,
                           smoothing_fwhm=4, standardize=True,
                           memory="nilearn_cache", memory_level=1)
func_filename = haxby_dataset.func[0]
X = nifti_masker.fit_transform(func_filename)
# Restrict to non rest data
X = X[condition_mask]
session = session[condition_mask]

###########################################################################
# Build the decoder that we will use

# Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
svc = SVC(kernel='linear')


# Define the dimension reduction to be used.
# Here we use a classical univariate feature selection based on F-test,
localizer_dataset = datasets.fetch_localizer_contrasts(
    ['left button press (auditory cue)'], n_subjects=n_samples)
tested_var = localizer_dataset.ext_vars['pseudo']
# Quality check / Remove subjects with bad tested variate
mask_quality_check = np.where(tested_var != 'None')[0]
n_samples = mask_quality_check.size
contrast_map_filenames = [localizer_dataset.cmaps[i]
                          for i in mask_quality_check]
tested_var = tested_var[mask_quality_check].astype(float).reshape((-1, 1))
print("Actual number of subjects after quality check: %d" % n_samples)

### Mask data #################################################################
nifti_masker = NiftiMasker(
    smoothing_fwhm=5,
    memory='nilearn_cache', memory_level=1)  # cache options
fmri_masked = nifti_masker.fit_transform(contrast_map_filenames)

### Anova (parametric F-scores) ###############################################
from nilearn._utils.fixes import f_regression
_, pvals_anova = f_regression(fmri_masked, tested_var, center=True)
pvals_anova *= fmri_masked.shape[1]
pvals_anova[np.isnan(pvals_anova)] = 1
pvals_anova[pvals_anova > 1] = 1
neg_log_pvals_anova = - np.log10(pvals_anova)
neg_log_pvals_anova_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals_anova)

### Perform massively univariate analysis with permuted OLS ###################
neg_log_pvals_permuted_ols, _, _ = permuted_ols(
    tested_var, fmri_masked,
    model_intercept=True,
Example #38
#grab conditional labels
y = behavioral["Label"]

#restrict data to our target analysis
condition_mask = behavioral["Label"].isin(['LF_HS_receipt', "h20_receipt"])
y = y[condition_mask]
#confirm we have the # of conditions needed
print(y.unique())

# In[ ]:

masker = NiftiMasker(mask_img=imag_mask,
                     standardize=True,
                     memory="nilearn_cache",
                     memory_level=1)
X = masker.fit_transform(dataset)
# Apply our condition_mask
X = X[condition_mask]

# In[ ]:

from sklearn.svm import SVC

svc = SVC(kernel='linear')

from sklearn.feature_selection import SelectKBest, f_classif

feature_selection = SelectKBest(f_classif, k=500)

# We have our classifier (SVC), our feature selection (SelectKBest), and now,
# we can plug them together in a *pipeline* that performs the two operations
Example #39
resting_state = stimuli == "rest"

# find names of remaining active labels
categories = np.unique(stimuli[resting_state == False])

# extract tags indicating to which acquisition run a tag belongs
session_labels = labels["chunks"][resting_state == False]

# Load the fMRI data
from nilearn.input_data import NiftiMasker

# For decoding, standardizing is often very important
mask_filename = haxby_dataset.mask_vt[0]
masker = NiftiMasker(mask_img=mask_filename, standardize=True)
func_filename = haxby_dataset.func[0]
masked_timecourses = masker.fit_transform(func_filename)[resting_state ==
                                                         False]

### Classifiers definition

# A support vector classifier
from sklearn.svm import SVC
svm = SVC(C=1., kernel="linear")

from sklearn.grid_search import GridSearchCV
# GridSearchCV is slow, but note that it takes an 'n_jobs' parameter that
# can significantly speed up the fitting process on computers with
# multiple cores
svm_cv = GridSearchCV(SVC(C=1., kernel="linear"),
                      param_grid={'C': [.1, .5, 1., 5., 10., 50., 100.]},
                      scoring='f1')
Example #40
conditions = conditions[non_rest]
y = y[non_rest]
session = session[non_rest]

# Get the labels of the numerical conditions represented by the vector y
unique_conditions, order = np.unique(conditions, return_index=True)
# Sort the conditions by the order of appearance
unique_conditions = unique_conditions[np.argsort(order)]

### Loading step ##############################################################
from nilearn.input_data import NiftiMasker
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask_img=dataset_files.mask, standardize=True,
                           sessions=session, smoothing_fwhm=4,
                           memory="nilearn_cache", memory_level=1)
X = nifti_masker.fit_transform(dataset_files.func)
X = X[non_rest]

### Predictor #################################################################

### Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier
from sklearn.pipeline import Pipeline

svc_ovo = OneVsOneClassifier(Pipeline([
                ('anova', SelectKBest(f_classif, k=500)),
                ('svc', SVC(kernel='linear'))
                ]))
# Make a data splitting object for cross validation
from sklearn.cross_validation import LeaveOneLabelOut, cross_val_score
cv = LeaveOneLabelOut(session_labels)

func_filename = haxby_dataset.func[0]
mask_names = ['mask_vt', 'mask_face', 'mask_house']

mask_scores = {}
mask_chance_scores = {}

for mask_name in mask_names:
    print("Working on mask %s" % mask_name)
    # For decoding, standardizing is often very important
    mask_filename = haxby_dataset[mask_name][0]
    masker = NiftiMasker(mask_img=mask_filename, standardize=True)
    masked_timecourses = masker.fit_transform(
        func_filename)[np.logical_not(resting_state)]

    mask_scores[mask_name] = {}
    mask_chance_scores[mask_name] = {}

    for category in categories:
        print("Processing %s %s" % (mask_name, category))
        classification_target = stimuli[np.logical_not(resting_state)] == category
        mask_scores[mask_name][category] = cross_val_score(
            classifier,
            masked_timecourses,
            classification_target,
            cv=cv, scoring="f1")

        mask_chance_scores[mask_name][category] = cross_val_score(
            dummy_classifier,
Example #42
#############################################################################
# Prepare the fMRI data: smooth and apply the mask
#-----------------------------------------------
from nilearn.input_data import NiftiMasker
mask_filename = haxby_dataset.mask

# For decoding, standardizing is often very important
# Note that we are also smoothing the data
masker = NiftiMasker(mask_img=mask_filename,
                     smoothing_fwhm=4,
                     standardize=True,
                     memory='nilearn_cache',
                     memory_level=1)
func_filename = haxby_dataset.func[0]

X = masker.fit_transform(func_filename)

# Apply our condition_mask
X = X[condition_mask]

#decoder
#------------------------------
# Define the prediction function to be used
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
svc = SVC(kernel='linear')

# Define the dimension reduction to be used
# Here we use a classical univariate feature selection based on F-test
# namely ANOVA. Since we are doing a full-brain analysis, it is better to use
# SelectPercentile, keeping 5% of voxels
Example #43
from nilearn import datasets
from nilearn.input_data import NiftiMasker

n_subjects = 100  # more subjects requires more memory

### Load Oasis dataset ########################################################
dataset_files = datasets.fetch_oasis_vbm(n_subjects=n_subjects)
age = dataset_files.ext_vars['age'].astype(float)

### Preprocess data ###########################################################
nifti_masker = NiftiMasker(
    standardize=False,
    smoothing_fwhm=2,
    memory='nilearn_cache')  # cache options
# remove features with too low between-subject variance
gm_maps_masked = nifti_masker.fit_transform(dataset_files.gray_matter_maps)
gm_maps_masked[:, gm_maps_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(gm_maps_masked)
gm_maps_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = gm_maps_masked.shape
print(n_samples, "subjects, ", n_features, "features")

### Prediction with SVR #######################################################
print("ANOVA + SVR")
### Define the prediction function to be used.
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVR
svr = SVR(kernel='linear')

### Dimension reduction
Example #44
### Mask data
nifti_masker = NiftiMasker(
    smoothing_fwhm=FWHM,
    memory='nilearn_cache',
    memory_level=1)  # cache options
# remove NaNs from images
ref_affine = np.asarray(nibabel.load(images[0]).get_affine())
images_ = [np.asarray(nibabel.load(img).get_data()) for img in images]
nonnan_images = []
for img in images_:
    img[np.isnan(img)] = 0.
    nonnan_images.append(nibabel.Nifti1Image(img, ref_affine))
print "Nifti masker"
# remove features with zero between-subject variance
images_masked = nifti_masker.fit_transform(images)
images_masked[:, images_masked.var(0) < 0.01] = 0.
# final masking
new_images = nifti_masker.inverse_transform(images_masked)
images_masked = nifti_masker.fit_transform(new_images)
n_samples, n_features = images_masked.shape
print(n_samples, "subjects, ", n_features, "features")

### Perform massively univariate analysis with permuted OLS ###################
print("Massively univariate model")
neg_log_pvals, all_scores, _ = permuted_ols(
    age, images_masked,  # + intercept as a covariate by default
    n_perm=1000,
    n_jobs=-1)  # can be changed to use more CPUs
neg_log_pvals_unmasked = nifti_masker.inverse_transform(
    neg_log_pvals).get_data()[..., 0]
Example #45
class Brain_Data(object):

    """
    Brain_Data is a class to represent neuroimaging data in python as a vector rather than a 3-dimensional matrix.  
    This makes it easier to perform data manipulation and analyses.

    Args:
        data: nibabel data instance or list of files
        Y: Pandas DataFrame of training labels
        X: Pandas DataFrame Design Matrix for running univariate models 
        mask: binary nifti file to mask brain data
        output_file: Name to write out to nifti file
        **kwargs: Additional keyword arguments to pass to the prediction algorithm

    """

    def __init__(self, data=None, Y=None, X=None, mask=None, output_file=None, **kwargs):
        if mask is not None:
            if not isinstance(mask, nib.Nifti1Image):
                if type(mask) is str and os.path.isfile(mask):
                    mask = nib.load(mask)
                else:
                    raise ValueError("mask is not a nibabel instance or a valid file name")
            self.mask = mask
        else:
            self.mask = nib.load(os.path.join(get_resource_path(),'MNI152_T1_2mm_brain_mask.nii.gz'))
        self.nifti_masker = NiftiMasker(mask_img=self.mask)

        if data is not None:
            if type(data) is str:
                data=nib.load(data)
                self.data = self.nifti_masker.fit_transform(data)
            elif type(data) is list:
                # Load and transform each image in list separately (nib.concat_images(data) can't handle images of different sizes)
                self.data = []
                for i in data:
                    if isinstance(i,six.string_types):
                        self.data.append(self.nifti_masker.fit_transform(nib.load(i)))
                    elif isinstance(i,nib.Nifti1Image):
                        self.data.append(self.nifti_masker.fit_transform(i))
                self.data = np.array(self.data)
            elif isinstance(data, nib.Nifti1Image):
                self.data = self.nifti_masker.fit_transform(data)
            else:
                raise ValueError("data is not a nibabel instance")

            # Collapse any extra dimension
            if any([x==1 for x in self.data.shape]):
                self.data=self.data.squeeze()
        else:
            self.data = np.array([])

        if Y is not None:
            if type(Y) is str:
                if os.path.isfile(Y):
                    Y=pd.read_csv(Y,header=None,index_col=None)
            if isinstance(Y, pd.DataFrame):
                if self.data.shape[0]!= len(Y):
                    raise ValueError("Y does not match the correct size of data")
                self.Y = Y
            else:
                raise ValueError("Make sure Y is a pandas data frame.")
        else:
            self.Y = pd.DataFrame()

        if X is not None:
            if self.data.shape[0]!= X.shape[0]:
                raise ValueError("X does not match the correct size of data")
            self.X = X
        else:
            self.X = pd.DataFrame()

        if output_file is not None:
            self.file_name = output_file
        else:
            self.file_name = []

    def __repr__(self):
        return '%s.%s(data=%s, Y=%s, X=%s, mask=%s, output_file=%s)' % (
            self.__class__.__module__,
            self.__class__.__name__,
            self.shape(),
            len(self.Y),
            self.X.shape,
            os.path.basename(self.mask.get_filename()),
            self.file_name
            )

    def __getitem__(self, index):
        new = deepcopy(self)
        if isinstance(index, int):
            new.data = np.array(self.data[index,:]).flatten()
        else:
            new.data = np.array(self.data[index,:])           
        if not self.Y.empty:
            new.Y = self.Y.iloc[index]
        if self.X.size:
            if isinstance(self.X,pd.DataFrame):
                new.X = self.X.iloc[index]
            else:
                new.X = self.X[index,:]
        return new

    def __setitem__(self, index, value):
        if not isinstance(value,Brain_Data):
            raise ValueError('Make sure the value you are trying to set is a Brain_Data() instance.')
        self.data[index,:] = value.data
        if not value.Y.empty:
            self.Y.values[index] = value.Y
        if not value.X.empty:
            if self.X.shape[1] != value.X.shape[1]:
                raise ValueError('Make sure self.X is the same size as value.X.')
            self.X.values[index] = value.X

    def __len__(self):
        return self.shape()[0]

    def shape(self):
        """ Get images by voxels shape.

        Args:
            self: Brain_Data instance

        """

        return self.data.shape

    def mean(self):
        """ Get mean of each voxel across images.

        Args:
            self: Brain_Data instance

        Returns:
            out: Brain_Data instance
        
        """ 

        out = deepcopy(self)
        out.data = np.mean(out.data, axis=0)
        return out

    def std(self):
        """ Get standard deviation of each voxel across images.

        Args:
            self: Brain_Data instance

        Returns:
            out: Brain_Data instance
        
        """ 

        out = deepcopy(self)
        out.data = np.std(out.data, axis=0)
        return out

    def to_nifti(self):
        """ Convert Brain_Data Instance into Nifti Object

        Args:
            self: Brain_Data instance

        Returns:
            out: nibabel instance
        
        """
        
        return self.nifti_masker.inverse_transform(self.data)

    def write(self, file_name=None):
        """ Write out Brain_Data object to Nifti File.

        Args:
            self: Brain_Data instance
            file_name: name of nifti file

        """

        self.to_nifti().to_filename(file_name)

    def plot(self, limit=5, anatomical=None):
        """ Create a quick plot of self.data.  Will plot each image separately

        Args:
            limit: max number of images to return
            anatomical: nifti image or file name to overlay

        """

        if anatomical is not None:
            if not isinstance(anatomical, nib.Nifti1Image):
                if type(anatomical) is str:
                    anatomical = nib.load(anatomical)
                else:
                    raise ValueError("anatomical is not a nibabel instance")
        else:
            anatomical = get_anatomical()

    
        if self.data.ndim == 1:
            plot_stat_map(self.to_nifti(), anatomical, cut_coords=range(-40, 50, 10), display_mode='z', 
                black_bg=True, colorbar=True, draw_cross=False)
        else:
            for i in xrange(self.data.shape[0]):
                if i < limit:
                    # plot_roi(self.nifti_masker.inverse_transform(self.data[i,:]), self.anatomical)
                    # plot_stat_map(self.nifti_masker.inverse_transform(self.data[i,:]), 
                    plot_stat_map(self[i].to_nifti(), anatomical, cut_coords=range(-40, 50, 10), display_mode='z', 
                        black_bg=True, colorbar=True, draw_cross=False)

    def regress(self):
        """ run vectorized OLS regression across voxels.

        Args:
            self: Brain_Data instance

        Returns:
            out: dictionary of regression statistics in Brain_Data instances {'beta','t','p','df','residual'}
        
        """ 

        if not isinstance(self.X, pd.DataFrame):
            raise ValueError('Make sure self.X is a pandas DataFrame.')

        if self.X.empty:
            raise ValueError('Make sure self.X is not empty.')

        if self.data.shape[0]!= self.X.shape[0]:
            raise ValueError("self.X does not match the correct size of self.data")

        b = np.dot(np.linalg.pinv(self.X), self.data)
        res = self.data - np.dot(self.X,b)
        sigma = np.std(res,axis=0)
        stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(self.X.T,self.X)))**.5).T,np.matrix(sigma))
        b_out = deepcopy(self)
        b_out.data = b
        t_out = deepcopy(self)
        t_out.data = b /stderr
        df = np.array([self.X.shape[0]-self.X.shape[1]] * t_out.data.shape[1])
        p_out = deepcopy(self)
        p_out.data = 2*(1-t.cdf(np.abs(t_out.data),df))

 
        # Might want to not output this info
        df_out = deepcopy(self)
        df_out.data = df
        sigma_out = deepcopy(self)
        sigma_out.data = sigma
        res_out = deepcopy(self)
        res_out.data = res

        return {'beta':b_out, 't':t_out, 'p':p_out, 'df':df_out, 'sigma':sigma_out, 'residual':res_out}

    def ttest(self, threshold_dict=None):
        """ Calculate one sample t-test across each voxel (two-sided)

        Args:
            self: Brain_Data instance
            threshold_dict: a dictionary of threshold parameters {'unc':.001} or {'fdr':.05}

        Returns:
            out: dictionary of regression statistics in Brain_Data instances {'t','p'}
        
        """ 

        # Notes:  Need to add FDR Option

        t = deepcopy(self)
        p = deepcopy(self)
        t.data, p.data = ttest_1samp(self.data, 0, 0)

        if threshold_dict is not None:
            if type(threshold_dict) is dict:
                if 'unc' in threshold_dict:
                    #Uncorrected Thresholding
                    t.data[np.where(p.data>threshold_dict['unc'])] = np.nan
                elif 'fdr' in threshold_dict:
                    pass
            else:
                raise ValueError("threshold_dict is not a dictionary.  Make sure it is in the form of {'unc':.001} or {'fdr':.05}")

        out = {'t':t, 'p':p}

        return out

    def append(self, data):
        """ Append data to Brain_Data instance

        Args:
            data: Brain_Data instance to append

        Returns:
            out: new appended Brain_Data instance
        """

        if not isinstance(data, Brain_Data):
            raise ValueError('Make sure data is a Brain_Data instance')
 
        if self.isempty():
            out = deepcopy(data)           
        else:
            out = deepcopy(self)
            if len(self.shape()) == 1 and len(data.shape()) == 1:
                if self.shape()[0] != data.shape()[0]:
                    raise ValueError('Data has a different number of voxels than the weight_map.')
            elif len(self.shape()) == 1 and len(data.shape()) > 1:
                if self.shape()[0] != data.shape()[1]:
                    raise ValueError('Data has a different number of voxels than the weight_map.')
            elif len(self.shape()) > 1 and len(data.shape()) == 1:
                if self.shape()[1] != data.shape()[0]:
                    raise ValueError('Data has a different number of voxels than the weight_map.')
            elif self.shape()[1] != data.shape()[1]:
                raise ValueError('Data has a different number of voxels than the weight_map.')

            out.data = np.vstack([self.data,data.data])
            if out.Y.size:
                out.Y = self.Y.append(data.Y)
            if self.X.size:
                if isinstance(self.X,pd.DataFrame):
                    out.X = self.X.append(data.X)
                else:
                    out.X = np.vstack([self.X, data.X])
        return out

    def empty(self, data=True, Y=True, X=True):
        """ Initalize Brain_Data.data as empty
        
        """
        
        tmp = deepcopy(self)
        if data:
            tmp.data = np.array([])
        if Y:
            tmp.Y = pd.DataFrame()
        if X:
            tmp.X = np.array([])
        # tmp.data = np.array([]).reshape(0,n_voxels)
        return tmp

    def isempty(self):
        """ Check if Brain_Data.data is empty
        
        Returns:
            bool
        """ 

        if isinstance(self.data,np.ndarray):
            if self.data.size:
                boolean = False
            else:
                boolean = True

        if isinstance(self.data, list):
            if not self.data:
                boolean = True
            else:
                boolean = False
        
        return boolean

    def similarity(self, image, method='correlation'):
        """ Calculate similarity of Brain_Data() instance with single Brain_Data or Nibabel image

            Args:
                self: Brain_Data instance of data to be applied
                image: Brain_Data or Nibabel instance of weight map

            Returns:
                pexp: Outputs a vector of pattern expression values

        """

        if not isinstance(image, Brain_Data):
            if isinstance(image, nib.Nifti1Image):
                image = Brain_Data(image)
            else:
                raise ValueError("Image is not a Brain_Data or nibabel instance")
        dim = image.shape()

        # Check to make sure masks are the same for each dataset and if not create a union mask
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data()==1)!=np.sum(image.nifti_masker.mask_img.get_data()==1):
            new_mask = intersect_masks([self.nifti_masker.mask_img, image.nifti_masker.mask_img], threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(image.to_nifti())
        else:
            data2 = self.data
            image2 = image.data


        # Calculate pattern expression
        if method == 'dot_product':
            if len(image2.shape) > 1:
                if image2.shape[0]>1:
                    pexp = []
                    for i in range(image2.shape[0]):
                        pexp.append(np.dot(data2, image2[i,:]))
                    pexp = np.array(pexp)
                else:
                    pexp = np.dot(data2, image2)
            else:
                pexp = np.dot(data2, image2)
        elif method == 'correlation':
            if len(image2.shape) > 1:
                if image2.shape[0]>1:
                    pexp = []
                    for i in range(image2.shape[0]):
                        pexp.append(pearson(image2[i,:], data2))
                    pexp = np.array(pexp)
                else:
                    pexp = pearson(image2, data2)
            else:
                pexp = pearson(image2, data2)
        return pexp

    def distance(self, method='euclidean', **kwargs):
        """ Calculate distance between images within a Brain_Data() instance.

            Args:
                self: Brain_Data instance of data to be applied
                method: type of distance metric (can use any scikit-learn or scipy metric)

            Returns:
                dist: Outputs a 2D distance matrix.

        """

        return pairwise_distances(self.data, metric = method, n_jobs=1)


    def multivariate_similarity(self, images, method='ols'):
        """ Predict spatial distribution of Brain_Data() instance from linear combination of other Brain_Data() instances or Nibabel images

            Args:
                self: Brain_Data instance of data to be applied
                images: Brain_Data instance of weight map

            Returns:
                out: dictionary of regression statistics in Brain_Data instances {'beta','t','p','df','residual'}

        """
        ## Notes:  Should add ridge, lasso, and elastic net options

        if len(self.shape()) > 1:
            raise ValueError("This method can only decompose a single brain image.")

        if not isinstance(images, Brain_Data):
            raise ValueError("Images are not a Brain_Data instance")
        dim = images.shape()

        # Check to make sure masks are the same for each dataset and if not create a union mask
        # This might be handy code for a new Brain_Data method
        if np.sum(self.nifti_masker.mask_img.get_data()==1)!=np.sum(images.nifti_masker.mask_img.get_data()==1):
            new_mask = intersect_masks([self.nifti_masker.mask_img, images.nifti_masker.mask_img], threshold=1, connected=False)
            new_nifti_masker = NiftiMasker(mask_img=new_mask)
            data2 = new_nifti_masker.fit_transform(self.to_nifti())
            image2 = new_nifti_masker.fit_transform(images.to_nifti())
        else:
            data2 = self.data
            image2 = images.data

        # Add intercept and transpose
        image2 = np.vstack((np.ones(image2.shape[1]),image2)).T

        # Calculate pattern expression
        if method == 'ols':
            b = np.dot(np.linalg.pinv(image2), data2)
            res = data2 - np.dot(image2, b)
            sigma = np.std(res, axis=0)
            stderr = np.dot(np.matrix(np.diagonal(np.linalg.inv(np.dot(image2.T, image2)))**.5).T, np.matrix(sigma))
            t_out = b / stderr
            df = image2.shape[0] - image2.shape[1]
            p = 2 * (1 - t.cdf(np.abs(t_out), df))
        else:
            raise NotImplementedError("Only the 'ols' method is currently implemented.")

        return {'beta':b, 't':t_out, 'p':p, 'df':df, 'sigma':sigma, 'residual':res}

    def predict(self, algorithm=None, cv_dict=None, plot=True, **kwargs):

        """ Run prediction

        Args:
            algorithm: Algorithm to use for prediction.  Must be one of 'svm', 'svr',
            'linear', 'logistic', 'lasso', 'ridge', 'ridgeClassifier','randomforest',
            or 'randomforestClassifier'
            cv_dict: Type of cross_validation to use. A dictionary of
                {'type': 'kfolds', 'n_folds': n},
                {'type': 'kfolds', 'n_folds': n, 'subject_id': holdout}, or
                {'type': 'loso', 'subject_id': holdout},
                where n = number of folds, and subject = vector of subject ids that corresponds to self.Y
            plot: Boolean indicating whether or not to create plots.
            **kwargs: Additional keyword arguments to pass to the prediction algorithm

        Returns:
            output: a dictionary of prediction parameters

        """

        # Set algorithm
        if algorithm is not None:
            predictor_settings = set_algorithm(algorithm, **kwargs)
        else:
            # Use SVR as a default
            predictor_settings = set_algorithm('svr', **{'kernel':"linear"})

        # Initialize output dictionary
        output = {}
        output['Y'] = np.array(self.Y).flatten()
        
        # Overall Fit for weight map
        predictor = predictor_settings['predictor']
        predictor.fit(self.data, output['Y'])
        output['yfit_all'] = predictor.predict(self.data)
        if predictor_settings['prediction_type'] == 'classification':
            if predictor_settings['algorithm'] not in ['svm','ridgeClassifier','ridgeClassifierCV']:
                output['prob_all'] = predictor.predict_proba(self.data)[:,1]
            else:
                output['dist_from_hyperplane_all'] = predictor.decision_function(self.data)
                if predictor_settings['algorithm'] == 'svm' and predictor.probability:
                    output['prob_all'] = predictor.predict_proba(self.data)[:,1]
       
        output['intercept'] = predictor.intercept_

        # Weight map
        output['weight_map'] = self.empty()
        if predictor_settings['algorithm'] == 'lassopcr':
            output['weight_map'].data = np.dot(predictor_settings['_pca'].components_.T,predictor_settings['_lasso'].coef_)
        elif predictor_settings['algorithm'] == 'pcr':
            output['weight_map'].data = np.dot(predictor_settings['_pca'].components_.T,predictor_settings['_regress'].coef_)
        else:
            output['weight_map'].data = predictor.coef_.squeeze()

        # Cross-Validation Fit
        if cv_dict is not None:
            output['cv'] = set_cv(cv_dict)

            predictor_cv = predictor_settings['predictor']
            output['yfit_xval'] = output['yfit_all'].copy()
            output['intercept_xval'] = []
            output['weight_map_xval'] = deepcopy(output['weight_map'])
            wt_map_xval = [];
            if predictor_settings['prediction_type'] == 'classification':
                if predictor_settings['algorithm'] not in ['svm','ridgeClassifier','ridgeClassifierCV']:
                    output['prob_xval'] = np.zeros(len(self.Y))
                else:
                    output['dist_from_hyperplane_xval'] = np.zeros(len(self.Y))
                    if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
                        output['prob_xval'] = np.zeros(len(self.Y))

            for train, test in output['cv']:
                predictor_cv.fit(self.data[train], self.Y.loc[train])
                output['yfit_xval'][test] = predictor_cv.predict(self.data[test])
                if predictor_settings['prediction_type'] == 'classification':
                    if predictor_settings['algorithm'] not in ['svm','ridgeClassifier','ridgeClassifierCV']:
                        output['prob_xval'][test] = predictor_cv.predict_proba(self.data[test])[:,1]
                    else:
                        output['dist_from_hyperplane_xval'][test] = predictor_cv.decision_function(self.data[test])
                        if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
                            output['prob_xval'][test] = predictor_cv.predict_proba(self.data[test])[:,1]
                output['intercept_xval'].append(predictor_cv.intercept_)

                # Weight map
                if predictor_settings['algorithm'] == 'lassopcr':
                    wt_map_xval.append(np.dot(predictor_settings['_pca'].components_.T,predictor_settings['_lasso'].coef_))
                elif predictor_settings['algorithm'] == 'pcr':
                    wt_map_xval.append(np.dot(predictor_settings['_pca'].components_.T,predictor_settings['_regress'].coef_))
                else:
                    wt_map_xval.append(predictor_cv.coef_.squeeze())
                output['weight_map_xval'].data = np.array(wt_map_xval)
        
        # Print Results
        if predictor_settings['prediction_type'] == 'classification':
            output['mcr_all'] = np.mean(output['yfit_all']==np.array(self.Y).flatten())
            print 'overall accuracy: %.2f' % output['mcr_all']
            if cv_dict is not None:
                output['mcr_xval'] = np.mean(output['yfit_xval']==np.array(self.Y).flatten())
                print 'overall CV accuracy: %.2f' % output['mcr_xval']
        elif predictor_settings['prediction_type'] == 'prediction':
            output['rmse_all'] = np.sqrt(np.mean((output['yfit_all']-output['Y'])**2))
            output['r_all'] = np.corrcoef(output['Y'],output['yfit_all'])[0,1]
            print 'overall Root Mean Squared Error: %.2f' % output['rmse_all']
            print 'overall Correlation: %.2f' % output['r_all']
            if cv_dict is not None:
                output['rmse_xval'] = np.sqrt(np.mean((output['yfit_xval']-output['Y'])**2))
                output['r_xval'] = np.corrcoef(output['Y'],output['yfit_xval'])[0,1]
                print 'overall CV Root Mean Squared Error: %.2f' % output['rmse_xval']
                print 'overall CV Correlation: %.2f' % output['r_xval']

        # Plot
        if plot:
            if cv_dict is not None:
                if predictor_settings['prediction_type'] == 'prediction':
                    fig2 = scatterplot(pd.DataFrame({'Y': output['Y'], 'yfit_xval':output['yfit_xval']}))
                elif predictor_settings['prediction_type'] == 'classification':
                    if predictor_settings['algorithm'] not in ['svm','ridgeClassifier','ridgeClassifierCV']:
                        output['roc'] = Roc(input_values=output['prob_xval'], binary_outcome=output['Y'].astype('bool'))
                    else:
                        output['roc'] = Roc(input_values=output['dist_from_hyperplane_xval'], binary_outcome=output['Y'].astype('bool'))
                        if predictor_settings['algorithm'] == 'svm' and predictor_cv.probability:
                            output['roc'] = Roc(input_values=output['prob_xval'], binary_outcome=output['Y'].astype('bool'))
                    fig2 = output['roc'].plot()
                    # output['roc'].summary()
            fig1=output['weight_map'].plot()

        return output

    def bootstrap(self, analysis_type=None, n_samples=10, save_weights=False, **kwargs):
        """ Bootstrap various Brain_Data analaysis methods (e.g., mean, std, regress, predict).  Currently  

        Args:
            analysis_type: Type of analysis to bootstrap (mean,std,regress,predict)
            n_samples: Number of samples to boostrap
            **kwargs: Additional keyword arguments to pass to the analysis method

        Returns:
            output: a dictionary of prediction parameters

        """

        # Notes:
        # might want to add options for [studentized, percentile, bias corrected, bias corrected accelerated] methods
        # Regress method is pretty convoluted and slow, this should be optimized better.  

        def summarize_bootstrap(sample):
            """ Calculate summary of bootstrap samples

            Args:
                sample: Brain_Data instance of samples

            Returns:
                output: dictionary of Brain_Data summary images
                
            """

            output = {}

            # Calculate SE of bootstraps
            wstd = sample.std()
            wmean = sample.mean()
            wz = deepcopy(wmean)
            wz.data = wmean.data / wstd.data
            wp = deepcopy(wmean)
            wp.data = 2*(1-norm.cdf(np.abs(wz.data)))

            # Create outputs
            output['Z'] = wz
            output['p'] = wp
            output['mean'] = wmean
            if save_weights:
                output['samples'] = sample

            return output

        analysis_list = ['mean','std','regress','predict']
        
        if analysis_type in analysis_list:
            data_row_id = range(self.shape()[0])
            sample = self.empty()
            if analysis_type == 'regress':  # initialize dictionary of empty betas
                beta={}
                for i in range(self.X.shape[1]):
                    beta['b' + str(i)] = self.empty()
            for i in range(n_samples):
                this_sample = np.random.choice(data_row_id, size=len(data_row_id), replace=True) # gives sampled row numbers
                if analysis_type == 'mean':
                    sample = sample.append(self[this_sample].mean())
                elif analysis_type == 'std':
                    sample = sample.append(self[this_sample].std())
                elif analysis_type == 'regress':
                    out = self[this_sample].regress()
                    # Aggregate bootstraps for each beta separately
                    for i, b in enumerate(beta.iterkeys()):
                        beta[b]=beta[b].append(out['beta'][i])
                elif analysis_type == 'predict':
                    if 'algorithm' in kwargs:
                        algorithm = kwargs['algorithm']
                        del kwargs['algorithm']
                    else:
                        algorithm='ridge'
                    if 'cv_dict' in kwargs:
                        cv_dict = kwargs['cv_dict']
                        del kwargs['cv_dict']
                    else:
                        cv_dict=None
                    if 'plot' in kwargs:
                        plot=kwargs['plot']
                        del kwargs['plot']
                    else:
                        plot=False
                    out = self[this_sample].predict(algorithm=algorithm,cv_dict=cv_dict, plot=plot,**kwargs)
                    sample = sample.append(out['weight_map'])
        else:
            raise ValueError('The analysis_type you specified (%s) is not yet implemented.' % (analysis_type))

        # Save outputs
        if analysis_type == 'regress':
            reg_out={}
            for i, b in enumerate(beta.iterkeys()):
                reg_out[b] = summarize_bootstrap(beta[b])
            output = {}
            for b in reg_out.iteritems():
                for o in b[1].iteritems():
                    if o[0] in output:
                        output[o[0]] = output[o[0]].append(o[1])
                    else:
                        output[o[0]]=o[1]
        else:
            output = summarize_bootstrap(sample)
        return output

    def apply_mask(self, mask):
        """ Mask Brain_Data instance

        Args:
            mask: mask (Brain_Data or nifti object)
            
        """

        if isinstance(mask,Brain_Data):
            mask = mask.to_nifti() # convert to nibabel
        if not isinstance(mask, nib.Nifti1Image):
            if type(mask) is str:
                if os.path.isfile(mask):
                    mask = nib.load(mask)
                # Check if the mask needs to be resampled into the Brain_Data mask space
                if not ((self.mask.get_affine() == mask.get_affine()).all() and self.mask.shape[0:3] == mask.shape[0:3]):
                    mask = resample_img(mask,target_affine=self.mask.get_affine(),target_shape=self.mask.shape)
            else:
                raise ValueError("Mask is not a nibabel instance, Brain_Data instance, or a valid file name.")

        masked = deepcopy(self)
        nifti_masker = NiftiMasker(mask_img=mask)
        masked.data = nifti_masker.fit_transform(self.to_nifti())
        if len(self.data.shape) > 2:
            masked.data = masked.data.squeeze()
        masked.nifti_masker = nifti_masker
        return masked

    def resample(self, target):
        """ Resample data into target space

        Args:
            self: Brain_Data instance
            target: Brain_Data instance of target space
        
        """ 

        raise NotImplementedError()

    def searchlight(self, ncores, process_mask=None, parallel_out=None, radius=3, walltime='24:00:00', \
        email=None, algorithm='svr', cv_dict=None, kwargs={}):
        
        if len(kwargs) == 0:
            kwargs['kernel']= 'linear'
        
        # new parallel job
        pbs_kwargs = {'algorithm':algorithm,\
                  'cv_dict':cv_dict,\
                  'predict_kwargs':kwargs}
        #cv_dict={'type': 'kfolds','n_folds': 5,'stratified':dat.Y}

        parallel_job = PBS_Job(self, parallel_out=parallel_out, process_mask=process_mask, radius=radius, kwargs=pbs_kwargs)

        # make and store data we will need to access on the worker core level
        parallel_job.make_searchlight_masks()
        cPickle.dump(parallel_job, open(os.path.join(parallel_out,"pbs_searchlight.pkl"), "w"))

        #make core startup script (python)
        parallel_job.make_startup_script("core_startup.py")
        
        # make email notification script (pbs)
        if type(email) is str:
            parallel_job.make_pbs_email_alert(email)

        # make pbs job submission scripts (pbs)
        for core_i in range(ncores):
            script_name = "core_pbs_script_" + str(core_i) + ".pbs"
            parallel_job.make_pbs_scripts(script_name, core_i, ncores, walltime) # create a script
            print "python " + os.path.join(parallel_out, script_name)
            os.system( "qsub " + os.path.join(parallel_out, script_name) ) # run it on a core

    def extract_roi(self, mask, method='mean'):
        """ Extract activity from mask

        Args:
            mask: nibabel mask can be binary or numbered for different rois
            method: type of extraction method (default=mean)    

        Returns:
            out: mean within each ROI across images
        
        """

        if not isinstance(mask, nib.Nifti1Image):
            raise ValueError('Make sure mask is a nibabel instance')

        if len(np.unique(mask.get_data())) == 2:
            all_mask = Brain_Data(mask)
            if method == 'mean':
                out = np.mean(self.data[:,np.where(all_mask.data)].squeeze(),axis=1)
        elif len(np.unique(mask.get_data())) > 2:
            all_mask = expand_mask(mask)
            out = []
            for i in range(all_mask.shape()[0]):
                if method == 'mean':
                    out.append(np.mean(self.data[:,np.where(all_mask[i].data)].squeeze(),axis=1))
            out = np.array(out)

        return out

    def icc(self, icc_type='icc2'):
        ''' Calculate intraclass correlation coefficient for data within Brain_Data class
        
        ICC Formulas are based on:
        Shrout, P. E., & Fleiss, J. L. (1979). Intraclass correlations: uses in assessing rater reliability. 
        Psychological bulletin, 86(2), 420.

        icc1:  x_ij = mu + beta_j + w_ij
        icc2/3:  x_ij = mu + alpha_i + beta_j + (ab)_ij + epsilon_ij

        Code modified from nipype algorithms.icc
        https://github.com/nipy/nipype/blob/master/nipype/algorithms/icc.py
        
        Args:
            icc_type: type of icc to calculate (icc1: voxel random effect, icc2: voxel and column random effect, icc3: voxel and column fixed effect)

        Returns:
            ICC: intraclass correlation coefficient

        '''
        
        Y = self.data.T
        [n, k] = Y.shape

        # Degrees of Freedom
        dfc = k - 1
        dfe = (n - 1) * (k-1)
        dfr = n - 1

        # Sum Square Total
        mean_Y = np.mean(Y)
        SST = ((Y - mean_Y) ** 2).sum()

        # create the design matrix for the different levels
        x = np.kron(np.eye(k), np.ones((n, 1)))  # sessions
        x0 = np.tile(np.eye(n), (k, 1))  # subjects
        X = np.hstack([x, x0])

        # Sum Square Error
        predicted_Y = np.dot(np.dot(np.dot(X, np.linalg.pinv(np.dot(X.T, X))), X.T), Y.flatten('F'))
        residuals = Y.flatten('F') - predicted_Y
        SSE = (residuals ** 2).sum()

        MSE = SSE / dfe

        # Sum square column effect - between columns
        SSC = ((np.mean(Y, 0) - mean_Y) ** 2).sum() * n
        MSC = SSC / dfc / n
        
        # Sum Square subject effect - between rows/subjects
        SSR = SST - SSC - SSE
        MSR = SSR / dfr

        if icc_type == 'icc1':
            # ICC(1,1) = (mean square subject - mean square within) / (mean square subject + (k-1)*mean square within)
            # ICC = (MSR - MSRW) / (MSR + (k-1) * MSRW)
            raise NotImplementedError("This method isn't implemented yet.")
            
        elif icc_type == 'icc2':
            # ICC(2,1) = (mean square subject - mean square error) / (mean square subject + (k-1)*mean square error + k*(mean square columns - mean square error)/n)
            ICC = (MSR - MSE) / (MSR + (k-1) * MSE + k * (MSC - MSE) / n)

        elif icc_type =='icc3':
            # ICC(3,1) = (mean square subject - mean square error) / (mean square subject + (k-1)*mean square error)
            ICC = (MSR - MSE) / (MSR + (k-1) * MSE)

        return ICC
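A hypothetical usage sketch for the class above (file names and labels are placeholders, not taken from the original project):

import pandas as pd

# Load a 4D beta image and per-image ratings into a Brain_Data instance
dat = Brain_Data(data='betas_4D.nii.gz',
                 Y=pd.read_csv('ratings.csv', header=None, index_col=None))

mean_img = dat.mean()                              # voxelwise mean image
stats = dat.ttest(threshold_dict={'unc': .001})    # uncorrected one-sample t-test
results = dat.predict(algorithm='svr',
                      cv_dict={'type': 'kfolds', 'n_folds': 5},
                      plot=False, **{'kernel': 'linear'})
results['weight_map'].write('svr_weight_map.nii.gz')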
Example #46
0
def seed_based_connectivity(
    ts,
    seed_mask,
    anat_path="~/ni_data/templates/DSURQEc_40micron_masked.nii.gz",
    brain_mask="~/ni_data/templates/DSURQEc_200micron_mask.nii.gz",
    smoothing_fwhm=.3,
    detrend=True,
    standardize=True,
    low_pass=0.25,
    high_pass=0.004,
    tr=1.,
    save_as="",
):
    """Return a NIfTI containing z scores for connectivity to a defined seed region

	Parameters
	----------

	ts : string
	Path to the 4D NIfTI timeseries file on which to perform the connectivity analysis.

	seed_mask : string
	Path to a 3D NIfTI-like binary mask file designating the seed region.

	smoothing_fwhm : float, optional
	Spatial smoothing kernel, passed to the NiftiMasker.

	detrend : bool, optional
	Whether to detrend the data, passed to the NiftiMasker.

	standardize : bool, optional
	Whether to standardize the data (make mean 0. and variance 1.), passed to the NiftiMasker.

	low_pass : float, optional
	Low-pass cut-off, passed to the NiftiMasker.

	high_pass : float, optional
	High-pass cut-off, passed to the NiftiMasker.

	tr : float, optional
	Repetition time, passed to the NiftiMasker.

	save_as : string, optional
	Path to save a NIfTI of the functional connectivity zstatistic to.

	Notes
	-----

	Contains sections of code copied from the nilearn examples:
	http://nilearn.github.io/auto_examples/03_connectivity/plot_seed_to_voxel_correlation.html#sphx-glr-auto-examples-03-connectivity-plot-seed-to-voxel-correlation-py
	"""

    anat_path = path.abspath(path.expanduser(anat_path))
    brain_mask = path.abspath(path.expanduser(brain_mask))
    seed_mask = path.abspath(path.expanduser(seed_mask))
    save_as = path.abspath(path.expanduser(save_as))
    ts = path.abspath(path.expanduser(ts))

    seed_masker = NiftiMasker(mask_img=seed_mask,
                              smoothing_fwhm=smoothing_fwhm,
                              detrend=detrend,
                              standardize=standardize,
                              low_pass=low_pass,
                              high_pass=high_pass,
                              t_r=tr,
                              memory='nilearn_cache',
                              memory_level=1,
                              verbose=0)
    brain_masker = NiftiMasker(mask_img=brain_mask,
                               smoothing_fwhm=smoothing_fwhm,
                               detrend=detrend,
                               standardize=standardize,
                               low_pass=low_pass,
                               high_pass=high_pass,
                               t_r=tr,
                               memory='nilearn_cache',
                               memory_level=1,
                               verbose=0)
    seed_time_series = seed_masker.fit_transform(ts, ).T
    seed_time_series = np.mean(seed_time_series, axis=0)
    brain_time_series = brain_masker.fit_transform(ts, )

    seed_based_correlations = np.dot(
        brain_time_series.T, seed_time_series) / seed_time_series.shape[0]
    try:
        print("seed-based correlation shape: (%s, %s)" %
              seed_based_correlations.shape)
    except TypeError:
        print("seed-based correlation shape: (%s, )" %
              seed_based_correlations.shape)
    print("seed-based correlation: min = %.3f; max = %.3f" %
          (seed_based_correlations.min(), seed_based_correlations.max()))

    seed_based_correlations_fisher_z = np.arctanh(seed_based_correlations)
    print(
        "seed-based correlation Fisher-z transformed: min = %.3f; max = %.3f" %
        (seed_based_correlations_fisher_z.min(),
         seed_based_correlations_fisher_z.max()))

    seed_based_correlation_img = brain_masker.inverse_transform(
        seed_based_correlations_fisher_z.T)

    if save_as:
        seed_based_correlation_img.to_filename(save_as)

    return seed_based_correlation_img
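A hypothetical call to the function above (the file paths are placeholders):

zstat_img = seed_based_connectivity(
    ts='~/ni_data/func/sub-01_task-rest_bold.nii.gz',
    seed_mask='~/ni_data/masks/dr_seed.nii.gz',
    save_as='~/ni_data/fc/sub-01_dr_connectivity_z.nii.gz',
    )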
Example #47
0
File: test.py Project: FF120/python
#=================== Prepare labels and features ============================================

os.chdir(root)
label_path = root + r'\design\label.npy'
empty_tr_path = root + r'\design\empty_tr.npy'
mask_path = root + r'\design\mask\mask.img'
func_path = root + r'\Sub001\wBoldImg4D_sub001.nii'
label = np.load(label_path)
empty_tr = np.load(empty_tr_path)

nifti_masker = NiftiMasker(mask_img=mask_path,
                           standardize=True,
                           memory="nilearn_cache",
                           memory_level=1)
X = nifti_masker.fit_transform(func_path)
X = np.delete(X, empty_tr - 1, axis=0)
y = label

np.save('X.npy', X)
np.save('y.npy', y)

#================ Load labels and features ==========================

os.chdir(root)
X = np.load('X.npy')
y = np.load('y.npy')

#================ Feature visualization ==========================
XX_show = XX1
mean = []
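The visualization block above is truncated. As a hedged follow-up (an assumption, not from the original project), the saved feature matrix and labels can be fed straight into a cross-validated linear SVC:

from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

clf = SVC(kernel='linear')
scores = cross_val_score(clf, X, y, cv=5)
print('mean CV accuracy: %.3f' % scores.mean())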
Example #48
0
from umap.umap_ import UMAP

from dyneusr import DyNeuGraph
from dyneusr.tools import visualize_mapper_stages
from dyneusr.mapper.utils import optimize_dbscan



# Fetch dataset, extract time-series from ventral temporal (VT) mask
dataset = fetch_haxby()
masker = NiftiMasker(
    dataset.mask_vt[0], 
    standardize=True, detrend=True, smoothing_fwhm=4.0,
    low_pass=0.09, high_pass=0.008, t_r=2.5,
    memory="nilearn_cache")
X = masker.fit_transform(dataset.func[0])

# Encode labels as integers
df = pd.read_csv(dataset.session_target[0], sep=" ")
target, labels = pd.factorize(df.labels.values)
y = pd.DataFrame({l:(target==i).astype(int) for i,l in enumerate(labels)})

# Extract sessions 4-5
mask_sessions = df.chunks.add(1).isin([4, 5])
X = X[mask_sessions]
y = y.loc[mask_sessions, :]
target = target[mask_sessions]



# Generate a shape graph using KeplerMapper
haxby_dataset = datasets.fetch_haxby(subjects=[2])

# print basic information on the dataset
print('Mask nifti image (3D) is located at: %s' % haxby_dataset.mask)
print('Functional nifti image (4D) is located at: %s' % haxby_dataset.func[0])

##############################################################################
# Mask data
mask_filename = haxby_dataset.mask
from nilearn.input_data import NiftiMasker
nifti_masker = NiftiMasker(
    smoothing_fwhm=8,
    mask_img=mask_filename,
    memory='nilearn_cache', memory_level=1)  # cache options
func_filename = haxby_dataset.func[0]
fmri_masked = nifti_masker.fit_transform(func_filename)

##############################################################################
# Restrict to faces and houses
import numpy as np
labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
conditions = labels['labels']
categories = np.unique(conditions)
conditions_encoded = np.zeros_like(conditions)
for c, category in enumerate(categories):
    conditions_encoded[conditions == category] = c
sessions = labels['chunks']
condition_mask = np.logical_or(conditions == b'face', conditions == b'house')
conditions_encoded = conditions_encoded[condition_mask]
fmri_masked = fmri_masked[condition_mask]
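The Mapper step announced by the "Generate a shape graph using KeplerMapper" comment is not actually shown in this snippet. The following is a hedged sketch of how the imported pieces (UMAP, DyNeuGraph) are typically wired together with KeplerMapper on the `X` and `y` extracted from sessions 4-5; the parameters are illustrative assumptions, and a plain scikit-learn DBSCAN stands in for dyneusr's `optimize_dbscan` helper.

import kmapper as km
from sklearn.cluster import DBSCAN

mapper = km.KeplerMapper(verbose=1)
lens = mapper.fit_transform(X, projection=UMAP(n_components=2, random_state=1))
graph = mapper.map(lens, X,
                   cover=km.Cover(15, 0.5),                  # 15 cubes, 50% overlap
                   clusterer=DBSCAN(eps=30., min_samples=3))

# Wrap the shape graph for interactive visualization
dG = DyNeuGraph(G=graph, y=y)
dG.visualize('dyneusr_haxby_output.html')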
Example #50
0
def roi_based(
    beta_filename=None,
    events_filename=None,
    ts_filename=None,
    design_filename=None,
    substitutions={},
    ax=None,
    design_color="c",
    ts_color="r",
    flip=False,
    design_len=None,
    melodic_hit=None,
    plot_design_regressors=[0, 1, 2],
    roi=None,
    save_as='',
    scale_design=1,
    ylabel='Arbitrary Units',
):
    """Plot timecourses and design for measurements. should be deprecated in favour of multi.

	Parameters
	----------

	beta_filename : str, optional
		Path to beta (a.k.a. COPE) file, which represents the loading of the design matrix to voxelwise timecourses.
	events_filename : str, optional
		Path to events file, which conforms to the BIDS specification and contains a tab-separated table of stimulation events.
	ts_filename : str, optional
		Path to NIfTI timeseries (4D file) which contains the voxelwise timecourses, as they have been passed on to the GLM.
		It is very important that this is the exact same file which is forwarded to the GLM, as a timecourse file e.g. not subjected to a bandpass filter will not visually match the beta loading obtained by fitting a GLM to the same timecourse file after it was subjected to a bandpass filter.
	design_filename : str, optional
		Path to a mat file containing an ascii matrix for design, as outputted by FSL's `feat_model`.
	substitutions : dict, optional
		Dictionary which, if provided will be applied to formattable Python strings passed to the `beta_filename`, `events_filename`, `ts_filename`, and `design_filename` parameters (e.g. "part_of_path-{replace_this}-other_part_of_path") in order to replace occurrences of each key with its respective value pair.
	flip : bool, optional
		Whether to flip the plot by 90 degrees (plotting in "portrait" rather than "landscape" mode, as it were).
	roi : str, optional
		Path to a NIfTI mask file which determines based on what region to extract summaries from the voxelwise inputs.
	save_as : str, optional
		Path to save the plot under.
	ylabel : str, optional
		Text to place on the y label.
	"""

    if not ax:
        _, ax = plt.subplots(facecolor='#eeeeee', tight_layout=True)

    if roi:
        if isinstance(roi, str):
            roi = path.abspath(path.expanduser(roi))
            roi = nib.load(roi)
        if ts_filename:
            ts_file = path.expanduser(ts_filename.format(**substitutions))
            ts_file = nib.load(ts_file)
            masker = NiftiMasker(mask_img=roi, target_affine=ts_file.affine)
            final_time_series = masker.fit_transform(ts_file)
            final_time_series = final_time_series.T
            final_time_series = np.mean(final_time_series, axis=0)
            if flip:
                ax.plot(final_time_series,
                        np.arange(len(final_time_series)),
                        color=ts_color)
                ax.set_ylim([0, len(final_time_series)])
            else:
                ax.plot(final_time_series, color=ts_color)
                ax.set_xlim([0, len(final_time_series)])
    else:
        if ts_filename:
            ts_file = path.expanduser(ts_filename.format(**substitutions))
            ts_file = nib.load(ts_file)
            final_time_series = ts_file.get_data()
            final_time_series = np.mean(final_time_series, axis=0)
            final_time_series = np.mean(final_time_series, axis=0)
            final_time_series = np.mean(final_time_series, axis=0)
            if flip:
                ax.plot(final_time_series,
                        np.arange(len(final_time_series)),
                        color=ts_color)
                ax.set_ylim([0, len(final_time_series)])
            else:
                ax.plot(final_time_series, color=ts_color)
                ax.set_xlim([0, len(final_time_series)])

    if design_filename:
        design_file = path.expanduser(design_filename.format(**substitutions))
        design_df = pd.read_csv(design_file,
                                skiprows=5,
                                sep="\t",
                                header=None,
                                index_col=False)
        if beta_filename and roi:
            beta_file = path.expanduser(beta_filename.format(**substitutions))
            beta_file = nib.load(beta_file)
            masker = NiftiMasker(mask_img=roi, target_affine=beta_file.affine)
            roi_betas = masker.fit_transform(beta_file).T
            design_df = design_df * np.mean(roi_betas)
        for i in plot_design_regressors:
            regressor = design_df[[i]].values.flatten()
            if flip:
                ax.plot(regressor.T * scale_design,
                        np.arange(len(regressor)),
                        lw=rcParams['lines.linewidth'] * 2,
                        color=design_color)
            else:
                ax.plot(regressor * scale_design,
                        lw=rcParams['lines.linewidth'] * 2,
                        color=design_color)
        if flip:
            ax.set_ylim([0, len(regressor)])
        else:
            ax.set_xlim([0, len(regressor)])

    if events_filename:
        events_file = path.expanduser(events_filename.format(**substitutions))
        events_df = pd.read_csv(events_file, sep="\t")
        for d, o in zip(events_df["duration"], events_df["onset"]):
            d = round(d)
            o = round(o)
            if flip:
                ax.axhspan(o, o + d, facecolor="cyan", alpha=0.15)
            else:
                ax.axvspan(o, o + d, facecolor="cyan", alpha=0.15)
        if design_len:
            if flip:
                ax.set_ylim([0, design_len])
            else:
                ax.set_xlim([0, design_len])

    # Very deprecated, we keep this around since it is not yet clear whether we will remove or reimplement this feature.
    if melodic_hit:
        pass
        melodic_file = "~/ni_data/ofM.dr/20151208_182500_4007_1_4/melo10/report/t4.txt"
        melodic = np.loadtxt(melodic_file)
        if flip:
            melodic = melodic.T
        ax.plot(melodic)

    if flip:
        ax.invert_yaxis()
        plt.xticks(rotation=90)
        ax.locator_params(nbins=5, axis='x')
        ax.set_ylabel('Time [TR]',
                      rotation=270,
                      fontsize="smaller",
                      va="center")
    else:
        ax.set_ylabel(ylabel)
        ax.set_xlabel('Time [TR]')

    if save_as:
        save_as = path.abspath(path.expanduser(save_as))
        plt.savefig(save_as)
    return ax
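A hypothetical call to roi_based (the paths and the substitution key are placeholders):

ax = roi_based(
    ts_filename='~/ni_data/func/{subject}_bold.nii.gz',
    beta_filename='~/ni_data/glm/{subject}_cope1.nii.gz',
    design_filename='~/ni_data/glm/{subject}_design.mat',
    events_filename='~/ni_data/func/{subject}_events.tsv',
    roi='~/ni_data/masks/dr_mask.nii.gz',
    substitutions={'subject': 'sub-01'},
    save_as='~/plots/sub-01_roi_timecourse.png',
    )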
# print basic information on the dataset
print('First subject anatomical nifti image (3D) is at: %s' %
      nyu_dataset.anat_anon[0])
print('First subject functional nifti image (4D) is at: %s' %
      nyu_dataset.func[0])  # 4D data

### Preprocess ################################################################
from nilearn.input_data import NiftiMasker

# This is resting-state data: the background has not been removed yet,
# thus we need to use mask_strategy='epi' to compute the mask from the
# EPI images
masker = NiftiMasker(smoothing_fwhm=8, memory='nilearn_cache', memory_level=1,
                     mask_strategy='epi', standardize=False)
data_masked = masker.fit_transform(func_filename)

# Concatenate all the subjects
# fmri_data = np.concatenate(data_masked, axis=1)
fmri_data = data_masked


### Apply ICA #################################################################

from sklearn.decomposition import FastICA
n_components = 20
ica = FastICA(n_components=n_components, random_state=42)
components_masked = ica.fit_transform(data_masked.T).T

# Normalize estimated components, for thresholding to make sense
components_masked -= components_masked.mean(axis=0)
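A hedged follow-up sketch (an assumption, mirroring the usual nilearn ICA example): scale and threshold the component loadings, then project them back into brain space for display.

import numpy as np
from nilearn import plotting
from nilearn.image import index_img

# Scale to unit variance so the 0.8 threshold is comparable across components
components_masked /= components_masked.std(axis=0)
components_masked[np.abs(components_masked) < 0.8] = 0

# Un-mask the component maps into a 4D image and plot the first component
component_imgs = masker.inverse_transform(components_masked)
plotting.plot_stat_map(index_img(component_imgs, 0), title='IC 0')
plotting.show()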
Example #52
0
 def _apply_mask_to_imgs(self, imgs, **kwargs):
     masker = NiftiMasker(mask_img=self.load_mask(), **kwargs)
     return (masker.fit_transform(img) for img in imgs)
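A hypothetical usage sketch for the helper above (assuming `analysis` is an instance of the class it belongs to, with a `load_mask()` method as used in the body, and `func_imgs` a placeholder list of NIfTI images):

masked_arrays = analysis._apply_mask_to_imgs(func_imgs, standardize=True)
for arr in masked_arrays:   # the helper returns a generator, so masking is lazy
    print(arr.shape)        # (n_volumes, n_voxels) for each input image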