Example #1
def get_masker(mask_img=None):
    if mask_img is None:
        mask_img = load_mni152_brain_mask()
    masker = input_data.NiftiMasker(mask_img=mask_img).fit()
    return masker
##########################################################################
# Then we extract the mean time series within the seed region while
# regressing out the confounds that
# can be found in the dataset's csv file
seed_time_series = seed_masker.fit_transform(func_filename,
                                             confounds=[confound_filename])

##########################################################################
# Next, we can proceed similarly for the **brain-wide voxel-wise time
# series**, using :class:`nilearn.input_data.NiftiMasker` with the same input
# arguments as in the seed_masker in addition to smoothing with a 6 mm kernel
brain_masker = input_data.NiftiMasker(smoothing_fwhm=6,
                                      detrend=True,
                                      standardize=True,
                                      low_pass=0.1,
                                      high_pass=0.01,
                                      t_r=2.,
                                      memory='nilearn_cache',
                                      memory_level=1,
                                      verbose=0)

##########################################################################
# Then we extract the brain-wide voxel-wise time series while regressing
# out the confounds as before
brain_time_series = brain_masker.fit_transform(func_filename,
                                               confounds=[confound_filename])

##########################################################################
# We can now inspect the extracted time series. Note that the **seed time
# series** is an array with shape (n_volumes, 1), while the
# **brain time series** is an array with shape (n_volumes, n_voxels).
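
##########################################################################
# A hedged sketch of such an inspection (both arrays come from the maskers
# above; matplotlib is assumed to be available)
print('Seed time series shape: (%s, %s)' % seed_time_series.shape)
print('Brain time series shape: (%s, %s)' % brain_time_series.shape)

import matplotlib.pyplot as plt
plt.plot(seed_time_series)
plt.title('Seed time series')
plt.xlabel('Scan number')
plt.ylabel('Normalized signal')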
Example #3


def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    new_cmap = colors.LinearSegmentedColormap.from_list(
        "trunc({n},{a:.2f},{b:.2f})".format(n=cmap.name, a=minval, b=maxval),
        cmap(np.linspace(minval, maxval, n)),
    )
    return new_cmap


from nilearn import image, datasets

mean_img = image.load_img("../data/statistical_map.nii")

masker = input_data.NiftiMasker(mask_img=image.resample_to_img(
    datasets.load_mni152_brain_mask(), mean_img, interpolation="nearest"))

# plot the image
masked_img = masker.fit_transform(mean_img)
unmasked_img = masker.inverse_transform(masked_img)
display = plotting.plot_glass_brain(
    unmasked_img,
    threshold=0,
    cmap=plt.cm.viridis,
    colorbar=False,
    display_mode="xz",
    axes=ax_glass_brain,
)
# ax_glass_brain.text(1, 1, '(a)', fontweight='bold')
ax1 = fig.add_axes([0.05, 0.12, 0.4, 0.03])
cmap = truncate_colormap(plt.cm.viridis, minval=0.5)
part_thr = 20

root_p = pal.Path(__file__).resolve().parents[2] / 'data'
pheno_p = root_p / 'pheno/ABIDE1_Pheno_PSM_matched_minimum_10.tsv'
mask_p = root_p / 'ATLAS/MIST/Parcellations/MIST_mask_nocereb.nii.gz'
stab_p = root_p / 'processed/stability/abide_1/abide_1_subtype_stability_mist_20_core_20_within_99.npz'
sca_p = root_p / f'preprocessed/seed_maps/abide_1/MIST_{scale}'
sca_t = f'sub_{{}}_ses_{{}}_run{{}}_mist_{scale}_nocereb.npy'
# Output
out_d = root_p / 'processed/stability/abide_1'
out_p = out_d / f'abide_1_subtype_map_stability_mist_{scale}_core_{part_thr:d}_within_{dist_thr*100:.0f}.npy'
if not out_d.is_dir():
    out_d.mkdir()

mask_i = nib.load(str(mask_p))
masker = nii.NiftiMasker(mask_img=mask_i)
masker.fit()
stab = np.load(stab_p)
pheno = pd.read_csv(pheno_p, sep='\t')
seed_paths = [
    sca_p / sca_t.format(row['SUB_ID'], row['session'], row['run'])
    for rid, row in pheno.iterrows()
]
subject_stack = np.array([np.load(p) for p in seed_paths])

partitions = stab['partitions'][()]
train_indices = stab['train_idx']

n_samples = pheno.shape[0]
n_boot = len(partitions.keys())
n_networks = subject_stack.shape[2]
##### Step 1 #####
########################################
## Here we have two "seeds" in the thalamus from Aaron whose FC with the whole brain we would like to calculate.
# This Sprenger study got it right, placing the thalamic hot spot at −14/−23/1 and −14/−23/0: https://academic.oup.com/brain/article/135/8/2536/305095
seed1_coords = [(-14, -23, 1)]
seed2_coords = [(-14, -23, 0)]

# Always double-check the coordinates.
#nilearn.plotting.plot_anat(cut_coords=seed1_coords[0])
#plt.show()
#nilearn.plotting.plot_anat(cut_coords=seed2_coords[0])
#plt.show()

seed1_masker = input_data.NiftiSpheresMasker(seed1_coords, radius=5)
seed2_masker = input_data.NiftiSpheresMasker(seed2_coords, radius=5)
brain_masker = input_data.NiftiMasker()

########################################
##### Step 2 loop through functional files for MGH subjects
########################################
#get list of all subjects
subjects = pd.read_csv('/home/kahwang/bin/example_graph_pipeline/MGH_Subjects', names=['ID'])['ID'].values
#input_files = glob.glob('/data/backed_up/shared/MGH/MGH/*/MNINonLinear/rfMRI_REST.nii.gz')

# a function to run seed FC and save outputs to a folder
def run_seed_fc(s):
	try:
		file = '/data/backed_up/shared/MGH/MGH/%s/MNINonLinear/rfMRI_REST.nii.gz' %s
		########################################
		##### Step 3 extract seed time-series from the two coordinates and whole brain
		########################################
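		# A hedged sketch of how Step 3 might continue, reusing the maskers
		# defined above (assumes numpy is imported as np); with
		# standardize=True on the maskers, the scaled dot products below
		# would be Pearson correlations
		seed1_ts = seed1_masker.fit_transform(file)
		seed2_ts = seed2_masker.fit_transform(file)
		brain_ts = brain_masker.fit_transform(file)
		seed1_map = np.dot(brain_ts.T, seed1_ts) / seed1_ts.shape[0]
		seed2_map = np.dot(brain_ts.T, seed2_ts) / seed2_ts.shape[0]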
# Remove the rest condition, it is not very interesting
non_rest = conditions != b'rest'
conditions = conditions[non_rest]
y = y[non_rest]

# Get the labels of the numerical conditions represented by the vector y
unique_conditions, order = np.unique(conditions, return_index=True)
# Sort the conditions by the order of appearance
unique_conditions = unique_conditions[np.argsort(order)]

### Loading step ##############################################################
# For decoding, standardizing is often very important
nifti_masker = input_data.NiftiMasker(mask_img=mask_filename,
                                      standardize=True,
                                      detrend=True,
                                      sessions=session,
                                      smoothing_fwhm=None,
                                      memory="nilearn_cache",
                                      memory_level=1)
X = nifti_masker.fit_transform(func_filename)
X = X[non_rest]
session = session[non_rest]

# region-level analysis
n_perm = 10000
mask = nifti_masker.mask_img_.get_data()
atlas_labels = atlas.get_data()[mask > 0]

### Statistical scores ###################################################
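
# A hedged sketch of one way the statistical scores could be computed, using
# nilearn's permuted_ols on the masked data X against a binary encoding of
# the first condition (the exact contrast chosen here is an assumption)
from sklearn.preprocessing import LabelBinarizer
from nilearn.mass_univariate import permuted_ols

y_onehot = LabelBinarizer().fit_transform(conditions)
neg_log_pvals, t_scores, _ = permuted_ols(
    y_onehot[:, 0].reshape(-1, 1),  # first condition vs. the rest
    X,                              # masked voxel data from above
    n_perm=n_perm)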

import matplotlib.pyplot as plt

if __name__ == "__main__":

    data = sys.argv[1]
    oscdir = sys.argv[2]
    transientdir = sys.argv[3]
    img = image.load_img(data)

    oscmask = op.join(oscdir, "stats", "zfstat1.nii.gz")
    transientmask = op.join(transientdir, "stats", "zfstat1.nii.gz")

    oscmask_ = niutils.bin_img(oscmask, threshold=3.1)
    transientmask_ = niutils.bin_img(transientmask, threshold=3.1)

    oscmasker = input_data.NiftiMasker(mask_img=oscmask_, standardize="psc")
    transientmasker = input_data.NiftiMasker(mask_img=transientmask_,
                                             standardize="psc")

    oscts = oscmasker.fit_transform(img)
    transientts = transientmasker.fit_transform(img)

    fig, ax = plt.subplots()

    ax.text(
        0.01,
        0.95,
        f"{data}",
        transform=ax.transAxes,
        weight=600,
        horizontalalignment="left",
Example #8
embedder = manifold.SpectralEmbedding(1)
embedding_l = embedder.fit_transform(overall_means['l'])
embedding_r = embedder.fit_transform(overall_means['r'])

mask_l = op.join(derivatives, ds, 'mean_mask_mni_space', '_mask_stnl',
                 'sub-01_desc-stnl_mask_resampled_trans_merged_mean.nii.gz')

mask_r = op.join(derivatives, ds, 'mean_mask_mni_space', '_mask_stnr',
                 'sub-01_desc-stnr_mask_resampled_trans_merged_mean.nii.gz')

mask_l = image.math_img('mask > 0.3', mask=mask_l)
mask_r = image.math_img('mask > 0.3', mask=mask_r)

tmp = op.join(
    derivatives, ds, 'fmriprep', 'sub-01',
    'sub-01_task-randomdotmotion_run-01_space-T1w_desc-preproc_bold.nii.gz')

masker_l = input_data.NiftiMasker(mask_l)
masker_l.fit(tmp)
masker_r = input_data.NiftiMasker(mask_r)
masker_r.fit(tmp)

masker_l.inverse_transform(embedding_l.T).to_filename(
    op.join(derivatives, ds, 'embedding', 'group_desc-stnl_embedding.nii.gz'))

masker_r.inverse_transform(embedding_r.T).to_filename(
    op.join(derivatives, ds, 'embedding', 'group_desc-stnr_embedding.nii.gz'))

#if __name__ == '__main__':
#main('/home/shared/2018/subcortex/bias_task')
    / "derivatives"
    / "func_smooth-6mm"
    / subject
    / "func"
    / f"{subject}_task-heartbeat_run-1_space-MNI152NLin2009cAsym_desc-preproc-fwhm6mm_bold.nii.gz"
)

label = mask.name.split(os.sep)[-1].split(".")[0]
out_file = target_path / f"{subject}_task-heartbeat_run-1_desc-{label}_regressors.tsv"

if not out_file.is_file():
    label = mask.name.split(os.sep)[-1].split(".")[0]
    mask = str(mask)

    # load TR and dimension info
    with open(vol_path) as f:
        data = json.load(f)

    tr = data["RepetitionTime"]

    # extract signal
    seed_masker = input_data.NiftiMasker(mask, detrend=True, t_r=tr)
    seed_time_series = seed_masker.fit_transform(
        func_filename, confounds=str(confounds_path)
    )
    seed_time_series = seed_time_series.mean(axis=1)
    mean_seed = seed_time_series.mean()
    seed_time_series -= mean_seed  # mean centre
    np.savetxt(str(out_file), seed_time_series, fmt="%10.5f")
print(str(out_file))
Example #10
func_filenames = adhd_dataset.func  # list of 4D nifti files for each subject

mean_func_img = mean_img(func_filenames[0])

gm_msk = resample_to_img(source_img=gm_msk,
                         target_img=mean_func_img,
                         interpolation='nearest')
#affine_tar,
#interpolation='nearest', target_shape=sp.array([61,73,61]))
print('First functional nifti image (4D) is at: %s' % func_filenames[0])  # 4D data
nifti_masker = input_data.NiftiMasker(
    standardize=True,
    mask_img=gm_msk,  #  mask_strategy='epi', #
    memory="nilearn_cache",
    memory_level=2,
    smoothing_fwhm=4,
    detrend=True)
# Compute EPI based mask
nifti_masker.fit(func_filenames[0])

mask_img = nifti_masker.mask_img_
plot_roi(mask_img, mean_func_img, display_mode='y', cut_coords=4, title="Mask")
plt.show()
# Keep volumes 5 through 74 (70 time points) of the first subject
nii_img = index_img(func_filenames[0], slice(5, 75))
ref_mskd = nifti_masker.transform(nii_img).T
all_data = sp.zeros(
    (ref_mskd.shape[0], ref_mskd.shape[1], len(func_filenames)))
all_data[:, :, 0] = ref_mskd
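
# A hedged sketch of how the remaining subjects might be stacked, assuming
# every 4D file has at least 75 volumes (names follow the code above)
for subj_idx, func_file in enumerate(func_filenames[1:], start=1):
    nii_img = index_img(func_file, slice(5, 75))
    all_data[:, :, subj_idx] = nifti_masker.transform(nii_img).T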
Example #11
#data_df = pd.read_csv('../data/interim/csv/info_epi_zscored_df.csv',index_col=[0],header=0)
#
#
#
## In[4]:
#
#
#data_df.tail()
#
#

# ### define whole-brain masker

# In[5]:

masker = input_data.NiftiMasker(
    '%s/data/external/MNI152_T1_2mm_brain_mask.nii.gz' % supDir).fit()

#
## In[6]:
#
#
#plotting.plot_roi(masker.mask_img_);
#
#

# ### Flip image Left-Right

# In[7]:


def make_flip(fileName):
def mask_nil(img_data, affine, mask):
    from nilearn import input_data
    nimg = nib.Nifti1Image(img_data, affine)
    mskr = input_data.NiftiMasker(mask)
    mskd = mskr.fit_transform(nimg)
    return mskd, mskr
    def plot_t(self,
               fig,
               show_time_individual=False,
               show_time_average=False,
               ica_lookup=None,
               show_spectrum=False,
               show_time_group=False,
               coords=(0, 0, 0),
               significance_threshold=0.5,
               *args,
               **kwargs):
        # Determine Axes Layout
        if not (show_time_individual or show_time_average
                ) and not show_time_group and not show_spectrum:
            return  # no plot to render
        fig.clear()
        gs = gridspec.GridSpec(2, 5)
        if not (show_time_individual or
                show_time_average) and show_time_group and not show_spectrum:
            axgr = plt.subplot(gs[:, :])
        if not (show_time_individual
                or show_time_average) and show_time_group and show_spectrum:
            axgr, axps = plt.subplot(gs[:, 3:]), plt.subplot(gs[:, :3])
        if (show_time_individual or show_time_average
            ) and not show_time_group and not show_spectrum:
            axts = plt.subplot(gs[:, :])
        if (show_time_individual or
                show_time_average) and show_time_group and not show_spectrum:
            axts, axgr = plt.subplot(gs[:, 3:]), plt.subplot(gs[:, :3])
        if (show_time_individual
                or show_time_average) and show_time_group and show_spectrum:
            axts, axgr, axps = plt.subplot(gs[:, 3:]), plt.subplot(
                gs[0, :3]), plt.subplot(gs[1, :3])

        # Process Data & Plot
        dat = np.abs(self.gd['ica'][ica_lookup]['img'].get_data().astype(
            float)) > significance_threshold
        masked = image.new_img_like(self.gd['smri']['img'], dat.astype(int))
        if show_time_individual:
            try:
                seed_masker = input_data.NiftiSpheresMasker(
                    mask_img=masked,
                    seeds=[coords],
                    radius=0,
                    detrend=False,
                    standardize=False,
                    t_r=4.,
                    memory='nilearn_cache',
                    memory_level=1,
                    verbose=0)
                ind_ts = seed_masker.fit_transform(self.gd['fmri']['img'])
            except Exception:
                ind_ts = []
            plt.plot(ind_ts,
                     axes=axts,
                     label='Voxel (%d, %d, %d) Time-Series' %
                     (coords[0], coords[1], coords[2]))
            plt.xlabel('Time (s)')
            plt.ylabel('fMRI signal')
            axts.hold(True)
        if show_time_average:
            brain_masker = input_data.NiftiMasker(mask_img=masked,
                                                  t_r=4.,
                                                  memory='nilearn_cache',
                                                  memory_level=1,
                                                  verbose=0)

            ts = brain_masker.fit_transform(self.gd['fmri']['img'])
            ave_ts = np.mean(ts, axis=1)
            plt.plot(ave_ts, axes=axts, label="Average Signal")
            plt.xlabel('Time (s)')
            plt.ylabel('fMRI signal')
            axts.hold(False)
        if show_time_group:
            # TODO: Plot Group Logic
            pass
        if show_spectrum:
            # TODO: Plot Power Spectrum Logic
            pass
        plt.tight_layout(pad=0.1)
#    plt.show()
#    
#    break
#
#

# ## clean and z-score the timecourse of each voxel

# ### initalizing whole-brain masker objects

# In[10]:


masker = input_data.NiftiMasker(
    mask_img='%s/data/external/MNI152_T1_2mm_brain_mask.nii.gz'%supDir,
    smoothing_fwhm=6,
    detrend=True,
    standardize=True).fit()

#
## In[11]:
#
#
#plotting.plot_roi( masker.mask_img_ );
#
#

# ### extract data for one participant

# select the participant
#
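
# A hedged sketch of the extraction, using a hypothetical file name for the
# selected participant; `masker` was fit above

func_file = '%s/data/raw/sub-01_bold.nii.gz' % supDir  # hypothetical path
ts = masker.transform(func_file)  # shape: (n_volumes, n_voxels), cleaned and z-scored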
Example #15
"""
Created on Wed Nov  6 15:37:15 2019

@author: Or Duek
Global Correlation and Global Correlation regression
"""
import nilearn
from nilearn import input_data
import pandas as pd
import numpy as np

mask_file = '/home/oad4/scratch60/output_kpe1480_1468/fmriprep/sub-1468/ses-1/func/sub-1468_ses-1_task-rest_space-MNI152NLin6Asym_desc-brain_mask.nii.gz'  # set a file to create the same voxel map for all subjects

brain_masker = input_data.NiftiMasker(mask_img = mask_file,
        smoothing_fwhm=4,
        detrend=True, standardize=True,
        low_pass=0.1, high_pass=0.01, t_r=1.,
        memory='/home/oad4/scratch60/nilearn', memory_level=1, verbose=2)

#from nilearn.regions import Parcellations
#ward = Parcellations(method='ward', n_parcels=1000,
#                     standardize=True, detrend=True, smoothing_fwhm=2.,
#                     low_pass=0.1, high_pass=0.01, t_r=1.,
#                     memory='/media/Data/nilearn', memory_level=1,
#                     verbose=1)


def removeVars(confoundFile):
    # This method takes the CSV regressors file (from fMRIPrep) and selects a
    # few columns to use as confounds. You can change the selection.
    import pandas as pd
    import numpy as np
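    # A hedged sketch of how the selection might continue; the exact column
    # names depend on the fMRIPrep version and are assumptions here
    confound = pd.read_csv(confoundFile, sep='\t')  # fMRIPrep confounds are tab-separated
    chosen = ['csf', 'white_matter', 'trans_x', 'trans_y', 'trans_z',
              'rot_x', 'rot_y', 'rot_z']
    finalConf = confound[chosen].fillna(0)
    return np.array(finalConf)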
Example #16
    def __init__(self,
                 n_atoms,
                 t_r,
                 n_times_atom=60,
                 hrf_model='scaled_hrf',
                 hrf_atlas='havard',
                 atlas_kwargs=dict(),
                 n_scales='scale122',
                 prox_z='tv',
                 lbda_strategy='ratio',
                 lbda=0.1,
                 rho=2.0,
                 delta_init=1.0,
                 u_init_type='ica',
                 z_init=None,
                 prox_u='l1-positive-simplex',
                 deactivate_v_learning=False,
                 deactivate_z_learning=False,
                 max_iter=100,
                 random_state=None,
                 early_stopping=True,
                 eps=1.0e-5,
                 raise_on_increase=True,
                 cache_dir='__cache__',
                 nb_fit_try=1,
                 n_jobs=1,
                 verbose=0):
        # model hyperparameters
        self.t_r = t_r
        self.n_atoms = n_atoms
        self.hrf_model = hrf_model
        self.hrf_atlas = hrf_atlas
        self.n_scales = n_scales
        self.deactivate_v_learning = deactivate_v_learning
        self.deactivate_z_learning = deactivate_z_learning
        self.n_times_atom = n_times_atom
        self.prox_z = prox_z
        self.lbda_strategy = lbda_strategy
        self.lbda = lbda
        self.rho = rho
        self.delta_init = delta_init
        self.u_init_type = u_init_type
        self.z_init = z_init
        self.prox_u = prox_u

        # convergence parameters
        self.max_iter = max_iter
        self.random_state = random_state
        self.early_stopping = early_stopping
        self.eps = eps
        self.raise_on_increase = raise_on_increase

        # technical parameters
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.cache_dir = cache_dir
        self.nb_fit_try = nb_fit_try

        # HRF atlas
        if self.hrf_atlas == 'havard':
            self.mask_full_brain, self.atlas_rois = fetch_vascular_atlas()
        elif self.hrf_atlas == 'basc':
            n_scales_ = f"scale{int(n_scales)}"
            res = fetch_atlas_basc_2015(n_scales=n_scales_)
            self.mask_full_brain, self.atlas_rois = res
        elif callable(self.hrf_atlas):
            res = self.hrf_atlas(**atlas_kwargs)
            self.mask_full_brain, self.atlas_rois = res
        else:
            raise ValueError(f"hrf_atlas should belong to ['havard', 'basc', "
                             f"given-function], got {self.hrf_atlas}")
        self.hrf_rois = dict()

        # fMRI masker
        self.masker_ = input_data.NiftiMasker(mask_img=self.mask_full_brain,
                                              t_r=self.t_r,
                                              memory=self.cache_dir,
                                              memory_level=1,
                                              verbose=0)

        # model parameters
        self.z_hat_ = None
        self.u_hat_ = None
        self.a_hat_ = None

        # derived model parameters
        self.Dz_hat_ = None
        self.v_hat_ = None

        # auxiliary parameters
        self.roi_label_from_hrf_idx = None
        self.v_init_ = None
        self.lbda_ = lbda
        self.lobj_ = None
        self.ltime_ = None
Example #17
def main(study_dir, subject, roi, res_dir, blocks='combined'):
    # load task information
    vols = rsa.load_vol_info(study_dir, subject)

    if blocks == 'walk':
        events = vols.query('sequence_type == 1').copy()
    elif blocks == 'random':
        events = vols.query('sequence_type == 2').copy()
    elif blocks in ['combined', 'separate']:
        events = vols.query('sequence_type > 0').copy()
        if blocks == 'separate':
            # separately model trials in walk and random blocks
            n_item = events['trial_type'].nunique()
            events['trial_type'] = (events['trial_type'] +
                                    (events['sequence_type'] - 1) * n_item)
    else:
        raise ValueError(f'Invalid blocks option: {blocks}')

    # get mask image
    subject_dir = os.path.join(study_dir, f'tesser_{subject}')
    mask_image = os.path.join(subject_dir, 'anatomy', 'antsreg', 'data',
                              'funcunwarpspace', 'rois', 'mni',
                              f'{roi}.nii.gz')

    # load masked functional images
    runs = list(range(1, 7))
    bold_images = [
        os.path.join(subject_dir, 'BOLD', 'antsreg', 'data',
                     f'functional_run_{run}_bold_mcf_brain_corr_notemp.feat',
                     'filtered_func_data.nii.gz') for run in runs
    ]
    masker = input_data.NiftiMasker(mask_img=mask_image, standardize='zscore')
    image = np.vstack(
        [masker.fit_transform(bold_image) for bold_image in bold_images])

    # load confound files
    confound = {}
    for run in runs:
        confound_file = os.path.join(subject_dir, 'BOLD',
                                     f'functional_run_{run}', 'QA',
                                     'confound.txt')
        confound[run] = np.loadtxt(confound_file)

    # create full design matrix
    frame_times = np.arange(image.shape[0] / len(runs)) * 2
    n_ev = events['trial_type'].nunique()
    evs = np.arange(1, n_ev + 1)
    df_list = []
    for run in runs:
        df_run = fl.make_first_level_design_matrix(
            frame_times,
            events=events.query(f'run == {run}'),
            add_regs=confound[run])
        regs = df_run.filter(like='reg', axis=1).columns
        drifts = df_run.filter(like='drift', axis=1).columns
        columns = np.hstack((evs, drifts, ['constant'], regs))
        df_list.append(df_run.reindex(columns=columns))
    df_mat = pd.concat(df_list, axis=0)

    # with confounds included, the number of regressors varies by run.
    # Columns missing between runs are set to NaN
    df_mat.fillna(0, inplace=True)

    mat = df_mat.to_numpy()[:, :n_ev]
    nuisance = df_mat.to_numpy()[:, n_ev:]

    # run Bayesian RSA
    scan_onsets = np.arange(0, image.shape[0], image.shape[0] / len(runs))
    model = brsa.GBRSA()
    model.fit([image], [mat], nuisance=nuisance, scan_onsets=scan_onsets)

    # save results
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    var_names = [
        'U', 'L', 'C', 'nSNR', 'sigma', 'rho', 'beta', 'beta0', 'X0',
        'beta0_null', 'X0_null', 'n_nureg'
    ]
    results = {var: getattr(model, var + '_') for var in var_names}
    out_file = os.path.join(res_dir, f'sub-{subject}_brsa.npz')
    np.savez(out_file, **results)
Example #18
                         'resting-state-0', 'nakatomi1.feat',
                         'filtered_func_data.nii.gz')
        confound = join(parent_dir, s, 'session-0', 'resting-state',
                        'resting-state-0', 'nakatomi1.feat', 'mc',
                        'prefiltered_func_data_mcf.par')
        #hypothal_roi = join(roi_dir, s, 'roi', 'hypothal-func.nii.gz')
        hb_func = join(parent_dir, s, 'session-0', 'anatomical',
                       'anatomical-0', 'hb-func.nii.gz')
        hb_roi_qc = join(roi_dir, 'roi-qc', '{0}-hb-func.png'.format(s))
        mean_func = mean_img(fmri_file)
        plotting.plot_roi(hb_func, mean_func, output_file=hb_roi_qc)

        roi_masker = input_data.NiftiMasker(hb_func,
                                            detrend=False,
                                            standardize=True,
                                            t_r=3.,
                                            memory='nilearn_cache',
                                            memory_level=1,
                                            verbose=0)
        roi_timeseries = roi_masker.fit_transform(fmri_file,
                                                  confounds=confound)
        print('extracted hb timeseries')
        brain_masker = input_data.NiftiMasker(detrend=False,
                                              standardize=True,
                                              t_r=3.,
                                              smoothing_fwhm=3.,
                                              memory='nilearn_cache',
                                              memory_level=1,
                                              verbose=1)
        brain_timeseries = brain_masker.fit_transform(fmri_file,
                                                      confounds=confound)
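        # A hedged sketch of the likely next step (cf. the similar example
        # further below): average the habenula signal and correlate it with
        # every brain voxel (assumes numpy is imported as np)
        hb_ts = np.mean(roi_timeseries, axis=1)
        sbc_hb = np.dot(brain_timeseries.T, hb_ts) / hb_ts.shape[0]
        sbc_img = brain_masker.inverse_transform(sbc_hb.T)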
Example #19
tomoya_dir = '/Shared/lss_kahwang_hpc/data/Tomoya/'

mdtb_subs = [
    'sub-02', 'sub-04', 'sub-09', 'sub-14', 'sub-17', 'sub-19', 'sub-21',
    'sub-24', 'sub-26', 'sub-28', 'sub-31', 'sub-03', 'sub-06', 'sub-12',
    'sub-15', 'sub-18', 'sub-20', 'sub-22', 'sub-25', 'sub-27', 'sub-30'
]
tomoya_subs = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-06']

Schaefer400 = nib.load(
    '/Shared/lss_kahwang_hpc/ROIs/Schaefer2018_400Parcels_7Networks_order_FSLMNI152_2mm.nii.gz'
)
Schaefer400_masker = input_data.NiftiLabelsMasker(Schaefer400)
mni_thalamus_mask = nib.load(
    "/Shared/lss_kahwang_hpc/ROIs//mni_atlas/MNI_thalamus_2mm.nii.gz")
mni_thalamus_masker = input_data.NiftiMasker(mni_thalamus_mask)

mdtb_fcmats = np.zeros((2445, 400, len(mdtb_subs)))
for i, sub in enumerate(mdtb_subs):
    func_file = nib.load(mdtb_dir + "3dDeconvolve/" + sub +
                         "/FIRmodel_errts_block.nii.gz")

    cortical_ts = Schaefer400_masker.fit_transform(func_file)
    roi_ts = mni_thalamus_masker.fit_transform(
        func_file)  #roi_ts = data[np.nonzero(mni_thalamus_mask.get_fdata())]

    roi_ts = np.delete(roi_ts, np.where(roi_ts.mean(axis=1) == 0)[0], axis=0)
    cortical_ts = np.delete(cortical_ts,
                            np.where(cortical_ts.mean(axis=1) == 0)[0],
                            axis=0)
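    # A hedged sketch of the likely next step: z-score both arrays and take
    # the cross-product to fill the (2445 voxels x 400 parcels) FC matrix,
    # assuming both retain the same time points and no zero-variance series
    z_thal = (roi_ts - roi_ts.mean(axis=0)) / roi_ts.std(axis=0)
    z_cort = (cortical_ts - cortical_ts.mean(axis=0)) / cortical_ts.std(axis=0)
    mdtb_fcmats[:, :, i] = z_thal.T.dot(z_cort) / roi_ts.shape[0]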
Example #20
def seed_to_voxel_corr(func_filename,
                       confound_filename,
                       resultdir='.',
                       seed_coord=[(0, -52, 18)],
                       output_head='DMN',
                       maps_img='',
                       mask_img=None):
    #print(func_filename)
    #print(confound_filename)
    mask_or_seed = 0
    if maps_img == '':
        mask_or_seed = 1
        seed_masker = input_data.NiftiSpheresMasker(seed_coord,
                                                    radius=8,
                                                    detrend=True,
                                                    standardize=True,
                                                    mask_img=mask_img,
                                                    verbose=0)
        #memory='nilearn_cache', memory_level=1, verbose=0)
    else:
        mask_or_seed = 0
        seed_masker = input_data.NiftiMapsMasker(maps_img=[maps_img],
                                                 standardize=True,
                                                 mask_img=mask_img,
                                                 verbose=0)
        #memory='nilearn_cache',

    seed_time_series = seed_masker.fit_transform(func_filename,
                                                 confounds=[confound_filename])

    brain_masker = input_data.NiftiMasker(smoothing_fwhm=6,
                                          detrend=True,
                                          standardize=True,
                                          memory_level=1,
                                          verbose=0)

    brain_time_series = brain_masker.fit_transform(
        func_filename, confounds=[confound_filename])
    '''
    fig = plt.figure(figsize=(6,3), dpi=300)
    plt.plot(seed_time_series)
    plt.title('Seed time series')
    plt.xlabel('Scan number')
    plt.ylabel('Normalized signal')
    plt.tight_layout()
    fig.savefig(join(resultdir,'%s_curve.png' % output_head), bbox_inches='tight')
    '''

    seed_based_correlations = np.dot(
        brain_time_series.T, seed_time_series) / seed_time_series.shape[0]

    print("seed-based correlation shape: (%s, %s)" %
          seed_based_correlations.shape)
    print("seed-based correlation: min = %.3f; max = %.3f" %
          (seed_based_correlations.min(), seed_based_correlations.max()))

    seed_based_correlations_fisher_z = np.arctanh(seed_based_correlations)
    print(
        "seed-based correlation Fisher-z transformed: min = %.3f; max = %.3f" %
        (seed_based_correlations_fisher_z.min(),
         seed_based_correlations_fisher_z.max()))

    # Finally, we can transform the correlation array back to a Nifti image
    # object that we can save.
    seed_based_corr_img = brain_masker.inverse_transform(
        seed_based_correlations.T)
    seed_based_corr_img.to_filename(
        join(resultdir, '%s_z.nii.gz') % output_head)
    '''
    display = plotting.plot_stat_map(seed_based_corr_img, threshold=0.3,
                                     cut_coords=(0,0,0), draw_cross=False)
    if mask_or_seed == 1:
        display.add_markers(marker_coords=seed_coord, marker_color='g',
                            marker_size=20)
    # At last, we save the plot as pdf.
    #display.savefig(join(resultdir,'%s_z.pdf') % output_head)
    display.savefig(join(resultdir,'%s_z.jpg') % output_head)
    plt.close()
    '''
    return join(resultdir, '%s_z.nii.gz') % output_head
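
# A hedged usage sketch with hypothetical file names:
# out_nii = seed_to_voxel_corr('sub-01_bold.nii.gz', 'sub-01_confounds.csv',
#                              resultdir='results', output_head='DMN')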
Example #21
    def __init__(self,
                 n_atoms,
                 t_r,
                 n_times_atom=60,
                 hrf_model='scaled_hrf',
                 hrf_atlas='aal3',
                 atlas_kwargs=dict(),
                 n_scales='scale122',
                 prox_z='tv',
                 lbda_strategy='ratio',
                 lbda=0.1,
                 rho=2.0,
                 delta_init=1.0,
                 u_init_type='ica',
                 eta=10.0,
                 z_init=None,
                 prox_u='l1-positive-simplex',
                 deactivate_v_learning=False,
                 shared_spatial_maps=False,
                 deactivate_z_learning=False,
                 idx_first_vols=0,
                 standardize=False,
                 detrend=False,
                 low_pass=None,
                 high_pass=None,
                 max_iter=100,
                 random_state=None,
                 early_stopping=True,
                 eps=1.0e-5,
                 raise_on_increase=True,
                 cache_dir='__cache__',
                 nb_fit_try=1,
                 n_jobs=1,
                 verbose=0):

        # model hyperparameters
        self.t_r = t_r
        self.n_atoms = n_atoms
        self.hrf_model = hrf_model
        self.hrf_atlas = hrf_atlas
        self.n_scales = n_scales
        self.shared_spatial_maps = shared_spatial_maps
        self.deactivate_v_learning = deactivate_v_learning
        self.deactivate_z_learning = deactivate_z_learning
        self.n_times_atom = n_times_atom
        self.prox_z = prox_z
        self.lbda_strategy = lbda_strategy
        self.lbda = lbda
        self.rho = rho
        self.delta_init = delta_init
        self.u_init_type = u_init_type
        self.eta = eta
        self.z_init = z_init
        self.prox_u = prox_u

        # convergence parameters
        self.max_iter = max_iter
        self.random_state = random_state
        self.early_stopping = early_stopping
        self.eps = eps
        self.raise_on_increase = raise_on_increase

        # preprocessing parameters
        self.idx_first_vols = idx_first_vols
        self.standardize = standardize
        self.detrend = detrend
        self.low_pass = low_pass
        self.high_pass = high_pass

        # technical parameters
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.cache_dir = cache_dir
        self.nb_fit_try = nb_fit_try

        # HRF atlas
        if self.hrf_atlas == 'aal3':
            self.mask_full_brain, self.atlas_rois = fetch_aal3_vascular_atlas()
        elif self.hrf_atlas == 'aal':
            self.mask_full_brain, self.atlas_rois = fetch_aal_vascular_atlas()
        elif self.hrf_atlas == 'harvard':
            res = fetch_harvard_vascular_atlas()
            self.mask_full_brain, self.atlas_rois = res
        elif self.hrf_atlas == 'basc':
            n_scales_ = f"scale{int(n_scales)}"
            res = fetch_basc_vascular_atlas(n_scales=n_scales_)
            self.mask_full_brain, self.atlas_rois = res
        elif hasattr(self.hrf_atlas, '__call__'):
            res = self.hrf_atlas(**atlas_kwargs)
            self.mask_full_brain, self.atlas_rois = res
        else:
            raise ValueError(f"hrf_atlas should belong to ['aal3', 'harvard', "
                             f"'basc', given-function], got {self.hrf_atlas}")
        self.hrf_rois = dict()

        # fMRI masker
        self.masker_ = input_data.NiftiMasker(mask_img=self.mask_full_brain,
                                              t_r=self.t_r,
                                              memory=self.cache_dir,
                                              memory_level=1,
                                              verbose=0)

        # model parameters
        self.n_subjects = None
        self.z_hat_ = None
        self.u_hat_ = None
        self.a_hat_ = None
        self.u_hat_img_ = None
        self.a_hat_img_ = None

        # derived model parameters
        self.Dz_hat_ = None
        self.v_hat_ = None

        # auxiliary parameters
        self.roi_label_from_hrf_idx = None
        self.v_init_ = None
        self.lbda_ = lbda
        self.lobj_ = None
        self.ltime_ = None
Example #22
    n_display_samples = 4
    memory = 'nilearn_cache'
    plots = [1, 2, 3]
    # dataset_fn = datasets.fetch_nyu_rest
    dataset_fn = datasets.fetch_haxby

    # Load data
    dataset = dataset_fn(n_subjects=subject_idx + 1)
    func_filename = dataset.func[subject_idx]

    # This is resting-state data: the background has not been removed yet,
    # thus we need to use mask_strategy='epi' to compute the mask from the
    # EPI images
    nifti_masker = input_data.NiftiMasker(memory=memory,
                                          memory_level=1,
                                          verbose=10,
                                          mask_strategy='epi',
                                          standardize=False)
    # Mask the data
    fmri_masked = nifti_masker.fit_transform(func_filename)
    mask = nifti_masker.mask_img_.get_data().astype(bool)

    # Augment the features with spatial information.
    X = np.asarray(np.where(mask)).T.astype(float)
    factors = (0., 0., 0.)
    types = (float, float, float)
    for ai, axis in enumerate(range(3)):
        data_axis = X[:, axis]
        data_range = float(data_axis.max() - data_axis.min())
        hemi = (data_axis - np.median(data_axis)) / float(data_range)
        if types[ai] == bool:
Example #23
def main(derivatives, ds='ds-02'):
    subjects = ['{:02d}'.format(i) for i in range(1, 16)]
    subjects.pop(0)  # 1
    subjects.pop(2)  # 4

    mask_l = op.join(
        derivatives, ds, 'mean_mask_mni_space/_mask_stnl',
        'sub-01_desc-stnl_mask_resampled_trans_merged_mean.nii.gz')
    mask_r = op.join(
        derivatives, ds, 'mean_mask_mni_space/_mask_stnr',
        'sub-01_desc-stnr_mask_resampled_trans_merged_mean.nii.gz')

    mask_l = image.math_img('mask > 0.3', mask=mask_l)
    mask_r = image.math_img('mask > 0.3', mask=mask_r)

    #mask = image.load_img(mask)
    masker_l = input_data.NiftiMasker(mask_l, detrend=True, standardize=True)
    masker_r = input_data.NiftiMasker(mask_r, detrend=True, standardize=True)

    data_stn_l = []
    data_stn_r = []

    for subject in subjects:
        surfs = []
        data_stn_l = []
        data_stn_r = []

        if subject == '07':
            n_runs = 2
        else:
            n_runs = 3

        for run in range(1, n_runs + 1):
            lh = surface.load_surf_data(
                op.join(
                    derivatives, ds,
                    'smoothed_surfaces/sub-{subject}/func/sub-{subject}_task-randomdotmotion_run-{run:02d}_space-fsaverage_desc-smoothed_hemi-lh.gii'
                    .format(**locals()))).T
            rh = surface.load_surf_data(
                op.join(
                    derivatives, ds,
                    'smoothed_surfaces/sub-{subject}/func/sub-{subject}_task-randomdotmotion_run-{run:02d}_space-fsaverage_desc-smoothed_hemi-rh.gii'
                    .format(**locals()))).T

            confounds = pd.read_table(
                op.join(
                    derivatives, ds,
                    'fmriprep/sub-{subject}/func/sub-{subject}_task-randomdotmotion_run-{run:02d}_desc-confounds_regressors.tsv'
                    .format(**locals())))
            confounds.fillna(method='bfill', inplace=True)

            im = op.join(
                derivatives, ds,
                'fmriprep/sub-{subject}/func/sub-{subject}_task-randomdotmotion_run-{run:02d}_space-T1w_desc-preproc_bold.nii.gz'
                .format(**locals()))

            surf = np.concatenate((lh, rh), 1)
            surf = signal.clean(surf,
                                standardize=True,
                                confounds=confounds[include].values)

            surfs.append(surf)

            d = masker_l.fit_transform(im, confounds=confounds[include].values)
            data_stn_l.append(d)

            d = masker_r.fit_transform(im, confounds=confounds[include].values)
            data_stn_r.append(d)

        surfs = np.array(surfs)
        data_stn_l = np.array(data_stn_l)
        data_stn_r = np.array(data_stn_r)

        rs_l = np.array([
            data_stn_l[i].T.dot(surfs[i]) / data_stn_l.shape[1]
            for i in range(n_runs)
        ])
        rs_r = np.array([
            data_stn_r[i].T.dot(surfs[i]) / data_stn_r.shape[1]
            for i in range(n_runs)
        ])

        np.savez_compressed(
            op.join(derivatives, ds, 'surface_correlations',
                    'sub-{subject}_surfs.npz'.format(**locals())), surfs)
        np.savez_compressed(
            op.join(derivatives, ds, 'surface_correlations',
                    'sub-{subject}_stn_l.npz'.format(**locals())), data_stn_l)
        np.savez_compressed(
            op.join(derivatives, ds, 'surface_correlations',
                    'sub-{subject}_stn_r.npz'.format(**locals())), data_stn_r)
        np.savez_compressed(
            op.join(derivatives, ds, 'surface_correlations',
                    'sub-{subject}_rs_l.npz'.format(**locals())), rs_l)
        np.savez_compressed(
            op.join(derivatives, ds, 'surface_correlations',
                    'sub-{subject}_rs_r.npz'.format(**locals())), rs_r)
Example #24
            hb_func = join(roi_dir, '{0}-hb-kb.nii.gz'.format(s))
            hb_roi_qc = join(sink_dir, 'roi-qc',
                             '{0}-{1}-hb-func.png'.format(s, run))
            avg_func = join(sink_dir, 'mean_func.nii.gz')
            hb = join(roi_dir, '{0}-{1}-hb-resampled.nii.gz'.format(s, run))
            mean_func = mean_img(fmri_file).to_filename(avg_func)

            hb_resampled = resample_to_img(hb_func,
                                           avg_func,
                                           interpolation='nearest')
            hb_resampled.to_filename(hb)

            plotting.plot_roi(hb, avg_func, output_file=hb_roi_qc)

            roi_masker = input_data.NiftiMasker(hb,
                                                detrend=False,
                                                standardize=True,
                                                t_r=1.)
            roi_timeseries = roi_masker.fit_transform(fmri_file)
            print('extracted hb timeseries')
            brain_masker = input_data.NiftiMasker(detrend=False,
                                                  standardize=True,
                                                  t_r=1.,
                                                  smoothing_fwhm=3.)
            brain_timeseries = brain_masker.fit_transform(fmri_file)
            print('extracted brain timeseries')

            hb_ts = np.mean(roi_timeseries, axis=1)
            #hb = roi_timeseries[:,1]
            print(hb_ts.shape)

            sbc_hb = np.dot(brain_timeseries.T, hb_ts) / hb_ts.shape[0]
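            # A hedged sketch of the likely next step: back-project the
            # correlation map to brain space and save it (the output file
            # name is an assumption)
            sbc_hb_img = brain_masker.inverse_transform(sbc_hb.T)
            sbc_hb_img.to_filename(
                join(sink_dir, '{0}-{1}-sbc-hb.nii.gz'.format(s, run)))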
Example #25
def nuisance_regress(inputimg,
                     confoundsfile,
                     inputmask,
                     inputtr=0,
                     conftype="36P",
                     spikethr=0.25,
                     smoothkern=6.0,
                     discardvols=4,
                     highpassval=0.008,
                     lowpassval=0.08):
    """
    
    Returns a nibabel.nifti1.Nifti1Image that is cleaned in the following
    ways: detrended, smoothed, motion parameters regressed out, spikes
    regressed out, bandpass filtered, and normalized.

    Options for motion parameter regression: 36P, 9P, 6P, or aCompCor.

    Signal cleaning parameters from:

        Parkes, L., Fulcher, B., Yücel, M., & Fornito, A. (2018). An evaluation
        of the efficacy, reliability, and sensitivity of motion correction
        strategies for resting-state functional MRI. NeuroImage, 171, 415-436.

        Ciric, R., Wolf, D. H., Power, J. D., Roalf, D. R., Baum, G. L.,
        Ruparel, K., ... & Gur, R. C. (2017). Benchmarking of participant-level
        confound regression strategies for the control of motion artifact in
        studies of functional connectivity. NeuroImage, 154, 174-187.
    
    """

    # extract confounds
    confounds, outlier_stats = get_confounds(confoundsfile,
                                             kind=conftype,
                                             spikereg_threshold=spikethr)

    # check that the high-pass cutoff is below the low-pass cutoff
    if highpassval >= lowpassval:
        print("high- and low-pass values don't make sense. exiting")
        exit(1)

    # check tr
    if inputtr == 0:
        # read the TR from the fourth entry of the header zooms; this relies
        # on the input data storing the TR in the 4th dimension
        tr = inputimg.header.get_zooms()[3]
        print("found that tr is: {}".format(str(tr)))

        if tr == 0:
            print("that's not a valid TR. exiting")
            exit(1)

    else:
        tr = inputtr

    if inputmask is not None:
        print("cleaning image with masker")

        # masker params
        masker_params = {
            "mask_img": inputmask,
            "detrend": True,
            "standardize": True,
            "low_pass": lowpassval,
            "high_pass": highpassval,
            "t_r": tr,
            "smoothing_fwhm": smoothkern,
            "verbose": 1,
        }

        # invoke masker
        masker = input_data.NiftiMasker(**masker_params)

        # perform the nuisance regression
        time_series = masker.fit_transform(inputimg,
                                           confounds=confounds.values)

        # inverse masker operation to get the nifti object, n.b. this returns a Nifti1Image!!!
        outimg = masker.inverse_transform(time_series)
    else:
        # no mask! so no masker
        print("cleaning image with no mask")

        clean_params = {
            "confounds": confounds.values,
            "detrend": True,
            "standardize": True,
            "low_pass": lowpassval,
            "high_pass": highpassval,
            "t_r": tr,
        }

        loadimg = image.load_img(inputimg)

        outimg = image.clean_img(loadimg, **clean_params)

    # get rid of the first N volumes
    #outimgtrim = image.index_img(outimg, np.arange(discardvols, outimg.shape[3]))
    if discardvols > 0:
        outimgtrim = image_drop_dummy_trs(outimg, discardvols)
    else:
        outimgtrim = outimg

    return outimgtrim, confounds, outlier_stats
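
# A hedged usage sketch with hypothetical inputs:
# img = nib.load('sub-01_task-rest_bold.nii.gz')
# cleaned, confounds, stats = nuisance_regress(img, 'confounds.tsv',
#                                              'brainmask.nii.gz', conftype='36P')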
Example #26
    
  
    counter=0
#n_perm=100
    n_perm=10000
 #   perm_count=np.zeros([n_perm])
 
    all_perms=np.arange(0, n_perm)  #np.arange(0, len(mask_index[0]))
    n_splits = n_perm // 100  # integer number of chunks for array_split
    all_perms=np.array_split(all_perms, n_splits)  

    maskfile='./mynewgreymask.nii.gz' #MAKE SURE TO HAVE YOUR MASK IN THE CWD
    gm_mask = nib.load(maskfile)  
    print("mask loaded")
    print(gm_mask.shape)
    main_masker=input_data.NiftiMasker(mask_img=gm_mask)
    mm=main_masker.fit_transform(gm_mask)
#print gm_mask.shape
#We also create an index through the mask 
    current_mask=np.array((gm_mask.get_data()==1))
    print(current_mask.shape[0], current_mask.shape[1], current_mask.shape[2])
    mask_index=np.array(np.where(current_mask))
    print(mask_index.shape)
    mask_tree=KDTree(mask_index.T) #np.unravel_index(current_mask, 3)
####mask_coords = np.asarray(list(zip(*mask_index)))
#This one will hold the real group statistic
#spheres=mask_tree.query_radius(mask_tree, 8)
#print len(spheres)
    stat_map=np.zeros_like(mm).T

#This one will hold significances. Both need to have the same shape
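    # A hedged sketch of the likely next steps: a same-shaped significance
    # map, and sphere membership from the KDTree (query_radius returns the
    # indices of in-mask neighbours within a radius of 8 voxels)
    sig_map = np.zeros_like(stat_map)
    spheres = mask_tree.query_radius(mask_index.T, r=8)
    print(len(spheres))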
Example #27
def feature_extractor(imgfile,
                      maskfile,
                      featurefile,
                      maskerfile,
                      wardfile,
                      nclusters=[
                          1000,
                      ],
                      selectfile=None,
                      targetfile=None,
                      metafile=None,
                      cachefile=None):

    resultdict = {"imgfile": imgfile, "maskfile": maskfile}
    # load data
    print("--loading data")
    nifti_masker = input_data.NiftiMasker(mask=maskfile,
                                          memory=cachefile,
                                          memory_level=1,
                                          standardize=False)
    fmri_masked = nifti_masker.fit_transform(imgfile)
    print("--getting mask")
    mask = nifti_masker.mask_img_.get_data().astype(bool)

    # saveit
    joblib.dump(nifti_masker, maskerfile)
    resultdict["mask"] = mask
    resultdict["Xmask"] = fmri_masked
    resultdict["maskerfile"] = maskerfile

    # get connectivity
    print("--getting connectivity")
    shape = mask.shape
    connectivity = image.grid_to_graph(n_x=shape[0],
                                       n_y=shape[1],
                                       n_z=shape[2],
                                       mask=mask)
    # saveit
    resultdict["connectivity"] = connectivity
    print("--save main file")
    np.savez(featurefile + "_main.npz", **resultdict)

    # run  ward
    y = np.load(targetfile)["ymap"]
    meta = np.load(metafile)
    train = meta["train"]
    test = meta["test"]
    ncv = meta['ycv']

    # for each cv set
    for cvx in range(ncv):
        trainidx = train[cvx]
        testidx = test[cvx]
        resultdict = {}
        wardfiles = []
        selectfiles = []
        print("--Running ward %d" % (cvx, ))
        for ix, nc in enumerate(nclusters):
            ward = WardAgglomeration(n_clusters=nc,
                                     connectivity=connectivity,
                                     memory=cachefile)
            ward.fit(fmri_masked[trainidx])
            fmri_reduced_train = ward.transform(fmri_masked[trainidx])
            fmri_reduced_test = ward.transform(fmri_masked[testidx])

            # saveit
            subwardfile = wardfile + "_D%d_cv%d.pkl" % (
                nc,
                cvx,
            )
            joblib.dump(ward, subwardfile)
            resultdict["Xward_%d_train" % (nc, )] = fmri_reduced_train
            resultdict["Xward_%d_test" % (nc, )] = fmri_reduced_test
            wardfiles.append(subwardfile)

            # additional feature selection
            selector = SelectPercentile(f_classif, percentile=30)
            selector.fit(fmri_reduced_train, y[trainidx])
            fmri_select_train = selector.transform(fmri_reduced_train)
            fmri_select_test = selector.transform(fmri_reduced_test)

            # saveit
            subselectfile = selectfile + "_D%d_cv%d.pkl" % (
                nc,
                cvx,
            )
            joblib.dump(selector, subselectfile)
            resultdict["Xselect_%d_train" % (nc, )] = fmri_select_train
            resultdict["Xselect_%d_test" % (nc, )] = fmri_select_test
            selectfiles.append(subselectfile)

        resultdict["wardfiles"] = wardfiles
        resultdict["selectfiles"] = selectfiles

        # save results
        print("--save cv result")
        np.savez(featurefile + "_cv%d.npz" % (cvx, ), **resultdict)
Example #28
def abide1_partial_seed(scale, confound_scale, confound_rois, n_cpu):
    # Make sure the confound_rois are in an iterable
    try:
        iter(confound_rois)
    except TypeError:
        print(
            'confound_rois should be an iterable. I will see if I can wrap it in a list'
        )
        if not isinstance(confound_rois, int):
            raise Exception(
                f'confound_rois is not an iterable and also not an integer. It is {type(confound_rois)}. '
                f'I cannot deal with this and will end here.')
        confound_rois = [confound_rois]

    # Paths
    root_p = pal.Path(__file__).resolve().parents[2] / 'data'
    pheno_p = root_p / 'pheno/ABIDE1_Pheno_PSM_matched_minimum_10.tsv'
    atlas_p = root_p / f'ATLAS/MIST/Parcellations/MIST_{scale}_nocereb.nii.gz'
    confound_p = root_p / f'ATLAS/MIST/Parcellations/MIST_{confound_scale}_nocereb.nii.gz'
    atlas_labels_p = root_p / f'ATLAS/MIST/Parcel_Information/MIST_{scale}_nocereb.csv'
    confound_labels_p = root_p / f'ATLAS/MIST/Parcel_Information/MIST_{confound_scale}_nocereb.csv'
    mask_p = root_p / 'ATLAS/MIST/Parcellations/MIST_mask_nocereb.nii.gz'
    hier_p = root_p / 'ATLAS/MIST/Hierarchy/MIST_PARCEL_ORDER.csv'

    # Data
    fc_p = root_p / 'preprocessed/time_series/abide_1/'
    fc_t = 'fmri_sub{:07}_session{}_run{}_scrubbed.nii.gz'

    # Output
    out_t = f'sub_{{}}_ses_{{}}_run{{}}_mist_{scale}_children_of_' \
        f'{"_and_".join(map(str, confound_rois))}_mist_{confound_scale}.npy'
    out_p = root_p / f'preprocessed/seed_maps/abide_1/MIST_{scale}'
    if not out_p.is_dir():
        out_p.mkdir()

    # Load inputs
    pheno = pd.read_csv(pheno_p, sep='\t')
    mask_i = nib.load(str(mask_p))
    atlas_labels = pd.read_csv(atlas_labels_p, sep=';')
    confound_labels = pd.read_csv(confound_labels_p, sep=';')
    # Get the hierarchy
    hier = pd.read_csv(hier_p)
    ordered_labels = list()
    for i in hier[f's{scale}'].values:
        if i not in ordered_labels:
            ordered_labels.append(i)
    atlas_networks = np.array(ordered_labels)
    confound_network_list = np.array([
        hier.loc[hier[f's{scale}'] == i][f's{confound_scale}'].values[0]
        for i in ordered_labels
    ])

    mist_mask_i = nib.load(str(mask_p))
    masker = nid.NiftiMasker(mask_img=mist_mask_i)
    masker.fit()

    # Get the atlas
    atlas_i = nib.load(str(atlas_p))
    confound_i = nib.load(str(confound_p))
    atlas = masker.transform(atlas_i)
    confound = masker.transform(confound_i)

    # Find the regions and give their names
    # Pick the atlas child networks that belong to the confound regions
    confound_names = list(
        confound_labels.query('roi in @confound_rois')['label'].values)
    children = np.array([
        atlas_roi for confound_roi in confound_rois
        for atlas_roi in atlas_networks[confound_network_list == confound_roi]
    ])
    children_names = list(
        atlas_labels.query('roi in @children')['label'].values)
    print(
        f'I am generating seed maps for the children of {confound_names} from MIST_{confound_scale} '
        f'in MIST_{scale}. These children are:\n{children_names}')
    # Build the new atlases
    confound_mask = np.array([
        1 if confound[0, idx] in confound_rois else 0
        for idx in range(confound.shape[1])
    ])
    subset_atlas = np.array([
        0 if not atlas[0, idx] in children else atlas[0, idx]
        for idx in range(atlas.shape[1])
    ])
    confound_mask_i = masker.inverse_transform(confound_mask)
    subset_atlas_i = masker.inverse_transform(subset_atlas)

    path_list = [
        (fc_p / fc_t.format(row['SUB_ID'], row['session'], row['run']),
         out_p / out_t.format(row['SUB_ID'], row['session'], row['run']))
        for rid, row in pheno.iterrows()
    ]
    for path_in, path_out in path_list:
        if not path_in.is_file():
            print(f'Path does not exist: {path_in}')
    func_in_paths, func_out_paths = zip(*path_list)

    job_list = [{
        'func_in_p': p_in,
        'sca_out_p': p_out,
        'atlas_img': subset_atlas_i,
        'mask_img': mask_i,
        'confound_img': confound_mask_i
    } for p_in, p_out in path_list]
    with futures.ThreadPoolExecutor(max_workers=n_cpu) as executor:
        futs = [
            executor.submit(asdfc.wrappers.wrap_seed_based_correlation,
                            **job_args) for job_args in job_list
        ]
        for future in tqdm(futures.as_completed(futs), total=len(job_list)):
            pass
    print('All Done!')
Example #29
"""

### Load nyu_rest dataset #####################################################

import numpy as np
from nilearn import datasets
from nilearn import input_data
from nilearn.plotting.img_plotting import plot_roi, plot_epi
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)

# This is resting-state data: the background has not been removed yet,
# thus we need to use mask_strategy='epi' to compute the mask from the
# EPI images
nifti_masker = input_data.NiftiMasker(memory='nilearn_cache',
                                      mask_strategy='epi',
                                      memory_level=1,
                                      standardize=False)
func_filename = nyu_dataset.func[0]
fmri_masked = nifti_masker.fit_transform(func_filename)
mask = nifti_masker.mask_img_.get_data().astype(bool)

### Ward ######################################################################

# Compute connectivity matrix: which voxel is connected to which
from sklearn.feature_extraction import image
shape = mask.shape
connectivity = image.grid_to_graph(n_x=shape[0],
                                   n_y=shape[1],
                                   n_z=shape[2],
                                   mask=mask)
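
# A hedged sketch of the agglomeration step that typically follows, using
# scikit-learn's FeatureAgglomeration on the masked data
from sklearn.cluster import FeatureAgglomeration
ward = FeatureAgglomeration(n_clusters=1000, connectivity=connectivity,
                            linkage='ward', memory='nilearn_cache')
ward.fit(fmri_masked)
labels_img = nifti_masker.inverse_transform(ward.labels_)  # labels back in brain space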
Example #30
def sbc_one_session(subject, session, fmriprep_dir, output_dir, tr):

    confounds_file, brainmask_file, rs_file, anat_file = get_files(
        fmriprep_dir, subject, session)
    out_stub = "{}_{}".format(subject, session)

    # look for output files
    out_file_nii = os.path.join(output_dir,
                                "sbc_pcc_1_" + out_stub + ".nii.gz")
    out_file_thresh = os.path.join(
        output_dir, "sbc_pcc_2_fisherz_thrsh0.5_" + out_stub + ".png")
    out_file_report = os.path.join(output_dir,
                                   "sbc_pcc_9_info_" + out_stub + ".txt")

    if not (os.path.exists(out_file_thresh) and os.path.exists(out_file_nii)
            and os.path.exists(out_file_report)):
        print("*** Running SBC for {} {} ***".format(subject, session))
        # see http://nilearn.github.io/auto_examples/03_connectivity/plot_seed_to_voxel_correlation.html
        pcc_coords = [(0, -52, 18)]
        lp_freq = 0.1
        hp_freq = 0.01

        # load confounds
        confounds, outlier_stats = get_confounds(confounds_file)

        # extract data from seed ROI
        seed_masker = input_data.NiftiSpheresMasker(pcc_coords,
                                                    radius=8,
                                                    mask_img=brainmask_file,
                                                    detrend=True,
                                                    standardize=True,
                                                    low_pass=lp_freq,
                                                    high_pass=hp_freq,
                                                    t_r=tr)
        seed_time_series = seed_masker.fit_transform(
            rs_file, confounds=confounds.values)

        #  extract data for the entire brain
        brain_masker = input_data.NiftiMasker(smoothing_fwhm=6,
                                              mask_img=brainmask_file,
                                              detrend=True,
                                              standardize=True,
                                              low_pass=lp_freq,
                                              high_pass=hp_freq,
                                              t_r=tr)
        brain_time_series = brain_masker.fit_transform(
            rs_file, confounds=confounds.values)

        #  calculate correlation
        seed_based_correlations = np.dot(brain_time_series.T, seed_time_series) / \
                                  seed_time_series.shape[0]
        seed_based_correlations_fisher_z = np.arctanh(seed_based_correlations)

        # plot
        seed_based_correlation_img = brain_masker.inverse_transform(
            seed_based_correlations_fisher_z.T)
        seed_based_correlation_img.to_filename(out_file_nii)

        display = plotting.plot_stat_map(seed_based_correlation_img,
                                         threshold=0.5,
                                         bg_img=anat_file,
                                         cut_coords=pcc_coords[0],
                                         title=out_stub + " (fisher z)")
        display.add_markers(marker_coords=pcc_coords,
                            marker_color='g',
                            marker_size=300)
        display.savefig(out_file_thresh)
        display.close()

        # report
        report = "Confounds:\n"
        report += "\n".join(confounds.columns) + "\n\n"
        report += "lp_freq: {}\n".format(lp_freq)
        report += "hp_freq: {}\n".format(hp_freq)

        report += "confounds_file: {}\n".format(confounds_file)
        report += "brainmask_file: {}\n".format(brainmask_file)
        report += "rs_file: {}\n".format(rs_file)
        report += "anat_file: {}\n\n".format(anat_file)

        report += confounds.to_string()

        with open(out_file_report, "w") as fi:
            fi.write(report)
    else:
        print("*** SBC for {} {} already computed. Do nothing. ***".format(
            subject, session))