Example #1
import glob
import os

from bids import BIDSLayout


def rex_bids_csv(folder_path, to_save, ftype):
    """[CSV generation for BIDS datasets]
    [This function is used to generate a csv for BIDS datasets]
    Arguments:
        folder_path {[string]} -- [Takes the folder to see where to look for
                                   the different modaliies]
        to_save {[string]} -- [Takes the folder as a string to save the csv]
        ftype {[string]} -- [Are you trying to save train, validation or test,
                             if file type is set to test, it does not look for
                             ground truths]
    """
    if ftype == "test":
        csv_file = open(os.path.join(to_save, ftype + ".csv"), "w+")
        csv_file.write("ID,")
    else:
        csv_file = open(os.path.join(to_save, ftype + ".csv"), "w+")
        csv_file.write("ID,gt_path,")
    # load BIDS dataset into memory
    layout = BIDSLayout(folder_path)
    bids_df = layout.to_df()
    bids_modality_df = {
        "t1": bids_df[bids_df["suffix"] == "T1w"],
        "t2": bids_df[bids_df["suffix"] == "T2w"],
        "flair": bids_df[bids_df["suffix"] == "FLAIR"],
        "t1ce": bids_df[bids_df["suffix"] == "T1CE"],
    }
    # check what modalities the dataset contains
    modalities = []
    for modality, df in bids_modality_df.items():
        if not df.empty:
            modalities.append(modality)
    # write headers for those modalities
    for modality in modalities[:-1]:
        csv_file.write(modality + "_path,")
    modality = modalities[-1]
    csv_file.write(modality + "_path\n")
    # write image paths for each subject
    for sub in layout.get_subjects():
        csv_file.write(sub)
        csv_file.write(",")
        if ftype != "test":
            ground_truth = glob.glob(
                os.path.join(folder_path, sub, "*mask.nii.gz"))[0]
            csv_file.write(ground_truth)
            csv_file.write(",")
        for modality in modalities[:-1]:
            img = bids_modality_df[modality][bids_df["subject"] ==
                                             sub].path.values
            csv_file.write(img[0])
            csv_file.write(",")
        modality = modalities[-1]
        img = bids_modality_df[modality][bids_df["subject"] == sub].path.values
        csv_file.write(img[0])
        csv_file.write("\n")
    csv_file.close()
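
A minimal usage sketch (the paths are hypothetical; for non-test splits, every subject folder is assumed to contain a *mask.nii.gz ground truth):

# Writes train.csv with ID, gt_path and one <modality>_path column per modality found.
rex_bids_csv("/data/bids_dataset", "/data/csvs", "train")
# With ftype="test", the gt_path column and the mask lookup are skipped.
rex_bids_csv("/data/bids_dataset", "/data/csvs", "test")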
Example #2
from bids import BIDSLayout


def get_scan_duration(output_dir, modality="func", task="rest"):
    layout = BIDSLayout(output_dir)
    df = layout.to_df()
    # .copy() avoids pandas' SettingWithCopyWarning when adding a column below
    scans_df = df.query(
        "datatype==@modality & task==@task & extension=='nii.gz'").copy()

    scan_durations = []
    for file in scans_df.path:
        scan_durations.append(layout.get_metadata(file)["ScanDurationSec"])
    scans_df["scan_duration"] = scan_durations
    scans_df.reset_index(drop=True, inplace=True)

    return scans_df
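
A usage sketch (the path is hypothetical; this assumes each matching scan has a "ScanDurationSec" field in its JSON sidecar):

durations = get_scan_duration("/data/bids_dataset", modality="func", task="rest")
print(durations[["subject", "scan_duration"]])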
Example #3
from pathlib import Path

from bids import BIDSLayout


def find_t1_in_bids(root_path, subject, session):
    '''
    Use pybids to build a BIDSLayout and locate the subject's T1w image.
    '''

    subject = str(subject.replace('sub-', ''))
    session = str(session.replace('ses-', ''))

    data = BIDSLayout(root_path)
    df = data.to_df()
    t1 = df.loc[(df['subject'] == subject) & (df['session'] == session) &
                (df['suffix'] == 'T1w') & (df['extension'] == 'nii.gz')]

    assert t1.shape[0] == 1, "Couldn't find proper T1w file in BIDS!"

    p = Path(t1.iloc[0]['path']).resolve()

    return p.parent, p.name
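
A minimal sketch (the path is hypothetical; the "sub-"/"ses-" prefixes are stripped internally, so both prefixed and bare labels work):

t1_dir, t1_name = find_t1_in_bids("/data/bids_dataset", "sub-01", "ses-01")
print(t1_dir / t1_name)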
Example #4
def run_conversion(raw_dir,
                   output_base_dir,
                   analysis_level,
                   info_out_dir,
                   participant_label,
                   session_label,
                   public_output,
                   use_new_ids,
                   ds_version,
                   info_list,
                   dataset_description,
                   new_id_lut_file=None,
                   bvecs_from_scanner_file=None,
                   tp6_raw_lut=None,
                   dry_run=False,
                   demo_file=None,
                   session_duration_min=120):
    # privacy settings
    private_str = "_PRIVATE" if not (public_output and use_new_ids) else ""
    output_dir = Path(
        output_base_dir) / f"LHAB_{ds_version}{private_str}" / "sourcedata"
    metainfo_dir = Path(
        output_base_dir) / f"LHAB_{ds_version}{private_str}" / "metainfo"
    metainfo_dir.mkdir(exist_ok=True, parents=True)

    output_dir.mkdir(parents=True, exist_ok=True)
    info_out_dir = Path(info_out_dir) / "PRIVATE"
    info_out_dir.mkdir(parents=True, exist_ok=True)

    if analysis_level == "participant":
        for old_subject_id in participant_label:
            submit_single_subject(
                old_subject_id,
                session_label,
                raw_dir,
                output_dir,
                info_list,
                info_out_dir,
                bvecs_from_scanner_file=bvecs_from_scanner_file,
                public_output=public_output,
                use_new_ids=use_new_ids,
                new_id_lut_file=new_id_lut_file,
                tp6_raw_lut=tp6_raw_lut,
                dry_run=dry_run,
                session_duration_min=session_duration_min)
        print("\n\n\n\nDONE.\nConverted %d subjects." % len(participant_label))
        print(participant_label)

    elif analysis_level == "group":
        ds_desc_file = output_dir / "dataset_description.json"
        if ds_desc_file.is_file():
            ds_desc_file.unlink()
        dataset_description["DataSetVersion"] = ds_version
        add_info_to_json(ds_desc_file, dataset_description, create_new=True)

        # Demos
        print("Exporting demos...")
        pwd = getpass.getpass("Enter the Password for dob file:")
        calc_demos(output_dir,
                   info_out_dir,
                   demo_file,
                   pwd,
                   new_id_lut_file=new_id_lut_file)

        # check for duplicates
        mappings = concat_tsvs(info_out_dir / "parrec_mapping_PRIVATE")
        dups = mappings[mappings.duplicated(subset="from")]
        assert len(dups) == 0, f"duplicates found:\n{dups}"

        # concat notconverted files
        unconv_df = concat_tsvs(info_out_dir / "unconverted_files")
        unconv_df.to_csv(info_out_dir / "unconverted_files.tsv",
                         sep="\t",
                         index=False)

        print("X" * 20 + "\nRuning BIDS validator")
        os.system(f"bids-validator {str(output_dir)}")

        print("\n Get BIDS layout")
        layout = BIDSLayout(output_dir)
        layout.to_df().to_csv(metainfo_dir / "layout.csv", index=False)

    else:
        raise RuntimeError(f"Unknown analysis level: {analysis_level}")
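
A hypothetical group-level invocation (participant-level conversion is assumed to have been run already; all paths and values below are placeholders):

run_conversion("/data/raw", "/data/out", "group", "/data/info",
               participant_label=[], session_label=None,
               public_output=True, use_new_ids=True, ds_version="2.0",
               info_list=[], dataset_description={"Name": "LHAB"},
               demo_file="/data/demos.txt")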
Example #5
def main(subject, sourcedata, derivatives, smoothed, n_jobs=5):

    os.environ['SUBJECTS_DIR'] = op.join(derivatives, 'freesurfer')

    source_layout = BIDSLayout(sourcedata, validate=False, derivatives=False)

    fmriprep_layout = BIDSLayout(op.join(derivatives, 'fmriprep'),
                                 validate=False)

    if smoothed:
        bold_layout = BIDSLayout(op.join(derivatives, 'smoothed'),
                                 validate=False)
        bold = bold_layout.get(subject=subject, extension='func.gii')
    else:
        bold = fmriprep_layout.get(subject=subject, extension='func.gii')

        bold = sorted([e for e in bold if 'fsaverage6' in e.filename],
                      key=lambda x: x.run)

    fmriprep_layout_df = fmriprep_layout.to_df()
    fmriprep_layout_df = fmriprep_layout_df[
        ~fmriprep_layout_df.subject.isnull()]
    fmriprep_layout_df['subject'] = fmriprep_layout_df.subject.astype(int)
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(fmriprep_layout_df.suffix,
                                                    ['regressors'])]
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.extension, ['tsv'])]
    fmriprep_layout_df = fmriprep_layout_df.set_index(['subject', 'run'])

    events_df = source_layout.to_df()
    events_df = events_df[events_df.suffix == 'events']
    events_df['subject'] = events_df['subject'].astype(int)
    events_df = events_df.set_index(['subject', 'run'])

    tr = source_layout.get_tr(bold[0].path)

    if smoothed:
        base_dir = op.join(derivatives, 'glm_stim1_surf_smoothed',
                           f'sub-{subject}', 'func')
    else:
        base_dir = op.join(derivatives, 'glm_stim1_surf', f'sub-{subject}',
                           'func')

    if not op.exists(base_dir):
        os.makedirs(base_dir)

    for b in bold:
        run = b.entities['run']
        hemi = b.entities['suffix']
        #     print(run)

        confounds_ = fmriprep_layout_df.loc[(subject, run), 'path'].iloc[0]
        confounds_ = pd.read_csv(confounds_, sep='\t')
        confounds_ = confounds_[to_include].fillna(method='bfill')

        pca = PCA(n_components=7)
        confounds_ -= confounds_.mean(0)
        confounds_ /= confounds_.std(0)
        confounds_pca = pca.fit_transform(confounds_[to_include])

        events_ = events_df.loc[(subject, run), 'path']
        events_ = pd.read_csv(events_, sep='\t')
        events_['trial_type'] = events_['trial_type'].apply(
            lambda x: 'stim2' if x.startswith('stim2') else x)

        frametimes = np.arange(0, tr * len(confounds_), tr)

        X = make_first_level_design_matrix(
            frametimes,
            events_,
            add_regs=confounds_pca,
            add_reg_names=[f'confound_pca.{i}' for i in range(1, 8)])

        Y = surface.load_surf_data(b.path).T
        Y = (Y / Y.mean(0) * 100)
        Y -= Y.mean(0)

        fit = run_glm(Y, X, noise_model='ols', n_jobs=n_jobs)
        r = fit[1][0.0]
        betas = pd.DataFrame(r.theta, index=X.columns)

        stim1 = []

        for stim in 5, 7, 10, 14, 20, 28:
            stim1.append(betas.loc[f'stim1-{stim}'])

        result = pd.concat(stim1, axis=1).T
        print(result.shape)

        pes = nb.gifti.GiftiImage(header=nb.load(b.path).header,
                                  darrays=[
                                      nb.gifti.GiftiDataArray(row)
                                      for ix, row in result.iterrows()
                                  ])

        fn_template = op.join(
            base_dir,
            'sub-{subject}_run-{run}_space-{space}_desc-stims1_hemi-{hemi}.pe.gii'
        )
        space = 'fsaverage6'

        pes.to_filename(fn_template.format(**locals()))

        transformer = SurfaceTransform(source_subject='fsaverage6',
                                       target_subject='fsaverage',
                                       hemi={
                                           'L': 'lh',
                                           'R': 'rh'
                                       }[hemi])

        transformer.inputs.source_file = pes.get_filename()
        space = 'fsaverage'
        transformer.inputs.out_file = fn_template.format(**locals())
        # Disable on MAC OS X (SIP problem)
        transformer.run()
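
A hypothetical invocation (the script additionally needs a module-level `to_include` list of confound column names, and FreeSurfer's SurfaceTransform on the path):

main(1, '/data/sourcedata', '/data/derivatives', smoothed=False)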
Example #6
    def from_bids(cls,
                  source_dir,
                  subject=None,
                  session=None,
                  acquisition=None,
                  run=None,
                  inversion_efficiency=0.96):
        """ Creates a MEMP2RAGE-object from a properly organized BIDS-folder.

        The folder should be organized similar to this example:

        sub-01/anat/:
        # The first inversion time volumes
         * sub-01_inv-1_part-mag_MPRAGE.nii
         * sub-01_inv-1_part-phase_MPRAGE.nii

        # The four echoes of the second inversion (magnitude)
         * sub-01_inv-2_part-mag_echo-1_MPRAGE.nii
         * sub-01_inv-2_part-mag_echo-2_MPRAGE.nii
         * sub-01_inv-2_part-mag_echo-3_MPRAGE.nii
         * sub-01_inv-2_part-mag_echo-4_MPRAGE.nii

        # The four echoes of the second inversion (phase)
         * sub-01_inv-2_part-phase_echo-1_MPRAGE.nii
         * sub-01_inv-2_part-phase_echo-2_MPRAGE.nii
         * sub-01_inv-2_part-phase_echo-3_MPRAGE.nii
         * sub-01_inv-2_part-phase_echo-4_MPRAGE.nii

        # The json describing the parameters of the first inversion pulse
         * sub-01_inv-1_MPRAGE.json

        # The json describing the parameters of the second inversion pulse
         * sub-01_inv-2_echo-1_MPRAGE.json
         * sub-01_inv-2_echo-2_MPRAGE.json
         * sub-01_inv-2_echo-3_MPRAGE.json
         * sub-01_inv-2_echo-4_MPRAGE.json

         The JSON-files should contain all the necessary MP2RAGE sequence parameters
         and should look something like this:

         sub-01/anat/sub-01_inv-1_MPRAGE.json:
             {
                "InversionTime":0.67,
                "FlipAngle":7,
                "RepetitionTimeExcitation":0.0062,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-1_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 6.0
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-2_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 14.5
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-3_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 23
                "FieldStrength": 7
             }

         sub-01/anat/sub-01_inv-2_echo-4_MPRAGE.json:
             {
                "InversionTime":3.855,
                "FlipAngle":6,
                "RepetitionTimeExcitation":0.0320,
                "RepetitionTimePreparation":6.723,
                "NumberShots":150,
                "EchoTime": 31.5
                "FieldStrength": 7
             }

        A MEMP2RAGE-object can now be created from the BIDS folder as follows:

        Example:
            >>> import pymp2rage
            >>> mp2rage = pymp2rage.MEMP2RAGE.from_bids('/data/sourcedata/', '01')

        Args:
            source_dir (str): BIDS directory containing all necessary files
            subject (str): subject identifier
            session (str): session identifier (optional)
            acquisition (str): acquisition identifier (optional)
            run (int): run index (optional)
            inversion_efficiency (float): efficiency of the inversion pulse
                (default: 0.96)
        """

        __dir__ = os.path.abspath(os.path.dirname(__file__))
        layout = BIDSLayout(source_dir,
                            validate=False,
                            config=op.join(__dir__, 'bids', 'bep001.json'))

        df = layout.to_df()

        subject = str(subject) if subject is not None else subject
        session = str(session) if session is not None else session
        run = int(run) if run is not None else run

        for var_str, var in zip(['subject', 'session', 'run', 'acquisition'],
                                [subject, session, run, acquisition]):
            if var is not None:
                df = df[df[var_str] == var]
        df = df[np.in1d(df.extension, ['nii', 'nii.gz'])]

        for key in ['echo', 'inv', 'fa']:
            if key in df.columns:
                df[key] = df[key].astype(float)

        df = df.set_index(['suffix', 'inv', 'echo', 'part'])
        # keep only the suffixes that are actually present (df.loc with a
        # missing label would raise a KeyError when no TB1map exists)
        df = df[df.index.get_level_values('suffix').isin(
            ['MP2RAGE', 'TB1map'])]

        for ix, row in df.iterrows():

            for key, value in layout.get_metadata(row.path).items():
                if key in [
                        'EchoTime', 'InversionTime',
                        'RepetitionTimePreparation',
                        'RepetitionTimeExcitation', 'NumberShots',
                        'FieldStrength', 'FlipAngle'
                ]:
                    df.loc[ix, key] = value

        if 'TB1map' in df.index:

            if len(df.loc['TB1map']) == 1:
                print('using {} as B1map'.format(
                    str(df.loc['TB1map'].iloc[0]['path'])))
                b1map = df.loc['TB1map'].iloc[0]['path']
            else:
                print('FOUND MORE THAN ONE B1-MAP! Will not use B1-correction')
                b1map = None
        else:
            b1map = None

        inv1 = df.loc[('MP2RAGE', 1, slice(None), 'mag'), 'path'].iloc[0]
        inv1ph = df.loc[('MP2RAGE', 1, slice(None), 'phase'), 'path'].iloc[0]
        inv2 = df.loc[('MP2RAGE', 2, slice(None), 'mag'), 'path'].tolist()
        inv2ph = df.loc[('MP2RAGE', 2, slice(None), 'phase'), 'path'].tolist()

        echo_times = df.loc[('MP2RAGE', 2, slice(None), 'mag'),
                            'EchoTime'].values
        MPRAGE_tr = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                           'RepetitionTimePreparation'].values[0]
        invtimesAB = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                            'InversionTime'].values[0], df.loc[(
                                'MP2RAGE', 2, slice(None),
                                'mag'), 'InversionTime'].values[0],
        nZslices = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                          'NumberShots'].values[0]
        FLASH_tr = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                          'RepetitionTimeExcitation'].values[0], df.loc[(
                              'MP2RAGE', 2, slice(None),
                              'mag'), 'RepetitionTimeExcitation'].values[0]
        B0 = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                    'FieldStrength'].values[0]
        flipangleABdegree = df.loc[('MP2RAGE', 1, slice(None), 'mag'),
                                   'FlipAngle'].values[0], df.loc[(
                                       'MP2RAGE', 2, slice(None),
                                       'mag'), 'FlipAngle'].values[0]

        mp2rageme = cls(echo_times=echo_times,
                        MPRAGE_tr=MPRAGE_tr,
                        invtimesAB=invtimesAB,
                        flipangleABdegree=flipangleABdegree,
                        nZslices=nZslices,
                        FLASH_tr=FLASH_tr,
                        inversion_efficiency=inversion_efficiency,
                        B0=B0,
                        inv1=inv1,
                        inv1ph=inv1ph,
                        inv2=inv2,
                        inv2ph=inv2ph)

        return mp2rageme
Example #7
def main(subject, sourcedata, derivatives):
    source_layout = BIDSLayout(sourcedata, validate=False, derivatives=False)
    fmriprep_layout = BIDSLayout(op.join(derivatives, 'fmriprep'),
                                 validate=False)

    bold = fmriprep_layout.get(
        subject=subject,
        suffix='bold',
        description='preproc',
        extension='nii.gz',
    )
    bold = sorted([e for e in bold if 'MNI' in e.filename],
                  key=lambda x: x.run)

    reg = re.compile('.*_space-(?P<space>.+)_desc.*')

    fmriprep_layout_df = fmriprep_layout.to_df()
    fmriprep_layout_df = fmriprep_layout_df[
        ~fmriprep_layout_df.subject.isnull()]
    fmriprep_layout_df['subject'] = fmriprep_layout_df.subject.astype(int)
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.suffix, ['bold', 'regressors', 'mask'])]
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.extension, ['nii.gz', 'tsv'])]
    fmriprep_layout_df['space'] = fmriprep_layout_df.path.apply(
        lambda path: reg.match(path).group(1) if reg.match(path) else None)
    fmriprep_layout_df = fmriprep_layout_df.set_index(
        ['subject', 'run', 'suffix', 'space'])

    events_df = source_layout.to_df()
    events_df = events_df[events_df.suffix == 'events']
    events_df['subject'] = events_df['subject'].astype(int)
    events_df = events_df.set_index(['subject', 'run'])

    tr = source_layout.get_tr(bold[0].path)

    for b in bold:
        run = b.entities['run']
        print(run)

        confounds_ = fmriprep_layout_df.loc[(subject, run, 'regressors'),
                                            'path'].iloc[0]
        confounds_ = pd.read_csv(confounds_, sep='\t')
        confounds_ = confounds_[to_include].fillna(method='bfill')

        events_ = events_df.loc[(subject, run), 'path']
        events_ = pd.read_csv(events_, sep='\t')
        events_['trial_type'] = events_['trial_type'].apply(
            lambda x: 'stim2' if x.startswith('stim2') else x)

        model = FirstLevelModel(tr,
                                drift_model=None,
                                n_jobs=5,
                                smoothing_fwhm=4.0)
        pca = PCA(n_components=7)

        confounds_ -= confounds_.mean(0)
        confounds_ /= confounds_.std(0)
        confounds_pca = pca.fit_transform(confounds_[to_include])

        events_['onset'] += tr

        model.fit(b.path, events_, confounds_pca)

        base_dir = op.join(derivatives, 'glm_stim1', f'sub-{subject}', 'func')

        if not op.exists(base_dir):
            os.makedirs(base_dir)

        # PE
        ims = []
        for stim in 5, 7, 10, 14, 20, 28:
            im = model.compute_contrast(f'stim1-{stim}',
                                        output_type='effect_size')
            ims.append(im)
        ims = image.concat_imgs(ims)
        ims.to_filename(
            op.join(base_dir,
                    f'sub-{subject}_run-{run}_desc-stims1_pe.nii.gz'))

        # zmap
        ims = []
        for stim in 5, 7, 10, 14, 20, 28:
            im = model.compute_contrast(f'stim1-{stim}', output_type='z_score')
            ims.append(im)
        ims = image.concat_imgs(ims)
        ims.to_filename(
            op.join(base_dir,
                    f'sub-{subject}_run-{run}_desc-stims1_zmap.nii.gz'))
Example #8
# In[19]:

layout.get(task='localizer', suffix='bold', scope='raw')[:10]

# Notice that there are both nifti and event files. We can get the filename of the first participant's functional run.

# In[140]:

f = layout.get(task='localizer')[0].filename
f

# If you want a summary of all the files in your BIDSLayout, but don't want to iterate over BIDSFile objects and extract their entities, you can get a nice bird's-eye view of your dataset using the `to_df()` method.

# In[20]:

layout.to_df()
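
# As a quick sketch of what you can do with the resulting DataFrame (assuming the standard pybids columns), you could count files per subject and suffix:

df = layout.to_df()
df.groupby(['subject', 'suffix']).size()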

# ## Loading Data with Nibabel
# Neuroimaging data are often stored as nifti files (`.nii`), which can also be compressed using gzip (`.nii.gz`).  These files store both 3D and 4D data and also contain structured metadata in the image **header**.
#
# There is a very nice python tool for accessing nifti data stored on your file system called [nibabel](http://nipy.org/nibabel/).  If you don't already have nibabel installed on your computer, it is easy to install via pip: run `!pip install nibabel` in a cell (the `!` tells jupyter to run the command in the shell outside the notebook).  You only need to run this once (unless you would like to update the version).
#
# nibabel objects can be initialized by simply pointing to a nifti file, even if it is compressed with gzip.  First, we will import the nibabel module as `nib` (short and sweet, so that we don't have to type as much when using the tool).  I'm also including a path to where the data file is located so that I don't have to keep typing it.  It is easy to change this on your own computer.
#
# We will be loading an anatomical image from subject S01 from the localizer [dataset](../content/Download_Data).  See this [paper](https://bmcneurosci.biomedcentral.com/articles/10.1186/1471-2202-8-91) for more information about this dataset.
#
# We will use pybids to grab subject S01's T1 image.

# In[26]:

import nibabel as nib
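
# A minimal sketch of loading an image (`anat_path` is a hypothetical path to the T1w file grabbed via pybids):

anat_img = nib.load(anat_path)        # works for .nii and .nii.gz alike
print(anat_img.shape)                 # dimensions of the volume
print(anat_img.header.get_zooms())    # voxel sizes
data = anat_img.get_fdata()           # voxel data as a numpy array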
Example #9
def main(subject,
         sourcedata,
         derivatives):
    source_layout = BIDSLayout(sourcedata, validate=False, derivatives=False)
    fmriprep_layout = BIDSLayout(
        op.join(derivatives, 'fmriprep'), validate=False)

    bold = fmriprep_layout.get(subject=subject,
                               suffix='bold',
                               description='preproc',
                               extension='nii.gz', )
    bold = sorted([e for e in bold if 'MNI' in e.filename],
                  key=lambda x: x.run)

    reg = re.compile('.*_space-(?P<space>.+)_desc.*')

    fmriprep_layout_df = fmriprep_layout.to_df()
    fmriprep_layout_df = fmriprep_layout_df[
        ~fmriprep_layout_df.subject.isnull()]
    fmriprep_layout_df['subject'] = fmriprep_layout_df.subject.astype(int)
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.suffix, ['bold', 'regressors', 'mask'])]
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.extension, ['nii.gz', 'tsv'])]
    fmriprep_layout_df['space'] = fmriprep_layout_df.path.apply(
        lambda path: reg.match(path).group(1) if reg.match(path) else None)
    fmriprep_layout_df = fmriprep_layout_df.set_index(
        ['subject', 'run', 'suffix', 'space'])

    events_df = source_layout.to_df()
    events_df = events_df[events_df.suffix == 'events']
    events_df['subject'] = events_df['subject'].astype(int)
    events_df = events_df.set_index(['subject', 'run'])

    tr = source_layout.get_tr(bold[0].path)

    imgs = []
    confounds = []
    events = []

    for b in bold:
        run = b.entities['run']

        confounds_ = fmriprep_layout_df.loc[(
            subject, run, 'regressors'), 'path'].iloc[0]
        confounds_ = pd.read_csv(confounds_, sep='\t')

        events_ = events_df.loc[(subject, run), 'path']
        events_ = pd.read_csv(events_, sep='\t')
        events_['trial_type'] = events_['trial_type'].apply(
            lambda x: 'stim2' if x.startswith('stim2') else x)

        imgs.append(b.path)
        confounds.append(confounds_[to_include].fillna(method='bfill'))
        events.append(events_)

    mask = fmriprep_layout.get(subject=subject,
                               run=1,
                               suffix='mask',
                               extension='nii.gz',
                               description='brain')
    mask = [e for e in mask if 'MNI' in e.filename]
    assert len(mask) == 1
    mask = mask[0].path

    model = FirstLevelModel(tr, drift_model=None, n_jobs=5, smoothing_fwhm=6.0)
    model.fit(imgs, events, confounds)

    base_dir = op.join(derivatives, 'glm', f'sub-{subject}', 'func')

    if not op.exists(base_dir):
        os.makedirs(base_dir)

    contrasts = [
        ('left-right', 'leftright'),
        ('error', 'error'),
        ('stim1-10 + stim1-14 + stim1-20 + stim1-28 + stim1-5 + stim1-7',
         'stim1'),
        ('stim2', 'stim2')]

    for contrast, label in contrasts:
        con = model.compute_contrast(contrast, output_type='z_score')
        pe = model.compute_contrast(contrast, output_type='effect_size')

        con.to_filename(op.join(base_dir, f'sub-{subject}_contrast-{label}_zmap.nii.gz'))
        pe.to_filename(op.join(base_dir, f'sub-{subject}_contrast-{label}_pe.nii.gz'))
Example #10
def main():

    # Init
    parser = get_parser()
    args = parser.parse_args()

    warnings.filterwarnings("ignore", category=FutureWarning)
    if args.api_key:
        client = flywheel.Client(args.api_key)
    else:
        client = flywheel.Client()
    assert client, "Your Flywheel CLI credentials aren't set!"

    if args.verbose:
        logger.info("Scanning BIDS directory for data...")

    local_bids = BIDSLayout(args.directory)
    local_bids_df = local_bids.to_df()
    local_t1s = (local_bids_df.loc[(local_bids_df['suffix'] == 'T1w')
                                   & (local_bids_df['extension'] == 'nii.gz')])

    assert local_t1s.shape[0] >= 1, "No T1w files found in dataset!"

    if args.verbose:
        logger.info("Found the following subjects:\n{}".format(' '.join(
            local_t1s.subject.values)))
        logger.info("Finding matching subjects on Flywheel...")

    for i, row in local_t1s.iterrows():

        print("\n")

        row_processed = False

        flywheel_bids = FlyBIDSLayout(args.project,
                                      str('sub-' + row['subject']))
        flywheel_bids_df = flywheel_bids.to_df()
        flywheel_t1s = (flywheel_bids_df.loc[
            (flywheel_bids_df['Filename'].str.contains('T1w'))
            & (flywheel_bids_df['Filename'].str.contains('.nii.gz',
                                                         regex=False))])

        target_filename = Path(row['path']).name

        # regex=False: filenames contain dots, which are regex metacharacters
        mask = flywheel_t1s['Filename'].str.contains(target_filename,
                                                     regex=False)

        file_exists = mask.any()

        if file_exists:

            if args.verbose:
                logger.info(
                    "Matching file {} found on Flywheel. Refacing local T1w..."
                    .format(row['path']))

            path_to_t1 = Path(row['path']).resolve().parent
            input_filename = Path(row['path']).resolve().name

            refaced_file, status = run_afni(str(path_to_t1),
                                            str(input_filename),
                                            args.rec,
                                            dry_run=args.dry_run,
                                            verbose=args.verbose)

            if args.verbose:
                logger.info("Replacing T1w with newly refaced version...")
            row_processed = replace_t1w(client,
                                        row,
                                        flywheel_t1s[mask],
                                        args.rec,
                                        dry_run=args.dry_run,
                                        verbose=args.verbose,
                                        delete=args.delete)

        if not row_processed:

            logger.error("Subject {} not processed!".format(row['subject']))
Example #11
Suppose we were interested in getting a list of tasks included in the dataset.

layout.get_task()

We can query all of the files associated with this task.

layout.get(task='localizer', suffix='bold', scope='raw')[:10]

Notice that there are both nifti and event files. We can get the filename of the first participant's functional run.

f = layout.get(task='localizer')[0].filename
f

If you want a summary of all the files in your BIDSLayout, but don't want to iterate over BIDSFile objects and extract their entities, you can get a nice bird's-eye view of your dataset using the `to_df()` method.

layout.to_df()

## Loading Data with Nibabel
Neuroimaging data are often stored as nifti files (`.nii`), which can also be compressed using gzip (`.nii.gz`).  These files store both 3D and 4D data and also contain structured metadata in the image **header**.

There is a very nice python tool for accessing nifti data stored on your file system called [nibabel](http://nipy.org/nibabel/).  If you don't already have nibabel installed on your computer, it is easy to install via pip: run `!pip install nibabel` in a cell (the `!` tells jupyter to run the command in the shell outside the notebook).  You only need to run this once (unless you would like to update the version).

nibabel objects can be initialized by simply pointing to a nifti file, even if it is compressed with gzip.  First, we will import the nibabel module as `nib` (short and sweet, so that we don't have to type as much when using the tool).  I'm also including a path to where the data file is located so that I don't have to keep typing it.  It is easy to change this on your own computer.

We will be loading an anatomical image from subject S01 from the localizer [dataset](http://brainomics.cea.fr/localizer/).  See this [paper](https://bmcneurosci.biomedcentral.com/articles/10.1186/1471-2202-8-91) for more information about this dataset.

We will use pybids to grab subject S01's T1 image.

import nibabel as nib
Example #12
def main(subject,
         sourcedata,
         derivatives):
    source_layout = BIDSLayout(sourcedata, validate=False, derivatives=False)
    fmriprep_layout = BIDSLayout(
        op.join(derivatives, 'fmriprep'), validate=False)

    bold = fmriprep_layout.get(subject=subject,
                               extension='func.gii')
    bold = sorted([e for e in bold if 'fsaverage6' in e.filename],
                  key=lambda x: x.run)

    reg = re.compile('.*_space-(?P<space>.+)_desc.*')

    fmriprep_layout_df = fmriprep_layout.to_df()
    fmriprep_layout_df = fmriprep_layout_df[
        ~fmriprep_layout_df.subject.isnull()]
    fmriprep_layout_df['subject'] = fmriprep_layout_df.subject.astype(int)
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.suffix, ['regressors'])]
    fmriprep_layout_df = fmriprep_layout_df[np.in1d(
        fmriprep_layout_df.extension, ['tsv'])]
    fmriprep_layout_df = fmriprep_layout_df.set_index(
        ['subject', 'run'])

    events_df = source_layout.to_df()
    events_df = events_df[events_df.suffix == 'events']
    events_df['subject'] = events_df['subject'].astype(int)
    events_df = events_df.set_index(['subject', 'run'])

    tr = source_layout.get_tr(bold[0].path)

    base_dir = op.join(derivatives, 'glm_stim1_trialwise_surf', f'sub-{subject}', 'func')

    if not op.exists(base_dir):
        os.makedirs(base_dir)

    for b in bold:
        run = b.entities['run']
        hemi = b.entities['suffix']
    #     print(run)

        confounds_ = fmriprep_layout_df.loc[(
            subject, run), 'path'].iloc[0]
        confounds_ = pd.read_csv(confounds_, sep='\t')
        confounds_ = confounds_[to_include].fillna(method='bfill')

        events_ = events_df.loc[(subject, run), 'path']
        events_ = pd.read_csv(events_, sep='\t')
        events_['trial_type'] = events_['trial_type'].apply(
            lambda x: 'stim2' if x.startswith('stim2') else x)

        events_['onset'] += tr

        # Split up over trials: give every stim1 event a unique, numbered
        # trial_type (e.g. stim1-5.1, stim1-5.2, ...)
        stim1_events = events_[events_.trial_type.apply(
            lambda x: x.startswith('stim1'))].copy()

        def number_trials(d):
            return pd.Series(['{}.{}'.format(e, i + 1) for i, e in enumerate(d)],
                             index=d.index)

        stim1_events['trial_type'] = stim1_events.groupby(
            'trial_type').trial_type.apply(number_trials)
        events_.loc[stim1_events.index, 'trial_type'] = stim1_events['trial_type']

        frametimes = np.arange(0, tr*len(confounds_), tr)

        pca = PCA(n_components=7)
        confounds_ -= confounds_.mean(0)
        confounds_ /= confounds_.std(0)
        confounds_pca = pca.fit_transform(confounds_[to_include])

        X = make_first_level_design_matrix(
            frametimes,
            events_,
            # confounds_pca is already a numpy array, so pass it directly
            add_regs=confounds_pca)

        Y = surface.load_surf_data(b.path).T
        Y = (Y / Y.mean(0) * 100)
        Y -= Y.mean(0)

        fit = run_glm(Y, X, noise_model='ols')
        r = fit[1][0.0]
        betas = pd.DataFrame(r.theta, index=X.columns)

        stim1 = []

        for stim in 5, 7, 10, 14, 20, 28:
            for trial in range(1, 7):
                stim1.append(betas.loc[f'stim1-{stim}.{trial}'])

        result = pd.concat(stim1, axis=1).T

        pes = nb.gifti.GiftiImage(header=nb.load(b.path).header,
                                  darrays=[nb.gifti.GiftiDataArray(
                                      result.values)])

        pes.to_filename(
            op.join(base_dir,
                f'sub-{subject}_run-{run}_desc-stims1_hemi-{hemi}.pe.gii'))
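
To make the per-trial renaming concrete, here is a tiny self-contained sketch of the same idea, using groupby/cumcount instead of the helper above:

import pandas as pd

s = pd.Series(['stim1-5', 'stim1-5', 'stim1-7'])
# append a running ".<n>" counter within each repeated label
print(s + '.' + (s.groupby(s).cumcount() + 1).astype(str))
# yields: stim1-5.1, stim1-5.2, stim1-7.1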