Example #1
def run_meta_analyses(database, features, use_features=None, outdir=None):
    """
    Runs Neurosynth (NS)-style meta-analyses based on `database` and `features`

    Parameters
    ----------
    database, features : str or os.PathLike
        Path to NS-style database.txt and features.txt files
    use_features : list, optional
        List of features on which to run NS meta-analyses; if not supplied all
        terms in `features` will be used
    outdir : str or os.PathLike, optional
        Path to output directory where derived files should be saved. If not
        supplied, the module-level `NSDIR` is used. Default: None

    Returns
    -------
    generated : list of pathlib.Path
        List of filepaths to generated term meta-analysis directories
    """

    # check outdir
    if outdir is None:
        outdir = NSDIR
    outdir = Path(outdir)

    # make database and load feature names; annoyingly slow
    dataset = ns.Dataset(str(database))
    dataset.add_features(str(features))
    features = set(dataset.get_feature_names())

    # if we only want a subset of the features take the set intersection
    if use_features is not None:
        features &= set(use_features)
    pad = max(len(f) for f in features)

    generated = []
    for word in sorted(features):
        msg = f'Running meta-analysis for term: {word:<{pad}}'
        print(msg, end='\r', flush=True)

        # run meta-analysis + save specified outputs (only if they don't exist)
        path = outdir / word.replace(' ', '_')
        path.mkdir(exist_ok=True)
        if not all((path / f'{f}.nii.gz').exists() for f in IMAGES):
            ma = ns.MetaAnalysis(dataset, dataset.get_studies(features=word))
            ma.save_results(path, image_list=IMAGES)

        # project data to fsaverage5 surface and save mgh for each hemisphere
        for hemi in ['lh', 'rh']:
            fname = path / 'association-test_z.nii.gz'
            outname = path / f'{hemi}.association-test_z.mgh'
            run(VOL2SURF.format(fname, outname, hemi), quiet=True)

        # store MA path
        generated.append(path)

    print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)

    return generated
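
A minimal usage sketch (hypothetical paths; assumes the module-level constants NSDIR, IMAGES, and VOL2SURF are configured and the Neurosynth database/feature files have been downloaded):

# hypothetical input files; `outdir` falls back to NSDIR when omitted
outdirs = run_meta_analyses('data/database.txt', 'data/features.txt',
                            use_features=['pain', 'language'],
                            outdir='derived/neurosynth')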
Example #2
def create_surface_grf(noise=None,
                       alpha=3.0,
                       normalize=True,
                       seed=None,
                       medial_val=None):
    """
    Generates a Gaussian random field (GRF) on the fsaverage5 surface

    Uses gaussian_random_field() and FreeSurfer's mri_vol2surf to generate GRF

    Parameters
    ----------
    noise : (x, y, z) array_like, optional
        Noise array to which Gaussian smoothing is added. If not provided, an
        array will be created by drawing from the standard normal distribution.
        Default: None
    alpha : float (positive), optional
        Power (exponent) of the power-law distribution. Default: 3.0
    normalize : bool, optional
        Normalize the returned field to unit variance. Default: True
    seed : None, int, or np.random.Generator, optional
        Random state used to seed GRF generation. Default: None
    medial_val : float, optional
        If provided, vertices along the medial wall are set to this value in
        the returned field. Default: None

    Returns
    -------
    data : (20484,) np.ndarray
        Surface representation of GRF
    """

    # affine for a 2 mm isotropic grid spanning the 91 x 109 x 91 volume
    affine = np.eye(4) * 2
    affine[:, -1] = [-90, -90, -72, 1]

    gfield = gaussian_random_field(91,
                                   109,
                                   91,
                                   noise=noise,
                                   alpha=alpha,
                                   normalize=normalize,
                                   seed=seed)
    fn = make_tmpname(suffix='.nii.gz')
    nib.save(nib.nifti1.Nifti1Image(gfield, affine), fn)

    data = np.zeros((20484, ))  # 10242 vertices per fsaverage5 hemisphere
    for n, hemi in enumerate(('lh', 'rh')):
        outname = make_tmpname(suffix='.mgh')
        run(VOL2SURF.format(fn, outname, hemi), quiet=True)
        sl = slice(len(data) // 2 * n, len(data) // 2 * (n + 1))
        data[sl] = nib.load(outname).get_fdata().squeeze()
        os.remove(outname)

    os.remove(fn)

    if medial_val is not None:
        data = _mod_medial(_mod_medial(data, True), False, medial_val)

    return data
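
A usage sketch (assumes FreeSurfer is installed so the mri_vol2surf call inside can run):

# generate a reproducible field and split it into hemispheres
grf = create_surface_grf(alpha=3.0, seed=1234)
lh, rh = grf[:10242], grf[10242:]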
Example #3
    fname.parent.mkdir(parents=True, exist_ok=True)
    data.to_csv(fname, sep=',')

    return fname


if __name__ == "__main__":
    stds = nndata.fetch_hcp_standards(data_dir=ROIDIR)
    fsaverage = nndata.fetch_fsaverage('fsaverage5', data_dir=ROIDIR)['sphere']

    # separate cifti into hemispheres (and convert to gifti)
    cifti = HCPDIR / 'S1200.MyelinMap_BC_MSMAll.32k_fs_LR.dscalar.nii'
    lhout = HCPDIR / 'S1200.L.MyelinMap_BC_MSMAll.32k_fs_LR.func.gii'
    rhout = HCPDIR / 'S1200.R.MyelinMap_BC_MSMAll.32k_fs_LR.func.gii'
    run(CIFTISEP.format(cifti=cifti, lhout=lhout, rhout=rhout), quiet=True)

    # for each hemisphere, resample to FreeSurfer fsaverage5 space and convert
    # the resulting GII file to MGH (for consistency with NeuroSynth data)
    for gii, hemi, surf in zip((lhout, rhout), ('L', 'R'), fsaverage):
        out = HCPDIR / f'fsaverage5.MyelinMap.{hemi}.10k_fsavg_{hemi}.func.gii'
        mgh = HCPDIR / f'{hemi.lower()}h.myelin.mgh'
        run(HCP2FS.format(gii=gii, path=stds, hemi=hemi, out=out), quiet=True)
        run(GIITOMGH.format(gii=out, surf=surf, out=mgh), quiet=True)

        # remove intermediate file
        if out.exists():
            out.unlink()

    # remove intermediate files
    for fn in [lhout, rhout]:
        if fn.exists():
            fn.unlink()
Example #4
def combine_cammoun_500(lhannot,
                        rhannot,
                        subject_id,
                        annot=None,
                        subjects_dir=None,
                        use_cache=True,
                        quiet=False):
    """
    Combines finest parcellation from Cammoun et al., 2012 for `subject_id`

    The parcellations from Cammoun et al., 2012 have five distinct scales; the
    highest resolution parcellation (scale 500) is split into three GCS files
    for historical FreeSurfer purposes. This is a bit annoying for calculating
    statistics, plotting, etc., so this function can be run once all the GCS
    files have been used to produce annotation files for `subject_id` (using
    :py:func:`netneurotools.freesurfer.apply_prob_atlas`). This function will
    combine the three .annot files that correspond to the highest resolution
    into a single .annot file for a given subject

    Parameters
    ----------
    lhannot, rhannot : (3,) list of str
        List of filepaths to left/right hemisphere annotation files for the
        Cammoun et al., 2012 scale500 parcellation
    subject_id : str
        FreeSurfer subject ID
    annot : str, optional
        Template path for the output annotation file, with a format
        placeholder for the hemisphere designation (e.g.,
        '{}.cammoun500.annot'). If None, that default template is used. If
        provided as a relative path, it is assumed to stem from
        `subjects_dir`/`subject_id`. Default: None
    subjects_dir : str, optional
        Path to FreeSurfer subject directory. If not set, will inherit from
        the environmental variable `$SUBJECTS_DIR`. Default: None
    use_cache : bool, optional
        Whether to check for an existing combined annotation file at the
        output path and use it, if it exists. If False, a new annotation file
        will be created regardless. Default: True
    quiet : bool, optional
        Whether to restrict status messages. Default: False

    Returns
    -------
    cammoun500 : list
        List of created annotation files
    """
    from netneurotools.utils import check_fs_subjid, run

    tolabel = 'mri_annotation2label --subject {subject_id} --hemi {hemi} ' \
              '--outdir {label_dir} --annotation {annot} --sd {subjects_dir}'
    toannot = 'mris_label2annot --sd {subjects_dir} --s {subject_id} ' \
              '--ldir {label_dir} --hemi {hemi} --annot-path {annot} ' \
              '--ctab {ctab} {label}'

    subject_id, subjects_dir = check_fs_subjid(subject_id, subjects_dir)

    # fall back to a default output template if none was provided
    if annot is None:
        annot = '{}.cammoun500.annot'

    created = []
    for hemi, annotfiles in zip(['lh', 'rh'], [lhannot, rhannot]):
        # generate output name based on hemisphere
        out = annot.format(hemi[0].upper())
        if not out.startswith(os.path.abspath(os.sep)):
            out = os.path.join(subjects_dir, subject_id, 'label', out)

        if os.path.isfile(out) and use_cache:
            created.append(out)
            continue

        # make directory to temporarily store labels
        label_dir = os.path.join(subjects_dir, subject_id,
                                 '{}.cammoun500.labels'.format(hemi))
        os.makedirs(label_dir, exist_ok=True)

        ctab = pd.DataFrame(columns=range(5))
        for fn in annotfiles:
            run(tolabel.format(subject_id=subject_id,
                               hemi=hemi,
                               label_dir=label_dir,
                               annot=fn,
                               subjects_dir=subjects_dir),
                quiet=quiet)

            # save ctab information from annotation file
            vtx, ct, names = nib.freesurfer.read_annot(fn)
            data = np.column_stack([[f.decode() for f in names], ct[:, :-1]])
            # DataFrame.append() was removed in pandas 2.0; use pd.concat
            ctab = pd.concat([ctab, pd.DataFrame(data)], ignore_index=True)

        # get rid of duplicate entries and add back in unknown/corpuscallosum
        ctab = ctab.drop_duplicates(subset=[0], keep=False)
        add_back = pd.DataFrame(
            [['unknown', 25, 5, 25, 0], ['corpuscallosum', 120, 70, 50, 0]],
            index=[0, 4])
        ctab = pd.concat([ctab, add_back]).sort_index().reset_index(drop=True)
        # save ctab to temporary file for creation of annotation file
        ctab_fname = os.path.join(label_dir, '{}.cammoun500.ctab'.format(hemi))
        ctab.to_csv(ctab_fname, header=False, sep='\t', index=True)

        # get all labels EXCEPT FOR UNKNOWN to combine into annotation
        # unknown will be regenerated as all the unmapped vertices
        label = ' '.join([
            '--l {}'.format(
                os.path.join(label_dir, '{hemi}.{lab}.label'.format(hemi=hemi,
                                                                    lab=lab)))
            for lab in ctab.iloc[1:, 0]
        ])
        # combine labels into annotation file
        run(toannot.format(subjects_dir=subjects_dir,
                           subject_id=subject_id,
                           label_dir=label_dir,
                           hemi=hemi,
                           ctab=ctab_fname,
                           annot=out,
                           label=label),
            quiet=quiet)
        created.append(out)

        # remove temporary label directory
        shutil.rmtree(label_dir)

    return created
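
A usage sketch (hypothetical filenames; assumes the three scale500 annotation files per hemisphere were already produced via netneurotools.freesurfer.apply_prob_atlas and that FreeSurfer is installed):

# hypothetical annotation files derived from the three scale500 GCS files
lhannot = ['lh.cammoun500.part{}.annot'.format(i) for i in (1, 2, 3)]
rhannot = ['rh.cammoun500.part{}.annot'.format(i) for i in (1, 2, 3)]
created = combine_cammoun_500(lhannot, rhannot, 'fsaverage',
                              annot='{}.cammoun500.annot')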
Example #5
    #####
    # this should work now!
    annotations = datasets.fetch_cammoun2012('fsaverage')

    # map (via surf2surf) fsaverage to fsaverage5/6 so we can provide those
    for trg in ['fsaverage5', 'fsaverage6']:
        for scale, (lh, rh) in annotations.items():
            for annot, hemi in [(lh, 'lh'), (rh, 'rh')]:
                tval = annot.replace('space-fsaverage', 'space-{}'.format(trg))
                tval = tval.replace('/fsaverage/', '/{}/'.format(trg))
                msg = f'Generating annotation file: {tval}'
                print(msg, end='\r', flush=True)

                run(SURFCMD.format(trgsubject=trg,
                                   annot=annot,
                                   tval=tval,
                                   hemi=hemi),
                    quiet=True)

    print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)

    hcp = datasets.fetch_hcp_standards()
    fsaverage = datasets.fetch_fsaverage()
    for scale, (lh, rh) in annotations.items():
        for annot, hemi in [(lh, 'lh'), (rh, 'rh')]:
            outdir = op.join(op.dirname(op.dirname(annot)), 'fslr32k')
            gii = annot.replace('.annot', '.label.gii')
            white = fsaverage['white'][0 if hemi == 'lh' else 1]
            fname = op.basename(gii).replace('fsaverage', 'fslr32k')
            msg = f'Generating fslr32k file: {fname}'
            print(msg, end='\r', flush=True)
Example #6
def warp_to_subject(infile, warpfile, outfile=None, template=None,
                    interpolation='NearestNeighbor', verbose=False,
                    use_cache=True):
    """
    Applies `warpfile` to `infile`

    Just a wrapper around antsApplyTransforms to be called from within Python;
    literally opens a subprocess to run the command

    Parameters
    ----------
    infile : str
        Image to be warped
    warpfile : str
        ANTs-generated warp file to apply to `infile`
    outfile : str, optional
        Name of output file. If not specified, will be determined from provided
        `warpfile` and `infile`. Default: None
    template : str, optional
        Reference image specifying the shape, dimensions, etc. of the output
        generated when applying `warpfile` to `infile`. If not specified, the
        defaults embedded in `warpfile` will be used. Default: None
    interpolation : str, optional
        Type of interpolation to use during warping. Default: 'NearestNeighbor'
    verbose : bool, optional
        Whether to print status messages as transformation is applied. Default:
        False
    use_cache : bool, optional
        Whether to check for existence of `outfile` and use that, if it exists.
        If False, will create a new `outfile` regardless of existence. Default:
        True

    Returns
    -------
    outfile : str
        Path to warped parcellation
    """

    warpcmd = 'antsApplyTransforms -d 3 -e {imagetype} {opts} -v {verbose} ' \
              '-n {interpolation} -i {input} -o {output} -t {warpfile}'

    # scalar or timeseries, depending on the dimensionality of image (3 vs 4)
    imagetype = 3 if nib.load(infile).ndim > 3 else 0

    opts = ''
    if template is not None:
        opts += f'-r {template}'

    if outfile is None:
        outfile = op.join(op.dirname(warpfile), op.basename(infile))

    if not op.isfile(outfile) or not use_cache:
        utils.run(warpcmd.format(imagetype=imagetype,
                                 opts=opts,
                                 verbose=int(verbose),
                                 interpolation=interpolation,
                                 input=infile,
                                 output=outfile,
                                 warpfile=warpfile),
                  quiet=not verbose)

    return outfile
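
A usage sketch (hypothetical filenames; assumes ANTs' antsApplyTransforms is on the PATH):

# warp an atlas into subject space with a precomputed ANTs transform
warped = warp_to_subject('atlas.nii.gz', 'sub-01_warp.h5',
                         template='sub-01_T1w.nii.gz', verbose=True)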
Example #7
            # insert each index in `idx` back into `inds` at its own position
            for i in idx:
                inds = np.insert(inds, i, i)
            # reorder names and color table according to `inds`
            names = [n.encode() for n in np.array(names)[inds]]
            ctab = ctab[inds]
            # build a mapping from old label values (src) to new ones (tar)
            # and remap `labels` via a sorted searchsorted lookup
            src, tar = np.array(inds), np.arange(len(names))
            sidx = src.argsort()
            src, tar = src[sidx], tar[sidx]
            labels = tar[np.searchsorted(src, labels)]
            nib.freesurfer.write_annot(annot, labels, ctab, names)

    #####
    # this should work now!
    annotations = datasets.fetch_cammoun2012('surface')

    # map (via surf2surf) fsaverage to fsaverage5/6 so we can provide those
    for trg in ['fsaverage5', 'fsaverage6']:
        for scale, (lh, rh) in annotations.items():
            for annot, hemi in [(lh, 'lh'), (rh, 'rh')]:
                tval = annot.replace('space-fsaverage', 'space-{}'.format(trg))

                msg = f'Generating annotation file: {tval}'
                print(msg, end='\r', flush=True)

                run(cmd.format(trgsubject=trg,
                               annot=annot,
                               tval=tval,
                               hemi=hemi),
                    quiet=True)

    print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)