def fix_annot_names(subject, parc, clean_subject=None, clean_parc=None, hemi="both", subjects_dir=None):
    """Fix for Freesurfer's mri_surf2surf corrupting label names in annot files

    Notes
    -----
    Requires nibabel > 1.3.0 for annot file I/O
    """
    # process args
    subjects_dir = get_subjects_dir(subjects_dir)
    if clean_subject is None:
        clean_subject = subject
    if clean_parc is None:
        clean_parc = parc

    fpaths, hemis = _get_annot_fname(None, subject, hemi, parc, subjects_dir)
    clean_fpaths, _ = _get_annot_fname(None, clean_subject, hemi, clean_parc, subjects_dir)

    for fpath, clean_fpath, hemi in izip(fpaths, clean_fpaths, hemis):
        labels, ctab, names = read_annot(fpath)
        _, _, clean_names = read_annot(clean_fpath)
        if all(n == nc for n, nc in izip(names, clean_names)):
            continue

        if len(clean_names) != len(names):
            err = "Different names in %s annot files: %s vs. " "%s" % (hemi, str(names), str(clean_names))
            raise ValueError(err)

        for clean_name, name in izip(clean_names, names):
            if not name.startswith(clean_name):
                err = "%s does not start with %s" % (str(name), clean_name)
                raise ValueError(err)

        write_annot(fpath, labels, ctab, clean_names)
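# Usage sketch (illustrative, not from the original source): after resampling a
# custom parcellation onto another subject with mri_surf2surf, restore the
# label names from the source subject's annot files. The subject and
# parcellation names below are hypothetical.
fix_annot_names('sample', 'aparc.custom', clean_subject='fsaverage')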
Example #2
def parcels_to_vertices(data, *, lhannot, rhannot, drop=None):
    """
    Projects parcellated `data` to vertices defined in annotation files

    Assigns np.nan to 'unknown' and 'corpuscallosum' vertices in annotation
    files.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be projected to vertices. Parcels should be ordered
        by [left, right] hemisphere; ordering within hemisphere should
        correspond to the provided annotation files.
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere. These must be specified as keyword arguments to avoid
        accidental order switching.
    drop : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of these regions in the returned data. If
        not specified, 'unknown' and 'corpuscallosum' are assumed to not be
        present. Default: None

    Returns
    -------
    projected : numpy.ndarray
        Vertex-level data
    """

    if drop is None:
        drop = ['unknown', 'corpuscallosum']
    drop = _decode_list(drop)

    start = end = 0
    projected = []

    # check this so we're not unduly surprised by anything...
    expected = sum([len(read_annot(a)[-1]) - 2 for a in [lhannot, rhannot]])
    if expected != len(data):
        raise ValueError('Number of parcels in provided annotation files '
                         'differs from size of parcellated data array.\n'
                         '    EXPECTED: {} parcels\n'
                         '    RECEIVED: {} parcels'.format(
                             expected, len(data)))

    for annot in [lhannot, rhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)
        end += len(names) - 2  # unknown and corpuscallosum

        # get indices of unknown and corpuscallosum and insert NaN values
        inds = [names.index(f) - n for n, f in enumerate(drop)]
        currdata = np.insert(data[start:end], inds, np.nan)

        # project to vertices and store
        projected.append(currdata[labels])
        start = end

    return np.hstack(projected)
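# Usage sketch (illustrative, not from the original source): project random
# parcel values onto vertices. The .annot paths are hypothetical; 68 parcels
# corresponds to aparc with 'unknown' and 'corpuscallosum' excluded.
import numpy as np

parc_data = np.random.rand(68)
vert_data = parcels_to_vertices(parc_data,
                                lhannot='lh.aparc.annot',
                                rhannot='rh.aparc.annot')
# vert_data has one value per vertex; dropped regions are filled with NaN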
Example #3
def annot_legend(lh, rh, *args, **kwargs):
    """Plot a legend for a freesurfer parcellation

    Parameters
    ----------
    lh : str
        Path to the lh annot-file.
    rh : str
        Path to the rh annot-file.
    labels : dict (optional)
        Alternative (text) label for (brain) labels.
    h : 'auto' | scalar
        Height of the figure in inches. If 'auto' (default), the height is
        automatically increased to fit all labels.

    Returns
    -------
    legend : :class:`~eelbrain.plot.ColorList`
        Figure with legend for the parcellation.

    Notes
    -----
    Instead of :func:`~eelbrain.plot.brain.annot_legend` it is usually
    easier to use::

        >>> brain = plot.brain.annot(annot, ...)
        >>> legend = brain.plot_legend()

    See Also
    --------
    eelbrain.plot.brain.annot : plot the parcellation on a brain model
    """
    _, lh_colors, lh_names = read_annot(lh)
    _, rh_colors, rh_names = read_annot(rh)
    lh_colors = dict(zip(lh_names, lh_colors[:, :4] / 255.))
    rh_colors = dict(zip(rh_names, rh_colors[:, :4] / 255.))
    names = set(lh_names)
    names.update(rh_names)
    colors = {}
    seq = []  # sequential order in legend
    seq_lh = []
    seq_rh = []
    for name in names:
        if name in lh_colors and name in rh_colors:
            if np.array_equal(lh_colors[name], rh_colors[name]):
                colors[name] = lh_colors[name]
                seq.append(name)
            else:
                colors[name + '-lh'] = lh_colors[name]
                colors[name + '-rh'] = rh_colors[name]
                seq_lh.append(name + '-lh')
                seq_rh.append(name + '-rh')
        elif name in lh_colors:
            colors[name + '-lh'] = lh_colors[name]
            seq_lh.append(name + '-lh')
        else:
            colors[name + '-rh'] = rh_colors[name]
            seq_rh.append(name + '-rh')
    return ColorList(colors, seq + seq_lh + seq_rh, *args, **kwargs)
Example #4
def annot_legend(lh, rh, *args, **kwargs):
    """Plot a legend for a freesurfer parcellation

    Parameters
    ----------
    lh : str
        Path to the lh annot-file.
    rh : str
        Path to the rh annot-file.
    labels : dict (optional)
        Alternative (text) label for (brain) labels.
    h : 'auto' | scalar
        Height of the figure in inches. If 'auto' (default), the height is
        automatically increased to fit all labels.

    Returns
    -------
    legend : :class:`~eelbrain.plot.ColorList`
        Figure with legend for the parcellation.

    Notes
    -----
    Instead of :func:`~eelbrain.plot.brain.annot_legend` it is usually
    easier to use::

        >>> brain = plot.brain.annot(annot, ...)
        >>> legend = brain.plot_legend()

    See Also
    --------
    eelbrain.plot.brain.annot : plot the parcellation on a brain model
    """
    _, lh_colors, lh_names = read_annot(lh)
    _, rh_colors, rh_names = read_annot(rh)
    lh_colors = dict(izip(lh_names, lh_colors[:, :4] / 255.))
    rh_colors = dict(izip(rh_names, rh_colors[:, :4] / 255.))
    names = set(lh_names)
    names.update(rh_names)
    colors = {}
    seq = []  # sequential order in legend
    seq_lh = []
    seq_rh = []
    for name in names:
        if name in lh_colors and name in rh_colors:
            if np.array_equal(lh_colors[name], rh_colors[name]):
                colors[name] = lh_colors[name]
                seq.append(name)
            else:
                colors[name + '-lh'] = lh_colors[name]
                colors[name + '-rh'] = rh_colors[name]
                seq_lh.append(name + '-lh')
                seq_rh.append(name + '-rh')
        elif name in lh_colors:
            colors[name + '-lh'] = lh_colors[name]
            seq_lh.append(name + '-lh')
        else:
            colors[name + '-rh'] = rh_colors[name]
            seq_rh.append(name + '-rh')
    return ColorList(colors, seq + seq_lh + seq_rh, *args, **kwargs)
Example #5
def project_to_vertices(data, rhannot, lhannot):
    """
    Projects parcellated `data` to vertices defined in annotation files

    Assigns np.nan to 'unknown' and 'corpuscallosum' vertices in annotation
    files.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be projected to vertices
    {rh,lh}annot : str
        Path to .annot file containing labels to parcels on the {right,left}
        hemisphere

    Returns
    -------
    projected : numpy.ndarray
        Vertex-level data
    """

    drop = [b'unknown', b'corpuscallosum']
    start = end = 0
    projected = []

    # check this so we're not unduly surprised by anything...
    expected = sum([len(read_annot(a)[-1]) - 2 for a in [rhannot, lhannot]])
    if expected != len(data):
        raise ValueError('Number of parcels in provided annotation files '
                         'differs from size of parcellated data array.\n'
                         '    EXPECTED: {} parcels\n'
                         '    RECEIVED: {} parcels'.format(
                             expected, len(data)))

    for annot in [rhannot, lhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        end += len(names) - 2  # unknown and corpuscallosum

        # get indices of unknown and corpuscallosum and insert NaN values
        inds = [names.index(f) - n for n, f in enumerate(drop)]
        currdata = np.insert(data[start:end], inds, np.nan)

        # project to vertices and store
        projected.append(currdata[labels])
        start = end

    return np.hstack(projected)
Example #6
def test_source_space():
    "Test SourceSpace dimension"
    data_dir = mne.datasets.sample.data_path()
    subjects_dir = os.path.join(data_dir, 'subjects')
    annot_path = os.path.join(subjects_dir, '%s', 'label', '%s.%s.annot')

    for subject in ['fsaverage', 'sample']:
        mne_src = datasets._mne_source_space(subject, 'ico-4', subjects_dir)
        vertno = [mne_src[0]['vertno'], mne_src[1]['vertno']]
        ss = SourceSpace(vertno, subject, 'ico-4', subjects_dir)

        # labels
        for hemi_vertices, hemi in izip(ss.vertno, ('lh', 'rh')):
            labels, _, names = read_annot(annot_path % (subject, hemi, 'aparc'))
            start = 0 if hemi == 'lh' else len(ss.lh_vertno)
            hemi_tag = '-' + hemi
            for i, v in enumerate(hemi_vertices, start):
                label = labels[v]
                if label == -1:
                    eq_(ss.parc[i], 'unknown' + hemi_tag)
                else:
                    eq_(ss.parc[i], names[label] + hemi_tag)

        # connectivity
        conn = ss.connectivity()
        mne_conn = mne.spatial_src_connectivity(mne_src)
        assert_array_equal(conn, _matrix_graph(mne_conn))

        # sub-space connectivity
        sssub = ss[ss.dimindex('superiortemporal-rh')]
        ss2 = SourceSpace(vertno, subject, 'ico-4', subjects_dir, 'aparc')
        ss2sub = ss2[ss2.dimindex('superiortemporal-rh')]
        assert_array_equal(sssub.connectivity(), ss2sub.connectivity())
Example #7
    def read_label(self, hemi, parc_type='aparc'):
        """Read the labels (annotations) for each hemisphere.

        Parameters
        ----------
        hemi : str
            'lh' or 'rh'
        parc_type : str
            'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40'
            'aparc.DKTatlas40' is only for recent freesurfer versions

        Returns
        -------
        numpy.ndarray
            value at each vertex, indicating the label
        numpy.ndarray
            RGB + alpha colors for each label
        list of str
            names of the labels
        """
        parc_file = self.dir / 'label' / (hemi + '.' + parc_type + '.annot')
        try:
            vert_val, region_color, region_name = read_annot(parc_file)
        except NameError:
            raise ImportError('nibabel needs to be installed for this function')
        region_name = [x.decode('utf-8') for x in region_name]
        return vert_val, region_color, region_name
Example #8
def test_source_space():
    "Test SourceSpace dimension"
    data_dir = mne.datasets.sample.data_path()
    subjects_dir = os.path.join(data_dir, 'subjects')
    annot_path = os.path.join(subjects_dir, '%s', 'label', '%s.%s.annot')

    for subject in ['fsaverage', 'sample']:
        mne_src = datasets._mne_source_space(subject, 'ico-4', subjects_dir)
        vertices = [mne_src[0]['vertno'], mne_src[1]['vertno']]
        ss = SourceSpace(vertices, subject, 'ico-4', subjects_dir)

        # labels
        for hemi_vertices, hemi in zip(ss.vertices, ('lh', 'rh')):
            labels, _, names = read_annot(annot_path %
                                          (subject, hemi, 'aparc'))
            start = 0 if hemi == 'lh' else len(ss.lh_vertices)
            hemi_tag = '-' + hemi
            for i, v in enumerate(hemi_vertices, start):
                label = labels[v]
                if label == -1:
                    assert ss.parc[i] == 'unknown' + hemi_tag
                else:
                    assert ss.parc[i] == names[label].decode() + hemi_tag

        # connectivity
        conn = ss.connectivity()
        mne_conn = mne.spatial_src_connectivity(mne_src)
        assert_array_equal(conn, _matrix_graph(mne_conn))

        # sub-space connectivity
        sssub = ss[ss._array_index('superiortemporal-rh')]
        ss2 = SourceSpace(vertices, subject, 'ico-4', subjects_dir, 'aparc')
        ss2sub = ss2[ss2._array_index('superiortemporal-rh')]
        assert_array_equal(sssub.connectivity(), ss2sub.connectivity())
Example #9
    def read_label(self, hemi, parc_type='aparc'):
        """Read the labels (annotations) for each hemisphere.

        Parameters
        ----------
        hemi : str
            'lh' or 'rh'
        parc_type : str
            'aparc', 'aparc.a2009s', 'BA', 'BA.thresh', or 'aparc.DKTatlas40'
            'aparc.DKTatlas40' is only for recent freesurfer versions

        Returns
        -------
        numpy.ndarray
            value at each vertex, indicating the label
        numpy.ndarray
            RGB + alpha colors for each label
        list of str
            names of the labels
        """
        parc_file = self.dir / 'label' / (hemi + '.' + parc_type + '.annot')
        try:
            vert_val, region_color, region_name = read_annot(parc_file)
        except NameError:
            raise ImportError('nibabel needs to be installed for this function')
        region_name = [x.decode('utf-8') for x in region_name]
        return vert_val, region_color, region_name
Example #10
def getannot(annotname):
    #initiate DataFrame
    #may want to make concatenation/join (instead of append) 
    #so can have one column per annotation/set of labels
    annot_df=[]
    for hemi in hemilist:
        annot_data=fs.read_annot(
            '/Applications/freesurfer/'
            'subjects/fsaverage/'
            'label/'+
            hemi + 
            '.' + 
            annotname + 
             '.annot'
        )
        annot_hemi=pd.DataFrame(
            {"annot_label" : annot_data[0],
             "annot_name": annotname, 
             "vertex_index" : range(
                 len(annot_data[0])
             ), 
             "hemi": hemi_key[hemi]})
        annot_df.append(annot_hemi)
    annots=pd.concat(annot_df).set_index(
        ['hemi','vertex_index']
    )
    return annots
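# Usage sketch (illustrative): `hemilist` and `hemi_key` are module-level
# globals in the original snippet, so hypothetical definitions are given here
# to make the call self-contained (the fsaverage label path is hard-coded in
# the function above).
import pandas as pd
from nibabel import freesurfer as fs

hemilist = ['lh', 'rh']
hemi_key = {'lh': 'L', 'rh': 'R'}  # hypothetical hemisphere codes

aparc_df = getannot('aparc')       # DataFrame indexed by (hemi, vertex_index)
print(aparc_df.head())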
Example #11
def load_segmentation(filename: str) -> Segmentation:
    """Loads a segmentation from a FreeSurfer annot file.

    Loads a segmentation from a FreeSurfer annotation file. Because annot
    files do not have a segmentation name, the file name is used instead.

    Args:
        filename: The name of the annot file that is loaded.

    Returns:
        segmentation: A segmentation whose name is `filename`.

    """

    keys, ctab, names = nibfs.read_annot(filename)
    segmentation = Segmentation(os.path.basename(filename), keys)

    def rgbt2rgba(color):
        return color[0], color[1], color[2], 255

    # Add the label names.
    for i, (color, name) in enumerate(zip(ctab, names)):
        segmentation.add_label(i, Label(name.decode(), rgbt2rgba(color[:4])))

    return segmentation
Example #12
def extract_time_series(lh_surf,
                        rh_surf,
                        lh_annot,
                        rh_annot,
                        output_dir,
                        prefix,
                        confounds=None,
                        confoundsName="NoConfs"):

    import numpy as np
    from connectivityworkflow.surface_processing import extract_hemisphere_time_series
    from nibabel.freesurfer import read_annot
    from nilearn.surface import load_surf_data
    from nilearn.signal import clean
    import pandas as pd
    import os
    from os.path import join as opj

    lh_mask, _, lh_names = read_annot(lh_annot)
    rh_mask, _, rh_names = read_annot(rh_annot)
    lh_names, rh_names = lh_names[1:], rh_names[1:]
    lh_surf_data = load_surf_data(lh_surf)
    rh_surf_data = load_surf_data(rh_surf)

    time_series_lh = extract_hemisphere_time_series(lh_surf_data, lh_mask)
    time_series_rh = extract_hemisphere_time_series(rh_surf_data, rh_mask)

    time_series = np.concatenate((time_series_lh, time_series_rh)).T
    if isinstance(confounds, np.ndarray):
        time_series = clean(time_series, confounds=confounds)
    to_delete = np.where(time_series.sum(axis=0) == 0)
    print("To delete: {}".format(to_delete))
    time_series = np.delete(time_series, to_delete, axis=1)
    # column names must cover both hemispheres (lh then rh), minus dropped ROIs
    names = [n for i, n in enumerate(lh_names + rh_names)
             if i not in to_delete[0]]
    #Saving data
    time_seriesDF = pd.DataFrame(time_series, columns=names)
    #Name of the OutPutFile
    directory = opj(output_dir, "time_series")
    if not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except Exception:
            print("exception makedirs")
    outFile = os.path.join(directory,
                           prefix + confoundsName + "TimeSeriesRoI.tsv")
    time_seriesDF.to_csv(outFile, sep="\t", index=False)
    return time_series, names, confoundsName
Example #13
def plot_surf(stc,
              clim,
              colormap,
              time_label,
              surf,
              transparent,
              offscreen,
              subjects_dir=None,
              blnMaskUnknown=False):
    if offscreen:
        # create offscreen figure so that I can use the computer while it's saving
        from surfer.viz import _make_viewer
        figure = _make_viewer(None, 1, 2, stc.subject, (800, 1600), True)[0][0]
    else:
        figure = None

    # This raises a vtk error that has something to do with smoothing_steps=None
    # (any time smoothing steps is big enough to cover all of the vertices)
    # but it still displays the correct figure.
    # can't catch the error since it's in c, printing to console.
    brain = stc.plot(surface=surf,
                     hemi='split',
                     views='medial',
                     clim=clim,
                     colormap=colormap,
                     transparent=transparent,
                     time_unit='s',
                     time_label=time_label,
                     size=[1600, 800],
                     figure=figure,
                     smoothing_steps=None,
                     subjects_dir=subjects_dir)

    if blnMaskUnknown:
        subjects_dir = mne.utils.get_subjects_dir(subjects_dir=subjects_dir,
                                                  raise_error=True)

        for hemi in ['lh', 'rh']:
            aparc_file = op.join(subjects_dir, stc.subject, "label",
                                 '{}.aparc.annot'.format(hemi))
            labels, _, names = fs.read_annot(aparc_file)
            masked_region_inds = np.arange(len(names))[np.in1d(
                names, ['corpuscallosum', 'unknown'])]
            masked_region_inds = np.append(masked_region_inds, -1)  # unlabeled
            mask = np.in1d(labels, masked_region_inds)

            brain.add_data(mask,
                           hemi=hemi,
                           min=0,
                           max=1,
                           thresh=0.5,
                           colormap='gray',
                           colorbar=False,
                           alpha=0.99)

    return brain
Example #14
def loadAnnotation(pathToAnnotation):
    '''
    This loads an annotation file. The returned tuple contains:
    [0]    - vector of label values, one per vertex
    [1]    - color table (ctab) for the labels
    [2]    - list of names for the labels
    '''
    annot = nfs.read_annot(pathToAnnotation)

    return annot
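# Usage sketch (illustrative): the path is hypothetical. read_annot returns
# per-vertex label indices, the color table (ctab), and the label names as
# bytes, which are typically decoded before use.
labels, ctab, names = loadAnnotation('lh.aparc.annot')
names = [n.decode() for n in names]
print(len(labels), 'vertices,', len(names), 'labels')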
Example #15
def annot_legend(lh, rh, *args, **kwargs):
    """Plot a legend for a freesurfer parcellation

    Parameters
    ----------
    lh : str
        Path to the lh annot-file.
    rh : str
        Path to the rh annot-file.

    Returns
    -------
    legend : plot.ColorList
        ColorList figure with legend for the parcellation.
    """
    _, lh_colors, lh_names = read_annot(lh)
    _, rh_colors, rh_names = read_annot(rh)
    lh_colors = dict(izip(lh_names, lh_colors[:, :4] / 255.0))
    rh_colors = dict(izip(rh_names, rh_colors[:, :4] / 255.0))
    names = set(lh_names)
    names.update(rh_names)
    colors = {}
    seq = []  # sequential order in legend
    seq_lh = []
    seq_rh = []
    for name in names:
        if name in lh_colors and name in rh_colors:
            if np.array_equal(lh_colors[name], rh_colors[name]):
                colors[name] = lh_colors[name]
                seq.append(name)
            else:
                colors[name + "-lh"] = lh_colors[name]
                colors[name + "-rh"] = rh_colors[name]
                seq_lh.append(name + "-lh")
                seq_rh.append(name + "-rh")
        elif name in lh_colors:
            colors[name + "-lh"] = lh_colors[name]
            seq_lh.append(name + "-lh")
        else:
            colors[name + "-rh"] = rh_colors[name]
            seq_rh.append(name + "-rh")
    return ColorList(colors, seq + seq_lh + seq_rh, *args, **kwargs)
Example #16
def loadVertexDataFile(infile):
    """Loads the given Freesurfer vertex data, label, or annotation file.

    This function return different things depending on what ``infile`` is:

     - If ``infile`` is a vertex data file, a ``(nvertices,)`` array is
       returned, containing one value for each vertex in the mesh.

     - If ``infile`` is a ``mgh``/``mgz`` file, the image data is returned
       as-is, with dimensions of length 1 squeezed out (under the assumption
       that the image contains scalar vertex data).

     - If ``infile`` is a vertex label file, a tuple containing the following
       is returned:

       - a ``(n,)`` array, containing the indices of all vertices that are
         specified in the file.

       - a ``(n,)`` array, containing scalar value for each vertex

     - If ``infile`` is a vertex annotation file, a tuple containing the
       following is returned:

       - a ``(n,)`` array  containing the indices of all ``n`` vertices that
         are specified in the file.

       - a ``(l, 5)`` array containing the RGBA colour, and the label value,
         for every label that is specified in the file.

       - A list of length ``l``, containing the names of every label that is
         specified in the file.

    """

    if isVertexDataFile(infile):
        return nibfs.read_morph_data(infile)

    elif isVertexLabelFile(infile):
        return nibfs.read_label(infile, read_scalars=True)

    elif isVertexAnnotFile(infile):

        # nibabel 2.2.1 is broken w.r.t. .annot files.
        # raise ValueError('.annot files are not yet supported')

        labels, lut, names = nibfs.read_annot(infile, orig_ids=False)
        return labels, lut, names

    elif isVertexMGHFile(infile):
        return fslmgh.MGHImage(infile)[:].squeeze()

    else:
        raise ValueError('Unrecognised freesurfer '
                         'file type: {}'.format(infile))
Example #17
def fix_annot_names(subject,
                    parc,
                    clean_subject=None,
                    clean_parc=None,
                    hemi='both',
                    subjects_dir=None):
    """Fix for Freesurfer's mri_surf2surf corrupting label names in annot files

    Notes
    -----
    Requires nibabel > 1.3.0 for annot file I/O
    """
    # process args
    subjects_dir = get_subjects_dir(subjects_dir)
    if clean_subject is None:
        clean_subject = subject
    if clean_parc is None:
        clean_parc = parc

    fpaths, hemis = _get_annot_fname(None, subject, hemi, parc, subjects_dir)
    clean_fpaths, _ = _get_annot_fname(None, clean_subject, hemi, clean_parc,
                                       subjects_dir)

    for fpath, clean_fpath, hemi in zip(fpaths, clean_fpaths, hemis):
        labels, ctab, names = read_annot(fpath)
        _, _, clean_names = read_annot(clean_fpath)
        if all(n == nc for n, nc in zip(names, clean_names)):
            continue

        if len(clean_names) != len(names):
            err = ("Different names in %s annot files: %s vs. "
                   "%s" % (hemi, str(names), str(clean_names)))
            raise ValueError(err)

        for clean_name, name in zip(clean_names, names):
            if not name.startswith(clean_name):
                err = "%s does not start with %s" % (str(name), clean_name)
                raise ValueError(err)

        write_annot(fpath, labels, ctab, clean_names)
Example #18
    def load(self, meshfile, inflatedmeshpath=None, annotfile=None):
        """ Load a FreeSurfer surface.

        Parameters
        ----------
        meshfile: str (mandatory)
            the location of the file containing the FreeSurfer mesh to be
            loaded.
        inflatedmeshpath: str (optional, default None)
            the location of the file containing the FreeSurfer inflated mesh
            to be loaded.
        annotfile: str (optional, default None)
            the location of the file containing the FreeSurfer annotations to
            be loaded.

        Returns
        -------
        surf: TriSurface
            a triangular surface representation.
        """
        vertices, triangles = freesurfer.read_geometry(meshfile)
        if inflatedmeshpath is not None:
            inflated_vertices, _triangles = freesurfer.read_geometry(
                inflatedmeshpath)
            if not numpy.allclose(triangles, _triangles):
                raise ValueError("'{0}' and '{1}' do not represent the same "
                                 "surface.".format(meshfile, inflatedmeshpath))
        else:
            inflated_vertices = None
        if annotfile is not None:
            labels, ctab, regions = freesurfer.read_annot(annotfile,
                                                          orig_ids=False)
            meta = dict((index, {
                "region": item[0],
                "color": item[1][:4].tolist()
            }) for index, item in enumerate(zip(regions, ctab)))
        else:
            labels = None
            meta = None

        return TriSurface(vertices=vertices,
                          triangles=triangles,
                          labels=labels,
                          metadata=meta,
                          inflated_vertices=inflated_vertices)
Example #19
def plot_fragment(frag_file, filename, fpath_sphere, surface):
    """
    Plot fragmented region on specific surface
    """

    # Load labels of new fragmented annotation
    labels, _, _ = freesurfer.read_annot(frag_file)

    # Specify on which surface to plot
    surf_type = fpath_sphere.replace('.sphere', '.%s' % surface)

    # Plot fragmented region on surface with colored annotation
    plot_surf_roi(surf_type,
                  roi_map=labels,
                  hemi='left',
                  view='lateral',
                  cmap='Spectral',
                  output_file=filename)
Example #20
    def load_data(self, filename):
        """ Load the data from a surface scalar file

        Parameters
        ----------
        filename: str
            Path to a surface scalar file

        Returns
        -------
            self: a Surface object
        """

        if filename.endswith(('.curv', '.sulc', '.volume', '.thickness', '.area')):
            data = np.expand_dims(freesurfer.read_morph_data(filename), axis=-1)

        elif filename.endswith(('.shape.gii', '.func.gii')):
            data = np.expand_dims(nib.load(filename).darrays[0].data, axis=-1)

        elif filename.endswith(('.mgz', '.mgh')):
            data = nib.load(filename).get_data()
            data = data.reshape((data.shape[0], data.shape[-1]))

        elif filename.endswith(('.dscalar.nii', '.dseries.nii')):
            data = nib.load(filename).get_data()
            data = data.T

        elif filename.endswith('.label.gii'):
            data = np.expand_dims(nib.load(filename).darrays[0].data, axis=-1)

        elif filename.endswith('.dlabel.nii'):
            data = nib.load(filename).get_data().T

        elif filename.endswith('.label'):
            data = np.expand_dims(freesurfer.read_label(filename), axis=-1)

        elif filename.endswith('.annot'):
            data, _, _ = freesurfer.read_annot(filename)

        else:
            suffix = os.path.split(filename)[1].split('.')[-1]
            raise ImageFileError('This file format-{} is not supported at present.'.format(suffix))

        self.data = data
Example #21
def label_from_annot(sss, subject, subjects_dir, parc=None, color=(0, 0, 0)):
    """Label for known regions of a source space

    Parameters
    ----------
    sss : mne.SourceSpaces
        Source space.
    subject : str
        MRI-subject.
    subjects_dir : str
        MRI subjects-directory.
    parc : str
        Parcellation name.
    color : matplotlib color
        Label color.

    Returns
    -------
    label : mne.Label
        Label encompassing known regions of ``parc`` in ``sss``.
    """
    fname = SourceSpace._ANNOT_PATH.format(subjects_dir=subjects_dir,
                                           subject=subject,
                                           hemi='%s',
                                           parc=parc)

    # find vertices for each hemisphere
    labels = []
    for hemi, ss in zip(('lh', 'rh'), sss):
        annotation, _, names = read_annot(fname % hemi)
        bad = [-1, names.index(b'unknown')]
        keep = ~np.in1d(annotation[ss['vertno']], bad)
        if np.any(keep):
            label = mne.Label(ss['vertno'][keep], hemi=hemi, color=color)
            labels.append(label)

    # combine hemispheres
    if len(labels) == 2:
        lh, rh = labels
        return lh + rh
    elif len(labels) == 1:
        return labels.pop(0)
    else:
        raise RuntimeError("No vertices left")
Example #22
def find_fsaverage_centroids(lhannot, rhannot, surf='sphere'):
    """
    Finds vertices corresponding to centroids of parcels in annotation files

    Note that using any other `surf` besides the default of 'sphere' may result
    in centroids that are not directly within the parcels themselves due to
    sulcal folding patterns.

    Parameters
    ----------
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere. Switching the order of inputs (i.e., providing right before
        left) is perfectly reasonable, if desired.
    surf : str, optional
        Surface on which to find parcel centroids. Default: 'sphere'

    Returns
    -------
    centroids : (N, 3) numpy.ndarray
        xyz coordinates of vertices closest to the centroid of each parcel
        defined in `lhannot` and `rhannot`
    hemiid : (N,) numpy.ndarray
        Array denoting hemisphere designation of coordinates in `centroids`,
        where `hemiid=0` denotes the left and `hemiid=1` the right hemisphere
    """

    surfaces = fetch_fsaverage()[surf]

    centroids, hemiid = [], []
    for n, (annot, surf) in enumerate(zip([lhannot, rhannot], surfaces)):
        vertices, faces = read_geometry(surf)
        labels, ctab, names = read_annot(annot)

        for lab in np.unique(labels):
            if b'corpuscallosum' in names[lab] or b'unknown' in names[lab]:
                continue
            coords = np.atleast_2d(vertices[labels == lab].mean(axis=0))
            roi = vertices[np.argmin(cdist(vertices, coords), axis=0)[0]]
            centroids.append(roi)
            hemiid.append(n)

    return np.row_stack(centroids), np.asarray(hemiid)
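# Usage sketch (illustrative): the .annot paths are hypothetical and must be at
# fsaverage resolution, since the labels are combined with the fetched
# fsaverage sphere surfaces.
centroids, hemiid = find_fsaverage_centroids('lh.aparc.annot', 'rh.aparc.annot')
print(centroids.shape, hemiid.shape)  # (N, 3) and (N,)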
Example #23
def get_roi_mask(roi_idx, fpath_annot):
    """
    Creates a region mask specifying which vertices to consider
    """

    # Load annotation file
    roi_labels, _, roi_names = freesurfer.read_annot(fpath_annot)

    # Get index of rois to consider
    roi_index = [roi_names.index(np.bytes_(n)) for n in roi_idx]

    # Create vertex mask
    roi_mask = np.isin(roi_labels, np.array(roi_index))

    # Take all regions if none specified
    if roi_mask.sum() == 0:
        roi_mask = np.invert(roi_mask)

    return roi_mask
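# Usage sketch (illustrative): restrict an analysis to a couple of aparc
# regions. The annotation path is hypothetical; the region names follow the
# Desikan-Killiany (aparc) labels.
roi_mask = get_roi_mask(['precentral', 'postcentral'], 'lh.aparc.annot')
print('vertices in mask:', int(roi_mask.sum()))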
Example #24
    def load(self, meshfile, inflatedmeshpath=None, annotfile=None):
        """ Load a FreeSurfer surface.

        Parameters
        ----------
        meshfile: str (mandatory)
            the location of the file containing the FreeSurfer mesh to be
            loaded.
        inflatedmeshpath: str (optional, default None)
            the location of the file containing the FreeSurfer inflated mesh
            to be loaded.
        annotfile: str (optional, default None)
            the location of the file containing the FreeSurfer annotations to
            be loaded.

        Returns
        -------
        surf: TriSurface
            a triangular surface representation.
        """
        vertices, triangles = freesurfer.read_geometry(meshfile)
        if inflatedmeshpath is not None:
            inflated_vertices, _triangles = freesurfer.read_geometry(
                inflatedmeshpath)
            if not numpy.allclose(triangles, _triangles):
                raise ValueError("'{0}' and '{1}' do not represent the same "
                                 "surface.".format(meshfile, inflatedmeshpath))
        else:
            inflated_vertices = None
        if annotfile is not None:
            labels, ctab, regions = freesurfer.read_annot(
                annotfile, orig_ids=False)
            meta = dict(
                (index, {"region": item[0], "color": item[1][:4].tolist()})
                for index, item in enumerate(zip(regions, ctab)))
        else:
            labels = None
            meta = None

        return TriSurface(vertices=vertices, triangles=triangles,
                          labels=labels, metadata=meta,
                          inflated_vertices=inflated_vertices)
Example #25
def label_from_annot(sss, subject, subjects_dir, parc=None, color=(0, 0, 0)):
    """Label for known regions of a source space

    Parameters
    ----------
    sss : mne.SourceSpaces
        Source space.
    subject : str
        MRI-subject.
    subjects_dir : str
        MRI subjects-directory.
    parc : str
        Parcellation name.
    color : matplotlib color
        Label color.

    Returns
    -------
    label : mne.Label
        Label encompassing known regions of ``parc`` in ``sss``.
    """
    fname = SourceSpace._ANNOT_PATH.format(subjects_dir=subjects_dir, subject=subject, hemi='%s', parc=parc)

    # find vertices for each hemisphere
    labels = []
    for hemi, ss in zip(('lh', 'rh'), sss):
        annotation, _, names = read_annot(fname % hemi)
        bad = [-1, names.index(b'unknown')]
        keep = ~np.in1d(annotation[ss['vertno']], bad)
        if np.any(keep):
            label = mne.Label(ss['vertno'][keep], hemi=hemi, color=color)
            labels.append(label)

    # combine hemispheres
    if len(labels) == 2:
        lh, rh = labels
        return lh + rh
    elif len(labels) == 1:
        return labels.pop(0)
    else:
        raise RuntimeError("No vertices left")
Example #26
def read_cortex_surface_segmentation(fsdir, physical_to_index, fsconfig,
                                     affine=None):
    """ Read the cortex gyri surface segmentatation of freesurfer.

    Give access to the right and left hemisphere segmentations that can be
    projected on the cortical and inflated cortical surfaces.
    The vertices are expressed in voxel coordinates.

    Parameters
    ----------
    fsdir: str (mandatory)
        the subject freesurfer segmentation directory.
    physical_to_index: array (mandatory)
        the transformation to project a physical point in an array.
    fsconfig: str (mandatory)
        the freesurfer configuration file.
    affine: array (optional, default None)
        an affine transformation in voxel coordinates that will be applied on
        the output vertex of the cortex surface.

    Returns
    -------
    segmentation: dict
        contain the two hemisphere 'lh' and 'rh' triangular surfaces and
        inflated surfaces represented in a TriSurface structure.
    """
    # Construct the path to the surface segmentation results and associated
    # labels
    meshdir = os.path.join(fsdir, "surf")
    labeldir = os.path.join(fsdir, "label")
    segfile = os.path.join(fsdir, "mri")

    # Get deformation between the ras and ras-tkregister spaces
    asegfile = os.path.join(segfile, "aseg.mgz")
    translation = tkregister_translation(asegfile, fsconfig)

    # Construct the deformation to apply on the cortex mesh
    if affine is None:
        affine = numpy.identity(4)
    deformation = numpy.dot(affine, numpy.dot(physical_to_index, translation))

    # Create a dictionary to contain all the surfaces and labels
    segmentation = {}

    # Select the hemisphere
    for hemi in ["lh", "rh"]:

        # Get annotation id at each vertex (if a vertex does not belong
        # to any label and orig_ids=False, its id will be set to -1) and
        # the names of the labels
        annotfile = os.path.join(labeldir, "{0}.aparc.annot".format(hemi))
        labels, ctab, regions = freesurfer.read_annot(
            annotfile, orig_ids=False)
        meta = dict((index, {"region": item[0], "color": item[1][:4].tolist()})
                    for index, item in enumerate(zip(regions, ctab)))

        # Select the surface type
        hemisegmentation = {}
        for surf in ["white", "inflated"]:

            # Load the mesh: a 2-uplet with vertex (x, y, z) coordinates and
            # mesh triangles
            meshfile = os.path.join(meshdir, "{0}.{1}".format(hemi, surf))
            mesh = freesurfer.read_geometry(meshfile)
            hemisegmentation[surf] = {
                "vertices": apply_affine_on_mesh(mesh[0], deformation),
                "triangles": mesh[1]
            }

        # Save the segmentation result
        segmentation[hemi] = TriSurface(
            vertices=hemisegmentation["white"]["vertices"],
            inflated_vertices=hemisegmentation["inflated"]["vertices"],
            triangles=hemisegmentation["white"]["triangles"],
            labels=labels,
            metadata=meta)

    return segmentation
Example #27
def reduce_from_vertices(data, rhannot, lhannot):
    """
    Reduces vertex-level `data` to parcels defined in annotation files

    Takes average of vertices within each parcel, excluding np.nan values
    (i.e., np.nanmean). Assigns np.nan to parcels for which all vertices are
    np.nan.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Vertex-level data to be reduced to parcels
    {rh,lh}annot : str
        Path to .annot file containing labels to parcels on the {right,left}
        hemisphere

    Returns
    -------
    reduced : numpy.ndarray
        Parcellated data
    """

    drop = [b'unknown', b'corpuscallosum']
    start = end = 0
    reduced = []

    # check this so we're not unduly surprised by anything...
    expected = sum([len(read_annot(a)[0]) for a in [rhannot, lhannot]])
    if expected != len(data):
        raise ValueError('Number of vertices in provided annotation files '
                         'differs from size of vertex-level data array.\n'
                         '    EXPECTED: {} vertices\n'
                         '    RECEIVED: {} vertices'.format(
                             expected, len(data)))

    for annot in [rhannot, lhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        end += len(labels)

        # get average of vertex-level data within parcels
        # set all NaN values to 0 before calling `_stats` because we are
        # returning sums, so the 0 values won't impact the sums (if we left
        # the NaNs then all parcels with a single NaN value would be NaN)
        currdata = data[start:end].copy()
        isna = np.isnan(currdata)
        currdata[isna] = 0
        counts, sums = _stats(currdata, labels, np.unique(labels))

        # however, we do need to account for the NaN values in the counts
        # so that our means are similar to what we'd get from e.g., np.nanmean
        # here, our "sums" are the counts of NaN values in our parcels
        _, nacounts = _stats(isna, labels, np.unique(labels))
        counts = (np.asanyarray(counts).astype(float) -
                  np.asanyarray(nacounts).astype(float))

        with np.errstate(divide='ignore', invalid='ignore'):
            currdata = sums / counts

        # get indices of unknown and corpuscallosum and delete from parcels
        inds = [names.index(f) for f in drop]
        currdata = np.delete(currdata, inds)

        # store parcellated data
        reduced.append(currdata)
        start = end

    return np.hstack(reduced)
Example #28
    data_file = jf['data_file']

# Load DataFrame
df = pd.read_csv(data_file)

# Load random subject
i = np.random.randint(1, 10)
img_dir = os.path.join(dataset_folder, df['folder'].iloc[i], 'mri',
                       'brainmask.mgz')
img = nb.load(img_dir)
print('[  OK  ] Image loaded')

# Load annotations
annot_path = os.path.join(dataset_folder, df['folder'].iloc[i], 'label',
                          'lh.aparc.annot')
labels, ctab, names = nbfs.read_annot(annot_path)
print('[  OK  ] Annotations file loaded')

# Load thickness
tk_path = os.path.join(dataset_folder, df['folder'].iloc[i], 'surf',
                       'lh.thickness')
tk = nbfs.read_morph_data(tk_path)
print('[  OK  ] Thickness file loaded')

#
regions = pd.DataFrame({
    key: val
    for (key, val) in zip(names, np.linspace(-1, 36, 37, dtype=int))
})

# data = {annot[0]: tk}
Example #29
def map_to_average_brain(coords, left_pial, right_pial, left_sphere,
                         right_sphere):
    """
    Maps a set of Freesurfer surface coordinates in an individual brain to the equivalent coordinates on the average
    brain.

    Method taken from the iElvis project (http://ielvis.pbworks.com), which implemented the same function in MATLAB:
    https://github.com/iELVis/iELVis/blob/master/iELVis_MAIN/iELVis_MATLAB/ELEC_LOC/sub2AvgBrain.m

    :param coords: {np.ndarray} Coordinates in the individual Freesurfer space
    :param left_pial: {str} Path to the subject's left-hemisphere pial surface
    :param right_pial: {str} Path to the subject's right-hemisphere pial surface
    :param left_sphere: {str} Path to the subject's left-hemisphere spherical registration surface
    :param right_sphere: {str} Path to the subject's right-hemisphere spherical registration surface
    :return: {np.ndarray} The matching coordinates in the average brain
    :return: {np.ndarray} The corresponding atlas labels in the average brain
    """
    hemispheres = [
        'left', 'right'
    ]  # For all surfaces, we append the right hemisphere to the left hemisphere
    fsavg_subj_dir = osp.join(
        paths.rhino_root,
        'data',
        'eeg',
        'freesurfer',
        'subjects',
        'fsaverage',
    )
    files = {
        'left_pial': left_pial,
        'right_pial': right_pial,
        'left_sphere': left_sphere,
        'right_sphere': right_sphere,
        'left_avg_sphere': osp.join(fsavg_subj_dir, 'surf', 'lh.sphere.reg'),
        'right_avg_sphere': osp.join(fsavg_subj_dir, 'surf', 'rh.sphere.reg'),
        'left_avg_pial': osp.join(fsavg_subj_dir, 'surf', 'lh.pial'),
        'right_avg_pial': osp.join(fsavg_subj_dir, 'surf', 'rh.pial'),
        'left_avg_annot': osp.join(fsavg_subj_dir, 'label', 'lh.aparc.annot'),
        'right_avg_annot': osp.join(fsavg_subj_dir, 'label', 'rh.aparc.annot')
    }

    # Find vertex indices on subject's pial surface
    pial_verts = [read_geometry(files['%s_pial' % h])[0] for h in hemispheres]

    distances = [dist.cdist(v, coords) for v in pial_verts]

    hemisphere = np.min(distances[0], 0) < np.min(distances[1], 0)
    pial_indices = [np.argmin(d, 0) for d in distances]

    # Take those vertices in sphere.reg
    sphere_verts = [
        read_geometry(files['%s_sphere' % h])[0] for h in hemispheres
    ]

    electrode_sphere_verts = [
        sv[pi] for (sv, pi) in zip(sphere_verts, pial_indices)
    ]

    # Find indices of nearest vertices in fsaverage.?h.sphere.reg
    avg_sphere_verts = [
        read_geometry(files['%s_avg_sphere' % h])[0] for h in hemispheres
    ]

    avg_sphere_indices = [
        np.argmin(dist.cdist(asv, esv), axis=0)
        for (asv, esv) in zip(avg_sphere_verts, electrode_sphere_verts)
    ]
    # Take those vertices on average pial surface
    avg_pial_verts = [
        read_geometry(files['%s_avg_pial' % h])[0] for h in hemispheres
    ]

    avg_pial_inds, _, avg_pial_labels = list(
        zip(*[read_annot(files['%s_avg_annot' % h]) for h in hemispheres]))
    avg_pial_labels = [np.array(x) for x in avg_pial_labels]

    new_pial_verts = np.where(
        hemisphere[:, None],
        *[apv[asi] for apv, asi in zip(avg_pial_verts, avg_sphere_indices)])
    new_pial_labels = np.where(
        hemisphere, *[
            np.array(apl)[(api[asi])] for apl, api, asi in zip(
                avg_pial_labels, avg_pial_inds, avg_sphere_indices)
        ])
    print(new_pial_verts.shape)
    print(new_pial_labels.shape)
    return new_pial_verts, new_pial_labels
Example #30
def spin_parcels(*,
                 lhannot,
                 rhannot,
                 version='fsaverage',
                 n_rotate=1000,
                 spins=None,
                 drop=None,
                 verbose=False,
                 **kwargs):
    """
    Rotates parcels in `{lh,rh}annot` and re-assigns based on maximum overlap

    Vertex labels are rotated with :func:`netneurotools.stats.gen_spinsamples`
    and a new label is assigned to each *parcel* based on the region maximally
    overlapping with its boundaries.

    Parameters
    ----------
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    n_rotate : int, optional
        Number of rotations to generate. Default: 1000
    spins : array_like, optional
        Pre-computed spins to use instead of generating them on the fly. If not
        provided will use other provided parameters to create them. Default:
        None
    drop : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of these regions in the returned data. If
        not specified, parcels defined in `netneurotools.freesurfer.FSIGNORE`
        are assumed to not be present. Default: None
    seed : {int, np.random.RandomState instance, None}, optional
        Seed for random number generation. Default: None
    verbose : bool, optional
        Whether to print occasional status messages. Default: False
    return_cost : bool, optional
        Whether to return cost array (specified as Euclidean distance) for each
        coordinate for each rotation. Default: True
    kwargs : key-value pairs
        Keyword arguments passed to `netneurotools.stats.gen_spinsamples`

    Returns
    -------
    spinsamples : (N, `n_rotate`) numpy.ndarray
        Resampling matrix to use in permuting data parcellated with labels from
        {lh,rh}annot, where `N` is the number of parcels. Indices of -1
        indicate that the parcel was completely encompassed by regions in
        `drop` and should be ignored.
    cost : (N, `n_rotate`,) numpy.ndarray
        Cost (specified as Euclidean distance) of re-assigning each coordinate
        for every rotation in `spinsamples`. Only provided if `return_cost` is
        True.
    """
    def overlap(vals):
        """ Returns most common non-negative value in `vals`; -1 if all neg
        """
        vals = np.asarray(vals)
        vals, counts = np.unique(vals[vals > 0], return_counts=True)
        try:
            return vals[counts.argmax()]
        except ValueError:
            return -1

    if drop is None:
        drop = FSIGNORE
    drop = _decode_list(drop)

    # get vertex-level labels (set drop labels to - values)
    vertices, end = [], 0
    for n, annot in enumerate([lhannot, rhannot]):
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)
        todrop = set(names) & set(drop)
        inds = [names.index(f) - n for n, f in enumerate(todrop)]
        labs = np.arange(len(names) - len(inds)) + (end - (len(inds) * n))
        insert = np.arange(-1, -(len(inds) + 1), -1)
        vertices.append(np.insert(labs, inds, insert)[labels])
        end += len(names)
    vertices = np.hstack(vertices)
    labels = np.unique(vertices)
    mask = labels > -1

    # get spins + cost (if requested)
    spins, cost = _get_fsaverage_spins(version=version,
                                       spins=spins,
                                       n_rotate=n_rotate,
                                       verbose=verbose,
                                       **kwargs)
    if len(vertices) != len(spins):
        raise ValueError('Provided annotation files have a different '
                         'number of vertices than the specified fsaverage '
                         'surface.\n    ANNOTATION: {} vertices\n     '
                         'FSAVERAGE:  {} vertices'.format(
                             len(vertices), len(spins)))

    # spin and assign regions based on max overlap
    regions = np.zeros((len(labels[mask]), n_rotate), dtype='int32')
    for n in range(n_rotate):
        if verbose:
            msg = f'Calculating parcel overlap: {n:>5}/{n_rotate}'
            print(msg, end='\b' * len(msg), flush=True)
        regions[:, n] = labeled_comprehension(vertices[spins[:, n]], vertices,
                                              labels, overlap, int, -1)[mask]

    if kwargs.get('return_cost'):
        return regions, cost

    return regions
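# Usage sketch (illustrative): generate a small number of parcel-level spins
# for hypothetical fsaverage-resolution .annot files; `seed` is forwarded to
# netneurotools.stats.gen_spinsamples via **kwargs.
spins = spin_parcels(lhannot='lh.aparc.annot', rhannot='rh.aparc.annot',
                     n_rotate=100, seed=1234)
print(spins.shape)  # (n_parcels, 100)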
Example #31
def vertices_to_parcels(data, *, lhannot, rhannot, drop=None):
    """
    Reduces vertex-level `data` to parcels defined in annotation files

    Takes average of vertices within each parcel, excluding np.nan values
    (i.e., np.nanmean). Assigns np.nan to parcels for which all vertices are
    np.nan.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Vertex-level data to be reduced to parcels
    {lh,rh}annot : str
        Path to .annot file containing labels to parcels on the {left,right}
        hemisphere
    drop : list, optional
        Specifies regions in {lh,rh}annot that should be removed from the
        parcellated version of `data`. If not specified, vertices corresponding
        to parcels defined in `netneurotools.freesurfer.FSIGNORE` will be
        removed. Default: None

    Returns
    -------
    reduced : numpy.ndarray
        Parcellated `data`, without regions specified in `drop`
    """

    if drop is None:
        drop = FSIGNORE
    drop = _decode_list(drop)

    data = np.vstack(data)

    n_parc = expected = 0
    for a in [lhannot, rhannot]:
        vn, _, names = read_annot(a)
        expected += len(vn)
        names = _decode_list(names)
        n_parc += len(names) - len(set(drop) & set(names))
    if expected != len(data):
        raise ValueError('Number of vertices in provided annotation files '
                         'differs from size of vertex-level data array.\n'
                         '    EXPECTED: {} vertices\n'
                         '    RECEIVED: {} vertices'.format(
                             expected, len(data)))

    reduced = np.zeros((n_parc, data.shape[-1]), dtype=data.dtype)
    start = end = n_parc = 0
    for annot in [lhannot, rhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)

        indices = np.unique(labels)
        end += len(labels)

        for idx in range(data.shape[-1]):
            # get average of vertex-level data within parcels
            # set all NaN values to 0 before calling `_stats` because we are
            # returning sums, so the 0 values won't impact the sums (if we left
            # the NaNs then all parcels with even one NaN entry would be NaN)
            currdata = np.squeeze(data[start:end, idx])
            isna = np.isnan(currdata)
            counts, sums = _stats(np.nan_to_num(currdata), labels, indices)

            # however, we do need to account for the NaN values in the counts
            # so that our means are similar to what we'd get from e.g.,
            # np.nanmean here, our "sums" are the counts of NaN values in our
            # parcels
            _, nacounts = _stats(isna, labels, indices)
            counts = (np.asanyarray(counts, dtype=float) -
                      np.asanyarray(nacounts, dtype=float))

            with np.errstate(divide='ignore', invalid='ignore'):
                currdata = sums / counts

            # get indices of unknown and corpuscallosum and delete from parcels
            inds = sorted([names.index(f) for f in set(drop) & set(names)])
            currdata = np.delete(currdata, inds)

            # store parcellated data
            reduced[n_parc:n_parc + len(names) - len(inds), idx] = currdata

        start = end
        n_parc += len(names) - len(inds)

    return np.squeeze(reduced)
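# Usage sketch (illustrative): round-trip between parcels and vertices with
# hypothetical .annot paths. Projecting to vertices and averaging back within
# each parcel should recover the original parcel values (dropped regions are
# excluded both ways).
import numpy as np

parc = np.random.rand(68)  # e.g. aparc with the FSIGNORE regions excluded
verts = parcels_to_vertices(parc, lhannot='lh.aparc.annot',
                            rhannot='rh.aparc.annot')
parc_again = vertices_to_parcels(verts, lhannot='lh.aparc.annot',
                                 rhannot='rh.aparc.annot')
assert np.allclose(parc, parc_again)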
Example #32
def parcels_to_vertices(data, *, lhannot, rhannot, drop=None):
    """
    Projects parcellated `data` to vertices defined in annotation files

    Assigns np.nan to all ROIs in `drop`

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Parcellated data to be projected to vertices. Parcels should be ordered
        by [left, right] hemisphere; ordering within hemisphere should
        correspond to the provided annotation files.
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere. These must be specified as keyword arguments to avoid
        accidental order switching.
    drop : list, optional
        Specifies regions in {lh,rh}annot that are not present in `data`. NaNs
        will be inserted in place of these regions in the returned data. If
        not specified, parcels defined in `netneurotools.freesurfer.FSIGNORE`
        are assumed to not be present. Default: None

    Returns
    -------
    projected : numpy.ndarray
        Vertex-level data
    """

    if drop is None:
        drop = FSIGNORE
    drop = _decode_list(drop)

    data = np.vstack(data).astype(float)

    # check this so we're not unduly surprised by anything...
    n_vert = expected = 0
    for a in [lhannot, rhannot]:
        vn, _, names = read_annot(a)
        n_vert += len(vn)
        names = _decode_list(names)
        expected += len(names) - len(set(drop) & set(names))
    if expected != len(data):
        raise ValueError('Number of parcels in provided annotation files '
                         'differs from size of parcellated data array.\n'
                         '    EXPECTED: {} parcels\n'
                         '    RECEIVED: {} parcels'.format(
                             expected, len(data)))

    projected = np.zeros((n_vert, data.shape[-1]), dtype=data.dtype)
    start = end = n_vert = 0
    for annot in [lhannot, rhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)
        todrop = set(names) & set(drop)
        end += len(names) - len(todrop)  # unknown and corpuscallosum

        # get indices of unknown and corpuscallosum and insert NaN values
        inds = sorted([names.index(f) for f in todrop])
        inds = [f - n for n, f in enumerate(inds)]
        currdata = np.insert(data[start:end], inds, np.nan, axis=0)

        # project to vertices and store
        projected[n_vert:n_vert + len(labels), :] = currdata[labels]
        start = end
        n_vert += len(labels)

    return np.squeeze(projected)
Beispiel #33
0
def find_parcel_centroids(*,
                          lhannot,
                          rhannot,
                          method='surface',
                          version='fsaverage',
                          surf='sphere',
                          drop=None):
    """
    Returns vertex coords corresponding to centroids of parcels in annotations

    Note that using any other `surf` besides the default of 'sphere' may result
    in centroids that are not directly within the parcels themselves due to
    sulcal folding patterns.

    Parameters
    ----------
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere. These must be specified as keyword arguments to avoid
        accidental order switching.
    method : {'average', 'surface', 'geodesic'}, optional
        Method for calculation of parcel centroid. See Notes for more
        information. Default: 'surface'
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    surf : str, optional
        Specifies which surface projection of fsaverage to use for finding
        parcel centroids. Default: 'sphere'
    drop : list, optional
        Specifies regions in {lh,rh}annot for which the parcel centroid should
        not be calculated. If not specified, centroids for parcels defined in
        `netneurotools.freesurfer.FSIGNORE` are not calculated. Default: None

    Returns
    -------
    centroids : (N, 3) numpy.ndarray
        xyz coordinates of vertices closest to the centroid of each parcel
        defined in `lhannot` and `rhannot`
    hemiid : (N,) numpy.ndarray
        Array denoting hemisphere designation of coordinates in `centroids`,
        where `hemiid=0` denotes the left and `hemiid=1` the right hemisphere

    Notes
    -----
    The following methods can be used for finding parcel centroids:

    1. ``method='average'``

       Uses the arithmetic mean of the coordinates for the vertices in each
       parcel. Note that in this case the calculated centroids will not
       actually fall on the surface of `surf`.

    2. ``method='surface'``

       Calculates the 'average' coordinates and then finds the closest vertex
       on `surf`, where closest is defined as the vertex with the minimum
       Euclidean distance.

    3. ``method='geodesic'``

       Uses the coordinates of the vertex with the minimum average geodesic
       distance to all other vertices in the parcel. Note that this is slightly
       more time-consuming than the other two methods, especially for
       high-resolution meshes.
    """

    methods = ['average', 'surface', 'geodesic']
    if method not in methods:
        raise ValueError('Provided method for centroid calculation {} is '
                         'invalid. Must be one of {}'.format(method, methods))

    if drop is None:
        drop = FSIGNORE
    drop = _decode_list(drop)

    surfaces = fetch_fsaverage(version)[surf]

    centroids, hemiid = [], []
    for n, (annot, surf) in enumerate(zip([lhannot, rhannot], surfaces)):
        vertices, faces = read_geometry(surf)
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)

        for lab in np.unique(labels):
            if names[lab] in drop:
                continue
            if method in ['average', 'surface']:
                roi = np.atleast_2d(vertices[labels == lab].mean(axis=0))
                if method == 'surface':  # find closest vertex on the sphere
                    roi = vertices[np.argmin(cdist(vertices, roi), axis=0)[0]]
            elif method == 'geodesic':
                inds, = np.where(labels == lab)
                roi = _geodesic_parcel_centroid(vertices, faces, inds)
            centroids.append(roi)
            hemiid.append(n)

    return np.row_stack(centroids), np.asarray(hemiid)
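# Hedged usage sketch for find_parcel_centroids(); the annotation paths are
# illustrative assumptions, and 'geodesic' is noticeably slower on dense meshes.
xyz, hemi = find_parcel_centroids(lhannot='/path/to/lh.aparc.annot',   # hypothetical
                                  rhannot='/path/to/rh.aparc.annot',   # hypothetical
                                  method='surface', version='fsaverage5')
# xyz.shape == (n_parcels, 3); hemi is 0 for left- and 1 for right-hemisphere rows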
Beispiel #34
0
def display_pits_parcellation(white_file,
                              parcellation_file,
                              labels=None,
                              pits_file=None,
                              parcellation_as_annotation=False,
                              interactive=True,
                              snap=False,
                              animate=False,
                              outdir=None,
                              name="pits_parcellation",
                              actor_ang=(0., 0., 0.)):
    """ Display the pits parcellation.

    The scene supports one feature activated via the keystroke:

    * 'p': Pick the data at the current mouse point. This will pop up a window
      with information on the current pick (i.e., the areal name).

    Parameters
    ----------
    white_file: str
        the white surface that will be displayed.
    parcellation_file: str
        the parcellation texture file.
    labels: dict, default None
        a mapping between an areal number and its name.
    pits_file: str, default None
        if specified, the pits locations texture file.
    parcellation_as_annotation: bool, default False
        if set, expect a FreeSurfer annotation file as the parcellation input.
    interactive: bool, default True
        if True, display the renderer.
    snap: bool, default False
        if True, create a snapshot of the scene: requires a valid outdir.
    animate: bool, default False
        if True, create a 360-degree GIF animation of the scene: requires a
        valid outdir.
    outdir: str, default None
        an existing directory.
    name: str, default 'pits_parcellation'
        the basename of the generated files.
    actor_ang: 3-tuple, default (0., 0., 0.)
        the actor's rotation angles around the x, y, z axes (in degrees).
    """
    # Load the PITS if specified
    if pits_file is not None:
        image = gio.read(pits_file)
        nb_of_surfs = len(image.darrays)
        if nb_of_surfs != 1:
            raise ValueError("'{0}' does not a contain a valid pits "
                             "texture.".format(pits_file))
        pits_texture = image.darrays[0].data
    else:
        pits_texture = None

    # Create an actor for the white matter surface
    ren = pvtk.ren()
    ren.SetBackground(1, 1, 1)
    if white_file.endswith(".gii"):
        image = gio.read(white_file)
        nb_of_surfs = len(image.darrays)
        if nb_of_surfs != 2:
            raise ValueError("'{0}' does not a contain a valid white "
                             "mesh.".format(white_file))
        vertices = image.darrays[0].data
        triangles = image.darrays[1].data
    else:
        _surf = TriSurface.load(white_file)
        vertices = _surf.vertices
        triangles = _surf.triangles
    if parcellation_as_annotation:
        annotations = fio.read_annot(parcellation_file)
        texture, _, labels = annotations
    else:
        image_labels = gio.read(parcellation_file)
        texture = numpy.round(image_labels.darrays[0].data).astype(int)
    wm_surf = TriSurface(vertices, triangles, labels=texture.copy())

    # Graph coloring (four color theorem style) to generate the cmap
    import networkx as nx
    import json
    # > define distinct colors
    colors_rgb = [(230, 25, 75), (60, 180, 75), (255, 225, 25), (0, 130, 200),
                  (245, 130, 48), (145, 30, 180),
                  (70, 240, 240), (240, 50, 230), (210, 245, 60),
                  (250, 190, 190), (0, 128, 128), (230, 190, 255),
                  (170, 110, 40), (255, 250, 200), (128, 0, 0),
                  (170, 255, 195), (128, 128, 0), (255, 215, 180), (0, 0, 128),
                  (128, 128, 128), (255, 255, 255)]
    # > create the graph nodes
    graph = nx.Graph()
    unique_labels = numpy.unique(texture)
    graph.add_nodes_from(unique_labels, color=None)
    # > get each cluster's vertices & neighboring vertices
    clusters_map = {}
    for label in unique_labels:
        indices = numpy.where(wm_surf.labels == label)[0]
        cluster_triangles = wm_surf.triangles[list(
            numpy.where(numpy.isin(wm_surf.triangles, indices))[0])]
        cluster_indices = cluster_triangles[numpy.where(
            numpy.isin(cluster_triangles, indices, invert=True))]
        neighboors_indices = list(
            set(cluster_indices.astype(int)) - set(indices.astype(int)))
        clusters_map[label] = {
            "vertices": indices.tolist(),
            "neighboors": neighboors_indices
        }
    # > compute the graph edges
    edges = []
    nb_labels = len(unique_labels)
    for ind1 in range(nb_labels):
        for ind2 in range(ind1 + 1, nb_labels):
            label = unique_labels[ind1]
            other_label = unique_labels[ind2]
            if numpy.isin(clusters_map[other_label]["vertices"],
                          clusters_map[label]["neighboors"]).any():
                edges.append([label, other_label])
    graph.add_edges_from(edges)
    # > graph coloring
    colors = nx.algorithms.coloring.greedy_coloring.greedy_color(graph)
    ctab = []
    for label, color_id in colors.items():
        if label < 0:
            continue
        ctab.append(
            list(colors_rgb[color_id % len(colors_rgb)]) + [255., label])
    ctab.append([0., 0., 0., 255., unique_labels.max() + 1])
    ctab = numpy.asarray(ctab)

    # > create the actor
    wm_surf.labels = wm_surf.labels.astype(float)
    if pits_texture is not None:
        wm_surf.labels[numpy.where(pits_texture == 1)] = (unique_labels.max() +
                                                          1)
    wm_surf.labels[numpy.where(wm_surf.labels == -1)] = unique_labels.max() + 1
    actor = pvtk.surface(wm_surf.vertices,
                         wm_surf.triangles,
                         wm_surf.labels,
                         ctab=ctab,
                         opacity=1,
                         set_lut=True)
    actor.label = "white"
    actor.RotateX(actor_ang[0])
    actor.RotateY(actor_ang[1])
    actor.RotateZ(actor_ang[2])
    pvtk.add(ren, actor)

    # Show the renderer
    if interactive:
        pvtk.add(ren, actor)
        pvtk.show(ren, title=name)

    # Create a snap
    if snap:
        if not os.path.isdir(outdir):
            raise ValueError("'{0}' is not a valid directory.".format(outdir))
        pvtk.record(ren, outdir, name, n_frames=1)

    # Create an animation
    if animate:
        if not os.path.isdir(outdir):
            raise ValueError("'{0}' is not a valid directory.".format(outdir))
        pvtk.record(ren,
                    outdir,
                    name,
                    n_frames=36,
                    az_ang=10,
                    animate=True,
                    delay=25)
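# A minimal sketch of the coloring step above, in isolation: neighboring parcels
# become graph edges and networkx's greedy coloring assigns a small set of color
# ids, which are then mapped onto an RGBA + label lookup table. The adjacency
# and palette below are made up purely for illustration.
import networkx as nx
import numpy

palette = [(230, 25, 75), (60, 180, 75), (255, 225, 25), (0, 130, 200)]
graph = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])   # parcel adjacency edges
colors = nx.coloring.greedy_color(graph)             # {label: color id}
ctab = numpy.asarray([list(palette[cid % len(palette)]) + [255., lab]
                      for lab, cid in colors.items()])
# each row is R, G, B, A, label -- the same layout built above for pvtk.surface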
Beispiel #35
0
def convert_mesh(texture_file,
                 mesh_file,
                 t1_file,
                 outpattern=None,
                 mgz_file=None,
                 freesurfer_conformed=True,
                 freesurfer_native_t1_file=None):
    """ Extract texture coordinates from white matter mesh in physical
    morphological space and put them in NIFTI voxel space.

    Parameters
    ----------
    texture_file: str
        the pits or parcellations '.gii' file. The parcellation can also be
        given as an annotation '.annot' file.
    mesh_file: str
        the path to white matter '.gii' mesh file.
    t1_file: str
        the t1 NIFTI file.
    outpattern: str, default None
        if set, save the mesh in native space, concatenating this pattern with
        'mesh.native.nii.gz'.
    mgz_file: str, default None
        a FreeSurfer '.mgz' file.
    freesurfer_conformed: bool, default True
        if set, apply the translation to go from the conformed to native space.
    freesurfer_native_t1_file: str, default None
        if set, consider the input mesh as a FreeSurfer mesh in the conformed
        space, otherwise a morphologist mesh.

    Returns
    -------
    mesh_vertices: ndarray (shape (N,3))
        all mesh vertices in NIFTI voxel space.
    texture_label_indices: list of ndarray (shape (N, 1))
        the texture label locations that can be applied to the mesh vertices.
    texture_label_names: list of str
        the names of the texture labels.
    """
    # Load pits and mesh file
    if texture_file.endswith(".gii"):
        texture_gii = gio.read(texture_file)
        texture = texture_gii.darrays[0].data
        labels = None
    elif texture_file.endswith(".annot"):
        texture, _, labels = fio.read_annot(texture_file)
    texture = texture.astype(int)
    mesh_gii = gio.read(mesh_file)
    mesh_vertices = mesh_gii.darrays[0].data

    # Get mesh vertices and pits' mask array and check data adequacy
    if mesh_vertices.shape[0] != texture.shape[0]:
        raise ValueError("Texture file and white matter surface file "
                         "should have the same number of vertices.")
    unique_labels = numpy.unique(texture).tolist()
    if 0 in unique_labels:
        unique_labels.remove(0)
    texture_label_indices = []
    texture_label_names = []
    for cnt, label in enumerate(sorted(unique_labels)):
        if labels is not None:
            texture_label_names.append("{0}-{1}".format(label, labels[cnt]))
        else:
            texture_label_names.append("{0}".format(label))
        texture_label_indices.append(numpy.where(texture == label))

    # Load image
    t1im = nibabel.load(t1_file)
    affine = t1im.affine
    shape = t1im.shape

    # Realign the mesh in voxel Nifti space
    # Morphologist mesh
    if freesurfer_native_t1_file is None:
        # > generate affine trf in morphologist coordinates
        morphcoord = numpy.eye(4)
        morphcoord[0, 0] = -1
        morphcoord[1, 1] = 1
        morphcoord[2, 2] = 1
        morphcoord[0, 3] = affine[0, 3]
        morphcoord[1, 3] = -affine[1, 3]
        morphcoord[2, 3] = -affine[2, 3]
        morphaffine = numpy.dot(morphcoord, affine)
        # > deal with axis inversion
        inv_morphaffine = numpy.linalg.inv(morphaffine)
        inv_morphaffine[1, 1] = -inv_morphaffine[1, 1]
        inv_morphaffine[2, 2] = -inv_morphaffine[2, 2]
        inv_morphaffine[1, 3] = shape[1]
        inv_morphaffine[2, 3] = shape[2]
        mesh_vertices = apply_affine_on_mesh(mesh_vertices, inv_morphaffine)
    # FreeSurfer mesh
    else:
        fs_t1_image = nibabel.load(freesurfer_native_t1_file)
        # > FreeSurfer resamples the T1 image to 1 mm isotropic
        freesurfer_to_original_trf = numpy.dot(numpy.linalg.inv(t1im.affine),
                                               fs_t1_image.affine)
        # > Deal with FreeSurfer inner conformed space
        physical_to_index = numpy.linalg.inv(fs_t1_image.affine)
        if freesurfer_conformed:
            translation = tkregister_translation(mgz_file)
        else:
            translation = numpy.eye(4)
        conformed_to_native_trf = numpy.dot(physical_to_index, translation)
        conformed_to_original_trf = numpy.dot(freesurfer_to_original_trf,
                                              conformed_to_native_trf)
        mesh_vertices = apply_affine_on_mesh(mesh_vertices,
                                             conformed_to_original_trf)

    # Save the vertices as an image
    if outpattern is not None:
        overlay_file = outpattern + "mesh.native.nii.gz"
        overlay = numpy.zeros(t1im.shape, dtype=numpy.uint)
        indices = numpy.round(mesh_vertices).astype(int).T
        indices[0, numpy.where(indices[0] >= t1im.shape[0])] = 0
        indices[1, numpy.where(indices[1] >= t1im.shape[1])] = 0
        indices[2, numpy.where(indices[2] >= t1im.shape[2])] = 0
        overlay[indices.tolist()] = 1
        overlay_image = nibabel.Nifti1Image(overlay, t1im.affine)
        nibabel.save(overlay_image, overlay_file)

    return mesh_vertices, texture_label_indices, texture_label_names
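# `apply_affine_on_mesh` is not defined in this snippet; a minimal equivalent,
# mapping (N, 3) vertex coordinates through a 4x4 affine via homogeneous
# coordinates, might look like this (an assumption, not the original helper):
import numpy

def _apply_affine_sketch(vertices, affine):
    """Apply a 4x4 affine to an (N, 3) array of vertex coordinates."""
    homogeneous = numpy.column_stack([vertices, numpy.ones(len(vertices))])
    return homogeneous.dot(affine.T)[:, :3]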
Beispiel #36
0
    def read_thickness(self, subject_id):
        """
        Extracts thickness information from the annot and thickness files of the
        left and right hemispheres and returns a DataFrame with the regions as
        columns and their mean/std values as a single row.
        :param subject_id: 'XXX_S_XXXX' folder from the dataset (ADNI structure)
        :return:
            tk_stats: DataFrame with a single observation (row) of per-region
                mean and std thickness
            morph_data: dict with the label ids, color tables and label names
                for both hemispheres
        """
        # Set subject folder
        subject_folder = os.path.join(self.dataset_folder, subject_id)
        print('[  FS  ] Loading subject %s' % subject_folder)

        # Load labels
        labels_id_lh, ctab_lh, labels_lh = nbfs.read_annot(
            os.path.join(subject_folder, 'label', 'lh' + '.aparc.annot'))
        labels_id_lh[labels_id_lh == -1] = 0  # -1 Correction
        thickness_lh = nbfs.read_morph_data(
            os.path.join(subject_folder, 'surf', 'lh' + '.thickness'))

        labels_id_rh, ctab_rh, labels_rh = nbfs.read_annot(
            os.path.join(subject_folder, 'label', 'rh' + '.aparc.annot'))
        labels_id_rh[labels_id_rh == -1] = 0  # -1 Correction
        thickness_rh = nbfs.read_morph_data(
            os.path.join(subject_folder, 'surf', 'rh' + '.thickness'))


        columns = (['lh_' + x + '_mean' for x in labels_lh] +
                   ['lh_' + x + '_std' for x in labels_lh] +
                   ['rh_' + x + '_mean' for x in labels_rh] +
                   ['rh_' + x + '_std' for x in labels_rh])
        tk_stats = pd.DataFrame(columns=columns)

        # Left hemisphere
        tk_mean_lh = []
        tk_std_lh = []
        tk_mean_rh = []
        tk_std_rh = []

        for i, label in enumerate(labels_lh):
            if thickness_lh[labels_id_lh == i].any():
                tk_mean_lh.append(
                    np.mean(np.nan_to_num(thickness_lh[labels_id_lh == i])))
                tk_std_lh.append(
                    np.std(np.nan_to_num(thickness_lh[labels_id_lh == i])))
            else:
                tk_mean_lh.append(0)
                tk_std_lh.append(0)

        for i, label in enumerate(labels_rh):
            if thickness_rh[labels_id_rh == i].any():
                tk_mean_rh.append(
                    np.mean(np.nan_to_num(thickness_rh[labels_id_rh == i])))
                tk_std_rh.append(
                    np.std(np.nan_to_num(thickness_rh[labels_id_rh == i])))
            else:
                tk_mean_rh.append(0)
                tk_std_rh.append(0)

        tk_stats.loc[0] = tk_mean_lh + tk_std_lh + tk_mean_rh + tk_std_rh

        morph_data = {
            'labels_id': [labels_id_lh, labels_id_rh],
            'ctab': [ctab_lh, ctab_rh],
            'labels': [labels_lh, labels_rh]
        }

        return tk_stats, morph_data
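# A minimal sketch of the per-region reduction used in read_thickness(): the
# vertices carrying a given annotation label are pooled and summarised with
# mean/std after zeroing NaNs. The label ids and thickness values below are
# purely illustrative.
import numpy as np

labels_id = np.array([0, 1, 1, 2, 2, 2])              # per-vertex label ids
thickness = np.array([2.1, 2.4, 2.6, 1.9, np.nan, 2.0])
for i in np.unique(labels_id):
    vals = np.nan_to_num(thickness[labels_id == i])
    print(i, vals.mean(), vals.std())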
Beispiel #37
0
def vertices_to_parcels(data, *, lhannot, rhannot, drop=None):
    """
    Reduces vertex-level `data` to parcels defined in annotation files

    Takes average of vertices within each parcel, excluding np.nan values
    (i.e., np.nanmean). Assigns np.nan to parcels for which all vertices are
    np.nan.

    Parameters
    ----------
    data : (N,) numpy.ndarray
        Vertex-level data to be reduced to parcels
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere
    drop : list, optional
        Specifies regions in {lh,rh}annot that should be removed from the
        parcellated version of `data`. If not specified, 'unknown' and
        'corpuscallosum' will be removed. Default: None

    Returns
    -------
    reduced : numpy.ndarray
        Parcellated `data`, without regions specified in `drop`
    """

    if drop is None:
        drop = ['unknown', 'corpuscallosum']
    drop = _decode_list(drop)

    start = end = 0
    reduced = []

    # check this so we're not unduly surprised by anything...
    expected = sum([len(read_annot(a)[0]) for a in [lhannot, rhannot]])
    if expected != len(data):
        raise ValueError('Number of vertices in provided annotation files '
                         'differs from size of vertex-level data array.\n'
                         '    EXPECTED: {} vertices\n'
                         '    RECEIVED: {} vertices'.format(
                             expected, len(data)))

    for annot in [lhannot, rhannot]:
        # read files and update end index for `data`
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)

        indices = np.unique(labels)
        end += len(labels)

        # get average of vertex-level data within parcels
        # set all NaN values to 0 before calling `_stats` because we are
        # returning sums, so the 0 values won't impact the sums (if we left
        # the NaNs then all parcels with even one NaN entry would be NaN)
        currdata = np.squeeze(data[start:end])
        isna = np.isnan(currdata)
        counts, sums = _stats(np.nan_to_num(currdata), labels, indices)

        # however, we do need to account for the NaN values in the counts
        # so that our means are similar to what we'd get from e.g., np.nanmean.
        # Here, our "sums" are the counts of NaN values in our parcels
        _, nacounts = _stats(isna, labels, indices)
        counts = (np.asanyarray(counts, dtype=float) -
                  np.asanyarray(nacounts, dtype=float))

        with np.errstate(divide='ignore', invalid='ignore'):
            currdata = sums / counts

        # get indices of unknown and corpuscallosum and delete them from parcels
        inds = [names.index(f) for f in drop]
        currdata = np.delete(currdata, inds)

        # store parcellated data
        reduced.append(currdata)
        start = end

    return np.hstack(reduced)
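# Hedged usage sketch for vertices_to_parcels(); the annotation paths and the
# fsaverage5 vertex count (20484 across both hemispheres) are illustrative assumptions.
import numpy as np

lh = '/path/to/fsaverage5/label/lh.aparc.annot'   # hypothetical path
rh = '/path/to/fsaverage5/label/rh.aparc.annot'   # hypothetical path
vertex_data = np.random.rand(20484)               # one value per vertex
parc_data = vertices_to_parcels(vertex_data, lhannot=lh, rhannot=rh)
# parc_data holds one mean value per parcel, with the dropped regions removed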
Beispiel #38
0
def find_parcel_centroids(*,
                          lhannot,
                          rhannot,
                          version='fsaverage',
                          surf='sphere',
                          drop=None):
    """
    Returns vertex coords corresponding to centroids of parcels in annotations

    Note that using any other `surf` besides the default of 'sphere' may result
    in centroids that are not directly within the parcels themselves due to
    sulcal folding patterns.

    Parameters
    ----------
    {lh,rh}annot : str
        Path to .annot file containing labels of parcels on the {left,right}
        hemisphere. These must be specified as keyword arguments to avoid
        accidental order switching.
    version : str, optional
        Specifies which version of `fsaverage` provided annotation files
        correspond to. Must be one of {'fsaverage', 'fsaverage3', 'fsaverage4',
        'fsaverage5', 'fsaverage6'}. Default: 'fsaverage'
    surf : str, optional
        Specifies which surface projection of fsaverage to use for finding
        parcel centroids. Default: 'sphere'
    drop : list, optional
        Specifies regions in {lh,rh}annot for which the parcel centroid should
        not be calculated. If not specified, centroids for 'unknown' and
        'corpuscallosum' are not calculated. Default: None

    Returns
    -------
    centroids : (N, 3) numpy.ndarray
        xyz coordinates of vertices closest to the centroid of each parcel
        defined in `lhannot` and `rhannot`
    hemiid : (N,) numpy.ndarray
        Array denoting hemisphere designation of coordinates in `centroids`,
        where `hemiid=0` denotes the left and `hemiid=1` the right hemisphere
    """

    if drop is None:
        drop = ['unknown', 'corpuscallosum']
    drop = _decode_list(drop)

    surfaces = fetch_fsaverage(version)[surf]

    centroids, hemiid = [], []
    for n, (annot, surf) in enumerate(zip([lhannot, rhannot], surfaces)):
        vertices, faces = read_geometry(surf)
        labels, ctab, names = read_annot(annot)
        names = _decode_list(names)

        for lab in np.unique(labels):
            if names[lab] in drop:
                continue
            coords = np.atleast_2d(vertices[labels == lab].mean(axis=0))
            roi = vertices[np.argmin(cdist(vertices, coords), axis=0)[0]]
            centroids.append(roi)
            hemiid.append(n)

    return np.row_stack(centroids), np.asarray(hemiid)
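# Hedged follow-up sketch: parcel centroids are commonly used to build an
# inter-parcel Euclidean distance matrix (e.g., for distance-dependent null
# models). The annotation paths are illustrative assumptions.
from scipy.spatial.distance import pdist, squareform

xyz, hemi = find_parcel_centroids(lhannot='/path/to/lh.aparc.annot',   # hypothetical
                                  rhannot='/path/to/rh.aparc.annot')   # hypothetical
dist = squareform(pdist(xyz))   # (N, N) matrix of inter-centroid distances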