Example #1
def test_datasets_basic(tmpdir):
    """Test simple dataset functions."""
    # XXX 'hf_sef' and 'misc' do not conform to these standards
    for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm',
                  'bst_raw', 'bst_auditory', 'bst_resting', 'multimodal',
                  'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword',
                  'mtrf', 'phantom_4dbti',
                  'visual_92_categories', 'fieldtrip_cmc'):
        if dname.startswith('bst'):
            dataset = getattr(datasets.brainstorm, dname)
            check_name = 'brainstorm.%s' % (dname,)
        else:
            dataset = getattr(datasets, dname)
            check_name = dname
        if dataset.data_path(download=False) != '':
            assert isinstance(dataset.get_version(), str)
            assert datasets.utils.has_dataset(check_name)
        else:
            assert dataset.get_version() is None
            assert not datasets.utils.has_dataset(check_name)
        print('%s: %s' % (dname, datasets.utils.has_dataset(check_name)))
    tempdir = str(tmpdir)
    # don't let it read from the config file to get the directory,
    # force it to look for the default
    with modified_env(**{'_MNE_FAKE_HOME_DIR': tempdir, 'SUBJECTS_DIR': None}):
        assert (datasets.utils._get_path(None, 'foo', 'bar') ==
                op.join(tempdir, 'mne_data'))
        assert get_subjects_dir(None) is None
        _set_montage_coreg_path()
        sd = get_subjects_dir()
        assert sd.endswith('MNE-fsaverage-data')
Example #2
def test_get_subjects_dir(monkeypatch, tmpdir):
    """Test get_subjects_dir()."""
    # String
    subjects_dir = '/foo'
    assert get_subjects_dir(subjects_dir) == subjects_dir

    # Path
    subjects_dir = Path('/foo')
    assert get_subjects_dir(subjects_dir) == str(subjects_dir)

    # `None`
    monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmpdir))
    monkeypatch.delenv('SUBJECTS_DIR', raising=False)
    assert get_subjects_dir() is None
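# A minimal usage sketch (not part of the test above) of the resolution order
# that get_subjects_dir() follows: an explicit argument wins over the
# SUBJECTS_DIR environment variable, which wins over the MNE config file.
# The '/data/...' paths are hypothetical; the return type is str in older MNE
# and pathlib.Path in newer releases.
import os
from mne.utils import get_subjects_dir

os.environ['SUBJECTS_DIR'] = '/data/subjects'
print(get_subjects_dir())               # '/data/subjects' (from the environment)
print(get_subjects_dir('/data/other'))  # '/data/other' (explicit argument wins)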
Example #3
def dissolve_label(labels, source, targets, subjects_dir=None,
                   hemi='both'):
    """
    Assign every point from source to the target that is closest to it.

    Parameters
    ----------
    labels : list
        List of labels (as returned by mne.read_annot).
    source : str
        Name of the source label (without hemi affix).
    targets : list of str
        List of target label names (without hemi affix).
    subjects_dir : str
        subjects_dir.
    hemi : 'both', 'lh', 'rh'
        Hemisphere(s) for which to dissolve the label.

    Notes
    -----
    Modifies ``labels`` in-place, returns None.
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    subject = labels[0].subject
    if hemi == 'both':
        hemis = ('lh', 'rh')
    elif hemi == 'lh' or hemi == 'rh':
        hemis = (hemi,)
    else:
        raise ValueError("hemi=%r" % hemi)

    idx = {l.name: i for i, l in enumerate(labels)}

    rm = set()
    for hemi in hemis:
        fpath = os.path.join(subjects_dir, subject, 'surf', hemi + '.inflated')
        points, _ = mne.read_surface(fpath)

        src_name = '-'.join((source, hemi))
        src_idx = idx[src_name]
        rm.add(src_idx)
        src_label = labels[src_idx]
        tgt_names = ['-'.join((name, hemi)) for name in targets]
        tgt_idxs = [idx[name] for name in tgt_names]
        tgt_labels = [labels[i] for i in tgt_idxs]
        tgt_points = [points[label.vertices] for label in tgt_labels]

        vert_by_tgt = {i: [] for i in range(len(targets))}
        for src_vert in src_label.vertices:
            point = points[src_vert:src_vert + 1]
            dist = [cdist(point, pts).min() for pts in tgt_points]
            tgt = np.argmin(dist)
            vert_by_tgt[tgt].append(src_vert)

        for i, label in enumerate(tgt_labels):
            new_vertices = vert_by_tgt[i]
            label.vertices = np.union1d(label.vertices, new_vertices)

    for i in sorted(rm, reverse=True):
        del labels[i]
Example #4
def combine_medial_labels(labels, subject='fsaverage', surf='white',
                          dist_limit=0.02, subjects_dir=None):
    """Combine medial labels."""
    from mne.surface import _compute_nearest
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    rrs = dict((hemi, read_surface(op.join(subjects_dir, subject, 'surf',
                                           '%s.%s'
                                           % (hemi, surf)))[0] / 1000.)
               for hemi in ('lh', 'rh'))
    use_labels = list()
    used = np.zeros(len(labels), bool)
    logger.info('Matching medial regions for %s labels on %s %s, d=%0.1f mm'
                % (len(labels), subject, surf, 1000 * dist_limit))
    for li1, l1 in enumerate(labels):
        if used[li1]:
            continue
        used[li1] = True
        use_label = l1.copy()
        rr1 = rrs[l1.hemi][l1.vertices]
        for li2 in np.where(~used)[0]:
            l2 = labels[li2]
            same_name = (l2.name.replace(l2.hemi, '') ==
                         l1.name.replace(l1.hemi, ''))
            if l2.hemi != l1.hemi and same_name:
                rr2 = rrs[l2.hemi][l2.vertices]
                mean_min = np.mean(_compute_nearest(
                    rr1, rr2, return_dists=True)[1])
                if mean_min <= dist_limit:
                    use_label += l2
                    used[li2] = True
                    logger.info('  Matched: ' + l1.name)
        use_labels.append(use_label)
    logger.info('Total %d labels' % (len(use_labels),))
    return use_labels
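# Hypothetical usage sketch for combine_medial_labels() above; it assumes the
# FreeSurfer 'fsaverage' subject with the 'aparc' parcellation is available
# under SUBJECTS_DIR.
import mne

labels = mne.read_labels_from_annot('fsaverage', parc='aparc')
merged = combine_medial_labels(labels)
print('%d labels -> %d after merging medial pairs' % (len(labels), len(merged)))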
Example #5
def fix_annot_names(subject, parc, clean_subject=None, clean_parc=None,
                    hemi='both', subjects_dir=None):
    """Fix for Freesurfer's mri_surf2surf corrupting label names in annot files

    Notes
    -----
    Requires nibabel > 1.3.0 for annot file I/O
    """
    # process args
    subjects_dir = get_subjects_dir(subjects_dir)
    if clean_subject is None:
        clean_subject = subject
    if clean_parc is None:
        clean_parc = parc

    fpaths, hemis = _get_annot_fname(None, subject, hemi, parc, subjects_dir)
    clean_fpaths, _ = _get_annot_fname(None, clean_subject, hemi, clean_parc,
                                       subjects_dir)

    for fpath, clean_fpath, hemi in zip(fpaths, clean_fpaths, hemis):
        labels, ctab, names = read_annot(fpath)
        _, _, clean_names = read_annot(clean_fpath)
        if all(n == nc for n, nc in zip(names, clean_names)):
            continue

        if len(clean_names) != len(names):
            err = ("Different names in %s annot files: %s vs. %s"
                   % (hemi, str(names), str(clean_names)))
            raise ValueError(err)

        for clean_name, name in zip(clean_names, names):
            if not name.startswith(clean_name):
                err = "%s does not start with %s" % (str(name), clean_name)
                raise ValueError(err)

        write_annot(fpath, labels, ctab, clean_names)
Example #6
def run():
    """Run command."""
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    subject = os.environ.get('SUBJECT')
    subjects_dir = get_subjects_dir()

    parser.add_option("-s",
                      "--subject",
                      dest="subject",
                      help="Subject name",
                      default=subject)
    parser.add_option("-d",
                      "--subjects-dir",
                      dest="subjects_dir",
                      help="Subjects directory",
                      default=subjects_dir)
    parser.add_option("-m",
                      "--method",
                      dest="method",
                      help=("Method used to generate the BEM model. "
                            "Can be flash or watershed."))

    options, args = parser.parse_args()

    subject = options.subject
    subjects_dir = options.subjects_dir
    method = options.method

    freeview_bem_surfaces(subject, subjects_dir, method)
Example #7
def get_aligned_artifacts(info=None, trans=None, subject=None,
                          subjects_dir=None, coord_frame='mri',
                          head_surf=None):
    head_mri_t, _ = _get_trans(trans, 'head', 'mri')
    dev_head_t, _ = _get_trans(info['dev_head_t'], 'meg', 'head')
    head_trans = head_mri_t
    mri_trans = Transform('mri', 'mri')

    mri_fiducials = mne.coreg.get_mni_fiducials(subject, subjects_dir)
    fid_loc = _fiducial_coords(mri_fiducials, FIFF.FIFFV_COORD_MRI)
    fid_loc = apply_trans(mri_trans, fid_loc)
    fid_loc = pd.DataFrame(fid_loc,
                           index=[fid["ident"]._name.split("_")[-1]
                                  for fid in mri_fiducials],
                           columns=["x", "y", "z"])

    if head_surf is None:
        subject_dir = Path(get_subjects_dir(subjects_dir,
                                            raise_error=True)) / subject
        # use the subject's own head surface (not a hard-coded 'sample' file)
        fname = subject_dir / 'bem' / f'{subject}-head.fif'
        head_surf = read_bem_surfaces(fname)[0]
        head_surf = transform_surface_to(head_surf, coord_frame,
                                         [mri_trans, head_trans], copy=True)

    eeg_picks = mne.pick_types(info, meg=False, eeg=True, ref_meg=False)
    eeg_loc = np.array([info['chs'][k]['loc'][:3] for k in eeg_picks])
    eeg_loc = apply_trans(head_trans, eeg_loc)
    eegp_loc = _project_onto_surface(eeg_loc, head_surf, project_rrs=True,
                                     return_nn=True)[2]
    # index only the picked EEG channels so the index length matches the
    # projected locations
    eegp_loc = pd.DataFrame(eegp_loc,
                            index=[info['ch_names'][k] for k in eeg_picks],
                            columns=["x", "y", "z"])

    return eegp_loc, fid_loc, head_surf
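# Hypothetical usage sketch for get_aligned_artifacts() above, using the MNE
# sample dataset; the file names are the sample-dataset defaults, and Path()
# guards against older MNE versions where data_path() returns a str.
from pathlib import Path
import mne

data_path = Path(mne.datasets.sample.data_path())
info = mne.io.read_info(data_path / 'MEG' / 'sample' / 'sample_audvis_raw.fif')
trans = data_path / 'MEG' / 'sample' / 'sample_audvis_raw-trans.fif'
eegp_loc, fid_loc, head_surf = get_aligned_artifacts(
    info, trans=str(trans), subject='sample',
    subjects_dir=data_path / 'subjects')
print(fid_loc)  # DataFrame with LPA/Nasion/RPA rows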
Example #8
def mri_annotation2label(subject='*',
                         annot='aparc',
                         dest=os.path.join("{sdir}", "label", "{annot}"),
                         hemi='*',
                         subjects_dir=None):
    """
    Calls ``mri_annotation2label`` (`freesurfer wiki
    <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_annotation2label>`_)

    mri_dir : str(path)
        Path containing mri subject directories (freesurfer's ``SUBJECTS_DIR``).

    subject : str
        Name of the subject ('*' uses fnmatch to find folders in ``mri_dir``).

    annot : str
        Name of the annotation file (e.g., ``'aparc'``, ...).

    dest : str(path)
        Destination for the label files. {sdir}" and "{annot}" are filled in
        appropriately.

    hemi : 'lh' | 'rh' | '*'
        Hemisphere to process; '*' proceses both.

    """
    hemis = _fs_hemis(hemi)
    subjects = _fs_subjects(subject)
    subjects_dir = get_subjects_dir(subjects_dir)

    # progress monitor
    i_max = len(subjects) * len(hemis)
    if len(subjects) > 1:
        prog = ui.progress_monitor(i_max, "mri_annotation2label", "")
    else:
        prog = None

    for subject in subjects:
        sdir = os.path.join(subjects_dir, subject)
        outdir = dest.format(sdir=sdir, annot=annot)

        for hemi in hemis:
            if prog:
                prog.message("Processing: %s - %s" % (subject, hemi))

            cmd = [
                get_bin("freesurfer", "mri_annotation2label"),
                '--annotation',
                annot,
                '--subject',
                subject,
                '--hemi',
                hemi,
                '--outdir',
                outdir,
            ]

            _run(cmd, cwd=subjects_dir)
            if prog:
                prog.advance()
Example #9
def freeview_bem_surfaces(subject, subjects_dir, method):
    """View 3-Layers BEM model with Freeview.

    Parameters
    ----------
    subject : string
        Subject name
    subjects_dir : string
        Directory containing subjects data (Freesurfer SUBJECTS_DIR)
    method : string
        Can be 'flash' or 'watershed'.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    if subject is None:
        raise ValueError("subject argument is None.")

    subject_dir = op.join(subjects_dir, subject)

    if not op.isdir(subject_dir):
        raise ValueError("Wrong path: '{}'. Check subjects-dir or"
                         "subject argument.".format(subject_dir))

    env = os.environ.copy()
    env['SUBJECT'] = subject
    env['SUBJECTS_DIR'] = subjects_dir

    if 'FREESURFER_HOME' not in env:
        raise RuntimeError('The FreeSurfer environment needs to be set up.')

    mri_dir = op.join(subject_dir, 'mri')
    bem_dir = op.join(subject_dir, 'bem')
    mri = op.join(mri_dir, 'T1.mgz')

    if method == 'watershed':
        bem_dir = op.join(bem_dir, 'watershed')
        outer_skin = op.join(bem_dir, '%s_outer_skin_surface' % subject)
        outer_skull = op.join(bem_dir, '%s_outer_skull_surface' % subject)
        inner_skull = op.join(bem_dir, '%s_inner_skull_surface' % subject)
    else:
        if method == 'flash':
            bem_dir = op.join(bem_dir, 'flash')
        outer_skin = op.join(bem_dir, 'outer_skin.surf')
        outer_skull = op.join(bem_dir, 'outer_skull.surf')
        inner_skull = op.join(bem_dir, 'inner_skull.surf')

    # put together the command
    cmd = ['freeview']
    cmd += ["--volume", mri]
    cmd += ["--surface", "%s:color=red:edgecolor=red" % inner_skull]
    cmd += ["--surface", "%s:color=yellow:edgecolor=yellow" % outer_skull]
    cmd += [
        "--surface",
        "%s:color=255,170,127:edgecolor=255,170,127" % outer_skin
    ]

    run_subprocess(cmd, env=env, stdout=sys.stdout)
    print("[done]")
Example #10
def _get_bem_src_trans(p, info, subj, struc):
    subjects_dir = get_subjects_dir(p.subjects_dir, raise_error=True)
    assert isinstance(subjects_dir, str)
    if struc is None:  # spherical case
        bem, src, trans = _spherical_conductor(info, subj, p.src_pos)
        bem_type = 'spherical-model'
    else:
        from mne.transforms import _ensure_trans
        trans = op.join(p.work_dir, subj, p.trans_dir, subj + '-trans.fif')
        if not op.isfile(trans):
            old = trans
            trans = op.join(p.work_dir, subj, p.trans_dir,
                            subj + '-trans_head2mri.txt')
            if not op.isfile(trans):
                raise IOError('Unable to find head<->MRI trans files in:\n'
                              '%s\n%s' % (old, trans))
        trans = read_trans(trans)
        trans = _ensure_trans(trans, 'mri', 'head')
        this_src = _handle_dict(p.src, subj)
        assert isinstance(this_src, str)
        if this_src.startswith('oct'):
            kind = 'oct'
        elif this_src.startswith('vol'):
            kind = 'vol'
        else:
            raise RuntimeError('Unknown source space type %s, must be '
                               'oct or vol' % (this_src, ))
        num = int(this_src.split(kind)[-1].split('-')[-1])
        bem = op.join(subjects_dir, struc, 'bem',
                      '%s-%s-bem-sol.fif' % (struc, p.bem_type))
        for mid in ('', '-'):
            src_space_file = op.join(
                subjects_dir, struc, 'bem',
                '%s-%s%s%s-src.fif' % (struc, kind, mid, num))
            if op.isfile(src_space_file):
                break
        else:  # if neither exists, use last filename
            print('    Creating %s%s source space for %s...' %
                  (kind, num, subj))
            if kind == 'oct':
                src = setup_source_space(struc,
                                         spacing='%s%s' % (kind, num),
                                         subjects_dir=p.subjects_dir,
                                         n_jobs=p.n_jobs)
            else:
                assert kind == 'vol'
                src = setup_volume_source_space(struc,
                                                pos=num,
                                                bem=bem,
                                                subjects_dir=p.subjects_dir)
            write_source_spaces(src_space_file, src)
        src = read_source_spaces(src_space_file)
        bem = read_bem_solution(bem, verbose=False)
        bem_type = ('%s-layer BEM' % len(bem['surfs']))
    return bem, src, trans, bem_type
Example #11
def _fs_subjects(arg, exclude=(), subjects_dir=None):
    if '*' in arg:
        subjects_dir = get_subjects_dir(subjects_dir)
        subjects = fnmatch.filter(os.listdir(subjects_dir), arg)
        # check directories relative to subjects_dir (not the cwd) and keep a
        # list so that .remove() below works under Python 3
        subjects = [s for s in subjects
                    if os.path.isdir(os.path.join(subjects_dir, s))]
        for subject in exclude:
            if subject in subjects:
                subjects.remove(subject)
    else:
        subjects = [arg]
    return subjects
Example #12
def test_get_subjects_dir(tmp_path, monkeypatch):
    """Test get_subjects_dir()."""
    subjects_dir = tmp_path / 'foo'
    subjects_dir.mkdir()

    # String
    assert get_subjects_dir(str(subjects_dir)) == str(subjects_dir)

    # Path
    assert get_subjects_dir(subjects_dir) == str(subjects_dir)

    # `None`
    monkeypatch.setenv('_MNE_FAKE_HOME_DIR', str(tmp_path))
    monkeypatch.delenv('SUBJECTS_DIR', raising=False)
    assert get_subjects_dir() is None

    # Expand `~`
    monkeypatch.setenv('HOME', str(tmp_path))
    monkeypatch.setenv('USERPROFILE', str(tmp_path))  # Windows
    assert get_subjects_dir('~/foo') == str(subjects_dir)
Example #13
def plot_stc_time_point(stc, subject, limits=[5, 10, 15], time_index=0,
                        surf='inflated', measure='dSPM', subjects_dir=None):
    """Plot a time instant from a SourceEstimate using matplotlib

    The same could be done with mayavi using proper 3D.

    Parameters
    ----------
    stc : instance of SourceEstimate
        The SourceEstimate to plot.
    subject : string
        The subject name (only needed if surf is a string).
    time_index : int
        Time index to plot.
    surf : str, or instance of surfaces
        Surface to use (e.g., 'inflated' or 'white'), or pre-loaded surfaces.
    measure : str
        The label for the colorbar. None turns the colorbar off.
    subjects_dir : str, or None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    pl.figure(facecolor='k', figsize=(8, 5))
    hemis = ['lh', 'rh']
    if isinstance(surf, str):
        surf = [read_surface(op.join(subjects_dir, subject, 'surf',
                                     '%s.%s' % (h, surf))) for h in hemis]
    my_cmap = mne_analyze_colormap(limits)
    for hi, h in enumerate(hemis):
        coords = surf[hi][0][stc.vertno[hi]]
        if hi == 0:
            vals = stc.lh_data[:, time_index]
        else:
            vals = stc.rh_data[:, time_index]
        ax = pl.subplot(1, 2, hi + 1, axis_bgcolor='none')  # 1-based index
        pl.tick_params(labelbottom='off', labelleft='off')
        flipper = -1 if hi == 1 else 1
        sc = ax.scatter(flipper * coords[:, 1], coords[:, 2], c=vals,
                        vmin=-limits[2], vmax=limits[2], cmap=my_cmap,
                        edgecolors='none', s=5)
        ax.set_aspect('equal')
        pl.axis('off')
    try:
        pl.tight_layout(0)
    except Exception:
        pass
    if measure is not None:
        cax = pl.axes([0.85, 0.15, 0.025, 0.15], axisbg='k')
        cb = pl.colorbar(sc, cax, ticks=[-limits[2], 0, limits[2]])
        cb.set_label(measure, color='w')
        pl.setp(pl.getp(cb.ax, 'yticklabels'), color='w')
    pl.draw()
    pl.show()
Example #14
def get_atlas_roi_mask(stc, roi, atlas='IXI', atlas_subject=None,
                       subjects_dir=None):
    """Get ROI mask for a given subject/atlas.

    Parameters
    ----------
    stc : instance of mne.SourceEstimate or mne.VectorSourceEstimate
        The source estimate.
    roi : str
        The ROI to obtain a mask for.
    atlas : str
        The atlas to use. Must be "IXI" or "LBPA40".
    atlas_subject : str | None
        Atlas subject to process. Must be one of the (unwarped) subjects
        "ANTS3-0Months3T", "ANTS6-0Months3T", or "ANTS12-0Months3T".
        If None, it will be inferred from the number of vertices.

    Returns
    -------
    mask : ndarray, shape (n_vertices,)
        The mask.
    """
    import nibabel as nib
    from mne.utils import _validate_type
    from mne.surface import _compute_nearest
    _validate_type(stc, (VolSourceEstimate, VolVectorSourceEstimate), 'stc')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    if atlas_subject is None:
        atlas_subject = _VERT_COUNT_MAP[len(stc.vertices)]
    fname_src = op.join(subjects_dir, atlas_subject, 'bem', '%s-vol5-src.fif'
                        % (atlas_subject,))
    src = read_source_spaces(fname_src)
    mri = op.join(subjects_dir, atlas_subject, 'mri',
                  '%s_brain_ANTS_%s_atlas.mgz' % (atlas_subject, atlas))
    if not np.in1d(stc.vertices, src[0]['vertno']).all():
        raise RuntimeError('stc does not appear to be created from %s '
                           'volumetric source space' % (atlas_subject,))
    rr = src[0]['rr'][stc.vertices]
    mapping = get_atlas_mapping(atlas)
    vol_id = mapping[roi]
    mgz = nib.load(mri)
    mgz_data = mgz.get_fdata()
    vox_bool = mgz_data == vol_id
    vox_ijk = np.array(np.where(vox_bool)).T
    vox_mri_t = mgz.header.get_vox2ras_tkr()
    vox_mri_t *= np.array([[1e-3, 1e-3, 1e-3, 1]]).T
    rr_voi = apply_trans(vox_mri_t, vox_ijk)
    dists = _compute_nearest(rr_voi, rr, return_dists=True)[1]
    maxdist = np.linalg.norm(vox_mri_t[:3, :3].sum(0) / 2.)
    mask = (dists <= maxdist)
    return mask
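# Hypothetical usage sketch for get_atlas_roi_mask() above; 'stc' is assumed
# to be a VolSourceEstimate computed in the matching ANTS volumetric source
# space, and 'Hippocampus' is a made-up ROI name for illustration.
import numpy as np

mask = get_atlas_roi_mask(stc, roi='Hippocampus', atlas='IXI')
roi_waveform = np.abs(stc.data[mask]).mean(axis=0)  # mean ROI time course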
Example #15
def test_datasets_basic(tmp_path, monkeypatch):
    """Test simple dataset functions."""
    # XXX 'hf_sef' and 'misc' do not conform to these standards
    for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm', 'bst_raw',
                  'bst_auditory', 'bst_resting', 'multimodal',
                  'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword', 'mtrf',
                  'phantom_4dbti', 'visual_92_categories', 'fieldtrip_cmc'):
        if dname.startswith('bst'):
            dataset = getattr(datasets.brainstorm, dname)
        else:
            dataset = getattr(datasets, dname)
        if str(dataset.data_path(download=False)) != '.':
            assert isinstance(dataset.get_version(), str)
            assert datasets.has_dataset(dname)
        else:
            assert dataset.get_version() is None
            assert not datasets.has_dataset(dname)
        print('%s: %s' % (dname, datasets.has_dataset(dname)))
    tempdir = str(tmp_path)
    # Explicitly test one that isn't preset (given the config)
    monkeypatch.setenv('MNE_DATASETS_SAMPLE_PATH', tempdir)
    dataset = datasets.sample
    assert str(dataset.data_path(download=False)) == '.'
    assert dataset.get_version() != ''
    assert dataset.get_version() is None
    # don't let it read from the config file to get the directory,
    # force it to look for the default
    monkeypatch.setenv('_MNE_FAKE_HOME_DIR', tempdir)
    monkeypatch.delenv('SUBJECTS_DIR', raising=False)
    assert (str(datasets.utils._get_path(None, 'foo', 'bar')) == op.join(
        tempdir, 'mne_data'))
    assert get_subjects_dir(None) is None
    _set_montage_coreg_path()
    sd = get_subjects_dir()
    assert sd.endswith('MNE-fsaverage-data')
    monkeypatch.setenv('MNE_DATA', str(tmp_path / 'foo'))
    with pytest.raises(FileNotFoundError, match='as specified by MNE_DAT'):
        testing.data_path(download=False)
Example #16
def convert_montage_to_mri(montage, subject, subjects_dir=None, verbose=None):
    """Convert a montage from scanner RAS (m) to surface RAS (m).

    Parameters
    ----------
    montage : mne.channels.DigMontage
        The montage in the "ras" coordinate frame. Note: modified in place.
    %(subject)s
    %(subjects_dir)s
    %(verbose)s

    Returns
    -------
    ras_mri_t : mne.transforms.Transform
        The transformation matrix from ``'ras'`` (``scanner RAS``) to
        ``'mri'`` (``surface RAS``).
    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    T1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    if not op.isfile(T1_fname):
        raise RuntimeError(f'Freesurfer subject ({subject}) and/or '
                           f'subjects_dir ({subjects_dir}) incorrectly '
                           'formatted, T1.mgz not found')
    T1 = nib.load(T1_fname)

    # transform from "ras" (scanner RAS) to "mri" (Freesurfer surface RAS)
    ras_vox_t = T1.header.get_ras2vox()
    ras_vox_t[:3, :3] *= 1000  # scale so the transform accepts m, not mm
    ras_vox_trans = mne.transforms.Transform(fro='ras',
                                             to='mri_voxel',
                                             trans=ras_vox_t)

    vox_mri_t = T1.header.get_vox2ras_tkr()
    vox_mri_t[:3] /= 1000  # scale from mm to m
    vox_mri_trans = mne.transforms.Transform(fro='mri_voxel',
                                             to='mri',
                                             trans=vox_mri_t)
    montage.apply_trans(  # ras->vox + vox->mri = ras->mri
        mne.transforms.combine_transforms(ras_vox_trans,
                                          vox_mri_trans,
                                          fro='ras',
                                          to='mri'))
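# Hypothetical usage sketch for convert_montage_to_mri() above: build a
# DigMontage in the scanner-RAS ('ras') frame and convert it in place to
# FreeSurfer surface RAS. The channel position, 'sample' subject, and
# subjects directory are illustrative assumptions.
import numpy as np
import mne

ch_pos = {'EEG1': np.array([0.01, 0.02, 0.05])}  # meters, scanner RAS
montage = mne.channels.make_dig_montage(ch_pos=ch_pos, coord_frame='ras')
convert_montage_to_mri(montage, subject='sample',
                       subjects_dir='/path/to/subjects')
# montage is now in the 'mri' (surface RAS) frame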
Example #17
def get_fsaverage_label_operator(parc='aparc.a2009s', remove_bads=True,
                                 combine_medial=False, return_labels=False,
                                 subjects_dir=None, verbose=None):
    """Get a label operator matrix for fsaverage."""
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    src = read_source_spaces(op.join(
        subjects_dir, 'fsaverage', 'bem', 'fsaverage-5-src.fif'),
        verbose=False)
    fs_vertices = [np.arange(10242), np.arange(10242)]
    assert all(np.array_equal(a['vertno'], b)
               for a, b in zip(src, fs_vertices))
    labels = read_labels_from_annot('fsaverage', parc)
    # Remove bad labels
    if remove_bads:
        bads = get_fsaverage_medial_vertices(False)
        bads = dict(lh=bads[0], rh=bads[1])
        assert all(b.size > 1 for b in bads.values())
        labels = [label for label in labels
                  if np.in1d(label.vertices, bads[label.hemi]).mean() < 0.8]
        del bads
    if combine_medial:
        labels = combine_medial_labels(labels)
    offsets = dict(lh=0, rh=10242)
    rev_op = np.zeros((20484, len(labels)))
    for li, label in enumerate(labels):
        if isinstance(label, BiHemiLabel):
            use_labels = [label.lh, label.rh]
        else:
            use_labels = [label]
        for ll in use_labels:
            rev_op[ll.get_vertices_used() + offsets[ll.hemi], li:li + 1] = 1.
    # every src vertex is in exactly one label, except medial wall verts
    # assert (rev_op.sum(-1) == 1).sum()
    label_op = SourceEstimate(np.eye(20484), fs_vertices, 0, 1)
    label_op = label_op.extract_label_time_course(labels, src)
    out = (label_op, rev_op)
    if return_labels:
        out += (labels,)
    return out
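# Hypothetical usage sketch for get_fsaverage_label_operator() above; assumes
# the fsaverage source space and annotation files it reads are installed
# under SUBJECTS_DIR.
label_op, rev_op = get_fsaverage_label_operator(parc='aparc.a2009s')
print(label_op.shape, rev_op.shape)  # (n_labels, 20484) and (20484, n_labels)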
Example #18
def run():
    from mne.commands.utils import get_optparser

    parser = get_optparser(__file__)

    subject = os.environ.get('SUBJECT')
    subjects_dir = get_subjects_dir()

    parser.add_option("-s", "--subject", dest="subject",
                      help="Subject name", default=subject)
    parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
                      help="Subjects directory", default=subjects_dir)
    parser.add_option("-m", "--method", dest="method",
                      help=("Method used to generate the BEM model. "
                            "Can be flash or watershed."), metavar="FILE")

    options, args = parser.parse_args()

    subject = options.subject
    subjects_dir = options.subjects_dir
    method = options.method

    freeview_bem_surfaces(subject, subjects_dir, method)
Example #19
def dip_depth(dip, fname_trans, subject, subjects_dir):
    trans = read_trans(fname_trans)
    trans = _get_trans(trans)[0]
    subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
    fname = os.path.join(subjects_dir, subject, 'bem', 'inner_skull.surf')
    points, faces = read_surface(fname)
    points = apply_trans(trans['trans'], points * 1e-3)

    pos = dip.pos
    ori = dip.ori

    from sklearn.neighbors import NearestNeighbors
    nn = NearestNeighbors()
    nn.fit(points)
    depth, idx = nn.kneighbors(pos, 1, return_distance=True)
    idx = np.ravel(idx)

    direction = pos - points[idx]
    direction /= np.sqrt(np.sum(direction**2, axis=1))[:, None]
    ori /= np.sqrt(np.sum(ori**2, axis=1))[:, None]

    radiality = np.abs(np.sum(ori * direction, axis=1))
    return np.ravel(depth), radiality
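# Hypothetical usage sketch for dip_depth() above with the sample dataset;
# 'dip' is assumed to be an mne.Dipole obtained beforehand (e.g., from
# mne.fit_dipole).
from pathlib import Path
import mne

data_path = Path(mne.datasets.sample.data_path())
fname_trans = data_path / 'MEG' / 'sample' / 'sample_audvis_raw-trans.fif'
depth, radiality = dip_depth(dip, str(fname_trans), 'sample',
                             str(data_path / 'subjects'))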
Example #20
def get_fsaverage_medial_vertices(concatenate=True, subjects_dir=None,
                                  vertices=None):
    """Return fsaverage medial wall vertex numbers.

    These refer to the standard fsaverage source space
    (with vertices from 0 to 2*10242-1).

    Parameters
    ----------
    concatenate : bool
        If True, the returned vertices will be indices into the left and right
        hemisphere that are part of the medial wall. This is useful when
        treating the source space as a single entity (e.g., during
        clustering).
    subjects_dir : str
        Directory containing subjects data. If None use
        the Freesurfer SUBJECTS_DIR environment variable.
    vertices : None | list
        Can be None to use ``[np.arange(10242)] * 2``.

    Returns
    -------
    vertices : list of array, or array
        The medial wall vertices.
    """
    if vertices is None:
        vertices = [np.arange(10242), np.arange(10242)]
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    label_dir = op.join(subjects_dir, 'fsaverage', 'label')
    lh = read_label(op.join(label_dir, 'lh.Medial_wall.label'))
    rh = read_label(op.join(label_dir, 'rh.Medial_wall.label'))
    if concatenate:
        bad_left = np.where(np.in1d(vertices[0], lh.vertices))[0]
        bad_right = np.where(np.in1d(vertices[1], rh.vertices))[0]
        return np.concatenate((bad_left, bad_right + len(vertices[0])))
    else:
        return [lh.vertices, rh.vertices]
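# Hypothetical usage sketch for get_fsaverage_medial_vertices() above:
# exclude the medial wall from spatio-temporal clustering. 'X' (an array of
# shape (n_subjects, n_times, 20484)) and 'adjacency' (e.g., from
# mne.spatial_src_adjacency) are assumed to exist already.
from mne.stats import spatio_temporal_cluster_1samp_test

medial = get_fsaverage_medial_vertices(concatenate=True)
t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_1samp_test(
    X, adjacency=adjacency, spatial_exclude=medial)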
Example #21
def _run(subjects_dir, subject, force, overwrite, no_decimate, verbose=None):
    this_env = copy.copy(os.environ)
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    this_env['SUBJECTS_DIR'] = subjects_dir
    this_env['SUBJECT'] = subject
    if 'FREESURFER_HOME' not in this_env:
        raise RuntimeError('The FreeSurfer environment needs to be set up '
                           'for this script')
    incomplete = 'warn' if force else 'raise'
    subj_path = op.join(subjects_dir, subject)
    if not op.exists(subj_path):
        raise RuntimeError('%s does not exist. Please check your subject '
                           'directory path.' % subj_path)

    mri = 'T1.mgz' if op.exists(op.join(subj_path, 'mri', 'T1.mgz')) else 'T1'

    logger.info('1. Creating a dense scalp tessellation with mkheadsurf...')

    def check_seghead(surf_path=op.join(subj_path, 'surf')):
        surf = None
        for k in ['lh.seghead', 'lh.smseghead']:
            this_surf = op.join(surf_path, k)
            if op.exists(this_surf):
                surf = this_surf
                break
        return surf

    my_seghead = check_seghead()
    if my_seghead is None:
        run_subprocess(['mkheadsurf', '-subjid', subject, '-srcvol', mri],
                       env=this_env)

    surf = check_seghead()
    if surf is None:
        raise RuntimeError('mkheadsurf did not produce the standard output '
                           'file.')

    bem_dir = op.join(subjects_dir, subject, 'bem')
    if not op.isdir(bem_dir):
        os.mkdir(bem_dir)
    dense_fname = op.join(bem_dir, '%s-head-dense.fif' % subject)
    logger.info('2. Creating %s ...' % dense_fname)
    _check_file(dense_fname, overwrite)
    surf = mne.bem._surfaces_to_bem(
        [surf], [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1],
        incomplete=incomplete)[0]
    mne.write_bem_surfaces(dense_fname, surf)
    levels = 'medium', 'sparse'
    tris = [] if no_decimate else [30000, 2500]
    if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true':
        tris = [len(surf['tris'])]  # don't actually decimate
    for ii, (n_tri, level) in enumerate(zip(tris, levels), 3):
        logger.info('%i. Creating %s tessellation...' % (ii, level))
        logger.info('%i.1 Decimating the dense tessellation...' % ii)
        with ETSContext():
            points, tris = mne.decimate_surface(points=surf['rr'],
                                                triangles=surf['tris'],
                                                n_triangles=n_tri)
        dec_fname = dense_fname.replace('dense', level)
        logger.info('%i.2 Creating %s' % (ii, dec_fname))
        _check_file(dec_fname, overwrite)
        dec_surf = mne.bem._surfaces_to_bem(
            [dict(rr=points, tris=tris)],
            [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1], rescale=False,
            incomplete=incomplete)
        mne.write_bem_surfaces(dec_fname, dec_surf)
Example #22
def get_morph_src_mapping(src_from,
                          src_to,
                          subject_from=None,
                          subject_to=None,
                          subjects_dir=None,
                          indices=False):
    """Get a mapping between an original source space and its morphed version.

    It is assumed that the number of vertices and their positions match between
    the source spaces, only the ordering is different. This is commonly the
    case when using :func:`morph_source_spaces`.

    Parameters
    ----------
    src_from : instance of SourceSpaces
        The original source space that was morphed to the target subject.
    src_to : instance of SourceSpaces | list of two arrays
        Either the source space to which ``src_from`` was morphed, or the
        vertex numbers of this source space.
    subject_from : str | None
        The name of the Freesurfer subject to which ``src_from`` belongs. By
        default, the value stored in the SourceSpaces object is used.
    subject_to : str | None
        The name of the Freesurfer subject to which ``src_to`` belongs. By
        default, the value stored in the SourceSpaces object is used.
    subjects_dir : str | None
        Path to SUBJECTS_DIR if it is not set in the environment.
    indices : bool
        Whether to return mapping between vertex numbers (``False``, the
        default) or vertex indices (``True``).

    Returns
    -------
    from_to : dict | pair of dicts
        If ``indices=True``, a dictionary mapping vertex indices from
        src_from -> src_to. If ``indices=False``, for each hemisphere, a
        dictionary mapping vertex numbers from src_from -> src_to.
    to_from : dict | pair of dicts
        If ``indices=True``, a dictionary mapping vertex indices from
        src_to -> src_from. If ``indices=False``, for each hemisphere, a
        dictionary mapping vertex numbers from src_to -> src_from.

    See also
    --------
    _get_morph_src_reordering
    """
    if subject_from is None:
        subject_from = src_from[0]['subject_his_id']
    if subject_to is None and isinstance(src_to, SourceSpaces):
        subject_to = src_to[0]['subject_his_id']
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    src_from = _ensure_src(src_from, kind='surface')
    subject_from = _ensure_src_subject(src_from, subject_from)

    if isinstance(src_to, SourceSpaces):
        to_vert_lh = src_to[0]['vertno']
        to_vert_rh = src_to[1]['vertno']
    else:
        if subject_to is None:
            raise ValueError('When supplying vertex numbers as `src_to`, the '
                             '`subject_to` parameter must be set.')
        to_vert_lh, to_vert_rh = src_to

    order, from_vert = _get_morph_src_reordering([to_vert_lh, to_vert_rh],
                                                 src_from,
                                                 subject_from,
                                                 subject_to,
                                                 subjects_dir=subjects_dir)
    from_vert_lh, from_vert_rh = from_vert

    if indices:
        # Find vertex indices corresponding to the vertex numbers for src_from
        from_n_lh = src_from[0]['nuse']
        from_ind_lh = _find_indices_1d(src_from[0]['vertno'], from_vert_lh)
        from_ind_rh = _find_indices_1d(src_from[1]['vertno'], from_vert_rh)
        from_ind = np.hstack((from_ind_lh, from_ind_rh + from_n_lh))

        # The indices for src_to are easy
        to_ind = order

        # Create the mappings
        from_to = dict(zip(from_ind, to_ind))
        to_from = dict(zip(to_ind, from_ind))
    else:
        # Re-order the vertices of src_to to match the ordering of src_from
        to_n_lh = len(to_vert_lh)
        to_vert_lh = to_vert_lh[order[:to_n_lh]]
        to_vert_rh = to_vert_rh[order[to_n_lh:] - to_n_lh]

        # Create the mappings
        from_to = [
            dict(zip(from_vert_lh, to_vert_lh)),
            dict(zip(from_vert_rh, to_vert_rh))
        ]
        to_from = [
            dict(zip(to_vert_lh, from_vert_lh)),
            dict(zip(to_vert_rh, from_vert_rh))
        ]

    return from_to, to_from
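# Hypothetical usage sketch for get_morph_src_mapping() above; assumes
# 'src_fsaverage' is a surface source space that was morphed to subject
# 'sample' with mne.morph_source_spaces().
import mne

src_sample = mne.morph_source_spaces(src_fsaverage, subject_to='sample')
from_to, to_from = get_morph_src_mapping(src_fsaverage, src_sample)
lh_map = from_to[0]  # fsaverage lh vertex number -> sample lh vertex number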
Example #23
def write_labels_to_annot(labels,
                          subject=None,
                          parc=None,
                          overwrite=False,
                          subjects_dir=None,
                          annot_fname=None,
                          colormap='hsv',
                          hemi='both'):
    """Create a FreeSurfer annotation from a list of labels

    FIX: always write both hemispheres

    Parameters
    ----------
    labels : list with instances of mne.Label
        The labels to create a parcellation from.
    subject : str | None
        The subject for which to write the parcellation for.
    parc : str | None
        The parcellation name to use.
    overwrite : bool
        Overwrite files if they already exist.
    subjects_dir : string, or None
        Path to SUBJECTS_DIR if it is not set in the environment.
    annot_fname : str | None
        Filename of the .annot file. If not None, only this file is written
        and 'parc' and 'subject' are ignored.
    colormap : str
        Colormap to use to generate label colors for labels that do not
        have a color specified.
    hemi : 'both' | 'lh' | 'rh'
        The hemisphere(s) for which to write *.annot files (only applies if
        annot_fname is not specified; default is 'both').
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Notes
    -----
    Vertices that are not covered by any of the labels are assigned to a label
    named "unknown".
    """
    subjects_dir = get_subjects_dir(subjects_dir)

    # get the .annot filenames and hemispheres
    annot_fname, hemis = _get_annot_fname(annot_fname, subject, hemi, parc,
                                          subjects_dir)

    if not overwrite:
        for fname in annot_fname:
            if op.exists(fname):
                raise ValueError('File %s exists. Use "overwrite=True" to '
                                 'overwrite it' % fname)

    # prepare container for data to save:
    to_save = []
    # keep track of issues found in the labels
    duplicate_colors = []
    invalid_colors = []
    overlap = []
    no_color = (-1, -1, -1, -1)
    no_color_rgb = (-1, -1, -1)
    for hemi, fname in zip(hemis, annot_fname):
        hemi_labels = [label for label in labels if label.hemi == hemi]
        n_hemi_labels = len(hemi_labels)

        if n_hemi_labels == 0:
            ctab = np.empty((0, 4), dtype=np.int32)
            ctab_rgb = ctab[:, :3]
        else:
            hemi_labels.sort(key=lambda label: label.name)

            # convert colors to 0-255 RGBA tuples
            hemi_colors = [
                no_color if label.color is None else tuple(
                    int(round(255 * i)) for i in label.color)
                for label in hemi_labels
            ]
            ctab = np.array(hemi_colors, dtype=np.int32)
            ctab_rgb = ctab[:, :3]

            # make color dict (for annot ID, only R, G and B count)
            labels_by_color = defaultdict(list)
            for label, color in zip(hemi_labels, ctab_rgb):
                labels_by_color[tuple(color)].append(label.name)

            # check label colors
            for color, names in labels_by_color.items():
                if color == no_color_rgb:
                    continue

                if color == (0, 0, 0):
                    # we cannot have an all-zero color, otherwise e.g.
                    # tksurfer refuses to read the parcellation
                    msg = ('At least one label contains a color with "r=0, '
                           'g=0, b=0" values. Some FreeSurfer tools may fail '
                           'to read the parcellation')
                    logger.warning(msg)

                if any(i > 255 for i in color):
                    msg = ("%s: %s (%s)" % (color, ', '.join(names), hemi))
                    invalid_colors.append(msg)

                if len(names) > 1:
                    msg = "%s: %s (%s)" % (color, ', '.join(names), hemi)
                    duplicate_colors.append(msg)

            # replace None values (labels with unspecified color)
            if labels_by_color[no_color_rgb]:
                default_colors = _n_colors(n_hemi_labels,
                                           bytes_=True,
                                           cmap=colormap)
                # keep track of colors known to be in hemi_colors
                safe_color_i = 0
                for i in range(n_hemi_labels):
                    if ctab[i, 0] == -1:
                        color = default_colors[i]
                        # make sure to add no duplicate color
                        while np.any(np.all(color[:3] == ctab_rgb, 1)):
                            color = default_colors[safe_color_i]
                            safe_color_i += 1
                        # assign the color
                        ctab[i] = color

        # find number of vertices in surface
        if subject is not None and subjects_dir is not None:
            fpath = op.join(subjects_dir, subject, 'surf', '%s.white' % hemi)
            points, _ = read_surface(fpath)
            n_vertices = len(points)
        else:
            if len(hemi_labels) > 0:
                max_vert = max(np.max(label.vertices) for label in hemi_labels)
                n_vertices = max_vert + 1
            else:
                n_vertices = 1
            msg = ('    Number of vertices in the surface could not be '
                   'verified because the surface file could not be found; '
                   'specify subject and subjects_dir parameters.')
            logger.warning(msg)

        # Create annot and color table array to write
        annot = np.empty(n_vertices, dtype=np.intp)
        annot[:] = -1
        # create the annotation ids from the colors
        annot_id_coding = np.array((1, 2**8, 2**16))
        annot_ids = list(np.sum(ctab_rgb * annot_id_coding, axis=1))
        for label, annot_id in zip(hemi_labels, annot_ids):
            # make sure the label is not overwriting another label
            if np.any(annot[label.vertices] != -1):
                other_ids = set(annot[label.vertices])
                other_ids.discard(-1)
                other_indices = (annot_ids.index(i) for i in other_ids)
                other_names = (hemi_labels[i].name for i in other_indices)
                other_repr = ', '.join(other_names)
                msg = "%s: %s overlaps %s" % (hemi, label.name, other_repr)
                overlap.append(msg)

            annot[label.vertices] = annot_id

        hemi_names = [label.name for label in hemi_labels]

        # Assign unlabeled vertices to an "unknown" label
        unlabeled = (annot == -1)
        if np.any(unlabeled):
            msg = ("Assigning %i unlabeled vertices to "
                   "'unknown-%s'" % (unlabeled.sum(), hemi))
            logger.info(msg)

            # find an unused color (try shades of gray first)
            for i in range(1, 257):
                if not np.any(np.all((i, i, i) == ctab_rgb, 1)):
                    break
            if i < 256:
                color = (i, i, i, 0)
            else:
                err = ("Need one free shade of gray for 'unknown' label. "
                       "Please modify your label colors, or assign the "
                       "unlabeled vertices to another label.")
                raise ValueError(err)

            # find the id
            annot_id = np.sum(annot_id_coding * color[:3])

            # update data to write
            annot[unlabeled] = annot_id
            ctab = np.vstack((ctab, color))
            hemi_names.append("unknown")

        # convert to FreeSurfer alpha values
        ctab[:, 3] = 255 - ctab[:, 3]

        # remove hemi ending in names
        hemi_names = [
            name[:-3] if name.endswith(hemi) else name for name in hemi_names
        ]

        to_save.append((fname, annot, ctab, hemi_names))

    issues = []
    if duplicate_colors:
        msg = ("Some labels have the same color values (all labels in one "
               "hemisphere must have a unique color):")
        duplicate_colors.insert(0, msg)
        issues.append('\n'.join(duplicate_colors))
    if invalid_colors:
        msg = ("Some labels have invalid color values (all colors should be "
               "RGBA tuples with values between 0 and 1)")
        invalid_colors.insert(0, msg)
        issues.append('\n'.join(invalid_colors))
    if overlap:
        msg = ("Some labels occupy vertices that are also occupied by one or "
               "more other labels. Each vertex can only be occupied by a "
               "single label in *.annot files.")
        overlap.insert(0, msg)
        issues.append('\n'.join(overlap))

    if issues:
        raise ValueError('\n\n'.join(issues))

    # write it
    for fname, annot, ctab, hemi_names in to_save:
        logger.info('   writing %d labels to %s' % (len(hemi_names), fname))
        _write_annot(fname, annot, ctab, hemi_names)

    logger.info('[done]')
Example #24
def _stc_to_label(stc, src, smooth, subjects_dir=None):
    """Compute a label from the non-zero sources in an stc object.

    Parameters
    ----------
    stc : SourceEstimate
        The source estimates.
    src : SourceSpaces | str | None
        The source space over which the source estimates are defined.
        If it's a string it should the subject name (e.g. fsaverage).
        Can be None if stc.subject is not None.
    smooth : int
        Number of smoothing iterations.
    subjects_dir : str | None
        Path to SUBJECTS_DIR if it is not set in the environment.

    Returns
    -------
    labels : list of Labels
        The generated labels, one entry per hemisphere. If no Label is
        available in a hemisphere, None is returned for that hemisphere.
    """
    src = stc.subject if src is None else src

    if isinstance(src, str):
        subject = src
    else:
        subject = stc.subject

    if isinstance(src, str):
        subjects_dir = get_subjects_dir(subjects_dir)
        surf_path_from = op.join(subjects_dir, src, 'surf')
        rr_lh, tris_lh = read_surface(op.join(surf_path_from, 'lh.white'))
        rr_rh, tris_rh = read_surface(op.join(surf_path_from, 'rh.white'))
        rr = [rr_lh, rr_rh]
        tris = [tris_lh, tris_rh]
    else:
        if not isinstance(src, SourceSpaces):
            raise TypeError('src must be a string or a set of source spaces')
        if len(src) != 2:
            raise ValueError('source space should contain the 2 hemispheres')
        rr = [1e3 * src[0]['rr'], 1e3 * src[1]['rr']]
        tris = [src[0]['tris'], src[1]['tris']]

    labels = []
    cnt = 0
    for hemi_idx, (hemi, this_vertno, this_tris, this_rr) in enumerate(
            zip(['lh', 'rh'], stc.vertices, tris, rr)):
        this_data = stc.data[cnt:cnt + len(this_vertno)]
        e = mesh_edges(this_tris)
        e.data[e.data == 2] = 1
        n_vertices = e.shape[0]
        e = e + sparse.eye(n_vertices, n_vertices)

        clusters = [this_vertno[np.any(this_data, axis=1)]]

        cnt += len(this_vertno)

        clusters = [c for c in clusters if len(c) > 0]

        if len(clusters) == 0:
            this_labels = None
        else:
            this_labels = []
            colors = _n_colors(len(clusters))
            for c, color in zip(clusters, colors):
                idx_use = c
                for k in range(smooth):
                    e_use = e[:, idx_use]
                    data1 = e_use * np.ones(len(idx_use))
                    idx_use = np.where(data1)[0]

                label = Label(idx_use, this_rr[idx_use], None, hemi,
                              'Label from stc', subject=subject,
                              color=color)

                this_labels.append(label)

            this_labels = this_labels[0]

        labels.append(this_labels)

    return labels
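
A minimal sketch of how this helper might be driven, assuming a SourceEstimate is already on disk (the stc path, subject name, and SUBJECTS_DIR below are placeholders, not part of the original example):

import mne

# Placeholder inputs: point these at a real stc file and FreeSurfer setup.
stc = mne.read_source_estimate('path/to/my-stc', subject='sample')
labels = _stc_to_label(stc, src='sample', smooth=5,
                       subjects_dir='path/to/subjects_dir')
lh_label, rh_label = labels  # one Label (or None) per hemisphere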
Example #33
0
def labels_from_mni_coords(seeds,
                           extent=30.,
                           subject='fsaverage',
                           surface='white',
                           mask=None,
                           subjects_dir=None,
                           parc=None):
    """Create a parcellation from seed coordinates in MNI space

    Parameters
    ----------
    seeds : dict
        Seed coordinates. Keys are label names, including -hemi tags. Values
        are seeds (array_like of shape (3,) or (n_seeds, 3)).
    extent : scalar
        Extent of the label in millimeters (maximum distance from the seed).
    subject : str
        MRI-subject to use (default 'fsaverage').
    surface : str
        Surface to use (default 'white').
    mask : None | str
        A parcellation used to mask the parcellation under construction.
    subjects_dir : str
        SUBJECTS_DIR.
    parc : None | str
        Name of the parcellation under construction (only used for error
        messages).
    """
    name_re = re.compile(r"^\w+-(lh|rh)$")
    matches = {name: name_re.match(name) for name in seeds}
    invalid = sorted(name for name, m in matches.items() if m is None)
    if invalid:
        raise ValueError(
            "Invalid seed names in parc %r: %s; seed names need to conform to "
            "the 'xxx-lh' or 'xxx-rh' scheme so that the proper hemisphere can "
            "be selected" % (parc, ', '.join(map(repr, sorted(invalid)))))

    # load surfaces
    subjects_dir = get_subjects_dir(subjects_dir)
    fpath = os.path.join(subjects_dir, subject, 'surf', '.'.join(
        ('%s', surface)))
    surfs = {hemi: mne.read_surface(fpath % hemi) for hemi in ('lh', 'rh')}

    # prepare seed properties for mne.grow_labels
    vertices = []
    names = []
    hemis = []
    for name, coords_ in seeds.items():
        coords = np.atleast_2d(coords_)
        if coords.ndim != 2 or coords.shape[1] != 3:
            raise ValueError("Invalid coordinate specification for seed %r in "
                             "parc %r: %r. Seeds need to be specified as "
                             "arrays with shape (3,) or (n_seeds, 3)." %
                             (name, parc, coords_))
        hemi = matches[name].group(1)
        seed_verts = []
        for coord in coords:
            dist = np.sqrt(np.sum((surfs[hemi][0] - coord)**2, axis=1))
            seed_verts.append(np.argmin(dist))
        vertices.append(seed_verts)
        names.append(name)
        hemis.append(hemi == 'rh')

    # grow labels
    labels = mne.grow_labels(subject, vertices, extent, hemis, subjects_dir, 1,
                             False, names, surface)

    # apply mask
    if mask is not None:
        mlabels = mne.read_labels_from_annot(subject,
                                             mask,
                                             subjects_dir=subjects_dir)
        unknown = {l.hemi: l for l in mlabels if l.name.startswith('unknown-')}

        for label in labels:
            rm = unknown[label.hemi]
            if np.any(np.in1d(label.vertices, rm.vertices)):
                label.vertices = np.setdiff1d(label.vertices, rm.vertices,
                                              True)

    return labels
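
A hedged usage sketch for the function above (the seed coordinates, extent, and SUBJECTS_DIR are made-up placeholders):

# Placeholder MNI coordinates; replace with real seeds.
seeds = {
    'auditory-lh': [-54., -22., 10.],
    'auditory-rh': [52., -24., 12.],
}
labels = labels_from_mni_coords(seeds, extent=20., subject='fsaverage',
                                subjects_dir='path/to/subjects_dir')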
Example #34
0
def get_head_mri_trans(bids_path,
                       extra_params=None,
                       t1_bids_path=None,
                       fs_subject=None,
                       fs_subjects_dir=None,
                       *,
                       kind=None,
                       verbose=None):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
    files of (i) the MEG and (ii) the T1-weighted MRI data. The two sets of
    points will then be used to calculate a transformation matrix from head
    coordinates to MRI coordinates.

    .. note:: The MEG and MRI data need **not** necessarily be stored in the
              same session or even in the same BIDS dataset. See the
              ``t1_bids_path`` parameter for details.

    Parameters
    ----------
    bids_path : BIDSPath
        The path of the electrophysiology recording. If ``datatype`` and
        ``suffix`` are not present, they will be set to ``'meg'``, and a
        warning will be raised.

        .. versionchanged:: 0.10
           A warning is raised if ``datatype`` or ``suffix`` are not set.
    extra_params : None | dict
        Extra parameters to be passed to :func:`mne.io.read_raw` when reading
        the MEG file.
    t1_bids_path : BIDSPath | None
        If ``None`` (default), will try to discover the T1-weighted MRI file
        based on the name and location of the MEG recording specified via the
        ``bids_path`` parameter. Alternatively, you can explicitly specify
        which T1-weighted MRI scan to use for extraction of MRI landmarks. To
        do that, pass a :class:`mne_bids.BIDSPath` pointing to the scan.
        Use this parameter e.g. if the T1 scan was recorded during a different
        session than the MEG. It is even possible to point to a T1 image stored
        in an entirely different BIDS dataset than the MEG data.
    fs_subject : str
        The subject identifier used for FreeSurfer.

        .. versionchanged:: 0.10
           Does not default anymore to ``bids_path.subject`` if ``None``.
    fs_subjects_dir : path-like | None
        The FreeSurfer subjects directory. If ``None``, defaults to the
        ``SUBJECTS_DIR`` environment variable.

        .. versionadded:: 0.8
    kind : str | None
        The suffix of the anatomical landmark names in the JSON sidecar.
        A suffix might be present e.g. to distinguish landmarks between
        sessions. If provided, should not include a leading underscore ``_``.
        For example, if the landmark names in the JSON sidecar file are
        ``LPA_ses-1``, ``RPA_ses-1``, ``NAS_ses-1``, you should pass
        ``'ses-1'`` here.
        If ``None``, no suffix is appended, the landmarks named
        ``Nasion`` (or ``NAS``), ``LPA``, and ``RPA`` will be used.

        .. versionadded:: 0.10
    %(verbose)s

    Returns
    -------
    trans : mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates.
    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
                           'instantiate using mne_bids.BIDSPath().')

    # check root available
    meg_bids_path = bids_path.copy()
    del bids_path
    if meg_bids_path.root is None:
        raise ValueError('The root of the "bids_path" must be set. '
                         'Please use `bids_path.update(root="<root>")` '
                         'to set the root of the BIDS folder to read.')

    # if the bids_path is underspecified, only get info for MEG data
    if meg_bids_path.datatype is None:
        meg_bids_path.datatype = 'meg'
        warn(
            'bids_path did not have a datatype set. Assuming "meg". This '
            'will raise an exception in the future.',
            module='mne_bids',
            category=DeprecationWarning)
    if meg_bids_path.suffix is None:
        meg_bids_path.suffix = 'meg'
        warn(
            'bids_path did not have a suffix set. Assuming "meg". This '
            'will raise an exception in the future.',
            module='mne_bids',
            category=DeprecationWarning)

    # Get the sidecar file for MRI landmarks
    t1w_bids_path = ((meg_bids_path if t1_bids_path is None else
                      t1_bids_path).copy().update(datatype='anat',
                                                  suffix='T1w',
                                                  task=None))
    t1w_json_path = _find_matching_sidecar(bids_path=t1w_bids_path,
                                           extension='.json',
                                           on_error='ignore')
    del t1_bids_path

    if t1w_json_path is not None:
        t1w_json_path = Path(t1w_json_path)

    if t1w_json_path is None or not t1w_json_path.exists():
        raise FileNotFoundError(
            f'Did not find T1w JSON sidecar file, tried location: '
            f'{t1w_json_path}')
    for extension in ('.nii', '.nii.gz'):
        t1w_path_candidate = t1w_json_path.with_suffix(extension)
        if t1w_path_candidate.exists():
            t1w_bids_path = get_bids_path_from_fname(fname=t1w_path_candidate)
            break

    if not t1w_bids_path.fpath.exists():
        raise FileNotFoundError(
            f'Did not find T1w recording file, tried location: '
            f'{t1w_path_candidate.name.replace(".nii.gz", "")}[.nii, .nii.gz]')

    # Get MRI landmarks from the JSON sidecar
    t1w_json = json.loads(t1w_json_path.read_text(encoding='utf-8'))
    mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())

    # landmarks array: rows: [LPA, NAS, RPA]; columns: [x, y, z]
    suffix = f"_{kind}" if kind is not None else ""
    mri_landmarks = np.full((3, 3), np.nan)
    for landmark_name, coords in mri_coords_dict.items():
        if landmark_name.upper() == ('LPA' + suffix).upper():
            mri_landmarks[0, :] = coords
        elif landmark_name.upper() == ('RPA' + suffix).upper():
            mri_landmarks[2, :] = coords
        elif (landmark_name.upper() == ('NAS' + suffix).upper()
              or landmark_name.lower() == ('nasion' + suffix).lower()):
            mri_landmarks[1, :] = coords
        else:
            continue

    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            f'Could not extract fiducial points from T1w sidecar file: '
            f'{t1w_json_path}\n\n'
            f'The sidecar file SHOULD contain a key '
            f'"AnatomicalLandmarkCoordinates" pointing to an '
            f'object with the keys "LPA", "NAS", and "RPA". '
            f'Yet, the following structure was found:\n\n'
            f'{mri_coords_dict}')

    # The MRI landmarks are in "voxels". We need to convert them to the
    # Neuromag RAS coordinate system in order to compare them with MEG
    # landmarks. See also: `mne_bids.write.write_anat`
    if fs_subject is None:
        warn(
            'Passing "fs_subject=None" has been deprecated and will raise '
            'an error in future versions. Please explicitly specify the '
            'FreeSurfer subject name.', DeprecationWarning)
        fs_subject = f'sub-{meg_bids_path.subject}'

    fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=False)
    fs_t1_path = Path(fs_subjects_dir) / fs_subject / 'mri' / 'T1.mgz'
    if not fs_t1_path.exists():
        raise ValueError(
            f"Could not find {fs_t1_path}. Consider running FreeSurfer's "
            f"'recon-all` for subject {fs_subject}.")
    fs_t1_mgh = nib.load(str(fs_t1_path))
    t1_nifti = nib.load(str(t1w_bids_path.fpath))

    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # convert to scanner RAS
    mri_landmarks = apply_trans(t1_mgh.header.get_vox2ras(), mri_landmarks)

    # convert to FreeSurfer T1 voxels (same scanner RAS as T1)
    mri_landmarks = apply_trans(fs_t1_mgh.header.get_ras2vox(), mri_landmarks)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = fs_t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(meg_bids_path)
    if extra_params is None:
        extra_params = dict()
    if ext == '.fif':
        extra_params['allow_maxshield'] = True

    raw = read_raw_bids(bids_path=meg_bids_path, extra_params=extra_params)

    if (raw.get_montage() is None or raw.get_montage().get_positions() is None
            or any([
                raw.get_montage().get_positions()[fid_key] is None
                for fid_key in ('nasion', 'lpa', 'rpa')
            ])):
        raise RuntimeError(
            f'Could not extract fiducial points from ``raw`` file: '
            f'{meg_bids_path}\n\n'
            f'The ``raw`` file SHOULD contain digitization points '
            'for the nasion and left and right pre-auricular points '
            'but none were found')
    pos = raw.get_montage().get_positions()
    meg_landmarks = np.asarray((pos['lpa'], pos['nasion'], pos['rpa']))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
                                      tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
    return trans
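
A hedged sketch of calling this function (the BIDS root, entities, and FreeSurfer paths are placeholders):

from mne_bids import BIDSPath

# Placeholder BIDS entities; point these at a real dataset.
bids_path = BIDSPath(subject='01', session='01', task='rest',
                     datatype='meg', suffix='meg',
                     root='path/to/bids_dataset')
trans = get_head_mri_trans(bids_path, fs_subject='sub-01',
                           fs_subjects_dir='path/to/subjects_dir')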
Example #35
0
def labels_from_mni_coords(seeds, extent=30., subject='fsaverage',
                           surface='white', mask=None, subjects_dir=None,
                           parc=None):
    """Create a parcellation from seed coordinates in MNI space

    Parameters
    ----------
    seeds : dict
        Seed coordinates. Keys are label names, including -hemi tags. Values
        are seeds (array_like of shape (3,) or (n_seeds, 3)).
    extent : scalar
        Extent of the label in millimeters (maximum distance from the seed).
    subject : str
        MRI-subject to use (default 'fsaverage').
    surface : str
        Surface to use (default 'white').
    mask : None | str
        A parcellation used to mask the parcellation under construction.
    subjects_dir : str
        SUBJECTS_DIR.
    parc : None | str
        Name of the parcellation under construction (only used for error
        messages).
    """
    name_re = re.compile(r"\w+-(lh|rh)$")
    if not all(name.endswith(('lh', 'rh')) for name in seeds):
        err = ("Names need to end in 'lh' or 'rh' so that the proper "
               "hemisphere can be selected")
        raise ValueError(err)

    # load surfaces
    subjects_dir = get_subjects_dir(subjects_dir)
    fpath = os.path.join(subjects_dir, subject, 'surf',
                         '.'.join(('%s', surface)))
    surfs = {hemi: mne.read_surface(fpath % hemi) for hemi in ('lh', 'rh')}

    # prepare seed properties for mne.grow_labels
    vertices = []
    names = []
    hemis = []
    for name, coords_ in seeds.items():
        m = name_re.match(name)
        if not m:
            raise ValueError("Invalid seed name in %r parc: %r. Names must "
                             "conform to the 'xxx-lh' or 'xxx-rh' scheme."
                             % (parc, name))
        coords = np.atleast_2d(coords_)
        if coords.ndim != 2 or coords.shape[1] != 3:
            raise ValueError("Invalid coordinate specification for seed %r in "
                             "parc %r: %r. Seeds need to be specified as "
                             "arrays with shape (3,) or (n_seeds, 3)."
                             % (name, parc, coords_))
        hemi = m.group(1)
        seed_verts = []
        for coord in coords:
            dist = np.sqrt(np.sum((surfs[hemi][0] - coord) ** 2, axis=1))
            seed_verts.append(np.argmin(dist))
        vertices.append(seed_verts)
        names.append(name)
        hemis.append(hemi == 'rh')

    # grow labels
    labels = mne.grow_labels(subject, vertices, extent, hemis, subjects_dir,
                             1, False, names, surface)

    # apply mask
    if mask is not None:
        mlabels = mne.read_labels_from_annot(subject, mask,
                                             subjects_dir=subjects_dir)
        unknown = {l.hemi: l for l in mlabels if l.name.startswith('unknown-')}

        for label in labels:
            rm = unknown[label.hemi]
            if np.any(np.in1d(label.vertices, rm.vertices)):
                label.vertices = np.setdiff1d(label.vertices, rm.vertices, True)

    return labels
Example #36
0
def run():
    """Run command."""
    from mne.commands.utils import get_optparser, _add_verbose_flag

    parser = get_optparser(__file__)

    parser.add_option("-s",
                      "--subject",
                      dest="subject",
                      help="Subject name (required)",
                      default=None)
    parser.add_option("--model",
                      dest="model",
                      help="Output file name. Use a name <dir>/<name>-bem.fif",
                      default=None,
                      type='string')
    parser.add_option('--ico',
                      dest='ico',
                      help='The surface ico downsampling to use, e.g. '
                      ' 5=20484, 4=5120, 3=1280. If None, no subsampling'
                      ' is applied.',
                      default=None,
                      type='int')
    parser.add_option('--brainc',
                      dest='brainc',
                      help='Defines the brain compartment conductivity. '
                      'The default value is 0.3 S/m.',
                      default=0.3,
                      type='float')
    parser.add_option('--skullc',
                      dest='skullc',
                      help='Defines the skull compartment conductivity. '
                      'The default value is 0.006 S/m.',
                      default=None,
                      type='float')
    parser.add_option('--scalpc',
                      dest='scalpc',
                      help='Defines the scalp compartment conductivity. '
                      'The default value is 0.3 S/m.',
                      default=None,
                      type='float')
    parser.add_option('--homog',
                      dest='homog',
                      help='Use a single compartment model (brain only) '
                      'instead of a three layer one (scalp, skull, and '
                      'brain). If this flag is specified, the options '
                      '--skullc and --scalpc are irrelevant.',
                      default=None,
                      action="store_true")
    parser.add_option('-d',
                      '--subjects-dir',
                      dest='subjects_dir',
                      help='Subjects directory',
                      default=None)
    _add_verbose_flag(parser)
    options, args = parser.parse_args()

    if options.subject is None:
        parser.print_help()
        sys.exit(1)

    subject = options.subject
    fname = options.model
    subjects_dir = options.subjects_dir
    ico = options.ico
    brainc = options.brainc
    skullc = options.skullc
    scalpc = options.scalpc
    homog = True if options.homog is not None else False
    verbose = True if options.verbose is not None else False
    # Parse conductivity option
    if homog is True:
        if skullc is not None:
            warn('Trying to set the skull conductivity for a single layer '
                 'model. To use a 3 layer model, do not set the --homog flag.')
        if scalpc is not None:
            warn('Trying to set the scalp conductivity for a single layer '
                 'model. To use a 3 layer model, do not set the --homog flag.')
        # Single layer
        conductivity = [brainc]
    else:
        if skullc is None:
            skullc = 0.006
        if scalpc is None:
            scalpc = 0.3
        conductivity = [brainc, skullc, scalpc]
    # Create the BEM model
    bem_model = mne.make_bem_model(subject,
                                   ico=ico,
                                   conductivity=conductivity,
                                   subjects_dir=subjects_dir,
                                   verbose=verbose)
    # Generate filename
    if fname is None:
        n_faces = list(str(len(surface['tris'])) for surface in bem_model)
        fname = subject + '-' + '-'.join(n_faces) + '-bem.fif'
    else:
        if not (fname.endswith('-bem.fif') or fname.endswith('_bem.fif')):
            fname = fname + "-bem.fif"
    # Save to the subject's bem directory
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    fname = os.path.join(subjects_dir, subject, "bem", fname)
    # Write the BEM surfaces to file
    mne.write_bem_surfaces(fname, bem_model)
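
For reference, a hedged Python-equivalent of what this command does (the subject name, conductivities, and output filename are placeholders):

import mne

# Placeholder subject and SUBJECTS_DIR; the conductivities mirror the
# three-layer defaults above (brain, skull, scalp).
bem_model = mne.make_bem_model('sample', ico=4,
                               conductivity=(0.3, 0.006, 0.3),
                               subjects_dir='path/to/subjects_dir')
mne.write_bem_surfaces('sample-5120-5120-5120-bem.fif', bem_model)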
Example #37
0
def get_head_mri_trans(bids_path,
                       extra_params=None,
                       t1_bids_path=None,
                       fs_subject=None,
                       fs_subjects_dir=None,
                       verbose=None):
    """Produce transformation matrix from MEG and MRI landmark points.

    Will attempt to read the landmarks of Nasion, LPA, and RPA from the sidecar
    files of (i) the MEG and (ii) the T1-weighted MRI data. The two sets of
    points will then be used to calculate a transformation matrix from head
    coordinates to MRI coordinates.

    .. note:: The MEG and MRI data need **not** necessarily be stored in the
              same session or even in the same BIDS dataset. See the
              ``t1_bids_path`` parameter for details.

    Parameters
    ----------
    bids_path : mne_bids.BIDSPath
        The path of the electrophysiology recording.
    extra_params : None | dict
        Extra parameters to be passed to :func:`mne.io.read_raw` when reading
        the MEG file.
    t1_bids_path : mne_bids.BIDSPath | None
        If ``None`` (default), will try to discover the T1-weighted MRI file
        based on the name and location of the MEG recording specified via the
        ``bids_path`` parameter. Alternatively, you can explicitly specify
        which T1-weighted MRI scan to use for extraction of MRI landmarks. To
        do that, pass a :class:`mne_bids.BIDSPath` pointing to the scan.
        Use this parameter e.g. if the T1 scan was recorded during a different
        session than the MEG. It is even possible to point to a T1 image stored
        in an entirely different BIDS dataset than the MEG data.
    fs_subject : str | None
        The subject identifier used for FreeSurfer. If ``None``, defaults to
        the ``subject`` entity in ``bids_path``.
    fs_subjects_dir : str | pathlib.Path | None
        The FreeSurfer subjects directory. If ``None``, defaults to the
        ``SUBJECTS_DIR`` environment variable.

        .. versionadded:: 0.8
    %(verbose)s

    Returns
    -------
    trans : mne.transforms.Transform
        The data transformation matrix from head to MRI coordinates.
    """
    if not has_nibabel():  # pragma: no cover
        raise ImportError('This function requires nibabel.')
    import nibabel as nib

    if not isinstance(bids_path, BIDSPath):
        raise RuntimeError('"bids_path" must be a BIDSPath object. Please '
                           'instantiate using mne_bids.BIDSPath().')

    # check root available
    meg_bids_path = bids_path.copy()
    del bids_path
    if meg_bids_path.root is None:
        raise ValueError('The root of the "bids_path" must be set. '
                         'Please use `bids_path.update(root="<root>")` '
                         'to set the root of the BIDS folder to read.')

    # only get this for MEG data
    meg_bids_path.update(datatype='meg', suffix='meg')

    # Get the sidecar file for MRI landmarks
    match_bids_path = meg_bids_path if t1_bids_path is None else t1_bids_path
    t1w_path = _find_matching_sidecar(match_bids_path,
                                      suffix='T1w',
                                      extension='.nii.gz')
    t1w_json_path = _find_matching_sidecar(match_bids_path,
                                           suffix='T1w',
                                           extension='.json')

    # Get MRI landmarks from the JSON sidecar
    with open(t1w_json_path, 'r', encoding='utf-8') as f:
        t1w_json = json.load(f)
    mri_coords_dict = t1w_json.get('AnatomicalLandmarkCoordinates', dict())

    # landmarks array: rows: [LPA, NAS, RPA]; columns: [x, y, z]
    mri_landmarks = np.full((3, 3), np.nan)
    for landmark_name, coords in mri_coords_dict.items():
        if landmark_name.upper() == 'LPA':
            mri_landmarks[0, :] = coords
        elif landmark_name.upper() == 'RPA':
            mri_landmarks[2, :] = coords
        elif (landmark_name.upper() == 'NAS'
              or landmark_name.lower() == 'nasion'):
            mri_landmarks[1, :] = coords
        else:
            continue

    if np.isnan(mri_landmarks).any():
        raise RuntimeError(
            f'Could not extract fiducial points from T1w sidecar file: '
            f'{t1w_json_path}\n\n'
            f'The sidecar file SHOULD contain a key '
            f'"AnatomicalLandmarkCoordinates" pointing to an '
            f'object with the keys "LPA", "NAS", and "RPA". '
            f'Yet, the following structure was found:\n\n'
            f'{mri_coords_dict}')

    # The MRI landmarks are in "voxels". We need to convert them to the
    # Neuromag RAS coordinate system in order to compare them with the MEG
    # landmarks. See also: `mne_bids.write.write_anat`
    if fs_subject is None:
        fs_subject = f'sub-{meg_bids_path.subject}'
    fs_subjects_dir = get_subjects_dir(fs_subjects_dir, raise_error=False)
    fs_t1_fname = Path(fs_subjects_dir) / fs_subject / 'mri' / 'T1.mgz'
    if not fs_t1_fname.exists():
        raise ValueError(
            f"Could not find {fs_t1_fname}. Consider running FreeSurfer's "
            f"'recon-all` for subject {fs_subject}.")
    fs_t1_mgh = nib.load(str(fs_t1_fname))
    t1_nifti = nib.load(str(t1w_path))

    # Convert to MGH format to access vox2ras method
    t1_mgh = nib.MGHImage(t1_nifti.dataobj, t1_nifti.affine)

    # convert to scanner RAS
    mri_landmarks = apply_trans(t1_mgh.header.get_vox2ras(), mri_landmarks)

    # convert to FreeSurfer T1 voxels (same scanner RAS as T1)
    mri_landmarks = apply_trans(fs_t1_mgh.header.get_ras2vox(), mri_landmarks)

    # now extract transformation matrix and put back to RAS coordinates of MRI
    vox2ras_tkr = fs_t1_mgh.header.get_vox2ras_tkr()
    mri_landmarks = apply_trans(vox2ras_tkr, mri_landmarks)
    mri_landmarks = mri_landmarks * 1e-3

    # Get MEG landmarks from the raw file
    _, ext = _parse_ext(meg_bids_path)
    if extra_params is None:
        extra_params = dict()
    if ext == '.fif':
        extra_params['allow_maxshield'] = True

    raw = read_raw_bids(bids_path=meg_bids_path, extra_params=extra_params)
    meg_coords_dict = _extract_landmarks(raw.info['dig'])
    meg_landmarks = np.asarray((meg_coords_dict['LPA'], meg_coords_dict['NAS'],
                                meg_coords_dict['RPA']))

    # Given the two sets of points, fit the transform
    trans_fitted = fit_matched_points(src_pts=meg_landmarks,
                                      tgt_pts=mri_landmarks)
    trans = mne.transforms.Transform(fro='head', to='mri', trans=trans_fitted)
    return trans
Example #38
0
def gen_forwards(p, subjects, structurals, run_indices):
    """Generate forward solutions

    Can only complete successfully once coregistration is performed
    (usually in mne_analyze).

    Parameters
    ----------
    p : instance of Parameters
        Analysis parameters.
    subjects : list of str
        Subject names to analyze (e.g., ['Eric_SoP_001', ...]).
    structurals : list (of str or None)
        The structural data names for each subject (e.g., ['AKCLEE_101', ...]).
        If None, a spherical BEM and volume grid space will be used.
    run_indices : array-like | None
        Run indices to include.
    """
    for si, subj in enumerate(subjects):
        struc = structurals[si]
        fwd_dir = op.join(p.work_dir, subj, p.forward_dir)
        if not op.isdir(fwd_dir):
            os.mkdir(fwd_dir)
        raw_fname = get_raw_fnames(p, subj, 'sss', False, False,
                                   run_indices[si])[0]
        info = read_info(raw_fname)
        bem, src, trans, bem_type = _get_bem_src_trans(p, info, subj, struc)
        if not getattr(p, 'translate_positions', True):
            raise RuntimeError('Not translating positions is no longer '
                               'supported')
        print('  Creating forward solution(s) using a %s for %s...' %
              (bem_type, subj),
              end='')
        # XXX Don't actually need to generate a different fwd for each inv
        # anymore, since all runs are included, but changing the filename
        # would break a lot of existing pipelines :(
        try:
            subjects_dir = get_subjects_dir(p.subjects_dir, raise_error=True)
            subject = src[0]['subject_his_id']
            dist = dig_mri_distances(info,
                                     trans,
                                     subject,
                                     subjects_dir=subjects_dir)
        except Exception as exp:
            # old MNE or bad args
            print(' (dig<->MRI unknown: %s)' % (str(exp)[:20] + '...', ))
        else:
            dist = np.median(dist)
            print(' (dig<->MRI %0.1f mm)' % (1000 * dist, ))
            if dist > 5:
                warnings.warn(
                    '%s dig<->MRI distance %0.1f mm could indicate a problem '
                    'with coregistration, check coreg' %
                    (subject, 1000 * dist))
        fwd_name = get_cov_fwd_inv_fnames(p, subj, run_indices[si])[1][0]
        fwd = make_forward_solution(info,
                                    trans,
                                    src,
                                    bem,
                                    n_jobs=p.n_jobs,
                                    mindist=p.fwd_mindist)
        write_forward_solution(fwd_name, fwd, overwrite=True)
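
A stripped-down sketch of the forward computation at the core of this pipeline step (every filename below is a placeholder):

import mne

# Placeholder filenames; substitute real coregistration and model files.
info = mne.io.read_info('path/to/raw_sss.fif')
fwd = mne.make_forward_solution(info, trans='path/to/trans.fif',
                                src='path/to/src.fif',
                                bem='path/to/bem-sol.fif', mindist=1.0)
mne.write_forward_solution('path/to/fwd.fif', fwd, overwrite=True)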
Example #39
0
def _run(subjects_dir, subject, force, overwrite, no_decimate, verbose=None):
    this_env = copy.copy(os.environ)
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    this_env['SUBJECTS_DIR'] = subjects_dir
    this_env['SUBJECT'] = subject
    if 'FREESURFER_HOME' not in this_env:
        raise RuntimeError('The FreeSurfer environment needs to be set up '
                           'for this script')
    incomplete = 'warn' if force else 'raise'
    subj_path = op.join(subjects_dir, subject)
    if not op.exists(subj_path):
        raise RuntimeError('%s does not exist. Please check your subject '
                           'directory path.' % subj_path)

    mri = 'T1.mgz' if op.exists(op.join(subj_path, 'mri', 'T1.mgz')) else 'T1'

    logger.info('1. Creating a dense scalp tessellation with mkheadsurf...')

    def check_seghead(surf_path=op.join(subj_path, 'surf')):
        surf = None
        for k in ['lh.seghead', 'lh.smseghead']:
            this_surf = op.join(surf_path, k)
            if op.exists(this_surf):
                surf = this_surf
                break
        return surf

    my_seghead = check_seghead()
    if my_seghead is None:
        run_subprocess(['mkheadsurf', '-subjid', subject, '-srcvol', mri],
                       env=this_env)

    surf = check_seghead()
    if surf is None:
        raise RuntimeError('mkheadsurf did not produce the standard output '
                           'file.')

    dense_fname = '{0}/{1}/bem/{1}-head-dense.fif'.format(
        subjects_dir, subject)
    logger.info('2. Creating %s ...' % dense_fname)
    _check_file(dense_fname, overwrite)
    surf = mne.bem._surfaces_to_bem(
        [surf], [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1],
        incomplete=incomplete)[0]
    mne.write_bem_surfaces(dense_fname, surf)
    levels = 'medium', 'sparse'
    tris = [] if no_decimate else [30000, 2500]
    if os.getenv('_MNE_TESTING_SCALP', 'false') == 'true':
        tris = [len(surf['tris'])]  # don't actually decimate
    for ii, (n_tri, level) in enumerate(zip(tris, levels), 3):
        logger.info('%i. Creating %s tessellation...' % (ii, level))
        logger.info('%i.1 Decimating the dense tessellation...' % ii)
        with ETSContext():
            points, tris = mne.decimate_surface(points=surf['rr'],
                                                triangles=surf['tris'],
                                                n_triangles=n_tri)
        dec_fname = dense_fname.replace('dense', level)
        logger.info('%i.2 Creating %s' % (ii, dec_fname))
        _check_file(dec_fname, overwrite)
        dec_surf = mne.bem._surfaces_to_bem(
            [dict(rr=points, tris=tris)],
            [mne.io.constants.FIFF.FIFFV_BEM_SURF_ID_HEAD], [1],
            rescale=False,
            incomplete=incomplete)
        mne.write_bem_surfaces(dec_fname, dec_surf)
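
A hedged sketch of driving this helper directly (subject and SUBJECTS_DIR are placeholders; FreeSurfer and mkheadsurf must be available in the environment):

# Placeholder arguments for the helper defined above.
_run(subjects_dir='path/to/subjects_dir', subject='sample',
     force=False, overwrite=True, no_decimate=False)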
Example #40
0
def _plot_3d_evoked_array(inst,
                          ea,
                          picks="hbo",
                          value="Coef.",
                          background='w',
                          figure=None,
                          clim='auto',
                          mode='weighted',
                          colormap='RdBu_r',
                          surface='pial',
                          hemi='both',
                          size=800,
                          view=None,
                          colorbar=True,
                          distance=0.03,
                          subjects_dir=None,
                          src=None,
                          verbose=False):

    # TODO: mimic behaviour of other MNE-NIRS glm plotting options
    if picks is not None:
        ea = ea.pick(picks=picks)

    if subjects_dir is None:
        subjects_dir = get_subjects_dir(raise_error=True)
    if src is None:
        fname_src_fs = os.path.join(subjects_dir, 'fsaverage', 'bem',
                                    'fsaverage-ico-5-src.fif')
        src = read_source_spaces(fname_src_fs)

    picks = np.arange(len(ea.info['ch_names']))

    # Set coord frame
    for idx in range(len(ea.ch_names)):
        ea.info['chs'][idx]['coord_frame'] = FIFF.FIFFV_COORD_HEAD

    # Generate source estimate
    kwargs = dict(evoked=ea,
                  subject='fsaverage',
                  trans='fsaverage',
                  distance=distance,
                  mode=mode,
                  surface=surface,
                  subjects_dir=subjects_dir,
                  src=src,
                  project=True)
    stc = stc_near_sensors(picks=picks, **kwargs, verbose=verbose)

    # Produce brain plot
    brain = stc.plot(src=src,
                     subjects_dir=subjects_dir,
                     hemi=hemi,
                     surface=surface,
                     initial_time=0,
                     clim=clim,
                     size=size,
                     colormap=colormap,
                     figure=figure,
                     background=background,
                     colorbar=colorbar,
                     verbose=verbose)
    if view is not None:
        brain.show_view(view)

    return brain
Example #41
0
def plot_stc_time_point(stc,
                        subject,
                        limits=[5, 10, 15],
                        time_index=0,
                        surf='inflated',
                        measure='dSPM',
                        subjects_dir=None):
    """Plot a time instant from a SourceEstimate using matplotlib

    The same could be done with mayavi using proper 3D.

    Parameters
    ----------
    stc : instance of SourceEstimate
        The SourceEstimate to plot.
    subject : string
        The subject name (only needed if surf is a string).
    limits : list of 3 values
        Limits used to generate the colormap (passed to
        mne_analyze_colormap) and to scale the scatter colors.
    time_index : int
        Time index to plot.
    surf : str, or instance of surfaces
        Surface to use (e.g., 'inflated' or 'white'), or pre-loaded surfaces.
    measure : str
        The label for the colorbar. None turns the colorbar off.
    subjects_dir : str, or None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    """
    subjects_dir = get_subjects_dir(subjects_dir)
    pl.figure(facecolor='k', figsize=(8, 5))
    hemis = ['lh', 'rh']
    if isinstance(surf, str):
        surf = [
            read_surface(
                op.join(subjects_dir, subject, 'surf', '%s.%s' % (h, surf)))
            for h in hemis
        ]
    my_cmap = mne_analyze_colormap(limits)
    for hi, h in enumerate(hemis):
        coords = surf[hi][0][stc.vertices[hi]]
        if hi == 0:
            vals = stc.lh_data[:, time_index]
        else:
            vals = stc.rh_data[:, time_index]
        ax = pl.subplot(1, 2, hi + 1, axis_bgcolor='none')
        pl.tick_params(labelbottom='off', labelleft='off')
        flipper = -1 if hi == 1 else 1
        sc = ax.scatter(flipper * coords[:, 1],
                        coords[:, 2],
                        c=vals,
                        vmin=-limits[2],
                        vmax=limits[2],
                        cmap=my_cmap,
                        edgecolors='none',
                        s=5)
        ax.set_aspect('equal')
        pl.axis('off')
    try:
        pl.tight_layout(0)
    except Exception:
        pass
    if measure is not None:
        cax = pl.axes([0.85, 0.15, 0.025, 0.15], axisbg='k')
        cb = pl.colorbar(sc, cax, ticks=[-limits[2], 0, limits[2]])
        cb.set_label(measure, color='w')
        pl.setp(pl.getp(cb.ax, 'yticklabels'), color='w')
    pl.draw()
    pl.show()
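
A hedged usage sketch (the stc path, subject, and SUBJECTS_DIR below are placeholders):

import mne

# Placeholder inputs; requires lh/rh .stc files on disk.
stc = mne.read_source_estimate('path/to/my-stc', subject='sample')
plot_stc_time_point(stc, 'sample', time_index=10,
                    subjects_dir='path/to/subjects_dir')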