Example no. 1
def test_read_annot():
    """Test reading labels from FreeSurfer parcellation
    """
    # test some invalid inputs
    assert_raises(ValueError, read_annot, 'sample', hemi='bla',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, read_annot, 'sample',
                  annot_fname='bla.annot', subjects_dir=subjects_dir)

    # read labels using hemi specification
    labels_lh = read_annot('sample', hemi='lh', subjects_dir=subjects_dir)
    for label in labels_lh:
        assert_true(label.name.endswith('-lh'))
        assert_true(label.hemi == 'lh')
        assert_is_not(label.color, None)

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh = read_annot('sample', annot_fname=annot_fname,
                           subjects_dir=subjects_dir)
    for label in labels_rh:
        assert_true(label.name.endswith('-rh'))
        assert_true(label.hemi == 'rh')
        assert_is_not(label.color, None)

    # combine the lh and rh labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both = read_annot('sample', subjects_dir=subjects_dir)

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert_true(len(labels_both) == 68)

    # test regexp
    label = read_annot('sample', parc='aparc.a2009s', regexp='Angu',
                       subjects_dir=subjects_dir)[0]
    assert_true(label.name == 'G_pariet_inf-Angular-lh')
    # silly, but real regexp:
    label = read_annot('sample', 'aparc.a2009s', regexp='.*-.{4,}_.{3,3}-L',
                       subjects_dir=subjects_dir)[0]
    assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
    assert_raises(RuntimeError, read_annot, 'sample', parc='aparc',
                  annot_fname=annot_fname, regexp='JackTheRipper',
                  subjects_dir=subjects_dir)
Example no. 2
def test_read_annot_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels
    """

    def _mne_annot2labels(subject, subjects_dir, parc):
        """Get labels using mne_annot2lables"""
        label_dir = _TempDir()
        cwd = os.getcwd()
        try:
            os.chdir(label_dir)
            env = os.environ.copy()
            env['SUBJECTS_DIR'] = subjects_dir
            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
            run_subprocess(cmd, env=env)
            label_fnames = glob.glob(label_dir + '/*.label')
            label_fnames.sort()
            labels = [read_label(fname) for fname in label_fnames]
        finally:
            del label_dir
            os.chdir(cwd)

        return labels

    labels = read_annot('sample', subjects_dir=subjects_dir)
    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')

    # we expect the same result; mne_annot2labels does not fill pos, so
    # ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Example no. 3
def loadannot_mne(p,
                  subj,
                  subjdir,
                  labnam=None,
                  surf_type='pial',
                  surf_struct=None,
                  quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    from distutils.version import LooseVersion

    if LooseVersion(mne.__version__) >= LooseVersion('0.8'):
        # MNE-Python changed the annotation-reading API twice within the same
        # release cycle, so check for both names.
        try:
            annot = mne.read_labels_from_annot(parc=p,
                                               subject=subj,
                                               surf_name=surf_type,
                                               subjects_dir=subjdir,
                                               verbose=verbosity)
        except:
            annot = mne.read_annot(parc=p,
                                   subject=subj,
                                   surf_name=surf_type,
                                   subjects_dir=subjdir,
                                   verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p,
                                     subject=subj,
                                     surf_name=surf_type,
                                     subjects_dir=subjdir,
                                     verbose=verbosity)
        annot = annot[0]  # discard the color table
    return annot
Example no. 4
def loadannot_mne(p, subj, subjdir, labnam=None, surf_type='pial',
                  surf_struct=None, quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    if float(mne.__version__[:3]) >= 0.8:
        annot = mne.read_annot(parc=p, subject=subj, surf_name=surf_type,
                               subjects_dir=subjdir, verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p, subject=subj, surf_name=surf_type,
                                     subjects_dir=subjdir, verbose=verbosity)
        annot = annot[0]  # discard the color table
    return annot
Example no. 5
def test_split_label():
    """Test splitting labels"""
    aparc = read_annot('fsaverage',
                       'aparc',
                       'lh',
                       regexp='lingual',
                       subjects_dir=subjects_dir)
    lingual = aparc[0]

    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)

    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])

    # check vertices add up
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    lingual_reconst.color = lingual.color
    assert_labels_equal(lingual_reconst, lingual)

    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)

    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    fs_vert = [
        210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716, 32719,
        36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283, 71864, 71865,
        71866, 71874, 71883, 79901, 79903, 79910, 103024, 107849, 107850,
        122928, 139356, 139357, 139373, 139374, 139375, 139376, 139377, 139378,
        139381, 149117, 149118, 149120, 149127
    ]
    assert_array_equal(antmost.vertices, fs_vert)

    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")
Example no. 6
def test_source_space():
    "Test SourceSpace Dimension"
    subject = 'fsaverage'
    data_path = mne.datasets.sample.data_path()
    mri_sdir = os.path.join(data_path, 'subjects')
    mri_dir = os.path.join(mri_sdir, subject)
    src_path = os.path.join(mri_dir, 'bem', subject + '-ico-5-src.fif')
    label_dir = os.path.join(mri_dir, 'label')
    label_ba1 = mne.read_label(os.path.join(label_dir, 'lh.BA1.label'))
    label_v1 = mne.read_label(os.path.join(label_dir, 'lh.V1.label'))
    label_mt = mne.read_label(os.path.join(label_dir, 'lh.MT.label'))
    label_ba1_v1 = label_ba1 + label_v1
    label_v1_mt = label_v1 + label_mt

    src = mne.read_source_spaces(src_path)
    source = SourceSpace((src[0]['vertno'], src[1]['vertno']), subject,
                         'ico-5', mri_sdir)
    index = source.dimindex(label_v1)
    source_v1 = source[index]
    index = source.dimindex(label_ba1_v1)
    source_ba1_v1 = source[index]
    index = source.dimindex(label_v1_mt)
    source_v1_mt = source[index]
    index = source_ba1_v1.dimindex(source_v1_mt)
    source_v1_intersection = source_ba1_v1[index]
    assert_source_space_equal(source_v1, source_v1_intersection)

    # index from label
    index = source.index_for_label(label_v1)
    assert_array_equal(index.source[index.x].vertno[0],
                       np.intersect1d(source.lh_vertno, label_v1.vertices, 1))

    # parcellation and cluster localization
    if mne.__version__ < '0.8':
        return
    parc = mne.read_annot(subject, parc='aparc', subjects_dir=mri_sdir)
    indexes = [source.index_for_label(label) for label in parc
               if len(label) > 10]
    x = np.vstack([index.x for index in indexes])
    ds = source._cluster_properties(x)
    for i in xrange(ds.n_cases):
        assert_equal(ds[i, 'location'], parc[i].name)
Example no. 7
def loadannot_mne(p, subj, subjdir, labnam=None, surf_type='pial',
                  surf_struct=None, quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    from distutils.version import LooseVersion

    if LooseVersion(mne.__version__) >= LooseVersion('0.8'):
        # MNE-Python changed the annotation-reading API twice within the same
        # release cycle, so check for both names.
        try:
            annot = mne.read_labels_from_annot(parc=p, subject=subj, 
                surf_name=surf_type, subjects_dir=subjdir, verbose=verbosity)
        except:
            annot = mne.read_annot(parc=p, subject=subj,
                surf_name=surf_type, subjects_dir=subjdir, verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p, subject=subj, surf_name=surf_type,
            subjects_dir=subjdir, verbose=verbosity)
        annot = annot[0]  # discard the color table
    return annot
Example no. 8
def test_split_label():
    """Test splitting labels"""
    aparc = read_annot('fsaverage', 'aparc', 'lh', regexp='lingual',
                       subjects_dir=subjects_dir)
    lingual = aparc[0]

    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)

    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])

    # check vertices add up
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    lingual_reconst.color = lingual.color
    assert_labels_equal(lingual_reconst, lingual)

    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)

    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
    assert_array_equal(antmost.vertices, fs_vert)

    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")
Example no. 9
def test_write_annot():
    """Test writing FreeSurfer parcellation from labels"""

    labels = read_annot('sample', subjects_dir=subjects_dir)

    # write left and right hemi labels:
    fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]

    for fname in fnames:
        write_annot(labels, annot_fname=fname)

    # read it back
    labels2 = read_annot('sample', subjects_dir=subjects_dir,
                         annot_fname=fnames[0])
    labels22 = read_annot('sample', subjects_dir=subjects_dir,
                          annot_fname=fnames[1])
    labels2.extend(labels22)

    names = [label.name for label in labels2]

    for label in labels:
        idx = names.index(label.name)
        assert_labels_equal(label, labels2[idx])

    # same with label-internal colors
    for fname in fnames:
        write_annot(labels, annot_fname=fname, overwrite=True)
    labels3 = read_annot('sample', subjects_dir=subjects_dir,
                         annot_fname=fnames[0])
    labels33 = read_annot('sample', subjects_dir=subjects_dir,
                          annot_fname=fnames[1])
    labels3.extend(labels33)
    names3 = [label.name for label in labels3]
    for label in labels:
        idx = names3.index(label.name)
        assert_labels_equal(label, labels3[idx])

    # make sure we can't overwrite things
    assert_raises(ValueError, write_annot, labels, annot_fname=fnames[0])

    # however, this works
    write_annot(labels, annot_fname=fnames[0], overwrite=True)

    # label without color
    labels_ = labels[:]
    labels_[0] = labels_[0].copy()
    labels_[0].color = None
    write_annot(labels_, annot_fname=fnames[0], overwrite=True)

    # duplicate color
    labels_[0].color = labels_[2].color
    assert_raises(ValueError, write_annot, labels_, annot_fname=fnames[0],
                  overwrite=True)

    # invalid color inputs
    labels_[0].color = (1.1, 1., 1., 1.)
    assert_raises(ValueError, write_annot, labels_, annot_fname=fnames[0],
                  overwrite=True)

    # overlapping labels
    labels_ = labels[:]
    cuneus_lh = labels[6]
    precuneus_lh = labels[50]
    labels_.append(precuneus_lh + cuneus_lh)
    assert_raises(ValueError, write_annot, labels_, annot_fname=fnames[0],
                  overwrite=True)

    # unlabeled vertices
    labels_lh = [label for label in labels if label.name.endswith('lh')]
    write_annot(labels_lh[1:], 'sample', annot_fname=fnames[0], overwrite=True,
                subjects_dir=subjects_dir)
    labels_reloaded = read_annot('sample', annot_fname=fnames[0],
                                 subjects_dir=subjects_dir)
    assert_equal(len(labels_lh), len(labels_reloaded))
    label0 = labels_lh[0]
    label1 = labels_reloaded[-1]
    assert_equal(label1.name, "unknown-lh")
    assert_true(np.all(np.in1d(label0.vertices, label1.vertices)))
Example no. 10
# Compute inverse solution
stc = apply_inverse(evoked,
                    inverse_operator,
                    lambda2,
                    method,
                    pick_normal=True)

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_annot(subject,
                       parc='aparc',
                       subjects_dir=subjects_dir,
                       regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label,
                                  src=src,
                                  smooth=5,
                                  subjects_dir=subjects_dir,
                                  connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
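# (the snippet is truncated here; the likely continuation, mirroring the
#  read_annot call shown in example no. 12 below, would be something like)
anat_label = mne.read_annot(subject, parc='aparc', subjects_dir=subjects_dir,
                            regexp=aparc_label_name)[0]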
Example no. 12
# Load data
evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']  # get the source space

# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_normal=True)

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_annot(subject, parc='aparc', subjects_dir=subjects_dir,
                       regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=5,
                                  subjects_dir=subjects_dir, connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.read_annot(subject, parc='aparc', subjects_dir=subjects_dir,
                            regexp=aparc_label_name)[0]

# extract the anatomical time course for each label
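# (the snippet is cut off above; a minimal, assumed sketch of one way to get
#  the two time courses referred to by the comment, using the
#  SourceEstimate.extract_label_time_course method; 'mean_flip' is just one
#  of the available modes)
anat_tc = stc.extract_label_time_course(anat_label, src, mode='mean_flip')
func_tc = stc.extract_label_time_course(func_label, src, mode='mean_flip')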
Example no. 13
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
                                                    eog=150e-6))

# Compute the inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0  # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                            pick_ori="normal", return_generator=True)

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels = mne.read_annot('sample', parc='aparc', subjects_dir=subjects_dir)
label_colors = [label.color for label in labels]

# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
                                         return_generator=True)

# Now we are ready to compute the connectivity in the alpha band. Notice
# from the status messages, how mne-python: 1) reads an epoch from the raw
# file, 2) applies SSP and baseline correction, 3) computes the inverse to
# obtain a source estimate, 4) averages the source estimate to obtain a
# time series for each label, 5) includes the label time series in the
# connectivity computation, and then moves to the next epoch. This
# behaviour is because we are using generators, which lets us compute the
# connectivity without keeping all of the single-epoch source estimates in
# memory.
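# (the snippet is cut off above; a minimal, assumed sketch of the alpha-band
#  connectivity step the comment describes, using the spectral_connectivity
#  function shipped with MNE releases of this era; the 'pli' method,
#  multitaper mode and band limits are illustrative choices)
from mne.connectivity import spectral_connectivity

fmin, fmax = 8., 13.           # alpha band
sfreq = raw.info['sfreq']      # sampling frequency of the raw data
con, freqs, times, n_epochs, n_tapers = spectral_connectivity(
    label_ts, method='pli', mode='multitaper', sfreq=sfreq,
    fmin=fmin, fmax=fmax, faverage=True)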