Code Example #1
File: test_label.py  Project: dichaelen/mne-python
def test_labels_from_parc():
    """Test reading labels from FreeSurfer parcellation
    """
    # test some invalid inputs
    assert_raises(ValueError, labels_from_parc, 'sample', hemi='bla',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, labels_from_parc, 'sample',
                  annot_fname='bla.annot', subjects_dir=subjects_dir)

    # read labels using hemi specification
    labels_lh, colors_lh = labels_from_parc('sample', hemi='lh',
                                            subjects_dir=subjects_dir)
    for label in labels_lh:
        assert_true(label.name.endswith('-lh'))
        assert_true(label.hemi == 'lh')

    assert_true(len(labels_lh) == len(colors_lh))

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh, colors_rh = labels_from_parc('sample', annot_fname=annot_fname,
                                            subjects_dir=subjects_dir)

    assert_true(len(labels_rh) == len(colors_rh))

    for label in labels_rh:
        assert_true(label.name.endswith('-rh'))
        assert_true(label.hemi == 'rh')

    # combine the lh, rh, labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both, colors = labels_from_parc('sample', subjects_dir=subjects_dir)

    assert_true(len(labels_both) == len(colors))

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert_true(len(labels_both) == 68)

    # test regexp
    label = labels_from_parc('sample', parc='aparc.a2009s', regexp='Angu',
                             subjects_dir=subjects_dir)[0][0]
    assert_true(label.name == 'G_pariet_inf-Angular-lh')
    # silly, but real regexp:
    label = labels_from_parc('sample', parc='aparc.a2009s',
                             regexp='.*-.{4,}_.{3,3}-L',
                             subjects_dir=subjects_dir)[0][0]
    assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
    assert_raises(RuntimeError, labels_from_parc, 'sample', parc='aparc',
                  annot_fname=annot_fname, regexp='JackTheRipper',
                  subjects_dir=subjects_dir)
Code Example #2
File: test_label.py  Project: DonKrieger/mne-python
def test_parc_from_labels():
    """Test writing FreeSurfer parcellation from labels"""

    labels, colors = labels_from_parc('sample', subjects_dir=subjects_dir)

    # write left and right hemi labels:
    fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]

    for fname in fnames:
        parc_from_labels(labels, colors, annot_fname=fname)

    # read it back
    labels2, colors2 = labels_from_parc('sample', subjects_dir=subjects_dir,
                                        annot_fname=fnames[0])
    labels22, colors22 = labels_from_parc('sample', subjects_dir=subjects_dir,
                                          annot_fname=fnames[1])
    labels2.extend(labels22)
    colors2.extend(colors22)

    names = [label.name for label in labels2]

    for label, color in zip(labels, colors):
        idx = names.index(label.name)
        assert_labels_equal(label, labels2[idx])
        assert_array_almost_equal(np.array(color), np.array(colors2[idx]))

    # same with label-internal colors
    for fname in fnames:
        parc_from_labels(labels, annot_fname=fname, overwrite=True)
    labels3, _ = labels_from_parc('sample', subjects_dir=subjects_dir,
                                  annot_fname=fnames[0])
    labels33, _ = labels_from_parc('sample', subjects_dir=subjects_dir,
                                   annot_fname=fnames[1])
    labels3.extend(labels33)
    names3 = [label.name for label in labels3]
    for label in labels:
        idx = names3.index(label.name)
        assert_labels_equal(label, labels3[idx])

    # make sure we can't overwrite things
    assert_raises(ValueError, parc_from_labels, labels, colors,
                  annot_fname=fnames[0])

    # however, this works
    parc_from_labels(labels, colors=None, annot_fname=fnames[0],
                     overwrite=True)

    # test some other invalid inputs
    assert_raises(ValueError, parc_from_labels, labels[:-1], colors,
                  annot_fname=fnames[0], overwrite=True)
    colors2 = np.asarray(colors)
    assert_raises(ValueError, parc_from_labels, labels, colors2[:, :3],
                  annot_fname=fnames[0], overwrite=True)
    colors2[0] = 1.1
    assert_raises(ValueError, parc_from_labels, labels, colors2,
                  annot_fname=fnames[0], overwrite=True)
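
Note: labels_from_parc and parc_from_labels were later renamed in MNE-Python; current releases expose the same functionality as mne.read_labels_from_annot and mne.write_labels_to_annot. A minimal sketch of the same write/read round trip under that assumption (the subjects_dir path and the 'myparc' name are placeholders, not values from the examples):

import mne

subjects_dir = '/path/to/subjects_dir'  # placeholder, not from the examples
labels = mne.read_labels_from_annot('sample', parc='aparc',
                                    subjects_dir=subjects_dir)
# Write the labels out as a new parcellation, then read it back.
mne.write_labels_to_annot(labels, subject='sample', parc='myparc',
                          subjects_dir=subjects_dir, overwrite=True)
labels2 = mne.read_labels_from_annot('sample', parc='myparc',
                                     subjects_dir=subjects_dir)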
Code Example #3
File: test_label.py  Project: dichaelen/mne-python
def test_labels_from_parc_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels
    """

    def _mne_annot2labels(subject, subjects_dir, parc):
        """Get labels using mne_annot2lables"""
        label_dir = _TempDir()
        cwd = os.getcwd()
        try:
            os.chdir(label_dir)
            env = os.environ.copy()
            env['SUBJECTS_DIR'] = subjects_dir
            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
            run_subprocess(cmd, env=env)
            label_fnames = glob.glob(label_dir + '/*.label')
            label_fnames.sort()
            labels = [read_label(fname) for fname in label_fnames]
        finally:
            del label_dir
            os.chdir(cwd)

        return labels

    labels, _ = labels_from_parc('sample', subjects_dir=subjects_dir)
    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Code Example #4
def loadannot_mne(p,
                  subj,
                  subjdir,
                  labnam=None,
                  surf_type='pial',
                  surf_struct=None,
                  quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    from distutils.version import LooseVersion

    if LooseVersion(mne.__version__) >= LooseVersion('0.8'):
        #MNE-Python changed the annotation-reading API twice in the same
        #release cycle, so check for both versions.
        try:
            annot = mne.read_labels_from_annot(parc=p,
                                               subject=subj,
                                               surf_name=surf_type,
                                               subjects_dir=subjdir,
                                               verbose=verbosity)
        except:
            annot = mne.read_annot(parc=p,
                                   subject=subj,
                                   surf_name=surf_type,
                                   subjects_dir=subjdir,
                                   verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p,
                                     subject=subj,
                                     surf_name=surf_type,
                                     subjects_dir=subjdir,
                                     verbose=verbosity)
        annot = annot[0]  #discard the color table
    return annot
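
For reference, on recent MNE-Python versions the compatibility shim above reduces to a single call; a minimal sketch under that assumption (subject, parcellation, and subjects_dir values are illustrative):

import mne

annot = mne.read_labels_from_annot(subject='sample', parc='aparc',
                                   surf_name='pial',
                                   subjects_dir='/path/to/subjects_dir',
                                   verbose='WARNING')
# Unlike labels_from_parc, no color table is returned, so annot[0] is not needed.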
Code Example #5
File: test_label.py  Project: mshamalainen/mne-python
def test_labels_from_parc_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels
    """

    def _mne_annot2labels(subject, subjects_dir, parc):
        """Get labels using mne_annot2lables"""
        label_dir = _TempDir()
        cwd = os.getcwd()
        try:
            os.chdir(label_dir)
            cmd = 'mne_annot2labels --subject %s --parc %s' % (subject, parc)
            st, output = commands.getstatusoutput(cmd)
            if st != 0:
                raise RuntimeError('mne_annot2labels non-zero exit status %d'
                                   % st)
            label_fnames = glob.glob(label_dir + '/*.label')
            label_fnames.sort()
            labels = [read_label(fname) for fname in label_fnames]
        finally:
            del label_dir
            os.chdir(cwd)

        return labels

    labels, _ = labels_from_parc('sample', subjects_dir=subjects_dir)
    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Code Example #6
def test_split_label():
    aparc = labels_from_parc('fsaverage', 'aparc', 'lh', regexp='lingual',
                             subjects_dir=subjects_dir)[0]
    lingual = aparc[0]

    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)

    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])

    # check vertices add up
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    assert_labels_equal(lingual_reconst, lingual)

    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)

    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
    assert_array_equal(antmost.vertices, fs_vert)

    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")
Code Example #7
def test_labels_from_parc_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels
    """
    def _mne_annot2labels(subject, subjects_dir, parc):
        """Get labels using mne_annot2lables"""
        label_dir = _TempDir()
        cwd = os.getcwd()
        try:
            os.chdir(label_dir)
            env = os.environ.copy()
            env['SUBJECTS_DIR'] = subjects_dir
            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
            run_subprocess(cmd, env=env)
            label_fnames = glob.glob(label_dir + '/*.label')
            label_fnames.sort()
            labels = [read_label(fname) for fname in label_fnames]
        finally:
            del label_dir
            os.chdir(cwd)

        return labels

    labels, _ = labels_from_parc('sample', subjects_dir=subjects_dir)
    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Code Example #8
File: test_label.py  Project: mshamalainen/mne-python
def test_labels_from_parc():
    """Test reading labels from parcellation
    """
    # test some invalid inputs
    assert_raises(ValueError, labels_from_parc, 'sample', hemi='bla',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, labels_from_parc, 'sample',
                  annot_fname='bla.annot', subjects_dir=subjects_dir)

    # read labels using hemi specification
    labels_lh, colors_lh = labels_from_parc('sample', hemi='lh',
                                            subjects_dir=subjects_dir)
    for label in labels_lh:
        assert_true(label.name.endswith('-lh'))
        assert_true(label.hemi == 'lh')

    assert_true(len(labels_lh) == len(colors_lh))

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh, colors_rh = labels_from_parc('sample', annot_fname=annot_fname,
                                            subjects_dir=subjects_dir)

    assert_true(len(labels_rh) == len(colors_rh))

    for label in labels_rh:
        assert_true(label.name.endswith('-rh'))
        assert_true(label.hemi == 'rh')

    # combine the lh, rh, labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both, colors = labels_from_parc('sample', subjects_dir=subjects_dir)

    assert_true(len(labels_both) == len(colors))

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert_true(len(labels_both) == 68)
Code Example #9
File: preprocessing.py  Project: mick-d/cvu
def loadannot_mne(p,subj,subjdir,labnam=None,surf_type='pial',surf_struct=None,
		quiet=False):

	verbosity = 'ERROR' if quiet else 'WARNING'

	if float(mne.__version__[:3]) >= 0.8:
		annot = mne.read_annot(parc=p, subject=subj, surf_name=surf_type,
			subjects_dir=subjdir, verbose=verbosity)
	else:
		annot = mne.labels_from_parc(parc=p, subject=subj, surf_name=surf_type,
			subjects_dir=subjdir, verbose=verbosity)
		annot = annot[0] #discard the color table
	return annot
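
The float(mne.__version__[:3]) comparison above breaks for two-digit minor releases such as '0.10'; Code Examples #4 and #10 avoid this with LooseVersion. A minimal sketch of that safer check:

from distutils.version import LooseVersion

import mne

# Handles '0.10', '0.11', ... correctly, unlike float(mne.__version__[:3]).
new_api = LooseVersion(mne.__version__) >= LooseVersion('0.8')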
Code Example #10
File: preprocessing.py  Project: aestrivex/cvu
def loadannot_mne(p,subj,subjdir,labnam=None,surf_type='pial',surf_struct=None,
        quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    from distutils.version import LooseVersion

    if LooseVersion(mne.__version__) >= LooseVersion('0.8'):
        #MNE-Python changed the annotation-reading API twice in the same
        #release cycle, so check for both versions.
        try:
            annot = mne.read_labels_from_annot(parc=p, subject=subj, 
                surf_name=surf_type, subjects_dir=subjdir, verbose=verbosity)
        except:
            annot = mne.read_annot(parc=p, subject=subj,
                surf_name=surf_type, subjects_dir=subjdir, verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p, subject=subj, surf_name=surf_type,
            subjects_dir=subjdir, verbose=verbosity)
        annot = annot[0] #discard the color table
    return annot
Code Example #11
def test_split_label():
    aparc = labels_from_parc('fsaverage',
                             'aparc',
                             'lh',
                             regexp='lingual',
                             subjects_dir=subjects_dir)[0]
    lingual = aparc[0]

    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)

    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])

    # check vertices add up
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    assert_labels_equal(lingual_reconst, lingual)

    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)

    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    fs_vert = [
        210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716, 32719,
        36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283, 71864, 71865,
        71866, 71874, 71883, 79901, 79903, 79910, 103024, 107849, 107850,
        122928, 139356, 139357, 139373, 139374, 139375, 139376, 139377, 139378,
        139381, 149117, 149118, 149120, 149127
    ]
    assert_array_equal(antmost.vertices, fs_vert)

    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")
Code Example #12
File: cvu_utils.py  Project: mick-d/cvu
def loadannot(p,subj,subjdir,surf_type='pial'):
	import mne
	annot=mne.labels_from_parc(parc=p,subject=subj,surf_name=surf_type,
		subjects_dir=subjdir,verbose=False)
	return annot
Code Example #13
empty_room_dir = home+'/data/meg/empty_room/'
res = np.recfromtxt(empty_room_dir + 'closest_empty_room_data.csv',skip_header=0,delimiter=',')
window_length=13.65  #s
closest_empty_room = {}
for rec in res:
    closest_empty_room[rec[0]] = str(rec[2])

subjs_fname = home+'/data/meg/usable_subjects_5segs13p654.txt'
markers_fname = home+'/data/meg/marker_data_clean.npy'

fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]

markers = np.load(markers_fname)[()]

labels = mne.labels_from_parc('fsaverage', parc='Yeo2011_7Networks_N1000')[0]
net_labels = labels[:-2] # the last two are the medial wall
# fill them in so we can morph them later
for label in net_labels:
    label.values.fill(1.0)

for subj in subjs[:1]:
    er_fname = empty_room_dir+'empty_room_'+closest_empty_room[subj]+'_raw.fif'
    raw_fname = data_dir + 'fifs/rest/%s_rest_LP100_CP3_DS300_raw.fif'%subj
    fwd_fname = data_dir + 'analysis/rest/%s_rest_LP100_CP3_DS300_raw-5-fwd.fif'%subj
    forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
    raw = mne.fiff.Raw(raw_fname, preload=True, compensation=3)
    er_raw = mne.fiff.Raw(er_fname, preload=True, compensation=3)
    picks = mne.fiff.pick_channels_regexp(raw.info['ch_names'], 'M..-*')
    raw.filter(l_freq=1, h_freq=50, picks=picks)
    er_raw.filter(l_freq=1, h_freq=50, picks=picks)
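
The label.values.fill(1.0) loop above prepares the fsaverage Yeo network labels for morphing to each subject; a minimal sketch of that later step, assuming a recent MNE-Python where Label.morph is available (subj and net_labels come from the snippet, and subjects_dir is assumed to point at the FreeSurfer subjects directory):

# Sketch only: morph each fsaverage network label to the current subject.
subject_labels = [label.copy().morph(subject_from='fsaverage', subject_to=subj,
                                     smooth=5, subjects_dir=subjects_dir)
                  for label in net_labels]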
Code Example #14
                    skip_header=0,
                    delimiter=',')
window_length = 13.65  #s
closest_empty_room = {}
for rec in res:
    closest_empty_room[rec[0]] = str(rec[2])

subjs_fname = home + '/data/meg/usable_subjects_5segs13p654.txt'
markers_fname = home + '/data/meg/marker_data_clean.npy'

fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]

markers = np.load(markers_fname)[()]

labels = mne.labels_from_parc('fsaverage', parc='Yeo2011_7Networks_N1000')[0]
net_labels = labels[:-2]  # the last two are the medial wall
# fill them in so we can morph them later
for label in net_labels:
    label.values.fill(1.0)

for subj in subjs[:1]:
    er_fname = empty_room_dir + 'empty_room_' + closest_empty_room[
        subj] + '_raw.fif'
    raw_fname = data_dir + 'fifs/rest/%s_rest_LP100_CP3_DS300_raw.fif' % subj
    fwd_fname = data_dir + 'analysis/rest/%s_rest_LP100_CP3_DS300_raw-5-fwd.fif' % subj
    forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
    raw = mne.fiff.Raw(raw_fname, preload=True, compensation=3)
    er_raw = mne.fiff.Raw(er_fname, preload=True, compensation=3)
    picks = mne.fiff.pick_channels_regexp(raw.info['ch_names'], 'M..-*')
    raw.filter(l_freq=1, h_freq=50, picks=picks)
Code Example #15
def test_labels_from_parc():
    """Test reading labels from parcellation
    """
    # test some invalid inputs
    assert_raises(ValueError,
                  labels_from_parc,
                  'sample',
                  hemi='bla',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError,
                  labels_from_parc,
                  'sample',
                  annot_fname='bla.annot',
                  subjects_dir=subjects_dir)

    # read labels using hemi specification
    labels_lh, colors_lh = labels_from_parc('sample',
                                            hemi='lh',
                                            subjects_dir=subjects_dir)
    for label in labels_lh:
        assert_true(label.name.endswith('-lh'))
        assert_true(label.hemi == 'lh')

    assert_true(len(labels_lh) == len(colors_lh))

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh, colors_rh = labels_from_parc('sample',
                                            annot_fname=annot_fname,
                                            subjects_dir=subjects_dir)

    assert_true(len(labels_rh) == len(colors_rh))

    for label in labels_rh:
        assert_true(label.name.endswith('-rh'))
        assert_true(label.hemi == 'rh')

    # combine the lh, rh, labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both, colors = labels_from_parc('sample', subjects_dir=subjects_dir)

    assert_true(len(labels_both) == len(colors))

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert_true(len(labels_both) == 68)

    # test regexp
    label = labels_from_parc('sample',
                             parc='aparc.a2009s',
                             regexp='Angu',
                             subjects_dir=subjects_dir)[0][0]
    assert_true(label.name == 'G_pariet_inf-Angular-lh')
    label = labels_from_parc(
        'sample',
        parc='aparc.a2009s',
        regexp='.*-.{4,}_.{3,3}-L',  # silly, but real regexp
        subjects_dir=subjects_dir)[0][0]
    assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
    assert_raises(RuntimeError,
                  labels_from_parc,
                  'sample',
                  parc='aparc',
                  annot_fname=annot_fname,
                  regexp='JackTheRipper',
                  subjects_dir=subjects_dir)
Code Example #16
# selected_labels = ['isthmuscingulate-rh', 'superiorfrontal-rh', 'inferiorparietal-rh', 'isthmuscingulate-lh', 'superiorfrontal-lh', 'inferiorparietal-lh']
# selected_labels = ['isthmuscingulate-rh', 'superiorfrontal-rh', 'inferiorparietal-rh']

fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]
fid1 = open(g1_fname, 'r')
fid2 = open(g2_fname, 'r')
g1 = [line.rstrip() for line in fid1]
g2 = [line.rstrip() for line in fid2]

print 'g1 =', g1_fname
print 'g2 =', g2_fname
m = ['pli', 'imcoh', 'plv', 'wpli', 'pli2_unbiased', 'wpli2_debiased']
print lmethod, '-', m[cmethod]

labels, label_colors = mne.labels_from_parc(subjs[0], parc='aparc')
nlabels = len(labels)
il = np.tril_indices(nlabels, k=-1)
if len(selected_labels) > 0:
    label_names = [l.name for l in labels]
    idx = [
        l for s in selected_labels for l, label in enumerate(label_names)
        if label == s
    ]
    keep = [False] * len(il[0])
    for i in idx:
        for j in idx:
            keep = keep | ((il[0] == i) & (il[1] == j))
    il = [il[0][keep], il[1][keep]]

g1_data = [[] for b in range(len(bands))]
Code Example #17
File: Avnielish.py  Project: gsudre/research_code
    er_raw = mne.fiff.Raw(er_fname, preload=True, compensation=3)
    picks = mne.fiff.pick_channels_regexp(raw.info["ch_names"], "M..-*")
    raw.filter(l_freq=1, h_freq=50, picks=picks)
    er_raw.filter(l_freq=1, h_freq=50, picks=picks)

    noise_cov = mne.compute_raw_data_covariance(er_raw)
    # note that MNE reads CTF data as magnetometers!
    noise_cov = mne.cov.regularize(noise_cov, raw.info, mag=noise_reg)
    inverse_operator = mne.minimum_norm.make_inverse_operator(raw.info, forward, noise_cov, loose=0.2, depth=0.8)
    data, time = raw[0, :]  #
    events = fg.get_good_events(markers[subj], time, window_length)

    epochs = mne.Epochs(raw, events, None, 0, window_length, preload=True, baseline=None, detrend=0, picks=picks)
    stcs = mne.minimum_norm.apply_inverse_epochs(epochs, inverse_operator, lambda2, "MNE", return_generator=False)

    labels, label_colors = mne.labels_from_parc(subj, parc="aparc")
    label_ts = mne.extract_label_time_course(stcs, labels, forward["src"], mode=label_mode)

    # label_ts is nlabels by time, so here we can use whatever connectivity method we fancy
    con, freqs, times, n_epochs, n_tapers = mne.connectivity.spectral_connectivity(
        label_ts,
        method=method,
        mode="multitaper",
        sfreq=raw.info["sfreq"],
        fmin=[1, 4, 8, 13, 30],
        fmax=[4, 8, 13, 30, 50],
        faverage=True,
        n_jobs=3,
        mt_adaptive=False,
    )
    np.save(dir_out + subj + "-" + label_mode + "-" + "-".join(method), con)
Code Example #18
# stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
#                            pick_ori="normal", return_generator=True)
#
##vertices_to = mne.grade_to_vertices('fsaverage', grade = 5)
##stcs  = mne.morph_data(subj, 'fsaverage', stcs_orig, grade = vertices_to)
##teststc_fname = data_path + 'ave_projon/stc/'+ subj + '_run1-spm-test-lh.stc'
###stcs.save(teststc_fname)
##
########################################################################################3
########Connectivity Circle Plotting############
# print inverse_operator['src']
from mne.viz import circular_layout, plot_connectivity_circle

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels per hemi
labels, label_colors = mne.labels_from_parc(
    subj, parc="aparc", subjects_dir=subjects_dir
)  ##or use read_labels_from_annot()
print labels

label_names = [label.name for label in labels]
# for label in label_names:
#    print label

lh_labels = [name for name in label_names if name.endswith("lh")]

###############################Labels Temporal
import numpy as np

# temporals = ['lh.superiortemporal', 'lh.inferiortemporal' , 'lh.middletemporal',  'lh.transversetemporal', 'lh.entorhinal','lh.temporalpole', 'lh.parahippocampal']
num_plots = 20
if subj == "EP1":
Code Example #19
selected_labels = []
# selected_labels = ['isthmuscingulate-rh', 'superiorfrontal-rh', 'inferiorparietal-rh', 'isthmuscingulate-lh', 'superiorfrontal-lh', 'inferiorparietal-lh']
# selected_labels = ['isthmuscingulate-rh', 'superiorfrontal-rh', 'inferiorparietal-rh']

fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]
res = np.recfromtxt(sx_fname, delimiter='\t')
sx = {}
for rec in res:
    sx[rec[0]] = rec[1]

print 'sx =',sx_fname
m = ['pli','imcoh','plv','wpli','pli2_unbiased','wpli2_debiased']
print lmethod, '-', m[cmethod]

labels, label_colors = mne.labels_from_parc(subjs[0], parc='aparc')
nlabels=len(labels)
il = np.tril_indices(nlabels, k=-1)
if len(selected_labels)>0:
    label_names = [l.name for l in labels]
    idx = [l for s in selected_labels for l, label in enumerate(label_names) if label == s]
    keep = [False]*len(il[0])
    for i in idx:
        for j in idx:
            keep = keep | ((il[0]==i) & (il[1]==j))
    il = [il[0][keep], il[1][keep]]


subj_data = [[] for b in range(len(bands))]
sx_data = []
for s in subjs:
Code Example #20
# Compute inverse solution
stc = apply_inverse(evoked,
                    inverse_operator,
                    lambda2,
                    method,
                    pick_normal=True)

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.labels_from_parc(subject,
                             parc='aparc',
                             subjects_dir=subjects_dir,
                             regexp=aparc_label_name)[0][0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label,
                                  src=src,
                                  smooth=5,
                                  subjects_dir=subjects_dir,
                                  connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
Code Example #21
# Compute the inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0  # use lower SNR for single epochs
lambda2 = 1.0 / snr**2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs,
                            inverse_operator,
                            lambda2,
                            method,
                            pick_ori="normal",
                            return_generator=True)

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels, label_colors = mne.labels_from_parc('sample',
                                            parc='aparc',
                                            subjects_dir=subjects_dir)

# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs,
                                         labels,
                                         src,
                                         mode='mean_flip',
                                         return_generator=True)

# Now we are ready to compute the connectivity in the alpha band. Notice
# from the status messages, how mne-python: 1) reads an epoch from the raw
# file, 2) applies SSP and baseline correction, 3) computes the inverse to
# obtain a source estimate, 4) averages the source estimate to obtain a
Code Example #22
File: diff_Conn.py  Project: CandidaUstine/MCW_MEG
        else: 
            con_diff_temp[i,:].fill(0)  
            con_t_temp[i,:].fill(0)
      
####################################################################################################
########################## 2 sample T test - Hotelling T squared statistic  ##########################################################
#tsq_matrix_fname = '/home/custine/MEG/results/source_level/ConnectivityPlots/textfiles/Hotel-Tsq_GrandAvgConnectivityMatrix_' + freq + '_' + gp1 + '-' + gp2 + '.txt'
#tsq_plot_fname = '/home/custine/MEG/results/source_level/ConnectivityPlots/figures/Hotel-Tsq_GrandAvgConnectivityPlot_' + freq + '_' + gp1 + '-' + gp2 + '.png'
#
#tsq = np.loadtxt(tsq_matrix_fname, delimiter = ' ')
#print tsq
#     
##############################################################################################
##############################################################################################
###############Plotting the Conn - Diff or T test stats##################
labels, label_colors = mne.labels_from_parc('fsaverage', parc='aparc', subjects_dir=subjects_dir) ##or use read_labels_from_annot() 
print

#### Now, we visualize the connectivity using a circular graph layout
# First, we reorder the labels based on their location in the left hemi
label_names = [label.name for label in labels]
#print label_names[:-1] #### TO GET RID OF UNKNOWN LABEL.LH 
label_names = label_names[:-1]
for name in label_names:
    print name
#np.savetxt('/home/custine/MEG/results/source_level/ConnectivityPlots/label_names.txt', label_names)  
lh_labels = [name for name in label_names if name.endswith('lh')]

#print len(lh_labels)

# Get the y-location of the label
Code Example #23
                        events,
                        None,
                        0,
                        window_length,
                        preload=True,
                        baseline=None,
                        detrend=0,
                        picks=picks)
    stcs = mne.beamformer.lcmv_epochs(epochs,
                                      forward,
                                      noise_cov.as_diag(),
                                      data_cov,
                                      reg=data_reg,
                                      pick_ori='max-power')

    labels, label_colors = mne.labels_from_parc(subj,
                                                parc='Yeo2011_7Networks_N1000')
    label_ts = mne.extract_label_time_course(stcs,
                                             labels,
                                             forward['src'],
                                             mode=label_mode)

    # label_ts is nlabels by time, so here we can use whatever connectivity method we fancy
    con, freqs, times, n_epochs, n_tapers = mne.connectivity.spectral_connectivity(
        label_ts,
        method=method,
        mode='multitaper',
        sfreq=raw.info['sfreq'],
        fmin=[1, 4, 8, 13, 30],
        fmax=[4, 8, 13, 30, 50],
        faverage=True,
        n_jobs=3,
Code Example #24
# Load data
evoked = Evoked(fname_evoked, setno=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']  # get the source space

# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_normal=True)

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.labels_from_parc(subject, parc='aparc', subjects_dir=subjects_dir,
                             regexp=aparc_label_name)[0][0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=5,
                                  subjects_dir=subjects_dir, connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.labels_from_parc(subject, parc='aparc',
                                  subjects_dir=subjects_dir,
                                  regexp=aparc_label_name)[0][0]
Code Example #25
in_network = ['superiorfrontal-rh', 'inferiorparietal-rh']
bands = [[1, 4], [4, 8], [8, 13], [13, 30], [30, 50]]
subjs_fname = '/Users/sudregp/data/meg/usable_subjects_pm2std_withFamily.txt'
data_dir = '/Users/sudregp/data/meg_diagNoise_noiseRegp03_dataRegp001/'
g1_fname = '/Users/sudregp/data/meg/nv_subjs.txt'

fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]
fid1 = open(g1_fname, 'r')
g1 = [line.rstrip() for line in fid1]

in_conn = [[] for b in range(5)]
out_conn = [[] for b in range(5)]
for s in subjs:
    if s in g1:
        labels, label_colors = mne.labels_from_parc(s, parc='aparc')
        label_names = [l.name for l in labels]
        fname = data_dir + 'connectivity/pli-%s.npy' % (s)
        conn = np.load(fname)[()]
        seed_idx = label_names.index(seed)
        hemi_labels = [l for l in label_names if l.find('-' + hemi) > 0]
        out_network = list(
            np.setdiff1d(set(hemi_labels), set(in_network + [seed])))
        for b in range(5):
            in_conn[b].append(
                np.mean([
                    conn[seed_idx, label_names.index(i), b] for i in in_network
                ]))
            out_conn[b].append(
                np.mean([
                    conn[seed_idx, label_names.index(i), b]
Code Example #26
import mne
import numpy as np


bands = [[1, 4], [4, 8], [8, 13], [13, 30], [30, 50]]
subjs_fname = '/Users/sudregp/data/meg/usable_subjects_pm2std.txt'
data_dir = '/Users/sudregp/data/meg_diagNoise_noiseRegp03_dataRegp001/'

fid = open(subjs_fname, 'r')
subjs = [line.rstrip() for line in fid]
dir_out = data_dir + '/connectivity/'
for cnt, s in enumerate(subjs[100:]):
    print cnt+1, '/', len(subjs), ':', s
    labels, label_colors = mne.labels_from_parc(s, parc='aparc')
    fwd_fname = '/mnt/neuro/MEG_data/analysis/rest/%s_rest_LP100_CP3_DS300_raw-5-fwd.fif'%s
    forward = mne.read_forward_solution(fwd_fname, surf_ori=True)
    for l_freq, h_freq in bands:
        print 'Band %d to %d Hz'%(l_freq, h_freq)
        fname = data_dir + 'lcmv-%dto%d-'%(l_freq,h_freq) + s
        stc = mne.read_source_estimate(fname)
        label_data = stc.extract_label_time_course(labels=labels,src=forward['src'],mode='pca_flip',allow_empty=True)
        # label_data is nlabels by time, so here we can use whatever connectivity method we fancy
        conn = np.corrcoef(label_data)
        np.save(dir_out + 'labelPCACorrelation-%dto%d-'%(l_freq,h_freq) + s, conn)

Code Example #27
# Define epochs for left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
                                                    eog=150e-6))

# Compute the inverse solution for each epoch. By using "return_generator=True"
# stcs will be a generator object instead of a list.
snr = 1.0  # use lower SNR for single epochs
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,
                            pick_ori="normal", return_generator=True)

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels, label_colors = mne.labels_from_parc('sample', parc='aparc',
                                            subjects_dir=subjects_dir)

# Average the source estimates within each label using sign-flips to reduce
# signal cancellations, also here we return a generator
src = inverse_operator['src']
label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip',
                                         return_generator=True)

# Now we are ready to compute the connectivity in the alpha band. Notice
# from the status messages, how mne-python: 1) reads an epoch from the raw
# file, 2) applies SSP and baseline correction, 3) computes the inverse to
# obtain a source estimate, 4) averages the source estimate to obtain a
# time series for each label, 5) includes the label time series in the
# connectivity computation, and then moves to the next epoch. This
# behaviour is because we are using generators and allows us to
# compute connectivity in computationally efficient manner where the amount
Code Example #28
def test_parc_from_labels():
    """Test writing FreeSurfer parcellation from labels"""

    labels, colors = labels_from_parc('sample', subjects_dir=subjects_dir)

    # write left and right hemi labels:
    fnames = ['%s/%s-myparc' % (tempdir, hemi) for hemi in ['lh', 'rh']]

    for fname in fnames:
        parc_from_labels(labels, colors, annot_fname=fname)

    # read it back
    labels2, colors2 = labels_from_parc('sample',
                                        subjects_dir=subjects_dir,
                                        annot_fname=fnames[0])
    labels22, colors22 = labels_from_parc('sample',
                                          subjects_dir=subjects_dir,
                                          annot_fname=fnames[1])
    labels2.extend(labels22)
    colors2.extend(colors22)

    names = [label.name for label in labels2]

    for label, color in zip(labels, colors):
        idx = names.index(label.name)
        assert_labels_equal(label, labels2[idx])
        assert_array_almost_equal(np.array(color), np.array(colors2[idx]))

    # make sure we can't overwrite things
    assert_raises(ValueError,
                  parc_from_labels,
                  labels,
                  colors,
                  annot_fname=fnames[0])

    # however, this works
    parc_from_labels(labels,
                     colors=None,
                     annot_fname=fnames[0],
                     overwrite=True)

    # test some other invalid inputs
    assert_raises(ValueError,
                  parc_from_labels,
                  labels[:-1],
                  colors,
                  annot_fname=fnames[0],
                  overwrite=True)
    colors2 = np.asarray(colors)
    assert_raises(ValueError,
                  parc_from_labels,
                  labels,
                  colors2[:, :3],
                  annot_fname=fnames[0],
                  overwrite=True)
    colors2[0] = 1.1
    assert_raises(ValueError,
                  parc_from_labels,
                  labels,
                  colors2,
                  annot_fname=fnames[0],
                  overwrite=True)