Example #1
def test_combination_label():
    "Test combination label creation"
    labels = {l.name: l for l in
              mne.read_labels_from_annot('fsaverage', subjects_dir=subjects_dir)}

    # standard
    l = combination_label('temporal', "superiortemporal + middletemporal + inferiortemporal", labels)
    lh = labels['superiortemporal-lh'] + labels['middletemporal-lh'] + labels['inferiortemporal-lh']
    rh = labels['superiortemporal-rh'] + labels['middletemporal-rh'] + labels['inferiortemporal-rh']
    eq_(len(l), 2)
    eq_(l[0].name, 'temporal-lh')
    eq_(l[1].name, 'temporal-rh')
    assert_array_equal(l[0].vertices, lh.vertices)
    assert_array_equal(l[1].vertices, rh.vertices)

    # only rh
    l = combination_label('temporal-rh', "superiortemporal + middletemporal + inferiortemporal", labels)
    eq_(len(l), 1)
    eq_(l[0].name, 'temporal-rh')
    assert_array_equal(l[0].vertices, rh.vertices)

    # names with .
    labels = {l.name: l for l in
              mne.read_labels_from_annot('fsaverage', 'PALS_B12_Brodmann', subjects_dir=subjects_dir)}
    l = combination_label('Ba38-lh', "Brodmann.38", labels)[0]
    assert_array_equal(l.vertices, labels['Brodmann.38-lh'].vertices)
Example #2
def test_combination_label():
    "Test combination label creation"
    labels = {l.name: l for l in
              mne.read_labels_from_annot('fsaverage', subjects_dir=subjects_dir)}

    # standard
    l = combination_label('temporal', "superiortemporal + middletemporal + inferiortemporal",
                          labels, subjects_dir)
    lh = labels['superiortemporal-lh'] + labels['middletemporal-lh'] + labels['inferiortemporal-lh']
    lh.name = 'temporal-lh'
    rh = labels['superiortemporal-rh'] + labels['middletemporal-rh'] + labels['inferiortemporal-rh']
    rh.name = 'temporal-rh'
    eq_(len(l), 2)
    assert_labels_equal(l[0], lh)
    assert_labels_equal(l[1], rh)

    # only rh
    l = combination_label('temporal-rh', "superiortemporal + middletemporal + inferiortemporal",
                          labels, subjects_dir)
    eq_(len(l), 1)
    eq_(l[0].name, 'temporal-rh')
    assert_array_equal(l[0].vertices, rh.vertices)

    # with split_label
    l2 = combination_label('temporal-rh', "superiortemporal + middletemporal +"
                                          "split(inferiortemporal, 2)[0] +"
                                          "split(inferiortemporal, 2)[1]",
                           labels, subjects_dir)
    assert_labels_equal(l2[0], l[0], comment=False, color=False)

    # names with .
    labels = {l.name: l for l in
              mne.read_labels_from_annot('fsaverage', 'PALS_B12_Lobes', subjects_dir=subjects_dir)}
    l = combination_label('frontal-lh', "LOBE.FRONTAL", labels, subjects_dir)[0]
    assert_array_equal(l.vertices, labels['LOBE.FRONTAL-lh'].vertices)
Example #3
def test_morph_labels():
    """Test morph_labels."""
    # Just process the first 5 labels for speed
    parc_fsaverage = read_labels_from_annot('fsaverage',
                                            'aparc',
                                            subjects_dir=subjects_dir)[:5]
    parc_sample = read_labels_from_annot('sample',
                                         'aparc',
                                         subjects_dir=subjects_dir)[:5]
    parc_fssamp = morph_labels(parc_fsaverage,
                               'sample',
                               subjects_dir=subjects_dir)
    for lf, ls, lfs in zip(parc_fsaverage, parc_sample, parc_fssamp):
        assert lf.hemi == ls.hemi == lfs.hemi
        assert lf.name == ls.name == lfs.name
        perc_1 = np.in1d(lfs.vertices, ls.vertices).mean() * 100
        perc_2 = np.in1d(ls.vertices, lfs.vertices).mean() * 100
        # Ideally this would be 100%, but we do not use the same algorithm
        # as FreeSurfer ...
        assert perc_1 > 92
        assert perc_2 > 88
    with pytest.raises(ValueError, match='wrong and fsaverage'):
        morph_labels(parc_fsaverage,
                     'sample',
                     subjects_dir=subjects_dir,
                     subject_from='wrong')
    with pytest.raises(RuntimeError, match='Number of surface vertices'):
        _load_vert_pos('sample', subjects_dir, 'white', 'lh', 1)
    for label in parc_fsaverage:
        label.subject = None
    with pytest.raises(ValueError, match='subject_from must be provided'):
        morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir)
Example #5
def test_morph_labels():
    """Test morph_labels."""
    # Just process the first 5 labels for speed
    parc_fsaverage = read_labels_from_annot(
        'fsaverage', 'aparc', subjects_dir=subjects_dir)[:5]
    parc_sample = read_labels_from_annot(
        'sample', 'aparc', subjects_dir=subjects_dir)[:5]
    parc_fssamp = morph_labels(
        parc_fsaverage, 'sample', subjects_dir=subjects_dir)
    for lf, ls, lfs in zip(parc_fsaverage, parc_sample, parc_fssamp):
        assert lf.hemi == ls.hemi == lfs.hemi
        assert lf.name == ls.name == lfs.name
        perc_1 = np.in1d(lfs.vertices, ls.vertices).mean() * 100
        perc_2 = np.in1d(ls.vertices, lfs.vertices).mean() * 100
        # Ideally this would be 100%, but we do not use the same algorithm
        # as FreeSurfer ...
        assert perc_1 > 92
        assert perc_2 > 88
    with pytest.raises(ValueError, match='wrong and fsaverage'):
        morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir,
                     subject_from='wrong')
    with pytest.raises(RuntimeError, match='Number of surface vertices'):
        _load_vert_pos('sample', subjects_dir, 'white', 'lh', 1)
    for label in parc_fsaverage:
        label.subject = None
    with pytest.raises(ValueError, match='subject_from must be provided'):
        morph_labels(parc_fsaverage, 'sample', subjects_dir=subjects_dir)
Example #6
def add_motor_labels(brain, src_df):
    try:
        lnames = src_df.index.levels[0].map(lambda l: l[:-7]).unique()
    except AttributeError:
        lnames = src_df.index.map(lambda l: l[:-7]).unique()
    
    premotor = ss.Glasser_areas[
            ss.Glasser_areas['main section']==8]['area name'].values
    
    for hemi in brain.geo.keys():
        labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1_5_8', 
                                            hemi=hemi)
        
        hemilnames = lnames[list(lnames.map(
                lambda s: s[0] == hemi[0].upper()).values)]
        
        for label in labels:
            if label.name in hemilnames:
                if label.name[2:-7] in premotor:
                    col = 'k'
                else:
                    col = 'w'
                brain.add_label(label, borders=True, hemi=hemi, alpha=0.6, 
                                color=col)
                
        for lname, col in zip(['UpperExtremity', 'Ocular'], 
                              ['#225500', '#44aa00']):
            mlabel = mne.read_labels_from_annot(
                    'fsaverage', parc='HCPMMP1_motor', hemi=hemi, regexp=lname)
            brain.add_label(mlabel[0], borders=True, hemi=hemi, alpha=1, 
                            color=col)
Example #7
def test_read_labels_from_annot():
    """Test reading labels from FreeSurfer parcellation
    """
    # test some invalid inputs
    assert_raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, read_labels_from_annot, 'sample',
                  annot_fname='bla.annot', subjects_dir=subjects_dir)

    # read labels using hemi specification
    labels_lh = read_labels_from_annot('sample', hemi='lh',
                                       subjects_dir=subjects_dir)
    for label in labels_lh:
        assert_true(label.name.endswith('-lh'))
        assert_true(label.hemi == 'lh')
        # XXX fails on 2.6 for some reason...
        if sys.version_info[:2] > (2, 6):
            assert_is_not(label.color, None)

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
                                       subjects_dir=subjects_dir)
    for label in labels_rh:
        assert_true(label.name.endswith('-rh'))
        assert_true(label.hemi == 'rh')
        # XXX doesn't work on py26 for some reason
        if sys.version_info[:2] > (2, 6):
            assert_is_not(label.color, None)

    # combine the lh, rh, labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert_true(len(labels_both) == 68)

    # test regexp
    label = read_labels_from_annot('sample', parc='aparc.a2009s',
                                   regexp='Angu', subjects_dir=subjects_dir)[0]
    assert_true(label.name == 'G_pariet_inf-Angular-lh')
    # silly, but real regexp:
    label = read_labels_from_annot('sample', 'aparc.a2009s',
                                   regexp='.*-.{4,}_.{3,3}-L',
                                   subjects_dir=subjects_dir)[0]
    assert_true(label.name == 'G_oc-temp_med-Lingual-lh')
    assert_raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
                  annot_fname=annot_fname, regexp='JackTheRipper',
                  subjects_dir=subjects_dir)
Example #9
def test_read_labels_from_annot(tmpdir):
    """Test reading labels from FreeSurfer parcellation."""
    # test some invalid inputs
    pytest.raises(ValueError, read_labels_from_annot, 'sample', hemi='bla',
                  subjects_dir=subjects_dir)
    pytest.raises(ValueError, read_labels_from_annot, 'sample',
                  annot_fname='bla.annot', subjects_dir=subjects_dir)
    with pytest.raises(IOError, match='does not exist'):
        _read_annot_cands('foo')
    with pytest.raises(IOError, match='no candidate'):
        _read_annot(str(tmpdir))

    # read labels using hemi specification
    labels_lh = read_labels_from_annot('sample', hemi='lh',
                                       subjects_dir=subjects_dir)
    for label in labels_lh:
        assert label.name.endswith('-lh')
        assert label.hemi == 'lh'
        assert label.color is not None

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, 'sample', 'label', 'rh.aparc.annot')
    labels_rh = read_labels_from_annot('sample', annot_fname=annot_fname,
                                       subjects_dir=subjects_dir)
    for label in labels_rh:
        assert label.name.endswith('-rh')
        assert label.hemi == 'rh'
        assert label.color is not None

    # combine the lh, rh, labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both = read_labels_from_annot('sample', subjects_dir=subjects_dir)

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert (len(labels_both) == 68)

    # test regexp
    label = read_labels_from_annot('sample', parc='aparc.a2009s',
                                   regexp='Angu', subjects_dir=subjects_dir)[0]
    assert (label.name == 'G_pariet_inf-Angular-lh')
    # silly, but real regexp:
    label = read_labels_from_annot('sample', 'aparc.a2009s',
                                   regexp='.*-.{4,}_.{3,3}-L',
                                   subjects_dir=subjects_dir)[0]
    assert (label.name == 'G_oc-temp_med-Lingual-lh')
    pytest.raises(RuntimeError, read_labels_from_annot, 'sample', parc='aparc',
                  annot_fname=annot_fname, regexp='JackTheRipper',
                  subjects_dir=subjects_dir)
Example #10
def test_read_labels_from_annot():
    """Test reading labels from FreeSurfer parcellation
    """
    # test some invalid inputs
    assert_raises(ValueError, read_labels_from_annot, "sample", hemi="bla", subjects_dir=subjects_dir)
    assert_raises(ValueError, read_labels_from_annot, "sample", annot_fname="bla.annot", subjects_dir=subjects_dir)

    # read labels using hemi specification
    labels_lh = read_labels_from_annot("sample", hemi="lh", subjects_dir=subjects_dir)
    for label in labels_lh:
        assert_true(label.name.endswith("-lh"))
        assert_true(label.hemi == "lh")
        # XXX fails on 2.6 for some reason...
        if sys.version_info[:2] > (2, 6):
            assert_is_not(label.color, None)

    # read labels using annot_fname
    annot_fname = op.join(subjects_dir, "sample", "label", "rh.aparc.annot")
    labels_rh = read_labels_from_annot("sample", annot_fname=annot_fname, subjects_dir=subjects_dir)
    for label in labels_rh:
        assert_true(label.name.endswith("-rh"))
        assert_true(label.hemi == "rh")
        assert_is_not(label.color, None)

    # combine the lh, rh, labels and sort them
    labels_lhrh = list()
    labels_lhrh.extend(labels_lh)
    labels_lhrh.extend(labels_rh)

    names = [label.name for label in labels_lhrh]
    labels_lhrh = [label for (name, label) in sorted(zip(names, labels_lhrh))]

    # read all labels at once
    labels_both = read_labels_from_annot("sample", subjects_dir=subjects_dir)

    # we have the same result
    _assert_labels_equal(labels_lhrh, labels_both)

    # aparc has 68 cortical labels
    assert_true(len(labels_both) == 68)

    # test regexp
    label = read_labels_from_annot("sample", parc="aparc.a2009s", regexp="Angu", subjects_dir=subjects_dir)[0]
    assert_true(label.name == "G_pariet_inf-Angular-lh")
    # silly, but real regexp:
    label = read_labels_from_annot("sample", "aparc.a2009s", regexp=".*-.{4,}_.{3,3}-L", subjects_dir=subjects_dir)[0]
    assert_true(label.name == "G_oc-temp_med-Lingual-lh")
    assert_raises(
        RuntimeError,
        read_labels_from_annot,
        "sample",
        parc="aparc",
        annot_fname=annot_fname,
        regexp="JackTheRipper",
        subjects_dir=subjects_dir,
    )
Example #11
def test_annot_io():
    """Test I/O from and to *.annot files."""
    # copy necessary files from fsaverage to tempdir
    tempdir = _TempDir()
    subject = 'fsaverage'
    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
    label_dir = os.path.join(tempdir, subject, 'label')
    surf_dir = os.path.join(tempdir, subject, 'surf')
    os.makedirs(label_dir)
    os.mkdir(surf_dir)
    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)

    # read original labels
    with pytest.raises(IOError, match='\nPALS_B12_Lobes$'):
        read_labels_from_annot(subject,
                               'PALS_B12_Lobesey',
                               subjects_dir=tempdir)
    labels = read_labels_from_annot(subject,
                                    'PALS_B12_Lobes',
                                    subjects_dir=tempdir)

    # test saving parcellation only covering one hemisphere
    parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
    parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
    assert_equal(len(parc1), len(parc))
    for l1, l in zip(parc1, parc):
        assert_labels_equal(l1, l)

    # test saving only one hemisphere
    parc = [l for l in labels if l.name.startswith('LOBE')]
    write_labels_to_annot(parc,
                          subject,
                          'myparc2',
                          hemi='lh',
                          subjects_dir=tempdir)
    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
    assert os.path.isfile(annot_fname % 'l')
    assert not os.path.isfile(annot_fname % 'r')
    parc1 = read_labels_from_annot(subject,
                                   'myparc2',
                                   annot_fname=annot_fname % 'l',
                                   subjects_dir=tempdir)
    parc_lh = [l for l in parc if l.name.endswith('lh')]
    for l1, l in zip(parc1, parc_lh):
        assert_labels_equal(l1, l)

    # test that the annotation is complete (test Label() support)
    rr = read_surface(op.join(surf_dir, 'lh.white'))[0]
    label = sum(labels, Label(hemi='lh', subject='fsaverage')).lh
    assert_array_equal(label.vertices, np.arange(len(rr)))
Example #12
def prepare_parcels(subject, subjects_dir, hemi, n_parcels, random_state):
    if ((hemi == 'both') or (hemi == 'lh')):
        annot_fname_lh = 'lh.random' + str(n_parcels) + '.annot'
        annot_fname_lh = os.path.join(subjects_dir, subject, 'label',
                                      annot_fname_lh)
        # create the lh parcellation only when it will actually be used
        make_random_parcellation(annot_fname_lh,
                                 n_parcels,
                                 'lh',
                                 subjects_dir,
                                 random_state,
                                 subject,
                                 remove_corpus_callosum=True)

    if ((hemi == 'both') or (hemi == 'rh')):
        annot_fname_rh = 'rh.random' + str(n_parcels) + '.annot'
        annot_fname_rh = os.path.join(subjects_dir, subject, 'label',
                                      annot_fname_rh)
        make_random_parcellation(annot_fname_rh,
                                 n_parcels,
                                 'rh',
                                 subjects_dir,
                                 random_state,
                                 subject,
                                 remove_corpus_callosum=True)

    # read the labels from annot
    if ((hemi == 'both') or (hemi == 'lh')):
        parcels_lh = mne.read_labels_from_annot(subject=subject,
                                                annot_fname=annot_fname_lh,
                                                hemi='lh',
                                                subjects_dir=subjects_dir)

        # remove the last, unknown label which is corpus callosum
        assert parcels_lh[-1].name[:7] == 'unknown'
        parcels_lh = parcels_lh[:-1]
        cm_lh = find_centers_of_mass(parcels_lh, subjects_dir)
    if ((hemi == 'both') or (hemi == 'rh')):
        parcels_rh = mne.read_labels_from_annot(subject=subject,
                                                annot_fname=annot_fname_rh,
                                                hemi='rh',
                                                subjects_dir=subjects_dir)
        # remove the last, unknown label which is corpus callosum
        assert parcels_rh[-1].name[:7] == 'unknown'
        parcels_rh = parcels_rh[:-1]
        cm_rh = find_centers_of_mass(parcels_rh, subjects_dir)

    if hemi == 'both':
        return [parcels_lh, parcels_rh], [cm_lh, cm_rh]
    elif hemi == 'rh':
        return [parcels_rh], [cm_rh]
    elif hemi == 'lh':
        return [parcels_lh], [cm_lh]
Example #13
def test_annot_io():
    """Test I/O from and to *.annot files"""
    # copy necessary files from fsaverage to tempdir
    tempdir = _TempDir()
    subject = 'fsaverage'
    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
    label_dir = os.path.join(tempdir, subject, 'label')
    surf_dir = os.path.join(tempdir, subject, 'surf')
    os.makedirs(label_dir)
    os.mkdir(surf_dir)
    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)

    # read original labels
    assert_raises(IOError,
                  read_labels_from_annot,
                  subject,
                  'PALS_B12_Lobesey',
                  subjects_dir=tempdir)
    labels = read_labels_from_annot(subject,
                                    'PALS_B12_Lobes',
                                    subjects_dir=tempdir)

    # test saving parcellation only covering one hemisphere
    parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
    parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
    assert_equal(len(parc1), len(parc))
    for l1, l in zip(parc1, parc):
        assert_labels_equal(l1, l)

    # test saving only one hemisphere
    parc = [l for l in labels if l.name.startswith('LOBE')]
    write_labels_to_annot(parc,
                          subject,
                          'myparc2',
                          hemi='lh',
                          subjects_dir=tempdir)
    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
    assert_true(os.path.isfile(annot_fname % 'l'))
    assert_false(os.path.isfile(annot_fname % 'r'))
    parc1 = read_labels_from_annot(subject,
                                   'myparc2',
                                   annot_fname=annot_fname % 'l',
                                   subjects_dir=tempdir)
    parc_lh = [l for l in parc if l.name.endswith('lh')]
    for l1, l in zip(parc1, parc_lh):
        assert_labels_equal(l1, l)
Example #14
def test_annot_io():
    """Test I/O from and to *.annot files."""
    # copy necessary files from fsaverage to tempdir
    tempdir = _TempDir()
    subject = 'fsaverage'
    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
    label_dir = os.path.join(tempdir, subject, 'label')
    surf_dir = os.path.join(tempdir, subject, 'surf')
    os.makedirs(label_dir)
    os.mkdir(surf_dir)
    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)

    # read original labels
    with pytest.raises(IOError, match='\nPALS_B12_Lobes$'):
        read_labels_from_annot(subject, 'PALS_B12_Lobesey',
                               subjects_dir=tempdir)
    labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
                                    subjects_dir=tempdir)

    # test saving parcellation only covering one hemisphere
    parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
    parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
    assert_equal(len(parc1), len(parc))
    for l1, l in zip(parc1, parc):
        assert_labels_equal(l1, l)

    # test saving only one hemisphere
    parc = [l for l in labels if l.name.startswith('LOBE')]
    write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
                          subjects_dir=tempdir)
    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
    assert os.path.isfile(annot_fname % 'l')
    assert not os.path.isfile(annot_fname % 'r')
    parc1 = read_labels_from_annot(subject, 'myparc2',
                                   annot_fname=annot_fname % 'l',
                                   subjects_dir=tempdir)
    parc_lh = [l for l in parc if l.name.endswith('lh')]
    for l1, l in zip(parc1, parc_lh):
        assert_labels_equal(l1, l)

    # test that the annotation is complete (test Label() support)
    rr = read_surface(op.join(surf_dir, 'lh.white'))[0]
    label = sum(labels, Label(hemi='lh', subject='fsaverage')).lh
    assert_array_equal(label.vertices, np.arange(len(rr)))
Example #15
def test_read_labels_from_annot_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels
    """

    def _mne_annot2labels(subject, subjects_dir, parc):
        """Get labels using mne_annot2lables"""
        label_dir = _TempDir()
        cwd = os.getcwd()
        try:
            os.chdir(label_dir)
            env = os.environ.copy()
            env['SUBJECTS_DIR'] = subjects_dir
            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
            run_subprocess(cmd, env=env)
            label_fnames = glob.glob(label_dir + '/*.label')
            label_fnames.sort()
            labels = [read_label(fname) for fname in label_fnames]
        finally:
            del label_dir
            os.chdir(cwd)

        return labels

    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Example #16
def morph_labels_from_fsaverage(subject,
                                subjects_dir,
                                atlas='laus125',
                                fsaverage='fsaverage',
                                hemi='both',
                                surf_name='pial',
                                overwrite=False,
                                n_jobs=6):
    subject_dir = op.join(subjects_dir, subject)
    labels_fol = op.join(subjects_dir, fsaverage, 'label', atlas)
    morphed_labels_fol = op.join(subject_dir, 'label', atlas)
    if not op.isdir(morphed_labels_fol):
        os.makedirs(morphed_labels_fol)
    labels = mne.read_labels_from_annot(fsaverage,
                                        atlas,
                                        subjects_dir=subjects_dir,
                                        surf_name=surf_name,
                                        hemi=hemi)
    if len(labels) == 0:
        raise Exception(
            'morph_labels_from_fsaverage: No labels for {}, {}'.format(
                fsaverage, atlas))
    # Make sure we have a morph map, and if not, create it here, and not in the parallel function
    mne.surface.read_morph_map(subject, fsaverage, subjects_dir=subjects_dir)
    verts = load_surf(subject, subjects_dir)
    indices = np.array_split(np.arange(len(labels)), n_jobs)
    chunks = [([labels[ind] for ind in chunk_indices], subject, fsaverage,
               labels_fol, morphed_labels_fol, verts, subjects_dir, overwrite)
              for chunk_indices in indices]
    results = run_parallel(_morph_labels_parallel, chunks, n_jobs)
    morphed_labels = []
    for chunk_morphed_labels in results:
        morphed_labels.extend(chunk_morphed_labels)
    return morphed_labels
Example #17
    def load_parc_labels(self):
        if self._labels is None:
            self._labels = mne.read_labels_from_annot(
                self.name,
                parc=self.p['parcellation'],
                subjects_dir=self.subjects_dir)
        return self._labels
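
For context, a minimal sketch of a class this cached accessor could live in. The class name and constructor are assumptions for illustration; only the attribute names (name, p['parcellation'], subjects_dir, _labels) mirror the snippet above.

import mne


class SubjectData:
    """Hypothetical container providing the state the accessor expects."""

    def __init__(self, name, subjects_dir, parcellation='aparc'):
        self.name = name                  # FreeSurfer subject name
        self.subjects_dir = subjects_dir  # FreeSurfer SUBJECTS_DIR
        self.p = {'parcellation': parcellation}
        self._labels = None               # cache, filled on first access

    def load_parc_labels(self):
        if self._labels is None:
            self._labels = mne.read_labels_from_annot(
                self.name,
                parc=self.p['parcellation'],
                subjects_dir=self.subjects_dir)
        return self._labels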
Example #18
def plot_aparc_parcels(matrix, ax, fig, title):
    """
    :param matrix:
        The connectivity matrix (real or simulated) representing aparc parcels

    :param ax:
        axes to plot on
    :return:
    """

    # get labels and coordinates
    fs_subdir = join('/imaging/ai05/RED/RED_MEG/resting/STRUCTURALS', 'FS_SUBDIR')
    labels = mne.read_labels_from_annot('fsaverage_1', parc='aparc',
                                        subjects_dir=fs_subdir)
    labels = labels[0:-1]
    label_names = [label.name for label in labels]
    coords = []
    # TODO: Find a better way to get centre of parcel
    # get coords of centre of mass (vertex_to_mni expects 0 for lh, 1 for rh)
    for i in range(len(labels)):
        if 'lh' in label_names[i]:
            hem = 0
        else:
            hem = 1
        coord = mne.vertex_to_mni(
            labels[i].center_of_mass(subjects_dir=fs_subdir),
            subject='fsaverage_1', hemis=hem,
            subjects_dir=join(MAINDIR, 'FS_SUBDIR'))
        coords.append(coord[0])

    plotting.plot_connectome_strength(matrix,
                                      node_coords=coords,
                                      title=title,
                                      figure=fig,
                                      axes=ax,
                                      cmap=plt.cm.YlOrRd)
Example #19
def adaptive_parcellation(fwd, inv, subject, subjects_dir, hemi='both'):
    """ 
    """
    # Magic happens
    # res = mne.minimum_norm.make_inverse_resolution_matrix(fwd, inv)
    # ...

    # However, here's a magicless example
    labels_aparc = mne.read_labels_from_annot(subject, parc='aparc', hemi='lh', 
                                              subjects_dir=subjects_dir)

    # add frontal pole label to lh
    vertices = labels_aparc[5].vertices
    label1 = mne.Label(vertices, hemi='lh', name='frontalpole', 
                       subject=subject, color=(1,0,0,1))

    # add label of rightmost vertices to rh
    vertices = inv['src'][1]['vertno']
    vertices = [vv for vv in vertices if inv['src'][1]['rr'][vv, 0] > 0.05]
    label2 = mne.Label(vertices, hemi='rh', name='lateral', 
                       subject=subject, color=(0,1,0,1))
    label2 = label2.fill(inv['src'])

    labels = [label1, label2]
    return labels
Example #20
def load_hcpmmp1():
    # os.environ["SUBJECTS_DIR"] = VALUE

    labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1_combined')
    labels = sorted(labels, key=lambda x: x.name)

    return labels
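
A hedged usage sketch for the loader above. It assumes the HCPMMP1_combined annotation has already been fetched into the subjects directory (e.g. via mne.datasets.fetch_hcp_mmp_parcellation, as in Example #23 below); the path is a placeholder.

import os
import mne

# Placeholder path; fsaverage there must contain lh/rh.HCPMMP1_combined.annot,
# e.g. after mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=...).
os.environ["SUBJECTS_DIR"] = "/path/to/subjects"

labels = load_hcpmmp1()
print(len(labels), labels[0].name)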
Example #21
def plot_V1V2(measure, hemi, clim):
    data, vert = extract_hemi_data(src_df, measure, time, hemi)

    brain = Brain('fsaverage',
                  hemi,
                  'inflated',
                  cortex='low_contrast',
                  subjects_dir='mne_subjects',
                  background='w',
                  foreground='k')

    brain.add_data(data,
                   vertices=vert,
                   min=-clim[2],
                   max=clim[2],
                   time=[time],
                   time_label=lambda t: '%d ms' % (t * 1000),
                   colormap=morph_divergent_cmap(cmap, clim),
                   hemi=hemi,
                   smoothing_steps=5)

    labels = mne.read_labels_from_annot('fsaverage',
                                        parc='HCPMMP1',
                                        hemi=hemi,
                                        regexp='[LR]_((V1)|(V2)).*')

    for label in labels:
        brain.add_label(label, borders=True)

    mlab.view(*views[hemi])

    return brain
Example #22
def load_labsn_hcpmmp1_av_rois_small():

    hcp_mmp1_labels = mne.read_labels_from_annot('fsaverage',
                                                 parc='HCPMMP1_combined')
    hcp_mmp1_labels = combine_medial_labels(hcp_mmp1_labels)
    label_names = [l.name for l in hcp_mmp1_labels]

    #prim_visual_lh = label_names.index("Primary Visual Cortex (V1)-lh")
    #prim_visual_rh = label_names.index("Primary Visual Cortex (V1)-rh")
    #prim_visual_lh = hcp_mmp1_labels[prim_visual_lh]
    #prim_visual_rh = hcp_mmp1_labels[prim_visual_rh]
    prim_visual = [
        l for l in hcp_mmp1_labels if 'Primary Visual Cortex' in l.name
    ]

    # there should be only one b/c of medial merge
    prim_visual = prim_visual[0]

    early_visual_lh = label_names.index("Early Visual Cortex-lh")
    early_visual_rh = label_names.index("Early Visual Cortex-rh")
    early_visual_lh = hcp_mmp1_labels[early_visual_lh]
    early_visual_rh = hcp_mmp1_labels[early_visual_rh]

    #visual_lh = prim_visual_lh + early_visual_lh
    #visual_rh = prim_visual_rh + early_visual_rh

    visual = prim_visual + early_visual_lh + early_visual_rh
    labels = [visual]

    #labels = [visual_lh, visual_rh]

    eac_labs = [
        l for l in hcp_mmp1_labels if 'Early Auditory Cortex' in l.name
    ]
    labels.extend(eac_labs)

    tpo_labs = [
        l for l in hcp_mmp1_labels
        if 'Temporo-Parieto-Occipital Junction' in l.name
    ]
    labels.extend(tpo_labs)

    dpc_labs = [
        l for l in hcp_mmp1_labels
        if 'DorsoLateral Prefrontal Cortex' in l.name
    ]
    labels.extend(dpc_labs)

    ## extra labels KC wanted
    #pmc_labs = [l for l in hcp_mmp1_labels if 'Premotor Cortex' in l.name]
    #labels.extend(pmc_labs)

    #ips_str = glob.glob(os.path.join(subjects_dir, "fsaverage/label/*IPS*labsn*"))
    #ips_labs = [mne.read_label(fn, subject='fsaverage') for fn in ips_str]
    #labels.extend(ips_labs)

    #rtpj_labs = [l for l in hcp_mmp1_labels if 'Inferior Parietal Cortex-rh' in l.name]
    #labels.extend(rtpj_labs)

    return labels
Example #23
def morph_labels_from_fsaverage(mri_sub):
    parcellations = ['aparc_sub', 'HCPMMP1_combined', 'HCPMMP1']
    if not isfile(join(mri_sub.subjects_dir, 'fsaverage/label',
                       'lh.' + parcellations[0] + '.annot')):
        mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=mri_sub.subjects_dir,
                                                verbose=True)

        mne.datasets.fetch_aparc_sub_parcellation(subjects_dir=mri_sub.subjects_dir,
                                                  verbose=True)
    else:
        print('You\'ve already downloaded the parcellations, splendid!')

    if not isfile(join(mri_sub.subjects_dir, mri_sub.name, 'label',
                       'lh.' + parcellations[0] + '.annot')):
        for pc in parcellations:
            labels = mne.read_labels_from_annot('fsaverage', pc, hemi='both')

            m_labels = mne.morph_labels(labels, mri_sub.name, 'fsaverage', mri_sub.subjects_dir,
                                        surf_name='pial')

            mne.write_labels_to_annot(m_labels, subject=mri_sub.name, parc=pc,
                                      subjects_dir=mri_sub.subjects_dir, overwrite=True)

    else:
        print(f'{parcellations} already exist')
Example #24
def annotation_to_labels():
    fol = os.path.join(subjects_dir, subject, 'label', aparc_name)
    if not(os.path.isdir(fol)):
        os.mkdir(fol)
    labels = mne.read_labels_from_annot(subject, parc=aparc_name, hemi='both', surf_name='pial')
    for label in labels:
        label.save(os.path.join(fol, label.name))
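
A hedged sketch of reading one of those saved label files back with mne.read_label. The directory layout mirrors the function above, but the placeholder values and the 'precentral-lh' file name are assumptions (the exact names produced by Label.save depend on the parcellation used).

import os
import mne

# Hypothetical values; adjust to match the setup used in annotation_to_labels().
subjects_dir = '/path/to/subjects'
subject = 'fsaverage'
aparc_name = 'aparc'

fol = os.path.join(subjects_dir, subject, 'label', aparc_name)
fname = os.path.join(fol, 'precentral-lh.label')  # assumed file name
label = mne.read_label(fname, subject=subject)
print(label.name, len(label.vertices))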
Example #25
def loadannot_mne(p,
                  subj,
                  subjdir,
                  labnam=None,
                  surf_type='pial',
                  surf_struct=None,
                  quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    from distutils.version import LooseVersion

    if LooseVersion(mne.__version__) >= LooseVersion('0.8'):
        #MNE python changed the API to read an annotation twice in the same
        #release cycle. Check for both versions.
        try:
            annot = mne.read_labels_from_annot(parc=p,
                                               subject=subj,
                                               surf_name=surf_type,
                                               subjects_dir=subjdir,
                                               verbose=verbosity)
        except Exception:
            annot = mne.read_annot(parc=p,
                                   subject=subj,
                                   surf_name=surf_type,
                                   subjects_dir=subjdir,
                                   verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p,
                                     subject=subj,
                                     surf_name=surf_type,
                                     subjects_dir=subjdir,
                                     verbose=verbosity)
        annot = annot[0]  #discard the color table
    return annot
Example #26
def test_label_center_of_mass():
    """Test computing the center of mass of a label"""
    stc = read_source_estimate(stc_fname)
    stc.lh_data[:] = 0
    vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
    assert_equal(vertex_stc, 124791)
    label = Label(stc.vertices[1],
                  pos=None,
                  values=stc.rh_data.mean(axis=1),
                  hemi='rh',
                  subject='sample')
    vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
    assert_equal(vertex_label, vertex_stc)

    labels = read_labels_from_annot('sample',
                                    parc='aparc.a2009s',
                                    subjects_dir=subjects_dir)
    src = read_source_spaces(src_fname)
    # Try a couple of random ones, one from left and one from right
    # Visually verified in about the right place using mne_analyze
    for label, expected in zip([labels[2], labels[3], labels[-5]],
                               [141162, 145221, 55979]):
        label.values[:] = -1
        assert_raises(ValueError,
                      label.center_of_mass,
                      subjects_dir=subjects_dir)
        label.values[:] = 1
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
        assert_equal(
            label.center_of_mass(subjects_dir=subjects_dir,
                                 restrict_vertices=label.vertices), expected)
        # restrict to source space
        idx = 0 if label.hemi == 'lh' else 1
        # this simple nearest version is not equivalent, but is probably
        # close enough for many labels (including the test ones):
        pos = label.pos[np.where(label.vertices == expected)[0][0]]
        pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
        pos = np.argmin(np.sum(pos * pos, axis=1))
        src_expected = src[idx]['vertno'][pos]
        # see if we actually get the same one
        src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
        assert_equal(
            label.center_of_mass(subjects_dir=subjects_dir,
                                 restrict_vertices=src_restrict), src_expected)
        assert_equal(
            label.center_of_mass(subjects_dir=subjects_dir,
                                 restrict_vertices=src), src_expected)
    # degenerate cases
    assert_raises(ValueError,
                  label.center_of_mass,
                  subjects_dir=subjects_dir,
                  restrict_vertices='foo')
    assert_raises(TypeError,
                  label.center_of_mass,
                  subjects_dir=subjects_dir,
                  surf=1)
    assert_raises(IOError,
                  label.center_of_mass,
                  subjects_dir=subjects_dir,
                  surf='foo')
Example #27
def test_fetch_parcellations(tmpdir):
    """Test fetching parcellations."""
    this_subjects_dir = str(tmpdir)
    os.mkdir(op.join(this_subjects_dir, 'fsaverage'))
    os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'label'))
    os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'surf'))
    for hemi in ('lh', 'rh'):
        shutil.copyfile(
            op.join(subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi),
            op.join(this_subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi))
    # speed up by pretending we have one of them
    with open(op.join(this_subjects_dir, 'fsaverage', 'label',
                      'lh.aparc_sub.annot'), 'wb'):
        pass
    datasets.fetch_aparc_sub_parcellation(subjects_dir=this_subjects_dir)
    with ArgvSetter(('--accept-hcpmmp-license',)):
        datasets.fetch_hcp_mmp_parcellation(subjects_dir=this_subjects_dir)
    for hemi in ('lh', 'rh'):
        assert op.isfile(op.join(this_subjects_dir, 'fsaverage', 'label',
                                 '%s.aparc_sub.annot' % hemi))
    # test our annot round-trips here
    kwargs = dict(subject='fsaverage', hemi='both', sort=False,
                  subjects_dir=this_subjects_dir)
    labels = read_labels_from_annot(parc='HCPMMP1', **kwargs)
    write_labels_to_annot(
        labels, parc='HCPMMP1_round',
        table_name='./left.fsaverage164.label.gii', **kwargs)
    orig = op.join(this_subjects_dir, 'fsaverage', 'label', 'lh.HCPMMP1.annot')
    first = hashfunc(orig)
    new = orig[:-6] + '_round.annot'
    second = hashfunc(new)
    assert first == second
Example #28
    def apply_cortical_parcellation_event_stcs(self,
                                               stcs,
                                               src,
                                               save=True,
                                               gen_mode=True):

        labels = mne.read_labels_from_annot(self.subject)
        self.labels = [lbl for lbl in labels if lbl.name != 'unknown-lh']
        stc_path = 'stcs/'
        self.stc_cp = dict()

        for key, event_stcs in stcs.items():
            stc_sub_path = stc_path + key + '/'
            event_stcs_cp = np.zeros((68, 500, 5))
            for event_id, event_stc in event_stcs.items():
                event_stc_path = stc_sub_path + event_id + '.csv'
                if gen_mode:
                    label_tc = mne.extract_label_time_course(event_stc,
                                                             self.labels,
                                                             src,
                                                             mode='pca_flip')
                else:
                    label_tc = np.genfromtxt(event_stc_path, delimiter=',')
                event_stcs_cp[:, :, int(event_id) - 1] = label_tc
                if save:
                    np.savetxt(event_stc_path, label_tc, delimiter=',')
            self.stc_cp[key] = event_stcs_cp
        return self.stc_cp
Example #29
def morph_labels(subject_from,
                 subject_to,
                 parc,
                 surf_name='pial',
                 smooth=2,
                 overwrite=True):
    '''
        mne_morph_labels --from fsaverage --to mg79 --labeldir /homes/5/npeled/space3/subjects/fsaverage/label/laus500_labels --smooth 5
    '''
    # brain = Brain(subject_from, 'both', surf_name, curv=False)
    labels = mne.read_labels_from_annot(subject_from, parc, 'both', surf_name)
    morphed_labels = []
    for ind, label in enumerate(labels):
        try:
            print('label {}/{}'.format(ind, len(labels)))
            label.values.fill(1.0)
            morphed_label = label.morph(subject_from, subject_to, smooth)
            morphed_labels.append(morphed_label)
        except Exception:
            print("can't morph label {}".format(label.name))
            print(sys.exc_info()[1])
    print('{} labels were morphed successfully.'.format(len(morphed_labels)))
    mne.write_labels_to_annot(morphed_labels,
                              subject_to,
                              parc,
                              overwrite,
                              hemi='both')
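
Newer MNE versions expose mne.morph_labels (used in Example #23 above), which avoids the per-label loop. A hedged sketch follows; the subjects_dir path is a placeholder, and the subject and parcellation names ('mg79', 'laus500') are taken from the docstring above as illustrative values.

import mne

subjects_dir = '/path/to/subjects'  # placeholder
labels = mne.read_labels_from_annot('fsaverage', 'laus500', 'both',
                                    subjects_dir=subjects_dir)
morphed = mne.morph_labels(labels, subject_to='mg79', subject_from='fsaverage',
                           subjects_dir=subjects_dir, surf_name='pial')
mne.write_labels_to_annot(morphed, subject='mg79', parc='laus500',
                          subjects_dir=subjects_dir, overwrite=True)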
Example #30
def test_read_labels_from_annot_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels
    """
    def _mne_annot2labels(subject, subjects_dir, parc):
        """Get labels using mne_annot2lables"""
        label_dir = _TempDir()
        cwd = os.getcwd()
        try:
            os.chdir(label_dir)
            env = os.environ.copy()
            env['SUBJECTS_DIR'] = subjects_dir
            cmd = ['mne_annot2labels', '--subject', subject, '--parc', parc]
            run_subprocess(cmd, env=env)
            label_fnames = glob.glob(label_dir + '/*.label')
            label_fnames.sort()
            labels = [read_label(fname) for fname in label_fnames]
        finally:
            del label_dir
            os.chdir(cwd)

        return labels

    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)
    labels_mne = _mne_annot2labels('sample', subjects_dir, 'aparc')

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Example #31
def _simulate_data(fwd, idx):  # Somewhere on the frontal lobe by default
    """Simulate an oscillator on the cortex."""
    source_vertno = fwd['src'][0]['vertno'][idx]

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch (weird baseline but shouldn't matter)
    epochs = mne.Epochs(raw, [[0, 0, 1]],
                        event_id=1,
                        tmin=0,
                        tmax=raw.times[-1],
                        baseline=(0., 0.),
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    labels = mne.read_labels_from_annot('sample',
                                        hemi='lh',
                                        subjects_dir=subjects_dir)
    label = [
        label for label in labels if np.in1d(source_vertno, label.vertices)[0]
    ]
    assert len(label) == 1
    label = label[0]
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
Example #32
def sources_to_labels(stcs,
                      age=None,
                      template=None,
                      parc='aparc',
                      mode='mean_flip',
                      allow_empty=True,
                      return_generator=False,
                      subjects_dir=None,
                      include_vol_src=True):
    template = __validate_template__(age, template, subjects_dir)
    montage, trans, bem_model, bem_solution, src = get_bem_artifacts(
        template, subjects_dir=subjects_dir, include_vol_src=include_vol_src)

    labels_parc = mne.read_labels_from_annot(template,
                                             subjects_dir=subjects_dir,
                                             parc=parc)
    labels_ts = mne.extract_label_time_course(
        stcs,
        labels_parc,
        src,
        mode=mode,
        allow_empty=allow_empty,
        return_generator=return_generator)

    if include_vol_src:
        labels_aseg = mne.get_volume_labels_from_src(src, template,
                                                     subjects_dir)
        labels = labels_parc + labels_aseg
    else:
        labels = labels_parc

    return labels_ts, labels
Example #33
def region_centers_of_masse(age=None,
                            template=None,
                            parc="aparc",
                            surf_name="pial",
                            subjects_dir=None,
                            include_vol_src=True):
    template = __validate_template__(age, template, subjects_dir)
    montage, trans, bem_model, bem_solution, src = get_bem_artifacts(
        template, subjects_dir=subjects_dir, include_vol_src=include_vol_src)
    center_of_masses_dict = {}
    if include_vol_src:
        for src_obj in src[2:]:
            roi_str = src_obj["seg_name"]
            if 'left' in roi_str.lower():
                roi_str = roi_str.replace('Left-', '') + '-lh'
            elif 'right' in roi_str.lower():
                roi_str = roi_str.replace('Right-', '') + '-rh'

            center_of_masses_dict[roi_str] = np.average(
                src_obj['rr'][src_obj["vertno"]], axis=0)

    for label in mne.read_labels_from_annot(template,
                                            subjects_dir=subjects_dir,
                                            parc=parc):
        ind_com = np.where(label.vertices == label.center_of_mass(
            subject=template, subjects_dir=subjects_dir))[0]
        if len(label.pos[ind_com, :]):
            center_of_masses_dict[label.name] = label.pos[ind_com, :][0]

    center_of_masses_df = pd.DataFrame(center_of_masses_dict).T.reset_index()
    center_of_masses_df.columns = ["region", "x", "y", "z"]
    center_of_masses_df["template"] = template
    return center_of_masses_df
Example #34
def find_clusters_overlapped_labeles(subject, clusters, contrast, atlas, hemi, verts, load_from_annotation=True,
                                     n_jobs=1):
    cluster_labels = []
    annot_fname = op.join(SUBJECTS_DIR, subject, 'label', '{}.{}.annot'.format(hemi, atlas))
    if load_from_annotation and op.isfile(annot_fname):
        labels = mne.read_labels_from_annot(subject, annot_fname=annot_fname, surf_name='pial')
    else:
        # todo: read only the labels from the current hemi
        labels = utils.read_labels_parallel(subject, SUBJECTS_DIR, atlas, n_jobs)
        labels = [l for l in labels if l.hemi == hemi]

    if len(labels) == 0:
        print('No labels!')
        return None
    for cluster in clusters:
        x = contrast[cluster]
        cluster_max = np.min(x) if abs(np.min(x)) > abs(np.max(x)) else np.max(x)
        inter_labels, inter_labels_tups = [], []
        for label in labels:
            overlapped_vertices = np.intersect1d(cluster, label.vertices)
            if len(overlapped_vertices) > 0:
                if 'unknown' not in label.name:
                    inter_labels_tups.append((len(overlapped_vertices), label.name))
                    # inter_labels.append(dict(name=label.name, num=len(overlapped_vertices)))
        inter_labels_tups = sorted(inter_labels_tups)[::-1]
        for inter_labels_tup in inter_labels_tups:
            inter_labels.append(dict(name=inter_labels_tup[1], num=inter_labels_tup[0]))
        if len(inter_labels) > 0:
            # max_inter = max([(il['num'], il['name']) for il in inter_labels])
            cluster_labels.append(dict(vertices=cluster, intersects=inter_labels, name=inter_labels[0]['name'],
                coordinates=verts[cluster], max=cluster_max, hemi=hemi, size=len(cluster)))
        else:
            print('No intersected labels!')
    return cluster_labels
Example #35
def load_hcpmmp1_combined():

    labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1_combined')
    labels = sorted(labels, key=lambda x: x.name)
    labels = combine_medial_labels(labels)

    return labels
Example #36
def test_source_space():
    "Test SourceSpace Dimension"
    subject = 'fsaverage'
    data_path = mne.datasets.sample.data_path()
    mri_sdir = os.path.join(data_path, 'subjects')
    mri_dir = os.path.join(mri_sdir, subject)
    label_dir = os.path.join(mri_dir, 'label')
    label_ba1 = mne.read_label(os.path.join(label_dir, 'lh.BA1.label'))
    label_v1 = mne.read_label(os.path.join(label_dir, 'lh.V1.label'))
    label_mt = mne.read_label(os.path.join(label_dir, 'lh.MT.label'))
    label_ba1_v1 = label_ba1 + label_v1
    label_v1_mt = label_v1 + label_mt

    src = datasets._mne_source_space(subject, 'ico-5', mri_sdir)
    source = SourceSpace.from_mne_source_spaces(src, 'ico-5', mri_sdir)
    source_v1 = source[source.dimindex(label_v1)]
    eq_(source_v1, SourceSpace.from_mne_source_spaces(src, 'ico-5', mri_sdir,
                                                      label=label_v1))
    source_ba1_v1 = source[source.dimindex(label_ba1_v1)]
    source_v1_mt = source[source.dimindex(label_v1_mt)]
    source_v1_intersection = source_ba1_v1.intersect(source_v1_mt)
    assert_source_space_equal(source_v1, source_v1_intersection)

    # index from label
    index = source.index_for_label(label_v1)
    assert_array_equal(index.source[index.x].vertno[0],
                       np.intersect1d(source.lh_vertno, label_v1.vertices, 1))

    # parcellation and cluster localization
    parc = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=mri_sdir)
    indexes = [source.index_for_label(label) for label in parc
               if len(label) > 10]
    x = np.vstack([index.x for index in indexes])
    ds = source._cluster_properties(x)
    for i in range(ds.n_cases):
        eq_(ds[i, 'location'], parc[i].name)

    # multiple labels
    lingual_index = source.dimindex('lingual-lh')
    cuneus_index = source.dimindex('cuneus-lh')
    assert_array_equal(source.dimindex(('cuneus-lh', 'lingual-lh')),
                       np.logical_or(cuneus_index, lingual_index))
    lingual_source = source[lingual_index]
    cuneus_source = source[cuneus_index]
    assert_raises(IndexError, lingual_source.dimindex, cuneus_source)
    sub_source = source[source.dimindex(('cuneus-lh', 'lingual-lh'))]
    eq_(sub_source[sub_source.dimindex('lingual-lh')], lingual_source)
    eq_(sub_source[sub_source.dimindex('cuneus-lh')], cuneus_source)
    eq_(len(sub_source), len(lingual_source) + len(cuneus_source))

    # indexing
    tgt = np.hstack(sub_source.vertno)
    assert_array_equal([i for i in sub_source], tgt)
    assert_array_equal([sub_source[i] for i in range(len(sub_source))], tgt)
    # hemisphere indexing
    lh = source.dimindex('lh')
    source_lh = source[lh]
    eq_(source_lh.dimindex('rh'), slice(0, 0))
    eq_(source_lh.dimindex('lh'), slice(0, len(source_lh)))
Example #37
0
def test_source_space():
    "Test SourceSpace Dimension"
    subject = 'fsaverage'
    data_path = mne.datasets.sample.data_path()
    mri_sdir = os.path.join(data_path, 'subjects')
    mri_dir = os.path.join(mri_sdir, subject)
    label_dir = os.path.join(mri_dir, 'label')
    label_ba1 = mne.read_label(os.path.join(label_dir, 'lh.BA1.label'))
    label_v1 = mne.read_label(os.path.join(label_dir, 'lh.V1.label'))
    label_mt = mne.read_label(os.path.join(label_dir, 'lh.MT.label'))
    label_ba1_v1 = label_ba1 + label_v1
    label_v1_mt = label_v1 + label_mt

    src = datasets._mne_source_space(subject, 'ico-5', mri_sdir)
    source = SourceSpace.from_mne_source_spaces(src, 'ico-5', mri_sdir)
    source_v1 = source[source.dimindex(label_v1)]
    eq_(source_v1, SourceSpace.from_mne_source_spaces(src, 'ico-5', mri_sdir,
                                                      label=label_v1))
    source_ba1_v1 = source[source.dimindex(label_ba1_v1)]
    source_v1_mt = source[source.dimindex(label_v1_mt)]
    source_v1_intersection = source_ba1_v1.intersect(source_v1_mt)
    assert_source_space_equal(source_v1, source_v1_intersection)

    # index from label
    index = source.index_for_label(label_v1)
    assert_array_equal(index.source[index.x].vertno[0],
                       np.intersect1d(source.lh_vertno, label_v1.vertices, 1))

    # parcellation and cluster localization
    parc = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=mri_sdir)
    indexes = [source.index_for_label(label) for label in parc
               if len(label) > 10]
    x = np.vstack([index.x for index in indexes])
    ds = source._cluster_properties(x)
    for i in range(ds.n_cases):
        eq_(ds[i, 'location'], parc[i].name)

    # multiple labels
    lingual_index = source.dimindex('lingual-lh')
    cuneus_index = source.dimindex('cuneus-lh')
    assert_array_equal(source.dimindex(('cuneus-lh', 'lingual-lh')),
                       np.logical_or(cuneus_index, lingual_index))
    lingual_source = source[lingual_index]
    cuneus_source = source[cuneus_index]
    assert_raises(IndexError, lingual_source.dimindex, cuneus_source)
    sub_source = source[source.dimindex(('cuneus-lh', 'lingual-lh'))]
    eq_(sub_source[sub_source.dimindex('lingual-lh')], lingual_source)
    eq_(sub_source[sub_source.dimindex('cuneus-lh')], cuneus_source)
    eq_(len(sub_source), len(lingual_source) + len(cuneus_source))

    # indexing
    tgt = np.hstack(sub_source.vertno)
    assert_array_equal([i for i in sub_source], tgt)
    assert_array_equal([sub_source[i] for i in range(len(sub_source))], tgt)
    # hemisphere indexing
    lh = source.dimindex('lh')
    source_lh = source[lh]
    eq_(source_lh.dimindex('rh'), slice(0, 0))
    eq_(source_lh.dimindex('lh'), slice(len(source_lh)))
Example #38
0
def mne_connec(matrix, this_title, fig, *labels):
    MAINDIR = '/imaging/ai05/RED/RED_MEG/resting/STRUCTURALS'
    if not labels:  # with *labels, an empty call yields (), never None
        labels = mne.read_labels_from_annot('fsaverage_1', parc='aparc',
                                            subjects_dir=join(MAINDIR, 'FS_SUBDIR'))
    labels = labels[0:-1]
    label_colors = [label.color for label in labels]

    label_names = [label.name for label in labels]
    coords = []
    # TODO: Find a better way to get centre of parcel
    # get coords of centre of mass
    for i in range(len(labels)):
        # mne.vertex_to_mni expects hemis=0 for the left and 1 for the right hemisphere
        hem = 0 if 'lh' in label_names[i] else 1
        coord = mne.vertex_to_mni(
            labels[i].center_of_mass(subjects_dir=join(MAINDIR, 'FS_SUBDIR')),
            subject='fsaverage_1', hemis=hem, subjects_dir=join(MAINDIR, 'FS_SUBDIR'))
        coords.append(coord[0])

    # First, we reorder the labels based on their location in the left hemi
    lh_labels = [name for name in label_names if name.endswith('lh')]
    rh_labels = [name for name in label_names if name.endswith('rh')]

    # Get the y-location of the label
    label_ypos = list()
    for name in lh_labels:
        idx = label_names.index(name)
        ypos = np.mean(labels[idx].pos[:, 1])
        label_ypos.append(ypos)

    lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))]

    # For the right hemi
    rh_labels = [label[:-2] + 'rh' for label in lh_labels]

    # make a list with circular plot order
    node_order = list()
    node_order.extend(lh_labels[::-1])  # reverse the order
    node_order.extend(rh_labels)

    node_order = node_order[::-1]  # reverse the whole thing

    # get a mapping to the original list
    re_order_ind = [label_names.index(x) for x in node_order]

    node_angles = mne.viz.circular_layout(label_names, node_order, start_pos=90,
                                          group_boundaries=[0, len(label_names) // 2])

    fig2, ax = mne.viz.plot_connectivity_circle(matrix, label_names, n_lines=300,
                             node_angles=node_angles, node_colors=label_colors,
                             title=this_title, fig=fig,
                                                facecolor='white',
                                                textcolor='black')

    return fig2, ax
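# Hypothetical usage sketch: the connectivity matrix below is random and the
# FreeSurfer paths hard-coded inside mne_connec must exist on the original
# filesystem for the call to succeed.
import numpy as np
import matplotlib.pyplot as plt
import mne

labels = mne.read_labels_from_annot('fsaverage', parc='aparc')
n = len(labels) - 1                      # mne_connec drops the last label
rng = np.random.RandomState(42)
conn = np.abs(rng.randn(n, n))
conn = (conn + conn.T) / 2.              # symmetric "connectivity" matrix
fig = plt.figure(figsize=(8, 8), facecolor='white')
fig2, ax = mne_connec(conn, 'Random connectivity', fig, *labels)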
Example #39
0
def calc_fsaverage_labels_indices(surf_name='pial', labels_from_annot=False, labels_fol='', parc='aparc250', subjects_dir=''):
    labels_fol = os.path.join(subjects_dir, 'fsaverage', 'label', parc) if labels_fol == '' else labels_fol
    if labels_from_annot:
        labels = mne.read_labels_from_annot('fsaverage', parc=parc, hemi='both', surf_name=surf_name)
    else:
        labels = utils.read_labels(labels_fol)
    fsave_vertices = [np.arange(10242), np.arange(10242)]
    labels_vertices, labels_names = utils.get_labels_vertices(labels, fsave_vertices)
    np.savez(op.join(LOCAL_ROOT_DIR, 'fsaverage_labels_indices'), labels_vertices=labels_vertices, labels_names=labels_names)
Example #40
0
def test_read_labels_from_annot_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels."""
    label_fnames = glob.glob(label_dir + '/*.label')
    label_fnames.sort()
    labels_mne = [read_label(fname) for fname in label_fnames]
    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Example #41
0
def generate_combined_simulation(raw_fname,
                                 fwd,
                                 subject=None,
                                 subjects_dir=None,
                                 topdir=None,
                                 label_index=None,
                                 sine_amplitude=None,
                                 sine_frequency=None):
    """Create a combined dataset of simulated plus real data and save
    to the topdir/(subjid)_(AMP)_nAm_(HZ)_hz folder"""

    os.chdir(topdir)
    if label_index is None:
        print('Must provide a label index for simulation')
        exit(1)

    raw = mne.io.read_raw_fif(raw_fname)
    rng = np.random.RandomState(0)  # random state (make reproducible)

    #Labels for simulation
    labels = mne.read_labels_from_annot(subject, subjects_dir=subjects_dir)
    labels_sim = [labels[label_index]]

    times = raw.times  #[:int(raw.info['sfreq'] * epoch_duration)]
    src = fwd['src']

    sig_generator = partial(data_fun,
                            amplitude=sine_amplitude,
                            freq=sine_frequency)

    stc = simulate_sparse_stc(src,
                              n_dipoles=1,
                              times=times,
                              data_fun=sig_generator,
                              labels=labels_sim,
                              location='center',
                              subjects_dir=subjects_dir)

    # Simulate raw data
    raw_sim = simulate_raw(raw.info, [stc] * 1, forward=fwd, verbose=True)

    #Load raw and save to outfolder
    raw.load_data()

    #Combine simulation w/raw
    comb_out_fname = '{}_{}_label_{}_nAm_{}_hz_meg.fif'.format(
        subject, str(label_index), sine_amplitude, sine_frequency)
    # comb_out_fname = op.join(outfolder, outfolder+'_meg.fif')
    combined = raw.copy()
    combined._data += raw_sim.get_data()
    combined.save(comb_out_fname)
    print('Saved {}'.format(comb_out_fname))

    #Save stc for later use
    stc_out_fname = '{}_{}_label_{}_nAm_{}_hz-stc.fif'.format(
        subject, str(label_index), sine_amplitude, sine_frequency)
    stc.save(stc_out_fname)
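# Hypothetical call (file names, subjects_dir, label index, amplitude and
# frequency are placeholders; fwd must match the sensors in raw_fname):
fwd = mne.read_forward_solution('sub01-fwd.fif')
generate_combined_simulation('sub01_rest_raw.fif', fwd,
                             subject='sub01', subjects_dir=subjects_dir,
                             topdir='./simulations', label_index=12,
                             sine_amplitude=50e-9, sine_frequency=20.0)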
Example #42
0
def test_read_labels_from_annot_annot2labels():
    """Test reading labels from parc. by comparing with mne_annot2labels."""
    label_fnames = glob.glob(label_dir + '/*.label')
    label_fnames.sort()
    labels_mne = [read_label(fname) for fname in label_fnames]
    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)

    # we have the same result, mne does not fill pos, so ignore it
    _assert_labels_equal(labels, labels_mne, ignore_pos=True)
Example #43
0
File: show_fmri.py  Project: pelednoam/mmvt
def create_annot_csv(subject, parc, hemi, source_file, surf_name):
    labels = mne.read_labels_from_annot(subject, parc, hemi, surf_name)
    old, brain = get_hemi_data(source_file, hemi, surf_name)
    colors = np.zeros((old.mlab_data.shape[0], 3))  #  arrToColors(old.mlab_data, colorsMap='RdBu_r')[:, :3]
    brain.toggle_toolbars(True)
    for label_ind, label in enumerate(labels):
        # label = labels[46]
        brain.add_label(label)
        print(label)
Example #44
0
def test_annot_io():
    """Test I/O from and to *.annot files"""
    # copy necessary files from fsaverage to tempdir
    tempdir = _TempDir()
    subject = 'fsaverage'
    label_src = os.path.join(subjects_dir, 'fsaverage', 'label')
    surf_src = os.path.join(subjects_dir, 'fsaverage', 'surf')
    label_dir = os.path.join(tempdir, subject, 'label')
    surf_dir = os.path.join(tempdir, subject, 'surf')
    os.makedirs(label_dir)
    os.mkdir(surf_dir)
    shutil.copy(os.path.join(label_src, 'lh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(label_src, 'rh.PALS_B12_Lobes.annot'), label_dir)
    shutil.copy(os.path.join(surf_src, 'lh.white'), surf_dir)
    shutil.copy(os.path.join(surf_src, 'rh.white'), surf_dir)

    # read original labels
    assert_raises(IOError, read_labels_from_annot, subject, 'PALS_B12_Lobesey',
                  subjects_dir=tempdir)
    labels = read_labels_from_annot(subject, 'PALS_B12_Lobes',
                                    subjects_dir=tempdir)

    # test saving parcellation only covering one hemisphere
    parc = [l for l in labels if l.name == 'LOBE.TEMPORAL-lh']
    write_labels_to_annot(parc, subject, 'myparc', subjects_dir=tempdir)
    parc1 = read_labels_from_annot(subject, 'myparc', subjects_dir=tempdir)
    parc1 = [l for l in parc1 if not l.name.startswith('unknown')]
    assert_equal(len(parc1), len(parc))
    for l1, l in zip(parc1, parc):
        assert_labels_equal(l1, l)

    # test saving only one hemisphere
    parc = [l for l in labels if l.name.startswith('LOBE')]
    write_labels_to_annot(parc, subject, 'myparc2', hemi='lh',
                          subjects_dir=tempdir)
    annot_fname = os.path.join(tempdir, subject, 'label', '%sh.myparc2.annot')
    assert_true(os.path.isfile(annot_fname % 'l'))
    assert_false(os.path.isfile(annot_fname % 'r'))
    parc1 = read_labels_from_annot(subject, 'myparc2',
                                   annot_fname=annot_fname % 'l',
                                   subjects_dir=tempdir)
    parc_lh = [l for l in parc if l.name.endswith('lh')]
    for l1, l in zip(parc1, parc_lh):
        assert_labels_equal(l1, l)
Example #45
0
File: show_fmri.py  Project: pelednoam/mmvt
def save_labels_from_annotation(subject, parc, surf_name, fol=""):
    brain = Brain(subject, "both", surf_name, curv=False)
    labels = mne.read_labels_from_annot(subject, parc, "both", surf_name)
    if fol == "":
        fol = os.path.join(os.environ["SUBJECTS_DIR"], os.environ["SUBJECT"], "label", "{}_labels".format(parc))
        if not os.path.isdir(fol):
            os.mkdir(fol)
    for ind, label in enumerate(labels):
        print("label {}/{}".format(ind, len(labels)))
        label.save(os.path.join(fol, label.name))
Example #46
0
File: show_fmri.py  Project: pelednoam/mmvt
def create_annot_dic(subject, parc, hemi, surf_name, obj_positions):
    labels = mne.read_labels_from_annot(subject, parc, hemi, surf_name)
    for label in [labels[161]]:
        print(len(label.pos), len(obj_positions))
        for label_pos, obj_pos in zip(label.pos, obj_positions):
            label_pos = round_arr(label_pos * 1000)
            obj_pos = round_arr(obj_pos)
            eq = np.all(label_pos == obj_pos)
            if not eq:
                print(label_pos, obj_pos)
Example #47
0
def test_label_center_of_mass():
    """Test computing the center of mass of a label."""
    stc = read_source_estimate(stc_fname)
    stc.lh_data[:] = 0
    vertex_stc = stc.center_of_mass('sample', subjects_dir=subjects_dir)[0]
    assert_equal(vertex_stc, 124791)
    label = Label(stc.vertices[1], pos=None, values=stc.rh_data.mean(axis=1),
                  hemi='rh', subject='sample')
    vertex_label = label.center_of_mass(subjects_dir=subjects_dir)
    assert_equal(vertex_label, vertex_stc)

    labels = read_labels_from_annot('sample', parc='aparc.a2009s',
                                    subjects_dir=subjects_dir)
    src = read_source_spaces(src_fname)
    # Try a couple of random ones, one from left and one from right
    # Visually verified in about the right place using mne_analyze
    for label, expected in zip([labels[2], labels[3], labels[-5]],
                               [141162, 145221, 55979]):
        label.values[:] = -1
        pytest.raises(ValueError, label.center_of_mass,
                      subjects_dir=subjects_dir)
        label.values[:] = 0
        pytest.raises(ValueError, label.center_of_mass,
                      subjects_dir=subjects_dir)
        label.values[:] = 1
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir), expected)
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
                                          restrict_vertices=label.vertices),
                     expected)
        # restrict to source space
        idx = 0 if label.hemi == 'lh' else 1
        # this simple nearest version is not equivalent, but is probably
        # close enough for many labels (including the test ones):
        pos = label.pos[np.where(label.vertices == expected)[0][0]]
        pos = (src[idx]['rr'][src[idx]['vertno']] - pos)
        pos = np.argmin(np.sum(pos * pos, axis=1))
        src_expected = src[idx]['vertno'][pos]
        # see if we actually get the same one
        src_restrict = np.intersect1d(label.vertices, src[idx]['vertno'])
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
                                          restrict_vertices=src_restrict),
                     src_expected)
        assert_equal(label.center_of_mass(subjects_dir=subjects_dir,
                                          restrict_vertices=src),
                     src_expected)
    # degenerate cases
    pytest.raises(ValueError, label.center_of_mass, subjects_dir=subjects_dir,
                  restrict_vertices='foo')
    pytest.raises(TypeError, label.center_of_mass, subjects_dir=subjects_dir,
                  surf=1)
    pytest.raises(IOError, label.center_of_mass, subjects_dir=subjects_dir,
                  surf='foo')
Example #48
0
def test_source_space():
    "Test SourceSpace Dimension"
    subject = 'fsaverage'
    data_path = mne.datasets.sample.data_path()
    mri_sdir = os.path.join(data_path, 'subjects')
    mri_dir = os.path.join(mri_sdir, subject)
    src_path = os.path.join(mri_dir, 'bem', subject + '-ico-5-src.fif')
    label_dir = os.path.join(mri_dir, 'label')
    label_ba1 = mne.read_label(os.path.join(label_dir, 'lh.BA1.label'))
    label_v1 = mne.read_label(os.path.join(label_dir, 'lh.V1.label'))
    label_mt = mne.read_label(os.path.join(label_dir, 'lh.MT.label'))
    label_ba1_v1 = label_ba1 + label_v1
    label_v1_mt = label_v1 + label_mt

    src = mne.read_source_spaces(src_path)
    source = SourceSpace((src[0]['vertno'], src[1]['vertno']), subject,
                         'ico-5', mri_sdir)
    index = source.dimindex(label_v1)
    source_v1 = source[index]
    index = source.dimindex(label_ba1_v1)
    source_ba1_v1 = source[index]
    index = source.dimindex(label_v1_mt)
    source_v1_mt = source[index]
    index = source_ba1_v1.dimindex(source_v1_mt)
    source_v1_intersection = source_ba1_v1[index]
    assert_source_space_equal(source_v1, source_v1_intersection)

    # index from label
    index = source.index_for_label(label_v1)
    assert_array_equal(index.source[index.x].vertno[0],
                       np.intersect1d(source.lh_vertno, label_v1.vertices, 1))

    # parcellation and cluster localization
    parc = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=mri_sdir)
    indexes = [source.index_for_label(label) for label in parc
               if len(label) > 10]
    x = np.vstack([index.x for index in indexes])
    ds = source._cluster_properties(x)
    for i in range(ds.n_cases):
        eq_(ds[i, 'location'], parc[i].name)

    # multiple labels
    lingual_index = source.dimindex('lingual-lh')
    cuneus_index = source.dimindex('cuneus-lh')
    assert_array_equal(source.dimindex(('cuneus-lh', 'lingual-lh')),
                       np.logical_or(cuneus_index, lingual_index))
    lingual_source = source[lingual_index]
    cuneus_source = source[cuneus_index]
    sub_source = source[source.dimindex(('cuneus-lh', 'lingual-lh'))]
    eq_(sub_source[sub_source.dimindex('lingual-lh')], lingual_source)
    eq_(sub_source[sub_source.dimindex('cuneus-lh')], cuneus_source)
    eq_(len(sub_source), len(lingual_source) + len(cuneus_source))
Example #49
0
def test_annot_io():
    """Test I/O from and to *.annot files"""
    # copy necessary files from fsaverage to tempdir
    tempdir = _TempDir()
    subject = "fsaverage"
    label_src = os.path.join(subjects_dir, "fsaverage", "label")
    surf_src = os.path.join(subjects_dir, "fsaverage", "surf")
    label_dir = os.path.join(tempdir, subject, "label")
    surf_dir = os.path.join(tempdir, subject, "surf")
    os.makedirs(label_dir)
    os.mkdir(surf_dir)
    shutil.copy(os.path.join(label_src, "lh.PALS_B12_Lobes.annot"), label_dir)
    shutil.copy(os.path.join(label_src, "rh.PALS_B12_Lobes.annot"), label_dir)
    shutil.copy(os.path.join(surf_src, "lh.white"), surf_dir)
    shutil.copy(os.path.join(surf_src, "rh.white"), surf_dir)

    # read original labels
    assert_raises(IOError, read_labels_from_annot, subject, "PALS_B12_Lobesey", subjects_dir=tempdir)
    labels = read_labels_from_annot(subject, "PALS_B12_Lobes", subjects_dir=tempdir)

    # test saving parcellation only covering one hemisphere
    parc = [l for l in labels if l.name == "LOBE.TEMPORAL-lh"]
    write_labels_to_annot(parc, subject, "myparc", subjects_dir=tempdir)
    parc1 = read_labels_from_annot(subject, "myparc", subjects_dir=tempdir)
    parc1 = [l for l in parc1 if not l.name.startswith("unknown")]
    assert_equal(len(parc1), len(parc))
    for l1, l in zip(parc1, parc):
        assert_labels_equal(l1, l)

    # test saving only one hemisphere
    parc = [l for l in labels if l.name.startswith("LOBE")]
    write_labels_to_annot(parc, subject, "myparc2", hemi="lh", subjects_dir=tempdir)
    annot_fname = os.path.join(tempdir, subject, "label", "%sh.myparc2.annot")
    assert_true(os.path.isfile(annot_fname % "l"))
    assert_false(os.path.isfile(annot_fname % "r"))
    parc1 = read_labels_from_annot(subject, "myparc2", annot_fname=annot_fname % "l", subjects_dir=tempdir)
    parc_lh = [l for l in parc if l.name.endswith("lh")]
    for l1, l in zip(parc1, parc_lh):
        assert_labels_equal(l1, l)
Example #50
0
def test_labels_to_stc():
    """Test labels_to_stc."""
    labels = read_labels_from_annot(
        'sample', 'aparc', subjects_dir=subjects_dir)
    values = np.random.RandomState(0).randn(len(labels))
    with pytest.raises(ValueError, match='1 or 2 dim'):
        labels_to_stc(labels, values[:, np.newaxis, np.newaxis])
    with pytest.raises(ValueError, match=r'values\.shape'):
        labels_to_stc(labels, values[np.newaxis])
    stc = labels_to_stc(labels, values)
    for value, label in zip(values, labels):
        stc_label = stc.in_label(label)
        assert (stc_label.data == value).all()
    stc = read_source_estimate(stc_fname, 'sample')
Example #51
0
def test_split_label():
    """Test splitting labels"""
    aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
                                   regexp='lingual', subjects_dir=subjects_dir)
    lingual = aparc[0]

    # Test input error
    assert_raises(ValueError, lingual.split, 'bad_input_string')

    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)

    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])

    # check vertices add up
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    lingual_reconst.color = lingual.color
    assert_labels_equal(lingual_reconst, lingual)

    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)

    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
    assert_array_equal(antmost.vertices, fs_vert)

    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")

    # Apply contiguous splitting to DMN label from parcellation in Yeo, 2011
    label_default_mode = read_label(op.join(subjects_dir, 'fsaverage', 'label',
                                            'lh.7Networks_7.label'))
    DMN_sublabels = label_default_mode.split(parts='contiguous',
                                             subject='fsaverage',
                                             subjects_dir=subjects_dir)
    assert_equal([len(label.vertices) for label in DMN_sublabels],
                 [16181, 7022, 5965, 5300, 823] + [1] * 23)
Example #52
0
File: show_fmri.py  Project: pelednoam/mmvt
def morph_labels(subject_from, subject_to, parc, surf_name="pial", smooth=2, overwrite=True):
    """
        mne_morph_labels --from fsaverage --to mg79 --labeldir /homes/5/npeled/space3/subjects/fsaverage/label/laus500_labels --smooth 5
    """
    # brain = Brain(subject_from, 'both', surf_name, curv=False)
    labels = mne.read_labels_from_annot(subject_from, parc, "both", surf_name)
    morphed_labels = []
    for ind, label in enumerate(labels):
        try:
            print("label {}/{}".format(ind, len(labels)))
            label.values.fill(1.0)
            morphed_label = label.morph(subject_from, subject_to, smooth)
            morphed_labels.append(morphed_label)
        except Exception:
            print("can't morph label {}".format(label.name))
            print(sys.exc_info()[1])
    print("{} labels were morphed successfully.".format(len(morphed_labels)))
    mne.write_labels_to_annot(morphed_labels, subject_to, parc, overwrite, hemi="both")
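# Hypothetical call mirroring the mne_morph_labels command in the docstring
# (the parcellation name 'laus500' is a placeholder for the annot to morph):
morph_labels('fsaverage', 'mg79', 'laus500', surf_name='pial', smooth=5)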
Example #53
0
def loadannot_mne(p,subj,subjdir,labnam=None,surf_type='pial',surf_struct=None,
        quiet=False):

    verbosity = 'ERROR' if quiet else 'WARNING'

    from distutils.version import LooseVersion

    if LooseVersion(mne.__version__) >= LooseVersion('0.8'):
        #MNE python changed the API to read an annotation twice in the same
        #release cycle. Check for both versions.
        try:
            annot = mne.read_labels_from_annot(parc=p, subject=subj, 
                surf_name=surf_type, subjects_dir=subjdir, verbose=verbosity)
        except:
            annot = mne.read_annot(parc=p, subject=subj,
                surf_name=surf_type, subjects_dir=subjdir, verbose=verbosity)
    else:
        annot = mne.labels_from_parc(parc=p, subject=subj, surf_name=surf_type,
            subjects_dir=subjdir, verbose=verbosity)
        annot = annot[0] #discard the color table
    return annot
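# Hypothetical call: read the 'aparc' parcellation of one subject on the pial
# surface (subject name and subjects directory are placeholders):
labels = loadannot_mne('aparc', 'sub01', '/data/freesurfer/subjects',
                       surf_type='pial', quiet=True)
label_names = [label.name for label in labels]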
Example #54
0
def test_split_label():
    """Test splitting labels"""
    aparc = read_labels_from_annot('fsaverage', 'aparc', 'lh',
                                   regexp='lingual', subjects_dir=subjects_dir)
    lingual = aparc[0]

    # split with names
    parts = ('lingual_post', 'lingual_ant')
    post, ant = split_label(lingual, parts, subjects_dir=subjects_dir)

    # check output names
    assert_equal(post.name, parts[0])
    assert_equal(ant.name, parts[1])

    # check vertices add up
    lingual_reconst = post + ant
    lingual_reconst.name = lingual.name
    lingual_reconst.comment = lingual.comment
    lingual_reconst.color = lingual.color
    assert_labels_equal(lingual_reconst, lingual)

    # compare output of Label.split() method
    post1, ant1 = lingual.split(parts, subjects_dir=subjects_dir)
    assert_labels_equal(post1, post)
    assert_labels_equal(ant1, ant)

    # compare fs_like split with freesurfer split
    antmost = split_label(lingual, 40, None, subjects_dir, True)[-1]
    fs_vert = [210, 4401, 7405, 12079, 16276, 18956, 26356, 32713, 32716,
               32719, 36047, 36050, 42797, 42798, 42799, 59281, 59282, 59283,
               71864, 71865, 71866, 71874, 71883, 79901, 79903, 79910, 103024,
               107849, 107850, 122928, 139356, 139357, 139373, 139374, 139375,
               139376, 139377, 139378, 139381, 149117, 149118, 149120, 149127]
    assert_array_equal(antmost.vertices, fs_vert)

    # check default label name
    assert_equal(antmost.name, "lingual_div40-lh")
Example #55
0
# Compute inverse solution
snr = 3.0            # use smaller SNR for raw data
inv_method = 'dSPM'  # sLORETA, MNE, dSPM
parc = 'aparc'       # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'

lambda2 = 1.0 / snr ** 2

# Compute inverse operator
inverse_operator = make_inverse_operator(evoked.info, fwd, noise_cov,
                                         depth=None, fixed=False)

stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
                    pick_ori=None)

# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
    subject, parc=parc, subjects_dir=subjects_dir)

###############################################################################
# Average the source estimates within each label of the cortical parcellation
# and each sub structure contained in the src space

src = inverse_operator['src']

label_ts = mne.extract_label_time_course(
    [stc], labels_parc, src, mode='mean', allow_empty=True)

# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][71, :].T, 'r', label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
Example #56
0
import mne
import sys
import numpy as np
import pandas as pd

from mne.minimum_norm import read_inverse_operator, source_induced_power
# epochs_folder, mne_folder and subjects_dir come from the project's settings
# module (cf. the `from my_settings import ...` line in a later example)

subject = sys.argv[1]

epochs = mne.read_epochs(epochs_folder + "%s_target-epo.fif" % subject)

inv = read_inverse_operator(mne_folder + "%s-inv.fif" % subject)

labels = mne.read_labels_from_annot(
    subject,
    parc='PALS_B12_Lobes',
    # regexp="Bro",
    subjects_dir=subjects_dir)
labels_selc = labels[9], labels[10]

frequencies = np.arange(8, 13, 1)  # define frequencies of interest
n_cycles = frequencies / 3.  # different number of cycle per frequency
method = "dSPM"

sides = ["left", "right"]
conditions = ["ctl", "ent"]
cor = ["correct", "incorrect"]
phase = ["in_phase", "out_phase"]
congrunet = ["cong", "incong"]

columns_keys = ["subject", "side", "condition", "phase", "ROI"]
Example #57
0
----------
.. [1] Glasser MF et al. (2016) A multi-modal parcellation of human
       cerebral cortex. Nature 536:171-178.
"""
# Author: Eric Larson <*****@*****.**>
#
# License: BSD (3-clause)

from surfer import Brain

import mne

subjects_dir = mne.datasets.sample.data_path() + '/subjects'
mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir,
                                        verbose=True)
labels = mne.read_labels_from_annot(
    'fsaverage', 'HCPMMP1', 'lh', subjects_dir=subjects_dir)

brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1')
aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
brain.add_label(aud_label, borders=False)

###############################################################################
# We can also plot a combined set of labels (23 per hemisphere).

brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1_combined')
Example #58
0
def annot(annot, subject='fsaverage', surf='smoothwm', borders=False, alpha=0.7,
          hemi=None, views=('lat', 'med'), w=None, h=None, axw=None, axh=None,
          background=None, parallel=True, subjects_dir=None):
    """Plot the parcellation in an annotation file

    Parameters
    ----------
    annot : str
        Name of the annotation (e.g., "PALS_B12_LOBES").
    subject : str
        Name of the subject (default 'fsaverage').
    surf : 'inflated' | 'pial' | 'smoothwm' | 'sphere' | 'white'
        Freesurfer surface to use as brain geometry.
    borders : bool | int
        Show only label borders (PySurfer Brain.add_annotation() argument).
    alpha : scalar
        Alpha of the annotation (1=opaque, 0=transparent, default 0.7).
    hemi : 'lh' | 'rh' | 'both' | 'split'
        Which hemispheres to plot (by default, each hemisphere that contains
        more than one label in the annot file is included).
    views : str | iterator of str
        View or views to show in the figure.
    w, h, axw, axh : scalar
        Layout parameters (figure width/height, subplot width/height).
    background : mayavi color
        Figure background color.
    parallel : bool
        Set views to parallel projection (default ``True``).
    subjects_dir : None | str
        Override the subjects_dir associated with the source space dimension.

    Returns
    -------
    brain : surfer.Brain
        PySurfer Brain instance.
    """
    if hemi is None:
        annot_lh = mne.read_labels_from_annot(subject, annot, 'lh',
                                              subjects_dir=subjects_dir)
        use_lh = len(annot_lh) > 1
        annot_rh = mne.read_labels_from_annot(subject, annot, 'rh',
                                              subjects_dir=subjects_dir)
        use_rh = len(annot_rh) > 1
        if use_lh and use_rh:
            hemi = 'split'
        elif use_lh:
            hemi = 'lh'
        elif use_rh:
            hemi = 'rh'
        else:
            raise ValueError("Neither hemisphere contains more than one label")

    brain = _surfer_brain(subject, surf, hemi, views, w, h, axw, axh, background,
                          subjects_dir)
    brain.add_annotation(annot, borders, alpha)

    if parallel:
        _set_parallel(brain, surf)

    return brain
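# Hypothetical call: plot the left-hemisphere PALS Brodmann annotation on
# fsaverage (requires PySurfer plus the _surfer_brain/_set_parallel helpers
# used inside annot; the subjects directory is a placeholder):
brain = annot('PALS_B12_Brodmann', subject='fsaverage', hemi='lh',
              views='lat', subjects_dir='/data/freesurfer/subjects')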
Example #59
0
import sys
import numpy as np
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs

from my_settings import (mne_folder, epochs_folder, source_folder)

subject = sys.argv[1]

method = "dSPM"
snr = 1.
lambda2 = 1. / snr**2

labels = mne.read_labels_from_annot(
    subject=subject, parc="PALS_B12_Brodmann", regexp="Brodmann")

condition = "interupt"

inv = read_inverse_operator(mne_folder + "%s_%s-inv.fif" % (subject, condition))
epochs = mne.read_epochs(epochs_folder + "%s_%s-epo.fif" % (subject, condition))
# epochs.resample(500)

stcs = apply_inverse_epochs(
    epochs["press"], inv, lambda2, method=method, pick_ori=None)
ts = [
    mne.extract_label_time_course(
        stc, labels, inv["src"], mode="mean_flip") for stc in stcs
]
Example #60
0
def test_write_labels_to_annot():
    """Test writing FreeSurfer parcellation from labels"""
    tempdir = _TempDir()

    labels = read_labels_from_annot('sample', subjects_dir=subjects_dir)

    # create temporary subjects-dir skeleton
    surf_dir = op.join(subjects_dir, 'sample', 'surf')
    temp_surf_dir = op.join(tempdir, 'sample', 'surf')
    os.makedirs(temp_surf_dir)
    shutil.copy(op.join(surf_dir, 'lh.white'), temp_surf_dir)
    shutil.copy(op.join(surf_dir, 'rh.white'), temp_surf_dir)
    os.makedirs(op.join(tempdir, 'sample', 'label'))

    # test automatic filenames
    dst = op.join(tempdir, 'sample', 'label', '%s.%s.annot')
    write_labels_to_annot(labels, 'sample', 'test1', subjects_dir=tempdir)
    assert_true(op.exists(dst % ('lh', 'test1')))
    assert_true(op.exists(dst % ('rh', 'test1')))
    # lh only
    for label in labels:
        if label.hemi == 'lh':
            break
    write_labels_to_annot([label], 'sample', 'test2', subjects_dir=tempdir)
    assert_true(op.exists(dst % ('lh', 'test2')))
    assert_true(op.exists(dst % ('rh', 'test2')))
    # rh only
    for label in labels:
        if label.hemi == 'rh':
            break
    write_labels_to_annot([label], 'sample', 'test3', subjects_dir=tempdir)
    assert_true(op.exists(dst % ('lh', 'test3')))
    assert_true(op.exists(dst % ('rh', 'test3')))
    # label alone
    assert_raises(TypeError, write_labels_to_annot, labels[0], 'sample',
                  'test4', subjects_dir=tempdir)

    # write left and right hemi labels with filenames:
    fnames = [op.join(tempdir, hemi + '-myparc') for hemi in ['lh', 'rh']]
    with warnings.catch_warnings(record=True):  # specify subject_dir param
        for fname in fnames:
            write_labels_to_annot(labels, annot_fname=fname)

    # read it back
    labels2 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                     annot_fname=fnames[0])
    labels22 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                      annot_fname=fnames[1])
    labels2.extend(labels22)

    names = [label.name for label in labels2]

    for label in labels:
        idx = names.index(label.name)
        assert_labels_equal(label, labels2[idx])

    # same with label-internal colors
    for fname in fnames:
        write_labels_to_annot(labels, 'sample', annot_fname=fname,
                              overwrite=True, subjects_dir=subjects_dir)
    labels3 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                     annot_fname=fnames[0])
    labels33 = read_labels_from_annot('sample', subjects_dir=subjects_dir,
                                      annot_fname=fnames[1])
    labels3.extend(labels33)
    names3 = [label.name for label in labels3]
    for label in labels:
        idx = names3.index(label.name)
        assert_labels_equal(label, labels3[idx])

    # make sure we can't overwrite things
    assert_raises(ValueError, write_labels_to_annot, labels, 'sample',
                  annot_fname=fnames[0], subjects_dir=subjects_dir)

    # however, this works
    write_labels_to_annot(labels, 'sample', annot_fname=fnames[0],
                          overwrite=True, subjects_dir=subjects_dir)

    # label without color
    labels_ = labels[:]
    labels_[0] = labels_[0].copy()
    labels_[0].color = None
    write_labels_to_annot(labels_, 'sample', annot_fname=fnames[0],
                          overwrite=True, subjects_dir=subjects_dir)

    # duplicate color
    labels_[0].color = labels_[2].color
    assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
                  annot_fname=fnames[0], overwrite=True,
                  subjects_dir=subjects_dir)

    # invalid color inputs
    labels_[0].color = (1.1, 1., 1., 1.)
    assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
                  annot_fname=fnames[0], overwrite=True,
                  subjects_dir=subjects_dir)

    # overlapping labels
    labels_ = labels[:]
    cuneus_lh = labels[6]
    precuneus_lh = labels[50]
    labels_.append(precuneus_lh + cuneus_lh)
    assert_raises(ValueError, write_labels_to_annot, labels_, 'sample',
                  annot_fname=fnames[0], overwrite=True,
                  subjects_dir=subjects_dir)

    # unlabeled vertices
    labels_lh = [label for label in labels if label.name.endswith('lh')]
    write_labels_to_annot(labels_lh[1:], 'sample', annot_fname=fnames[0],
                          overwrite=True, subjects_dir=subjects_dir)
    labels_reloaded = read_labels_from_annot('sample', annot_fname=fnames[0],
                                             subjects_dir=subjects_dir)
    assert_equal(len(labels_lh), len(labels_reloaded))
    label0 = labels_lh[0]
    label1 = labels_reloaded[-1]
    assert_equal(label1.name, "unknown-lh")
    assert_true(np.all(in1d(label0.vertices, label1.vertices)))

    # unnamed labels
    labels4 = labels[:]
    labels4[0].name = None
    assert_raises(ValueError, write_labels_to_annot, labels4,
                  annot_fname=fnames[0])