Example #1
def test_stc_to_label():
    """Test stc_to_label
    """
    src = read_source_spaces(src_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = stc_to_label(stc, src='sample', smooth=3)
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels2 = stc_to_label(stc, src=src, smooth=3)
    assert_true(len(w) == 1)
    assert_true(len(labels1) == len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=3,
                                            connected=True)
    assert_true(len(w) == 1)
    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=3,
                  connected=True)
    assert_raises(RuntimeError, stc_to_label, stc, src=src_bad, connected=True)
    assert_true(len(labels_lh) == 1)
    assert_true(len(labels_rh) == 1)
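Distilled from the test above, a minimal sketch of the plain stc_to_label call pattern; the file names and the 'sample' subject are placeholders of my own, not taken from the example:

import mne

# placeholder inputs: a surface source space and a matching surface STC
src = mne.read_source_spaces('sample-oct-6-src.fif')
stc = mne.read_source_estimate('sample_audvis-meg', subject='sample')

# one Label per hemisphere that contains non-zero vertices
labels = mne.stc_to_label(stc, src=src, smooth=True)

# or, with connected=True, one list of connected Labels per hemisphere
labels_lh, labels_rh = mne.stc_to_label(stc, src=src, smooth=True, connected=True)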
Example #2
def test_stc_to_label():
    """Test stc_to_label
    """
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)

    assert_true(len(w) > 0)
    assert_raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    assert_raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
    assert_raises(ValueError, spatial_tris_connectivity, tris,
                  remap_vertices=False)
    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
    assert_true(connectivity.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    assert_raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    assert_raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert_true(len(labels_lh) > 1)
    assert_true(len(labels_rh) > 1)

    # with smooth='patch'
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert_equal(len(w), 1)
    assert_equal(len(labels_patch), len(labels1))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
Example #3
def test_stc_to_label():
    """Test stc_to_label
    """
    src = read_source_spaces(src_fname)
    stc = read_source_estimate(stc_fname)
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = stc_to_label(stc, src='sample', smooth=3)
    labels2 = stc_to_label(stc, src=src, smooth=3)
    assert_true(len(labels1) == len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
Example #4
def test_stc_to_label():
    """Test stc_to_label."""
    src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_lh, labels_rh = stc_to_label(stc, src=src, smooth=True,
                                            connected=True)

    pytest.raises(ValueError, stc_to_label, stc, 'sample', smooth=True,
                  connected=True)
    pytest.raises(RuntimeError, stc_to_label, stc, smooth=True, src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
    pytest.raises(ValueError, spatial_tris_connectivity, tris,
                  remap_vertices=False)
    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
    assert (connectivity.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    pytest.raises(TypeError, stc_to_label, stc, src=1, smooth=False,
                  connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src=SourceSpaces([src[0]]),
                  smooth=False, connected=False, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=False,
                  connected=True, subjects_dir=subjects_dir)
    pytest.raises(ValueError, stc_to_label, stc, src='sample', smooth=True,
                  connected=False, subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc, src='sample', smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert (len(labels_lh) > 1)
    assert (len(labels_rh) > 1)

    # with smooth='patch'
    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert len(labels_patch) == len(labels1)
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
Example #5
def test_stc_to_label():
    """Test stc_to_label
    """
    src = read_source_spaces(src_fname)
    stc = SourceEstimate(stc_fname)
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = stc_to_label(stc, src='sample', smooth=3)
    labels2 = stc_to_label(stc, src=src, smooth=3)
    assert_true(len(labels1) == len(labels2))
    for l1, l2 in zip(labels1, labels2):
        for key in l1.keys():
            if key in ['comment', 'hemi']:
                assert_true(l1[key] == l2[key])
            else:
                assert_array_almost_equal(l1[key], l2[key], 4)
Example #6
def apply_rois(fn_stc, tmin, tmax, thr, min_subject='fsaverage'):
    #fn_avg = subjects_dir+'/fsaverage/%s_ROIs/%s-lh.stc' %(method,evt_st)
    stc_avg = mne.read_source_estimate(fn_stc)
    stc_avg = stc_avg.crop(tmin, tmax)
    src_pow = np.sum(stc_avg.data ** 2, axis=1)
    stc_avg.data[src_pow < np.percentile(src_pow, thr)] = 0.
    fn_src = subjects_dir+'/%s/bem/fsaverage-ico-5-src.fif' %min_subject
    src_inv = mne.read_source_spaces(fn_src)
    func_labels_lh, func_labels_rh = mne.stc_to_label(
                    stc_avg, src=src_inv, smooth=True,
                    subjects_dir=subjects_dir,
                    connected=True)
    # Left hemisphere definition
    i = 0
    labels_path = fn_stc[:fn_stc.rfind('-')] + '/ini'
    reset_directory(labels_path)
    while i < len(func_labels_lh):
        func_label = func_labels_lh[i]
        func_label.save(labels_path + '/ROI_%d' %(i))
        i = i + 1
    # right hemisphere definition
    j = 0
    while j < len(func_labels_rh):
        func_label = func_labels_rh[j]
        func_label.save(labels_path + '/ROI_%d' %(j))
        j = j + 1
Example #7
def apply_rois(fn_stc, event, tmin=0.0, tmax=0.3, min_subject='fsaverage', thr=99):
    """
    Compute regions of interest (ROI) based on events.

    Parameters
    ----------
    fn_stc : string
        evoked and morphed STC.
    event: string
        event of the related STC.
    tmin, tmax: float
        segment for ROIs definition.
    min_subject: string
        the subject as the common brain space.
    thr: float or int
        threshold of STC used for ROI identification.
    """
    fnlist = get_files_from_list(fn_stc)
    # loop across all filenames
    for ifn_stc in fnlist:
        subjects_dir = os.environ['SUBJECTS_DIR']
        # extract the subject information from the file name
        stc_path = os.path.split(ifn_stc)[0]
        #name = os.path.basename(fn_stc)
        #tri = name.split('_')[1].split('-')[0]
        min_path = subjects_dir + '/%s' % min_subject
        fn_src = min_path + '/bem/fsaverage-ico-4-src.fif'
        # Make sure the target path exists
        labels_path = stc_path + '/%s/' %event
        reset_directory(labels_path)
        # Read the MNI source space
        src_inv = mne.read_source_spaces(fn_src)
        stc = mne.read_source_estimate(ifn_stc, subject=min_subject)
        bc_stc = stc.copy().crop(tmin, tmax)
        src_pow = np.sum(bc_stc.data ** 2, axis=1)
        bc_stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
        #stc_data = stc_morph.data
        #import pdb
        #pdb.set_trace()
        #zscore stc for ROIs estimation
        #d_mu = stc_data.mean(axis=1, keepdims=True)
        #d_std = stc_data.std(axis=1, ddof=1, keepdims=True)
        #z_data = (stc_data - d_mu)/d_std
        func_labels_lh, func_labels_rh = mne.stc_to_label(
            bc_stc, src=src_inv, smooth=True,
            subjects_dir=subjects_dir,
            connected=True)
        # Left hemisphere definition
        i = 0
        while i < len(func_labels_lh):
            func_label = func_labels_lh[i]
            func_label.save(labels_path + '%s_%s' % (event, str(i)))
            i = i + 1
        # right hemisphere definition
        j = 0
        while j < len(func_labels_rh):
            func_label = func_labels_rh[j]
            func_label.save(labels_path + '%s_%s' % (event, str(j)))
            j = j + 1    
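The apply_rois variants in this listing all share the same preparation step before mne.stc_to_label: crop the STC to a window of interest, sum the squared amplitudes over time for each source, and zero out everything below a percentile of that power. A standalone sketch of the idiom; the file name, time window, and 99th percentile are placeholder values:

import numpy as np
import mne

stc = mne.read_source_estimate('grand_average-lh.stc')  # placeholder path
stc.crop(0.0, 0.3)                                       # keep the window of interest
src_pow = np.sum(stc.data ** 2, axis=1)                  # power per source over time
stc.data[src_pow < np.percentile(src_pow, 99)] = 0.      # keep only the strongest sources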
Example #8
def apply_rois(fn_stc_list, event, min_subject='fsaverage', thr=0.85):
    """
    Compute regions of interest (ROI) based on events.

    Parameters
    ----------
    fn_stc_list : string or list of strings
        evoked and morphed STC file name(s).
    event: string
        event of the related STC.
    min_subject: string
        the subject as the common brain space.
    thr: float or int
        threshold of STC used for ROI identification.
    """
    #from scipy.signal import detrend
    #from scipy.stats.mstats import zscore 
    fnlist = get_files_from_list(fn_stc_list)
    # loop across all filenames
    for fn_stc in fnlist:
        # extract the subject information from the file name
        stc_path = os.path.split(fn_stc)[0]
        min_path = subjects_dir + '/%s' % min_subject
        fn_src = min_path + '/bem/fsaverage-ico-5-src.fif'
        # Make sure the target path exists
        labels_path = stc_path + '/%s/ini' %event
        reset_directory(labels_path)
      
        # Read the MNI source space
        stc = mne.read_source_estimate(fn_stc)
        src_inv = mne.read_source_spaces(fn_src)
        # apply the same relative threshold (thr argument) to both hemispheres
        stc.lh_data[stc.lh_data < thr * np.max(stc.lh_data)] = 0
        stc.rh_data[stc.rh_data < thr * np.max(stc.rh_data)] = 0
        #data_lh=np.squeeze(stc.lh_data)
        #index_lh = np.argwhere(data_lh)
        #stc.lh_data[data_lh<np.percentile(data_lh[index_lh], thr)] = 0
        #data_rh=np.squeeze(stc.rh_data)
        #index_rh = np.argwhere(data_rh)
        #stc.rh_data[data_rh<np.percentile(data_rh[index_rh], thr)] = 0
        #non_index = np.argwhere(data)
        #stc.data[data<np.percentile(data[non_index], thr)] = 0
        func_labels_lh, func_labels_rh = mne.stc_to_label(
                    stc, src=src_inv, smooth=True,
                    subjects_dir=subjects_dir,
                    connected=True)
        # Left hemisphere definition
        i = 0
        while i < len(func_labels_lh):
            func_label = func_labels_lh[i]
            func_label.save(labels_path + '/%s_%d' %(event, i))
            i = i + 1
        # right hemisphere definition
        j = 0
        while j < len(func_labels_rh):
            func_label = func_labels_rh[j]
            func_label.save(labels_path + '/%s_%d' %(event, j))
            j = j + 1
Example #9
def apply_rois(fn_stcs, event='LLst', tmin=0.0, tmax=0.6, tstep=0.05, window=0.2, 
               fmin=4, fmax=8, thr=99, min_subject='fsaverage'): 
    """
    Compute regions of interest (ROI) based on events.

    Parameters
    ----------
    fn_stcs : the file name of morphed stc.
    event: event related with the stc
    thr: the percentile of stc's strength
    min_subject: the subject for the common brain space.
    
    """
    from mne import read_source_spaces 
    fnlist = get_files_from_list(fn_stcs)
    # loop across all filenames
    for fn_stc in fnlist:
        name = os.path.basename(fn_stc)
        subject = name.split('_')[0]
        subjects_dir = os.environ['SUBJECTS_DIR']
        min_dir = subjects_dir + '/%s' %min_subject
        labels_path = min_dir + '/DICS_ROIs/%s/%s/' %(subject, event)
        reset_directory(labels_path)
        src = min_dir + '/bem/%s-ico-4-src.fif' %min_subject
        src_inv = read_source_spaces(src)
        stc = mne.read_source_estimate(fn_stc, subject=min_subject) 
        stc = stc.crop(tmin, tmax)
        src_pow = np.sum(stc.data ** 2, axis=1)
        stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
        tbeg = tmin
        while tbeg < tmax:
            tend = tbeg + window
            win_stc = stc.copy().crop(tbeg, tend)
            stc_data = win_stc.data 
            src_pow = np.sum(stc_data ** 2, axis=1)
            win_stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
            func_labels_lh, func_labels_rh = mne.stc_to_label(
                win_stc, src=src_inv, smooth=True,
                subjects_dir=subjects_dir,
                connected=True)
            # Left hemisphere definition
            i = 0
            while i < len(func_labels_lh):
                func_label = func_labels_lh[i]
                func_label.save(labels_path + '%s_%s_win%.2f_%.2f' % (event, str(i), tbeg, tend))
                i = i + 1
            # right hemisphere definition
            j = 0
            while j < len(func_labels_rh):
                func_label = func_labels_rh[j]
                func_label.save(labels_path + '%s_%s_win%.2f_%.2f' % (event, str(j), tbeg, tend))
                j = j + 1
            tbeg = tbeg + tstep
Example #10
def apply_rois(fn_stcs, evt='LLst', tmin=0.05, tmax=0.25, thr=99, min_subject='fsaverage'): 
    """
    Compute regions of interest (ROI) based on events.

    Parameters
    ----------
    fn_stcs : the file name of morphed stc.
    evt: event related with stc
    thr: the percentile of stc's strength
    min_subject: the subject for the common brain space.
    
    """
    from mne import read_source_spaces 
    fnlist = get_files_from_list(fn_stcs)
    # loop across all filenames
    for fn_stc in fnlist:
        name = os.path.basename(fn_stc)
        subject = name.split('_')[0]
        subjects_dir = os.environ['SUBJECTS_DIR']
        min_dir = subjects_dir + '/%s' %min_subject
        labels_path = min_dir + '/DICS_ROIs/%s/%s/' %(subject, evt)
        reset_directory(labels_path)
        src = min_dir + '/bem/%s-ico-4-src.fif' %min_subject
        src_inv = read_source_spaces(src)
        stc = mne.read_source_estimate(fn_stc, subject=min_subject) 
        stc = stc.crop(tmin, tmax)
        src_pow = np.sum(stc.data ** 2, axis=1)
        stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
        #stc_data = stc_morph.data
        #import pdb
        #pdb.set_trace()
        #zscore stc for ROIs estimation
        #d_mu = stc_data.mean(axis=1, keepdims=True)
        #d_std = stc_data.std(axis=1, ddof=1, keepdims=True)
        #z_data = (stc_data - d_mu)/d_std
        func_labels_lh, func_labels_rh = mne.stc_to_label(
            stc, src=src_inv, smooth=True,
            subjects_dir=subjects_dir,
            connected=True)
        # Left hemisphere definition
        i = 0
        while i < len(func_labels_lh):
            func_label = func_labels_lh[i]
            func_label.save(labels_path + '%s_%s' % (evt, str(i)))
            i = i + 1
        # right hemisphere definition
        j = 0
        while j < len(func_labels_rh):
            func_label = func_labels_rh[j]
            func_label.save(labels_path + '%s_%s' % (evt, str(j)))
            j = j + 1  
Example #11
def apply_rois(fn_stc, tmin, tmax, thr, min_subject='fsaverage'):

    '''
       Make ROIs using the common STCs.
        
       Parameters
       ----------
       fn_stc: string.
            The path of the common STCs
       tmin, tmax: float (s).
            The time range of interest.
       thr: float or int
            The percentile threshold of STC strength.
       min_subject: string.
            The common subject.
       
    '''
    stc_avg = mne.read_source_estimate(fn_stc)
    stc_avg = stc_avg.crop(tmin, tmax)
    src_pow = np.sum(stc_avg.data ** 2, axis=1)
    stc_avg.data[src_pow < np.percentile(src_pow, thr)] = 0.
    fn_src = subjects_dir+'/%s/bem/fsaverage-ico-5-src.fif' %min_subject
    src_inv = mne.read_source_spaces(fn_src)
    func_labels_lh, func_labels_rh = mne.stc_to_label(
                    stc_avg, src=src_inv, smooth=True,
                    subjects_dir=subjects_dir,
                    connected=True)
    # Left hemisphere definition
    i = 0
    labels_path = fn_stc[:fn_stc.rfind('-')] + '/ini'
    reset_directory(labels_path)
    while i < len(func_labels_lh):
        func_label = func_labels_lh[i]
        func_label.save(labels_path + '/ROI_%d' %(i))
        i = i + 1
    # right hemisphere definition
    j = 0
    while j < len(func_labels_rh):
        func_label = func_labels_rh[j]
        func_label.save(labels_path + '/ROI_%d' %(j))
        j = j + 1
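A hedged usage sketch for the function above; the STC path, time window, and percentile are hypothetical, and subjects_dir is assumed to be defined at module level, as the code implies:

# hypothetical call: grand-average STC on fsaverage, 50-250 ms, top 1 % of sources
apply_rois('/data/grand_average/LLst-lh.stc', tmin=0.05, tmax=0.25, thr=99)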
Example #12
# Set parameters
data_path = "/home/custine/MEG/data/krns_kr3/" + subj + "/" + sess + "/"
subjects_dir = "/mnt/file1/binder/KRNS/anatomies/surfaces/"
stc_fname = data_path + "ave_projon/stc/" + subj + "_" + sess + "_" + tag + "_All_c1M-spm-lh.stc"

# stc_file = '/home/custine/MEG/data/krns_kr3/9367/s5/ave_projon/stc/stc_py/9367_s5_run1_Sentence-lh.stc' #9367_s5_Noun_People_All_c1M-spm-lh.stc'
stcMAT_fname = data_path + "ave_projon/stc/" + subj + "_" + sess + "_" + tag + "_All_c1M-spm-lh_AllVertices.txt"
print(stc_fname)


stc = mne.read_source_estimate(stc_fname)
print(stc)
print("Shape of STC")
print(np.shape(stc.data))

j_labels = mne.stc_to_label(stc, src="9367", subjects_dir="/mnt/file1/binder/KRNS/anatomies/surfaces/")
print "Jane Labels"
print j_labels[0]
print

# np.savetxt(stcMAT_fname, stc.data, delimiter = ',')

vert, sampLen = np.shape(stc.data)
print(sampLen)

################################################################################################################################
#######################################All Vertices - Computations################################################################
################################################################################################################################
# x = sampLen ## Number of samples in the data (trials)
#
# new = np.empty([0, x])
Example #13
mean_data = np.mean(np.asarray([s.data for s in stc]), axis=0)
stc_mean = mne.SourceEstimate(
    mean_data, stc[0].vertices, tmin=stc[0].tmin, tstep=stc[0].tstep)

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name

# calc lh label
stc_mean_label = stc_mean.in_label(label_lh)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.7 * np.max(data)] = 0.

func_labels_lh, _ = mne.stc_to_label(
    stc_mean_label,
    src=src,
    smooth=True,
    subjects_dir=subjects_dir,
    connected=True)
# take first as func_labels are ordered based on maximum values in stc
func_label_lh = func_labels_lh[0]

# calc rh label
stc_mean_label = stc_mean.in_label(label_rh)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.7 * np.max(data)] = 0.

_, func_labels_rh = mne.stc_to_label(
    stc_mean_label,
    src=src,
    smooth=True,
    subjects_dir=subjects_dir,
Example #14
    coord0 = mne.vertex_to_mni(vertices=src[0]['vertno'], hemis=0, subject='fsaverage_mne')
    coord1 = mne.vertex_to_mni(vertices=src[1]['vertno'], hemis=1, subject='fsaverage_mne')
    coord = np.vstack([coord0, coord1])
    # store the index of the sources within min_dist of the mask voxels
    b = []
    for i in range(gv.shape[0]):
        dist = np.sqrt((coord[:, 0] - gv[i, 0]) ** 2 + (coord[:, 1] - gv[i, 1]) ** 2 + (coord[:, 2] - gv[i, 2]) ** 2)
        if min(dist) <= min_dist:
            b.append(np.argmin(dist))
    # create a stc with 1s for the near sources
    d = np.zeros([coord.shape[0], 1])
    d[b] = 1
    stc = mne.SourceEstimate(d, vertices=[src[0]['vertno'], src[1]['vertno']],
                             tmin=0, tstep=1, subject='fsaverage_mne')
    # convert the stc to a label so we can morph it per subject later
    avg_label = mne.stc_to_label(stc, src=src, smooth=True, connected=False)
    if len(avg_label) > 2:
        raise ValueError('Found more than one label!')

data = []
for s in subjs:
    # check if it's AFNI or freesurfer label
    if roi.find('label') > 0:
        label_dir = '/Volumes/Shaw/MEG_structural/freesurfer/%s/labels/' % s
        label = mne.read_label(label_dir + roi)
        roi_pretty = roi.split('.')[0]
    else:
        roi_pretty = roi.split('/')[-1].split('+')[0]
    # right labels are normally in the second index
    if avg_label[0] is not None:
        label = avg_label[0].morph(subject_to=s)
Example #15
import mne
import numpy as np
from scipy import stats

pval_thresh = .05
nverts_thresh = 600
b=3

src = mne.setup_source_space(subject='fsaverage',fname=None,spacing='ico5',surface='inflated')

# X will have the pvalues in 0
X = 1-np.array(all_stats[b][1])[:,None]  # p-values
X[X <= 1-pval_thresh] = 0
pval_stc = mne.SourceEstimate(X, vertices=stc.vertno, tmin=0, tstep=1,subject='fsaverage')
lh_labels, rh_labels = mne.stc_to_label(pval_stc, src=src, smooth=5, connected=True)
# was having problems mixing p-values and correlations in the stc, so let's keep them separated
X = np.empty([pval_stc.data.shape[0], 2])
X[:, 0] = all_stats[b][2][0]
X[:, 1] = all_stats[b][2][1]
corr_stc = mne.SourceEstimate(X, vertices=stc.vertno, tmin=0, tstep=1,subject='fsaverage')

my_txt = ['left','right']
for idx, labels in enumerate([lh_labels, rh_labels]):
    print('Found %d seeds in %s hemisphere' % (len(labels), my_txt[idx]))
    good_seeds = [label for label in labels if len(label.vertices) > nverts_thresh]
    num_sources = [pval_stc.in_label(label).data.shape[0] for label in good_seeds]
    print('%d of those are big enough' % len(good_seeds))
    for s in range(len(good_seeds)):
        print('Seed %d: %d sources, mean 1: %.2f, mean 2: %.2f' % (s + 1, num_sources[s],
            np.mean(corr_stc.in_label(good_seeds[s]).data[:, 0]),
Example #16
def test_stc_to_label():
    """Test stc_to_label."""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_lh, labels_rh = stc_to_label(stc,
                                            src=src,
                                            smooth=True,
                                            connected=True)

    assert_true(len(w) > 0)
    assert_raises(ValueError,
                  stc_to_label,
                  stc,
                  'sample',
                  smooth=True,
                  connected=True)
    assert_raises(RuntimeError,
                  stc_to_label,
                  stc,
                  smooth=True,
                  src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
    assert_raises(ValueError,
                  spatial_tris_connectivity,
                  tris,
                  remap_vertices=False)
    connectivity = spatial_tris_connectivity(tris, remap_vertices=True)
    assert_true(connectivity.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    assert_raises(TypeError,
                  stc_to_label,
                  stc,
                  src=1,
                  smooth=False,
                  connected=False,
                  subjects_dir=subjects_dir)
    assert_raises(ValueError,
                  stc_to_label,
                  stc,
                  src=SourceSpaces([src[0]]),
                  smooth=False,
                  connected=False,
                  subjects_dir=subjects_dir)
    assert_raises(ValueError,
                  stc_to_label,
                  stc,
                  src='sample',
                  smooth=False,
                  connected=True,
                  subjects_dir=subjects_dir)
    assert_raises(ValueError,
                  stc_to_label,
                  stc,
                  src='sample',
                  smooth=True,
                  connected=False,
                  subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc,
                                        src='sample',
                                        smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert_true(len(labels_lh) > 1)
    assert_true(len(labels_rh) > 1)

    # with smooth='patch'
    with warnings.catch_warnings(record=True) as w:  # connectedness warning
        warnings.simplefilter('always')
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert_equal(len(w), 1)
    assert_equal(len(labels_patch), len(labels1))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
Example #17
def apply_rois(fn_stc, event, tmin=0.0, tmax=0.3, tstep=0.05, window=0.2, 
                min_subject='fsaverage', thr=99):
    """
    Compute regions of interest (ROI) based on events.

    Parameters
    ----------
    fn_stc : string
        evoked and morphed STC.
    event: string
        event of the related STC.
    tmin, tmax: float
        segment for ROIs definition.
    min_subject: string
        the subject as the common brain space.
    thr: float or int
        threshold of STC used for ROI identification.
    """
    from scipy.signal import detrend
    from scipy.stats.mstats import zscore 
    fnlist = get_files_from_list(fn_stc)
    # loop across all filenames
    for ifn_stc in fnlist:
        subjects_dir = os.environ['SUBJECTS_DIR']
        # extract the subject information from the file name
        stc_path = os.path.split(ifn_stc)[0]
        #name = os.path.basename(fn_stc)
        #tri = name.split('_')[1].split('-')[0]
        min_path = subjects_dir + '/%s' % min_subject
        fn_src = min_path + '/bem/fsaverage-ico-4-src.fif'
        # Make sure the target path exists
        labels_path = stc_path + '/%s/' %event
        reset_directory(labels_path)
        # Read the MNI source space
        src_inv = mne.read_source_spaces(fn_src)
        stc = mne.read_source_estimate(ifn_stc, subject=min_subject)
        #stc = stc.crop(tmin, tmax)
        #src_pow = np.sum(stc.data ** 2, axis=1)
        #stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
        stc = stc.crop(tmin, tmax)
        cal_data = stc.data
        dt_data = detrend(cal_data, axis=-1)
        zc_data = zscore(dt_data, axis=-1)
        src_pow = np.sum(zc_data ** 2, axis=1)
        stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
        tbeg = tmin
        count = 1
        while tbeg < tmax:
            tend = tbeg + window
            if tend > tmax:
                break
            win_stc = stc.copy().crop(tbeg, tend)
            stc_data = win_stc.data 
            src_pow = np.sum(stc_data ** 2, axis=1)
            win_stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
            func_labels_lh, func_labels_rh = mne.stc_to_label(
                win_stc, src=src_inv, smooth=True,
                subjects_dir=subjects_dir,
                connected=True)
            # Left hemisphere definition
            i = 0
            while i < len(func_labels_lh):
                func_label = func_labels_lh[i]
                func_label.save(labels_path + '%s_%s_win%d' % (event, str(i), count))
                i = i + 1
            # right hemisphere definition
            j = 0
            while j < len(func_labels_rh):
                func_label = func_labels_rh[j]
                func_label.save(labels_path +  '%s_%s_win%d' % (event, str(j), count))
                j = j + 1
            tbeg = tbeg + tstep
            count = count + 1
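The sliding-window loop above can be factored into a small generator that mirrors its break condition; a sketch, with the helper name being my own rather than part of the original module:

def iter_windows(tmin, tmax, tstep, window):
    """Yield (tbeg, tend) pairs, mirroring the loop condition used above."""
    tbeg = tmin
    while tbeg < tmax:
        tend = tbeg + window
        if tend > tmax:
            break
        yield tbeg, tend
        tbeg += tstep

# inside apply_rois the loop would then read:
# for tbeg, tend in iter_windows(tmin, tmax, tstep, window):
#     win_stc = stc.copy().crop(tbeg, tend)
#     ...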
Example #18
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject,
                                   parc='aparc',
                                   subjects_dir=subjects_dir,
                                   regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label,
                                  src=src,
                                  smooth=True,
                                  subjects_dir=subjects_dir,
                                  connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject,
                                        parc='aparc',
                                        subjects_dir=subjects_dir,
                                        regexp=aparc_label_name)[0]

# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
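The snippet above stops after the anatomical time course; a hedged follow-up sketch that also extracts the functional label's time course and plots both, assuming the stc, src, func_label, pca_anat, and aparc_label_name names from the example (the matplotlib comparison is my own addition):

import matplotlib.pyplot as plt

pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]

plt.plot(1e3 * stc.times, pca_anat, 'k', label='anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc.times, pca_func, 'b', label='functional label')
plt.xlabel('time (ms)')
plt.legend()
plt.show()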
Example #19
stc_clu.subject = "fsaverage"
with open(
        "{}{}clu_{}-{}Hz_{}_{}_A".format(proc_dir, stat_dir, fr[0], fr[1],
                                         cond_str, thresh_str), "rb") as f:
    f_obs, clusters, cluster_pv, H0 = pickle.load(f)
f_thresh = np.quantile(H0, 0.95)
# stc_clu = mne.stats.summarize_clusters_stc(clu,subject="fsaverage",
#                                            p_thresh=0.05,
#                                            vertices=stc_clu.vertices)
meta_clusts = _find_clusters(stc_clu.data[:, 0], 1e-8, connectivity=cnx)[0]
clust_labels = []
for mc in meta_clusts:
    temp_stc = stc_clu.copy()
    temp_stc.data[:] = np.zeros((temp_stc.data.shape[0], 1))
    temp_stc.data[mc, 0] = 1
    lab = [x for x in mne.stc_to_label(temp_stc, src=fs_src) if x][0]
    clust_labels.append(lab)

X = np.zeros(
    (len(subjs), len(clust_labels), fr[1] - fr[0] + 1, len(conds), len(wavs)))
for sub_idx, sub in enumerate(subjs):
    src = mne.read_source_spaces("{}{}_{}-src.fif".format(
        proc_dir, sub, spacing))
    vertnos = [s["vertno"] for s in src]
    morph = mne.compute_source_morph(src,
                                     subject_from=sub_key[sub],
                                     subject_to="fsaverage",
                                     spacing=int(spacing[-1]),
                                     subjects_dir=subjects_dir,
                                     smooth=None)
Example #20
                                                n_permutations=n_permutations,
                                                tail=0,
                                                stat_fun=stat_fun,
                                                connectivity=connectivity,
                                                n_jobs=n_jobs, seed=0)
    print "Time elapsed : %s (s)" % (time() - t0)

    clusters = [c.reshape(n_times, n_vertices).T for c in clusters]
    # you get a cluster for every single thing that crossed the first-stage threshold

    # stc_log_pv_cluster = copy.deepcopy(mean_stc1)
    # stc_log_pv_cluster.data = np.zeros_like(stc_log_pv_cluster.data)
    # for pv, c in zip(cluster_pv, clusters):
    #     stc_log_pv_cluster.data[c] = -np.log10(pv)
    # 
    # stc_log_pv_cluster.save(prefix + 'clusters_pv_%s_%s' % (stat_name, t))
    
    stc_cluster = copy.deepcopy(template_stc)
    #you only write out a cluster to an stc file if it crosses the second-stage threshold
    for k, c in enumerate(clusters):
        stc_cluster.data = c
        if cluster_pv[k] < 0.15:  ##This is the threshold for saving an stc file with cluster
            stcFileName = '/cluster/kuperberg/SemPrMM/MEG/results/source_space/cluster_stats/' + prefix + '%d-%d_cluster%d_%s_thresh_%s_pv_%.3f' % (args.t1*1000,args.t2*1000,k, stat_name, t, cluster_pv[k])
            stc_cluster.save(stcFileName)
            #stc_cluster.save('/cluster/kuperberg/SemPrMM/MEG/results/source_space/cluster_stats/' + prefix + '%d-%d_cluster%d_%s_thresh_%s_pv_%.3f' % (args.t1*1000,args.t2*1000,k, stat_name, t, cluster_pv[k]))
            labelArray = mne.stc_to_label(stc_cluster, 'fsaverage')
            label = labelArray[0]
            mne.write_label(stcFileName, label)            

    print('pv : %s' % np.sort(cluster_pv)[:5])
Example #21
        binarydata[Vmask] = 1

        binarydata2 = np.concatenate((np.zeros(Vmask.shape), binarydata))

        stc = np.zeros((binarydata2.shape[0], 2))
        stc[:, 0] = binarydata2

        SrcEst_label = mne.SourceEstimate(stc,
                                          vertices=templateSTC.vertices,
                                          tmin=0,
                                          tstep=templateSTC.tstep,
                                          subject=MRIsubject)

        _, RHfunclabel = mne.stc_to_label(SrcEst_label,
                                          src=src,
                                          smooth=True,
                                          connected=True,
                                          subjects_dir=subjects_dir)
        del binarydata, binarydata2, stc, SrcEst_label

        # make surface plot
        SrcPlot = SrcEst.plot(subject=MRIsubject,
                              surface='inflated',
                              hemi='rh',
                              subjects_dir=subjects_dir,
                              time_unit='ms',
                              views='ven',
                              initial_time=round(t),
                              time_label=None,
                              colorbar=True,
                              size=(600, 800),
Example #22
def test_stc_to_label():
    """Test stc_to_label."""
    src = read_source_spaces(fwd_fname)
    src_bad = read_source_spaces(src_bad_fname)
    stc = read_source_estimate(stc_fname, 'sample')
    os.environ['SUBJECTS_DIR'] = op.join(data_path, 'subjects')
    labels1 = _stc_to_label(stc, src='sample', smooth=3)
    labels2 = _stc_to_label(stc, src=src, smooth=3)
    assert_equal(len(labels1), len(labels2))
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)

    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_lh, labels_rh = stc_to_label(stc,
                                            src=src,
                                            smooth=True,
                                            connected=True)

    pytest.raises(ValueError,
                  stc_to_label,
                  stc,
                  'sample',
                  smooth=True,
                  connected=True)
    pytest.raises(RuntimeError,
                  stc_to_label,
                  stc,
                  smooth=True,
                  src=src_bad,
                  connected=True)
    assert_equal(len(labels_lh), 1)
    assert_equal(len(labels_rh), 1)

    # test getting tris
    tris = labels_lh[0].get_tris(src[0]['use_tris'], vertices=stc.vertices[0])
    pytest.raises(ValueError,
                  spatial_tris_adjacency,
                  tris,
                  remap_vertices=False)
    adjacency = spatial_tris_adjacency(tris, remap_vertices=True)
    assert (adjacency.shape[0] == len(stc.vertices[0]))

    # "src" as a subject name
    pytest.raises(TypeError,
                  stc_to_label,
                  stc,
                  src=1,
                  smooth=False,
                  connected=False,
                  subjects_dir=subjects_dir)
    pytest.raises(ValueError,
                  stc_to_label,
                  stc,
                  src=SourceSpaces([src[0]]),
                  smooth=False,
                  connected=False,
                  subjects_dir=subjects_dir)
    pytest.raises(ValueError,
                  stc_to_label,
                  stc,
                  src='sample',
                  smooth=False,
                  connected=True,
                  subjects_dir=subjects_dir)
    pytest.raises(ValueError,
                  stc_to_label,
                  stc,
                  src='sample',
                  smooth=True,
                  connected=False,
                  subjects_dir=subjects_dir)
    labels_lh, labels_rh = stc_to_label(stc,
                                        src='sample',
                                        smooth=False,
                                        connected=False,
                                        subjects_dir=subjects_dir)
    assert (len(labels_lh) > 1)
    assert (len(labels_rh) > 1)

    # with smooth='patch'
    with pytest.warns(RuntimeWarning, match='have holes'):
        labels_patch = stc_to_label(stc, src=src, smooth=True)
    assert len(labels_patch) == len(labels1)
    for l1, l2 in zip(labels1, labels2):
        assert_labels_equal(l1, l2, decimal=4)
Example #23
    for i in range(gv.shape[0]):
        dist = np.sqrt((coord[:, 0] - gv[i, 0])**2 +
                       (coord[:, 1] - gv[i, 1])**2 +
                       (coord[:, 2] - gv[i, 2])**2)
        if min(dist) <= min_dist:
            b.append(np.argmin(dist))
    # create a stc with 1s for the near sources
    d = np.zeros([coord.shape[0], 1])
    d[b] = 1
    stc = mne.SourceEstimate(d,
                             vertices=[src[0]['vertno'], src[1]['vertno']],
                             tmin=0,
                             tstep=1,
                             subject='fsaverage_mne')
    # convert the stc to a label so we can morph it per subject later
    avg_label = mne.stc_to_label(stc, src=src, smooth=True, connected=False)
    if len(avg_label) > 2:
        raise ValueError('Found more than one label!')

data = []
for s in subjs:
    # check if it's AFNI or freesurfer label
    if roi.find('label') > 0:
        label_dir = '/Volumes/Shaw/MEG_structural/freesurfer/%s/labels/' % s
        label = mne.read_label(label_dir + roi)
        roi_pretty = roi.split('.')[0]
    else:
        roi_pretty = roi.split('/')[-1].split('+')[0]
    # right labels are normally in the second index
    if avg_label[0] is not None:
        label = avg_label[0].morph(subject_to=s)
Example #24
stcs_slow_v1 = apply_inverse_epochs(slow_epo_isi,
                                    inverse_operator,
                                    lambda2,
                                    method='sLORETA',
                                    pick_ori="normal",
                                    label=v1_label)
stcs_slow_mt = apply_inverse_epochs(slow_epo_isi,
                                    inverse_operator,
                                    lambda2,
                                    method='sLORETA',
                                    pick_ori="normal",
                                    label=mt_label)

src = inverse_operator['src']  # the source space used
stc_label_v1 = mne.stc_to_label(stcs_slow_v1[0],
                                src=src,
                                subjects_dir=subjects_dir,
                                smooth=False)
stc_label_mt = mne.stc_to_label(stcs_slow_mt[0],
                                src=src,
                                subjects_dir=subjects_dir,
                                smooth=False)

vertices_v1 = range(len(stc_label_v1[0].vertices))
vertices_mt = range(len(stc_label_mt[0].vertices))

psi_slow_v1_mt_all = np.zeros([len(vertices_v1), len(vertices_mt), 1])

for vert_num_v1 in vertices_v1:

    #one
    stcs_slow = apply_inverse_epochs(slow_epo_isi,
Example #25
#temp = temp3.in_label(TPOJ1_label_lh)
#w_vertices = np.unique(np.append(w_vertices, temp.vertices[0]))

""" V1 """
temp = temp3.in_label(V1_label_lh)
v1_vertices = temp.vertices[0]

###############################################################################
""" Just to visualize the new ROI """
mask = np.logical_and(times >= 0.08, times <= 0.12)

lh_label = temp3.in_label(V1_label_lh)
data = np.max(lh_label.data[:,mask],axis=1)
lh_label.data[data < 1.72] = 0.

temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
                      subjects_dir=fs_dir, connected=False)
temp = temp3.in_label(temp_labels)
v1_vertices = temp.vertices[0]
new_label = mne.Label(v1_vertices, hemi='lh')
brain3_1.add_label(new_label, borders=True, color='k')
###############################################################################
mask = np.logical_and(times >= 0.38, times <= 0.42)
lh_label = temp3.in_label(TE2p_label_lh)
data = np.mean(lh_label.data[:,mask],axis=1)
lh_label.data[data < 1.72] = 0.

temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
                      subjects_dir=fs_dir, connected=False)
temp = temp3.in_label(temp_labels)
ventral_vertices = temp.vertices[0]
Example #26
src = mne.setup_source_space(subject='fsaverage',
                             fname=None,
                             spacing='ico5',
                             surface='inflated')

# X will have the pvalues in 0
X = 1 - np.array(all_stats[b][1])[:, None]  # p-values
X[X <= 1 - pval_thresh] = 0
pval_stc = mne.SourceEstimate(X,
                              vertices=stc.vertno,
                              tmin=0,
                              tstep=1,
                              subject='fsaverage')
lh_labels, rh_labels = mne.stc_to_label(pval_stc,
                                        src=src,
                                        smooth=5,
                                        connected=True)
# was having problems mixing p-values and correlations in the stc, so let's keep them separated
X = np.empty([pval_stc.data.shape[0], 2])
X[:, 0] = all_stats[b][2][0]
X[:, 1] = all_stats[b][2][1]
corr_stc = mne.SourceEstimate(X,
                              vertices=stc.vertno,
                              tmin=0,
                              tstep=1,
                              subject='fsaverage')

my_txt = ['left', 'right']
for idx, labels in enumerate([lh_labels, rh_labels]):
    print('Found %d seeds in %s hemisphere' % (len(labels), my_txt[idx]))
    good_seeds = [
Example #27
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
                                   subjects_dir=subjects_dir,
                                   regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

# 8.5% of original source space vertices were omitted during forward
# calculation, suppress the warning here with verbose='error'
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
                                  subjects_dir=subjects_dir, connected=True,
                                  verbose='error')

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
                                        subjects_dir=subjects_dir,
                                        regexp=aparc_label_name)[0]

# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]

stc_func_label = stc.in_label(func_label)
Example #28
    stcs_v1 = apply_inverse_epochs(allepochs,
                                   inverse_operator,
                                   lambda2,
                                   method='sLORETA',
                                   pick_ori="normal",
                                   label=v1_label)
    stcs_v4 = apply_inverse_epochs(allepochs,
                                   inverse_operator,
                                   lambda2,
                                   method='sLORETA',
                                   pick_ori="normal",
                                   label=v4_label)

    src = inverse_operator['src']  # the source space used
    stc_label_v1 = mne.stc_to_label(stcs_v1[0],
                                    src=src,
                                    subjects_dir=subjects_dir,
                                    smooth=False)
    stc_label_v4 = mne.stc_to_label(stcs_v4[0],
                                    src=src,
                                    subjects_dir=subjects_dir,
                                    smooth=False)

    vertices_v1 = range(len(stc_label_v1[1].vertices))
    vertices_v4 = range(len(stc_label_v4[1].vertices))

    tcs_v1 = []
    tcs_v4 = []

    for vert_num_v1 in vertices_v1:

        #one
Example #29
        stc_sub = stc.copy().mean()
        data = np.zeros(stc_sub.data.shape)
        data[:, 0] = stc.data[:, 0]
        abs_data = abs(data)
        #data[abs_data<np.percentile(abs_data, thr)] = 0
        print(np.argwhere(abs_data).shape[0])
        #abs_data[abs_data < thr] = 0
        #print 'time_duration:%.3f' %np.percentile(abs_data[np.where(abs_data)], thr)
        #abs_data[abs_data < np.percentile(abs_data[np.where(abs_data)], thr)] = 0
        abs_data[abs_data < np.percentile(abs_data, thr)] = 0
        print(np.argwhere(abs_data).shape[0])
        stc_sub.data.setfield(abs_data, np.float32)
        #stc.data.setfield(data, np.float32)
        lh_labels, rh_labels = mne.stc_to_label(stc_sub,
                                                src=src,
                                                smooth=True,
                                                subjects_dir=subjects_dir,
                                                connected=True)
        i = 0
        while i < len(lh_labels):
            lh_label = lh_labels[i]
            print('left hemisphere ROI_%d has %d vertices' % (
                i, lh_label.vertices.shape[0]))
            dist = src[0]['dist']
            label_dist = dist[lh_label.vertices, :][:, lh_label.vertices]
            max_dist = round(label_dist.max() * 1000)
            if max_dist > vert_size:
                lh_label.save(labels_path + 'ini/%s,%s_%s' %
                              (side, conf_type, str(i)))
            i = i + 1
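The example above indexes src[0]['dist'], which is only populated when inter-vertex distances have been stored in the source space; a hedged sketch of guarding against a missing distance matrix (the 2 cm limit is an arbitrary choice and should be at least as large as the label extents being measured):

import mne

# 'dist' is None unless distances were computed when the source space was built
if src[0]['dist'] is None:
    src = mne.add_source_space_distances(src, dist_limit=0.02)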
Example #30
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_normal=True)

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.labels_from_parc(subject, parc='aparc', subjects_dir=subjects_dir,
                             regexp=aparc_label_name)[0][0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=5,
                                  subjects_dir=subjects_dir, connected=True)

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.labels_from_parc(subject, parc='aparc',
                                  subjects_dir=subjects_dir,
                                  regexp=aparc_label_name)[0][0]

# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]

stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
Example #31
stc = mne.read_source_estimate(fn_stc, subject=min_subject)
stc = stc.crop(tmin, tmax)
src_pow = np.sum(stc.data ** 2, axis=1)
stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
tbeg = tmin
count = 1
while tbeg < tmax:
    tend = tbeg + window
    if tend > tmax:
        break
    win_stc = stc.copy().crop(tbeg, tend)
    stc_data = win_stc.data 
    src_pow = np.sum(stc_data ** 2, axis=1)
    win_stc.data[src_pow < np.percentile(src_pow, thr)] = 0.
    func_labels_lh, func_labels_rh = mne.stc_to_label(
        win_stc, src=src_inv, smooth=True, connected=True,
        subjects_dir=subjects_dir)
    # Left hemisphere definition
    i = 0
    while i < len(func_labels_lh):
        func_label = func_labels_lh[i]
        func_label.save(labels_path + '%s_%s_win%d' % (event, str(i), count))
        i = i + 1
    # right hemisphere definition
    j = 0
    while j < len(func_labels_rh):
        func_label = func_labels_rh[j]
        func_label.save(labels_path +  '%s_%s_win%d' % (event, str(j), count))
        j = j + 1
    tbeg = tbeg + tstep
    count = count + 1
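All of the apply_rois variants write their ROIs to disk with Label.save; a short hedged sketch for reading them back later, with the directory and glob pattern as placeholders:

import glob
import mne

# placeholder directory matching whichever labels_path was used above
label_files = sorted(glob.glob('/path/to/labels_path/*.label'))
labels = [mne.read_label(fname) for fname in label_files]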