Example #1
import numpy as np
from scipy import stats
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test


def per2test(X1, X2, p_thr, p, tstep, n_per=8192, fn_clu_out=None):

    #    Note that X needs to be a multi-dimensional array of shape
    #    samples (subjects) x time x space, so we permute dimensions
    n_subjects1 = X1.shape[2]
    n_subjects2 = X2.shape[2]
    fsave_vertices = [np.arange(X1.shape[0] // 2), np.arange(X1.shape[0] // 2)]
    X1 = np.transpose(X1, [2, 1, 0])
    X2 = np.transpose(X2, [2, 1, 0])
    X = [X1, X2]
    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    f_threshold = stats.distributions.f.ppf(1. - p_thr / 2., n_subjects1 - 1, n_subjects2 - 1)
    print('Clustering.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_test(X, n_permutations=n_per, 
                                    connectivity=connectivity, n_jobs=1,
                                    threshold=f_threshold)
    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    good_cluster_inds = np.where(cluster_p_values < p)[0]
    print('The number of significant clusters is: %d' % good_cluster_inds.shape[0])
    ###############################################################################
    # Save the clusters as stc file
    # ----------------------
    assert good_cluster_inds.shape[0] != 0, ('Current p_threshold is %f, maybe '
                                             'you need to set a lower p_threshold'
                                             % p_thr)
    np.savez(fn_clu_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
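
A hypothetical call, with inputs shaped (vertices, timepoints, subjects) as the function expects; the random data and the fsaverage ico-5 vertex count (2 x 10242) are assumptions of this sketch:

rng = np.random.default_rng(0)
X1 = rng.standard_normal((20484, 40, 12))  # vertices x timepoints x subjects
X2 = rng.standard_normal((20484, 40, 14))
per2test(X1, X2, p_thr=0.01, p=0.05, tstep=0.005, n_per=1024,
         fn_clu_out='clu_2sample.npz')  # hypothetical output path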
Example #2
import matplotlib.pyplot as plt
from mne import combine_evoked
from mne.channels import make_1020_channel_selections
from mne.stats import spatio_temporal_cluster_test


def permutation_cluster_analysis(epochs, n_permutations=1000, plot=True):
    """
    Do a spatio-temporal cluster analysis to compare experimental conditions.
    """
    # Get the data for each event in epochs.event_id; transpose because the cluster
    # test requires channels to be last. In this case, inference is done over items.
    # In the same manner, we could also conduct the test over, e.g., subjects.
    tfce = dict(start=.2, step=.2)
    time_unit = dict(time_unit="s")
    events = list(epochs.event_id.keys())
    if plot:
        if len(events) == 2:  # When comparing two events subtract evokeds
            evoked = combine_evoked([epochs[events[0]].average(), -epochs[events[1]].average()],
                                    weights='equal')
            title = "%s vs %s" % (events[0], events[1])
        elif len(events) > 2:  # When comparing more than two events average them
            evoked = combine_evoked([epochs[e].average() for e in events], weights='equal')
            evoked.data /= len(events)
            title = ""
            for e in events:
                title += e+" + "
            title = title[:-2]
        evoked.plot_joint(title=title, ts_args=time_unit, topomap_args=time_unit)
        X = [epochs[e].get_data().transpose(0, 2, 1) for e in events]
        t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
            X, threshold=tfce, n_permutations=n_permutations)
        significant_points = cluster_pv.reshape(t_obs.shape).T < .05
        selections = make_1020_channel_selections(evoked.info, midline="12z")
        fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
        axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
        evoked.plot_image(axes=axes, group_by=selections, colorbar=False, show=False,
                          mask=significant_points, show_names="all", titles=None,
                          **time_unit)
        plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()), shrink=.3, label="µV")

    plt.show()
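
Hypothetical usage, assuming `epochs` is an mne.Epochs object whose event_id maps the conditions to compare:

permutation_cluster_analysis(epochs, n_permutations=1000, plot=True)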
Example #3
from mne.stats import spatio_temporal_cluster_test


def mne_spatio_temporal_cluster_test(X, **kwargs):

    threshold_tfce = dict(start=0, step=0.2)
    F_obs, _, p_values, _ = spatio_temporal_cluster_test(X, n_permutations=1000,
                                        threshold=threshold_tfce, tail=1,
                                        n_jobs=1, buffer_size=None,
                                        connectivity=None)

    return F_obs, p_values
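
A minimal sketch with random data; each group is observations x times x sensors, and with connectivity=None MNE assumes a regular lattice adjacency:

import numpy as np

rng = np.random.default_rng(1)
X = [rng.standard_normal((15, 40, 32)), rng.standard_normal((15, 40, 32))]
F_obs, p_values = mne_spatio_temporal_cluster_test(X)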
Example #4
import numpy as np
from scipy import stats
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test


def per2test(X1, X2, p_thr, p, tstep, n_per=8192, fn_clu_out=None):
    '''
      Calculate significant clusters using a 2-sample F-test.

      Parameters
      ----------
      X1, X2: array
        The shape of X should be (vertices, timepoints, subjects).
      tstep: float
        The interval between timepoints.
      n_per: int
        The number of permutations.
      p_thr: float
        The p-value threshold used to form clusters.
      p: float
        The corrected p-value threshold for significant clusters.
      fn_clu_out: string
        The filename for saving the clusters.
    '''
    #    Note that X needs to be a multi-dimensional array of shape
    #    samples (subjects) x time x space, so we permute dimensions
    n_subjects1 = X1.shape[2]
    n_subjects2 = X2.shape[2]
    fsave_vertices = [np.arange(X1.shape[0] // 2), np.arange(X1.shape[0] // 2)]
    X1 = np.transpose(X1, [2, 1, 0])
    X2 = np.transpose(X2, [2, 1, 0])
    X = [X1, X2]

    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    f_threshold = stats.distributions.f.ppf(1. - p_thr / 2., n_subjects1 - 1,
                                            n_subjects2 - 1)
    # t_threshold = stats.distributions.t.ppf(1. - p_thr / 2., n_subjects1 - 1,
    #                                         n_subjects2 - 1)

    print('Clustering...')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_test(X, n_permutations=n_per, #step_down_p=0.001,
                                     connectivity=connectivity, n_jobs=1,
                                     # threshold=t_threshold, stat_fun=stats.ttest_ind)
                                     threshold=f_threshold)

    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    good_cluster_inds = np.where(cluster_p_values < p)[0]
    print('The number of significant clusters is: %d' % good_cluster_inds.shape[0])

    # Save the clusters as stc file
    np.savez(fn_clu_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
    assert good_cluster_inds.shape[0] != 0, ('Current p_threshold is %f, maybe '
                                             'you need to set a lower p_threshold'
                                             % p_thr)
Example #5
def permutation_test_on_source_data_with_spatio_temporal_clustering(controls_data, patient_data, patient, cond_name,
                tstep, n_permutations, inverse_method='dSPM', n_jobs=6):
    try:
        print('permutation_test: patient {}, cond {}'.format(patient, cond_name))
        connectivity = spatial_tris_connectivity(grade_to_tris(5))
        #    Note that X needs to be a list of multi-dimensional array of shape
        #    samples (subjects_k) x time x space, so we permute dimensions
        print(controls_data.shape, patient_data.shape)
        X = [controls_data, patient_data]

        p_threshold = 0.05
        f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                                controls_data.shape[0] - 1, 1)
        print('Clustering. threshold = {}'.format(f_threshold))
        # note: the hard-coded threshold=10 below overrides the f_threshold computed above
        T_obs, clusters, cluster_p_values, H0 = clu =\
            spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=n_jobs,
                                         threshold=10, n_permutations=n_permutations)

        results_file_name = op.join(LOCAL_ROOT_DIR, 'clusters_results', '{}_{}_{}'.format(patient, cond_name, inverse_method))
        np.savez(results_file_name, T_obs=T_obs, clusters=clusters, cluster_p_values=cluster_p_values, H0=H0)
        #    Now select the clusters that are sig. at p < 0.05 (note that this value
        #    is multiple-comparisons corrected).
        good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

        ###############################################################################
        # Visualize the clusters

        print('Visualizing clusters.')

        #    Now let's build a convenient representation of each cluster, where each
        #    cluster becomes a "time point" in the SourceEstimate
        fsave_vertices = [np.arange(10242), np.arange(10242)]
        stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                                     vertices=fsave_vertices,
                                                     subject='fsaverage')
        stc_all_cluster_vis.save(op.join(LOCAL_ROOT_DIR, 'stc_clusters', '{}_{}_{}'.format(patient, cond_name, inverse_method)), ftype='h5')

        # #    Let's actually plot the first "time point" in the SourceEstimate, which
        # #    shows all the clusters, weighted by duration
        # # blue blobs are for condition A != condition B
        # brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
        #                                  subjects_dir=subjects_dir, clim='auto',
        #                                  time_label='Duration significant (ms)')
        # brain.set_data_time_index(0)
        # brain.show_view('lateral')
        # brain.save_image('clusters.png')
    except Exception:
        print('bummer! {}, {}'.format(patient, cond_name))
        print(traceback.format_exc())
# as we only have one hemisphere we only need half the connectivity
print('Computing connectivity.')
connectivity = mne.spatial_src_connectivity(src[:1])

#    Now let's actually do the clustering. Please relax, on a small
#    notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)

#    To speed things up a bit we will ...
n_permutations = 128  # ... run fewer permutations (reduces sensitivity)

print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
                                 threshold=f_thresh, stat_fun=stat_fun,
                                 n_permutations=n_permutations,
                                 buffer_size=None)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters
# ----------------------

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu,
                                             tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024


# set cluster threshold
threshold = 50.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.001

cluster_stats = spatio_temporal_cluster_test(
    X, n_permutations=1000, threshold=threshold, tail=1, n_jobs=1, connectivity=connectivity
)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
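
To make the cited procedure concrete: the corrected p-value of each cluster can be recovered by hand from H0, the null distribution of maximum cluster statistics returned by the test. A minimal sketch (MNE's exact tie and sign handling may differ slightly):

T_obs, clusters, p_values, H0 = cluster_stats
for clu_idx in good_cluster_inds:
    time_inds, space_inds = clusters[clu_idx]      # out_type='indices'
    clu_stat = T_obs[time_inds, space_inds].sum()  # cluster mass (t_power=1)
    p_manual = (H0 >= clu_stat).mean()             # fraction of shuffles >= observed
    print('cluster %d: p=%.4f (reported %.4f)'
          % (clu_idx, p_manual, p_values[clu_idx]))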

###############################################################################
# Note. The same functions work with source estimate. The only differences
# are the origin of the data, the size, and the connectivity definition.
# It can be used for single trials or for groups of subjects.
#
# Visualize clusters
# ------------------

# configure variables for visualization
times = epochs.times * 1e3
Example #8
import os
import numpy as np
from scipy import stats
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test


def sample2_clus(fn_list,
                 n_per=8192,
                 pthr=0.01,
                 p=0.05,
                 tail=0,
                 del_vers=None,
                 n_jobs=1):
    '''
      Calculate significant clusters using a 2-sample F-test.

      Parameters
      ----------
      fn_list: list
        Paths of group arrays.
      n_per: int
        The number of permutations.
      pthr: float
        The p-value threshold used to form clusters.
      p: float
        The corrected p-value threshold for significant clusters.
      tail: int
        0 for a two-tailed test, 1 for an upper-tailed test.
      del_vers: None or array-like
        If not None, vertices (e.g. in the medial wall) to exclude from
        clustering via spatial_exclude.
    '''
    for fn_npz in fn_list:
        fn_path = os.path.dirname(fn_npz)
        name = os.path.basename(fn_npz)
        #fn_out = fn_path + '/clu2sample_%s' %name[:name.rfind('.npz')] + '_%d_pct%.2f.npz' %(n_per, pct)
        fn_out = fn_path + '/clu2sample_%s' % name[:name.rfind(
            '.npz')] + '_%d_%dtail_pthr%.4f.npz' % (n_per, 1 +
                                                    (tail == 0), pthr)
        npz = np.load(fn_npz)
        tstep = npz['tstep'].flatten()[0]
        #    Note that X needs to be a multi-dimensional array of shape
        #    samples (subjects) x time x space, so we permute dimensions
        X = npz['X']
        ppf = stats.f.ppf
        tail = 1  # NB: overrides the `tail` argument; we are interested in an increase of variance only
        # halve the threshold for two-tailed tests; with tail=1 it stays pthr
        # (we can also adapt this, e.g. to p=0.01, if the cluster size is too large)
        p_thresh = pthr / (1 + (tail == 0))
        n_samples_per_group = [len(x) for x in X]
        f_threshold = ppf(1. - p_thresh, *n_samples_per_group)
        if np.sign(tail) < 0:
            f_threshold = -f_threshold
        fsave_vertices = [
            np.arange(X.shape[-1] // 2),
            np.arange(X.shape[-1] // 2)
        ]
        print('Clustering...')
        connectivity = spatial_tris_connectivity(grade_to_tris(5))
        T_obs, clusters, cluster_p_values, H0 = clu = \
            spatio_temporal_cluster_test(X, n_permutations=n_per, #step_down_p=0.001,
                                        connectivity=connectivity, n_jobs=n_jobs,
                                        # threshold=t_threshold, stat_fun=stats.ttest_ind)
                                        threshold=f_threshold, spatial_exclude=del_vers, tail=tail)

        #    Now select the clusters that are sig. at p < 0.05 (note that this value
        #    is multiple-comparisons corrected).
        good_cluster_inds = np.where(cluster_p_values < p)[0]
        print('The number of significant clusters is: %d' % good_cluster_inds.shape[0])

        # Save the clusters as stc file
        np.savez(fn_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
        assert good_cluster_inds.shape[0] != 0, (
            'Current p_threshold is %f, maybe you need to set a lower '
            'p_threshold' % pthr)
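
A sketch of turning one of the saved npz files back into a SourceEstimate, reusing the summarize_clusters_stc pattern from other examples on this page (the file name is hypothetical; allow_pickle is needed because clu is stored as an object array):

from mne.stats import summarize_clusters_stc

npz = np.load('clu2sample_example.npz', allow_pickle=True)
clu = tuple(npz['clu'])
stc_vis = summarize_clusters_stc(clu, tstep=float(npz['tstep']),
                                 vertices=list(npz['fsave_vertices']),
                                 subject='fsaverage')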
Example #9
    # required to merge epochs from different subjects together
    ep.info["dev_head_t"] = None
    X_low = ep["low"].get_data().transpose([0, 2, 1])
    y_low = ep["low"].events[:, 2]
    X_high = ep["high"].get_data().transpose([0, 2, 1])
    y_high = ep["high"].events[:, 2]

    erf_low = ep["low"].average()
    erf_high = ep["high"].average()

    p_accept = 0.05
    cluster_stats = spatio_temporal_cluster_test(
        [X_low, X_high],
        n_permutations=100,
        # threshold=threshold,
        tail=0,
        n_jobs=1,
        buffer_size=None,
        adjacency=adjacency,
    )

    T_obs, clusters, p_values, _ = cluster_stats
    good_cluster_inds = np.where(p_values < p_accept)[0]

    # organize data for plotting
    evokeds = {"low": erf_low, "high": erf_high}

    sel_idx = pick_types(info, meg="grad")
    info.pick_channels([info.ch_names[s] for s in sel_idx])
    plot_temporal_clusters(
        good_cluster_inds, evokeds, T_obs, clusters, erf_low.times, info
    )
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024


# set cluster threshold
threshold = 50.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01

cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000,
                                             threshold=threshold, tail=1,
                                             n_jobs=1,
                                             connectivity=connectivity)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

###############################################################################
# Note. The same functions work with source estimate. The only differences
# are the origin of the data, the size, and the connectivity definition.
# It can be used for single trials or for groups of subjects.
#
# Visualize clusters
# ------------------

# configure variables for visualization
colors = {"Aud": "crimson", "Vis": 'steelblue'}
Example #11
# Then we generate a distribution from the data by shuffling our conditions
# between our samples and recomputing our clusters and the test statistics.
# We test for the significance of a given cluster by computing the probability
# of observing a cluster of that size. For more background read:
# Maris/Oostenveld (2007), "Nonparametric statistical testing of EEG- and
# MEG-data" Journal of Neuroscience Methods, Vol. 164, No. 1., pp. 177-190.
# doi:10.1016/j.jneumeth.2007.03.024

# set cluster threshold
threshold = 50.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.01

cluster_stats = spatio_temporal_cluster_test(X,
                                             n_permutations=1000,
                                             threshold=threshold,
                                             tail=1,
                                             n_jobs=1,
                                             buffer_size=None,
                                             adjacency=adjacency)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

###############################################################################
# Note. The same functions work with source estimate. The only differences
# are the origin of the data, the size, and the adjacency definition.
# It can be used for single trials or for groups of subjects.
#
# Visualize clusters
# ------------------
connectivity = spatial_tris_connectivity(grade_to_tris(5))

#    Note that X needs to be a list of multi-dimensional array of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2., n_subjects1 - 1,
                                        n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
                                 threshold=f_threshold)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu,
                                             tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
Example #13
def apply_sigSTC(fn_list_v, vevent, mevent, method='dSPM', vtmin=0., vtmax=0.35,
                 mtmin=-0.3, mtmax=0.05, radius=10.0):
    import mne
    import numpy as np
    from mne import spatial_tris_connectivity, grade_to_tris
    from mne.stats import spatio_temporal_cluster_test
    from scipy import stats as stats
    X1, X2 = [], []
    stcs_trial = []
    for fn_v in fn_list_v:
        fn_m = fn_v[: fn_v.rfind('evtW')] + 'evtW_%s_bc_norm_1-lh.stc' %mevent
        stc_v = mne.read_source_estimate(fn_v)
        stcs_trial.append(stc_v.copy())
        stc_m = mne.read_source_estimate(fn_m)
        stc_v.resample(200)
        stc_m.resample(200)
        X1.append(stc_v.copy().crop(vtmin, vtmax).data)
        X2.append(stc_m.copy().crop(mtmin, mtmax).data)
    stcs_path = subjects_dir+'/fsaverage/%s_ROIs/conditions/' %method
    reset_directory(stcs_path)
    fn_avg = stcs_path + '%s' %(vevent)
    stcs = np.array(stcs_trial)
    stc_avg = np.sum(stcs, axis=0)/stcs.shape[0]
    stc_avg.save(fn_avg, ftype='stc')    
    X1 = np.array(X1).transpose(0, 2, 1)
    X2 = np.array(X2).transpose(0, 2, 1)
    ###############################################################################
    # Compute statistic
    
    #    To use an algorithm optimized for spatio-temporal clustering, we
    #    just pass the spatial connectivity matrix (instead of spatio-temporal)
    print('Computing connectivity.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    
    #    Note that X needs to be a list of multi-dimensional array of shape
    #    samples (subjects_k) x time x space, so we permute dimensions
    X = [X1, X2]
    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    p_threshold = 0.0001
    f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        X1.shape[0] - 1, X1.shape[0] - 1)
    print('Clustering.')
   
    clu = spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
                                    threshold=f_threshold)
    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    #fsave_vertices = [np.arange(10242), np.arange(10242)]
    tstep = stc_v.tstep
    #stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
    #                                            vertices=fsave_vertices,
    #                                            subject='fsaverage')
    #stc_sig = stc_all_cluster_vis.mean()
    #fn_sig = subjects_dir+'/fsaverage/%s_ROIs/%s' %(method,vevent)
    #stc_sig.save(fn_sig)
    T_obs, clusters, clu_pvals, _ = clu
    n_times, n_vertices = T_obs.shape
    good_cluster_inds = np.where(clu_pvals < 0.05)[0]
    seeds = []
    #  Build a convenient representation of each cluster, where each
    #  cluster becomes a "time point" in the SourceEstimate
    T_obs = abs(T_obs)
    if len(good_cluster_inds) > 0:
        data = np.zeros((n_vertices, n_times))
        for cluster_ind in good_cluster_inds:
            data.fill(0)
            v_inds = clusters[cluster_ind][1]
            t_inds = clusters[cluster_ind][0]
            data[v_inds, t_inds] = T_obs[t_inds, v_inds]
            # Store a nice visualization of the cluster by summing across time
            data = np.sign(data) * np.logical_not(data == 0) * tstep
            seed = np.argmax(data.sum(axis=-1))
            seeds.append(seed)
    min_subject = 'fsaverage'
    labels_path = subjects_dir + '/fsaverage/dSPM_ROIs/%s/ini' %vevent
    reset_directory(labels_path)
    seeds = np.array(seeds)
    non_index_lh = seeds[seeds < 10242]
    if non_index_lh.size > 0:
        func_labels_lh = mne.grow_labels(min_subject, non_index_lh,
                                        extents=radius, hemis=0, 
                                        subjects_dir=subjects_dir, n_jobs=1)
        for i, func_label in enumerate(func_labels_lh):
            func_label.save(labels_path + '/%s_%d' % (vevent, i))
            
    seeds_rh = seeds - 10242
    non_index_rh = seeds_rh[seeds_rh >= 0]
    if non_index_rh.size > 0:
        func_labels_rh = mne.grow_labels(min_subject, non_index_rh,
                                        extents=radius, hemis=1,
                                        subjects_dir=subjects_dir, n_jobs=1)                                             
   
        # right hemisphere definition
        for j, func_label in enumerate(func_labels_rh):
            func_label.save(labels_path + '/%s_%d' % (vevent, j))
Example #14
def SensorStatsPlot(condcomb, ListSubj, colors):

    #ListSubj = ('sd130343','cb130477' , 'rb130313', 'jm100109',
    #             'sb120316', 'tk130502','lm130479' , 'ms130534', 'ma100253', 'sl130503',
    #             'mb140004','mp140019' , 'dm130250', 'hr130504', 'wl130316', 'rl130571')

    ListSubj = ('sd130343', 'cb130477', 'rb130313', 'jm100109', 'tk130502',
                'lm130479', 'ms130534', 'ma100253', 'sl130503', 'mb140004',
                'mp140019', 'dm130250', 'hr130504', 'rl130571')

    condcomb = ('EtPast', 'EtPre', 'EtFut')

    colors = ((1, 0, 0), (1, 0.37, 0.15), (1, 0.75, 0.3))

    #ipython --pylab
    import mne
    import numpy as np
    import matplotlib.pyplot as plt
    import itertools
    from mne.stats import spatio_temporal_cluster_test
    from mne.channels import read_ch_connectivity
    from scipy import stats as stats
    from mne.viz import plot_evoked_topo, iter_topography

    import os
    os.chdir('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/SCRIPTS/MNE_PYTHON')
    os.environ['MNE_ROOT'] = '/neurospin/local/mne'
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/"

    # load FieldTrip neighbor definition to setup sensor connectivity
    neighbor_file_mag = '/neurospin/local/fieldtrip/template/neighbours/neuromag306mag_neighb.mat'  # mag
    neighbor_file_grad = '/neurospin/local/fieldtrip/template/neighbours/neuromag306planar_neighb.mat'  # grad
    neighbor_file_eeg = '/neurospin/local/fieldtrip/template/neighbours/easycap64ch-avg_neighb.mat'  # eeg
    connectivity, ch_names = mne.channels.read_ch_connectivity(
        neighbor_file_eeg, picks=range(60))
    connectivity_mag, ch_names_mag = read_ch_connectivity(neighbor_file_mag)
    connectivity_grad, ch_names_grad = read_ch_connectivity(neighbor_file_grad)
    connectivity_eeg, ch_names_eeg = read_ch_connectivity(neighbor_file_eeg)

    # evoked 0 to get the size of the matrix
    fname0 = (wdir + ListSubj[0] + "/mne_python/MEEG_" + condcomb[0] + "_" +
              ListSubj[0] + "-ave.fif")
    evoked0 = mne.read_evokeds(fname0, condition=0, baseline=(-0.2, 0))
    sensordatamat_meg_mag = np.empty(
        [len(condcomb),
         len(ListSubj), 102, evoked0.data.shape[1]])
    sensordatamat_meg_grad = np.empty(
        [len(condcomb),
         len(ListSubj), 204, evoked0.data.shape[1]])
    sensordatamat_meg_eeg = np.empty(
        [len(condcomb),
         len(ListSubj), 60, evoked0.data.shape[1]])

    # define statistical threshold
    p_threshold = 0.05
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2.,
                                             len(ListSubj) - 1)

    # compute grand averages
    GDAVGmag, GDAVGgrad, GDAVGeeg = [], [], []
    sensordatamat_meg_mag = np.empty(
        (len(condcomb), len(ListSubj), 102, len(evoked0.times)))
    sensordatamat_meg_grad = np.empty(
        (len(condcomb), len(ListSubj), 204, len(evoked0.times)))
    #sensordatamat_eeg       = np.empty((len(condcomb),len(ListSubj),60 ,len(evoked0.times)))

    for c in range(len(condcomb)):

        evoked2plotmag, evoked2plotgrad, evoked2ploteeg = [], [], []
        for i in range(len(ListSubj)):

            fname_ave_meg = (wdir + ListSubj[i] + "/mne_python/MEEG_" +
                             condcomb[c] + "_" + ListSubj[i] + "-ave.fif")

            tmp_evoked_meg = mne.read_evokeds(fname_ave_meg,
                                              condition=0,
                                              baseline=(-0.2, 0))
            evoked2plotmag.append(tmp_evoked_meg.pick_types('mag'))
            sensordatamat_meg_mag[c, i, ::, ::] = tmp_evoked_meg.data

            tmp_evoked_meg = mne.read_evokeds(fname_ave_meg,
                                              condition=0,
                                              baseline=(-0.2, 0))
            evoked2plotgrad.append(tmp_evoked_meg.pick_types('grad'))
            sensordatamat_meg_grad[c, i, ::, ::] = tmp_evoked_meg.data

            #tmp_evoked_meg  = mne.read_evokeds(fname_ave_meg,   condition=0, baseline=(-0.2, 0))
            #evoked2ploteeg.append(tmp_evoked_meg.pick_types('eeg'))
            #sensordatamat_eeg[c,i,::,::]  = tmp_evoked_meg.data

        GDAVGmag.append(mne.grand_average(evoked2plotmag))
        GDAVGgrad.append(mne.grand_average(evoked2plotgrad))
        #GDAVGeeg.append(mne.grand_average(evoked2ploteeg))

    # plot topomaps of grand_averages
    for ax, idx in iter_topography(GDAVGmag[0].info,
                                   fig_facecolor='black',
                                   axis_facecolor='black',
                                   axis_spinecolor='k'):
        for c, cond in enumerate(condcomb):
            ax.plot(GDAVGmag[c].data[idx], color=colors[c])
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    plt.savefig(
        "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
        + "_".join([str(cond) for cond in condcomb]) + "_GDAVG_mags.eps",
        format='eps',
        dpi=1500)
    plt.close()

    mne.viz.plot_topo(GDAVGmag, color=['r', 'orange', 'y'])
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    plt.savefig(
        "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
        + "_".join([str(cond) for cond in condcomb]) + "_bis_GDAVG_mags.eps",
        format='eps',
        dpi=1500)
    plt.close()

    for ax, idx in iter_topography(GDAVGgrad[0].info,
                                   fig_facecolor='black',
                                   axis_facecolor='black',
                                   axis_spinecolor='k'):
        for c, cond in enumerate(condcomb):
            ax.plot(GDAVGgrad[c].data[idx], color=colors[c])
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    plt.savefig(
        "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
        + "_".join([str(cond) for cond in condcomb]) + "_GDAVG_grads.eps",
        format='eps',
        dpi=1500)
    plt.close()

    mne.viz.plot_topo(GDAVGmag, color=['r', 'orange', 'y'])
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    plt.savefig(
        "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
        + "_".join([str(cond) for cond in condcomb]) + "_bis_GDAVG_grads.eps",
        format='eps',
        dpi=1500)
    plt.close()

    times = np.arange(-0.1, 0.9, 0.05)
    for c in range(len(condcomb)):

        GDAVGmag[c].plot_topomap(times,
                                 ch_type='mag',
                                 vmin=-40,
                                 vmax=40,
                                 average=0.05)
        plt.savefig(
            "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
            + str(condcomb[c]) + "_GDAVG_mags.eps",
            format='eps',
            dpi=1500)
        plt.close()

        GDAVGgrad[c].plot_topomap(times,
                                  ch_type='grad',
                                  vmin=-10,
                                  vmax=10,
                                  average=0.05)
        plt.savefig(
            "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
            + str(condcomb[c]) + "_GDAVG_grads.eps",
            format='eps',
            dpi=1500)
        plt.close()

    for combination in itertools.combinations(range(3), 2):
        tmp = GDAVGmag[combination[0]] - GDAVGmag[combination[1]]
        tmp.plot_topomap(times, ch_type='mag', vmin=-15, vmax=15, average=0.05)
        plt.savefig(
            "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
            + str(condcomb[combination[0]]) + '_minus_' +
            str(condcomb[combination[1]]) + "_GDAVG_mags.eps",
            format='eps',
            dpi=1000)
        plt.close()

        tmp = GDAVGgrad[combination[0]] - GDAVGgrad[combination[1]]
        tmp.plot_topomap(times, ch_type='grad', vmin=0, vmax=2, average=0.05)
        plt.savefig(
            "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/TOPOPLOT_ERF/"
            + str(condcomb[combination[0]]) + '_minus_' +
            str(condcomb[combination[1]]) + "_GDAVG_grads.eps",
            format='eps',
            dpi=1000)
        plt.close()

    allcond_meg_mag = [
        np.transpose(x, (0, 2, 1)) for x in sensordatamat_meg_mag
    ]
    allcond_meg_grad = [
        np.transpose(x, (0, 2, 1)) for x in sensordatamat_meg_grad
    ]

    ###############################################################################

    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(ListSubj) - 1)
    T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_test(
        allcond_meg_mag,
        n_permutations=1024,
        threshold=t_threshold,
        tail=0,
        n_jobs=4,
        connectivity=connectivity_mag)

    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(ListSubj) - 1)
    T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_test(
        allcond_meg_grad,
        n_permutations=1024,
        threshold=t_threshold,
        tail=0,
        n_jobs=4,
        connectivity=connectivity_grad)
Example #15
)
fig.set_size_inches((20, 10))
plt.show()

adjacency, ch_names = find_ch_adjacency(info, ch_type="grad")
# set cluster threshold
threshold = 10.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.05

# X_low = X[y == LOW_CONF_EPOCH, ...].transpose(0, 2, 1)
# X_high = X[y == HIGH_CONF_EPOCH, ...].transpose(0, 2, 1)
cluster_stats = spatio_temporal_cluster_test(
    [theta_power_low[:, np.newaxis, :], theta_power_high[:, np.newaxis, :]],
    n_permutations=100,
    threshold=threshold,
    tail=1,
    n_jobs=1,
    buffer_size=None,
    adjacency=adjacency,
)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

# organize data for plotting
evokeds = {"low": erf_low, "high": erf_high}

plot_temporal_clusters(good_cluster_inds, evokeds, T_obs, clusters,
                       erf_low.times, info)
Example #16
from mne.stats import (spatio_temporal_cluster_1samp_test, summarize_clusters_stc)

print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=1,
                                       threshold=t_threshold, buffer_size=None)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).

#%%
from mne.stats import spatio_temporal_cluster_test
good_cluster_inds = np.where(cluster_p_values < 0.1)[0]

a = spatio_temporal_cluster_test(
            X, threshold=None, n_permutations=1024, tail=0, stat_fun=None,
            connectivity=None, verbose=None, n_jobs=1, seed=None, max_step=1,
            spatial_exclude=None, step_down_p=0, t_power=1, out_type='indices',
            check_disjoint=False)

#%%############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, p_thresh=0.1, tstep=tstep,
                                             vertices=fsave_vertices, subject=subject)

#%%    Let's actually plot the first "time point" in the SourceEstimate, which
#    shows all the clusters, weighted by duration
subjects_dir = mainDir
Example #17
    epochs = mne.read_epochs(epochs_folder +
                             '%s_trial_start-epo.fif' % subject)

    epochs.pick_types(meg="mag")

    X_ctl_left[j, :, :] = epochs["ctl/left"].average().data.T
    X_ctl_right[j, :, :] = epochs["ctl/right"].average().data.T
    X_ent_left[j, :, :] = epochs["ent/left"].average().data.T
    X_ent_right[j, :, :] = epochs["ent/right"].average().data.T

X = [X_ctl_left, X_ctl_right, X_ent_left, X_ent_right]

###############################################################################
# load FieldTrip neighbor definition to setup sensor connectivity
connectivity, ch_names = read_ch_connectivity('neuromag306mag')

# cluster-forming threshold is left at its default here
# set family-wise p-value
p_accept = 0.05

cluster_stats = spatio_temporal_cluster_test(X,
                                             n_permutations=5000,
                                             tail=0,
                                             n_jobs=3,
                                             connectivity=connectivity)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

pickle.dump(cluster_stats, open(result_dir + "/cluster_stats_pow_mag.p", "wb"))
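
A short sketch for reloading the pickled statistics in a later session, assuming the same result_dir:

import pickle

with open(result_dir + "/cluster_stats_pow_mag.p", "rb") as f:
    T_obs, clusters, p_values, H0 = pickle.load(f)
good_cluster_inds = np.where(p_values < p_accept)[0]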
Example #19
        print(f"NO MATCH FOR {trial_path}")

X_high = np.array(X_high)
X_low = np.array(X_low)

print("X_high.shape = ", X_high.shape)
print("X_low.shape = ", X_low.shape)

adjacency = spatial_src_adjacency(src)

thresh_pv = 0.02
F_obs, clusters, cluster_pv, H0 = clu = spatio_temporal_cluster_test(
    [X_high, X_low],
    # threshold=8,
    n_permutations=100,
    adjacency=adjacency,
    out_type="indices",
    check_disjoint=True,
    stat_fun=ttest_ind_no_p,
)

stc_all_cluster_vis = summarize_clusters_stc(
    clu,
    p_thresh=thresh_pv,
    # p_thresh=0.05,
    vertices=src,
    subject="fsaverage",
)
stc_all_cluster_vis

stc_all_cluster_vis.plot(subjects_dir=dirs.fsf_subjects, hemi="both")
Example #20
connectivity = spatial_src_connectivity(src)

#    Note that X needs to be a list of multi-dimensional array of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
                                 threshold=f_threshold, buffer_size=None)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters
# ----------------------

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
Example #21
plt.show()

adjacency, ch_names = find_ch_adjacency(info, ch_type=ch_type)
# set cluster threshold
threshold = 6.0  # very high, but the test is quite sensitive on this data
# set family-wise p-value
p_accept = 0.05

psds_low_t = psds_low.transpose(0, 2, 1)
psds_high_t = psds_high.transpose(0, 2, 1)

cluster_stats = spatio_temporal_cluster_test(
    [psds_low_t, psds_high_t],
    n_permutations=500,
    threshold=threshold,
    tail=1,
    n_jobs=8,
    buffer_size=None,
    adjacency=adjacency,
)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
colors = {"low": "crimson", "high": "steelblue"}
linestyles = {"low": "-", "high": "--"}

# organize data for plotting
info['sfreq'] = len(freqs) / freqs[-1]
evokeds = {
    "low": EvokedArray(psds_low.mean(0), info, tmin=freqs[0]),
    "high": EvokedArray(psds_high.mean(0), info, tmin=freqs[0]),
print("Computing connectivity.")
connectivity = spatial_tris_connectivity(grade_to_tris(5))

#    Note that X needs to be a list of multi-dimensional array of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1.0 - p_threshold / 2.0, n_subjects1 - 1, n_subjects2 - 1)
print("Clustering.")
T_obs, clusters, cluster_p_values, H0 = clu = spatio_temporal_cluster_test(
    X, connectivity=connectivity, n_jobs=2, threshold=f_threshold
)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters

print("Visualizing clusters.")

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep, vertices=fsave_vertices, subject="fsaverage")
Example #23
def SensorStatsPlot(condcomb, ListSubj, colors):

    #ListSubj = ('sd130343','cb130477' , 'rb130313', 'jm100109',
    #             'sb120316', 'tk130502','lm130479' , 'ms130534', 'ma100253', 'sl130503',
    #             'mb140004','mp140019' , 'dm130250', 'hr130504', 'wl130316', 'rl130571')

    #ListSubj = ('sd130343','cb130477' , 'rb130313', 'jm100109',
    #             'tk130502','lm130479' , 'ms130534', 'ma100253', 'sl130503',
    #            'mb140004','mp140019' , 'dm130250', 'hr130504', 'rl130571')

    #condcomb = ('QtPast' ,'QtPre','QtFut' )
    #condcomb = ('QsWest' ,'QsPar','QsEast')

    #ipython --pylab
    import mne
    import numpy as np
    import matplotlib.pyplot as plt
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from mne.viz import plot_topomap
    from mne.stats import spatio_temporal_cluster_test
    from mne.datasets import sample
    from mne.channels import read_ch_connectivity
    from scipy import stats as stats
    from mne.viz import plot_topo
    import os
    os.chdir('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/SCRIPTS/MNE_PYTHON')
    os.environ['MNE_ROOT'] = '/neurospin/local/mne'
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/"

    # load FieldTrip neighbor definition to setup sensor connectivity
    neighbor_file_mag = '/neurospin/local/fieldtrip/template/neighbours/neuromag306mag_neighb.mat'  # mag
    neighbor_file_grad = '/neurospin/local/fieldtrip/template/neighbours/neuromag306planar_neighb.mat'  # grad
    neighbor_file_eeg = '/neurospin/local/fieldtrip/template/neighbours/easycap64ch-avg_neighb.mat'  # eeg
    connectivity, ch_names = mne.channels.read_ch_connectivity(
        neighbor_file_eeg, picks=range(60))
    connectivity_mag, ch_names_mag = read_ch_connectivity(neighbor_file_mag)
    connectivity_grad, ch_names_grad = read_ch_connectivity(neighbor_file_grad)
    connectivity_eeg, ch_names_eeg = read_ch_connectivity(neighbor_file_eeg)

    # evoked 0 to get the size of the matrix
    fname0 = (wdir + ListSubj[0] + "/mne_python/MEEG_" + condcomb[0] + "_" +
              ListSubj[0] + "-ave.fif")
    evoked0 = mne.read_evokeds(fname0, condition=0, baseline=(-0.2, 0))
    sensordatamat_meg_mag = np.empty(
        [len(condcomb),
         len(ListSubj), 102, evoked0.data.shape[1]])
    sensordatamat_meg_grad = np.empty(
        [len(condcomb),
         len(ListSubj), 204, evoked0.data.shape[1]])
    sensordatamat_meg_eeg = np.empty(
        [len(condcomb),
         len(ListSubj), 60, evoked0.data.shape[1]])

    # define statistical threshold
    p_threshold = 0.05
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2.,
                                             len(ListSubj) - 1)

    # compute grand averages
    GDAVGmag, GDAVGgrad, GDAVGeeg = [], [], []
    sensordatamat_meg_mag = np.empty(
        (len(condcomb), len(ListSubj), 102, len(evoked0.times)))
    sensordatamat_meg_grad = np.empty(
        (len(condcomb), len(ListSubj), 204, len(evoked0.times)))
    #sensordatamat_eeg       = np.empty((len(condcomb),len(ListSubj),60 ,len(evoked0.times)))

    for c in range(len(condcomb)):

        evoked2plotmag, evoked2plotgrad, evoked2ploteeg = [], [], []
        for i in range(len(ListSubj)):

            fname_ave_meg = (wdir + ListSubj[i] + "/mne_python/MEEG_" +
                             condcomb[c] + "_" + ListSubj[i] + "-ave.fif")

            tmp_evoked_meg = mne.read_evokeds(fname_ave_meg,
                                              condition=0,
                                              baseline=(-0.2, 0))
            evoked2plotmag.append(tmp_evoked_meg.pick_types('mag'))
            sensordatamat_meg_mag[c, i, ::, ::] = tmp_evoked_meg.data

            tmp_evoked_meg = mne.read_evokeds(fname_ave_meg,
                                              condition=0,
                                              baseline=(-0.2, 0))
            evoked2plotgrad.append(tmp_evoked_meg.pick_types('grad'))
            sensordatamat_meg_grad[c, i, ::, ::] = tmp_evoked_meg.data

            #tmp_evoked_meg  = mne.read_evokeds(fname_ave_meg,   condition=0, baseline=(-0.2, 0))
            #evoked2ploteeg.append(tmp_evoked_meg.pick_types('eeg'))
            #sensordatamat_eeg[c,i,::,::]  = tmp_evoked_meg.data

        GDAVGmag.append(mne.grand_average(evoked2plotmag))
        GDAVGgrad.append(mne.grand_average(evoked2plotgrad))
        #GDAVGeeg.append(mne.grand_average(evoked2ploteeg))

    # plot topomaps of grand_averages
    plot_topo(GDAVGmag, color=colors)
    plt.savefig(
        "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/"
        + "_".join([str(cond) for cond in condcomb]) + "_GDAVG_mags")

    plot_topo(GDAVGgrad, color=colors)
    plt.savefig(
        "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/"
        + "_".join([str(cond) for cond in condcomb]) + "_GDAVG_grads")

    times = np.arange(-0.1, 0.9, 0.05)
    for c in range(len(condcomb)):

        GDAVGmag[c].plot_topomap(times,
                                 ch_type='mag',
                                 vmin=-40,
                                 vmax=40,
                                 average=0.05)
        plt.savefig(
            "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/"
            + str(condcomb[c]) + "_GDAVG_mags")

        GDAVGgrad[c].plot_topomap(times,
                                  ch_type='grad',
                                  vmin=-10,
                                  vmax=10,
                                  average=0.05)
        plt.savefig(
            "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/decoding_context_yousra/"
            + str(condcomb[c]) + "_GDAVG_grads")

    allcond_meg_mag = [
        np.transpose(x, (0, 2, 1)) for x in sensordatamat_meg_mag
    ]
    allcond_meg_grad = [
        np.transpose(x, (0, 2, 1)) for x in sensordatamat_meg_grad
    ]

    ###############################################################################

    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(ListSubj) - 1)
    T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_test(
        allcond_meg_mag,
        n_permutations=1024,
        threshold=t_threshold,
        tail=0,
        n_jobs=4,
        connectivity=connectivity_mag)

    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(ListSubj) - 1)
    T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_test(
        allcond_meg_grad,
        n_permutations=1024,
        threshold=t_threshold,
        tail=0,
        n_jobs=4,
        connectivity=connectivity_grad)
Example #24
data_invol = np.rollaxis(data_invol, 2, 1)

neighbor_file = '/home/mje/Toolboxes/fieldtrip/template/neighbours' \
                 '/biosemi64_neighb'
connectivity, ft_ch_names = mne.channels.read_ch_connectivity(neighbor_file,
                                                              picks=picks)

threshold = None
n_permutations = 2000
tail = 0

T_obs, clusters, cluster_p_values, H0 =\
    spatio_temporal_cluster_test([data_vol, data_invol],
                                 n_permutations=n_permutations,
                                 threshold=threshold,
                                 tail=tail,
                                 out_type="mask",
                                 connectivity=connectivity,
                                 n_jobs=2)

# PLOT
# Make evoked difference between the two conditions.
diff_wave = epochs["voluntary"].average() -\
                  epochs["involuntary"].average()
diff_wave.crop(test_times[0], test_times[-1])

min_cluster_index = cluster_p_values.argmin()
mask = np.squeeze(clusters[min_cluster_index][:, np.newaxis].T)
plot_times = np.arange(-0.5, 0.5, 0.1)

diff_wave.plot_topomap(times=plot_times, ch_type='eeg',
Example #25
                y_df.query(
                    'metric_type=="{}" & subj_id=="{}" & channel=="{}"'.format(
                        metric_type, s, ch))['env'].values for ch in CHANNELS
            ] for s in y_df.query('fb_type=="FBMock"')['subj_id'].unique()]),
            2, 1)

        from mne.stats import spatio_temporal_cluster_test

        from mne import create_info
        from mne.channels import read_montage, find_ch_connectivity

        cnk = find_ch_connectivity(
            create_info(CHANNELS, 250, 'eeg', read_montage('standard_1005')),
            'eeg')[0]

        t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
            [g1, g2], 138, stat_fun=rankstat, tail=1, connectivity=cnk)
        cluster_pv

        good_cluster_inds = np.where(cluster_pv < 0.05)[0]

        for i_clu, clu_idx in enumerate(good_cluster_inds[:10]):
            # unpack cluster information, get unique indices
            time_inds, space_inds = np.squeeze(clusters[clu_idx])
            ch_inds = np.unique(space_inds)
            time_inds = np.unique(time_inds)

            # get topography for F stat
            f_map = t_obs[time_inds, ...].mean(axis=0)

            # get signals at the sensors contributing to the cluster
            sig_times = np.arange(30)[time_inds]
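
rankstat is not defined in this excerpt. A hypothetical rank-based statistic matching the stat_fun contract of spatio_temporal_cluster_test (each group arrives as a 2-D observations x tests array; a 1-D statistic per test must be returned) could look like:

import numpy as np
from scipy.stats import rankdata

def rankstat(*groups):
    # mean rank of the first group within the pooled sample, computed
    # independently for every time/space point (column)
    pooled = np.concatenate(groups, axis=0)
    ranks = np.apply_along_axis(rankdata, 0, pooled)
    return ranks[:groups[0].shape[0]].mean(axis=0)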
Example #26
                             '%s_trial_start-epo.fif' % subject)

    epochs.pick_types(meg="mag")

    X_ctl_left[j, :, :] = epochs["ctl/left"].average().data.T
    X_ctl_right[j, :, :] = epochs["ctl/right"].average().data.T
    X_ent_left[j, :, :] = epochs["ent/left"].average().data.T
    X_ent_right[j, :, :] = epochs["ent/right"].average().data.T


X = [X_ctl_left, X_ctl_right, X_ent_left, X_ent_right]

###############################################################################
# load FieldTrip neighbor definition to setup sensor connectivity
connectivity, ch_names = read_ch_connectivity('neuromag306mag')

# set cluster threshold
# set family-wise p-value
p_accept = 0.05

cluster_stats = spatio_temporal_cluster_test(X, n_permutations=5000,
                                             tail=0,
                                             n_jobs=3,
                                             connectivity=connectivity)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

pickle.dump(cluster_stats, open(result_dir +
                                "/cluster_stats_pow_mag.p", "wb"))
    print('[Main effect of Category]')
    stat_fun = stat_fun_Category
    savedirname = 'MainEffect_Category'
elif ef == 'B':
    print('[Main effect of SF]')
    stat_fun = stat_fun_SF
    savedirname = 'MainEffect_SF'
elif ef == 'A:B':
    print('[Category x SF interaction]')
    stat_fun = stat_fun_Interaction
    savedirname = 'Interaction'

# ANOVA with cluster-based permutation test
startT = time.time()
scores, _, p_val, H0 = spatio_temporal_cluster_test(Dataset, threshold=thresh, n_permutations=Nperm, tail=tail,
                                                    connectivity=connectivity, n_jobs=njobs, buffer_size=None,
                                                    spatial_exclude=excludedVerts, stat_fun=stat_fun)
elapsed_time = (time.time() - startT) / 60
p_val = p_val.reshape(scores.shape)

# save results
os.chdir(savedir)
if not os.path.exists('./' + savedirname):
    os.mkdir('./' + savedirname)
os.chdir('./' + savedirname)

np.save('TFCEscores.npy', scores)
np.save('Pvalues.npy', p_val)
np.save('ObservedClusterLevelStats.npy', H0)

print('  -> Finished.')
    return coo_matrix(dists < dist), labels


if __name__ == "__main__":
    X, dfs = load_source_data()
    conf_mask = np.logical_and(dfs.confidence > 0, dfs.confidence < 100)
    X = X[conf_mask, :, :]
    dfs = dfs[conf_mask]
    low_mask = dfs.confidence < 40
    high_mask = dfs.confidence > 60
    X_low = X[low_mask, :, :].transpose(0, 2, 1)
    X_high = X[high_mask, :, :].transpose(0, 2, 1)
    adj, labels = get_label_adjacency()

    cluster_stats = spatio_temporal_cluster_test(
        [X_low, X_high], adjacency=adj, n_permutations=100, threshold=6
    )
    T_obs, clusters, p_values, _ = cluster_stats
    thresh = 0.05
    good_cluster_inds = (p_values < thresh).nonzero()[0]

    times = np.arange(-1, 1.002, 0.002)
    for g in good_cluster_inds:
        print("-" * 80)
        lab_inds = np.unique(clusters[g][1])
        time_inds = np.unique(clusters[g][0])
        for lb in lab_inds:
            print(labels[lb].name)
        print([int(times[t] * 1000) for t in time_inds])
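
Only the return statement of get_label_adjacency survives in the fragment above (and the script calls it without arguments). A hypothetical standalone variant with explicit inputs, assuming an (n_labels, 3) array of label center positions in meters:

import numpy as np
from scipy.sparse import coo_matrix

def get_label_adjacency(labels, positions, dist=0.02):
    # labels whose centers lie closer than `dist` are treated as adjacent
    diff = positions[:, None, :] - positions[None, :, :]
    dists = np.sqrt((diff ** 2).sum(axis=-1))
    return coo_matrix(dists < dist), labels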
Пример #29
0
data_invol = np.rollaxis(data_invol, 2, 1)

neighbor_file = '/home/mje/Toolboxes/fieldtrip/template/neighbours' \
                 '/biosemi64_neighb'
connectivity, ft_ch_names = mne.channels.read_ch_connectivity(neighbor_file,
                                                              picks=picks)

threshold = None
n_permutations = 2000
tail = 0

T_obs, clusters, cluster_p_values, H0 =\
    spatio_temporal_cluster_test([data_vol, data_invol],
                                 n_permutations=n_permutations,
                                 threshold=threshold,
                                 tail=tail,
                                 out_type="mask",
                                 connectivity=connectivity,
                                 n_jobs=2)

# PLOT
# Make evoked difference between the two conditions.
diff_wave = epochs["voluntary"].average() -\
                  epochs["involuntary"].average()
diff_wave.crop(test_times[0], test_times[-1])

min_cluster_index = cluster_p_values.argmin()
mask = clusters[min_cluster_index].T  # boolean (n_channels, n_times) mask
plot_times = np.arange(-0.5, 0.5, 0.1)

diff_wave.plot_topomap(times=plot_times, mask=mask)  # remaining args assumed;
                                                     # the source truncates here
Example #30
# Calculate adjacency matrix between sensors from their locations
adjacency, _ = find_ch_adjacency(epochs.info, "eeg")

# Extract data: transpose because the cluster test requires channels to be last
# In this case, inference is done over items. In the same manner, we could
# also conduct the test over, e.g., subjects.
X = [
    long_words.get_data().transpose(0, 2, 1),
    short_words.get_data().transpose(0, 2, 1)
]
tfce = dict(start=.4, step=.4)  # ideally start and step would be smaller

# Calculate statistical thresholds
t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
    X, tfce, adjacency=adjacency,
    n_permutations=100)  # a more standard number would be 1000+
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")

##############################################################################
# The results of these mass univariate analyses can be visualised by plotting
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.

# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked(
    [long_words.average(), short_words.average()],
    weights=[1, -1])  # calculate difference wave
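# A minimal sketch of the masked image plot this example builds toward; the
# mask must be (n_channels, n_times), which is exactly the shape of the
# significant_points array computed above:
evoked.plot_image(mask=significant_points, show_names="all", time_unit="s")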
Example #31
from scipy import stats
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
f_threshold = 0.01
st_max, st_min = 0.3, 0.  # time period of stimulus
res_max, res_min = 0.1, -0.2  # time period of response
subjects_dir = '/home/uais_common/dong/freesurfer/subjects/'
stcs_path = subjects_dir + '/fsaverage/conf_stc/'
st_list = ['LLst', 'RRst', 'RLst', 'LRst']  # stimulus events
res_list = ['LLrt', 'RRrt', 'LRrt', 'RLrt']  # response events
do_arange = True
if do_arange:
    tstep, X = ara_trivsres(st_list, res_list, st_min, st_max, res_min,
                            res_max, stcs_path, subjects_dir)
else:
    res_mat = np.load(stcs_path + 'res.npz')
    tri_mat = np.load(stcs_path + 'tri.npz')
    X = [tri_mat['tri'], res_mat['res']]
    tstep = tri_mat['tstep']

fsave_vertices = [np.arange(10242), np.arange(10242)]
connectivity = spatial_tris_connectivity(grade_to_tris(5))
T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_test(X, n_permutations=8192 // 2,  # step_down_p=0.001,
                                     connectivity=connectivity, n_jobs=1,
                                     # threshold=t_threshold, stat_fun=stats.ttest_ind)
                                     threshold=f_threshold)
np.savez(stcs_path + 'trivsres.npz',
         clu=clu,
         tstep=tstep,
         fsave_vertices=fsave_vertices)
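# A hedged sketch of loading these results back: `clu` round-trips through
# np.savez as an object array, so allow_pickle is needed and the tuple is
# rebuilt before summarizing (vertices/subject mirror the values used above):
res = np.load(stcs_path + 'trivsres.npz', allow_pickle=True)
stc_vis = summarize_clusters_stc(tuple(res['clu']), tstep=float(res['tstep']),
                                 vertices=[np.arange(10242), np.arange(10242)],
                                 subject='fsaverage')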
Example #32
# correcting for multiple tests.
# MNE offers various methods for this; amongst them, cluster-based permutation
# methods allow deriving power from the spatio-temporal correlation structure
# of the data. Here, we use TFCE.

# Calculate statistical thresholds
con, _ = find_ch_connectivity(epochs.info, "eeg")

# Extract data: transpose because the cluster test requires channels to be last
# In this case, inference is done over items. In the same manner, we could
# also conduct the test over, e.g., subjects.
X = [long.get_data().transpose(0, 2, 1),
     short.get_data().transpose(0, 2, 1)]
tfce = dict(start=.2, step=.2)

t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
    X, tfce, n_permutations=100)
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")

##############################################################################
# The results of these mass univariate analyses can be visualised by plotting
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.

# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked([long.average(), -short.average()],
                            weights='equal')  # calculate difference wave
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words", ts_args=time_unit,
Example #33
# correcting for multiple tests.
# MNE offers various methods for this; amongst them, cluster-based permutation
# methods allow deriving power from the spatio-temporal correlation structure
# of the data. Here, we use TFCE.

# Calculate statistical thresholds
con, _ = find_ch_connectivity(epochs.info, "eeg")

# Extract data: transpose because the cluster test requires channels to be last
# In this case, inference is done over items. In the same manner, we could
# also conduct the test over, e.g., subjects.
X = [long_words.get_data().transpose(0, 2, 1),
     short_words.get_data().transpose(0, 2, 1)]
tfce = dict(start=.2, step=.2)

t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
    X, tfce, n_permutations=100)  # a more standard number would be 1000+
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")

##############################################################################
# The results of these mass univariate analyses can be visualised by plotting
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.

# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked([long_words.average(), -short_words.average()],
                            weights='equal')  # calculate difference wave
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words", ts_args=time_unit,
Example #34
# as we only have one hemisphere we only need half the adjacency
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])

# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)

# To speed things up a bit we will ...
n_permutations = 50  # ... run way fewer permutations (reduces sensitivity)

print('Clustering.')
F_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=None,
                                 threshold=f_thresh, stat_fun=stat_fun,
                                 n_permutations=n_permutations,
                                 buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

# %%
# Visualize the clusters
# ----------------------

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
Example #35
def run_sensor_stats():
    for c in range(len(config.stats_params)):
       
        # organise data and analysis parameters
        dat0_files = config.stats_params[c]['dat0_files']
        dat1_files = config.stats_params[c]['dat1_files']
        condnames = config.stats_params[c]['condnames']
        tmin, tmax = config.stats_params[c]['statwin']
        n_permutations = config.stats_params[c]['n_permutations']
        p_threshold = config.stats_params[c]['threshold']
        tail = config.stats_params[c]['tail']
        if tail == 0:
            p_threshold = p_threshold / 2
            tail_x = 1
        else:
            tail_x = tail

        if config.stats_params[c].get('multi-subject', False):
            # we will run the same analysis on each subject separately
            nruns = len(dat0_files)
            ismulti = True      
        else:
            nruns = 1     
            ismulti = False     

        results = [] # to store the results later

        for statrun in np.arange(nruns):                      
            
            if ismulti:
                # we will run the same analysis on each subject separately
                dat0, evokeds0, connectivity = collect_data([dat0_files[statrun]],condnames[0],tmin,tmax,ismulti)
                dat1, evokeds1, _ = collect_data([dat1_files[statrun]],condnames[1],tmin,tmax,ismulti)    
            else:
                # collect together the data to be compared
                dat0, evokeds0, connectivity = collect_data(dat0_files,condnames[0],tmin,tmax,ismulti)
                dat1, evokeds1, _ = collect_data(dat1_files,condnames[1],tmin,tmax,ismulti)        
                
            alldata = [] 

            # fix threshold to be one-sided if requested
            if not isinstance(p_threshold, dict):  # i.e. is NOT TFCE
                if config.stats_params[c]['stat'] == 'indep':
                    stat_fun = ttest_ind_no_p
                    if len(dat0_files) == 1:  # i.e. single-subject stats
                        df = dat0.data.shape[0] - 1 + dat1.data.shape[0] - 1
                    else:
                        df = len(dat0_files) - 1 + len(dat1_files) - 1
                else:  # i.e. dependent data, so a one-sample t-test
                    # this will only ever be group data...
                    # if dat0_files and dat1_files differ in length it'll crash later anyway
                    stat_fun = ttest_1samp_no_p
                    df = len(dat0_files) - 1
                threshold_stat = stats.distributions.t.ppf(1. - p_threshold, df) * tail_x
            else:  # i.e. is TFCE
                threshold_stat = p_threshold
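            # for reference, the two branches above yield either
            #   TFCE:       threshold_stat = dict(start=..., step=...)
            #   parametric: threshold_stat = t.ppf(1 - p, df) * tail_x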
        
            # run the stats
            if config.stats_params[c]['stat'] == 'indep':
                alldata = [dat0,dat1]
                cluster_stats = spatio_temporal_cluster_test(alldata, n_permutations=n_permutations,
                                                        threshold=threshold_stat, 
                                                        tail=tail, stat_fun=stat_fun,
                                                        n_jobs=1, buffer_size=None,
                                                        connectivity=connectivity)
            elif config.stats_params[c]['stat'] == 'dep':
                # we have to use 1-sample t-tests here so also need to subtract conditions
                alldata = dat0 - dat1
                cluster_stats = spatio_temporal_cluster_1samp_test(alldata, n_permutations=n_permutations,
                                                        threshold=threshold_stat, 
                                                        tail=tail, stat_fun=stat_fun,
                                                        n_jobs=1, buffer_size=None,
                                                        connectivity=connectivity)

            # extract stats of interest
            T_obs, clusters, p_values, _ = cluster_stats
            good_cluster_inds = np.where(p_values < config.stats_params[c]['p_accept'])[0]

            # tell the user the results
            print('There are {} significant clusters'.format(good_cluster_inds.size))
            if good_cluster_inds.size != 0:
                print('p-values: {}'.format(p_values[good_cluster_inds]))
            else:
                if p_values.any():
                    print('Minimum p-value: {}'.format(np.min(p_values)))
                else:
                    print('No clusters found')

            # some final averaging and tidying
            if len(evokeds0) == 1:
                dat0_avg = evokeds0[0].average()
                dat1_avg = evokeds1[0].average()
            else:
                dat0_avg = mne.grand_average(evokeds0)
                dat1_avg = mne.grand_average(evokeds1)
            diffcond_avg = mne.combine_evoked([dat0_avg, -dat1_avg], 'equal')
            
            # get sensor positions via layout
            pos = mne.find_layout(evokeds0[0].info).pos

            ## EVENTUALLY I WILL PUT THE PLOTTING IN A SEPARATE FUNCTION...
            do_plot = False
            
            if do_plot:
                # loop over clusters
                for i_clu, clu_idx in enumerate(good_cluster_inds):
                    # unpack cluster information, get unique indices
                    time_inds, space_inds = np.squeeze(clusters[clu_idx])
                    ch_inds = np.unique(space_inds)
                    time_inds = np.unique(time_inds)   

                    # get topography for F stat
                    f_map = T_obs[time_inds, ...].mean(axis=0)

                    # get topography of difference
                    time_shift = evokeds0[0].time_as_index(tmin)      # fix windowing shift
                    print('time_shift = {}'.format(time_shift))
                    sig_times_idx = time_inds + time_shift
                    diff_topo = np.mean(diffcond_avg.data[:,sig_times_idx],axis=1)
                    sig_times = evokeds0[0].times[sig_times_idx]
                    
                    # create spatial mask
                    mask = np.zeros((f_map.shape[0], 1), dtype=bool)
                    mask[ch_inds, :] = True

                    # initialize figure
                    fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))

                    # plot average difference and mark significant sensors
                    image, _ = plot_topomap(diff_topo, pos, mask=mask, axes=ax_topo, cmap='RdBu_r',
                                            vmin=np.min, vmax=np.max, show=False)

                    # create additional axes (for ERF and colorbar)
                    divider = make_axes_locatable(ax_topo)

                    # add axes for colorbar
                    ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
                    plt.colorbar(image, cax=ax_colorbar)
                    ax_topo.set_xlabel(
                        'Mean difference ({:0.3f} - {:0.3f} s)'.format(*sig_times[[0, -1]]))

                    # add new axis for time courses and plot time courses
                    ax_signals = divider.append_axes('right', size='300%', pad=1.2)
                    title = 'Cluster #{0}, {1} sensor'.format(i_clu + 1, len(ch_inds))
                    if len(ch_inds) > 1:
                        title += "s (mean)"
                    plot_compare_evokeds([dat0_avg, dat1_avg], title=title, picks=ch_inds, axes=ax_signals,
                                            colors=None, show=False,
                                            split_legend=False, truncate_yaxis='max_ticks')

                    # plot temporal cluster extent
                    ymin, ymax = ax_signals.get_ylim()
                    ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1],
                                                color='orange', alpha=0.3)

                    # clean up viz
                    mne.viz.tight_layout(fig=fig)
                    fig.subplots_adjust(bottom=.05)
                    plt.show()   

            results.append({                    
                'cluster_stats': cluster_stats,
                'good_cluster_inds': good_cluster_inds,
                'alldata': alldata,
                'evokeds0': evokeds0,
                'evokeds1': evokeds1
            })

        # save
        save_name = op.join(config.stat_path, config.stats_params[c]['analysis_name'] + '.dat')
        with open(save_name, 'wb') as pickle_out:
            pickle.dump(results, pickle_out)
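# A minimal sketch of reading one of these result files back (same save_name
# construction as above):
def load_sensor_stats(save_name):
    import pickle
    with open(save_name, 'rb') as f:
        return pickle.load(f)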
connectivity = spatial_tris_connectivity(grade_to_tris(5))

#    Note that X needs to be a list of multi-dimensional arrays of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
                                 threshold=f_threshold)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
adjacency = spatial_src_adjacency(src)

#    Note that X needs to be a list of multi-dimensional arrays of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
                                 threshold=f_threshold, buffer_size=None)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters
# ----------------------

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)

#    Now let's actually do the clustering. Please relax, on a small
#    notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)

#    To speed things up a bit we will ...
n_permutations = 128  # ... run fewer permutations (reduces sensitivity)

print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
                                 threshold=f_thresh, stat_fun=stat_fun,
                                 n_permutations=n_permutations,
                                 buffer_size=None)
#    Now select the clusters that are sig. at p < 0.05 (note that this value
#    is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

###############################################################################
# Visualize the clusters

print('Visualizing clusters.')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                             vertices=fsave_vertices,
                                             subject='fsaverage')
Example #39
def erp_analysis(subjects):
    from mne.stats import spatio_temporal_cluster_test
    from mne.channels import read_ch_connectivity
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    from scipy import stats

    all_evo = {'off_sup_lon': list(), 'off_sup_sho': list()}
    all_log = list()
    condition_names = ['S2 Longer', 'S2 Shorter']
    n_subjects = len(subjects)

    # Load
    for subj in subjects:
        epochs, log = load_subj(subj)
        epochs = epochs[['off_sup_lon', 'off_sup_sho']]
        log = log.loc[(log.Condition == 2.0) & (log.Ratio != 1.0)]

        corr_log = list()
        for k in all_evo.keys():
            c = marks_off[k]
            sub_log = log[log.condition == c].copy()  # copy to avoid SettingWithCopyWarning
            sub_log['epo_ix'] = np.arange(len(sub_log))
            # corr_ix = sub_log['epo_ix'].loc[(sub_log.condition == c) & (sub_log.Accuracy == 1.0)].values
            # sub_log = sub_log.loc[(sub_log.condition == c) & (sub_log.Accuracy == 1.0)]
            corr_ix = sub_log['epo_ix']
            all_evo[k].append(epochs[k][corr_ix].average())
            corr_log.append(sub_log)
            print(k, c, len(corr_ix))

        all_log.append(pd.concat(corr_log))

    all_log = pd.concat(all_log)
    all_log.groupby('condition')[['condition']].agg(np.count_nonzero).plot(kind='bar')

    # Plot
    evoked = {
        k: mne.combine_evoked(all_evo[k], weights='nave')
        for k in all_evo.keys()
    }
    mne.viz.plot_evoked_topo([evoked[ev] for ev in evoked.keys()])

    # Stats
    connectivity, ch_names = read_ch_connectivity(
        '/Users/lpen/Documents/MATLAB/Toolbox/fieldtrip-20170628/template/neighbours/biosemi128_neighb.mat'
    )

    #threshold = {'start': 5, 'step': 0.5}
    threshold = None
    p_threshold = 0.001
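    # ppf of a small tail probability is negative, so the sign flip yields a
    # positive two-tailed t threshold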
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)

    x = {
        k: np.array([all_evo[k][ix_s].data for ix_s in range(len(subjects))])
        for k in sorted(all_evo.keys())
    }
    x = [np.transpose(x[k], (0, 2, 1)) for k in sorted(x.keys())]

    t_obs, clusters, p_values, _ = spatio_temporal_cluster_test(
        x,
        n_permutations=1000,
        threshold=t_threshold,
        tail=0,
        n_jobs=2,
        connectivity=connectivity,
    )

    p_val = 0.01
    good_cluster_inds = np.where(p_values < p_val)[0]
    print(good_cluster_inds)
    print(len(good_cluster_inds))

    # configure variables for visualization
    times = evoked['off_sup_lon'].times * 1e3
    colors = 'r', 'b',
    linestyles = '-', '-',

    # grand average as numpy array
    grand_ave = np.array(x).mean(axis=1)

    # get sensor positions via layout
    pos = mne.find_layout(evoked['off_sup_lon'].info).pos

    # loop over significant clusters
    for i_clu, clu_idx in enumerate(good_cluster_inds):
        # unpack cluster information, get unique indices
        time_inds, space_inds = np.squeeze(clusters[clu_idx])
        ch_inds = np.unique(space_inds)
        time_inds = np.unique(time_inds)

        # get topography for F stat
        f_map = t_obs[time_inds, ...].mean(axis=0)

        # get signals at significant sensors
        signals = grand_ave[..., ch_inds].mean(axis=-1)
        sig_times = times[time_inds]

        # create spatial mask
        mask = np.zeros((f_map.shape[0], 1), dtype=bool)
        mask[ch_inds, :] = True

        # initialize figure
        fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))
        title = 'Cluster #{0}'.format(i_clu + 1)
        fig.suptitle(title, fontsize=14)

        # plot average test statistic and mark significant sensors
        image, _ = mne.viz.plot_topomap(f_map,
                                        pos,
                                        mask=mask,
                                        axes=ax_topo,
                                        cmap='magma',
                                        vmin=np.min,
                                        vmax=np.max)

        # advanced matplotlib for showing image with figure and colorbar
        # in one plot
        divider = make_axes_locatable(ax_topo)

        # add axes for colorbar
        ax_colorbar = divider.append_axes('right', size='5%', pad=0.05)
        plt.colorbar(image, cax=ax_colorbar)
        ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format(
            *sig_times[[0, -1]]))

        # add new axis for time courses and plot time courses
        ax_signals = divider.append_axes('right', size='300%', pad=1.5)
        for signal, name, col, ls in zip(signals, condition_names, colors,
                                         linestyles):
            ax_signals.plot(times, signal, color=col, linestyle=ls, label=name)

        # add information
        ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset')
        ax_signals.set_xlim([times[0], times[-1]])
        ax_signals.set_ylim([-10e-7, 20e-7])
        ax_signals.set_xlabel('time [ms]')
        ax_signals.set_ylabel('Amplitude')
        ax_signals.hlines(0, xmin=times[0], xmax=times[-1], linestyles='--')

        # plot significant time range
        ymin, ymax = ax_signals.get_ylim()
        ax_signals.fill_betweenx((ymin, ymax),
                                 sig_times[0],
                                 sig_times[-1],
                                 color='orange',
                                 alpha=0.3)
        ax_signals.legend(loc='lower right')
        ax_signals.set_ylim(ymin, ymax)

        # clean up viz
        mne.viz.tight_layout(fig=fig)
        fig.subplots_adjust(bottom=.05)
        plt.show()
        fig.savefig(op.join(study_path, 'figures',
                            'ERP_off_clust_{}.eps'.format(i_clu)),
                    format='eps',
                    dpi=300)

    # Cluster Amplitude
    # t_mask = np.arange(len(times))[(times > 300) & (times < 400)]
    # sig_amp = {k: np.array([x[ix_c][ix_s, t_mask, :][:, ch_inds].mean() for ix_s, s in enumerate(subjects)]) for ix_c, k in enumerate(['lon', 'sho'])}
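    # note: time_inds and ch_inds below carry over from the last significant
    # cluster visited in the loop above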
    sig_amp = {
        k: np.array([
            x[ix_c][ix_s, time_inds, :][:, ch_inds].mean()
            for ix_s, s in enumerate(subjects)
        ])
        for ix_c, k in enumerate(['lon', 'sho'])
    }

    subj_cond = all_log.groupby('subject')[['RT', 'Accuracy']].agg(np.mean)
    subj_cond['acc_lon'] = all_log[all_log.condition == 90].groupby(
        'subject')[['Accuracy']].agg(np.mean)
    subj_cond['acc_sho'] = all_log[all_log.condition == 70].groupby(
        'subject')[['Accuracy']].agg(np.mean)
    subj_cond['amp_lon'] = sig_amp['lon']
    subj_cond['amp_sho'] = sig_amp['sho']
    subj_cond['amp_dif'] = subj_cond['amp_sho'] - subj_cond['amp_lon']

    print(subj_cond.corr(method='pearson'))

    from seaborn import regplot
    from eeg_etg_fxs import permutation_pearson

    r_sho, p_sho = permutation_pearson(subj_cond['amp_dif'].values,
                                       subj_cond['acc_sho'].values, 10000)
    r_lon, p_lon = permutation_pearson(subj_cond['amp_dif'].values,
                                       subj_cond['acc_lon'].values, 10000)

    plt.style.use('ggplot')
    fig, axes = plt.subplots(1, 2, sharey=True, sharex=True)
    for ix, (r, p, c) in enumerate(
            zip([r_lon, r_sho], [p_lon, p_sho], ['acc_lon', 'acc_sho'])):
        regplot(subj_cond['amp_dif'], subj_cond[c], ci=None, ax=axes[ix])
        axes[ix].set_title('r = %0.3f   p = %0.3f' % (r, p))
    fig.savefig(op.join(study_path, 'figures', 'ERP_diff_acc.eps'),
                format='eps',
                dpi=300)

    mne.viz.plot_compare_evokeds([evoked[ev] for ev in evoked.keys()], picks=2)
    plt.savefig(op.join(study_path, 'figures', 'ERP_diff_A4.eps'),
                format='eps',
                dpi=300)