Example #1
def run_cluster_permutation_test_1samp(data,
                                       ch_type='eeg',
                                       nperm=2**12,
                                       threshold=None,
                                       n_jobs=6,
                                       tail=0):
    # If threshold is None, MNE picks a t-threshold equivalent to p < 0.05
    # for the given number of observations (only valid for a t-statistic).
    data_tmp = data.copy()

    # compute channel adjacency (MNE >= 0.22) or connectivity (older MNE)
    if mne.__version__ == '0.22.0' or mne.__version__ == '0.23.0':
        adjacency = mne.channels.find_ch_adjacency(data_tmp.info,
                                                   ch_type=ch_type)[0]
    else:
        connectivity = mne.channels.find_ch_connectivity(data_tmp.info,
                                                         ch_type=ch_type)[0]

    # subset of the data, as array
    if ch_type == 'eeg':
        data_tmp.pick_types(meg=False, eeg=True)
    else:
        data_tmp.pick_types(meg=ch_type, eeg=False)
    data_array_chtype = np.array(
        [data_tmp[c].get_data() for c in range(len(data_tmp))])
    data_array_chtype = np.transpose(np.squeeze(data_array_chtype),
                                     (0, 2, 1))  # transpose for clustering

    # stat func
    if mne.__version__ == '0.22.0' or mne.__version__ == '0.23.0':
        cluster_stats = permutation_cluster_1samp_test(data_array_chtype,
                                                       threshold=threshold,
                                                       n_jobs=n_jobs,
                                                       verbose=True,
                                                       tail=tail,
                                                       n_permutations=nperm,
                                                       adjacency=adjacency,
                                                       out_type='indices')
    else:
        cluster_stats = permutation_cluster_1samp_test(
            data_array_chtype,
            threshold=threshold,
            n_jobs=n_jobs,
            verbose=True,
            tail=tail,
            n_permutations=nperm,
            connectivity=connectivity,
            out_type='indices')
    return cluster_stats, data_array_chtype, ch_type
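# A hypothetical usage sketch (not part of the original example): `epochs` is
# assumed to be an mne.Epochs object with EEG channels; the helper returns the
# cluster stats plus the array that was actually clustered.
cluster_stats, data_array, ch_type = run_cluster_permutation_test_1samp(
    epochs, ch_type='eeg', nperm=2**10, threshold=None, n_jobs=1, tail=0)
T_obs, clusters, cluster_p_values, H0 = cluster_stats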
Example #2
def test_permutation_t_test():
    """Test T-test based on permutations."""
    # 1 sample t-test
    np.random.seed(10)
    n_samples, n_tests = 30, 5
    X = np.random.randn(n_samples, n_tests)
    X[:, :2] += 1

    t_obs, p_values, H0 = permutation_t_test(X,
                                             n_permutations=999,
                                             tail=0,
                                             seed=0)
    assert (p_values > 0).all()
    assert len(H0) == 999
    is_significant = p_values < 0.05
    assert_array_equal(is_significant, [True, True, False, False, False])

    t_obs, p_values, H0 = permutation_t_test(X,
                                             n_permutations=999,
                                             tail=1,
                                             seed=0)
    assert (p_values > 0).all()
    assert len(H0) == 999
    is_significant = p_values < 0.05
    assert_array_equal(is_significant, [True, True, False, False, False])

    t_obs, p_values, H0 = permutation_t_test(X,
                                             n_permutations=999,
                                             tail=-1,
                                             seed=0)
    is_significant = p_values < 0.05
    assert_array_equal(is_significant, [False, False, False, False, False])

    X *= -1
    t_obs, p_values, H0 = permutation_t_test(X,
                                             n_permutations=999,
                                             tail=-1,
                                             seed=0)
    assert (p_values > 0).all()
    assert len(H0) == 999
    is_significant = p_values < 0.05
    assert_array_equal(is_significant, [True, True, False, False, False])

    # check equivalence with spatio_temporal_cluster_test
    for connectivity in (sparse.eye(n_tests), False):
        t_obs_clust, _, p_values_clust, _ = permutation_cluster_1samp_test(
            X, n_permutations=999, seed=0, connectivity=connectivity)
        # the cluster tests drop any clusters that don't get thresholded
        keep = p_values < 1
        assert_allclose(t_obs_clust, t_obs)
        assert_allclose(p_values_clust, p_values[keep], atol=1e-2)

    X = np.random.randn(18, 1)
    t_obs, p_values, H0 = permutation_t_test(X, n_permutations='all')
    t_obs_scipy, p_values_scipy = stats.ttest_1samp(X[:, 0], 0)
    assert_allclose(t_obs[0], t_obs_scipy, rtol=1e-8)
    assert_allclose(p_values[0], p_values_scipy, rtol=1e-2)
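# A small sketch (an assumption, not part of the MNE test suite): for a
# roughly symmetric permutation distribution, the two-tailed p-value is about
# twice the matching one-tailed p-value.
import numpy as np
from mne.stats import permutation_t_test

rng = np.random.RandomState(10)
X_demo = rng.randn(30, 5)
X_demo[:, :2] += 1
_, p_two, _ = permutation_t_test(X_demo, n_permutations=999, tail=0, seed=0)
_, p_one, _ = permutation_t_test(X_demo, n_permutations=999, tail=1, seed=0)
print(p_two[:2] / p_one[:2])  # close to 2 for the shifted columns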
Example #3
def tfr_permutation(data, title, threshold, tail):
    """Plot and test the event-related perturbation.

    Input
    -----
    * data: numpy array

    * title: string
        Name of the file used for saving.

    * threshold: float
        Threshold value used to find clusters.

    * tail: int
        -1 or 1 for a one-tailed test, 0 for a two-tailed test.

    Output
    ------
    Saves the matplotlib figure as '<title>.svg'.

    """
    n_permutations = 5000
    T_obs, clusters, cluster_p_values, H0 = \
        permutation_cluster_1samp_test(data,
                                       n_permutations=n_permutations,
                                       threshold=threshold,
                                       tail=tail)

    # Create new stats image with only significant clusters
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        if p_val <= 0.05:
            T_obs_plot[c] = T_obs[c]

    plt.figure(figsize=(8, 4))
    plt.imshow(data.mean(0),
               cmap=plt.cm.get_cmap('RdBu_r', 12),
               vmin=-0.15,
               vmax=0.15,
               extent=[-0.5, 3, 3, 30],
               interpolation='gaussian',
               aspect='auto',
               origin='lower')
    clb = plt.colorbar()
    clb.ax.set_title('% change')
    plt.contour(~np.isnan(T_obs_plot),
                colors=["w"],
                extent=[-0.5, 3, 3, 30],
                linewidths=[2],
                corner_mask=False,
                antialiased=True,
                levels=[.5])
    plt.axvline(x=0, linestyle='--', linewidth=2, color='k')
    plt.ylabel('Frequencies', size=15)
    plt.xlabel('Time (s)', size=15)
    plt.savefig(cwd + '/Figures/' + title + '.svg', dpi=300)
Example #4
def decod_stats(X):
    """Statistical test applied across subjects."""
    from mne.stats import permutation_cluster_1samp_test
    # check input
    X = np.array(X)

    # stats function report p_value for each cluster
    T_obs_, clusters, p_values, _ = permutation_cluster_1samp_test(
        X, out_type='mask', n_permutations=2**12, n_jobs=6, verbose=False)

    # format p_values to get the same dimensionality as X
    p_values_ = np.ones_like(X[0])
    for cluster, pval in zip(clusters, p_values):
        p_values_[cluster] = pval

    return np.squeeze(p_values_)
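# A hypothetical usage sketch (not part of the original source): X holds one
# decoding-score time course per subject; values of the returned array below
# 0.05 mark time points that fall inside a significant cluster.
import numpy as np

X_demo = np.random.RandomState(0).randn(20, 100)  # (n_subjects, n_times)
p_vals = decod_stats(X_demo)
sig_mask = p_vals < 0.05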
Example #5
def inspect_specgram(specgram_avg):
    """Produces time-frequency comparison. 2-D permutation cluster tests are used to compare average (subject) spectrograms between conditions."""
    plt.tick_params(labelsize=18)
    tf_corr = specgram_avg['O1']['correct']
    tf_control = specgram_avg['O1']['control']
    
    wave_corr = tfr['O1']['correct']
    wave_ctrl = tfr['O1']['control']
    
    ee = []
    for corr, ctrl, w_corr, w_ctrl in zip(tf_corr, tf_control,
                                          wave_corr, wave_ctrl):
        ee.append((corr - ctrl)[0:30])

    ee = np.array(ee)
    T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(ee, n_permutations=100)
    
    # Create new stats image with only significant clusters
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        print(p_val)
        if p_val <= 0.05:
            print(c)
            T_obs_plot[c] = T_obs[c]
    
    vmax = np.max(np.abs(T_obs))
    vmin = -vmax
    
    fig, axes = plt.subplots(figsize=(20, 10))

    axes.imshow(T_obs, cmap=plt.cm.gray,
                aspect='auto', origin='lower', vmin=vmin, vmax=vmax)

    cbar = axes.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,
                       aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
    
    axes.set_ylabel('Frequency (Hz)')
    axes.set_xlabel('Time (ms)')
    #fig.suptitle('Time-Frequency Log Power Difference')
    fig.colorbar(cbar)
    axes.set_xticklabels(np.linspace(0, 5000, 11), fontsize=18)
    axes.yaxis.set_tick_params(labelsize=18)
    axes.yaxis.set_label_coords(-0.07, 0.5)

    fig.savefig('time_frequency.svg')
    fig.savefig('time_frequency.eps')
    fig.savefig('time_frequency.png', papertype='a0')
Example #6
def one_sample_nonparam(a,
                        threshold,
                        connectivity=None,
                        nperm=1024,
                        stat_fun=None,
                        buffer_size=None,
                        tail=0,
                        n_jobs=18):
    results = permutation_cluster_1samp_test(a,
                                             n_jobs=n_jobs,
                                             threshold=threshold,
                                             connectivity=connectivity,
                                             n_permutations=nperm,
                                             stat_fun=stat_fun,
                                             buffer_size=buffer_size,
                                             tail=tail)
    return results
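# A hypothetical usage sketch (not from the original source). Note the wrapper
# forwards `connectivity`, which matches MNE versions before the keyword was
# renamed to `adjacency` (around 0.21).
import numpy as np
from scipy import stats

a_demo = np.random.RandomState(1).randn(15, 60)  # (n_observations, n_times)
t_thresh = stats.t.ppf(1 - 0.025, a_demo.shape[0] - 1)  # two-tailed p < 0.05
T_obs, clusters, cluster_p, H0 = one_sample_nonparam(a_demo,
                                                     threshold=t_thresh,
                                                     n_jobs=1)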
Example #7
def permutation_correlation(diff_data, info, n_permutations, p_value):
    sensor_adjacency, ch_names = find_ch_adjacency(info, "eeg")
    adjacency = combine_adjacency(sensor_adjacency, diff_data.shape[2],
                                  diff_data.shape[3])

    T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
        diff_data,
        n_permutations=n_permutations,
        threshold=None,
        tail=0,
        adjacency=adjacency,
        out_type="mask",
        verbose=True,
    )

    # Create new stats image with only significant clusters for plotting
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        if p_val <= p_value:
            print(f"Significant cluster with p-value {p_val}")
            T_obs_plot[c] = T_obs[c]

    return T_obs_plot, cluster_p_values[cluster_p_values <= p_value]
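# A hypothetical usage sketch (not from the original source): build dummy
# (n_subjects, n_channels, n_freqs, n_times) data around a standard 10-20
# montage so that find_ch_adjacency can locate the sensors.
import numpy as np
import mne

info_demo = mne.create_info(['Fz', 'C3', 'C4', 'Pz'], sfreq=250.,
                            ch_types='eeg')
info_demo.set_montage('standard_1020')
diff_demo = np.random.RandomState(2).randn(12, 4, 5, 50)
T_obs_plot, sig_p = permutation_correlation(diff_demo, info_demo,
                                            n_permutations=500, p_value=0.05)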
Example #8
##############################################
# Permutation testing 1D temporal clustering #
##############################################
# TFCE with "hat" correction #
##############################
fig, axs = plt.subplots(1, len(sensors), sharex=True, sharey=True)
for ii, key in enumerate(sensors.keys()):
    print('     Permutation testing...\n'
          '      |deviant - standard| in %s hem...' % key)
    stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
    t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test(
        contrast[ii],
        n_jobs=18,
        threshold=threshold_tfce,
        connectivity=None,
        n_permutations=n_permutations,
        stat_fun=stat_fun_hat,
        buffer_size=None,
        tail=1)
    sig_times = times[np.where(p_tfce_hat < alpha)[0]]
    sig_times = sig_times[np.logical_and(sig_times > .2, sig_times < .55)]
    axs[ii].plot(times,
                 t_tfce_hat,
                 color='k',
                 label=r'$\mathrm{|deviant - standard|_{hat,TFCE}}$')
    ymin, ymax = axs[ii].get_ylim()
    axs[ii].scatter(sig_times,
                    np.ones(sig_times.shape) * ymin,
                    s=2,
                    marker='o',
Example #9
p_thresh = p_accept / (1 + (tail == 0))
n_samples = len(data)
threshold = -ppf(p_thresh, n_samples - 1)
if np.sign(tail) < 0:
    threshold = -threshold

# Make a triangulation between EEG channel locations to
# use as connectivity for the cluster-level stat
# XXX : make a mne.channels.make_eeg_connectivity function
connectivity, ch_names = mne.channels.find_ch_connectivity(contrast.info, 'eeg')


data = np.transpose(data, (0, 2, 1))  # transpose for clustering

cluster_stats = permutation_cluster_1samp_test(
    data, threshold=threshold, n_jobs=2, verbose=True, tail=1,
    connectivity=connectivity, out_type='indices',
    check_disjoint=True)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

print("Good clusters: %s" % good_cluster_inds)

##############################################################################
# Visualize the spatio-temporal clusters

times = contrast.times * 1e3
colors = 'r', 'steelblue'
linestyles = '-', '--'

pos = mne.find_layout(contrast.info).pos
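# A standalone sketch (an assumption, not from the original snippet) of the
# threshold logic used above: map an initial p-value to a t critical value,
# halving the p-value when the test is two-tailed (tail=0).
from scipy import stats

p_accept_demo, tail_demo, n_demo = 0.05, 0, 30  # hypothetical values
p_thresh_demo = p_accept_demo / (1 + (tail_demo == 0))
threshold_demo = -stats.t.ppf(p_thresh_demo, n_demo - 1)  # positive t cutoff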
Example #10
#     plt.ylabel('Frequency (Hz)')

#     plt.show()      
#     plt.savefig(C.pictures_path_Source_estimate+ 'two-way_RM_pca'+y_label[e]+'.png')

##############################################################################
### t-test and cluster-based correction for each ROI
tail = 0
lb = C.rois_labels
# difference of SD (0:6) and LD (6:12) for each ROI and individual
Z = X[:, 0:6, :, a:-b] - X[:, 6:12, :, a:-b]

for k in np.arange(0, len(lb)):
    T_obs, clusters, cluster_p_values, H0 = \
        permutation_cluster_1samp_test(Z[:, k, :, :],
                                       n_permutations=C.n_permutations,
                                       threshold=t_threshold, tail=tail,
                                       connectivity=None, out_type='mask',
                                       verbose=True)
        
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        if p_val <= C.pvalue:
            T_obs_plot[c] = T_obs[c]
            
    T_obs_ttest = np.nan * np.ones_like(T_obs)
    for r in np.arange(0, Z.shape[2]):
        for c in np.arange(0, Z.shape[3]):
            if abs(T_obs[r, c]) > t_threshold:
                T_obs_ttest[r, c] = T_obs[r, c]
            
    vmax = np.max(T_obs)
    vmin = np.min(T_obs)
Example #11
tail = 0
t_threshold = -stats.distributions.t.ppf(C.pvalue / 2., len(C.subjects) - 1)
k = 0
for ROI_y in np.arange(0, 1):
    for ROI_x in np.arange(ROI_y + 1, 6):
        # for ROI_x in np.arange(2, 3):

        k1 = ROI_y * 6 + ROI_x
        k2 = ROI_x * 6 + ROI_y
        print(k1, k2)
        Z1 = X_SD[:, k1, :, :] - X_LD[:, k1, :, :]
        Z2 = X_SD[:, k2, :, :] - X_LD[:, k2, :, :]

        T_obs1, clusters1, cluster_p_values1, H01 = \
            permutation_cluster_1samp_test(Z1, n_permutations=C.n_permutations,
                                           threshold=t_threshold, tail=tail, out_type='mask',
                                           verbose=True)

        T_obs_plot1 = np.nan * np.ones_like(T_obs1)
        for c, p_val in zip(clusters1, cluster_p_values1):
            if p_val <= C.pvalue:
                T_obs_plot1[c] = T_obs1[c]

        T_obs2, clusters2, cluster_p_values2, H02 = \
            permutation_cluster_1samp_test(Z2, n_permutations=C.n_permutations,
                                           threshold=t_threshold, tail=tail, out_type='mask',
                                           verbose=True)

        T_obs_plot2 = np.nan * np.ones_like(T_obs2)
        for c, p_val in zip(clusters2, cluster_p_values2):
            if p_val <= C.pvalue:
Example #12
p_initial = 0.01

connectivity = None
tail = 0.  # for a two-sided test

# set cluster threshold
n_samples = len(data)
threshold = -stats.t.ppf(p_initial / (1 + (tail == 0)), n_samples - 1)
if np.sign(tail) < 0:
    threshold = -threshold

cluster_stats = permutation_cluster_1samp_test(data,
                                               threshold=threshold,
                                               n_jobs=N_JOBS,
                                               verbose=True,
                                               tail=tail,
                                               step_down_p=0.05,
                                               connectivity=connectivity,
                                               n_permutations=n_permutations,
                                               seed=random_state)

T_obs, clusters, cluster_p_values, _ = cluster_stats

##############################################################################
# Visualize results

set_matplotlib_defaults()

times = 1e3 * contrast.times

fig, axes = plt.subplots(2, sharex=True, figsize=(3.3, 2.5))
Example #13
    os.chdir(folda)

    ## empty arrays and lists to store the cluster permutation outputs
    T_ObsAllParcels = np.empty([nbParcels, nbTimeSamples])
    clustersAllParcels = []
    cluster_pvParcels = []
    H0_allParcels = np.empty([nbParcels, n_permutations])

    for parcel in range(nbParcels):

        T_obs, clusters, cluster_pv, H0 = permutation_cluster_1samp_test(
            data[:, parcel, :],
            threshold=None,
            n_permutations=n_permutations,
            seed=542,
            tail=0,
            connectivity=None,
            verbose=None,
            n_jobs=1,
            buffer_size=None)

        H0_allParcels[parcel, :] = H0  # max cluster-level stat per permutation
        T_ObsAllParcels[parcel, :] = T_obs  # t-statistic for each sample
        clustersAllParcels.append(clusters)  # start/stop sample per cluster
        cluster_pvParcels.append(cluster_pv)  # p-value of each cluster

    nama = [
        'T_ObsAllParcels', 'clustersAllParcels', 'cluster_pvParcels',
Example #14
times = times[time_mask]

# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]

epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power)  # take log of ratio
# under the null hypothesis epochs_power should now be 0

###############################################################################
# Compute statistic
threshold = 2.5
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=100,
                                   threshold=threshold, tail=0)

###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
plt.plot(times, evoked_data.T)
plt.title('Evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 250)

plt.subplot(2, 1, 2)
Example #15
# adjacency = mne.stats.combine_adjacency(
#     sensor_adjacency, len(tfr_epochs.freqs), len(tfr_epochs.times))

# # our adjacency is square with each dim matching the data size
# assert adjacency.shape[0] == adjacency.shape[1] == \
#     len(tfr_epochs.ch_names) * len(tfr_epochs.freqs) * len(tfr_epochs.times)

###############################################################################
# Compute statistic
# -----------------
threshold = 3.
n_permutations = 50  # Warning: 50 is way too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
                                   threshold=threshold, tail=0,
                                   connectivity=None,
                                   out_type='mask', verbose=True)

###############################################################################
# View time-frequency plots
# -------------------------

evoked_data = evoked.data
times = 1e3 * evoked.times

plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)

# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
Example #16
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))

# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(0., 0.4)
tfr_epochs.crop(0., 0.4)

epochs_power = tfr_epochs.data[:, 0, :, :]  # take the only channel

###############################################################################
# Compute statistic
# -----------------
threshold = 2.5
n_permutations = 100  # Warning: 100 is too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
                                   threshold=threshold, tail=0,
                                   out_type='mask')

###############################################################################
# View time-frequency plots
# -------------------------

evoked_data = evoked.data
times = 1e3 * evoked.times

plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)

# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
Example #17
# vertices on a cortical surface. MNE provides several convenience functions
# for computing adjacency matrices (see the
# :ref:`Statistics API <api_reference_statistics>`).
#
# Standard clustering
# ~~~~~~~~~~~~~~~~~~~
# Here, since our data are on a grid, we can use ``adjacency=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.

titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold, adjacency=None,
    n_permutations=n_permutations, out_type='mask')
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
    p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])

###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
# variances :footcite:`RidgwayEtAl2012`:
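# A sketch of how that continues (assuming X, threshold and n_permutations
# as defined above; the sigma value is hypothetical): the "hat" adjustment
# regularizes the variance estimate inside the t-statistic.
from functools import partial
from mne.stats import ttest_1samp_no_p

sigma = 1e-3  # small constant added to the variance estimate
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters_hat, p_hat, H0_hat = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold, adjacency=None,
    n_permutations=n_permutations, stat_fun=stat_fun_hat, out_type='mask')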
Example #18
    data.append(pp)
data = np.array(data)

times = np.linspace(-1, 2.1, num=777)

minimax = (0.25, 0.5, 0.75)

tg_mean = np.mean(data, axis=0)

print("start")

# perm t test
t_obs, clusters, cluster_p, H0 = permutation_cluster_1samp_test(
    data - 0.5,
    connectivity=None,
    step_down_p=0,
    tail=0,
    n_jobs=-1,
    verbose=True)

print("done")

threshold = 0.05
bool_map = np.zeros(tg_mean.shape)
cluster_array = np.array(clusters)
cluster_amount = len(np.where(cluster_p < threshold)[0])
cluster_sig = cluster_array[np.where(cluster_p < threshold)]
cluster_mask = np.any(cluster_sig, axis=0)
bool_map[cluster_mask] = 0.1

fig, ax = plt.subplots(figsize=(10, 10))
Example #19
def perm_test(fft_averages, f, ch_names):
    """Produces PSD comparison figure. 1-D Cluster perm test for the difference in average (per subject) power spectrum amplitude between conditions."""
    times = f[0:30]
    for e_name in ch_names[12:13]:
        print(e_name)
        condition1 = np.array(fft_averages[e_name]['correct'])
        condition2 = np.array(fft_averages[e_name]['control'])
        
        
        difference_arr = condition1 - condition2

        T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
            difference_arr, n_permutations=1000)
    
    
        # fig, axes = plt.subplots(2, )
        fig = plt.figure(figsize=(20, 12))

        gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
        axes = []
        axes.append(plt.subplot(gs[0]))
        axes.append(plt.subplot(gs[1]))
        # fig.suptitle('%s\nCorrect - control' % e_name,
        #              fontweight='bold', fontsize='20')

        # TOP BOXPLOT
        x_box = np.repeat(times, condition1.shape[0])
        y_box = difference_arr.T.flatten()

        df = pd.DataFrame(difference_arr, columns=times)

        bp = sns.boxplot(df, ax=axes[0], fliersize=0, color='grey')

        sw = sns.swarmplot(x=x_box, y=y_box, color=".25", ax=axes[0],
                           alpha=0.5)

        bp.set(xticklabels=[])
        axes[0].set_ylabel('Log Power difference')
        axes[0].axhline(y=0, linestyle='--', alpha=0.5, color='black')

        # BOTTOM CLUSTERS
        sig_c = None
        for i_c, c in enumerate(clusters):
            c = c[0]
            if cluster_p_values[i_c] <= 0.05:
                h = axes[1].axvspan(times[c.start], times[c.stop - 1],
                                    color='#E24A33', alpha=0.3)
                sig_c = c
            else:
                axes[1].axvspan(times[c.start], times[c.stop - 1],
                                color=(0.3, 0.3, 0.3), alpha=0.3)

        if sig_c is not None:
            for box in bp.artists[sig_c.start:sig_c.stop]:
                box.set_facecolor('#E24A33')
        # BOTTOM LINE
        hf = axes[1].plot(times, T_obs, '#348ABD')
        if sig_c is not None:
            axes[1].legend((h, ), ('cluster p-value < 0.05', ),
                           prop={'size': 28})
        axes[1].set_xlabel("Frequency (Hz)")
        axes[1].set_ylabel("T-value")
        axes[1].set_xlim(min(times), max(times))
        axes[0].yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))

        for ax in axes:
            ax.xaxis.set_tick_params(labelsize=18)
            ax.yaxis.set_tick_params(labelsize=18)
            ax.get_yaxis().set_label_coords(-0.07, 0.5)

        fig.savefig('cluster_perms/' + e_name)
        fig.savefig('Cluster_O1.svg')
        fig.savefig('Cluster_O1.eps')
        fig.savefig('Cluster_O1.png', papertype='a0')
Example #20
def time_conn_analysis(subjects):
    times = np.arange(-0.55, 0.55, 0.025)
    n_pca = 2
    s_pca = np.ndarray((len(subjects), len(times), n_pca))
    all_log = list()
    s_vars = np.ndarray((len(subjects), n_pca))
    s_wsmi = list()
    for ix_s, s in enumerate(subjects):
        X, cond, sub_log = load_subject(s)
        x = X[:, :, cond == 0]

        # pca_lon = np.ndarray((X.shape[1], n_pca, X.shape[2]))
        # pca = decomposition.PCA(n_components=n_pca)
        #
        # for ep in range(x.shape[2]):
        #     epo = x[:, :, ep].T
        #     pca = decomposition.PCA(n_components=n_pca)
        #     pca_lon[:, :, ep] = pca.fit_transform(epo)

        con_avg = x.mean(axis=2).T
        s_wsmi.append(con_avg)
        pca = decomposition.PCA(n_components=n_pca)
        pca_avg = pca.fit_transform(con_avg)
        s_pca[ix_s, :, :] = pca_avg
        s_vars[ix_s, :] = pca.explained_variance_

        all_log.append(sub_log)

    zero_time = (times > -0.01) & (times < 0.01)
    subj_zero = [
        0 if
        (float(s_pca[ix_s, zero_time, 0]) > float(s_pca[ix_s, zero_time, 1]))
        else 1 for ix_s, s in enumerate(subjects)
    ]

    comp_dat = np.ndarray((len(subjects), len(times)))
    var_dat = np.empty(len(subjects))
    for ix_s, s in enumerate(subj_zero):
        # plt.plot(times, s_pca[ix_s, :, s])
        comp_dat[ix_s, :] = s_pca[ix_s, :, s]
        var_dat[ix_s] = s_vars[ix_s, s]

    comp_pk = np.abs(times[np.argmax(comp_dat, axis=1)])

    from mne.stats import permutation_cluster_1samp_test
    T_obs_, clusters, p_values, _ = permutation_cluster_1samp_test(
        comp_dat, threshold=None, n_permutations=1000, tail=0)
    good_clust = clusters[0]
    good_clust_inds = np.arange(len(times))[good_clust]

    from scipy.stats import sem, pearsonr, zscore

    plt.style.use('ggplot')
    fig, ax = plt.subplots(1, 1)
    ax.plot(times, comp_dat[:, :].mean(axis=0))
    ax.fill_between(times,
                    comp_dat[:, :].mean(axis=0) - sem(comp_dat[:, :]),
                    comp_dat[:, :].mean(axis=0) + sem(comp_dat[:, :]),
                    alpha=0.2)
    ax.set_ylim(-0.6, 0.8)
    ax.fill_between(times[good_clust], -0.6, 0.8, alpha=0.1, color='k')
    ax.vlines(0, ymin=-0.6, ymax=0.8, linestyles='--')
    ax.set_title('Mean variance explained: %0.2f' % var_dat.mean())
    ax.set_ylabel('PC (wSMI)')
    fig.savefig(op.join(study_path, 'figures', 'PCA_conn.eps'),
                format='eps',
                dpi=300)

    s_wsmi = np.array(s_wsmi)
    wSMI_gr_avg = s_wsmi.mean(axis=0)
    pc_gr_avg = comp_dat.mean(axis=0)

    corr_pc = np.empty(wSMI_gr_avg.shape[1])
    for pair in range(len(corr_pc)):
        corr_pc[pair] = pearsonr(wSMI_gr_avg[:, pair], pc_gr_avg)[0]

    from scipy.io import savemat
    savemat(
        op.join(study_path, 'results', 'wsmi', 'mov_win', 'wsmi_pc_corr.mat'),
        {'wsmi_pc_corr': corr_pc})

    plt.plot(times, zscore(wSMI_gr_avg[:, corr_pc > 0.8]))

    # behav
    conn_z = zscore(s_wsmi, axis=1)

    top_pairs = s_wsmi[:, :, corr_pc > 0.4][:, good_clust_inds, :]
    s_top = np.mean(top_pairs, axis=(1, 2))

    good_pca = comp_dat[:, good_clust_inds].mean(axis=1)

    from eeg_etg_fxs import set_dif_and_rt_exp
    log = pd.concat(all_log)
    log = log[log.condition != 80]
    log = set_dif_and_rt_exp(log)
    log['dif'].plot(kind='hist', bins=200)
    plt.xticks(np.linspace(-1.5, 1.5, 31))

    s_log = log[log.condition == 90].groupby('subject')[['Accuracy',
                                                         'RT']].agg(np.nanmean)
    s_log['con'] = s_top
    s_log['pca'] = good_pca
    s_log['pca_pk'] = np.abs(comp_pk)
    s_log.corr()

    from seaborn import regplot
    regplot(s_log['Accuracy'], s_log['pca_pk'])
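# A sketch (an assumption, not part of the original function): the code above
# keeps clusters[0] whether or not it is significant; a safer pattern filters
# clusters by their p-values first, shown here with hypothetical scores.
import numpy as np
from mne.stats import permutation_cluster_1samp_test

scores = np.random.RandomState(3).randn(16, 44)  # (n_subjects, n_times)
_, clusters_demo, p_demo, _ = permutation_cluster_1samp_test(
    scores, threshold=None, n_permutations=1000, tail=0)
good_clusts = [c for c, p in zip(clusters_demo, p_demo) if p < 0.05]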
Example #21
for r in ['HP_r', 'HP_l']:

    roi_pows_corr = [
        p.copy().apply_baseline(mode='zscore', baseline=(-0.95, -0.75))
        for p in roi_pows[r]
    ]

    power_lon = roi_pows_corr[0].crop(-0.4, 0.4)
    power_sho = roi_pows_corr[1].crop(-0.4, 0.4)

    power_c1 = power_lon.data[:, 0, :, :]
    power_c2 = power_sho.data[:, 0, :, :]
    #
    threshold = None
    T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
        power_c2, n_permutations=1000, threshold=threshold, tail=1)

    # threshold = None
    # T_obs, clusters, cluster_p_values, H0 = \
    #     permutation_cluster_test([power_c1, power_c2],
    #                              n_permutations=500, threshold=threshold, tail=0)

    p_val = 0.01
    good_cluster_inds = np.where(cluster_p_values < p_val)[0]
    print(good_cluster_inds)
    print(len(good_cluster_inds))

    times = 1e3 * power_lon.times
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        if p_val <= 0.05:
Example #22
times = times[time_mask]

# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]

epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power)  # take log of ratio
# under the null hypothesis epochs_power should now be 0

###############################################################################
# Compute statistic
threshold = 2.5
T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
    epochs_power, n_permutations=100, threshold=threshold, tail=0
)

###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
plt.plot(times, evoked_data.T)
plt.title("Evoked response (%s)" % ch_name)
plt.xlabel("time (ms)")
plt.ylabel("Magnetic Field (fT/cm)")
plt.xlim(times[0], times[-1])
plt.ylim(-100, 250)

plt.subplot(2, 1, 2)
Example #23
# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
    len(power_InCongruent.ch_names) * len(power_InCongruent.freqs) * len(power_InCongruent.times)

power_data = power_InCongruent_ave.reshape(subject_num, len(use_idx),
                                           len(power_InCongruent.freqs),
                                           len(power_InCongruent.times))
n_permutations = 1000  # more permutations give more stable p-values
# threshold: min=-4.874401 max=7.152988
# HC: 2.7, 457: 3
T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
    power_data,
    n_permutations=1000,
    threshold=2.7,
    tail=0,
    adjacency=adjacency,
    out_type='mask',
    verbose=True,
)
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val < 0.01:
        T_obs_plot[c] = T_obs[c]

vmax = np.max(np.abs(T_obs))
vmin = -vmax

plt.imshow((T_obs[0] + T_obs[1]) / 2,
           cmap=plt.cm.gray,
           extent=[-0.2, 0.6, 4, 30],
Example #24
n_samples = len(data)
threshold = -ppf(p_thresh, n_samples - 1)
if np.sign(tail) < 0:
    threshold = -threshold

# Make a triangulation between EEG channel locations to
# use as connectivity for the cluster-level stat
# XXX : make a mne.channels.make_eeg_connectivity function
lay = mne.channels.make_eeg_layout(contrast.info)
neigh = spatial.Delaunay(lay.pos[:, :2]).vertices
connectivity = mne.surface.mesh_edges(neigh)

data = np.transpose(data, (0, 2, 1))  # transpose for clustering

cluster_stats = permutation_cluster_1samp_test(
    data, threshold=threshold, n_jobs=2, verbose=True, tail=1,
    connectivity=connectivity, out_type='indices',
    check_disjoint=True)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]

print("Good clusters: %s" % good_cluster_inds)

##############################################################################
# Visualize the spatio-temporal clusters

times = contrast.times * 1e3
colors = 'r', 'steelblue'
linestyles = '-', '--'

pos = mne.find_layout(contrast.info).pos
Example #25
def topoplot(data, freq, title, threshold, tail):
    """Plot and test the event-related perturbation.

    Input
    -----
    * data: numpy array

    * freq: array-like of int
        Indices of the frequency band to average over.

    * title: string
        Name of the file used for saving.

    * threshold: float
        Threshold value used to find clusters.

    * tail: int
        -1 or 1 for a one-tailed test, 0 for a two-tailed test.

    Output
    ------
    Saves the matplotlib figure as '<title>_topo.svg'.

    """
    fig, axs = plt.subplots(1,
                            7,
                            figsize=(15, 5),
                            facecolor='w',
                            edgecolor='k')
    fig.subplots_adjust(hspace=.5, wspace=.001)

    axs = axs.ravel()

    for i, rg in enumerate(range(25, 175, 25)):

        this_data = data[:, :, freq, rg:rg + 25].mean((2, 3))

        connectivity = mne.channels.find_ch_connectivity(tnt.info, 'eeg')[0]

        cluster_stats = permutation_cluster_1samp_test(
            this_data,
            threshold=threshold,
            verbose=True,
            connectivity=connectivity,
            out_type='indices',
            n_jobs=1,
            tail=tail,
            check_disjoint=True,
            step_down_p=0.05,
            seed=42)

        T_obs, clusters, p_values, _ = cluster_stats
        good_cluster_inds = np.where(p_values < 0.05)[0]

        # Extract mask and indices of active sensors in the layout
        mask = np.zeros((T_obs.shape[0], 1), dtype=bool)
        if len(clusters):
            for clus in good_cluster_inds:
                mask[clusters[clus], :] = True

        evoked = mne.EvokedArray(T_obs[:, np.newaxis],
                                 tnt.average().info,
                                 tmin=0.)

        evoked.plot_topomap(ch_type='eeg',
                            times=0,
                            scalings=1,
                            time_format=None,
                            cmap=plt.cm.get_cmap('RdBu_r', 12),
                            vmin=-6.,
                            vmax=6,
                            units='t values',
                            mask=mask,
                            axes=axs[i],
                            size=3,
                            show_names=lambda x: x[4:] + ' ' * 20,
                            time_unit='s',
                            show=False)

        plt.savefig(cwd + '/Figures/' + title + '_topo.svg')
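# A hypothetical usage sketch (not from the original source): `data` is a
# (n_subjects, n_channels, n_freqs, n_times) array, and the function also
# expects module-level `tnt` (an mne.Epochs object) and `cwd` (output path).
topoplot(data, freq=[8, 9, 10, 11, 12], title='alpha', threshold=None, tail=0)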
Example #26
# vertices on a cortical surface. MNE provides several convenience functions
# for computing connectivity/neighbor/adjacency matrices (see the
# :ref:`Statistics API <api_reference_statistics>`).
#
# Standard clustering
# ~~~~~~~~~~~~~~~~~~~
# Here, since our data are on a grid, we can use ``connectivity=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.

titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
    X, n_jobs=1, threshold=threshold, connectivity=None,
    n_permutations=n_permutations)
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
    p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])

###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
# variances [1]_:
Example #27
assert epochs_power.data.shape == (len(epochs_power),
                                   len(power_InCongruent.ch_names),
                                   len(power_InCongruent.freqs),
                                   len(power_InCongruent.times))
adjacency = mne.stats.combine_adjacency(sensor_adjacency,
                                        len(power_InCongruent.freqs),
                                        len(power_InCongruent.times))

# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
    len(power_InCongruent.ch_names) * len(power_InCongruent.freqs) * len(power_InCongruent.times)
n_permutations = 5000  # more permutations give more stable p-values
T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
    epochs_power,
    n_permutations=n_permutations,
    threshold=14,
    tail=0,
    adjacency=adjacency,
    out_type='mask',
    verbose=True)
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.05:
        T_obs_plot[c] = T_obs[c]

# Just plot one channel's data
ch_idx, f_idx, t_idx = np.unravel_index(np.nanargmax(np.abs(T_obs_plot)),
                                        epochs_power.shape[1:])
# ch_idx = tfr_epochs.ch_names.index('MEG 1332')  # to show a specific one

vmax = np.max(np.abs(T_obs))
vmin = -vmax
Example #28
# for computing connectivity/neighbor/adjacency matrices (see the
# :ref:`Statistics API <api_reference_statistics>`).
#
# Standard clustering
# ~~~~~~~~~~~~~~~~~~~
# Here, since our data are on a grid, we can use ``connectivity=None`` to
# trigger optimized grid-based code, and run the clustering algorithm.

titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
    X,
    n_jobs=1,
    threshold=threshold,
    connectivity=None,
    n_permutations=n_permutations)
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
    p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])

###############################################################################
# "Hat" variance adjustment
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# This method can also be used in this context to correct for small
Example #29
adjacency = mne.stats.combine_adjacency(sensor_adjacency,
                                        len(tfr_epochs.freqs),
                                        len(tfr_epochs.times))

# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
    len(tfr_epochs.ch_names) * len(tfr_epochs.freqs) * len(tfr_epochs.times)

# %%
# Compute statistic
# -----------------
threshold = 3.
n_permutations = 50  # Warning: 50 is way too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
                                   threshold=threshold, tail=0,
                                   adjacency=adjacency,
                                   out_type='mask', verbose=True)

# %%
# View time-frequency plots
# -------------------------

evoked_data = evoked.data
times = 1e3 * evoked.times

plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)

# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
Example #30
# set family-wise p-value
p_accept = 0.01

connectivity = None
tail = 0.  # for a two-sided test

# set cluster threshold
ppf = stats.t.ppf
p_thresh = p_accept / (1 + (tail == 0))
n_samples = len(data)
threshold = -ppf(p_thresh, n_samples - 1)
if np.sign(tail) < 0:
    threshold = -threshold

cluster_stats = permutation_cluster_1samp_test(
    data, threshold=threshold, n_jobs=N_JOBS, verbose=True, tail=tail,
    step_down_p=0.05, connectivity=connectivity,
    n_permutations=n_permutations)

T_obs, clusters, cluster_p_values, _ = cluster_stats

##############################################################################
# Visualize results

times = 1e3 * contrast.times

plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, 1e6 * data.mean(axis=0), label="ERP Contrast")
plt.ylabel("EEG (uV)")
plt.ylim([-2.5, 2.5])
Example #31
    
    # Baseline power
    tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))
    
    # Crop in time to keep only what is between 0 and 500 ms
    evoked.crop(0., 0.5)
    tfr_epochs.crop(0., 0.5)
    
    epochs_power = tfr_epochs.data[:, 0, :, :]  # take the only channel

###############################################################################
# Compute statistic
# -----------------
threshold = 2.5
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_1samp_test(epochs_power, n_permutations=100,
                                   threshold=threshold, tail=0)

###############################################################################
# View time-frequency plots
# -------------------------

evoked_data = evoked.data
times = 1e3 * evoked.times

plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)

# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.05:
Example #32
#       fig.tight_layout()
#       plt.savefig(C.pictures_path_Source_estimate+ 't-test_results_timeseries_'+lb[j]+'.png')

# # plt.close('all')
##############################################################################
# t-test and cluster-based correction for the interaction of
# [SD_lATL, SD_rATL, LD_lATL, LD_rATL], 1 by 1
ROI_label = ['SD_lATL', 'SD_rATL', 'LD_lATL', 'LD_rATL']
# times = np.arange(200,400)
for i in np.arange(0, len(X_SDLD_ATLs)-1):
    for j in np.arange(i+1, len(X_SDLD_ATLs)):
        S = X_SDLD_ATLs[i] - X_SDLD_ATLs[j]
        print(i, j)
        print(ROI_label[i]+' vs '+ROI_label[j])
        T_obs, clusters, cluster_p_values, h0 = permutation_cluster_1samp_test(
            S, n_jobs=4, threshold=t_threshold,
            n_permutations=n_permutations, out_type='mask')

        plt.figure()
        plt.rcParams['font.size'] = '18'

        plt.subplot(211)
        plt.title('time-series and cluster-based permutation test')
        plt.plot(times, X_SDLD_ATLs[i].mean(axis=0), 'b', label=ROI_label[i])
        plt.plot(times, X_SDLD_ATLs[j].mean(axis=0), 'r', label=ROI_label[j])
        plt.plot(times, S.mean(axis=0), 'm', label="Contrast")

        plt.ylabel("EEG/MEG")
        plt.legend(loc='upper left')
        plt.subplot(212)