Example #1
def permTestMask2D(diff, p_value=0.05):
    '''Mask a 2D statistic map to clusters significant at ``p_value``.

    ``diff`` is a list of (n_observations, n_freqs, n_times) arrays, one per
    condition; points outside significant clusters are returned as NaN.
    '''

    a = np.arange(diff[0].shape[1] * diff[0].shape[2]).reshape(
        (diff[0].shape[1], diff[0].shape[2]))
    adj = connected_adjacency(a, '8').toarray()
    b = grid_to_graph(diff[0].shape[1], diff[0].shape[2]).toarray()
    conn = ((adj + b) > 0).astype(int)
    conn = sparse.csr_matrix(conn)

    #T_obs, clusters, cluster_pv, HO = permutation_cluster_test(diff, stat_fun = paired_t, connectivity = conn)
    T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
        diff, stat_fun=paired_t)
    #T_obs, clusters, cluster_pv, HO = spatio_temporal_cluster_test(diff, stat_fun = paired_t, connectivity = conn)
    #T_obs, clusters, cluster_pv, HO = spatio_temporal_cluster_test(diff, stat_fun = paired_t)
    T_obs_plot = np.nan * np.ones_like(T_obs)
    print(cluster_pv)
    for c, p_val in zip(clusters, cluster_pv):
        if p_val <= p_value:
            print(c.sum())
            T_obs_plot[c] = T_obs[c]

    return T_obs_plot
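A minimal, self-contained sketch of the pattern this helper implements: run the permutation test on synthetic time-frequency data, mask the statistic map to significant clusters, and overlay it on the full map. All names and shapes below are illustrative, not from the original code base.

import numpy as np
import matplotlib.pyplot as plt
from mne.stats import permutation_cluster_test

rng = np.random.default_rng(42)
# two conditions: (n_observations, n_freqs, n_times)
cond_a = rng.standard_normal((20, 10, 50))
cond_b = rng.standard_normal((20, 10, 50))
cond_b[:, 3:6, 20:35] += 1.0  # inject an effect

T_obs, clusters, cluster_pv, H0 = permutation_cluster_test(
    [cond_a, cond_b], n_permutations=500, tail=0, out_type='mask')

# keep the statistic only inside significant clusters, as permTestMask2D does
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_pv):
    if p_val <= 0.05:
        T_obs_plot[c] = T_obs[c]

plt.imshow(T_obs, aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot, aspect='auto', origin='lower', cmap='RdBu_r')
plt.show()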
Example #2
def search_clusters(exp_num, target_data, nontarget_data):
    print(exp_num)
    order = 6
    fs = 1000.0       # sample rate, Hz
    cutoff = 25  # desired cutoff frequency of the filter, Hz
    target_data = butter_lowpass_filter(target_data, cutoff, fs, order)
    nontarget_data = butter_lowpass_filter(nontarget_data, cutoff, fs, order)

    X = [target_data, nontarget_data]

    f_heads(exp_num,X)
    p_thresholds = [0.00001, 0.000005]  # magic numbers
    for p_threshold in p_thresholds:
        threshold = calc_threshold(p_threshold,
                                   [len(target_data), len(nontarget_data)])
        T_obs, clusters, cluster_p_values, H0 = \
            permutation_cluster_test(X, n_permutations=1500,
                                     connectivity=connectivity[0],
                                     threshold=threshold, check_disjoint=True,
                                     tail=0, n_jobs=6, verbose=False)


        indexes = sorted(range(len(cluster_p_values)),
                         key=lambda k: cluster_p_values[k])[:5]
        for i in indexes:
            if cluster_p_values[i] < 0.2:
                clustermask_heads(exp_num, 'CM_thr=%f_p=%f' %
                                  (p_threshold, cluster_p_values[i]),
                                  clusters[i])
        cluster_sizes = [clusters[ind].sum() for ind in indexes]
        res = list(zip(np.asarray(cluster_p_values)[indexes], cluster_sizes))
        print(res)
        f = open(os.path.join('results', 'res_file.txt'), 'a+')
        f.write('%s: Threshold=%f, clusters = %s\n' %
                (exp_num, p_threshold, str(res)))
        f.close()
    return res
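`calc_threshold` is not shown in this snippet. With the default F statistic, a plausible implementation (an assumption about the missing helper, mirroring how MNE derives its default threshold) converts the p-value into an F threshold:

import scipy.stats

def calc_threshold(p_threshold, group_sizes):
    # F threshold for a one-way test: dfn = n_groups - 1,
    # dfd = n_total - n_groups (assumed behaviour, not the original helper)
    n_groups = len(group_sizes)
    dfn = n_groups - 1
    dfd = sum(group_sizes) - n_groups
    return scipy.stats.f.ppf(1.0 - p_threshold, dfn, dfd)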
Example #3
def tfce(x, mask_asd, surf, n_perm=100, alpha=0.05):
    """ TFCE.

    Parameters
    ----------
    x: np.ndarray of shape (n_subjects, n_vertices)
        Data.
    mask_asd: np.ndarray of shape (n_subjects,)
        Boolean mask indicating ASD (True) and TD (False).
    surf: BSPolydata
        Cortical surface.
    n_perm: int, default=100
        Number of permutations.
    alpha: float, default=0.05
         Significance threshold.

    Returns
    -------
    t_obs: np.ndarray of shape (n_vertices,)
        t-statistic.
    sig: np.ndarray of shape (n_vertices,)
        Boolean array with significant vertices set to True.
    """

    adj = None
    if 'mask' in surf.point_keys:
        mask = surf.PointData['mask'] == 1
        adj = me.get_immediate_adjacency(surf, mask=mask)

    def stat_fun(*args):
        return stats.ttest_ind(*args)[0]

    p_F_obsp, p_cl, clp_pv, h0 = \
        permutation_cluster_test([x[mask_asd], x[~mask_asd]], tail=0,
                                 connectivity=adj, n_permutations=n_perm,
                                 out_type='indices', check_disjoint=True,
                                 seed=0, n_jobs=-1, stat_fun=stat_fun,
                                 threshold=dict(start=0, step=0.25),
                                 t_power=1, verbose=0)

    good_cluster_inds = np.where(clp_pv < alpha)[0]
    sig = np.zeros_like(p_F_obsp, dtype=bool)
    if good_cluster_inds.size > 0:
        idx = np.concatenate([p_cl[i][0].ravel()
                              for i in good_cluster_inds])
        sig[idx] = True

    obs = stat_fun(x[mask_asd], x[~mask_asd])

    return obs, sig
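The `threshold=dict(start=0, step=0.25)` argument is what switches MNE into TFCE mode, in which every sample effectively receives its own corrected p-value (which is why the function above concatenates per-cluster indices). A stripped-down, runnable sketch of the same pattern on synthetic 1D data (all names illustrative):

import numpy as np
from scipy import stats
from mne.stats import permutation_cluster_test

rng = np.random.default_rng(0)
grp1 = rng.standard_normal((15, 60))
grp2 = rng.standard_normal((15, 60))
grp2[:, 20:30] += 0.8  # effect in a contiguous stretch

def stat_fun(*args):
    return stats.ttest_ind(*args)[0]

t_obs, clusters, cluster_pv, h0 = permutation_cluster_test(
    [grp1, grp2], stat_fun=stat_fun, tail=0,
    threshold=dict(start=0, step=0.2), n_permutations=200, seed=0)
sig = cluster_pv < 0.05  # with TFCE, one corrected p-value per sample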
Example #4
def permTestMask1D(diff, p_value=0.05):
    '''Return a boolean time-point mask of significant clusters.

    ``diff`` is a list of (n_observations, n_times) arrays, one per
    condition; the significant clusters themselves are returned as well.
    '''

    T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
        diff, stat_fun=paired_t)
    print(cluster_pv)
    mask = np.zeros(diff[0].shape[1], dtype=bool)
    sig_clusters = []
    for cl in np.array(clusters)[np.where(cluster_pv < p_value)[0]]:
        mask[cl[0]] = True
        sig_clusters.append(cl)

    return mask, sig_clusters
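Several examples here pass a `paired_t` statistic function that is defined elsewhere. One common definition (an assumption about the original helper) is the paired t statistic along the observation axis:

from scipy import stats

def paired_t(*args):
    # paired t statistic between two (n_observations, ...) arrays;
    # permutation_cluster_test passes one array per condition
    return stats.ttest_rel(args[0], args[1])[0]

Note that for genuinely paired designs, MNE recommends permutation_cluster_1samp_test on the condition difference instead, since permutation_cluster_test permutes observations between groups.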
Example #5
channel_index = 37

data_vol = data_vol[:, channel_index, temporal_mask]
data_invol = data_invol[:, channel_index, temporal_mask]

times = 1e3 * epochs.times
times = times[temporal_mask]

###############################################################################
# Compute statistic
threshold = None
n_permutations = 5000
tail = 0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([data_vol, data_invol],
                             n_permutations=n_permutations,
                             threshold=threshold, tail=tail, n_jobs=2)

###############################################################################
# Plot
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + epochs.ch_names[channel_index])
plt.plot(times,
         data_vol.mean(axis=0) * 1e6 - data_invol.mean(axis=0) * 1e6,
         label="ERP Contrast (Voluntary - Involuntary)")
plt.ylabel("EEG (uV)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
    c = c[0]
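The snippet cuts off inside the cluster loop. Based on the identical pattern elsewhere in these examples (e.g. Examples #26 and #27), the loop body typically shades significant clusters, roughly:

for i_c, c in enumerate(clusters):
    c = c[0]  # assumes clusters are (slice,) tuples, as in the other 1D examples
    if cluster_p_values[i_c] <= 0.05:
        plt.axvspan(times[c.start], times[c.stop - 1], color='r', alpha=0.3)
plt.xlabel('time (ms)')
plt.show()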
Example #6
def plotIpsiContra(subject_id, header):
    '''Plot group-level ipsi- and contralateral ERPs per condition and
    shade time points covered by significant clusters.
    '''

    # plotting parameters
    sns.set(font_scale=2.5)
    sns.set_style('white')
    sns.set_style('white', {'axes.linewidth': 2})

    if header == 'target_loc':
        rep_cond = ['DvTr0', 'DvTr3']
    else:
        rep_cond = ['DrTv0', 'DrTv3']

    # read in data
    # read in erp data
    erp = []

    for sj in subject_id:
        # read in classification dict
        with open(
                '/Users/dirk/Desktop/suppression/erp/{}/ipsi_contra_{}.pickle'.
                format(header, sj), 'rb') as handle:
            erp.append(pickle.load(handle))

    with open(
            '/Users/dirk/Desktop/suppression/erp/{}/plot_dict.pickle'.format(
                header), 'rb') as handle:
        plot_dict = pickle.load(handle)

    times = plot_dict['times']
    start, end = [np.argmin(abs(times - t)) for t in (-0.3, 0.8)]
    times = times[start:end]

    plt.figure(figsize=(30, 20))

    for cnd in ['variable', 'repeat']:

        if cnd == 'variable':

            for i, rep in enumerate(['DvTv0', 'DvTv3']):

                ax = plt.subplot(2,
                                 2,
                                 i + 1,
                                 title=cnd + rep[-1],
                                 ylabel='micro Volt',
                                 xlabel='Time (ms)',
                                 ylim=(-6, 6))

                ipsi = np.vstack(
                    [erp[i][rep]['ipsi'] for i in range(len(erp))])
                contra = np.vstack(
                    [erp[i][rep]['contra'] for i in range(len(erp))])

                T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
                    [ipsi, contra], stat_fun=paired_t)

                ipsi_err = bootstrap(ipsi)
                contra_err = bootstrap(contra)

                ipsi = ipsi.mean(axis=0)
                contra = contra.mean(axis=0)

                plt.plot(times, ipsi, 'g', label='ipsi ' + rep)
                plt.plot(times, contra, 'r', label='contra ' + rep)
                plt.fill_between(times,
                                 ipsi + ipsi_err,
                                 ipsi - ipsi_err,
                                 alpha=0.2,
                                 color='g')
                plt.fill_between(times,
                                 contra + contra_err,
                                 contra - contra_err,
                                 alpha=0.2,
                                 color='r')

                mask = np.zeros(times.size, dtype=bool)
                for cl in np.array(clusters)[np.where(cluster_pv < 0.05)[0]]:
                    mask[cl[0]] = True
                plt.fill_between(times,
                                 -.1,
                                 0.1,
                                 where=mask,
                                 color='grey',
                                 label='p < 0.05')

                plt.axhline(y=0, ls='--')
                plt.axvline(x=0.258,
                            ls='--',
                            color='grey',
                            label='onset gabor')
                plt.legend(loc='best', shadow=True)
                sns.despine(offset=10, trim=False)

        elif cnd == 'repeat':

            for i, rep in enumerate(rep_cond):

                ax = plt.subplot(2,
                                 2,
                                 3 + i,
                                 title=cnd + rep[-1],
                                 ylabel='micro Volt',
                                 xlabel='Time (ms)',
                                 ylim=(-6, 6))

                ipsi = np.vstack(
                    [erp[i][rep]['ipsi'] for i in range(len(erp))])
                contra = np.vstack(
                    [erp[i][rep]['contra'] for i in range(len(erp))])

                T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
                    [ipsi, contra], stat_fun=paired_t)

                ipsi_err = bootstrap(ipsi)
                contra_err = bootstrap(contra)

                ipsi = ipsi.mean(axis=0)
                contra = contra.mean(axis=0)

                plt.plot(times, ipsi, 'g', label='ipsi ' + rep)
                plt.plot(times, contra, 'r', label='contra ' + rep)
                plt.fill_between(times,
                                 ipsi + ipsi_err,
                                 ipsi - ipsi_err,
                                 alpha=0.2,
                                 color='g')
                plt.fill_between(times,
                                 contra + contra_err,
                                 contra - contra_err,
                                 alpha=0.2,
                                 color='r')

                mask = np.zeros(times.size, dtype=bool)
                for cl in np.array(clusters)[np.where(cluster_pv < 0.05)[0]]:
                    mask[cl[0]] = True
                plt.fill_between(times,
                                 -.1,
                                 0.1,
                                 where=mask,
                                 color='grey',
                                 label='p < 0.05')

                plt.axhline(y=0, ls='--')
                plt.axvline(x=0.258,
                            ls='--',
                            color='grey',
                            label='onset gabor')
                plt.legend(loc='best', shadow=True)
                sns.despine(offset=10, trim=False)

    plt.savefig(
        '/Users/dirk/Desktop/suppression/erp/{}/figs/ipsi_contra_group.pdf'.
        format(header))
    plt.close()
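`bootstrap` is another helper not shown here. Judging by how it is used (an error band added to and subtracted from the mean), a plausible sketch (an assumption, not the original code) is a bootstrapped standard error of the mean per time point:

import numpy as np

def bootstrap(X, n_boot=1000, seed=0):
    # X: (n_subjects, n_times); return the SD of bootstrapped means,
    # i.e. a bootstrap estimate of the standard error per time point
    rng = np.random.default_rng(seed)
    n = X.shape[0]
    boot_means = np.stack([X[rng.integers(0, n, n)].mean(axis=0)
                           for _ in range(n_boot)])
    return boot_means.std(axis=0)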
Example #7
def plotCTFSlopeAcrossTime(header, power, channel='posterior', freqs='all'):
    '''Plot group-level CTF slopes across time for the variable and repeat
    conditions and shade time points covered by significant clusters.
    '''

    # plotting parameters
    sns.set(font_scale=2.5)
    sns.set_style('white')
    sns.set_style('white', {'axes.linewidth': 2})

    # read in CTF data
    ctf = []
    for sj in subject_id:
        # read in classification dict
        with open(
                '/home/dvmoors1/big_brother/Dist_suppression/ctf/{}_channels/{}/{}_slopes_{}.pickle'
                .format(channel, header, sj, freqs), 'rb') as handle:
            ctf.append(pickle.load(handle))

    with open(
            '/home/dvmoors1/big_brother/Dist_suppression/ctf/{}_channels/{}/{}_info.pickle'
            .format(channel, header, freqs), 'rb') as handle:
        plot_dict = pickle.load(handle)

    if header == 'target_loc':
        rep_cond = ['DvTr_0', 'DvTr_3']
    else:
        rep_cond = ['DrTv_0', 'DrTv_3']

    plt.figure(figsize=(20, 10))

    for idx, plot in enumerate(['variable', 'repeat']):

        ax = plt.subplot(1,
                         2,
                         idx + 1,
                         title=plot,
                         ylabel='CTF slope',
                         ylim=(-0.2, 0.2))

        if plot == 'variable':
            diff = []
            for i, cnd in enumerate(['DvTv_0', 'DvTv_3']):
                X = np.vstack([ctf[j][cnd][power] for j in range(len(ctf))])
                diff.append(X)
                error = bootstrap(X)
                X = X.mean(axis=0)

                plt.plot(plot_dict['times'], X, color=['g', 'r'][i], label=cnd)
                plt.fill_between(plot_dict['times'],
                                 X + error,
                                 X - error,
                                 alpha=0.2,
                                 color=['g', 'r'][i])

            plt.axhline(y=0, ls='--', color='black')
            plt.axvline(x=0.258, ls='--', color='grey', label='onset gabor')
            T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
                diff, stat_fun=paired_t)
            print('T', header, cluster_pv)
            mask = np.zeros(plot_dict['times'].size, dtype=bool)
            for cl in np.array(clusters)[np.where(cluster_pv < 0.05)[0]]:
                mask[cl[0]] = True
            plt.fill_between(plot_dict['times'],
                             -0.002,
                             0.002,
                             where=mask,
                             color='grey',
                             label='p < 0.05')

            plt.legend(loc='best', shadow=True)

        elif plot == 'repeat':
            diff = []
            for i, cnd in enumerate(rep_cond):
                X = np.vstack([ctf[j][cnd][power] for j in range(len(ctf))])
                diff.append(X)
                error = bootstrap(X)
                X = X.mean(axis=0)

                plt.plot(plot_dict['times'], X, color=['g', 'r'][i], label=cnd)
                plt.fill_between(plot_dict['times'],
                                 X + error,
                                 X - error,
                                 alpha=0.2,
                                 color=['g', 'r'][i])

            plt.axhline(y=0, ls='--', color='black')
            plt.axvline(x=0.258, ls='--', color='grey', label='onset gabor')
            T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
                diff, stat_fun=paired_t)
            print('T', header, cluster_pv)
            mask = np.zeros(plot_dict['times'].size, dtype=bool)
            for cl in np.array(clusters)[np.where(cluster_pv < 0.05)[0]]:
                mask[cl[0]] = True
            plt.fill_between(plot_dict['times'],
                             -0.002,
                             0.002,
                             where=mask,
                             color='grey',
                             label='p < 0.05')

            plt.legend(loc='best', shadow=True)
            sns.despine(offset=10, trim=False)

    plt.savefig(
        '/home/dvmoors1/big_brother/Dist_suppression/ctf/{}_channels/{}/figs/group_slopes_{}.pdf'
        .format(channel, header, power))
    plt.close()
Example #8
epochs_power_2 = single_trial_power(data_condition_2, Fs=Fs,
                                    frequencies=frequencies,
                                    n_cycles=n_cycles, use_fft=False)

epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# do ratio with baseline power:
epochs_power_1 /= np.mean(epochs_power_1[:, :, times < 0], axis=2)[:, :, None]
epochs_power_2 /= np.mean(epochs_power_2[:, :, times < 0], axis=2)[:, :, None]

###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
                   permutation_cluster_test([epochs_power_1, epochs_power_2],
                               n_permutations=100, threshold=threshold, tail=0)

###############################################################################
# View time-frequency plots
import pylab as pl
pl.clf()
pl.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
pl.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
pl.plot(times, evoked_contrast.T)
pl.title('Contrast of evoked response (%s)' % ch_name)
pl.xlabel('time (ms)')
pl.ylabel('Magnetic Field (fT/cm)')
pl.xlim(times[0], times[-1])
pl.ylim(-100, 200)
Example #9
    d_right_ent = (
        data_left_ent[right_idx, :, :] - data_right_ent[right_idx, :, :])
    d_left_ent = (
        data_left_ent[left_idx, :, :] - data_right_ent[left_idx, :, :])

    d_right_ctl = (
        data_left_ctl[right_idx, :, :] - data_right_ctl[right_idx, :, :])
    d_left_ctl = (
        data_left_ctl[left_idx, :, :] - data_right_ctl[left_idx, :, :])

d_ali_ent_right = np.asarray(d_right_ent).mean(axis=1)
d_ali_ent_left = np.asarray(d_left_ent).mean(axis=1)
d_ali_ctl_right = np.asarray(d_right_ctl).mean(axis=1)
d_ali_ctl_left = np.asarray(d_left_ctl).mean(axis=1)

T_obs, clusters, cluster_pv, H0 = permutation_cluster_test(
    [d_ali_ent_left, d_ali_ent_right], n_permutations=5000)

times = (epochs.times[::4][:-1]) * 1e3
plt.close('all')
plt.subplot(211)
plt.title("Ctl left v right")
plt.plot(
    times,
    d_ali_ent_left.mean(axis=0) - d_ali_ent_right.mean(axis=0),
    label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
    c = c[0]
    if cluster_pv[i_c] <= 0.05:
Example #10
def wpli_analysis_time(subjects, log):
    conds = ['lon', 'sho']
    roi_cons = {}
    roi = rois['f']
    spatial_con = mne.channels.read_ch_connectivity('biosemi64')

    for ix_s, s in enumerate(subjects):
        print('subject {} of {}'.format(ix_s + 1, len(subjects)))
        for ix_c, c in enumerate(conds):
            filename = op.join(study_path, 'results', 'wpli', 'over_time',
                               '{}_{}_wpli.npz'.format(s, c))
            dat = np.load(filename)
            info = dat['info'].item()

            # Create results matrices
            if ix_s == 0:
                roi_cons[c] = np.empty((len(subjects), dat['con'].shape[0], dat['con'].shape[-2], dat['con'].shape[-1]))

            # Get ROI connectivity
            # roi_ixs = [ix for ix, ch in enumerate(info['ch_names']) if ch in roi]
            # roi_con = np.mean(dat['con'][roi_ixs, :, :, :], axis=0)
            roi_ixs = 33
            roi_con = dat['con'][roi_ixs, :, :, :]
            roi_con[roi_ixs, :, :] = 1.0
            roi_cons[c][ix_s, :, :, :] = roi_con.copy()

    avg_con = [np.mean(roi_cons[c], axis=0) for c in conds]

    for ix_c, c in enumerate(conds):
        tfr = AverageTFR(info, avg_con[ix_c], dat['times'], dat['freqs'], len(subjects))
        tfr.plot_topo(fig_facecolor='w', font_color='k', border='k', vmin=0, vmax=0.5, cmap='viridis', title=c)

    s = 12
    for c in conds:
        tfr = AverageTFR(info, roi_cons[c][s, :, :, :], dat['times'], dat['freqs'], len(subjects))
        tfr.plot_topo(fig_facecolor='w', font_color='k', border='k', vmin=0, vmax=1, cmap='viridis', title=c)

    # Stats
    test_con = [roi_cons[c][:, 19, :, :] for c in conds]

    #threshold = None
    threshold = dict(start=0, step=0.2)
    T_obs, clusters, cluster_p_values, H0 = \
        permutation_cluster_test([test_con[0], test_con[1]],
                                 n_permutations=1000, threshold=threshold, tail=0)

    times = dat['times']
    times *= 1e3
    freqs = dat['freqs']

    fig, ax = plt.subplots(1)
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        if p_val <= 0.05:
            T_obs_plot[c] = T_obs[c]

    ax.imshow(T_obs,
              extent=[times[0], times[-1], freqs[0], freqs[-1]],
              aspect='auto', origin='lower', cmap='gray')
    ax.imshow(T_obs_plot,
              extent=[times[0], times[-1], freqs[0], freqs[-1]],
              aspect='auto', origin='lower', cmap='RdBu_r')

    plt.xlabel('Time (ms)')
    plt.ylabel('Frequency (Hz)')
    plt.title('ROI Connectivity')
Example #11
def wpli_analysis_epochs(subjects):
    # load data
    mats, freqs, chans = load_wpli_over_epochs(subjects)

    # load spatial structure
    connectivity, ch_names = mne.channels.read_ch_connectivity(
        '/Users/lpen/Documents/MATLAB/Toolbox/fieldtrip-20170628/template/neighbours/biosemi128_neighb.mat')

    # square matrix
    for c in conditions:
        for s in range(len(subjects)):
            mats[c][s, :, :, :] = create_con_mat(mats[c][s, :, :, :])

    avg_mat = {key: np.nanmean(x, axis=0) for (key, x) in mats.items()}

    # avg plot
    subj_con_fig = plt.figure(figsize=(15, 5))
    grid = ImageGrid(subj_con_fig, 111,
                     nrows_ncols=(len(conditions), 5),
                     axes_pad=0.3,
                     cbar_mode='single',
                     cbar_pad='10%',
                     cbar_location='right')

    for idx, ax in enumerate(grid):
        if idx <= 4:
            im = ax.imshow(avg_mat[conditions[0]][:, :, idx],
                           vmin=0, vmax=0.4)
        else:
            im = ax.imshow(avg_mat[conditions[1]][:, :, idx - 5],
                           vmin=0, vmax=0.4)

    cb = subj_con_fig.colorbar(im, cax=grid.cbar_axes[0])
    cb.ax.set_title('wPLI', loc='right')


    # subjs plot
    n_s = len(subjects)
    n_f = freqs.shape[1]
    n_c = len(conditions)

    plt_s = np.tile(np.arange(n_s), n_f*n_c)
    plt_c = np.tile(np.concatenate((np.repeat(0, n_s), np.repeat(1, n_s))), n_f)
    plt_f = np.repeat(np.arange(0, 5), n_s*n_c)

    plt.style.use('ggplot')
    subj_con_fig = plt.figure(figsize=(15, 10))
    grid = ImageGrid(subj_con_fig, 111,
                     nrows_ncols=(len(conditions)*freqs.shape[1], len(subjects)),
                     axes_pad=0.05,
                     share_all=True,
                     aspect=True,
                     cbar_mode='single',
                     cbar_pad='10%',
                     cbar_location='right')

    for (idx, ax), s, c, f in zip(enumerate(grid), plt_s, plt_c, plt_f):
        print(idx, s, c, f)
        im = ax.imshow(mats[conditions[c]][s, :, :, f], vmin=0, vmax=0.7)
        ax.set_xticks([], [])
        ax.set_yticks([], [])
        ax.grid(False)

    cb = subj_con_fig.colorbar(im, cax=grid.cbar_axes[0], ticks=np.arange(0, 1.1, 0.1))
    cb.ax.set_title('wPLI', loc='right')
    subj_con_fig.savefig(op.join(study_path, 'figures', 'subj_wpli.eps'), format='eps', dpi=300)

    # channel mean
    avg_ch_mat = [np.nanmean(mats[x], axis=2) for x in mats]
    avg_ch_mat = [np.transpose(x, (0, 2, 1)) for x in avg_ch_mat]

    # s = 16
    # plt.imshow(avg_ch_mat[0][s, :, :], aspect='auto', vmax=1, vmin=0)
    # plt.colorbar()

    threshold = dict(start=0, step=0.1)
    n_perm = 100

    fq_dat = [np.nan_to_num(mats[x][:, :, :, 2]) for x in mats]

    T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(fq_dat, n_permutations=n_perm, connectivity=connectivity,
                                                                     threshold=threshold, tail=0, n_jobs=2)

    plt.hist(cluster_p_values)
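`create_con_mat` is not shown either. Since MNE's spectral connectivity routines fill only the lower triangle of the channel-by-channel matrix, a plausible reading (an assumption) is that it symmetrizes each frequency slice:

import numpy as np

def create_con_mat(con):
    # con: (n_channels, n_channels, n_freqs) with only the lower
    # triangle filled; mirror it to get a full symmetric matrix
    full = np.empty_like(con)
    for f in range(con.shape[-1]):
        full[:, :, f] = con[:, :, f] + con[:, :, f].T
    return full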
Example #12
def permutation_cluster_ttest(data1,
                              data2,
                              paired=False,
                              n_permutations=1000,
                              threshold=None,
                              p_threshold=0.05,
                              adjacency=None,
                              tmin=None,
                              tmax=None,
                              fmin=None,
                              fmax=None,
                              trial_level=False,
                              min_adj_ch=0):
    '''Perform cluster-based permutation test with t test as statistic.

    Parameters
    ----------
    data1 : list of mne objects
        List of objects (Evokeds, TFRs) belonging to condition one.
    data2 : list of mne objects
        List of objects (Evokeds, TFRs) belonging to condition two.
    paired : bool
        Whether to perform a paired t test. Defaults to ``False``.
    n_permutations : int
        How many permutations to perform. Defaults to ``1000``.
    threshold : value
        Cluster entry threshold defined by the value of the statistic. Defaults
        to ``None`` which calculates threshold from p value (see
        ``p_threshold``)
    p_threshold : value
        Cluster entry threshold defined by the p value.
    adjacency : boolean array | sparse array
        Information about channel adjacency.
    tmin : float
        Start of the time window of interest (in seconds). Defaults to ``None``
        which takes the earliest possible time.
    tmax : float
        End of the time window of interest (in seconds). Defaults to ``None``
        which takes the latest possible time.
    fmin : float
        Start of the frequency window of interest (in Hz). Defaults to
        ``None`` which takes the lowest possible frequency.
    fmax : float
        End of the frequency window of interest (in Hz). Defaults to
        ``None`` which takes the highest possible frequency.
    min_adj_ch : int
        Minimum number of adjacent in-cluster channels to retain a point in
        the cluster.

    Returns
    -------
    clst : borsar.cluster.Clusters
        Obtained clusters.
    '''
    if data2 is not None:
        one_sample = False
        stat_fun = ttest_rel_no_p if paired else ttest_ind_no_p
    else:
        one_sample = True
        stat_fun = lambda data: ttest_1samp_no_p(data[0])

    try:
        kwarg = 'connectivity'
        from mne.source_estimate import spatial_tris_connectivity  # noqa: F401
    except ImportError:
        kwarg = 'adjacency'
        from mne.source_estimate import spatial_tris_adjacency  # noqa: F401

    inst = data1[0]
    len1 = len(data1)
    len2 = len(data2) if data2 is not None else 0

    if paired:
        assert len1 == len2

    threshold = _compute_threshold([data1, data2], threshold, p_threshold,
                                   trial_level, paired, one_sample)

    # data1 and data2 have to be Evokeds or TFRs
    supported_types = (mne.Evoked, borsar.freq.PSD,
                       mne.time_frequency.AverageTFR,
                       mne.time_frequency.EpochsTFR)
    check_list_inst(data1, inst=supported_types)
    if data2 is not None:
        check_list_inst(data2, inst=supported_types)

    # find time and frequency ranges
    # ------------------------------
    if isinstance(inst, (mne.Evoked, mne.time_frequency.AverageTFR)):
        tmin = 0 if tmin is None else inst.time_as_index(tmin)[0]
        tmax = (len(inst.times)
                if tmax is None else inst.time_as_index(tmax)[0] + 1)
        time_slice = slice(tmin, tmax)

    if isinstance(inst, (borsar.freq.PSD, mne.time_frequency.AverageTFR)):
        fmin = 0 if fmin is None else find_index(data1[0].freqs, fmin)
        fmax = (len(inst.freqs) if fmax is None else find_index(
            data1[0].freqs, fmax))
        freq_slice = slice(fmin, fmax + 1)

    # handle object-specific data
    # ---------------------------
    if isinstance(inst, mne.time_frequency.AverageTFR):
        # + fmin, fmax
        assert not trial_level
        # data are in observations x channels x frequencies x time
        data1 = np.stack(
            [tfr.data[:, freq_slice, time_slice] for tfr in data1], axis=0)
        data2 = (np.stack(
            [tfr.data[:, freq_slice, time_slice]
             for tfr in data2], axis=0) if data2 is not None else data2)
    elif isinstance(inst, mne.time_frequency.EpochsTFR):
        assert trial_level
        data1 = inst.data[..., freq_slice, time_slice]
        data2 = (data2[0].data[..., freq_slice,
                               time_slice] if data2 is not None else data2)
    elif isinstance(inst, borsar.freq.PSD):
        if not inst._has_epochs:
            assert not trial_level
            data1 = np.stack([psd.data[:, freq_slice].T for psd in data1],
                             axis=0)
            data2 = (np.stack([psd.data[:, freq_slice].T for psd in data2],
                              axis=0) if data2 is not None else data2)
        else:
            assert trial_level
            data1 = data1[0].data[..., freq_slice].transpose((0, 2, 1))
            data2 = (data2[0].data[..., freq_slice].transpose(
                (0, 2, 1)) if data2 is not None else data2)
    else:
        data1 = np.stack([erp.data[:, time_slice].T for erp in data1], axis=0)
        data2 = (np.stack([erp.data[:, time_slice].T for erp in data2], axis=0)
                 if data2 is not None else data2)

    data_3d = data1.ndim > 3
    if (isinstance(adjacency, np.ndarray) and not sparse.issparse(adjacency)
            and not data_3d):
        adjacency = sparse.coo_matrix(adjacency)

    # perform cluster-based test
    # --------------------------
    # TODO: now our cluster-based works also for 1d and 2d etc.
    if not data_3d:
        assert min_adj_ch == 0
        adj_param = {kwarg: adjacency}
        stat, clusters, cluster_p, _ = permutation_cluster_test(
            [data1, data2],
            stat_fun=stat_fun,
            threshold=threshold,
            n_permutations=n_permutations,
            out_type='mask',
            **adj_param)
        if isinstance(inst, mne.Evoked):
            dimcoords = [inst.ch_names, inst.times[time_slice]]
            dimnames = ['chan', 'time']
        elif isinstance(inst, borsar.freq.PSD):
            dimcoords = [inst.ch_names, inst.freqs[freq_slice]]
            dimnames = ['chan', 'freq']
        return Clusters(stat.T, [c.T for c in clusters],
                        cluster_p,
                        info=inst.info,
                        dimnames=dimnames,
                        dimcoords=dimcoords)

    else:
        stat, clusters, cluster_p = permutation_cluster_test_array(
            [data1, data2],
            adjacency,
            stat_fun,
            threshold=threshold,
            n_permutations=n_permutations,
            one_sample=one_sample,
            paired=paired,
            min_adj_ch=min_adj_ch)

        # pack into Clusters object
        dimcoords = [inst.ch_names, inst.freqs, inst.times[tmin:tmax]]
        return Clusters(stat,
                        clusters,
                        cluster_p,
                        info=inst.info,
                        dimnames=['chan', 'freq', 'time'],
                        dimcoords=dimcoords)
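`_compute_threshold` is internal to the package this function comes from. The docstring says the cluster-entry threshold is derived from `p_threshold`; for a t statistic the conventional conversion looks like this (a sketch of the convention, not the actual helper):

import scipy.stats

def t_threshold_from_p(p_threshold, n1, n2=None, paired=False):
    # degrees of freedom for a one-sample/paired vs. independent t test
    df = n1 - 1 if (n2 is None or paired) else n1 + n2 - 2
    # two-tailed cluster-entry threshold
    return scipy.stats.t.ppf(1.0 - p_threshold / 2.0, df)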
Example #13
    data = np.load(tf_folder + "%s_ali.npy" % subject)
    ctl_left.append(data[0, :])
    ctl_right.append(data[1, :])
    ent_left.append(data[2, :])
    ent_right.append(data[3, :])

ctl_left = np.asarray(ctl_left)
ctl_right = np.asarray(ctl_right)
ent_left = np.asarray(ent_left)
ent_right = np.asarray(ent_right)

epochs = mne.read_epochs(
    epochs_folder + "0005_trial_start-epo.fif", preload=False)
times = (epochs.times[::4][:-1]) * 1e3

T_obs, clusters, cluster_pv, H0 = permutation_cluster_test(
    [ctl_left, ctl_right], n_permutations=5000)

plt.figure()
plt.close('all')
plt.subplot(211)
plt.title("Ctl left v right")
plt.plot(
    times,
    ctl_left.mean(axis=0) - ctl_right.mean(axis=0),
    label="Contrast (Event 1 - Event 2)")
plt.ylabel("ALI")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
    c = c[0]
    if cluster_pv[i_c] <= 0.05:
Example #14
# Correct for multiple comparisons using a cluster-based approach
###############################################################################
# Then, we perform the cluster-based correction for multiple comparisons
# between the PAC coming from the two conditions. To this end we use the
# Python package MNE-Python and in particular, the function
# :func:`mne.stats.permutation_cluster_test`

# mne requires that the first axis is the number of trials (n_epochs)
# Therefore, we transpose the output PACs of both conditions
pac_r1 = np.transpose(pac_1, (2, 0, 1))
pac_r2 = np.transpose(pac_2, (2, 0, 1))

n_perm = 1000  # number of permutations
tail = 1  # only inspect the upper tail of the distribution
# perform the correction
t_obs, clusters, cluster_p_values, h0 = permutation_cluster_test(
    [pac_r1, pac_r2], n_permutations=n_perm, tail=tail)

###############################################################################
# Plot the significant clusters
###############################################################################
# Finally, we plot the significant clusters. To this end, we use the approach
# from the MNE examples where the non-significant parts appear in a grayscale
# colormap while significant clusters are color coded.

# create new stats image with only significant clusters
t_obs_plot = np.nan * np.ones_like(t_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.001:
        t_obs_plot[c] = t_obs[c]
        t_obs[c] = np.nan
Example #15
channel_index = 37

data_vol = data_vol[:, channel_index, temporal_mask]
data_invol = data_invol[:, channel_index, temporal_mask]

times = 1e3 * epochs.times
times = times[temporal_mask]

###############################################################################
# Compute statistic
threshold = None
n_permutations = 5000
tail = 0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([data_vol, data_invol],
                             n_permutations=n_permutations,
                             threshold=threshold, tail=tail, n_jobs=2)

###############################################################################
# Plot
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + epochs.ch_names[channel_index])
plt.plot(times, data_vol.mean(axis=0)*1e6 - data_invol.mean(axis=0)*1e6,
         label="ERP Contrast (Voluntary - Involuntary)")
plt.ylabel("EEG (uV)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
    c = c[0]
    if cluster_p_values[i_c] <= 0.05:
Example #16
condition1 = epochs1.get_data()  # as 3D matrix

event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                     baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data()  # as 3D matrix

condition1 = condition1[:, 0, :]  # take only one channel to get a 2D array
condition2 = condition2[:, 0, :]  # take only one channel to get a 2D array

###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
                permutation_cluster_test([condition1, condition2],
                            n_permutations=1000, threshold=threshold, tail=1,
                            n_jobs=2)

###############################################################################
# Plot
times = epochs1.times
import matplotlib.pyplot as plt
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
         label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
Example #17
        reg_data = reg_data[:, x_range[0]:x_range[1]]
        reg_data = rescale(reg_data, times, baseline, mode="mean")
        reg_mean = np.average(reg_data, axis=0)
        reg_sem = sem(reg_data, axis=0)
        odd_data = np.array(odd[group_key]) * 1e14
        odd_data = odd_data[:, x_range[0]:x_range[1]]
        odd_data = rescale(odd_data, times, baseline, mode="mean")
        odd_mean = np.average(odd_data, axis=0)
        odd_sem = sem(odd_data, axis=0)

        threshold = 2.0

        T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
            [reg_data, odd_data], 
            n_permutations=5000, 
            threshold=threshold, 
            tail=0, 
            n_jobs=-1
        )

        # plot results

        ax.plot(times, reg_mean, linewidth=1, color=reg_colour)
        ax.fill_between(
            times, 
            reg_mean+reg_sem, 
            reg_mean-reg_sem, 
            color=reg_colour, 
            alpha=0.2, 
            linewidth=0
        )
Example #18
    def stat_fun_rm1wayANOVA(*args):
        return f_mway_rm(np.swapaxes(args, 1, 0),
                         factor_levels=[len(levellist)],
                         effects='A',
                         correction=True,
                         return_pvals=False)[0]

    # 1-way ANOVA with cluster-based permutation test procedure
    print(' > Running 1-way ANOVA...')
    startT = time.time()
    scores, _, p_val, H0 = permutation_cluster_test(
        Dataset,
        threshold=thresh,
        n_permutations=Nperm,
        tail=tail,
        stat_fun=stat_fun_rm1wayANOVA,
        connectivity=connectivity,
        n_jobs=njobs,
        buffer_size=None,
        exclude=msk.reshape(-1),
        out_type='indices')

    # add results to data containers
    p_val = p_val.reshape(scores.shape)
    elapsed_time = (time.time() - startT) / 60

    # save data
    os.chdir(savedir)
    if not os.path.exists('./%s_at%s' % (factor, cond)):
        os.mkdir('./%s_at%s' % (factor, cond))
    os.chdir('./%s_at%s' % (factor, cond))
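The `thresh` used above is computed outside this snippet. For a repeated-measures ANOVA statistic, MNE provides `mne.stats.f_threshold_mway_rm`, which is the natural way to derive it (shown as a likely pattern, not the original code; the numbers are placeholders):

from mne.stats import f_threshold_mway_rm

thresh = f_threshold_mway_rm(n_subjects=20,          # placeholder
                             factor_levels=[3],      # i.e. len(levellist)
                             effects='A',
                             pvalue=0.05)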
Example #19
def induced_cluster(epochs,
                    cond1,
                    cond2,
                    chs,
                    ch_str,
                    window=None,
                    threshold=7,
                    title='Hello World'):

    epochs1_tot = []
    epochs2_tot = []
    for run in RUN_LIST[task]:

        chs_int = find_cluster_inds(chs)

        # map condition labels to the epoch keys they pool over
        cond_map = {
            'par': ['par_high_fast', 'par_mid_fast', 'par_low_fast',
                    'par_high_slow', 'par_mid_slow', 'par_low_slow'],
            'nopar': ['nopar_high', 'nopar_mid', 'nopar_low'],
            'parFast': ['par_high_fast', 'par_mid_fast', 'par_low_fast'],
            'parSlow': ['par_high_slow', 'par_mid_slow', 'par_low_slow'],
            'high': ['nopar_high', 'par_high_fast', 'par_high_slow'],
            'mid': ['nopar_mid', 'par_mid_fast', 'par_mid_slow'],
            'low': ['nopar_low', 'par_low_fast', 'par_low_slow'],
        }
        epochs1 = np.concatenate(
            [epochs[run][key].data for key in cond_map[cond1]], axis=0)
        epochs2 = np.concatenate(
            [epochs[run][key].data for key in cond_map[cond2]], axis=0)

        if window == 'RT':
            epochs_high = mne.concatenate_epochs(
                [epochs['early_high'], epochs['late_high']])
            epochs_mid = mne.concatenate_epochs(
                [epochs['early_mid'], epochs['late_mid']])
            epochs_low = mne.concatenate_epochs(
                [epochs['early_low'], epochs['late_low']])
            epochs_parSlow = mne.concatenate_epochs(
                [epochs['late_low'], epochs['late_mid'], epochs['late_high']])
            epochs_parFast = mne.concatenate_epochs([
                epochs['early_low'], epochs['early_mid'], epochs['early_high']
            ])

        epochs1_tot.append(epochs1)
        epochs2_tot.append(epochs2)

    epochs1_tot = np.concatenate(epochs1_tot, axis=0)
    epochs2_tot = np.concatenate(epochs2_tot, axis=0)

    power1 = np.average((epochs1_tot[:, chs_int, :, :].squeeze()), axis=1)
    power2 = np.average((epochs2_tot[:, chs_int, :, :].squeeze()), axis=1)
    print(power1.shape)

    T_obs, clusters, cluster_p_values, H0 = \
        permutation_cluster_test([power1, power2],
                                 n_permutations=100, threshold=threshold,
                                 tail=0)
    #if T_obs.shape[0]<113:
    #    T_obs = np.average(T_obs, axis = 0)
    #clusters = np.average(clusters, axis = 0)
    ch_name = chs
    plt.figure()

    # Create new stats image with only significant clusters
    T_obs_plot = np.nan * np.ones_like(T_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        #c2 = np.average(c, axis = 0)
        if p_val <= 0.05:
            T_obs_plot[c] = T_obs[c]

    plt.imshow(T_obs,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto',
               origin='lower',
               cmap='gray')
    plt.imshow(T_obs_plot,
               extent=[times[0], times[-1], freqs[0], freqs[-1]],
               aspect='auto',
               origin='lower',
               cmap='RdBu_r')

    plt.xlabel('Time (ms)')
    plt.ylabel('Frequency (Hz)')
    plt.title('Induced power (%s)' % ch_str)
    save_name, save_path = get_pareidolia_bids(FOLDERPATH,
                                               subj,
                                               task,
                                               run,
                                               stage='fig_Induced_cluster' +
                                               ch_str + cond1 + '-' + cond2,
                                               cond=None)
    plt.savefig(save_path, dpi=300)
    return
Example #20
def plotBdmAcrossTime():
    '''Plot group-level decoding accuracy across time for the variable and
    repeat conditions and shade time points covered by significant clusters.
    '''

    # plotting parameters
    sns.set(font_scale=2.5)
    sns.set_style('white')
    sns.set_style('white', {'axes.linewidth': 2})

    # read in BDM data
    bdm = []
    for sj in subject_id:
        # read in classification dict
        with open(
                '/Users/dirk/Desktop/suppression/bdm/{}/class_acc_{}.pickle'.
                format(header, sj), 'rb') as handle:
            bdm.append(pickle.load(handle))

    with open(
            '/Users/dirk/Desktop/suppression/bdm/{}/plot_dict.pickle'.format(
                header), 'rb') as handle:
        plot_dict = pickle.load(handle)

    if header == 'target_loc':
        rep_cond = ['DvTr0', 'DvTr3']
    else:
        rep_cond = ['DrTv0', 'DrTv3']

    plt.figure(figsize=(30, 20))

    for idx, plot in enumerate(['variable', 'repeat']):

        ax = plt.subplot(1,
                         2,
                         idx + 1,
                         title=plot,
                         ylabel='classification acc',
                         ylim=(0, 0.3))

        if plot == 'variable':
            diff = []
            for i, cnd in enumerate(['DvTv0', 'DvTv3']):
                X = np.vstack([bdm[j][cnd] for j in range(len(bdm))])
                diff.append(X)

                error = bootstrap(X)
                X = X.mean(axis=0)

                plt.plot(plot_dict['times'], X, color=['g', 'r'][i], label=cnd)
                plt.fill_between(plot_dict['times'],
                                 X + error,
                                 X - error,
                                 alpha=0.2,
                                 color=['g', 'r'][i])

            plt.axhline(y=1 / 6.0, ls='--', color='black', label='chance')
            plt.axvline(x=0.258, ls='--', color='grey', label='onset gabor')
            T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
                diff, stat_fun=paired_t)
            print(header, cluster_pv)
            mask = np.zeros(plot_dict['times'].size, dtype=bool)
            for cl in np.array(clusters)[np.where(cluster_pv < 0.05)[0]]:
                mask[cl[0]] = True
            plt.fill_between(plot_dict['times'],
                             0.100,
                             0.104,
                             where=mask,
                             color='grey',
                             label='p < 0.05')

            plt.legend(loc='best', shadow=True)

        elif plot == 'repeat':
            diff = []
            for i, cnd in enumerate(rep_cond):
                X = np.vstack([bdm[j][cnd] for j in range(len(bdm))])
                diff.append(X)
                error = bootstrap(X)
                X = X.mean(axis=0)

                plt.plot(plot_dict['times'], X, color=['g', 'r'][i], label=cnd)
                plt.fill_between(plot_dict['times'],
                                 X + error,
                                 X - error,
                                 alpha=0.2,
                                 color=['g', 'r'][i])

            plt.axhline(y=1 / 6.0, ls='--', color='black', label='chance')
            plt.axvline(x=0.258, ls='--', color='grey', label='onset gabor')
            T_obs, clusters, cluster_pv, HO = permutation_cluster_test(
                diff, stat_fun=paired_t)
            print(header, cluster_pv)
            mask = np.zeros(plot_dict['times'].size, dtype=bool)
            for cl in np.array(clusters)[np.where(cluster_pv < 0.05)[0]]:
                mask[cl[0]] = True
            plt.fill_between(plot_dict['times'],
                             0.100,
                             0.104,
                             where=mask,
                             color='grey',
                             label='p < 0.05')

            plt.legend(loc='best', shadow=True)
            sns.despine(offset=10, trim=False)

    plt.savefig(
        '/Users/dirk/Desktop/suppression/bdm/{}/figs/group_classification.pdf'.
        format(header))
    plt.close()
Example #21
foo_power_1 = foo_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
foo_power_2 = foo_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 950
foo_baseline_1 = np.mean(foo_power_1[:, :, baseline_mask], axis=2)
foo_power_1 /= foo_baseline_1[..., np.newaxis]
foo_baseline_2 = np.mean(foo_power_2[:, :, baseline_mask], axis=2)
foo_power_2 /= foo_baseline_2[..., np.newaxis]

###############################################################################
# Compute statistic
threshold = 4
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([foo_power_1, foo_power_2],
                             n_permutations=5000, threshold=threshold, tail=0)

###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
plt.plot(times, evoked_contrast.T)
plt.title('Contrast of evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 200)

plt.subplot(2, 1, 2)
Example #22
                                                      sensor, region)
            diffFilename = '{}{}-{}-{}-diff.pickle'.format(
                statsPath, groupNames[groupID], sensor, region)
            if not allFilesExist:
                figFilename = docPath + groupName + '_' + region + '_' + sensor + '.png'
                statFilename = docPath + groupName + '_' + region + '_' + sensor + '.txt'
                c1 = np.concatenate(condition1[region], axis=0)
                c2 = np.concatenate(condition2[region], axis=0)
                diffData = np.mean(c1, axis=0) - np.mean(c2, axis=0)

                ###############################################################################
                # Compute statistic
                threshold = 1.0
                T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
                    [c1, c2],
                    n_permutations=2500,
                    threshold=threshold,
                    tail=1,
                    n_jobs=4)

                pickle.dump(T_obs, open(tFilename, 'wb'))
                pickle.dump(clusters, open(clusterFilename, 'wb'))
                pickle.dump(cluster_p_values, open(pFilename, 'wb'))
                pickle.dump(H0, open(hFilename, 'wb'))
                pickle.dump(diffData, open(diffFilename, 'wb'))
            else:
                T_obs = pickle.load(open(tFilename, 'rb'))
                clusters = pickle.load(open(clusterFilename, 'rb'))
                cluster_p_values = pickle.load(open(pFilename, 'rb'))
                H0 = pickle.load(open(hFilename, 'rb'))
                diffData = pickle.load(open(diffFilename, 'rb'))
Example #23
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]

stc_cluster_vis = SourceEstimate(data,
                                 fsave_vertices,
                                 tmin=dataface.tmin,
                                 tstep=dataface.tstep)
stc_cluster_vis.save(
    '/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/GROUP/plots/clusters/test_west_par'
)

########################################################################################

# Fill the grand-average ('gave') arrays, one entry per subject
erfAAgave[s] = np.mean(erfAA_trial, axis=0)
erfAVgave[s] = np.mean(erfAV_trial, axis=0)
erfVAgave[s] = np.mean(erfVA_trial, axis=0)
erfVVgave[s] = np.mean(erfVV_trial, axis=0)

# Compute statistic between subjects
#threshold = 6.0
T_obs_Aon_inter, clusters_Aon_inter, cluster_p_values_Aon_inter, H0_Aon_inter = \
                permutation_cluster_test([erfAAgave, erfAVgave],
                            n_permutations=1000, threshold=None, tail=1,
                            n_jobs=2)
T_obs_Von_inter, clusters_Von_inter, cluster_p_values_Von_inter, H0_Von_inter = \
                permutation_cluster_test([erfVAgave, erfVVgave],
                            n_permutations=1000, threshold=None, tail=1,
                            n_jobs=2)
Example #24
epochs_power_1 = single_trial_power(data_condition_1, Fs=Fs, frequencies=frequencies, n_cycles=n_cycles, use_fft=False)

epochs_power_2 = single_trial_power(data_condition_2, Fs=Fs, frequencies=frequencies, n_cycles=n_cycles, use_fft=False)

epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# do ratio with baseline power:
epochs_power_1 /= np.mean(epochs_power_1[:, :, times < 0], axis=2)[:, :, None]
epochs_power_2 /= np.mean(epochs_power_2[:, :, times < 0], axis=2)[:, :, None]

###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
    [epochs_power_1, epochs_power_2], n_permutations=100, threshold=threshold, tail=0
)

###############################################################################
# View time-frequency plots
import pylab as pl

pl.clf()
pl.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
pl.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
pl.plot(times, evoked_contrast.T)
pl.title("Contrast of evoked response (%s)" % ch_name)
pl.xlabel("time (ms)")
pl.ylabel("Magnetic Field (fT/cm)")
pl.xlim(times[0], times[-1])
Example #25
                     tmin,
                     tmax,
                     picks=picks,
                     baseline=(None, 0),
                     reject=reject)
condition2 = epochs2.get_data()  # as 3D matrix

condition1 = condition1[:, 0, :]  # take only one channel to get a 2D array
condition2 = condition2[:, 0, :]  # take only one channel to get a 2D array

# %%
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([condition1, condition2], n_permutations=1000,
                             threshold=threshold, tail=1, n_jobs=None,
                             out_type='mask')

# %%
# Plot
times = epochs1.times
fig, (ax, ax2) = plt.subplots(2, 1, figsize=(8, 4))
ax.set_title('Channel : ' + channel)
ax.plot(times,
        condition1.mean(axis=0) - condition2.mean(axis=0),
        label="ERF Contrast (Event 1 - Event 2)")
ax.set_ylabel("MEG (T / m)")
ax.legend()

for i_c, c in enumerate(clusters):
    c = c[0]
Example #26
    long = fig.add_subplot(gs[0, 1:])
    # data proc
    easy = np.array(data["Long"][selection]["clockwise"]) * 1e14
    easy = np.mean(easy[:, ch_ix, :], axis=1)
    easy_mean = np.mean(easy, axis=0)
    easy_sem = sem(easy, axis=0)
    difficult = np.array(data["Long"][selection]["anti-clockwise"]) * 1e14
    difficult = np.mean(difficult[:, ch_ix, :], axis=1)
    difficult_mean = np.mean(difficult, axis=0)
    difficult_sem = sem(difficult, axis=0)

    threshold = 2.0
    T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
        [easy, difficult],
        n_permutations=5000,
        threshold=threshold,
        tail=0,
        n_jobs=-1)

    for i_c, c in enumerate(clusters):
        c = c[0]
        if cluster_p_values[i_c] < 0.05:
            long.axvspan(long_times[c.start],
                         long_times[c.stop - 1],
                         color=sign_colour,
                         alpha=0.2)
        elif cluster_p_values[i_c] < 0.5:
            long.axvspan(long_times[c.start],
                         long_times[c.stop - 1],
                         color=non_sign_colour,
                         alpha=0.2)
Example #27
    event_id = 2
    epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                         baseline=(None, 0), reject=reject)
    condition2 = epochs2.get_data()  # as 3D matrix

    X = [condition1, condition2]
    X = [np.transpose(x, (0, 2, 1)) for x in X]

    # read_ch_connectivity returns (connectivity_matrix, ch_names)
    connectivity = read_ch_connectivity('neuromag306planar_neighb.mat',
                                        picks=None)

    # threshold = 6.0
    threshold = None

    T_obs, clusters, cluster_p_values, H0 = \
        permutation_cluster_test(X, n_permutations=1000,
                                 threshold=threshold, connectivity=connectivity[0], tail=0, n_jobs=8)


    times = epochs1.times
    plt.close('all')
    plt.subplot(211)
    plt.title('Channel : ')
    plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
             label="ERF Contrast (Event 1 - Event 2)")
    plt.ylabel("MEG (T / m)")
    plt.legend()
    plt.subplot(212)
    for i_c, c in enumerate(clusters):
        c = c[0]
        if cluster_p_values[i_c] <= 0.05:
            h = plt.axvspan(times[c.start], times[c.stop - 1],
                            color='r', alpha=0.3)

        ht_cls_bs = mne.baseline.rescale(
            np.abs(ht_cls[band])**2,
            times,
            baseline=(-3.8, -3.3),
            mode="zscore")
        ht_pln_bs = mne.baseline.rescale(
            np.abs(ht_pln[band])**2,
            times,
            baseline=(-3.8, -3.3),
            mode="zscore")
        cls_all += [ht_cls_bs.mean(axis=0)]
        pln_all += [ht_pln_bs.mean(axis=0)]

    cls_all = np.asarray(cls_all)
    pln_all = np.asarray(pln_all)

    cluster_results = []

    for j in range(cls_all.shape[1]):
        data_1 = cls_all[:, j, :]
        data_2 = pln_all[:, j, :]

        # Compute statistic

        T_obs, clusters, cluster_p_values, H0 = \
            permutation_cluster_test([data_1, data_2],
                                     n_permutations=10000, tail=0, n_jobs=1)

        cluster_results += [cluster_p_values]

    results_all[band] = cluster_results

np.save(source_folder + "hilbert_data/perm_test_cls-pln_full.npy", results_all)
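Since results_all is, by all appearances, a plain dict, np.save pickles it; a
short sketch of reading the saved p-values back, with the path as defined
above:

results = np.load(source_folder + "hilbert_data/perm_test_cls-pln_full.npy",
                  allow_pickle=True).item()  # .item() recovers the dict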
rot_odd = rescale(odd_data[:, rot_range[0]:rot_range[1]],
                  rot_times, (-0.1, 0.0),
                  mode="mean")
rot_odd_mean = np.average(rot_odd, axis=0)
rot_odd_sem = sem(rot_odd, axis=0)
obs_odd = rescale(odd_data[:, obs_range[0]:obs_range[1]],
                  obs_times, (-0.1, 0.0),
                  mode="mean")
obs_odd_mean = np.average(obs_odd, axis=0)
obs_odd_sem = sem(obs_odd, axis=0)

threshold = 2.0

rot_T_obs, rot_clusters, rot_cluster_p_values, rot_H0 = permutation_cluster_test(
    [rot_reg, rot_odd],
    n_permutations=5000,
    threshold=threshold,
    tail=0,
    n_jobs=-1)

obs_T_obs, obs_clusters, obs_cluster_p_values, obs_H0 = permutation_cluster_test(
    [obs_reg, obs_odd],
    n_permutations=5000,
    threshold=threshold,
    tail=0,
    n_jobs=-1)

gs = gridspec.GridSpec(1,
                       3,
                       wspace=0.2,
                       hspace=0.1,
                       width_ratios=[0.4, 0.2, 0.4])
Example #30
0
        ht_cls_bs = mne.baseline.rescale(np.abs(ht_cls[band])**2,
                                         times,
                                         baseline=(-3.8, -3.3),
                                         mode="zscore")
        ht_int_bs = mne.baseline.rescale(np.abs(ht_int[band])**2,
                                         times,
                                         baseline=(-3.8, -3.3),
                                         mode="zscore")
        cls_all += [ht_cls_bs.mean(axis=0)]
        int_all += [ht_int_bs.mean(axis=0)]

    cls_all = np.asarray(cls_all)
    int_all = np.asarray(int_all)

    cluster_results = []

    for j in range(cls_all.shape[1]):
        data_1 = cls_all[:, j, :]
        data_2 = int_all[:, j, :]

        # Compute statistic
        T_obs, clusters, cluster_p_values, H0 = \
            permutation_cluster_test([data_1, data_2],
                                     n_permutations=10000, tail=0, n_jobs=1)

        cluster_results += [cluster_p_values]

    results_all[band] = cluster_results

np.save(source_folder + "hilbert_data/perm_test_cls-int_full.npy", results_all)
Example #31
0
                evoked_POST.append(evoked)
    FZ = epochs['S6/ZHUO4/TAR'].ch_names.index('FZ')
    CZ = epochs['S6/ZHUO4/TAR'].ch_names.index('CZ')
    PZ = epochs['S6/ZHUO4/TAR'].ch_names.index('PZ')
    evoked_PRE = np.array(evoked_PRE)
    evoked_POST = np.array(evoked_POST)
    sub_FZ = np.subtract(evoked_POST[:, FZ, :], evoked_PRE[:, FZ, :])
    sub_CZ = np.subtract(evoked_POST[:, CZ, :], evoked_PRE[:, CZ, :])
    sub_PZ = np.subtract(evoked_POST[:, PZ, :], evoked_PRE[:, PZ, :])
    diff_FZ.append(sub_FZ)
    diff_CZ.append(sub_CZ)
    diff_PZ.append(sub_PZ)

#threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([diff_FZ[0], diff_FZ[1]], n_permutations=1000,
                             tail=1, n_jobs=1)
channel = 'FZ'
times = np.arange(-0.1, 0.301, 0.001)
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times,
         diff_FZ[0].mean(axis=0) - diff_FZ[1].mean(axis=0),
         label="ERP Contrast (Target Diff - Control Diff)")
plt.ylabel("Amplitude difference")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
    c = c[0]
    if cluster_p_values[i_c] <= 0.05:
        h = plt.axvspan(times[c.start], times[c.stop - 1],
                        color='r', alpha=0.3)
Example #32
0
def statscondCluster(data: list, freqs_mean: list,
                     ch_con_freq: scipy.sparse.csr_matrix, tail: int,
                     n_permutations: int, alpha: float) -> tuple:
    """
    Computes cluster-level statistical permutation test, corrected with
    channel connectivity across space and frequencies.

    Arguments:
        data: values from different conditions or different groups to compare,
          list of arrays (3d for time-frequency power or connectivity values).
        freqs_mean: frequencies in frequency-band-of-interest used by MNE
          for PSD or CSD calculation, list.
        ch_con_freq: connectivity or metaconnectivity matrix for PSD or CSD
          values, used to assess a priori connectivity between channels across
          space and frequencies based on their positions, csr_matrix.
        tail: direction of the test; can be set to 1, 0 or -1.
        n_permutations: number of permutations computed, can be set to 50000.
        alpha: threshold to consider clusters significant, can be set to 0.05
          or less.

    Returns:
        F_obs, clusters, cluster_p_values, H0, F_obs_plot:

        - F_obs: statistic (F by default) observed for all variables,
          array of shape (n_tests,).

        - clusters: list of boolean arrays, each with the same shape as the
          input data, with True values marking locations that belong to a
          cluster.

        - cluster_p_values: p-value for each cluster, array.

        - H0: max cluster level stats observed under permutation, array of
          shape (n_permutations,).

        - F_obs_plot: statistical values of the clusters significant at the
          alpha threshold, for plotting significant sensors (see the
          plot_significant_sensors function in the toolbox), array of shape
          (n_tests,).
    """

    # computing the cluster-level permutation test (F statistic by default)
    F_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(data,
                                                                     threshold=None,
                                                                     n_permutations=n_permutations,
                                                                     tail=tail, connectivity=ch_con_freq,
                                                                     t_power=1, out_type='mask')
    # t_power = 1 weighs each location by its statistical score,
    # when set to 0 it gives a count of locations in each cluster

    # getting significant clusters for visualization
    F_obs_plot = np.nan * np.ones_like(F_obs)
    for c, p_val in zip(clusters, cluster_p_values):
        if p_val <= alpha:
            # keep the F values inside each significant cluster
            F_obs_plot[c] = F_obs[c]
    F_obs_plot = np.nan_to_num(F_obs_plot)

    statscondClusterTuple = namedtuple('statscondCluster', [
                                       'F_obs', 'clusters', 'cluster_p_values', 'H0', 'F_obs_plot'])

    return statscondClusterTuple(
        F_obs=F_obs,
        clusters=clusters,
        cluster_p_values=cluster_p_values,
        H0=H0,
        F_obs_plot=F_obs_plot)
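A minimal usage sketch for statscondCluster with made-up shapes: two groups of
flattened PSD values (n_subjects, n_channels * n_freqs) and, as a stand-in for
a real space-frequency connectivity matrix, a sparse identity. It assumes an
MNE version whose permutation_cluster_test accepts connectivity= together with
out_type='mask', as the function above requires:

import numpy as np
import scipy.sparse

rng = np.random.default_rng(42)
n_ch, n_freqs = 30, 5
psd_a = rng.normal(size=(12, n_ch * n_freqs))  # hypothetical group A
psd_b = rng.normal(size=(12, n_ch * n_freqs))  # hypothetical group B
freqs_mean = [8, 9, 10, 11, 12]                # e.g. an alpha band
# identity: each channel-frequency pair is adjacent only to itself
ch_con_freq = scipy.sparse.eye(n_ch * n_freqs, format='csr')

res = statscondCluster([psd_a, psd_b], freqs_mean, ch_con_freq,
                       tail=0, n_permutations=1000, alpha=0.05)
print(res.cluster_p_values)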
Example #33
0
# Define stat function for univariate tests
def stat_fun(arg1, arg2):
    # paired (related-samples) t-test, no parallel computing;
    # return only the t statistic, as the clustering routine expects
    statistic, p = stats.ttest_rel(arg1, arg2)
    return statistic


# Do clustering
Nperm = 1000
p_threshold = 0.05
# for a paired t-test, degrees of freedom = n_observations - 1
df = V3[0].shape[0] - 1
t_threshold = stats.distributions.t.ppf(1. - p_threshold / 2, df)
T_obs, clusters, cluster_p_values, H0 = clu = \
    permutation_cluster_test(X, t_power=1, step_down_p=0.05,
                             threshold=t_threshold, n_permutations=Nperm,
                             stat_fun=stat_fun, connectivity=connectivity,
                             out_type='indices', n_jobs=1, tail=0)
# If threshold is None, it will choose a t-threshold equivalent to p < 0.05
# for the given number of (within-subject) observations.
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

#  Plot/save T-stat
T_obs = np.mean(T_obs, 0)
T_obs = T_obs.reshape(len(T_obs), 1)
Tstc = stc_fsaverage_slow
Tstc.data = T_obs
Tstc.save(savefolder + 'NT_Tstat_V3vsV1_ave' + str(11) + '_' + str(17) + 'Hz')

#    Now let's build a convenient representation of each cluster, where each
#    cluster becomes a "time point" in the SourceEstimate
if good_cluster_inds.shape[0] > 0:
    stc_all_cluster_vis = summarize_clusters_stc(
        clu, tstep=tstep, vertices=fsave_vertices,
        subject='fsaverage')  # tstep / fsave_vertices as in the MNE examples
Example #34
0
epochs2 = mne.Epochs(raw, events,
                     event_id,
                     tmin,
                     tmax,
                     picks=picks,
                     baseline=(None, 0),
                     reject=reject)
condition2 = epochs2.get_data()  # as 3D matrix

condition1 = condition1[:, 0, :]  # take only one channel to get a 2D array
condition2 = condition2[:, 0, :]  # take only one channel to get a 2D array

###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([condition1, condition2], n_permutations=1000,
                             threshold=threshold, tail=1, n_jobs=1)

###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times,
         condition1.mean(axis=0) - condition2.mean(axis=0),
         label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
    c = c[0]
    if cluster_p_values[i_c] <= 0.05:
        h = plt.axvspan(times[c.start], times[c.stop - 1],
                        color='r', alpha=0.3)

        #evoked.plot()
        df_cluster1 = evoked.to_data_frame(picks=('Oz', 'O1', 'O2'))
        df_cluster2 = evoked.to_data_frame(picks=('PO7', 'PO8'))
        cluster1 = df_cluster1.mean(1)
        cluster2 = df_cluster2.mean(1)
        #cluster1.plot()
        #cluster2.plot()
        #times = np.arange(0.080, 0.110, 0.005)
        #evoked.plot_topomap(times, ch_type='eeg', time_unit='s')


if __name__ == '__main__':
    main()
#%% COMPUTE STATISTICS
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([HSF_cluster1, LSF_cluster1],
                             threshold=None, tail=0)


#%% sLORETA
#inverse is made and applied here. Forward solution and cov matrix were made in section "EVOKED DATA" above
def run_inverse(subject_id):
    subject = "sub%03d" % subject_id
    print("processing subject: %s" % subject)
    evo_path = op.join(data_path, "EEG_Evoked")
    inv_path = op.join(data_path, "EEG_Source")
    for run in range(1, 2):
        fname_ave = op.join(evo_path,
                            'sub_%03d_LSF_HSF-ave.fif' % (subject_id, ))
        fname_cov = op.join(evo_path,
                            'sub_%03d_LSF_HSF-cov.fif' % (subject_id, ))
        fname_fwd = op.join(evo_path,
                            'sub_%03d_LSF_HSF-fwd.fif' %
                            (subject_id, ))  # assumed, per the -ave/-cov pattern

epochs_power_1 = epochs_power_1[:, 0, :, :]  # only 1 channel to get 3D matrix
epochs_power_2 = epochs_power_2[:, 0, :, :]  # only 1 channel to get 3D matrix

# Compute ratio with baseline power (be sure to correct time vector with
# decimation factor)
baseline_mask = times[::decim] < 0
epochs_baseline_1 = np.mean(epochs_power_1[:, :, baseline_mask], axis=2)
epochs_power_1 /= epochs_baseline_1[..., np.newaxis]
epochs_baseline_2 = np.mean(epochs_power_2[:, :, baseline_mask], axis=2)
epochs_power_2 /= epochs_baseline_2[..., np.newaxis]
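A quick sanity check (a sketch, relying on the decim and times variables from
earlier in this example): the power arrays keep one sample every decim points,
so the baseline mask must be built from the decimated time vector.

assert epochs_power_1.shape[-1] == times[::decim].shape[0]
assert baseline_mask.shape[0] == epochs_power_1.shape[-1]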

###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([epochs_power_1, epochs_power_2],
                             n_permutations=100, threshold=threshold, tail=0)

###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
evoked_contrast = np.mean(data_condition_1, 0) - np.mean(data_condition_2, 0)
plt.plot(times, evoked_contrast.T)
plt.title('Contrast of evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 200)

plt.subplot(2, 1, 2)
Example #37
0
            power_a['%s' % channel] = np.concatenate(
                (power_a['%s' % channel], temp_power_a), axis=0)

            power_b['%s' % channel] = np.concatenate(
                (power_b['%s' % channel], temp_power_b), axis=0)

        del temp_power_a, temp_power_b

# loop through channels of interest and test the difference between B and A cues
for channel in channels:

    # run cluster permutations test
    threshold = 10.0
    T_obs, clusters, cluster_p_values, H0 = \
        permutation_cluster_test([power_b[channel], power_a[channel]],
                                 n_permutations=10000, threshold=threshold,
                                 tail=0, n_jobs=n_jobs, out_type='mask')

    # Create new stats image with only significant clusters
    T_obs_plot = np.zeros_like(T_obs, dtype=bool)

    for a_lev in [0.05, 0.01]:

        for c, p_val in zip(clusters, cluster_p_values):
            if p_val <= a_lev:
                T_obs_plot[c] = True

        # variables for plot
        # channel in question
        ix = tfr_cue_a['subj_%s' % subjects[0]].ch_names.index(channel)
        # min max values