Code Example #1
import numpy as np
import mne
from mne import grade_to_tris
from scipy import stats


def grouplevel_Ttest2(ListSubj, condition, method, mod, twin, clust_p):
    
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG"
        
    # load a specific STC morphed on fsaverage to get shape info
    stc0_path = (wdir + '/' + ListSubj[0] + '/mne_python/STCS/IcaCorr_' + mod + 
               '_' + ListSubj[0] + '_' + condition[0] + '_pick_oriNone_' + 
                method + '_ico-5-fwd-fsaverage.fif-rh.stc')
    stc0      = mne.read_source_estimate(stc0_path)
    ncond     = len(condition)
    nsub      = len(ListSubj)
    ntimes    = len(stc0.times)    
    nvertices = stc0.data.shape[0]        
            
    # average individual STCs morphed on fsaverage for each cond
    AVG_STC_cond  = np.empty([nvertices, ntimes,  nsub, ncond])
    for s,subj in enumerate(ListSubj):
        for c,cond in enumerate(condition):
            stc_path = (wdir + '/' + subj + '/mne_python/STCS/IcaCorr_' + mod +
                        '_' + subj + '_' + cond + '_pick_oriNone_' + 
                        method + '_ico-5-fwd-fsaverage.fif-rh.stc')
            stc = mne.read_source_estimate(stc_path)     
            AVG_STC_cond[:,:,s,c] = stc.data
    # Compute statistic
    np.random.seed(42)
    
    # compute subject-by-subject mean difference            
    X = AVG_STC_cond[:,:,:,1] - AVG_STC_cond[:,:,:,0]
    
    # smooth the data (optional)
    #fsave_vertices = [np.arange(10242), np.arange(10242)]
    #morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
    #                                 fsave_vertices, 20, subjects_dir)
    #n_vertices_fsave = morph_mat.shape[0]

    # optional: restrict computation to temporal window of interest
    lower_bound = np.where(stc0.times >= twin[0])[0][0]
    upper_bound = np.where(stc0.times >= twin[1])[0][0]
    X = X[:,lower_bound:upper_bound,:]
    
    con = mne.spatial_tris_connectivity(grade_to_tris(5))
    X = np.transpose(X, [2, 1, 0])
    	
    p_threshold = clust_p
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., nsub -1)
    n_permutations = 1024
    
    T_obs, clusters, cluster_p_values, H0  = mne.stats.spatio_temporal_cluster_1samp_test(X, connectivity=con, n_jobs=40,
    	                                       threshold=t_threshold, n_permutations=n_permutations, verbose=True)                                       
    
    return T_obs, clusters, cluster_p_values, H0,stc0  
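All of these examples follow the same recipe: stack the per-subject data as (subjects x time x vertices), derive a cluster-forming threshold from the t distribution, and hand both to the permutation test. Below is a minimal, self-contained sketch of that recipe on synthetic data; it assumes an older MNE release where spatial_tris_connectivity and the connectivity= keyword still exist (later releases renamed them spatial_src_adjacency and adjacency=).

import numpy as np
from scipy import stats
from mne import spatial_tris_connectivity, grade_to_tris
from mne.stats import spatio_temporal_cluster_1samp_test

# synthetic data: subjects x time x vertices (ico-5 fsaverage has 2 x 10242 vertices)
n_subjects, n_times, n_vertices = 10, 5, 20484
X = np.random.RandomState(0).randn(n_subjects, n_times, n_vertices)

# two-tailed cluster-forming threshold for a one-sample t-test
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)

connectivity = spatial_tris_connectivity(grade_to_tris(5))
T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_1samp_test(
    X, connectivity=connectivity, threshold=t_threshold,
    n_permutations=128, n_jobs=1)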
Code Example #2
def stat_clus(X, tstep, time_thre=0, n_per=8192, p_threshold=0.01, p=0.05, fn_clu_out=None):
    print('Computing connectivity.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    #    Note that X needs to be a multi-dimensional array of shape
    #    samples (subjects) x time x space, so we permute dimensions
    X = np.transpose(X, [2, 1, 0])
    n_subjects = X.shape[0]
    fsave_vertices = [np.arange(X.shape[-1] // 2), np.arange(X.shape[-1] // 2)]
    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
    print('Clustering.')
    max_step = int(time_thre * 0.001 / tstep) + 1
    T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=1, max_step=max_step,
                                        threshold=t_threshold, n_permutations=n_per)
    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    good_cluster_inds = np.where(cluster_p_values < p)[0]
    print('the number of significant clusters is: %d' % good_cluster_inds.size)
    ###############################################################################
    # Save the clusters as stc file
    # ----------------------
    assert good_cluster_inds.size != 0, ('Current p_threshold is %f, maybe you '
                                         'need to reset a lower p_threshold' % p_threshold)
    np.savez(fn_clu_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
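stat_clus persists everything needed to rebuild a cluster summary later. A companion loading step might look like the sketch below; the file name is illustrative, the code assumes NumPy stored the clu tuple as an object array of its four components, and summarize_clusters_stc raises an error if no cluster survives its p_thresh.

import numpy as np
from mne.stats import summarize_clusters_stc

npz = np.load('clusters.npz', allow_pickle=True)  # hypothetical output of stat_clus
clu = tuple(npz['clu'])                           # (T_obs, clusters, cluster_p_values, H0)
fsave_vertices = [np.asarray(v) for v in npz['fsave_vertices']]
stc_vis = summarize_clusters_stc(clu, tstep=float(npz['tstep']),
                                 vertices=fsave_vertices, subject='fsaverage')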
Code Example #3
File: meg_statistics.py  Project: ofek-schechner/mmvt
def run_permutation_ttest(tmin=None, tmax=None, p_threshold=0.05, n_permutations=1024, inverse_method='dSPM', n_jobs=1):
    for cond_id, cond_name in enumerate(events_id.keys()):
        #todo: calc the 36
        controls_data = get_morphed_epochs_stcs(tmin, tmax, cond_name, get_healthy_controls(),
            36, inverse_method)
        controls_data = abs(controls_data)
        for patient in get_patients():
            try:
                print(patient, cond_name)
                patient_data = get_morphed_epochs_stcs(tmin, tmax, cond_name, [patient], None, inverse_method)
                patient_data = abs(patient_data)
                print(patient_data.shape, controls_data.shape)
                data = controls_data - patient_data
                del patient_data
                gc.collect()
                data = np.transpose(data, [2, 1, 0])
                connectivity = spatial_tris_connectivity(grade_to_tris(5))
                t_threshold = -stats.distributions.t.ppf(p_threshold / 2., data.shape[0] - 1)
                T_obs, clusters, cluster_p_values, H0 = \
                    spatio_temporal_cluster_1samp_test(data, connectivity=connectivity, n_jobs=n_jobs,
                        threshold=t_threshold, n_permutations=n_permutations)
                results_file_name = op.join(LOCAL_ROOT_DIR, 'permutation_ttest_results', '{}_{}_{}'.format(patient, cond_name, inverse_method))
                np.savez(results_file_name, T_obs=T_obs, clusters=clusters, cluster_p_values=cluster_p_values, H0=H0)
                good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
                print('good_cluster_inds: {}'.format(good_cluster_inds))
            except Exception:
                print('bummer! {}, {}'.format(patient, cond_name))
                print(traceback.format_exc())
Code Example #4
def per2test(X1, X2, p_thr, p, tstep, n_per=8192, fn_clu_out=None):

    #    Note that X needs to be a multi-dimensional array of shape
    #    samples (subjects) x time x space, so we permute dimensions
    n_subjects1 = X1.shape[2]
    n_subjects2 = X2.shape[2]
    fsave_vertices = [np.arange(X1.shape[0] // 2), np.arange(X1.shape[0] // 2)]
    X1 = np.transpose(X1, [2, 1, 0])
    X2 = np.transpose(X2, [2, 1, 0])
    X = [X1, X2]
    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    f_threshold = stats.distributions.f.ppf(1. - p_thr / 2., n_subjects1 - 1, n_subjects2 - 1)
    print('Clustering.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_test(X, n_permutations=n_per, 
                                    connectivity=connectivity, n_jobs=1,
                                    threshold=f_threshold)
    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    good_cluster_inds = np.where(cluster_p_values < p)[0]
    print('the number of significant clusters is: %d' % good_cluster_inds.size)
    ###############################################################################
    # Save the clusters as stc file
    # ----------------------
    assert good_cluster_inds.size != 0, ('Current p_threshold is %f, maybe you '
                                         'need to reset a lower p_threshold' % p_thr)
    np.savez(fn_clu_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
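per2test forms its cluster-forming threshold from the F distribution with (n1 - 1, n2 - 1) degrees of freedom. Dividing p_thr by 2 is a carry-over from the two-tailed t-test convention; for the inherently one-sided F-test it simply makes the cut-off stricter, as a quick scipy check shows:

from scipy import stats

n1, n2, p_thr = 14, 14, 0.01
f_strict = stats.distributions.f.ppf(1. - p_thr / 2., n1 - 1, n2 - 1)  # as in per2test
f_plain = stats.distributions.f.ppf(1. - p_thr, n1 - 1, n2 - 1)        # plain upper tail
print(f_strict > f_plain)  # True: halving p raises the threshold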
Code Example #5
def grouplevel_spatial_linreg(ListSubj,condition,method,mod,twin,clust_p):
    
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG"
        
    # load a specific STC morphed on fsaverage to get shape info
    stc0_path = (wdir + '/' + ListSubj[0] + '/mne_python/STCS/IcaCorr_' + mod + 
               '_' + ListSubj[0] + '_' + condition[0] + '_pick_oriNone_' + 
                method + '_ico-5-fwd-fsaverage.fif-rh.stc')
    stc0      = mne.read_source_estimate(stc0_path)
    ncond     = len(condition)
    nsub      = len(ListSubj)
    ntimes    = len(stc0.times)    
    nvertices = stc0.data.shape[0]        
            
    # average individual STCs morphed on fsaverage for each cond
    AVG_STC_cond  = np.empty([nvertices, ntimes,  nsub, ncond])
    
    for s,subj in enumerate(ListSubj):            
        for c,cond in enumerate(condition):
            stc_path = (wdir + '/' + subj + '/mne_python/STCS/IcaCorr_' + mod +
                        '_' + subj + '_' + cond + '_pick_oriNone_' + 
                        method + '_ico-5-fwd-fsaverage.fif-rh.stc')
            stc = mne.read_source_estimate(stc_path)     
            AVG_STC_cond[:,:,s,c] = stc.data

    # optional: restrict computation to temporal window of interest
    lower_bound = np.where(stc0.times >= twin[0])[0][0]
    upper_bound = np.where(stc0.times >= twin[1])[0][0]
    
    con = mne.spatial_tris_connectivity(grade_to_tris(5))
    
    # array of shapes (obs, time, vertices)
    X, Xpred = [], []
    for c,cond in enumerate(condition):
        X.append(np.mean(np.transpose(AVG_STC_cond[:,lower_bound: upper_bound,:,c], [2, 1, 0]),1))
        Xpred.append(np.ones((nsub,nvertices))*(c+1))
        
    X     = np.array(X) 
    Xpred = np.array(Xpred) 
    
    Reg = np.zeros((nsub,nvertices))
    # linear regression per vertex
    for s in range(nsub):
        for v in range(nvertices):
            tmp = stats.linregress(X[:,s,v],Xpred[:,s,v])   
            Reg[s,v] = tmp.slope
     
    if clust_p is None:
        T_obs, clu, clu_p_val, H0  = mne.stats.permutation_cluster_1samp_test(Reg, None,
                                                                        connectivity=con, n_jobs=8,
                                                                        verbose=True, seed = 666)
    else:
        p_threshold = clust_p
        t_threshold = -stats.distributions.t.ppf(p_threshold / 2., nsub -1)
        T_obs, clu, clu_p_val, H0  = mne.stats.permutation_cluster_1samp_test(Reg, threshold=t_threshold,
                                                                              connectivity=con, n_jobs=8,
                                                                              verbose=True, seed = 666)                                       
     
    return T_obs, clu, clu_p_val, H0,stc0      
Code Example #6
def grouplevel_stats(ListSubj,condition,method,mod,twin,clust_p):
    
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG"
        
    # load a specific STC morphed on fsaverage to get shape info
    stc0_path = (wdir + '/' + ListSubj[0] + '/mne_python/STCS/IcaCorr_' + mod + 
               '_' + ListSubj[0] + '_' + condition[0] + '_pick_oriNone_' + 
                method + '_ico-5-fwd-fsaverage.fif-rh.stc')
    stc0      = mne.read_source_estimate(stc0_path)
    stc0.crop(-0.2,5)
    ncond     = len(condition)
    nsub      = len(ListSubj)
    ntimes    = len(stc0.times)    
    nvertices = stc0.data.shape[0]        
            
    # average individual STCs morphed on fsaverage for each cond
    AVG_STC_cond  = np.empty([nvertices, ntimes,  nsub, ncond])
    
    for s,subj in enumerate(ListSubj):            
        for c,cond in enumerate(condition):
            stc_path = (wdir + '/' + subj + '/mne_python/STCS/IcaCorr_' + mod +
                        '_' + subj + '_' + cond + '_pick_oriNone_' + 
                        method + '_ico-5-fwd-fsaverage.fif-rh.stc')
            stc = mne.read_source_estimate(stc_path) 
            stc.crop(-0.2,5)
            AVG_STC_cond[:,:,s,c] = stc.data

    # optional: restrict computation to temporal window of interest
    lower_bound = np.where(stc0.times >= twin[0])[0][0]
    upper_bound = np.where(stc0.times >= twin[1])[0][0]
    
    con = mne.spatial_tris_connectivity(grade_to_tris(5))
    # array of shapes (obs, time, vertices)
    X = []
    for c,cond in enumerate(condition):
        X.append(np.transpose(AVG_STC_cond[:,lower_bound: upper_bound,:,c], [2, 1, 0]))
    	
    effects = 'A'
    factor_levels = [3]
   
    # get f-values only.
    def mystat_fun(*args):
        return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                     effects=effects, return_pvals=False)[0]

    p_threshold = clust_p
    f_threshold = mne.stats.f_threshold_mway_rm(nsub, factor_levels=factor_levels, effects=effects, pvalue=p_threshold)

    T_obs, clu, clu_p_val, H0  = mne.stats.spatio_temporal_cluster_test(X, threshold=f_threshold,stat_fun=mystat_fun,
                                                                        connectivity=con, n_jobs=1,
                                                                        verbose=True, seed = 666)                                       
    
    return T_obs, clu, clu_p_val, H0,stc0      
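The mystat_fun wrapper above is needed because the cluster test hands its stat function one array per condition, each shaped (subjects, observations), while f_mway_rm expects a single (subjects, conditions, observations) array. A standalone check of that reshaping, mirroring the call pattern of the example (sizes are arbitrary):

import numpy as np
from mne.stats import f_mway_rm

n_subjects, n_obs = 12, 50
factor_levels, effects = [3], 'A'
args = [np.random.randn(n_subjects, n_obs) for _ in range(3)]  # one array per condition

data = np.swapaxes(np.asarray(args), 1, 0)  # -> (subjects, conditions, observations)
F = f_mway_rm(data, factor_levels=factor_levels, effects=effects,
              return_pvals=False)[0]
print(F.shape)  # one F value per observation (e.g. per vertex or time point)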
Code Example #7
File: stat_cluster.py  Project: dongqunxi/jumeg
def per2test(X1, X2, p_thr, p, tstep, n_per=8192, fn_clu_out=None):
    '''
      Calculate significant clusters using a two-sample F-test.

      Parameters
      ----------
      X1, X2: array
        The shape of each array should be (vertices, timepoints, subjects).
      tstep: float
        The interval between timepoints.
      n_per: int
        Number of permutations for the test.
      p_thr: float
        The cluster-forming p-value threshold.
      p: float
        The corrected p-value threshold for cluster significance.
      fn_clu_out: string
        The filename for saving the clusters.
    '''
    #    Note that X needs to be a multi-dimensional array of shape
    #    samples (subjects) x time x space, so we permute dimensions
    n_subjects1 = X1.shape[2]
    n_subjects2 = X2.shape[2]
    fsave_vertices = [np.arange(X1.shape[0] // 2), np.arange(X1.shape[0] // 2)]
    X1 = np.transpose(X1, [2, 1, 0])
    X2 = np.transpose(X2, [2, 1, 0])
    X = [X1, X2]

    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    f_threshold = stats.distributions.f.ppf(1. - p_thr / 2., n_subjects1 - 1,
                                            n_subjects2 - 1)
    # t_threshold = stats.distributions.t.ppf(1. - p_thr / 2., n_subjects1 - 1,
    #                                         n_subjects2 - 1)

    print('Clustering...')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_test(X, n_permutations=n_per, #step_down_p=0.001,
                                     connectivity=connectivity, n_jobs=1,
                                     # threshold=t_threshold, stat_fun=stats.ttest_ind)
                                     threshold=f_threshold)

    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    good_cluster_inds = np.where(cluster_p_values < p)[0]
    print('the number of significant clusters is: %d' % good_cluster_inds.size)

    # Save the clusters as stc file
    np.savez(fn_clu_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
    assert good_cluster_inds.size != 0, ('Current p_threshold is %f, maybe you '
                                         'need to reset a lower p_threshold' % p_thr)
Code Example #8
def perform_statistics_2(morphed_data, parameter_cache, vector, p_value=None):
    """Performs the statistical analysis using spatial_tris_connectivity.

    :param morphed_data: Morphed data obtained from morph_data function
    :param parameter_cache: Morphed parameter cache obtained from morph_data function.
    :param vector: Method to perform modelling ('sLORETA' etc.)
    :param p_value: Statistical p-value
    :return: clu, good_cluster_inds
    """
    # Unpack parameter cache dictionary
    n_subjects = parameter_cache['n_subjects']
    n_times = parameter_cache['n_times']

    # Take on the absolute
    X = np.abs(morphed_data)

    # Obtain the paired contrast
    if vector is False:
        X = X[:, :, :, 0] - X[:, :, :, 1]  # dimension is (space, time, subjects)
    else:
        X = X[:, :, :, :, 0] - X[:, :, :, :, 1]  # dimension is (space, vector, time, subjects)

    print('Computing connectivity... ')
    connectivity_2 = mne.spatio_temporal_tris_connectivity(
        grade_to_tris(5), n_times)

    # Note that X needs to be a multi-dimensional array of shape [samples (subjects) x time x space]
    if vector is False:
        X = np.transpose(X, [2, 1, 0])
    else:
        X = np.transpose(X, [3, 2, 1, 0])  ##### TO DOUBLE CHECK #####

    # Perform the clustering
    p_threshold = p_value  # 0.001
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)

    print('Clustering... ')
    T_obs, clusters, cluster_p_values, H0 = spatio_temporal_cluster_1samp_test(
        X, connectivity=connectivity_2, n_jobs=1, threshold=t_threshold)

    # Pack the outputs into tuple
    clu = (T_obs, clusters, cluster_p_values, H0)

    # Select the clusters that are sig. at p < p_value (Note this value is multiple-comparisons corrected)
    good_cluster_inds = np.where(cluster_p_values < p_value)[0]
    return clu, good_cluster_inds
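This is the only example in the listing that builds a spatio-temporal connectivity matrix; the others pass a purely spatial one and let the clustering routine handle the time dimension internally. Under the same old-API assumption as above, the two differ only in size:

from mne import (grade_to_tris, spatial_tris_connectivity,
                 spatio_temporal_tris_connectivity)

tris = grade_to_tris(5)  # ico-5 triangulation, 2 x 10242 vertices
n_times = 10
con_s = spatial_tris_connectivity(tris)                    # (V, V)
con_st = spatio_temporal_tris_connectivity(tris, n_times)  # (V*T, V*T)
print(con_s.shape, con_st.shape)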
Code Example #9
File: meg_statistics.py  Project: ofek-schechner/mmvt
def permutation_test_on_source_data_with_spatio_temporal_clustering(controls_data, patient_data, patient, cond_name,
                tstep, n_permutations, inverse_method='dSPM', n_jobs=6):
    try:
        print('permutation_test: patient {}, cond {}'.format(patient, cond_name))
        connectivity = spatial_tris_connectivity(grade_to_tris(5))
        #    Note that X needs to be a list of multi-dimensional array of shape
        #    samples (subjects_k) x time x space, so we permute dimensions
        print(controls_data.shape, patient_data.shape)
        X = [controls_data, patient_data]

        p_threshold = 0.05
        f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                                controls_data.shape[0] - 1, 1)
        print('Clustering. threshold = {}'.format(f_threshold))
        T_obs, clusters, cluster_p_values, H0 = clu =\
            spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=n_jobs, threshold=10, n_permutations=n_permutations)

        results_file_name = op.join(LOCAL_ROOT_DIR, 'clusters_results', '{}_{}_{}'.format(patient, cond_name, inverse_method))
        np.savez(results_file_name, T_obs=T_obs, clusters=clusters, cluster_p_values=cluster_p_values, H0=H0)
        #    Now select the clusters that are sig. at p < 0.05 (note that this value
        #    is multiple-comparisons corrected).
        good_cluster_inds = np.where(cluster_p_values < 0.05)[0]

        ###############################################################################
        # Visualize the clusters

        print('Visualizing clusters.')

        #    Now let's build a convenient representation of each cluster, where each
        #    cluster becomes a "time point" in the SourceEstimate
        fsave_vertices = [np.arange(10242), np.arange(10242)]
        stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
                                                     vertices=fsave_vertices,
                                                     subject='fsaverage')
        stc_all_cluster_vis.save(op.join(LOCAL_ROOT_DIR, 'stc_clusters', '{}_{}_{}'.format(patient, cond_name, inverse_method)), ftype='h5')

        # #    Let's actually plot the first "time point" in the SourceEstimate, which
        # #    shows all the clusters, weighted by duration
        # # blue blobs are for condition A != condition B
        # brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
        #                                  subjects_dir=subjects_dir, clim='auto',
        #                                  time_label='Duration significant (ms)')
        # brain.set_data_time_index(0)
        # brain.show_view('lateral')
        # brain.save_image('clusters.png')
    except Exception:
        print('bummer! {}, {}'.format(patient, cond_name))
        print(traceback.format_exc())
Code Example #10
File: stat_cluster.py  Project: dongqunxi/jumeg
def stat_clus(X, tstep, n_per=8192, p_threshold=0.01, p=0.05, fn_clu_out=None):
    '''
      Calculate significant clusters using a one-sample t-test.

      Parameters
      ----------
      X: array
        The shape of X should be (vertices, timepoints, subjects).
      tstep: float
        The interval between timepoints.
      n_per: int
        Number of permutations for the test.
      p_threshold: float
        The cluster-forming p-value threshold.
      p: float
        The corrected p-value threshold for cluster significance.
      fn_clu_out: string
        The filename for saving the clusters.
    '''

    print('Computing connectivity.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))

    #    Note that X needs to be a multi-dimensional array of shape
    #    samples (subjects) x time x space, so we permute dimensions
    X = np.transpose(X, [2, 1, 0])
    n_subjects = X.shape[0]
    fsave_vertices = [np.arange(X.shape[-1] // 2), np.arange(X.shape[-1] // 2)]

    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
    print('Clustering.')
    T_obs, clusters, cluster_p_values, H0 = clu = \
        spatio_temporal_cluster_1samp_test(X, connectivity=connectivity,
                                           n_jobs=1, threshold=t_threshold,
                                           n_permutations=n_per)

    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    good_cluster_inds = np.where(cluster_p_values < p)[0]
    print('the number of significant clusters is: %d' % good_cluster_inds.size)

    # Save the clusters as stc file
    np.savez(fn_clu_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
    assert good_cluster_inds.size != 0, ('Current p_threshold is %f, maybe you '
                                         'need to reset a lower p_threshold' % p_threshold)
Code Example #11
File: ANOVA_subjlvl.py  Project: neurospin/MTTMEG
    def sublevel_spatial_stats(X, subject, index, stc, clust_p, modality,
                               method, condition):

        con = mne.spatial_tris_connectivity(grade_to_tris(5))

        # morphing data
        subjects_dir = '/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/mri/'

        fsave_vertices = [np.arange(10242), np.arange(10242)]
        subject_vertices = [stc.lh_vertno, stc.rh_vertno]
        morph_mat = compute_morph_matrix(subject, 'fsaverage',
                                         subject_vertices, fsave_vertices, 20,
                                         subjects_dir)
        n_vertices_fsave = morph_mat.shape[0]

        nobs = index[0]
        ncond = len(index)
        ntimes = X.shape[2]
        nvertices = stc.lh_vertno.shape[0] + stc.rh_vertno.shape[0]

        #    We have to change the shape for the dot() to work properly
        X = np.transpose(X, [1, 2, 0])  # vertices * times * obs
        X = X.reshape(nvertices, ntimes * nobs * ncond)
        print('Morphing data.')
        X = morph_mat.dot(X)  # morph_mat is a sparse matrix
        X = X.reshape(n_vertices_fsave, ntimes, nobs, ncond)

        # list of array of shapes ncond*(obs, time, vertices)
        X = np.transpose(X, [2, 1, 0, 3])
        X = [np.squeeze(x) for x in np.split(X, 3, axis=-1)]

        # average over time to clusterize only on spatial dimension
        X = [np.mean(x, 1) for x in X]

        p_threshold = clust_p
        f_threshold = stats.distributions.f.ppf(1 - p_threshold, ncond - 1,
                                                ncond * (nobs - 1))

        # use default function one-way anova (not repeated measures!)
        F_obs, clu, clu_p_val, H0 = mne.stats.permutation_cluster_test(
            X,
            threshold=f_threshold,
            connectivity=con,
            n_jobs=4,
            verbose=True,
            seed=666)

        wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/"
        save_path = (wdir + subject + '/mne_python/STATS')

        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # save cluster stats
        spatial_clust_F = np.array((F_obs, clu, clu_p_val, H0))
        np.save((save_path + '/' + modality + '_' + 'cluster_stats_' +
                 "_vs_".join(condition)), spatial_clust_F)

        # save F-Map
        tmp = F_obs
        tmp = tmp[:, np.newaxis]
        fsave_vertices = [np.arange(10242), np.arange(10242)]
        stc_Ftest = mne.SourceEstimate(tmp, fsave_vertices, 0, stc.tstep)
        stc_Ftest.save(
            (save_path + '/' + modality + '_' + "_vs_".join(condition)))

        # to create probability maps: threshold the map at p < 0.05 and binarize
        VertKept = (F_obs >= f_threshold).astype(float)

        VertKept = VertKept[:, np.newaxis]
        fsave_vertices = [np.arange(10242), np.arange(10242)]
        stc_Ftest = mne.SourceEstimate(VertKept, fsave_vertices, 0, stc.tstep)
        stc_Ftest.save((save_path + '/' + modality + '_' + 'BinForProb_' +
                        "_vs_".join(condition)))

        return F_obs, clu, clu_p_val, H0
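The reshape/dot/reshape sequence in the middle of this function works because compute_morph_matrix returns a sparse matrix of shape (template vertices x subject vertices), so any stack of time points and observations can be morphed in a single product. A reduced sketch with a dense stand-in for the sparse matrix (all sizes are illustrative):

import numpy as np

n_src, n_dst = 800, 1000    # subject vertices -> template vertices
ntimes, nobs = 5, 3
morph_mat = np.random.rand(n_dst, n_src)  # stand-in for the sparse morph matrix
X = np.random.randn(n_src, ntimes, nobs)

X = X.reshape(n_src, ntimes * nobs)  # flatten trailing dims so dot() applies per column
X = morph_mat.dot(X)                 # morph every column at once
X = X.reshape(n_dst, ntimes, nobs)   # restore (vertices, time, obs)
print(X.shape)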
Code Example #12
File: stat_cluster.py  Project: jdammers/vis_dev
def sample2_clus(fn_list,
                 n_per=8192,
                 pthr=0.01,
                 p=0.05,
                 tail=0,
                 del_vers=None,
                 n_jobs=1):
    '''
      Calculate significant clusters using a two-sample F-test.

      Parameters
      ----------
      fn_list: list
        Paths of group arrays.
      n_per: int
        Number of permutations for the test.
      pthr: float
        The cluster-forming p-value threshold.
      p: float
        The corrected p-value threshold for cluster significance.
      tail: 0 or 1
        0 for a two-tailed test, 1 for a one-tailed test (note: overridden to 1 below).
      del_vers: None or _exclu_vers
        If '_exclu_vers', delete the vertices in the medial wall.
    '''
    for fn_npz in fn_list:
        fn_path = os.path.dirname(fn_npz)
        name = os.path.basename(fn_npz)
        #fn_out = fn_path + '/clu2sample_%s' %name[:name.rfind('.npz')] + '_%d_pct%.2f.npz' %(n_per, pct)
        fn_out = fn_path + '/clu2sample_%s' % name[:name.rfind(
            '.npz')] + '_%d_%dtail_pthr%.4f.npz' % (n_per, 1 +
                                                    (tail == 0), pthr)
        npz = np.load(fn_npz)
        tstep = npz['tstep'].flatten()[0]
        #    Note that X needs to be a multi-dimensional array of shape
        #    samples (subjects) x time x space, so we permute dimensions
        X = npz['X']
        ppf = stats.f.ppf
        tail = 1  # we are interested in an increase of variance only (overrides the tail argument)
        p_thresh = pthr / (
            1 + (tail == 0)
        )  # we can also adapt this to p=0.01 if the cluster size is too large
        n_samples_per_group = [len(x) for x in X]
        f_threshold = ppf(1. - p_thresh, *n_samples_per_group)
        if np.sign(tail) < 0:
            f_threshold = -f_threshold
        fsave_vertices = [
            np.arange(X.shape[-1] // 2),
            np.arange(X.shape[-1] // 2)
        ]
        print('Clustering...')
        connectivity = spatial_tris_connectivity(grade_to_tris(5))
        T_obs, clusters, cluster_p_values, H0 = clu = \
            spatio_temporal_cluster_test(X, n_permutations=n_per, #step_down_p=0.001,
                                        connectivity=connectivity, n_jobs=n_jobs,
                                        # threshold=t_threshold, stat_fun=stats.ttest_ind)
                                        threshold=f_threshold, spatial_exclude=del_vers, tail=tail)

        #    Now select the clusters that are sig. at p < 0.05 (note that this value
        #    is multiple-comparisons corrected).
        good_cluster_inds = np.where(cluster_p_values < p)[0]
        print('the number of significant clusters is: %d' % good_cluster_inds.size)

        # Save the clusters as stc file
        np.savez(fn_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
        assert good_cluster_inds.size != 0, (
            'Current p_threshold is %f, maybe you need to reset a lower '
            'p_threshold' % pthr)
Code Example #13
for fname in files:
    # exclude due to psychotropic factors
    if fname.find('WCEYBWMO') < 0:
        stc = mne.read_source_estimate(fname[:-7])
        if after_zero:
            stc.crop(tmin=0)
        X.append(stc.data)
        subj = fname.split('/')[-1].split('_')[0]
        gf_loc.append(np.nonzero(gf.maskid == subj)[0][0])
if len(X) != len(gf_loc):
    print('Mismatch between subject data and gf!')

# re-sort subject order in gf to match X
gf = gf.iloc[gf_loc]

connectivity = mne.spatial_tris_connectivity(mne.grade_to_tris(5))

for thresh in p_threshold:
    if my_test == 'nvVSper':
        g0 = [X[i].T for i, group in enumerate(gf.group) if group == 'NV']
        g1 = [X[i].T for i, group in enumerate(gf.group) if group == 'persistent']
        data = [np.array(g0), np.array(g1)]
        stat_obs, clusters, p_values, H0 = \
            mne.stats.spatio_temporal_cluster_test(data, n_jobs=njobs,
                                                   threshold=thresh,
                                                   connectivity=connectivity,
                                                   stat_fun=group_comparison,
                                                   tail=1,
                                                   n_permutations=nperms,
                                                   verbose=True)
    elif my_test == 'nvVSrem':
Code Example #14
    return f_twoway_rm(data,
                       factor_levels=factor_levels,
                       effects=effects,
                       return_pvals=return_pvals)[0]
    #  drop p-values (empty array).
    # Note. for further details on this ANOVA function consider the
    # corresponding time frequency example.


###############################################################################
# Compute clustering statistic

#    To use an algorithm optimized for spatio-temporal clustering, we
#    just pass the spatial connectivity matrix (instead of spatio-temporal)

source_space = grade_to_tris(5)
# as we only have one hemisphere we only need half of the connectivity
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)

#    Now let's actually do the clustering. Please relax, on a small
#    notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)

#    To speed things up a bit we will ...
n_permutations = 100  # ... run fewer permutations (reduces sensitivity)

print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
Code Example #15
File: cluster_ROIs.py  Project: dongqunxi/ChronoProc
def apply_sigSTC(fn_list_v, vevent, mevent, method='dSPM', vtmin=0., vtmax=0.35,
                 mtmin=-0.3, mtmax=0.05, radius=10.0):
    from mne import spatial_tris_connectivity, grade_to_tris
    from mne.stats import spatio_temporal_cluster_test
    from scipy import stats as stats
    X1, X2 = [], []
    stcs_trial = []
    for fn_v in fn_list_v:
        fn_m = fn_v[: fn_v.rfind('evtW')] + 'evtW_%s_bc_norm_1-lh.stc' %mevent
        stc_v = mne.read_source_estimate(fn_v)
        stcs_trial.append(stc_v.copy())
        stc_m = mne.read_source_estimate(fn_m)
        stc_v.resample(200)
        stc_m.resample(200)
        X1.append(stc_v.copy().crop(vtmin, vtmax).data)
        X2.append(stc_m.copy().crop(mtmin, mtmax).data)
    stcs_path = subjects_dir+'/fsaverage/%s_ROIs/conditions/' %method
    reset_directory(stcs_path)
    fn_avg = stcs_path + '%s' %(vevent)
    stcs = np.array(stcs_trial)
    stc_avg = np.sum(stcs, axis=0)/stcs.shape[0]
    stc_avg.save(fn_avg, ftype='stc')    
    X1 = np.array(X1).transpose(0, 2, 1)
    X2 = np.array(X2).transpose(0, 2, 1)
    ###############################################################################
    # Compute statistic
    
    #    To use an algorithm optimized for spatio-temporal clustering, we
    #    just pass the spatial connectivity matrix (instead of spatio-temporal)
    print('Computing connectivity.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))
    
    #    Note that X needs to be a list of multi-dimensional array of shape
    #    samples (subjects_k) x time x space, so we permute dimensions
    X = [X1, X2]
    #    Now let's actually do the clustering. This can take a long time...
    #    Here we set the threshold quite high to reduce computation.
    p_threshold = 0.0001
    f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        X1.shape[0] - 1, X1.shape[0] - 1)
    print('Clustering.')
   
    clu = spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
                                    threshold=f_threshold)
    #    Now select the clusters that are sig. at p < 0.05 (note that this value
    #    is multiple-comparisons corrected).
    #fsave_vertices = [np.arange(10242), np.arange(10242)]
    tstep = stc_v.tstep
    #stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
    #                                            vertices=fsave_vertices,
    #                                            subject='fsaverage')
    #stc_sig = stc_all_cluster_vis.mean()
    #fn_sig = subjects_dir+'/fsaverage/%s_ROIs/%s' %(method,vevent)
    #stc_sig.save(fn_sig)
    T_obs, clusters, clu_pvals, _ = clu
    n_times, n_vertices = T_obs.shape
    good_cluster_inds = np.where(clu_pvals < 0.05)[0]
    seeds = []
    #  Build a convenient representation of each cluster, where each
    #  cluster becomes a "time point" in the SourceEstimate
    T_obs = abs(T_obs)
    if len(good_cluster_inds) > 0:
        data = np.zeros((n_vertices, n_times))
        for cluster_ind in good_cluster_inds:
            data.fill(0)
            v_inds = clusters[cluster_ind][1]
            t_inds = clusters[cluster_ind][0]
            data[v_inds, t_inds] = T_obs[t_inds, v_inds]
            # Store a nice visualization of the cluster by summing across time
            data = np.sign(data) * np.logical_not(data == 0) * tstep
            seed = np.argmax(data.sum(axis=-1))
            seeds.append(seed)
    min_subject = 'fsaverage'
    labels_path = subjects_dir + '/fsaverage/dSPM_ROIs/%s/ini' %vevent
    reset_directory(labels_path)
    seeds = np.array(seeds)
    non_index_lh = seeds[seeds < 10242]
    if non_index_lh.size > 0:
        func_labels_lh = mne.grow_labels(min_subject, non_index_lh,
                                        extents=radius, hemis=0, 
                                        subjects_dir=subjects_dir, n_jobs=1)
        for i, func_label in enumerate(func_labels_lh):
            func_label.save(labels_path + '/%s_%d' %(vevent, i))
            
    seeds_rh = seeds - 10242
    non_index_rh = seeds_rh[seeds_rh >= 0]  # >= 0 so the first right-hemisphere vertex is kept
    if non_index_rh.size > 0:
        func_labels_rh = mne.grow_labels(min_subject, non_index_rh,
                                        extents=radius, hemis=1,
                                        subjects_dir=subjects_dir, n_jobs=1)                                             
   
        # right hemisphere definition
        for j, func_label in enumerate(func_labels_rh):
            func_label.save(labels_path + '/%s_%d' %(vevent, j))
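The loop at the end of apply_sigSTC shows the general way to unpack the clustering output: each entry of clusters is a (time_indices, vertex_indices) pair aligned with T_obs of shape (times, vertices). A small standalone illustration with fabricated inputs:

import numpy as np

n_times, n_vertices = 4, 6
T_obs = np.random.randn(n_times, n_vertices)
clusters = [(np.array([0, 0, 1]), np.array([2, 3, 2]))]  # one fabricated cluster
cluster_p_values = np.array([0.01])

for ci in np.where(cluster_p_values < 0.05)[0]:
    t_inds, v_inds = clusters[ci]
    data = np.zeros((n_vertices, n_times))
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]  # transpose into (vertices, times)
    peak = np.argmax(np.abs(data).sum(axis=-1))   # vertex with the strongest summed stat
    print('cluster %d peak vertex: %d' % (ci, peak))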
Code Example #16
File: stat_cluster.py  Project: jdammers/vis_dev
def sample1_clus(fn_list,
                 n_per=8192,
                 pct=99,
                 p=0.01,
                 tail=1,
                 del_vers=None,
                 n_jobs=1):
    '''
      Calculate significant clusters using a one-sample t-test.

      Parameters
      ----------
      fn_list: list
        Paths of group arrays.
      n_per: int
        Number of permutations for the test.
      pct: int or float
        The percentile of the baseline distribution used as the threshold.
      p: float
        The corrected p-value threshold for cluster significance.
      tail: 1 or 0
        If tail=1, a one-tailed test is run;
        if tail=0, a two-tailed test is run.
      del_vers: None or _exclu_vers
        If '_exclu_vers', delete the vertices in the medial wall.
    '''

    print('Computing connectivity.')
    connectivity = spatial_tris_connectivity(grade_to_tris(5))

    # Using the percentile of baseline array as the distribution threshold
    for fn_npz in fn_list:

        npz = np.load(fn_npz)
        tstep = npz['tstep'].flatten()[0]
        #    Note that X needs to be a multi-dimensional array of shape
        #    samples (subjects) x time x space, so we permute dimensions
        X = npz['X']
        X_b = X[1]
        X = X[0]
        fn_path = os.path.dirname(fn_npz)
        name = os.path.basename(fn_npz)

        if tail == 1:
            fn_out = fn_path + '/clu1sample_%s' % name[:name.rfind(
                '.npz')] + '_%d_%dtail_pct%.3f.npz' % (n_per, tail, pct)
            X = np.abs(X)
            t_threshold = np.percentile(np.abs(X_b), pct)
        elif tail == 0:
            fn_out = fn_path + '/clu1sample_%s' % name[:name.rfind(
                '.npz')] + '_%d_%dtail_pct%.3f.npz' % (n_per, tail + 2, pct)
            t_threshold = np.percentile(X_b, pct)

        fsave_vertices = [
            np.arange(X.shape[-1] // 2),
            np.arange(X.shape[-1] // 2)
        ]

        #n_subjects = X.shape[0]
        #t_threshold = -stats.distributions.t.ppf(0.01/(1+(tail==0)), n_subjects-1)

        print('Clustering.')
        T_obs, clusters, cluster_p_values, H0 = clu = \
            spatio_temporal_cluster_1samp_test(X, connectivity=connectivity,
                                            n_jobs=n_jobs, threshold=t_threshold,
                                            n_permutations=n_per, tail=tail, spatial_exclude=del_vers)

        #    Now select the clusters that are sig. at p < 0.05 (note that this value
        #    is multiple-comparisons corrected).
        good_cluster_inds = np.where(cluster_p_values < p)[0]
        print('the number of significant clusters is: %d' % good_cluster_inds.size)

        # Save the clusters as stc file
        np.savez(fn_out, clu=clu, tstep=tstep, fsave_vertices=fsave_vertices)
        assert good_cluster_inds.size != 0, (
            'Current threshold percentile is %f, maybe you need to reset a '
            'lower threshold' % pct)
Code Example #17
    # subjects X conditions X observations (optional).
    # The following expression catches the list input
    # and swaps the first and the second dimension, and finally calls ANOVA.
    return f_twoway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                        effects=effects, return_pvals=return_pvals)[0]
    # get f-values only.
    # Note. for further details on this ANOVA function consider the
    # corresponding time frequency example.

###############################################################################
# Compute clustering statistic

#    To use an algorithm optimized for spatio-temporal clustering, we
#    just pass the spatial connectivity matrix (instead of spatio-temporal)

source_space = grade_to_tris(5)
# as we only have one hemisphere we only need half of the connectivity
lh_source_space = source_space[source_space[:, 0] < 10242]
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(lh_source_space)

#    Now let's actually do the clustering. Please relax, on a small
#    notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_twoway_rm(n_subjects, factor_levels, effects, pthresh)

#    To speed things up a bit we will ...
n_permutations = 128  # ... run fewer permutations (reduces sensitivity)

print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
Code Example #18
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]

#    We want to compare the overall activity levels for each subject
X1 = np.abs(X1)  # only magnitude
X2 = np.abs(X2)  # only magnitude

###############################################################################
# Compute statistic

#    To use an algorithm optimized for spatio-temporal clustering, we
#    just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))

#    Note that X needs to be a list of multi-dimensional array of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=2,
Code Example #19
for fname in files:
    # exclude due to psychotropic factors
    if fname.find('WCEYBWMO') < 0:
        stc = mne.read_source_estimate(fname[:-7])
        if after_zero:
            stc.crop(tmin=0)
        X.append(stc.data)
        subj = fname.split('/')[-1].split('_')[0]
        gf_loc.append(np.nonzero(gf.maskid == subj)[0][0])
if len(X) != len(gf_loc):
    print('Mismatch between subject data and gf!')

# re-sort subject order in gf to match X
gf = gf.iloc[gf_loc]

connectivity = mne.spatial_tris_connectivity(mne.grade_to_tris(5))

for thresh in p_threshold:
    if my_test == 'nvVSper':
        g0 = [X[i].T for i, group in enumerate(gf.group) if group == 'NV']
        g1 = [
            X[i].T for i, group in enumerate(gf.group) if group == 'persistent'
        ]
        data = [np.array(g0), np.array(g1)]
        stat_obs, clusters, p_values, H0 = \
            mne.stats.spatio_temporal_cluster_test(data, n_jobs=njobs,
                                                   threshold=thresh,
                                                   connectivity=connectivity,
                                                   stat_fun=group_comparison,
                                                   tail=1,
                                                   n_permutations=nperms,
Code Example #20
        for f in np.arange(1, 2):
            print('Clustering: ', f_label[f], '/ k:', k, '/ w: ', w)

            X_SD = np.zeros([18, 20484])
            X_LD = np.zeros([18, 20484])

            for i in np.arange(0, 18):

                X_SD[i, :] = (abs(
                    my_stc_coh_SD[i][w][k][f].data)).reshape(20484)
                X_LD[i, :] = (abs(
                    my_stc_coh_LD[i][w][k][f].data)).reshape(20484)

            Y = X_SD - X_LD

            source_space = mne.grade_to_tris(5)
            adjacency = mne.spatial_tris_connectivity(source_space)
            # adjacency = None

            tstep = my_stc_coh_SD[i][w][k][f].tstep

            T_obs, clusters, cluster_p_values, H0 = clu = \
                mne.stats.permutation_cluster_1samp_test(Y, connectivity=adjacency,
                                                         n_jobs=-1,
                                                         threshold=t_threshold,
                                                         n_permutations=5000,
                                                         step_down_p=0.05,
                                                         t_power=1, tail=0)

            if len(np.where(cluster_p_values < p)[0]) != 0:
                print('significant!')
Code Example #21
        stc_fsaverage.save(stc_fname)
        betas.append(stc_fsaverage._data)

    all_betas.append(betas)

# convert to np
all_betas = np.array(all_betas, float)

# dimension order should be obs x time x source
all_betas = np.transpose(all_betas, [0, 1, 3, 2])

for ii, regressor in enumerate(var_names):

    save_dir = "%s/_RESULTS/%s" % (root_dir, regressor)
    # apply the permutation test
    connectivity = spatial_tris_connectivity(grade_to_tris(spacing))
    t_obs, clusters, cluster_pv, H0 = p1samp(all_betas[:, ii, ...],
                                             threshold=4,
                                             n_permutations=n_perm,
                                             connectivity=connectivity,
                                             buffer_size=None,
                                             verbose=True)
    # save result of the cluster test
    np.save('%s/_stats_%s.npy' % (save_dir, regressor),
            (t_obs, clusters, cluster_pv, H0))

    # remove clusters that are too short or too small or not significant
    # cluster_bool = threshold_clusters(clusters, cluster_pv, min_times,
    #                                   min_sources, min_p)

    # save the t-value map as an stc. note: can extract np data with stc._data
Code Example #22
    tmin, tmax = time_interval
    times = mean_stc1.times
    mask = (times >= tmin) & (times <= tmax)
    X1 = np.mean(X1[:, :, mask], axis=2)[:, :, None]
    X2 = np.mean(X2[:, :, mask], axis=2)[:, :, None]
    template_stc = copy.deepcopy(template_stc)
    template_stc.crop(tmin, tmin + template_stc.tstep)

assert X1.shape == X2.shape
n_samples, n_vertices, n_times = X1.shape

X1 = np.ascontiguousarray(np.swapaxes(X1, 1, 2).reshape(n_samples, -1))
X2 = np.ascontiguousarray(np.swapaxes(X2, 1, 2).reshape(n_samples, -1))

#connectivity = mne.spatio_temporal_src_connectivity(src, n_times)
connectivity = spatial_tris_connectivity(grade_to_tris(5))

for t in thresholds:
    from time import time
    t0 = time()
    T_obs, clusters, cluster_pv, H0 = mem.cache(
        permutation_cluster_1samp_test,
        ignore=['n_jobs'])(X1 - X2,
                           threshold=t,
                           n_permutations=n_permutations,
                           tail=0,
                           stat_fun=stat_fun,
                           connectivity=connectivity,
                           n_jobs=n_jobs,
                           seed=0)
    print "Time elapsed : %s (s)" % (time() - t0)
Code Example #23
File: ANOVA_groupjlvl.py  Project: neurospin/MTTMEG
def grouplevel_spatial_stats(ListSubj,condition,method,mod,twin,clust_p):
    
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG"
        
    # load a specific STC morphed on fsaverage to get shape info
    stc0_path = (wdir + '/' + ListSubj[0] + '/mne_python/STCS/IcaCorr_' + mod + 
               '_' + ListSubj[0] + '_' + condition[0] + '_pick_oriNone_' + 
                method + '_ico-5-fwd-fsaverage.fif-rh.stc')
    stc0      = mne.read_source_estimate(stc0_path)
    stc0.crop(-0.2,2.5)
    ncond     = len(condition)
    nsub      = len(ListSubj)
    ntimes    = len(stc0.times)    
    nvertices = stc0.data.shape[0]        
            
    # average individual STCs morphed on fsaverage for each cond
    AVG_STC_cond  = np.empty([nvertices, ntimes,  nsub, ncond])
    
    for s,subj in enumerate(ListSubj):            
        for c,cond in enumerate(condition):
            stc_path = (wdir + '/' + subj + '/mne_python/STCS/IcaCorr_' + mod +
                        '_' + subj + '_' + cond + '_pick_oriNone_' + 
                        method + '_ico-5-fwd-fsaverage.fif-rh.stc')
            stc = mne.read_source_estimate(stc_path) 
            stc.crop(-0.2,2.5)
            AVG_STC_cond[:,:,s,c] = stc.data

    # optional: restrict computation to temporal window of interest
    lower_bound = np.where(stc0.times >= twin[0])[0][0]
    upper_bound = np.where(stc0.times >= twin[1])[0][0]
    
    con = mne.spatial_tris_connectivity(grade_to_tris(5))
    # array of shapes (obs, time, vertices)
    X = []
    for c,cond in enumerate(condition):
        X.append(np.mean(np.transpose(AVG_STC_cond[:,lower_bound: upper_bound,:,c], [2, 1, 0]),1))
 
    effects = 'A'
    factor_levels = [3]
   
    # get f-values only.
    def mystat_fun(*args):
        return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                     effects=effects, return_pvals=False)[0]

    p_threshold = clust_p
    f_threshold = f_threshold_mway_rm(nsub, factor_levels=factor_levels, effects=effects, pvalue=p_threshold)
    
    F_obs, clu, clu_p_val, H0  = mne.stats.permutation_cluster_test(X, threshold=f_threshold,stat_fun=mystat_fun,
                                                                        connectivity=con, n_jobs=1,
                                                                    verbose=True, seed = 666)                                       
    
    wdir = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/"
    save_path = (wdir+'GROUP/mne_python/Plot_STATS/' + "_vs_".join(condition))  
    
    if not os.path.exists(save_path):
        os.makedirs(save_path)  
    
    # save cluster stats
    spatial_clust_F = np.array((F_obs, clu, clu_p_val, H0))
    np.save((save_path+'/' + mod + '_' +'cluster_stats_f_'+ "_vs_".join(condition)),
            spatial_clust_F)        
    
    # save F-Map                                                
    tmp = F_obs
    tmp = tmp[:,np.newaxis]
    fsave_vertices = [np.arange(10242), np.arange(10242)]
    stc_Ftest = mne.SourceEstimate(tmp,fsave_vertices,0,stc.tstep) 
    stc_Ftest.save((save_path + '/fmap'  + mod + '_' + "_vs_".join(condition)))
    
    return F_obs, clu, clu_p_val, H0,stc0