Example #1
import numpy as np
from scipy import sparse


def _get_adjacency(paths, study, space, ch_names, selection, src):
    '''Helper function for ``prepare_data``. Returns adjacency for the given
    study and space.'''
    if space != 'src':
        # use right-side channels in adjacency when computing asymmetry
        if 'asy' in selection:
            ch_names = [ch.split('-')[1] for ch in ch_names]
        neighbours = paths.get_data('neighbours', study=study)
        # ``construct_adjacency_matrix`` is assumed to be imported at module
        # level in the original project
        adjacency = construct_adjacency_matrix(neighbours,
                                               ch_names,
                                               as_sparse=True)
    else:
        import mne
        # ``spatial_src_connectivity`` was renamed ``spatial_src_adjacency``
        # in newer MNE, so try the old name first and fall back to the new one
        try:
            adjacency = mne.spatial_src_connectivity(src)
        except AttributeError:
            adjacency = mne.spatial_src_adjacency(src)

        if selection != 'all':
            if isinstance(ch_names, dict):
                from .src import _to_data_vert
                data_vert = _to_data_vert(src, ch_names)
            else:
                data_vert = ch_names
            # restrict the source adjacency to the selected vertices
            idx1, idx2 = np.ix_(data_vert, data_vert)
            adjacency = sparse.coo_matrix(adjacency.toarray()[idx1, idx2])
    return adjacency
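
The vertex-selection branch above restricts a sparse adjacency with np.ix_; a
minimal standalone sketch of that pattern (hypothetical sizes and selection):

import numpy as np
from scipy import sparse

adj = sparse.random(6, 6, density=0.4, format='coo')  # hypothetical adjacency
keep = np.array([0, 2, 5])  # hypothetical vertex selection
idx1, idx2 = np.ix_(keep, keep)
adj_sub = sparse.coo_matrix(adj.toarray()[idx1, idx2])
print(adj_sub.shape)  # (3, 3)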
Example #2
# (``cond_a`` is defined earlier in the original script)
cond_b = 'ton_p_part4'
# list for collecting stcs for group average for plotting
all_diff = []
# list for data arrays for permutation t-test on source
X_diff_s = []  # container for surface data
X_diff_v = []  # container for limbic volume data

## POWER analyses

# load fsaverage source space to morph to; prepare fsaverage adjacency
# matrices for cluster permutation analyses (1 surface, 1 volume)
fs_src = mne.read_source_spaces(
    "{}fsaverage_oct6_mix-src.fif".format(proc_dir))
fs_lh = fs_src.pop(0)
fs_rh = fs_src.pop(0)
fs_surf = mne.SourceSpaces([fs_lh, fs_rh])
adjacency_s = mne.spatial_src_adjacency(fs_surf)
fs_surf_vertices = [s['vertno'] for s in fs_surf]
fs_vol = mne.read_source_spaces("{}fsaverage_limb-src.fif".format(proc_dir))
adjacency_v = mne.spatial_src_adjacency(fs_vol)
fs_limb_vertices = [s['vertno'] for s in fs_vol]
fs_src = mne.read_source_spaces("{}fsaverage_oct6_mix-src.fif".format(
    proc_dir))  # have to reload after popping

## prep subject STCs, make Diff_STC and morph to 'fsaverage' -- collect for group analysis
for meg, mri in sub_dict.items():
    epo = mne.read_epochs("{}{}-epo.fif".format(proc_dir, meg))
    fwd = mne.read_forward_solution("{}{}-fwd.fif".format(proc_dir, meg))
    # load filters for DICS beamformer
    filters = mne.beamformer.read_beamformer('{}{}-gamma-dics.h5'.format(
        proc_dir, meg))
    # load CSDs for conditions to compare, apply filters
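    # the listing breaks off above; a minimal sketch (hypothetical CSD file
    # names; ``cond_a``/``cond_b`` come from earlier in the script) of the
    # step the last comment describes
    csd_a = mne.time_frequency.read_csd("{}{}-{}-csd.h5".format(proc_dir, meg, cond_a))
    csd_b = mne.time_frequency.read_csd("{}{}-{}-csd.h5".format(proc_dir, meg, cond_b))
    stc_a, _ = mne.beamformer.apply_dics_csd(csd_a.mean(), filters)
    stc_b, _ = mne.beamformer.apply_dics_csd(csd_b.mean(), filters)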
Example #3
def stat_fun(*args):
    # get f-values only.
    return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                     effects=effects, return_pvals=return_pvals)[0]
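
# assumed design settings from the surrounding tutorial (hypothetical values,
# shown for illustration): a 2x2 repeated-measures design, interaction only
# factor_levels = [2, 2]
# effects = 'A:B'
# return_pvals = False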


# %%
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial adjacency matrix (instead of spatio-temporal).

# as we only have one hemisphere we only need half the adjacency
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])
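
# for comparison, an explicit spatio-temporal adjacency could be built with
# mne.stats.combine_adjacency (``n_times`` here is an assumed name for the
# number of time points in X); the cluster functions expand a spatial
# adjacency over time internally, so this is usually unnecessary:
# st_adjacency = mne.stats.combine_adjacency(n_times, adjacency)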

# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)

# To speed things up a bit we will ...
n_permutations = 50  # ... run way fewer permutations (reduces sensitivity)

print('Clustering.')
F_obs, clusters, cluster_p_values, H0 = clu = \
    spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=None,
                                 threshold=f_thresh, stat_fun=stat_fun,
                                 n_permutations=n_permutations,
                                 buffer_size=None)
Example #4
# (snippet begins mid-script: X1 and X2 hold simulated data for two groups
# of subjects, and ``stc`` provides the source signal being added)
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]

#    We want to compare the overall activity levels for each subject
X1 = np.abs(X1)  # only magnitude
X2 = np.abs(X2)  # only magnitude

###############################################################################
# Compute statistic
# -----------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial adjacency matrix (instead of spatio-temporal)
print('Computing adjacency.')
adjacency = spatial_src_adjacency(src)

#    Note that X needs to be a list of multi-dimensional array of shape
#    samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]

#    Now let's actually do the clustering. This can take a long time...
#    Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
                                        n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
    spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
                                 threshold=f_threshold, buffer_size=None)
Example #5
File: _inverse.py  Project: nordme/mnefun
import numpy as np
from mne import Label, SourceEstimate, spatial_src_adjacency
from mne.stats import spatio_temporal_cluster_1samp_test


def extract_roi(stc, src, label=None, thresh=0.5):
    """Extract a functional ROI.

    Parameters
    ----------
    stc : instance of SourceEstimate
        The source estimate data. The maximum positive peak will be selected.
        If you want the maximum negative peak, consider passing
        abs(stc) or -stc.
    src : instance of SourceSpaces
        The associated source space.
    label : instance of Label | None
        The label within which to select the peak.
        Can be None to use the entire STC.
    thresh : float
        Threshold value (relative to the peak value) above which vertices
        will be taken.

    Returns
    -------
    roi : instance of Label
        The functional ROI.
    """
    assert isinstance(stc, SourceEstimate)
    if label is None:
        stc_label = stc.copy()
    else:
        stc_label = stc.in_label(label)
    del label
    max_vidx, max_tidx = np.unravel_index(np.argmax(stc_label.data),
                                          stc_label.data.shape)
    max_val = stc_label.data[max_vidx, max_tidx]
    if max_vidx < len(stc_label.vertices[0]):
        hemi = 'lh'
        max_vert = stc_label.vertices[0][max_vidx]
        max_vidx = list(stc.vertices[0]).index(max_vert)
    else:
        hemi = 'rh'
        max_vert = stc_label.vertices[1][max_vidx - len(stc_label.vertices[0])]
        max_vidx = list(stc.vertices[1]).index(max_vert)
        max_vidx += len(stc.vertices[0])
    del stc_label
    assert max_val == stc.data[max_vidx, max_tidx]

    # Get contiguous vertices within ``thresh`` (50% by default) of the peak
    threshold = max_val * thresh
    # verbose='error' silences warnings about holes in the source space
    adjacency = spatial_src_adjacency(src, verbose='error')
    _, clusters, _, _ = spatio_temporal_cluster_1samp_test(
        np.array([stc.data]), threshold, n_permutations=1,
        stat_fun=lambda x: x.mean(0), tail=1,
        adjacency=adjacency)
    for cluster in clusters:
        if max_vidx in cluster[0] and max_tidx in cluster[1]:
            break  # found our cluster
    else:  # in case we did not "break"
        raise RuntimeError('Clustering failed somehow!')
    # ``cluster`` is a (vertex_indices, time_indices) tuple; keep unique verts
    if hemi == 'lh':
        verts = np.unique(stc.vertices[0][cluster[0]])
    else:
        verts = np.unique(stc.vertices[1][cluster[0] - len(stc.vertices[0])])
    func_label = Label(verts, hemi=hemi, subject=stc.subject)
    func_label = func_label.fill(src)
    return func_label, max_vert, max_vidx, max_tidx
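
A hypothetical usage sketch for the helper above; the file names and inputs
are assumptions, not part of the original module:

import mne

stc = mne.read_source_estimate('sample_audvis-meg')   # hypothetical stem
src = mne.read_source_spaces('sample-oct-6-src.fif')  # hypothetical file
roi, peak_vert, peak_vidx, peak_tidx = extract_roi(abs(stc), src, thresh=0.5)
roi.save('sample-functional-roi.label')  # write the ROI to disk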
Example #6
import os.path as op
import time
from datetime import timedelta
from functools import partial

import mne
import numpy as np
from h5io import write_hdf5  # ``write_hdf5`` assumed to come from ``h5io``

# ``meg_dir``, ``spacing``, ``subjects_dir`` and ``rst_dir`` are assumed to
# be defined at module level in the original script


def run_spatio_temporal_cluster_1samp_test(subjects,
                                           cond1,
                                           cond2,
                                           window_l,
                                           window_h,
                                           threshold=None,
                                           step_down_p=0,
                                           n_permutations=1024,
                                           n_jobs=1):
    t0 = time.time()
    contrasts = list()

    for subject in subjects:
        print(f'processing {subject}')
        # condition 1 (auditory in the original contrast)
        fname_1 = op.join(
            meg_dir, subject,
            f'{subject}_audvis-dSPM-{spacing}-inverse-morph-filt-sss-{cond1}-stc'
        )
        # why `crop`: only deal with t > 0 to reduce multiple comparisons
        # why `T`: transpose to the correct shape
        stc_1 = mne.read_source_estimate(fname_1).magnitude().crop(
            window_l, window_h)

        # condition 2 (visual in the original contrast)
        fname_2 = op.join(
            meg_dir, subject,
            f'{subject}_audvis-dSPM-{spacing}-inverse-morph-filt-sss-{cond2}-stc'
        )
        stc_2 = mne.read_source_estimate(fname_2).magnitude().crop(
            window_l, window_h)

        stc_diff = stc_1 - stc_2
        contrasts.append(stc_diff.data.T)

    # Get the right shape of difference data
    contrast_X = np.stack(contrasts, axis=0)
    # release memory
    del stc_1, stc_2, stc_diff, contrasts

    # prepare spatial adjacency
    fsaverage_src = mne.read_source_spaces(
        op.join(subjects_dir, 'fsaverage', 'bem',
                f'fsaverage-{spacing}-src.fif'))
    adjacency = mne.spatial_src_adjacency(fsaverage_src)

    # To use the "hat" adjustment method, sigma=1e-3 may be reasonable
    stat_fun = partial(mne.stats.ttest_1samp_no_p, sigma=1e-3)

    # Permutation test takes a long time to finish!
    t_obs, clusters, cluster_pv, H0 = \
        mne.stats.spatio_temporal_cluster_1samp_test(
            contrast_X,
            adjacency=adjacency,
            n_jobs=n_jobs,
            threshold=threshold,
            stat_fun=stat_fun,
            verbose=True)

    # save the result
    window = f'{int(window_l*1000)}_to_{int(window_h*1000)}'
    contrast_name = f'{cond1}_vs_{cond2}'
    cluster_name = op.join(rst_dir, f'{contrast_name}_{window}.h5')
    write_hdf5(cluster_name,
               dict(t_obs=t_obs,
                    clusters=clusters,
                    cluster_pv=cluster_pv,
                    H0=H0),
               title='mnepython',
               overwrite=True)
    elapsed = time.time() - t0
    print(f'Saved {cluster_name} after {timedelta(seconds=round(elapsed))}')
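
A hypothetical invocation, assuming ``subjects`` and the module-level paths
are configured:

run_spatio_temporal_cluster_1samp_test(subjects,
                                       cond1='auditory', cond2='visual',
                                       window_l=0.0, window_h=0.3,
                                       n_permutations=1024, n_jobs=4)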
Example #7
## PREP PARAMETERS for Power Group Analyses
## initial T-threshold for clustering, based on a p-value of .05 or .01 with
## df = n_subjects - 1; for df = 17 this gives 2.11 (p=.05) or 2.898 (p=.01)
threshold = 2.898
cond_a = 'att'      ## specify the conditions to contrast
cond_b = 'temo'
# fmin = 8.0
# fmax= 12.0
# list for collecting stcs for group average for plotting
all_diff = []
# list for data arrays for permutation t-test on source
X_diff = []

## POWER analyses

# load fsaverage source space to morph to; prepare fsaverage adjacency matrix for cluster permutation analyses
fs_src = mne.read_source_spaces("{}fsaverage_oct6-src.fif".format(proc_dir))
adjacency = mne.spatial_src_adjacency(fs_src)

## prep subject STCs, make Diff_STC and morph to 'fsaverage' -- collect for group analysis
for meg, mri in sub_dict.items():
    # load info and forward
    epo3_info = mne.io.read_info("{}{}_3_temo-epo.fif".format(proc_dir, meg))
    fwd = mne.read_forward_solution("{}{}_temo-fwd.fif".format(proc_dir, meg))
    # # load 'big' CSD for common filters
    # csd = mne.time_frequency.read_csd("{}{}_temo-csd.h5".format(proc_dir,meg))
    # # prep filters and save them
    # filters = mne.beamformer.make_dics(epo3_info,fwd,csd.mean(fmins,fmaxs),pick_ori='max-power',reduce_rank=False,depth=1.0,inversion='single')
    # filters.save('{}{}_temo-dics.h5'.format(proc_dir,meg))
    # del csd
    # load filters for DICS beamformer
    filters = mne.beamformer.read_beamformer('{}{}_temo-dics.h5'.format(proc_dir, meg))
    # load CSDs for conditions to compare, apply filters
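    # the listing breaks off above; a minimal sketch of the remaining loop
    # body (hypothetical file names; the morph step is an assumption)
    csd_a = mne.time_frequency.read_csd("{}{}_{}-csd.h5".format(proc_dir, meg, cond_a))
    csd_b = mne.time_frequency.read_csd("{}{}_{}-csd.h5".format(proc_dir, meg, cond_b))
    stc_a, _ = mne.beamformer.apply_dics_csd(csd_a.mean(), filters)
    stc_b, _ = mne.beamformer.apply_dics_csd(csd_b.mean(), filters)
    stc_diff = stc_a - stc_b
    morph = mne.compute_source_morph(stc_diff, subject_from=mri,
                                     subject_to='fsaverage', src_to=fs_src)
    stc_fs = morph.apply(stc_diff)
    all_diff.append(stc_fs)
    X_diff.append(stc_fs.data.T)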