Example #1

# Imports needed to run this test snippet; the _make_*_connectivity helpers
# are fixtures defined elsewhere in conpy's test module.
import os.path as op

from numpy.testing import assert_array_equal
from mne import Label
from mne.utils import _TempDir

from conpy import read_connectivity
def test_connectivity_save():
    """Test saving and loading Connectivity objects"""
    all_con = _make_alltoall_connectivity()
    tempdir = _TempDir()
    fname = op.join(tempdir, 'coh.h5')
    all_con.save(fname)
    all_con2 = read_connectivity(fname)

    assert_array_equal(all_con.data, all_con2.data)
    assert_array_equal(all_con.pairs, all_con2.pairs)
    assert_array_equal(all_con.source_degree, all_con2.source_degree)
    assert_array_equal(all_con.vertices[0], all_con2.vertices[0])
    assert_array_equal(all_con.vertices[1], all_con2.vertices[1])

    assert all_con.n_connections == all_con2.n_connections
    assert all_con.n_sources == all_con2.n_sources
    assert all_con.subject == all_con2.subject
    assert all_con.directed == all_con2.directed

    fname = op.join(tempdir, 'coh')  # also test a filename without extension
    all_con.save(fname)
    all_con3 = read_connectivity(fname)
    assert all_con.n_connections == all_con3.n_connections
    assert all_con.n_sources == all_con3.n_sources
    assert all_con.subject == all_con3.subject
    assert all_con.directed == all_con3.directed

    label_con = _make_label_connectivity()
    tempdir = _TempDir()
    fname = op.join(tempdir, 'coh.h5')
    label_con.save(fname)
    label_con2 = read_connectivity(fname)

    assert_array_equal(label_con.data, label_con2.data)
    assert_array_equal(label_con.pairs, label_con2.pairs)
    assert_array_equal(label_con.source_degree, label_con2.source_degree)
    assert label_con.n_connections == label_con2.n_connections
    assert label_con.n_sources == label_con2.n_sources
    assert label_con.subject == label_con2.subject
    assert label_con.directed == label_con2.directed
    assert len(label_con.labels) == len(label_con2.labels)
    assert isinstance(label_con2.labels[0], Label)
Example #2

# Imports assumed for this pipeline snippet; `conditions` and `subjects` are
# presumably defined in the same config module as `fname` (cf. Example 5).
import argparse

import mne
import conpy
from config import fname, conditions, subjects
mne.set_log_level('INFO')

# Handle command line arguments (only --help)
parser = argparse.ArgumentParser(description=__doc__)
args = parser.parse_args()

# Connectivity will be morphed back to the fsaverage brain
fsaverage = mne.read_source_spaces(fname.fsaverage_src)

cons = dict()
for condition in conditions:
    print('Reading connectivity for condition:', condition)
    cons[condition] = list()

    for subject in subjects:
        con_subject = conpy.read_connectivity(
            fname.con(condition=condition, subject=subject))

        # Morph the Connectivity to the fsaverage brain. This is possible,
        # since the original source space was fsaverage morphed to the current
        # subject.
        con_fsaverage = con_subject.to_original_src(
            fsaverage, subjects_dir=fname.subjects_dir)
        # By now, the connection objects should define the same connection
        # pairs between the same vertices.

        cons[condition].append(con_fsaverage)

# Average the connection objects. To save memory, we add the data in-place.
print('Averaging connectivity objects...')
ga_con = dict()
for cond in conditions:
    con = cons[cond][0].copy()
    for other_con in cons[cond][1:]:
        con += other_con
    con /= len(cons[cond])  # compute the mean
    ga_con[cond] = con
Example #3
def cluster_permutation_test(diff_cons, Behav, cluster_threshold, src, alpha=0.05,
                             tail=0, n_permutations=1024, max_spread=0.013,
                             cluster_method='single', seed=None,
                             return_details=False, n_jobs=1, verbose=None):
    """Find significant bundles of connections using a permutation test.

    This is a variation on the cluster permutation test described in [1]_.

    First, each connection is thresholded on the t-value derived from the
    Pearson correlation between its contrast data and the behavioral
    variable. Connections that survive the threshold are grouped into
    "bundles": groups of connections whose start and end points are close
    together. Then, for each bundle, the sum of the t-values of its
    connections is evaluated against those obtained from applying the same
    procedure to random permutations of the behavioral variable. For further
    information, see [2]_.

    Parameters
    ----------
    diff_cons : ndarray, shape (n_subjects, n_connections)
        For each subject, the contrast (condition 1 minus condition 2) of the
        connectivity data. Each row must describe the same connection pairs.
    Behav : ndarray, shape (n_subjects,)
        The behavioral variable to correlate each connection's contrast
        against.
    cluster_threshold : float
        The threshold to use for forming the initial bundles. Only connections
        with a t-value that is either higher than ``cluster_threshold`` or
        lower than ``-cluster_threshold`` are kept.
    src : instance of SourceSpace
        The source space for which the connectivity is defined.
    alpha : float
        The p-value to use for null-hypothesis testing. Using random
        permutations, the distribution of t-values is estimated. Bundles with
        a t-value in the requested percentile will be deemed significant.
        Defaults to 0.05.
    tail : -1 | 0 | 1
        Which "tail" of the distribution of the test statistic to use:

            -1: the hypothesis is a negative correlation with ``Behav``.
             0: the hypothesis is a nonzero correlation with ``Behav``.
             1: the hypothesis is a positive correlation with ``Behav``.

        Defaults to 0, meaning a two-tailed test.
    n_permutations : int
        The number of random permutations to use to estimate the distribution
        of t-values. Defaults to 1024.
    max_spread : float
        Maximum amount the position (in metres) of the start and end points
        of the connections may vary in order for them to be considered part of
        the same "bundle". Defaults to 0.013.
    cluster_method : str
        Linkage method for fclusterdata. Defaults to 'single'. See the
        documentation of ``scipy.cluster.hierarchy.fclusterdata`` for more
        information.
    seed : int | None
        The seed to use for the random number generator. Use this to reproduce
        a specific result. Defaults to ``None`` so a different seed is used
        every time.
    return_details : bool
        Whether to return details about the bundles and the permutation stats.
        Defaults to False.
    n_jobs : int
        Number of jobs to run in parallel. Note that a copy of the data will
        be made for each job in memory. Defaults to 1.
    verbose : bool | str | int | None
        If not ``None``, override default verbose level
        (see :func:`mne.verbose` and :ref:`Logging documentation <tut_logging>`
        for more).

    Returns
    -------
    connection_indices : ndarray, shape (n_connections,)
        Indices of the connections that are part of a significant bundle.
    bundles : list of list of int (optional)
        For each found bundle, the indices of the connections that are part of
        the bundle. Only returned when ``return_details=True`` is specified.
    bundle_ts : ndarray, shape (n_bundles,) (optional)
        For each found bundle, the sum of the t-values for all connections that
        are part of the bundle. These are the t-values that were used to
        determine the initial threshold for the connections. They are not
        indicative of the null-hypothesis.
        Only returned when ``return_details=True`` is specified.
    bundle_ps : ndarray, shape (n_bundles,) (optional)
        For each found bundle, the p-value based on the permutation test,
        indicative of the likelihood that the null-hypothesis holds.
        Only returned when ``return_details=True`` is specified.
    H0 : ndarray, shape (n_permutations,) (optional)
        The maximum observed t-value during each random permutation.
        Only returned when ``return_details=True`` is specified.

    References
    ----------
    .. [1] Maris, E., & Oostenveld, R. (2007). Nonparametric statistical
           testing of EEG- and MEG-data. Journal of Neuroscience Methods,
           164(1), 177-190. https://doi.org/10.1016/j.jneumeth.2007.03.024
    .. [2] van Vliet, M., Liljeström, M., Aro, S., Salmelin, R., & Kujala, J.
           (2018). Analysis of functional connectivity and oscillatory power
           using DICS: from raw MEG data to group-level statistics in Python.
           bioRxiv, 245530, 1-25. https://doi.org/10.1101/245530
    """
    # Note: the original conpy implementation took two lists of
    # VertexConnectivity objects (``cond1``, ``cond2``), validated that they
    # all defined the same connection pairs, and formed the pairwise contrast
    # here. In this variant, the contrast data are passed in directly as
    # ``diff_cons``.

    n_subjs = diff_cons.shape[0]

    # Correlate each connection's contrast with the behavioral variable.
    X_Rval = np.empty(diff_cons.shape[1])
    for con_idx in range(diff_cons.shape[1]):
        X_Rval[con_idx], _ = stats.pearsonr(diff_cons[:, con_idx], Behav)
    # Convert each r-value into a t-value (n_subjs - 2 degrees of freedom).
    X_R_Tval = (X_Rval * np.sqrt(n_subjs - 2)) / np.sqrt(1 - X_Rval ** 2)

    # Get the XYZ coordinates for the vertices between which the connectivity
    # is defined.
    # NOTE: hardcoded template connectivity object; it is only used to obtain
    # the vertex pairs and must match the source space in ``src``.
    con = conpy.read_connectivity('D:/NEMO_analyses/proc/NEMO_neg_vs_pos_contrast_alpha-avg-connectivity.h5')
    grid_points = np.vstack([s['rr'][v]
                             for s, v in zip(src, con.vertices)])
    grid_points = np.hstack([grid_points[inds] for inds in con.pairs])

    logger.info('Forming initial bundles of connectivity...')
    _, bundles, bundle_ts = _do_single_permutation(
        X_R_Tval, cluster_threshold, tail, grid_points, max_spread, cluster_method,
        return_bundles=True
    )
    if len(bundle_ts) == 0:
        warn('No clusters found, returning empty connection_indices')
        if return_details:
            return [], [], [], [], []
        else:
            return []
    else:
        logger.info('Retained %d connections, grouped into %d bundles.' %
                    (np.sum([len(b) for b in bundles]), len(bundles)))

    parallel, my_perm_func, _ = parallel_func(_do_single_permutation, n_jobs,
                                              verbose=verbose)

    logger.info('Permuting %d times...' % n_permutations)
    rng = np.random.RandomState(seed)

    # Build the permutation distribution by shuffling the behavioral scores
    # across subjects. (The original conpy implementation permuted the data
    # with random sign flips of the pairwise contrast instead.)
    Beh_perms = []
    for _ in range(n_permutations):
        Behav_perm = rng.permutation(Behav)
        X_Rval_perm = np.empty(diff_cons.shape[1])
        for con_idx in range(diff_cons.shape[1]):
            X_Rval_perm[con_idx], _ = stats.pearsonr(diff_cons[:, con_idx],
                                                     Behav_perm)
        # Convert each r-value into a t-value, as for the observed data.
        Beh_perms.append((X_Rval_perm * np.sqrt(n_subjs - 2)) /
                         np.sqrt(1 - X_Rval_perm ** 2))

    def permutations():
        """Generator for the permutations with optional progress bar."""
        if verbose:
            progress = ProgressBar(len(Beh_perms),
                                   mesg='Performing permutations')
            for i, Beh_perm in enumerate(Beh_perms):
                progress.update(i)
                yield Beh_perm
        else:
            for Beh_perm in Beh_perms:
                yield Beh_perm

    # Compute the random permutation stats
    perm_stats = parallel(
        my_perm_func(Beh_perm, cluster_threshold, tail, grid_points,
                     max_spread, cluster_method)
        for Beh_perm in permutations()
    )
    H0 = np.concatenate(perm_stats)

    # Compute p-values for each initial bundle
    bundle_ps = np.array([np.mean(np.abs(H0) >= abs(t)) for t in bundle_ts])

    # All connections that are part of a significant bundle
    significant_bundles = [b for b, p in zip(bundles, bundle_ps) if p <= alpha]
    if len(significant_bundles) > 0:
        connection_indices = np.unique(np.concatenate(significant_bundles))
    else:
        connection_indices = []

    logger.info('Found %d bundles with significant p-values, containing in '
                'total %d connections.' %
                (len(significant_bundles), len(connection_indices)))

    if return_details:
        return connection_indices, bundles, bundle_ts, bundle_ps, H0
    else:
        return connection_indices
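
# A minimal, hedged usage sketch of the function above on synthetic data.
# All shapes and argument values are purely illustrative; ``src`` must be a
# source space matching the hardcoded template connectivity file read inside
# the function, so this sketch will not run outside that pipeline.
import numpy as np

rng_demo = np.random.RandomState(0)
diff_cons_demo = rng_demo.randn(20, 500)  # 20 subjects x 500 connections
Behav_demo = rng_demo.randn(20)           # one behavioral score per subject
indices, bundles, bundle_ts, bundle_ps, H0 = cluster_permutation_test(
    diff_cons_demo, Behav_demo, cluster_threshold=2.0, src=src,
    alpha=0.05, n_permutations=1024, seed=0, return_details=True)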
Example #4
import conpy, mne  # Import required Python modules
con = conpy.read_connectivity('contrast-con.h5')  # Load connectivity object
labels = mne.read_labels_from_annot('fsaverage', 'aparc')  # Get atlas parcels
del labels[-1]  # Drop the last parcel (unknown-lh)
# Parcellate the connectivity object and correct for the degree bias
con_parc = con.parcellate(labels, summary='degree', weight_by_degree=True)
con_parc.plot()  # Plot a circle diagram showing connectivity between parcels
# Plot a vertex-wise degree map and correct for the degree bias
brain = con.make_stc('degree', weight_by_degree=True).plot(hemi='split')
brain.add_annotation('aparc')  # Draw the 'aparc' atlas on the degree map
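
# The parcellated object can be saved and read back like any other conpy
# connectivity object: a hedged sketch of the round-trip shown in Example 1
# (the filename here is illustrative).
con_parc.save('contrast-parc-con.h5')
con_parc_reloaded = conpy.read_connectivity('contrast-parc-con.h5')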
Example #5
import conpy
from mayavi import mlab

from config import fname

# Read the connectivity estimates
con = conpy.read_connectivity(fname.ga_con(condition='pruned'))
con_parc = conpy.read_connectivity(fname.ga_con(condition='parcelled'))

# Plot the degree map
stc = con.make_stc(summary='degree', weight_by_degree=False)
fig = mlab.figure(size=(300, 300))
brain = stc.plot(
    subject='fsaverage',
    hemi='both',
    background='white',
    foreground='black',
    time_label='',
    initial_time=0,
    smoothing_steps=5,
    figure=fig,
)
brain.scale_data_colormap(0, 1, stc.data.max(), True)
brain.add_annotation('aparc', borders=2)

# Save some views
mlab.view(40, 90, 450, [0, 0, 0])
mlab.savefig('../paper/figures/degree_rh.png', magnification=4)
mlab.view(130, 70, 450, [0, 0, -10])
mlab.savefig('../paper/figures/degree_lh.png', magnification=4)
Example #6
# load the fsaverage ico4 source space to morph back to
fs_src = mne.read_source_spaces("{}fsaverage_ico4-src.fif".format(meg_dir))

# first make a group contrast of tone baseline vs. resting state; then experimental contrasts

# for each frequency band:
for freq, vals in freqs.items():
    # Collect the connectivity objects into a dictionary keyed by condition,
    # each entry holding a list with one object per subject.
    cons = dict()
    for cond in exp_conds:
        print('Reading connectivity for condition: ', freq, cond)
        cons[cond] = list()

        for meg, mri in sub_dict.items():
            con_subject = conpy.read_connectivity(
                "{dir}nc_{meg}_{cond}_{freq}-connectivity.h5".format(
                    dir=meg_dir, meg=meg, cond=cond, freq=freq))
            # Morph the Connectivity back to the fsaverage brain. By now, the
            # connection objects should define the same connection pairs
            # between the same vertices.
            con_fsaverage = con_subject.to_original_src(fs_src,
                                                        subjects_dir=mri_dir)
            cons[cond].append(con_fsaverage)

    # Average the connection objects. To save memory, we add the data in-place.
    print('Averaging connectivity objects... ', freq)
    ga_con = dict()
    for cond in exp_conds:
        con = cons[cond][0].copy()
        for other_con in cons[cond][1:]:
            con += other_con
        con /= len(cons[cond])  # compute the mean
        ga_con[cond] = con
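
    # Hedged sketch of the presumable final step: persist each grand average,
    # reusing the '-avg-connectivity' naming scheme seen elsewhere in this
    # pipeline (the exact filenames are not shown in the original snippet).
    for cond in exp_conds:
        ga_con[cond].save("{dir}NEMO_{cond}_{freq}-avg-connectivity.h5".format(
            dir=meg_dir, cond=cond, freq=freq))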
Example #7
# get the behavioral data array ready & choose the variable
N_behav = pd.read_csv('{}NEMO_behav.csv'.format(proc_dir))
Behav = np.array(N_behav['Ton_Laut'])
cond = "Ton_Laut"
freqs = {"alpha": list(np.arange(8, 14))}
save_dir = "D:/NEMO_analyses/plots/exp_behav/"

# load the fsaverage ico4 source space to morph back to
fs_src = mne.read_source_spaces("{}fsaverage_ico4-src.fif".format(meg_dir))

for freq, vals in freqs.items():
    diff_cons = []
    for meg, mri in sub_dict.items():
        con_neg = conpy.read_connectivity(
            "{dir}nc_{meg}_neg_{freq}-connectivity.h5".format(dir=meg_dir,
                                                              meg=meg,
                                                              freq=freq))
        con_pos = conpy.read_connectivity(
            "{dir}nc_{meg}_pos_{freq}-connectivity.h5".format(dir=meg_dir,
                                                              meg=meg,
                                                              freq=freq))
        con_diff = con_neg - con_pos
        # Morph the Connectivity back to the fsaverage brain. By now, the
        # connection objects should define the same connection pairs between
        # the same vertices.
        con_fsaverage = con_diff.to_original_src(fs_src, subjects_dir=mri_dir)
        diff_cons.append(con_fsaverage.data)
    diff_cons = np.array(diff_cons)

# Load the group-average difference connectivity to serve as a data container
ga_con_diff = conpy.read_connectivity(
    '{dir}NEMO_neg_vs_pos_contrast_{f}-avg-connectivity.h5'.format(dir=meg_dir,
                                                                   f=freq))
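
# Hedged sketch of how these pieces presumably feed the modified
# cluster_permutation_test from Example 3 (argument values are illustrative,
# not those of the original pipeline):
connection_indices = cluster_permutation_test(
    diff_cons, Behav, cluster_threshold=2.0, src=fs_src,
    alpha=0.05, n_permutations=1024, seed=42)
# The significant connections could then be pruned into the container, e.g.:
# con_pruned = ga_con_diff[connection_indices]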
Example #8

# Imports assumed for this snippet:
import operator
from functools import reduce

import mne
import conpy
# Connectivity objects are morphed back to the fsaverage brain
fsaverage = mne.read_source_spaces('fsaverage-src.fif')

# For each of the subjects, read connectivity for different conditions.
# Re-order the vertices to be in the order of the fsaverage brain.
face = []
scrambled = []
contrast = []
subjects = [
    'sub002', 'sub003', 'sub004', 'sub006', 'sub007', 'sub008', 'sub009',
    'sub010', 'sub011', 'sub012', 'sub013', 'sub014', 'sub015', 'sub017',
    'sub018', 'sub019'
]
for subject in subjects:
    con_face = conpy.read_connectivity('%s-face-con.h5' % subject)
    con_face = con_face.to_original_src(fsaverage)
    con_scrambled = conpy.read_connectivity('%s-scrambled-con.h5' % subject)
    con_scrambled = con_scrambled.to_original_src(fsaverage)
    face.append(con_face)
    scrambled.append(con_scrambled)
    contrast.append(con_face - con_scrambled)  # Create contrast

# Compute the grand-average contrast
contrast = reduce(operator.add, contrast) / len(subjects)

# Perform a permutation test to only retain connections that are part of a
# significant bundle.
connection_indices = conpy.cluster_permutation_test(
    face,
    scrambled,  # The two conditions
    # The remaining arguments are truncated in the original snippet; the
    # values below are illustrative only (see the docstring in Example 3).
    cluster_threshold=5, src=fsaverage, n_permutations=1000, alpha=0.05)
Example #9
exp_conds = ["exp", "ton", "neg", "pos"]
exp_conds = ["neg", "ton"]
bas_conds = ["restbas", "tonbas"]
contrasts = ["tonbas_vs_rest", "neg_vs_pos", "neg_vs_ton"]
# contrasts = ["neg_vs_ton"]

# load the fsaverage ico4 source space to morph back to
fs_src = mne.read_source_spaces("{}fsaverage_ico4-src.fif".format(meg_dir))

for contrast in contrasts:
    # for each frequency band:
    for freq, vals in freqs.items():

        # Read the statistically pruned connectivity and compute
        # parcellations (different summary versions).
        con_clust = conpy.read_connectivity(
            '{dir}NEMO_{cont}_contrast_{f}-pruned-avg-connectivity.h5'.format(
                dir=meg_dir, cont=contrast, f=freq))
        if con_clust.n_connections != 0:
            # Summarize the connectivity in parcels
            labels = mne.read_labels_from_annot('fsaverage',
                                                'aparc.a2009s',
                                                subjects_dir=mri_dir)
            del labels[-2:]  # drop 'unknown' labels
            con_parc_sum = con_clust.parcellate(labels,
                                                summary='sum',
                                                weight_by_degree=True)
            con_parc_sum.save(
                '{dir}NEMO_{cont}_contrast_{f}-pruned-label-sum-connectivity.h5'
                .format(dir=meg_dir, cont=contrast, f=freq))
            # print them out
            print("The following connections by sum remain for  ", contrast,
Example #10
excluded = {
    "NEM_30": "DIU11",
    "NEM_32": "NAG83",
    "NEM_33": "FAO18_fa",
    "NEM_37": "EAM67",
    "NEM_19": "ALC81",
    "NEM_21": "WKI71_fa"
}
# sub_dict = {"NEM_26":"ENR41"}

freqs = ["theta", "alpha", "beta_low", "beta_high", "gamma", "gamma_high"]
freq = "gamma_high"

# Read the pruned connectivity estimates
con = conpy.read_connectivity(
    '{dir}NEMO_neg_vs_ton_contrast_{f}-pruned-avg-connectivity.h5'.format(
        dir=meg_dir, f=freq))
con_parc = conpy.read_connectivity(
    '{dir}NEMO_neg_vs_ton_contrast_{f}-pruned-label-avg-connectivity.h5'.
    format(dir=meg_dir, f=freq))

# Plot the degree map
stc = con.make_stc(summary='degree', weight_by_degree=False)
fig = mlab.figure(size=(300, 300))
brain = stc.plot(
    subject='fsaverage',
    hemi='both',
    surface='white',
    background='white',
    foreground='black',
    time_label='',
    # The call is truncated in the original snippet; completed here as in
    # Example 5, which configures the same plot.
    initial_time=0,
    smoothing_steps=5,
    figure=fig,
)