def test_1020_selection():
    """Test making a 10/20 selection dict.

    Checks that ``make_1020_channel_selections`` rejects non-Info inputs,
    orders picks back-to-front within each selection, and assigns known
    channels to the expected Left/Midline/Right regions.
    """
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname = op.join(base_dir, 'test_raw.set')
    loc_fname = op.join(base_dir, 'test_chans.locs')
    raw = read_raw_eeglab(raw_fname, preload=True)
    montage = read_custom_montage(loc_fname)
    raw = raw.rename_channels(dict(zip(raw.ch_names, montage.ch_names)))
    raw.set_montage(montage)

    # Anything that is not an Info instance must raise TypeError.
    # (Renamed loop variable: the original shadowed the builtin ``input``.)
    for bad_input in ("a_string", 100, raw, [1, 2]):
        pytest.raises(TypeError, make_1020_channel_selections, bad_input)

    sels = make_1020_channel_selections(raw.info)
    # Within each selection, every frontal (F*) channel should be placed
    # AFTER every occipital (O*) channel, i.e. picks run occipital-first.
    # (The original comment claimed the opposite of what the assert checks.)
    for name, picks in sels.items():
        first_frontal = min([
            ii for ii, pick in enumerate(picks)
            if raw.ch_names[pick].startswith("F")
        ])
        last_occipital = max([
            ii for ii, pick in enumerate(picks)
            if raw.ch_names[pick].startswith("O")
        ])
        assert first_frontal > last_occipital

    # Are channels in the correct selection?
    fz_c3_c4 = [raw.ch_names.index(ch) for ch in ("Fz", "C3", "C4")]
    for channel, roi in zip(fz_c3_c4, ("Midline", "Left", "Right")):
        assert channel in sels[roi]
Example #2
0
def permutation_cluster_analysis(epochs, n_permutations=1000, plot=True):
    """Do a spatio-temporal cluster analysis to compare experimental conditions.

    Parameters
    ----------
    epochs : mne.Epochs
        Epochs containing the conditions, one per key in ``epochs.event_id``.
    n_permutations : int
        Number of permutations for the cluster test.
    plot : bool
        If True, plot the evoked contrast with significant points masked.

    Returns
    -------
    t_obs, clusters, cluster_pv, h0
        The output of ``spatio_temporal_cluster_test``.
    """
    tfce = dict(start=.2, step=.2)  # TFCE thresholding parameters
    time_unit = dict(time_unit="s")
    events = list(epochs.event_id.keys())

    # Get the data for each event in epochs.event_id; transpose because the
    # cluster test requires channels to be last. In this case, inference is
    # done over items. In the same manner, we could also conduct the test
    # over, e.g., subjects.
    X = [epochs[e].get_data().transpose(0, 2, 1) for e in events]
    # BUGFIX: run the statistics unconditionally — previously the cluster
    # test only ran when plotting was requested, and nothing was returned.
    t_obs, clusters, cluster_pv, h0 = spatio_temporal_cluster_test(
        X, tfce, n_permutations)
    significant_points = cluster_pv.reshape(t_obs.shape).T < .05

    if plot:
        if len(events) == 2:  # when comparing two events subtract evokeds
            evoked = combine_evoked(
                [epochs[events[0]].average(), -epochs[events[1]].average()],
                weights='equal')
            title = "%s vs %s" % (events[0], events[1])
        else:  # otherwise average all conditions together
            # BUGFIX: the original used ``elif len(events) > 2`` so a single
            # condition left ``evoked``/``title`` unbound (NameError).
            evoked = combine_evoked([epochs[e].average() for e in events],
                                    weights='equal')
            evoked.data /= len(events)
            # join() instead of += loop; also drops the stray trailing space
            # the original slice left behind.
            title = " + ".join(events)
        evoked.plot_joint(title=title, ts_args=time_unit,
                          topomap_args=time_unit)
        # Group channels into Left/Midline/Right ROIs and mask the image
        # plots with the significant space-time points.
        selections = make_1020_channel_selections(evoked.info, midline="12z")
        fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
        axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
        evoked.plot_image(axes=axes, group_by=selections, colorbar=False,
                          show=False, mask=significant_points,
                          show_names="all", titles=None, **time_unit)
        plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()),
                     shrink=.3, label="µV")

    plt.show()
    return t_obs, clusters, cluster_pv, h0
def test_1020_selection():
    """Test making a 10/20 selection dict."""
    base_dir = op.join(testing.data_path(download=False), 'EEGLAB')
    raw_fname = op.join(base_dir, 'test_raw.set')
    loc_fname = op.join(base_dir, 'test_chans.locs')
    raw = read_raw_eeglab(raw_fname, montage=loc_fname)

    # Anything that is not an Info instance must be rejected with TypeError.
    for bad in ("a_string", 100, raw, [1, 2]):
        pytest.raises(TypeError, make_1020_channel_selections, bad)

    sels = make_1020_channel_selections(raw.info)
    # In each selection's pick order, the earliest frontal (F*) channel must
    # come after the latest occipital (O*) channel.
    for picks in sels.values():
        first_frontal = min(idx for idx, pick in enumerate(picks)
                            if raw.ch_names[pick].startswith("F"))
        last_occipital = max(idx for idx, pick in enumerate(picks)
                             if raw.ch_names[pick].startswith("O"))
        assert first_frontal > last_occipital

    # Each reference channel must land in its expected region of interest.
    for ch_name, roi in zip(("Fz", "C3", "C4"), ("Midline", "Left", "Right")):
        assert raw.ch_names.index(ch_name) in sels[roi]
Example #4
0
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.

# We need an evoked object to plot the image to be masked
evoked = mne.combine_evoked(
    [long_words.average(), short_words.average()],
    weights=[1, -1])  # calculate difference wave (long minus short)
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words",
                  ts_args=time_unit,
                  topomap_args=time_unit)  # show difference wave

# Create ROIs by checking channel labels
# (midline="12z": presumably channels whose names end in 1, 2 or z count as
# midline — TODO confirm against the make_1020_channel_selections docs).
selections = make_1020_channel_selections(evoked.info, midline="12z")

# Visualize the results: one image plot per ROI, stacked vertically.
fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
# NOTE(review): ``significant_points`` is not defined in this fragment — it
# is presumably a boolean mask from an earlier cluster test; verify it is in
# scope here.
evoked.plot_image(axes=axes,
                  group_by=selections,
                  colorbar=False,
                  show=False,
                  mask=significant_points,
                  show_names="all",
                  titles=None,
                  **time_unit)
# Shared colorbar for all ROI images.
# NOTE(review): this call is truncated in the source (no closing paren).
plt.colorbar(axes["Left"].images[-1],
             ax=list(axes.values()),
             shrink=.3,
Example #5
0
                                       tmin=0.,
                                       tmax=tmax,
                                       new_id=2)
# NOTE(review): the three lines above are the tail of a call (presumably
# mne.event.define_target_events) whose opening line is not in this fragment.

# Epoch around the re-coded "square" events; tmax is padded slightly so the
# full response window is covered.
epochs = mne.Epochs(raw,
                    events=new_events,
                    tmax=tmax + .1,
                    event_id={"square": 2})

###############################################################################
# Plot using GFP

# Parameters for plotting
order = rts.argsort()  # sorting from fast to slow trials

# Group channels into ROIs based on their 10-20 names.
selections = make_1020_channel_selections(epochs.info, midline="12z")

# The actual plots (GFP); trials sorted by RT, RTs overlaid in seconds.
epochs.plot_image(group_by=selections,
                  order=order,
                  sigma=1.5,
                  overlay_times=rts / 1000.,
                  combine='gfp',
                  ts_args=dict(vlines=[0, rts.mean() / 1000.]))

###############################################################################
# Plot using median

# NOTE(review): this call is garbled in the source — the argument list jumps
# from plot_image keywords to ``file_type='epo.fif')``, which belongs to a
# different snippet; it would raise TypeError as written.
epochs.plot_image(group_by=selections,
                  order=order,
                  sigma=1.5,
                          file_type='epo.fif')
cue_epo = read_epochs(input_file, preload=True)
# Keep only correctly-answered trials for both cue types.
cue_epo = cue_epo['Correct A', 'Correct B'].copy()
# Unbaselined copy cropped to the analysis window (tmax exclusive).
cue_epo_nb = cue_epo.copy().crop(tmin=-0.250, tmax=2.450, include_tmax=False)
cue_epo = cue_epo.apply_baseline(baseline).crop(tmin=-0.300)

# save the generic info structure of cue epochs (i.e., channel names, number of
# channels, etc.).
epochs_info = cue_epo_nb.info
n_channels = len(epochs_info['ch_names'])
n_times = len(cue_epo_nb.times)
times = cue_epo_nb.times
tmin = cue_epo_nb.tmin

# split channels into ROIs for results section
selections = make_1020_channel_selections(epochs_info, midline='12z')

# placeholder for results (keyed by subject id as a string)
betas_evoked = dict()
r2_evoked = dict()

# ###############################################################################
# 2) loop through subjects and extract betas
# NOTE(review): ``subjects``, ``betas`` and ``r2`` come from elsewhere —
# presumably ``betas``/``r2`` are arrays of shape (n_subjects,
# n_channels * n_times); confirm against the caller.
# Each subject's flat vector is reshaped to (channels, times) and wrapped in
# an EvokedArray so MNE plotting/statistics utilities can be applied.
for n_subj, subj in enumerate(subjects):
    subj_beta = betas[n_subj, :]
    subj_beta = subj_beta.reshape((n_channels, n_times))
    betas_evoked[str(subj)] = EvokedArray(subj_beta, epochs_info, tmin)

    subj_r2 = r2[n_subj, :]
    subj_r2 = subj_r2.reshape((n_channels, n_times))
    r2_evoked[str(subj)] = EvokedArray(subj_r2, epochs_info, tmin)
# Boolean mask of space-time points significant at p < .05.
# NOTE(review): ``cluster_pv`` and ``t_obs`` are defined elsewhere —
# presumably the output of a TFCE cluster test; confirm they are in scope.
significant_points = cluster_pv.reshape(t_obs.shape).T < .05
print(str(significant_points.sum()) + " points selected by TFCE ...")

##############################################################################
# The results of these mass univariate analyses can be visualised by plotting
# :class:`mne.Evoked` objects as images (via :class:`mne.Evoked.plot_image`)
# and masking points for significance.
# Here, we group channels by Regions of Interest to facilitate localising
# effects on the head.

# We need an evoked object to plot the image to be masked
# (difference wave: long words minus short words via a negated evoked).
evoked = mne.combine_evoked([long_words.average(), -short_words.average()],
                            weights='equal')  # calculate difference wave
time_unit = dict(time_unit="s")
evoked.plot_joint(title="Long vs. short words", ts_args=time_unit,
                  topomap_args=time_unit)  # show difference wave

# Create ROIs by checking channel labels
selections = make_1020_channel_selections(evoked.info, midline="12z")

# Visualize the results: one image plot per ROI, stacked vertically, with
# significant points unmasked and a single shared colorbar.
fig, axes = plt.subplots(nrows=3, figsize=(8, 8))
axes = {sel: ax for sel, ax in zip(selections, axes.ravel())}
evoked.plot_image(axes=axes, group_by=selections, colorbar=False, show=False,
                  mask=significant_points, show_names="all", titles=None,
                  **time_unit)
plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()), shrink=.3,
             label="uV")

plt.show()
events = mne.find_events(raw)

###############################################################################
# Create Epochs

# define target events:
# 1. find response times: distance between "square" and "rt" events
# 2. extract A. "square" events B. followed by a button press within 700 msec
tmax = .7
sfreq = raw.info["sfreq"]
reference_id, target_id = 2, 1
new_events, rts = define_target_events(events, reference_id, target_id, sfreq,
                                       tmin=0., tmax=tmax, new_id=2)

# Epoch around the re-coded "square" events; tmax is padded slightly.
epochs = Epochs(raw, events=new_events, tmax=tmax + .1,
                event_id={"square": 2}, picks=picks)

###############################################################################
# Plot

# Parameters for plotting
order = rts.argsort()  # sorting from fast to slow trials

# Group channels into ROIs based on their 10-20 names.
selections = make_1020_channel_selections(epochs.info, midline="12z")

# The actual plots: one pass aggregating channels by GFP, one by median.
for combine_measures in ('gfp', 'median'):
    epochs.plot_image(group_by=selections, order=order, sigma=1.5,
                      overlay_times=rts / 1000., combine=combine_measures,
                      ts_args=dict(vlines=[0, rts.mean() / 1000.]))
    # NOTE(review): the lines below reference ``fig``, ``fname`` and
    # ``evoked`` that are not defined in this fragment — they appear spliced
    # in from a different snippet and would raise NameError as written
    # (``evoked.replace`` also suggests a string, not an Evoked).
    fig.axes[0].spines['left'].set_bounds(-8, 8)
    fig.axes[0].spines['bottom'].set_bounds(-.25, 2.5)
    fig.axes[0].xaxis.set_label_coords(0.5, -0.2)
    w, h = fig.get_size_inches()
    fig.set_size_inches(w * 1.15, h * 1.15)
    fig_name = fname.figures + '/Evoked_%s.pdf' % evoked.replace(' ', '_')
    fig.savefig(fig_name, dpi=300)

###############################################################################
# 6) plot difference wave (Cue B - Cue A)

# compute difference wave (negated evoked with equal weights = subtraction)
ab_diff = combine_evoked([ga_b_cue, -ga_a_cue], weights='equal')

# make channel ROIs for easier interpretation of the plot
selections = make_1020_channel_selections(ga_a_cue.info, midline='12z')

# get colormap and create figure (one axis per ROI)
colormap = cm.get_cmap('RdBu_r')
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(23, 5.5))
for s, selection in enumerate(selections):
    picks = selections[selection]

    # Mask points where the absolute difference exceeds 1.1e-6
    # (presumably volts, i.e. 1.1 µV — TODO confirm channel units).
    mask = abs(ab_diff.data) > 1.1e-6

    # NOTE(review): this call is truncated in the source fragment
    # (argument list and closing paren are cut off).
    ab_diff.plot_image(
        xlim=[-0.25, 2.5],
        picks=picks,
        clim=dict(eeg=[-4, 4]),
        colorbar=False,
        axes=ax[s],