Example #1
def test_simulate_stc_labels_overlap(_get_fwd_labels):
    """Test generation of source estimate, overlapping labels."""
    fwd, labels = _get_fwd_labels
    mylabels = []
    for i, label in enumerate(labels):
        new_label = Label(vertices=label.vertices,
                          pos=label.pos,
                          values=2 * i * np.ones(len(label.values)),
                          hemi=label.hemi,
                          comment=label.comment)
        mylabels.append(new_label)
    # Adding the last label twice
    mylabels.append(new_label)

    n_times = 10
    tmin = 0
    tstep = 1e-3

    stc_data = np.ones((len(mylabels), n_times))

    # Test allow_overlap=False: overlapping labels must raise
    with pytest.raises(RuntimeError, match='must be non-overlapping'):
        simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep,
                     allow_overlap=False)
    # Test allow_overlap=True
    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep,
                       allow_overlap=True)
    assert_equal(stc.subject, 'sample')
    assert (stc.data.shape[1] == n_times)
    # Some of the elements should be equal to 2 since we have duplicate labels
    assert (2 in stc.data)
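This test (and the ones below) receives a `_get_fwd_labels` pytest fixture that is not shown on this page. A minimal sketch of what such a fixture might look like, assuming the MNE sample dataset and a few labels from the 'aparc' parcellation (the real fixture may differ):

import mne
import pytest


@pytest.fixture(scope='module')
def _get_fwd_labels():
    # Read a forward solution from the sample dataset and convert it to a
    # fixed-orientation solution, as the tests expect.
    data_path = str(mne.datasets.sample.data_path())
    fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
    fwd = mne.read_forward_solution(fwd_fname)
    fwd = mne.convert_forward_solution(fwd, force_fixed=True, use_cps=True)
    # A handful of labels is enough to exercise simulate_stc.
    labels = mne.read_labels_from_annot('sample', parc='aparc',
                                        subjects_dir=data_path + '/subjects')[:3]
    return fwd, labels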
Example #2
def test_simulate_stc(_get_fwd_labels):
    """Test generation of source estimate."""
    fwd, labels = _get_fwd_labels
    mylabels = []
    for i, label in enumerate(labels):
        new_label = Label(vertices=label.vertices,
                          pos=label.pos,
                          values=2 * i * np.ones(len(label.values)),
                          hemi=label.hemi,
                          comment=label.comment)
        mylabels.append(new_label)

    n_times = 10
    tmin = 0
    tstep = 1e-3

    stc_data = np.ones((len(labels), n_times))
    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
    assert_equal(stc.subject, 'sample')

    for label in labels:
        idx = _get_idx_label_stc(label, stc)
        assert (np.all(stc.data[idx] == 1.0))
        assert (stc.data[idx].shape[1] == n_times)

    # test with function
    def fun(x):
        return x**2

    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)

    # Label i was given the value 2 * i (0, 2, 4, ...), so after applying fun
    # the data for label i should equal (2 * i) ** 2

    for i, label in enumerate(labels):
        idx = _get_idx_label_stc(label, stc)
        res = ((2. * i)**2.) * np.ones((len(idx), n_times))
        assert_array_almost_equal(stc.data[idx], res)

    # degenerate conditions
    label_subset = mylabels[:2]
    data_subset = stc_data[:2]
    stc = simulate_stc(fwd['src'], label_subset, data_subset, tmin, tstep, fun)

    pytest.raises(ValueError, simulate_stc, fwd['src'], label_subset,
                  data_subset[:-1], tmin, tstep, fun)
    pytest.raises(RuntimeError, simulate_stc, fwd['src'], label_subset * 2,
                  np.concatenate([data_subset] * 2, axis=0), tmin, tstep, fun)

    i = np.where(fwd['src'][0]['inuse'] == 0)[0][0]
    label_single_vert = Label(vertices=[i],
                              pos=fwd['src'][0]['rr'][i:i + 1, :],
                              hemi='lh')
    stc = simulate_stc(fwd['src'], [label_single_vert], stc_data[:1], tmin,
                       tstep)
    assert_equal(len(stc.lh_vertno), 1)
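The `_get_idx_label_stc` helper used above is not shown either; Example #3 below inlines the same vertex bookkeeping, so a sketch of the helper could look like this (an assumption, not the original implementation):

import numpy as np


def _get_idx_label_stc(label, stc):
    # Rows of stc.data that correspond to the vertices covered by `label`.
    hemi_idx = 0 if label.hemi == 'lh' else 1
    idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
    idx = np.searchsorted(stc.vertices[hemi_idx], idx)
    # Right-hemisphere rows come after all left-hemisphere rows in stc.data.
    if hemi_idx == 1:
        idx += len(stc.vertices[0])
    return idx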
Example #3
def test_generate_stc_single_hemi(_get_fwd_labels):
    """Test generation of source estimate, single hemi."""
    fwd, labels = _get_fwd_labels
    labels_single_hemi = labels[1:]  # keep only labels in one hemisphere

    mylabels = []
    for i, label in enumerate(labels_single_hemi):
        new_label = Label(vertices=label.vertices,
                          pos=label.pos,
                          values=2 * i * np.ones(len(label.values)),
                          hemi=label.hemi,
                          comment=label.comment)
        mylabels.append(new_label)

    n_times = 10
    tmin = 0
    tstep = 1e-3

    stc_data = np.ones((len(labels_single_hemi), n_times))
    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)

    for label in labels_single_hemi:
        idx = _get_idx_label_stc(label, stc)
        assert (np.all(stc.data[idx] == 1.0))
        assert (stc.data[idx].shape[1] == n_times)

    # test with function
    def fun(x):
        return x**2

    stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)

    # Label i was given the value 2 * i (0, 2, 4, ...), so after applying fun
    # the data for label i should equal (2 * i) ** 2

    for i, label in enumerate(labels_single_hemi):
        if label.hemi == 'lh':
            hemi_idx = 0
        else:
            hemi_idx = 1

        idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
        idx = np.searchsorted(stc.vertices[hemi_idx], idx)

        if hemi_idx == 1:
            idx += len(stc.vertices[0])

        res = ((2. * i)**2.) * np.ones((len(idx), n_times))
        assert_array_almost_equal(stc.data[idx], res)
Example #4
    # Convert the center-of-mass vertex index from the surface vertex list to
    # the Label's own vertex list.
    cent_idx = np.where(label.vertices == com)[0][0]

    # Create a mask with 1 at center vertex and zeros elsewhere.
    labels[i].values.fill(0.)
    labels[i].values[cent_idx] = 1.

###############################################################################
# Create source-space data with known signals
# -------------------------------------------
#
# Put known signals onto surface vertices using the array of signals and
# the label masks (stored in labels[i].values).
stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt,
                       value_fun=lambda x: x)

###############################################################################
# Plot original signals
# ---------------------
#
# Note that the original signals are highly concentrated (point) sources.
#
kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4,
              time_unit='s', initial_time=0.05, size=1200,
              views=['lat', 'med'])
clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7])
figs = [mlab.figure(1), mlab.figure(2), mlab.figure(3), mlab.figure(4)]
brain_gen = stc_gen.plot(clim=clim, figure=figs, **kwargs)

###############################################################################
def simulate_eeg(label_names,
                 signal_generator,
                 epoch_duration=3.0,
                 n_trials=10):

    # Get the paths to the sample dataset files
    data_path = sample.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
    trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
    bem_fname = (data_path +
                 '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')

    # Load real data as the template
    raw = mne.io.read_raw_fif(raw_fname)
    raw = raw.crop(0., n_trials * epoch_duration)

    # Load the forward solution
    fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
    fwd = mne.read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
    fwd = mne.pick_types_forward(fwd,
                                 meg=False,
                                 eeg=True,
                                 ref_meg=False,
                                 exclude=raw.info['bads'])

    # Get the labels and their centers
    labels = [
        mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
        for ln in label_names
    ]
    subjects_dir = data_path + '/subjects'
    hemi_to_ind = {'lh': 0, 'rh': 1}
    for i, label in enumerate(labels):
        # The `center_of_mass` function needs labels to have values.
        labels[i].values.fill(1.)

        # Restrict the eligible vertices to be those on the surface under
        # consideration and within the label.
        surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno']
        restrict_verts = np.intersect1d(surf_vertices, label.vertices)
        com = labels[i].center_of_mass(subject='sample',
                                       subjects_dir=subjects_dir,
                                       restrict_vertices=restrict_verts,
                                       surf='white')

        # Convert the center-of-mass vertex index from the surface vertex
        # list to the Label's own vertex list.
        cent_idx = np.where(label.vertices == com)[0][0]

        # Create a mask with 1 at center vertex and zeros elsewhere.
        labels[i].values.fill(0.)
        labels[i].values[cent_idx] = 1.

    n_labels = len(label_names)
    times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
    signal = signal_generator(n_labels, times)

    # Generate the sources in each label
    dt = times[1] - times[0]
    stc = simulate_stc(fwd['src'],
                       labels,
                       signal,
                       times[0],
                       dt,
                       value_fun=lambda x: x)

    # Simulate raw data
    raw_sim = simulate_raw(raw,
                           stc,
                           trans_fname,
                           fwd['src'],
                           bem_fname,
                           cov='simple',
                           iir_filter=[2, -2, 0.4],
                           ecg=False,
                           blink=False,
                           n_jobs=1,
                           verbose=True)

    # Get just the EEG data and the stimulus channel
    raw_eeg = raw_sim.load_data().pick_types(meg=False, eeg=True, stim=True)

    return raw_eeg
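A minimal sketch of how `simulate_eeg` might be called, assuming a simple sinusoidal signal generator and the `Aud-lh`/`Aud-rh` labels shipped with the sample dataset (both are illustrative choices, not part of the snippet above):

import numpy as np


def sine_generator(n_labels, times, freq=10.):
    # One 10 Hz sinusoid per label, 10 nAm peak amplitude.
    return 1e-8 * np.sin(2. * np.pi * freq * times) * np.ones((n_labels, 1))


raw_eeg = simulate_eeg(['Aud-lh', 'Aud-rh'], sine_generator,
                       epoch_duration=3.0, n_trials=10)
raw_eeg.plot()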
    sens_data_list_rois = []
    for ri, roi_name in enumerate(rois):
        # Handle the label that may have been loaded from a parcellation
        # instead of a .label file
        if roi_name == 'G_temp_sup-G_T_transv-rh.label':
            subj_d[di]['rois'].append(aud_labels[si])
        else:
            label_fname = op.join(struct_dir, subj_d[di]['Struct'], 'label', roi_name)
            subj_d[di]['rois'].append(mne.read_label(label_fname,
                                                     subject=subj_d[di]['Struct']))

        # Construct stc with active vertices belonging to the correct label/ROI
        n_verts = len(subj_d[di]['rois'][-1].vertices)
        data_arr = np.array([(1. / n_verts)])[np.newaxis, :]

        temp_stc = simulate_stc(src=subj_d[di]['src'],
                                labels=[subj_d[di]['rois'][ri]],
                                stc_data=data_arr, tmin=0, tstep=1)

        # Create evoked object to plot on scalp map
        fake_evo = mne.simulation.simulate_evoked(fwd, temp_stc,
                                                  subj_d[di]['info'], cov,
                                                  snr=np.inf, verbose=False)
        normed_data = fake_evo.data / np.max(np.abs(fake_evo.data))
        sens_data_list_rois.append(np.squeeze(normed_data))

        subj_d[di]['evo'].append(mne.EvokedArray(normed_data,
                                                 subj_d[di]['info'], 0,
                                                 verbose=False))
    sens_data_list.append(sens_data_list_rois)
    p_bar.update(si)  # update progress bar
    verts_used = fsaverage['lab'].get_vertices_used()

    # Generate the source estimate
    stc_activation = sim_amplitude * np.ones((1, trial_len_ms * n_trials)) / \
        len(s_dict['lab'].vertices)
    # Generate a template raw object
    raw_template = mne.io.RawArray(np.zeros((len(s_dict['info']['chs']),
                                             stc_activation.shape[1])),
                                   info=s_dict['info'])

    # Get scalar for noise covariance to achieve desired SNR
    noiseless_act = np.ones((1, 3)) * sim_amplitude
    raw_template_noiseless = mne.io.RawArray(np.zeros((len(s_dict['info']['chs']),
                                                       noiseless_act.shape[1])),
                                             info=s_dict['info'])
    stc_noiseless = simulate_stc(subj_d[di]['inv']['src'], [subj_d[di]['lab']],
                                 stc_data=noiseless_act, tmin=0, tstep=0.001)
    eeg_noiseless = simulate_raw(raw_template_noiseless, stc_noiseless,
                                 s_dict['inv']['mri_head_t'],
                                 src=s_dict['inv']['src'],
                                 bem=s_dict['bem_fname'], cov=None,
                                 blink=True, n_jobs=n_jobs, verbose=False)

    s_dict['noise_scale'] = get_noise_scale(eeg_noiseless, s_dict)
    print('Noise covariance scalar calculated: %s' % s_dict['noise_scale'])

    # Simulate cortical activations.
    #    Maintain: Noise
    #    Switch:   Noise + [unit current / label area] in RTPJ
    raw_sim = []
    stc_est = []
    stc_est_sph = []