Example #1
def test_make_inverse_operator_fixed():
    """Test MNE inverse computation (fixed orientation)
    """
    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
    fwd_1 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=False)
    fwd_2 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=True)
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)

    # can't make depth-weighted fixed inv without surf ori fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_1,
                  noise_cov, depth=0.8, loose=None, fixed=True)
    # can't make fixed inv with depth weighting without free ori fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
                  noise_cov, depth=0.8, loose=None, fixed=True)

    # compare to C solution w/fixed
    inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=0.8,
                                   loose=None, fixed=True)
    _compare_io(inv_op)
    inverse_operator_fixed = read_inverse_operator(fname_inv_fixed)
    _compare_inverses_approx(inverse_operator_fixed, inv_op, evoked, 2)
    # Inverse has 306 channels - 4 proj = 302
    assert_true(compute_rank_inverse(inverse_operator_fixed) == 302)

    # now compare to C solution
    # note that the forward solution must not be surface-oriented
    # to get equivalency (surf_ori=True changes the normals)
    inv_op = make_inverse_operator(evoked.info, fwd_2, noise_cov, depth=None,
                                   loose=None, fixed=True)
    inverse_operator_nodepth = read_inverse_operator(fname_inv_nodepth)
    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 2)
    # Inverse has 306 channels - 4 proj = 302
    assert_true(compute_rank_inverse(inverse_operator_nodepth) == 302)
Example #2
def test_make_inverse_operator_fixed():
    """Test MNE inverse computation w/ fixed orientation (& no depth weighting)
    """
    fwd_op = read_forward_solution(fname_fwd, surf_ori=True)
    fwd_1 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=False)
    fwd_2 = read_forward_solution(fname_fwd, surf_ori=False, force_fixed=True)
    evoked = _get_evoked()
    noise_cov = read_cov(fname_cov)

    # can't make fixed inv without surf ori fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_1,
                  noise_cov, depth=0.8, loose=None, fixed=True)
    # can't make fixed inv with depth weighting without free ori fwd
    assert_raises(ValueError, make_inverse_operator, evoked.info, fwd_2,
                  noise_cov, depth=0.8, loose=None, fixed=True)
    inv_op = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=0.8,
                                   loose=None, fixed=True)
    _compare_io(inv_op)
    inverse_operator_fixed = read_inverse_operator(fname_inv_fixed)
    _compare_inverses_approx(inverse_operator_fixed, inv_op, evoked, 2)
    # Inverse has 306 channels - 4 proj = 302
    assert_true(compute_rank_inverse(inverse_operator_fixed) == 302)

    # Now without depth weighting, these should be equivalent
    inv_op = make_inverse_operator(evoked.info, fwd_2, noise_cov, depth=None,
                                   loose=None, fixed=True)
    inv_2 = make_inverse_operator(evoked.info, fwd_op, noise_cov, depth=None,
                                  loose=None, fixed=True)
    _compare_inverses_approx(inv_op, inv_2, evoked, 2)
    _compare_io(inv_op)
    # now compare to C solution
    inverse_operator_nodepth = read_inverse_operator(fname_inv_nodepth)
    _compare_inverses_approx(inverse_operator_nodepth, inv_op, evoked, 2)
    # Inverse has 306 channels - 4 proj = 302
    assert_true(compute_rank_inverse(inverse_operator_nodepth) == 302)
Example #3
def test_io_forward():
    """Test IO for forward solutions
    """
    fwd = mne.read_forward_solution(fname)
    fwd = mne.read_forward_solution(fname, force_fixed=True)
    fwd = mne.read_forward_solution(fname, surf_ori=True)
    leadfield = fwd['sol']['data']
Example #4
def test_convert_forward():
    """Test converting forward solution between different representations
    """
    fwd = read_forward_solution(fname_meeg)
    print(fwd)  # __repr__
    assert_true(isinstance(fwd, Forward))
    # look at surface orientation
    fwd_surf = convert_forward_solution(fwd, surf_ori=True)
    fwd_surf_io = read_forward_solution(fname_meeg, surf_ori=True)
    compare_forwards(fwd_surf, fwd_surf_io)
    # go back
    fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
    print(fwd_new)
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    # now go to fixed
    fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
                                         force_fixed=True)
    print(fwd_fixed)
    assert_true(isinstance(fwd_fixed, Forward))
    fwd_fixed_io = read_forward_solution(fname_meeg, surf_ori=False,
                                         force_fixed=True)
    compare_forwards(fwd_fixed, fwd_fixed_io)
    # now go back to cartesian (original condition)
    fwd_new = convert_forward_solution(fwd_fixed)
    print(fwd_new)
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
Example #5
def test_io_forward():
    """Test IO for forward solutions
    """
    fwd = read_forward_solution(fname)
    fwd = read_forward_solution(fname, surf_ori=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (306, 22494))
    assert_equal(len(fwd['sol']['row_names']), 306)
    fname_temp = op.join(temp_dir, 'fwd.fif')
    write_forward_solution(fname_temp, fwd, overwrite=True)

    fwd = read_forward_solution(fname, surf_ori=True)
    fwd_read = read_forward_solution(fname_temp, surf_ori=True)
    leadfield = fwd_read['sol']['data']
    assert_equal(leadfield.shape, (306, 22494))
    assert_equal(len(fwd_read['sol']['row_names']), 306)
    assert_equal(len(fwd_read['info']['chs']), 306)
    assert_true('dev_head_t' in fwd_read['info'])
    assert_true('mri_head_t' in fwd_read)
    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])

    fwd = read_forward_solution(fname, force_fixed=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (306, 22494 / 3))
    assert_equal(len(fwd['sol']['row_names']), 306)
    assert_equal(len(fwd['info']['chs']), 306)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
Example #6
def test_psf_ctf():
    """Test computation of PSFs and CTFs for linear estimators
    """

    inverse_operator = read_inverse_operator(fname_inv)
    forward = read_forward_solution(fname_fwd, force_fixed=False,
                                    surf_ori=True)
    forward = pick_types_forward(forward, meg=True, eeg=False)
    labels = [mne.read_label(ss) for ss in fname_label]

    method = 'MNE'
    n_svd_comp = 2

    # Test PSFs (then CTFs)
    for mode in ('sum', 'svd'):
        stc_psf, psf_ev = point_spread_function(inverse_operator,
                                                forward,
                                                method=method,
                                                labels=labels,
                                                lambda2=lambda2,
                                                pick_ori='normal',
                                                mode=mode,
                                                n_svd_comp=n_svd_comp)

        n_vert, n_samples = stc_psf.shape
        should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
                         inverse_operator['src'][0]['vertno'].shape[0])
        if mode == 'svd':
            should_n_samples = len(labels) * n_svd_comp + 1
        else:
            should_n_samples = len(labels) + 1

        assert_true(n_vert == should_n_vert)
        assert_true(n_samples == should_n_samples)

        n_chan, n_samples = psf_ev.data.shape
        assert_true(n_chan == forward['nchan'])

    forward = read_forward_solution(fname_fwd, force_fixed=True, surf_ori=True)
    forward = pick_types_forward(forward, meg=True, eeg=False)

    # Test CTFs
    for mode in ('sum', 'svd'):
        stc_ctf = cross_talk_function(inverse_operator, forward,
                                      labels, method=method,
                                      lambda2=lambda2,
                                      signed=False, mode=mode,
                                      n_svd_comp=n_svd_comp)

        n_vert, n_samples = stc_ctf.shape
        should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
                         inverse_operator['src'][0]['vertno'].shape[0])
        if mode == 'svd':
            should_n_samples = len(labels) * n_svd_comp + 1
        else:
            should_n_samples = len(labels) + 1

        assert_true(n_vert == should_n_vert)
        assert_true(n_samples == should_n_samples)
Example #7
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
              epochs_preload=True, data_cov=True):
    """Read in data used in tests
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.fiff.Raw(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)
    if all_forward:
        forward_surf_ori = mne.read_forward_solution(fname_fwd, surf_ori=True)
        forward_fixed = mne.read_forward_solution(fname_fwd, force_fixed=True,
                                                  surf_ori=True)
        forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None

    event_id, tmin, tmax = 1, tmin, tmax

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels

    if epochs:
        # Set up pick list: MEG - bad channels
        left_temporal_channels = mne.read_selection('Left-temporal')
        picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False,
                                    stim=True, eog=True, ref_meg=False,
                                    exclude='bads',
                                    selection=left_temporal_channels)

        # Read epochs
        epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                            picks=picks, baseline=(None, 0),
                            preload=epochs_preload,
                            reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
        if epochs_preload:
            epochs.resample(200, npad=0, n_jobs=2)
        evoked = epochs.average()
        info = evoked.info
    else:
        epochs = None
        evoked = None
        info = raw.info

    noise_cov = mne.read_cov(fname_cov)
    noise_cov = mne.cov.regularize(noise_cov, info,
                                   mag=0.05, grad=0.05, eeg=0.1, proj=True)
    if data_cov:
        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
    else:
        data_cov = None

    return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
Example #8
def test_restrict_forward_to_label():
    """Test restriction of source space to label
    """
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)

    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))

    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])

    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]

    nuse_lh = fwd['src'][0]['nuse']
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
    vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
    src_sel_rh += nuse_lh

    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
    assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)

    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)

    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))

    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])

    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]

    nuse_lh = fwd['src'][0]['nuse']
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
    vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
    src_sel_rh += nuse_lh

    assert_equal(fwd_out['sol']['ncol'],
                 3 * (len(src_sel_lh) + len(src_sel_rh)))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
    assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
Example #9
def test_restrict_forward_to_stc():
    """Test restriction of source space to source SourceEstimate
    """
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123

    fwd = read_forward_solution(fname_meeg)
    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                   use_cps=True)
    fwd = pick_types_forward(fwd, meg=True)

    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)

    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_true(isinstance(fwd_out, Forward))

    assert_equal(fwd_out['sol']['ncol'], 20)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])

    fwd = read_forward_solution(fname_meeg)
    fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)

    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)

    fwd_out = restrict_forward_to_stc(fwd, stc)

    assert_equal(fwd_out['sol']['ncol'], 60)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])

    # Test saving the restricted forward object. This only works if all fields
    # are properly accounted for.
    temp_dir = _TempDir()
    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        write_forward_solution(fname_copy, fwd_out, overwrite=True)
    fwd_out_read = read_forward_solution(fname_copy)
    fwd_out_read = convert_forward_solution(fwd_out_read, surf_ori=True,
                                            force_fixed=False)
    compare_forwards(fwd_out, fwd_out_read)
Example #10
def test_convert_forward():
    """Test converting forward solution between different representations
    """
    fwd = read_forward_solution(fname_meeg_grad)
    fwd_repr = repr(fwd)
    assert_true('306' in fwd_repr)
    assert_true('60' in fwd_repr)
    assert_true(fwd_repr)
    assert_true(isinstance(fwd, Forward))
    # look at surface orientation
    fwd_surf = convert_forward_solution(fwd, surf_ori=True)

    # The following test can be removed in 0.16
    fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
    compare_forwards(fwd_surf, fwd_surf_io)
    del fwd_surf_io
    gc.collect()

    # go back
    fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
    assert_true(repr(fwd_new))
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    del fwd_new
    gc.collect()

    # now go to fixed
    fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=True,
                                         force_fixed=True, use_cps=False)
    del fwd_surf
    gc.collect()
    assert_true(repr(fwd_fixed))
    assert_true(isinstance(fwd_fixed, Forward))
    assert_true(is_fixed_orient(fwd_fixed))

    # The following test can be removed in 0.16
    fwd_fixed_io = read_forward_solution(fname_meeg_grad, force_fixed=True)
    assert_true(repr(fwd_fixed_io))
    assert_true(isinstance(fwd_fixed_io, Forward))
    assert_true(is_fixed_orient(fwd_fixed_io))
    compare_forwards(fwd_fixed, fwd_fixed_io)
    del fwd_fixed_io
    gc.collect()

    # now go back to cartesian (original condition)
    fwd_new = convert_forward_solution(fwd_fixed, surf_ori=False,
                                       force_fixed=False)
    assert_true(repr(fwd_new))
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    del fwd, fwd_new, fwd_fixed
    gc.collect()
Example #11
def _load_forward():
    """Load forward models."""
    fwd_free = mne.read_forward_solution(fname_fwd)
    fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False)
    fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False)
    fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True,
                                            use_cps=False)
    fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True,
                                             use_cps=False)
    fwd_vol = mne.read_forward_solution(fname_fwd_vol)
    label = mne.read_label(fname_label)

    return fwd_free, fwd_surf, fwd_fixed, fwd_vol, label
Example #12
def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
    """Read in data used in tests."""
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)[:10]
    raw = mne.io.read_raw_fif(fname_raw, preload=False)
    raw.add_proj([], remove_existing=True)  # we'll subselect so remove proj
    forward = mne.read_forward_solution(fname_fwd)
    if read_all_forward:
        forward_surf_ori = _read_forward_solution_meg(
            fname_fwd, surf_ori=True)
        forward_fixed = _read_forward_solution_meg(
            fname_fwd, force_fixed=True, use_cps=False)
        forward_vol = mne.read_forward_solution(fname_fwd_vol)
        forward_vol = mne.convert_forward_solution(forward_vol, surf_ori=True)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None

    event_id, tmin, tmax = 1, tmin, tmax

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, meg=True, eeg=False,
                           stim=True, eog=True, exclude='bads',
                           selection=left_temporal_channels)

    # Read epochs
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        picks=picks, baseline=(None, 0), preload=True,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
    epochs.resample(200, npad=0, n_jobs=2)
    evoked = epochs.average().crop(0, None)

    # Computing the data and noise cross-spectral density matrices
    if compute_csds:
        data_csd = csd_epochs(epochs, mode='multitaper', tmin=0.045,
                              tmax=None, fmin=8, fmax=12,
                              mt_bandwidth=72.72)
        noise_csd = csd_epochs(epochs, mode='multitaper', tmin=None,
                               tmax=0.0, fmin=8, fmax=12,
                               mt_bandwidth=72.72)
    else:
        data_csd, noise_csd = None, None

    return raw, epochs, evoked, data_csd, noise_csd, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
Example #13
def test_sensitivity_maps():
    """Test sensitivity map computation."""
    fwd = mne.read_forward_solution(fwd_fname)
    fwd = mne.convert_forward_solution(fwd, surf_ori=True)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        projs = read_proj(eog_fname)
        projs.extend(read_proj(ecg_fname))
    decim = 6
    for ch_type in ['eeg', 'grad', 'mag']:
        w = read_source_estimate(sensmap_fname % (ch_type, 'lh')).data
        stc = sensitivity_map(fwd, projs=None, ch_type=ch_type,
                              mode='free', exclude='bads')
        assert_array_almost_equal(stc.data, w, decim)
        assert_true(stc.subject == 'sample')
        # let's just make sure the others run
        if ch_type == 'grad':
            # fixed (2)
            w = read_source_estimate(sensmap_fname % (ch_type, '2-lh')).data
            stc = sensitivity_map(fwd, projs=None, mode='fixed',
                                  ch_type=ch_type, exclude='bads')
            assert_array_almost_equal(stc.data, w, decim)
        if ch_type == 'mag':
            # ratio (3)
            w = read_source_estimate(sensmap_fname % (ch_type, '3-lh')).data
            stc = sensitivity_map(fwd, projs=None, mode='ratio',
                                  ch_type=ch_type, exclude='bads')
            assert_array_almost_equal(stc.data, w, decim)
        if ch_type == 'eeg':
            # radiality (4), angle (5), remaining (6), and  dampening (7)
            modes = ['radiality', 'angle', 'remaining', 'dampening']
            ends = ['4-lh', '5-lh', '6-lh', '7-lh']
            for mode, end in zip(modes, ends):
                w = read_source_estimate(sensmap_fname % (ch_type, end)).data
                stc = sensitivity_map(fwd, projs=projs, mode=mode,
                                      ch_type=ch_type, exclude='bads')
                assert_array_almost_equal(stc.data, w, decim)

    # test corner case for EEG
    stc = sensitivity_map(fwd, projs=[make_eeg_average_ref_proj(fwd['info'])],
                          ch_type='eeg', exclude='bads')
    # test corner case for projs being passed but no valid ones (#3135)
    assert_raises(ValueError, sensitivity_map, fwd, projs=None, mode='angle')
    assert_raises(RuntimeError, sensitivity_map, fwd, projs=[], mode='angle')
    # test volume source space
    fname = op.join(sample_path, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
    fwd = mne.read_forward_solution(fname)
    sensitivity_map(fwd)
Example #14
def test_pick_forward_seeg():
    """Test picking forward with SEEG
    """
    fwd = read_forward_solution(test_forward.fname_meeg)
    counts = channel_indices_by_type(fwd["info"])
    for key in counts.keys():
        counts[key] = len(counts[key])
    counts["meg"] = counts["mag"] + counts["grad"]
    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts["meg"])
    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts["eeg"])
    # should raise exception related to emptiness
    assert_raises(ValueError, pick_types_forward, fwd, meg=False, eeg=False, seeg=True)
    # change last chan from EEG to sEEG
    seeg_name = "OTp1"
    rename_channels(fwd["info"], {"EEG 060": seeg_name})
    for ch in fwd["info"]["chs"]:
        if ch["ch_name"] == seeg_name:
            ch["kind"] = FIFF.FIFFV_SEEG_CH
            ch["coil_type"] = FIFF.FIFFV_COIL_EEG
    fwd["sol"]["row_names"][-1] = fwd["info"]["chs"][-1]["ch_name"]
    counts["eeg"] -= 1
    counts["seeg"] += 1
    # repick & check
    fwd_seeg = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
    assert_equal(fwd_seeg["sol"]["row_names"], [seeg_name])
    assert_equal(fwd_seeg["info"]["ch_names"], [seeg_name])
    # should work fine
    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts["meg"])
    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts["eeg"])
    fwd_ = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
    _check_fwd_n_chan_consistent(fwd_, counts["seeg"])
Example #15
def _get_data():
    """Read in data used in tests."""
    # read forward model
    forward = mne.read_forward_solution(fname_fwd)
    # read data
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.read_events(fname_event)
    event_id, tmin, tmax = 1, -0.1, 0.15

    # decimate for speed
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=left_temporal_channels)
    picks = picks[::2]
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks

    raw.info.normalize_proj()  # avoid projection warnings

    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0.), preload=True, reject=reject)

    noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)

    data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)

    return epochs, data_cov, noise_cov, forward
Example #16
def test_average_forward_solution():
    """Test averaging forward solutions
    """
    fwd = read_forward_solution(fname)
    # input not a list
    assert_raises(TypeError, average_forward_solutions, 1)
    # list is too short
    assert_raises(ValueError, average_forward_solutions, [])
    # negative weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
    # all zero weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
    # weights not same length
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
    # list does not only have all dict()
    assert_raises(TypeError, average_forward_solutions, [1, fwd])

    # try an easy case
    fwd_copy = average_forward_solutions([fwd])
    assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])

    # modify a fwd solution, save it, use MNE to average with old one
    fwd_copy['sol']['data'] *= 0.5
    fname_copy = op.join(temp_dir, 'fwd.fif')
    write_forward_solution(fname_copy, fwd_copy, overwrite=True)
    cmd = ('mne_average_forward_solutions', '--fwd', fname, '--fwd',
           fname_copy, '--out', fname_copy)
    run_subprocess(cmd)

    # now let's actually do it, with one filename and one fwd
    fwd_ave = average_forward_solutions([fwd, fwd_copy])
    assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
Example #17
def _get_fwd_labels():
    fwd = read_forward_solution(fname_fwd)
    fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=True)
    fwd = pick_types_forward(fwd, meg=True, eeg=False)
    labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
                         '%s.label' % label)) for label in label_names]
    return fwd, labels
Example #18
def test_gamma_map():
    """Test Gamma MAP inverse"""
    forward = read_forward_solution(fname_fwd, force_fixed=False,
                                    surf_ori=True)
    forward = pick_types_forward(forward, meg=False, eeg=True)
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
    evoked.resample(50)
    evoked.crop(tmin=0, tmax=0.3)

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info)

    alpha = 0.2
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                    xyz_same_gamma=True, update_mode=1, verbose=False)
    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
    assert_true(np.concatenate(stc.vertices)[idx] == 96397)

    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                    xyz_same_gamma=False, update_mode=1, verbose=False)
    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
    assert_true(np.concatenate(stc.vertices)[idx] == 82010)

    # force fixed orientation
    stc, res = gamma_map(evoked, forward, cov, alpha, tol=1e-5,
                         xyz_same_gamma=False, update_mode=2,
                         loose=None, return_residual=True, verbose=False)
    idx = np.argmax(np.sum(stc.data ** 2, axis=1))
    # assert_true(np.concatenate(stc.vertices)[idx] == 83398)  # XXX FIX
    assert_array_almost_equal(evoked.times, res.times)
Example #19
def run_inverse(subject_id):
    subject = "sub%03d" % subject_id
    print("processing subject: %s" % subject)
    data_path = op.join(meg_dir, subject)

    fname_ave = op.join(data_path, '%s-ave.fif' % subject)
    fname_cov = op.join(data_path, '%s-cov.fif' % subject)
    fname_fwd = op.join(data_path, '%s-meg-%s-fwd.fif' % (subject, spacing))
    fname_inv = op.join(data_path, '%s-meg-%s-inv.fif' % (subject, spacing))

    evokeds = mne.read_evokeds(fname_ave, condition=[0, 1, 2, 3, 4, 5])
    cov = mne.read_cov(fname_cov)
    # cov = mne.cov.regularize(cov, evokeds[0].info,
    #                                mag=0.05, grad=0.05, eeg=0.1, proj=True)

    forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
    # forward = mne.pick_types_forward(forward, meg=True, eeg=False)

    # make an M/EEG, MEG-only, and EEG-only inverse operators
    info = evokeds[0].info
    inverse_operator = make_inverse_operator(info, forward, cov,
                                             loose=0.2, depth=0.8)

    write_inverse_operator(fname_inv, inverse_operator)

    # Compute inverse solution
    snr = 3.0
    lambda2 = 1.0 / snr ** 2

    for evoked in evokeds:
        stc = apply_inverse(evoked, inverse_operator, lambda2, "dSPM",
                            pick_ori=None)

        stc.save(op.join(data_path, 'mne_dSPM_inverse-%s' % evoked.comment))
Example #20
def forward_operator(fwd, src, subjects_dir=None, parc="aparc", name=None):
    """Load forward operator as ``NDVar``

    Parameters
    ----------
    fwd : str | mne Forward
        MNE Forward solution, or path to forward solution.
    src : str
        Tag describing the source space (e.g., "ico-4").
    subjects_dir : str
        Location of the MRI subjects directory.
    parc : str
        Parcellation to load (corresponding to existing annot files; default
        'aparc').
    name : str
        Name the NDVar (default is the filename if a path is provided,
        otherwise "fwd").

    Returns
    -------
    fwd : NDVar  (sensor, source)
        NDVar containing the gain matrix.
    """
    if isinstance(fwd, basestring):
        if name is None:
            name = os.path.basename(fwd)
        fwd = mne.read_forward_solution(fwd, force_fixed=True)
    elif name is None:
        name = "fwd"
    sensor = sensor_dim(fwd["info"])
    assert np.all(sensor.names == fwd["sol"]["row_names"])
    source = SourceSpace.from_mne_source_spaces(fwd["src"], src, subjects_dir, parc)
    return NDVar(fwd["sol"]["data"], (sensor, source), {}, name)
Example #21
def test_pick_forward_seeg():
    fwd = read_forward_solution(test_forward.fname_meeg)
    counts = channel_indices_by_type(fwd['info'])
    for key in counts.keys():
        counts[key] = len(counts[key])
    counts['meg'] = counts['mag'] + counts['grad']
    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts['meg'])
    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts['eeg'])
    # should raise exception related to emptiness
    assert_raises(ValueError, pick_types_forward, fwd, meg=False, eeg=False, 
                  seeg=True)
    # change last chan from EEG to sEEG
    seeg_name = 'OTp1'
    rename_channels(fwd['info'], {'EEG 060': (seeg_name, 'seeg')})
    fwd['sol']['row_names'][-1] = fwd['info']['chs'][-1]['ch_name']
    counts['eeg'] -= 1
    counts['seeg'] += 1
    # repick & check
    fwd_seeg = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
    assert_equal(fwd_seeg['sol']['row_names'], [seeg_name])
    assert_equal(fwd_seeg['info']['ch_names'], [seeg_name])
    # should work fine
    fwd_ = pick_types_forward(fwd, meg=True, eeg=False, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts['meg'])
    fwd_ = pick_types_forward(fwd, meg=False, eeg=True, seeg=False)
    _check_fwd_n_chan_consistent(fwd_, counts['eeg'])
    fwd_ = pick_types_forward(fwd, meg=False, eeg=False, seeg=True)
    _check_fwd_n_chan_consistent(fwd_, counts['seeg'])
Example #22
def test_plot_volume_source_estimates():
    """Test interactive plotting of volume source estimates."""
    forward = read_forward_solution(fwd_fname)
    sample_src = forward['src']

    vertices = [s['vertno'] for s in sample_src]
    n_verts = sum(len(v) for v in vertices)
    n_time = 2
    data = np.random.RandomState(0).rand(n_verts, n_time)
    vol_stc = VolSourceEstimate(data, vertices, 1, 1)

    for mode in ['glass_brain', 'stat_map']:
        with pytest.warns(None):  # sometimes get scalars/index warning
            fig = vol_stc.plot(sample_src, subject='sample',
                               subjects_dir=subjects_dir,
                               mode=mode)
        # [ax_time, ax_y, ax_x, ax_z]
        for ax_idx in [0, 2, 3, 4]:
            _fake_click(fig, fig.axes[ax_idx], (0.3, 0.5))
        fig.canvas.key_press_event('left')
        fig.canvas.key_press_event('shift+right')

    with pytest.raises(ValueError, match='must be one of'):
        vol_stc.plot(sample_src, 'sample', subjects_dir, mode='abcd')
    vertices.append([])
    surface_stc = SourceEstimate(data, vertices, 1, 1)
    with pytest.raises(ValueError, match='Only Vol'):
        plot_volume_source_estimates(surface_stc, sample_src, 'sample',
                                     subjects_dir)
    with pytest.raises(ValueError, match='Negative colormap limits'):
        vol_stc.plot(sample_src, 'sample', subjects_dir,
                     clim=dict(lims=[-1, 2, 3], kind='value'))
Example #23
def test_gamma_map():
    """Test Gamma MAP inverse"""
    forward = read_forward_solution(fname_fwd, force_fixed=False,
                                    surf_ori=True)
    forward = pick_types_forward(forward, meg=False, eeg=True)
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to nice window near samp border

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info)

    alpha = 0.5
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=True, update_mode=1)
    _check_stc(stc, evoked, 68477)

    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=1)
    _check_stc(stc, evoked, 82010)

    # force fixed orientation
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2,
                    loose=None, return_residual=False)
    _check_stc(stc, evoked, 85739, 20)
Example #24
def test_io_forward():
    """Test IO for forward solutions
    """
    fwd = read_forward_solution(fname)
    fwd = read_forward_solution(fname, surf_ori=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (306, 22494))
    assert_equal(len(fwd['sol']['row_names']), 306)

    fwd = read_forward_solution(fname, force_fixed=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (306, 22494 / 3))
    assert_equal(len(fwd['sol']['row_names']), 306)
    assert_equal(len(fwd['info']['chs']), 306)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
Example #25
def test_gamma_map():
    """Test Gamma MAP inverse"""
    forward = read_forward_solution(fname_fwd)
    forward = convert_forward_solution(forward, surf_ori=True)

    forward = pick_types_forward(forward, meg=False, eeg=True)
    evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
                          proj=False)
    evoked.resample(50, npad=100)
    evoked.crop(tmin=0.1, tmax=0.16)  # crop to window around peak

    cov = read_cov(fname_cov)
    cov = regularize(cov, evoked.info)

    alpha = 0.5
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=True, update_mode=1)
    _check_stc(stc, evoked, 68477)

    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=1)
    _check_stc(stc, evoked, 82010)

    dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                     xyz_same_gamma=False, update_mode=1,
                     return_as_dipoles=True)
    assert_true(isinstance(dips[0], Dipole))
    stc_dip = make_stc_from_dipoles(dips, forward['src'])
    _check_stcs(stc, stc_dip)

    # force fixed orientation
    stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
                    xyz_same_gamma=False, update_mode=2,
                    loose=0, return_residual=False)
    _check_stc(stc, evoked, 85739, 20)
Example #26
def test_apply_inverse_sphere():
    """Test applying an inverse with a sphere model (rank-deficient)."""
    evoked = _get_evoked()
    evoked.pick_channels(evoked.ch_names[:306:8])
    evoked.info['projs'] = []
    cov = make_ad_hoc_cov(evoked.info)
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    fwd = read_forward_solution(fname_fwd)
    vertices = [fwd['src'][0]['vertno'][::5],
                fwd['src'][1]['vertno'][::5]]
    stc = SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
                         vertices, 0., 1.)
    fwd = restrict_forward_to_stc(fwd, stc)
    fwd = make_forward_solution(evoked.info, fwd['mri_head_t'], fwd['src'],
                                sphere, mindist=5.)
    evoked = EvokedArray(fwd['sol']['data'].copy(), evoked.info)
    assert fwd['sol']['nrow'] == 39
    assert fwd['nsource'] == 101
    assert fwd['sol']['ncol'] == 303
    tempdir = _TempDir()
    temp_fname = op.join(tempdir, 'temp-inv.fif')
    inv = make_inverse_operator(evoked.info, fwd, cov, loose=1.)
    # This forces everything to be float32
    write_inverse_operator(temp_fname, inv)
    inv = read_inverse_operator(temp_fname)
    stc = apply_inverse(evoked, inv, method='eLORETA',
                        method_params=dict(eps=1e-2))
    # assert zero localization bias
    assert_array_equal(np.argmax(stc.data, axis=0),
                       np.repeat(np.arange(101), 3))
Example #27
def test_apply_mne_inverse_fixed_raw():
    """Test MNE with fixed-orientation inverse operator on Raw
    """
    raw = fiff.Raw(fname_raw)
    start = 3
    stop = 10
    _, times = raw[0, start:stop]
    label_lh = read_label(fname_label % 'Aud-lh')

    # create a fixed-orientation inverse operator
    fwd = read_forward_solution(fname_fwd, force_fixed=False, surf_ori=True)
    noise_cov = read_cov(fname_cov)
    inv_op = make_inverse_operator(raw.info, fwd, noise_cov,
                                   loose=None, depth=0.8, fixed=True)

    stc = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
                            label=label_lh, start=start, stop=stop, nave=1,
                            pick_ori=None, buffer_size=None)

    stc2 = apply_inverse_raw(raw, inv_op, lambda2, "dSPM",
                             label=label_lh, start=start, stop=stop, nave=1,
                             pick_ori=None, buffer_size=3)

    assert_true(stc.subject == 'sample')
    assert_true(stc2.subject == 'sample')
    assert_array_almost_equal(stc.times, times)
    assert_array_almost_equal(stc2.times, times)
    assert_array_almost_equal(stc.data, stc2.data)
Example #28
def test_make_forward_solution_sphere():
    """Test making a forward solution with a sphere model."""
    temp_dir = _TempDir()
    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    write_source_spaces(fname_src_small, src)  # to enable working with MNE-C
    out_name = op.join(temp_dir, 'tmp-fwd.fif')
    run_subprocess(['mne_forward_solution', '--meg', '--eeg',
                    '--meas', fname_raw, '--src', fname_src_small,
                    '--mri', fname_trans, '--fwd', out_name])
    fwd = read_forward_solution(out_name)
    sphere = make_sphere_model(verbose=True)
    fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
                                   meg=True, eeg=True, verbose=True)
    _compare_forwards(fwd, fwd_py, 366, 108,
                      meg_rtol=5e-1, meg_atol=1e-6,
                      eeg_rtol=5e-1, eeg_atol=5e-1)
    # Since the above is pretty lax, let's check a different way
    for meg, eeg in zip([True, False], [False, True]):
        fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
        assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
                                    fwd_py_['sol']['data'].ravel())[0, 1],
                        1.0, rtol=1e-3)
Example #29
def test_make_forward_solution_sphere():
    """Test making a forward solution with a sphere model"""
    temp_dir = _TempDir()
    fname_src_small = op.join(temp_dir, "sample-oct-2-src.fif")
    src = setup_source_space("sample", fname_src_small, "oct2", subjects_dir=subjects_dir, add_dist=False)
    out_name = op.join(temp_dir, "tmp-fwd.fif")
    run_subprocess(
        [
            "mne_forward_solution",
            "--meg",
            "--eeg",
            "--meas",
            fname_raw,
            "--src",
            fname_src_small,
            "--mri",
            fname_trans,
            "--fwd",
            out_name,
        ]
    )
    fwd = read_forward_solution(out_name)
    sphere = make_sphere_model(verbose=True)
    fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere, meg=True, eeg=True, verbose=True)
    _compare_forwards(fwd, fwd_py, 366, 108, meg_rtol=5e-1, meg_atol=1e-6, eeg_rtol=5e-1, eeg_atol=5e-1)
    # Since the above is pretty lax, let's check a different way
    for meg, eeg in zip([True, False], [False, True]):
        fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
        assert_allclose(np.corrcoef(fwd_["sol"]["data"].ravel(), fwd_py_["sol"]["data"].ravel())[0, 1], 1.0, rtol=1e-3)
Example #30
    def compute_forward_and_inverse_solutions(self, orientation = 'fixed'):
        """Compute the forward and inverse solutions for this subject."""

        info = self.grand_average_evoked.info
        trans = mne.read_trans(op.join(self.processed_files, '%s-trans.fif' %self.subject))
        src = glob.glob(op.join(self.subjects_dir, self.subject, 'bem', '*-ico-4-src.fif'))[0]
        bem = glob.glob(op.join(self.subjects_dir, self.subject, 'bem', '*-bem-sol.fif'))[0]
        fname = op.join(self.processed_files, '%s_forward.fif' %self.subject)

        # check if fwd exists, if not, make it
        if not op.exists(fname):
            fwd = mne.make_forward_solution(info = info, trans = trans, src = src,
                                            bem = bem, fname = fname, meg = True, eeg = False,
                                            overwrite = True, ignore_ref = True)

            self.add_preprocessing_notes("Forward solution generated and saved to %s" %fname)

        if orientation == 'fixed':
            force_fixed = True
        else:
            force_fixed = False

        fwd = mne.read_forward_solution(fname,force_fixed=force_fixed)

        self.forward_solution = fwd

        inv = mne.minimum_norm.make_inverse_operator(info, self.forward_solution, self.cov_reg, loose = None, depth = None, fixed = force_fixed)
        self.inverse_solution = inv
        mne.minimum_norm.write_inverse_operator(op.join(self.processed_files, '%s_inv.fif' %self.subject), self.inverse_solution)

        self.add_preprocessing_notes("Inverse solution generated and saved to %s" %op.join(self.processed_files, '%s_inv.fif' %self.subject))
        return fwd, inv
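A hedged usage sketch for the method above (assuming `pipeline` is an instance of the preprocessing class this method belongs to, which the snippet does not show):

# Hypothetical call on a pipeline instance; the results are also stored on
# pipeline.forward_solution and pipeline.inverse_solution.
fwd, inv = pipeline.compute_forward_and_inverse_solutions(orientation='fixed')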
Example #31
def faces_dataset(subject_id,
                  selection='all',
                  pca=False,
                  subsample=1,
                  justdims=True,
                  cnn=True,
                  locate=True,
                  treat=None,
                  rnn=True,
                  Wt=None):
    print('Treat:', treat)
    study_path = '/home/jcasa/mne_data/openfmri'
    subjects_dir = os.path.join(study_path, 'subjects')
    meg_dir = os.path.join(study_path, 'MEG')
    os.environ["SUBJECTS_DIR"] = subjects_dir
    spacing = 'oct5'
    mindist = 5

    subject = "sub%03d" % subject_id
    print("processing %s" % subject)

    invname = '%s-meg-%s-inv.fif' % (subject, spacing)
    invpath = os.path.join(os.path.join(meg_dir, subject), invname)
    fwdname = '%s-meg-%s-fwd.fif' % (subject, spacing)
    fwdpath = os.path.join(os.path.join(meg_dir, subject), fwdname)
    eponame = '%s-epo.fif' % (subject)

    #    invname = '%s-meg-%s-inv.fif' % (subject,spacing)
    #    invpath = os.path.join(os.path.join(meg_dir,subject),invname)
    #    fwdname = '%s-meg-%s-fwd.fif' % (subject,spacing)
    #    fwdpath = os.path.join(os.path.join(meg_dir,subject),fwdname)
    #    eponame = '%s-grad-epo.fif' % (subject)

    epopath = os.path.join(os.path.join(meg_dir, subject), eponame)
    epochs = mne.read_epochs(epopath, verbose=False)
    #print epochs.info
    if treat is not None:
        epochs_eeg = epochs[treat].copy().pick_types(eeg=True, meg=False)
        epochs_meg = epochs[treat].copy().pick_types(meg=True, eeg=False)
    else:
        epochs_eeg = epochs.copy().pick_types(eeg=True, meg=False)
        epochs_meg = epochs.copy().pick_types(meg=True, eeg=False)

    fwd = mne.read_forward_solution(fwdpath, verbose=False)
    inv = read_inverse_operator(invpath, verbose=False)
    method = "MNE"
    snr = 3.
    lambda2 = 1. / snr**2
    if treat is not None:
        stc = apply_inverse_epochs(epochs[treat],
                                   inv,
                                   lambda2,
                                   method=method,
                                   pick_ori=None,
                                   verbose=False)
    else:
        stc = apply_inverse_epochs(epochs,
                                   inv,
                                   lambda2,
                                   method=method,
                                   pick_ori=None,
                                   verbose=False)

    if Wt is None:
        print('Precalculate PCA weights:')
        #weight PCA matrix. Uses 'treat' - so to apply across all treatments, use treat=None
        Wt = Wt_calc(stc, epochs_eeg, epochs_meg, GRID)

    if justdims is True:
        meas_dims, m, p, n_steps, total_batch_size, Wt = prepro(
            stc,
            epochs,
            epochs_eeg,
            epochs_meg,
            subject,
            selection=selection,
            pca=pca,
            subsample=subsample,
            justdims=justdims,
            cnn=cnn,
            locate=locate,
            rnn=rnn,
            Wt=Wt)
        return meas_dims, m, p, n_steps, total_batch_size, Wt
    else:
        meas_img_all, qtrue_all, meas_dims, m, p, n_steps, total_batch_size, Wt = prepro(
            stc,
            epochs,
            epochs_eeg,
            epochs_meg,
            subject,
            selection=selection,
            pca=pca,
            subsample=subsample,
            justdims=justdims,
            cnn=cnn,
            locate=locate,
            rnn=rnn,
            Wt=Wt)
        return meas_img_all, qtrue_all, meas_dims, m, p, n_steps, total_batch_size, Wt
Example #32
import matplotlib.pyplot as plt

from mne import read_forward_solution, read_proj, sensitivity_map

from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()

subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'

fwd = read_forward_solution(fname)

projs = read_proj(ecg_fname)
# take only one projection per channel type
projs = projs[::2]

# Compute sensitivity map
ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')

# %%
# Show sensitivity map

plt.hist(ssp_ecg_map.data.ravel())
plt.show()

args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)),
Example #33
def test_localization_bias():
    """Test inverse localization bias for minimum-norm solvers."""
    # Identity input
    evoked = _get_evoked()
    evoked.pick_types(meg=True, eeg=True, exclude=())
    evoked = EvokedArray(np.eye(len(evoked.data)), evoked.info)
    noise_cov = read_cov(fname_cov)
    # restrict to limited set of verts (small src here) and one hemi for speed
    fwd_orig = read_forward_solution(fname_fwd)
    vertices = [fwd_orig['src'][0]['vertno'].copy(), []]
    stc = SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
                         vertices, 0., 1.)
    fwd_orig = restrict_forward_to_stc(fwd_orig, stc)

    #
    # Fixed orientation (not very different)
    #
    fwd = fwd_orig.copy()
    inv_fixed = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.,
                                      depth=0.8)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1])
    for method, lower, upper in (('MNE', 83, 87),
                                 ('dSPM', 96, 98),
                                 ('sLORETA', 100, 100),
                                 ('eLORETA', 100, 100)):
        inv_op = apply_inverse(evoked, inv_fixed, lambda2, method).data
        loc = np.abs(np.dot(inv_op, fwd))
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Loose orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.2,
                                     depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    for method, lower, upper in (('MNE', 25, 35),
                                 ('dSPM', 25, 35),
                                 ('sLORETA', 35, 40),
                                 ('eLORETA', 40, 45)):
        inv_op = apply_inverse(evoked, inv_free, lambda2, method,
                               pick_ori='vector').data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Free orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info, fwd, noise_cov, loose=1.,
                                     depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    force_kwargs = dict(method_params=dict(force_equal=True))
    for method, lower, upper, kwargs in (('MNE', 45, 55, {}),
                                         ('dSPM', 40, 45, {}),
                                         ('sLORETA', 90, 95, {}),
                                         ('eLORETA', 90, 95, force_kwargs),
                                         ('eLORETA', 100, 100, {}),
                                         ):
        inv_op = apply_inverse(evoked, inv_free, lambda2, method,
                               pick_ori='vector', **kwargs).data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method
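The bias check above boils down to: take the absolute resolution matrix (inverse operator times gain matrix) and count how often the largest entry in each column falls on the corresponding source. A self-contained toy sketch with NumPy only (shapes and values are illustrative, not MNE data):

import numpy as np

n_src = 4
rng = np.random.RandomState(0)
inv_op = np.eye(n_src) + 0.01 * rng.rand(n_src, n_src)  # stand-in inverse operator
fwd_gain = np.eye(n_src)                                 # stand-in gain matrix
resolution = np.abs(np.dot(inv_op, fwd_gain))
want = np.arange(n_src)
perc = (want == np.argmax(resolution, axis=0)).mean() * 100
print(perc)  # 100.0 here, i.e. no localization bias in this toy case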
Example #34
def read_forward_solution_eeg(fname, **kwargs):
    """Read EEG forward."""
    fwd = convert_forward_solution(read_forward_solution(fname), copy=False,
                                   **kwargs)
    fwd = pick_types_forward(fwd, meg=False, eeg=True)
    return fwd
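A possible call for the helper above; the keyword arguments are passed straight through to convert_forward_solution, and fname_fwd stands for whichever forward file the surrounding tests use:

fwd_eeg = read_forward_solution_eeg(fname_fwd, surf_ori=True, force_fixed=True)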
Example #35
def CatEpochs(subject, CondComb, CovSource, Method, modality, twin, dist, ori):
     
     snr = 3.0
     lambda2 = 1.0 / snr **2        
 
     if CondComb[0] != CondComb[1]:
         X,  stcs = [], []
         epochs_list, epochs_count, evoked_list, inverse_operator_list = [], [], [], []
         for c,cond in enumerate(CondComb):        
 
             epochs  = mne.read_epochs(wdir + subject + 
                                       "/mne_python/EPOCHS/MEEG_epochs_icacorr_" + cond +
                                       '_' + subject + "-epo.fif")
             
             epochs.crop(tmin = -0.2, tmax = 1)
             # which modality?
             if modality == 'MEG':
                 megtag=True  
                 eegtag=False
                 fname_fwd  = (wdir+subj+"/mne_python/run3_ico5_megonly_icacorr_-fwd.fif") 
                 epochs.pick_types(meg=megtag, eeg = eegtag)
             elif modality == 'EEG':
                 megtag=False  
                 eegtag=True
                 fname_fwd  = (wdir+subj+"/mne_python/run3_ico5_eegonly_icacorr_-fwd.fif") 
                 epochs.pick_types(meg=megtag, eeg = eegtag)
             elif modality == 'MEEG':
                 megtag=True  
                 eegtag=True
                 fname_fwd = (wdir+subj+"/mne_python/run3_ico5_meeg_icacorr_-fwd.fif")  
                 epochs.pick_types(meg=megtag, eeg = eegtag)                                         
             
             epochs_list.append(epochs)
             epochs_count.append(epochs.get_data().shape[0])
             
             # import nose_cov
             noise_cov = mne.read_cov((wdir + subject + "/mne_python/COVMATS/" +
                                      modality + "_noisecov_icacorr_" + CovSource + "_" +
                                      subject + "-cov.fif"))   
             # import forward
             forward = mne.read_forward_solution(fname_fwd ,surf_ori=True)
              
             # load  epochs, then pick    
             picks = mne.pick_types(epochs.info, meg=megtag,  eeg=eegtag, stim=False , eog=False)
     
             # compute evoked
             evoked       = epochs.average(picks = picks)   
             evoked_list.append(evoked)                           
             
             # compute inverse operator
             inverse_operator  = make_inverse_operator(evoked.info,  
                                                           forward,
                                                           noise_cov,  
                                                           loose=0.2, 
                                                           depth=0.8) 
             inverse_operator_list.append(inverse_operator)   
     
     Epochs = mne.epochs.concatenate_epochs(epochs_list)
     Epochs.crop(twin[0],twin[1])        
                                                                
     n_epochs  = np.sum(epochs_count)
     n_times   = len(Epochs.times)
     n_vertices = forward['nsource']
     X = np.zeros([n_epochs, n_vertices, n_times])  
     #for condition_count, ep in zip([0, n_epochs / 8], epochs_list):
     for epcount in range(len(Epochs)):
         stcs = apply_inverse_epochs(Epochs[epcount], inverse_operator, lambda2,
                         Method, pick_ori=ori,  # saves us memory
                         return_generator=True)
         for jj, stc in enumerate(stcs):
             X[epcount + jj] = stc.data
     
     tmp = []
     for index, nb in enumerate(epochs_count):
         tmp.append(np.ones((nb))*(dist[index]))
     Y = np.concatenate(tmp)    
     
     return stc, n_times, X, Y, forward
Example #36
from mne.simulation import generate_sparse_stc, generate_evoked

###############################################################################
# Load real data as templates
data_path = sample.data_path()

raw = mne.fiff.Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
proj = mne.read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels

fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'

fwd = mne.read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])

cov = mne.read_cov(cov_fname)

evoked_template = mne.fiff.read_evoked(ave_fname, setno=0, baseline=None)
evoked_template = pick_types_evoked(evoked_template,
                                    meg=True,
                                    eeg=True,
                                    exclude=raw.info['bads'])

label_names = ['Aud-lh', 'Aud-rh']
labels = [
    mne.read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
    for ln in label_names
]
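
# A hedged sketch of the next step (using the newer mne.simulation API rather
# than the generate_* functions imported above): simulate a sparse source
# estimate restricted to the two auditory labels and project it to the sensors.
import numpy as np

times = np.arange(100) / raw.info['sfreq']
stc = mne.simulation.simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
                                         labels=labels, random_state=42)
evoked_sim = mne.simulation.simulate_evoked(fwd, stc, evoked_template.info,
                                            cov, nave=30)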
Example #37
0
def test_channel_name_limit(tmpdir, monkeypatch, fname):
    """Test that our remapping works properly."""
    #
    # raw
    #
    if fname.endswith('fif'):
        raw = read_raw_fif(fname)
        raw.pick_channels(raw.ch_names[:3])
        ref_names = []
        data_names = raw.ch_names
    else:
        assert fname.endswith('.ds')
        raw = read_raw_ctf(fname)
        ref_names = [
            raw.ch_names[pick]
            for pick in pick_types(raw.info, meg=False, ref_meg=True)
        ]
        data_names = raw.ch_names[32:35]
    proj = dict(data=np.ones((1, len(data_names))),
                col_names=data_names[:2].copy(),
                row_names=None,
                nrow=1)
    proj = Projection(data=proj,
                      active=False,
                      desc='test',
                      kind=0,
                      explained_var=0.)
    raw.add_proj(proj, remove_existing=True)
    raw.info.normalize_proj()
    raw.pick_channels(data_names + ref_names).crop(0, 2)
    long_names = ['123456789abcdefg' + name for name in raw.ch_names]
    fname = tmpdir.join('test-raw.fif')
    with catch_logging() as log:
        raw.save(fname)
    log = log.getvalue()
    assert 'truncated' not in log
    rename = dict(zip(raw.ch_names, long_names))
    long_data_names = [rename[name] for name in data_names]
    long_proj_names = long_data_names[:2]
    raw.rename_channels(rename)
    for comp in raw.info['comps']:
        for key in ('row_names', 'col_names'):
            for name in comp['data'][key]:
                assert name in raw.ch_names
    if raw.info['comps']:
        assert raw.compensation_grade == 0
        raw.apply_gradient_compensation(3)
        assert raw.compensation_grade == 3
    assert len(raw.info['projs']) == 1
    assert raw.info['projs'][0]['data']['col_names'] == long_proj_names
    raw.info['bads'] = bads = long_data_names[2:3]
    good_long_data_names = [
        name for name in long_data_names if name not in bads
    ]
    with catch_logging() as log:
        raw.save(fname, overwrite=True, verbose=True)
    log = log.getvalue()
    assert 'truncated to 15' in log
    for name in raw.ch_names:
        assert len(name) > 15
    # first read the full way
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'Reading extended channel information' in log
    for ra in (raw, raw_read):
        assert ra.ch_names == long_names
    assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names
    del raw_read
    # next, read as if the longer names could not be read
    monkeypatch.setattr(meas_info, '_read_extended_ch_info',
                        lambda x, y, z: None)
    with catch_logging() as log:
        raw_read = read_raw_fif(fname, verbose=True)
    log = log.getvalue()
    assert 'extended' not in log
    if raw.info['comps']:
        assert raw_read.compensation_grade == 3
        raw_read.apply_gradient_compensation(0)
        assert raw_read.compensation_grade == 0
    monkeypatch.setattr(  # restore
        meas_info, '_read_extended_ch_info', _read_extended_ch_info)
    short_proj_names = [
        f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}'
        for ni, name in enumerate(long_data_names[:2])
    ]
    assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names
    #
    # epochs
    #
    epochs = Epochs(raw, make_fixed_length_events(raw))
    fname = tmpdir.join('test-epo.fif')
    epochs.save(fname)
    epochs_read = read_epochs(fname)
    for ep in (epochs, epochs_read):
        assert ep.info['ch_names'] == long_names
        assert ep.ch_names == long_names
    del raw, epochs_read
    # cov
    epochs.info['bads'] = []
    cov = compute_covariance(epochs, verbose='error')
    fname = tmpdir.join('test-cov.fif')
    write_cov(fname, cov)
    cov_read = read_cov(fname)
    for co in (cov, cov_read):
        assert co['names'] == long_data_names
        assert co['bads'] == []
    del cov_read

    #
    # evoked
    #
    evoked = epochs.average()
    evoked.info['bads'] = bads
    assert evoked.nave == 1
    fname = tmpdir.join('test-ave.fif')
    evoked.save(fname)
    evoked_read = read_evokeds(fname)[0]
    for ev in (evoked, evoked_read):
        assert ev.ch_names == long_names
        assert ev.info['bads'] == bads
    del evoked_read, epochs

    #
    # forward
    #
    with pytest.warns(None):  # not enough points for CTF
        sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(
        pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]]))
    fwd = make_forward_solution(evoked.info, None, src, sphere)
    fname = tmpdir.join('temp-fwd.fif')
    write_forward_solution(fname, fwd)
    fwd_read = read_forward_solution(fname)
    for fw in (fwd, fwd_read):
        assert fw['sol']['row_names'] == long_data_names
        assert fw['info']['ch_names'] == long_data_names
        assert fw['info']['bads'] == bads
    del fwd_read

    #
    # inv
    #
    inv = make_inverse_operator(evoked.info, fwd, cov)
    fname = tmpdir.join('test-inv.fif')
    write_inverse_operator(fname, inv)
    inv_read = read_inverse_operator(fname)
    for iv in (inv, inv_read):
        assert iv['info']['ch_names'] == good_long_data_names
    apply_inverse(evoked, inv)  # smoke test
Example #38
0
def read_forward(name, save_dir):
    forward_name = name + '-fwd.fif'
    forward_path = join(save_dir, forward_name)
    forward = mne.read_forward_solution(forward_path)

    return forward
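
# A hedged usage sketch; the name and directory below are illustrative only.
fwd = read_forward('sub01_run1', '/data/sub01/mne')
print('%d sources, %d channels' % (fwd['nsource'], fwd['nchan']))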
Example #39
0
def test_dipole_fitting():
    """Test dipole fitting."""
    amp = 100e-9
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    fname_dtemp = op.join(tempdir, 'test.dip')
    fname_sim = op.join(tempdir, 'test-ave.fif')
    fwd = convert_forward_solution(read_forward_solution(fname_fwd),
                                   surf_ori=False,
                                   force_fixed=True,
                                   use_cps=True)
    evoked = read_evokeds(fname_evo)[0]
    cov = read_cov(fname_cov)
    n_per_hemi = 5
    vertices = [
        np.sort(rng.permutation(s['vertno'])[:n_per_hemi]) for s in fwd['src']
    ]
    nv = sum(len(v) for v in vertices)
    stc = SourceEstimate(amp * np.eye(nv), vertices, 0, 0.001)
    evoked = simulate_evoked(fwd,
                             stc,
                             evoked.info,
                             cov,
                             nave=evoked.nave,
                             random_state=rng)
    # For speed, let's use a subset of channels (strange but works)
    picks = np.sort(
        np.concatenate([
            pick_types(evoked.info, meg=True, eeg=False)[::2],
            pick_types(evoked.info, meg=False, eeg=True)[::2]
        ]))
    evoked.pick_channels([evoked.ch_names[p] for p in picks])
    evoked.add_proj(make_eeg_average_ref_proj(evoked.info))
    write_evokeds(fname_sim, evoked)

    # Run MNE-C version
    run_subprocess([
        'mne_dipole_fit',
        '--meas',
        fname_sim,
        '--meg',
        '--eeg',
        '--noise',
        fname_cov,
        '--dip',
        fname_dtemp,
        '--mri',
        fname_fwd,
        '--reg',
        '0',
        '--tmin',
        '0',
    ])
    dip_c = read_dipole(fname_dtemp)

    # Run mne-python version
    sphere = make_sphere_model(head_radius=0.1)
    with pytest.warns(RuntimeWarning, match='projection'):
        dip, residual = fit_dipole(evoked, cov, sphere, fname_fwd)
    assert isinstance(residual, Evoked)

    # Sanity check: do our residuals have less power than orig data?
    data_rms = np.sqrt(np.sum(evoked.data**2, axis=0))
    resi_rms = np.sqrt(np.sum(residual.data**2, axis=0))
    assert (data_rms > resi_rms * 0.95).all(), \
        '%s (factor: %s)' % ((data_rms / resi_rms).min(), 0.95)

    # Compare to original points
    transform_surface_to(fwd['src'][0], 'head', fwd['mri_head_t'])
    transform_surface_to(fwd['src'][1], 'head', fwd['mri_head_t'])
    assert_equal(fwd['src'][0]['coord_frame'], FIFF.FIFFV_COORD_HEAD)
    src_rr = np.concatenate([s['rr'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)
    src_nn = np.concatenate([s['nn'][v] for s, v in zip(fwd['src'], vertices)],
                            axis=0)

    # MNE-C skips the last "time" point :(
    out = dip.crop(dip_c.times[0], dip_c.times[-1])
    assert (dip is out)
    src_rr, src_nn = src_rr[:-1], src_nn[:-1]

    # check that we did about as well
    corrs, dists, gc_dists, amp_errs, gofs = [], [], [], [], []
    for d in (dip_c, dip):
        new = d.pos
        diffs = new - src_rr
        corrs += [np.corrcoef(src_rr.ravel(), new.ravel())[0, 1]]
        dists += [np.sqrt(np.mean(np.sum(diffs * diffs, axis=1)))]
        gc_dists += [
            180 / np.pi * np.mean(np.arccos(np.sum(src_nn * d.ori, axis=1)))
        ]
        amp_errs += [np.sqrt(np.mean((amp - d.amplitude)**2))]
        gofs += [np.mean(d.gof)]
    # XXX possibly some OpenBLAS numerical differences make
    # things slightly worse for us
    factor = 0.7
    assert dists[0] / factor >= dists[1], 'dists: %s' % dists
    assert corrs[0] * factor <= corrs[1], 'corrs: %s' % corrs
    assert gc_dists[0] / factor >= gc_dists[1] * 0.8, \
        'gc-dists (ori): %s' % gc_dists
    assert amp_errs[0] / factor >= amp_errs[1],\
        'amplitude errors: %s' % amp_errs
    # This one is weird because our cov/sim/picking is weird
    assert gofs[0] * factor <= gofs[1] * 2, 'gof: %s' % gofs
Example #40
0
def test_resolution_matrix():
    """Test make_inverse_resolution_matrix() function."""
    # read forward solution
    forward = mne.read_forward_solution(fname_fwd)
    # forward operator with fixed source orientations
    forward_fxd = mne.convert_forward_solution(forward, surf_ori=True,
                                               force_fixed=True)

    # noise covariance matrix
    noise_cov = mne.read_cov(fname_cov)
    # evoked data for info
    evoked = mne.read_evokeds(fname_evoked, 0)

    # make inverse operator from forward solution
    # free source orientation
    inverse_operator = mne.minimum_norm.make_inverse_operator(
        info=evoked.info, forward=forward, noise_cov=noise_cov, loose=1.,
        depth=None)
    # fixed source orientation
    inverse_operator_fxd = mne.minimum_norm.make_inverse_operator(
        info=evoked.info, forward=forward, noise_cov=noise_cov, loose=0.,
        depth=None, fixed=True)

    # regularisation parameter based on SNR
    snr = 3.0
    lambda2 = 1.0 / snr ** 2
    # resolution matrices for free source orientation
    # compute resolution matrix for MNE with free source orientations
    rm_mne_free = make_inverse_resolution_matrix(forward, inverse_operator,
                                                 method='MNE', lambda2=lambda2)
    # compute resolution matrix for MNE, fwd fixed and inv free
    rm_mne_fxdfree = make_inverse_resolution_matrix(forward_fxd,
                                                    inverse_operator,
                                                    method='MNE',
                                                    lambda2=lambda2)
    # resolution matrices for fixed source orientation
    # compute resolution matrix for MNE
    rm_mne = make_inverse_resolution_matrix(forward_fxd, inverse_operator_fxd,
                                            method='MNE', lambda2=lambda2)
    # compute resolution matrix for sLORETA
    rm_lor = make_inverse_resolution_matrix(forward_fxd, inverse_operator_fxd,
                                            method='sLORETA', lambda2=lambda2)
    # rectify resolution matrix for sLORETA before determining maxima
    rm_lor_abs = np.abs(rm_lor)

    # get maxima per column
    maxidxs = rm_lor_abs.argmax(axis=0)
    # create array with the expected stepwise increase in maximum indices
    goodidxs = np.arange(0, len(maxidxs), 1)

    # Tests
    # Does sLORETA have zero dipole localization error for columns/PSFs?
    assert_array_equal(maxidxs, goodidxs)
    # MNE resolution matrices symmetric?
    assert_array_almost_equal(rm_mne, rm_mne.T)
    assert_array_almost_equal(rm_mne_free, rm_mne_free.T)

    # Some arbitrary vertex numbers
    idx = [1, 100, 400]
    # check various summary and normalisation options
    for mode in [None, 'sum', 'mean', 'maxval', 'maxnorm', 'pca']:
        n_comps = [1, 3]
        if mode in [None, 'sum', 'mean']:
            n_comps = [1]
        for n_comp in n_comps:
            for norm in [None, 'max', 'norm', True]:
                stc_psf = get_point_spread(
                    rm_mne, forward_fxd['src'], idx, mode=mode, n_comp=n_comp,
                    norm=norm, return_pca_vars=False)
                stc_ctf = get_cross_talk(
                    rm_mne, forward_fxd['src'], idx, mode=mode, n_comp=n_comp,
                    norm=norm, return_pca_vars=False)
                # for MNE, PSF/CTFs for same vertices should be the same
                assert_array_almost_equal(stc_psf.data, stc_ctf.data)

    # check SVD variances
    stc_psf, s_vars_psf = get_point_spread(
        rm_mne, forward_fxd['src'], idx, mode=mode, n_comp=n_comp,
        norm='norm', return_pca_vars=True)
    stc_ctf, s_vars_ctf = get_cross_talk(
        rm_mne, forward_fxd['src'], idx, mode=mode, n_comp=n_comp,
        norm='norm', return_pca_vars=True)
    assert_array_almost_equal(s_vars_psf, s_vars_ctf)
    # variances for SVD components should be ordered
    assert s_vars_psf[0] > s_vars_psf[1] > s_vars_psf[2]
    # all variances should sum up to 100
    assert_allclose(s_vars_psf.sum(), 100.)

    # Test application of free inv to fixed fwd
    assert_equal(rm_mne_fxdfree.shape, (3 * rm_mne.shape[0],
                 rm_mne.shape[0]))

    # Test PSF/CTF for labels
    label = mne.read_label(fname_label)
    # must be list of Label
    label = [label]
    label2 = 2 * label
    # get relevant vertices in source space
    verts = _vertices_for_get_psf_ctf(label, forward_fxd['src'])[0]

    stc_psf_label = get_point_spread(rm_mne, forward_fxd['src'], label,
                                     norm='max')
    # for list of indices
    stc_psf_idx = get_point_spread(rm_mne, forward_fxd['src'], verts,
                                   norm='max')
    stc_ctf_label = get_cross_talk(rm_mne, forward_fxd['src'], label,
                                   norm='max')
    # For MNE, PSF and CTF for same vertices should be the same
    assert_array_almost_equal(stc_psf_label.data, stc_ctf_label.data)
    # test multiple labels
    stc_psf_label2 = get_point_spread(rm_mne, forward_fxd['src'], label2,
                                      norm='max')
    m, n = stc_psf_label.data.shape
    assert_array_equal(
        stc_psf_label.data, stc_psf_label2[0].data)
    assert_array_equal(
        stc_psf_label.data, stc_psf_label2[1].data)
    assert_array_equal(
        stc_psf_label.data, stc_psf_idx.data)
Example #41
0
# Set up pick list: MEG - bad channels (modify to your needs)
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
                       exclude='bads')

# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()

# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    baseline=(None, 0), preload=True, proj=True,
                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()

forward = mne.read_forward_solution(fname_fwd)
forward = mne.convert_forward_solution(forward, surf_ori=True)

# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                  method='shrunk')

plt.close('all')

pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation, voxel: %i', 'Normal orientation, voxel: %i',
                'Max-power orientation, voxel: %i']
colors = ['b', 'k', 'r']
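
# A hedged sketch of how the lists above might be used (not part of the
# original script): build one LCMV beamformer per orientation setting and
# plot the time course of a single voxel; `voxel` is an illustrative index.
from mne.beamformer import make_lcmv, apply_lcmv

voxel = 3000
for pick_ori, name, color in zip(pick_oris, names, colors):
    filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori=pick_ori,
                        weight_norm='unit-noise-gain')
    stc = apply_lcmv(evoked, filters)
    plt.plot(stc.times, stc.data[voxel], color, label=name)
plt.legend()
plt.show()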
Example #42
0
def read_forward_solution_meg(*args, **kwargs):
    fwd = mne.read_forward_solution(*args, **kwargs)
    return mne.pick_types_forward(fwd, meg=True, eeg=False)
Example #43
0
import mne
import numpy as np
import pylab as plt
from mne.datasets import sample

# create data
fs = 500
ch_names = [
    'Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'Ft9', 'Fc5', 'Fc1', 'Fc2',
    'Fc6', 'Ft10', 'T7', 'C3', 'Cz', 'C4', 'T8', 'Tp9', 'Cp5', 'Cp1', 'Cp2',
    'Cp6', 'Tp10', 'P7', 'P3', 'Pz', 'P4', 'P8', 'O1', 'Oz', 'O2'
]
info = mne.create_info(ch_names=ch_names,
                       sfreq=fs,
                       montage=mne.channels.read_montage(kind='standard_1005'),
                       ch_types=['eeg' for ch in ch_names])
data = np.random.normal(loc=0,
                        scale=0.00001,
                        size=(5000, len(info["ch_names"])))
raw = mne.io.RawArray(data.T, info)
#raw.plot()
#plt.show()

# create cov matrix
noise_cov = mne.compute_raw_covariance(raw)
mne.viz.plot_cov(noise_cov, raw.info)

# forward solution
fname_fwd = sample.data_path() + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
Example #44
0
def test_simulate_evoked():
    """Test simulation of evoked data."""
    raw = read_raw_fif(raw_fname)
    fwd = read_forward_solution(fwd_fname)
    fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=False)
    fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
    cov = read_cov(cov_fname)

    evoked_template = read_evokeds(ave_fname, condition=0, baseline=None)
    evoked_template.pick_types(meg=True, eeg=True, exclude=raw.info['bads'])

    cov = regularize(cov, evoked_template.info)
    nave = evoked_template.nave

    tmin = -0.1
    sfreq = 1000.  # Hz
    tstep = 1. / sfreq
    n_samples = 600
    times = np.linspace(tmin, tmin + n_samples * tstep, n_samples)

    # Generate times series for 2 dipoles
    stc = simulate_sparse_stc(fwd['src'],
                              n_dipoles=2,
                              times=times,
                              random_state=42)

    # Generate noisy evoked data
    iir_filter = [1, -0.9]
    evoked = simulate_evoked(fwd,
                             stc,
                             evoked_template.info,
                             cov,
                             iir_filter=iir_filter,
                             nave=nave)
    assert_array_almost_equal(evoked.times, stc.times)
    assert len(evoked.data) == len(fwd['sol']['data'])
    assert_equal(evoked.nave, nave)
    assert len(evoked.info['projs']) == len(cov['projs'])
    evoked_white = whiten_evoked(evoked, cov)
    assert abs(evoked_white.data[:, 0].std() - 1.) < 0.1

    # make a vertex that doesn't exist in fwd, should throw error
    stc_bad = stc.copy()
    mv = np.max(fwd['src'][0]['vertno'][fwd['src'][0]['inuse']])
    stc_bad.vertices[0][0] = mv + 1

    pytest.raises(RuntimeError, simulate_evoked, fwd, stc_bad,
                  evoked_template.info, cov)
    evoked_1 = simulate_evoked(fwd,
                               stc,
                               evoked_template.info,
                               cov,
                               nave=np.inf)
    evoked_2 = simulate_evoked(fwd,
                               stc,
                               evoked_template.info,
                               cov,
                               nave=np.inf)
    assert_array_equal(evoked_1.data, evoked_2.data)

    cov['names'] = cov.ch_names[:-2]  # Error: channel sets now differ.
    with pytest.raises(RuntimeError, match='Not all channels present'):
        simulate_evoked(fwd, stc, evoked_template.info, cov)
Example #45
0
    def CatEpochs(subject, CondComb, CovSource, Method, modality):

        snr = 3.0
        lambda2 = 1.0 / snr**2

        if CondComb[0] != CondComb[1]:
            X, stcs = [], []
            epochs_list, evoked_list, inverse_operator_list = [], [], []
            for c, cond in enumerate(CondComb):

                epochs = mne.read_epochs(wdir + subject +
                                         "/mne_python/EPOCHS/MEEG_epochs_" +
                                         cond + '_' + subject + "-epo.fif")
                epochs.crop(tmin=0, tmax=0.9)

                # which modality? Import forward model accordingly
                if modality == 'MEG':
                    fname_fwd = (wdir + subject +
                                 "/mne_python/run3_oct6_megonly_-fwd.fif")
                    megtag = True
                    eegtag = False
                    epochs.pick_types(meg=megtag, eeg=eegtag)
                elif modality == 'EEG':
                    fname_fwd = (wdir + subject +
                                 "/mne_python/run3_ico-5_eegonly_-fwd.fif")
                    megtag = False
                    eegtag = True
                    epochs.pick_types(meg=megtag, eeg=eegtag)
                elif modality == 'MEEG':
                    fname_fwd = (wdir + subject +
                                 "/mne_python/run3_ico-5_meeg_-fwd.fif")
                    megtag = True
                    eegtag = True
                    epochs.pick_types(meg=megtag, eeg=eegtag)

                epochs_list.append(epochs)

                # import noise covariance
                noise_cov = mne.read_cov(
                    (wdir + subject + "/mne_python/COVMATS/" + modality +
                     "_noisecov_" + CovSource + "_" + subject + "-cov.fif"))
                # import forward
                forward = mne.read_forward_solution(fname_fwd, surf_ori=True)

                # pick channels from the loaded epochs
                picks = mne.pick_types(epochs.info,
                                       meg=megtag,
                                       eeg=eegtag,
                                       stim=False,
                                       eog=False)

                # compute evoked
                evoked = epochs.average(picks=picks)
                evoked_list.append(evoked)

                # compute inverse operator
                inverse_operator = make_inverse_operator(evoked.info,
                                                         forward,
                                                         noise_cov,
                                                         loose=0.2,
                                                         depth=0.8)
                inverse_operator_list.append(inverse_operator)

        n_epochs = len(epochs_list[0].events) * 2
        n_times = len(epochs_list[0].times)
        n_vertices = forward['nsource']
        X = np.zeros([n_epochs, n_vertices, n_times])
        for condition_count, ep in zip([0, n_epochs // 2], epochs_list):
            stcs = apply_inverse_epochs(
                ep,
                inverse_operator,
                lambda2,
                Method,
                pick_ori="normal",  # saves us memory
                return_generator=True)
            for jj, stc in enumerate(stcs):
                X[condition_count + jj] = stc.data

            Y = np.repeat(range(len(CondComb)),
                          len(X) // len(CondComb))  # one class label per epoch
            n_times = X.shape[2]

        return stc, n_times, X, Y, forward
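
# A hedged usage sketch (not part of the original code): decode the two
# conditions from the returned source data with a cross-validated classifier
# at each time point; the call and its arguments below are illustrative.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

stc, n_times, X, Y, forward = CatEpochs(subject, CondComb, CovSource, Method, modality)
scores = [cross_val_score(LogisticRegression(max_iter=1000), X[:, :, t], Y, cv=5).mean()
          for t in range(n_times)]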
Example #46
0
def test_lcmv_reg_proj(proj, weight_norm):
    """Test LCMV with and without proj."""
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    events = mne.find_events(raw)
    raw.pick_types()
    assert len(raw.ch_names) == 305
    epochs = mne.Epochs(raw, events, None, preload=True, proj=proj)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        noise_cov = mne.compute_covariance(epochs, tmax=0)
        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
    forward = mne.read_forward_solution(fname_fwd)
    filters = make_lcmv(epochs.info,
                        forward,
                        data_cov,
                        reg=0.05,
                        noise_cov=noise_cov,
                        pick_ori='max-power',
                        weight_norm='nai',
                        rank=None,
                        verbose=True)
    want_rank = 302  # 305 good channels - 3 MEG projs
    assert filters['rank'] == want_rank
    # And also with and without noise_cov
    with pytest.raises(ValueError, match='several sensor types'):
        make_lcmv(epochs.info, forward, data_cov, reg=0.05, noise_cov=None)
    epochs.pick_types('grad')
    kwargs = dict(reg=0.05, pick_ori=None, weight_norm=weight_norm)
    filters_cov = make_lcmv(epochs.info,
                            forward,
                            data_cov,
                            noise_cov=noise_cov,
                            **kwargs)
    filters_nocov = make_lcmv(epochs.info,
                              forward,
                              data_cov,
                              noise_cov=None,
                              **kwargs)
    ad_hoc = mne.make_ad_hoc_cov(epochs.info)
    filters_adhoc = make_lcmv(epochs.info,
                              forward,
                              data_cov,
                              noise_cov=ad_hoc,
                              **kwargs)
    evoked = epochs.average()
    stc_cov = apply_lcmv(evoked, filters_cov)
    stc_nocov = apply_lcmv(evoked, filters_nocov)
    stc_adhoc = apply_lcmv(evoked, filters_adhoc)

    # Compare adhoc and nocov: scale difference is necessitated by using std=1.
    if weight_norm == 'unit-noise-gain':
        scale = np.sqrt(ad_hoc['data'][0])
    else:
        scale = 1.
    assert_allclose(stc_nocov.data, stc_adhoc.data * scale)
    assert_allclose(
        np.dot(filters_nocov['weights'], filters_nocov['whitener']),
        np.dot(filters_adhoc['weights'], filters_adhoc['whitener']) * scale)

    # Compare adhoc and cov: locs might not be equivalent, but the same
    # general profile should persist, so look at the std and be lenient:
    if weight_norm == 'unit-noise-gain':
        adhoc_scale = 0.12
    else:
        adhoc_scale = 1.
    assert_allclose(np.linalg.norm(stc_adhoc.data, axis=0) * adhoc_scale,
                    np.linalg.norm(stc_cov.data, axis=0),
                    rtol=0.3)
    assert_allclose(np.linalg.norm(stc_nocov.data, axis=0) / scale *
                    adhoc_scale,
                    np.linalg.norm(stc_cov.data, axis=0),
                    rtol=0.3)

    if weight_norm == 'nai':
        # NAI is always normalized by noise-level (based on eigenvalues)
        for stc in (stc_nocov, stc_cov):
            assert_allclose(stc.data.std(), 0.39, rtol=0.1)
    elif weight_norm is None:
        # None always represents something not normalized, reflecting channel
        # weights
        for stc in (stc_nocov, stc_cov):
            assert_allclose(stc.data.std(), 2.8e-8, rtol=0.1)
    else:
        assert weight_norm == 'unit-noise-gain'
        # Channel scalings depend on presence of noise_cov
        assert_allclose(stc_nocov.data.std(), 5.3e-13, rtol=0.1)
        assert_allclose(stc_cov.data.std(), 0.13, rtol=0.1)
Example #47
0
def test_psf_ctf():
    """Test computation of PSFs and CTFs for linear estimators."""
    forward = read_forward_solution(fname_fwd)
    labels = [mne.read_label(ss) for ss in fname_label]

    method = 'MNE'
    n_svd_comp = 2

    # make sure it works for both types of inverses
    for fname_inv in (fname_inv_meg, fname_inv_meeg):
        inverse_operator = read_inverse_operator(fname_inv)
        # Test PSFs (then CTFs)
        for mode in ('sum', 'svd'):
            with pytest.deprecated_call():
                stc_psf, psf_ev = point_spread_function(inverse_operator,
                                                        forward,
                                                        method=method,
                                                        labels=labels,
                                                        lambda2=lambda2,
                                                        pick_ori='normal',
                                                        mode=mode,
                                                        n_svd_comp=n_svd_comp,
                                                        use_cps=True)

            n_vert, n_samples = stc_psf.shape
            should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
                             inverse_operator['src'][0]['vertno'].shape[0])
            if mode == 'svd':
                should_n_samples = len(labels) * n_svd_comp + 1
            else:
                should_n_samples = len(labels) + 1

            assert (n_vert == should_n_vert)
            assert (n_samples == should_n_samples)

            n_chan, n_samples = psf_ev.data.shape
            assert (n_chan == forward['nchan'])

        # Test CTFs
        for mode in ('sum', 'svd'):
            with pytest.deprecated_call():
                stc_ctf = cross_talk_function(inverse_operator,
                                              forward,
                                              labels,
                                              method=method,
                                              lambda2=lambda2,
                                              signed=False,
                                              mode=mode,
                                              n_svd_comp=n_svd_comp,
                                              use_cps=True)

            n_vert, n_samples = stc_ctf.shape
            should_n_vert = (inverse_operator['src'][1]['vertno'].shape[0] +
                             inverse_operator['src'][0]['vertno'].shape[0])
            if mode == 'svd':
                should_n_samples = len(labels) * n_svd_comp + 1
            else:
                should_n_samples = len(labels) + 1

            assert (n_vert == should_n_vert)
            assert (n_samples == should_n_samples)
Example #48
0
def get_mne_sample(tmin=-0.1,
                   tmax=0.4,
                   baseline=(None, 0),
                   sns=False,
                   src=None,
                   sub="modality=='A'",
                   fixed=False,
                   snr=2,
                   method='dSPM',
                   rm=False,
                   stc=False):
    """Load events and epochs from the MNE sample data

    Parameters
    ----------
    tmin, tmax, baseline :
        Epoch parameters.
    sns : bool
        Add sensor space data as NDVar as ``ds['sns']``.
    src : None | 'ico' | 'vol'
        Add source space data as NDVar as ``ds['src']``.
    sub : str | None
        Expression for the subset of events to load.
    fixed : bool
        MNE inverse parameter.
    snr : scalar
        MNE inverse parameter.
    method : str
        MNE inverse parameter.
    rm : bool
        Pretend to be a repeated measures dataset (adds 'subject' variable).
    stc : bool
        Add mne SourceEstimate for source space data as ``ds['stc']``.

    Returns
    -------
    ds : Dataset
        Dataset with epochs from the MNE sample dataset.
    """
    data_dir = mne.datasets.sample.data_path()
    meg_dir = os.path.join(data_dir, 'MEG', 'sample')
    raw_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40_raw.fif')
    event_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40_evt.fif')
    subjects_dir = os.path.join(data_dir, 'subjects')
    subject = 'sample'
    label_path = os.path.join(subjects_dir, subject, 'label', '%s.label')

    if not os.path.exists(event_file):
        raw = mne.io.Raw(raw_file)
        events = mne.find_events(raw, stim_channel='STI 014')
        mne.write_events(event_file, events)
    ds = load.fiff.events(raw_file, events=event_file)
    ds.index()
    ds.info['subjects_dir'] = subjects_dir
    ds.info['subject'] = subject
    ds.info['label'] = label_path

    # get the trigger variable from the dataset for easier access
    trigger = ds['trigger']

    # use trigger to add various labels to the dataset
    ds['condition'] = Factor(trigger,
                             labels={
                                 1: 'LA',
                                 2: 'RA',
                                 3: 'LV',
                                 4: 'RV',
                                 5: 'smiley',
                                 32: 'button'
                             })
    ds['side'] = Factor(trigger,
                        labels={
                            1: 'L',
                            2: 'R',
                            3: 'L',
                            4: 'R',
                            5: 'None',
                            32: 'None'
                        })
    ds['modality'] = Factor(trigger,
                            labels={
                                1: 'A',
                                2: 'A',
                                3: 'V',
                                4: 'V',
                                5: 'None',
                                32: 'None'
                            })

    if rm:
        ds = ds.sub('trigger < 5')
        ds = ds.equalize_counts('side % modality')
        subject_f = ds.eval('side % modality').enumerate_cells()
        ds['subject'] = subject_f.as_factor(labels='s%r', random=True)

    if sub:
        ds = ds.sub(sub)

    load.fiff.add_mne_epochs(ds, tmin, tmax, baseline)
    if sns:
        ds['sns'] = load.fiff.epochs_ndvar(ds['epochs'])

    if src is None:
        return ds
    elif src == 'ico':
        src_tag = 'ico-4'
    elif src == 'vol':
        src_tag = 'vol-10'
    else:
        raise ValueError("src = %r" % src)
    epochs = ds['epochs']

    # get inverse operator
    inv_file = os.path.join(meg_dir, 'sample_eelbrain_%s-inv.fif' % src_tag)
    if os.path.exists(inv_file):
        inv = mne.minimum_norm.read_inverse_operator(inv_file)
    else:
        fwd_file = os.path.join(meg_dir, 'sample-%s-fwd.fif' % src_tag)
        bem_dir = os.path.join(subjects_dir, subject, 'bem')
        bem_file = os.path.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif')
        trans_file = os.path.join(meg_dir, 'sample_audvis_raw-trans.fif')

        if os.path.exists(fwd_file):
            fwd = mne.read_forward_solution(fwd_file)
        else:
            src_file = os.path.join(bem_dir, 'sample-%s-src.fif' % src_tag)
            if os.path.exists(src_file):
                src_ = src_file
            elif src == 'ico':
                src_ = mne.setup_source_space(subject,
                                              src_file,
                                              'ico4',
                                              subjects_dir=subjects_dir)
            elif src == 'vol':
                mri_file = os.path.join(subjects_dir, subject, 'mri',
                                        'orig.mgz')
                src_ = mne.setup_volume_source_space(subject,
                                                     src_file,
                                                     pos=10.,
                                                     mri=mri_file,
                                                     bem=bem_file,
                                                     mindist=0.,
                                                     exclude=0.,
                                                     subjects_dir=subjects_dir)
            fwd = mne.make_forward_solution(epochs.info, trans_file, src_,
                                            bem_file, fwd_file)

        cov_file = os.path.join(meg_dir, 'sample_audvis-cov.fif')
        cov = mne.read_cov(cov_file)
        inv = mn.make_inverse_operator(epochs.info, fwd, cov, None, None,
                                       fixed)
        mne.minimum_norm.write_inverse_operator(inv_file, inv)
    ds.info['inv'] = inv

    stcs = mn.apply_inverse_epochs(epochs, inv, 1. / (snr**2), method)
    ds['src'] = load.fiff.stc_ndvar(stcs, subject, src_tag, subjects_dir)
    if stc:
        ds['stc'] = stcs

    return ds
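
# A hedged usage sketch (assumes the eelbrain environment the function relies
# on, e.g. the `load` module used above): load auditory epochs with dSPM
# source estimates attached.
ds = get_mne_sample(tmax=0.3, src='ico', method='dSPM')
src_ndvar = ds['src']  # one dSPM source estimate per epoch, as an NDVar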
Example #49
0
       '180_zd160826','201_gs150925','203_am151029',
       '204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
       'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828',
       'nlr_hb275170828','nlr_gb355170907']

#%%
for n, s in enumerate(session2):
    os.chdir(os.path.join(raw_dir, session2[n]))
    
    os.chdir('inverse')
    fn = 'All_40-sss_eq_'+session2[n]+'-ave.fif'
    evoked = mne.read_evokeds(fn, condition=0, 
                              baseline=(None,0), kind='average', proj=True)
    
    info = evoked.info 
    
    os.chdir('../forward')
    fn = session2[n] + '-sss-fwd.fif'
    fwd = mne.read_forward_solution(fn, force_fixed=False, surf_ori=True)
    
    #Inverse here
    os.chdir('../covariance')
    fn = session2[n] + '-40-sss-cov.fif'
    cov = mne.read_cov(fn)
    
    os.chdir('../inverse')
    # Free: loose = 1; Loose: loose = 0.2
    inv = mne.minimum_norm.make_inverse_operator(info, fwd, cov, loose=0.2, depth=0.8, use_cps=True)
    
    fn = session2[n] + '-depth8-inv.fif'
    mne.minimum_norm.write_inverse_operator(fn, inv)
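
# A hedged follow-up sketch (not in the original loop): apply the inverse
# operator that was just written to the evoked response of the last iteration,
# using the conventional evoked-data SNR of 3.
stc = mne.minimum_norm.apply_inverse(evoked, inv, lambda2=1.0 / 3.0 ** 2,
                                     method='dSPM')
stc.save(session2[n] + '-depth8-dSPM')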
Example #50
0
def test_volume_source_morph(tmpdir):
    """Test volume source estimate morph, special cases and exceptions."""
    import nibabel as nib
    inverse_operator_vol = read_inverse_operator(fname_inv_vol)
    stc_vol = read_source_estimate(fname_vol, 'sample')

    # check for invalid input type
    with pytest.raises(TypeError, match='src must be'):
        compute_source_morph(src=42)

    # check for raising an error if neither
    # inverse_operator_vol['src'][0]['subject_his_id'] nor subject_from is set,
    # but attempting to perform a volume morph
    src = inverse_operator_vol['src']
    assert src._subject is None  # already None on disk (old!)

    with pytest.raises(ValueError, match='subject_from could not be inferred'):
        with pytest.warns(RuntimeWarning, match='recommend regenerating'):
            compute_source_morph(src=src, subjects_dir=subjects_dir)

    # check infer subject_from from src[0]['subject_his_id']
    src[0]['subject_his_id'] = 'sample'

    with pytest.raises(ValueError, match='Inter-hemispheric morphing'):
        compute_source_morph(src=src, subjects_dir=subjects_dir, xhemi=True)

    with pytest.raises(ValueError, match='Only surface.*sparse morph'):
        compute_source_morph(src=src, sparse=True, subjects_dir=subjects_dir)

    # terrible quality but fast
    zooms = 20
    kwargs = dict(zooms=zooms, niter_sdr=(1,), niter_affine=(1,))
    source_morph_vol = compute_source_morph(
        subjects_dir=subjects_dir, src=fname_inv_vol,
        subject_from='sample', **kwargs)
    shape = (13,) * 3  # for the given zooms

    assert source_morph_vol.subject_from == 'sample'

    # the brain used in sample data has shape (255, 255, 255)
    assert tuple(source_morph_vol.sdr_morph.domain_shape) == shape

    assert tuple(source_morph_vol.pre_affine.domain_shape) == shape

    # proves the above
    assert_array_equal(source_morph_vol.zooms, (zooms,) * 3)

    # assure proper src shape
    mri_size = (src[0]['mri_height'], src[0]['mri_depth'], src[0]['mri_width'])
    assert source_morph_vol.src_data['src_shape_full'] == mri_size

    fwd = read_forward_solution(fname_fwd_vol)
    fwd['src'][0]['subject_his_id'] = 'sample'  # avoid further warnings
    source_morph_vol = compute_source_morph(
        fwd['src'], 'sample', 'sample', subjects_dir=subjects_dir,
        **kwargs)

    # check wrong subject_to
    with pytest.raises(IOError, match='cannot read file'):
        compute_source_morph(fwd['src'], 'sample', '42',
                             subjects_dir=subjects_dir)

    # two different ways of saving
    source_morph_vol.save(tmpdir.join('vol'))

    # check loading
    source_morph_vol_r = read_source_morph(tmpdir.join('vol-morph.h5'))

    # check for invalid file name handling
    with pytest.raises(IOError, match='not found'):
        read_source_morph(tmpdir.join('42'))

    # check morph
    stc_vol_morphed = source_morph_vol.apply(stc_vol)
    # old way, verts do not match
    assert not np.array_equal(stc_vol_morphed.vertices[0], stc_vol.vertices[0])

    # vector
    stc_vol_vec = VolVectorSourceEstimate(
        np.tile(stc_vol.data[:, np.newaxis], (1, 3, 1)),
        stc_vol.vertices, 0, 1)
    stc_vol_vec_morphed = source_morph_vol.apply(stc_vol_vec)
    assert isinstance(stc_vol_vec_morphed, VolVectorSourceEstimate)
    for ii in range(3):
        assert_allclose(stc_vol_vec_morphed.data[:, ii], stc_vol_morphed.data)

    # check output as NIfTI
    assert isinstance(source_morph_vol.apply(stc_vol_vec, output='nifti2'),
                      nib.Nifti2Image)

    # check for subject_from mismatch
    source_morph_vol_r.subject_from = '42'
    with pytest.raises(ValueError, match='subject_from must match'):
        source_morph_vol_r.apply(stc_vol_morphed)

    # check if nifti is in grid morph space with voxel_size == spacing
    img_morph_res = source_morph_vol.apply(stc_vol, output='nifti1')

    # assure morph spacing
    assert isinstance(img_morph_res, nib.Nifti1Image)
    assert img_morph_res.header.get_zooms()[:3] == (zooms,) * 3

    # assure src shape
    img_mri_res = source_morph_vol.apply(stc_vol, output='nifti1',
                                         mri_resolution=True)
    assert isinstance(img_mri_res, nib.Nifti1Image)
    assert (img_mri_res.shape == (src[0]['mri_height'], src[0]['mri_depth'],
                                  src[0]['mri_width']) +
            (img_mri_res.shape[3],))

    # check if nifti is defined resolution with voxel_size == (5., 5., 5.)
    img_any_res = source_morph_vol.apply(stc_vol, output='nifti1',
                                         mri_resolution=(5., 5., 5.))
    assert isinstance(img_any_res, nib.Nifti1Image)
    assert img_any_res.header.get_zooms()[:3] == (5., 5., 5.)

    # check if morph outputs correct data
    assert isinstance(stc_vol_morphed, VolSourceEstimate)

    # check if loaded and saved objects contain the same
    assert (all([read == saved for read, saved in
                 zip(sorted(source_morph_vol_r.__dict__),
                     sorted(source_morph_vol.__dict__))]))

    # check __repr__
    assert 'volume' in repr(source_morph_vol)

    # check Nifti2Image
    assert isinstance(
        source_morph_vol.apply(stc_vol, mri_resolution=True,
                               mri_space=True, output='nifti2'),
        nib.Nifti2Image)

    # Degenerate conditions
    with pytest.raises(TypeError, match='output must be'):
        source_morph_vol.apply(stc_vol, output=1)
    with pytest.raises(ValueError, match='subject_from does not match'):
        compute_source_morph(src=src, subject_from='42')
    with pytest.raises(ValueError, match='output'):
        source_morph_vol.apply(stc_vol, output='42')
    with pytest.raises(ValueError, match='subject_to cannot be None'):
        compute_source_morph(src, 'sample', None,
                             subjects_dir=subjects_dir)
    # Check if not morphed, but voxel size not boolean, raise ValueError.
    # Note that this check requires dipy to not raise the dipy ImportError
    # before checking if the actual voxel size error will raise.
    with pytest.raises(ValueError, match='Cannot infer original voxel size'):
        stc_vol.as_volume(inverse_operator_vol['src'], mri_resolution=4)

    stc_surf = read_source_estimate(fname_stc, 'sample')
    with pytest.raises(TypeError, match='stc_from must be an instance'):
        source_morph_vol.apply(stc_surf)

    # src_to
    # zooms=20 does not match src_to zooms (7)
    with pytest.raises(ValueError, match='If src_to is provided, zooms shoul'):
        source_morph_vol = compute_source_morph(
            fwd['src'], subject_from='sample', src_to=fwd['src'],
            subject_to='sample', subjects_dir=subjects_dir, **kwargs)
    # hack the src_to "zooms" to make it seem like a pos=20. source space
    fwd['src'][0]['src_mri_t']['trans'][:3, :3] = 0.02 * np.eye(3)
    source_morph_vol = compute_source_morph(
        fwd['src'], subject_from='sample', src_to=fwd['src'],
        subject_to='sample', subjects_dir=subjects_dir, **kwargs)
    stc_vol_2 = source_morph_vol.apply(stc_vol)
    # new way, verts match
    assert_array_equal(stc_vol.vertices[0], stc_vol_2.vertices[0])
    stc_vol_bad = VolSourceEstimate(
        stc_vol.data[:-1], [stc_vol.vertices[0][:-1]],
        stc_vol.tmin, stc_vol.tstep)
    match = (
        'vertices do not match between morph \\(4157\\) and stc \\(4156\\).*'
        '\n.*\n.*\n.*Vertices were likely excluded during forward computatio.*'
    )
    with pytest.raises(ValueError, match=match):
        source_morph_vol.apply(stc_vol_bad)
Example #51
0
def test_tf_lcmv():
    """Test TF beamforming based on LCMV."""
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)

    event_id, tmin, tmax = 1, -0.2, 0.2

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bads channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=left_temporal_channels)
    picks = picks[::2]  # decimate for speed
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    raw.info.normalize_proj()  # avoid projection warnings
    del picks

    # Read epochs
    epochs = mne.Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        proj=True,
                        baseline=None,
                        preload=False,
                        reject=reject)
    epochs.load_data()

    freq_bins = [(4, 12), (15, 40)]
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    win_lengths = [0.2, 0.2]
    tstep = 0.1
    reg = 0.05

    source_power = []
    noise_covs = []
    for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
        raw_band = raw.copy()
        raw_band.filter(l_freq,
                        h_freq,
                        method='iir',
                        n_jobs=1,
                        iir_params=dict(output='ba'))
        epochs_band = mne.Epochs(raw_band,
                                 epochs.events,
                                 epochs.event_id,
                                 tmin=tmin,
                                 tmax=tmax,
                                 baseline=None,
                                 proj=True)
        noise_cov = mne.compute_covariance(epochs_band,
                                           tmin=tmin,
                                           tmax=tmin + win_length)
        noise_cov = mne.cov.regularize(noise_cov,
                                       epochs_band.info,
                                       mag=reg,
                                       grad=reg,
                                       eeg=reg,
                                       proj=True,
                                       rank=None)
        noise_covs.append(noise_cov)
        del raw_band  # to save memory

        # Manually calculate source power in one frequency band and several
        # time windows to compare to tf_lcmv results and test overlapping
        if (l_freq, h_freq) == freq_bins[0]:
            for time_window in time_windows:
                data_cov = mne.compute_covariance(epochs_band,
                                                  tmin=time_window[0],
                                                  tmax=time_window[1])
                stc_source_power = _lcmv_source_power(
                    epochs.info,
                    forward,
                    noise_cov,
                    data_cov,
                    reg=reg,
                    label=label,
                    weight_norm='unit-noise-gain')
                source_power.append(stc_source_power.data)

    pytest.raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths,
                  freq_bins,
                  reg=reg,
                  label=label)
    stcs = tf_lcmv(epochs,
                   forward,
                   noise_covs,
                   tmin,
                   tmax,
                   tstep,
                   win_lengths,
                   freq_bins,
                   reg=reg,
                   label=label,
                   raw=raw)

    assert (len(stcs) == len(freq_bins))
    assert (stcs[0].shape[1] == 4)

    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)

    # Selecting the first frequency bin in tf_lcmv results
    stc = stcs[0]

    # Comparing tf_lcmv results with _lcmv_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])

    # Test if using unsupported max-power orientation is detected
    pytest.raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths,
                  freq_bins=freq_bins,
                  pick_ori='max-power')

    # Test if incorrect number of noise covariances is detected
    pytest.raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,
                  tmax, tstep, win_lengths, freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    pytest.raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths=[0, 1, 2],
                  freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    pytest.raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep=0.15,
                  win_lengths=[0.2, 0.1],
                  freq_bins=freq_bins)

    # Test if missing of noise covariance matrix is detected when more than
    # one channel type is present in the data
    pytest.raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs=None,
                  tmin=tmin,
                  tmax=tmax,
                  tstep=tstep,
                  win_lengths=win_lengths,
                  freq_bins=freq_bins)

    # Test if unsupported weight normalization specification is detected
    pytest.raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths,
                  freq_bins,
                  weight_norm='nai')

    # Test unsupported pick_ori (vector not supported here)
    with pytest.raises(ValueError, match='pick_ori'):
        tf_lcmv(epochs,
                forward,
                noise_covs,
                tmin,
                tmax,
                tstep,
                win_lengths,
                freq_bins,
                pick_ori='vector')
    # Test correct detection of preloaded epochs objects that do not contain
    # the underlying raw object
    epochs_preloaded = mne.Epochs(raw,
                                  events,
                                  event_id,
                                  tmin,
                                  tmax,
                                  proj=True,
                                  baseline=(None, 0),
                                  preload=True)
    epochs_preloaded._raw = None
    pytest.raises(ValueError, tf_lcmv, epochs_preloaded, forward, noise_covs,
                  tmin, tmax, tstep, win_lengths, freq_bins)

    # Pass only one epoch to test if subtracting evoked
    # responses yields zeros
    with pytest.warns(RuntimeWarning,
                      match='Too few samples .* estimate may be unreliable'):
        stcs = tf_lcmv(epochs[0],
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       subtract_evoked=True,
                       reg=reg,
                       label=label,
                       raw=raw)

    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))
Example #52
0
def GetTimeCourseFromSTC_flip(wdir, ListSub, Conds, mod, method, parc, colors,
                              avg_mode):
    import mne
    import os
    from mne.stats import fdr_correction
    from scipy import stats
    import numpy as np
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as pl

    ###############################################################
    #\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    def plot_panel_mean(Labels, Panel, Conds, stc, TC_mean, TC_std):

        pl.figure(figsize=(10, 14))
        for l, label in enumerate(Labels):

            ax = pl.subplot(6, 3, l + 1)
            combcondname = ''
            for c, cond in enumerate(Conds):

                ax.plot(stc.times,
                        TC_mean[c, label, :],
                        color=colors[c],
                        linewidth=2)
                ax.fill_between(stc.times,
                                TC_mean[c, label, :] - TC_std[c, label, :],
                                TC_mean[c, label, :] + TC_std[c, label, :],
                                alpha=0.2,
                                edgecolor=colors[c],
                                facecolor=colors[c],
                                linewidth=0)
                ax.yaxis.set_visible(False)
                ax.set_title(labels[label].name, fontsize=12)
                combcondname = combcondname + '_VS_' + Conds[c]
                pl.tight_layout()

        save_path = (wdir + '/PLOTS/' + parc + "/GROUP_" + mod + "_" + method +
                     "/" + mod + '_' + combcondname +
                     '_MEANSUBJ_notmorph_part' + str(Panel) + '.png')
        pl.savefig(save_path)
        pl.close()

    ###############################################################
    #\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    def plot_panel_stats(Labels, Panel, Conds, stc, TC_label, TC_mean, TC_std,
                         avg_mode):

        #######################################################################
        def cluster(data, maxgap):

            data.sort()
            groups = [[data[0]]]
            for x in data[1:]:
                if abs(x - groups[-1][-1]) <= maxgap:
                    groups[-1].append(x)
                else:
                    groups.append([x])
            return groups

        #######################################################################

        pl.figure(figsize=(10, 14))
        for l, label in enumerate(Labels):

            T, pval, threshold_uncorrected, threshold_fdr = [], [], [], []
            reject_fdr, pval_fdr, X = [], [], []

            # difference between conditions
            X = TC_label[0, :, label, :] - TC_label[1, :, label, :]

            T, pval = stats.ttest_1samp(X, 0)
            alpha = 0.05

            n_samples, n_tests = X.shape
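            # uncorrected one-tailed t threshold, then FDR across time points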
            threshold_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
            reject_fdr, pval_fdr = fdr_correction(pval,
                                                  alpha=alpha,
                                                  method='indep')

            temp_clust_uncorr, temp_clust_fdr = [], []
            clust_uncorr, clust_fdr = [], []
            if reject_fdr.any():

                threshold_fdr = np.min(np.abs(T)[reject_fdr])
                temp_clust_uncorr = np.where(abs(T) >= threshold_uncorrected)
                temp_clust_fdr = np.where(abs(T) >= threshold_fdr)

                clust_uncorr = cluster(temp_clust_uncorr[0], 1)
                clust_fdr = cluster(temp_clust_fdr[0], 1)

            ax = pl.subplot(6, 3, l + 1)
            combcondname = ''
            for c, cond in enumerate(Conds):

                # plot conditions and +-SEM
                ax.plot(stc.times,
                        TC_mean[c, label, :],
                        color=colors[c],
                        linewidth=2)
                ax.fill_between(stc.times,
                                TC_mean[c, label, :] - TC_std[c, label, :],
                                TC_mean[c, label, :] + TC_std[c, label, :],
                                alpha=0.1,
                                edgecolor=colors[c],
                                facecolor=colors[c],
                                linewidth=0)
                #ax.yaxis.set_visible(False)
                ax.set_title(labels[label].name, fontsize=12)
                combcondname = combcondname + '_VS_' + Conds[c]
                pl.tight_layout()

            # plot stats
            if reject_fdr.any():
                for clust in clust_uncorr:
                    ax.axvspan(stc.times[clust[0]],
                               stc.times[clust[-1] - 1],
                               color='k',
                               alpha=0.1)
            if reject_fdr.any():
                for clust in clust_fdr:
                    ax.axvspan(stc.times[clust[0]],
                               stc.times[clust[-1] - 1],
                               color='k',
                               alpha=0.2)

        PlotDir = []
        PlotDir = ('/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/PLOTS/' +
                   parc + '/GROUP_' + mod + '_' + method + '/' + avg_mode)

        if not os.path.exists(PlotDir):
            os.makedirs(PlotDir)

        save_path = (PlotDir + '/' + mod + '_' + combcondname +
                     '_MEANSUBJ_notmorph_' + avg_mode + '_stats_part' +
                     str(Panel) + '.png')
        pl.savefig(save_path)
        #pl.close()

    #################################################################
    #\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    def plot_panel_subj(ListSub, SubInd, Labels, Panel, Conds, stc, TC, TCsd):

        pl.figure(figsize=(10, 14))
        for l, label in enumerate(Labels):

            ax = pl.subplot(6, 3, l + 1)
            combcondname = ''
            for c, cond in enumerate(Conds):

                ax.plot(stc.times,
                        TC[c, SubInd, label, :],
                        color=colors[c],
                        linewidth=2)
                ax.fill_between(
                    stc.times,
                    TC[c, SubInd, label, :] - TCsd[c, SubInd, label, :],
                    TC[c, SubInd, label, :] + TCsd[c, SubInd, label, :],
                    alpha=0.2,
                    edgecolor=colors[c],
                    facecolor=colors[c],
                    linewidth=0)
                ax.yaxis.set_visible(False)
                ax.set_title(labels[label].name, fontsize=12)
                combcondname = combcondname + '_VS_' + Conds[c]
                pl.tight_layout()

        save_path = (wdir + '/PLOTS/' + parc + "/" + mod + "_" + method + "/" +
                     mod + '_' + combcondname + '_' + ListSub[SubInd] +
                     '_notmorph_part' + str(Panel) + '.png')
        pl.savefig(save_path)
        pl.close()

    ####################################################################
    #\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
    ## use fsaverage brain label with a chosen parcellation

    subjects_dir = '/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/mri/'
    labels = mne.read_labels_from_annot('fsaverage',
                                        parc,
                                        hemi='both',
                                        subjects_dir=subjects_dir)

    # init size
    stc0_path = (wdir + '/' + ListSub[0] + '/mne_python/STCS/IcaCorr_' + mod +
                 '_' + ListSub[0] + '_' + Conds[0] + '_pick_orinormal_' +
                 method + '_ico-5-fwd-fsaverage.fif-rh.stc')
    init_timepoints = mne.read_source_estimate(stc0_path)
    init_timepoints.crop(0, 1)
    ncond = len(Conds)
    nsub = len(ListSub)
    nlabel = len(labels)
    ntimes = len(init_timepoints.times)

    # get timecourse data from each subject & each label
    TC_Label = np.empty([ncond, nsub, nlabel, ntimes])
    TCsd_Label = np.empty([ncond, nsub, nlabel, ntimes])
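    # loop over labels (the last one, often 'unknown', is skipped here)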
    for l in range(nlabel - 1):
        for s, subj in enumerate(ListSub):
            labels = mne.read_labels_from_annot(subj,
                                                parc,
                                                hemi='both',
                                                subjects_dir=subjects_dir)

            # which modality?
            wdir_ = "/neurospin/meg/meg_tmp/MTT_MEG_Baptiste/MEG/"
            if mod == 'MEG':
                fname_fwd = (wdir_ + subj +
                             "/mne_python/run3_ico5_megonly_icacorr_-fwd.fif")
            elif mod == 'EEG':
                fname_fwd = (wdir_ + subj +
                             "/mne_python/run3_ico5_eegonly_icacorr_-fwd.fif")
            elif mod == 'MEEG':
                fname_fwd = (wdir_ + subj +
                             "/mne_python/run3_ico5_meeg_icacorr_-fwd.fif")

            for c, cond in enumerate(Conds):
                stc_path = (wdir + '/' + subj + '/mne_python/STCS/IcaCorr_' +
                            mod + '_' + subj + '_' + cond +
                            '_pick_orinormal_' + method +
                            '_ico-5-fwd.fif-rh.stc')
                forward = mne.read_forward_solution(fname_fwd)
                stc = mne.read_source_estimate(stc_path)
                stc.crop(0, 1)
                TC_Label[c, s,
                         l, :] = stc.extract_label_time_course(labels[l],
                                                               forward['src'],
                                                               mode=avg_mode)
                #TCsd_Label[c,s,l,:] = np.std(a.data[:,:], axis = 0)/np.sqrt(np.float32(a.shape[0]))
                del stc

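        # keep one source estimate as a time-axis template for plotting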
        stc = mne.read_source_estimate(stc_path)
        stc.crop(0, 1)

    # subplot 2: average across subjects
    TC_Label_mean = np.empty([ncond, nlabel, ntimes])
    TC_Label_std = np.empty([ncond, nlabel, ntimes])

    for l in range(len(labels) - 1):
        for c, cond in enumerate(Conds):
            TC_Label_mean[c, l, :] = np.mean(TC_Label[c, :, l, :], axis=0)
            TC_Label_std[c, l, :] = (np.std(
                TC_Label[c, :, l, :], axis=0)) / np.sqrt(np.float32(nsub))

    # plot source amplitudes panel per label
    plot_panel_stats(range(0, 18), 1, Conds, stc, TC_Label, TC_Label_mean,
                     TC_Label_std, avg_mode)
    plot_panel_stats(range(18, 36), 2, Conds, stc, TC_Label, TC_Label_mean,
                     TC_Label_std, avg_mode)
    plot_panel_stats(range(36, 54), 3, Conds, stc, TC_Label, TC_Label_mean,
                     TC_Label_std, avg_mode)
    plot_panel_stats(range(54, 67), 4, Conds, stc, TC_Label, TC_Label_mean,
                     TC_Label_std, avg_mode)
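
# A minimal, self-contained sketch of the FDR-threshold-and-cluster logic used
# in plot_panel_stats above, run on synthetic data. Everything below is an
# illustrative assumption, not part of the original pipeline.
import numpy as np
from scipy import stats
from mne.stats import fdr_correction

rng = np.random.RandomState(0)
X_demo = rng.randn(20, 100)      # 20 "subjects" x 100 time points
X_demo[:, 40:60] += 1.0          # inject an effect between samples 40 and 59

T_demo, p_demo = stats.ttest_1samp(X_demo, 0)
reject_fdr_demo, _ = fdr_correction(p_demo, alpha=0.05, method='indep')
if reject_fdr_demo.any():
    thresh_fdr = np.min(np.abs(T_demo)[reject_fdr_demo])
    sig = np.where(np.abs(T_demo) >= thresh_fdr)[0]
    # group consecutive significant samples into clusters (max gap of 1 sample)
    clusters = [[sig[0]]]
    for idx in sig[1:]:
        if idx - clusters[-1][-1] <= 1:
            clusters[-1].append(idx)
        else:
            clusters.append([idx])
    print([(c[0], c[-1]) for c in clusters])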
Example #53
0
def _read_forward_solution_meg(*args, **kwargs):
    fwd = read_forward_solution(*args)
    fwd = convert_forward_solution(fwd, **kwargs)
    return mne.pick_types_forward(fwd, meg=True, eeg=False)
Example #54
0
def get_mne_sample(tmin=-0.1,
                   tmax=0.4,
                   baseline=(None, 0),
                   sns=False,
                   src=None,
                   sub="modality=='A'",
                   ori='free',
                   snr=2,
                   method='dSPM',
                   rm=False,
                   stc=False,
                   hpf=0):
    """Load events and epochs from the MNE sample data

    Parameters
    ----------
    tmin : scalar
        Relative time of the first sample of the epoch.
    tmax : scalar
        Relative time of the last sample of the epoch.
    baseline : {None, tuple of 2 {scalar, None}}
        Period for baseline correction.
    sns : bool | str
        Add sensor space data as an NDVar in ``ds['meg']`` (default ``False``).
        Set to ``'grad'`` to load gradiometer data.
    src : False | 'ico' | 'vol'
        Add source space data as an NDVar in ``ds['src']`` (default ``False``).
    sub : str | list | None
        Expression for subset of events to load. For a very small dataset use e.g.
        ``[0,1]``.
    ori : 'free' | 'fixed' | 'vector'
        Orientation of sources.
    snr : scalar
        MNE inverse parameter.
    method : str
        MNE inverse parameter.
    rm : bool
        Pretend to be a repeated measures dataset (adds 'subject' variable).
    stc : bool
        Add mne SourceEstimate for source space data as ``ds['stc']`` (default
        ``False``).
    hpf : scalar
        High pass filter cutoff.

    Returns
    -------
    ds : Dataset
        Dataset with epochs from the MNE sample dataset in ``ds['epochs']``.
    """
    if ori == 'free':
        loose = 1
        fixed = False
        pick_ori = None
    elif ori == 'fixed':
        loose = 0
        fixed = True
        pick_ori = None
    elif ori == 'vector':
        if LooseVersion(mne.__version__) < LooseVersion('0.17'):
            raise RuntimeError(
                f'mne version {mne.__version__}; vector source estimates require mne 0.17'
            )
        loose = 1
        fixed = False
        pick_ori = 'vector'
    else:
        raise ValueError(f"ori={ori!r}")

    data_dir = mne.datasets.sample.data_path()
    meg_dir = os.path.join(data_dir, 'MEG', 'sample')
    raw_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40_raw.fif')
    event_file = os.path.join(meg_dir, 'sample_audvis_filt-0-40-eve.fif')
    subjects_dir = os.path.join(data_dir, 'subjects')
    subject = 'sample'
    label_path = os.path.join(subjects_dir, subject, 'label', '%s.label')

    if not os.path.exists(event_file):
        raw = mne.io.Raw(raw_file)
        events = mne.find_events(raw, stim_channel='STI 014')
        mne.write_events(event_file, events)
    ds = load.fiff.events(raw_file, events=event_file)
    if hpf:
        ds.info['raw'].load_data()
        ds.info['raw'].filter(hpf, None)
    ds.index()
    ds.info['subjects_dir'] = subjects_dir
    ds.info['subject'] = subject
    ds.info['label'] = label_path

    # get the trigger variable from the dataset for easier access
    trigger = ds['trigger']

    # use trigger to add various labels to the dataset
    ds['condition'] = Factor(trigger,
                             labels={
                                 1: 'LA',
                                 2: 'RA',
                                 3: 'LV',
                                 4: 'RV',
                                 5: 'smiley',
                                 32: 'button'
                             })
    ds['side'] = Factor(trigger,
                        labels={
                            1: 'L',
                            2: 'R',
                            3: 'L',
                            4: 'R',
                            5: 'None',
                            32: 'None'
                        })
    ds['modality'] = Factor(trigger,
                            labels={
                                1: 'A',
                                2: 'A',
                                3: 'V',
                                4: 'V',
                                5: 'None',
                                32: 'None'
                            })

    if rm:
        ds = ds.sub('trigger < 5')
        ds = ds.equalize_counts('side % modality')
        subject_f = ds.eval('side % modality').enumerate_cells()
        ds['subject'] = subject_f.as_factor('s%r', random=True)

    if sub:
        ds = ds.sub(sub)

    load.fiff.add_mne_epochs(ds, tmin, tmax, baseline)
    if sns:
        ds['meg'] = load.fiff.epochs_ndvar(ds['epochs'],
                                           data='mag' if sns is True else sns,
                                           sysname='neuromag')

    if not src:
        return ds
    elif src == 'ico':
        src_tag = 'ico-4'
    elif src == 'vol':
        src_tag = 'vol-10'
    else:
        raise ValueError("src = %r" % src)
    epochs = ds['epochs']

    # get inverse operator
    inv_file = os.path.join(meg_dir, f'sample_eelbrain_{src_tag}-inv.fif')
    if os.path.exists(inv_file):
        inv = mne.minimum_norm.read_inverse_operator(inv_file)
    else:
        fwd_file = os.path.join(meg_dir, 'sample-%s-fwd.fif' % src_tag)
        bem_dir = os.path.join(subjects_dir, subject, 'bem')
        bem_file = os.path.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif')
        trans_file = os.path.join(meg_dir, 'sample_audvis_raw-trans.fif')

        if os.path.exists(fwd_file):
            fwd = mne.read_forward_solution(fwd_file)
        else:
            src_ = _mne_source_space(subject, src_tag, subjects_dir)
            fwd = mne.make_forward_solution(epochs.info, trans_file, src_,
                                            bem_file)
            mne.write_forward_solution(fwd_file, fwd)

        cov_file = os.path.join(meg_dir, 'sample_audvis-cov.fif')
        cov = mne.read_cov(cov_file)
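        # `mn` is presumably an alias for mne.minimum_norm imported elsewhere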
        inv = mn.make_inverse_operator(epochs.info,
                                       fwd,
                                       cov,
                                       loose=loose,
                                       depth=None,
                                       fixed=fixed)
        mne.minimum_norm.write_inverse_operator(inv_file, inv)
    ds.info['inv'] = inv

    stcs = mn.apply_inverse_epochs(epochs,
                                   inv,
                                   1. / (snr**2),
                                   method,
                                   pick_ori=pick_ori)
    ds['src'] = load.fiff.stc_ndvar(stcs, subject, src_tag, subjects_dir,
                                    method, fixed)
    if stc:
        ds['stc'] = stcs

    return ds
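
# A minimal usage sketch for get_mne_sample (illustrative parameter values;
# assumes the rest of this module, e.g. `load` and `Factor`, is importable).
if __name__ == '__main__':
    ds_demo = get_mne_sample(tmin=-0.1, tmax=0.3, src='ico',
                             sub="modality=='A'", ori='free', method='dSPM')
    print(len(ds_demo['epochs']), 'epochs loaded')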
Example #55
0
def _get_data(tmin=-0.1,
              tmax=0.15,
              all_forward=True,
              epochs=True,
              epochs_preload=True,
              data_cov=True,
              proj=True):
    """Read in data used in tests."""
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True)
    forward = mne.read_forward_solution(fname_fwd)
    if all_forward:
        forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True)
        forward_fixed = _read_forward_solution_meg(fname_fwd,
                                                   force_fixed=True,
                                                   surf_ori=True,
                                                   use_cps=False)
        forward_vol = _read_forward_solution_meg(fname_fwd_vol)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None

    event_id, tmin, tmax = 1, tmin, tmax

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info, selection=left_temporal_channels)
    picks = picks[::2]  # decimate for speed
    # add a couple channels we will consider bad
    bad_picks = [100, 101]
    bads = [raw.ch_names[pick] for pick in bad_picks]
    assert not any(pick in picks for pick in bad_picks)
    picks = np.concatenate([picks, bad_picks])
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    del picks

    raw.info['bads'] = bads  # add more bads
    if proj:
        raw.info.normalize_proj()  # avoid projection warnings
    else:
        raw.del_proj()

    if epochs:
        # Read epochs
        epochs = mne.Epochs(raw,
                            events,
                            event_id,
                            tmin,
                            tmax,
                            proj=True,
                            baseline=(None, 0),
                            preload=epochs_preload,
                            reject=reject)
        if epochs_preload:
            epochs.resample(200, npad=0)
        epochs.crop(0, None)
        evoked = epochs.average()
        info = evoked.info
    else:
        epochs = None
        evoked = None
        info = raw.info

    noise_cov = mne.read_cov(fname_cov)
    noise_cov['projs'] = []  # avoid warning
    noise_cov = mne.cov.regularize(noise_cov,
                                   info,
                                   mag=0.05,
                                   grad=0.05,
                                   eeg=0.1,
                                   proj=True,
                                   rank=None)
    if data_cov:
        data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)
    else:
        data_cov = None

    return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
Example #56
0
def test_plot_alignment(tmpdir, renderer):
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # generate fiducials file for testing
    tempdir = str(tmpdir)
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
            'r': [-0.08061612, -0.02908875, -0.04131077]},
           {'coord_frame': 5, 'ident': 2, 'kind': 1,
            'r': [0.00146763, 0.08506715, -0.03483611]},
           {'coord_frame': 5, 'ident': 3, 'kind': 1,
            'r': [0.08436285, -0.02850276, -0.04127743]}]
    write_dig(fiducials_path, fid, 5)

    renderer.backend._close_all()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
                       preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        meg = ['helmet', 'sensors']
        if system == 'KIT':
            meg.append('ref')
        fig = plot_alignment(info, read_trans(trans_fname), subject='sample',
                             subjects_dir=subjects_dir, meg=meg)
        rend = renderer.backend._Renderer(fig=fig)
        rend.close()
    # KIT ref sensor coil def is defined
    renderer.backend._close_all()
    info = infos['Neuromag']
    pytest.raises(TypeError, plot_alignment, 'foo', trans_fname,
                  subject='sample', subjects_dir=subjects_dir)
    pytest.raises(OSError, plot_alignment, info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir, src='foo')
    pytest.raises(ValueError, plot_alignment, info, trans_fname,
                  subject='fsaverage', subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
                    brain='white')
    renderer.backend._close_all()
    # no-head version
    renderer.backend._close_all()
    # all coord frames
    plot_alignment(info)  # works: surfaces='auto' default
    for coord_frame in ('meg', 'head', 'mri'):
        fig = plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
                             coord_frame=coord_frame, trans=Path(trans_fname),
                             subject='sample', mri_fiducials=fiducials_path,
                             subjects_dir=subjects_dir, src=src_fname)
    renderer.backend._close_all()
    # EEG only with strange options
    evoked_eeg_ecog_seeg = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog_seeg.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog_seeg.set_channel_types({'EEG 001': 'ecog',
                                            'EEG 002': 'seeg'})
    with pytest.warns(RuntimeWarning, match='Cannot plot MEG'):
        plot_alignment(evoked_eeg_ecog_seeg.info, subject='sample',
                       trans=trans_fname, subjects_dir=subjects_dir,
                       surfaces=['white', 'outer_skin', 'outer_skull'],
                       meg=['helmet', 'sensors'],
                       eeg=['original', 'projected'], ecog=True, seeg=True)
    renderer.backend._close_all()

    sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
    bem_sol = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
                                        'sample-1280-1280-1280-bem-sol.fif'))
    bem_surfs = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem',
                                          'sample-1280-1280-1280-bem.fif'))
    sample_src[0]['coord_frame'] = 4  # hack for coverage
    plot_alignment(info, subject='sample', eeg='projected',
                   meg='helmet', bem=sphere, dig=True,
                   surfaces=['brain', 'inner_skull', 'outer_skull',
                             'outer_skin'])
    plot_alignment(info, trans_fname, subject='sample', meg='helmet',
                   subjects_dir=subjects_dir, eeg='projected', bem=sphere,
                   surfaces=['head', 'brain'], src=sample_src)
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample', meg=[],
                   subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
                   surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
    assert all(surf['coord_frame'] == FIFF.FIFFV_COORD_MRI
               for surf in bem_sol['surfs'])
    plot_alignment(info, trans_fname, subject='sample',
                   meg=True, subjects_dir=subjects_dir,
                   surfaces=['head', 'inner_skull'], bem=bem_surfs)
    # single-layer BEM can still plot head surface
    assert bem_surfs[-1]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN
    bem_sol_homog = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
                                              'sample-1280-bem-sol.fif'))
    for use_bem in (bem_surfs[-1:], bem_sol_homog):
        with catch_logging() as log:
            plot_alignment(info, trans_fname, subject='sample',
                           meg=True, subjects_dir=subjects_dir,
                           surfaces=['head', 'inner_skull'], bem=use_bem,
                           verbose=True)
        log = log.getvalue()
        assert 'not find the surface for head in the provided BEM model' in log
    # sphere model
    sphere = make_sphere_model('auto', 'auto', evoked.info)
    src = setup_volume_source_space(sphere=sphere)
    plot_alignment(info, eeg='projected', meg='helmet', bem=sphere,
                   src=src, dig=True, surfaces=['brain', 'inner_skull',
                                                'outer_skull', 'outer_skin'])
    sphere = make_sphere_model('auto', None, evoked.info)  # one layer
    # no info is permitted
    fig = plot_alignment(trans=trans_fname, subject='sample', meg=False,
                         coord_frame='mri', subjects_dir=subjects_dir,
                         surfaces=['brain'], bem=sphere, show_axes=True)
    renderer.backend._close_all()
    if renderer._get_3d_backend() == 'mayavi':
        import mayavi  # noqa: F401 analysis:ignore
        assert isinstance(fig, mayavi.core.scene.Scene)

    # 3D coil with no defined draw (ConvexHull)
    info_cube = pick_info(info, [0])
    info['dig'] = None
    info_cube['chs'][0]['coil_type'] = 9999
    with pytest.raises(RuntimeError, match='coil definition not found'):
        plot_alignment(info_cube, meg='sensors', surfaces=())
    coil_def_fname = op.join(tempdir, 'temp')
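    # coil_3d: assumed to be a coil definition string defined at module level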
    with open(coil_def_fname, 'w') as fid:
        fid.write(coil_3d)
    with use_coil_def(coil_def_fname):
        plot_alignment(info_cube, meg='sensors', surfaces=(), dig=True)

    # one layer bem with skull surfaces:
    with pytest.raises(ValueError, match='sphere conductor model must have'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=['brain', 'head', 'inner_skull'], bem=sphere)
    # wrong eeg value:
    with pytest.raises(ValueError, match='Invalid value for the .eeg'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir, eeg='foo')
    # wrong meg value:
    with pytest.raises(ValueError, match='Invalid value for the .meg'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir, meg='bar')
    # multiple brain surfaces:
    with pytest.raises(ValueError, match='Only one brain surface can be plot'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=['white', 'pial'])
    with pytest.raises(TypeError, match='all entries in surfaces must be'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=[1])
    with pytest.raises(ValueError, match='Unknown surface type'):
        plot_alignment(info=info, trans=trans_fname,
                       subject='sample', subjects_dir=subjects_dir,
                       surfaces=['foo'])
    fwd_fname = op.join(data_dir, 'MEG', 'sample',
                        'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
    fwd = read_forward_solution(fwd_fname)
    plot_alignment(subject='sample', subjects_dir=subjects_dir,
                   trans=trans_fname, fwd=fwd,
                   surfaces='white', coord_frame='head')
    fwd = convert_forward_solution(fwd, force_fixed=True)
    plot_alignment(subject='sample', subjects_dir=subjects_dir,
                   trans=trans_fname, fwd=fwd,
                   surfaces='white', coord_frame='head')

    # fNIRS
    info = read_raw_nirx(nirx_fname).info
    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True)
    log = log.getvalue()
    assert '26 fnirs pairs' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs='channels')
    log = log.getvalue()
    assert '26 fnirs locations' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs='pairs')
    log = log.getvalue()
    assert '26 fnirs pairs' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs='sources')
    log = log.getvalue()
    assert '26 fnirs sources' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs='detectors')
    log = log.getvalue()
    assert '26 fnirs detectors' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs=['channels', 'pairs'])
    log = log.getvalue()
    assert '26 fnirs pairs' in log
    assert '26 fnirs locations' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs=['pairs', 'sources', 'detectors'])
    log = log.getvalue()
    assert '26 fnirs pairs' in log
    assert '26 fnirs sources' in log
    assert '26 fnirs detectors' in log

    with catch_logging() as log:
        plot_alignment(info, subject='fsaverage', surfaces=(), verbose=True,
                       fnirs=['channels', 'pairs', 'sources', 'detectors'])
    log = log.getvalue()
    assert '26 fnirs pairs' in log
    assert '26 fnirs locations' in log
    assert '26 fnirs sources' in log
    assert '26 fnirs detectors' in log

    renderer.backend._close_all()
Example #57
0
def _get_data(tmin=-0.1,
              tmax=0.15,
              all_forward=True,
              epochs=True,
              epochs_preload=True,
              data_cov=True):
    """Read in data used in tests
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True, add_eeg_ref=False)
    forward = mne.read_forward_solution(fname_fwd)
    if all_forward:
        forward_surf_ori = read_forward_solution_meg(fname_fwd, surf_ori=True)
        forward_fixed = read_forward_solution_meg(fname_fwd,
                                                  force_fixed=True,
                                                  surf_ori=True)
        forward_vol = read_forward_solution_meg(fname_fwd_vol, surf_ori=True)
    else:
        forward_surf_ori = None
        forward_fixed = None
        forward_vol = None

    event_id, tmin, tmax = 1, tmin, tmax

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info,
                           meg=True,
                           eeg=False,
                           stim=True,
                           eog=True,
                           ref_meg=False,
                           exclude='bads',
                           selection=left_temporal_channels)
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    raw.info.normalize_proj()  # avoid projection warnings

    if epochs:
        # Read epochs
        epochs = mne.Epochs(raw,
                            events,
                            event_id,
                            tmin,
                            tmax,
                            proj=True,
                            baseline=(None, 0),
                            preload=epochs_preload,
                            reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6),
                            add_eeg_ref=False)
        if epochs_preload:
            epochs.resample(200, npad=0, n_jobs=2)
        evoked = epochs.average()
        info = evoked.info
    else:
        epochs = None
        evoked = None
        info = raw.info

    noise_cov = mne.read_cov(fname_cov)
    with warnings.catch_warnings(record=True):  # bad proj
        noise_cov = mne.cov.regularize(noise_cov,
                                       info,
                                       mag=0.05,
                                       grad=0.05,
                                       eeg=0.1,
                                       proj=True)
    if data_cov:
        with warnings.catch_warnings(record=True):  # too few samples
            data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.145)
    else:
        data_cov = None

    return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol
Example #58
0
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
                      'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)

###############################################################################
# To save time and memory, the forward solution is read from a file. Set
# ``use_precomputed=False`` at the beginning of this script to build the
# forward solution from scratch. The head surfaces for constructing a BEM
# solution are read from a file. Since the data only contains MEG channels, we
# only need the inner skull surface for making the forward solution. For more
# information: :ref:`CHDBBCEJ`, :class:`mne.setup_source_space`,
# :ref:`create_bem_model`, :func:`mne.bem.make_watershed_bem`.
if use_precomputed:
    fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
                        'bst_auditory-meg-oct-6-fwd.fif')
    fwd = mne.read_forward_solution(fwd_fname)
else:
    src = mne.setup_source_space(subject,
                                 spacing='ico4',
                                 subjects_dir=subjects_dir,
                                 overwrite=True)
    model = mne.make_bem_model(subject=subject,
                               ico=4,
                               conductivity=[0.3],
                               subjects_dir=subjects_dir)
    bem = mne.make_bem_solution(model)
    fwd = mne.make_forward_solution(evoked_std.info,
                                    trans=trans,
                                    src=src,
                                    bem=bem)
Example #59
0
    def aud_dataset(self):
        print('Treat: ', self.treat)
        ###############################################################################
        # Setup for reading the raw data
        data_path = sample.data_path()
        raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
        event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
        tmin = -0.2  # start of each epoch (200ms before the trigger)
        tmax = 0.5  # end of each epoch (500ms after the trigger)
        self.subject = 'sample'
        raw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False, preload=True,
                                  verbose=False)

        #raw.set_eeg_reference()  # set EEG average reference

        baseline = (None, 0)  # means from the first instant to t = 0

        reject = dict(mag=4e-12, eog=150e-6)

        events = mne.read_events(event_fname)

        picks = mne.pick_types(raw.info, meg='mag', eeg=True, eog=True,
                               exclude='bads')  # for simplicity, ignore grad channels

        raw.rename_channels(mapping={'EOG 061': 'EOG'})

        event_id = {'left/auditory': 1, 'right/auditory': 2,
                    'left/visual': 3, 'right/visual': 4}

        epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                            picks=picks, baseline=baseline, reject=reject,
                            add_eeg_ref=False, preload=True, verbose=False)
        if self.treat is not None:
            self.epochs_eeg = epochs[self.treat].copy().pick_types(eeg=True, meg=False)
            self.epochs_meg = epochs[self.treat].copy().pick_types(meg=True, eeg=False)
        else:
            self.epochs_eeg = epochs.copy().pick_types(eeg=True, meg=False)
            self.epochs_meg = epochs.copy().pick_types(meg=True, eeg=False)

        noise_cov = mne.compute_covariance(
            epochs, tmax=0., method=['shrunk', 'empirical'], verbose=False)

        ###############################################################################
        # Inverse modeling: MNE/dSPM on evoked and raw data
        # -------------------------------------------------

        # Read the forward solution and compute the inverse operator

        fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-5-fwd.fif'
        fwd = mne.read_forward_solution(fname_fwd, surf_ori=True,
                                        verbose=False)

        # Restrict forward solution as necessary
        fwd = mne.pick_types_forward(fwd, meg=True, eeg=True)

        # make an inverse operator
        info = epochs.info
        inverse_operator = make_inverse_operator(info, fwd, noise_cov,
                                                 loose=0.2, depth=0.8,
                                                 verbose=False)

        write_inverse_operator('sample_audvis-meg-oct-5-inv.fif',
                               inverse_operator, verbose=False)

        ###############################################################################
        # Compute inverse solution
        # ------------------------

        method = "MNE"
        snr = 3.
        lambda2 = 1. / snr ** 2
        if self.treat is not None:
            self.stc = apply_inverse_epochs(epochs[self.treat], inverse_operator,
                                            lambda2, method=method,
                                            pick_ori=None, verbose=False)
        else:
            self.stc = apply_inverse_epochs(epochs, inverse_operator, lambda2,
                                            method=method, pick_ori=None,
                                            verbose=False)
        if self.Wt is None:
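            # Wt_calc and prepro are assumed to be defined elsewhere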
            print('Precalculate PCA weights:')
            # weight PCA matrix. Uses 'self.treat' - so to apply across all
            # treatments, use treat=None
            self.Wt = Wt_calc(self.stc, self.epochs_eeg, self.epochs_meg)

        #stc.save('sample_audvis-source-epochs')
        self.prepro()
Example #60
0
def test_tf_lcmv():
    """Test TF beamforming based on LCMV
    """
    label = mne.read_label(fname_label)
    events = mne.read_events(fname_event)
    raw = mne.io.read_raw_fif(fname_raw, preload=True, add_eeg_ref=False)
    forward = mne.read_forward_solution(fname_fwd)

    event_id, tmin, tmax = 1, -0.2, 0.2

    # Setup for reading the raw data
    raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels

    # Set up pick list: MEG - bad channels
    left_temporal_channels = mne.read_selection('Left-temporal')
    picks = mne.pick_types(raw.info,
                           meg=True,
                           eeg=False,
                           stim=True,
                           eog=True,
                           exclude='bads',
                           selection=left_temporal_channels)
    raw.pick_channels([raw.ch_names[ii] for ii in picks])
    raw.info.normalize_proj()  # avoid projection warnings
    del picks

    # Read epochs
    epochs = mne.Epochs(raw,
                        events,
                        event_id,
                        tmin,
                        tmax,
                        proj=True,
                        baseline=None,
                        preload=False,
                        reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6),
                        add_eeg_ref=False)
    epochs.drop_bad()

    freq_bins = [(4, 12), (15, 40)]
    time_windows = [(-0.1, 0.1), (0.0, 0.2)]
    win_lengths = [0.2, 0.2]
    tstep = 0.1
    reg = 0.05

    source_power = []
    noise_covs = []
    for (l_freq, h_freq), win_length in zip(freq_bins, win_lengths):
        raw_band = raw.copy()
        raw_band.filter(l_freq,
                        h_freq,
                        method='iir',
                        n_jobs=1,
                        iir_params=dict(output='ba'))
        epochs_band = mne.Epochs(raw_band,
                                 epochs.events,
                                 epochs.event_id,
                                 tmin=tmin,
                                 tmax=tmax,
                                 baseline=None,
                                 proj=True,
                                 add_eeg_ref=False)
        with warnings.catch_warnings(record=True):  # not enough samples
            noise_cov = compute_covariance(epochs_band,
                                           tmin=tmin,
                                           tmax=tmin + win_length)
        noise_cov = mne.cov.regularize(noise_cov,
                                       epochs_band.info,
                                       mag=reg,
                                       grad=reg,
                                       eeg=reg,
                                       proj=True)
        noise_covs.append(noise_cov)
        del raw_band  # to save memory

        # Manually calculating source power in one frequency band and several
        # time windows to compare to tf_lcmv results and test overlapping
        if (l_freq, h_freq) == freq_bins[0]:
            for time_window in time_windows:
                with warnings.catch_warnings(record=True):  # bad samples
                    data_cov = compute_covariance(epochs_band,
                                                  tmin=time_window[0],
                                                  tmax=time_window[1])
                with warnings.catch_warnings(record=True):  # bad proj
                    stc_source_power = _lcmv_source_power(epochs.info,
                                                          forward,
                                                          noise_cov,
                                                          data_cov,
                                                          reg=reg,
                                                          label=label)
                source_power.append(stc_source_power.data)

    with warnings.catch_warnings(record=True):
        stcs = tf_lcmv(epochs,
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       reg=reg,
                       label=label)

    assert_true(len(stcs) == len(freq_bins))
    assert_true(stcs[0].shape[1] == 4)

    # Averaging all time windows that overlap the time period 0 to 100 ms
    source_power = np.mean(source_power, axis=0)

    # Selecting the first frequency bin in tf_lcmv results
    stc = stcs[0]

    # Comparing tf_lcmv results with _lcmv_source_power results
    assert_array_almost_equal(stc.data[:, 2], source_power[:, 0])

    # Test if using unsupported max-power orientation is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths,
                  freq_bins=freq_bins,
                  pick_ori='max-power')

    # Test if an incorrect number of noise covariances is detected
    assert_raises(ValueError, tf_lcmv, epochs, forward, [noise_covs[0]], tmin,
                  tmax, tstep, win_lengths, freq_bins)

    # Test if freq_bins and win_lengths incompatibility is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep,
                  win_lengths=[0, 1, 2],
                  freq_bins=freq_bins)

    # Test if time step exceeding window lengths is detected
    assert_raises(ValueError,
                  tf_lcmv,
                  epochs,
                  forward,
                  noise_covs,
                  tmin,
                  tmax,
                  tstep=0.15,
                  win_lengths=[0.2, 0.1],
                  freq_bins=freq_bins)

    # Test correct detection of preloaded epochs objects that do not contain
    # the underlying raw object
    epochs_preloaded = mne.Epochs(raw,
                                  events,
                                  event_id,
                                  tmin,
                                  tmax,
                                  proj=True,
                                  baseline=(None, 0),
                                  preload=True,
                                  add_eeg_ref=False)
    epochs_preloaded._raw = None
    with warnings.catch_warnings(record=True):  # not enough samples
        assert_raises(ValueError, tf_lcmv, epochs_preloaded, forward,
                      noise_covs, tmin, tmax, tstep, win_lengths, freq_bins)

    with warnings.catch_warnings(record=True):  # not enough samples
        # Pass only one epoch to test if subtracting evoked
        # responses yields zeros
        stcs = tf_lcmv(epochs[0],
                       forward,
                       noise_covs,
                       tmin,
                       tmax,
                       tstep,
                       win_lengths,
                       freq_bins,
                       subtract_evoked=True,
                       reg=reg,
                       label=label)

    assert_array_almost_equal(stcs[0].data, np.zeros_like(stcs[0].data))