Example #1
def test_clean_info_bads():
    """Test cleaning info['bads'] when bad_channels are excluded """

    raw_file = op.join(op.dirname(__file__), "io", "tests", "data", "test_raw.fif")
    raw = Raw(raw_file)

    # select eeg channels
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    # select 3 eeg channels as bads
    idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
    eeg_bad_ch = [raw.info["ch_names"][k] for k in idx_eeg_bad_ch]

    # select meg channels
    picks_meg = pick_types(raw.info, meg=True, eeg=False)

    # select 3 meg channels as bads
    idx_meg_bad_ch = picks_meg[[0, 15, 34]]
    meg_bad_ch = [raw.info["ch_names"][k] for k in idx_meg_bad_ch]

    # simulate the bad channels
    raw.info["bads"] = eeg_bad_ch + meg_bad_ch

    # simulate the call to pick_info excluding the bad eeg channels
    info_eeg = pick_info(raw.info, picks_eeg)

    # simulate the call to pick_info excluding the bad meg channels
    info_meg = pick_info(raw.info, picks_meg)

    assert_equal(info_eeg["bads"], eeg_bad_ch)
    assert_equal(info_meg["bads"], meg_bad_ch)
Example #2
def test_pick_refs():
    """Test picking of reference sensors
    """
    infos = list()
    # KIT
    kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    infos.append(raw_kit.info)
    # BTi
    bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    with warnings.catch_warnings(record=True):  # weight tables
        raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    infos.append(raw_bti.info)
    # CTF
    fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
    raw_ctf = read_raw_fif(fname_ctf_raw, add_eeg_ref=False)
    raw_ctf.apply_gradient_compensation(2)
    infos.append(raw_ctf.info)
    for info in infos:
        info['bads'] = []
        assert_raises(ValueError, pick_types, info, meg='foo')
        assert_raises(ValueError, pick_types, info, ref_meg='foo')
        picks_meg_ref = pick_types(info, meg=True, ref_meg=True)
        picks_meg = pick_types(info, meg=True, ref_meg=False)
        picks_ref = pick_types(info, meg=False, ref_meg=True)
        assert_array_equal(picks_meg_ref,
                           np.sort(np.concatenate([picks_meg, picks_ref])))
        picks_grad = pick_types(info, meg='grad', ref_meg=False)
        picks_ref_grad = pick_types(info, meg=False, ref_meg='grad')
        picks_meg_ref_grad = pick_types(info, meg='grad', ref_meg='grad')
        assert_array_equal(picks_meg_ref_grad,
                           np.sort(np.concatenate([picks_grad,
                                                   picks_ref_grad])))
        picks_mag = pick_types(info, meg='mag', ref_meg=False)
        picks_ref_mag = pick_types(info, meg=False, ref_meg='mag')
        picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag')
        assert_array_equal(picks_meg_ref_mag,
                           np.sort(np.concatenate([picks_mag,
                                                   picks_ref_mag])))
        assert_array_equal(picks_meg,
                           np.sort(np.concatenate([picks_mag, picks_grad])))
        assert_array_equal(picks_ref,
                           np.sort(np.concatenate([picks_ref_mag,
                                                   picks_ref_grad])))
        assert_array_equal(picks_meg_ref, np.sort(np.concatenate(
            [picks_grad, picks_mag, picks_ref_grad, picks_ref_mag])))
        for pick in (picks_meg_ref, picks_meg, picks_ref,
                     picks_grad, picks_ref_grad, picks_meg_ref_grad,
                     picks_mag, picks_ref_mag, picks_meg_ref_mag):
            if len(pick) > 0:
                pick_info(info, pick)
Example #3
def test_csr_csc():
    """Test CSR and CSC."""
    info = read_info(sss_ctc_fname)
    info = pick_info(info, pick_types(info, meg=True, exclude=[]))
    sss_ctc = info['proc_history'][0]['max_info']['sss_ctc']
    ct = sss_ctc['decoupler'].copy()
    # CSC
    assert isinstance(ct, sparse.csc_matrix)
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test.fif')
    write_info(fname, info)
    info_read = read_info(fname)
    ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler']
    assert isinstance(ct_read, sparse.csc_matrix)
    assert_array_equal(ct_read.toarray(), ct.toarray())
    # Now CSR
    csr = ct.tocsr()
    assert isinstance(csr, sparse.csr_matrix)
    assert_array_equal(csr.toarray(), ct.toarray())
    info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr
    fname = op.join(tempdir, 'test1.fif')
    write_info(fname, info)
    info_read = read_info(fname)
    ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler']
    assert isinstance(ct_read, sparse.csc_matrix)  # this gets cast to CSC
    assert_array_equal(ct_read.toarray(), ct.toarray())
Example #4
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull."""
    subject = 'sample'
    raw = read_raw_fif(fname_raw, preload=True)

    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)

    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])

    # Simulated scalp map
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1

    simulated_scalp_map = simulated_scalp_map[:, None]

    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)

    min_dist = 5.  # distance in mm

    bem = read_bem_solution(fname_bem)
    dip, residual = fit_dipole(evoked, cov, bem, fname_trans,
                               min_dist=min_dist)

    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)

    # Constraints are not exact, so bump the minimum slightly
    assert_true(min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))

    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
                  -1.)
Example #5
def make_montage(info, kind, check=False):
    from . import _reorder
    assert kind in ('mgh60', 'mgh70', 'uw_70', 'uw_60')
    picks = mne.pick_types(info, meg=False, eeg=True, exclude=())
    if kind in ('mgh60', 'mgh70'):
        ch_names = mne.utils._clean_names(
            [info['ch_names'][pick] for pick in picks], remove_whitespace=True)
        if kind == 'mgh60':
            assert len(ch_names) in (59, 60)
        else:
            assert len(ch_names) in (70,)
        montage = mne.channels.read_montage(kind, ch_names=ch_names)
    else:
        ch_names = getattr(_reorder, 'ch_names_' + kind)
        montage = mne.channels.read_montage('standard_1020', ch_names=ch_names)
        assert len(montage.ch_names) == len(ch_names)
        montage.ch_names = ['EEG%03d' % ii for ii in range(1, 61)]
    sphere = mne.make_sphere_model('auto', 'auto', info)
    montage.pos /= np.linalg.norm(montage.pos, axis=-1, keepdims=True)
    montage.pos *= sphere.radius
    montage.pos += sphere['r0']
    info = mne.pick_info(info, picks)
    eeg_pos = np.array([ch['loc'][:3] for ch in info['chs']])
    assert len(eeg_pos) == len(montage.pos), (len(eeg_pos), len(montage.pos))
    if check:
        from mayavi import mlab
        mlab.figure(size=(800, 800))
        mlab.points3d(*sphere['r0'], scale_factor=2 * sphere.radius,
                      color=(0., 0., 1.), opacity=0.1, mode='sphere')
        mlab.points3d(*montage.pos.T, scale_factor=0.01,
                      color=(1, 0, 0), mode='sphere', opacity=0.5)
        mlab.points3d(*eeg_pos.T, scale_factor=0.005, color=(1, 1, 1),
                      mode='sphere', opacity=1)
    return montage, sphere
Example #6
def test_clean_info_bads():
    """Test cleaning info['bads'] when bad_channels are excluded """

    raw_file = op.join(op.dirname(__file__), 'io', 'tests', 'data',
                       'test_raw.fif')
    raw = Raw(raw_file)

    # select eeg channels
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    # select 3 eeg channels as bads
    idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
    eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch]

    # select meg channels
    picks_meg = pick_types(raw.info, meg=True, eeg=False)

    # select 3 meg channels as bads
    idx_meg_bad_ch = picks_meg[[0, 15, 34]]
    meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch]

    # simulate the bad channels
    raw.info['bads'] = eeg_bad_ch + meg_bad_ch

    # simulate the call to pick_info excluding the bad eeg channels
    info_eeg = pick_info(raw.info, picks_eeg)

    # simulate the call to pick_info excluding the bad meg channels
    info_meg = pick_info(raw.info, picks_meg)

    assert_equal(info_eeg['bads'], eeg_bad_ch)
    assert_equal(info_meg['bads'], meg_bad_ch)

    info = pick_info(raw.info, picks_meg)
    info._check_consistency()
    info['bads'] += ['EEG 053']
    assert_raises(RuntimeError, info._check_consistency)
    info = pick_info(raw.info, picks_meg)
    info._check_consistency()
    info['ch_names'][0] += 'f'
    assert_raises(RuntimeError, info._check_consistency)
    info = pick_info(raw.info, picks_meg)
    info._check_consistency()
    info['nchan'] += 1
    assert_raises(RuntimeError, info._check_consistency)
Example #7
def _interpolate_bads_meg_fast(inst, picks, mode='accurate',
                               dots=None, verbose=None):
    """Interpolate bad channels from data in good channels."""
    # We may receive a pre-picked instance or not, so we need to handle both
    # cases.

    inst_picked = True
    if len(inst.ch_names) > len(picks):
        picked_info = pick_info(inst.info, picks)
        dots = _pick_dots(dots, picks, picks)
        inst_picked = False
    else:
        picked_info = inst.info.copy()

    def get_picks_bad_good(info, picks_meg):
        picks_good = [p for p in picks_meg
                      if info['ch_names'][p] not in info['bads']]

        # select the bad meg channel to be interpolated
        if len(info['bads']) == 0:
            picks_bad = []
        else:
            picks_bad = [p for p in picks_meg
                         if info['ch_names'][p] in info['bads']]
        return picks_meg, picks_good, picks_bad

    picks_meg, picks_good, picks_bad = get_picks_bad_good(
        picked_info, range(picked_info['nchan']))
    # return without doing anything if there are no meg channels
    if len(picks_meg) == 0 or len(picks_bad) == 0:
        return

    # we need to make sure that only meg channels are passed here
    # as the MNE interpolation code is not forgiving.
    # This is why we picked the info.
    mapping = _fast_map_meg_channels(
        picked_info, pick_from=picks_good, pick_to=picks_bad,
        dots=dots, mode=mode)
    # the downside is that the mapping matrix now does not match
    # the unpicked info of the data.
    # Since we may have picked the info, we need to double map
    # the indices.
    _, picks_good_, picks_bad_orig = get_picks_bad_good(
        inst.info, picks)
    ch_names_a = [picked_info['ch_names'][pp] for pp in picks_bad]
    ch_names_b = [inst.info['ch_names'][pp] for pp in picks_bad_orig]
    assert ch_names_a == ch_names_b
    if not inst_picked:
        picks_good_ = [pp for pp in picks if pp in picks_good_]
    assert len(picks_good_) == len(picks_good)
    ch_names_a = [picked_info['ch_names'][pp] for pp in picks_good]
    ch_names_b = [inst.info['ch_names'][pp] for pp in picks_good_]
    assert ch_names_a == ch_names_b
    _do_interp_dots(inst, mapping, picks_good_, picks_bad_orig)
Example #8
def test_clean_info_bads():
    """Test cleaning info['bads'] when bad_channels are excluded."""
    raw_file = op.join(op.dirname(_root_init_fname), 'io', 'tests', 'data',
                       'test_raw.fif')
    raw = read_raw_fif(raw_file)
    _assert_channel_types(raw.info)

    # select eeg channels
    picks_eeg = pick_types(raw.info, meg=False, eeg=True)

    # select 3 eeg channels as bads
    idx_eeg_bad_ch = picks_eeg[[1, 5, 14]]
    eeg_bad_ch = [raw.info['ch_names'][k] for k in idx_eeg_bad_ch]

    # select meg channels
    picks_meg = pick_types(raw.info, meg=True, eeg=False)

    # select 3 meg channels as bads
    idx_meg_bad_ch = picks_meg[[0, 15, 34]]
    meg_bad_ch = [raw.info['ch_names'][k] for k in idx_meg_bad_ch]

    # simulate the bad channels
    raw.info['bads'] = eeg_bad_ch + meg_bad_ch

    # simulate the call to pick_info excluding the bad eeg channels
    info_eeg = pick_info(raw.info, picks_eeg)

    # simulate the call to pick_info excluding the bad meg channels
    info_meg = pick_info(raw.info, picks_meg)

    assert_equal(info_eeg['bads'], eeg_bad_ch)
    assert_equal(info_meg['bads'], meg_bad_ch)

    info = pick_info(raw.info, picks_meg)
    info._check_consistency()
    info['bads'] += ['EEG 053']
    pytest.raises(RuntimeError, info._check_consistency)
    with pytest.raises(ValueError, match='unique'):
        pick_info(raw.info, [0, 0])
Example #9
def _read_epochs(epochs_mat_fname, info):
    """ read the epochs from matfile """
    data = scio.loadmat(epochs_mat_fname,
                        squeeze_me=True)['data']
    ch_names = [ch for ch in data['label'].tolist()]
    info['sfreq'] = data['fsample'].tolist()
    data = np.array([data['trial'].tolist()][0].tolist())
    events = np.zeros((len(data), 3), dtype=int)
    events[:, 0] = np.arange(len(data))
    events[:, 2] = 99
    this_info = pick_info(
        info, [info['ch_names'].index(ch) for ch in ch_names],
        copy=True)
    return EpochsArray(data=data, info=this_info, events=events, tmin=0)
Example #10
def test_magnetic_dipole():
    """Test basic magnetic dipole forward calculation."""
    trans = Transform('mri', 'head')
    info = read_info(fname_raw)
    picks = pick_types(info, meg=True, eeg=False, exclude=[])
    info = pick_info(info, picks[:12])
    coils = _create_meg_coils(info['chs'], 'normal', trans)
    # magnetic dipole at device origin
    r0 = np.array([0., 13., -6.])
    for ch, coil in zip(info['chs'], coils):
        rr = (ch['loc'][:3] + r0) / 2.
        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # grad vs mag
        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
Example #11
def test_magnetic_dipole():
    """Test basic magnetic dipole forward calculation
    """
    trans = Transform("mri", "head", np.eye(4))
    info = read_info(fname_raw)
    picks = pick_types(info, meg=True, eeg=False, exclude=[])
    info = pick_info(info, picks[:12])
    coils = _create_meg_coils(info["chs"], "normal", trans)
    # magnetic dipole at device origin
    r0 = np.array([0.0, 13.0, -6.0])
    for ch, coil in zip(info["chs"], coils):
        rr = (ch["loc"][:3] + r0) / 2.0
        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
        ratio = 8.0 if ch["ch_name"][-1] == "1" else 16.0  # grad vs mag
        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
Example #12
def test_magnetic_dipole():
    """Basic test for magnetic dipole forward calculation
    """
    trans = {'to': FIFF.FIFFV_COORD_HEAD, 'from': FIFF.FIFFV_COORD_MRI,
             'trans': np.eye(4)}
    info = read_info(fname_raw)
    picks = pick_types(info, meg=True, eeg=False, exclude=[])
    info = pick_info(info, picks[:12])
    coils = _create_coils(info['chs'], FIFF.FWD_COIL_ACCURACY_NORMAL, trans)
    # magnetic dipole at device origin
    r0 = np.array([0., 13., -6.])
    for ch, coil in zip(info['chs'], coils):
        rr = (ch['coil_trans'][:3, 3] + r0) / 2.
        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # grad vs mag
        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
Example #13
def _simulate_data(fwd):
    """Simulate an oscillator on the cortex."""
    source_vertno = 146374  # Somewhere on the frontal lobe

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch
    epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
                        tmax=raw.times[-1], preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    return epochs, evoked, csd, source_vertno
Example #14
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull"""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'

    subjects_dir = op.join(data_path, 'subjects')
    fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
    fname_trans = op.join(data_path, 'MEG', 'sample',
                          'sample_audvis_trunc-trans.fif')
    fname_bem = op.join(subjects_dir, 'sample', 'bem',
                        'sample-1280-1280-1280-bem-sol.fif')

    subject = 'sample'

    raw = Raw(raw_fname, preload=True)

    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)

    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])

    # Simulated scalp map
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1

    simulated_scalp_map = simulated_scalp_map[:, None]

    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)

    min_dist = 5.  # distance in mm

    dip, residual = fit_dipole(evoked, cov, fname_bem, fname_trans,
                               min_dist=min_dist)

    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)

    assert_true(min_dist < (dist[0] * 1000.) < (min_dist + 1.))

    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
                  -1.)
Example #15
def _fast_map_meg_channels(info, pick_from, pick_to,
                           dots=None, mode='fast'):
    from mne.io.pick import pick_info
    from mne.forward._field_interpolation import _setup_dots
    from mne.forward._field_interpolation import _compute_mapping_matrix
    from mne.forward._make_forward import _create_meg_coils, _read_coil_defs
    from mne.bem import _check_origin

    miss = 1e-4  # Smoothing criterion for MEG

    # XXX: hack to silence _compute_mapping_matrix
    verbose = mne.get_config('MNE_LOGGING_LEVEL', 'INFO')
    mne.set_log_level('WARNING')

    info_from = pick_info(info, pick_from, copy=True)
    templates = _read_coil_defs()
    coils_from = _create_meg_coils(info_from['chs'], 'normal',
                                   info_from['dev_head_t'], templates)
    my_origin = _check_origin((0., 0., 0.04), info_from)
    int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')

    # This function needs a clean input. It hates the presence of other
    # channels than MEG channels. Make sure all is picked.
    if dots is None:
        dots = self_dots, cross_dots = _compute_dots(info, mode=mode)
    else:
        self_dots, cross_dots = dots

    self_dots, cross_dots = _pick_dots(dots, pick_from, pick_to)

    ch_names = [c['ch_name'] for c in info_from['chs']]
    fmd = dict(kind='meg', ch_names=ch_names,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=cross_dots, int_rad=int_rad, miss=miss)

    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
    mne.set_log_level(verbose)

    return fmd['data']
Example #16
def test_magnetic_dipole():
    """Test basic magnetic dipole forward calculation."""
    info = read_info(fname_raw)
    picks = pick_types(info, meg=True, eeg=False, exclude=[])
    info = pick_info(info, picks[:12])
    coils = _create_meg_coils(info['chs'], 'normal', None)
    # magnetic dipole far (meters!) from device origin
    r0 = np.array([0., 13., -6.])
    for ch, coil in zip(info['chs'], coils):
        rr = (ch['loc'][:3] + r0) / 2.  # get halfway closer
        far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
        near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
        ratio = 8. if ch['ch_name'][-1] == '1' else 16.  # grad vs mag
        assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
    # degenerate case
    r0 = coils[0]['rmag'][[0]]
    with pytest.raises(RuntimeError, match='Coil too close'):
        _magnetic_dipole_field_vec(r0, coils[:1])
    with pytest.warns(RuntimeWarning, match='Coil too close'):
        fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='warning')
    assert not np.isfinite(fwd).any()
    with np.errstate(invalid='ignore'):
        fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='info')
    assert not np.isfinite(fwd).any()
Example #17
def test_find_layout():
    """Test finding layout"""
    import matplotlib.pyplot as plt
    assert_raises(ValueError, find_layout, _get_test_info(), ch_type='meep')

    sample_info = read_info(fif_fname)
    grads = pick_types(sample_info, meg='grad')
    sample_info2 = pick_info(sample_info, grads)

    mags = pick_types(sample_info, meg='mag')
    sample_info3 = pick_info(sample_info, mags)

    # mock new convention
    sample_info4 = copy.deepcopy(sample_info)
    for ii, name in enumerate(sample_info4['ch_names']):
        new = name.replace(' ', '')
        sample_info4['chs'][ii]['ch_name'] = new

    eegs = pick_types(sample_info, meg=False, eeg=True)
    sample_info5 = pick_info(sample_info, eegs)

    lout = find_layout(sample_info, ch_type=None)
    assert_equal(lout.kind, 'Vectorview-all')
    assert_true(all(' ' in k for k in lout.names))

    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    # test new vector-view
    lout = find_layout(sample_info4, ch_type=None)
    assert_equal(lout.kind, 'Vectorview-all')
    assert_true(all(' ' not in k for k in lout.names))

    lout = find_layout(sample_info, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2)
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3)
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5)
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    # no common layout, 'meg' option not supported

    lout = find_layout(read_info(fname_ctf_raw))
    assert_equal(lout.kind, 'CTF-275')

    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    lout = find_layout(read_info(fname_bti_raw))
    assert_equal(lout.kind, 'magnesWH3600')

    raw_kit = read_raw_kit(fname_kit_157)
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')

    raw_kit.info['bads'] = ['MEG  13', 'MEG  14', 'MEG  15', 'MEG  16']
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')

    raw_umd = read_raw_kit(fname_kit_umd)
    lout = find_layout(raw_umd.info)
    assert_equal(lout.kind, 'KIT-UMD-3')

    # Test plotting
    lout.plot()
    lout.plot(picks=np.arange(10))
    plt.close('all')
Example #18
#    cfg.reducerank    = 2;
#    leadfield2d       = ft_prepare_leadfield(cfg);

import mne
head_mri_t = mne.read_trans(
    os.path.join(recordings_path, subject,
                 '{}-head_mri-trans.fif'.format(subject)))

# Source space
src = mne.setup_source_space(subject=subject,
                             subjects_dir=fs_path,
                             add_dist=False,
                             spacing='oct6')

# This is to morph the fsaverage source model into subjects.
src_subject = mne.morph_source_spaces(src_fsaverage,
                                      subject,
                                      subjects_dir=fs_path)
# BEM
bems = mne.make_bem_model(subject,
                          conductivity=(0.3, ),
                          subjects_dir=fs_path,
                          ico=4)
bem_sol = mne.make_bem_solution(bems)

picks = mne.pick_types(info, meg=True, ref_meg=False)
info = mne.pick_info(info, picks)

# Forward
fwd = mne.make_forward_solution(info, trans=head_mri_t, bem=bem_sol, src=src)
Example #19
            A_pca_coeff[s][m][1, :] = -A_pca_coeff[s][m][1, :]
            A_pca_sp[s][m][:, 1] = -A_pca_sp[s][m][:, 1]

#%% Plot topomaps

plt.figure()
plt.suptitle('PCA comp 1')
vmin = A_pca_coeff[0][3][0, :].mean() - 1.96 * A_pca_coeff[0][3][0, :].std()
vmax = A_pca_coeff[0][3][0, :].mean() + 1.96 * A_pca_coeff[0][3][0, :].std()
for s in range(len(Subjects)):
    for m in range(len(m_bits)):
        plt.subplot(len(m_bits), len(Subjects), len(Subjects) * m + s + 1)
        if m == 0:
            plt.title(Subjects[s])
        mne.viz.plot_topomap(A_pca_coeff[s][m][0, :],
                             mne.pick_info(A_info_obj[s], A_ch_picks[s]),
                             vmin=vmin,
                             vmax=vmax)

plt.figure()
plt.suptitle('PCA comp 2')
vmin = A_pca_coeff[0][3][1, :].mean() - 1.96 * A_pca_coeff[0][3][1, :].std()
vmax = A_pca_coeff[0][3][1, :].mean() + 1.96 * A_pca_coeff[0][3][1, :].std()
for s in range(len(Subjects)):
    for m in range(len(m_bits)):
        plt.subplot(len(m_bits), len(Subjects), len(Subjects) * m + s + 1)
        if m == 0:
            plt.title(Subjects[s])
        mne.viz.plot_topomap(A_pca_coeff[s][m][1, :],
                             mne.pick_info(A_info_obj[s], A_ch_picks[s]),
                             vmin=vmin,
                             vmax=vmax)
Example #20
    def fit(self, raw, verbose=None):
        """Fit the SNS operator

        Parameters
        ----------
        raw : Instance of Raw
            The raw data to fit.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        sns : Instance of SensorNoiseSuppression
            The modified instance.

        Notes
        -----
        In the resulting operator, bad channels will be reconstructed by
        using the good channels.
        """
        logger.info('Processing data with sensor noise suppression algorithm')
        logger.info('    Loading raw data')
        if not isinstance(raw, BaseRaw):
            raise TypeError('raw must be an instance of Raw, got %s' %
                            type(raw))
        good_picks = _pick_data_channels(raw.info, exclude='bads')
        if self._n_neighbors > len(good_picks) - 1:
            raise ValueError('n_neighbors must be at most len(good_picks) '
                             '- 1 (%s)' % (len(good_picks) - 1, ))
        logger.info('    Loading data')
        picks = _pick_data_channels(raw.info, exclude=())
        # The following lines are equivalent to this, but require less memory:
        # data_cov = np.cov(orig_data)
        # data_corrs = np.corrcoef(orig_data) ** 2
        logger.info('    Computing covariance for %s good channels' %
                    len(good_picks))
        data_cov = compute_raw_covariance(
            raw,
            picks=picks,
            reject=self._reject,
            flat=self._flat,
            verbose=False if verbose is None else verbose)['data']
        good_subpicks = np.searchsorted(picks, good_picks)
        del good_picks
        # scale the norms so everything is close enough to unity for our checks
        picks_list = _picks_by_type(pick_info(raw.info, picks), exclude=())
        _apply_scaling_cov(data_cov, picks_list, self._scalings)
        data_norm = np.diag(data_cov).copy()
        eps = np.finfo(np.float32).eps
        pos_mask = data_norm >= eps
        data_norm[pos_mask] = 1. / data_norm[pos_mask]
        data_norm[~pos_mask] = 0
        # normalize
        data_corrs = data_cov * data_cov
        data_corrs *= data_norm
        data_corrs *= data_norm[:, np.newaxis]
        del data_norm
        operator = np.zeros((len(picks), len(picks)))
        logger.info('    Assembling spatial operator')
        for ii in range(len(picks)):
            # For each channel, the set of other signals is orthogonalized by
            # applying PCA to obtain an orthogonal basis of the subspace
            # spanned by the other channels.
            idx = np.argsort(data_corrs[ii][good_subpicks])[::-1]
            if ii in good_subpicks:
                idx = good_subpicks[idx[:self._n_neighbors + 1]].tolist()
                idx.pop(idx.index(ii))  # should be in there iff it is good
            else:
                idx = good_subpicks[idx[:self._n_neighbors]].tolist()
            # We have already effectively thresholded by zeroing out components
            eigval, eigvec = _pca(data_cov[np.ix_(idx, idx)], thresh=None)
            # Some of the eigenvalues could be zero, don't let it blow up
            norm = np.zeros(len(eigval))
            use_mask = eigval > eps
            norm[use_mask] = 1. / np.sqrt(eigval[use_mask])
            eigvec *= norm
            del eigval
            # The channel is projected on this basis and replaced by its
            # projection
            operator[ii, idx] = np.dot(eigvec, np.dot(data_cov[ii][idx],
                                                      eigvec))
            # Equivalently (and less efficiently):
            # eigvec = linalg.block_diag([1.], eigvec)
            # idx = np.concatenate(([ii], idx))
            # corr = np.dot(np.dot(eigvec.T, data_cov[np.ix_(idx, idx)]),
            #               eigvec)
            # operator[ii, idx[1:]] = np.dot(corr[0, 1:], eigvec[1:, 1:].T)
            if operator[ii, ii] != 0:
                raise RuntimeError
        # scale our results back (the ratio of channel scales is what matters)
        _apply_scaling_array(operator.T, picks_list, self._scalings)
        _undo_scaling_array(operator, picks_list, self._scalings)
        logger.info('Done')
        self._operator = operator
        self._used_chs = [raw.ch_names[pick] for pick in picks]
        return self
Example #21
def test_make_forward_solution_kit():
    """Test making fwd using KIT, BTI, and CTF (compensated) files
    """
    kit_dir = op.join(op.dirname(__file__), "..", "..", "io", "kit", "tests", "data")
    sqd_path = op.join(kit_dir, "test.sqd")
    mrk_path = op.join(kit_dir, "test_mrk.sqd")
    elp_path = op.join(kit_dir, "test_elp.txt")
    hsp_path = op.join(kit_dir, "test_hsp.txt")
    trans_path = op.join(kit_dir, "trans-sample.fif")
    fname_kit_raw = op.join(kit_dir, "test_bin_raw.fif")

    bti_dir = op.join(op.dirname(__file__), "..", "..", "io", "bti", "tests", "data")
    bti_pdf = op.join(bti_dir, "test_pdf_linux")
    bti_config = op.join(bti_dir, "test_config_linux")
    bti_hs = op.join(bti_dir, "test_hs_linux")
    fname_bti_raw = op.join(bti_dir, "exported4D_linux_raw.fif")

    fname_ctf_raw = op.join(op.dirname(__file__), "..", "..", "io", "tests", "data", "test_ctf_comp_raw.fif")

    # first set up a small testing source space
    temp_dir = _TempDir()
    fname_src_small = op.join(temp_dir, "sample-oct-2-src.fif")
    src = setup_source_space("sample", fname_src_small, "oct2", subjects_dir=subjects_dir, add_dist=False)
    n_src = 108  # this is the resulting # of verts in fwd

    # first use mne-C: convert file, make forward solution
    fwd = _do_forward_solution(
        "sample",
        fname_kit_raw,
        src=fname_src_small,
        bem=fname_bem_meg,
        mri=trans_path,
        eeg=False,
        meg=True,
        subjects_dir=subjects_dir,
    )
    assert_true(isinstance(fwd, Forward))

    # now let's use python with the same raw file
    fwd_py = make_forward_solution(fname_kit_raw, trans_path, src, fname_bem_meg, eeg=False, meg=True)
    _compare_forwards(fwd, fwd_py, 157, n_src)
    assert_true(isinstance(fwd_py, Forward))

    # now let's use mne-python all the way
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    # without ignore_ref=True, this should throw an error:
    assert_raises(
        NotImplementedError,
        make_forward_solution,
        raw_py.info,
        src=src,
        eeg=False,
        meg=True,
        bem=fname_bem_meg,
        trans=trans_path,
    )

    # check that asking for eeg channels (even if they don't exist) is handled
    meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True, eeg=False))
    fwd_py = make_forward_solution(
        meg_only_info, src=src, meg=True, eeg=True, bem=fname_bem_meg, trans=trans_path, ignore_ref=True
    )
    _compare_forwards(fwd, fwd_py, 157, n_src, meg_rtol=1e-3, meg_atol=1e-7)

    # BTI python end-to-end versus C
    fwd = _do_forward_solution(
        "sample",
        fname_bti_raw,
        src=fname_src_small,
        bem=fname_bem_meg,
        mri=trans_path,
        eeg=False,
        meg=True,
        subjects_dir=subjects_dir,
    )
    with warnings.catch_warnings(record=True):  # weight tables
        raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True, bem=fname_bem_meg, trans=trans_path)
    _compare_forwards(fwd, fwd_py, 248, n_src)

    # now let's test CTF w/compensation
    fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src, fname_bem_meg, eeg=False, meg=True)

    fwd = _do_forward_solution(
        "sample",
        fname_ctf_raw,
        mri=fname_trans,
        src=fname_src_small,
        bem=fname_bem_meg,
        eeg=False,
        meg=True,
        subjects_dir=subjects_dir,
    )
    _compare_forwards(fwd, fwd_py, 274, n_src)

    # CTF with compensation changed in python
    ctf_raw = Raw(fname_ctf_raw, compensation=2)

    fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src, fname_bem_meg, eeg=False, meg=True)
    with warnings.catch_warnings(record=True):
        fwd = _do_forward_solution(
            "sample",
            ctf_raw,
            mri=fname_trans,
            src=fname_src_small,
            bem=fname_bem_meg,
            eeg=False,
            meg=True,
            subjects_dir=subjects_dir,
        )
    _compare_forwards(fwd, fwd_py, 274, n_src)
Example #22
def _simulate_data(fwd, idx):  # Somewhere on the frontal lobe by default
    """Simulate an oscillator on the cortex."""
    source_vertno = fwd['src'][0]['vertno'][idx]

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    with info._unlock():
        info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch (weird baseline but shouldn't matter)
    epochs = mne.Epochs(raw, [[0, 0, 1]],
                        event_id=1,
                        tmin=0,
                        tmax=raw.times[-1],
                        baseline=(0., 0.),
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=5)

    labels = mne.read_labels_from_annot('sample',
                                        hemi='lh',
                                        subjects_dir=subjects_dir)
    label = [
        label for label in labels if np.in1d(source_vertno, label.vertices)[0]
    ]
    assert len(label) == 1
    label = label[0]
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
Example #23
ch_idx_by_type = mne.channel_indices_by_type(info)
print(ch_idx_by_type.keys())
print(ch_idx_by_type['eog'])

###############################################################################
# Dropping channels from an ``Info`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# If you want to modify an :class:`~mne.Info` object by eliminating some of the
# channels in it, you can use the :func:`mne.pick_info` function to pick the
# channels you want to keep and omit the rest:

print(info['nchan'])
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
print(mne.pick_info(info, eeg_indices)['nchan'])

###############################################################################
# We can also get a nice HTML representation in IPython like:

info

###############################################################################
# By default, :func:`~mne.pick_info` will make a copy of the original
# :class:`~mne.Info` object before modifying it; if you want to modify it
# in-place, include the parameter ``copy=False``.
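#
# For instance, a minimal sketch (assuming the ``info`` and ``eeg_indices``
# objects created above; this snippet is not part of the original tutorial):

mne.pick_info(info, eeg_indices, copy=False)
print(info['nchan'])  # ``info`` itself now holds only the EEG channels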
#
#
# .. LINKS
#
# .. _`regular expression`: https://en.wikipedia.org/wiki/Regular_expression
Example #24
def test_make_forward_solution_kit():
    """Test making fwd using KIT, BTI, and CTF (compensated) files
    """
    kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
                      'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    trans_path = op.join(kit_dir, 'trans-sample.fif')
    fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')

    bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
                      'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')

    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                            'data', 'test_ctf_comp_raw.fif')

    # first set up a small testing source space
    temp_dir = _TempDir()
    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
    src = setup_source_space('sample', fname_src_small, 'oct2',
                             subjects_dir=subjects_dir, add_dist=False)
    n_src = 108  # this is the resulting # of verts in fwd

    # first use mne-C: convert file, make forward solution
    fwd = do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
                              bem=fname_bem_meg, mri=trans_path,
                              eeg=False, meg=True, subjects_dir=subjects_dir)
    assert_true(isinstance(fwd, Forward))

    # now let's use python with the same raw file
    fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
                                   fname_bem_meg, eeg=False, meg=True)
    _compare_forwards(fwd, fwd_py, 157, n_src)
    assert_true(isinstance(fwd_py, Forward))

    # now let's use mne-python all the way
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    # without ignore_ref=True, this should throw an error:
    assert_raises(NotImplementedError, make_forward_solution, raw_py.info,
                  src=src, eeg=False, meg=True,
                  bem=fname_bem_meg, trans=trans_path)

    # check that asking for eeg channels (even if they don't exist) is handled
    meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
                                                      eeg=False))
    fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
                                   bem=fname_bem_meg, trans=trans_path,
                                   ignore_ref=True)
    _compare_forwards(fwd, fwd_py, 157, n_src,
                      meg_rtol=1e-3, meg_atol=1e-7)

    # BTI python end-to-end versus C
    fwd = do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
                              bem=fname_bem_meg, mri=trans_path,
                              eeg=False, meg=True, subjects_dir=subjects_dir)
    raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs)
    fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
                                   bem=fname_bem_meg, trans=trans_path)
    _compare_forwards(fwd, fwd_py, 248, n_src)

    # now let's test CTF w/compensation
    fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
                                   fname_bem_meg, eeg=False, meg=True)

    fwd = do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
                              src=fname_src_small, bem=fname_bem_meg,
                              eeg=False, meg=True, subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)

    # CTF with compensation changed in python
    ctf_raw = Raw(fname_ctf_raw, compensation=2)

    fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
                                   fname_bem_meg, eeg=False, meg=True)
    with warnings.catch_warnings(record=True):
        fwd = do_forward_solution('sample', ctf_raw, mri=fname_trans,
                                  src=fname_src_small, bem=fname_bem_meg,
                                  eeg=False, meg=True,
                                  subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)
Example #25
plt.title('2nd component')
for t_c in range(len(t_cuts)):
    plt.plot(t_cutT[t_c],pca_sp_cuts_act[t_c][:,1])    

plt.figure()
plt.plot(t,Avg_Ht_act[31,:])
plt.xlim([0,0.5])

plt.figure()
labels = ['comp1', 'comp2']
vmin = pca_coeff_cuts_act[-1][0,:].mean() - 2 * pca_coeff_cuts_act[-1][0,:].std()
vmax = pca_coeff_cuts_act[-1][0,:].mean() + 2 * pca_coeff_cuts_act[-1][0,:].std()
for t_c in range(len(t_cuts)):
    plt.subplot(2,len(t_cuts),t_c+1)
    plt.title('ExpVar ' + str(np.round(pca_expVar_cuts_act[t_c][0]*100)) + '%')
    mne.viz.plot_topomap(pca_coeff_cuts_act[t_c][0,:], mne.pick_info(A_info_obj_act[2],A_ch_picks_act[2]),vmin=vmin,vmax=vmax)
    
    plt.subplot(2,len(t_cuts),t_c+1 + len(t_cuts))
    plt.title('ExpVar ' + str(np.round(pca_expVar_cuts_act[t_c][1]*100)) + '%')
    mne.viz.plot_topomap(pca_coeff_cuts_act[t_c][1,:], mne.pick_info(A_info_obj_act[2],A_ch_picks_act[2]),vmin=vmin,vmax=vmax)
    
    
#%% Passive Data

data_loc_passive = '/media/ravinderjit/Data_Drive/Data/EEGdata/TemporalCoding/AMmseq_10bits/'
pickle_loc_passive = data_loc_passive + 'Pickles_compActive/'

A_Tot_trials_pass = []
A_Ht_pass = []
A_Htnf_pass = []
A_info_obj_pass = []
Example #26
# Get a dictionary of channel indices, grouped by channel type
channel_indices_by_type = mne.io.pick.channel_indices_by_type(info)
print('The first three magnetometers:', channel_indices_by_type['mag'][:3])

###############################################################################
# Obtaining information about channels
# ------------------------------------

# Channel type of a specific channel
channel_type = mne.io.pick.channel_type(info, 75)
print('Channel #75 is of type:', channel_type)

###############################################################################
# Channel types of a collection of channels
meg_channels = mne.pick_types(info, meg=True)[:10]
channel_types = [mne.io.pick.channel_type(info, ch) for ch in meg_channels]
print('First 10 MEG channels are of type:\n', channel_types)

###############################################################################
# Dropping channels from an info structure
# ----------------------------------------
#
# It is possible to limit the info structure to only include a subset of
# channels with the :func:`mne.pick_info` function:

# Only keep EEG channels
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
reduced_info = mne.pick_info(info, eeg_indices)

print(reduced_info)
Example #27
def test_rank():
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname,
                          condition=0,
                          baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [
        ch for ch in evoked.info['ch_names']
        if '053' not in ch and ch.startswith('EEG')
    ]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert_equal(cov['eig'][0], 0.)  # avg projector should set this to zero
    assert_true((cov['eig'][1:] > 0).all())  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)

    raw_sss = read_raw_fif(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all', pick_types(info_sample, meg=True,
                                              eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all', pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(
        itt.product(
            [(cov_sample, picks_stack_sample, info_sample),
             (cov_sample_proj, picks_stack_sample, info_sample),
             (cov_sss, picks_stack_somato, info_sss),
             (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
            [dict(mag=1e15, grad=1e13, eeg=1e6)]))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [
                c['active'] and len(
                    set(c['data']['col_names']).intersection(
                        set(this_very_info['ch_names']))) > 0
                for c in cov['projs']
            ]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [
                channel_type(this_very_info, idx) for idx in range(len(picks))
            ]
            n_eeg, n_mag, n_grad = [
                ch_types.count(k) for k in ['eeg', 'mag', 'grad']
            ]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if len(this_very_info['proc_history']) > 0:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C,
                                               this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
Example #28
plot_arrowmap(evoked_mag.data[:, max_time_idx], evoked_mag.info)

# Since planar gradiometers take gradients along latitude and longitude,
# they need to be projected onto the flattened manifold spanned by the
# magnetometers or radial gradiometers before taking the gradients in the
# 2D Cartesian coordinate system for visualization on the 2D topoplot. You
# can use the ``info_from`` and ``info_to`` parameters to interpolate from
# gradiometer data to magnetometer data.

# %%
# Plot gradiometer data as an arrowmap along with the topoplot at the time
# of the maximum sensor space activity:
plot_arrowmap(evoked_grad.data[:, max_time_idx], info_from=evoked_grad.info,
              info_to=evoked_mag.info)

# %%
# Since the Vectorview 102 system performs sparse spatial sampling of the
# magnetic field, data from the Vectorview (info_from) can be projected onto
# the high-density CTF 272 system (info_to) for visualization.
#
# Plot gradiometer data as an arrowmap along with the topoplot at the time
# of the maximum sensor space activity:
path = bst_raw.data_path()
raw_fname = (path / 'MEG' / 'bst_raw' /
             'subj001_somatosensory_20111109_01_AUX-f.ds')
raw_ctf = mne.io.read_raw_ctf(raw_fname)
raw_ctf_info = mne.pick_info(
    raw_ctf.info, mne.pick_types(raw_ctf.info, meg=True, ref_meg=False))
plot_arrowmap(evoked_grad.data[:, max_time_idx], info_from=evoked_grad.info,
              info_to=raw_ctf_info, scale=6e-10)
Example #29
def group_average_spectrum(experiment, spectrum_name, groups, new_name):
    """ Computes a group average spectrum item.
    """
    # check data cohesion
    keys = []
    freq_arrays = []
    for group_key, group_subjects in groups.items():
        for subject_name in group_subjects:
            try:
                subject = experiment.subjects.get(subject_name)
                spectrum = subject.spectrum.get(spectrum_name)
                keys.append(tuple(sorted(spectrum.content.keys())))
                freq_arrays.append(tuple(spectrum.freqs))
            except Exception as exc:
                continue

    assert_arrays_same(keys, 'Conditions do not match')
    assert_arrays_same(freq_arrays, 'Freqs do not match')

    # handle channel differences
    ch_names = []
    for group_key, group_subjects in groups.items():
        for subject_name in group_subjects:
            try:
                subject = experiment.subjects.get(subject_name)
                spectrum = subject.spectrum.get(spectrum_name)
                ch_names.append(tuple(clean_names(spectrum.ch_names)))
            except Exception as exc:
                continue

    if len(set(ch_names)) != 1:
        logging.getLogger('ui_logger').info(
            "PSD's contain different sets of channels. Identifying common ones..")

        common_ch_names = list(set.intersection(*map(set, ch_names)))

        logging.getLogger('ui_logger').info(
            str(len(common_ch_names)) + ' common channels found.')
    else:
        common_ch_names = ch_names[0]

    grand_psds = {}
    for group_key, group_subjects in groups.items():
        for subject in experiment.subjects.values():
            if subject.name not in group_subjects:
                continue

            spectrum = subject.spectrum.get(spectrum_name)
            if not spectrum:
                continue

            subject_ch_names = clean_names(spectrum.ch_names) 

            for spectrum_item_key, spectrum_item in spectrum.content.items():
                grand_key = (group_key, spectrum_item_key)

                # get common channels in "subject specific space"
                idxs = [subject_ch_names.index(ch_name) for ch_name 
                        in common_ch_names]

                spectrum_item = spectrum_item[idxs]

                if grand_key not in grand_psds:
                    grand_psds[grand_key] = []
                grand_psds[grand_key].append(spectrum_item)

    grand_averages = {}
    for key, grand_psd in grand_psds.items():
        new_key = str(key[1]) + '_group_' + str(key[0])
        if len(grand_psd) == 1:
            grand_averages[new_key] = grand_psd[0].copy()
        else:
            grand_averages[new_key] = np.mean(grand_psd, axis=0)

    subject = experiment.active_subject

    try:
        spectrum = subject.spectrum.get(spectrum_name)
    except Exception as exc:
        raise Exception('Active subject should be included in the groups')

    spectrum_directory = subject.spectrum_directory

    info = spectrum.info
    common_idxs = [ch_idx for ch_idx, ch_name in enumerate(clean_names(info['ch_names']))
                   if ch_name in common_ch_names]
    info = mne.pick_info(info, sel=common_idxs)

    freqs = spectrum.freqs
    data = grand_averages

    params = deepcopy(spectrum.params)

    # individual intervals not relevant in the group item
    params.pop('intervals', None)

    params['groups'] = groups
    params['conditions'] = [elem for elem in grand_averages.keys()]

    spectrum = Spectrum(new_name, subject.spectrum_directory,
                        params, data, freqs, info)

    spectrum.save_content()
    subject.add(spectrum, 'spectrum')
Example #30
def compute_psd(inst,
                tmin=None,
                tmax=None,
                winlen=None,
                step=None,
                padto=None,
                events=None,
                event_id=None,
                picks=None):
    """Compute power spectral density on Raw or Epochs.

    Parameters
    ----------
    inst : mne.io._BaseRaw | mne.Epochs
        mne Epochs or Raw object to compute psd on.
    tmin : float | None
        Time (in seconds) marking the start of the time segment used in PSD
        calculation.
    tmax : float | None
        Time (in seconds) marking the end of the time segment used in PSD
        calculation.
    winlen : float
        Welch window length (in seconds). The default is 2 seconds.
    step : float | None
        Welch window step interval (in seconds).
    padto : float | None
        Length in seconds to which each Welch window should be zero-padded.
    events : numpy.ndarray | None
        mne events array (n_events by 3). Used only when event-related PSD
        calculation is used.
    event_id : int | list of int | array of int
        The id of the event to consider. If a list, all events with the IDs
        specified in the list are used. If None, all events will be used.
    picks : list/array of int | list of str | None
        Channels to use in PSD calculation. The default (``None``) uses all
        data channels.

    Returns
    -------
    psd : borsar.freq.PSD
        PowerSpectralDensity (PSD) object.
    """
    from mne.time_frequency import psd_welch

    if tmin is None:
        tmin = inst.times[0]
    if tmax is None:
        tmax = inst.times[-1]
    if winlen is None:
        winlen = tmax - tmin

    # FIXME: maybe check - if a single long winlen is used together with bad
    #        annotations, compute_psd_raw should warn when all of the data
    #        turns out NaN because of those annotations
    step = winlen / 4 if step is None else step
    if isinstance(inst, mne.BaseEpochs):
        n_per_seg, n_overlap, n_fft = _psd_welch_input_seconds_to_samples(
            inst, winlen, step, padto)
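        # (editor's note: presumably n_per_seg ~ winlen * sfreq, n_overlap ~
        #  (winlen - step) * sfreq and n_fft ~ padto * sfreq, rounded to whole
        #  samples; the helper itself is not shown in this snippet)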
        psd, freq = psd_welch(inst,
                              tmin=tmin,
                              tmax=tmax,
                              n_fft=n_fft,
                              picks=picks,
                              n_per_seg=n_per_seg,
                              n_overlap=n_overlap)
        # FIXME: this will need fixing:
        #  * inst already has events and event_id
        #  * can the user pass events? should they be ignored?
        if event_id is not None:
            # check which epochs were selected
            chosen_events = (list(event_id.values()) if isinstance(
                event_id, dict) else event_id)
            msk = np.in1d(inst.events[:, -1], chosen_events)
            this_inst = inst[msk]

            events = this_inst.events
            event_id = this_inst.event_id
            metadata = this_inst.metadata
        else:
            events = inst.events
            event_id = inst.event_id
            metadata = inst.metadata

    elif isinstance(inst, mne.io.BaseRaw):
        psd, freq = compute_rest_psd(inst,
                                     events=events,
                                     event_id=event_id,
                                     tmin=tmin,
                                     tmax=tmax,
                                     winlen=winlen,
                                     step=step)
        metadata = None
    else:
        raise TypeError('`compute_psd` works only with Raw or Epochs data '
                        'formats, got {}'.format(type(inst)))

    # construct PSD object
    try:
        from mne.selection import pick_types
    except ModuleNotFoundError:
        from mne import pick_types

    picks_int = pick_types(inst.info, eeg=True, selection=picks)
    info = mne.pick_info(inst.info, sel=picks_int)

    psd = PSD(psd,
              freq,
              info,
              events=events,
              event_id=event_id,
              metadata=metadata)

    return psd
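
# Editor's note: a hypothetical usage sketch for compute_psd above; `epochs`
# is assumed to be an existing mne.Epochs object, and the PSD class and
# compute_rest_psd helper are assumed importable as in the original module.
#
#     psd = compute_psd(epochs, tmin=0.0, tmax=2.0, winlen=1.0, step=0.25)
#
# Note that with Epochs input, event_id only selects which events/metadata are
# attached to the returned PSD object; the spectra themselves are computed
# from all epochs (see the FIXME above).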
Example #31
0
###############################################################################
# Now we run the signal through the forward model to obtain simulated sensor
# data. To save computation time, we'll only simulate gradiometer data. You can
# try simulating other types of sensors as well.
#
# Some noise is added based on the baseline noise covariance matrix from the
# sample dataset, scaled to implement the desired SNR.

# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_info(raw_fname)
info.update(sfreq=sfreq, bads=[])

# Only use gradiometers
picks = mne.pick_types(info, meg='grad', stim=True, exclude=())
mne.pick_info(info, picks, copy=False)

# This is the raw object that will be used as a template for the simulation.
raw = mne.io.RawArray(np.zeros((info['nchan'], len(stc.times))), info)

# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov['data'] *= (20. / snr) ** 2  # Scale the noise to achieve the desired SNR

# Simulate the raw data, with a lowpass filter on the noise
raw = simulate_raw(raw, stc, trans_fname, src_fname, bem_fname, cov=cov,
                   random_state=rand, iir_filter=[4, -4, 0.8])


###############################################################################
Example #32
0
meg_path = op.join(path, subject, "new_v1")

onset_files = files.get_files(meg_path, "epochs-resp-TD", "-epo.fif")[2]
onset_files.sort()

obs_files = files.get_files(meg_path, "epochs-TD", "-epo.fif")[2]
obs_files.sort()

onset_times = np.linspace(-0.5, 1, num=376)
obs_times = np.linspace(-0.5, 2.6, num=776)

# prepare the sensor groupings for the channel subset

info = mne.io.read_info(onset_files[0])
ch_subset = mne.pick_types(info, ref_meg=False, eog=False, stim=False)
info = mne.pick_info(info, ch_subset)

sensor_groupings = {
    "A": [i for i in info["ch_names"] if "MLO" in i],
    "B": [i for i in info["ch_names"] if "MRO" in i],
    "C": [i for i in info["ch_names"] if "MLC" in i],
    "D": [i for i in info["ch_names"] if "MRC" in i],
    "E": [i for i in info["ch_names"] if "0" in i],
    "F": [i for i in info["ch_names"] if "C" in i]
}
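
# Editor's sketch (an addition, assuming the `info` and `sensor_groupings`
# defined above): convert each named grouping into integer picks, e.g. for
# mne.pick_info or for indexing data arrays.
grouping_picks = {
    name: mne.pick_channels(info["ch_names"], include=names)
    for name, names in sensor_groupings.items()
}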

# read data
beh = pd.read_pickle(beh_file)

onsets = [mne.read_epochs(i) for i in onset_files]
onsets = np.vstack([i.pick_types(ref_meg=False).get_data() for i in onsets])
Example #33
0
def test_find_layout():
    """Test finding layout."""
    pytest.raises(ValueError, find_layout, _get_test_info(), ch_type='meep')

    sample_info = read_info(fif_fname)
    grads = pick_types(sample_info, meg='grad')
    sample_info2 = pick_info(sample_info, grads)

    mags = pick_types(sample_info, meg='mag')
    sample_info3 = pick_info(sample_info, mags)

    # mock new convention
    sample_info4 = copy.deepcopy(sample_info)
    for ii, name in enumerate(sample_info4['ch_names']):
        new = name.replace(' ', '')
        sample_info4['chs'][ii]['ch_name'] = new

    eegs = pick_types(sample_info, meg=False, eeg=True)
    sample_info5 = pick_info(sample_info, eegs)

    lout = find_layout(sample_info, ch_type=None)
    assert lout.kind == 'Vectorview-all'
    assert all(' ' in k for k in lout.names)

    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    # test new vector-view
    lout = find_layout(sample_info4, ch_type=None)
    assert_equal(lout.kind, 'Vectorview-all')
    assert all(' ' not in k for k in lout.names)

    lout = find_layout(sample_info, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2)
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3)
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5)
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    # no common layout, 'meg' option not supported

    lout = find_layout(read_info(fname_ctf_raw))
    assert_equal(lout.kind, 'CTF-275')

    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    lout = find_layout(read_info(fname_bti_raw))
    assert_equal(lout.kind, 'magnesWH3600')

    raw_kit = read_raw_kit(fname_kit_157)
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')

    raw_kit.info['bads'] = ['MEG 013', 'MEG 014', 'MEG 015', 'MEG 016']
    raw_kit.info._check_consistency()
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')
    # fallback for missing IDs
    for val in (35, 52, 54, 1001):
        with raw_kit.info._unlock():
            raw_kit.info['kit_system_id'] = val
        lout = find_layout(raw_kit.info)
        assert lout.kind == 'custom'

    raw_umd = read_raw_kit(fname_kit_umd)
    lout = find_layout(raw_umd.info)
    assert_equal(lout.kind, 'KIT-UMD-3')

    # Test plotting
    lout.plot()
    lout.plot(picks=np.arange(10))
    plt.close('all')
Example #34
0
def test_find_layout():
    """Test finding layout"""
    import matplotlib.pyplot as plt
    assert_raises(ValueError, find_layout, _get_test_info(), ch_type='meep')

    sample_info = read_info(fif_fname)
    grads = pick_types(sample_info, meg='grad')
    sample_info2 = pick_info(sample_info, grads)

    mags = pick_types(sample_info, meg='mag')
    sample_info3 = pick_info(sample_info, mags)

    # mock new convention
    sample_info4 = copy.deepcopy(sample_info)
    for ii, name in enumerate(sample_info4['ch_names']):
        new = name.replace(' ', '')
        sample_info4['chs'][ii]['ch_name'] = new

    eegs = pick_types(sample_info, meg=False, eeg=True)
    sample_info5 = pick_info(sample_info, eegs)

    lout = find_layout(sample_info, ch_type=None)
    assert_equal(lout.kind, 'Vectorview-all')
    assert_true(all(' ' in k for k in lout.names))

    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    # test new vector-view
    lout = find_layout(sample_info4, ch_type=None)
    assert_equal(lout.kind, 'Vectorview-all')
    assert_true(all(' ' not in k for k in lout.names))

    lout = find_layout(sample_info, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2)
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='grad')
    assert_equal(lout.kind, 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3)
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='mag')
    assert_equal(lout.kind, 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='meg')
    assert_equal(lout.kind, 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5)
    assert_equal(lout.kind, 'EEG')
    lout = find_layout(sample_info5, ch_type='eeg')
    assert_equal(lout.kind, 'EEG')
    # no common layout, 'meg' option not supported

    lout = find_layout(read_info(fname_ctf_raw))
    assert_equal(lout.kind, 'CTF-275')

    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    lout = find_layout(read_info(fname_bti_raw))
    assert_equal(lout.kind, 'magnesWH3600')

    raw_kit = read_raw_kit(fname_kit_157)
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')

    raw_kit.info['bads'] = ['MEG  13', 'MEG  14', 'MEG  15', 'MEG  16']
    lout = find_layout(raw_kit.info)
    assert_equal(lout.kind, 'KIT-157')

    raw_umd = read_raw_kit(fname_kit_umd)
    lout = find_layout(raw_umd.info)
    assert_equal(lout.kind, 'KIT-UMD-3')

    # Test plotting
    lout.plot()
    plt.close('all')
Example #35
0
def test_make_forward_solution_kit():
    """Test making fwd using KIT, BTI, and CTF (compensated) files."""
    kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
                      'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    trans_path = op.join(kit_dir, 'trans-sample.fif')
    fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')

    bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
                      'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')

    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                            'data', 'test_ctf_comp_raw.fif')

    # first set up a small testing source space
    temp_dir = _TempDir()
    fname_src_small = op.join(temp_dir, 'sample-oct-2-src.fif')
    src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
                             add_dist=False)
    write_source_spaces(fname_src_small, src)  # to enable working with MNE-C
    n_src = 108  # this is the resulting # of verts in fwd

    # first use mne-C: convert file, make forward solution
    fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
                               bem=fname_bem_meg, mri=trans_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
    assert (isinstance(fwd, Forward))

    # now let's use python with the same raw file
    fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
                                   fname_bem_meg, eeg=False, meg=True)
    _compare_forwards(fwd, fwd_py, 157, n_src)
    assert (isinstance(fwd_py, Forward))

    # now let's use mne-python all the way
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    # without ignore_ref=True, this should throw an error:
    pytest.raises(NotImplementedError, make_forward_solution, raw_py.info,
                  src=src, eeg=False, meg=True,
                  bem=fname_bem_meg, trans=trans_path)

    # check that asking for eeg channels (even if they don't exist) is handled
    meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
                                                      eeg=False))
    fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
                                   bem=fname_bem_meg, trans=trans_path,
                                   ignore_ref=True)
    _compare_forwards(fwd, fwd_py, 157, n_src,
                      meg_rtol=1e-3, meg_atol=1e-7)

    # BTI python end-to-end versus C
    fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
                               bem=fname_bem_meg, mri=trans_path,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
    raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
                                   bem=fname_bem_meg, trans=trans_path)
    _compare_forwards(fwd, fwd_py, 248, n_src)

    # now let's test CTF w/compensation
    fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
                                   fname_bem_meg, eeg=False, meg=True)

    fwd = _do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
                               src=fname_src_small, bem=fname_bem_meg,
                               eeg=False, meg=True, subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)

    # CTF with compensation changed in python
    ctf_raw = read_raw_fif(fname_ctf_raw)
    ctf_raw.info['bads'] = ['MRO24-2908']  # test that it works with some bads
    ctf_raw.apply_gradient_compensation(2)

    fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
                                   fname_bem_meg, eeg=False, meg=True)
    fwd = _do_forward_solution('sample', ctf_raw, mri=fname_trans,
                               src=fname_src_small, bem=fname_bem_meg,
                               eeg=False, meg=True,
                               subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)

    temp_dir = _TempDir()
    fname_temp = op.join(temp_dir, 'test-ctf-fwd.fif')
    write_forward_solution(fname_temp, fwd_py)
    fwd_py2 = read_forward_solution(fname_temp)
    _compare_forwards(fwd_py, fwd_py2, 274, n_src)
    repr(fwd_py)
Example #36
0
def test_simulate_calculate_head_pos_chpi():
    """Test calculation of cHPI positions with simulated data."""
    # Read info dict from raw FIF file
    info = read_info(raw_fname)
    # Tune the info structure
    chpi_channel = u'STI201'
    ncoil = len(info['hpi_results'][0]['order'])
    coil_freq = 10 + np.arange(ncoil) * 5
    hpi_subsystem = {
        'event_channel':
        chpi_channel,
        'hpi_coils': [{
            'event_bits':
            np.array([256, 0, 256, 256], dtype=np.int32)
        }, {
            'event_bits':
            np.array([512, 0, 512, 512], dtype=np.int32)
        }, {
            'event_bits':
            np.array([1024, 0, 1024, 1024], dtype=np.int32)
        }, {
            'event_bits':
            np.array([2048, 0, 2048, 2048], dtype=np.int32)
        }],
        'ncoil':
        ncoil
    }

    info['hpi_subsystem'] = hpi_subsystem
    for fi, freq in enumerate(coil_freq):
        info['hpi_meas'][0]['hpi_coils'][fi]['coil_freq'] = freq
    picks = pick_types(info, meg=True, stim=True, eeg=False, exclude=[])
    info['sfreq'] = 100.  # this will speed it up a lot
    info = pick_info(info, picks)
    info['chs'][info['ch_names'].index('STI 001')]['ch_name'] = 'STI201'
    info._update_redundant()
    info['projs'] = []

    info_trans = info['dev_head_t']['trans'].copy()

    dev_head_pos_ini = np.concatenate(
        [rot_to_quat(info_trans[:3, :3]), info_trans[:3, 3]])
    ez = np.array([0, 0, 1])  # Unit vector in z-direction of head coordinates

    # Define some constants
    duration = 10  # Time / s

    # Quotient of head position sampling frequency
    # and raw sampling frequency
    head_pos_sfreq_quotient = 0.01

    # Round number of head positions to the next integer
    S = int(duration * info['sfreq'] * head_pos_sfreq_quotient)
    assert S == 10
    dz = 0.001  # Shift in z-direction is 1 mm (0.001 m) for each step

    dev_head_pos = np.zeros((S, 10))
    dev_head_pos[:, 0] = np.arange(S) * info['sfreq'] * head_pos_sfreq_quotient
    dev_head_pos[:, 1:4] = dev_head_pos_ini[:3]
    dev_head_pos[:, 4:7] = dev_head_pos_ini[3:] + \
        np.outer(np.arange(S) * dz, ez)
    dev_head_pos[:, 7] = 1.0

    # m/s
    dev_head_pos[:, 9] = dz / (info['sfreq'] * head_pos_sfreq_quotient)

    # Round number of samples to the next integer
    raw_data = np.zeros((len(picks), int(duration * info['sfreq'] + 0.5)))
    raw = RawArray(raw_data, info)
    add_chpi(raw, dev_head_pos)
    quats = _calculate_chpi_positions(
        raw,
        t_step_min=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_step_max=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_window=1.0)
    _assert_quats(quats,
                  dev_head_pos,
                  dist_tol=0.001,
                  angle_tol=1.,
                  vel_atol=4e-3)  # 4 mm/s
Example #37
0
def test_make_forward_solution_kit(tmpdir):
    """Test making fwd using KIT, BTI, and CTF (compensated) files."""
    kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit', 'tests',
                      'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    trans_path = op.join(kit_dir, 'trans-sample.fif')
    fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')

    bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti', 'tests',
                      'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')

    fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                            'data', 'test_ctf_comp_raw.fif')

    # first set up a small testing source space
    fname_src_small = tmpdir.join('sample-oct-2-src.fif')
    src = setup_source_space('sample',
                             'oct2',
                             subjects_dir=subjects_dir,
                             add_dist=False)
    write_source_spaces(fname_src_small, src)  # to enable working with MNE-C
    n_src = 108  # this is the resulting # of verts in fwd

    # first use mne-C: convert file, make forward solution
    fwd = _do_forward_solution('sample',
                               fname_kit_raw,
                               src=fname_src_small,
                               bem=fname_bem_meg,
                               mri=trans_path,
                               eeg=False,
                               meg=True,
                               subjects_dir=subjects_dir)
    assert (isinstance(fwd, Forward))

    # now let's use python with the same raw file
    fwd_py = make_forward_solution(fname_kit_raw,
                                   trans_path,
                                   src,
                                   fname_bem_meg,
                                   eeg=False,
                                   meg=True)
    _compare_forwards(fwd, fwd_py, 157, n_src)
    assert (isinstance(fwd_py, Forward))

    # now let's use mne-python all the way
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    # without ignore_ref=True, this should throw an error:
    with pytest.raises(NotImplementedError, match='Cannot.*KIT reference'):
        make_forward_solution(raw_py.info,
                              src=src,
                              eeg=False,
                              meg=True,
                              bem=fname_bem_meg,
                              trans=trans_path)

    # check that asking for eeg channels (even if they don't exist) is handled
    meg_only_info = pick_info(raw_py.info,
                              pick_types(raw_py.info, meg=True, eeg=False))
    fwd_py = make_forward_solution(meg_only_info,
                                   src=src,
                                   meg=True,
                                   eeg=True,
                                   bem=fname_bem_meg,
                                   trans=trans_path,
                                   ignore_ref=True)
    _compare_forwards(fwd, fwd_py, 157, n_src, meg_rtol=1e-3, meg_atol=1e-7)

    # BTI python end-to-end versus C
    fwd = _do_forward_solution('sample',
                               fname_bti_raw,
                               src=fname_src_small,
                               bem=fname_bem_meg,
                               mri=trans_path,
                               eeg=False,
                               meg=True,
                               subjects_dir=subjects_dir)
    raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    fwd_py = make_forward_solution(raw_py.info,
                                   src=src,
                                   eeg=False,
                                   meg=True,
                                   bem=fname_bem_meg,
                                   trans=trans_path)
    _compare_forwards(fwd, fwd_py, 248, n_src)

    # now let's test CTF w/compensation
    fwd_py = make_forward_solution(fname_ctf_raw,
                                   fname_trans,
                                   src,
                                   fname_bem_meg,
                                   eeg=False,
                                   meg=True)

    fwd = _do_forward_solution('sample',
                               fname_ctf_raw,
                               mri=fname_trans,
                               src=fname_src_small,
                               bem=fname_bem_meg,
                               eeg=False,
                               meg=True,
                               subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)

    # CTF with compensation changed in python
    ctf_raw = read_raw_fif(fname_ctf_raw)
    ctf_raw.info['bads'] = ['MRO24-2908']  # test that it works with some bads
    ctf_raw.apply_gradient_compensation(2)

    fwd_py = make_forward_solution(ctf_raw.info,
                                   fname_trans,
                                   src,
                                   fname_bem_meg,
                                   eeg=False,
                                   meg=True)
    fwd = _do_forward_solution('sample',
                               ctf_raw,
                               mri=fname_trans,
                               src=fname_src_small,
                               bem=fname_bem_meg,
                               eeg=False,
                               meg=True,
                               subjects_dir=subjects_dir)
    _compare_forwards(fwd, fwd_py, 274, n_src)

    fname_temp = tmpdir.join('test-ctf-fwd.fif')
    write_forward_solution(fname_temp, fwd_py)
    fwd_py2 = read_forward_solution(fname_temp)
    _compare_forwards(fwd_py, fwd_py2, 274, n_src)
    repr(fwd_py)
Example #38
0
def test_pick_refs():
    """Test picking of reference sensors."""
    infos = list()
    # KIT
    kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    infos.append(raw_kit.info)
    # BTi
    bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    infos.append(raw_bti.info)
    # CTF
    fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
    raw_ctf = read_raw_fif(fname_ctf_raw)
    raw_ctf.apply_gradient_compensation(2)
    for info in infos:
        info['bads'] = []
        pytest.raises(ValueError, pick_types, info, meg='foo')
        pytest.raises(ValueError, pick_types, info, ref_meg='foo')
        picks_meg_ref = pick_types(info, meg=True, ref_meg=True)
        picks_meg = pick_types(info, meg=True, ref_meg=False)
        picks_ref = pick_types(info, meg=False, ref_meg=True)
        assert_array_equal(picks_meg_ref,
                           np.sort(np.concatenate([picks_meg, picks_ref])))
        picks_grad = pick_types(info, meg='grad', ref_meg=False)
        picks_ref_grad = pick_types(info, meg=False, ref_meg='grad')
        picks_meg_ref_grad = pick_types(info, meg='grad', ref_meg='grad')
        assert_array_equal(picks_meg_ref_grad,
                           np.sort(np.concatenate([picks_grad,
                                                   picks_ref_grad])))
        picks_mag = pick_types(info, meg='mag', ref_meg=False)
        picks_ref_mag = pick_types(info, meg=False, ref_meg='mag')
        picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag')
        assert_array_equal(picks_meg_ref_mag,
                           np.sort(np.concatenate([picks_mag,
                                                   picks_ref_mag])))
        assert_array_equal(picks_meg,
                           np.sort(np.concatenate([picks_mag, picks_grad])))
        assert_array_equal(picks_ref,
                           np.sort(np.concatenate([picks_ref_mag,
                                                   picks_ref_grad])))
        assert_array_equal(picks_meg_ref, np.sort(np.concatenate(
            [picks_grad, picks_mag, picks_ref_grad, picks_ref_mag])))

        for pick in (picks_meg_ref, picks_meg, picks_ref,
                     picks_grad, picks_ref_grad, picks_meg_ref_grad,
                     picks_mag, picks_ref_mag, picks_meg_ref_mag):
            if len(pick) > 0:
                pick_info(info, pick)

    # test CTF expected failures directly
    info = raw_ctf.info
    info['bads'] = []
    picks_meg_ref = pick_types(info, meg=True, ref_meg=True)
    picks_meg = pick_types(info, meg=True, ref_meg=False)
    picks_ref = pick_types(info, meg=False, ref_meg=True)
    picks_mag = pick_types(info, meg='mag', ref_meg=False)
    picks_ref_mag = pick_types(info, meg=False, ref_meg='mag')
    picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag')
    for pick in (picks_meg_ref, picks_ref, picks_ref_mag, picks_meg_ref_mag):
        if len(pick) > 0:
            pick_info(info, pick)

    for pick in (picks_meg, picks_mag):
        if len(pick) > 0:
            with catch_logging() as log:
                pick_info(info, pick, verbose=True)
            assert ('Removing {} compensators'.format(len(info['comps']))
                    in log.getvalue())
Example #39
0
def test_lcmv_vector():
    """Test vector LCMV solutions."""
    info = mne.io.read_raw_fif(fname_raw).info
    # For speed and for rank-deficiency calculation simplicity,
    # just use grads:
    info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=()))
    info.update(bads=[], projs=[])
    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, info['ch_names'])
    vertices = [s['vertno'][::100] for s in forward['src']]
    n_vertices = sum(len(v) for v in vertices)
    assert 5 < n_vertices < 20
    amplitude = 100e-9
    stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices,
                             0, 1. / info['sfreq'])
    forward_sim = mne.convert_forward_solution(forward, force_fixed=True,
                                               use_cps=True, copy=True)
    forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc)
    noise_cov = mne.make_ad_hoc_cov(info)
    noise_cov.update(data=np.diag(noise_cov['data']), diag=False)
    evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1)
    source_nn = forward_sim['source_nn']
    source_rr = forward_sim['source_rr']
    # Figure out our indices
    mask = np.concatenate([np.in1d(s['vertno'], v)
                           for s, v in zip(forward['src'], vertices)])
    mapping = np.where(mask)[0]
    assert_array_equal(source_rr, forward['source_rr'][mapping])
    # Don't check NN because we didn't rotate to surf ori
    del forward_sim

    #
    # Let's do minimum norm as a sanity check (dipole_fit is slower)
    #
    inv = make_inverse_operator(info, forward, noise_cov, loose=1.)
    stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector')
    mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)]
    mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis]
    mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1)))
    assert np.mean(mne_angles) < 35

    #
    # Now let's do LCMV
    #
    data_cov = mne.make_ad_hoc_cov(info)  # just a stub for later
    with pytest.raises(ValueError, match='pick_ori must be one of'):
        make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad')
    lcmv_ori = list()
    for ti in range(n_vertices):
        this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti])
        data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) +
                            noise_cov['data'])
        vals = linalg.svdvals(data_cov['data'])
        assert vals[0] / vals[-1] < 1e5  # not rank deficient
        filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov)
        filters_vector = make_lcmv(info, forward, data_cov, 0.05, noise_cov,
                                   pick_ori='vector')
        stc = apply_lcmv(this_evoked, filters)
        assert isinstance(stc, mne.SourceEstimate)
        stc_vector = apply_lcmv(this_evoked, filters_vector)
        assert isinstance(stc_vector, mne.VectorSourceEstimate)
        assert_allclose(stc.data, stc_vector.magnitude().data)
        # Check the orientation by pooling across some neighbors, as LCMV can
        # have some "holes" at the points of interest
        idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0]
        lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0))
        lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1])

    lcmv_angles = np.rad2deg(np.arccos(np.sum(lcmv_ori * source_nn, axis=-1)))
    assert np.mean(lcmv_angles) < 55
Example #40
0
def test_find_layout():
    """Test finding layout"""
    assert_raises(ValueError, find_layout, test_info, ch_type='meep')

    sample_info = Raw(fif_fname).info
    grads = pick_types(sample_info, meg='grad')
    sample_info2 = pick_info(sample_info, grads)

    mags = pick_types(sample_info, meg='mag')
    sample_info3 = pick_info(sample_info, mags)

    # mock new convention
    sample_info4 = copy.deepcopy(sample_info)
    for ii, name in enumerate(sample_info4['ch_names']):
        new = name.replace(' ', '')
        sample_info4['ch_names'][ii] = new
        sample_info4['chs'][ii]['ch_name'] = new

    eegs = pick_types(sample_info, meg=False, eeg=True)
    sample_info5 = pick_info(sample_info, eegs)

    lout = find_layout(sample_info, ch_type=None)
    assert_true(lout.kind == 'Vectorview-all')
    assert_true(all(' ' in k for k in lout.names))

    lout = find_layout(sample_info2, ch_type='meg')
    assert_true(lout.kind == 'Vectorview-all')

    # test new vector-view
    lout = find_layout(sample_info4, ch_type=None)
    assert_true(lout.kind == 'Vectorview-all')
    assert_true(all(' ' not in k for k in lout.names))

    lout = find_layout(sample_info, ch_type='grad')
    assert_true(lout.kind == 'Vectorview-grad')
    lout = find_layout(sample_info2)
    assert_true(lout.kind == 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='grad')
    assert_true(lout.kind == 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='meg')
    assert_true(lout.kind == 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='mag')
    assert_true(lout.kind == 'Vectorview-mag')
    lout = find_layout(sample_info3)
    assert_true(lout.kind == 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='mag')
    assert_true(lout.kind == 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='meg')
    assert_true(lout.kind == 'Vectorview-all')

    lout = find_layout(sample_info, ch_type='eeg')
    assert_true(lout.kind == 'EEG')
    lout = find_layout(sample_info5)
    assert_true(lout.kind == 'EEG')
    lout = find_layout(sample_info5, ch_type='eeg')
    assert_true(lout.kind == 'EEG')
    # no common layout, 'meg' option not supported

    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    lout = find_layout(Raw(fname_bti_raw).info)
    assert_true(lout.kind == 'magnesWH3600')

    lout = find_layout(Raw(fname_ctf_raw).info)
    assert_true(lout.kind == 'CTF-275')

    lout = find_layout(read_raw_kit(fname_kit_157).info)
    assert_true(lout.kind == 'KIT-157')
Example #41
0
#%% Load Template

template_loc = '/media/ravinderjit/Data_Drive/Data/EEGdata/TemporalCoding/AMmseq_10bits/Pickles/PCA_passive_template.pickle'

with open(template_loc,'rb') as file:
    [pca_coeffs_cuts,pca_expVar_cuts,t_cuts] = pickle.load(file)
    
bstemTemplate  = pca_coeffs_cuts[0][0,:]
cortexTemplate = pca_coeffs_cuts[2][0,:]

vmin = bstemTemplate.mean() - 2 * bstemTemplate.std()
vmax = bstemTemplate.mean() + 2 * bstemTemplate.std()

plt.figure() 
plt.subplot(1,2,1)
mne.viz.plot_topomap(bstemTemplate, mne.pick_info(A_info_obj_sd[2],np.arange(32)),vmin=vmin,vmax=vmax)
plt.title('Brainstem')

plt.subplot(1,2,2)
mne.viz.plot_topomap(cortexTemplate,  mne.pick_info(A_info_obj_sd[2],np.arange(32)),vmin=vmin,vmax=vmax)
plt.title('Cortex')

#%% Plot Ch. Cz



for sub in range(len(Subjects)):
    plt.figure()
    subject = Subjects[sub]
    
    ch = 31  # Ch. Cz
Example #42
0
def test_simulate_calculate_chpi_positions():
    """Test calculation of cHPI positions with simulated data."""
    # Read info dict from raw FIF file
    info = read_info(raw_fname)
    # Tune the info structure
    chpi_channel = u'STI201'
    ncoil = len(info['hpi_results'][0]['order'])
    coil_freq = 10 + np.arange(ncoil) * 5
    hpi_subsystem = {'event_channel': chpi_channel,
                     'hpi_coils': [{'event_bits': np.array([256, 0, 256, 256],
                                                           dtype=np.int32)},
                                   {'event_bits': np.array([512, 0, 512, 512],
                                                           dtype=np.int32)},
                                   {'event_bits':
                                       np.array([1024, 0, 1024, 1024],
                                                dtype=np.int32)},
                                   {'event_bits':
                                       np.array([2048, 0, 2048, 2048],
                                                dtype=np.int32)}],
                     'ncoil': ncoil}

    info['hpi_subsystem'] = hpi_subsystem
    for l, freq in enumerate(coil_freq):
        info['hpi_meas'][0]['hpi_coils'][l]['coil_freq'] = freq
    picks = pick_types(info, meg=True, stim=True, eeg=False, exclude=[])
    info['sfreq'] = 100.  # this will speed it up a lot
    info = pick_info(info, picks)
    info['chs'][info['ch_names'].index('STI 001')]['ch_name'] = 'STI201'
    info._update_redundant()
    info['projs'] = []

    info_trans = info['dev_head_t']['trans'].copy()

    dev_head_pos_ini = np.concatenate([rot_to_quat(info_trans[:3, :3]),
                                       info_trans[:3, 3]])
    ez = np.array([0, 0, 1])  # Unit vector in z-direction of head coordinates

    # Define some constants
    duration = 30  # Time / s

    # Quotient of head position sampling frequency
    # and raw sampling frequency
    head_pos_sfreq_quotient = 0.1

    # Round number of head positions to the next integer
    S = int(duration / (info['sfreq'] * head_pos_sfreq_quotient))
    dz = 0.001  # Shift in z-direction is 1 mm (0.001 m) for each step

    dev_head_pos = np.zeros((S, 10))
    dev_head_pos[:, 0] = np.arange(S) * info['sfreq'] * head_pos_sfreq_quotient
    dev_head_pos[:, 1:4] = dev_head_pos_ini[:3]
    dev_head_pos[:, 4:7] = dev_head_pos_ini[3:] + \
        np.outer(np.arange(S) * dz, ez)
    dev_head_pos[:, 7] = 1.0

    # cm/s
    dev_head_pos[:, 9] = 100 * dz / (info['sfreq'] * head_pos_sfreq_quotient)

    # Round number of samples to the next integer
    raw_data = np.zeros((len(picks), int(duration * info['sfreq'] + 0.5)))
    raw = RawArray(raw_data, info)

    dip = Dipole(np.array([0.0, 0.1, 0.2]),
                 np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
                 np.array([1e-9, 1e-9, 1e-9]),
                 np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
                 np.array([1.0, 1.0, 1.0]), 'dip')
    sphere = make_sphere_model('auto', 'auto', info=info,
                               relative_radii=(1.0, 0.9), sigmas=(0.33, 0.3))
    fwd, stc = make_forward_dipole(dip, sphere, info)
    stc.resample(info['sfreq'])
    raw = simulate_raw(raw, stc, None, fwd['src'], sphere, cov=None,
                       blink=False, ecg=False, chpi=True,
                       head_pos=dev_head_pos, mindist=1.0, interp='zero',
                       verbose=None, use_cps=True)

    quats = _calculate_chpi_positions(
        raw, t_step_min=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_step_max=raw.info['sfreq'] * head_pos_sfreq_quotient, t_window=1.0)
    _assert_quats(quats, dev_head_pos, dist_tol=0.001, angle_tol=1.)
Example #43
0
def _hcp_pick_info(info, ch_names):
    return pick_info(
        info, [info['ch_names'].index(ch) for ch in ch_names],
        copy=True)
Example #44
0
def _hcp_pick_info(info, ch_names):
    """helper to subset info"""
    return pick_info(info, [info['ch_names'].index(ch) for ch in ch_names],
                     copy=True)
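
# Editor's sketch: a small, self-contained check of the helper above using a
# synthetic Info object (channel names are illustrative, not from HCP data;
# assumes pick_info is imported as in the original module).
import mne

_info = mne.create_info(['A1', 'A2', 'A3'], sfreq=1000., ch_types='mag')
_sub = _hcp_pick_info(_info, ['A3', 'A1'])
print(_sub['ch_names'])  # ['A3', 'A1'] -- pick_info keeps the requested order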
Example #45
0
def test_rank():
    """Test cov rank estimation"""
    raw_sample = Raw(raw_fname)

    raw_sss = Raw(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_data_covariance(raw_sample)
    cov_sample_proj = compute_raw_data_covariance(
        raw_sample.copy().apply_proj())

    cov_sss = compute_raw_data_covariance(raw_sss)
    cov_sss_proj = compute_raw_data_covariance(
        raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]
    ))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [c['active'] and
                          len(set(c['data']['col_names'])
                              .intersection(set(this_very_info['ch_names']))) >
                          0 for c in cov['projs']]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if 'proc_history' in this_very_info:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
Example #46
0
path_original_vr = base_path + "\\UnityAlloEgo\\EEG\\Preprocessed\\prep_256.mat"
path_perhead_vr = base_path + "\\UnityAlloEgo\\EEG\\Preprocessed\\prep_perHeadbox_256.mat"
path_bip_vr = base_path + "\\UnityAlloEgo\\EEG\\Preprocessed\\prep_bipolar_256.mat"
path_perelectrode_vr = base_path + "\\UnityAlloEgo\\EEG\\Preprocessed\\prep_perElectrode_256.mat"
path_unity_events = base_path + "UnityAlloEgo\\EEG\\Preprocessed\\p129_unity.csv"
path_onset_events = base_path + "UnityAlloEgo\\EEG\\Preprocessed\\p129_onsets.csv"
path_montage = base_path + "UnityAlloEgo\\EEG\\Preprocessed\\p129_montage.csv"
path_montage_referenced = base_path + "UnityAlloEgo\\EEG\\Preprocessed\\p129_montage_referenced.csv"

FREQUENCY = 256
runfile('M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python/base_setup.py', wdir='M:/Vyzkum/AV/FGU/IntracranialElectrodes/iEEG-python')

# PICKS
pick_perhead_hip = mnehelp.picks_all_localised(raw_perhead_vr, pd_montage_referenced, 'Hi')
pick_perhead_hip_names = mne.pick_info(raw_perhead_vr.info, pick_perhead_hip)['ch_names']
pick_perhead_ins = mnehelp.picks_all_localised(raw_perhead_vr, pd_montage_referenced, 'Ins')
pick_perhead_all = mnehelp.picks_all(raw_perhead_vr)

# BAD EPOCHS
bad_epochs_perhead = [19, 51, 52, 185, 186, 204, 205, 287, 288, 289, 302, 367, 368, 373, 374, 375, 376, 377, 378, 418, 419, 488, 489, 490, 504, 505, 506, 507]
epochs_perhead_vr.drop(bad_epochs_perhead)
#epochs_perhead_vr.plot(block = True, scalings = 'auto', picks=pick_perhead_hip)

iter_freqs = [
    ('Delta', 1, 4),
    ('Theta', 5, 8)
]

events_to_do = {'onsets_500_1500': 14,
 'pointingEnded_Allo': 9,
Example #47
0
bems = mne.make_bem_model(subject, conductivity=(0.3,),
                          subjects_dir=subjects_dir,
                          ico=None)  # ico = None for morphed SP.
bem_sol = mne.make_bem_solution(bems)
bem_sol['surfs'][0]['coord_frame'] = 5

##############################################################################
# Now we can read the channels that we want to map to the cortical locations.
# Then we can compute the forward solution.

info = hcp.read_info(subject=subject, hcp_path=hcp_path, data_type='rest',
                     run_index=0)

picks = mne.pick_types(info, meg=True, ref_meg=False)
info = mne.pick_info(info, picks)

fwd = mne.make_forward_solution(info, trans=head_mri_t, bem=bem_sol,
                                src=src_subject)
mag_map = mne.sensitivity_map(
    fwd, projs=None, ch_type='mag', mode='fixed', exclude=[], verbose=None)

##############################################################################
# We display the sensitivity map on the original surface with little smoothing
# and admire the expected curvature-driven sensitivity pattern.

mag_map = mag_map.to_original_src(src_fsaverage, subjects_dir=subjects_dir)
mag_map.plot(subject='fsaverage', subjects_dir=subjects_dir,
             clim=dict(kind='percent', lims=[0, 50, 99]),
             smoothing_steps=2)
Example #48
0
def test_lcmv_vector():
    """Test vector LCMV solutions."""
    info = mne.io.read_raw_fif(fname_raw).info

    # For speed and for rank-deficiency calculation simplicity,
    # just use grads
    info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=()))
    info.update(bads=[], projs=[])

    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, info['ch_names'])
    vertices = [s['vertno'][::100] for s in forward['src']]
    n_vertices = sum(len(v) for v in vertices)
    assert 5 < n_vertices < 20

    amplitude = 100e-9
    stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices, 0,
                             1. / info['sfreq'])
    forward_sim = mne.convert_forward_solution(forward,
                                               force_fixed=True,
                                               use_cps=True,
                                               copy=True)
    forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc)
    noise_cov = mne.make_ad_hoc_cov(info)
    noise_cov.update(data=np.diag(noise_cov['data']), diag=False)
    evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1)
    source_nn = forward_sim['source_nn']
    source_rr = forward_sim['source_rr']

    # Figure out our indices
    mask = np.concatenate(
        [np.in1d(s['vertno'], v) for s, v in zip(forward['src'], vertices)])
    mapping = np.where(mask)[0]
    assert_array_equal(source_rr, forward['source_rr'][mapping])

    # Don't check NN because we didn't rotate to surf ori
    del forward_sim

    # Let's do minimum norm as a sanity check (dipole_fit is slower)
    inv = make_inverse_operator(info, forward, noise_cov, loose=1.)
    stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector')
    mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)]
    mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis]
    mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1)))
    assert np.mean(mne_angles) < 35

    # Now let's do LCMV
    data_cov = mne.make_ad_hoc_cov(info)  # just a stub for later
    with pytest.raises(ValueError, match="pick_ori"):
        make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad')

    lcmv_ori = list()
    for ti in range(n_vertices):
        this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti])
        data_cov['diag'] = False
        data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) +
                            noise_cov['data'])
        vals = linalg.svdvals(data_cov['data'])
        assert vals[0] / vals[-1] < 1e5  # not rank deficient

        with catch_logging() as log:
            filters = make_lcmv(info,
                                forward,
                                data_cov,
                                0.05,
                                noise_cov,
                                verbose=True)
        log = log.getvalue()
        assert '498 sources' in log
        with catch_logging() as log:
            filters_vector = make_lcmv(info,
                                       forward,
                                       data_cov,
                                       0.05,
                                       noise_cov,
                                       pick_ori='vector',
                                       verbose=True)
        log = log.getvalue()
        assert '498 sources' in log
        stc = apply_lcmv(this_evoked, filters)
        stc_vector = apply_lcmv(this_evoked, filters_vector)
        assert isinstance(stc, mne.SourceEstimate)
        assert isinstance(stc_vector, mne.VectorSourceEstimate)
        assert_allclose(stc.data, stc_vector.magnitude().data)

        # Check the orientation by pooling across some neighbors, as LCMV can
        # have some "holes" at the points of interest
        idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0]
        lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0))
        lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1])

    lcmv_angles = np.rad2deg(np.arccos(np.sum(lcmv_ori * source_nn, axis=-1)))
    assert np.mean(lcmv_angles) < 55
Example #49
0
def test_find_layout():
    """Test finding layout"""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        find_layout(chs=test_info['chs'])
        assert_true(w[0].category == DeprecationWarning)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        find_layout(test_info['chs'])
        assert_true(w[0].category == DeprecationWarning)
    assert_raises(ValueError, find_layout, dict())
    assert_raises(ValueError, find_layout, test_info, ch_type='meep')

    sample_info = Raw(fif_fname).info
    grads = pick_types(sample_info, meg='grad')
    sample_info2 = pick_info(sample_info, grads)

    mags = pick_types(sample_info, meg='mag')
    sample_info3 = pick_info(sample_info, mags)

    # mock new convention
    sample_info4 = copy.deepcopy(sample_info)
    for ii, name in enumerate(sample_info4['ch_names']):
        new = name.replace(' ', '')
        sample_info4['ch_names'][ii] = new
        sample_info4['chs'][ii]['ch_name'] = new

    eegs = pick_types(sample_info, meg=False, eeg=True)
    sample_info5 = pick_info(sample_info, eegs)

    lout = find_layout(sample_info, ch_type=None)
    assert_true(lout.kind == 'Vectorview-all')
    assert_true(all(' ' in k for k in lout.names))

    lout = find_layout(sample_info2, ch_type='meg')
    assert_true(lout.kind == 'Vectorview-all')

    # test new vector-view
    lout = find_layout(sample_info4, ch_type=None)
    assert_true(lout.kind == 'Vectorview-all')
    assert_true(all(' ' not in k for k in lout.names))

    lout = find_layout(sample_info, ch_type='grad')
    assert_true(lout.kind == 'Vectorview-grad')
    lout = find_layout(sample_info2)
    assert_true(lout.kind == 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='grad')
    assert_true(lout.kind == 'Vectorview-grad')
    lout = find_layout(sample_info2, ch_type='meg')
    assert_true(lout.kind == 'Vectorview-all')


    lout = find_layout(sample_info, ch_type='mag')
    assert_true(lout.kind == 'Vectorview-mag')
    lout = find_layout(sample_info3)
    assert_true(lout.kind == 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='mag')
    assert_true(lout.kind == 'Vectorview-mag')
    lout = find_layout(sample_info3, ch_type='meg')
    assert_true(lout.kind == 'Vectorview-all')
    #
    lout = find_layout(sample_info, ch_type='eeg')
    assert_true(lout.kind == 'EEG')
    lout = find_layout(sample_info5)
    assert_true(lout.kind == 'EEG')
    lout = find_layout(sample_info5, ch_type='eeg')
    assert_true(lout.kind == 'EEG')
    # no common layout, 'meg' option not supported

    fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
    lout = find_layout(Raw(fname_bti_raw).info)
    assert_true(lout.kind == 'magnesWH3600')

    lout = find_layout(Raw(fname_ctf_raw).info)
    assert_true(lout.kind == 'CTF-275')

    lout = find_layout(read_raw_kit(fname_kit_157).info)
    assert_true(lout.kind == 'KIT-157')

    sample_info5['dig'] = []
    assert_raises(RuntimeError, find_layout, sample_info5)
Example #50
0
# %%
# Now we run the signal through the forward model to obtain simulated sensor
# data. To save computation time, we'll only simulate gradiometer data. You can
# try simulating other types of sensors as well.
#
# Some noise is added based on the baseline noise covariance matrix from the
# sample dataset, scaled to implement the desired SNR.

# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_raw(raw_fname).crop(0, 1).resample(50).info

# Only use gradiometers
picks = mne.pick_types(info, meg='grad', stim=True, exclude=())
mne.pick_info(info, picks, copy=False)

# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov['data'] *= (20. / snr)**2  # Scale the noise to achieve the desired SNR

# Simulate the raw data, with a lowpass filter on the noise
stcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1),
        (stc_noise, unit_impulse(n_samp, dtype=int) * 2)]  # stacked in time
duration = (len(stc_signal.times) * 2) / sfreq
raw = simulate_raw(info, stcs, forward=fwd)
add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)

# %%
# We create an :class:`mne.Epochs` object containing two trials: one with
Example #51
0
# Get a dictionary of channel indices, grouped by channel type
channel_indices_by_type = mne.io.pick.channel_indices_by_type(info)
print('The first three magnetometers:', channel_indices_by_type['mag'][:3])

###############################################################################
# Obtaining information about channels
# ------------------------------------

# Channel type of a specific channel
channel_type = mne.io.pick.channel_type(info, 75)
print('Channel #75 is of type:', channel_type)

# Channel types of a collection of channels
meg_channels = mne.pick_types(info, meg=True)[:10]
channel_types = [mne.io.pick.channel_type(info, ch) for ch in meg_channels]
print('First 10 MEG channels are of type:\n', channel_types)

###############################################################################
# Dropping channels from an info structure
# ----------------------------------------
#
# It is possible to limit the info structure to only include a subset of
# channels with the :func:`mne.pick_info` function:

# Only keep EEG channels
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
reduced_info = mne.pick_info(info, eeg_indices)

print(reduced_info)
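
###############################################################################
# Editor's addition (a hedged sketch, not part of the original tutorial): the
# same subsetting can also be driven by channel *names*, by converting them to
# indices with :func:`mne.pick_channels` first.

wanted_names = info['ch_names'][:5]  # an arbitrary, illustrative selection
wanted_indices = mne.pick_channels(info['ch_names'], include=wanted_names)
print(mne.pick_info(info, wanted_indices)['ch_names'])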
Example #52
0
mne.set_log_level(False)

fn_stc_signal = fname.stc_signal(vertex=config.vertex)
fn_simulated_raw = fname.simulated_raw(vertex=config.vertex)
fn_simulated_epochs = fname.simulated_epochs(vertex=config.vertex)

# fn_report_h5 = fname.report(vertex=config.vertex)
fn_report_h5 = None  # Don't produce a report

###############################################################################
# Simulate raw data and create epochs
###############################################################################

print('simulate data')
info = mne.io.read_info(fname.sample_raw)
info = mne.pick_info(info, mne.pick_types(info, meg=True, eeg=False))
fwd_disc_true = mne.read_forward_solution(fname.fwd_discrete_true)
fwd_disc_true = mne.pick_types_forward(fwd_disc_true, meg=True, eeg=False)
er_raw = mne.io.read_raw_fif(fname.ernoise, preload=True)

raw, stc_signal = simulate_raw(info=info,
                               fwd_disc_true=fwd_disc_true,
                               signal_vertex=config.vertex,
                               signal_freq=config.signal_freq,
                               n_trials=config.n_trials,
                               noise_multiplier=config.noise,
                               random_state=config.random,
                               n_noise_dipoles=config.n_noise_dipoles_vol,
                               er_raw=er_raw)

true_ori = fwd_disc_true['src'][0]['nn'][config.vertex]
Example #53
0
def test_resolution_matrix_lcmv():
    """Test computation of resolution matrix for LCMV beamformers."""
    # read forward solution
    forward = mne.read_forward_solution(fname_fwd)

    # remove bad channels
    forward = mne.pick_channels_forward(forward, exclude='bads')

    # forward operator with fixed source orientations
    forward_fxd = mne.convert_forward_solution(forward,
                                               surf_ori=True,
                                               force_fixed=True)

    # evoked info
    info = mne.io.read_info(fname_evoked)
    mne.pick_info(info, mne.pick_types(info, meg=True), copy=False)  # good MEG

    # noise covariance matrix
    # ad-hoc to avoid discrepancies due to regularisation of real noise
    # covariance matrix
    noise_cov = mne.make_ad_hoc_cov(info)

    # Resolution matrix for Beamformer
    data_cov = noise_cov.copy()  # to test a property of LCMV

    # compute beamformer filters
    # reg=0. to make sure noise_cov and data_cov are as similar as possible
    filters = make_lcmv(info,
                        forward_fxd,
                        data_cov,
                        reg=0.,
                        noise_cov=noise_cov,
                        pick_ori=None,
                        rank=None,
                        weight_norm=None,
                        reduce_rank=False,
                        verbose=False)

    # Compute resolution matrix for beamformer
    resmat_lcmv = make_lcmv_resolution_matrix(filters, forward_fxd, info)

    # for noise_cov==data_cov and whitening, the filter weights should be the
    # transpose of leadfield

    # create filters with transposed whitened leadfield as weights
    forward_fxd = mne.pick_channels_forward(forward_fxd, info['ch_names'])
    filters_lfd = deepcopy(filters)
    filters_lfd['weights'][:] = forward_fxd['sol']['data'].T

    # compute resolution matrix for filters with transposed leadfield
    resmat_fwd = make_lcmv_resolution_matrix(filters_lfd, forward_fxd, info)

    # pairwise correlation for rows (CTFs) of resolution matrices for whitened
    # LCMV beamformer and transposed leadfield should be 1
    # Some rows are off by about 0.1 - not yet clear why
    corr = []

    for f, l in zip(resmat_fwd, resmat_lcmv):
        corr.append(np.corrcoef(f, l)[0, 1])

    # all row correlations should at least be above ~0.8
    assert_allclose(corr, 1., atol=0.2)

    # Maximum row correlation should at least be close to 1
    assert_allclose(np.max(corr), 1., atol=0.01)
Example #54
0
def test_pick_refs():
    """Test picking of reference sensors."""
    infos = list()
    # KIT
    kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
    sqd_path = op.join(kit_dir, 'test.sqd')
    mrk_path = op.join(kit_dir, 'test_mrk.sqd')
    elp_path = op.join(kit_dir, 'test_elp.txt')
    hsp_path = op.join(kit_dir, 'test_hsp.txt')
    raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
    infos.append(raw_kit.info)
    # BTi
    bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
    bti_pdf = op.join(bti_dir, 'test_pdf_linux')
    bti_config = op.join(bti_dir, 'test_config_linux')
    bti_hs = op.join(bti_dir, 'test_hs_linux')
    raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
    infos.append(raw_bti.info)
    # CTF
    fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
    raw_ctf = read_raw_fif(fname_ctf_raw)
    raw_ctf.apply_gradient_compensation(2)
    for info in infos:
        info['bads'] = []
        _assert_channel_types(info)
        pytest.raises(ValueError, pick_types, info, meg='foo')
        pytest.raises(ValueError, pick_types, info, ref_meg='foo')
        picks_meg_ref = pick_types(info, meg=True, ref_meg=True)
        picks_meg = pick_types(info, meg=True, ref_meg=False)
        picks_ref = pick_types(info, meg=False, ref_meg=True)
        assert_array_equal(picks_meg_ref,
                           np.sort(np.concatenate([picks_meg, picks_ref])))
        picks_grad = pick_types(info, meg='grad', ref_meg=False)
        picks_ref_grad = pick_types(info, meg=False, ref_meg='grad')
        picks_meg_ref_grad = pick_types(info, meg='grad', ref_meg='grad')
        assert_array_equal(
            picks_meg_ref_grad,
            np.sort(np.concatenate([picks_grad, picks_ref_grad])))
        picks_mag = pick_types(info, meg='mag', ref_meg=False)
        picks_ref_mag = pick_types(info, meg=False, ref_meg='mag')
        picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag')
        assert_array_equal(picks_meg_ref_mag,
                           np.sort(np.concatenate([picks_mag, picks_ref_mag])))
        assert_array_equal(picks_meg,
                           np.sort(np.concatenate([picks_mag, picks_grad])))
        assert_array_equal(
            picks_ref, np.sort(np.concatenate([picks_ref_mag,
                                               picks_ref_grad])))
        assert_array_equal(
            picks_meg_ref,
            np.sort(
                np.concatenate(
                    [picks_grad, picks_mag, picks_ref_grad, picks_ref_mag])))

        for pick in (picks_meg_ref, picks_meg, picks_ref, picks_grad,
                     picks_ref_grad, picks_meg_ref_grad, picks_mag,
                     picks_ref_mag, picks_meg_ref_mag):
            if len(pick) > 0:
                pick_info(info, pick)

    # test CTF expected failures directly
    info = raw_ctf.info
    info['bads'] = []
    picks_meg_ref = pick_types(info, meg=True, ref_meg=True)
    picks_meg = pick_types(info, meg=True, ref_meg=False)
    picks_ref = pick_types(info, meg=False, ref_meg=True)
    picks_mag = pick_types(info, meg='mag', ref_meg=False)
    picks_ref_mag = pick_types(info, meg=False, ref_meg='mag')
    picks_meg_ref_mag = pick_types(info, meg='mag', ref_meg='mag')
    for pick in (picks_meg_ref, picks_ref, picks_ref_mag, picks_meg_ref_mag):
        if len(pick) > 0:
            pick_info(info, pick)

    for pick in (picks_meg, picks_mag):
        if len(pick) > 0:
            with catch_logging() as log:
                pick_info(info, pick, verbose=True)
            assert ('Removing {} compensators'.format(len(info['comps']))
                    in log.getvalue())
    picks_ref_grad = pick_types(info, meg=False, ref_meg='grad')
    assert set(picks_ref_mag) == set(picks_ref)
    assert len(picks_ref_grad) == 0
    all_meg = np.arange(3, 306)
    assert_array_equal(np.concatenate([picks_ref, picks_meg]), all_meg)
    assert_array_equal(picks_meg_ref_mag, all_meg)
Example #55
0
        
# Changing S001 by visual inspection
A_pca_space_IAC[0] = -A_pca_space_IAC[0]
A_pca_coeff_IAC[0] = -A_pca_coeff_IAC[0]


# %% Plot Topomaps
ch_picks = range(32)
sbp = [3, 3]
vmin = (A_pca_coeff_IAC[0].mean(axis=1)[0] -
        2 * A_pca_coeff_IAC[0].std(axis=1)[0])
vmax = (A_pca_coeff_IAC[0].mean(axis=1)[0] +
        2 * A_pca_coeff_IAC[0].std(axis=1)[0])
plt.figure()
pca_coeffs_IAC_c1 = np.zeros([len(ch_picks), len(Subjects)])
for s in range(sbp[0] * sbp[1]):
    plt.subplot(sbp[0], sbp[1], s + 1)
    mne.viz.plot_topomap(A_pca_coeff_IAC[s][0, :],
                         mne.pick_info(IAC_epochs.info, ch_picks),
                         vmin=vmin, vmax=vmax)
    plt.title(Subjects[s])
    pca_coeffs_IAC_c1[:, s] = A_pca_coeff_IAC[s][0, :]

plt.figure()
mne.viz.plot_topomap(pca_coeffs_IAC_c1.mean(axis=1),
                     mne.pick_info(IAC_epochs.info, ch_picks),
                     vmin=vmin, vmax=vmax)
plt.title('IAC from averaging pca coefficients')

# Get all IAC nf coeffs
num_nfs = len(A_pca_coeff_IAC_nf[0])
IAC_coeffs_nf = np.zeros([32, num_nfs, len(Subjects)])
for sub in range(len(Subjects)):
    for nf in range(num_nfs):
        IAC_coeffs_nf[:, nf, sub] = A_pca_coeff_IAC_nf[sub][nf][0, :]

plt.figure()
Example #56
0
    def fit(self, epochs):
        """Fit the epochs on the AutoReject object.

        Parameters
        ----------
        epochs : instance of mne.Epochs
            The epochs object to be fit.

        Returns
        -------
        self : instance of AutoReject
            The instance.
        """
        self.picks_ = _handle_picks(picks=self.picks, info=epochs.info)
        _check_data(epochs, picks=self.picks_, verbose=self.verbose)
        self.cv_ = self.cv
        if isinstance(self.cv_, int):
            self.cv_ = KFold(n_splits=self.cv_)

        # XXX : maybe use an mne function in pick.py ?
        picks_by_type = _get_picks_by_type(info=epochs.info, picks=self.picks_)
        ch_types = [ch_type for ch_type, _ in picks_by_type]
        self.dots = None
        if 'mag' in ch_types or 'grad' in ch_types:
            meg_picks = pick_types(epochs.info, meg=True,
                                   eeg=False, exclude=[])
            this_info = mne.pick_info(epochs.info, meg_picks, copy=True)
            self.dots = _compute_dots(this_info)

        thresh_func = partial(_compute_thresholds, n_jobs=self.n_jobs,
                              method=self.thresh_method,
                              random_state=self.random_state,
                              dots=self.dots)

        if self.n_interpolate is None:
            if len(self.picks_) < 4:
                raise ValueError('Too few channels. autoreject is unlikely'
                                 ' to be effective')
            # XXX: don't interpolate all channels
            max_interp = min(len(self.picks_) - 1, 32)
            self.n_interpolate = np.array([1, 4, max_interp])

        self.n_interpolate_ = dict()  # rho
        self.consensus_ = dict()  # kappa
        self.threshes_ = dict()  # update
        self.loss_ = dict()
        self.local_reject_ = dict()

        for ch_type, this_picks in picks_by_type:
            if self.verbose is not False:
                print('Running autoreject on ch_type=%s' % ch_type)
            this_local_reject, this_loss = \
                _run_local_reject_cv(epochs, thresh_func, this_picks,
                                     self.n_interpolate, self.cv_,
                                     self.consensus, self.dots,
                                     self.verbose)
            self.threshes_.update(this_local_reject.threshes_)

            best_idx, best_jdx = \
                np.unravel_index(this_loss.mean(axis=-1).argmin(),
                                 this_loss.shape[:2])

            self.consensus_[ch_type] = self.consensus[best_idx]
            self.n_interpolate_[ch_type] = self.n_interpolate[best_jdx]
            self.loss_[ch_type] = this_loss

            # update local reject with best and store it
            this_local_reject.consensus_[ch_type] = self.consensus_[ch_type]
            this_local_reject.n_interpolate_[ch_type] = \
                self.n_interpolate_[ch_type]

            # needed for generating reject logs by channel
            self.local_reject_[ch_type] = this_local_reject

            if self.verbose is not False:
                print('\n\n\n\nEstimated consensus=%0.2f and n_interpolate=%d'
                      % (self.consensus_[ch_type],
                         self.n_interpolate_[ch_type]))
        return self
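
# A hedged usage sketch for the ``fit`` method above (not part of the original
# listing): it assumes the ``autoreject`` package is installed and that an
# ``epochs`` object (an instance of mne.Epochs) already exists; the parameter
# values are purely illustrative.
from autoreject import AutoReject

ar = AutoReject(n_interpolate=[1, 4, 8], random_state=42, verbose=True)
ar.fit(epochs)                       # runs the cross-validated search above
epochs_clean = ar.transform(epochs)  # repair/drop epochs using the fitted model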
Example #57
0
def test_rank():
    """Test cov rank estimation."""
    # Test that our rank estimation works properly on a simple case
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=False)
    cov = read_cov(cov_fname)
    ch_names = [ch for ch in evoked.info['ch_names'] if '053' not in ch and
                ch.startswith('EEG')]
    cov = prepare_noise_cov(cov, evoked.info, ch_names, None)
    assert_equal(cov['eig'][0], 0.)  # avg projector should set this to zero
    assert_true((cov['eig'][1:] > 0).all())  # all else should be > 0

    # Now do some more comprehensive tests
    raw_sample = read_raw_fif(raw_fname)

    raw_sss = read_raw_fif(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(
        raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(
        raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]
    ))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [c['active'] and
                          len(set(c['data']['col_names'])
                              .intersection(set(this_very_info['ch_names']))) >
                          0 for c in cov['projs']]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if len(this_very_info['proc_history']) > 0:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
Example #58
0
def test_rank():
    """Test cov rank estimation"""
    raw_sample = Raw(raw_fname)

    raw_sss = Raw(hp_fif_fname)
    raw_sss.add_proj(compute_proj_raw(raw_sss))

    cov_sample = compute_raw_covariance(raw_sample)
    cov_sample_proj = compute_raw_covariance(
        raw_sample.copy().apply_proj())

    cov_sss = compute_raw_covariance(raw_sss)
    cov_sss_proj = compute_raw_covariance(
        raw_sss.copy().apply_proj())

    picks_all_sample = pick_types(raw_sample.info, meg=True, eeg=True)
    picks_all_sss = pick_types(raw_sss.info, meg=True, eeg=True)

    info_sample = pick_info(raw_sample.info, picks_all_sample)
    picks_stack_sample = [('eeg', pick_types(info_sample, meg=False,
                                             eeg=True))]
    picks_stack_sample += [('meg', pick_types(info_sample, meg=True))]
    picks_stack_sample += [('all',
                            pick_types(info_sample, meg=True, eeg=True))]

    info_sss = pick_info(raw_sss.info, picks_all_sss)
    picks_stack_somato = [('eeg', pick_types(info_sss, meg=False, eeg=True))]
    picks_stack_somato += [('meg', pick_types(info_sss, meg=True))]
    picks_stack_somato += [('all',
                            pick_types(info_sss, meg=True, eeg=True))]

    iter_tests = list(itt.product(
        [(cov_sample, picks_stack_sample, info_sample),
         (cov_sample_proj, picks_stack_sample, info_sample),
         (cov_sss, picks_stack_somato, info_sss),
         (cov_sss_proj, picks_stack_somato, info_sss)],  # sss
        [dict(mag=1e15, grad=1e13, eeg=1e6)]
    ))

    for (cov, picks_list, this_info), scalings in iter_tests:
        for ch_type, picks in picks_list:

            this_very_info = pick_info(this_info, picks)

            # compute subset of projs
            this_projs = [c['active'] and
                          len(set(c['data']['col_names'])
                              .intersection(set(this_very_info['ch_names']))) >
                          0 for c in cov['projs']]
            n_projs = sum(this_projs)

            # count channel types
            ch_types = [channel_type(this_very_info, idx)
                        for idx in range(len(picks))]
            n_eeg, n_mag, n_grad = [ch_types.count(k) for k in
                                    ['eeg', 'mag', 'grad']]
            n_meg = n_mag + n_grad
            if ch_type in ('all', 'eeg'):
                n_projs_eeg = 1
            else:
                n_projs_eeg = 0

            # check sss
            if 'proc_history' in this_very_info:
                mf = this_very_info['proc_history'][0]['max_info']
                n_free = _get_sss_rank(mf)
                if 'mag' not in ch_types and 'grad' not in ch_types:
                    n_free = 0
                # - n_projs XXX clarify
                expected_rank = n_free + n_eeg
                if n_projs > 0 and ch_type in ('all', 'eeg'):
                    expected_rank -= n_projs_eeg
            else:
                expected_rank = n_meg + n_eeg - n_projs

            C = cov['data'][np.ix_(picks, picks)]
            est_rank = _estimate_rank_meeg_cov(C, this_very_info,
                                               scalings=scalings)

            assert_equal(expected_rank, est_rank)
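
# A hedged worked example of the rank bookkeeping used in the tests above; the
# channel and projector counts below are invented for illustration and are not
# read from any dataset.
n_mag, n_grad, n_eeg, n_projs = 102, 204, 59, 4
n_meg = n_mag + n_grad
expected_rank_no_sss = n_meg + n_eeg - n_projs  # the non-SSS branch above
print('expected rank without SSS:', expected_rank_no_sss)  # 361 for these counts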
Example #59
0
def test_simulate_calculate_chpi_positions():
    """Test calculation of cHPI positions with simulated data."""
    # Read info dict from raw FIF file
    info = read_info(raw_fname)
    # Tune the info structure
    chpi_channel = u'STI201'
    ncoil = len(info['hpi_results'][0]['order'])
    coil_freq = 10 + np.arange(ncoil) * 5
    hpi_subsystem = {'event_channel': chpi_channel,
                     'hpi_coils': [{'event_bits': np.array([256, 0, 256, 256],
                                                           dtype=np.int32)},
                                   {'event_bits': np.array([512, 0, 512, 512],
                                                           dtype=np.int32)},
                                   {'event_bits':
                                       np.array([1024, 0, 1024, 1024],
                                                dtype=np.int32)},
                                   {'event_bits':
                                       np.array([2048, 0, 2048, 2048],
                                                dtype=np.int32)}],
                     'ncoil': ncoil}

    info['hpi_subsystem'] = hpi_subsystem
    for l, freq in enumerate(coil_freq):
        info['hpi_meas'][0]['hpi_coils'][l]['coil_freq'] = freq
    picks = pick_types(info, meg=True, stim=True, eeg=False, exclude=[])
    info['sfreq'] = 100.  # this will speed it up a lot
    info = pick_info(info, picks)
    info['chs'][info['ch_names'].index('STI 001')]['ch_name'] = 'STI201'
    info._update_redundant()
    info['projs'] = []

    info_trans = info['dev_head_t']['trans'].copy()

    dev_head_pos_ini = np.concatenate([rot_to_quat(info_trans[:3, :3]),
                                      info_trans[:3, 3]])
    ez = np.array([0, 0, 1])  # Unit vector in z-direction of head coordinates

    # Define some constants
    duration = 30  # Time / s

    # Quotient of head position sampling frequency
    # and raw sampling frequency
    head_pos_sfreq_quotient = 0.1

    # Round number of head positions to the next integer
    S = int(duration / (info['sfreq'] * head_pos_sfreq_quotient))
    dz = 0.001  # Shift in z-direction is 1 mm (0.001 m) per step

    dev_head_pos = np.zeros((S, 10))
    dev_head_pos[:, 0] = np.arange(S) * info['sfreq'] * head_pos_sfreq_quotient
    dev_head_pos[:, 1:4] = dev_head_pos_ini[:3]
    dev_head_pos[:, 4:7] = dev_head_pos_ini[3:] + \
        np.outer(np.arange(S) * dz, ez)
    dev_head_pos[:, 7] = 1.0

    # cm/s
    dev_head_pos[:, 9] = 100 * dz / (info['sfreq'] * head_pos_sfreq_quotient)

    # Round number of samples to the next integer
    raw_data = np.zeros((len(picks), int(duration * info['sfreq'] + 0.5)))
    raw = RawArray(raw_data, info)

    dip = Dipole(np.array([0.0, 0.1, 0.2]),
                 np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
                 np.array([1e-9, 1e-9, 1e-9]),
                 np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]),
                 np.array([1.0, 1.0, 1.0]), 'dip')
    sphere = make_sphere_model('auto', 'auto', info=info,
                               relative_radii=(1.0, 0.9), sigmas=(0.33, 0.3))
    fwd, stc = make_forward_dipole(dip, sphere, info)
    stc.resample(info['sfreq'])
    raw = simulate_raw(raw, stc, None, fwd['src'], sphere, cov=None,
                       blink=False, ecg=False, chpi=True,
                       head_pos=dev_head_pos, mindist=1.0, interp='zero',
                       verbose=None, use_cps=True)

    quats = _calculate_chpi_positions(
        raw, t_step_min=raw.info['sfreq'] * head_pos_sfreq_quotient,
        t_step_max=raw.info['sfreq'] * head_pos_sfreq_quotient, t_window=1.0)
    _assert_quats(quats, dev_head_pos, dist_tol=0.001, angle_tol=1.)
Example #60
0
import mne

data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_trans = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis_raw-trans.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_src_fs = op.join(subjects_dir, 'fsaverage', 'bem',
                       'fsaverage-ico-5-src.fif')
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')

# Get relevant channel information
info = mne.io.read_info(raw_fname)
info = mne.pick_info(info, mne.pick_types(info,
                                          meg=True,
                                          eeg=False,
                                          exclude=[]))

# Morph fsaverage's source space to sample
src_fs = mne.read_source_spaces(fname_src_fs)
src_morph = mne.morph_source_spaces(src_fs,
                                    subject_to='sample',
                                    subjects_dir=subjects_dir)

# Compute the forward with our morphed source space
fwd = mne.make_forward_solution(info,
                                trans=fname_trans,
                                src=src_morph,
                                bem=fname_bem)
mag_map = mne.sensitivity_map(fwd, ch_type='mag')