Example #1
def test_rap_music_simulated():
    """Test RAP-MUSIC with simulated evoked."""
    evoked, noise_cov = _get_data(ch_decim=16)
    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, evoked.ch_names)
    forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True)
    forward_fixed = mne.convert_forward_solution(forward,
                                                 force_fixed=True,
                                                 surf_ori=True,
                                                 use_cps=True)

    n_dipoles = 2
    sim_evoked, stc = simu_data(evoked,
                                forward_fixed,
                                noise_cov,
                                n_dipoles,
                                evoked.times,
                                nave=evoked.nave)
    # Check dipoles for fixed ori
    with catch_logging() as log:
        dipoles = rap_music(sim_evoked,
                            forward_fixed,
                            noise_cov,
                            n_dipoles=n_dipoles,
                            verbose=True)
    assert_var_exp_log(log.getvalue(), 89, 91)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked)
    assert 97 < dipoles[0].gof.max() < 100
    assert 91 < dipoles[1].gof.max() < 93
    assert dipoles[0].gof.min() >= 0.

    nave = 100000  # add a tiny amount of noise to the simulated evokeds
    sim_evoked, stc = simu_data(evoked,
                                forward_fixed,
                                noise_cov,
                                n_dipoles,
                                evoked.times,
                                nave=nave)
    dipoles, residual = rap_music(sim_evoked,
                                  forward_fixed,
                                  noise_cov,
                                  n_dipoles=n_dipoles,
                                  return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)

    # Check dipoles for free ori
    dipoles, residual = rap_music(sim_evoked,
                                  forward,
                                  noise_cov,
                                  n_dipoles=n_dipoles,
                                  return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)

    # Check dipoles for free surface ori
    dipoles, residual = rap_music(sim_evoked,
                                  forward_surf_ori,
                                  noise_cov,
                                  n_dipoles=n_dipoles,
                                  return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
Example #2
def _simulate_data(fwd, idx):  # Somewhere on the frontal lobe by default
    """Simulate an oscillator on the cortex."""
    source_vertno = fwd['src'][0]['vertno'][idx]

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch (weird baseline but shouldn't matter)
    epochs = mne.Epochs(raw, [[0, 0, 1]],
                        event_id=1,
                        tmin=0,
                        tmax=raw.times[-1],
                        baseline=(0., 0.),
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    labels = mne.read_labels_from_annot('sample',
                                        hemi='lh',
                                        subjects_dir=subjects_dir)
    label = [
        label for label in labels if np.in1d(source_vertno, label.vertices)[0]
    ]
    assert len(label) == 1
    label = label[0]
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
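A brief driver sketch for the helper above, assuming the module-level names fname_fwd and subjects_dir exist as in the surrounding test files (the file name, the gradiometer restriction, and the idx value are assumptions, not from the source):

import mne

fwd = mne.read_forward_solution(fname_fwd)  # fname_fwd assumed to be defined
fwd = mne.pick_types_forward(fwd, meg='grad', exclude='bads')  # grads only
epochs, evoked, csd, source_vertno, label, vertices, source_ind = \
    _simulate_data(fwd, idx=100)  # idx chosen arbitrarily for illustration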
Example #3
def test_load_fiff_mne():
    data_path = mne.datasets.sample.data_path()
    fwd_path = os.path.join(data_path, 'MEG', 'sample', 'sample-ico-4-fwd.fif')
    evoked_path = os.path.join(data_path, 'MEG', 'sample',
                               'sample_audvis-no-filter-ave.fif')
    cov_path = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
    mri_sdir = os.path.join(data_path, 'subjects')

    mne_evoked = mne.read_evokeds(evoked_path, 'Left Auditory')
    mne_fwd = mne.read_forward_solution(fwd_path)
    mne_fwd = mne.convert_forward_solution(mne_fwd, force_fixed=True, use_cps=True)
    cov = mne.read_cov(cov_path)

    picks = mne.pick_types(mne_evoked.info, 'mag')
    channels = [mne_evoked.ch_names[i] for i in picks]

    mne_evoked = mne_evoked.pick_channels(channels)
    mne_fwd = mne.pick_channels_forward(mne_fwd, channels)
    cov = mne.pick_channels_cov(cov, channels)

    mne_inv = mne.minimum_norm.make_inverse_operator(mne_evoked.info, mne_fwd,
                                                     cov, 0, None, True)

    mne_stc = mne.minimum_norm.apply_inverse(mne_evoked, mne_inv, 1., 'MNE')

    meg = load.fiff.evoked_ndvar(mne_evoked)
    inv = load.fiff.inverse_operator(mne_inv, 'ico-4', mri_sdir)
    stc = inv.dot(meg)
    assert_array_almost_equal(stc.get_data(('source', 'time')), mne_stc.data)

    fwd = load.fiff.forward_operator(mne_fwd, 'ico-4', mri_sdir)
    reconstruct = fwd.dot(stc)
    mne_reconstruct = mne.apply_forward(mne_fwd, mne_stc, mne_evoked.info)
    assert_array_almost_equal(reconstruct.get_data(('sensor', 'time')),
                              mne_reconstruct.data)
Example #4
def test_set_eeg_reference_rest():
    """Test setting a REST reference."""
    raw = read_raw_fif(fif_fname).crop(0,
                                       1).pick_types(meg=False,
                                                     eeg=True,
                                                     exclude=()).load_data()
    raw.info['bads'] = ['EEG 057']  # should be excluded
    same = [raw.ch_names.index(raw.info['bads'][0])]
    picks = np.setdiff1d(np.arange(len(raw.ch_names)), same)
    trans = None
    sphere = make_sphere_model('auto', 'auto', raw.info)
    src = setup_volume_source_space(pos=20., sphere=sphere, exclude=30.)
    assert src[0]['nuse'] == 223  # low but fast
    fwd = make_forward_solution(raw.info, trans, src, sphere)
    orig_data = raw.get_data()
    avg_data = raw.copy().set_eeg_reference('average').get_data()
    assert_array_equal(avg_data[same], orig_data[same])  # not processed
    raw.set_eeg_reference('REST', forward=fwd)
    rest_data = raw.get_data()
    assert_array_equal(rest_data[same], orig_data[same])
    # should be more similar to an avg ref than nose ref
    orig_corr = np.corrcoef(rest_data[picks].ravel(),
                            orig_data[picks].ravel())[0, 1]
    avg_corr = np.corrcoef(rest_data[picks].ravel(),
                           avg_data[picks].ravel())[0, 1]
    assert -0.6 < orig_corr < -0.5
    assert 0.1 < avg_corr < 0.2
    # and applying an avg ref after should work
    avg_after = raw.set_eeg_reference('average').get_data()
    assert_allclose(avg_after, avg_data, atol=1e-12)
    with pytest.raises(TypeError, match='forward when ref_channels="REST"'):
        raw.set_eeg_reference('REST')
    fwd_bad = pick_channels_forward(fwd, raw.ch_names[:-1])
    with pytest.raises(ValueError, match='Missing channels'):
        raw.set_eeg_reference('REST', forward=fwd_bad)
    # compare to FieldTrip
    evoked = read_evokeds(ave_fname, baseline=(None, 0))[0]
    evoked.info['bads'] = []
    evoked.pick_types(meg=False, eeg=True, exclude=())
    assert len(evoked.ch_names) == 60
    # Data obtained from FieldTrip with something like (after evoked.save'ing
    # then scipy.io.savemat'ing fwd['sol']['data']):
    # dat = ft_read_data('ft-ave.fif');
    # load('leadfield.mat', 'G');
    # dat_ref = ft_preproc_rereference(dat, 'all', 'rest', true, G);
    # sprintf('%g ', dat_ref(:, 171));
    want = np.array(
        '-3.3265e-05 -3.2419e-05 -3.18758e-05 -3.24079e-05 -3.39801e-05 -3.40573e-05 -3.24163e-05 -3.26896e-05 -3.33814e-05 -3.54734e-05 -3.51289e-05 -3.53229e-05 -3.51532e-05 -3.53149e-05 -3.4505e-05 -3.03462e-05 -2.81848e-05 -3.08895e-05 -3.27158e-05 -3.4605e-05 -3.47728e-05 -3.2459e-05 -3.06552e-05 -2.53255e-05 -2.69671e-05 -2.83425e-05 -3.12836e-05 -3.30965e-05 -3.34099e-05 -3.32766e-05 -3.32256e-05 -3.36385e-05 -3.20796e-05 -2.7108e-05 -2.47054e-05 -2.49589e-05 -2.7382e-05 -3.09774e-05 -3.12003e-05 -3.1246e-05 -3.07572e-05 -2.64942e-05 -2.25505e-05 -2.67194e-05 -2.86e-05 -2.94903e-05 -2.96249e-05 -2.92653e-05 -2.86472e-05 -2.81016e-05 -2.69737e-05 -2.48076e-05 -3.00473e-05 -2.73404e-05 -2.60153e-05 -2.41608e-05 -2.61937e-05 -2.5539e-05 -2.47104e-05 -2.35194e-05'
        .split(' '), float)  # noqa: E501
    norm = np.linalg.norm(want)
    idx = np.argmin(np.abs(evoked.times - 0.083))
    assert idx == 170
    old = evoked.data[:, idx].ravel()
    exp_var = 1 - np.linalg.norm(want - old) / norm
    assert 0.006 < exp_var < 0.008
    evoked.set_eeg_reference('REST', forward=fwd)
    exp_var_old = 1 - np.linalg.norm(evoked.data[:, idx] - old) / norm
    assert 0.005 < exp_var_old <= 0.009
    exp_var = 1 - np.linalg.norm(evoked.data[:, idx] - want) / norm
    assert 0.995 < exp_var <= 1
Example #5
def test_inverse_residual(evoked, method):
    """Test MNE inverse application."""
    # use fname_inv as it will be faster than fname_full (fewer verts and chs)
    evoked = evoked.pick_types(meg=True)
    inv = read_inverse_operator(fname_inv_fixed_depth)
    fwd = read_forward_solution(fname_fwd)
    pick_channels_forward(fwd, evoked.ch_names, copy=False)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    matcher = re.compile(r'.* ([0-9]?[0-9]?[0-9]?\.[0-9])% variance.*')

    # make it complex to ensure we handle it properly
    evoked.data = 1j * evoked.data
    with catch_logging() as log:
        stc, residual = apply_inverse(
            evoked, inv, method=method, return_residual=True, verbose=True)
    # revert the complex-ification (except STC, allow that to be complex still)
    assert_array_equal(residual.data.real, 0)
    residual.data = (-1j * residual.data).real
    evoked.data = (-1j * evoked.data).real
    # continue testing
    log = log.getvalue()
    match = matcher.match(log.replace('\n', ' '))
    assert match is not None
    match = float(match.group(1))
    assert 45 < match < 50
    if method not in ('dSPM', 'sLORETA'):
        # revert effects of STC being forced to be complex
        recon = apply_forward(fwd, stc, evoked.info)
        recon.data = (-1j * recon.data).real
        proj_op = make_projector(evoked.info['projs'], evoked.ch_names)[0]
        recon.data[:] = np.dot(proj_op, recon.data)
        residual_fwd = evoked.copy()
        residual_fwd.data -= recon.data
        corr = np.corrcoef(residual_fwd.data.ravel(),
                           residual.data.ravel())[0, 1]
        assert corr > 0.999

    if method != 'sLORETA':  # XXX divide by zero error
        with catch_logging() as log:
            _, residual = apply_inverse(
                evoked, inv, 0., method, return_residual=True, verbose=True)
        log = log.getvalue()
        match = matcher.match(log.replace('\n', ' '))
        assert match is not None
        match = float(match.group(1))
        assert match == 100.
        assert_array_less(np.abs(residual.data), 1e-15)
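For reference, the regex above pulls the explained-variance percentage out of the apply_inverse log; a toy check on a fabricated log line (not an actual MNE message) shows what the capture group returns:

import re

matcher = re.compile(r'.* ([0-9]?[0-9]?[0-9]?\.[0-9])% variance.*')
m = matcher.match('    Explained  47.3% variance')  # fabricated log line
assert m is not None
assert float(m.group(1)) == 47.3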
Example #6
def test_inverse_residual(evoked):
    """Test MNE inverse application."""
    # use fname_inv as it will be faster than fname_full (fewer verts and chs)
    evoked = evoked.pick_types()
    inv = read_inverse_operator(fname_inv_fixed_depth)
    fwd = read_forward_solution(fname_fwd)
    pick_channels_forward(fwd, evoked.ch_names, copy=False)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    matcher = re.compile(r'.* ([0-9]?[0-9]?[0-9]?\.[0-9])% variance.*')
    for method in ('MNE', 'dSPM', 'sLORETA'):
        with catch_logging() as log:
            stc, residual = apply_inverse(evoked,
                                          inv,
                                          method=method,
                                          return_residual=True,
                                          verbose=True)
        log = log.getvalue()
        match = matcher.match(log.replace('\n', ' '))
        assert match is not None
        match = float(match.group(1))
        assert 45 < match < 50
        if method == 'MNE':  # must be first!
            recon = apply_forward(fwd, stc, evoked.info)
            proj_op = make_projector(evoked.info['projs'], evoked.ch_names)[0]
            recon.data[:] = np.dot(proj_op, recon.data)
            residual_fwd = evoked.copy()
            residual_fwd.data -= recon.data
        corr = np.corrcoef(residual_fwd.data.ravel(),
                           residual.data.ravel())[0, 1]
        assert corr > 0.999
    with catch_logging() as log:
        _, residual = apply_inverse(evoked,
                                    inv,
                                    0.,
                                    'MNE',
                                    return_residual=True,
                                    verbose=True)
    log = log.getvalue()
    match = matcher.match(log.replace('\n', ' '))
    assert match is not None
    match = float(match.group(1))
    assert match == 100.
    assert_array_less(np.abs(residual.data), 1e-15)

    # Degenerate: we don't have the right representation for eLORETA for this
    with pytest.raises(ValueError, match='eLORETA does not .* support .*'):
        apply_inverse(evoked, inv, method="eLORETA", return_residual=True)
Example #7
def test_rap_music_simulated():
    """Test RAP-MUSIC with simulated evoked."""
    evoked, noise_cov = _get_data(ch_decim=16)
    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, evoked.ch_names)
    forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True)
    forward_fixed = mne.convert_forward_solution(forward,
                                                 force_fixed=True,
                                                 surf_ori=True,
                                                 use_cps=True)

    n_dipoles = 2
    sim_evoked, stc = simu_data(evoked,
                                forward_fixed,
                                noise_cov,
                                n_dipoles,
                                evoked.times,
                                nave=evoked.nave)
    # Check dipoles for fixed ori
    dipoles = rap_music(sim_evoked,
                        forward_fixed,
                        noise_cov,
                        n_dipoles=n_dipoles)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked)
    assert (0.97 < dipoles[0].gof.max() < 1.)
    assert (dipoles[0].gof.min() >= 0.)
    assert_array_equal(dipoles[0].gof, dipoles[1].gof)

    nave = 100000  # add a tiny amount of noise to the simulated evokeds
    sim_evoked, stc = simu_data(evoked,
                                forward_fixed,
                                noise_cov,
                                n_dipoles,
                                evoked.times,
                                nave=nave)
    dipoles, residual = rap_music(sim_evoked,
                                  forward_fixed,
                                  noise_cov,
                                  n_dipoles=n_dipoles,
                                  return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)

    # Check dipoles for free ori
    dipoles, residual = rap_music(sim_evoked,
                                  forward,
                                  noise_cov,
                                  n_dipoles=n_dipoles,
                                  return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)

    # Check dipoles for free surface ori
    dipoles, residual = rap_music(sim_evoked,
                                  forward_surf_ori,
                                  noise_cov,
                                  n_dipoles=n_dipoles,
                                  return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
Example #8
def make_resolution_matrix(forward,
                           inverse_operator,
                           method='dSPM',
                           lambda2=1. / 9.,
                           verbose=None):
    """Compute resolution matrix for linear inverse operator.

    Parameters
    ----------
    forward : instance of Forward
        Forward Operator.
    inverse_operator : instance of InverseOperator
        Inverse operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse method to use (MNE, dSPM, sLORETA).
    lambda2 : float
        The regularisation parameter.
    %(verbose)s

    Returns
    -------
    resmat: array, shape (n_orient_inv * n_dipoles, n_orient_fwd * n_dipoles)
        Resolution matrix (inverse operator times forward operator).
        The result of applying the inverse operator to the forward operator.
        If source orientations are not fixed, all source components will be
        computed (i.e. for n_orient_inv > 1 or n_orient_fwd > 1).
        The columns of the resolution matrix are the point-spread functions
        (PSFs) and the rows are the cross-talk functions (CTFs).
    """
    # make sure forward and inverse operator match
    inv = inverse_operator
    fwd = _convert_forward_match_inv(forward, inv)

    # don't include bad channels
    # only use good channels from inverse operator
    bads_inv = inv['info']['bads']

    # good channels
    ch_names = [c for c in inv['info']['ch_names'] if (c not in bads_inv)]

    fwd = pick_channels_forward(fwd, ch_names, ordered=True)

    # get leadfield matrix from forward solution
    leadfield = fwd['sol']['data']

    invmat = _get_matrix_from_inverse_operator(inv,
                                               fwd,
                                               method=method,
                                               lambda2=lambda2)

    resmat = invmat.dot(leadfield)

    logger.info('Dimensions of resolution matrix: %d by %d.' % resmat.shape)

    return resmat
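A minimal usage sketch for make_resolution_matrix, assuming fname_fwd and fname_inv point at a matching forward solution and inverse operator (these file names are assumptions, not part of the function above):

import mne

forward = mne.read_forward_solution(fname_fwd)
inverse_operator = mne.minimum_norm.read_inverse_operator(fname_inv)
resmat = make_resolution_matrix(forward, inverse_operator,
                                method='dSPM', lambda2=1. / 9.)
# columns are point-spread functions (PSFs), rows are cross-talk functions
psf_first_source = resmat[:, 0]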
Example #9
def get_clean_forward(forward_model_path: str, mne_info: mne.Info):
    """
    Assemble the gain matrix from the forward model so that
    its rows correspond to channels in mne_info
    :param force_fixed: whether to return the gain matrix that uses
    fixed orientations of dipoles
    :param drop_missing: what to do with channels that are not
    in the forward solution? If False, zero vectors will be
    returned for them, if True, they will not be represented
    in the returned matrix.
    :param forward_model_path:
    :param mne_info:
    :return: np.ndarray with as many rows as there are channels
    in mne_info and as many columns as there are dipoles
    in the forward model (well, depending on drop_missing).
    If drop_missing is True, then also returns indices of
    channels that are both in the forward solution and mne_info

    """
    _logger.debug("Matching data channels with forward model.")
    _logger.debug("Loading forward model from %s" % forward_model_path)

    # Get the gain matrix from the forward solution
    forward = mne.read_forward_solution(forward_model_path, verbose="ERROR")
    capitalize_chnames_fwd(forward)

    # Take only the channels present in mne_info
    ch_names = mne_info["ch_names"]
    goods = mne.pick_types(mne_info,
                           eeg=True,
                           stim=False,
                           eog=False,
                           ecg=False,
                           exclude="bads")
    ch_names_data = [ch_names[i] for i in goods]
    ch_names_fwd = forward["info"]["ch_names"]
    # Take only channels from both mne_info and the forward solution
    ch_names_intersect = [
        n for n in ch_names_fwd if n.upper() in all_upper(ch_names_data)
    ]
    missing_ch_names = [
        n for n in ch_names_data if n.upper() not in all_upper(ch_names_fwd)
    ]
    _logger.debug("Channel names found in forward: %s" % ch_names_fwd)
    _logger.debug("Channel names found in data: %s" % ch_names_data)
    _logger.debug("Missing channel names: %s" % missing_ch_names)

    if ch_names_intersect:
        fwd = mne.pick_channels_forward(forward, include=ch_names_intersect)
        return fwd, missing_ch_names
    else:
        raise ValueError("No channels from data match current montage")
Example #10
def get_clean_forward(forward_model_path: str, mne_info: mne.Info):
    """
    Assemble the gain matrix from the forward model so that
    its rows correspond to channels in mne_info
    :param force_fixed: whether to return the gain matrix that uses
    fixed orientations of dipoles
    :param drop_missing: what to do with channels that are not
    in the forward solution? If False, zero vectors will be
    returned for them, if True, they will not be represented
    in the returned matrix.
    :param forward_model_path:
    :param mne_info:
    :return: np.ndarray with as many rows as there are channels
    in mne_info and as many columns as there are dipoles
    in the forward model (well, depending on drop_missing).
    If drop_missing is True, then also returns indices of
    channels that are both in the forward solution and mne_info

    """

    # Get the gain matrix from the forward solution
    forward = mne.read_forward_solution(forward_model_path, verbose='ERROR')

    # Take only the channels present in mne_info
    ch_names = mne_info['ch_names']
    goods = mne.pick_types(mne_info,
                           eeg=True,
                           stim=False,
                           eog=False,
                           ecg=False,
                           exclude='bads')
    ch_names_data = [ch_names[i] for i in goods]
    ch_names_fwd = forward['info']['ch_names']
    # Take only channels from both mne_info and the forward solution
    ch_names_intersect = [
        n for n in ch_names_fwd if n.upper() in all_upper(ch_names_data)
    ]
    missing_ch_names = [
        n for n in ch_names_data if n.upper() not in all_upper(ch_names_fwd)
    ]

    missing_fwd_ch_names = [
        n for n in ch_names_fwd if n.upper() not in all_upper(ch_names_data)
    ]

    if len(missing_fwd_ch_names) > 0:
        raise ValueError(mne_info['ch_names'], ch_names_fwd, mne_info['bads'])

    fwd = mne.pick_channels_forward(forward, include=ch_names_intersect)
    return fwd, missing_ch_names
Example #11
def _simulate_data(fwd):
    """Simulate an oscillator on the cortex."""
    source_vertno = 146374  # Somewhere on the frontal lobe

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch
    epochs = mne.Epochs(raw, [[0, 0, 1]],
                        event_id=1,
                        tmin=0,
                        tmax=raw.times[-1],
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    return epochs, evoked, csd, source_vertno
Example #12
def _simulate_data(fwd):
    """Simulate an oscillator on the cortex."""
    source_vertno = 146374  # Somewhere on the frontal lobe

    sfreq = 50.  # Hz.
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-9  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(
        signal[np.newaxis, :],
        vertices=[[source_vertno], []],
        tmin=0,
        tstep=1 / sfreq,
        subject='sample',
    )

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # heavily decimate sensors to make it much faster
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Define a single epoch
    epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
                        tmax=raw.times[-1], preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10)

    return epochs, evoked, csd, source_vertno
Example #13
def test_inverse_residual():
    """Test MNE inverse application."""
    # use fname_inv as it will be faster than fname_full (fewer verts and chs)
    evoked = _get_evoked().pick_types()
    inv = read_inverse_operator(fname_inv_fixed_depth)
    fwd = read_forward_solution(fname_fwd)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    fwd = pick_channels_forward(fwd, evoked.ch_names)
    matcher = re.compile(r'.* ([0-9]?[0-9]?[0-9]?\.[0-9])% variance.*')
    for method in ('MNE', 'dSPM', 'sLORETA'):
        with catch_logging() as log:
            stc, residual = apply_inverse(
                evoked, inv, method=method, return_residual=True, verbose=True)
        log = log.getvalue()
        match = matcher.match(log.replace('\n', ' '))
        assert match is not None
        match = float(match.group(1))
        assert 45 < match < 50
        if method == 'MNE':  # must be first!
            recon = apply_forward(fwd, stc, evoked.info)
            proj_op = make_projector(evoked.info['projs'], evoked.ch_names)[0]
            recon.data[:] = np.dot(proj_op, recon.data)
            residual_fwd = evoked.copy()
            residual_fwd.data -= recon.data
        corr = np.corrcoef(residual_fwd.data.ravel(),
                           residual.data.ravel())[0, 1]
        assert corr > 0.999
    with catch_logging() as log:
        _, residual = apply_inverse(
            evoked, inv, 0., 'MNE', return_residual=True, verbose=True)
    log = log.getvalue()
    match = matcher.match(log.replace('\n', ' '))
    assert match is not None
    match = float(match.group(1))
    assert match == 100.
    assert_array_less(np.abs(residual.data), 1e-15)

    # Degenerate: we don't have the right representation for eLORETA for this
    with pytest.raises(ValueError, match='eLORETA does not .* support .*'):
        apply_inverse(evoked, inv, method="eLORETA", return_residual=True)
Example #14
def test_rap_music_simulated():
    """Test RAP-MUSIC with simulated evoked."""
    evoked, noise_cov = _get_data(ch_decim=16)
    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, evoked.ch_names)
    forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True)
    forward_fixed = mne.convert_forward_solution(forward, force_fixed=True,
                                                 surf_ori=True)

    n_dipoles = 2
    sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
                                n_dipoles, evoked.times, nave=evoked.nave)
    # Check dipoles for fixed ori
    dipoles = rap_music(sim_evoked, forward_fixed, noise_cov,
                        n_dipoles=n_dipoles)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked)
    assert_true(0.98 < dipoles[0].gof.max() < 1.)
    assert_true(dipoles[0].gof.min() >= 0.)
    assert_array_equal(dipoles[0].gof, dipoles[1].gof)

    nave = 100000  # add a tiny amount of noise to the simulated evokeds
    sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
                                n_dipoles, evoked.times, nave=nave)
    dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov,
                                  n_dipoles=n_dipoles, return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)

    # Check dipoles for free ori
    dipoles, residual = rap_music(sim_evoked, forward, noise_cov,
                                  n_dipoles=n_dipoles, return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)

    # Check dipoles for free surface ori
    dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov,
                                  n_dipoles=n_dipoles, return_residual=True)
    _check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
Example #15
# Seed for the random number generator
rand = np.random.RandomState(42)

###############################################################################
# Load the info, the forward solution and the noise covariance
# The forward solution also defines the employed brain discretization.
info = mne.io.read_info(raw_fname)
fwd = mne.read_forward_solution(fwd_fname)
noise_cov = mne.read_cov(erm_cov_fname)

###############################################################################
# In this example, to save computation time, we shall only simulate gradiometer
# data. You can try simulating other types of sensors as well.
picks = mne.pick_types(info, meg='grad', stim=True, exclude='bads')
mne.pick_info(info, picks, copy=False)
fwd = mne.pick_channels_forward(fwd, include=info['ch_names'])
noise_cov = mne.pick_channels_cov(noise_cov, include=info['ch_names'])

###############################################################################
# Data simulation
# ---------------
#
# The following function generates a timeseries that contains an oscillator,
# modulated by a Gaussian. The frequency of the oscillator fluctuates a little
# over time, but stays close to 10 Hz.


def gen_signal(times,
               base_freq,
               rand=None,
               t_rand=1e-3,
Example #16
def dics_connectivity(vertex_pairs,
                      fwd,
                      data_csd,
                      reg=0.05,
                      n_angles=50,
                      block_size=10000,
                      n_jobs=1,
                      verbose=None):
    """Compute spectral connectivity using a DICS beamformer.

    Calculates the connectivity between the given vertex pairs using a DICS
    beamformer [1]_ [2]_. Connectivity is defined in terms of coherence:

    C = Sxy^2 [Sxx * Syy]^-1

    Where Sxy is the cross-spectral density (CSD) between dipoles x and y, Sxx
    is the power spectral density (PSD) at dipole x and Syy is the PSD at
    dipole y.

    Parameters
    ----------
    vertex_pairs : pair of lists (vert_from_idx, vert_to_idx)
        Vertex pairs between which connectivity is calculated. The pairs are
        specified using two lists: the first list contains, for each pair, the
        index of the first vertex. The second list contains, for each pair, the
        index of the second vertex.
    fwd : instance of Forward
        Subject's forward solution, possibly restricted to only include
        vertices that are close to the sensors. For 'canonical' mode, the
        orientation needs to be tangential or free.
    data_csd : instance of CrossSpectralDensity
        The cross spectral density of the data.
    reg : float
        Tikhonov regularization parameter to control for trade-off between
        spatial resolution and noise sensitivity. Defaults to 0.05.
    n_angles : int
        Number of angles to try when optimizing dipole orientations. Defaults
        to 50.
    block_size : int
        Number of pairs to process in a single batch. Beware of memory
        requirements, which are ``n_jobs * block_size``. Defaults to 10000.
    n_jobs : int
        Number of blocks to process simultaneously. Defaults to 1.
    verbose : bool | str | int | None
        If not None, override default verbose level (see :func:`mne.verbose`
        and :ref:`Logging documentation <tut_logging>` for more).

    Returns
    -------
    connectivity : instance of Connectivity
        The adjacency matrix.

    See Also
    --------
    all_to_all_connectivity_pairs : Obtain pairs for all-to-all connectivity.
    one_to_all_connectivity_pairs : Obtain pairs for one-to-all connectivity.

    References
    ----------
    .. [1] Gross, J., Kujala, J., Hamalainen, M., Timmermann, L., Schnitzler,
           A., & Salmelin, R. (2001). Dynamic imaging of coherent sources:
           Studying neural interactions in the human brain. Proceedings of the
           National Academy of Sciences, 98(2), 694–699.
    .. [2] Kujala, J., Gross, J., & Salmelin, R. (2008). Localization of
           correlated network activity at the cortical level with MEG.
           NeuroImage, 39(4), 1706–1720.
    """
    fwd = pick_channels_forward(fwd, data_csd.ch_names)
    data_csd = pick_channels_csd(data_csd, fwd['info']['ch_names'])

    vertex_from, vertex_to = vertex_pairs
    if len(vertex_from) != len(vertex_to):
        raise ValueError('Lengths of the two lists of vertices do not match.')
    n_pairs = len(vertex_from)

    G = fwd['sol']['data'].copy()
    n_orient = G.shape[1] // fwd['nsource']

    if n_orient == 1:
        raise ValueError('A forward operator with free or tangential '
                         'orientation must be used.')
    elif n_orient == 3:
        # Convert forward to tangential orientation for more speed.
        fwd = forward_to_tangential(fwd)
        G = fwd['sol']['data']
        n_orient = 2

    G = G.reshape(G.shape[0], fwd['nsource'], n_orient)

    # Normalize the lead field
    G /= np.linalg.norm(G, axis=0)

    Cm = data_csd.get_data()
    Cm_inv, alpha = _reg_pinv(Cm, reg)
    del Cm

    W = np.dot(G.T, Cm_inv)

    # Pre-compute spectral power at each unique vertex
    unique_verts, vertex_map = np.unique(np.r_[vertex_from, vertex_to],
                                         return_inverse=True)
    spec_power_inv = np.array(
        [np.dot(W[:, vert, :], G[:, vert, :]) for vert in unique_verts])

    # Map vertex indices to unique indices, so the pre-computed spectral power
    # can be retrieved
    vertex_from_map = vertex_map[:len(vertex_from)]
    vertex_to_map = vertex_map[len(vertex_from):]

    coherence = np.zeros((len(vertex_from)))

    # Define a search space for dipole orientations
    angles = np.arange(n_angles) * np.pi / n_angles
    orientations = np.vstack((np.sin(angles), np.cos(angles)))

    # Create chunks of pairs to evaluate at once
    n_blocks = int(np.ceil(n_pairs / float(block_size)))
    blocks = [
        slice(i * block_size, min((i + 1) * block_size, n_pairs))
        for i in range(n_blocks)
    ]

    parallel, my_compute_dics_coherence, _ = parallel_func(
        _compute_dics_coherence, n_jobs, verbose)

    logger.info('Computing coherence between %d source pairs in %d blocks...' %
                (n_pairs, n_blocks))
    if numba_enabled:
        logger.info('Using numba optimized code path.')
    coherence = np.hstack(
        parallel(
            my_compute_dics_coherence(W, G, vertex_from_map[block],
                                      vertex_to_map[block], spec_power_inv,
                                      orientations) for block in blocks))
    logger.info('[done]')

    return VertexConnectivity(
        data=coherence,
        pairs=[v[:len(coherence)] for v in vertex_pairs],
        vertices=[s['vertno'] for s in fwd['src']],
        vertex_degree=None,  # Compute this in the constructor
        subject=fwd['src'][0]['subject_his_id'],
    )
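As a small illustration of the coherence definition quoted in the docstring above (not part of the function itself), the value for a single dipole pair can be computed directly from a toy 2 x 2 cross-spectral density matrix:

import numpy as np

# toy CSD for dipoles x and y; the values are made up for illustration
csd_xy = np.array([[2.0 + 0.0j, 0.8 + 0.3j],
                   [0.8 - 0.3j, 1.5 + 0.0j]])
sxx = csd_xy[0, 0].real
syy = csd_xy[1, 1].real
sxy = csd_xy[0, 1]
coherence = np.abs(sxy) ** 2 / (sxx * syy)  # C = |Sxy|^2 / (Sxx * Syy)
assert 0. <= coherence <= 1.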
Example #17
def test_resolution_matrix_lcmv():
    """Test computation of resolution matrix for LCMV beamformers."""
    # read forward solution
    forward = mne.read_forward_solution(fname_fwd)

    # remove bad channels
    forward = mne.pick_channels_forward(forward, exclude='bads')

    # forward operator with fixed source orientations
    forward_fxd = mne.convert_forward_solution(forward, surf_ori=True,
                                               force_fixed=True)

    # evoked info
    info = mne.io.read_info(fname_evoked)
    mne.pick_info(info, mne.pick_types(info), copy=False)  # good MEG channels

    # noise covariance matrix
    # ad-hoc to avoid discrepancies due to regularisation of real noise
    # covariance matrix
    noise_cov = mne.make_ad_hoc_cov(info)

    # Resolution matrix for Beamformer
    data_cov = noise_cov.copy()  # to test a property of LCMV

    # compute beamformer filters
    # reg=0. to make sure noise_cov and data_cov are as similar as possible
    filters = make_lcmv(info, forward_fxd, data_cov, reg=0.,
                        noise_cov=noise_cov,
                        pick_ori=None, rank=None,
                        weight_norm=None,
                        reduce_rank=False,
                        verbose=False)

    # Compute resolution matrix for beamformer
    resmat_lcmv = make_lcmv_resolution_matrix(filters, forward_fxd, info)

    # for noise_cov==data_cov and whitening, the filter weights should be the
    # transpose of leadfield

    # create filters with transposed whitened leadfield as weights
    forward_fxd = mne.pick_channels_forward(forward_fxd, info['ch_names'])
    filters_lfd = deepcopy(filters)
    filters_lfd['weights'][:] = forward_fxd['sol']['data'].T

    # compute resolution matrix for filters with transposed leadfield
    resmat_fwd = make_lcmv_resolution_matrix(filters_lfd, forward_fxd, info)

    # pairwise correlation for rows (CTFs) of resolution matrices for whitened
    # LCMV beamformer and transposed leadfield should be 1
    # Some rows are off by about 0.1 - not yet clear why
    corr = []

    for (f, l) in zip(resmat_fwd, resmat_lcmv):

        corr.append(np.corrcoef(f, l)[0, 1])

    # all row correlations should at least be above ~0.8
    assert_allclose(corr, 1., atol=0.2)

    # Maximum row correlation should at least be close to 1
    assert_allclose(np.max(corr), 1., atol=0.01)
Example #18
def DeFleCT_make_estimator(forward, noise_cov, labels, lambda2_cov=3/10.,
                           lambda2_S=1/9., pick_meg=True, pick_eeg=False,
                           mode='svd', n_svd_comp=1, verbose=None):
    """
    Create the DeFleCT estimator for a set of labels

    Parameters:
    -----------
    forward: forward solution (assumes surf_ori=True)
    noise_cov: noise covariance matrix
    lambda2_cov: regularisation parameter for noise covariance matrix (whitening)
    pick_meg: Which MEG channels to pick (True/False/'grad'/'mag')
    pick_eeg: Which EEG channels to pick (True/False)
    labels: list of labels, first one is the target for DeFleCT
    mode : 'mean' | 'sum' | 'svd' |
        PSFs can be computed for different summary measures with labels:
        'sum' or 'mean': sum or means of sub-leadfields for labels
        This corresponds to situations where labels can be assumed to be
        homogeneously activated.
        'svd': SVD components of sub-leadfields for labels
        This is better suited for situations where activation patterns are
        assumed to be more variable.
        "sub-leadfields" are the parts of the forward solutions that belong to
        vertices within individual labels.
    n_svd_comp : integer
        Number of SVD components for which PSFs will be computed and output
        (irrelevant for 'sum' and 'mean'). Explained variances within
        sub-leadfields are shown in screen output.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns:
    --------
    w: np-array (1 x n_chan), spatial filter for the first column of P
    ch_names: list of channel names actually used
    leadfield: np-array, leadfield matrix of the normal components
        (n_chan x n_vert)
    label_lfd_summary: np-array, summary sub-leadfields for the labels
        (n_chan x n_comp)
    noise_cov_fwd: noise covariance matrix as used in DeFleCT
    whitener: whitening matrix as used in DeFleCT
    """
    
    # get wanted channels
    picks = pick_types(forward['info'], meg=pick_meg, eeg=pick_eeg, eog=False,
                        stim=False, exclude='bads')     
    
    fwd_ch_names_all = [c['ch_name'] for c in forward['info']['chs']]
    fwd_ch_names = [fwd_ch_names_all[pp] for pp in picks]
    ch_names = [c for c in fwd_ch_names
                if ((c not in noise_cov['bads']) and
                    (c not in forward['info']['bads'])) and
                    (c in noise_cov.ch_names)]         

    if not len(forward['info']['bads']) == len(noise_cov['bads']) or \
            not all([b in noise_cov['bads'] for b in forward['info']['bads']]):
        logger.info('\nforward["info"]["bads"] and noise_cov["bads"] do not '
            'match; excluding bad channels from both')

    # reduce forward to desired channels
    forward = pick_channels_forward(forward, ch_names) 
    noise_cov = pick_channels_cov(noise_cov, ch_names)
    
    logger.info("\nNoise covariance matrix has %d channels."
                % noise_cov.data.shape[0])

    info_fwd = deepcopy(forward['info'])
    info_fwd['sfreq'] = 1000.
    if pick_eeg:        
        avgproj = make_eeg_average_ref_proj(info_fwd, activate=True)
        info_fwd['projs'] = []
        info_fwd['projs'].append(avgproj)        
    else:
        info_fwd['projs'] = noise_cov['projs']
    
    if lambda2_cov:  # regularize covariance matrix "old style"
        # NOTE: this branch indexes lambda2_cov with 'mag'/'gra'/'eeg' keys,
        # i.e. it expects a dict here rather than the scalar default value
        lbd = lambda2_cov
        noise_cov_reg = cov_regularize(noise_cov, info_fwd, mag=lbd['mag'],
                                    grad=lbd['gra'], eeg=lbd['eeg'], proj=True)
    else:  # use cov_mat as is
        noise_cov_reg = noise_cov

    fwd_info, leadfield, noise_cov_fwd, whitener, n_nzero = _prepare_forward(
                             forward, info_fwd, noise_cov_reg,
                             pca=False, rank=None, verbose=None)
    leadfield = leadfield[:,2::3]  # assumes surf_ori=True, (normal component)
    n_chan, n_vert = leadfield.shape
    logger.info("\nLeadfield has dimensions %d by %d\n" % (n_chan, n_vert))    

    # if EEG present: remove mean of columns for EEG (average-reference)
    if pick_eeg:
        print("\nReferencing EEG\n")
        EEG_idx = [cc for cc in range(len(ch_names)) if ch_names[cc][:3]=='EEG']
        nr_eeg = len(EEG_idx)
        lfdmean = leadfield[EEG_idx,:].mean(axis=0)
        leadfield[EEG_idx,:] = leadfield[EEG_idx,:] - lfdmean[np.newaxis,:]

    #### CREATE SUBLEADFIELDs FOR LABELS
    # extract SUBLEADFIELDS for labels
    label_lfd_summary = DeFleCT_make_subleadfields(labels, forward, leadfield,
                            mode='svd', n_svd_comp=n_svd_comp, verbose=None)

    #### COMPUTE DEFLECT ESTIMATOR
    # rename variables to match paper
    F = np.dot( whitener, leadfield )
    P = np.dot( whitener, label_lfd_summary )
    nr_comp = P.shape[1]

    i = np.eye( nr_comp )[0,:].T          # desired sensitivity to columns in P
    t = np.zeros(n_vert).T[np.newaxis,:]  # desired CTF associated with w

    # Compute DeFleCT ESTIMATOR
    w = DeFleCT_matrix(F, P, i, t, lambda2_S)

    # add whitener on the right (i.e. input should be unwhitened)
    w = w.dot(whitener)

    return w, ch_names, leadfield, label_lfd_summary, noise_cov_fwd, whitener
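A hedged sketch of how the returned spatial filter might be applied, assuming forward, noise_cov, labels and an mne.Evoked called evoked are already available (all of these names are illustrative); lambda2_cov is passed as None here because, as noted above, the regularisation branch expects a dict:

import numpy as np

w, ch_names, leadfield, label_lfd_summary, noise_cov_used, whitener = \
    DeFleCT_make_estimator(forward, noise_cov, labels, lambda2_cov=None)
# restrict and order the evoked channels to match the filter
evoked_use = evoked.copy().reorder_channels(ch_names)
# w already includes the whitener, so unwhitened data go in directly
label_timecourse = np.dot(w, evoked_use.data)  # (n_times,) or (1, n_times)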
Example #19
def test_lcmv_vector():
    """Test vector LCMV solutions."""
    info = mne.io.read_raw_fif(fname_raw).info

    # For speed and for rank-deficiency calculation simplicity,
    # just use grads
    info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=()))
    info.update(bads=[], projs=[])

    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, info['ch_names'])
    vertices = [s['vertno'][::100] for s in forward['src']]
    n_vertices = sum(len(v) for v in vertices)
    assert 5 < n_vertices < 20

    amplitude = 100e-9
    stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices,
                             0, 1. / info['sfreq'])
    forward_sim = mne.convert_forward_solution(forward, force_fixed=True,
                                               use_cps=True, copy=True)
    forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc)
    noise_cov = mne.make_ad_hoc_cov(info)
    noise_cov.update(data=np.diag(noise_cov['data']), diag=False)
    evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1)
    source_nn = forward_sim['source_nn']
    source_rr = forward_sim['source_rr']

    # Figure out our indices
    mask = np.concatenate([np.in1d(s['vertno'], v)
                           for s, v in zip(forward['src'], vertices)])
    mapping = np.where(mask)[0]
    assert_array_equal(source_rr, forward['source_rr'][mapping])

    # Don't check NN because we didn't rotate to surf ori
    del forward_sim

    # Let's do minimum norm as a sanity check (dipole_fit is slower)
    inv = make_inverse_operator(info, forward, noise_cov, loose=1.)
    stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector')
    mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)]
    mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis]
    mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1)))
    assert np.mean(mne_angles) < 35

    # Now let's do LCMV
    data_cov = mne.make_ad_hoc_cov(info)  # just a stub for later
    with pytest.raises(ValueError, match="pick_ori"):
        make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad')

    lcmv_ori = list()
    for ti in range(n_vertices):
        this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti])
        data_cov['diag'] = False
        data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) +
                            noise_cov['data'])
        vals = linalg.svdvals(data_cov['data'])
        assert vals[0] / vals[-1] < 1e5  # not rank deficient

        with catch_logging() as log:
            filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov,
                                verbose=True)
        log = log.getvalue()
        assert '498 sources' in log
        with catch_logging() as log:
            filters_vector = make_lcmv(info, forward, data_cov, 0.05,
                                       noise_cov, pick_ori='vector',
                                       verbose=True)
        log = log.getvalue()
        assert '498 sources' in log
        stc = apply_lcmv(this_evoked, filters)
        stc_vector = apply_lcmv(this_evoked, filters_vector)
        assert isinstance(stc, mne.SourceEstimate)
        assert isinstance(stc_vector, mne.VectorSourceEstimate)
        assert_allclose(stc.data, stc_vector.magnitude().data)

        # Check the orientation by pooling across some neighbors, as LCMV can
        # have some "holes" at the points of interest
        idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0]
        lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0))
        lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1])

    lcmv_angles = np.rad2deg(np.arccos(np.sum(lcmv_ori * source_nn, axis=-1)))
    assert np.mean(lcmv_angles) < 55
Example #20
def test_lcmv_vector():
    """Test vector LCMV solutions."""
    info = mne.io.read_raw_fif(fname_raw).info
    # For speed and for rank-deficiency calculation simplicity,
    # just use grads:
    info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=()))
    info.update(bads=[], projs=[])
    forward = mne.read_forward_solution(fname_fwd)
    forward = mne.pick_channels_forward(forward, info['ch_names'])
    vertices = [s['vertno'][::100] for s in forward['src']]
    n_vertices = sum(len(v) for v in vertices)
    assert 5 < n_vertices < 20
    amplitude = 100e-9
    stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices,
                             0, 1. / info['sfreq'])
    forward_sim = mne.convert_forward_solution(forward, force_fixed=True,
                                               use_cps=True, copy=True)
    forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc)
    noise_cov = mne.make_ad_hoc_cov(info)
    noise_cov.update(data=np.diag(noise_cov['data']), diag=False)
    evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1)
    source_nn = forward_sim['source_nn']
    source_rr = forward_sim['source_rr']
    # Figure out our indices
    mask = np.concatenate([np.in1d(s['vertno'], v)
                           for s, v in zip(forward['src'], vertices)])
    mapping = np.where(mask)[0]
    assert_array_equal(source_rr, forward['source_rr'][mapping])
    # Don't check NN because we didn't rotate to surf ori
    del forward_sim

    #
    # Let's do minimum norm as a sanity check (dipole_fit is slower)
    #
    inv = make_inverse_operator(info, forward, noise_cov, loose=1.)
    stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector')
    mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)]
    mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis]
    mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1)))
    assert np.mean(mne_angles) < 35

    #
    # Now let's do LCMV
    #
    data_cov = mne.make_ad_hoc_cov(info)  # just a stub for later
    with pytest.raises(ValueError, match='pick_ori must be one of'):
        make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad')
    lcmv_ori = list()
    for ti in range(n_vertices):
        this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti])
        data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) +
                            noise_cov['data'])
        vals = linalg.svdvals(data_cov['data'])
        assert vals[0] / vals[-1] < 1e5  # not rank deficient
        filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov)
        filters_vector = make_lcmv(info, forward, data_cov, 0.05, noise_cov,
                                   pick_ori='vector')
        stc = apply_lcmv(this_evoked, filters)
        assert isinstance(stc, mne.SourceEstimate)
        stc_vector = apply_lcmv(this_evoked, filters_vector)
        assert isinstance(stc_vector, mne.VectorSourceEstimate)
        assert_allclose(stc.data, stc_vector.magnitude().data)
        # Check the orientation by pooling across some neighbors, as LCMV can
        # have some "holes" at the points of interest
        idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0]
        lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0))
        lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1])

    lcmv_angles = np.rad2deg(np.arccos(np.sum(lcmv_ori * source_nn, axis=-1)))
    assert np.mean(lcmv_angles) < 55
Example #21
def make_dataset_from_sample():
    # assign paths
    data_path = mne.datasets.sample.data_path()
    subjects_dir = os.path.join(data_path, 'subjects')
    mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir,
                                            verbose=True)

    # get parcels and remove corpus callosum
    parcels = read_labels_from_annot('fsaverage',
                                     'HCPMMP1_combined',
                                     'both',
                                     subjects_dir=subjects_dir)
    # corpus callosum labels
    aparc_file_lh = os.path.join(subjects_dir, 'fsaverage', "label",
                                 'lh.aparc.a2009s.annot')
    aparc_file_rh = os.path.join(subjects_dir, 'fsaverage', "label",
                                 'rh.aparc.a2009s.annot')

    labels_corpus_lh = read_labels_from_annot(subject='fsaverage',
                                              annot_fname=aparc_file_lh,
                                              hemi='lh',
                                              subjects_dir=subjects_dir)
    labels_corpus_rh = read_labels_from_annot(subject='fsaverage',
                                              annot_fname=aparc_file_rh,
                                              hemi='rh',
                                              subjects_dir=subjects_dir)

    assert labels_corpus_lh[-1].name[:7] == 'Unknown'  # corpus callosum
    assert labels_corpus_rh[-1].name[:7] == 'Unknown'  # corpus callosum
    corpus_callosum = [labels_corpus_lh[-1], labels_corpus_rh[-1]]

    # remove from parcels all the vertices from corpus callosum
    to_remove = []
    for idx, parcel in enumerate(parcels):
        if parcel.hemi == 'lh':
            cc_free = set(parcel.vertices) - set(corpus_callosum[0].vertices)
        elif parcel.hemi == 'rh':
            cc_free = set(parcel.vertices) - set(corpus_callosum[1].vertices)
        parcel.vertices = np.array(list(cc_free))
        if len(parcel.vertices) == 0:
            to_remove.append(idx)
    [parcels.pop(idc) for idc in to_remove[::-1]]
    # morph from fsaverage to sample
    parcels = mne.morph_labels(parcels, 'sample', subjects_dir=subjects_dir)

    raw_fname = os.path.join(data_path, 'MEG', 'sample',
                             'sample_audvis_raw.fif')
    fwd_fname = os.path.join(data_path, 'MEG', 'sample',
                             'sample_audvis-meg-eeg-oct-6-fwd.fif')
    assert os.path.exists(raw_fname)
    assert os.path.exists(fwd_fname)

    info = mne.io.read_info(raw_fname)
    sel = mne.pick_types(info, meg='grad', eeg=False, stim=True, exclude=[])
    sel_data = mne.pick_types(info,
                              meg='grad',
                              eeg=False,
                              stim=False,
                              exclude=[])
    info_data = mne.pick_info(info, sel_data)
    info = mne.pick_info(info, sel)
    tstep = 1. / info['sfreq']

    # read forward solution
    fwd = mne.read_forward_solution(fwd_fname)
    src = fwd['src']

    fwd = mne.convert_forward_solution(fwd, force_fixed=True)
    fwd = mne.pick_channels_forward(fwd,
                                    include=info_data['ch_names'],
                                    ordered=True)
    lead_field = fwd['sol']['data']

    rng = np.random.RandomState(42)

    n_samples = 3
    signal_len = 20
    n_events = 50
    n_events = 2
    add_noise = False
    source_time_series = np.sin(
        2. * np.pi * 18. * np.arange(signal_len) * tstep) * 10e-9

    events = np.zeros((n_events, 3), dtype=int)
    events[:, 0] = signal_len * len(parcels) + 200 * np.arange(n_events)
    events[:, 2] = 1  # All events have the same id.

    signal_list = []
    true_idx = np.empty(n_samples, dtype=np.int16)
    for idx, source in enumerate(range(n_samples)):
        idx_source = rng.choice(np.arange(len(parcels)))
        true_idx[idx] = idx_source
        source = parcels[idx_source]

        location = 'center'  # Use the center of the region as a seed.
        extent = 0.
        source = mne.label.select_sources('sample',
                                          source,
                                          location=location,
                                          extent=extent,
                                          subjects_dir=subjects_dir)

        source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)
        source_simulator.add_data(source, source_time_series, events)

        raw = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)

        if add_noise:
            cov = mne.make_ad_hoc_cov(raw.info)
            mne.simulation.add_noise(raw, cov, iir_filter=[0.2, -0.2, 0.02])

        evoked = mne.Epochs(raw, events, tmax=0.3).average()
        data = evoked.data[:, np.argmax((evoked.data**2).sum(axis=0))]
        signal_list.append(data)

    signal_list = np.array(signal_list)
    # data_labels = [f'e{idx + 1}' for idx in range(signal_list.shape[1])]
    data_labels = evoked.ch_names

    X = pd.DataFrame(signal_list, columns=list(data_labels))
    X['subject_id'] = 0
    X['subject'] = '0'

    y = np.zeros((n_samples, len(parcels)), dtype=int)
    y[np.arange(n_samples), true_idx] = 1

    parcel_vertices = {}
    for idx, parcel in enumerate(parcels, 1):
        parcel_name = str(idx) + parcel.name[-3:]
        parcel_vertices[parcel_name] = parcel.vertices
        parcel.name = parcel_name

    parcel_indices_lh = np.zeros(len(fwd['src'][0]['inuse']), dtype=int)
    parcel_indices_rh = np.zeros(len(fwd['src'][1]['inuse']), dtype=int)
    for label_name, label_idx in parcel_vertices.items():
        label_id = int(label_name[:-3])
        if '-lh' in label_name:
            parcel_indices_lh[label_idx] = label_id
        else:
            parcel_indices_rh[label_idx] = label_id

    # Make sure label numbers are different for each hemisphere
    parcel_indices = np.concatenate((parcel_indices_lh, parcel_indices_rh),
                                    axis=0)

    # Now pick vertices that are actually used in the forward
    inuse = np.concatenate((fwd['src'][0]['inuse'], fwd['src'][1]['inuse']),
                           axis=0)

    parcel_indices = parcel_indices[np.where(inuse)[0]]
    assert len(parcel_indices) == lead_field.shape[1]

    lead_field = lead_field[:, parcel_indices != 0]
    parcel_indices = parcel_indices[parcel_indices != 0]

    return X, y, [lead_field], [parcel_indices]
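The builder above takes no arguments; a possible call looks like this (a sketch, not from the source, and it assumes the MNE sample dataset and the HCPMMP1 parcellation can be downloaded):

X, y, lead_fields, parcel_indices = make_dataset_from_sample()
assert X.shape[0] == y.shape[0] == 3  # n_samples is hard-coded to 3 above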
Example #22
def generate_signal(raw_fname,
                    fwd_fname,
                    subject,
                    parcels,
                    n_events=30,
                    signal_type='eeg',
                    random_state=None):
    signal_len = 0.01  # in sec
    # Generate the signal
    info = mne.io.read_info(raw_fname)
    if signal_type == 'eeg':
        sel = mne.pick_types(info, meg=False, eeg=True, stim=False, exclude=[])
    elif signal_type == 'meg':
        sel = mne.pick_types(info, meg=True, eeg=False, stim=False, exclude=[])
    elif signal_type == 'mag' or signal_type == 'grad':
        sel = mne.pick_types(info,
                             meg=signal_type,
                             eeg=False,
                             stim=False,
                             exclude=[])
    sel_data = mne.pick_types(info,
                              meg=signal_type,
                              eeg=False,
                              stim=False,
                              exclude=[])
    info_data = mne.pick_info(info, sel_data)
    info = mne.pick_info(info, sel)
    tstep = 1. / info['sfreq']

    # To simulate sources, we also need a source space. It can be obtained from
    # the forward solution of the sample subject.
    fwd = mne.read_forward_solution(fwd_fname)
    src = fwd['src']

    fwd = mne.convert_forward_solution(fwd, force_fixed=True)
    fwd = mne.pick_channels_forward(fwd,
                                    include=info_data['ch_names'],
                                    ordered=True)
    # Define the time course of the activity for each source of the region to
    # activate. Here we use just a step of ones; the amplitude will be added
    # at a later stage.
    source_time_series = np.ones(int(signal_len / tstep))

    # Define when the activity occurs using events. The first column is the
    # sample of the event, the second is not used, and the third is the event
    # id. Here the events occur every 200 samples.
    events = np.zeros((n_events, 3), dtype=int)
    # Events sample
    events[:, 0] = 100 + 200 * np.arange(n_events)
    events[:, 2] = 1  # All events have the same id.

    # Create simulated source activity. Here we use a SourceSimulator whose
    # add_data method is key. It specifies where (label), what
    # (source_time_series), and when (events) an event type will occur.
    source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)

    min_amplitude = 10  # nAm
    max_amplitude = 100  # nAm
    for idx, parcel in enumerate(parcels):
        # select the amplitude of the signal between 10 and 100 nAm
        amplitude = random_state.uniform(min_amplitude, max_amplitude) * 1e-9
        source_simulator.add_data(parcel, source_time_series * amplitude,
                                  events)

    # Project the source time series to sensor space and add some noise.
    # The source simulator can be given directly to the simulate_raw function.
    raw = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)
    if signal_type == 'eeg':
        raw.set_eeg_reference(projection=True)
    cov = mne.make_ad_hoc_cov(raw.info)
    mne.simulation.add_noise(raw, cov, iir_filter=[0.2, -0.2, 0.02])
    return events, source_time_series, raw
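A possible call of generate_signal, assuming raw_fname and fwd_fname point at the sample-dataset files used in the previous example and parcels is a list of mne.Label objects (these names are assumptions, not from the source):

import numpy as np

rng = np.random.RandomState(0)
events, source_time_series, raw = generate_signal(
    raw_fname, fwd_fname, subject='sample', parcels=parcels,
    n_events=30, signal_type='grad', random_state=rng)
print(raw.info['sfreq'], events.shape)  # quick sanity check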