Example No. 1
def test_lcmv_cov(weight_norm, pick_ori):
    """Test LCMV source power computation."""
    raw, epochs, evoked, data_cov, noise_cov, label, forward,\
        forward_surf_ori, forward_fixed, forward_vol = _get_data()
    convert_forward_solution(forward, surf_ori=True, copy=False)
    filters = make_lcmv(evoked.info, forward, data_cov, noise_cov=noise_cov,
                        weight_norm=weight_norm, pick_ori=pick_ori)
    for cov in (data_cov, noise_cov):
        this_cov = pick_channels_cov(cov, evoked.ch_names)
        this_evoked = evoked.copy().pick_channels(this_cov['names'])
        this_cov['projs'] = this_evoked.info['projs']
        assert this_evoked.ch_names == this_cov['names']
        stc = apply_lcmv_cov(this_cov, filters)
        assert stc.data.min() > 0
        assert stc.shape == (498, 1)
        ev = EvokedArray(this_cov.data, this_evoked.info)
        stc_1 = apply_lcmv(ev, filters)
        assert stc_1.data.min() < 0
        ev = EvokedArray(stc_1.data.T, this_evoked.info)
        stc_2 = apply_lcmv(ev, filters)
        assert stc_2.data.shape == (498, 498)
        data = np.diag(stc_2.data)[:, np.newaxis]
        assert data.min() > 0
        assert_allclose(data, stc.data, rtol=1e-12)
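For reference, the identity this loop verifies is that source power computed from a covariance C equals diag(W C Wᵀ): the beamformer filters W applied to C and then to the transposed result. A numpy-only sketch of that identity with made-up W and C (stand-ins, not the test's data):

import numpy as np

rng = np.random.RandomState(0)
W = rng.randn(498, 10)             # stand-in filter weights (n_sources, n_channels)
A = rng.randn(10, 10)
C = A @ A.T                        # symmetric PSD stand-in "covariance"

power = np.diag(W @ C @ W.T)       # per-source power from the covariance
two_step = np.diag(W @ (W @ C).T)  # filters applied to C, then to the transposed result
np.testing.assert_allclose(power, two_step, rtol=1e-12)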
Example No. 2
def test_plot_topomap_channel_distance():
    """
    Test topomap plotting with spread out channels (gh-9511, gh-9526).

    Test topomap plotting when the distance between channels is greater than
    the head radius.
    """
    ch_names = ['TP9', 'AF7', 'AF8', 'TP10']

    info = create_info(ch_names, 100, ch_types='eeg')
    evoked = EvokedArray(np.random.randn(4, 10) * 1e-6, info)
    ten_five = make_standard_montage("standard_1005")
    evoked.set_montage(ten_five)

    evoked.plot_topomap(sphere=0.05, res=8)
    plt.close('all')
Example No. 3
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull"""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'

    subjects_dir = op.join(data_path, 'subjects')
    fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
    fname_trans = op.join(data_path, 'MEG', 'sample',
                          'sample_audvis_trunc-trans.fif')
    fname_bem = op.join(subjects_dir, 'sample', 'bem',
                        'sample-1280-1280-1280-bem-sol.fif')

    subject = 'sample'

    raw = Raw(raw_fname, preload=True)

    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)

    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])

    # Simulated scalp map
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1

    simulated_scalp_map = simulated_scalp_map[:, None]

    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)

    min_dist = 5.  # distance in mm

    dip, residual = fit_dipole(evoked,
                               cov,
                               fname_bem,
                               fname_trans,
                               min_dist=min_dist)

    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)

    assert_true(min_dist < (dist[0] * 1000.) < (min_dist + 1.))

    assert_raises(ValueError, fit_dipole, evoked, cov, fname_bem, fname_trans,
                  -1.)
Example No. 4
def fnirs_evoked():
    """Create an fnirs evoked structure."""
    montage = make_standard_montage('biosemi16')
    ch_names = montage.ch_names
    ch_types = ['eeg'] * 16
    info = create_info(ch_names=ch_names, sfreq=20, ch_types=ch_types)
    evoked_data = np.random.randn(16, 30)
    evoked = EvokedArray(evoked_data, info=info, tmin=-0.2, nave=4)
    evoked.set_montage(montage)
    evoked.set_channel_types(
        {
            'Fp1': 'hbo',
            'Fp2': 'hbo',
            'F4': 'hbo',
            'Fz': 'hbo'
        },
        verbose='error')
    return evoked
Example No. 5
def test_equalize_channels():
    """Test equalizing channels and their ordering."""
    # This function only tests the generic functionality of equalize_channels.
    # Additional tests for each instance type are included in the accompanying
    # test suite for each type.
    with pytest.raises(TypeError,
                       match='Instances to be modified must be an instance of'):
        equalize_channels(['foo', 'bar'])

    raw = RawArray([[1.], [2.], [3.], [4.]],
                   create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.))
    epochs = EpochsArray([[[1.], [2.], [3.]]],
                         create_info(['CH5', 'CH2', 'CH1'], sfreq=1.))
    cov = make_ad_hoc_cov(
        create_info(['CH2', 'CH1', 'CH8'], sfreq=1., ch_types='eeg'))
    cov['bads'] = ['CH1']
    ave = EvokedArray([[1.], [2.]], create_info(['CH1', 'CH2'], sfreq=1.))

    raw2, epochs2, cov2, ave2 = equalize_channels([raw, epochs, cov, ave],
                                                  copy=True)

    # The Raw object was the first in the list, so should have been used as
    # template for the ordering of the channels. No bad channels should have
    # been dropped.
    assert raw2.ch_names == ['CH1', 'CH2']
    assert_array_equal(raw2.get_data(), [[1.], [2.]])
    assert epochs2.ch_names == ['CH1', 'CH2']
    assert_array_equal(epochs2.get_data(), [[[3.], [2.]]])
    assert cov2.ch_names == ['CH1', 'CH2']
    assert cov2['bads'] == cov['bads']
    assert ave2.ch_names == ave.ch_names
    assert_array_equal(ave2.data, ave.data)

    # All objects should have been copied, except for the Evoked object which
    # did not have to be touched.
    assert raw is not raw2
    assert epochs is not epochs2
    assert cov is not cov2
    assert ave is ave2

    # Test in-place operation
    raw2, epochs2 = equalize_channels([raw, epochs], copy=False)
    assert raw is raw2
    assert epochs is epochs2
Example No. 6
def test_min_distance_fit_dipole():
    """Test dipole min_dist to inner_skull."""
    subject = 'sample'
    raw = read_raw_fif(fname_raw, preload=True)

    # select eeg data
    picks = pick_types(raw.info, meg=False, eeg=True, exclude='bads')
    info = pick_info(raw.info, picks)

    # Let's use cov = Identity
    cov = read_cov(fname_cov)
    cov['data'] = np.eye(cov['data'].shape[0])

    # Simulated scalp map
    simulated_scalp_map = np.zeros(picks.shape[0])
    simulated_scalp_map[27:34] = 1

    simulated_scalp_map = simulated_scalp_map[:, None]

    evoked = EvokedArray(simulated_scalp_map, info, tmin=0)

    min_dist = 5.  # distance in mm

    bem = read_bem_solution(fname_bem)
    dip, residual = fit_dipole(evoked,
                               cov,
                               bem,
                               fname_trans,
                               min_dist=min_dist,
                               tol=1e-4)
    assert isinstance(residual, Evoked)

    dist = _compute_depth(dip, fname_bem, fname_trans, subject, subjects_dir)

    # Constraints are not exact, so bump the minimum slightly
    assert (min_dist - 0.1 < (dist[0] * 1000.) < (min_dist + 1.))

    with pytest.raises(ValueError, match='min_dist should be positive'):
        fit_dipole(evoked, cov, fname_bem, fname_trans, -1.)
Example No. 7
def test_plot_topomap_animation():
    """Test topomap plotting."""
    # evoked
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0))
    # Test animation
    _, anim = evoked.animate_topomap(ch_type='grad', times=[0, 0.1],
                                     butterfly=False, time_unit='s')
    anim._func(1)  # _animate has to be tested separately on 'Agg' backend.
    plt.close('all')

    # Test plotting of fnirs types
    montage = make_standard_montage('biosemi16')
    ch_names = montage.ch_names
    ch_types = ['eeg'] * 16
    info = create_info(ch_names=ch_names, sfreq=20, ch_types=ch_types)
    evoked_data = np.random.randn(16, 30)
    evokeds = EvokedArray(evoked_data, info=info, tmin=-0.2, nave=4)
    evokeds.set_montage(montage)
    evokeds.set_channel_types({'Fp1': 'hbo', 'Fp2': 'hbo', 'F4': 'hbo',
                               'Fz': 'hbo'}, verbose='error')
    fig, anim = evokeds.animate_topomap(ch_type='hbo')
    anim._func(1)  # _animate has to be tested separately on 'Agg' backend.
    assert len(fig.axes) == 2
Example No. 8
    def plot_filters(self,
                     info,
                     components=None,
                     ch_type=None,
                     vmin=None,
                     vmax=None,
                     cmap='RdBu_r',
                     sensors=True,
                     colorbar=True,
                     scalings=None,
                     units='a.u.',
                     res=64,
                     size=1,
                     cbar_fmt='%3.1f',
                     name_format='CSP%01d',
                     show=True,
                     show_names=False,
                     title=None,
                     mask=None,
                     mask_params=None,
                     outlines='head',
                     contours=6,
                     image_interp='bilinear',
                     average=None):
        """Plot topographic filters of components.

        The filters are used to extract discriminant neural sources from
        the measured data (a.k.a. the backward model).

        Parameters
        ----------
        info : instance of Info
            Info dictionary of the epochs used for fitting.
            If not possible, consider using ``create_info``.
        components : int | array of int | None
            The filters to plot. If None, n_components will be shown.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
            The channel type to plot. For 'grad', the gradiometers are
            collected in pairs and the RMS for each pair is plotted.
            If None, then first available channel type from order given
            above is used. Defaults to None.
        vmin : float | callable
            The value specifying the lower bound of the color range.
            If None, and vmax is None, -vmax is used. Else np.min(data).
            If callable, the output equals vmin(data).
        vmax : float | callable
            The value specifying the upper bound of the color range.
            If None, the maximum absolute value is used. If vmin is None,
            but vmax is not, defaults to np.min(data).
            If callable, the output equals vmax(data).
        cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
            Colormap to use. If tuple, the first value indicates the colormap
            to use and the second value is a boolean defining interactivity. In
            interactive mode the colors are adjustable by clicking and dragging
            the colorbar with left and right mouse button. Left mouse button
            moves the scale up and down and right mouse button adjusts the
            range. Hitting space bar resets the range. Up and down arrows can
            be used to change the colormap. If None, 'Reds' is used for all
            positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
            translates to (None, True). Defaults to 'RdBu_r'.

            .. warning::  Interactive mode works smoothly only for a small
                amount of topomaps.
        sensors : bool | str
            Add markers for sensor locations to the plot. Accepts matplotlib
            plot format string (e.g., 'r+' for red plusses). If True,
            a circle will be used (via .add_artist). Defaults to True.
        colorbar : bool
            Plot a colorbar.
        scalings : dict | float | None
            The scalings of the channel types to be applied for plotting.
            If None, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
        units : dict | str | None
            The unit of the channel type used for colorbar label. If
            scale is None the unit is automatically determined.
        res : int
            The resolution of the topomap image (n pixels along each side).
        size : float
            Side length per topomap in inches.
        cbar_fmt : str
            String format for colorbar values.
        name_format : str
            String format for topomap values. Defaults to "CSP%%01d".
        show : bool
            Show figure if True.
        show_names : bool | callable
            If True, show channel names on top of the map. If a callable is
            passed, channel names will be formatted using the callable; e.g.,
            to delete the prefix 'MEG ' from all channel names, pass the
            function lambda x: x.replace('MEG ', ''). If `mask` is not None,
            only significant sensors will be shown.
        title : str | None
            Title. If None (default), no title is displayed.
        mask : ndarray of bool, shape (n_channels, n_times) | None
            The channels to be marked as significant at a given time point.
            Indices set to `True` will be considered. Defaults to None.
        mask_params : dict | None
            Additional plotting parameters for plotting significant sensors.
            Default (None) equals::

                dict(marker='o', markerfacecolor='w', markeredgecolor='k',
                     linewidth=0, markersize=4)
        %(topomap_outlines)s
        contours : int | array of float
            The number of contour lines to draw. If 0, no contours will be
            drawn. When an integer, matplotlib ticker locator is used to find
            suitable values for the contour thresholds (may sometimes be
            inaccurate, use array for accuracy). If an array, the values
            represent the levels for the contours. Defaults to 6.
        image_interp : str
            The image interpolation to be used.
            All matplotlib options are accepted.
        average : float | None
            The time window around a given time to be used for averaging
            (seconds). For example, 0.01 would translate into window that
            starts 5 ms before and ends 5 ms after a given time point.
            Defaults to None, which means no averaging.

        Returns
        -------
        fig : instance of matplotlib.figure.Figure
           The figure.
        """
        from mne import EvokedArray
        if components is None:
            components = np.arange(self.n_components)

        # set sampling frequency to have 1 component per time point
        info = cp.deepcopy(info)
        info['sfreq'] = 1.
        # create an evoked
        filters = EvokedArray(self.filters_, info, tmin=0)
        # then call plot_topomap
        return filters.plot_topomap(times=components,
                                    ch_type=ch_type,
                                    vmin=vmin,
                                    vmax=vmax,
                                    cmap=cmap,
                                    colorbar=colorbar,
                                    res=res,
                                    cbar_fmt=cbar_fmt,
                                    sensors=sensors,
                                    scalings=scalings,
                                    units=units,
                                    time_unit='s',
                                    time_format=name_format,
                                    size=size,
                                    show_names=show_names,
                                    title=title,
                                    mask_params=mask_params,
                                    mask=mask,
                                    outlines=outlines,
                                    contours=contours,
                                    image_interp=image_interp,
                                    show=show,
                                    average=average)
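The method above reuses Evoked.plot_topomap for component maps by treating each component as one time point (sfreq=1 Hz, tmin=0). A minimal standalone sketch of the same trick, with random weights and a biosemi16 montage standing in for real spatial filters:

import numpy as np
from mne import create_info, EvokedArray
from mne.channels import make_standard_montage

rng = np.random.RandomState(0)
montage = make_standard_montage('biosemi16')
weights = rng.randn(len(montage.ch_names), 3)  # hypothetical (n_channels, n_components)

# sfreq=1 Hz and tmin=0 make "time point" k correspond to component k
info = create_info(montage.ch_names, sfreq=1., ch_types='eeg')
ev = EvokedArray(weights, info, tmin=0.)
ev.set_montage(montage)
ev.plot_topomap(times=np.arange(3), time_format='Comp. %d',
                colorbar=False, show=False)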
Example No. 9
plt.yticks(tick_marks, target_names)
fig.tight_layout()
ax.set(ylabel='True label', xlabel='Predicted label')

###############################################################################
# The ``patterns_`` attribute of a fitted Xdawn instance (here from the last
# cross-validation fold) can be used for visualization.

fig, axes = plt.subplots(nrows=len(event_id),
                         ncols=n_filter,
                         figsize=(n_filter, len(event_id) * 2))
fitted_xdawn = clf.steps[0][1]
tmp_info = epochs.info.copy()
tmp_info['sfreq'] = 1.
for ii, cur_class in enumerate(sorted(event_id)):
    cur_patterns = fitted_xdawn.patterns_[cur_class]
    pattern_evoked = EvokedArray(cur_patterns[:n_filter].T, tmp_info, tmin=0)
    pattern_evoked.plot_topomap(times=np.arange(n_filter),
                                time_format='Component %d' if ii == 0 else '',
                                colorbar=False,
                                show_names=False,
                                axes=axes[ii],
                                show=False)
    axes[ii, 0].set(ylabel=cur_class)
fig.tight_layout(h_pad=1.0, w_pad=1.0, pad=0.1)

###############################################################################
# References
# ----------
# .. footbibliography::
Example No. 10
    sleep_epochs = sleep_epochs.crop(tmin=tmin, tmax=tmax)
    
    X1, y1 = get_Xy_balanced(sleep_epochs, contrast1)
    
    clf = make_pipeline(Vectorizer(), StandardScaler(),
                        LinearModel(LogisticRegression(max_iter=4000)))

    cv = StratifiedKFold(n_splits=2, shuffle=True)
    
    coef_folds = [] 
    for train_idx, test_idx in cv.split(X1, y1):
        clf.fit(X1[train_idx], y=y1[train_idx])
        #scores1.append(clf.score(X1[test_idx], y=y1[test_idx]))
        coef_folds.append(get_coef(clf, attr='patterns_', inverse_transform=True))
    coef = np.asarray(coef_folds).mean(0).reshape([173, -1]) #mean folds and reshape
    evoked = EvokedArray(coef, sleep_epochs.info, tmin=tmin)
    evokeds.append(evoked)

ga = mne.grand_average(evokeds)

#SLEEP
f = ga.plot_topomap([0., 0.2, 0.4,  0.6, 0.8], scalings=0.1, vmin=-2, vmax=2)
#f = ga.plot_topomap([0.1, 0.2,  0.3, 0.4, 0.5, 0.6,  0.7, 0.8], scalings=0.1, vmin=-6, vmax=6)
#f = ga.plot_topomap([0.1, 0.15,  0.2, 0.25, 0.3, 0.35,  0.4, 0.45, 0.5], scalings=0.1, vmin=-6, vmax=6)

f.savefig(os.path.join(save_path, contrast1[0].split('/')[1]  + 'coefs_new_detail.tiff'))

#WAKE
# f = ga.plot_topomap([0., 0.2, 0.4,  0.6, 0.8], scalings=0.1, vmin=-2, vmax=2) 
# f.savefig(os.path.join(save_path, 'wakecoefs_new.tiff') )
    
Example No. 11
def test_montage():
    """Test making montages"""
    tempdir = _TempDir()
    # no pep8
    input_str = [
        """FidNz 0.00000 10.56381 -2.05108
    FidT9 -7.82694 0.45386 -3.76056
    FidT10 7.82694 0.45386 -3.76056""",
        """// MatLab   Sphere coordinates [degrees]         Cartesian coordinates
    // Label       Theta       Phi    Radius         X         Y         Z       off sphere surface
      E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011
      E2      44.600      -0.880       1.000    0.7119    0.7021   -0.0154   0.00000000000000000
      E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000""",  # noqa
        """# ASA electrode file
    ReferenceLabel  avg
    UnitPosition    mm
    NumberPositions=    68
    Positions
    -86.0761 -19.9897 -47.9860
    85.7939 -20.0093 -48.0310
    0.0083 86.8110 -39.9830
    Labels
    LPA
    RPA
    Nz
    """,
        """Site  Theta  Phi
    Fp1  -92    -72
    Fp2   92     72
    F3   -60    -51
    """,
        """346
     EEG	      F3	 -62.027	 -50.053	      85
     EEG	      Fz	  45.608	      90	      85
     EEG	      F4	   62.01	  50.103	      85
    """,
        """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    """
    ]
    kinds = [
        'test.sfp', 'test.csd', 'test.elc', 'test.txt', 'test.elp', 'test.hpts'
    ]
    for kind, text in zip(kinds, input_str):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        assert_equal(len(montage.ch_names), 3)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (3, 3))
        assert_equal(montage.kind, op.splitext(kind)[0])
        if kind.endswith('csd'):
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            pos2 = np.c_[table['x'], table['y'], table['z']]
            assert_array_almost_equal(pos2, montage.pos, 4)
    # test transform
    input_str = """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """
    kind = 'test_fid.hpts'
    fname = op.join(tempdir, kind)
    with open(fname, 'w') as fid:
        fid.write(input_str)
    montage = read_montage(op.join(tempdir, 'test_fid.hpts'), transform=True)
    # check coordinate transformation
    pos = np.array([-95.0, -31.0, -3.0])
    nasion = np.array([-91, 0, -42])
    lpa = np.array([0, -91, -42])
    rpa = np.array([0, 91, -42])
    fids = np.vstack((nasion, lpa, rpa))
    trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
    pos = apply_trans(trans, pos)
    assert_array_equal(montage.pos[0], pos)
    idx = montage.ch_names.index('2')
    assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
    idx = montage.ch_names.index('1')
    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
    idx = montage.ch_names.index('3')
    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
    pos = np.array([-95.0, -31.0, -3.0])
    montage_fname = op.join(tempdir, 'test_fid.hpts')
    montage = read_montage(montage_fname, unit='mm')
    assert_array_equal(montage.pos[0], pos * 1e-3)

    # test with last
    info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
    _set_montage(info, montage)
    pos2 = np.array([c['loc'][:3] for c in info['chs']])
    assert_array_equal(pos2, montage.pos)
    assert_equal(montage.ch_names, info['ch_names'])

    info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))

    evoked = EvokedArray(data=np.zeros((len(montage.ch_names), 1)),
                         info=info,
                         tmin=0)
    evoked.set_montage(montage)
    pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
    assert_array_equal(pos3, montage.pos)
    assert_equal(montage.ch_names, evoked.info['ch_names'])
Example No. 12
        # Use AUC because chance level is same regardless of the class balance
        clf = make_pipeline(Vectorizer(), StandardScaler(),
                            LinearModel(LogisticRegression()))

        time_decod = SlidingEstimator(clf, n_jobs=1, scoring='roc_auc')
        time_decod.fit(X, y)

        # The `inverse_transform` parameter will call this method on any estimator
        # contained in the pipeline, in reverse order.
        coef = get_coef(time_decod, 'patterns_', inverse_transform=True)

        all_coefs.append(coef)

all_coefs_ave = np.mean(all_coefs, axis=0)

test = EvokedArray(all_coefs_ave, epochs.info, tmin=epochs.times[0])

#use 170
fig = test.plot_joint(times=[0., .050, 0.100, 0.15, 0.25],
                      title='Group EEG pattern %s %s' %
                      (condition1, condition2))

fig.savefig(os.path.join(
    '/home/claire/DATA/Data_Face_House_new_proc/Analysis/Figures',
    'group_pattern-imag-face_imag-house.pdf'),
            bbox_inches='tight')

#for subject_id in range(1,26):
#    if subject_id in exclude:
#        continue
#    else:
Example No. 13
def test_ica_additional():
    """Test additional ICA functionality."""
    import matplotlib.pyplot as plt
    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')
    epochs = Epochs(raw,
                    events[:4],
                    event_id,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True)
    # test if n_components=None works
    with warnings.catch_warnings(record=True):
        ica = ICA(n_components=None,
                  max_pca_components=None,
                  n_pca_components=None,
                  random_state=0)
        ica.fit(epochs, picks=picks, decim=3)
    # for testing eog functionality
    picks2 = pick_types(raw.info,
                        meg=True,
                        stim=False,
                        ecg=False,
                        eog=True,
                        exclude='bads')
    epochs_eog = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks2,
                        baseline=(None, 0),
                        preload=True)

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2,
              n_components=3,
              max_pca_components=4,
              n_pca_components=4)
    assert_true(ica.info is None)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks[:5])
    assert_true(isinstance(ica.info, Info))
    assert_true(ica.n_components_ < 5)

    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    assert_raises(RuntimeError, ica.save, '')
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0),
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert_true(ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert_true(0 in ica.labels_["blinks"])
    # test retrieval of component maps as arrays
    components = ica.get_components()
    template = components[:, 0]
    EvokedArray(components, ica.info, tmin=0.).plot_topomap([0])

    corrmap([ica, ica3],
            template,
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    assert_true(ica2.labels_["blinks"] == ica3.labels_["blinks"])

    plt.close('all')

    # test warnings on bad filenames
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
        ica.save(ica_badname)
        read_ica(ica_badname)
    assert_naming(w, 'test_ica.py', 2)

    # test decim
    ica = ICA(n_components=3, max_pca_components=4, n_pca_components=4)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_equal(raw_._data.shape[1], n_samples)

    # test expl var
    ica = ICA(n_components=1.0, max_pca_components=4, n_pca_components=4)
    with warnings.catch_warnings(record=True):
        ica.fit(raw, picks=None, decim=3)
    assert_true(ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert_true(np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    assert_raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov,
                  n_components=2,
                  max_pca_components=4,
                  n_pca_components=4)
        with warnings.catch_warnings(record=True):  # ICA does not converge
            ica.fit(raw, picks=picks, start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert_true(ica.mixing_matrix_.shape == (2, 2))
        assert_true(ica.unmixing_matrix_.shape == (2, 2))
        assert_true(ica.pca_components_.shape == (4, len(picks)))
        assert_true(sources.shape[1] == ica.n_components_)

        for exclude in [[], [0]]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert_true(ica.exclude == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert_true(ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert_true(
                ica.exclude ==
                [ica_raw.ch_names.index(e) for e in ica_raw.info['bads']])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4,
                       20,
                       l_trans_bandwidth='auto',
                       h_trans_bandwidth='auto',
                       filter_length='auto',
                       phase='zero',
                       fir_window='hamming')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert_true((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10],
                             filter_length='auto',
                             trans_bandwidth=10,
                             phase='zero',
                             fir_window='hamming')
        assert_true((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert_true(ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ _pre_whitener')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in [
                'mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                'pca_mean_', 'pca_explained_variance_', '_pre_whitener'
        ]:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert_true(ica.ch_names == ica_read.ch_names)
        assert_true(isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func=func,
                                   start=0,
                                   stop=10)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, score_func=stats.skew)
    # check exception handling
    assert_raises(ValueError, ica.score_sources, raw, target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis idx params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw,
                             start_find=0,
                             stop_find=50,
                             ecg_ch=ch_name,
                             eog_ch=ch_name,
                             skew_criterion=idx,
                             var_criterion=idx,
                             kurt_criterion=idx)

    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()
    with warnings.catch_warnings(record=True):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_eog(raw)
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_ecg(epochs, method='ctps')
        assert_equal(len(scores), ica.n_components_)
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      epochs.average(),
                      method='ctps')
        assert_raises(ValueError,
                      ica.find_bads_ecg,
                      raw,
                      method='crazy-coupling')

        raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
        idx, scores = ica.find_bads_eog(raw)
        assert_true(isinstance(scores, list))
        assert_equal(len(scores[0]), ica.n_components_)

        idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
        assert_equal(len(scores), ica.n_components_)

        idx, scores = ica.find_bads_ecg(evoked, method='correlation')
        assert_equal(len(scores), ica.n_components_)

    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog,
                                   target='EOG 061',
                                   score_func=func)
        assert_true(ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    assert_raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw,
                                   target='MEG 1531',
                                   score_func='pearsonr')

    with warnings.catch_warnings(record=True):  # filter attenuation warning
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])

    assert_true(ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func='pearsonr')
    with warnings.catch_warnings(record=True):  # filter attenuation warning
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])

    assert_true(eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert_true(ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert_true(ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert_true(ica.n_components_ == len(ica_chans))
    assert_true(ica.n_components_ == ica_epochs.get_data().shape[1])
    assert_true(ica_epochs._raw is None)
    assert_true(ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert_true(ncomps_ == expected)

    ica = ICA()
    ica.fit(raw, picks=picks[:5])
    ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    raw.drop_channels(['MEG 0122'])
    assert_raises(RuntimeError, ica.find_bads_eog, raw)
    assert_raises(RuntimeError, ica.find_bads_ecg, raw)
Example No. 14
    tmin = i * 1.0
    tmax = (i + 1) * 1.0

    # Create :class:`Epochs <mne.Epochs>` object
    epochs = mne.Epochs(raw,
                        events=events,
                        event_id=event_id,
                        tmin=tmin,
                        tmax=tmax,
                        baseline=None,
                        verbose=True,
                        preload=True)
    for i in range(0, len(epochs.events)):
        if i % 2 == 0:
            epochs.events[i, 2] = 3
    #epochs.plot(scalings = 'auto',block = True,n_epochs=10)
    X = epochs.pick_types(meg=False, eeg=True)
    y = epochs.events[:, -1]

    # Define a unique pipeline to sequentially:
    clf = make_pipeline(
        Vectorizer(),  # 1) vectorize across time and channels
        StandardScaler(),  # 2) normalize features across trials
        LinearModel(LogisticRegression()))  # 3) fits a logistic regression
    clf.fit(X, y)

    coef = get_coef(clf, 'patterns_', inverse_transform=True)
    evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
    fig = evoked.plot_topomap(title='EEG Patterns', size=3, show=False)
    fig.savefig(title + "_ti_" + str(tmin) + "_tf_" + str(tmax) + '.png')
Example No. 15
    threshold=threshold,
    tail=1,
    n_jobs=8,
    buffer_size=None,
    adjacency=adjacency,
)

T_obs, clusters, p_values, _ = cluster_stats
good_cluster_inds = np.where(p_values < p_accept)[0]
colors = {"low": "crimson", "high": "steelblue"}
linestyles = {"low": "-", "high": "--"}

# organize data for plotting: the EvokedArray "times" axis is reused to hold
# frequencies, so set sfreq such that the sample spacing matches the freq grid
info['sfreq'] = len(freqs) / freqs[-1]
evokeds = {
    "low": EvokedArray(psds_low.mean(0), info, tmin=freqs[0]),
    "high": EvokedArray(psds_high.mean(0), info, tmin=freqs[0]),
}

#
# loop over clusters
for i_clu, clu_idx in enumerate(good_cluster_inds):
    # unpack cluster information, get unique indices
    time_inds, space_inds = np.squeeze(clusters[clu_idx])
    ch_inds = np.unique(space_inds)
    time_inds = np.unique(time_inds)

    # get topography for F stat
    f_map = T_obs[time_inds, ...].mean(axis=0)

    # get signals at the sensors contributing to the cluster
Example No. 16
def test_montage():
    """Test making montages."""
    tempdir = _TempDir()
    inputs = dict(
        sfp='FidNz 0       9.071585155     -2.359754454\n'
        'FidT9 -6.711765       0.040402876     -3.251600355\n'
        'very_very_very_long_name -5.831241498 -4.494821698  4.955347697\n'
        'Cz 0       0       8.899186843',
        csd=
        '// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa: E501
        '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa: E501
        'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa: E501
        'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000\n'  # noqa: E501
        'E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000\n'  # noqa: E501
        'E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022',  # noqa: E501
        mm_elc=
        '# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'  # noqa:E501
        'NumberPositions=    68\n'
        'Positions\n'
        '-86.0761 -19.9897 -47.9860\n'
        '85.7939 -20.0093 -48.0310\n'
        '0.0083 86.8110 -39.9830\n'
        '-86.0761 -24.9897 -67.9860\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        m_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    m\n'
        'NumberPositions=    68\nPositions\n-.0860761 -.0199897 -.0479860\n'  # noqa:E501
        '.0857939 -.0200093 -.0480310\n.0000083 .00868110 -.0399830\n'
        '.08 -.02 -.04\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        txt='Site  Theta  Phi\n'
        'Fp1  -92    -72\n'
        'Fp2   92     72\n'
        'very_very_very_long_name       -92     72\n'
        'O2        92    -90\n',
        elp='346\n'
        'EEG\t      F3\t -62.027\t -50.053\t      85\n'
        'EEG\t      Fz\t  45.608\t      90\t      85\n'
        'EEG\t      F4\t   62.01\t  50.103\t      85\n'
        'EEG\t      FCz\t   68.01\t  58.103\t      85\n',
        hpts='eeg Fp1 -95.0 -3. -3.\n'
        'eeg AF7 -1 -1 -3\n'
        'eeg A3 -2 -2 2\n'
        'eeg A 0 0 0',
    )
    # Get actual positions and save them for checking
    # csd comes from the string above, all others come from commit 2fa35d4
    poss = dict(
        sfp=[[0.0, 9.07159, -2.35975], [-6.71176, 0.0404, -3.2516],
             [-5.83124, -4.49482, 4.95535], [0.0, 0.0, 8.89919]],
        mm_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
                [1e-05, 0.08681, -0.03998], [-0.08608, -0.02499, -0.06799]],
        m_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
               [1e-05, 0.00868, -0.03998], [0.08, -0.02, -0.04]],
        txt=[[-26.25044, 80.79056, -2.96646], [26.25044, 80.79056, -2.96646],
             [-26.25044, -80.79056, -2.96646], [0.0, -84.94822, -2.96646]],
        elp=[[-48.20043, 57.55106, 39.86971], [0.0, 60.73848, 59.4629],
             [48.1426, 57.58403, 39.89198], [41.64599, 66.91489, 31.8278]],
        hpts=[[-95, -3, -3], [-1, -1., -3.], [-2, -2, 2.], [0, 0, 0]],
    )
    for key, text in inputs.items():
        kind = key.split('_')[-1]
        fname = op.join(tempdir, 'test.' + kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        if kind in ('sfp', 'txt'):
            assert_true('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 4)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (4, 3))
        assert_equal(montage.kind, 'test')
        if kind == 'csd':
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            poss['csd'] = np.c_[table['x'], table['y'], table['z']]
        if kind == 'elc':
            # Make sure points are reasonable distance from geometric centroid
            centroid = np.sum(montage.pos, axis=0) / montage.pos.shape[0]
            distance_from_centroid = np.apply_along_axis(
                np.linalg.norm, 1, montage.pos - centroid)
            assert_array_less(distance_from_centroid, 0.2)
            assert_array_less(0.01, distance_from_centroid)
        assert_array_almost_equal(poss[key], montage.pos, 4, err_msg=key)

    # Test reading in different letter case.
    ch_names = [
        "F3", "FZ", "F4", "FC3", "FCz", "FC4", "C3", "CZ", "C4", "CP3", "CPZ",
        "CP4", "P3", "PZ", "P4", "O1", "OZ", "O2"
    ]
    montage = read_montage('standard_1020', ch_names=ch_names)
    assert_array_equal(ch_names, montage.ch_names)

    # test transform
    input_strs = [
        """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """, """
    Fp1 -95.0 -31.0 -3.0
    AF7 -81 -59 -3
    AF3 -87 -41 28
    nasion -91 0 -42
    lpa 0 -91 -42
    rpa 0 91 -42
    """
    ]

    all_fiducials = [['2', '1', '3'], ['nasion', 'lpa', 'rpa']]

    kinds = ['test_fid.hpts', 'test_fid.sfp']

    for kind, fiducials, input_str in zip(kinds, all_fiducials, input_strs):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(input_str)
        montage = read_montage(op.join(tempdir, kind), transform=True)

        # check coordinate transformation
        pos = np.array([-95.0, -31.0, -3.0])
        nasion = np.array([-91, 0, -42])
        lpa = np.array([0, -91, -42])
        rpa = np.array([0, 91, -42])
        fids = np.vstack((nasion, lpa, rpa))
        trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
        pos = apply_trans(trans, pos)
        assert_array_equal(montage.pos[0], pos)
        idx = montage.ch_names.index(fiducials[0])
        assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
        idx = montage.ch_names.index(fiducials[1])
        assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
        idx = montage.ch_names.index(fiducials[2])
        assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
        pos = np.array([-95.0, -31.0, -3.0])
        montage_fname = op.join(tempdir, kind)
        montage = read_montage(montage_fname, unit='mm')
        assert_array_equal(montage.pos[0], pos * 1e-3)

        # test with last
        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))
        _set_montage(info, montage)
        pos2 = np.array([c['loc'][:3] for c in info['chs']])
        assert_array_equal(pos2, montage.pos)
        assert_equal(montage.ch_names, info['ch_names'])

        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))

        evoked = EvokedArray(data=np.zeros((len(montage.ch_names), 1)),
                             info=info,
                             tmin=0)
        evoked.set_montage(montage)
        pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
        assert_array_equal(pos3, montage.pos)
        assert_equal(montage.ch_names, evoked.info['ch_names'])

        # Warning should be raised when some EEG are not specified in montage
        with warnings.catch_warnings(record=True) as w:
            info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                               ['eeg'] * (len(montage.ch_names) + 2))
            _set_montage(info, montage)
            assert_true(len(w) == 1)
Example No. 17
def test_ica_additional(method):
    """Test additional ICA functionality."""
    _skip_check_picard(method)

    tempdir = _TempDir()
    stop2 = 500
    raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
    raw.del_proj()  # avoid warnings
    raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
    # XXX This breaks the tests :(
    # raw.info['bads'] = [raw.ch_names[1]]
    test_cov = read_cov(test_cov_name)
    events = read_events(event_name)
    picks = pick_types(raw.info,
                       meg=True,
                       stim=False,
                       ecg=False,
                       eog=False,
                       exclude='bads')[1::2]
    epochs = Epochs(raw,
                    events,
                    None,
                    tmin,
                    tmax,
                    picks=picks,
                    baseline=(None, 0),
                    preload=True,
                    proj=False)
    epochs.decimate(3, verbose='error')
    assert len(epochs) == 4

    # test if n_components=None works
    ica = ICA(n_components=None,
              max_pca_components=None,
              n_pca_components=None,
              random_state=0,
              method=method,
              max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    # for testing eog functionality
    picks2 = np.concatenate([picks, pick_types(raw.info, False, eog=True)])
    epochs_eog = Epochs(raw,
                        events[:4],
                        event_id,
                        tmin,
                        tmax,
                        picks=picks2,
                        baseline=(None, 0),
                        preload=True)
    del picks2

    test_cov2 = test_cov.copy()
    ica = ICA(noise_cov=test_cov2,
              n_components=3,
              max_pca_components=4,
              n_pca_components=4,
              method=method)
    assert (ica.info is None)
    with pytest.warns(RuntimeWarning, match='normalize_proj'):
        ica.fit(raw, picks[:5])
    assert (isinstance(ica.info, Info))
    assert (ica.n_components_ < 5)

    ica = ICA(n_components=3,
              max_pca_components=4,
              method=method,
              n_pca_components=4,
              random_state=0)
    pytest.raises(RuntimeError, ica.save, '')

    ica.fit(raw, picks=[1, 2, 3, 4, 5], start=start, stop=stop2)

    # check passing a ch_name to find_bads_ecg
    with pytest.warns(RuntimeWarning, match='longer'):
        _, scores_1 = ica.find_bads_ecg(raw)
        _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1])
    assert scores_1[0] != scores_2[0]

    # test corrmap
    ica2 = ica.copy()
    ica3 = ica.copy()
    corrmap([ica, ica2], (0, 0),
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False)
    assert (ica.labels_["blinks"] == ica2.labels_["blinks"])
    assert (0 in ica.labels_["blinks"])
    # test retrieval of component maps as arrays
    components = ica.get_components()
    template = components[:, 0]
    EvokedArray(components, ica.info, tmin=0.).plot_topomap([0], time_unit='s')

    corrmap([ica, ica3],
            template,
            threshold='auto',
            label='blinks',
            plot=True,
            ch_type="mag")
    assert (ica2.labels_["blinks"] == ica3.labels_["blinks"])

    plt.close('all')

    # make sure a single threshold in a list works
    corrmap([ica, ica3],
            template,
            threshold=[0.5],
            label='blinks',
            plot=True,
            ch_type="mag")

    ica_different_channels = ICA(n_components=2,
                                 random_state=0).fit(raw, picks=[2, 3, 4, 5])
    pytest.raises(ValueError, corrmap, [ica_different_channels, ica], (0, 0))

    # test warnings on bad filenames
    ica_badname = op.join(op.dirname(tempdir), 'test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        ica.save(ica_badname)
    with pytest.warns(RuntimeWarning, match='-ica.fif'):
        read_ica(ica_badname)

    # test decim
    ica = ICA(n_components=3,
              max_pca_components=4,
              n_pca_components=4,
              method=method,
              max_iter=1)
    raw_ = raw.copy()
    for _ in range(3):
        raw_.append(raw_)
    n_samples = raw_._data.shape[1]
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=picks[:5], decim=3)
    assert raw_._data.shape[1] == n_samples

    # test expl var
    ica = ICA(n_components=1.0,
              max_pca_components=4,
              n_pca_components=4,
              method=method,
              max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw, picks=None, decim=3)
    assert (ica.n_components_ == 4)
    ica_var = _ica_explained_variance(ica, raw, normalize=True)
    assert (np.all(ica_var[:-1] >= ica_var[1:]))

    # test ica sorting
    ica.exclude = [0]
    ica.labels_ = dict(blink=[0], think=[1])
    ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
    assert_equal(ica_sorted.exclude, [3])
    assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))

    # epochs extraction from raw fit
    pytest.raises(RuntimeError, ica.get_sources, epochs)
    # test reading and writing
    test_ica_fname = op.join(op.dirname(tempdir), 'test-ica.fif')
    for cov in (None, test_cov):
        ica = ICA(noise_cov=cov,
                  n_components=2,
                  max_pca_components=4,
                  n_pca_components=4,
                  method=method,
                  max_iter=1)
        with pytest.warns(None):  # ICA does not converge
            ica.fit(raw, picks=picks[:10], start=start, stop=stop2)
        sources = ica.get_sources(epochs).get_data()
        assert (ica.mixing_matrix_.shape == (2, 2))
        assert (ica.unmixing_matrix_.shape == (2, 2))
        assert (ica.pca_components_.shape == (4, 10))
        assert (sources.shape[1] == ica.n_components_)

        for exclude in [[], [0], np.array([1, 2, 3])]:
            ica.exclude = exclude
            ica.labels_ = {'foo': [0]}
            ica.save(test_ica_fname)
            ica_read = read_ica(test_ica_fname)
            assert (list(ica.exclude) == ica_read.exclude)
            assert_equal(ica.labels_, ica_read.labels_)
            ica.apply(raw)
            ica.exclude = []
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [])

            ica.exclude = [0, 1]
            ica.apply(raw, exclude=[1])
            assert (ica.exclude == [0, 1])

            ica_raw = ica.get_sources(raw)
            assert (ica.exclude == [
                ica_raw.ch_names.index(e) for e in ica_raw.info['bads']
            ])

        # test filtering
        d1 = ica_raw._data[0].copy()
        ica_raw.filter(4, 20, fir_design='firwin2')
        assert_equal(ica_raw.info['lowpass'], 20.)
        assert_equal(ica_raw.info['highpass'], 4.)
        assert ((d1 != ica_raw._data[0]).any())
        d1 = ica_raw._data[0].copy()
        ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
        assert ((d1 != ica_raw._data[0]).any())

        ica.n_pca_components = 2
        ica.method = 'fake'
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        assert (ica.n_pca_components == ica_read.n_pca_components)
        assert_equal(ica.method, ica_read.method)
        assert_equal(ica.labels_, ica_read.labels_)

        # check type consistency
        attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
                 'pca_explained_variance_ pre_whitener_')

        def f(x, y):
            return getattr(x, y).dtype

        for attr in attrs.split():
            assert_equal(f(ica_read, attr), f(ica, attr))

        ica.n_pca_components = 4
        ica_read.n_pca_components = 4

        ica.exclude = []
        ica.save(test_ica_fname)
        ica_read = read_ica(test_ica_fname)
        for attr in [
                'mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
                'pca_mean_', 'pca_explained_variance_', 'pre_whitener_'
        ]:
            assert_array_almost_equal(getattr(ica, attr),
                                      getattr(ica_read, attr))

        assert (ica.ch_names == ica_read.ch_names)
        assert (isinstance(ica_read.info, Info))

        sources = ica.get_sources(raw)[:, :][0]
        sources2 = ica_read.get_sources(raw)[:, :][0]
        assert_array_almost_equal(sources, sources2)

        _raw1 = ica.apply(raw, exclude=[1])
        _raw2 = ica_read.apply(raw, exclude=[1])
        assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])

    os.remove(test_ica_fname)
    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func=func,
                                   start=0,
                                   stop=10)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(raw, start=0, stop=50, score_func=stats.skew)
    # check exception handling
    pytest.raises(ValueError, ica.score_sources, raw, target=np.arange(1))

    params = []
    params += [(None, -1, slice(2), [0, 1])]  # variance, kurtosis params
    params += [(None, 'MEG 1531')]  # ECG / EOG channel params
    for idx, ch_name in product(*params):
        ica.detect_artifacts(raw,
                             start_find=0,
                             stop_find=50,
                             ecg_ch=ch_name,
                             eog_ch=ch_name,
                             skew_criterion=idx,
                             var_criterion=idx,
                             kurt_criterion=idx)

    # Make sure detect_artifacts marks the right components.
    # For int criterion, the doc says "E.g. range(2) would return the two
    # sources with the highest score". Assert that's what it does.
    # Only test for skew, since it's always the same code.
    ica.exclude = []
    ica.detect_artifacts(raw,
                         start_find=0,
                         stop_find=50,
                         ecg_ch=None,
                         eog_ch=None,
                         skew_criterion=0,
                         var_criterion=None,
                         kurt_criterion=None)
    assert np.abs(scores[ica.exclude]) == np.max(np.abs(scores))

    evoked = epochs.average()
    evoked_data = evoked.data.copy()
    raw_data = raw[:][0].copy()
    epochs_data = epochs.get_data().copy()

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='ctps')
    assert_equal(len(scores), ica.n_components_)
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_ecg(raw, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(epochs, method='ctps')

    assert_equal(len(scores), ica.n_components_)
    pytest.raises(ValueError,
                  ica.find_bads_ecg,
                  epochs.average(),
                  method='ctps')
    pytest.raises(ValueError, ica.find_bads_ecg, raw, method='crazy-coupling')

    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert_equal(len(scores), ica.n_components_)

    raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
    with pytest.warns(RuntimeWarning, match='longer'):
        idx, scores = ica.find_bads_eog(raw)
    assert (isinstance(scores, list))
    assert_equal(len(scores[0]), ica.n_components_)

    idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
    assert_equal(len(scores), ica.n_components_)

    idx, scores = ica.find_bads_ecg(evoked, method='correlation')
    assert_equal(len(scores), ica.n_components_)

    assert_array_equal(raw_data, raw[:][0])
    assert_array_equal(epochs_data, epochs.get_data())
    assert_array_equal(evoked_data, evoked.data)

    # check score funcs
    for name, func in get_score_funcs().items():
        if name in score_funcs_unsuited:
            continue
        scores = ica.score_sources(epochs_eog,
                                   target='EOG 061',
                                   score_func=func)
        assert (ica.n_components_ == len(scores))

    # check univariate stats
    scores = ica.score_sources(epochs, score_func=stats.skew)

    # check exception handling
    pytest.raises(ValueError, ica.score_sources, epochs, target=np.arange(1))

    # ecg functionality
    ecg_scores = ica.score_sources(raw,
                                   target='MEG 1531',
                                   score_func='pearsonr')

    with pytest.warns(RuntimeWarning, match='longer'):
        ecg_events = ica_find_ecg_events(raw,
                                         sources[np.abs(ecg_scores).argmax()])
    assert (ecg_events.ndim == 2)

    # eog functionality
    eog_scores = ica.score_sources(raw,
                                   target='EOG 061',
                                   score_func='pearsonr')
    with pytest.warns(RuntimeWarning, match='longer'):
        eog_events = ica_find_eog_events(raw,
                                         sources[np.abs(eog_scores).argmax()])
    assert (eog_events.ndim == 2)

    # Test ica fiff export
    ica_raw = ica.get_sources(raw, start=0, stop=100)
    assert (ica_raw.last_samp - ica_raw.first_samp == 100)
    assert_equal(len(ica_raw._filenames), 1)  # API consistency
    ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
    ica.n_components = np.int32(ica.n_components)
    ica_raw.save(test_ica_fname, overwrite=True)
    ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
    assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
    ica_raw2.close()
    os.remove(test_ica_fname)

    # Test ica epochs export
    ica_epochs = ica.get_sources(epochs)
    assert (ica_epochs.events.shape == epochs.events.shape)
    ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
    assert (ica.n_components_ == len(ica_chans))
    assert (ica.n_components_ == ica_epochs.get_data().shape[1])
    assert (ica_epochs._raw is None)
    assert (ica_epochs.preload is True)

    # test float n pca components
    ica.pca_explained_variance_ = np.array([0.2] * 5)
    ica.n_components_ = 0
    for ncomps, expected in [[0.3, 1], [0.9, 4], [1, 1]]:
        ncomps_ = ica._check_n_pca_components(ncomps)
        assert (ncomps_ == expected)

    ica = ICA(method=method)
    with pytest.warns(None):  # sometimes does not converge
        ica.fit(raw, picks=picks[:5])
    with pytest.warns(RuntimeWarning, match='longer'):
        ica.find_bads_ecg(raw)
    ica.find_bads_eog(epochs, ch_name='MEG 0121')
    assert_array_equal(raw_data, raw[:][0])

    raw.drop_channels(['MEG 0122'])
    pytest.raises(RuntimeError, ica.find_bads_eog, raw)
    with pytest.warns(RuntimeWarning, match='longer'):
        pytest.raises(RuntimeError, ica.find_bads_ecg, raw)
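
Outside the test suite, the same pieces reduce to a short workflow. A minimal sketch, assuming `raw` is a preloaded mne.io.Raw object containing an EOG channel (the component count and the 1-40 Hz filter band are arbitrary illustrative choices, not taken from the test above):

import mne

# fit ICA on band-pass filtered data (assumed names; illustrative only)
raw_filt = raw.copy().filter(1., 40.)
ica = mne.preprocessing.ICA(n_components=15, random_state=97)
ica.fit(raw_filt)

# score components against the EOG channel and mark the worst offenders
eog_inds, eog_scores = ica.find_bads_eog(raw_filt)
ica.exclude = eog_inds

# reconstruct sensor data without the excluded components
raw_clean = ica.apply(raw.copy())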
Example #18
def plot_temporal_clusters(good_cluster_inds, evokeds, T_obs, clusters, times,
                           info):
    colors = {"low": "crimson", "high": "steelblue"}
    # linestyles = {"low": "-", "high": "--"}
    #
    # loop over clusters
    for i_clu, clu_idx in enumerate(good_cluster_inds):
        # unpack cluster information, get unique indices
        time_inds, space_inds = np.squeeze(clusters[clu_idx])
        ch_inds = np.unique(space_inds)
        time_inds = np.unique(time_inds)

        # get topography for F stat
        f_map = T_obs[time_inds, ...].mean(axis=0)

        # get signals at the sensors contributing to the cluster
        sig_times = times[time_inds]

        # create spatial mask
        mask = np.zeros((f_map.shape[0], 1), dtype=bool)
        mask[ch_inds, :] = True

        # initialize figure
        fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))

        # plot average test statistic and mark significant sensors
        f_evoked = EvokedArray(f_map[:, np.newaxis], info, tmin=0)
        f_evoked.plot_topomap(
            times=0,
            mask=mask,
            axes=ax_topo,
            cmap="Reds",
            vmin=np.min,
            vmax=np.max,
            show=False,
            colorbar=False,
            mask_params=dict(markersize=10),
            scalings=dict(eeg=1, mag=1, grad=1),
            res=240,
        )
        image = ax_topo.images[0]

        # create additional axes (for ERF and colorbar)
        divider = make_axes_locatable(ax_topo)

        # add axes for colorbar
        ax_colorbar = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(image, cax=ax_colorbar)
        ax_topo.set_xlabel(
            "Averaged F-map ({:0.3f} - {:0.3f} s)".format(*sig_times[[0, -1]]))

        # add new axis for time courses and plot time courses
        ax_signals = divider.append_axes("right", size="300%", pad=1.2)
        title = "Cluster #{0}, {1} sensor".format(i_clu + 1, len(ch_inds))
        if len(ch_inds) > 1:
            title += "s (mean)"
        plot_compare_evokeds(
            evokeds,
            title=title,
            picks=ch_inds,
            axes=ax_signals,
            colors=colors,
            # linestyles=linestyles,
            show=False,
            split_legend=True,
            truncate_yaxis="auto",
        )

        # plot temporal cluster extent
        ymin, ymax = ax_signals.get_ylim()
        ax_signals.fill_betweenx(
            (ymin, ymax),
            sig_times[0],
            sig_times[-1],
            color="orange",
            alpha=0.3,
        )

        # clean up viz
        tight_layout(fig=fig)
        fig.subplots_adjust(bottom=0.05)
        plt.show()
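
The inputs expected by this helper typically come from a spatio-temporal cluster test. A hedged sketch of how they might be assembled (`X_low`, `X_high`, `adjacency` and the grand-average evokeds are assumptions, not part of the function above):

import numpy as np
from mne.stats import spatio_temporal_cluster_test

# X_low / X_high: arrays of shape (n_subjects, n_times, n_channels)
T_obs, clusters, cluster_pv, H0 = spatio_temporal_cluster_test(
    [X_low, X_high], n_permutations=1000, adjacency=adjacency, tail=0)
good_cluster_inds = np.where(cluster_pv < 0.05)[0]

evokeds = {"low": grand_avg_low, "high": grand_avg_high}
plot_temporal_clusters(good_cluster_inds, evokeds, T_obs, clusters,
                       grand_avg_low.times, grand_avg_low.info)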
Example #19
    # get topography for F stat
    f_map = T_obs[time_inds, ...].mean(axis=0)

    # get signals at the sensors contributing to the cluster
    sig_times = erf_low.times[time_inds]

    # create spatial mask
    mask = np.zeros((f_map.shape[0], 1), dtype=bool)
    mask[ch_inds, :] = True

    # initialize figure
    fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3))

    # plot average test statistic and mark significant sensors
    f_evoked = EvokedArray(f_map[:, np.newaxis], info, tmin=0)
    f_evoked.plot_topomap(times=0,
                          mask=mask,
                          axes=ax_topo,
                          vmin=np.min,
                          vmax=np.max,
                          ch_type="mag",
                          show=False,
                          colorbar=False,
                          mask_params=dict(markersize=10))
    image = ax_topo.images[0]

    # create additional axes (for ERF and colorbar)
    divider = make_axes_locatable(ax_topo)

    # add axes for colorbar
Example #20
    def plot_filters(self,
                     info,
                     components=None,
                     fband=None,
                     ch_type=None,
                     layout=None,
                     vmin=None,
                     vmax=None,
                     cmap='RdBu_r',
                     sensors=True,
                     colorbar=True,
                     scalings=None,
                     units='a.u.',
                     res=64,
                     size=1,
                     cbar_fmt='%3.1f',
                     name_format='CSP%01d',
                     show=True,
                     show_names=False,
                     title=None,
                     mask=None,
                     mask_params=None,
                     outlines='head',
                     contours=6,
                     image_interp='bilinear',
                     average=None,
                     head_pos=None,
                     axes=None):
        """Plot found filters."""
        if components is None:
            components = np.arange(self.n_components)
        filters = self.filters_[self.fbands.index(fband)]

        # set sampling frequency to have 1 component per time point
        info = cp.deepcopy(info)
        info['sfreq'] = 1.
        filters = EvokedArray(filters, info, tmin=0)
        return filters.plot_topomap(times=components,
                                    ch_type=ch_type,
                                    layout=layout,
                                    vmin=vmin,
                                    vmax=vmax,
                                    cmap=cmap,
                                    colorbar=colorbar,
                                    res=res,
                                    cbar_fmt=cbar_fmt,
                                    sensors=sensors,
                                    scalings=scalings,
                                    units=units,
                                    time_unit='s',
                                    time_format=name_format,
                                    size=size,
                                    show_names=show_names,
                                    title=title,
                                    mask_params=mask_params,
                                    mask=mask,
                                    outlines=outlines,
                                    contours=contours,
                                    image_interp=image_interp,
                                    show=show,
                                    average=average,
                                    head_pos=head_pos,
                                    axes=axes)
        info = read_info(raw_fname)
        inv = make_inverse_operator(info, fwd, cov, loose=0.2, depth=0.8)
        save(inv, 'inv', subject=meg_subject, overwrite=True)

# Morph anatomy into common model ---------------------------------------------
from mne import EvokedArray
from mne import compute_morph_matrix
from mne.minimum_norm import apply_inverse
if True:
    for meg_subject, subject in zip(range(1, 21), subjects_id):
        if subject in bad_mri:
            continue
        raw_fname = paths('sss', subject=meg_subject, block=1)
        info = read_info(raw_fname)

        # precompute morphing matrix for faster processing
        inv = load('inv', subject=meg_subject)
        evoked = EvokedArray(np.zeros((len(info['chs']), 2)), info, 0)
        evoked.pick_types(eeg=False, meg=True)
        stc = apply_inverse(evoked,
                            inv,
                            lambda2=1.0 / (2**3.0),
                            method='dSPM',
                            pick_ori=None)
        morph = compute_morph_matrix(subject,
                                     'fsaverage',
                                     stc.vertices,
                                     vertices_to=[np.arange(10242)] * 2,
                                     subjects_dir=subjects_dir)
        save(morph, 'morph', subject=meg_subject)
Example #22
           aspect='auto',
           extent=[epochs.times[0], epochs.times[-1], 1,
                   len(X_transform)],
           cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Trials (reordered by condition)')

# Plot average response
plt.figure()
plt.title('Average EMS signal')
mappings = [(key, value) for key, value in event_ids.items()]
for key, value in mappings:
    ems_ave = X_transform[y == value]
    plt.plot(epochs.times, ems_ave.mean(0), label=key)
plt.xlabel('Time (ms)')
plt.ylabel('a.u.')
plt.legend(loc='best')
plt.show()

# Visualize spatial filters across time
evoked = EvokedArray(filters, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(time_unit='s', scalings=1)

#############################################################################
# Note that a similar transformation can be applied with `compute_ems`.
# However, this function replicates Schurger et al.'s original paper, and thus
# applies the normalization outside of a leave-one-out cross-validation, which
# we do not recommend.
epochs.equalize_event_counts(event_ids)
X_transform, filters, classes = compute_ems(epochs)
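
To keep the normalization inside the cross-validation instead, the spatial filter can be recomputed on training trials only. A rough numpy sketch of the idea (difference of condition means, normalized per time point), assuming `X` has shape (n_trials, n_channels, n_times) and `y` holds condition labels 0 and 1:

import numpy as np
from sklearn.model_selection import LeaveOneOut

surrogates = np.zeros((len(X), X.shape[2]))
for train, test in LeaveOneOut().split(X):
    # EMS-style filter: normalized difference of condition means (training set)
    d = X[train][y[train] == 1].mean(0) - X[train][y[train] == 0].mean(0)
    d /= np.linalg.norm(d, axis=0)                  # normalize across channels
    surrogates[test] = np.sum(X[test] * d, axis=1)  # project held-out trial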
Example #23
def _get_matrix_from_inverse_operator(inverse_operator, forward, method='dSPM',
                                      lambda2=1. / 9.):
    """Get inverse matrix from an inverse operator.

    Currently works only for fixed/loose orientation constraints.
    For loose orientation constraint, the CTFs are computed for the normal
    component (pick_ori='normal').

    Parameters
    ----------
    inverse_operator : instance of InverseOperator
        The inverse operator.
    forward : instance of Forward
        The forward operator.
    method : 'MNE' | 'dSPM' | 'sLORETA'
        Inverse methods (for apply_inverse).
    lambda2 : float
        The regularization parameter (for apply_inverse).

    Returns
    -------
    invmat : array, shape (n_dipoles, n_channels)
        Inverse matrix associated with inverse operator and specified
        parameters.
    """
    # make sure forward and inverse operators match with respect to
    # surface orientation
    _convert_forward_match_inv(forward, inverse_operator)

    info_inv = _prepare_info(inverse_operator)

    # only use channels that are good for inverse operator and forward sol
    ch_names_inv = info_inv['ch_names']
    n_chs_inv = len(ch_names_inv)
    bads_inv = inverse_operator['info']['bads']

    # indices of bad channels
    ch_idx_bads = [ch_names_inv.index(ch) for ch in bads_inv]

    # create identity matrix as input for inverse operator
    # set elements to zero for non-selected channels
    id_mat = np.eye(n_chs_inv)

    # convert identity matrix to evoked data type (pretending it's an epoch)
    ev_id = EvokedArray(id_mat, info=info_inv, tmin=0.)

    # apply inverse operator to identity matrix in order to get inverse matrix
    # free orientation constraint not possible because apply_inverse would
    # combine components

    # check if inverse operator uses fixed source orientations
    is_fixed_inv = _check_fixed_ori(inverse_operator)

    # choose pick_ori according to inverse operator
    if is_fixed_inv:
        pick_ori = None
    else:
        pick_ori = 'vector'

    # columns for bad channels will be zero
    invmat_op = apply_inverse(ev_id, inverse_operator, lambda2=lambda2,
                              method=method, pick_ori=pick_ori)

    # turn source estimate into numpy array
    invmat = invmat_op.data

    # remove columns for bad channels
    # take into account it may be 3D array
    invmat = np.delete(invmat, ch_idx_bads, axis=invmat.ndim - 1)

    # if 3D array, i.e. multiple values per location (fixed and loose),
    # reshape into 2D array
    if invmat.ndim == 3:
        v0o1 = invmat[0, 1].copy()
        v3o2 = invmat[3, 2].copy()
        shape = invmat.shape
        invmat = invmat.reshape(shape[0] * shape[1], shape[2])
        # make sure that reshaping worked
        assert np.array_equal(v0o1, invmat[1])
        assert np.array_equal(v3o2, invmat[11])

    logger.info("Dimension of Inverse Matrix: %s" % str(invmat.shape))

    return invmat
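
A typical use of such a matrix is to combine it with the forward gain matrix into a resolution matrix (columns approximate point-spread functions, rows cross-talk functions). A hedged sketch, assuming `fwd` and `inverse_operator` are a matching Forward/InverseOperator pair sharing the same good channels:

from mne import convert_forward_solution

fwd_fixed = convert_forward_solution(fwd, surf_ori=True, force_fixed=True)
gain = fwd_fixed['sol']['data']            # shape (n_channels, n_dipoles)

invmat = _get_matrix_from_inverse_operator(inverse_operator, fwd,
                                           method='dSPM', lambda2=1. / 9.)
# assumes no bad channels, so the two channel sets line up
resolution_matrix = invmat @ gain          # shape (n_dipoles, n_dipoles)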
Example #24
def test_montage():
    """Test making montages"""
    tempdir = _TempDir()
    # no pep8
    input_str = [
        'FidNz 0.00000 10.56381 -2.05108\nFidT9 -7.82694 0.45386 -3.76056\n'
        'very_very_very_long_name 7.82694 0.45386 -3.76056',
        '// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa
        '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa
        'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa
        'E2      44.600      -0.880       1.000    0.7119    0.7021   -0.0154   0.00000000000000000\n'  # noqa
        'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000',  # noqa
        '# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'
        'NumberPositions=    68\nPositions\n-86.0761 -19.9897 -47.9860\n'
        '85.7939 -20.0093 -48.0310\n0.0083 86.8110 -39.9830\n'
        'Labels\nLPA\nRPA\nNz\n',
        'Site  Theta  Phi\nFp1  -92    -72\nFp2   92     72\n'
        'very_very_very_long_name   -60    -51\n',
        '346\n'
        'EEG	      F3	 -62.027	 -50.053	      85\n'
        'EEG	      Fz	  45.608	      90	      85\n'
        'EEG	      F4	   62.01	  50.103	      85\n',
        'eeg Fp1 -95.0 -31.0 -3.0\neeg AF7 -81 -59 -3\neeg AF3 -87 -41 28\n'
    ]
    kinds = ['test.sfp', 'test.csd', 'test.elc', 'test.txt', 'test.elp',
             'test.hpts']
    for kind, text in zip(kinds, input_str):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        if ".sfp" in kind or ".txt" in kind:
            assert_true('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 3)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (3, 3))
        assert_equal(montage.kind, op.splitext(kind)[0])
        if kind.endswith('csd'):
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            pos2 = np.c_[table['x'], table['y'], table['z']]
            assert_array_almost_equal(pos2, montage.pos, 4)
    # test transform
    input_str = """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """
    kind = 'test_fid.hpts'
    fname = op.join(tempdir, kind)
    with open(fname, 'w') as fid:
        fid.write(input_str)
    montage = read_montage(op.join(tempdir, 'test_fid.hpts'), transform=True)
    # check coordinate transformation
    pos = np.array([-95.0, -31.0, -3.0])
    nasion = np.array([-91, 0, -42])
    lpa = np.array([0, -91, -42])
    rpa = np.array([0, 91, -42])
    fids = np.vstack((nasion, lpa, rpa))
    trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
    pos = apply_trans(trans, pos)
    assert_array_equal(montage.pos[0], pos)
    idx = montage.ch_names.index('2')
    assert_array_equal(montage.pos[idx, [0, 2]], [0, 0])
    idx = montage.ch_names.index('1')
    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
    idx = montage.ch_names.index('3')
    assert_array_equal(montage.pos[idx, [1, 2]], [0, 0])
    pos = np.array([-95.0, -31.0, -3.0])
    montage_fname = op.join(tempdir, 'test_fid.hpts')
    montage = read_montage(montage_fname, unit='mm')
    assert_array_equal(montage.pos[0], pos * 1e-3)

    # test with last
    info = create_info(montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))
    _set_montage(info, montage)
    pos2 = np.array([c['loc'][:3] for c in info['chs']])
    assert_array_equal(pos2, montage.pos)
    assert_equal(montage.ch_names, info['ch_names'])

    info = create_info(
        montage.ch_names, 1e3, ['eeg'] * len(montage.ch_names))

    evoked = EvokedArray(
        data=np.zeros((len(montage.ch_names), 1)), info=info, tmin=0)
    evoked.set_montage(montage)
    pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
    assert_array_equal(pos3, montage.pos)
    assert_equal(montage.ch_names, evoked.info['ch_names'])

    # Warning should be raised when some EEG are not specified in the montage
    with warnings.catch_warnings(record=True) as w:
        info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                           ['eeg'] * (len(montage.ch_names) + 2))
        _set_montage(info, montage)
        assert_true(len(w) == 1)
Example #25
def _compare_inverses_approx(inv_1,
                             inv_2,
                             evoked,
                             rtol,
                             atol,
                             depth_atol=1e-6,
                             ctol=0.999999,
                             check_nn=True,
                             check_K=True):
    """Compare inverses."""
    # depth prior
    if inv_1['depth_prior'] is not None:
        assert_allclose(inv_1['depth_prior']['data'],
                        inv_2['depth_prior']['data'],
                        atol=depth_atol)
    else:
        assert (inv_2['depth_prior'] is None)
    # orient prior
    if inv_1['orient_prior'] is not None:
        assert_allclose(inv_1['orient_prior']['data'],
                        inv_2['orient_prior']['data'],
                        atol=1e-7)
    else:
        assert (inv_2['orient_prior'] is None)
    # source cov
    assert_allclose(inv_1['source_cov']['data'],
                    inv_2['source_cov']['data'],
                    atol=1e-7)
    for key in ('units', 'eigen_leads_weighted', 'nsource', 'coord_frame'):
        assert_equal(inv_1[key], inv_2[key], err_msg=key)
    assert_equal(inv_1['eigen_leads']['ncol'], inv_2['eigen_leads']['ncol'])
    K_1 = np.dot(inv_1['eigen_leads']['data'] * inv_1['sing'].astype(float),
                 inv_1['eigen_fields']['data'])
    K_2 = np.dot(inv_2['eigen_leads']['data'] * inv_2['sing'].astype(float),
                 inv_2['eigen_fields']['data'])
    # for free + surf ori, we only care about the ::2
    # (the other two dimensions have arbitrary direction)
    if inv_1['nsource'] * 3 == inv_1['source_nn'].shape[0]:
        # Technically this undersamples the free-orientation, non-surf-ori
        # inverse, but it's probably okay
        sl = slice(2, None, 3)
    else:
        sl = slice(None)
    if check_nn:
        assert_allclose(inv_1['source_nn'][sl],
                        inv_2['source_nn'][sl],
                        atol=1e-4)
    if check_K:
        assert_allclose(np.abs(K_1[sl]), np.abs(K_2[sl]), rtol=rtol, atol=atol)

    # Now let's do some practical tests, too
    evoked = EvokedArray(np.eye(len(evoked.ch_names)), evoked.info)
    for method in ('MNE', 'dSPM'):
        stc_1 = apply_inverse(evoked, inv_1, lambda2, method)
        stc_2 = apply_inverse(evoked, inv_2, lambda2, method)
        assert_equal(stc_1.subject, stc_2.subject)
        assert_equal(stc_1.times, stc_2.times)
        stc_1 = stc_1.data
        stc_2 = stc_2.data
        norms = np.max(stc_1, axis=-1, keepdims=True)
        stc_1 /= norms
        stc_2 /= norms
        corr = np.corrcoef(stc_1.ravel(), stc_2.ravel())[0, 1]
        assert corr > ctol
        assert_allclose(stc_1,
                        stc_2,
                        rtol=rtol,
                        atol=atol,
                        err_msg='%s: %s' % (method, corr))
Example #26
            epok_win.append(evo)

        s_grp = {}
        n_sens = epok_win[0].shape[0]
        for s in range(epok_win[0].shape[0]):
            s_list = [x[s] for x in epok_win]
            grp = ['ASD', 'CTR']
            for g in grp:
                gix = subj_ix[g]
                s_grp[g] = [s_list[i] for i in gix]
            test, pv = ttest_ind(s_grp['ASD'], s_grp['CTR'])
            stats[c] = np.append(stats[c], pv)
        plt.figure(), plt.plot(stats[c]), plt.title([c, w]), plt.axhline(y=.05)
        stats[c] = np.array(stats[c])
        # Plot
        mask = stats[c][:, np.newaxis] <= 0.05
        evoked = EvokedArray(stats[c][:, np.newaxis], iSubj.info, tmin=0.)
        evoked.plot_topomap(title=[c, w], units='p', vmin=0., vmax=.05,
                            times=[0], scalings=1, time_format=None, mask=mask,
                            cbar_fmt='-%0.2f')
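
The same pattern, one value per sensor packed into a single-time-point EvokedArray with significant channels masked, works for any sensor-wise statistic. A self-contained sketch on random data (every name below is made up for illustration):

import numpy as np
from mne import create_info, EvokedArray
from mne.channels import make_standard_montage

montage = make_standard_montage('standard_1020')
ch_names = montage.ch_names[:32]
info = create_info(ch_names, sfreq=1., ch_types='eeg')

pvals = np.random.uniform(0., 0.2, (len(ch_names), 1))  # fake per-sensor p-values
mask = pvals <= 0.05

evoked = EvokedArray(pvals, info, tmin=0.)
evoked.set_montage(montage)
evoked.plot_topomap(times=[0], scalings=1, units='p', vmin=0., vmax=0.05,
                    time_format=None, mask=mask, cbar_fmt='%0.2f')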

Example #27
def test_montage():
    """Test making montages."""
    tempdir = _TempDir()
    inputs = dict(
        sfp='FidNz 0       9.071585155     -2.359754454\n'
        'FidT9 -6.711765       0.040402876     -3.251600355\n'
        'very_very_very_long_name -5.831241498 -4.494821698  4.955347697\n'
        'Cz 0       0       8.899186843',
        csd=
        '// MatLab   Sphere coordinates [degrees]         Cartesian coordinates\n'  # noqa: E501
        '// Label       Theta       Phi    Radius         X         Y         Z       off sphere surface\n'  # noqa: E501
        'E1      37.700     -14.000       1.000    0.7677    0.5934   -0.2419  -0.00000000000000011\n'  # noqa: E501
        'E3      51.700      11.000       1.000    0.6084    0.7704    0.1908   0.00000000000000000\n'  # noqa: E501
        'E31      90.000     -11.000       1.000    0.0000    0.9816   -0.1908   0.00000000000000000\n'  # noqa: E501
        'E61     158.000     -17.200       1.000   -0.8857    0.3579   -0.2957  -0.00000000000000022',  # noqa: E501
        mm_elc=
        '# ASA electrode file\nReferenceLabel  avg\nUnitPosition    mm\n'  # noqa:E501
        'NumberPositions=    68\n'
        'Positions\n'
        '-86.0761 -19.9897 -47.9860\n'
        '85.7939 -20.0093 -48.0310\n'
        '0.0083 86.8110 -39.9830\n'
        '-86.0761 -24.9897 -67.9860\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        m_elc='# ASA electrode file\nReferenceLabel  avg\nUnitPosition    m\n'
        'NumberPositions=    68\nPositions\n-.0860761 -.0199897 -.0479860\n'  # noqa:E501
        '.0857939 -.0200093 -.0480310\n.0000083 .00868110 -.0399830\n'
        '.08 -.02 -.04\n'
        'Labels\nLPA\nRPA\nNz\nDummy\n',
        txt='Site  Theta  Phi\n'
        'Fp1  -92    -72\n'
        'Fp2   92     72\n'
        'very_very_very_long_name       -92     72\n'
        'O2        92    -90\n',
        elp='346\n'
        'EEG\t      F3\t -62.027\t -50.053\t      85\n'
        'EEG\t      Fz\t  45.608\t      90\t      85\n'
        'EEG\t      F4\t   62.01\t  50.103\t      85\n'
        'EEG\t      FCz\t   68.01\t  58.103\t      85\n',
        hpts='eeg Fp1 -95.0 -3. -3.\n'
        'eeg AF7 -1 -1 -3\n'
        'eeg A3 -2 -2 2\n'
        'eeg A 0 0 0',
        bvef='<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n'
        '<!-- Generated by EasyCap Configurator 19.05.2014 -->\n'
        '<Electrodes defaults="false">\n'
        '  <Electrode>\n'
        '    <Name>Fp1</Name>\n'
        '    <Theta>-90</Theta>\n'
        '    <Phi>-72</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>1</Number>\n'
        '  </Electrode>\n'
        '  <Electrode>\n'
        '    <Name>Fz</Name>\n'
        '    <Theta>45</Theta>\n'
        '    <Phi>90</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>2</Number>\n'
        '  </Electrode>\n'
        '  <Electrode>\n'
        '    <Name>F3</Name>\n'
        '    <Theta>-60</Theta>\n'
        '    <Phi>-51</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>3</Number>\n'
        '  </Electrode>\n'
        '  <Electrode>\n'
        '    <Name>F7</Name>\n'
        '    <Theta>-90</Theta>\n'
        '    <Phi>-36</Phi>\n'
        '    <Radius>1</Radius>\n'
        '    <Number>4</Number>\n'
        '  </Electrode>\n'
        '</Electrodes>',
    )
    # Get actual positions and save them for checking
    # csd comes from the string above, all others come from commit 2fa35d4
    poss = dict(
        sfp=[[0.0, 9.07159, -2.35975], [-6.71176, 0.0404, -3.2516],
             [-5.83124, -4.49482, 4.95535], [0.0, 0.0, 8.89919]],
        mm_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
                [1e-05, 0.08681, -0.03998], [-0.08608, -0.02499, -0.06799]],
        m_elc=[[-0.08608, -0.01999, -0.04799], [0.08579, -0.02001, -0.04803],
               [1e-05, 0.00868, -0.03998], [0.08, -0.02, -0.04]],
        txt=[[-26.25044, 80.79056, -2.96646], [26.25044, 80.79056, -2.96646],
             [-26.25044, -80.79056, -2.96646], [0.0, -84.94822, -2.96646]],
        elp=[[-48.20043, 57.55106, 39.86971], [0.0, 60.73848, 59.4629],
             [48.1426, 57.58403, 39.89198], [41.64599, 66.91489, 31.8278]],
        hpts=[[-95, -3, -3], [-1, -1., -3.], [-2, -2, 2.], [0, 0, 0]],
        bvef=[[-26.266444, 80.839803, 5.204748e-15],
              [3.680313e-15, 60.104076, 60.104076],
              [-46.325632, 57.207392, 42.500000],
              [-68.766444, 49.961746, 5.204748e-15]],
    )
    for key, text in inputs.items():
        kind = key.split('_')[-1]
        fname = op.join(tempdir, 'test.' + kind)
        with open(fname, 'w') as fid:
            fid.write(text)
        montage = read_montage(fname)
        if kind in ('sfp', 'txt'):
            assert ('very_very_very_long_name' in montage.ch_names)
        assert_equal(len(montage.ch_names), 4)
        assert_equal(len(montage.ch_names), len(montage.pos))
        assert_equal(montage.pos.shape, (4, 3))
        assert_equal(montage.kind, 'test')
        if kind == 'csd':
            dtype = [('label', 'S4'), ('theta', 'f8'), ('phi', 'f8'),
                     ('radius', 'f8'), ('x', 'f8'), ('y', 'f8'), ('z', 'f8'),
                     ('off_sph', 'f8')]
            try:
                table = np.loadtxt(fname, skip_header=2, dtype=dtype)
            except TypeError:
                table = np.loadtxt(fname, skiprows=2, dtype=dtype)
            poss['csd'] = np.c_[table['x'], table['y'], table['z']]
        if kind == 'elc':
            # Make sure points are reasonable distance from geometric centroid
            centroid = np.sum(montage.pos, axis=0) / montage.pos.shape[0]
            distance_from_centroid = np.apply_along_axis(
                np.linalg.norm, 1, montage.pos - centroid)
            assert_array_less(distance_from_centroid, 0.2)
            assert_array_less(0.01, distance_from_centroid)
        assert_array_almost_equal(poss[key], montage.pos, 4, err_msg=key)

    # Test reading in different letter case.
    ch_names = [
        "F3", "FZ", "F4", "FC3", "FCz", "FC4", "C3", "CZ", "C4", "CP3", "CPZ",
        "CP4", "P3", "PZ", "P4", "O1", "OZ", "O2"
    ]
    montage = read_montage('standard_1020', ch_names=ch_names)
    assert_array_equal(ch_names, montage.ch_names)

    # test transform
    input_strs = [
        """
    eeg Fp1 -95.0 -31.0 -3.0
    eeg AF7 -81 -59 -3
    eeg AF3 -87 -41 28
    cardinal 2 -91 0 -42
    cardinal 1 0 -91 -42
    cardinal 3 0 91 -42
    """, """
    Fp1 -95.0 -31.0 -3.0
    AF7 -81 -59 -3
    AF3 -87 -41 28
    FidNz -91 0 -42
    FidT9 0 -91 -42
    FidT10 0 91 -42
    """
    ]
    # sfp files seem to have Nz, T9, and T10 as fiducials:
    # https://github.com/mne-tools/mne-python/pull/4482#issuecomment-321980611

    kinds = ['test_fid.hpts', 'test_fid.sfp']

    for kind, input_str in zip(kinds, input_strs):
        fname = op.join(tempdir, kind)
        with open(fname, 'w') as fid:
            fid.write(input_str)
        montage = read_montage(op.join(tempdir, kind), transform=True)

        # check coordinate transformation
        pos = np.array([-95.0, -31.0, -3.0])
        nasion = np.array([-91, 0, -42])
        lpa = np.array([0, -91, -42])
        rpa = np.array([0, 91, -42])
        fids = np.vstack((nasion, lpa, rpa))
        trans = get_ras_to_neuromag_trans(fids[0], fids[1], fids[2])
        pos = apply_trans(trans, pos)
        assert_array_equal(montage.pos[0], pos)
        assert_array_equal(montage.nasion[[0, 2]], [0, 0])
        assert_array_equal(montage.lpa[[1, 2]], [0, 0])
        assert_array_equal(montage.rpa[[1, 2]], [0, 0])
        pos = np.array([-95.0, -31.0, -3.0])
        montage_fname = op.join(tempdir, kind)
        montage = read_montage(montage_fname, unit='mm')
        assert_array_equal(montage.pos[0], pos * 1e-3)

        # test with last
        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))
        _set_montage(info, montage)
        pos2 = np.array([c['loc'][:3] for c in info['chs']])
        assert_array_equal(pos2, montage.pos)
        assert_equal(montage.ch_names, info['ch_names'])

        info = create_info(montage.ch_names, 1e3,
                           ['eeg'] * len(montage.ch_names))

        evoked = EvokedArray(data=np.zeros((len(montage.ch_names), 1)),
                             info=info,
                             tmin=0)

        # test return type as well as set montage
        assert (isinstance(evoked.set_montage(montage), type(evoked)))

        pos3 = np.array([c['loc'][:3] for c in evoked.info['chs']])
        assert_array_equal(pos3, montage.pos)
        assert_equal(montage.ch_names, evoked.info['ch_names'])

        # Warning should be raised when some EEG are not specified in montage
        info = create_info(montage.ch_names + ['foo', 'bar'], 1e3,
                           ['eeg'] * (len(montage.ch_names) + 2))
        with pytest.warns(RuntimeWarning, match='position specified'):
            _set_montage(info, montage)

    # Channel names can be treated case insensitive
    info = create_info(['FP1', 'af7', 'AF3'], 1e3, ['eeg'] * 3)
    _set_montage(info, montage)

    # Unless there is a collision in names
    info = create_info(['FP1', 'Fp1', 'AF3'], 1e3, ['eeg'] * 3)
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage)
    assert len(info['dig']) == 5  # 2 EEG w/pos, 3 fiducials
    montage.ch_names = ['FP1', 'Fp1', 'AF3']
    info = create_info(['fp1', 'AF3'], 1e3, ['eeg', 'eeg'])
    assert (info['dig'] is None)
    with pytest.warns(RuntimeWarning, match='position specified'):
        _set_montage(info, montage, set_dig=False)
    assert (info['dig'] is None)

    # test get_pos2d method
    montage = read_montage("standard_1020")
    c3 = montage.get_pos2d()[montage.ch_names.index("C3")]
    c4 = montage.get_pos2d()[montage.ch_names.index("C4")]
    fz = montage.get_pos2d()[montage.ch_names.index("Fz")]
    oz = montage.get_pos2d()[montage.ch_names.index("Oz")]
    f1 = montage.get_pos2d()[montage.ch_names.index("F1")]
    assert (c3[0] < 0)  # left hemisphere
    assert (c4[0] > 0)  # right hemisphere
    assert (fz[1] > 0)  # frontal
    assert (oz[1] < 0)  # occipital
    assert_allclose(fz[0], 0, atol=1e-2)  # midline
    assert_allclose(oz[0], 0, atol=1e-2)  # midline
    assert (f1[0] < 0 and f1[1] > 0)  # left frontal

    # test get_builtin_montages function
    montages = get_builtin_montages()
    assert (len(montages) > 0)  # MNE should always ship with montages
    assert ("standard_1020" in montages)  # 10/20 montage
    assert ("standard_1005" in montages)  # 10/05 montage
Example #28
def test_localization_bias():
    """Test inverse localization bias for minimum-norm solvers."""
    # Identity input
    evoked = _get_evoked()
    evoked.pick_types(meg=True, eeg=True, exclude=())
    evoked = EvokedArray(np.eye(len(evoked.data)), evoked.info)
    noise_cov = read_cov(fname_cov)
    # restrict to limited set of verts (small src here) and one hemi for speed
    fwd_orig = read_forward_solution(fname_fwd)
    vertices = [fwd_orig['src'][0]['vertno'].copy(), []]
    stc = SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
                         vertices, 0., 1.)
    fwd_orig = restrict_forward_to_stc(fwd_orig, stc)

    #
    # Fixed orientation (not very different)
    #
    fwd = fwd_orig.copy()
    inv_fixed = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.,
                                      depth=0.8)
    fwd = convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1])
    for method, lower, upper in (('MNE', 83, 87),
                                 ('dSPM', 96, 98),
                                 ('sLORETA', 100, 100),
                                 ('eLORETA', 100, 100)):
        inv_op = apply_inverse(evoked, inv_fixed, lambda2, method).data
        loc = np.abs(np.dot(inv_op, fwd))
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Loose orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info, fwd, noise_cov, loose=0.2,
                                     depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    for method, lower, upper in (('MNE', 25, 35),
                                 ('dSPM', 25, 35),
                                 ('sLORETA', 35, 40),
                                 ('eLORETA', 40, 45)):
        inv_op = apply_inverse(evoked, inv_free, lambda2, method,
                               pick_ori='vector').data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method

    #
    # Free orientation
    #
    fwd = fwd_orig.copy()
    inv_free = make_inverse_operator(evoked.info, fwd, noise_cov, loose=1.,
                                     depth=0.8)
    fwd = fwd['sol']['data']
    want = np.arange(fwd.shape[1]) // 3
    force_kwargs = dict(method_params=dict(force_equal=True))
    for method, lower, upper, kwargs in (('MNE', 45, 55, {}),
                                         ('dSPM', 40, 45, {}),
                                         ('sLORETA', 90, 95, {}),
                                         ('eLORETA', 90, 95, force_kwargs),
                                         ('eLORETA', 100, 100, {}),
                                         ):
        inv_op = apply_inverse(evoked, inv_free, lambda2, method,
                               pick_ori='vector', **kwargs).data
        loc = np.linalg.norm(np.einsum('vos,sx->vxo', inv_op, fwd), axis=-1)
        # Compute the percentage of sources for which there is no localization
        # bias:
        perc = (want == np.argmax(loc, axis=0)).mean() * 100
        assert lower <= perc <= upper, method
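
The metric checked in each block above is just the fraction of columns of the (absolute) resolution matrix whose maximum lands on the correct source. A tiny illustration on a toy matrix:

import numpy as np

# toy |resolution matrix|: rows = estimated sources, columns = true sources
loc = np.array([[0.9, 0.2, 0.1],
                [0.1, 0.8, 0.7],   # third column peaks on the wrong row
                [0.0, 0.1, 0.2]])
want = np.arange(loc.shape[1])
perc = (want == np.argmax(loc, axis=0)).mean() * 100
print(perc)  # ~66.7: two of the three sources show no localization bias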
Example #29
def plot_glm_surface_projection(inst,
                                statsmodel_df,
                                picks="hbo",
                                value="Coef.",
                                background='w',
                                figure=None,
                                clim='auto',
                                mode='weighted',
                                colormap='RdBu_r',
                                surface='pial',
                                hemi='both',
                                size=800,
                                view=None,
                                colorbar=True,
                                distance=0.03,
                                subjects_dir=None,
                                src=None,
                                verbose=False):
    """
    Project GLM results on to the surface of the brain.

    Note: This function provides a convenient wrapper around low level
    MNE-Python functions. It is convenient if you wish to use a generic head
    model. If you have acquired fMRI images you may wish to use the underlying
    lower level functions.

    Note: This function does not conduct a forward model analysis with photon
    migration etc. It simply projects the values from each channel to the
    local cortical surface. It is useful for visualisation, but users should
    take this into consideration when drawing conclusions from the
    visualisation.

    Parameters
    ----------
    inst : instance of Raw
        Haemoglobin data.
    statsmodel_df : dataframe
        As produced by `statsmodels_to_results`.
    %(picks_base)s good sEEG, ECoG, and DBS channels.
    value : str
        Column from dataframe to plot.
    background : matplotlib color
        Color of the background of the display window.
    figure : mayavi.core.api.Scene, matplotlib.figure.Figure, list, int, None
        If None, a new figure will be created. If multiple views or a
        split view is requested, this must be a list of the appropriate
        length. If int is provided it will be used to identify the Mayavi
        figure by its id or create a new figure with the given id. If an
        instance of matplotlib figure, mpl backend is used for plotting.
    %(clim)s
    mode : str
        Can be "sum" to do a linear sum of weights, "weighted" to make this
        a weighted sum, "nearest" to
        use only the weight of the nearest sensor, or "single" to
        do a distance-weight of the nearest sensor. Default is "sum".
    colormap : str
        Colormap to use.
    surface : str
        The type of surface (inflated, white etc.).
    hemi : str
        Hemisphere id (i.e., 'lh', 'rh', 'both', or 'split'). In the case
        of 'both', both hemispheres are shown in the same window.
        In the case of 'split' hemispheres are displayed side-by-side
        in different viewing panes.
    size : float or tuple of float
        The size of the window, in pixels. Can be one number to specify
        a square window, or the (width, height) of a rectangular window.
        Has no effect with mpl backend.
    view : str
        View to set brain to.
    colorbar : bool
        If True, display colorbar on scene.
    distance : float
        Distance (m) defining the activation "ball" of the sensor.
    %(subjects_dir)s
    src : instance of SourceSpaces
        The source space.
    %(verbose)s

    Returns
    -------
    figure : instance of mne.viz.Brain | matplotlib.figure.Figure
        An instance of :class:`mne.viz.Brain` or matplotlib figure.
    """
    info = deepcopy(inst if isinstance(inst, Info) else inst.info)
    if not (info.ch_names == list(statsmodel_df['ch_name'].values)):
        raise RuntimeError('MNE data structure does not match dataframe '
                           f'results.\nMNE = {info.ch_names}.\n'
                           f'GLM = {list(statsmodel_df["ch_name"].values)}')

    ea = EvokedArray(
        np.tile(statsmodel_df[value].values.T, (1, 1)).T, info.copy())

    return _plot_3d_evoked_array(inst,
                                 ea,
                                 picks=picks,
                                 value=value,
                                 background=background,
                                 figure=figure,
                                 clim=clim,
                                 mode=mode,
                                 colormap=colormap,
                                 surface=surface,
                                 hemi=hemi,
                                 size=size,
                                 view=view,
                                 colorbar=colorbar,
                                 distance=distance,
                                 subjects_dir=subjects_dir,
                                 src=src,
                                 verbose=verbose)
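
A hedged usage sketch: `raw_haemo` and `df` are assumed to exist, with `df` holding one row per channel (e.g. as returned by `statsmodels_to_results`) in the same order as the channels of the Raw object:

# sketch only -- channel names in `df['ch_name']` must match the Raw object,
# as enforced by the check at the top of the function
fig = plot_glm_surface_projection(raw_haemo.copy().pick(picks='hbo'),
                                  statsmodel_df=df, picks='hbo',
                                  value='Coef.', view='dorsal',
                                  colorbar=True)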
Example #30
def simulate_movement(raw,
                      pos,
                      stc,
                      trans,
                      src,
                      bem,
                      cov='simple',
                      mindist=1.0,
                      interp='linear',
                      random_state=None,
                      n_jobs=1,
                      verbose=None):
    """Simulate raw data with head movements

    Parameters
    ----------
    raw : instance of Raw
        The raw instance to use. The measurement info, including the
        head positions, will be used to simulate data.
    pos : str | dict | None
        Name of the position estimates file. Should be in the format of
        the files produced by MaxFilter. If dict, keys should
        be the time points and entries should be 4x4 ``dev_head_t``
        matrices. If None, the original head position (from
        ``raw.info['dev_head_t']``) will be used.
    stc : instance of SourceEstimate
        The source estimate to use to simulate data. Must have the same
        sample rate as the raw data.
    trans : dict | str
        Either a transformation filename (usually made using mne_analyze)
        or an info dict (usually opened using read_trans()).
        If string, an ending of `.fif` or `.fif.gz` will be assumed to
        be in FIF format, any other ending will be assumed to be a text
        file with a 4x4 transformation matrix (like the `--trans` MNE-C
        option).
    src : str | instance of SourceSpaces
        If string, should be a source space filename. Can also be an
        instance of loaded or generated SourceSpaces.
    bem : str
        Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif").
    cov : instance of Covariance | 'simple' | None
        The sensor covariance matrix used to generate noise. If None,
        no noise will be added. If 'simple', a basic (diagonal) ad-hoc
        noise covariance will be used.
    mindist : float
        Minimum distance between sources and the inner skull boundary
        to use during forward calculation.
    interp : str
        Either 'linear' or 'zero', the type of forward-solution
        interpolation to use between provided time points.
    random_state : None | int | np.random.RandomState
        To specify the random generator state.
    n_jobs : int
        Number of jobs to use.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    raw : instance of Raw
        The simulated raw file.

    Notes
    -----
    Events coded with the number of the forward solution used will be placed
    in the raw files in the trigger channel STI101 at the t=0 times of the
    SourceEstimates.

    The resulting SNR will be determined by the structure of the noise
    covariance, and the amplitudes of the SourceEstimate. Note that this
    will vary as a function of position.
    """
    if isinstance(raw, string_types):
        with warnings.catch_warnings(record=True):
            raw = Raw(raw, allow_maxshield=True, preload=True, verbose=False)
    else:
        raw = raw.copy()

    if not isinstance(stc, _BaseSourceEstimate):
        raise TypeError('stc must be a SourceEstimate')
    if not np.allclose(raw.info['sfreq'], 1. / stc.tstep):
        raise ValueError('stc and raw must have same sample rate')
    rng = check_random_state(random_state)
    if interp not in ('linear', 'zero'):
        raise ValueError('interp must be "linear" or "zero"')

    if pos is None:  # use pos from file
        dev_head_ts = [raw.info['dev_head_t']] * 2
        offsets = np.array([0, raw.n_times])
        interp = 'zero'
    else:
        if isinstance(pos, string_types):
            pos = get_chpi_positions(pos, verbose=False)
        if isinstance(pos, tuple):  # can be an already-loaded pos file
            transs, rots, ts = pos
            ts -= raw.first_samp / raw.info['sfreq']  # MF files need reref
            dev_head_ts = [
                np.r_[np.c_[r, t[:, np.newaxis]], [[0, 0, 0, 1]]]
                for r, t in zip(rots, transs)
            ]
            del transs, rots
        elif isinstance(pos, dict):
            ts = np.array(list(pos.keys()), float)
            ts.sort()
            dev_head_ts = [pos[float(tt)] for tt in ts]
        else:
            raise TypeError('unknown pos type %s' % type(pos))
        if not (ts >= 0).all():  # pathological if not
            raise RuntimeError('Cannot have t < 0 in transform file')
        tend = raw.times[-1]
        assert not (ts < 0).any()
        assert not (ts > tend).any()
        if ts[0] > 0:
            ts = np.r_[[0.], ts]
            dev_head_ts.insert(0, raw.info['dev_head_t']['trans'])
        dev_head_ts = [{
            'trans': d,
            'to': raw.info['dev_head_t']['to'],
            'from': raw.info['dev_head_t']['from']
        } for d in dev_head_ts]
        if ts[-1] < tend:
            dev_head_ts.append(dev_head_ts[-1])
            ts = np.r_[ts, [tend]]
        offsets = raw.time_as_index(ts)
        offsets[-1] = raw.n_times  # fix for roundoff error
        assert offsets[-2] != offsets[-1]
        del ts
    if isinstance(cov, string_types):
        assert cov == 'simple'
        cov = make_ad_hoc_cov(raw.info, verbose=False)
    assert np.array_equal(offsets, np.unique(offsets))
    assert len(offsets) == len(dev_head_ts)
    approx_events = int(
        (raw.n_times / raw.info['sfreq']) / (stc.times[-1] - stc.times[0]))
    logger.info('Provided parameters will provide approximately %s event%s' %
                (approx_events, '' if approx_events == 1 else 's'))

    # get HPI freqs and reorder
    hpi_freqs = np.array(
        [x['custom_ref'][0] for x in raw.info['hpi_meas'][0]['hpi_coils']])
    n_freqs = len(hpi_freqs)
    order = [x['number'] - 1 for x in raw.info['hpi_meas'][0]['hpi_coils']]
    assert np.array_equal(np.unique(order), np.arange(n_freqs))
    hpi_freqs = hpi_freqs[order]
    hpi_order = raw.info['hpi_results'][0]['order'] - 1
    assert np.array_equal(np.unique(hpi_order), np.arange(n_freqs))
    hpi_freqs = hpi_freqs[hpi_order]

    # extract necessary info
    picks = pick_types(raw.info, meg=True, eeg=True)  # for simulation
    meg_picks = pick_types(raw.info, meg=True, eeg=False)  # for CHPI
    fwd_info = pick_info(raw.info, picks)
    fwd_info['projs'] = []
    logger.info('Setting up raw data simulation using %s head position%s' %
                (len(dev_head_ts), 's' if len(dev_head_ts) != 1 else ''))
    raw.preload_data(verbose=False)

    if isinstance(stc, VolSourceEstimate):
        verts = [stc.vertices]
    else:
        verts = stc.vertices
    src = _restrict_source_space_to(src, verts)

    # figure out our cHPI, ECG, and EOG dipoles
    dig = raw.info['dig']
    assert all([
        d['coord_frame'] == FIFF.FIFFV_COORD_HEAD for d in dig
        if d['kind'] == FIFF.FIFFV_POINT_HPI
    ])
    chpi_rrs = [d['r'] for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI]
    R, r0 = fit_sphere_to_headshape(raw.info, verbose=False)[:2]
    R /= 1000.
    r0 /= 1000.
    ecg_rr = np.array([[-R, 0, -3 * R]])
    eog_rr = [
        d['r'] for d in raw.info['dig']
        if d['ident'] == FIFF.FIFFV_POINT_NASION
    ][0]
    eog_rr = eog_rr - r0
    eog_rr = (eog_rr / np.sqrt(np.sum(eog_rr * eog_rr)) * 0.98 *
              R)[np.newaxis, :]
    eog_rr += r0
    eog_bem = make_sphere_model(r0,
                                head_radius=R,
                                relative_radii=(0.99, 1.),
                                sigmas=(0.33, 0.33),
                                verbose=False)
    # let's oscillate between resting (17 bpm) and reading (4.5 bpm) rate
    # http://www.ncbi.nlm.nih.gov/pubmed/9399231
    blink_rate = np.cos(2 * np.pi * 1. / 60. * raw.times)
    blink_rate *= 12.5 / 60.
    blink_rate += 4.5 / 60.
    blink_data = rng.rand(raw.n_times) < blink_rate / raw.info['sfreq']
    blink_data = blink_data * (rng.rand(raw.n_times) + 0.5)  # vary amplitudes
    blink_kernel = np.hanning(int(0.25 * raw.info['sfreq']))
    eog_data = np.convolve(blink_data, blink_kernel, 'same')[np.newaxis, :]
    eog_data += rng.randn(eog_data.shape[1]) * 0.05
    eog_data *= 100e-6
    del blink_data

    max_beats = int(np.ceil(raw.times[-1] * 70. / 60.))
    cardiac_idx = np.cumsum(
        rng.uniform(60. / 70., 60. / 50., max_beats) *
        raw.info['sfreq']).astype(int)
    cardiac_idx = cardiac_idx[cardiac_idx < raw.n_times]
    cardiac_data = np.zeros(raw.n_times)
    cardiac_data[cardiac_idx] = 1
    cardiac_kernel = np.concatenate([
        2 * np.hanning(int(0.04 * raw.info['sfreq'])),
        -0.3 * np.hanning(int(0.05 * raw.info['sfreq'])),
        0.2 * np.hanning(int(0.26 * raw.info['sfreq']))
    ],
                                    axis=-1)
    ecg_data = np.convolve(cardiac_data, cardiac_kernel, 'same')[np.newaxis, :]
    ecg_data += rng.randn(ecg_data.shape[1]) * 0.05
    ecg_data *= 3e-4
    del cardiac_data

    # Add to data file, then rescale for simulation
    for data, scale, exg_ch in zip([eog_data, ecg_data], [1e-3, 5e-4],
                                   ['EOG062', 'ECG063']):
        ch = pick_channels(raw.ch_names, [exg_ch])
        if len(ch) == 1:
            raw._data[ch[0], :] = data
        data *= scale

    evoked = EvokedArray(np.zeros((len(picks), len(stc.times))),
                         fwd_info,
                         stc.tmin,
                         verbose=False)
    stc_event_idx = np.argmin(np.abs(stc.times))
    event_ch = pick_channels(raw.info['ch_names'], ['STI101'])[0]
    used = np.zeros(raw.n_times, bool)
    stc_indices = np.arange(raw.n_times) % len(stc.times)
    raw._data[event_ch].fill(0)
    hpi_mag = 25e-9
    last_fwd = last_fwd_chpi = last_fwd_eog = last_fwd_ecg = src_sel = None
    for fi, (fwd, fwd_eog, fwd_ecg, fwd_chpi) in \
        enumerate(_make_forward_solutions(
            fwd_info, trans, src, bem, eog_bem, dev_head_ts, mindist,
            chpi_rrs, eog_rr, ecg_rr, n_jobs)):
        # must be fixed orientation
        fwd = convert_forward_solution(fwd,
                                       surf_ori=True,
                                       force_fixed=True,
                                       verbose=False)
        # just use one arbitrary direction
        fwd_eog = fwd_eog['sol']['data'][:, ::3]
        fwd_ecg = fwd_ecg['sol']['data'][:, ::3]
        fwd_chpi = fwd_chpi[:, ::3]

        if src_sel is None:
            src_sel = _stc_src_sel(fwd['src'], stc)
            if isinstance(stc, VolSourceEstimate):
                verts = [stc.vertices]
            else:
                verts = stc.vertices
            diff_ = sum([len(v) for v in verts]) - len(src_sel)
            if diff_ != 0:
                warnings.warn(
                    '%s STC vertices omitted due to fwd calculation' %
                    (diff_, ))
        if last_fwd is None:
            last_fwd, last_fwd_eog, last_fwd_ecg, last_fwd_chpi = \
                fwd, fwd_eog, fwd_ecg, fwd_chpi
            continue
        n_time = offsets[fi] - offsets[fi - 1]

        time_slice = slice(offsets[fi - 1], offsets[fi])
        assert not used[time_slice].any()
        stc_idxs = stc_indices[time_slice]
        event_idxs = np.where(stc_idxs == stc_event_idx)[0] + offsets[fi - 1]
        used[time_slice] = True
        logger.info('  Simulating data for %0.3f-%0.3f sec with %s event%s' %
                    (tuple(offsets[fi - 1:fi + 1] / raw.info['sfreq']) +
                     (len(event_idxs), '' if len(event_idxs) == 1 else 's')))

        # simulate brain data
        stc_data = stc.data[:, stc_idxs][src_sel]
        data = _interp(last_fwd['sol']['data'], fwd['sol']['data'], stc_data,
                       interp)
        simulated = EvokedArray(data, evoked.info, 0)
        if cov is not None:
            noise = generate_noise_evoked(simulated, cov, [1, -1, 0.2], rng)
            simulated.data += noise.data
        assert simulated.data.shape[0] == len(picks)
        assert simulated.data.shape[1] == len(stc_idxs)
        raw._data[picks, time_slice] = simulated.data

        # add ECG, EOG, and CHPI traces
        raw._data[picks, time_slice] += \
            _interp(last_fwd_eog, fwd_eog, eog_data[:, time_slice], interp)
        raw._data[meg_picks, time_slice] += \
            _interp(last_fwd_ecg, fwd_ecg, ecg_data[:, time_slice], interp)
        this_t = np.arange(offsets[fi - 1], offsets[fi]) / raw.info['sfreq']
        sinusoids = np.zeros((n_freqs, n_time))
        # use a separate loop index so the outer forward-solution counter
        # ``fi`` (written to the trigger channel below) is not clobbered
        for freq_i, freq in enumerate(hpi_freqs):
            sinusoids[freq_i] = 2 * np.pi * freq * this_t
            sinusoids[freq_i] = hpi_mag * np.sin(sinusoids[freq_i])
        raw._data[meg_picks, time_slice] += \
            _interp(last_fwd_chpi, fwd_chpi, sinusoids, interp)

        # add events
        raw._data[event_ch, event_idxs] = fi

        # prepare for next iteration
        last_fwd, last_fwd_eog, last_fwd_ecg, last_fwd_chpi = \
            fwd, fwd_eog, fwd_ecg, fwd_chpi
    assert used.all()
    logger.info('Done')
    return raw
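
A hedged sketch of how this simulator might be driven; every file name below is a placeholder, and `stc` must share the sampling rate of the raw data as required by the check above:

import mne

raw = mne.io.read_raw_fif('some_raw.fif', preload=True)  # placeholder path
stc = mne.read_source_estimate('some-stc')                # placeholder path
raw_sim = simulate_movement(raw, pos='head_positions.pos', stc=stc,
                            trans='subject-trans.fif', src='subject-src.fif',
                            bem='subject-bem-sol.fif', cov='simple',
                            interp='linear', n_jobs=1)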