Example #1
def test_cov_scaling():
    """Test rescaling covs."""
    evoked = read_evokeds(ave_fname,
                          condition=0,
                          baseline=(None, 0),
                          proj=True)
    cov = read_cov(cov_fname)['data']
    cov2 = read_cov(cov_fname)['data']

    assert_array_equal(cov, cov2)
    evoked.pick_channels([
        evoked.ch_names[k] for k in pick_types(evoked.info, meg=True, eeg=True)
    ])
    picks_list = _picks_by_type(evoked.info)
    scalings = dict(mag=1e15, grad=1e13, eeg=1e6)

    _apply_scaling_cov(cov2, picks_list, scalings=scalings)
    _apply_scaling_cov(cov, picks_list, scalings=scalings)
    assert_array_equal(cov, cov2)
    assert cov.max() > 1

    _undo_scaling_cov(cov2, picks_list, scalings=scalings)
    _undo_scaling_cov(cov, picks_list, scalings=scalings)
    assert_array_equal(cov, cov2)
    assert cov.max() < 1

    data = evoked.data.copy()
    _apply_scaling_array(data, picks_list, scalings=scalings)
    _undo_scaling_array(data, picks_list, scalings=scalings)
    assert_allclose(data, evoked.data, atol=1e-20)
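A minimal standalone sketch of what the covariance scaling above amounts to, using plain numpy on hypothetical 2-channel data (`_apply_scaling_cov` and `_undo_scaling_cov` are MNE-internal helpers; this is an illustration, not their implementation):

import numpy as np

# hypothetical toy setup: one magnetometer (T) and one EEG channel (V)
scale = np.array([1e15, 1e6])  # the mag/eeg scalings used in the test
cov = np.array([[4e-30, 0.],
                [0., 3e-12]])  # covariance in SI units

# scaling multiplies rows and columns by the channel scales:
# cov_scaled[i, j] = scale[i] * scale[j] * cov[i, j]
cov_scaled = cov * np.outer(scale, scale)
assert cov_scaled.max() > 1  # mirrors the assertion in the test

# undoing divides by the same outer product, recovering the original
cov_back = cov_scaled / np.outer(scale, scale)
assert np.allclose(cov_back, cov, atol=1e-20)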
Example #2
def _estimate_rank_meeg_signals(data,
                                info,
                                scalings,
                                tol='auto',
                                return_singular=False):
    """Estimate rank for M/EEG data.

    Parameters
    ----------
    data : np.ndarray of float, shape (n_channels, n_samples)
        The M/EEG signals.
    info : Info
        The measurement info.
    scalings : dict | 'norm' | np.ndarray | None
        The rescaling method to be applied. If dict, it will override the
        following default dict:

            dict(mag=1e15, grad=1e13, eeg=1e6)

        If 'norm', data will be scaled by channel-wise norms. If array,
        pre-specified norms will be used. If None, no scaling will be applied.
    tol : float | str
        Tolerance. See ``estimate_rank``.
    return_singular : bool
        If True, also return the singular values that were used
        to determine the rank.

    Returns
    -------
    rank : int
        Estimated rank of the data.
    s : array
        If return_singular is True, the singular values that were
        thresholded to determine the rank are also returned.
    """
    picks_list = _picks_by_type(info)
    _apply_scaling_array(data, picks_list, scalings)
    if data.shape[1] < data.shape[0]:
        logger.warning("You've got fewer samples than channels; the "
                       "rank estimate might be inaccurate.")
    out = estimate_rank(data,
                        tol=tol,
                        norm=False,
                        return_singular=return_singular)
    rank = out[0] if isinstance(out, tuple) else out
    ch_type = ' + '.join(list(zip(*picks_list))[0])
    logger.info('estimated rank (%s): %d' % (ch_type, rank))
    _undo_scaling_array(data, picks_list, scalings)
    return out
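`estimate_rank` itself is not shown here. As a rough sketch of the SVD thresholding it presumably performs, the stand-in below counts singular values above a tolerance; the 'auto' rule is an assumption for illustration, not MNE's exact formula:

import numpy as np

def _rank_from_singular_values(data, tol='auto'):
    # hypothetical stand-in for ``estimate_rank``: count singular
    # values above a tolerance
    s = np.linalg.svd(data, compute_uv=False)
    if tol == 'auto':
        # assumed rule: scale machine epsilon by the largest singular
        # value and the matrix size
        tol = s.max() * max(data.shape) * np.finfo(data.dtype).eps
    return int((s > tol).sum()), s

# rank-deficient toy data: 3 channels, but channel 2 duplicates channel 0
x = np.random.RandomState(0).randn(3, 100)
x[2] = x[0]
rank, s = _rank_from_singular_values(x)
assert rank == 2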
Example #3
    def fit(self, raw, verbose=None):
        """Fit the SNS operator

        Parameters
        ----------
        raw : instance of Raw
            The raw data to fit.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        sns : instance of SensorNoiseSuppression
            The modified instance.

        Notes
        -----
        In the resulting operator, bad channels will be reconstructed by
        using the good channels.
        """
        logger.info('Processing data with sensor noise suppression algorithm')
        logger.info('    Loading raw data')
        if not isinstance(raw, BaseRaw):
            raise TypeError('raw must be an instance of Raw, got %s' %
                            type(raw))
        good_picks = _pick_data_channels(raw.info, exclude='bads')
        if self._n_neighbors > len(good_picks) - 1:
            raise ValueError('n_neighbors must be at most len(good_picks) '
                             '- 1 (%s)' % (len(good_picks) - 1, ))
        logger.info('    Loading data')
        picks = _pick_data_channels(raw.info, exclude=())
        # The following lines are equivalent to this, but use less memory:
        # data_cov = np.cov(orig_data)
        # data_corrs = np.corrcoef(orig_data) ** 2
        logger.info('    Computing covariance for %s good channels' %
                    len(good_picks))
        data_cov = compute_raw_covariance(
            raw,
            picks=picks,
            reject=self._reject,
            flat=self._flat,
            verbose=False if verbose is None else verbose)['data']
        good_subpicks = np.searchsorted(picks, good_picks)
        del good_picks
        # scale the norms so everything is close enough to unity for our checks
        picks_list = _picks_by_type(pick_info(raw.info, picks), exclude=())
        _apply_scaling_cov(data_cov, picks_list, self._scalings)
        data_norm = np.diag(data_cov).copy()
        eps = np.finfo(np.float32).eps
        pos_mask = data_norm >= eps
        data_norm[pos_mask] = 1. / data_norm[pos_mask]
        data_norm[~pos_mask] = 0
        # normalize to squared correlations:
        # corr_ij ** 2 = cov_ij ** 2 / (cov_ii * cov_jj)
        data_corrs = data_cov * data_cov
        data_corrs *= data_norm
        data_corrs *= data_norm[:, np.newaxis]
        del data_norm
        operator = np.zeros((len(picks), len(picks)))
        logger.info('    Assembling spatial operator')
        for ii in range(len(picks)):
            # For each channel, the set of other signals is orthogonalized by
            # applying PCA to obtain an orthogonal basis of the subspace
            # spanned by the other channels.
            idx = np.argsort(data_corrs[ii][good_subpicks])[::-1]
            if ii in good_subpicks:
                idx = good_subpicks[idx[:self._n_neighbors + 1]].tolist()
                idx.pop(idx.index(ii))  # should be in there iff it is good
            else:
                idx = good_subpicks[idx[:self._n_neighbors]].tolist()
            # We have already effectively thresholded by zeroing out components
            eigval, eigvec = _pca(data_cov[np.ix_(idx, idx)], thresh=None)
            # Some of the eigenvalues could be zero, don't let it blow up
            norm = np.zeros(len(eigval))
            use_mask = eigval > eps
            norm[use_mask] = 1. / np.sqrt(eigval[use_mask])
            eigvec *= norm
            del eigval
            # The channel is projected on this basis and replaced by its
            # projection
            operator[ii, idx] = np.dot(eigvec, np.dot(data_cov[ii][idx],
                                                      eigvec))
            # Equivalently (and less efficiently):
            # eigvec = linalg.block_diag([1.], eigvec)
            # idx = np.concatenate(([ii], idx))
            # corr = np.dot(np.dot(eigvec.T, data_cov[np.ix_(idx, idx)]),
            #               eigvec)
            # operator[ii, idx[1:]] = np.dot(corr[0, 1:], eigvec[1:, 1:].T)
            if operator[ii, ii] != 0:
                raise RuntimeError('operator diagonal should have stayed '
                                   'zero for channel %d' % ii)
        # scale our results back (the ratio of channel scales is what matters)
        _apply_scaling_array(operator.T, picks_list, self._scalings)
        _undo_scaling_array(operator, picks_list, self._scalings)
        logger.info('Done')
        self._operator = operator
        self._used_chs = [raw.ch_names[pick] for pick in picks]
        return self
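The commented claim in `fit`, that squaring the covariance and dividing rows and columns by the variances reproduces `np.corrcoef(orig_data) ** 2`, follows from corr_ij = cov_ij / sqrt(cov_ii * cov_jj). A quick self-contained check on hypothetical toy data (the zero-variance guard from `fit` is omitted for brevity):

import numpy as np

rng = np.random.RandomState(42)
orig_data = rng.randn(4, 1000)  # hypothetical 4-channel recording
data_cov = np.cov(orig_data)

# same normalization as in fit(): square the covariance, then divide
# rows and columns by the variances
inv_var = 1. / np.diag(data_cov)
data_corrs = data_cov * data_cov
data_corrs *= inv_var
data_corrs *= inv_var[:, np.newaxis]

assert np.allclose(data_corrs, np.corrcoef(orig_data) ** 2)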