def get_pca_mean_and_pre_whitener_epochs(epochs, picks, decim, pre_whitener):
    """Aux method based on ica._fit_epochs from mne v0.15"""

    if picks is None:
        picks = _pick_data_channels(epochs.info,
                                    exclude='bads',
                                    with_ref_meg=False)

    # filter out all the channels the raw wouldn't have initialized
    info = pick_info(epochs.info, picks)
    if info['comps']:
        info['comps'] = []

    # this should be a copy (picks is a list of ints)
    data = epochs.get_data()[:, picks]
    # this will be a view
    if decim is not None:
        data = data[:, :, ::decim]

    # This will make at least one copy (one from hstack, maybe one more from pre_whiten)
    data, pre_whitener = pre_whiten(np.hstack(data), epochs.info, picks,
                                    pre_whitener)

    pca_mean_ = np.mean(data, axis=1)

    return pca_mean_, pre_whitener
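A minimal usage sketch, assuming this helper and its companion `pre_whiten` are importable from the same module (the epochs file name below is a hypothetical placeholder):

import mne

epochs = mne.read_epochs('sample-epo.fif', preload=True)  # hypothetical file
# picks=None -> good data channels; decim=2 keeps every other sample
pca_mean_, pre_whitener = get_pca_mean_and_pre_whitener_epochs(
    epochs, picks=None, decim=2, pre_whitener=None)
print(pca_mean_.shape)  # (n_data_channels,)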
Example #2
    def fit(self, inst):
        '''
        Fit Peakachu to ERP data.
        This selects the channels that maximize peak strength; these channels
        are then used during `transform` to search for peaks.
        Different data can be passed to `fit` and `transform` - for example,
        `fit` could use the condition average while `transform` is applied to
        separate conditions.
        '''
        from mne.evoked import Evoked
        from mne.io.pick import _pick_data_channels, pick_info
        assert isinstance(inst, (Evoked, np.ndarray)), 'inst must be either' \
            ' Evoked or numpy array, got {}.'.format(type(inst))

        # deal with bad channels and non-data channels
        picks = _pick_data_channels(inst.info)

        self._info = pick_info(inst.info, picks)
        self._all_ch_names = [inst.ch_names[i] for i in picks]

        # get peaks
        peak_val, peak_ind = self._get_peaks(inst, select=picks)

        # select n_channels
        vals = peak_val if 'max' in self.select else -peak_val
        chan_ind = select_channels(vals, N=self.n_channels,
                                   connectivity=self.connectivity)
        self._chan_ind = [picks[i] for i in chan_ind]
        self._chan_names = [inst.ch_names[ch] for ch in self._chan_ind]
        self._peak_vals = peak_val
        return self
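Following the docstring, fitting on a condition average and transforming single conditions might look like this sketch (the `Peakachu` constructor arguments and the `transform` return values are assumptions, not shown in this snippet):

peakachu = Peakachu(select='max', n_channels=5)   # hypothetical constructor args
peakachu.fit(grand_average)                       # e.g. the condition-average Evoked
peaks_a = peakachu.transform(evoked_condition_a)  # search for peaks per condition
peaks_b = peakachu.transform(evoked_condition_b)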
def get_pca_mean_and_pre_whitener_raw(raw, picks, start, stop, decim, reject,
                                      flat, tstep, pre_whitener,
                                      reject_by_annotation):
    """Aux method based on ica._fit_raw from mne v0.15"""

    if picks is None:  # just use good data channels
        picks = _pick_data_channels(raw.info,
                                    exclude='bads',
                                    with_ref_meg=False)

    info = pick_info(raw.info, picks)
    if info['comps']:
        info['comps'] = []

    start, stop = _check_start_stop(raw, start, stop)

    reject_by_annotation = 'omit' if reject_by_annotation else None
    # this will be a copy
    data = raw.get_data(picks, start, stop, reject_by_annotation)

    # this will be a view
    if decim is not None:
        data = data[:, ::decim]

    # this will make a copy
    if (reject is not None) or (flat is not None):
        data, drop_inds_ = _reject_data_segments(data, reject, flat, decim,
                                                 info, tstep)
    # this may operate inplace or make a copy
    data, pre_whitener = pre_whiten(data, raw.info, picks, pre_whitener)

    pca_mean_ = np.mean(data, axis=1)

    return pca_mean_, pre_whitener
Example #4
def plot_coregistration(subject, subjects_dir, hcp_path, recordings_path,
                        info_from=(('data_type', 'rest'), ('run_index', 0)),
                        view_init=(('azim', 0), ('elev', 0))):
    """A diagnostic plot to show the HCP coregistration

    Parameters
    ----------
    subject : str
        The subject
    subjects_dir : str
        The path corresponding to MNE/freesurfer SUBJECTS_DIR (to be created)
    hcp_path : str
        The path where the HCP files can be found.
    recordings_path : str
        The path to converted data (including the head<->device transform).
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read.
        Must not be an empty-room recording, as sensor positions are stored
        in head coordinates for 4D systems and hence are not available in
        that case.
        Note that differences between the sensor positions across runs only
        show up beyond the 12th significant digit, hence are negligible.
    view_init : tuple of tuples | dict
        The initial view; defaults to an azimuth and elevation of 0,
        a simple lateral view.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object.
    """
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  #  noqa

    if isinstance(info_from, tuple):
        info_from = dict(info_from)
    if isinstance(view_init, tuple):
        view_init = dict(view_init)

    head_mri_t = read_trans(
        op.join(recordings_path, subject,
                '{}-head_mri-trans.fif'.format(subject)))

    info = read_info(subject=subject, hcp_path=hcp_path, **info_from)

    info = pick_info(info, _pick_data_channels(info, with_ref_meg=False))
    sens_pnts = np.array([c['loc'][:3] for c in info['chs']])
    sens_pnts = apply_trans(head_mri_t, sens_pnts)
    sens_pnts *= 1e3  # put in mm scale

    pnts, tris = read_surface(
        op.join(subjects_dir, subject, 'bem', 'inner_skull.surf'))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(*sens_pnts.T, color='purple', marker='o')
    ax.scatter(*pnts.T, color='green', alpha=0.3)
    ax.view_init(**view_init)
    fig.tight_layout()
    return fig
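A hedged calling sketch (all paths and the subject ID below are hypothetical placeholders):

fig = plot_coregistration(
    subject='100307', subjects_dir='/data/subjects',
    hcp_path='/data/HCP', recordings_path='/data/recordings',
    view_init=(('azim', 90), ('elev', 10)))
fig.savefig('coreg_check.png')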
Example #5
def test_otp_real():
    """Test OTP on real data."""
    for fname in (erm_fname, triux_fname):
        raw = read_raw_fif(fname, allow_maxshield='yes').crop(0, 1)
        raw.load_data().pick_channels(raw.ch_names[:10])
        raw_otp = oversampled_temporal_projection(raw, 1.)
        picks = _pick_data_channels(raw.info)
        reduction = (np.linalg.norm(raw[picks][0], axis=-1) /
                     np.linalg.norm(raw_otp[picks][0], axis=-1))
        assert reduction.min() > 1

    # Handling of acquisition skips
    raw = read_raw_fif(skip_fname, preload=True)
    raw.pick_channels(raw.ch_names[:10])
    raw_otp = oversampled_temporal_projection(raw, duration=1.)
Example #6
    def apply(self, inst):
        """Apply the operator

        Parameters
        ----------
        inst : instance of Raw
            The data on which to apply the operator.

        Returns
        -------
        inst : instance of Raw
            The input instance with cleaned data (operates inplace).
        """
        if isinstance(inst, BaseRaw):
            if not inst.preload:
                raise RuntimeError('raw data must be loaded, use '
                                   'raw.load_data() or preload=True')
            offsets = np.concatenate(
                [np.arange(0, len(inst.times), 10000), [len(inst.times)]])
            info = inst.info
            picks = pick_channels(info['ch_names'], self._used_chs)
            data_chs = [
                info['ch_names'][pick]
                for pick in _pick_data_channels(info, exclude=())
            ]
            missing = set(data_chs) - set(self._used_chs)
            if len(missing) > 0:
                raise RuntimeError('Not all data channels of inst were used '
                                   'to construct the operator: %s' %
                                   sorted(missing))
            missing = set(self._used_chs) - set(info['ch_names'][pick]
                                                for pick in picks)
            if len(missing) > 0:
                raise RuntimeError('Not all channels originally used to '
                                   'construct the operator are present: %s' %
                                   sorted(missing))
            for start, stop in zip(offsets[:-1], offsets[1:]):
                time_sl = slice(start, stop)
                inst._data[picks, time_sl] = np.dot(self._operator,
                                                    inst._data[picks, time_sl])
        else:
            # XXX Eventually this could support Evoked and Epochs, too
            raise TypeError('Only Raw instances are currently supported, got '
                            '%s' % type(inst))
        return inst
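A hedged end-to-end sketch of how such an operator is typically built and applied (the `SensorNoiseSuppression` constructor signature is an assumption based on the `fit` methods shown elsewhere on this page; the file name is hypothetical):

import mne

raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # hypothetical file
sns = SensorNoiseSuppression(n_neighbors=8)                # assumed signature
sns.fit(raw)    # builds the spatial operator and records the channels used
sns.apply(raw)  # cleans the raw data in place, block by block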
Example #8
    def __init__(self, inst, picks=None):
        '''
        Initialize the Surrogates object.

        #TODO Update documentation.
        '''
        from mne.io.pick import _pick_data_channels

        # flags
        self._normalized = False
        self._fft_cached = False

        # cache
        self._original_data_fft = None
        self.instance = None

        if not isinstance(inst, (BaseEpochs, SourceEstimate, np.ndarray)):
            raise ValueError('Must be an instance of ndarray, Epochs or '
                             'SourceEstimate. Got type {0}'.format(type(inst)))

        if isinstance(inst, BaseEpochs):
            # load the data if not loaded
            if not inst.preload:
                inst.load_data()
            # make sure right picks are taken
            if picks is None:
                picks = _pick_data_channels(inst.info, with_ref_meg=False)
            # returns array of shape (n_epochs, n_channels, n_times)
            self.original_data = inst.get_data()[:, picks, :]
            # cache the instance
            self.instance = inst.copy()

        elif isinstance(inst, SourceEstimate):  # SourceEstimate
            # array of shape (n_dipoles, n_times)
            self.original_data = inst.data
            # cache the instance
            self.instance = inst.copy()

        else:  # must be ndarray
            self.original_data = inst
            self.instance = inst.copy()
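A brief construction sketch (the `Surrogates` class name comes from the docstring; anything beyond `__init__` would be an assumption):

surr = Surrogates(epochs)                 # picks default to good data channels
print(surr.original_data.shape)           # (n_epochs, n_channels, n_times)
surr_arr = Surrogates(epochs.get_data())  # a plain ndarray is also accepted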
Example #9
def compute_forward_stack(subjects_dir,
                          subject,
                          recordings_path,
                          info_from=(('data_type', 'rest'), ('run_index', 0)),
                          fwd_params=None,
                          src_params=None,
                          hcp_path=op.curdir,
                          n_jobs=1,
                          verbose=None):
    """
    Convenience function for conducting standard MNE analyses.

    .. note::
       this function computes bem solutions, source spaces and forward models
       optimized for connectivity computation, i.e., the fsaverage space
       is morphed onto the subject's space.

    Parameters
    ----------
    subject : str
        The subject name.
    hcp_path : str
        The directory containing the HCP data.
    recordings_path : str
        The path where MEG data and transformations are stored.
    subjects_dir : str
        The directory containing the extracted HCP subject data.
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read.
        Must not be an empty-room recording, as sensor positions are stored
        in head coordinates for 4D systems and hence are not available in
        that case.
        Note that differences between the sensor positions across runs only
        show up beyond the 12th significant digit, hence are negligible.
    fwd_params : None | dict
        The forward parameters
    src_params : None | dict
        The src params. Defaults to:

        dict(subject='fsaverage', fname=None, spacing='oct6', n_jobs=2,
             surface='white', subjects_dir=subjects_dir, add_dist=True)
    n_jobs : int
        The number of jobs to use in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose)

    Returns
    -------
    out : dict
        A dictionary with the following keys:
            fwd : instance of mne.Forward
                The forward solution.
            src_subject : instance of mne.SourceSpace
                The source model on the subject's surface
            src_fsaverage : instance of mne.SourceSpace
                The source model on fsaverage's surface
            bem_sol : dict
                The BEM.
            info : instance of mne.io.meas_info.Info
                The actual measurement info used.
    """
    if isinstance(info_from, tuple):
        info_from = dict(info_from)

    head_mri_t = mne.read_trans(
        op.join(recordings_path, subject,
                '{}-head_mri-trans.fif'.format(subject)))

    src_params = _update_dict_defaults(
        src_params,
        dict(subject='fsaverage',
             spacing='oct6',
             n_jobs=n_jobs,
             surface='white',
             subjects_dir=subjects_dir,
             add_dist=True))

    add_source_space_distances = False
    if src_params['add_dist']:  # we want the distances on the morphed space
        src_params['add_dist'] = False
        add_source_space_distances = True

    src_fsaverage = mne.setup_source_space(**src_params)
    src_subject = mne.morph_source_spaces(src_fsaverage,
                                          subject,
                                          subjects_dir=subjects_dir)

    if add_source_space_distances:  # and here we compute them post hoc.
        src_subject = mne.add_source_space_distances(src_subject,
                                                     n_jobs=n_jobs)

    bems = mne.make_bem_model(subject,
                              conductivity=(0.3, ),
                              subjects_dir=subjects_dir,
                              ico=None)  # ico = None for morphed SP.
    bem_sol = mne.make_bem_solution(bems)
    bem_sol['surfs'][0]['coord_frame'] = 5

    info = read_info(subject=subject, hcp_path=hcp_path, **info_from)
    picks = _pick_data_channels(info, with_ref_meg=False)
    info = pick_info(info, picks)

    # here we assume that as a result of our MNE-HCP processing
    # all other transforms in info are identity
    for trans in ['dev_head_t', 'ctf_head_t']:
        #  'dev_ctf_t' is not identity
        assert np.sum(info[trans]['trans'] - np.eye(4)) == 0

    fwd = mne.make_forward_solution(info,
                                    trans=head_mri_t,
                                    bem=bem_sol,
                                    src=src_subject,
                                    n_jobs=n_jobs)

    return dict(fwd=fwd,
                src_subject=src_subject,
                src_fsaverage=src_fsaverage,
                bem_sol=bem_sol,
                info=info)
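A hedged call sketch, unpacking the returned dictionary (paths and the subject ID are hypothetical placeholders):

out = compute_forward_stack(
    subjects_dir='/data/subjects', subject='100307',
    recordings_path='/data/recordings', hcp_path='/data/HCP',
    n_jobs=2)
fwd, info = out['fwd'], out['info']
src_subject, src_fsaverage = out['src_subject'], out['src_fsaverage']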
Example #10
    def fit(self, raw, verbose=None):
        """Fit the SNS operator

        Parameters
        ----------
        raw : Instance of Raw
            The raw data to fit.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        sns : Instance of SensorNoiseSuppression
            The modified instance.

        Notes
        -----
        In the resulting operator, bad channels will be reconstructed by
        using the good channels.
        """
        logger.info('Processing data with sensor noise suppression algorithm')
        logger.info('    Loading raw data')
        if not isinstance(raw, BaseRaw):
            raise TypeError('raw must be an instance of Raw, got %s' %
                            type(raw))
        good_picks = _pick_data_channels(raw.info, exclude='bads')
        if self._n_neighbors > len(good_picks) - 1:
            raise ValueError('n_neighbors must be at most len(good_picks) '
                             '- 1 (%s)' % (len(good_picks) - 1, ))
        logger.info('    Loading data')
        picks = _pick_data_channels(raw.info, exclude=())
        # The following lines are equivalent to this, but require less mem use:
        # data_cov = np.cov(orig_data)
        # data_corrs = np.corrcoef(orig_data) ** 2
        logger.info('    Computing covariance for %s good channels' %
                    len(good_picks))
        data_cov = compute_raw_covariance(
            raw,
            picks=picks,
            reject=self._reject,
            flat=self._flat,
            verbose=False if verbose is None else verbose)['data']
        good_subpicks = np.searchsorted(picks, good_picks)
        del good_picks
        # scale the norms so everything is close enough to unity for our checks
        picks_list = _picks_by_type(pick_info(raw.info, picks), exclude=())
        _apply_scaling_cov(data_cov, picks_list, self._scalings)
        data_norm = np.diag(data_cov).copy()
        eps = np.finfo(np.float32).eps
        pos_mask = data_norm >= eps
        data_norm[pos_mask] = 1. / data_norm[pos_mask]
        data_norm[~pos_mask] = 0
        # normalize
        data_corrs = data_cov * data_cov
        data_corrs *= data_norm
        data_corrs *= data_norm[:, np.newaxis]
        del data_norm
        operator = np.zeros((len(picks), len(picks)))
        logger.info('    Assembling spatial operator')
        for ii in range(len(picks)):
            # For each channel, the set of other signals is orthogonalized by
            # applying PCA to obtain an orthogonal basis of the subspace
            # spanned by the other channels.
            idx = np.argsort(data_corrs[ii][good_subpicks])[::-1]
            if ii in good_subpicks:
                idx = good_subpicks[idx[:self._n_neighbors + 1]].tolist()
                idx.pop(idx.index(ii))  # should be in there iff it is good
            else:
                idx = good_subpicks[idx[:self._n_neighbors]].tolist()
            # We have already effectively thresholded by zeroing out components
            eigval, eigvec = _pca(data_cov[np.ix_(idx, idx)], thresh=None)
            # Some of the eigenvalues could be zero, don't let it blow up
            norm = np.zeros(len(eigval))
            use_mask = eigval > eps
            norm[use_mask] = 1. / np.sqrt(eigval[use_mask])
            eigvec *= norm
            del eigval
            # The channel is projected on this basis and replaced by its
            # projection
            operator[ii, idx] = np.dot(eigvec, np.dot(data_cov[ii][idx],
                                                      eigvec))
            # Equivalently (and less efficiently):
            # eigvec = linalg.block_diag([1.], eigvec)
            # idx = np.concatenate(([ii], idx))
            # corr = np.dot(np.dot(eigvec.T, data_cov[np.ix_(idx, idx)]),
            #               eigvec)
            # operator[ii, idx[1:]] = np.dot(corr[0, 1:], eigvec[1:, 1:].T)
            if operator[ii, ii] != 0:
                raise RuntimeError
        # scale our results back (the ratio of channel scales is what matters)
        _apply_scaling_array(operator.T, picks_list, self._scalings)
        _undo_scaling_array(operator, picks_list, self._scalings)
        logger.info('Done')
        self._operator = operator
        self._used_chs = [raw.ch_names[pick] for pick in picks]
        return self
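The covariance/normalization block above builds squared channel correlations without ever forming the full data array; a small self-contained check of that identity (illustrative only, not part of the original code):

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(4, 1000)                          # 4 channels, 1000 samples
cov = np.cov(x)
norm = 1. / np.diag(cov)
corrs = cov * cov * norm * norm[:, np.newaxis]  # mirrors the data_corrs recipe in fit()
assert np.allclose(corrs, np.corrcoef(x) ** 2)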
Example #11
def plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                 res=64, axes=None, names=None, show_names=False, mask=None,
                 mask_params=None, outlines='head', image_mask=None,
                 contours=6, image_interp='bilinear', show=True,
                 head_pos=None, onselect=None, axis=None):
    '''See the docstring for mne.viz.plot_topomap,
    which I've simply modified to return more objects.'''

    import matplotlib.pyplot as plt
    from matplotlib.widgets import RectangleSelector
    from mne.io.pick import (channel_type, pick_info, _pick_data_channels)
    from mne.utils import warn
    from mne.viz.utils import (_setup_vmin_vmax, plt_show)
    from mne.defaults import _handle_default
    from mne.channels.layout import _find_topomap_coords
    from mne.io.meas_info import Info
    from mne.viz.topomap import _check_outlines, _prepare_topomap, _griddata, _make_image_mask, _plot_sensors, \
        _draw_outlines

    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = set(channel_type(pos, idx)
                      for idx, _ in enumerate(pos["chs"]))
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.channels.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. " +
                             info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs
            from mne.channels.layout import (_merge_grad_data, find_layout,
                                             _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 columns. "
                 "Are you trying to specify XYZ coordinates? Perhaps the "
                 "mne.channels.create_eeg_layout function is useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data of "
                         "length %s, pos of length %s" % (len(data), len(pos)))

    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)

    if axis is not None:
        axes = axis
        warn('axis parameter is deprecated and will be removed in 0.13. '
             'Use axes instead.', DeprecationWarning)
    ax = axes if axes else plt.gca()
    pos_x, pos_y = _prepare_topomap(pos, ax)
    if outlines is None:
        xmin, xmax = pos_x.min(), pos_x.max()
        ymin, ymax = pos_y.min(), pos_y.max()
    else:
        xlim = np.inf, -np.inf,
        ylim = np.inf, -np.inf,
        mask_ = np.c_[outlines['mask_pos']]
        xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                      np.max(np.r_[xlim[1], mask_[:, 0]]))
        ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                      np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate data
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    Zi = _griddata(pos_x, pos_y, data, Xi, Yi)

    _is_default_outlines = False
    if isinstance(outlines, dict):
        _is_default_outlines = any(k.startswith('head') for k in outlines)

    if _is_default_outlines and image_mask is None:
        # prepare masking
        image_mask, pos = _make_image_mask(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # plot outline
    linewidth = mask_params['markeredgewidth']
    patch = None
    if outlines is not None and 'patch' in outlines:
        patch = outlines['patch']
        patch_ = patch() if callable(patch) else patch
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)

    # plot map and contour
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)

    # This tackles an incomprehensible matplotlib bug if no contours are
    # drawn. To avoid rescalings, we will always draw contours.
    # But if no contours are desired we only draw one and make it invisible.
    no_contours = False
    if contours in (False, None):
        contours, no_contours = 1, True
    cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                      linewidths=linewidth)
    if no_contours is True:
        for col in cont.collections:
            col.set_visible(False)

    if _is_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)
    if _is_default_outlines or patch is not None:
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be provided"
                             " (see `names` keyword).")
        if show_names is True:
            def _show_names(x):
                return x
        else:
            _show_names = show_names
        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    plt.subplots_adjust(top=.95)

    if onselect is not None:
        ax.RS = RectangleSelector(ax, onselect=onselect)
    plt_show(show)
    return ax, im, cont, pos_x, pos_y
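A minimal calling sketch for the plain-array path described above (positions and values are made up for illustration):

import numpy as np

data = np.array([1.0, -0.5, 0.3, 0.8])       # one value per sensor
pos = np.array([[-0.05, 0.0], [0.05, 0.0],
                [0.0, 0.05], [0.0, -0.05]])  # (n_channels, 2) x/y positions
ax, im, cont, pos_x, pos_y = plot_topomap(data, pos, contours=6, show=False)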
Example #12
def make_mne_forward(anatomy_path,
                     subject,
                     recordings_path,
                     info_from=(('data_type', 'rest'), ('run_index', 0)),
                     fwd_params=None, src_params=None,
                     hcp_path=op.curdir, n_jobs=1):
    """"
    Convenience script for conducting standard MNE analyses.

    Parameters
    ----------
    subject : str
        The subject name.
    hcp_path : str
        The directory containing the HCP data.
    recordings_path : str
        The path where MEG data and transformations are stored.
    anatomy_path : str
        The directory containing the extracted HCP subject data.
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read.
        Must not be an empty-room recording, as sensor positions are stored
        in head coordinates for 4D systems and hence are not available in
        that case.
        Note that differences between the sensor positions across runs only
        show up beyond the 12th significant digit, hence are negligible.
    fwd_params : None | dict
        The forward parameters
    src_params : None | dict
        The src params. Defaults to:

        dict(subject='fsaverage', fname=None, spacing='oct6', n_jobs=2,
             surface='white', subjects_dir=anatomy_path, add_dist=True)
    n_jobs : int
        The number of jobs to use in parallel.
    """
    if isinstance(info_from, tuple):
        info_from = dict(info_from)

    head_mri_t = mne.read_trans(
        op.join(recordings_path, subject, '{}-head_mri-trans.fif'.format(
            subject)))

    src_params = _update_dict_defaults(
        src_params,
        dict(subject='fsaverage', fname=None, spacing='oct6', n_jobs=n_jobs,
             surface='white', subjects_dir=anatomy_path, add_dist=True))

    add_source_space_distances = False
    if src_params['add_dist']:  # we want the distances on the morphed space
        src_params['add_dist'] = False
        add_source_space_distances = True

    src_fsaverage = mne.setup_source_space(**src_params)
    src_subject = mne.morph_source_spaces(
        src_fsaverage, subject, subjects_dir=anatomy_path)

    if add_source_space_distances:  # and here we compute them post hoc.
        src_subject = mne.add_source_space_distances(
            src_subject, n_jobs=n_jobs)

    bems = mne.make_bem_model(subject, conductivity=(0.3,),
                              subjects_dir=anatomy_path,
                              ico=None)  # ico = None for morphed SP.
    bem_sol = mne.make_bem_solution(bems)

    info = read_info_hcp(subject=subject, hcp_path=hcp_path, **info_from)
    picks = _pick_data_channels(info, with_ref_meg=False)
    info = pick_info(info, picks)

    # here we assume that as a result of our MNE-HCP processing
    # all other transforms in info are identity
    for trans in ['dev_head_t', 'ctf_head_t']:
        #  'dev_ctf_t' is not identity
        assert np.sum(info[trans]['trans'] - np.eye(4)) == 0

    fwd = mne.make_forward_solution(
        info, trans=head_mri_t, bem=bem_sol, src=src_subject,
        n_jobs=n_jobs)

    return dict(fwd=fwd, src_subject=src_subject,
                src_fsaverage=src_fsaverage,
                bem_sol=bem_sol, info=info)
Example #13
    def fit(self, raw, verbose=None):
        """Fit the SNS operator

        Parameters
        ----------
        raw : Instance of Raw
            The raw data to fit.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        sns : Instance of SensorNoiseSuppression
            The modified instance.

        Notes
        -----
        In the resulting operator, bad channels will be reconstructed by
        using the good channels.
        """
        logger.info('Processing data with sensor noise suppression algorithm')
        logger.info('    Loading raw data')
        if not isinstance(raw, BaseRaw):
            raise TypeError('raw must be an instance of Raw, got %s'
                            % type(raw))
        good_picks = _pick_data_channels(raw.info, exclude='bads')
        if self._n_neighbors > len(good_picks) - 1:
            raise ValueError('n_neighbors must be at most len(good_picks) '
                             '- 1 (%s)' % (len(good_picks) - 1,))
        logger.info('    Loading data')
        picks = _pick_data_channels(raw.info, exclude=())
        # The following lines are equivalent to this, but require less mem use:
        # data_cov = np.cov(orig_data)
        # data_corrs = np.corrcoef(orig_data) ** 2
        logger.info('    Computing covariance for %s good channels'
                    % len(good_picks))
        data_cov = compute_raw_covariance(
            raw, picks=picks, reject=self._reject, flat=self._flat,
            verbose=False if verbose is None else verbose)['data']
        good_subpicks = np.searchsorted(picks, good_picks)
        del good_picks
        # scale the norms so everything is close enough to unity for our checks
        picks_list = _picks_by_type(pick_info(raw.info, picks), exclude=())
        _apply_scaling_cov(data_cov, picks_list, self._scalings)
        data_norm = np.diag(data_cov).copy()
        eps = np.finfo(np.float32).eps
        pos_mask = data_norm >= eps
        data_norm[pos_mask] = 1. / data_norm[pos_mask]
        data_norm[~pos_mask] = 0
        # normalize
        data_corrs = data_cov * data_cov
        data_corrs *= data_norm
        data_corrs *= data_norm[:, np.newaxis]
        del data_norm
        operator = np.zeros((len(picks), len(picks)))
        logger.info('    Assembling spatial operator')
        for ii in range(len(picks)):
            # For each channel, the set of other signals is orthogonalized by
            # applying PCA to obtain an orthogonal basis of the subspace
            # spanned by the other channels.
            idx = np.argsort(data_corrs[ii][good_subpicks])[::-1]
            if ii in good_subpicks:
                idx = good_subpicks[idx[:self._n_neighbors + 1]].tolist()
                idx.pop(idx.index(ii))  # should be in there iff it is good
            else:
                idx = good_subpicks[idx[:self._n_neighbors]].tolist()
            # We have already effectively thresholded by zeroing out components
            eigval, eigvec = _pca(data_cov[np.ix_(idx, idx)], thresh=None)
            # Some of the eigenvalues could be zero, don't let it blow up
            norm = np.zeros(len(eigval))
            use_mask = eigval > eps
            norm[use_mask] = 1. / np.sqrt(eigval[use_mask])
            eigvec *= norm
            del eigval
            # The channel is projected on this basis and replaced by its
            # projection
            operator[ii, idx] = np.dot(eigvec,
                                       np.dot(data_cov[ii][idx], eigvec))
            # Equivalently (and less efficiently):
            # eigvec = linalg.block_diag([1.], eigvec)
            # idx = np.concatenate(([ii], idx))
            # corr = np.dot(np.dot(eigvec.T, data_cov[np.ix_(idx, idx)]),
            #               eigvec)
            # operator[ii, idx[1:]] = np.dot(corr[0, 1:], eigvec[1:, 1:].T)
            if operator[ii, ii] != 0:
                raise RuntimeError
        # scale our results back (the ratio of channel scales is what matters)
        _apply_scaling_array(operator.T, picks_list, self._scalings)
        _undo_scaling_array(operator, picks_list, self._scalings)
        logger.info('Done')
        self._operator = operator
        self._used_chs = [raw.ch_names[pick] for pick in picks]
        return self
Example #15
    def fit(self, raw, verbose=None):
        """Fit the SNS operator

        Parameters
        ----------
        raw : Instance of Raw
            The raw data to fit.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        sns : Instance of SensorNoiseSuppression
            The modified instance.
        """
        logger.info('Processing data with sensor noise suppression algorithm')
        logger.info('    Loading raw data')
        if not isinstance(raw, BaseRaw):
            raise TypeError('raw must be an instance of Raw, got %s'
                            % type(raw))
        good_picks = _pick_data_channels(raw.info, exclude='bads')
        if self._n_neighbors > len(good_picks) - 1:
            raise ValueError('n_neighbors must be at most len(good_picks) '
                             '- 1 (%s)' % (len(good_picks) - 1,))
        logger.info('    Loading data')
        raw = raw.copy()
        picks = _pick_data_channels(raw.info, exclude=())
        # The following lines are equivalent to this, but require less mem use:
        # data_cov = np.cov(orig_data)
        # data_corrs = np.corrcoef(orig_data) ** 2
        logger.info('    Computing covariance for %s good channels'
                    % len(good_picks))
        data_cov = np.eye(len(picks))
        good_cov = compute_raw_covariance(
            raw, picks=good_picks, reject=self._reject, flat=self._flat,
            verbose=False)['data']
        # re-index this
        good_picks = np.searchsorted(picks, good_picks)
        bad_picks = np.setdiff1d(np.arange(len(picks)), good_picks)
        data_cov[np.ix_(good_picks, good_picks)] = good_cov
        del good_picks
        data_norm = np.diag(data_cov)
        data_corrs = data_cov * data_cov
        data_corrs /= data_norm
        data_corrs /= data_norm[:, np.newaxis]
        del data_norm
        data_cov *= len(raw.times)
        operator = np.zeros((len(picks), len(picks)))
        logger.info('    Assembling spatial operator')
        for ii in range(len(picks)):
            # For each channel, the set of other signals is orthogonalized by
            # applying PCA to obtain an orthogonal basis of the subspace
            # spanned by the other channels.
            if ii in bad_picks:
                operator[ii, ii] = 1.
                continue
            idx = np.argsort(data_corrs[ii])[::-1][:self._n_neighbors + 1]
            idx = idx.tolist()
            idx.pop(idx.index(ii))  # should be in there
            # XXX Eventually we might want to actually threshold here (with
            # rank-deficient data it could matter)
            eigval, eigvec = _pca(data_cov[np.ix_(idx, idx)], thresh=None)
            eigvec *= 1. / np.sqrt(eigval)
            del eigval
            # augment with given channel
            eigvec = np.vstack(([[1] + [0] * self._n_neighbors],
                               np.hstack((np.zeros((self._n_neighbors, 1)),
                                          eigvec))))
            idx = np.concatenate(([ii], idx))
            corr = np.dot(np.dot(eigvec.T, data_cov[np.ix_(idx, idx)]), eigvec)
            # The channel is projected on this basis and replaced by its
            # projection
            operator[ii, idx[1:]] = np.dot(corr[0, 1:], eigvec[1:, 1:].T)
        logger.info('Done')
        self._operator = operator
        self._used_chs = [raw.ch_names[pick] for pick in picks]
        return self
Example #16
import mne
import numpy as np
from sklearn.decomposition import FastICA
from mne.decoding import UnsupervisedSpatialFilter
from mne.preprocessing import ICA

# p25_dat (raw data) and events are assumed to be defined earlier in the script
epochs = mne.Epochs(p25_dat,
                    events, {
                        "target": 1,
                        "not-target": 2
                    },
                    preload=True)

X = epochs["target"].get_data()
ica = UnsupervisedSpatialFilter(FastICA(), average=False)
ica_data = ica.fit_transform(X)
ev2 = mne.EvokedArray(
    np.mean(ica_data, axis=0),
    mne.create_info(32, epochs.info['sfreq'], ch_types='eeg'))

ev2.plot(show=False)

# ICA

# explore difference in components based on "epoch", seems to be a split after
# epoch ~30, is this a different test? Group by
ica = ICA(n_components=.99, method='fastica')

ica.fit(epochs["target"]).plot_components(inst=epochs["target"])

ica.fit(epochs["not-target"]).plot_components(inst=epochs["not-target"])

from mne.io.pick import _pick_data_channels
# look at target where user got correct answer vs not-target...
picks = _pick_data_channels(epochs.info, exclude='bads', with_ref_meg=False)
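A hedged continuation of the target vs. not-target comparison sketched in the comments above, restricted to the selected data channels (the use of `plot_compare_evokeds` is an assumption about intent, not part of the original script):

evokeds = {'target': epochs["target"].average(),
           'not-target': epochs["not-target"].average()}
mne.viz.plot_compare_evokeds(evokeds, picks=picks)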
def _plot_topomap(data, pos, vmin=None, vmax=None, cmap=None, sensors=True,
                  res=64, axes=None, names=None, show_names=False, mask=None,
                  mask_params=None, outlines='head',
                  contours=6, image_interp='bilinear', show=True,
                  head_pos=None, onselect=None, extrapolate='box', border=0):
    import matplotlib.pyplot as plt
    from matplotlib.widgets import RectangleSelector
    data = np.asarray(data)

    if isinstance(pos, Info):  # infer pos from Info object
        picks = _pick_data_channels(pos)  # pick only data channels
        pos = pick_info(pos, picks)

        # check if there is only 1 channel type, and n_chans matches the data
        ch_type = {channel_type(pos, idx)
                   for idx, _ in enumerate(pos["chs"])}
        info_help = ("Pick Info with e.g. mne.pick_info and "
                     "mne.io.pick.channel_indices_by_type.")
        if len(ch_type) > 1:
            raise ValueError("Multiple channel types in Info structure. "
                             + info_help)
        elif len(pos["chs"]) != data.shape[0]:
            raise ValueError("Number of channels in the Info object and "
                             "the data array does not match. " + info_help)
        else:
            ch_type = ch_type.pop()

        if any(type_ in ch_type for type_ in ('planar', 'grad')):
            # deal with grad pairs
            from mne.channels.layout import (_merge_grad_data, find_layout,
                                             _pair_grad_sensors)
            picks, pos = _pair_grad_sensors(pos, find_layout(pos))
            data = _merge_grad_data(data[picks]).reshape(-1)
        else:
            picks = list(range(data.shape[0]))
            pos = _find_topomap_coords(pos, picks=picks)

    if data.ndim > 1:
        raise ValueError("Data needs to be array of shape (n_sensors,); got "
                         "shape %s." % str(data.shape))

    # Give a helpful error message for common mistakes regarding the position
    # matrix.
    pos_help = ("Electrode positions should be specified as a 2D array with "
                "shape (n_channels, 2). Each row in this matrix contains the "
                "(x, y) position of an electrode.")
    if pos.ndim != 2:
        error = ("{ndim}D array supplied as electrode positions, where a 2D "
                 "array was expected").format(ndim=pos.ndim)
        raise ValueError(error + " " + pos_help)
    elif pos.shape[1] == 3:
        error = ("The supplied electrode positions matrix contains 3 columns. "
                 "Are you trying to specify XYZ coordinates? Perhaps the "
                 "mne.channels.create_eeg_layout function is useful for you.")
        raise ValueError(error + " " + pos_help)
    # No error is raised in case of pos.shape[1] == 4. In this case, it is
    # assumed the position matrix contains both (x, y) and (width, height)
    # values, such as Layout.pos.
    elif pos.shape[1] == 1 or pos.shape[1] > 4:
        raise ValueError(pos_help)

    if len(data) != len(pos):
        raise ValueError("Data and pos need to be of same length. Got data of "
                         "length %s, pos of length %s" % (len(data), len(pos)))

    norm = min(data) >= 0
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
    if cmap is None:
        cmap = 'Reds' if norm else 'RdBu_r'

    pos, outlines = _check_outlines(pos, outlines, head_pos)
    assert isinstance(outlines, dict)

    ax = axes if axes else plt.gca()
    _prepare_topomap(pos, ax)

    _use_default_outlines = any(k.startswith('head') for k in outlines)

    if _use_default_outlines:
        # prepare masking
        _autoshrink(outlines, pos, res)

    mask_params = _handle_default('mask_params', mask_params)

    # find mask limits
    xlim = np.inf, -np.inf,
    ylim = np.inf, -np.inf,
    mask_ = np.c_[outlines['mask_pos']]
    xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
                  np.max(np.r_[xlim[1], mask_[:, 0]]))
    ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
                  np.max(np.r_[ylim[1], mask_[:, 1]]))

    # interpolate the data, we multiply clip radius by 1.06 so that pixelated
    # edges of the interpolated image would appear under the mask
    head_radius = (None if extrapolate == 'local' else
                   outlines['clip_radius'][0] * 1.06)
    xi = np.linspace(xmin, xmax, res)
    yi = np.linspace(ymin, ymax, res)
    Xi, Yi = np.meshgrid(xi, yi)
    interp = _GridData(pos, extrapolate, head_radius, border).set_values(data)
    Zi = interp.set_locations(Xi, Yi)()

    # plot outline
    patch_ = None
    if 'patch' in outlines:
        patch_ = outlines['patch']
        patch_ = patch_() if callable(patch_) else patch_
        patch_.set_clip_on(False)
        ax.add_patch(patch_)
        ax.set_transform(ax.transAxes)
        ax.set_clip_path(patch_)
    if _use_default_outlines:
        from matplotlib import patches
        patch_ = patches.Ellipse((0, 0),
                                 2 * outlines['clip_radius'][0],
                                 2 * outlines['clip_radius'][1],
                                 clip_on=True,
                                 transform=ax.transData)

    # plot interpolated map
    im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
                   aspect='equal', extent=(xmin, xmax, ymin, ymax),
                   interpolation=image_interp)

    # This tackles an incomprehensible matplotlib bug if no contours are
    # drawn. To avoid rescalings, we will always draw contours.
    # But if no contours are desired we only draw one and make it invisible.
    linewidth = mask_params['markeredgewidth']
    no_contours = False
    if isinstance(contours, (np.ndarray, list)):
        pass  # contours precomputed
    elif contours == 0:
        contours, no_contours = 1, True
    if (Zi == Zi[0, 0]).all():
        cont = None  # can't make contours for constant-valued functions
    else:
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore')
            cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
                              linewidths=linewidth / 2.)
    if no_contours and cont is not None:
        for col in cont.collections:
            col.set_visible(False)

    if patch_ is not None:
        im.set_clip_path(patch_)
        if cont is not None:
            for col in cont.collections:
                col.set_clip_path(patch_)

    pos_x, pos_y = pos.T
    if sensors is not False and mask is None:
        _plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
    elif sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)
        idx = np.where(~mask)[0]
        _plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
    elif not sensors and mask is not None:
        idx = np.where(mask)[0]
        ax.plot(pos_x[idx], pos_y[idx], **mask_params)

    if isinstance(outlines, dict):
        _draw_outlines(ax, outlines)

    if show_names:
        if names is None:
            raise ValueError("To show names, a list of names must be provided"
                             " (see `names` keyword).")
        if show_names is True:
            def _show_names(x):
                return x
        else:
            _show_names = show_names
        show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
        for ii, (p, ch_id) in enumerate(zip(pos, names)):
            if ii not in show_idx:
                continue
            ch_id = _show_names(ch_id)
            ax.text(p[0], p[1], ch_id, horizontalalignment='center',
                    verticalalignment='center', size='x-small')

    if onselect is not None:
        ax.RS = RectangleSelector(ax, onselect=onselect)
    plt_show(show)
    return im, cont, interp, patch_
Example #18
def compute_forward_stack(subjects_dir,
                          subject,
                          recordings_path,
                          info_from=(('data_type', 'rest'), ('run_index', 0)),
                          fwd_params=None, src_params=None,
                          hcp_path=op.curdir, n_jobs=1, verbose=None):
    """
    Convenience function for conducting standard MNE analyses.

    .. note::
       this function computes bem solutions, source spaces and forward models
       optimized for connectivity computation, i.e., the fsaverage space
       is morphed onto the subject's space.

    Parameters
    ----------
    subject : str
        The subject name.
    hcp_path : str
        The directory containing the HCP data.
    recordings_path : str
        The path where MEG data and transformations are stored.
    subjects_dir : str
        The directory containing the extracted HCP subject data.
    info_from : tuple of tuples | dict
        The reader info concerning the data from which sensor positions
        should be read.
        Must not be an empty-room recording, as sensor positions are stored
        in head coordinates for 4D systems and hence are not available in
        that case.
        Note that differences between the sensor positions across runs only
        show up beyond the 12th significant digit, hence are negligible.
    fwd_params : None | dict
        The forward parameters
    src_params : None | dict
        The src params. Defaults to:

        dict(subject='fsaverage', fname=None, spacing='oct6', n_jobs=2,
             surface='white', subjects_dir=subjects_dir, add_dist=True)
    n_jobs : int
        The number of jobs to use in parallel.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose)

    Returns
    -------
    out : dict
        A dictionary with the following keys:
            fwd : instance of mne.Forward
                The forward solution.
            src_subject : instance of mne.SourceSpace
                The source model on the subject's surface
            src_fsaverage : instance of mne.SourceSpace
                The source model on fsaverage's surface
            bem_sol : dict
                The BEM.
            info : instance of mne.io.meas_info.Info
                The actual measurement info used.
    """
    if isinstance(info_from, tuple):
        info_from = dict(info_from)

    head_mri_t = mne.read_trans(
        op.join(recordings_path, subject, '{}-head_mri-trans.fif'.format(
            subject)))
    
    src_defaults = dict(subject='fsaverage', spacing='oct6', n_jobs=n_jobs,
                        surface='white', subjects_dir=subjects_dir,
                        add_dist=True)
    if 'fname' in mne.fixes._get_args(mne.setup_source_space):
        # needed for mne-0.14 and below
        src_defaults.update(dict(fname=None))
    else:
        # remove 'fname' argument (if necessary) when using mne-0.15+
        if src_params is not None and 'fname' in src_params:
            del src_params['fname']
    src_params = _update_dict_defaults(src_params, src_defaults)

    add_source_space_distances = False
    if src_params['add_dist']:  # we want the distances on the morphed space
        src_params['add_dist'] = False
        add_source_space_distances = True

    src_fsaverage = mne.setup_source_space(**src_params)
    src_subject = mne.morph_source_spaces(
        src_fsaverage, subject, subjects_dir=subjects_dir)

    if add_source_space_distances:  # and here we compute them post hoc.
        src_subject = mne.add_source_space_distances(
            src_subject, n_jobs=n_jobs)

    bems = mne.make_bem_model(subject, conductivity=(0.3,),
                              subjects_dir=subjects_dir,
                              ico=None)  # ico = None for morphed SP.
    bem_sol = mne.make_bem_solution(bems)
    bem_sol['surfs'][0]['coord_frame'] = 5

    info = read_info(subject=subject, hcp_path=hcp_path, **info_from)
    picks = _pick_data_channels(info, with_ref_meg=False)
    info = pick_info(info, picks)

    # here we assume that as a result of our MNE-HCP processing
    # all other transforms in info are identity
    for trans in ['dev_head_t', 'ctf_head_t']:
        #  'dev_ctf_t' is not identity
        assert np.sum(info[trans]['trans'] - np.eye(4)) == 0

    fwd = mne.make_forward_solution(
        info, trans=head_mri_t, bem=bem_sol, src=src_subject,
        n_jobs=n_jobs)

    return dict(fwd=fwd, src_subject=src_subject,
                src_fsaverage=src_fsaverage,
                bem_sol=bem_sol, info=info)