Example #1
    def _get_data(self, out=True, picks=None, item=None, verbose=None):
        """
        Return all data as a numpy array.

        Parameters
        ----------
        out : bool
            If True, return the data; if False, return None.
        %(picks_all)s
        verbose : bool, str, int, or None
            If not None, override default verbose level (see
            :func:`mne.verbose`). Defaults to self.verbose.

        Returns
        -------
        data : np.ndarray | None
            The epochs data.

        Notes
        -----
        Rejection in RtEpochs already happens at epoch creation,
        not on data loading.
        """
        if out:
            item = slice(None) if item is None else item
            select = self._item_to_select(item)  # indices or slice
            use_idx = np.arange(len(self._events))[select]
            if picks is None:
                picks = slice(None)
            else:
                picks = _picks_to_idx(self.info, picks, none='all', exclude=())
            return np.array([self._epoch_queue[idx][picks] for idx in use_idx])
Example #2
    def get_data_as_epoch(self, n_samples=1024, picks=None):
        """Return last n_samples from current time.

        Parameters
        ----------
        n_samples : int
            Number of samples to fetch.
        %(picks_all)s

        Returns
        -------
        epoch : instance of Epochs
            The samples fetched as an Epochs object.

        See Also
        --------
        mne.Epochs.iter_evoked
        """
        # set up a timeout in case the LSL process hangs; wait an
        # arbitrary 5x the expected time
        wait_time = n_samples * 5. / self.info['sfreq']

        # create an event at the start of the data collection
        events = np.expand_dims(np.array([0, 1, 1]), axis=0)
        samples, _ = self.client.pull_chunk(max_samples=n_samples,
                                            timeout=wait_time)
        data = np.vstack(samples).T

        picks = _picks_to_idx(self.info, picks, 'all', exclude=())
        info = pick_info(self.info, picks)
        return EpochsArray(data[picks][np.newaxis], info, events)
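For orientation, a minimal usage sketch of the method above, assuming mne_realtime's LSLClient and an LSL stream that is already running (the stream identifier is illustrative):

from mne_realtime import LSLClient

# 'mne_stream' is a hypothetical LSL stream identifier; info is
# inferred from the stream when not given explicitly
with LSLClient(info=None, host='mne_stream') as client:
    epoch = client.get_data_as_epoch(n_samples=512)  # an EpochsArray
    evoked = epoch.average()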
Example #3
    def get_data_as_epoch(self, n_samples=1024, picks=None):
        """Return last n_samples from current time.

        Parameters
        ----------
        n_samples : int
            Number of samples to fetch.
        %(picks_all)s

        Returns
        -------
        epoch : instance of Epochs
            The samples fetched as an Epochs object.

        See Also
        --------
        mne.Epochs.iter_evoked
        """
        ft_header = self.ft_client.getHeader()
        last_samp = ft_header.nSamples - 1
        start = last_samp - n_samples + 1
        stop = last_samp
        events = np.expand_dims(np.array([start, 1, 1]), axis=0)

        # get the data
        data = self.ft_client.getData([start, stop]).transpose()

        # create epoch from data
        picks = _picks_to_idx(self.info, picks, 'all', exclude=())
        info = pick_info(self.info, picks)
        return EpochsArray(data[picks][np.newaxis], info, events)
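The FieldTrip variant above is driven the same way; a sketch assuming mne_realtime's FieldTripClient and a FieldTrip buffer running locally (host and port here are illustrative defaults):

from mne_realtime import FieldTripClient

with FieldTripClient(host='localhost', port=1972) as ft_client:
    epoch = ft_client.get_data_as_epoch(n_samples=500)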
Example #4
def test_plot_bridged_electrodes():
    """Test plotting of bridged electrodes."""
    rng = np.random.default_rng(42)
    montage = make_standard_montage("biosemi64")
    info = create_info(montage.ch_names, 256, "eeg").set_montage("biosemi64")
    bridged_idx = [(0, 1), (2, 3)]
    n_epochs = 10
    ed_matrix = np.full((n_epochs, len(info.ch_names),
                         len(info.ch_names)), np.nan)
    triu_idx = np.triu_indices(len(info.ch_names), 1)
    for i in range(n_epochs):
        ed_matrix[i][triu_idx] = rng.random() + rng.random(triu_idx[0].size)
    fig = plot_bridged_electrodes(info, bridged_idx, ed_matrix,
                                  topomap_args=dict(names=info.ch_names,
                                                    vmax=1, show_names=True))
    # two bridged lines plus head outlines
    assert len(fig.axes[0].lines) == 6

    with pytest.raises(RuntimeError, match='Expected'):
        plot_bridged_electrodes(info, bridged_idx, np.zeros((5, 6, 7)))

    # test with multiple channel types
    raw = read_raw_fif(raw_fname, preload=True)
    picks = _picks_to_idx(raw.info, "eeg")
    raw._data[picks[0]] = raw._data[picks[1]]  # artificially bridge electrodes
    bridged_idx, ed_matrix = compute_bridged_electrodes(raw)
    plot_bridged_electrodes(raw.info, bridged_idx, ed_matrix)
Example #5
def write_raw_snirf(raw, fname):
    """Write continuous wave data to disk in SNIRF format.

    Parameters
    ----------
    raw : instance of Raw
        Data to write to file. Must contain only `fnirs_cw_amplitude` type.
    fname : str
        Path to the SNIRF data file.
    """

    picks = _picks_to_idx(raw.info, 'fnirs_cw_amplitude', exclude=[])
    assert len(picks) == len(raw.ch_names), 'Data must be fnirs_cw_amplitude'

    # Reordering channels
    num_chans = len(raw.ch_names)
    raw = raw.copy()
    raw.pick(picks=list(range(num_chans)[0::2]) + list(range(num_chans)[1::2]))

    with h5py.File(fname, 'w') as f:
        nirs = f.create_group('/nirs')
        f.create_dataset('formatVersion',
                         data=_str_encode(SPEC_FORMAT_VERSION))

        _add_metadata_tags(raw, nirs)
        _add_single_data_block(raw, nirs)
        _add_probe_info(raw, nirs)
        _add_stim_info(raw, nirs)
Example #6
def _handle_overlaps(raw, t, sphere, estimates):
    """Prepare for topomap, including merging channels."""
    picks = _picks_to_idx(raw.info, t, exclude=[], allow_empty=True)
    raw_subset = raw.copy().pick(picks=picks)
    _, pos, merge_channels, ch_names, ch_type, sphere, clip_origin = \
        mne.viz.topomap._prepare_topomap_plot(raw_subset, t, sphere=sphere)
    estmrg, ch_names = _merge_ch_data(estimates.copy()[picks], t, ch_names)
    return estmrg, pos, ch_names, sphere
Example #7
def run_glm(raw, design_matrix, noise_model='ar1', bins=0,
            n_jobs=1, verbose=0):
    """
    GLM fit for an MNE structure containing fNIRS data.

    This is a wrapper function for nilearn.stats.first_level_model.run_glm.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    design_matrix : as specified in Nilearn
        The design matrix as generated by
        `mne_nirs.make_first_level_design_matrix`.
        See example ``9.5.5. Examples of design matrices`` at
        https://nilearn.github.io/auto_examples/index.html
        for details on how to specify design matrices.
    noise_model : {'ar1', 'ols', 'arN', 'auto'}, optional
        The temporal variance model. Defaults to the first-order
        autoregressive model 'ar1'.
        The AR model can be set to any order by modifying the value
        of N, e.g. use `ar5` for a fifth-order model.
        If the string `auto` is provided, a model of order 4 times the
        sampling rate will be used.
    bins : int, optional
        Maximum number of discrete bins for the AR coef histogram/clustering.
        By default the value is 0, which will set the number of bins to the
        number of channels, effectively estimating the AR model for each
        channel.
    n_jobs : int, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : int, optional
        The verbosity level. Default is 0.

    Returns
    -------
    glm_estimates : RegressionResults
        RegressionResults class which stores the GLM results.
    """
    picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
    ch_names = raw.ch_names

    if noise_model == 'auto':
        noise_model = f"ar{int(np.round(raw.info['sfreq'] * 4))}"

    if bins == 0:
        bins = len(raw.ch_names)

    results = dict()
    for pick in picks:
        labels, glm_estimates = nilearn_glm(raw.get_data(pick).T,
                                            design_matrix.values,
                                            noise_model=noise_model, bins=bins,
                                            n_jobs=n_jobs, verbose=verbose)
        results[ch_names[pick]] = glm_estimates[labels[0]]

    return RegressionResults(raw.info, results, design_matrix)
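A minimal end-to-end sketch for the function above, assuming the mne_nirs import paths shown and a Raw object already converted to haemoglobin (raw_haemo is an illustrative name):

from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import run_glm

# raw_haemo: a Raw instance containing hbo/hbr channels
design_matrix = make_first_level_design_matrix(raw_haemo)
glm_est = run_glm(raw_haemo, design_matrix, noise_model='ar1')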
Example #8
def _handle_overlaps(info, t, sphere, estimates):
    """Prepare for topomap, including merging channels."""
    from mne.viz.topomap import _prepare_topomap_plot
    picks = _picks_to_idx(info, t, exclude=[], allow_empty=True)
    info_subset = pick_info(info, picks)
    _, pos, merge_channels, ch_names, ch_type, sphere, clip_origin = \
        _prepare_topomap_plot(info_subset, t, sphere=sphere)
    estmrg, ch_names = _merge_ch_data(estimates.copy()[picks], t, ch_names)
    return estmrg, pos, ch_names, sphere
Example #9
def _handle_picks(info, picks):
    """Pick the data channels or return picks."""
    if picks is None:
        out = mne.pick_types(info,
                             meg=True,
                             eeg=True,
                             ref_meg=False,
                             fnirs=True,
                             exclude='bads')
    else:
        out = _picks_to_idx(info, picks, exclude='bads')
    return out
Example #10
def run_GLM(raw,
            design_matrix,
            noise_model='ar1',
            bins=100,
            n_jobs=1,
            verbose=0):
    """
    Run GLM on data using supplied design matrix.

    This is a wrapper function for nilearn.stats.first_level_model.run_glm.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    design_matrix : as specified in Nilearn
        The design matrix.
    noise_model : {'ar1', 'ols'}, optional
        The temporal variance model. Defaults to 'ar1'.
    bins : int, optional
        Maximum number of discrete bins for the AR(1) coef histogram.
    n_jobs : int, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs'.
    verbose : int, optional
        The verbosity level. Default is 0.

    Returns
    -------
    glm_estimates : dict
        Keys correspond to the different label values; values are
        RegressionResults instances corresponding to the voxels.
    """
    from nilearn.stats.first_level_model import run_glm

    picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
    ch_names = raw.ch_names

    results = dict()
    for pick in picks:
        labels, glm_estimates = run_glm(raw.get_data(pick).T,
                                        design_matrix.values,
                                        noise_model=noise_model,
                                        bins=bins,
                                        n_jobs=n_jobs,
                                        verbose=verbose)
        results[ch_names[pick]] = glm_estimates[labels[0]]

    return results
Example #11
def test_compute_distance_to_sensors(picks, limits):
    """Test computation of distances between vertices and sensors."""
    src = read_source_spaces(fname_src)
    fwd = mne.read_forward_solution(fname_fwd)
    info = fwd['info']
    trans = read_trans(trans_fname)
    # trans = fwd['info']['mri_head_t']
    if isinstance(picks, str):
        kwargs = dict()
        kwargs[picks] = True
        if picks == 'eeg':
            info['dev_head_t'] = None  # should not break anything
        use_picks = pick_types(info, **kwargs, exclude=())
    else:
        use_picks = picks
    n_picks = len(_picks_to_idx(info, use_picks, 'data', exclude=()))

    # Make sure same vertices are used in src and fwd
    src[0]['inuse'] = fwd['src'][0]['inuse']
    src[1]['inuse'] = fwd['src'][1]['inuse']
    src[0]['nuse'] = fwd['src'][0]['nuse']
    src[1]['nuse'] = fwd['src'][1]['nuse']

    n_verts = src[0]['nuse'] + src[1]['nuse']

    # minimum distances between vertices and sensors
    depths = compute_distance_to_sensors(src,
                                         info=info,
                                         picks=use_picks,
                                         trans=trans)
    assert depths.shape == (n_verts, n_picks)
    assert limits[0] * 5 > depths.min()  # meaningful choice of limits
    assert_array_less(limits[0], depths)
    assert_array_less(depths, limits[1])

    # If the source space comes from the forward solution and trans=None
    # (i.e. identity), then depths2 should be the same as depths.
    depths2 = compute_distance_to_sensors(src=fwd['src'],
                                          info=info,
                                          picks=use_picks,
                                          trans=None)
    assert_allclose(depths, depths2, rtol=1e-5)

    if picks != 'eeg':
        # this should break things
        info['dev_head_t'] = None
        with pytest.raises(ValueError, match='Transform between meg<->head'):
            compute_distance_to_sensors(src, info, use_picks, trans)
Example #12
    def send_data(self, epochs, picks, tmin, tmax, buffer_size):
        """Read from raw object and send them to RtEpochs for processing.

        Parameters
        ----------
        epochs : instance of RtEpochs
            The epochs object.
        %(picks_all)s
        tmin : float
            Time instant to start receiving buffers.
        tmax : float
            Time instant to stop receiving buffers.
        buffer_size : int
            Size of each buffer in terms of number of samples.
        """
        # This is important to emulate a thread: instead of automatically
        # or constantly sending data, we invoke this explicitly to send
        # the next buffer.

        picks = _picks_to_idx(self.info, picks, 'all', exclude=())
        sfreq = self.info['sfreq']
        tmin_samp = int(round(sfreq * tmin))
        tmax_samp = int(round(sfreq * tmax))

        iter_times = list(
            zip(list(range(tmin_samp, tmax_samp, buffer_size)),
                list(range(buffer_size, tmax_samp + 1, buffer_size))))
        last_iter_sample = iter_times[-1][1] if iter_times else 0
        if last_iter_sample < tmax_samp:
            iter_times.append((last_iter_sample, tmax_samp))

        for ii, (start, stop) in enumerate(iter_times):
            # channels are picked in _append_epoch_to_queue. No need to pick
            # here
            data, times = self.raw[:, start:stop]

            # to undo the calibration done in _process_raw_buffer
            cals = np.array([[
                self.info['chs'][k]['range'] * self.info['chs'][k]['cal']
                for k in picks
            ]]).T

            data[picks, :] = data[picks, :] / cals

            epochs._process_raw_buffer(data)
Example #13
    def pick(self, picks, exclude=()):
        """Pick a subset of channels.

        Parameters
        ----------
        %(picks_all)s
        exclude : list | str
            Set of channels to exclude, only used when picking based on
            types (e.g., exclude="bads" when picks="meg").

        Returns
        -------
        inst : instance of ResultsGLM
            The modified instance.
        """
        picks = _picks_to_idx(self.info, picks, 'all', exclude,
                              allow_empty=False)
        pick_info(self.info, picks, copy=False)
        self._data = {key: self._data[key] for key in self.info.ch_names}
        return self
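For instance, the pick method above supports the usual MNE-style chaining (a sketch; glm_est stands for a fitted ResultsGLM instance, and copy() is assumed to be available as on other MNE containers):

glm_hbo = glm_est.copy().pick('hbo')  # keep only HbO channels, copy() assumed
glm_pair = glm_est.copy().pick(['S1_D1 hbo', 'S1_D2 hbo'])  # by channel name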
Example #14
def test_check_ch_locs():
    """Test _check_ch_locs behavior."""
    info = mne.io.read_info(fname_raw)
    assert _check_ch_locs(info=info)

    for picks in ([0], [0, 1], None):
        assert _check_ch_locs(info=info, picks=picks)

    for ch_type in ('meg', 'mag', 'grad', 'eeg'):
        assert _check_ch_locs(info=info, ch_type=ch_type)

    # drop locations for EEG
    picks_eeg = _picks_to_idx(info=info, picks='eeg')
    for idx in picks_eeg:
        info['chs'][idx]['loc'][:3] = np.nan

    # EEG tests should fail now
    assert _check_ch_locs(info=info, picks=picks_eeg) is False
    assert _check_ch_locs(info=info, ch_type='eeg') is False

    # tests for other (and "all") channels should still pass
    assert _check_ch_locs(info=info)
    assert _check_ch_locs(info=info, ch_type='mag')
Example #15
def test_fnirs_picks():
    """Test picking of fnirs types after different conversions."""
    raw = read_raw_nirx(fname_nirx_15_0)
    picks = _picks_to_idx(raw.info, 'fnirs_cw_amplitude')
    assert len(picks) == len(raw.ch_names)
    raw_subset = raw.copy().pick(picks='fnirs_cw_amplitude')
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE

    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_od')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'hbo')
    pytest.raises(ValueError, _picks_to_idx, raw.info, ['hbr'])
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_fd_phase')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'junk')

    raw = optical_density(raw)
    picks = _picks_to_idx(raw.info, 'fnirs_od')
    assert len(picks) == len(raw.ch_names)
    raw_subset = raw.copy().pick(picks='fnirs_od')
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD

    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_cw_amplitude')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'hbo')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'hbr')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_fd_phase')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'junk')

    raw = beer_lambert_law(raw)
    picks = _picks_to_idx(raw.info, 'hbo')
    assert len(picks) == len(raw.ch_names) / 2
    raw_subset = raw.copy().pick(picks='hbo')
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO

    picks = _picks_to_idx(raw.info, ['hbr'])
    assert len(picks) == len(raw.ch_names) / 2
    raw_subset = raw.copy().pick(picks=['hbr'])
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR

    picks = _picks_to_idx(raw.info, ['hbo', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['hbo', 'fnirs_od', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['hbo', 'fnirs_od'])
    assert len(picks) == len(raw.ch_names) / 2
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_cw_amplitude')
    pytest.raises(ValueError, _picks_to_idx, raw.info, ['fnirs_od'])
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'junk')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_fd_phase')
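The conversions exercised in this test follow MNE's standard fNIRS processing chain; for reference (fname points at a NIRx recording):

from mne.io import read_raw_nirx
from mne.preprocessing.nirs import optical_density, beer_lambert_law

raw = read_raw_nirx(fname)            # channel type: fnirs_cw_amplitude
raw_od = optical_density(raw)         # channel type: fnirs_od
raw_haemo = beer_lambert_law(raw_od)  # channel types: hbo / hbr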
Example #16
def test_picks_to_idx():
    """Test type integrity checking of picks."""
    info = create_info(12, 1000., 'eeg')
    _assert_channel_types(info)
    picks = np.arange(info['nchan'])
    # Array and list
    assert_array_equal(picks, _picks_to_idx(info, picks))
    assert_array_equal(picks, _picks_to_idx(info, list(picks)))
    with pytest.raises(TypeError, match='data type of float64'):
        _picks_to_idx(info, 1.)
    # None
    assert_array_equal(picks, _picks_to_idx(info, None))
    # Type indexing
    assert_array_equal(picks, _picks_to_idx(info, 'eeg'))
    assert_array_equal(picks, _picks_to_idx(info, ['eeg']))
    # Negative indexing
    assert_array_equal([len(picks) - 1], _picks_to_idx(info, len(picks) - 1))
    assert_array_equal([len(picks) - 1], _picks_to_idx(info, -1))
    assert_array_equal([len(picks) - 1], _picks_to_idx(info, [-1]))
    # Name indexing
    assert_array_equal([2], _picks_to_idx(info, info['ch_names'][2]))
    assert_array_equal(np.arange(5, 9),
                       _picks_to_idx(info, info['ch_names'][5:9]))
    with pytest.raises(ValueError, match='must be >= '):
        _picks_to_idx(info, -len(picks) - 1)
    with pytest.raises(ValueError, match='must be < '):
        _picks_to_idx(info, len(picks))
    with pytest.raises(ValueError, match='could not be interpreted'):
        _picks_to_idx(info, ['a', 'b'])
    with pytest.raises(ValueError, match='could not be interpreted'):
        _picks_to_idx(info, 'b')
    # bads behavior
    info['bads'] = info['ch_names'][1:2]
    picks_good = np.array([0] + list(range(2, 12)))
    assert_array_equal(picks_good, _picks_to_idx(info, None))
    assert_array_equal(picks_good, _picks_to_idx(info, None,
                                                 exclude=info['bads']))
    assert_array_equal(picks, _picks_to_idx(info, None, exclude=()))
    with pytest.raises(ValueError, match=' 1D, got'):
        _picks_to_idx(info, [[1]])
    # MEG types
    info = read_info(fname_mc)
    meg_picks = np.arange(306)
    mag_picks = np.arange(2, 306, 3)
    grad_picks = np.setdiff1d(meg_picks, mag_picks)
    assert_array_equal(meg_picks, _picks_to_idx(info, 'meg'))
    assert_array_equal(meg_picks, _picks_to_idx(info, ('mag', 'grad')))
    assert_array_equal(mag_picks, _picks_to_idx(info, 'mag'))
    assert_array_equal(grad_picks, _picks_to_idx(info, 'grad'))

    info = create_info(['eeg', 'foo'], 1000., 'eeg')
    with pytest.raises(RuntimeError, match='equivalent to channel types'):
        _picks_to_idx(info, 'eeg')
    with pytest.raises(ValueError, match='same length'):
        create_info(['a', 'b'], 1000., dict(hbo=['a'], hbr=['b']))
    info = create_info(['a', 'b'], 1000., ['hbo', 'hbr'])
    assert_array_equal(np.arange(2), _picks_to_idx(info, 'fnirs'))
    assert_array_equal([0], _picks_to_idx(info, 'hbo'))
    assert_array_equal([1], _picks_to_idx(info, 'hbr'))
    info = create_info(['a', 'b'], 1000., ['hbo', 'misc'])
    assert_array_equal(np.arange(len(info['ch_names'])),
                       _picks_to_idx(info, 'all'))
    assert_array_equal([0], _picks_to_idx(info, 'data'))
    info = create_info(['a', 'b'], 1000., ['fnirs_raw', 'fnirs_od'])
    assert_array_equal(np.arange(2), _picks_to_idx(info, 'fnirs'))
    assert_array_equal([0], _picks_to_idx(info, 'fnirs_raw'))
    assert_array_equal([1], _picks_to_idx(info, 'fnirs_od'))
    info = create_info(['a', 'b'], 1000., ['fnirs_raw', 'misc'])
    assert_array_equal(np.arange(len(info['ch_names'])),
                       _picks_to_idx(info, 'all'))
    assert_array_equal([0], _picks_to_idx(info, 'data'))
    info = create_info(['a', 'b'], 1000., ['fnirs_od', 'misc'])
    assert_array_equal(np.arange(len(info['ch_names'])),
                       _picks_to_idx(info, 'all'))
    assert_array_equal([0], _picks_to_idx(info, 'data'))
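To make the asserted semantics concrete, a small self-contained sketch (note that _picks_to_idx is a private helper; it is imported here from mne.io.pick as in the examples above, so the import path may change between versions):

from mne import create_info
from mne.io.pick import _picks_to_idx  # private API

info = create_info(['Fz', 'Cz', 'Pz'], 1000., 'eeg')
print(_picks_to_idx(info, None))   # [0 1 2] -> all good data channels
print(_picks_to_idx(info, 'eeg'))  # [0 1 2] -> by channel type
print(_picks_to_idx(info, 'Cz'))   # [1]     -> by channel name
print(_picks_to_idx(info, -1))     # [2]     -> negative indexing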
Example #17
File: _snirf.py Project: seapsy/mne-nirs
def write_raw_snirf(raw, fname):
    """Writer for continuous wave SNIRF data.

    Parameters
    ----------
    raw : instance of Raw
        Data to write to file. Must contain only `fnirs_cw_amplitude` type.
    fname : str
        Path to the SNIRF data file.
    """

    picks = _picks_to_idx(raw.info, 'fnirs_cw_amplitude', exclude=[])
    assert len(picks) == len(raw.ch_names), "Data must be fnirs_cw_amplitude"

    # Reordering channels
    num_chans = len(raw.ch_names)
    raw = raw.copy()
    raw.pick(picks=list(range(num_chans)[0::2]) + list(range(num_chans)[1::2]))

    with h5py.File(fname, "w") as f:
        f.create_dataset("nirs/data1/measurementList1/dataType", data=1)
        f.create_dataset("/nirs/data1/dataTimeSeries", data=raw.get_data().T)
        f.create_dataset("/nirs/data1/time", data=raw.times)

        # Store measurement and birth date
        datestr = raw.info["meas_date"].strftime("%Y-%m-%d")
        timestr = raw.info["meas_date"].strftime("%H:%M:%SZ")
        birthstr = '{0:02d}-{1:02d}-{2:02d}'.format(
            raw.info["subject_info"]['birthday'][0],
            raw.info["subject_info"]['birthday'][1],
            raw.info["subject_info"]['birthday'][2])
        f.create_dataset("nirs/metaDataTags/"
                         "MeasurementDate",
                         data=[datestr.encode('UTF-8')])
        f.create_dataset("nirs/metaDataTags/"
                         "MeasurementTime",
                         data=[timestr.encode('UTF-8')])
        f.create_dataset("nirs/metaDataTags/"
                         "DateOfBirth",
                         data=[birthstr.encode('UTF-8')])

        # Extract info from file names
        rgx = r'S(\d+)_D(\d+) (\d+)'
        chs = raw.info['chs']
        sources = [float(re.match(rgx, r['ch_name']).groups()[0]) for r in chs]
        detectors = [
            float(re.match(rgx, r['ch_name']).groups()[1]) for r in chs
        ]
        wavelengths = [
            float(re.match(rgx, r['ch_name']).groups()[2]) for r in chs
        ]

        # Create info summary and recode
        sources_sorted = np.sort(np.unique(sources))
        detectors_sorted = np.sort(np.unique(detectors))
        wavelengths_sorted = np.sort(np.unique(wavelengths))
        sources_sorted = [
            str(int(src)).encode('UTF-8') for src in sources_sorted
        ]
        detectors_sorted = [
            str(int(det)).encode('UTF-8') for det in detectors_sorted
        ]
        wavelengths_sorted = [
            str(wve).encode('UTF-8') for wve in wavelengths_sorted
        ]

        # Store source/detector/wavelength info
        f.create_dataset("nirs/probe/sourceLabels",
                         data=[('S'.encode('UTF-8') + src)
                               for src in sources_sorted])
        f.create_dataset("nirs/probe/detectorLabels",
                         data=[('D'.encode('UTF-8') + det)
                               for det in detectors_sorted])
        f.create_dataset("nirs/probe/wavelengths",
                         data=[float(wve) for wve in wavelengths_sorted])

        # Create 3d locs and store
        srclocs = np.empty((len(np.unique(sources_sorted)), 3))
        detlocs = np.empty((len(np.unique(detectors_sorted)), 3))
        for i, src in enumerate(sources_sorted):
            idx = sources.index(float(src))
            srclocs[i, :] = raw.info['chs'][idx]['loc'][3:6]
        for i, det in enumerate(detectors_sorted):
            idx = detectors.index(float(det))
            detlocs[i, :] = raw.info['chs'][idx]['loc'][6:9]
        f.create_dataset("nirs/probe/sourcePos3D", data=srclocs)
        f.create_dataset("nirs/probe/detectorPos3D", data=detlocs)
        f.create_dataset("nirs/metaDataTags/LengthUnit",
                         data=['m'.encode('UTF-8')])

        # Prep data for storing each MNE channel as SNIRF measurementList
        channels = [
            "measurementList" + str(idx + 1)
            for idx in range(len(raw.ch_names))
        ]
        sources = np.array([float(src) for src in sources])
        detectors = np.array([float(det) for det in detectors])
        sources_sorted = [float(src) for src in sources_sorted]
        detectors_sorted = [float(det) for det in detectors_sorted]
        wavelengths_sorted = [float(wve) for wve in wavelengths_sorted]
        w = [float(wve) for wve in wavelengths]
        wavelengths_index = [wavelengths_sorted.index(wve) + 1 for wve in w]

        for idx, ch in enumerate(channels):
            f.create_dataset('nirs/data1/' + ch + '/sourceIndex',
                             data=[sources_sorted.index(sources[idx]) + 1])
            f.create_dataset('nirs/data1/' + ch + '/detectorIndex',
                             data=[detectors_sorted.index(detectors[idx]) + 1])
            f.create_dataset('nirs/data1/' + ch + '/wavelengthIndex',
                             data=[wavelengths_index[idx]])

        # Store demographic info
        subject_id = raw.info["subject_info"]['first_name']
        f.create_dataset("nirs/metaDataTags/SubjectID",
                         data=[subject_id.encode('UTF-8')])

        # Convert MNE annotations to SNIRF stims
        for desc in np.unique(raw.annotations.description):
            key = "stim" + desc
            trgs = np.where(raw.annotations.description == desc)[0]
            stims = np.zeros((len(trgs), 3))
            for idx, trg in enumerate(trgs):
                stims[idx, :] = [
                    raw.annotations.onset[trg], 5.0,
                    raw.annotations.duration[trg]
                ]
            f.create_dataset('/nirs/' + key + '/data', data=stims)

        # Store probe landmarks
        if raw.info['dig'] is not None:
            diglocs = np.empty((len(raw.info['dig']), 3))
            digname = list()
            for idx, dig in enumerate(raw.info['dig']):
                ident = re.match(r"\d+ \(FIFFV_POINT_(\w+)\)",
                                 str(dig.get("ident")))
                if ident is not None:
                    digname.append(ident[1])
                else:
                    digname.append(str(dig.get("ident")))
                diglocs[idx, :] = dig.get("r")
            digname = [d.encode('UTF-8') for d in digname]
            f.create_dataset("nirs/probe/landmarkPos3D", data=diglocs)
            f.create_dataset("nirs/probe/landmarkLabels", data=digname)

        # Add non standard (but allowed) custom metadata tags
        f.create_dataset("nirs/metaDataTags/MNE_coordFrame",
                         data=[int(raw.info['dig'][0].get("coord_frame"))])
        if 'middle_name' in raw.info["subject_info"]:
            mname = [raw.info["subject_info"]['middle_name'].encode('UTF-8')]
            f.create_dataset("nirs/metaDataTags/middleName", data=mname)
        if 'last_name' in raw.info["subject_info"]:
            lname = [raw.info["subject_info"]['last_name'].encode('UTF-8')]
            f.create_dataset("nirs/metaDataTags/lastName", data=lname)
        if 'sex' in raw.info["subject_info"]:
            sex = str(int(raw.info["subject_info"]['sex'])).encode('UTF-8')
            f.create_dataset("nirs/metaDataTags/sex", data=[sex])
Example #18
def _write_optodes_tsv(raw, fname, overwrite=False, verbose=True):
    """Create an optodes.tsv file and save it.

    Parameters
    ----------
    raw : instance of Raw
        The data as MNE-Python Raw object.
    fname : str | BIDSPath
        Filename to save the optodes.tsv to.
    overwrite : bool
        Whether to overwrite the existing file.
        Defaults to False.
    verbose : bool
        Set verbose output to True or False.
    """
    picks = _picks_to_idx(raw.info, 'fnirs', exclude=[], allow_empty=True)
    sources = np.zeros(picks.shape)
    detectors = np.zeros(picks.shape)
    for ii in picks:
        # NIRS channel names take a specific form in MNE-Python.
        # The channel names always reflect the source and detector
        # pair, followed by the wavelength.
        # The following code extracts the source and detector
        # numbers from the channel name.
        ch1_name_info = re.match(r'S(\d+)_D(\d+) (\d+)',
                                 raw.info['chs'][ii]['ch_name'])
        sources[ii] = ch1_name_info.groups()[0]
        detectors[ii] = ch1_name_info.groups()[1]
    unique_sources = np.unique(sources)
    n_sources = len(unique_sources)
    unique_detectors = np.unique(detectors)
    names = np.concatenate(
        (["S" + str(s) for s in unique_sources.astype(int)],
         ["D" + str(d) for d in unique_detectors.astype(int)]))

    xs = np.zeros(names.shape)
    ys = np.zeros(names.shape)
    zs = np.zeros(names.shape)
    for i, source in enumerate(unique_sources):
        s_idx = np.where(sources == source)[0][0]
        xs[i] = raw.info["chs"][s_idx]["loc"][3]
        ys[i] = raw.info["chs"][s_idx]["loc"][4]
        zs[i] = raw.info["chs"][s_idx]["loc"][5]
    for i, detector in enumerate(unique_detectors):
        d_idx = np.where(detectors == detector)[0][0]
        xs[i + n_sources] = raw.info["chs"][d_idx]["loc"][6]
        ys[i + n_sources] = raw.info["chs"][d_idx]["loc"][7]
        zs[i + n_sources] = raw.info["chs"][d_idx]["loc"][8]

    ch_data = {
        'name': names,
        'type': np.concatenate(
            (np.full(len(unique_sources), 'source'),
             np.full(len(unique_detectors), 'detector'))),
        'x': xs,
        'y': ys,
        'z': zs,
    }
    _write_tsv(fname, ch_data, overwrite, verbose)
Example #19
    def get_event_data(self,
                       event_id,
                       tmin,
                       tmax,
                       picks=None,
                       stim_channel=None,
                       min_duration=0):
        """Simulate the data for a particular event-id.

        The epochs corresponding to a particular event-id are returned. The
        method remembers the epoch that was returned in the previous call and
        returns the next epoch in sequence. Once all epochs corresponding to
        an event-id have been exhausted, the method returns None.

        Parameters
        ----------
        event_id : int
            The id of the event to consider.
        tmin : float
            Start time before event.
        tmax : float
            End time after event.
        %(picks_all)s
        stim_channel : None | string | list of string
            Name of the stim channel or all the stim channels
            affected by the trigger. If None, the config variables
            'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
            etc. are read. If these are not found, it will default to
            'STI 014'.
        min_duration : float
            The minimum duration of a change in the events channel required
            to consider it as an event (in seconds).

        Returns
        -------
        data : 2D array, shape (n_channels, n_times)
            The epoch that is being simulated.
        """
        # Get the list of all events
        picks = _picks_to_idx(self.info, picks, 'all', exclude=())
        events = find_events(self.raw,
                             stim_channel=stim_channel,
                             verbose=False,
                             output='onset',
                             consecutive='increasing',
                             min_duration=min_duration)

        # Get the list of only the specified event
        idx = np.where(events[:, -1] == event_id)[0]
        event_samp = events[idx, 0]

        # Only do this the first time for each event type
        if event_id not in self._current:

            # Initialize pointer for the event to 0
            self._current[event_id] = 0
            self._last[event_id] = len(event_samp)

        # relative start and stop positions in samples
        tmin_samp = int(round(self.info['sfreq'] * tmin))
        tmax_samp = int(round(self.info['sfreq'] * tmax)) + 1

        if self._current[event_id] < self._last[event_id]:

            # Select the current event from the events list
            ev_samp = event_samp[self._current[event_id]]

            # absolute start and stop positions in samples
            start = ev_samp + tmin_samp - self.raw.first_samp
            stop = ev_samp + tmax_samp - self.raw.first_samp

            self._current[event_id] += 1  # increment pointer

            data, _ = self.raw[picks, start:stop]

            return data

        else:
            return None
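A sketch of how this simulation method is typically driven, assuming mne_realtime's MockRtClient wrapping a preloaded Raw object:

from mne_realtime import MockRtClient

rt_client = MockRtClient(raw)  # raw: a preloaded Raw instance
# returns one epoch per call, or None once the event type is exhausted
data = rt_client.get_event_data(event_id=1, tmin=-0.2, tmax=0.5,
                                picks='meg')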
Example #20
    def plot(self,
             fmin=0,
             fmax=None,
             proj=False,
             picks=None,
             ax=None,
             color='black',
             xscale='linear',
             area_mode='std',
             area_alpha=0.33,
             dB=True,
             estimate='auto',
             show=True,
             n_jobs=1,
             average=False,
             line_alpha=None,
             spatial_colors=True,
             verbose=None,
             sphere=None):
        from mne.viz.utils import _plot_psd, plt_show

        # set up default vars
        from packaging import version
        mne_version = version.parse(mne.__version__)
        has_new_mne = mne_version >= version.parse('0.22.0')
        has_20_mne = (mne_version >= version.parse('0.20.0')
                      and mne_version < version.parse('0.22.0'))
        if has_new_mne:
            from mne.defaults import _handle_default
            from mne.io.pick import _picks_to_idx
            from mne.viz._figure import _split_picks_by_type

            if ax is None:
                import matplotlib.pyplot as plt
                fig, ax = plt.subplots()
            else:
                fig = ax.figure
            ax_list = [ax]

            units = _handle_default('units', None)
            picks = _picks_to_idx(self.info, picks)
            titles = _handle_default('titles', None)
            scalings = _handle_default('scalings', None)

            make_label = len(ax_list) == len(fig.axes)
            xlabels_list = [False] * (len(ax_list) - 1) + [True]
            (picks_list, units_list, scalings_list,
             titles_list) = _split_picks_by_type(self, picks, units, scalings,
                                                 titles)
        elif has_20_mne:
            from mne.viz.utils import _set_psd_plot_params
            fig, picks_list, titles_list, units_list, scalings_list, \
                ax_list, make_label, xlabels_list = _set_psd_plot_params(
                    self.info, proj, picks, ax, area_mode)
        else:
            from mne.viz.utils import _set_psd_plot_params
            fig, picks_list, titles_list, units_list, scalings_list, ax_list, \
                make_label = _set_psd_plot_params(self.info, proj, picks, ax,
                                                  area_mode)
        del ax

        crop_inst = not (fmin == 0 and fmax is None)
        fmax = self.freqs[-1] if fmax is None else fmax

        inst = self.copy()
        if crop_inst:
            inst.crop(fmin=fmin, fmax=fmax)
        inst.average()

        # create list of psd's (one element for each channel type)
        psd_list = list()
        for picks in picks_list:
            psd_list.append(inst.data[picks])

        args = [
            inst, fig, inst.freqs, psd_list, picks_list, titles_list,
            units_list, scalings_list, ax_list, make_label, color, area_mode,
            area_alpha, dB, estimate, average, spatial_colors, xscale,
            line_alpha
        ]
        if has_20_mne or has_new_mne:
            args += [sphere, xlabels_list]

        fig = _plot_psd(*args)
        plt_show(show)
        return fig