Example #1
def test_beer_lambert(fname, fmt, tmpdir):
    """Test converting NIRX files."""
    assert fmt in ('nirx', 'fif')
    raw = read_raw_nirx(fname)
    if fmt == 'fif':
        raw.save(tmpdir.join('test_raw.fif'))
        raw = read_raw_fif(tmpdir.join('test_raw.fif'))
    assert 'fnirs_cw_amplitude' in raw
    with pytest.deprecated_call():
        assert 'fnirs_raw' in raw
    assert 'fnirs_od' not in raw
    raw = optical_density(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    with pytest.deprecated_call():
        assert 'fnirs_raw' not in raw
    assert 'fnirs_od' in raw
    assert 'hbo' not in raw
    raw = beer_lambert_law(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    with pytest.deprecated_call():
        assert 'fnirs_raw' not in raw
    assert 'fnirs_od' not in raw
    assert 'hbo' in raw
    assert 'hbr' in raw
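Taken together, the assertions above trace the standard MNE-Python fNIRS pipeline. A minimal sketch of that chain outside the test harness, assuming `my_nirx_folder` is a hypothetical path to a NIRx recording:

from mne.io import read_raw_nirx
from mne.preprocessing.nirs import optical_density, beer_lambert_law

raw = read_raw_nirx('my_nirx_folder')  # 'fnirs_cw_amplitude' channels
raw_od = optical_density(raw)          # -> 'fnirs_od' channels
raw_hb = beer_lambert_law(raw_od)      # -> 'hbo' and 'hbr' channels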
Example #2
def list_detectors(raw):
    """
    List all the detectors in the fNIRS montage.

    Parameters
    ----------
    raw : instance of Raw
        Raw instance containing fNIRS data.

    Returns
    -------
    detectors : ndarray
        Unique detector numbers in ascending order.
    """
    _validate_type(raw, BaseRaw, 'raw')

    picks = pick_types(raw.info, meg=False, eeg=False, fnirs=True,
                       exclude=[])
    if not len(picks):
        raise RuntimeError('Listing detectors is for fNIRS signals only.')

    detectors = list()
    ch_names = raw.ch_names
    for pick in picks:
        x = re.search(r"S(\d+)_D(\d+)", ch_names[pick])
        detectors.append(int(x.group(2)))

    detectors = np.unique(detectors)

    return detectors
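The regex above assumes fNIRS channel names of the form 'S<i>_D<j> <wavelength>' (e.g. 'S1_D2 760'); a quick standalone check of that parsing:

import re

x = re.search(r"S(\d+)_D(\d+)", "S1_D2 760")
print(int(x.group(1)), int(x.group(2)))  # 1 2 -> source 1, detector 2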
Example #3
def get_long_channels(raw, min_dist=0.01):
    """
    Return channels with a long source-detector separation.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    min_dist : number
        Minimum source-detector distance of returned channels (m).

    Returns
    -------
    raw : instance of Raw
        Raw instance with only long channels.
    """

    long_chans = raw.copy().load_data()
    _validate_type(long_chans, BaseRaw, 'raw')

    picks = mne.pick_types(long_chans.info, meg=False, eeg=False, fnirs=True)
    if not len(picks):
        raise RuntimeError('Long channel extraction for fNIRS signals only.')

    dists = source_detector_distances(long_chans.info, picks=picks)
    long_chans.pick(picks[dists > min_dist])

    return long_chans
Example #4
def get_short_channels(raw, max_dist=0.01):
    """
    Return channels with a short source-detector separation.

    Parameters
    ----------
    raw : instance of Raw
        Raw instance containing fNIRS data.
    max_dist : number
        Maximum distance of returned channels (m).

    Returns
    -------
    raw : instance of Raw
        Raw instance with only short channels.
    """

    short_chans = raw.copy().load_data()
    _validate_type(short_chans, BaseRaw, 'raw')

    picks = mne.pick_types(short_chans.info,
                           meg=False,
                           eeg=False,
                           fnirs=True,
                           exclude=[])
    if not len(picks):
        raise RuntimeError('Short channel extraction for NIRS signals only.')

    dists = source_detector_distances(short_chans.info, picks=picks)
    short_chans.pick(picks[dists < max_dist])

    return short_chans
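A hedged usage sketch combining the two helpers above; note that with the strict comparisons (`>` and `<`), a channel sitting exactly at the 0.01 m threshold would land in neither set:

short_chans = get_short_channels(raw)  # separation < 1 cm: mostly scalp/systemic signal
long_chans = get_long_channels(raw)    # separation > 1 cm: reaches cortical tissue
print(len(short_chans.ch_names), len(long_chans.ch_names))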
Example #5
def _check_load_fold(fold_files, atlas):
    _validate_type(fold_files, (list, 'path-like', None), 'fold_files')
    if fold_files is None:
        fold_files = mne.get_config('MNE_NIRS_FOLD_PATH')
        if fold_files is None:
            raise ValueError(
                'MNE_NIRS_FOLD_PATH not set, either set it using '
                'mne.set_config or pass fold_files as str or list')
    if not isinstance(fold_files, list):  # path-like
        fold_files = _check_fname(fold_files,
                                  overwrite='read',
                                  must_exist=True,
                                  name='fold_files',
                                  need_dir=True)
        fold_files = [op.join(fold_files, f'10-{x}.xls') for x in (5, 10)]

    fold_tbl = pd.DataFrame()
    for fi, fname in enumerate(fold_files):
        fname = _check_fname(fname,
                             overwrite='read',
                             must_exist=True,
                             name=f'fold_files[{fi}]')
        fold_tbl = pd.concat(
            [fold_tbl, _read_fold_xls(fname, atlas=atlas)], ignore_index=True)
    return fold_tbl
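When `fold_files` is None, the helper falls back to the `MNE_NIRS_FOLD_PATH` config value, which can be set once per machine (the path below is a placeholder; see the Notes of fold_channel_specificity further down):

import mne

mne.set_config('MNE_NIRS_FOLD_PATH',
               '~/mne_data/fOLD/fOLD-public-master/Supplementary')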
Example #6
    def fit(self,
            raw: mne.io.RawArray,
            start: float = None,
            stop: float = None,
            reject_by_annotation: bool = True,
            gfp: bool = False,
            n_jobs: int = 1,
            verbose=None) -> mod_Kmeans:
        """[summary]

        Args:
            raw (mne.io.RawArray): [description]
            start (float, optional): [description]. Defaults to None.
            stop (float, optional): [description]. Defaults to None.
            reject_by_annotation (bool, optional): [description]. Defaults to True.
            gfp (bool, optional): [description]. Defaults to False.
            n_jobs (int, optional): [description]. Defaults to 1.
            verbose ([type], optional): [description]. Defaults to None.

        Returns:
            mod_Kmeans: [description]
        """
        _validate_type(raw, (BaseRaw), 'raw', 'Raw')
        reject_by_annotation = 'omit' if reject_by_annotation else None
        start, stop = _check_start_stop(raw, start, stop)
        n_jobs = check_n_jobs(n_jobs)

        if len(raw.info['bads']) != 0:
            warn('Bad channels are present in the recording. '
                 'They will still be used to compute microstate topographies. '
                 'Consider using Raw.pick() or Raw.interpolate_bads()'
                 ' before fitting.')

        data = raw.get_data(start,
                            stop,
                            reject_by_annotation=reject_by_annotation)
        if gfp is True:
            data = _extract_gfps(data)

        best_gev = 0
        if n_jobs == 1:
            for _ in range(self.n_init):
                gev, maps, segmentation = self._run_mod_kmeans(data)
                if gev > best_gev:
                    best_gev, best_maps, best_segmentation = gev, maps, segmentation
        else:
            parallel, p_fun, _ = parallel_func(self._run_mod_kmeans,
                                               total=self.n_init,
                                               n_jobs=n_jobs)
            runs = parallel(p_fun(data) for i in range(self.n_init))
            runs = np.array(runs)
            best_run = np.argmax(runs[:, 0])
            best_gev, best_maps, best_segmentation = runs[best_run]

        self.cluster_centers = best_maps
        self.GEV = best_gev
        self.labels = best_segmentation
        self.current_fit = True
        return self
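A hedged usage sketch; the constructor arguments below are assumptions inferred from the `n_init` attribute used in `fit` above, not a documented API:

modK = mod_Kmeans(n_clusters=4, n_init=10)  # hypothetical constructor signature
modK.fit(raw, gfp=True, n_jobs=4)
print(modK.GEV)              # global explained variance of the best run
print(modK.cluster_centers)  # fitted microstate topographies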
Example #7
def test_optical_density():
    """Test return type for optical density."""
    raw = read_raw_nirx(fname_nirx, preload=False)
    assert 'fnirs_cw_amplitude' in raw
    assert 'fnirs_od' not in raw
    raw = optical_density(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    assert 'fnirs_od' in raw
Example #8
def short_channel_regression(raw, max_dist=0.01):
    """
    Systemic correction regression based on nearest short channel.

    Method as described by NIRx and based on
    :footcite:`fabbri2004optical`, :footcite:`saager2005direct`,
    and :footcite:`scholkmann2014measuring`.

    Parameters
    ----------
    raw : instance of Raw
        Raw instance containing optical density data.
    max_dist : number
        Channels less than this distance are considered short (m).

    Returns
    -------
    raw : instance of Raw
        The modified raw instance.

    References
    ----------
    .. footbibliography::
    """
    raw = raw.copy().load_data()
    _validate_type(raw, BaseRaw, 'raw')

    picks_od = pick_types(raw.info, fnirs='fnirs_od')

    if len(picks_od) == 0:
        raise RuntimeError('Data must be optical density.')

    distances = source_detector_distances(raw.info)

    picks_short = picks_od[distances[picks_od] < max_dist]
    picks_long = picks_od[distances[picks_od] > max_dist]

    if len(picks_short) == 0:
        raise RuntimeError('No short channels present.')
    if len(picks_long) == 0:
        raise RuntimeError('No long channels present.')

    for pick in picks_long:

        short_idx = _find_nearest_short(raw, pick, picks_short)

        A_l = raw.get_data(pick).ravel()
        A_s = raw.get_data(short_idx).ravel()

        # Eqn 27 Scholkmann et al 2014
        alfa = np.dot(A_s, A_l) / np.dot(A_s, A_s)

        # Eqn 26 Scholkmann et al 2014
        raw._data[pick] = A_l - alfa * A_s

    return raw
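Eqn 27 is just the closed-form least-squares scaling that minimises ||A_l - alfa * A_s||^2; a self-contained NumPy check with a known coupling factor:

import numpy as np

rng = np.random.default_rng(0)
A_s = rng.standard_normal(1000)                     # short channel: systemic signal
A_l = 0.7 * A_s + 0.1 * rng.standard_normal(1000)   # long channel: systemic + "brain"

alfa = np.dot(A_s, A_l) / np.dot(A_s, A_s)          # Eqn 27
corrected = A_l - alfa * A_s                        # Eqn 26
print(round(alfa, 2))  # ~0.7, recovering the injected coupling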
Example #9
def enhance_negative_correlation(raw):
    """
    Apply algorithm from Cui et al 2009.

    Cui et al, Functional Near Infrared Spectroscopy (NIRS) signal
    improvement based on negative correlation between oxygenated and
    deoxygenated hemoglobin dynamics, NeuroImage
    https://doi.org/10.1016/j.neuroimage.2009.11.050

    Parameters
    ----------
    raw : instance of Raw
        Haemoglobin data.

    Returns
    -------
    raw : instance of Raw
        The modified raw instance.
    """
    raw = raw.copy().load_data()
    _validate_type(raw, BaseRaw, 'raw')

    hbo_channels = pick_types(raw.info, fnirs='hbo')
    hbr_channels = pick_types(raw.info, fnirs='hbr')

    if not len(hbo_channels) and not len(hbr_channels):
        raise RuntimeError('enhance_negative_correlation should '
                           'be run on haemoglobin data.')

    if len(hbo_channels) != len(hbr_channels):
        raise RuntimeError('Same number of hbo and hbr channels required.')

    for idx in range(len(hbo_channels)):
        if raw.info['chs'][hbo_channels[idx]]['ch_name'][:-4] != \
                raw.info['chs'][hbr_channels[idx]]['ch_name'][:-4]:
            raise RuntimeError('Channels must alternate between HBO and HBR.')

    for idx in range(len(hbo_channels)):
        hbo = raw._data[hbo_channels[idx]]
        hbr = raw._data[hbr_channels[idx]]

        hbo = hbo - np.mean(hbo)
        hbr = hbr - np.mean(hbr)

        hbo_std = np.std(hbo)
        hbr_std = np.std(hbr)

        alpha = hbo_std / hbr_std

        raw._data[hbo_channels[idx]] = 0.5 * (hbo - alpha * hbr)
        raw._data[hbr_channels[idx]] = -(1 / alpha) * \
            raw._data[hbo_channels[idx]]

    return raw
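The transform above forces the corrected HbO and HbR traces to be exact negatives of each other up to the scaling 1/alpha; a quick synthetic check:

import numpy as np

rng = np.random.default_rng(0)
hbo = rng.standard_normal(500)
hbr = 0.3 * rng.standard_normal(500)

hbo -= hbo.mean()
hbr -= hbr.mean()
alpha = hbo.std() / hbr.std()

hbo_new = 0.5 * (hbo - alpha * hbr)
hbr_new = -(1 / alpha) * hbo_new
print(np.corrcoef(hbo_new, hbr_new)[0, 1])  # -1.0: perfectly anti-correlated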
Example #10
def test_optical_density():
    """Test return type for optical density."""
    raw = read_raw_nirx(fname_nirx, preload=False)
    assert 'fnirs_cw_amplitude' in raw
    assert 'fnirs_od' not in raw
    raw = optical_density(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    assert 'fnirs_od' in raw
    with pytest.raises(RuntimeError, match='on continuous wave'):
        optical_density(raw)
Example #11
def get_atlas_roi_mask(stc, roi, atlas='IXI', atlas_subject=None,
                       subjects_dir=None):
    """Get ROI mask for a given subject/atlas.

    Parameters
    ----------
    stc : instance of mne.VolSourceEstimate or mne.VolVectorSourceEstimate
        The source estimate.
    roi : str
        The ROI to obtain a mask for.
    atlas : str
        The atlas to use. Must be "IXI" or "LBPA40".
    atlas_subject : str | None
        Atlas subject to process. Must be one of the (unwarped) subjects
        "ANTS3-0Months3T", "ANTS6-0Months3T", or "ANTS12-0Months3T".
        If None, it will be inferred from the number of vertices.
    subjects_dir : str | None
        The FreeSurfer subjects directory. If None, it will be resolved
        from the SUBJECTS_DIR environment variable.

    Returns
    -------
    mask : ndarray, shape (n_vertices,)
        The mask.
    """
    import nibabel as nib
    from mne.utils import _validate_type
    from mne.surface import _compute_nearest
    _validate_type(stc, (VolSourceEstimate, VolVectorSourceEstimate), 'stc')
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    if atlas_subject is None:
        atlas_subject = _VERT_COUNT_MAP[len(stc.vertices)]
    fname_src = op.join(subjects_dir, atlas_subject, 'bem', '%s-vol5-src.fif'
                        % (atlas_subject,))
    src = read_source_spaces(fname_src)
    mri = op.join(subjects_dir, atlas_subject, 'mri',
                  '%s_brain_ANTS_%s_atlas.mgz' % (atlas_subject, atlas))
    if not np.in1d(stc.vertices, src[0]['vertno']).all():
        raise RuntimeError('stc does not appear to be created from %s '
                           'volumetric source space' % (atlas_subject,))
    rr = src[0]['rr'][stc.vertices]
    mapping = get_atlas_mapping(atlas)
    vol_id = mapping[roi]
    mgz = nib.load(mri)
    mgz_data = mgz.get_fdata()
    vox_bool = mgz_data == vol_id
    vox_ijk = np.array(np.where(vox_bool)).T
    vox_mri_t = mgz.header.get_vox2ras_tkr()
    vox_mri_t *= np.array([[1e-3, 1e-3, 1e-3, 1]]).T
    rr_voi = apply_trans(vox_mri_t, vox_ijk)
    dists = _compute_nearest(rr_voi, rr, return_dists=True)[1]
    maxdist = np.linalg.norm(vox_mri_t[:3, :3].sum(0) / 2.)
    mask = (dists <= maxdist)
    return mask
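The masking step relies on MNE's private `_compute_nearest`; an equivalent sketch using scipy's public `cKDTree` (a plain substitute for illustration, not the function used above):

import numpy as np
from scipy.spatial import cKDTree

rng = np.random.default_rng(0)
rr_voi = rng.random((100, 3)) * 0.1   # ROI voxel centres (m)
rr = rng.random((20, 3)) * 0.1        # source-space points (m)

dists, _ = cKDTree(rr_voi).query(rr)  # distance to nearest ROI voxel
mask = dists <= 0.001                 # keep points within ~half a voxel
print(mask.sum(), 'of', len(rr), 'points inside the ROI')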
Example #12
    def save(self, fname, overwrite=False):
        """Save GLM results to disk.

        Parameters
        ----------
        fname : str
            The filename to use to write the HDF5 data.
            Should end in ``'glm.h5'``.
        %(overwrite)s
        """
        _validate_type(fname, 'path-like', 'fname')
        # a PosixPath-only check breaks on Windows; str() handles any Path
        fname = str(fname)
        if not fname.endswith('glm.h5'):
            raise IOError('The filename must end with glm.h5, '
                          f'instead received {fname}')
        write_hdf5(fname, self._get_state(),
                   overwrite=overwrite, title='mnepython')
Example #13
    def _check_event_description(event_desc, events):
        """Check event_id and convert to default format."""
        if event_desc is None:  # convert to int to make typing-checks happy
            event_desc = list(np.unique(events[:, 2]))

        if isinstance(event_desc, dict):
            for val in event_desc.values():
                _validate_type(val, (str, None), "Event names")
        # collections.Iterable was removed in Python 3.10; use collections.abc
        elif isinstance(event_desc, collections.abc.Iterable):
            event_desc = np.asarray(event_desc)
            if event_desc.ndim != 1:
                raise ValueError("event_desc must be 1D, got shape {}".format(
                    event_desc.shape))
            event_desc = dict(zip(event_desc, map(str, event_desc)))
        elif callable(event_desc):
            pass
        else:
            raise ValueError(
                "Invalid type for event_desc (should be None, list, "
                "1darray, dict or callable). Got {}".format(type(event_desc)))

        return event_desc
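A hedged sketch of the three accepted `event_desc` forms, treating the helper above as a free function for illustration:

import numpy as np

events = np.array([[10, 0, 1], [20, 0, 2], [30, 0, 1]])

print(_check_event_description(None, events))       # {1: '1', 2: '2'} (all codes)
print(_check_event_description([1], events))        # {1: '1'} (subset via iterable)
print(_check_event_description({1: 'go'}, events))  # {1: 'go'} (dict used as-is)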
Example #14
def write_info(fname, info, overwrite=False):
    """Save Info object to ``.hdf5`` file.

    Parameters
    ----------
    fname : str
        Name of the file.
    info : mne.Info
        Info object to save.
    overwrite : bool
        Whether to overwrite an existing file. Defaults to False.
    """
    from .channels import get_ch_pos
    from mne.utils import _validate_type
    from mne.externals import h5io
    from mne.io.pick import channel_indices_by_type

    # make sure the types are correct
    _validate_type(fname, 'str', item_name='fname')
    _validate_type(info, 'info', item_name='info')

    # extract type info
    tps = channel_indices_by_type(info)

    # remove empty dict keys
    for k in list(tps.keys()):
        if len(tps[k]) == 0:
            tps.pop(k)

    has_types = list(tps.keys())
    ch_type = has_types[0] if len(has_types) == 1 else tps

    # save to .hdf5
    data_dict = {
        'ch_names': info['ch_names'],
        'sfreq': info['sfreq'],
        'ch_type': ch_type,
        'pos': get_ch_pos(info)
    }
    h5io.write_hdf5(fname, data_dict, overwrite=overwrite)
Example #15
def update_anat_landmarks(bids_path, landmarks, verbose=None):
    """Update the anatomical landmark coordinates of an MRI scan.

    This will change the ``AnatomicalLandmarkCoordinates`` entry in the
    respective JSON sidecar file, or create it if it doesn't exist.

    Parameters
    ----------
    bids_path : mne_bids.BIDSPath
        Path of the MR image.
    landmarks : mne.channels.DigMontage
        An :class:`mne.channels.DigMontage` instance with coordinates for the
        nasion and left and right pre-auricular points in MRI voxel
        coordinates.

        .. note:: :func:`mne_bids.get_anat_landmarks` provides a convenient and
                  reliable way to generate the landmark coordinates in the
                  required coordinate system.
    %(verbose)s

    Notes
    -----
    .. versionadded:: 0.8
    """
    _validate_type(item=bids_path, types=BIDSPath, item_name='bids_path')
    _validate_type(item=landmarks, types=DigMontage, item_name='landmarks')

    # Do some path verifications and fill in some gaps the users might have
    # left (datatype and extension)
    # XXX We could be more stringent (and less user-friendly) and insist on a
    # XXX full specification of all parts of the BIDSPath, thoughts?
    bids_path_mri = bids_path.copy()
    if bids_path_mri.datatype is None:
        bids_path_mri.datatype = 'anat'

    if bids_path_mri.datatype != 'anat':
        raise ValueError(
            f'Can only operate on "anat" MRI data, but the provided bids_path '
            f'points to: {bids_path_mri.datatype}')

    if bids_path_mri.suffix is None:
        raise ValueError('Please specify the "suffix" entity of the provided '
                         'bids_path.')
    elif bids_path_mri.suffix not in ('T1w', 'FLASH'):
        raise ValueError(
            f'Can only operate on "T1w" and "FLASH" images, but the bids_path '
            f'suffix indicates: {bids_path_mri.suffix}')

    valid_extensions = ('.nii', '.nii.gz')
    tried_paths = []
    file_exists = False
    if bids_path_mri.extension is None:
        # No extension was provided, start searching …
        for extension in valid_extensions:
            bids_path_mri.extension = extension
            tried_paths.append(bids_path_mri.fpath)

            if bids_path_mri.fpath.exists():
                file_exists = True
                break
    else:
        # An extension was provided
        tried_paths.append(bids_path_mri.fpath)
        if bids_path_mri.fpath.exists():
            file_exists = True

    if not file_exists:
        raise ValueError(
            f'Could not find an MRI scan. Please check the provided '
            f'bids_path. Tried the following filenames: '
            f'{", ".join([p.name for p in tried_paths])}')

    positions = landmarks.get_positions()
    coord_frame = positions['coord_frame']
    if coord_frame != 'mri_voxel':
        raise ValueError(
            f'The landmarks must be specified in MRI voxel coordinates, but '
            f'provided DigMontage is in "{coord_frame}"')

    # Extract the cardinal points
    name_to_coords_map = {
        'LPA': positions['lpa'],
        'NAS': positions['nasion'],
        'RPA': positions['rpa']
    }

    # Check if coordinates for any cardinal point are missing, and convert to
    # a list so we can easily store the data in JSON format
    missing_points = []
    for name, coords in name_to_coords_map.items():
        if coords is None:
            missing_points.append(name)
        else:
            name_to_coords_map[name] = list(coords)

    if missing_points:
        raise ValueError(
            f'The provided DigMontage did not contain all required cardinal '
            f'points (nasion and left and right pre-auricular points). The '
            f'following points are missing: '
            f'{", ".join(missing_points)}')

    mri_json = {'AnatomicalLandmarkCoordinates': name_to_coords_map}

    bids_path_json = bids_path.copy().update(extension='.json')
    if not bids_path_json.fpath.exists():  # Must exist before we can update it
        _write_json(bids_path_json.fpath, dict())

    update_sidecar_json(bids_path=bids_path_json, entries=mri_json)
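A hedged usage sketch under the conventions assumed above; the subject, root, and voxel coordinates are placeholders:

import mne
from mne_bids import BIDSPath

bids_path = BIDSPath(subject='01', root='/data/bids', suffix='T1w')
landmarks = mne.channels.make_dig_montage(
    lpa=[66.0, 51.0, 46.0],        # placeholder voxel coordinates
    nasion=[41.0, 32.0, 74.0],
    rpa=[17.0, 53.0, 47.0],
    coord_frame='mri_voxel')
update_anat_landmarks(bids_path, landmarks)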
Example #16
def test_validate_type():
    """Test _validate_type."""
    _validate_type(1, 'int-like')
    with pytest.raises(TypeError, match='int-like'):
        _validate_type(False, 'int-like')
Example #17
def plot_3d_montage(info,
                    view_map,
                    *,
                    src_det_names='auto',
                    ch_names='numbered',
                    subject='fsaverage',
                    trans='fsaverage',
                    surface='pial',
                    subjects_dir=None,
                    verbose=None):
    """
    Plot a 3D sensor montage.

    Parameters
    ----------
    info : instance of Info
        Measurement info.
    view_map : dict
        Dict of view (key) to channel-pair-numbers (value) to use when
        plotting. Note that, because these get plotted as 1-based channel
        *numbers*, the values should be 1-based rather than 0-based.
        The keys are of the form:

        ``'{side}-{view}'``
            For views like ``'left-lat'`` or ``'right-frontal'`` where the side
            matters.
        ``'{view}'``
            For views like ``'caudal'`` that are along the midline.

        See :meth:`mne.viz.Brain.show_view` for ``view`` options, and the
        Examples section below for usage examples.
    src_det_names : None | dict | str
        Source and detector names to use. "auto" (default) will see if the
        channel locations correspond to standard 10-20 locations and will
        use those if they do (otherwise will act like None). None will use
        S1, S2, ..., D1, D2, ..., etc. Can also be an explicit dict mapping,
        for example::

            src_det_names=dict(S1='Fz', D1='FCz', ...)
    ch_names : str | dict | None
        If ``'numbered'`` (default), use ``['1', '2', ...]`` for the channel
        names, or ``None`` to use ``['S1_D2', 'S2_D1', ...]``. Can also be a
        dict to provide a mapping from the ``'S1_D2'``-style names (keys) to
        other names, e.g., ``defaultdict(lambda: '')`` will prevent showing
        the names altogether.

        .. versionadded:: 0.3
    subject : str
        The subject.
    trans : str | Transform
        The subjects head<->MRI transform.
    surface : str
        The FreeSurfer surface name (e.g., 'pial', 'white').
    subjects_dir : str
        The subjects directory.
    %(verbose)s

    Returns
    -------
    figure : matplotlib.figure.Figure
        The matplotlib figure.

    Examples
    --------
    For a Hitachi system with two sets of 12 source-detector arrangements,
    one on each side of the head, showing 1-12 on the left and 13-24 on the
    right can be accomplished using the following ``view_map``::

        >>> view_map = {
        ...     'left-lat': np.arange(1, 13),
        ...     'right-lat': np.arange(13, 25),
        ... }

    NIRx typically involves more complicated arrangements. See
    :ref:`the 3D tutorial <tut-fnirs-vis-brain-plot-3d-montage>` for
    an advanced example that incorporates the ``'caudal'`` view as well.
    """  # noqa: E501
    import matplotlib.pyplot as plt
    from scipy.spatial.distance import cdist
    _validate_type(info, Info, 'info')
    _validate_type(view_map, dict, 'view_map')
    _validate_type(src_det_names, (None, dict, str), 'src_det_names')
    _validate_type(ch_names, (dict, str, None), 'ch_names')
    info = pick_info(info, pick_types(info, fnirs=True, exclude=())[::2])
    if isinstance(ch_names, str):
        _check_option('ch_names', ch_names, ('numbered', ), extra='when str')
        ch_names = {
            name.split()[0]: str(ni)
            for ni, name in enumerate(info['ch_names'], 1)
        }
    info['bads'] = []
    if isinstance(src_det_names, str):
        _check_option('src_det_names',
                      src_det_names, ('auto', ),
                      extra='when str')
        # Decide if we can map to 10-20 locations
        names, pos = zip(
            *transform_to_head(make_standard_montage(
                'standard_1020')).get_positions()['ch_pos'].items())
        pos = np.array(pos, float)
        locs = dict()
        bad = False
        for ch in info['chs']:
            name = ch['ch_name']
            s_name, d_name = name.split()[0].split('_')
            for name, loc in [(s_name, ch['loc'][3:6]),
                              (d_name, ch['loc'][6:9])]:
                if name in locs:
                    continue
                # see if it's close enough
                idx = np.where(cdist(loc[np.newaxis], pos)[0] < 1e-3)[0]
                if len(idx) < 1:
                    bad = True
                    break
                # Some are duplicated (e.g., T7+T3) but we can rely on the
                # first one being the canonical one
                locs[name] = names[idx[0]]
            if bad:
                break
        if bad:
            src_det_names = None
            logger.info('Could not automatically map source/detector names to '
                        '10-20 locations.')
        else:
            src_det_names = locs
            logger.info('Source-detector names automatically mapped to 10-20 '
                        'locations')

    head_mri_t = _get_trans(trans, 'head', 'mri')[0]
    del trans
    views = list()
    for key, num in view_map.items():
        _validate_type(key, str, f'view_map key {repr(key)}')
        _validate_type(num, np.ndarray, f'view_map[{repr(key)}]')
        if '-' in key:
            hemi, v = key.split('-', maxsplit=1)
            hemi = dict(left='lh', right='rh')[hemi]
            views.append((hemi, v, num))
        else:
            views.append(('lh', key, num))
    del view_map
    size = (400 * len(views), 400)
    brain = Brain(subject,
                  'both',
                  surface,
                  views=['lat'] * len(views),
                  size=size,
                  background='w',
                  units='m',
                  view_layout='horizontal',
                  subjects_dir=subjects_dir)
    with _safe_brain_close(brain):
        brain.add_head(dense=False, alpha=0.1)
        brain.add_sensors(info,
                          trans=head_mri_t,
                          fnirs=['channels', 'pairs', 'sources', 'detectors'])
        add_text_kwargs = dict()
        if 'render' in _get_args(brain.plotter.add_text):
            add_text_kwargs['render'] = False
        for col, view in enumerate(views):
            plotted = set()
            brain.show_view(view[1],
                            hemi=view[0],
                            focalpoint=(0, -0.02, 0.02),
                            distance=0.4,
                            row=0,
                            col=col)
            brain.plotter.subplot(0, col)
            vp = brain.plotter.renderer
            for ci in view[2]:  # figure out what we need to plot
                this_ch = info['chs'][ci - 1]
                ch_name = this_ch['ch_name'].split()[0]
                s_name, d_name = ch_name.split('_')
                needed = [
                    (ch_names, 'ch_names', ch_name, this_ch['loc'][:3], 12,
                     'Centered'),
                    (src_det_names, 'src_det_names', s_name,
                     this_ch['loc'][3:6], 8, 'Bottom'),
                    (src_det_names, 'src_det_names', d_name,
                     this_ch['loc'][6:9], 8, 'Bottom'),
                ]
                for lookup, lname, name, ch_pos, font_size, va in needed:
                    if name in plotted:
                        continue
                    plotted.add(name)
                    orig_name = name
                    if lookup is not None:
                        name = lookup[name]
                    _validate_type(name, str, f'{lname}[{repr(orig_name)}]')
                    ch_pos = apply_trans(head_mri_t, ch_pos)
                    vp.SetWorldPoint(np.r_[ch_pos, 1.])
                    vp.WorldToDisplay()
                    ch_pos = (np.array(vp.GetDisplayPoint()[:2]) -
                              np.array(vp.GetOrigin()))
                    actor = brain.plotter.add_text(name,
                                                   ch_pos,
                                                   font_size=font_size,
                                                   color=(0., 0., 0.),
                                                   **add_text_kwargs)
                    prop = actor.GetTextProperty()
                    getattr(prop, f'SetVerticalJustificationTo{va}')()
                    prop.SetJustificationToCentered()
                    actor.SetTextProperty(prop)
                    prop.SetBold(True)
        img = brain.screenshot()
    return plt.figimage(img, resize=True).figure
Example #18
def scalp_coupling_index_windowed(raw,
                                  time_window=10,
                                  threshold=0.1,
                                  l_freq=0.7,
                                  h_freq=1.5,
                                  l_trans_bandwidth=0.3,
                                  h_trans_bandwidth=0.3,
                                  verbose=False):
    """
    Compute scalp coupling index for each channel and time window.

    As described in [1]_ and [2]_.
    This method provides a metric of data quality along the duration of
    the measurement. The user can specify the window over which the
    metric is computed.

    Parameters
    ----------
    raw : instance of Raw
        The optical density data.
    time_window : number
        The duration of the window over which to calculate the metric.
        Default is 10 seconds as in PHOEBE paper.
    threshold : number
        Values below this are marked as bad and annotated in the raw file.
    %(l_freq)s
    %(h_freq)s
    %(l_trans_bandwidth)s
    %(h_trans_bandwidth)s
    %(verbose)s

    Returns
    -------
    raw : instance of Raw
        The Raw data. Optionally annotated with bad segments.
    scores : array (n_nirs, n_windows)
        Array of scalp coupling index values, one per channel and window.
    times : list
        List of the start and end times of each window used to compute the
        metric.

    References
    ----------
    .. [1] Pollonini L et al., “PHOEBE: a method for real time mapping of
           optodes-scalp coupling in functional near-infrared spectroscopy” in
           Biomed. Opt. Express 7, 5104-5119 (2016).
    .. [2] Hernandez, Samuel Montero, and Luca Pollonini. "NIRSplot: a tool for
           quality assessment of fNIRS scans." Optics and the Brain.
           Optical Society of America, 2020.
    """

    raw = raw.copy().load_data()
    _validate_type(raw, BaseRaw, 'raw')

    if not len(pick_types(raw.info, fnirs='fnirs_od')):
        raise RuntimeError('Scalp coupling index '
                           'should be run on optical density data.')

    freqs = np.unique(_channel_frequencies(raw.info))
    picks = _check_channels_ordered(raw.info, freqs)

    filtered_data = filter_data(raw._data,
                                raw.info['sfreq'],
                                l_freq,
                                h_freq,
                                picks=picks,
                                verbose=verbose,
                                l_trans_bandwidth=l_trans_bandwidth,
                                h_trans_bandwidth=h_trans_bandwidth)

    window_samples = int(np.ceil(time_window * raw.info['sfreq']))
    n_windows = int(np.floor(len(raw) / window_samples))

    scores = np.zeros((len(picks), n_windows))
    times = []

    for window in range(n_windows):

        start_sample = int(window * window_samples)
        end_sample = start_sample + window_samples
        end_sample = np.min([end_sample, len(raw) - 1])

        t_start = raw.times[start_sample]
        t_stop = raw.times[end_sample]
        times.append((t_start, t_stop))

        for ii in picks[::2]:

            c1 = filtered_data[ii][start_sample:end_sample]
            c2 = filtered_data[ii + 1][start_sample:end_sample]
            c = np.corrcoef(c1, c2)[0][1]
            scores[ii, window] = c
            scores[ii + 1, window] = c

            if threshold is not None and c < threshold:
                raw.annotations.append(t_start,
                                       time_window,
                                       'BAD_SCI',
                                       ch_names=[raw.ch_names[ii:ii + 2]])

    return raw, scores, times
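The per-window score is just the Pearson correlation between the two wavelengths of a source-detector pair; a standalone sketch of the windowing logic on synthetic 10 Hz data:

import numpy as np

rng = np.random.default_rng(0)
sfreq, time_window, n = 10.0, 10, 600           # 10 Hz, 10 s windows, 60 s of data
cardiac = np.sin(2 * np.pi * 1.0 * np.arange(n) / sfreq)
c760 = cardiac + 0.05 * rng.standard_normal(n)  # two wavelengths share the
c850 = cardiac + 0.05 * rng.standard_normal(n)  # cardiac signal when well coupled

window_samples = int(np.ceil(time_window * sfreq))
for start in range(0, n - window_samples + 1, window_samples):
    seg = slice(start, start + window_samples)
    sci = np.corrcoef(c760[seg], c850[seg])[0, 1]
    print(round(sci, 3))  # ~1.0 for a well-coupled optode pair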
Example #19
def template_to_head(info,
                     space,
                     coord_frame='auto',
                     unit='auto',
                     verbose=None):
    """Transform a BIDS standard template montage to the head coordinate frame.

    Parameters
    ----------
    %(info_not_none)s The info is modified in place.
    space : str
        The name of the BIDS standard template. See
        https://bids-specification.readthedocs.io/en/stable/99-appendices/08-coordinate-systems.html#standard-template-identifiers
        for a list of acceptable spaces.
    coord_frame : 'auto' | 'mri' | 'mri_voxel' | 'ras'
        BIDS template coordinate systems do not specify a coordinate frame,
        so this must be determined by inspecting the documentation for the
        dataset or the ``electrodes.tsv`` file.  If ``'auto'``, the coordinate
        frame is assumed to be ``'mri_voxel'`` if the coordinates are strictly
        positive, and ``'ras'`` (``"scanner RAS"``) otherwise.

        .. warning::

            ``scanner RAS`` and ``surface RAS`` coordinate frames are similar
            so be very careful not to assume a BIDS dataset's coordinates are
            in one when they are actually in the other. The only way to tell
            for template coordinate systems, currently, is if it is specified
            in the dataset documentation.

    unit : 'm' | 'mm' | 'auto'
        The unit that was used in the coordinate system specification.
        If ``'auto'``, ``'m'`` will be inferred if the montage fits within
        the range ``-1`` to ``1``, and ``'mm'`` otherwise. If the
        ``coord_frame`` is ``'mri_voxel'``, ``unit`` will be ignored.
    %(verbose)s

    Returns
    -------
    %(info_not_none)s The modified ``Info`` object.
    trans : mne.transforms.Transform
        The data transformation matrix from ``'head'`` to ``'mri'``
        coordinates.

    """
    _validate_type(info, mne.io.Info)
    _check_option('space', space, BIDS_STANDARD_TEMPLATE_COORDINATE_SYSTEMS)
    _check_option('coord_frame', coord_frame,
                  ('auto', 'mri', 'mri_voxel', 'ras'))
    _check_option('unit', unit, ('auto', 'm', 'mm'))
    # XXX: change to after 0.11 release
    # montage = info.get_montage()
    montage = _get_montage(info)
    if montage is None:
        raise RuntimeError('No montage found in the `raw` object')
    montage.remove_fiducials()  # we will add fiducials so remove any
    pos = montage.get_positions()
    if pos['coord_frame'] not in ('mni_tal', 'unknown'):
        raise RuntimeError(
            "Montage coordinate frame '{}' not expected for a template "
            "montage, should be 'unknown' or 'mni_tal'".format(
                pos['coord_frame']))
    locs = np.array(list(pos['ch_pos'].values()))
    locs = locs[~np.any(np.isnan(locs), axis=1)]  # only channels with loc
    if locs.size == 0:
        raise RuntimeError('No channel locations found in the montage')
    if unit == 'auto':
        unit = 'm' if abs(locs - locs.mean(axis=0)).max() < 1 else 'mm'
    if coord_frame == 'auto':
        coord_frame = 'mri_voxel' if locs.min() >= 0 else 'ras'
    # transform montage to head
    data_dir = files('mne_bids.data')
    # set to the right coordinate frame as specified by the user
    for d in montage.dig:  # ensure same coordinate frame
        d['coord_frame'] = MNE_STR_TO_FRAME[coord_frame]
    # do the transforms, first ras -> vox if needed
    if montage.get_positions()['coord_frame'] == 'ras':
        ras_vox_trans = mne.read_trans(data_dir /
                                       f'space-{space}_ras-vox_trans.fif')
        if unit == 'm':  # must be in mm here
            for d in montage.dig:
                d['r'] *= 1000
        montage.apply_trans(ras_vox_trans)
    if montage.get_positions()['coord_frame'] == 'mri_voxel':
        vox_mri_trans = mne.read_trans(data_dir /
                                       f'space-{space}_vox-mri_trans.fif')
        montage.apply_trans(vox_mri_trans)
    assert montage.get_positions()['coord_frame'] == 'mri'
    if not (unit == 'm' and coord_frame == 'mri'):  # if so, already in m
        for d in montage.dig:
            d['r'] /= 1000  # mm -> m
    # now add fiducials (in mri coordinates)
    fids = mne.io.read_fiducials(data_dir / f'space-{space}_fiducials.fif')[0]
    montage.dig = fids + montage.dig  # add fiducials
    for fid in fids:  # ensure also in mri
        fid['coord_frame'] = MNE_STR_TO_FRAME['mri']
    info.set_montage(montage)  # transform to head
    # finally return montage
    return info, mne.read_trans(data_dir / f'space-{space}_trans.fif')
Example #20
def fold_landmark_specificity(raw,
                              landmark,
                              fold_files=None,
                              atlas="Juelich",
                              interpolate=False):
    """Return the specificity of each channel to a specified brain landmark.

    Parameters
    ----------
    raw : BaseRaw
        The fNIRS data.
    landmark : str
        Landmark of interest. Must be present in fOLD toolbox data file.
    fold_files : list | path-like | None
        If None, will use the MNE_NIRS_FOLD_PATH config variable.
        If path-like, should be a path to a directory containing '10-10.xls'
        and '10-5.xls'. If list, should be paths to the fold toolbox files.
        See the Notes section of :func:`~mne_nirs.io.fold_channel_specificity`
        for details.
    atlas : str
        Brain atlas to use.
    interpolate : bool
        If the optimal source-detector pair is not found in the fOLD files
        False (default) will yield no results for that pairing, whereas
        True will use the next closest match. See Notes of
        :func:`mne_nirs.io.fold_channel_specificity` for an example.

        .. warning::
           The sensitivity profile can differ substantially for nearest
           neighbors, so use ``interpolate=True`` with caution.

    Returns
    -------
    spec : array
        Specificity values for each channel to brain landmark.

    See Also
    --------
    fold_channel_specificity

    Notes
    -----
    Specificity values are provided by the fOLD toolbox
    :footcite:`morais2018fnirs` excel files. See the Notes section of
    :func:`~mne_nirs.io.fold_channel_specificity` for more details.

    References
    ----------
    .. footbibliography::
    """
    _validate_type(landmark, str, 'landmark')
    _validate_type(raw, BaseRaw, 'raw')

    reference_locations = _generate_montage_locations()

    fold_tbl = _check_load_fold(fold_files, atlas)

    specificity = np.zeros(len(raw.ch_names))
    for cidx in range(len(raw.ch_names)):

        tbl = _source_detector_fold_table(raw, cidx, reference_locations,
                                          fold_tbl, interpolate)

        if len(tbl) > 0:
            tbl["ContainsLmk"] = [landmark in la for la in tbl["Landmark"]]
            tbl = tbl.query("ContainsLmk == True")["Specificity"]

        if len(tbl) == 0:
            continue
            # print(f"No data for {src_name}-{det_name}")
        elif len(tbl) == 1:
            specificity[cidx] = tbl.values[0]
        else:
            raise RuntimeError("Multiple specificity values returned")

    return np.array(specificity)
Example #21
def short_channel_regression(raw, max_dist=0.01):
    """
    Short channel regression based on nearest channel.

    Fabbri, Francesco, et al. "Optical measurements of absorption changes in
    two-layered diffusive media."
    Physics in Medicine & Biology 49.7 (2004): 1183.

    Saager, Rolf B., and Andrew J. Berger. "Direct characterization and
    removal of interfering absorption trends in two-layer turbid media."
    JOSA A 22.9 (2005): 1874-1882.

    Scholkmann, Felix, Andreas Jaakko Metz, and Martin Wolf.
    "Measuring tissue hemodynamics and oxygenation by continuous-wave
    functional near-infrared spectroscopy—how robust are the different
    calculation methods against movement artifacts?."
    Physiological measurement 35.4 (2014): 717.

    Parameters
    ----------
    raw : instance of Raw
        Raw instance containing optical density data.
    max_dist : number
        Channels less than this distance are considered short (m).

    Returns
    -------
    raw : instance of Raw
        The modified raw instance.
    """
    raw = raw.copy().load_data()
    _validate_type(raw, BaseRaw, 'raw')

    picks_od = pick_types(raw.info, fnirs='fnirs_od')

    if len(picks_od) == 0:
        raise RuntimeError('Data must be optical density.')

    distances = source_detector_distances(raw.info)

    picks_short = picks_od[distances[picks_od] < max_dist]
    picks_long = picks_od[distances[picks_od] > max_dist]

    if len(picks_short) == 0:
        raise RuntimeError('No short channels present.')
    if len(picks_long) == 0:
        raise RuntimeError('No long channels present.')

    for pick in picks_long:

        short_idx = _find_nearest_short(raw, pick, picks_short)

        A_l = raw.get_data(pick).ravel()
        A_s = raw.get_data(short_idx).ravel()

        # Eqn 27 Scholkmann et al 2014
        alfa = np.dot(A_s, A_l) / np.dot(A_s, A_s)

        # Eqn 26 Scholkmann et al 2014
        raw._data[pick] = A_l - alfa * A_s

    return raw
Example #22
def update_anat_landmarks(
    bids_path, landmarks, *, fs_subject=None, fs_subjects_dir=None,
    kind=None, on_missing='raise', verbose=None
):
    """Update the anatomical landmark coordinates of an MRI scan.

    This will change the ``AnatomicalLandmarkCoordinates`` entry in the
    respective JSON sidecar file, or create it if it doesn't exist.

    Parameters
    ----------
    bids_path : BIDSPath
        Path of the MR image.
    landmarks : mne.channels.DigMontage | path-like
        An :class:`mne.channels.DigMontage` instance with coordinates for the
        nasion and left and right pre-auricular points in MRI voxel
        coordinates. Alternatively, the path to a ``*-fiducials.fif`` file as
        produced by the MNE-Python coregistration GUI or via
        :func:`mne.io.write_fiducials`.

        .. note:: :func:`mne_bids.get_anat_landmarks` provides a convenient and
                  reliable way to generate the landmark coordinates in the
                  required coordinate system.

        .. note:: If ``path-like``, ``fs_subject`` and ``fs_subjects_dir``
                  must be provided as well.

        .. versionchanged:: 0.10
           Added support for ``path-like`` input.
    fs_subject : str | None
        The subject identifier used for FreeSurfer. Must be provided if
        ``landmarks`` is ``path-like``; otherwise, it will be ignored.
    fs_subjects_dir : path-like | None
        The FreeSurfer subjects directory. If ``None``, defaults to the
        ``SUBJECTS_DIR`` environment variable. Must be provided if
        ``landmarks`` is ``path-like``; otherwise, it will be ignored.
    kind : str | None
        The suffix of the anatomical landmark names in the JSON sidecar.
        A suffix might be present e.g. to distinguish landmarks between
        sessions. If provided, should not include a leading underscore ``_``.
        For example, if the landmark names in the JSON sidecar file are
        ``LPA_ses-1``, ``RPA_ses-1``, ``NAS_ses-1``, you should pass
        ``'ses-1'`` here.
        If ``None``, no suffix is appended, the landmarks named
        ``Nasion`` (or ``NAS``), ``LPA``, and ``RPA`` will be used.

        .. versionadded:: 0.10
    on_missing : 'ignore' | 'warn' | 'raise'
        How to behave if the specified landmarks cannot be found in the MRI
        JSON sidecar file.

        .. versionadded:: 0.10
    %(verbose)s

    Notes
    -----
    .. versionadded:: 0.8
    """
    _validate_type(item=bids_path, types=BIDSPath, item_name='bids_path')
    _validate_type(
        item=landmarks, types=(DigMontage, 'path-like'), item_name='landmarks'
    )
    _check_on_missing(on_missing)

    # Do some path verifications and fill in some gaps the users might have
    # left (datatype and extension)
    # XXX We could be more stringent (and less user-friendly) and insist on a
    # XXX full specification of all parts of the BIDSPath, thoughts?
    bids_path_mri = bids_path.copy()
    if bids_path_mri.datatype is None:
        bids_path_mri.datatype = 'anat'

    if bids_path_mri.datatype != 'anat':
        raise ValueError(
            f'Can only operate on "anat" MRI data, but the provided bids_path '
            f'points to: {bids_path_mri.datatype}')

    if bids_path_mri.suffix is None:
        raise ValueError('Please specify the "suffix" entity of the provided '
                         'bids_path.')
    elif bids_path_mri.suffix not in ('T1w', 'FLASH'):
        raise ValueError(
            f'Can only operate on "T1w" and "FLASH" images, but the bids_path '
            f'suffix indicates: {bids_path_mri.suffix}')

    valid_extensions = ('.nii', '.nii.gz')
    tried_paths = []
    file_exists = False
    if bids_path_mri.extension is None:
        # No extension was provided, start searching …
        for extension in valid_extensions:
            bids_path_mri.extension = extension
            tried_paths.append(bids_path_mri.fpath)

            if bids_path_mri.fpath.exists():
                file_exists = True
                break
    else:
        # An extension was provided
        tried_paths.append(bids_path_mri.fpath)
        if bids_path_mri.fpath.exists():
            file_exists = True

    if not file_exists:
        raise ValueError(
            f'Could not find an MRI scan. Please check the provided '
            f'bids_path. Tried the following filenames: '
            f'{", ".join([p.name for p in tried_paths])}')

    if not isinstance(landmarks, DigMontage):  # it's pathlike
        if fs_subject is None:
            raise ValueError(
                'You must provide the "fs_subject" parameter when passing the '
                'path to fiducials'
            )
        landmarks = _get_landmarks_from_fiducials_file(
            bids_path=bids_path,
            fname=landmarks,
            fs_subject=fs_subject,
            fs_subjects_dir=fs_subjects_dir
        )

    positions = landmarks.get_positions()
    coord_frame = positions['coord_frame']
    if coord_frame != 'mri_voxel':
        raise ValueError(
            f'The landmarks must be specified in MRI voxel coordinates, but '
            f'provided DigMontage is in "{coord_frame}"')

    # Extract the cardinal points
    name_to_coords_map = {
        'LPA': positions['lpa'],
        'NAS': positions['nasion'],
        'RPA': positions['rpa']
    }

    # Check if coordinates for any cardinal point are missing, and convert to
    # a list so we can easily store the data in JSON format
    missing_points = []
    for name, coords in name_to_coords_map.items():
        if coords is None:
            missing_points.append(name)
        else:
            # Funnily, np.float64 is JSON-serializable, while np.float32 is not!
            # Thus, cast to float64 to avoid issues (which e.g. may arise when
            # fiducials were read from disk!)
            name_to_coords_map[name] = list(coords.astype('float64'))

    if missing_points:
        raise ValueError(
            f'The provided DigMontage did not contain all required cardinal '
            f'points (nasion and left and right pre-auricular points). The '
            f'following points are missing: '
            f'{", ".join(missing_points)}')

    bids_path_json = bids_path.copy().update(extension='.json')
    if not bids_path_json.fpath.exists():  # Must exist before we can update it
        _write_json(bids_path_json.fpath, dict())

    mri_json = json.loads(bids_path_json.fpath.read_text(encoding='utf-8'))
    if 'AnatomicalLandmarkCoordinates' not in mri_json:
        _on_missing(
            on_missing=on_missing,
            msg=f'No AnatomicalLandmarkCoordinates section found in '
                f'{bids_path_json.fpath.name}',
            error_klass=KeyError
        )
        mri_json['AnatomicalLandmarkCoordinates'] = dict()

    for name, coords in name_to_coords_map.items():
        if kind is not None:
            name = f'{name}_{kind}'

        if name not in mri_json['AnatomicalLandmarkCoordinates']:
            _on_missing(
                on_missing=on_missing,
                msg=f'Anatomical landmark not found in '
                    f'{bids_path_json.fpath.name}: {name}',
                error_klass=KeyError
            )

        mri_json['AnatomicalLandmarkCoordinates'][name] = coords

    update_sidecar_json(bids_path=bids_path_json, entries=mri_json)
Example #23
def quantify_mayer_fooof(raw,
                         num_oscillations=1,
                         centre_frequency=0.01,
                         extra_df_fields=None,  # avoid a mutable default
                         fmin=0.001,
                         fmax=1,
                         tmin=0,
                         tmax=None,
                         n_fft=400,
                         n_overlap=200,
                         peak_width_limits=(0.5, 12.0)):
    """
    Quantify Mayer wave properties using FOOOF analysis.

    The Fitting Oscillations & One Over F (FOOOF)
    :footcite:`donoghue2020parameterizing`
    is utilised to estimate Mayer wave oscillation parameters as described in
    :footcite:`luke2021characterization`.

    The FOOOF algorithm is applied to the mean PSD estimate of the data,
    the oscillation closest to the `centre_frequency` is assumed to be the
    Mayer wave oscillation. The parameters for this oscillation are returned
    as a dataframe. You can return multiple closest oscillations to the
    centre_frequency by increasing the `num_oscillations` parameter.

    Parameters
    ----------
    raw : instance of Raw
        The haemoglobin data.
    num_oscillations : number
        Number of parameterised oscillations to be returned. These are selected
        in increasing distance from the `centre_frequency`.
    centre_frequency : number
        Centre frequency of the Mayer wave.
    extra_df_fields : dict | None
        Dictionary of values to be appended to the dataframe. If None, no
        extra fields are added.
    fmin : float
        Min frequency of interest.
    fmax : float
        Max frequency of interest.
    tmin : float | None
        Min time of interest.
    tmax : float | None
        Max time of interest.
    n_fft : int
        The length of FFT used, must be ``>= n_per_seg`` (default: 256).
        The segments will be zero-padded if ``n_fft > n_per_seg``.
        If n_per_seg is None, n_fft must be <= number of time points
        in the data.
    n_overlap : int
        The number of points of overlap between segments. Will be adjusted
        to be <= n_per_seg. The default value is 0.
    peak_width_limits : tuple of (float, float), optional, default: (0.5, 12.0)
        Limits on possible peak width, in Hz, as (lower_bound, upper_bound).
        As used by FOOOF.

    Returns
    -------
    df : DataFrame
        DataFrame with one row per chromophore, containing the centre
        frequency, bandwidth, and power of the Mayer wave oscillation.

    References
    ----------
    .. footbibliography::
    """
    _require_version('fooof', 'run the FOOOF algorithm.')
    _validate_type(raw, BaseRaw, 'raw')

    hbo_picks = pick_types(raw.info, fnirs='hbo')
    hbr_picks = pick_types(raw.info, fnirs='hbr')

    if not len(hbo_picks) and not len(hbr_picks):
        # It may be perfectly valid to compute this on optical density
        # or raw data, I just haven't tried this. Let me know if this works
        # for you and we can ease this restriction.
        raise RuntimeError('Mayer wave estimation should be run on '
                           'haemoglobin concentration data.')

    df = pd.DataFrame()

    for picks, chroma in zip([hbo_picks, hbr_picks], ["hbo", "hbr"]):
        if len(picks):

            # 'fm' rather than 'fm_hbo': this loop also handles hbr picks
            fm = _run_fooof(raw.copy().pick(picks),
                            fmin=fmin,
                            fmax=fmax,
                            tmin=tmin,
                            tmax=tmax,
                            n_overlap=n_overlap,
                            n_fft=n_fft,
                            peak_width_limits=peak_width_limits)

            cf, pw, bw = _process_fooof_output(fm, centre_frequency)

            data = dict()
            data["Centre Frequency"] = cf
            data["Bandwidth"] = bw
            data["Power"] = pw
            data["Chromaphore"] = chroma
            data = {**data, **(extra_df_fields or {})}

            df = pd.concat([df, pd.DataFrame(data, index=[0])],
                           ignore_index=True)

    return df
Example #24
def fold_channel_specificity(raw,
                             fold_files=None,
                             atlas="Juelich",
                             interpolate=False):
    """Return the landmarks and specificity a channel is sensitive to.

    Parameters
    ----------
    raw : BaseRaw
        The fNIRS data.
    fold_files : list | path-like | None
        If None, will use the MNE_NIRS_FOLD_PATH config variable.
        If path-like, should be a path to a directory containing '10-10.xls'
        and '10-5.xls'. If list, should be paths to the fold toolbox files.
        See Notes for details.
    atlas : str
        Brain atlas to use.
    interpolate : bool
        If the optimal source-detector pair is not found in the fOLD files
        False (default) will yield no results for that pairing, whereas
        True will use the next closest match. See Notes for an example.

        .. warning:: The sensitivity profile can differ substantially for
                     nearest neighbors, so use ``interpolate=True`` with
                     caution.

    Returns
    -------
    spec : list of DataFrame
        List of dataframes, one for each channel.

    See Also
    --------
    fold_landmark_specificity

    Notes
    -----
    **fOLD Toolbox**

    Specificity values are provided by the fOLD toolbox
    :footcite:`morais2018fnirs` excel files.
    For licensing reasons, these files are not distributed with MNE-NIRS.
    You need to download them from
    `the author's website <https://github.com/nirx/fOLD-public>`__.
    To automatically utilize the ``MNE_NIRS_FOLD_PATH`` config for the
    ``fold_files`` parameter, you can download the entire ``fOLD-public``
    repository `as a zip <https://github.com/nirx/fOLD-public/archive/refs/heads/master.zip>`__
    and expand it to some suitable location like
    ``~/mne_data/fOLD/fOLD-public-master``, and then set the config value
    on your machine by using :func:`mne:mne.set_config` like::

        >>> mne.set_config('MNE_NIRS_FOLD_PATH', '~/mne_data/fOLD/fOLD-public-master/Supplementary')

    From then on, :func:`~mne_nirs.io.fold_channel_specificity` and
    :func:`~mne_nirs.io.fold_landmark_specificity` will automatically use this
    directory to find the fOLD xls files when you pass ``fold_files=None``
    (which is the default). We recommend following this procedure so that
    the files can be reused automatically.

    **Interpolation**

    For an example of interpolation, consider the pairings P5-PO7 and P6-PO8,
    neither of which is listed in the fOLD toolbox:

    - With ``interpolate=False``, the returned ``spec`` will not have entries
      for these channels.
    - With ``interpolate=True``, entries like P7-PO7 and P8-PO8, respectively,
      might be used instead. A warning is emitted if such substitutions are
      made.

    References
    ----------
    .. footbibliography::
    """  # noqa: E501
    _validate_type(raw, BaseRaw, 'raw')

    reference_locations = _generate_montage_locations()

    fold_tbl = _check_load_fold(fold_files, atlas)

    chan_spec = list()
    for cidx in range(len(raw.ch_names)):

        tbl = _source_detector_fold_table(raw, cidx, reference_locations,
                                          fold_tbl, interpolate)
        chan_spec.append(tbl.reset_index(drop=True))

    return chan_spec
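A hedged usage sketch: with `MNE_NIRS_FOLD_PATH` configured as described in the Notes, each returned DataFrame holds the fOLD rows for one channel, including the `Landmark` and `Specificity` columns used by fold_landmark_specificity above:

spec = fold_channel_specificity(raw)
print(spec[0][['Landmark', 'Specificity']])  # fOLD rows for the first channel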