Example #1
def test_channel_specificity(monkeypatch, tmp_path, fold_files):
    raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)
    raw.pick(range(2))
    kwargs = dict()
    n_want = 6
    if fold_files is list:
        kwargs = dict(fold_files=[foldfile])
    elif fold_files is str:
        kwargs = dict(fold_files=tmp_path)
        n_want *= 2
    else:
        assert fold_files is None
        monkeypatch.setenv('MNE_NIRS_FOLD_PATH', str(tmp_path))
        assert len(kwargs) == 0
        with pytest.raises(FileNotFoundError, match=r'fold_files\[0\] does.*'):
            fold_channel_specificity(raw)
        n_want *= 2
    copyfile(foldfile, tmp_path / '10-10.xls')
    copyfile(foldfile, tmp_path / '10-5.xls')
    res = fold_channel_specificity(raw, **kwargs)
    assert len(res) == 2
    assert res[0].shape == (n_want, 14)
    montage = make_standard_montage(
        'standard_1005', head_size=0.09700884729534559)
    fids = read_fiducials(
        Path(mne.__file__).parent / 'data' / 'fsaverage' /
        'fsaverage-fiducials.fif')[0]
    for f in fids:
        f['coord_frame'] = montage.dig[0]['coord_frame']
    montage.dig[:3] = fids
    S, D = raw.ch_names[0].split()[0].split('_')
    assert S == 'S1' and D == 'D2'
    montage.rename_channels({'PO8': S, 'P6': D})  # not in the tables!
    # taken from standard_1020.elc
    s_mri = np.array([55.6666, -97.6251, 2.7300]) / 1000.
    d_mri = np.array([67.8877, -75.9043, 28.0910]) / 1000.
    trans = mne.transforms._get_trans('fsaverage', 'mri', 'head')[0]
    ch_pos = montage.get_positions()['ch_pos']
    assert_allclose(ch_pos[S], s_mri, atol=1e-6)
    assert_allclose(ch_pos[D], d_mri, atol=1e-6)
    raw.set_montage(montage)
    montage = transform_to_head(montage)
    s_head = mne.transforms.apply_trans(trans, s_mri)
    d_head = mne.transforms.apply_trans(trans, d_mri)
    assert_allclose(montage._get_ch_pos()['S1'], s_head, atol=1e-6)
    assert_allclose(montage._get_ch_pos()['D2'], d_head, atol=1e-6)
    for ch in raw.info['chs']:
        assert_allclose(ch['loc'][3:6], s_head, atol=1e-6)
        assert_allclose(ch['loc'][6:9], d_head, atol=1e-6)
    res_1 = fold_channel_specificity(raw, **kwargs)[0]
    assert res_1.shape == (0, 14)
    # TODO: This is wrong, should be PO8 not PO8h, and distance should be 0 mm!
    with pytest.warns(RuntimeWarning, match='.*PO8h?/P6.*TP8/T8.*'):
        res_1 = fold_channel_specificity(raw, interpolate=True, **kwargs)[0]
    montage.rename_channels({S: D, D: S})  # reversed
    with pytest.warns(RuntimeWarning, match='.*PO8h?/P6.*TP8/T8.*'):
        res_2 = fold_channel_specificity(raw, interpolate=True, **kwargs)[0]
    # We should check the whole thing, but this is probably good enough
    assert (res_1['Specificity'] == res_2['Specificity']).all()
Example #2
def test_read_dig_captrak(tmpdir):
    """Test reading a captrak montage file."""
    EXPECTED_CH_NAMES_OLD = [
        'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1',
        'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4',
        'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6',
        'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'GND', 'O1', 'O2',
        'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3',
        'PO4', 'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'REF', 'T7', 'T8', 'TP10',
        'TP7', 'TP8', 'TP9'
    ]
    EXPECTED_CH_NAMES = [
        'T7', 'FC5', 'F7', 'C5', 'FT7', 'FT9', 'TP7', 'TP9', 'P7', 'CP5',
        'PO7', 'C3', 'CP3', 'P5', 'P3', 'PO3', 'PO9', 'O1', 'Oz', 'POz', 'O2',
        'PO4', 'P1', 'Pz', 'P2', 'CP2', 'CP1', 'CPz', 'Cz', 'C1', 'FC1', 'FC3',
        'REF', 'F3', 'F1', 'Fz', 'F5', 'AF7', 'AF3', 'Fp1', 'GND', 'F2', 'AF4',
        'Fp2', 'F4', 'F8', 'F6', 'AF8', 'FC2', 'FC6', 'FC4', 'C2', 'C4', 'P4',
        'CP4', 'PO8', 'P8', 'P6', 'CP6', 'PO10', 'TP10', 'TP8', 'FT10', 'T8',
        'C6', 'FT8'
    ]
    assert set(EXPECTED_CH_NAMES) == set(EXPECTED_CH_NAMES_OLD)
    montage = read_dig_captrak(
        fname=op.join(data_path, 'montage', 'captrak_coords.bvct'))

    # XXX: remove with 0.22 once read_dig_captrack is deprecated
    with pytest.warns(DeprecationWarning,
                      match='read_dig_captrack is deprecated'):
        montage2 = read_dig_captrack(
            fname=op.join(data_path, 'montage', 'captrak_coords.bvct'))
        assert repr(montage) == repr(montage2)

    assert montage.ch_names == EXPECTED_CH_NAMES
    assert repr(montage) == (
        '<DigMontage | '
        '0 extras (headshape), 0 HPIs, 3 fiducials, 66 channels>')

    montage = transform_to_head(montage)  # transform_to_head has to be tested
    _check_roundtrip(montage=montage, fname=str(tmpdir.join('bvct_test.fif')))

    fid, _ = _get_fid_coords(montage.dig)
    assert_allclose(
        actual=np.array([fid.nasion, fid.lpa, fid.rpa]),
        desired=[[0, 0.11309, 0], [-0.09189, 0, 0], [0.09240, 0, 0]],
        atol=1e-5,
    )

    raw_bv = read_raw_brainvision(bv_raw_fname)
    raw_bv.set_channel_types({"HEOG": 'eog', "VEOG": 'eog', "ECG": 'ecg'})

    raw_bv.set_montage(montage)

    test_raw_bv = read_raw_fif(bv_fif_fname)

    # compare after set_montage using chs loc.
    for actual, expected in zip(raw_bv.info['chs'], test_raw_bv.info['chs']):
        assert_allclose(actual['loc'][:3], expected['loc'][:3])
        if actual['kind'] == FIFF.FIFFV_EEG_CH:
            assert_allclose(actual['loc'][3:6], [-0.005103, 0.05395, 0.144622],
                            rtol=1e-04)
Example #3
def test_fif_dig_montage():
    """Test FIF dig montage support."""
    dig_montage = read_dig_fif(fif_dig_montage_fname)

    # test round-trip IO
    temp_dir = _TempDir()
    fname_temp = op.join(temp_dir, 'test.fif')
    _check_roundtrip(dig_montage, fname_temp)

    # Make a BrainVision file like the one the user would have had
    raw_bv = read_raw_brainvision(bv_fname, preload=True)
    raw_bv_2 = raw_bv.copy()
    mapping = dict()
    for ii, ch_name in enumerate(raw_bv.ch_names):
        mapping[ch_name] = 'EEG%03d' % (ii + 1,)
    raw_bv.rename_channels(mapping)
    for ii, ch_name in enumerate(raw_bv_2.ch_names):
        mapping[ch_name] = 'EEG%03d' % (ii + 33,)
    raw_bv_2.rename_channels(mapping)
    raw_bv.add_channels([raw_bv_2])
    for ch in raw_bv.info['chs']:
        ch['kind'] = FIFF.FIFFV_EEG_CH

    # Set the montage
    raw_bv.set_montage(dig_montage)

    # Check the result
    evoked = read_evokeds(evoked_fname)[0]

    # check info[chs] matches
    assert_equal(len(raw_bv.ch_names), len(evoked.ch_names) - 1)
    for ch_py, ch_c in zip(raw_bv.info['chs'], evoked.info['chs'][:-1]):
        assert_equal(ch_py['ch_name'],
                     ch_c['ch_name'].replace('EEG ', 'EEG'))
        # C actually says it's unknown, but it's not (?):
        # assert_equal(ch_py['coord_frame'], ch_c['coord_frame'])
        assert_equal(ch_py['coord_frame'], FIFF.FIFFV_COORD_HEAD)
        c_loc = ch_c['loc'].copy()
        c_loc[c_loc == 0] = np.nan
        assert_allclose(ch_py['loc'], c_loc, atol=1e-7)

    # check info[dig]
    assert_dig_allclose(raw_bv.info, evoked.info)

    # Roundtrip of non-FIF start
    montage = make_dig_montage(hsp=read_polhemus_fastscan(hsp),
                               hpi=read_mrk(hpi))
    elp_points = read_polhemus_fastscan(elp)
    ch_pos = {"EEG%03d" % (k + 1): pos for k, pos in enumerate(elp_points[8:])}
    montage += make_dig_montage(nasion=elp_points[0],
                                lpa=elp_points[1],
                                rpa=elp_points[2],
                                ch_pos=ch_pos)

    pytest.raises(RuntimeError, montage.save, fname_temp)  # must be head coord

    montage = transform_to_head(montage)
    _check_roundtrip(montage, fname_temp)
Example #4
def test_egi_dig_montage():
    """Test EGI MFF XML dig montage support."""
    dig_montage = read_dig_egi(egi_dig_montage_fname)
    fid, coord = _get_fid_coords(dig_montage.dig)

    assert coord == FIFF.FIFFV_COORD_UNKNOWN
    assert_allclose(
        actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),
        desired=[
            [0., 10.564, -2.051],  # noqa
            [-8.592, 0.498, -4.128],  # noqa
            [8.592, 0.498, -4.128]
        ],  # noqa
    )

    # Test accuracy and embedding within raw object
    raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d')

    raw_egi.set_montage(dig_montage)
    test_raw_egi = read_raw_fif(egi_fif_fname)

    assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names))
    for ch_raw, ch_test_raw in zip(raw_egi.info['chs'],
                                   test_raw_egi.info['chs']):
        assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name'])
        assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD)
        assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7)

    assert_dig_allclose(raw_egi.info, test_raw_egi.info)

    dig_montage_in_head = transform_to_head(dig_montage.copy())
    fid, coord = _get_fid_coords(dig_montage_in_head.dig)
    assert coord == FIFF.FIFFV_COORD_HEAD
    assert_allclose(
        actual=np.array([fid[key] for key in ['nasion', 'lpa', 'rpa']]),
        desired=[[0., 10.278, 0.], [-8.592, 0., 0.], [8.592, 0., 0.]],
        atol=1e-4,
    )

    # test round-trip IO
    temp_dir = _TempDir()
    fname_temp = op.join(temp_dir, 'egi_test.fif')
    _check_roundtrip(dig_montage_in_head, fname_temp)  # XXX: write forces head
Example #5
def test_egi_dig_montage():
    """Test EGI MFF XML dig montage support."""
    with pytest.deprecated_call():
        dig_montage = read_dig_montage(egi=egi_dig_montage_fname, unit='m')

    # test round-trip IO
    temp_dir = _TempDir()
    fname_temp = op.join(temp_dir, 'egi_test.fif')
    _check_roundtrip(dig_montage, fname_temp)

    with pytest.deprecated_call():
        # Test coordinate transform
        # dig_montage.transform_to_head()  # XXX: this call had no effect!!
        # nasion
        assert_almost_equal(dig_montage.nasion[0], 0)
        assert_almost_equal(dig_montage.nasion[2], 0)
        # lpa and rpa
        assert_allclose(dig_montage.lpa[1:], 0, atol=1e-16)
        assert_allclose(dig_montage.rpa[1:], 0, atol=1e-16)

    # Test accuracy and embedding within raw object
    raw_egi = read_raw_egi(egi_raw_fname, channel_naming='EEG %03d')
    raw_egi.set_montage(dig_montage)
    test_raw_egi = read_raw_fif(egi_fif_fname)

    assert_equal(len(raw_egi.ch_names), len(test_raw_egi.ch_names))
    for ch_raw, ch_test_raw in zip(raw_egi.info['chs'],
                                   test_raw_egi.info['chs']):
        assert_equal(ch_raw['ch_name'], ch_test_raw['ch_name'])
        assert_equal(ch_raw['coord_frame'], FIFF.FIFFV_COORD_HEAD)
        assert_allclose(ch_raw['loc'], ch_test_raw['loc'], atol=1e-7)
    assert_dig_allclose(raw_egi.info, test_raw_egi.info)

    # Test old way matches new way
    with pytest.deprecated_call():
        dig_montage = read_dig_montage(egi=egi_dig_montage_fname, unit='m')
    dig_montage_egi = read_dig_egi(egi_dig_montage_fname)
    dig_montage_egi = transform_to_head(dig_montage_egi)
    assert dig_montage.dig == dig_montage_egi.dig
    assert object_diff(dig_montage.ch_names, dig_montage_egi.ch_names) == ''
Example #6
def test_read_dig_captrack(tmpdir):
    """Test reading a captrack montage file."""
    EXPECTED_CH_NAMES = [
        'AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1',
        'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4',
        'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6',
        'FT10', 'FT7', 'FT8', 'FT9', 'Fp1', 'Fp2', 'Fz', 'O1', 'O2', 'Oz',
        'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO10', 'PO3', 'PO4',
        'PO7', 'PO8', 'PO9', 'POz', 'Pz', 'T7', 'T8', 'TP10', 'TP7', 'TP8',
        'TP9'
    ]
    montage = read_dig_captrack(
        fname=op.join(data_path, 'montage', 'captrak_coords.bvct')
    )

    assert montage.ch_names == EXPECTED_CH_NAMES
    assert montage.__repr__() == (
        '<DigMontage | '
        '0 extras (headshape), 0 HPIs, 3 fiducials, 64 channels>'
    )

    montage = transform_to_head(montage)  # transform_to_head has to be tested
    _check_roundtrip(montage=montage, fname=str(tmpdir.join('bvct_test.fif')))

    with pytest.deprecated_call():
        assert_allclose(
            actual=np.array([montage.nasion, montage.lpa, montage.rpa]),
            desired=[[0, 0.11309, 0], [-0.09189, 0, 0], [0.09240, 0, 0]],
            atol=1e-5,
        )

    # I think that comparing dig should be enough. cc: @sappelhoff
    raw_bv = read_raw_brainvision(bv_raw_fname)
    with pytest.warns(RuntimeWarning, match='Did not set 3 channel pos'):
        raw_bv.set_montage(montage)
    test_raw_bv = read_raw_fif(bv_fif_fname)

    assert_dig_allclose(raw_bv.info, test_raw_bv.info)
Example #7
def test_transform_to_head_and_compute_dev_head_t():
    """Test transform_to_head and compute_dev_head_t."""
    EXPECTED_DEV_HEAD_T = \
        [[-3.72201691e-02, -9.98212167e-01, -4.67667497e-02, -7.31583414e-04],
         [8.98064989e-01, -5.39382685e-02, 4.36543170e-01, 1.60134431e-02],
         [-4.38285221e-01, -2.57513699e-02, 8.98466990e-01, 6.13035748e-02],
         [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]

    EXPECTED_FID_IN_POLHEMUS = {
        'nasion': np.array([0.001393, 0.0131613, -0.0046967]),
        'lpa': np.array([-0.0624997, -0.0737271, 0.07996]),
        'rpa': np.array([-0.0748957, 0.0873785, 0.0811943]),
    }

    EXPECTED_FID_IN_HEAD = {
        'nasion': np.array([-8.94466792e-18, 1.10559624e-01, -3.85185989e-34]),
        'lpa': np.array([-8.10816716e-02, 6.56321671e-18, 0]),
        'rpa': np.array([8.05048781e-02, -6.47441364e-18, 0]),
    }

    hpi_dev = np.array(
        [[ 2.13951493e-02,  8.47444056e-02, -5.65431188e-02],  # noqa
         [ 2.10299433e-02, -8.03141101e-02, -6.34420259e-02],  # noqa
         [ 1.05916829e-01,  8.18485672e-05,  1.19928083e-02],  # noqa
         [ 9.26595105e-02,  4.64804385e-02,  8.45141253e-03],  # noqa
         [ 9.42554419e-02, -4.35206589e-02,  8.78999363e-03]]  # noqa
    )

    hpi_polhemus = np.array(
        [[-0.0595004, -0.0704836,  0.075893 ],  # noqa
         [-0.0646373,  0.0838228,  0.0762123],  # noqa
         [-0.0135035,  0.0072522, -0.0268405],  # noqa
         [-0.0202967, -0.0351498, -0.0129305],  # noqa
         [-0.0277519,  0.0452628, -0.0222407]]  # noqa
    )

    montage_polhemus = make_dig_montage(
        **EXPECTED_FID_IN_POLHEMUS, hpi=hpi_polhemus, coord_frame='unknown'
    )

    montage_meg = make_dig_montage(hpi=hpi_dev, coord_frame='meg')

    # Test regular workflow to get dev_head_t
    montage = montage_polhemus + montage_meg
    fids, _ = _get_fid_coords(montage.dig)
    for kk in fids:
        assert_allclose(fids[kk], EXPECTED_FID_IN_POLHEMUS[kk], atol=1e-5)

    with pytest.raises(ValueError, match='set to head coordinate system'):
        _ = compute_dev_head_t(montage)

    montage = transform_to_head(montage)

    fids, _ = _get_fid_coords(montage.dig)
    for kk in fids:
        assert_allclose(fids[kk], EXPECTED_FID_IN_HEAD[kk], atol=1e-5)

    dev_head_t = compute_dev_head_t(montage)
    assert_allclose(dev_head_t['trans'], EXPECTED_DEV_HEAD_T, atol=1e-7)

    # Test errors when number of HPI points do not match
    EXPECTED_ERR_MSG = 'Device-to-Head .*Got 0 .*device and 5 points in head'
    with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
        _ = compute_dev_head_t(transform_to_head(montage_polhemus))

    EXPECTED_ERR_MSG = 'Device-to-Head .*Got 5 .*device and 0 points in head'
    with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
        _ = compute_dev_head_t(transform_to_head(
            montage_meg + make_dig_montage(**EXPECTED_FID_IN_POLHEMUS)
        ))

    EXPECTED_ERR_MSG = 'Device-to-Head .*Got 3 .*device and 5 points in head'
    with pytest.raises(ValueError, match=EXPECTED_ERR_MSG):
        _ = compute_dev_head_t(transform_to_head(
            DigMontage(dig=_format_dig_points(montage_meg.dig[:3])) +
            montage_polhemus
        ))
Example #8
def plot_3d_montage(info,
                    view_map,
                    *,
                    src_det_names='auto',
                    ch_names='numbered',
                    subject='fsaverage',
                    trans='fsaverage',
                    surface='pial',
                    subjects_dir=None,
                    verbose=None):
    """
    Plot a 3D sensor montage.

    Parameters
    ----------
    info : instance of Info
        Measurement info.
    view_map : dict
        Dict of view (key) to channel-pair-numbers (value) to use when
        plotting. Note that, because these get plotted as 1-based channel
        *numbers*, the values should be 1-based rather than 0-based.
        The keys are of the form:

        ``'{side}-{view}'``
            For views like ``'left-lat'`` or ``'right-frontal'`` where the side
            matters.
        ``'{view}'``
            For views like ``'caudal'`` that are along the midline.

        See :meth:`mne.viz.Brain.show_view` for ``view`` options, and the
        Examples section below for usage examples.
    src_det_names : None | dict | str
        Source and detector names to use. "auto" (default) will see if the
        channel locations correspond to standard 10-20 locations and will
        use those if they do (otherwise will act like None). None will use
        S1, S2, ..., D1, D2, ..., etc. Can also be an explicit dict mapping,
        for example::

            src_det_names=dict(S1='Fz', D1='FCz', ...)
    ch_names : str | dict | None
        If ``'numbered'`` (default), use ``['1', '2', ...]`` for the channel
        names, or ``None`` to use ``['S1_D2', 'S2_D1', ...]``. Can also be a
        dict to provide a mapping from the ``'S1_D2'``-style names (keys) to
        other names, e.g., ``defaultdict(lambda: '')`` will prevent showing
        the names altogether.

        .. versionadded:: 0.3
    subject : str
        The subject.
    trans : str | Transform
        The subject's head<->MRI transform.
    surface : str
        The FreeSurfer surface name (e.g., 'pial', 'white').
    subjects_dir : str
        The subjects directory.
    %(verbose)s

    Returns
    -------
    figure : matplotlib.figure.Figure
        The matplotlib figure containing the rendered montage image.

    Examples
    --------
    For a Hitachi system with two sets of 12 source-detector arrangements,
    one on each side of the head, channels 1-12 can be shown on the left and
    13-24 on the right using the following ``view_map``::

        >>> view_map = {
        ...     'left-lat': np.arange(1, 13),
        ...     'right-lat': np.arange(13, 25),
        ... }

    NIRx typically involves more complicated arrangements. See
    :ref:`the 3D tutorial <tut-fnirs-vis-brain-plot-3d-montage>` for
    an advanced example that incorporates the ``'caudal'`` view as well.
    """  # noqa: E501
    import matplotlib.pyplot as plt
    from scipy.spatial.distance import cdist
    _validate_type(info, Info, 'info')
    _validate_type(view_map, dict, 'views')
    _validate_type(src_det_names, (None, dict, str), 'src_det_names')
    _validate_type(ch_names, (dict, str, None), 'ch_names')
    info = pick_info(info, pick_types(info, fnirs=True, exclude=())[::2])
    if isinstance(ch_names, str):
        _check_option('ch_names', ch_names, ('numbered', ), extra='when str')
        ch_names = {
            name.split()[0]: str(ni)
            for ni, name in enumerate(info['ch_names'], 1)
        }
    info['bads'] = []
    if isinstance(src_det_names, str):
        _check_option('src_det_names',
                      src_det_names, ('auto', ),
                      extra='when str')
        # Decide if we can map to 10-20 locations
        names, pos = zip(
            *transform_to_head(make_standard_montage(
                'standard_1020')).get_positions()['ch_pos'].items())
        pos = np.array(pos, float)
        locs = dict()
        bad = False
        for ch in info['chs']:
            name = ch['ch_name']
            s_name, d_name = name.split()[0].split('_')
            for name, loc in [(s_name, ch['loc'][3:6]),
                              (d_name, ch['loc'][6:9])]:
                if name in locs:
                    continue
                # see if it's close enough
                idx = np.where(cdist(loc[np.newaxis], pos)[0] < 1e-3)[0]
                if len(idx) < 1:
                    bad = True
                    break
                # Some are duplicated (e.g., T7+T3) but we can rely on the
                # first one being the canonical one
                locs[name] = names[idx[0]]
            if bad:
                break
        if bad:
            src_det_names = None
            logger.info('Could not automatically map source/detector names to '
                        '10-20 locations.')
        else:
            src_det_names = locs
            logger.info('Source-detector names automatically mapped to 10-20 '
                        'locations')

    head_mri_t = _get_trans(trans, 'head', 'mri')[0]
    del trans
    views = list()
    for key, num in view_map.items():
        _validate_type(key, str, f'view_map key {repr(key)}')
        _validate_type(num, np.ndarray, f'view_map[{repr(key)}]')
        if '-' in key:
            hemi, v = key.split('-', maxsplit=1)
            hemi = dict(left='lh', right='rh')[hemi]
            views.append((hemi, v, num))
        else:
            views.append(('lh', key, num))
    del view_map
    size = (400 * len(views), 400)
    brain = Brain(subject,
                  'both',
                  surface,
                  views=['lat'] * len(views),
                  size=size,
                  background='w',
                  units='m',
                  view_layout='horizontal',
                  subjects_dir=subjects_dir)
    with _safe_brain_close(brain):
        brain.add_head(dense=False, alpha=0.1)
        brain.add_sensors(info,
                          trans=head_mri_t,
                          fnirs=['channels', 'pairs', 'sources', 'detectors'])
        add_text_kwargs = dict()
        if 'render' in _get_args(brain.plotter.add_text):
            add_text_kwargs['render'] = False
        for col, view in enumerate(views):
            plotted = set()
            brain.show_view(view[1],
                            hemi=view[0],
                            focalpoint=(0, -0.02, 0.02),
                            distance=0.4,
                            row=0,
                            col=col)
            brain.plotter.subplot(0, col)
            vp = brain.plotter.renderer
            for ci in view[2]:  # figure out what we need to plot
                this_ch = info['chs'][ci - 1]
                ch_name = this_ch['ch_name'].split()[0]
                s_name, d_name = ch_name.split('_')
                needed = [
                    (ch_names, 'ch_names', ch_name, this_ch['loc'][:3], 12,
                     'Centered'),
                    (src_det_names, 'src_det_names', s_name,
                     this_ch['loc'][3:6], 8, 'Bottom'),
                    (src_det_names, 'src_det_names', d_name,
                     this_ch['loc'][6:9], 8, 'Bottom'),
                ]
                for lookup, lname, name, ch_pos, font_size, va in needed:
                    if name in plotted:
                        continue
                    plotted.add(name)
                    orig_name = name
                    if lookup is not None:
                        name = lookup[name]
                    _validate_type(name, str, f'{lname}[{repr(orig_name)}]')
                    ch_pos = apply_trans(head_mri_t, ch_pos)
                    vp.SetWorldPoint(np.r_[ch_pos, 1.])
                    vp.WorldToDisplay()
                    ch_pos = (np.array(vp.GetDisplayPoint()[:2]) -
                              np.array(vp.GetOrigin()))
                    actor = brain.plotter.add_text(name,
                                                   ch_pos,
                                                   font_size=font_size,
                                                   color=(0., 0., 0.),
                                                   **add_text_kwargs)
                    prop = actor.GetTextProperty()
                    getattr(prop, f'SetVerticalJustificationTo{va}')()
                    prop.SetJustificationToCentered()
                    actor.SetTextProperty(prop)
                    prop.SetBold(True)
        img = brain.screenshot()
    return plt.figimage(img, resize=True).figure
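
The docstring of Example #8 describes how ``view_map`` maps view names to 1-based channel numbers. As a complement, the following is a minimal, hedged usage sketch of how such a call might look for a NIRx-style recording; the folder name 'example_nirx_folder', the 24-pair arrangement, and the chosen view keys are illustrative assumptions, not values taken from the examples above.

import numpy as np
import mne
from mne_nirs.visualisation import plot_3d_montage

# Hypothetical NIRx recording (the path is a placeholder, not a real dataset).
raw = mne.io.read_raw_nirx('example_nirx_folder', preload=False)

# Assumed layout: pairs 1-12 on the left lateral view, 13-24 on the right.
view_map = {
    'left-lat': np.arange(1, 13),
    'right-lat': np.arange(13, 25),
}

# Defaults from the signature above use the fsaverage head and transform;
# subjects_dir=None is assumed to resolve via the configured SUBJECTS_DIR.
fig = plot_3d_montage(raw.info, view_map=view_map, subjects_dir=None)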