Example #1
def test_fnirs_check_bads(fname):
    """Test checking of bad markings."""
    # No bad channels, so these should all pass
    raw = read_raw_nirx(fname)
    _fnirs_check_bads(raw.info)
    raw = optical_density(raw)
    _fnirs_check_bads(raw.info)
    raw = beer_lambert_law(raw)
    _fnirs_check_bads(raw.info)

    # Mark pairs of bad channels, so these should all pass
    raw = read_raw_nirx(fname)
    raw.info['bads'] = raw.ch_names[0:2]
    _fnirs_check_bads(raw.info)
    raw = optical_density(raw)
    _fnirs_check_bads(raw.info)
    raw = beer_lambert_law(raw)
    _fnirs_check_bads(raw.info)

    # Mark single channel as bad, so these should all fail
    raw = read_raw_nirx(fname)
    raw.info['bads'] = raw.ch_names[0:1]
    pytest.raises(RuntimeError, _fnirs_check_bads, raw.info)
    with pytest.raises(RuntimeError, match='bad labelling'):
        raw = optical_density(raw)
    pytest.raises(RuntimeError, _fnirs_check_bads, raw.info)
    with pytest.raises(RuntimeError, match='bad labelling'):
        raw = beer_lambert_law(raw)
    pytest.raises(RuntimeError, _fnirs_check_bads, raw.info)
Example #2
def test_beer_lambert_v_matlab():
    """Compare MNE results to MATLAB toolbox."""
    from pymatreader import read_mat
    raw = read_raw_nirx(fname_nirx_15_0)
    raw = optical_density(raw)
    raw = beer_lambert_law(raw, ppf=0.121)
    raw._data *= 1e6  # Scale to uM for comparison to MATLAB

    matlab_fname = op.join(testing_path, 'NIRx', 'nirscout', 'validation',
                           'nirx_15_0_recording_bl.mat')
    matlab_data = read_mat(matlab_fname)

    matlab_names = ["_"] * len(raw.ch_names)
    for idx in range(len(raw.ch_names)):
        matlab_names[idx] = ("S" + str(int(matlab_data['sources'][idx])) +
                             "_D" + str(int(matlab_data['detectors'][idx])) +
                             " " + matlab_data['type'][idx])
    matlab_to_mne = np.argsort(matlab_names)

    for idx in range(raw.get_data().shape[0]):

        matlab_idx = matlab_to_mne[idx]

        mean_error = np.mean(matlab_data['data'][:, matlab_idx] -
                             raw._data[idx])
        assert mean_error < 0.1
        matlab_name = ("S" + str(int(matlab_data['sources'][matlab_idx])) +
                       "_D" + str(int(matlab_data['detectors'][matlab_idx])) +
                       " " + matlab_data['type'][matlab_idx])
        assert raw.info['ch_names'][idx] == matlab_name
Example #3
def test_temporal_derivative_distribution_repair(fname, tmp_path):
    """Test running artifact rejection."""
    raw = read_raw_nirx(fname)
    raw_od = optical_density(raw)
    raw_hb = beer_lambert_law(raw_od)

    # With optical densities
    # Add a baseline shift artifact about half way through data
    max_shift = np.max(np.diff(raw_od._data[0]))
    shift_amp = 5 * max_shift
    raw_od._data[0, 0:30] = raw_od._data[0, 0:30] - shift_amp
    # make one channel zero std
    raw_od._data[1] = 0.
    raw_od._data[2] = 1.
    assert np.max(np.diff(raw_od._data[0])) > shift_amp
    # Ensure that applying the algorithm reduces the step change
    raw_od = tddr(raw_od)
    assert np.max(np.diff(raw_od._data[0])) < shift_amp
    assert_allclose(raw_od._data[1], 0.)  # unchanged
    assert_allclose(raw_od._data[2], 1.)  # unchanged

    # With Hb
    # Add a baseline shift artifact about half way through data
    max_shift = np.max(np.diff(raw_hb._data[0]))
    shift_amp = 5 * max_shift
    raw_hb._data[0, 0:30] = raw_hb._data[0, 0:30] - (1.1 * shift_amp)
    # make one channel zero std
    raw_hb._data[1] = 0.
    raw_hb._data[2] = 1.
    assert np.max(np.diff(raw_hb._data[0])) > shift_amp
    # Ensure that applying the algorithm reduces the step change
    raw_hb = tddr(raw_hb)
    assert np.max(np.diff(raw_hb._data[0])) < shift_amp
    assert_allclose(raw_hb._data[1], 0.)  # unchanged
    assert_allclose(raw_hb._data[2], 1.)  # unchanged
Example #4
def epoch_preprocessing(bids_path):

    with open(os.devnull, "w") as f, contextlib.redirect_stdout(f):
        raw_intensity = read_raw_bids(bids_path=bids_path).load_data()

    raw_od = optical_density(raw_intensity)
    raw_od.resample(1.5)
    raw_haemo = beer_lambert_law(raw_od, ppf=6)
    raw_haemo = raw_haemo.filter(None,
                                 0.6,
                                 h_trans_bandwidth=0.05,
                                 l_trans_bandwidth=0.01,
                                 verbose=False)

    events, event_dict = events_from_annotations(raw_haemo, verbose=False)
    epochs = Epochs(raw_haemo,
                    events,
                    event_id=event_dict,
                    tmin=-5,
                    tmax=30,
                    reject=dict(hbo=100e-6),
                    reject_by_annotation=True,
                    proj=True,
                    baseline=(None, 0),
                    detrend=1,
                    preload=True,
                    verbose=False)

    epochs = epochs[["Control", "Audio"]]
    return raw_haemo, epochs
Example #5
def test_beer_lambert(fname, fmt, tmpdir):
    """Test converting NIRX files."""
    assert fmt in ('nirx', 'fif')
    raw = read_raw_nirx(fname)
    if fmt == 'fif':
        raw.save(tmpdir.join('test_raw.fif'))
        raw = read_raw_fif(tmpdir.join('test_raw.fif'))
    assert 'fnirs_cw_amplitude' in raw
    with pytest.deprecated_call():
        assert 'fnirs_raw' in raw
    assert 'fnirs_od' not in raw
    raw = optical_density(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    with pytest.deprecated_call():
        assert 'fnirs_raw' not in raw
    assert 'fnirs_od' in raw
    assert 'hbo' not in raw
    raw = beer_lambert_law(raw)
    _validate_type(raw, BaseRaw, 'raw')
    assert 'fnirs_cw_amplitude' not in raw
    with pytest.deprecated_call():
        assert 'fnirs_raw' not in raw
    assert 'fnirs_od' not in raw
    assert 'hbo' in raw
    assert 'hbr' in raw
Example #6
def test_fnirs_picks():
    """Test picking of fnirs types after different conversions."""
    raw = read_raw_nirx(fname_nirx_15_0)
    picks = _picks_to_idx(raw.info, 'fnirs_cw_amplitude')
    assert len(picks) == len(raw.ch_names)
    raw_subset = raw.copy().pick(picks='fnirs_cw_amplitude')
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_CW_AMPLITUDE

    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_od')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'hbo')
    pytest.raises(ValueError, _picks_to_idx, raw.info, ['hbr'])
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_fd_phase')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'junk')

    raw = optical_density(raw)
    picks = _picks_to_idx(raw.info, 'fnirs_od')
    assert len(picks) == len(raw.ch_names)
    raw_subset = raw.copy().pick(picks='fnirs_od')
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_OD

    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['fnirs_cw_amplitude', 'fnirs_od', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_cw_amplitude')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'hbo')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'hbr')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_fd_phase')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'junk')

    raw = beer_lambert_law(raw)
    picks = _picks_to_idx(raw.info, 'hbo')
    assert len(picks) == len(raw.ch_names) / 2
    raw_subset = raw.copy().pick(picks='hbo')
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBO

    picks = _picks_to_idx(raw.info, ['hbr'])
    assert len(picks) == len(raw.ch_names) / 2
    raw_subset = raw.copy().pick(picks=['hbr'])
    for ch in raw_subset.info["chs"]:
        assert ch['coil_type'] == FIFF.FIFFV_COIL_FNIRS_HBR

    picks = _picks_to_idx(raw.info, ['hbo', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['hbo', 'fnirs_od', 'hbr'])
    assert len(picks) == len(raw.ch_names)
    picks = _picks_to_idx(raw.info, ['hbo', 'fnirs_od'])
    assert len(picks) == len(raw.ch_names) / 2
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_cw_amplitude')
    pytest.raises(ValueError, _picks_to_idx, raw.info, ['fnirs_od'])
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'junk')
    pytest.raises(ValueError, _picks_to_idx, raw.info, 'fnirs_fd_phase')
Example #7
def test_order_agnostic(nirx_snirf):
    """Test that order does not matter to (pre)processing results."""
    raw_nirx, raw_snirf = nirx_snirf
    raw_random = raw_nirx.copy().pick(
        np.random.RandomState(0).permutation(len(raw_nirx.ch_names)))
    raws = dict(nirx=raw_nirx, snirf=raw_snirf, random=raw_random)
    del raw_nirx, raw_snirf, raw_random
    orders = dict()
    # continuous wave
    for key, r in raws.items():
        assert set(r.get_channel_types()) == {'fnirs_cw_amplitude'}
        orders[key] = [
            r.ch_names.index(name) for name in raws['nirx'].ch_names
        ]
        assert_array_equal(raws['nirx'].ch_names,
                           np.array(r.ch_names)[orders[key]])
        assert_allclose(raws['nirx'].get_data(),
                        r.get_data(orders[key]),
                        err_msg=key)
    assert_array_equal(orders['nirx'], np.arange(len(raws['nirx'].ch_names)))
    # optical density
    for key, r in raws.items():
        raws[key] = r = optical_density(r)
        assert_allclose(raws['nirx'].get_data(),
                        r.get_data(orders[key]),
                        err_msg=key)
        assert set(r.get_channel_types()) == {'fnirs_od'}
    # scalp-coupling index
    sci = dict()
    for key, r in raws.items():
        sci[key] = r = scalp_coupling_index(r)
        assert_allclose(sci['nirx'], r[orders[key]], err_msg=key, rtol=0.01)
    # TDDR (on optical)
    tddrs = dict()
    for key, r in raws.items():
        tddrs[key] = r = tddr(r)
        assert_allclose(tddrs['nirx'].get_data(),
                        r.get_data(orders[key]),
                        err_msg=key,
                        atol=1e-4)
        assert set(r.get_channel_types()) == {'fnirs_od'}
    # beer-lambert
    for key, r in raws.items():
        raws[key] = r = beer_lambert_law(r)
        assert_allclose(raws['nirx'].get_data(),
                        r.get_data(orders[key]),
                        err_msg=key,
                        rtol=2e-7)
        assert set(r.get_channel_types()) == {'hbo', 'hbr'}
    # TDDR (on haemo)
    tddrs = dict()
    for key, r in raws.items():
        tddrs[key] = r = tddr(r)
        assert_allclose(tddrs['nirx'].get_data(),
                        r.get_data(orders[key]),
                        err_msg=key,
                        atol=1e-9)
        assert set(r.get_channel_types()) == {'hbo', 'hbr'}
Example #8
def individual_analysis(bids_path):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity.pick(picks=range(20)).crop(200).resample(0.3)  # Reduce load
    raw_haemo = beer_lambert_law(optical_density(raw_intensity), ppf=0.1)
    design_matrix = make_first_level_design_matrix(raw_haemo)
    glm_est = run_glm(raw_haemo, design_matrix)

    return glm_est
Example #9
def test_basic_reading_and_min_process(fname):
    """Test reading SNIRF files and minimum typical processing."""
    raw = read_raw_snirf(fname, preload=True)
    # SNIRF data can contain several types, so only apply appropriate functions
    if 'fnirs_cw_amplitude' in raw:
        raw = optical_density(raw)
    if 'fnirs_od' in raw:
        raw = beer_lambert_law(raw, ppf=6)
    assert 'hbo' in raw
Example #10
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_GLM(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[6, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing='ignore'))

    # Extract channel metrics
    cha = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    cha["ID"] = ID  # Add the participant ID to the dataframe

    # Compute region of interest results from channel data
    roi = pd.DataFrame()
    for idx, col in enumerate(design_matrix.columns):
        roi = roi.append(glm_region_of_interest(glm_est, groups, idx, col))
    roi["ID"] = ID  # Add the participant ID to the dataframe

    # Contrast left vs right tapping
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['Tapping/Left'] - basic_conts['Tapping/Right']
    contrast = compute_contrast(glm_est, contrast_LvR)
    con = glm_to_tidy(raw_haemo, contrast, design_matrix)
    con["ID"] = ID  # Add the participant ID to the dataframe

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
Example #11
def test_fnirs_channel_naming_and_order_readers(fname):
    """Ensure fNIRS channel checking on standard readers."""
    # fNIRS data requires specific channel naming and ordering.

    # All standard readers should pass tests
    raw = read_raw_nirx(fname)
    freqs = np.unique(_channel_frequencies(raw.info))
    assert_array_equal(freqs, [760, 850])
    chroma = np.unique(_channel_chromophore(raw.info))
    assert len(chroma) == 0

    picks = _check_channels_ordered(raw.info, freqs)
    assert len(picks) == len(raw.ch_names)  # as all fNIRS only data

    # Check that dropped channels are detected
    # For each source detector pair there must be two channels;
    # removing one should throw an error.
    raw_dropped = raw.copy().drop_channels(raw.ch_names[4])
    with pytest.raises(ValueError, match='not ordered correctly'):
        _check_channels_ordered(raw_dropped.info, freqs)

    # The ordering must be increasing for the pairs, if provided
    raw_names_reversed = raw.copy().ch_names
    raw_names_reversed.reverse()
    raw_reversed = raw.copy().pick_channels(raw_names_reversed, ordered=True)
    with pytest.raises(ValueError, match='The frequencies.*sorted.*'):
        _check_channels_ordered(raw_reversed.info, [850, 760])
    # So if we flip the second argument it should pass again
    picks = _check_channels_ordered(raw_reversed.info, freqs)
    got_first = set(raw_reversed.ch_names[pick].split()[1]
                    for pick in picks[::2])
    assert got_first == {'760'}
    got_second = set(raw_reversed.ch_names[pick].split()[1]
                     for pick in picks[1::2])
    assert got_second == {'850'}

    # Check on OD data
    raw = optical_density(raw)
    freqs = np.unique(_channel_frequencies(raw.info))
    assert_array_equal(freqs, [760, 850])
    chroma = np.unique(_channel_chromophore(raw.info))
    assert len(chroma) == 0
    picks = _check_channels_ordered(raw.info, freqs)
    assert len(picks) == len(raw.ch_names)  # as all fNIRS only data

    # Check on haemoglobin data
    raw = beer_lambert_law(raw)
    freqs = np.unique(_channel_frequencies(raw.info))
    assert len(freqs) == 0
    assert len(_channel_chromophore(raw.info)) == len(raw.ch_names)
    chroma = np.unique(_channel_chromophore(raw.info))
    assert_array_equal(chroma, ["hbo", "hbr"])
    picks = _check_channels_ordered(raw.info, chroma)
    assert len(picks) == len(raw.ch_names)
    with pytest.raises(ValueError, match='chromophore in info'):
        _check_channels_ordered(raw.info, ["hbr", "hbo"])
Example #12
File: conftest.py Project: aces/EEG2BIDS
def fnirs_epochs():
    """Create an fnirs epoch structure."""
    raw_intensity = read_raw_nirx(fname_nirx, preload=False)
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=6.)
    evts, _ = events_from_annotations(raw_haemo, event_id={'1.0': 1})
    evts_dct = {'A': 1}
    tn, tx = -1, 2
    epochs = Epochs(raw_haemo, evts, event_id=evts_dct, tmin=tn, tmax=tx)
    return epochs
Example #13
def test_beer_lambert_unordered_errors():
    """NIRS data requires specific ordering and naming of channels."""
    raw = read_raw_nirx(fname_nirx_15_0)
    raw_od = optical_density(raw)
    raw_od.pick([0, 1, 2])
    with pytest.raises(ValueError, match='ordered'):
        beer_lambert_law(raw_od)

    # Test that an error is thrown if channel naming frequency doesn't match
    # what is stored in loc[9], which should hold the light frequency too.
    raw_od = optical_density(raw)
    ch_name = raw.ch_names[0]
    assert ch_name == 'S1_D1 760'
    idx = raw_od.ch_names.index(ch_name)
    assert idx == 0
    raw_od.info['chs'][idx]['loc'][9] = 770
    raw_od.rename_channels({ch_name: ch_name.replace('760', '770')})
    assert raw_od.ch_names[0] == 'S1_D1 770'
    with pytest.raises(ValueError, match='Exactly two frequencies'):
        beer_lambert_law(raw_od)
Example #14
def fnirs_epochs():
    """Create an fnirs epoch structure."""
    fname = op.join(data_path(download=False), 'NIRx', 'nirscout',
                    'nirx_15_2_recording_w_overlap')
    raw_intensity = read_raw_nirx(fname, preload=False)
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od)
    evts, _ = events_from_annotations(raw_haemo, event_id={'1.0': 1})
    evts_dct = {'A': 1}
    tn, tx = -1, 2
    epochs = Epochs(raw_haemo, evts, event_id=evts_dct, tmin=tn, tmax=tx)
    return epochs
Example #15
def individual_analysis(bids_path):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end
    # of the experiment.
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    raw_intensity.pick(picks=range(20)).crop(200).resample(0.3)  # Reduce load
    raw_haemo = beer_lambert_law(optical_density(raw_intensity), ppf=0.1)
    design_matrix = make_first_level_design_matrix(raw_haemo)
    glm_est = run_glm(raw_haemo, design_matrix)

    return glm_est
Example #16
def test_plot_topomap_nirs_overlap():
    """Test plotting nirs topomap with overlapping channels (gh-7414)."""
    fname = op.join(data_path(download=False), 'NIRx',
                    'nirx_15_2_recording_w_overlap')
    raw_intensity = read_raw_nirx(fname, preload=False)
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od)
    evts, _ = events_from_annotations(raw_haemo, event_id={'1.0': 1})
    evts_dct = {'A': 1}
    tn, tx = -1, 2
    epochs = Epochs(raw_haemo, evts, event_id=evts_dct, tmin=tn, tmax=tx)
    fig = epochs['A'].average(picks='hbo').plot_topomap()
    assert len(fig.axes) == 5
    plt.close('all')
Example #17
def test_fnirs_channel_naming_and_order_readers(fname):
    """Ensure fNIRS channel checking on standard readers."""
    # fNIRS data requires specific channel naming and ordering.

    # All standard readers should pass tests
    raw = read_raw_nirx(fname)
    freqs = np.unique(_channel_frequencies(raw))
    assert_array_equal(freqs, [760, 850])
    chroma = np.unique(_channel_chromophore(raw))
    assert len(chroma) == 0

    picks = _check_channels_ordered(raw, freqs)
    assert len(picks) == len(raw.ch_names)  # as all fNIRS only data

    # Check that dropped channels are detected
    # For each source detector pair there must be two channels;
    # removing one should throw an error.
    raw_dropped = raw.copy().drop_channels(raw.ch_names[4])
    with pytest.raises(ValueError, match='not ordered correctly'):
        _check_channels_ordered(raw_dropped, freqs)

    # The ordering must match the passed in argument
    raw_names_reversed = raw.copy().ch_names
    raw_names_reversed.reverse()
    raw_reversed = raw.copy().pick_channels(raw_names_reversed, ordered=True)
    with pytest.raises(ValueError, match='not ordered .* frequencies'):
        _check_channels_ordered(raw_reversed, freqs)
    # So if we flip the second argument it should pass again
    _check_channels_ordered(raw_reversed, [850, 760])

    # Check on OD data
    raw = optical_density(raw)
    freqs = np.unique(_channel_frequencies(raw))
    assert_array_equal(freqs, [760, 850])
    chroma = np.unique(_channel_chromophore(raw))
    assert len(chroma) == 0
    picks = _check_channels_ordered(raw, freqs)
    assert len(picks) == len(raw.ch_names)  # as all fNIRS only data

    # Check on haemoglobin data
    raw = beer_lambert_law(raw)
    freqs = np.unique(_channel_frequencies(raw))
    assert len(freqs) == 0
    assert len(_channel_chromophore(raw)) == len(raw.ch_names)
    chroma = np.unique(_channel_chromophore(raw))
    assert_array_equal(chroma, ["hbo", "hbr"])
    picks = _check_channels_ordered(raw, chroma)
    assert len(picks) == len(raw.ch_names)
    with pytest.raises(ValueError, match='not ordered .* chromophore'):
        _check_channels_ordered(raw, ["hbx", "hbr"])
Example #18
def analysis(fname, ID):

    raw_intensity = read_raw_bids(bids_path=fname, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end
    # of the experiment.
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description
    ]

    # Convert signal to haemoglobin and just keep hbo
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.5, npad="auto")

    # Cut out just the short channels for creating a GLM regressor
    short_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo,
                                                   hrf_model='fir',
                                                   stim_dur=1.0,
                                                   fir_delays=range(10),
                                                   drift_model='cosine',
                                                   high_pass=0.01,
                                                   oversampling=1)
    # Add short channels as regressor in GLM
    for chan in range(len(short_chans.ch_names)):
        design_matrix[f"short_{chan}"] = short_chans.get_data(chan).T

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Create a single ROI that includes all channels for example
    rois = dict(AllChannels=range(len(raw_haemo.ch_names)))
    # Calculate ROI for all conditions
    conditions = design_matrix.columns
    # Compute output metrics by ROI
    df_ind = glm_est.to_dataframe_region_of_interest(rois, conditions)

    df_ind["ID"] = ID
    df_ind["theta"] = [t * 1.e6 for t in df_ind["theta"]]

    return df_ind, raw_haemo, design_matrix
Example #19
def individual_analysis(bids_path):

    # Read data with annotations in BIDS format
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity = get_long_channels(raw_intensity, min_dist=0.01)

    # Convert signal to optical density and determine bad channels
    raw_od = optical_density(raw_intensity)
    sci = scalp_coupling_index(raw_od, h_freq=1.35, h_trans_bandwidth=0.1)
    raw_od.info["bads"] = list(compress(raw_od.ch_names, sci < 0.5))
    raw_od.interpolate_bads()

    # Downsample and apply signal cleaning techniques
    raw_od.resample(0.8)
    raw_od = temporal_derivative_distribution_repair(raw_od)

    # Convert to haemoglobin and filter
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo = raw_haemo.filter(0.02,
                                 0.3,
                                 h_trans_bandwidth=0.1,
                                 l_trans_bandwidth=0.01,
                                 verbose=False)

    # Apply further data cleaning techniques and extract epochs
    raw_haemo = enhance_negative_correlation(raw_haemo)
    # Extract events but ignore those with
    # the word Ends (i.e. drop ExperimentEnds events)
    events, event_dict = events_from_annotations(raw_haemo,
                                                 verbose=False,
                                                 regexp='^(?![Ends]).*$')
    epochs = Epochs(raw_haemo,
                    events,
                    event_id=event_dict,
                    tmin=-5,
                    tmax=20,
                    reject=dict(hbo=200e-6),
                    reject_by_annotation=True,
                    proj=True,
                    baseline=(None, 0),
                    detrend=0,
                    preload=True,
                    verbose=False)

    return raw_haemo, epochs
Example #20
def test_scalp_coupling_index(fname, fmt, tmpdir):
    """Test converting NIRX files."""
    assert fmt in ('nirx', 'fif')
    raw = read_raw_nirx(fname)
    with pytest.raises(RuntimeError, match='Scalp'):
        scalp_coupling_index(raw)

    raw = optical_density(raw)
    sci = scalp_coupling_index(raw)

    # All values should be between -1 and +1
    assert_array_less(sci, 1.0)
    assert_array_less(sci * -1.0, 1.0)

    # Fill in some data with known correlation values
    rng = np.random.RandomState(0)
    new_data = rng.rand(raw._data[0].shape[0])
    # Set first two channels to perfect correlation
    raw._data[0] = new_data
    raw._data[1] = new_data
    # Set next two channels to perfect correlation
    raw._data[2] = new_data
    raw._data[3] = new_data * 0.3  # check scale invariance
    # Set next two channels to anti correlation
    raw._data[4] = new_data
    raw._data[5] = new_data * -1.0
    # Set next two channels to be uncorrelated
    raw._data[6] = new_data
    raw._data[7] = rng.rand(raw._data[0].shape[0])
    # Set next channel to have zero std
    raw._data[8] = 0.
    raw._data[9] = 1.
    raw._data[10] = 2.
    raw._data[11] = 3.
    # Check values
    sci = scalp_coupling_index(raw)
    assert_allclose(sci[0:6], [1, 1, 1, 1, -1, -1], atol=0.01)
    assert np.abs(sci[6]) < 0.5
    assert np.abs(sci[7]) < 0.5
    assert_allclose(sci[8:12], 0, atol=1e-10)

    # Ensure function errors if wrong type is passed in
    raw = beer_lambert_law(raw)
    with pytest.raises(RuntimeError, match='Scalp'):
        scalp_coupling_index(raw)
Example #21
def test_fnirs_spread_bads(fname):
    """Test checking of bad markings."""
    # Test spreading upwards in frequency and on raw data
    raw = read_raw_nirx(fname)
    raw.info['bads'] = ['S1_D1 760']
    info = _fnirs_spread_bads(raw.info)
    assert info['bads'] == ['S1_D1 760', 'S1_D1 850']

    # Test spreading downwards in frequency and on od data
    raw = optical_density(raw)
    raw.info['bads'] = raw.ch_names[5:6]
    info = _fnirs_spread_bads(raw.info)
    assert info['bads'] == raw.ch_names[4:6]

    # Test spreading multiple bads and on chroma data
    raw = beer_lambert_law(raw)
    raw.info['bads'] = [raw.ch_names[x] for x in [1, 8]]
    info = _fnirs_spread_bads(raw.info)
    assert info['bads'] == [info.ch_names[x] for x in [0, 1, 8, 9]]
Example #22
def test_beer_lambert_v_matlab():
    """Compare MNE results to MATLAB toolbox."""
    raw = read_raw_nirx(fname_nirx_15_0)
    raw = optical_density(raw)
    raw = beer_lambert_law(raw, ppf=0.121)
    raw._data *= 1e6  # Scale to uM for comparison to MATLAB

    matlab_fname = op.join(data_path(download=False), 'NIRx', 'validation',
                           'nirx_15_0_recording_bl.mat')
    matlab_data = read_mat(matlab_fname)

    for idx in range(raw.get_data().shape[0]):

        mean_error = np.mean(matlab_data['data'][:, idx] - raw._data[idx])
        assert mean_error < 0.1
        matlab_name = ("S" + str(int(matlab_data['sources'][idx])) + "_D" +
                       str(int(matlab_data['detectors'][idx])) + " " +
                       matlab_data['type'][idx])
        assert raw.info['ch_names'][idx] == matlab_name
Example #23
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description
    ]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Add the participant ID to the dataframes
    cha["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]

    return raw_haemo, cha
Example #24
def test_interpolation_nirs():
    """Test interpolating bad nirs channels."""
    fname = op.join(data_path(download=False), 'NIRx', 'nirscout',
                    'nirx_15_2_recording_w_overlap')
    raw_intensity = read_raw_nirx(fname, preload=False)
    raw_od = optical_density(raw_intensity)
    sci = scalp_coupling_index(raw_od)
    raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))
    bad_0 = np.where(
        [name == raw_od.info['bads'][0] for name in raw_od.ch_names])[0][0]
    bad_0_std_pre_interp = np.std(raw_od._data[bad_0])
    bads_init = list(raw_od.info['bads'])
    raw_od.interpolate_bads(exclude=bads_init[:2])
    assert raw_od.info['bads'] == bads_init[:2]
    raw_od.interpolate_bads()
    assert raw_od.info['bads'] == []
    assert bad_0_std_pre_interp > np.std(raw_od._data[bad_0])
    raw_haemo = beer_lambert_law(raw_od, ppf=6)
    raw_haemo.info['bads'] = raw_haemo.ch_names[2:4]
    assert raw_haemo.info['bads'] == ['S1_D2 hbo', 'S1_D2 hbr']
    raw_haemo.interpolate_bads()
    assert raw_haemo.info['bads'] == []
Example #25
def test_beer_lambert_unordered_errors():
    """NIRS data requires specific ordering and naming of channels."""
    raw = read_raw_nirx(fname_nirx_15_0)
    raw_od = optical_density(raw)
    raw_od.pick([0, 1, 2])
    with pytest.raises(ValueError, match='ordered'):
        beer_lambert_law(raw_od)

    # Test that an error is thrown if channel naming frequency doesn't match
    # what is stored in loc[9], which should hold the light frequency too.
    raw_od = optical_density(raw)
    raw_od.rename_channels({'S2_D2 760': 'S2_D2 770'})
    with pytest.raises(ValueError, match='frequency do not match'):
        beer_lambert_law(raw_od)

    # Test that an error is thrown if inconsistent frequencies used in data
    raw_od.info['chs'][2]['loc'][9] = 770.0
    with pytest.raises(ValueError, match='pairs with frequencies'):
        beer_lambert_law(raw_od)
Example #26
def test_set_montage_artinis_basic():
    """Test that OctaMon and Brite23 montages are set properly."""
    # Test OctaMon montage
    montage_octamon = make_standard_montage('artinis-octamon')
    montage_brite23 = make_standard_montage('artinis-brite23')
    raw = _simulate_artinis_octamon()
    raw_od = optical_density(raw)
    old_info = raw.info.copy()
    old_info_od = raw_od.info.copy()
    raw.set_montage(montage_octamon)
    raw_od.set_montage(montage_octamon)
    raw_hb = beer_lambert_law(raw_od, ppf=6)  # montage needed for BLL
    # Check that the montage was actually modified
    assert_raises(AssertionError, assert_array_almost_equal,
                  old_info['chs'][0]['loc'][:9],
                  raw.info['chs'][0]['loc'][:9])
    assert_raises(AssertionError, assert_array_almost_equal,
                  old_info_od['chs'][0]['loc'][:9],
                  raw_od.info['chs'][0]['loc'][:9])

    # Check a known location
    assert_array_almost_equal(raw.info['chs'][0]['loc'][:3],
                              [0.0616, 0.075398, 0.07347])
    assert_array_almost_equal(raw.info['chs'][8]['loc'][:3],
                              [-0.033875,  0.101276,  0.077291])
    assert_array_almost_equal(raw.info['chs'][12]['loc'][:3],
                              [-0.062749,  0.080417,  0.074884])
    assert_array_almost_equal(raw_od.info['chs'][12]['loc'][:3],
                              [-0.062749,  0.080417,  0.074884])
    assert_array_almost_equal(raw_hb.info['chs'][12]['loc'][:3],
                              [-0.062749,  0.080417,  0.074884])
    # Check that locations are identical for a pair of channels (all elements
    # except the 10th, which holds the wavelength for non-hbo/hbr channel types)
    assert_array_almost_equal(raw.info['chs'][0]['loc'][:9],
                              raw.info['chs'][1]['loc'][:9])
    assert_array_almost_equal(raw_od.info['chs'][0]['loc'][:9],
                              raw_od.info['chs'][1]['loc'][:9])
    assert_array_almost_equal(raw_hb.info['chs'][0]['loc'][:9],
                              raw_hb.info['chs'][1]['loc'][:9])

    # Test Brite23 montage
    raw = _simulate_artinis_brite23()
    old_info = raw.info.copy()
    raw.set_montage(montage_brite23)
    # Check that the montage was actually modified
    assert_raises(AssertionError, assert_array_almost_equal,
                  old_info['chs'][0]['loc'][:9],
                  raw.info['chs'][0]['loc'][:9])
    # Check a known location
    assert_array_almost_equal(raw.info['chs'][0]['loc'][:3],
                              [0.085583, 0.036275, 0.089426])
    assert_array_almost_equal(raw.info['chs'][8]['loc'][:3],
                              [0.069555, 0.078579, 0.069305])
    assert_array_almost_equal(raw.info['chs'][12]['loc'][:3],
                              [0.044861, 0.100952, 0.065175])
    # Check that locations are identical for a pair of channels (all elements
    # except the 10th, which holds the wavelength for non-hbo/hbr channel types)
    assert_array_almost_equal(raw.info['chs'][0]['loc'][:9],
                              raw.info['chs'][1]['loc'][:9])

    # Test channel variations
    raw_old = _simulate_artinis_brite23()
    # Raw missing some channels that are in the montage: pass
    raw = raw_old.copy()
    raw.pick(['S1_D1 hbo', 'S1_D1 hbr'])
    raw.set_montage('artinis-brite23')

    # Unconventional channel pair: pass
    raw = raw_old.copy()
    info_new = create_info(['S11_D1 hbo', 'S11_D1 hbr'], raw.info['sfreq'],
                           ['hbo', 'hbr'])
    new = RawArray(np.random.normal(size=(2, len(raw))), info_new)
    raw.add_channels([new], force_update_info=True)
    raw.set_montage('artinis-brite23')

    # Source not in montage: fail
    raw = raw_old.copy()
    info_new = create_info(['S12_D7 hbo', 'S12_D7 hbr'], raw.info['sfreq'],
                           ['hbo', 'hbr'])
    new = RawArray(np.random.normal(size=(2, len(raw))), info_new)
    raw.add_channels([new], force_update_info=True)
    with pytest.raises(ValueError, match='is not in list'):
        raw.set_montage('artinis-brite23')

    # Detector not in montage: fail
    raw = raw_old.copy()
    info_new = create_info(['S11_D8 hbo', 'S11_D8 hbr'], raw.info['sfreq'],
                           ['hbo', 'hbr'])
    new = RawArray(np.random.normal(size=(2, len(raw))), info_new)
    raw.add_channels([new], force_update_info=True)
    with pytest.raises(ValueError, match='is not in list'):
        raw.set_montage('artinis-brite23')
Example #27
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end
    # of the experiment.
    raw_intensity.annotations.delete(raw_intensity.annotations.description == '15.0')
    # sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing='ignore'))

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Compute region of interest results from channel data
    roi = glm_est.to_dataframe_region_of_interest(groups,
                                                  design_matrix.columns,
                                                  demographic_info=True)

    # Define left vs right tapping contrast
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['Tapping_Left'] - basic_conts['Tapping_Right']

    # Compute defined contrast
    contrast = glm_est.compute_contrast(contrast_LvR)
    con = contrast.to_dataframe()

    # Add the participant ID to the dataframes
    roi["ID"] = cha["ID"] = con["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
Example #28
# ------------------------------------------------
#
# As with Homer we can convert the intensity data to optical density and
# apply motion correction using the TDDR method.

raw_od = optical_density(raw_intensity)
corrected_tddr = temporal_derivative_distribution_repair(raw_od)

###############################################################################
# Convert to haemoglobin concentration
# ------------------------------------
#
# Next we convert the signal to changes in haemoglobin concentration.
# MNE and Homer use different default values for the partial pathlength
# factor (ppf): Homer uses ppf=6, whereas MNE uses ppf=0.1.
# To exactly match the results from Homer we can manually set the ppf value
# to 6 in MNE.

raw_h = beer_lambert_law(corrected_tddr, ppf=6.)

###############################################################################
# Further analysis details
# ------------------------------------
#
# Commonly this preprocessing is followed by an averaging analysis as described
# in the :ref:`MNE fNIRS tutorial <mne:tut-fnirs-processing>`.
# If there is useful processing in Homer that is not available in MNE,
# please let us know by creating an issue at
# https://github.com/mne-tools/mne-nirs/issues
Example #29
# Anatomically informed weighting in region of interest analysis
# --------------------------------------------------------------
#
# As observed above, some channels have greater specificity to the desired
# brain region than other channels.
# Thus, when doing a region of interest analysis you may wish to give extra
# weight to channels with greater sensitivity to the desired ROI.
# This can be done by manually specifying the weights used in the region of
# interest function call.
# The details of the GLM analysis will not be described here; instead, view
# the :ref:`fNIRS GLM tutorial <tut-fnirs-hrf>`. Comments are provided below
# for the weighted region of interest function call.

# Basic pipeline, simplified for example
raw_od = optical_density(raw)
raw_haemo = beer_lambert_law(raw_od)
raw_haemo.resample(0.3).pick("hbo")  # Speed increase for web server
sht_chans = get_short_channels(raw_haemo)
raw_haemo = get_long_channels(raw_haemo)
design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=13.0)
design_matrix["ShortHbO"] = np.mean(
    sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
glm_est = run_glm(raw_haemo, design_matrix)

# First we create a dictionary for each region of interest.
# Here we include all channels in each ROI, as we will later be applying
# weights based on their specificity to the brain regions of interest.
rois = dict()
rois["Audio_weighted"] = range(len(glm_est.ch_names))
rois["Visual_weighted"] = range(len(glm_est.ch_names))
Example #30
from fooof import FOOOF


# %%
# Import and preprocess data
# --------------------------
#
# We read in the data and convert to haemoglobin concentration.

fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw = mne.io.read_raw_nirx(fnirs_raw_dir, verbose=True).load_data()

raw = optical_density(raw)
raw.resample(1.5)
raw = beer_lambert_law(raw, ppf=0.1)
raw = raw.pick(picks="hbo")
raw = get_long_channels(raw, min_dist=0.025, max_dist=0.045)
raw


# %%
# Process data with FOOOF
# -----------------------
#
# Next we estimate the power spectral density of the data and pass this to
# the FOOOF algorithm.
#
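# A minimal sketch of that step (a hedged illustration, not the original
# tutorial code): compute a Welch PSD with MNE and fit it with FOOOF. The
# frequency range and FOOOF settings below are assumptions.
from mne.time_frequency import psd_array_welch

psds, freqs = psd_array_welch(raw.get_data(), sfreq=raw.info['sfreq'],
                              fmin=0.01, fmax=0.7, n_fft=1024)
fm = FOOOF(peak_width_limits=(0.02, 0.3), max_n_peaks=3)
fm.fit(freqs, psds.mean(axis=0))  # average spectrum across the hbo channels
fm.print_results()
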
# I recommend using the FOOOF algorithm as provided by the authors rather
# than reimplementation or custom plotting etc. Their code is of excellent
# quality, well maintained, thoroughly documented, and they have considered