Example #1
import numpy as np
from mne.io import read_raw_nirx
from mne.preprocessing.nirs import (optical_density,
                                    temporal_derivative_distribution_repair)


def test_temporal_derivative_distribution_repair(fname, tmpdir):
    """Test running artifact rejection."""
    raw = read_raw_nirx(fname)
    raw = optical_density(raw)

    # Add a baseline shift artifact (a step change) near the start of the data
    max_shift = np.max(np.diff(raw._data[0]))
    shift_amp = 5 * max_shift
    raw._data[0, 0:30] -= shift_amp
    assert np.max(np.diff(raw._data[0])) > shift_amp
    # Ensure that applying the algorithm reduces the step change
    raw = temporal_derivative_distribution_repair(raw)
    assert np.max(np.diff(raw._data[0])) < shift_amp
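
The test above depends on a pytest fixture (`fname`) that points at a NIRx recording. As a sketch of how to run the same repair outside the test suite, the snippet below substitutes MNE's public `fnirs_motor` sample dataset for the fixture; the download helper and the `Participant-1` folder follow MNE's standard dataset layout, but they are assumptions, not part of the original test.

import mne
from mne.preprocessing.nirs import (optical_density,
                                    temporal_derivative_distribution_repair)

# Download (on first use) MNE's fNIRS motor sample dataset and load it
fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_raw_dir = fnirs_data_folder / "Participant-1"
raw = mne.io.read_raw_nirx(fnirs_raw_dir, verbose=False).load_data()

# Convert to optical density, then repair baseline shifts and spikes
raw_od = optical_density(raw)
raw_tddr = temporal_derivative_distribution_repair(raw_od)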
Example #2
from itertools import compress

from mne import Epochs, events_from_annotations
from mne.preprocessing.nirs import (beer_lambert_law, optical_density,
                                    scalp_coupling_index,
                                    temporal_derivative_distribution_repair)
from mne_bids import read_raw_bids
from mne_nirs.channels import get_long_channels
from mne_nirs.signal_enhancement import enhance_negative_correlation


def individual_analysis(bids_path):
    """Run the preprocessing pipeline for a single subject."""
    # Read data with annotations in BIDS format
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity = get_long_channels(raw_intensity, min_dist=0.01)

    # Convert signal to optical density and determine bad channels
    raw_od = optical_density(raw_intensity)
    sci = scalp_coupling_index(raw_od, h_freq=1.35, h_trans_bandwidth=0.1)
    raw_od.info["bads"] = list(compress(raw_od.ch_names, sci < 0.5))
    raw_od.interpolate_bads()

    # Downsample and apply signal cleaning techniques
    raw_od.resample(0.8)
    raw_od = temporal_derivative_distribution_repair(raw_od)

    # Convert to haemoglobin and filter
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo = raw_haemo.filter(0.02,
                                 0.3,
                                 h_trans_bandwidth=0.1,
                                 l_trans_bandwidth=0.01,
                                 verbose=False)

    # Apply further data cleaning techniques and extract epochs
    raw_haemo = enhance_negative_correlation(raw_haemo)
    # Extract events, but drop any annotation containing the word "Ends"
    # (i.e. drop ExperimentEnds events)
    events, event_dict = events_from_annotations(raw_haemo,
                                                 verbose=False,
                                                 regexp='^(?!.*Ends).*$')
    epochs = Epochs(raw_haemo,
                    events,
                    event_id=event_dict,
                    tmin=-5,
                    tmax=20,
                    reject=dict(hbo=200e-6),
                    reject_by_annotation=True,
                    proj=True,
                    baseline=(None, 0),
                    detrend=0,
                    preload=True,
                    verbose=False)

    return raw_haemo, epochs
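
As a usage sketch, `individual_analysis` could be driven across a group of subjects as shown below. The BIDS root, subject labels, task name, and file extension are hypothetical placeholders; substitute the values for your own dataset.

from mne_bids import BIDSPath

all_epochs = []
for subject in ("01", "02", "03"):  # hypothetical subject labels
    bids_path = BIDSPath(subject=subject,
                         task="tapping",                # hypothetical task
                         root="/path/to/bids/dataset",  # placeholder root
                         datatype="nirs",
                         suffix="nirs",
                         extension=".snirf")
    raw_haemo, epochs = individual_analysis(bids_path)
    all_epochs.append(epochs)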
Example #3
# ``corrupted_data`` and ``new_annotations`` are defined in earlier cells of
# the tutorial this excerpt is taken from; only the baseline-shift corruption
# is shown here.
corrupted_data[:, 450:750] += 0.03  # add a baseline shift artifact
corrupted_od = mne.io.RawArray(corrupted_data,
                               raw_od.info,
                               first_samp=raw_od.first_samp)
new_annotations.append([95, 145, 245], [10, 10, 10],
                       ["Spike", "Baseline", "Baseline"])
corrupted_od.set_annotations(new_annotations)

corrupted_od.plot(n_channels=15, duration=400, show_scrollbars=False)

# %%
# Apply temporal derivative distribution repair
# ---------------------------------------------
#
# This approach corrects baseline shift and spike artifacts without the need
# for any user-supplied parameters :footcite:`FishburnEtAl2019`. A sketch of
# the algorithm's core iteration follows the corrected plot below.

corrected_tddr = temporal_derivative_distribution_repair(corrupted_od)
corrected_tddr.plot(n_channels=15, duration=400, show_scrollbars=False)
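
# %%
# For reference, a minimal NumPy sketch of the core TDDR iteration from
# :footcite:`FishburnEtAl2019`: the temporal derivative of the signal is
# robustly reweighted with Tukey's biweight function and then re-integrated.
# This is a simplified illustration only; the full implementation also splits
# the signal into low- and high-frequency components and corrects only the
# low-frequency part.

import numpy as np


def tddr_core(signal):
    deriv = np.diff(signal)        # Step 1: temporal derivative
    w = np.ones_like(deriv)        # Step 2: initial observation weights
    mu = np.inf
    for _ in range(50):            # Step 3: iterate robust mean estimate
        mu0, mu = mu, np.sum(w * deriv) / np.sum(w)
        dev = np.abs(deriv - mu)               # absolute residuals
        sigma = 1.4826 * np.median(dev)        # robust std of residuals
        if sigma == 0:
            break
        r = dev / (sigma * 4.685)              # scaled deviations
        w = ((1 - r ** 2) * (r < 1)) ** 2      # Tukey's biweight weights
        if abs(mu - mu0) < 1e-12 * max(abs(mu), abs(mu0)):
            break
    new_deriv = w * (deriv - mu)   # Step 4: down-weight artifact steps
    corrected = np.cumsum(np.insert(new_deriv, 0, 0.0))  # Step 5: integrate
    return corrected - np.mean(corrected) + np.mean(signal)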

# %%
# We can see in the data above that the introduced spikes and shifts are
# largely removed, but some smaller residual artifacts remain.
# The same is true of the artifacts that were already present in the
# original data.
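
# %%
# As a quick sanity check (a sketch, not part of the original tutorial), the
# improvement can also be quantified by comparing the largest
# sample-to-sample change across channels before and after the repair,
# mirroring the assertions in Example #1.

import numpy as np

max_step_before = np.max(np.abs(np.diff(corrupted_od.get_data(), axis=-1)))
max_step_after = np.max(np.abs(np.diff(corrected_tddr.get_data(), axis=-1)))
print(f"Largest step change: {max_step_before:.4f} -> {max_step_after:.4f}")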

# %%
# References
# ----------
#
# .. footbibliography::