Example #1
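The snippets below come from mne-nirs tests and tutorials and are shown without their imports. A best-effort reconstruction of the imports they rely on follows; `_load_dataset` and `_tidy_long_to_wide` are private mne-nirs test helpers, and the module paths of the deprecated names at the end may differ between versions:

import os
from itertools import compress

import mne
import numpy as np
import pandas as pd
import pytest
from mne import Epochs, events_from_annotations
from mne.preprocessing.nirs import (beer_lambert_law, optical_density,
                                    scalp_coupling_index,
                                    source_detector_distances,
                                    temporal_derivative_distribution_repair)
from mne_bids import BIDSPath, read_raw_bids
from mne_nirs.channels import (get_long_channels, get_short_channels,
                               picks_pair_to_idx)
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.signal_enhancement import (enhance_negative_correlation,
                                         short_channel_regression)
from mne_nirs.statistics import glm_region_of_interest, run_glm

# Older snippets use the since-deprecated GLM API; paths are approximate:
from mne_nirs.statistics import compute_contrast, run_GLM
from mne_nirs.utils import glm_to_tidy
from mne_nirs.io.fold import fold_landmark_specificity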
def test_long_extraction():
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()

    long_chans = get_long_channels(raw_intensity)

    original_num_channels = len(raw_intensity.ch_names)
    assert original_num_channels == 56

    long_num_channels = len(long_chans.ch_names)
    assert long_num_channels == 56 - 16

    new_lens = source_detector_distances(long_chans.info)
    assert np.min(new_lens) >= 0.01

    # Now test with a non-standard short-channel length
    long_chans = get_long_channels(raw_intensity, min_dist=0.022)

    long_num_channels = len(long_chans.ch_names)
    assert long_num_channels > 16  # There are 8 SDs in this set * hbo/hbr

    new_lens = source_detector_distances(long_chans.info)
    assert np.max(new_lens) >= 0.022

    # Check that we don't run on other channel types, e.g. EEG.
    raw_intensity.pick(picks=range(2))
    raw_intensity.set_channel_types({'S1_D1 760': 'eeg', 'S1_D1 850': 'eeg'},
                                    verbose='error')
    with pytest.raises(RuntimeError, match='NIRS signals only'):
        _ = get_long_channels(raw_intensity)
Example #2
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append the mean of the short channels to the design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_GLM(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing='ignore'))

    # Extract channel metrics
    cha = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    cha["ID"] = ID  # Add the participant ID to the dataframe

    # Compute region of interest results from channel data
    # DataFrame.append was removed in pandas 2.0; use pd.concat instead
    roi = pd.concat(
        [glm_region_of_interest(glm_est, groups, idx, col)
         for idx, col in enumerate(design_matrix.columns)],
        ignore_index=True)
    roi["ID"] = ID  # Add the participant ID to the dataframe

    # Contrast left vs right tapping
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['Tapping/Left'] - basic_conts['Tapping/Right']
    contrast = compute_contrast(glm_est, contrast_LvR)
    con = glm_to_tidy(raw_haemo, contrast, design_matrix)
    con["ID"] = ID  # Add the participant ID to the dataframe

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
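A typical driver for a per-subject function like this loops over a BIDS dataset and stacks the returned dataframes. A minimal sketch; the subject count, task name, and dataset root are all placeholders:

df_roi, df_cha, df_con = pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
for sub in range(1, 6):  # subject count is a placeholder
    bids_path = BIDSPath(subject=f"{sub:02d}", task="tapping",
                         datatype="nirs", suffix="nirs", extension=".snirf",
                         root="/path/to/bids")  # hypothetical dataset root
    raw_haemo, roi, cha, con = individual_analysis(bids_path, sub)
    df_roi = pd.concat([df_roi, roi], ignore_index=True)
    df_cha = pd.concat([df_cha, cha], ignore_index=True)
    df_con = pd.concat([df_con, con], ignore_index=True)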
Example #3
def test_GLM_system_test():
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()
    raw_intensity.resample(1.0)
    new_des = list(raw_intensity.annotations.description)
    new_des = ['Control' if x == "1.0" else x for x in new_des]
    new_des = ['Tapping/Left' if x == "2.0" else x for x in new_des]
    new_des = ['Tapping/Right' if x == "3.0" else x for x in new_des]
    annot = mne.Annotations(raw_intensity.annotations.onset,
                            raw_intensity.annotations.duration, new_des)
    raw_intensity.set_annotations(annot)
    raw_intensity.annotations.crop(35, 2967)
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
    short_chs = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)
    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   hrf_model='spm',
                                                   stim_dur=5.0,
                                                   drift_order=3,
                                                   drift_model='polynomial')
    design_matrix["ShortHbO"] = np.mean(
        short_chs.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        short_chs.copy().pick(picks="hbr").get_data(), axis=0)
    glm_est = run_GLM(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    df = _tidy_long_to_wide(df)
    a = (df.query('condition in ["Control"]')
         .groupby(['condition', 'Chroma']).agg(['mean']))
    # Make sure false positive rate is less than 5%
    assert a["Significant"].values[0] < 0.05
    assert a["Significant"].values[1] < 0.05
    a = (df.query('condition in ["Tapping/Left", "Tapping/Right"]')
         .groupby(['condition', 'Chroma']).agg(['mean']))
    # Fairly arbitrary cutoff here, but it's well above 5%
    assert a["Significant"].values[0] > 0.7
    assert a["Significant"].values[1] > 0.7
    assert a["Significant"].values[2] > 0.7
    assert a["Significant"].values[3] > 0.7

    left = [[1, 1], [1, 2], [1, 3], [2, 1], [2, 3], [2, 4], [3, 2], [3, 3],
            [4, 3], [4, 4]]
    right = [[5, 5], [5, 6], [5, 7], [6, 5], [6, 7], [6, 8], [7, 6], [7, 7],
             [8, 7], [8, 8]]

    groups = dict(Left_ROI=picks_pair_to_idx(raw_haemo, left),
                  Right_ROI=picks_pair_to_idx(raw_haemo, right))

    # DataFrame.append was removed in pandas 2.0; use pd.concat instead
    df = pd.concat(
        [glm_region_of_interest(glm_est, groups, idx, col)
         for idx, col in enumerate(design_matrix.columns[:3])],
        ignore_index=True)

    assert df.shape == (12, 8)
Example #4
def test_short():
    raw_intensity = _load_dataset()

    with pytest.raises(RuntimeError, match="must be optical density"):
        _ = short_channel_regression(raw_intensity)

    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)

    raw_od_corrected = short_channel_regression(raw_od)

    assert 'fnirs_od' in raw_od_corrected

    with pytest.raises(RuntimeError, match="long channels present"):
        short_channel_regression(get_short_channels(raw_od))
    with pytest.raises(RuntimeError, match="short channels present"):
        short_channel_regression(get_long_channels(raw_od))
Example #5
def analysis(fname, ID):

    raw_intensity = read_raw_bids(bids_path=fname, verbose=False)
    # Delete annotations labeled 15.0, as these just signify the start and
    # end of the experiment.
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description
    ]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.5, npad="auto")

    # Cut out just the short channels for creating a GLM regressor
    short_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo,
                                                   hrf_model='fir',
                                                   stim_dur=1.0,
                                                   fir_delays=range(10),
                                                   drift_model='cosine',
                                                   high_pass=0.01,
                                                   oversampling=1)
    # Add short channels as regressor in GLM
    for chan in range(len(short_chans.ch_names)):
        design_matrix[f"short_{chan}"] = short_chans.get_data(chan).T

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Create a single ROI that includes all channels for example
    rois = dict(AllChannels=range(len(raw_haemo.ch_names)))
    # Calculate ROI for all conditions
    conditions = design_matrix.columns
    # Compute output metrics by ROI
    df_ind = glm_est.to_dataframe_region_of_interest(rois, conditions)

    df_ind["ID"] = ID
    df_ind["theta"] = [t * 1.e6 for t in df_ind["theta"]]

    return df_ind, raw_haemo, design_matrix
Example #6
def individual_analysis(bids_path):

    # Read data with annotations in BIDS format
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity = get_long_channels(raw_intensity, min_dist=0.01)

    # Convert signal to optical density and determine bad channels
    raw_od = optical_density(raw_intensity)
    sci = scalp_coupling_index(raw_od, h_freq=1.35, h_trans_bandwidth=0.1)
    raw_od.info["bads"] = list(compress(raw_od.ch_names, sci < 0.5))
    raw_od.interpolate_bads()

    # Downsample and apply signal cleaning techniques
    raw_od.resample(0.8)
    raw_od = temporal_derivative_distribution_repair(raw_od)

    # Convert to haemoglobin and filter
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo = raw_haemo.filter(0.02,
                                 0.3,
                                 h_trans_bandwidth=0.1,
                                 l_trans_bandwidth=0.01,
                                 verbose=False)

    # Apply further data cleaning techniques and extract epochs
    raw_haemo = enhance_negative_correlation(raw_haemo)
    # Extract events, but ignore any annotation containing the word
    # "Ends" (i.e. drop ExperimentEnds events)
    events, event_dict = events_from_annotations(raw_haemo,
                                                 verbose=False,
                                                 regexp='^(?!.*Ends).*$')
    epochs = Epochs(raw_haemo,
                    events,
                    event_id=event_dict,
                    tmin=-5,
                    tmax=20,
                    reject=dict(hbo=200e-6),
                    reject_by_annotation=True,
                    proj=True,
                    baseline=(None, 0),
                    detrend=0,
                    preload=True,
                    verbose=False)

    return raw_haemo, epochs
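Downstream, the returned epochs are typically averaged per condition into an evoked response. A minimal sketch, with `bids_path` built as in the earlier driver loop; the 'Tapping' condition label is hypothetical, so use whatever labels appear in your annotations:

raw_haemo, epochs = individual_analysis(bids_path)
evoked = epochs["Tapping"].average()  # condition label is illustrative
evoked.plot()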
Example #7
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description
    ]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append the mean of the short channels to the design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Add the participant ID to the dataframes
    cha["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]

    return raw_haemo, cha
Example #8
# As observed above, some channels have greater specificity to the desired
# brain region than other channels.
# Thus, when doing a region of interest analysis you may wish to give extra
# weight to channels with greater sensitivity to the desired ROI.
# This can be done by manually specifying the weights used in the region of
# interest function call.
# The details of the GLM analysis are not described here; for those, see the
# :ref:`fNIRS GLM tutorial <tut-fnirs-hrf>`. Instead, comments are provided
# for the weighted region of interest function call.

# Basic pipeline, simplified for example
raw_od = optical_density(raw)
raw_haemo = beer_lambert_law(raw_od)
raw_haemo.resample(0.3).pick("hbo")  # Speed increase for web server
sht_chans = get_short_channels(raw_haemo)
raw_haemo = get_long_channels(raw_haemo)
design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=13.0)
design_matrix["ShortHbO"] = np.mean(
    sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
glm_est = run_glm(raw_haemo, design_matrix)

# First we create a dictionary for each region of interest.
# Here we include all channels in each ROI, as we will later be applying
# weights based on their specificity to the brain regions of interest.
rois = dict()
rois["Audio_weighted"] = range(len(glm_est.ch_names))
rois["Visual_weighted"] = range(len(glm_est.ch_names))

# Next we compute the specificity for each channel to the auditory and visual cortex.
spec_aud = fold_landmark_specificity(
    raw_haemo, 'L Superior_Temporal_Gyrus')  # landmark label is illustrative
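The snippet is cut off here, so the promised weighted region of interest call never appears. The idea can be sketched by hand as a specificity-weighted average of the channel-level estimates; `spec_aud` (one specificity value per channel), the 'Audio' condition name, and the tidy column names are all assumptions:

cha = glm_est.to_dataframe()
aud = cha.query("Condition == 'Audio'")  # condition/column names assumed
# Weight each channel's estimate by its specificity to auditory cortex
weighted_theta = np.average(aud["theta"], weights=spec_aud)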
Example #9
# %%
# Import and preprocess data
# --------------------------
#
# We read in the data and convert to haemoglobin concentration.

fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw = mne.io.read_raw_nirx(fnirs_raw_dir, verbose=True).load_data()

raw = optical_density(raw)
raw.resample(1.5)
raw = beer_lambert_law(raw, ppf=0.1)
raw = raw.pick(picks="hbo")
raw = get_long_channels(raw, min_dist=0.025, max_dist=0.045)
raw


# %%
# Process data with FOOOF
# -----------------------
#
# Next we estimate the power spectral density of the data and pass this to
# the FOOOF algorithm.
#
# I recommend using the FOOOF implementation provided by the authors rather
# than a reimplementation or custom plotting code. Their code is of excellent
# quality, well maintained, thoroughly documented, and handles many edge
# cases.
#
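The snippet ends before the FOOOF code itself; a minimal sketch of the step those comments describe, with all frequency settings illustrative:

from fooof import FOOOF

# Estimate the PSD with Welch's method and average across channels
psd = raw.compute_psd(method="welch", fmin=0.001, fmax=0.7)
spectra, freqs = psd.get_data(return_freqs=True)
spectrum = np.mean(spectra, axis=0)

# Fit the FOOOF model over the resolvable range (settings illustrative)
fm = FOOOF(max_n_peaks=3)
fm.fit(freqs, spectrum, freq_range=[0.001, 0.7])
fm.print_results()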
Example #10
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    # Delete annotations labeled 15.0, as these just signify the start and
    # end of the experiment.
    raw_intensity.annotations.delete(raw_intensity.annotations.description == '15.0')
    # sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append the mean of the short channels to the design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing='ignore'))

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Compute region of interest results from channel data
    roi = glm_est.to_dataframe_region_of_interest(groups,
                                                  design_matrix.columns,
                                                  demographic_info=True)

    # Define left vs right tapping contrast
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['Tapping_Left'] - basic_conts['Tapping_Right']

    # Compute defined contrast
    contrast = glm_est.compute_contrast(contrast_LvR)
    con = contrast.to_dataframe()

    # Add the participant ID to the dataframes
    roi["ID"] = cha["ID"] = con["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
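The ID column added throughout exists so that per-subject dataframes can be stacked and fed to a group-level mixed-effects model. A minimal sketch with statsmodels, assuming the ROI results have been concatenated into df_roi and that the dataframe exposes ROI, Condition, and Chroma columns:

import statsmodels.formula.api as smf

# Random intercept per participant; fixed effects for ROI:Condition:Chroma
roi_model = smf.mixedlm("theta ~ -1 + ROI:Condition:Chroma",
                        df_roi, groups=df_roi["ID"]).fit(method="nm")
print(roi_model.summary())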