示例#1
0
def individual_analysis(bids_path, ID):
    """Run the first-level (single-participant) GLM pipeline.

    Parameters
    ----------
    bids_path : BIDSPath
        Path to this participant's recording within the BIDS dataset.
    ID : str | int
        Participant identifier; copied into an ``ID`` column of each
        returned dataframe so results can be concatenated across subjects.

    Returns
    -------
    raw_haemo : Raw
        Preprocessed long-channel haemoglobin data.
    roi : DataFrame
        Region-of-interest GLM estimates.
    cha : DataFrame
        Per-channel GLM estimates.
    con : DataFrame
        Left-vs-right tapping contrast estimates.
    """
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_GLM(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    # Bugfix: the first right pair was a duplicate [6, 7]; the mirror of
    # left's [4, 3] is [8, 7], as used by the sibling analyses in this file.
    right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing='ignore'))

    # Extract channel metrics
    cha = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    cha["ID"] = ID  # Add the participant ID to the dataframe

    # Compute region of interest results from channel data.
    # DataFrame.append was removed in pandas 2.0; build the per-condition
    # frames and concatenate them once instead.
    roi = pd.concat([glm_region_of_interest(glm_est, groups, idx, col)
                     for idx, col in enumerate(design_matrix.columns)])
    roi["ID"] = ID  # Add the participant ID to the dataframe

    # Contrast left vs right tapping
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = {column: contrast_matrix[i]
                   for i, column in enumerate(design_matrix.columns)}
    contrast_LvR = basic_conts['Tapping/Left'] - basic_conts['Tapping/Right']
    contrast = compute_contrast(glm_est, contrast_LvR)
    con = glm_to_tidy(raw_haemo, contrast, design_matrix)
    con["ID"] = ID  # Add the participant ID to the dataframe

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
示例#2
0
def test_GLM_system_test():
    """End-to-end GLM smoke test on the fnirs_motor Participant-1 data."""
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()
    raw_intensity.resample(1.0)
    # Rename the numeric trigger codes to meaningful condition names.
    new_des = [des for des in raw_intensity.annotations.description]
    new_des = ['Control' if x == "1.0" else x for x in new_des]
    new_des = ['Tapping/Left' if x == "2.0" else x for x in new_des]
    new_des = ['Tapping/Right' if x == "3.0" else x for x in new_des]
    annot = mne.Annotations(raw_intensity.annotations.onset,
                            raw_intensity.annotations.duration, new_des)
    raw_intensity.set_annotations(annot)
    raw_intensity.annotations.crop(35, 2967)
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
    short_chs = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)
    # NOTE(review): the design matrix is built from raw_intensity rather than
    # raw_haemo; the annotations and timing are shared, but confirm this is
    # intended (the sibling analyses in this file pass the haemoglobin data).
    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   hrf_model='spm',
                                                   stim_dur=5.0,
                                                   drift_order=3,
                                                   drift_model='polynomial')
    # Add the mean short-channel signal per chromophore as regressors.
    design_matrix["ShortHbO"] = np.mean(
        short_chs.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        short_chs.copy().pick(picks="hbr").get_data(), axis=0)
    glm_est = run_GLM(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    df = _tidy_long_to_wide(df)
    a = (df.query('condition in ["Control"]').groupby(['condition', 'Chroma'
                                                       ]).agg(['mean']))
    # Make sure false positive rate is less than 5%
    assert a["Significant"].values[0] < 0.05
    assert a["Significant"].values[1] < 0.05
    a = (df.query('condition in ["Tapping/Left", "Tapping/Right"]').groupby(
        ['condition', 'Chroma']).agg(['mean']))
    # Fairly arbitrary cutoff here, but it's more than 5%
    assert a["Significant"].values[0] > 0.7
    assert a["Significant"].values[1] > 0.7
    assert a["Significant"].values[2] > 0.7
    assert a["Significant"].values[3] > 0.7

    # Source-detector pairs covering the left and right motor regions.
    left = [[1, 1], [1, 2], [1, 3], [2, 1], [2, 3], [2, 4], [3, 2], [3, 3],
            [4, 3], [4, 4]]
    right = [[5, 5], [5, 6], [5, 7], [6, 5], [6, 7], [6, 8], [7, 6], [7, 7],
             [8, 7], [8, 8]]

    groups = dict(Left_ROI=picks_pair_to_idx(raw_haemo, left),
                  Right_ROI=picks_pair_to_idx(raw_haemo, right))

    # DataFrame.append was removed in pandas 2.0; accumulate with pd.concat.
    df = pd.DataFrame()
    for idx, col in enumerate(design_matrix.columns[:3]):
        df = pd.concat([df, glm_region_of_interest(glm_est, groups, idx, col)])

    assert df.shape == (12, 8)
示例#3
0
def test_roi_picks():
    """Exercise picks_pair_to_idx: lookups, missing-pair handling, ROI dicts.

    Uses the Participant-1 recording from the mne fnirs_motor dataset; the
    expected channel names and indices below are tied to that montage.
    """
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()

    # Each requested source-detector pair resolves to two channel indices
    # (one per wavelength, 760 and 850 nm), in request order.
    picks = picks_pair_to_idx(raw, [[1, 1], [1, 2], [5, 13], [8, 16]])

    assert raw.ch_names[picks[0]] == "S1_D1 760"
    assert raw.ch_names[picks[1]] == "S1_D1 850"

    assert raw.ch_names[picks[2]] == "S1_D2 760"
    assert raw.ch_names[picks[3]] == "S1_D2 850"

    assert raw.ch_names[picks[4]] == "S5_D13 760"
    assert raw.ch_names[picks[5]] == "S5_D13 850"

    assert raw.ch_names[picks[6]] == "S8_D16 760"
    assert raw.ch_names[picks[7]] == "S8_D16 850"

    # Test what happens when a pair that doesn't exist is requested (15-13):
    # the default on_missing behavior raises.
    with pytest.raises(ValueError, match='No matching'):
        picks_pair_to_idx(raw, [[1, 1], [1, 2], [15, 13], [8, 16]])

    # on_missing='warning' emits a RuntimeWarning and drops the missing pair.
    with pytest.warns(RuntimeWarning, match='No matching channels'):
        picks = picks_pair_to_idx(raw, [[1, 1], [1, 2], [15, 13], [8, 16]],
                                  on_missing='warning')
    assert len(picks) == 6  # Missing should be ignored

    # on_missing='ignore' drops the missing pair silently.
    picks = picks_pair_to_idx(raw, [[1, 1], [1, 2], [15, 13], [8, 16]],
                              on_missing='ignore')
    assert len(picks) == 6

    # Test usage for ROI downstream functions
    group_by = dict(Left_ROI=picks_pair_to_idx(raw, [[1, 1], [1, 2], [5, 13]]),
                    Right_ROI=picks_pair_to_idx(raw, [[3, 3], [3, 11]]))
    assert group_by['Left_ROI'] == [0, 1, 2, 3, 34, 35]
    assert group_by['Right_ROI'] == [18, 19, 20, 21]

    # Ensure we dont match [1, 1] to S1_D11 — naive substring matching
    # would wrongly accept 'S1_D11' when searching for 'S1_D1'.
    # Check easy condition
    picks = picks_pair_to_idx(raw, [[1, 1]])
    assert picks == [0, 1]
    # Force in tricky situation: overwrite channel names in place so an
    # 'S1_D11' pair sits immediately after the real 'S1_D1' pair.
    raw.info["ch_names"][2] = 'S1_D11 760'
    raw.info["ch_names"][3] = 'S1_D11 850'
    picks = picks_pair_to_idx(raw, [[1, 1]])
    assert picks == [0, 1]

    # All requested pairs missing + ignore -> empty pick list.
    picks = picks_pair_to_idx(raw, [[21, 91], [91, 2]], on_missing='ignore')
    assert picks == []
示例#4
0
def test_roi_picks():
    """Check pair-to-index resolution and missing-pair handling."""
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()

    picks = picks_pair_to_idx(raw, [[1, 1], [1, 2], [5, 13], [8, 16]])

    # Each pair yields two channels (760 then 850 nm), in request order.
    expected_names = ["S1_D1 760", "S1_D1 850",
                      "S1_D2 760", "S1_D2 850",
                      "S5_D13 760", "S5_D13 850",
                      "S8_D16 760", "S8_D16 850"]
    for pick, name in zip(picks, expected_names):
        assert raw.ch_names[pick] == name

    # A non-existent pair (15-13) raises by default.
    with pytest.raises(ValueError, match='No matching'):
        picks_pair_to_idx(raw, [[1, 1], [1, 2], [15, 13], [8, 16]])

    # With on_missing='warning' the call completes.
    picks_pair_to_idx(raw, [[1, 1], [1, 2], [15, 13], [8, 16]],
                      on_missing='warning')

    # With on_missing='ignore' the missing pair is simply dropped.
    picks = picks_pair_to_idx(raw, [[1, 1], [1, 2], [15, 13], [8, 16]],
                              on_missing='ignore')

    assert len(picks) == 6

    # Test usage for ROI downstream functions
    left_pairs = [[1, 1], [1, 2], [5, 13]]
    right_pairs = [[3, 3], [3, 11], [6, 8]]
    group_by = dict(Left_ROI=picks_pair_to_idx(raw, left_pairs),
                    Right_ROI=picks_pair_to_idx(raw, right_pairs))
    assert group_by['Left_ROI'] == [0, 1, 2, 3, 34, 35]
    assert group_by['Right_ROI'] == [18, 19, 20, 21, 40, 41]
示例#5
0
        'With Short Regression'
]):
    axes[column].set_title('{}'.format(condition))

###############################################################################
# Plot hemisphere for each approach
# ---------------------------------
#
# Plot the epoch image for each approach. First we specify the source
# detector pairs for analysis.

# Source-detector pairs defining the left and right motor-region ROIs.
left = [[1, 3], [2, 3], [1, 2], [4, 3]]
right = [[5, 7], [6, 7], [5, 6], [8, 7]]

# Resolve each pair to channel indices for ROI-grouped plotting; missing
# pairs only produce a warning rather than an error.
# NOTE(review): Raw.pick operates in place in MNE, so the second
# picks_pair_to_idx call receives raw_anti already restricted to hbo —
# confirm the repeated .pick(picks='hbo') is intended.
groups = dict(Left_ROI=picks_pair_to_idx(raw_anti.pick(picks='hbo'),
                                         left,
                                         on_missing='warning'),
              Right_ROI=picks_pair_to_idx(raw_anti.pick(picks='hbo'),
                                          right,
                                          on_missing='warning'))

# Average the epochs per condition and chromophore for evoked comparison.
evoked_dict = {
    'Left/HbO': epochs['Tapping/Left'].average(picks='hbo'),
    'Left/HbR': epochs['Tapping/Left'].average(picks='hbr'),
    'Right/HbO': epochs['Tapping/Right'].average(picks='hbo'),
    'Right/HbR': epochs['Tapping/Right'].average(picks='hbr')
}
# Strip the trailing ' hbo'/' hbr' (last 4 characters) so channel names
# match across chromophores when plotted together.
for condition in evoked_dict:
    evoked_dict[condition].rename_channels(lambda x: x[:-4])

evoked_dict_anti = {
示例#6
0
    axes[column].set_title('{}'.format(condition))


###############################################################################
# Plot trials for each approach
# -----------------------------
#
# Plot the epoch image for each approach. First we specify the source
# detector pairs for analysis.

# Source-detector pairs covering the left and right motor regions.
left = [[1, 1], [1, 2], [1, 3], [2, 1], [2, 3],
        [2, 4], [3, 2], [3, 3], [4, 3], [4, 4]]
right = [[5, 5], [5, 6], [5, 7], [6, 5], [6, 7],
         [6, 8], [7, 6], [7, 7], [8, 7], [8, 8]]

# Resolve the pairs to channel indices for the ROI-grouped epoch images.
# NOTE(review): Raw.pick operates in place in MNE, so the second call here
# receives raw_anti already restricted to hbo — confirm this is intended.
groups = dict(Left_ROI=picks_pair_to_idx(raw_anti.pick(picks='hbo'), left),
              Right_ROI=picks_pair_to_idx(raw_anti.pick(picks='hbo'), right))


###############################################################################
# First we plot the epochs for the unprocessed data.

# One image per ROI (via group_by), averaging over the ROI's channels.
epochs['Tapping'].pick(picks='hbo').plot_image(combine='mean',
                                                    group_by=groups)


###############################################################################
# New we plot the epochs for the data processed with the Cui anti correlation
# method.

epochs_anti['Tapping'].pick(picks='hbo').plot_image(combine='mean',
# pairs of interest and then determining which channels these correspond to
# within the raw data structure. The channel indices are stored in a
# dictionary for access below.
# The fOLD toolbox can be used to assist in the design of ROIs.
# And consideration should be paid to ensure optimal size ROIs are selected.
#
# In this example, two ROIs are generated. One for the left motor cortex
# and one for the right motor cortex. These are called `Left_Hemisphere` and
# `Right_Hemisphere` and are stored in the `rois` dictionary.

# Channel pairs (source, detector) that make up each region of interest.
left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]

# Resolve each pair to channel indices and collect the two hemispheres
# into the ROI dictionary used by the plotting code below.
rois = {
    'Left_Hemisphere': picks_pair_to_idx(raw_haemo, left),
    'Right_Hemisphere': picks_pair_to_idx(raw_haemo, right),
}

pprint(rois)

# %%
# Create average waveform per ROI
# -------------------------------
#
# Next, an average waveform is generated per condition per region of interest.
# This allows the researcher to view the responses elicited in different
# regions of the brain for each condition.

# Specify the figure size and limits per chromophore.
fig, axes = plt.subplots(nrows=len(rois),
                         ncols=len(all_evokeds),
示例#8
0
axes[1].set_title("Hemispheres plotted independently")


###############################################################################
# Analyse regions of interest
# ---------------------------
#
# Or alternatively we can summarise the responses across regions of interest
# for each condition. And you can plot it with your favorite software.

# Source-detector pairs covering the left and right motor regions.
left = [[1, 1], [1, 2], [1, 3], [2, 1], [2, 3],
        [2, 4], [3, 2], [3, 3], [4, 3], [4, 4]]
right = [[5, 5], [5, 6], [5, 7], [6, 5], [6, 7],
         [6, 8], [7, 6], [7, 7], [8, 7], [8, 8]]

# Resolve the pairs to channel indices for the ROI summary below.
groups = dict(Left_ROI=picks_pair_to_idx(raw_haemo, left),
              Right_ROI=picks_pair_to_idx(raw_haemo, right))

# Summarise the GLM estimates per ROI for the first three conditions.
# DataFrame.append was removed in pandas 2.0; accumulate with pd.concat.
df = pd.DataFrame()
for idx, col in enumerate(design_matrix.columns[:3]):
    df = pd.concat([df, glm_region_of_interest(glm_est, groups, idx, col)])


###############################################################################
#
# Compute contrasts
# -----------------
#
# We can also define a contrast as described in
# `Nilearn docs <http://nilearn.github.io/auto_examples/04_glm_first_level/plot_localizer_surface_analysis.html>`_
# and plot it.
示例#9
0
def individual_analysis(bids_path, ID):
    """Run the first-level (single-participant) GLM pipeline.

    Parameters
    ----------
    bids_path : BIDSPath
        Path to this participant's recording within the BIDS dataset.
    ID : str | int
        Participant identifier; copied into an ``ID`` column of each
        returned dataframe so results can be concatenated across subjects.

    Returns
    -------
    raw_haemo : Raw
        Preprocessed long-channel haemoglobin data.
    roi : DataFrame
        Region-of-interest GLM estimates.
    cha : DataFrame
        Per-channel GLM estimates.
    con : DataFrame
        Left-vs-right tapping contrast estimates.
    """
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    # Delete annotation labeled 15, as these just signify the start and end of experiment.
    raw_intensity.annotations.delete(raw_intensity.annotations.description == '15.0')
    # Sanitize event names: replace '/' so MNE does not treat the names as
    # condition hierarchies.
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    # NOTE(review): ppf=0.1 looks unusually small for a partial pathlength
    # factor — confirm this value is intended.
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Define channels in each region of interest: list the source-detector
    # pairs manually, then resolve each pair to channel indices.
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left, on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right, on_missing='ignore'))

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Compute region of interest results from channel data
    roi = glm_est.to_dataframe_region_of_interest(groups,
                                                  design_matrix.columns,
                                                  demographic_info=True)

    # Define left vs right tapping contrast: one one-hot row of the
    # identity per design-matrix column.
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = {column: contrast_matrix[i]
                   for i, column in enumerate(design_matrix.columns)}
    contrast_LvR = basic_conts['Tapping_Left'] - basic_conts['Tapping_Right']

    # Compute defined contrast
    contrast = glm_est.compute_contrast(contrast_LvR)
    con = contrast.to_dataframe()

    # Add the participant ID to the dataframes
    roi["ID"] = cha["ID"] = con["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con