def test_run_plot_GLM_topo():
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast

    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    glm_estimates = run_glm(raw_haemo, design_matrix)
    fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix)
    # 5 conditions (A, B, C, Drift, Constant) * two chroma + 2x colorbar
    assert len(fig.axes) == 12

    # Two conditions * two chroma + 2x colorbar
    fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix,
                        requested_conditions=['A', 'B'])
    assert len(fig.axes) == 6

    # Two conditions * one chroma + 1x colorbar
    with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
        fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
                            glm_estimates.data, design_matrix,
                            requested_conditions=['A', 'B'])
    assert len(fig.axes) == 3

    # One condition * two chroma + 2x colorbar
    fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix,
                        requested_conditions=['A'])
    assert len(fig.axes) == 4

    # One condition * one chroma + 1x colorbar
    with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
        fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
                            glm_estimates.data, design_matrix,
                            requested_conditions=['A'])
    assert len(fig.axes) == 2

    # One condition * one chroma + no colorbar
    with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
        fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
                            glm_estimates.data, design_matrix,
                            colorbar=False,
                            requested_conditions=['A'])
    assert len(fig.axes) == 1

    # Ensure an error is thrown if the GLM estimates are missing channels
    # that are present in raw
    glm_estimates_subset = {a: glm_estimates.data[a]
                            for a in raw_haemo.ch_names[0:3]}
    with pytest.raises(RuntimeError, match="does not match regression"):
        plot_glm_topo(raw_haemo, glm_estimates_subset, design_matrix)
def individual_analysis(bids_path):
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity.pick(picks=range(20)).crop(200).resample(0.3)  # Reduce load
    raw_haemo = beer_lambert_law(optical_density(raw_intensity), ppf=0.1)
    design_matrix = make_first_level_design_matrix(raw_haemo)
    glm_est = run_glm(raw_haemo, design_matrix)
    return glm_est
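# A hypothetical driver for the helper above. The dataset root and task name
# are assumptions; adapt them to your BIDS dataset.
from mne_bids import BIDSPath, get_entity_vals

root = "/path/to/bids/dataset"  # assumed location
for sub in get_entity_vals(root, 'subject'):
    bids_path = BIDSPath(root=root, subject=sub, task="tapping",
                         datatype="nirs", suffix="nirs", extension=".snirf")
    glm_est = individual_analysis(bids_path)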
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_GLM(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[6, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left,
                                          on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right,
                                           on_missing='ignore'))

    # Extract channel metrics
    cha = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    cha["ID"] = ID  # Add the participant ID to the dataframe

    # Compute region of interest results from channel data
    roi = pd.DataFrame()
    for idx, col in enumerate(design_matrix.columns):
        # pd.concat replaces the removed DataFrame.append
        roi = pd.concat([roi, glm_region_of_interest(glm_est, groups,
                                                     idx, col)])
    roi["ID"] = ID  # Add the participant ID to the dataframe

    # Contrast left vs right tapping
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['Tapping/Left'] - basic_conts['Tapping/Right']
    contrast = compute_contrast(glm_est, contrast_LvR)
    con = glm_to_tidy(raw_haemo, contrast, design_matrix)
    con["ID"] = ID  # Add the participant ID to the dataframe

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
def test_GLM_system_test():
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()
    raw_intensity.resample(1.0)
    new_des = [des for des in raw_intensity.annotations.description]
    new_des = ['Control' if x == "1.0" else x for x in new_des]
    new_des = ['Tapping/Left' if x == "2.0" else x for x in new_des]
    new_des = ['Tapping/Right' if x == "3.0" else x for x in new_des]
    annot = mne.Annotations(raw_intensity.annotations.onset,
                            raw_intensity.annotations.duration, new_des)
    raw_intensity.set_annotations(annot)
    raw_intensity.annotations.crop(35, 2967)
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
    short_chs = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)
    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   hrf_model='spm',
                                                   stim_dur=5.0,
                                                   drift_order=3,
                                                   drift_model='polynomial')
    design_matrix["ShortHbO"] = np.mean(
        short_chs.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        short_chs.copy().pick(picks="hbr").get_data(), axis=0)
    glm_est = run_GLM(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    df = _tidy_long_to_wide(df)

    a = (df.query('condition in ["Control"]')
         .groupby(['condition', 'Chroma']).agg(['mean']))
    # Make sure false positive rate is less than 5%
    assert a["Significant"].values[0] < 0.05
    assert a["Significant"].values[1] < 0.05

    a = (df.query('condition in ["Tapping/Left", "Tapping/Right"]')
         .groupby(['condition', 'Chroma']).agg(['mean']))
    # Fairly arbitrary cutoff here, but it's more than 5%
    assert a["Significant"].values[0] > 0.7
    assert a["Significant"].values[1] > 0.7
    assert a["Significant"].values[2] > 0.7
    assert a["Significant"].values[3] > 0.7

    left = [[1, 1], [1, 2], [1, 3], [2, 1], [2, 3],
            [2, 4], [3, 2], [3, 3], [4, 3], [4, 4]]
    right = [[5, 5], [5, 6], [5, 7], [6, 5], [6, 7],
             [6, 8], [7, 6], [7, 7], [8, 7], [8, 8]]

    groups = dict(Left_ROI=picks_pair_to_idx(raw_haemo, left),
                  Right_ROI=picks_pair_to_idx(raw_haemo, right))

    df = pd.DataFrame()
    for idx, col in enumerate(design_matrix.columns[:3]):
        # pd.concat replaces the removed DataFrame.append
        df = pd.concat([df, glm_region_of_interest(glm_est, groups,
                                                   idx, col)])

    assert df.shape == (12, 8)
def test_run_GLM_order():
    raw = simulate_nirs_raw(sig_dur=200, stim_dur=5., sfreq=3)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.,
                                                   drift_order=1,
                                                   drift_model='polynomial')

    # Default should be first order AR
    glm_estimates = run_glm(raw, design_matrix)
    assert glm_estimates.pick("Simulated").model()[0].order == 1

    # Higher order AR models can be requested explicitly
    glm_estimates = run_glm(raw, design_matrix, noise_model='ar2')
    assert glm_estimates.pick("Simulated").model()[0].order == 2

    glm_estimates = run_glm(raw, design_matrix, noise_model='ar7')
    assert glm_estimates.pick("Simulated").model()[0].order == 7

    # Auto should be 4 times sample rate
    cov = Covariance(np.ones(1) * 1e-11, raw.ch_names,
                     raw.info['bads'], raw.info['projs'], nfree=0)
    # iir_filter is defined at module scope in the original test file
    raw = add_noise(raw, cov, iir_filter=iir_filter)

    glm_estimates = run_glm(raw, design_matrix, noise_model='auto')
    assert glm_estimates.pick("Simulated").model()[0].order == 3 * 4

    raw = simulate_nirs_raw(sig_dur=10, stim_dur=5., sfreq=2)
    cov = Covariance(np.ones(1) * 1e-11, raw.ch_names,
                     raw.info['bads'], raw.info['projs'], nfree=0)
    raw = add_noise(raw, cov, iir_filter=iir_filter)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.,
                                                   drift_order=1,
                                                   drift_model='polynomial')

    # Auto should be 4 times sample rate
    glm_estimates = run_glm(raw, design_matrix, noise_model='auto')
    assert glm_estimates.pick("Simulated").model()[0].order == 2 * 4
def test_create_design():
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast

    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   drift_order=1,
                                                   drift_model='polynomial')

    assert design_matrix.shape[0] == raw_intensity._data.shape[1]
    # Number of columns is number of conditions plus the drift plus constant
    assert design_matrix.shape[1] == \
        len(np.unique(raw_intensity.annotations.description)) + 2
def individual_analysis(bids_path):
    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end
    # of the experiment.
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    raw_intensity.pick(picks=range(20)).crop(200).resample(0.3)  # Reduce load
    raw_haemo = beer_lambert_law(optical_density(raw_intensity), ppf=0.1)
    design_matrix = make_first_level_design_matrix(raw_haemo)
    glm_est = run_glm(raw_haemo, design_matrix)
    return glm_est
def test_io():
    num_chans = 6
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()
    raw_intensity.resample(0.2)
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
    raw_haemo = mne_nirs.channels.get_long_channels(raw_haemo)
    raw_haemo.pick(picks=range(num_chans))
    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   hrf_model='spm',
                                                   stim_dur=5.0,
                                                   drift_order=3,
                                                   drift_model='polynomial')
    glm_est = run_GLM(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_est, design_matrix)
    df = _tidy_long_to_wide(df)
    assert df.shape == (48, 11)
    assert set(df.columns) == {'ch_name', 'condition', 'df', 'mse',
                               'p_value', 't', 'theta', 'Source',
                               'Detector', 'Chroma', 'Significant'}
    num_conds = 8  # triggers (1, 2, 3, 15) + 3 drifts + constant
    assert df.shape[0] == num_chans * num_conds

    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['2.0'] - basic_conts['3.0']
    contrast = mne_nirs.statistics.compute_contrast(glm_est, contrast_LvR)
    df = glm_to_tidy(raw_haemo, contrast, design_matrix)
    df = _tidy_long_to_wide(df)
    assert df.shape == (6, 10)
    assert set(df.columns) == {'ch_name', 'ContrastType', 'z_score', 'stat',
                               'p_value', 'effect', 'Source', 'Detector',
                               'Chroma', 'Significant'}

    contrast = mne_nirs.statistics.compute_contrast(glm_est, contrast_LvR,
                                                    contrast_type='F')
    df = glm_to_tidy(raw_haemo, contrast, design_matrix)
    df = _tidy_long_to_wide(df)
    assert df.shape == (6, 10)
    assert set(df.columns) == {'ch_name', 'ContrastType', 'z_score', 'stat',
                               'p_value', 'effect', 'Source', 'Detector',
                               'Chroma', 'Significant'}
def _get_glm_contrast_result(tmin=60, tmax=400):
    raw = _get_minimal_haemo_data(tmin=tmin, tmax=tmax)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    glm_est = run_glm(raw, design_matrix)

    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    assert 'e1p' in basic_conts, sorted(basic_conts)
    contrast_LvR = basic_conts['e1p'] - basic_conts['e2p']

    return glm_est.compute_contrast(contrast_LvR)
def test_run_plot_GLM_contrast_topo():
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast

    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
    glm_est = run_GLM(raw_haemo, design_matrix)
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['A'] - basic_conts['B']
    contrast = mne_nirs.statistics.compute_contrast(glm_est, contrast_LvR)
    fig = mne_nirs.visualisation.plot_glm_contrast_topo(raw_haemo, contrast)
    # Two chroma topomaps plus a shared colorbar
    assert len(fig.axes) == 3
def test_simulate_NIRS():

    raw = simulate_nirs_raw(sfreq=3., amplitude=1.,
                            sig_dur=300., stim_dur=5.,
                            isi_min=15., isi_max=45.)
    assert 'hbo' in raw
    assert raw.info['sfreq'] == 3.
    assert raw.get_data().shape == (1, 900)
    assert np.max(raw.get_data()) < 1.2 * 1.e-6
    assert raw.annotations.description[0] == 'A'
    assert raw.annotations.duration[0] == 5
    assert np.min(np.diff(raw.annotations.onset)) > 15. + 5.
    assert np.max(np.diff(raw.annotations.onset)) < 45. + 5.

    with pytest.raises(AssertionError, match='Same number of'):
        raw = simulate_nirs_raw(sfreq=3., amplitude=[1., 2.],
                                sig_dur=300., stim_dur=5.,
                                isi_min=15., isi_max=45.)

    raw = simulate_nirs_raw(sfreq=3., amplitude=[0., 2., 4.],
                            annot_desc=['Control', 'Cond_A', 'Cond_B'],
                            stim_dur=[5, 5, 5],
                            sig_dur=900., isi_min=15., isi_max=45.)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.0,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    glm_est = run_GLM(raw, design_matrix)
    df = glm_to_tidy(raw, glm_est, design_matrix)
    df = _tidy_long_to_wide(df)
    assert df.query("condition in ['Control']")['theta'].values[0] == \
        pytest.approx(0)
    assert df.query("condition in ['Cond_A']")['theta'].values[0] == \
        pytest.approx(2e-6)
    assert df.query("condition in ['Cond_B']")['theta'].values[0] == \
        pytest.approx(4e-6)
def test_run_GLM():
    raw = simulate_nirs_raw(sig_dur=200, stim_dur=5.)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    glm_estimates = run_GLM(raw, design_matrix)

    assert len(glm_estimates) == len(raw.ch_names)

    # Check the estimate is correct within 10% error
    assert abs(glm_estimates["Simulated"].theta[0] - 1.e-6) < 0.1e-6

    # Ensure we return the same type as nilearn to encourage compatibility
    _, ni_est = nilearn.glm.first_level.run_glm(
        raw.get_data(0).T, design_matrix.values)
    assert type(ni_est) == type(glm_estimates)
def analysis(fname, ID):
    raw_intensity = read_raw_bids(bids_path=fname, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end
    # of the experiment.
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # Sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.5, npad="auto")

    # Cut out just the short channels for creating a GLM regressor
    short_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo,
                                                   hrf_model='fir',
                                                   stim_dur=1.0,
                                                   fir_delays=range(10),
                                                   drift_model='cosine',
                                                   high_pass=0.01,
                                                   oversampling=1)
    # Add short channels as regressors in the GLM
    for chan in range(len(short_chans.ch_names)):
        design_matrix[f"short_{chan}"] = short_chans.get_data(chan).T

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Create a single ROI that includes all channels for example
    rois = dict(AllChannels=range(len(raw_haemo.ch_names)))
    # Calculate ROI for all conditions
    conditions = design_matrix.columns
    # Compute output metrics by ROI
    df_ind = glm_est.to_dataframe_region_of_interest(rois, conditions)

    df_ind["ID"] = ID
    df_ind["theta"] = [t * 1.e6 for t in df_ind["theta"]]

    return df_ind, raw_haemo, design_matrix
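# A hypothetical group-level driver for the FIR analysis above, pooling the
# per-subject dataframes (``bids_paths`` is an assumed list of BIDSPath
# objects, one per participant):
df = pd.DataFrame()
for idx, fname in enumerate(bids_paths):
    df_ind, raw_haemo, design_matrix = analysis(fname, idx)
    df = pd.concat([df, df_ind], ignore_index=True)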
def test_run_plot_GLM_topo():
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast

    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
    glm_estimates = run_GLM(raw_haemo, design_matrix)
    fig = plot_glm_topo(raw_haemo, glm_estimates, design_matrix)
    # 5 conditions (A, B, C, Drift, Constant) * two chroma + 2x colorbar
    assert len(fig.axes) == 12

    fig = plot_glm_topo(raw_haemo, glm_estimates, design_matrix,
                        requested_conditions=['A', 'B'])
    # Two conditions * two chroma + 2x colorbar
    assert len(fig.axes) == 6
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # Sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Add the participant ID to the dataframes
    cha["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]

    return raw_haemo, cha
def test_cropped_raw():
    # Ensure timing is correct for cropped signals
    raw = simulate_nirs_raw(sfreq=1., amplitude=1., sig_dur=300.,
                            stim_dur=1., isi_min=20., isi_max=40.)

    onsets = raw.annotations.onset
    onsets_after_crop = [onsets[idx] for idx in np.where(onsets > 100)]

    raw.crop(tmin=100)
    design_matrix = make_first_level_design_matrix(raw, drift_order=0,
                                                   drift_model='polynomial')

    # 100 corrects for the crop time above
    # 4 is the peak time after onset
    new_idx = np.round(onsets_after_crop[0][0]) - 100 + 4
    assert design_matrix["A"][new_idx] > 0.1
def test_run_plot_GLM_contrast_topo_single_chroma():
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast

    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo = raw_haemo.pick(picks='hbo')
    glm_est = run_glm(raw_haemo, design_matrix)
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['A'] - basic_conts['B']
    with pytest.deprecated_call(match='comprehensive GLM'):
        contrast = mne_nirs.statistics.compute_contrast(glm_est.data,
                                                        contrast_LvR)
    with pytest.deprecated_call(match='comprehensive GLM'):
        fig = mne_nirs.visualisation.plot_glm_contrast_topo(raw_haemo,
                                                            contrast)
    assert len(fig.axes) == 2
def test_simulate_NIRS_multi_channel():

    raw = simulate_nirs_raw(sfreq=3., amplitude=[0., 2., 4.],
                            annot_desc=['Control', 'Cond_A', 'Cond_B'],
                            stim_dur=[5, 5, 5],
                            sig_dur=1500.,
                            isi_min=5., isi_max=15.,
                            hrf_model='spm')

    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.0,
                                                   drift_order=0,
                                                   drift_model='polynomial')

    assert len(design_matrix['Control']) == 1500 * 3
    assert len(design_matrix['Cond_A']) == 1500 * 3

    # Make sure no extra columns are present. Specifically, the default
    # condition name 'A' should be absent.
    with pytest.raises(KeyError, match='A'):
        len(design_matrix['A'])
def test_statsmodel_to_df(func):
    # `func` is supplied by pytest parametrization in the original test file
    # (a statsmodels formula API name such as 'mixedlm')
    func = getattr(smf, func)
    np.random.seed(0)

    amplitude = 1.432

    df_cha = pd.DataFrame()
    for n in range(5):

        raw = simulate_nirs_raw(sfreq=3., amplitude=amplitude,
                                sig_dur=300., stim_dur=5.,
                                isi_min=15., isi_max=45.)

        raw._data += np.random.normal(0, np.sqrt(1e-12), raw._data.shape)

        design_matrix = make_first_level_design_matrix(raw, stim_dur=5.0)

        glm_est = run_glm(raw, design_matrix)

        with pytest.warns(RuntimeWarning, match='Non standard source detect'):
            cha = glm_est.to_dataframe()

        cha["ID"] = '%02d' % n

        df_cha = pd.concat([df_cha, cha], ignore_index=True)

    df_cha["theta"] = df_cha["theta"] * 1.0e6

    roi_model = func("theta ~ -1 + Condition", df_cha,
                     groups=df_cha["ID"]).fit()
    df = statsmodels_to_results(roi_model)

    assert type(df) == pd.DataFrame
    assert_allclose(df["Coef."]["Condition[A]"], amplitude, rtol=0.1)
    assert df["Significant"]["Condition[A]"]
    assert df.shape == (8, 8)

    roi_model = smf.rlm("theta ~ -1 + Condition", df_cha,
                        groups=df_cha["ID"]).fit()
    df = statsmodels_to_results(roi_model)

    assert type(df) == pd.DataFrame
    assert_allclose(df["Coef."]["Condition[A]"], amplitude, rtol=0.1)
    assert df["Significant"]["Condition[A]"]
    assert df.shape == (8, 8)
def test_run_GLM():
    raw = simulate_nirs_raw(sig_dur=200, stim_dur=5.)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    glm_estimates = run_glm(raw, design_matrix)

    # Test backwards compatibility
    with pytest.deprecated_call(match='more comprehensive'):
        old_res = run_GLM(raw, design_matrix)
    assert old_res.keys() == glm_estimates.data.keys()
    assert (old_res["Simulated"].theta ==
            glm_estimates.data["Simulated"].theta).all()

    assert len(glm_estimates) == len(raw.ch_names)

    # Check the estimate is correct within 10% error
    assert abs(glm_estimates.pick("Simulated").theta()[0][0] - 1.e-6) < 0.1e-6

    # Ensure we return the same type as nilearn to encourage compatibility
    _, ni_est = nilearn.glm.first_level.run_glm(
        raw.get_data(0).T, design_matrix.values)
    assert isinstance(glm_estimates._data, type(ni_est))
def test_run_plot_GLM_projection(requires_pyvista):
    raw_intensity = _load_dataset()
    raw_intensity.crop(450, 600)  # Keep the test fast

    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    glm_estimates = run_glm(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_estimates.data, design_matrix)
    df = df.query("Chroma in 'hbo'")
    df = df.query("Condition in 'A'")

    brain = plot_glm_surface_projection(raw_haemo.copy().pick("hbo"),
                                        df, clim='auto', view='dorsal',
                                        colorbar=True, size=(800, 700),
                                        value="theta", surface='white',
                                        subjects_dir=subjects_dir)
    assert type(brain) == mne.viz._brain.Brain
                        sig_dur=60 * 5, amplitude=amp,
                        isi_min=15., isi_max=45.)
raw.plot(duration=300, show_scrollbars=False)

###############################################################################
# Create design matrix
# --------------------
#
# Next we create a design matrix based on the annotation times in the
# simulated data. We use the nilearn plotting function to visualise the
# design matrix. For more details on this procedure see :ref:`tut-fnirs-hrf`.

design_matrix = make_first_level_design_matrix(raw, stim_dur=5.0,
                                               drift_order=1,
                                               drift_model='polynomial')
fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)

###############################################################################
# Estimate response on clean data
# -------------------------------
#
# Here we run the GLM analysis on the clean data.
# The design matrix had three columns, so we get an estimate for our simulated
# event, the first order drift, and the constant.
# We see that the estimate of the first component is 4e-6 (4 uM),
# which was the amplitude we used in the simulation.
# We also see that the mean square error of the model fit is close to zero.
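###############################################################################
#
# The estimation cell itself was not captured above. Below is a minimal
# sketch of the step the text describes, reusing the names from this example
# and the ``run_glm``/``to_dataframe`` calls used elsewhere in these
# tutorials; the original cell may present the results differently.

glm_est = run_glm(raw, design_matrix)
print(glm_est.to_dataframe())  # theta of the event regressor should be ~4e-6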
# %%
#
# Next we generate the design matrix and plot it.
# This representation of the regressor is transposed: time goes down the
# vertical axis and is specified in scan number (an fMRI hangover) or sample.
# There is no colorbar for this plot, as specified in Nilearn.
#
# We can see that when each event occurs the model value increases before
# returning to baseline. This is the same information as was shown in the
# time courses above, except displayed differently with color representing
# amplitude.

design_matrix = make_first_level_design_matrix(
    raw_intensity,
    # Ignore drift model for now, see section below
    drift_model='polynomial',
    drift_order=0,
    # Here we specify the HRF and duration
    hrf_model='glover',
    stim_dur=3.0)

fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)

# %%
#
# As before, we can explore the effect of modifying the duration:
# the resulting regressor for each annotation is elongated.

design_matrix = make_first_level_design_matrix(
    raw_intensity,
    # Ignore drift model for now, see section below
    drift_model='polynomial',
    drift_order=0,
    hrf_model='glover',
    stim_dur=13.0)  # A longer duration than above; exact value illustrative
# We know when each stimulus was presented to the listener (see Annotations)
# and we have a model of how we expect the brain to react to each
# stimulus presentation
# (https://en.wikipedia.org/wiki/Haemodynamic_response).
# From this information we can build a model of how we expect the brain
# to be active during this experiment.
# See :ref:`tut-fnirs-hrf` for more details on this analysis.
#
# Here we create the expected model neural response function using the data
# and plot the frequency spectrum.
#
# We note there is a peak at 0.03 Hz, which corresponds approximately to
# the repetition rate of the experiment.

design_matrix = make_first_level_design_matrix(raw_haemo, drift_order=0,
                                               stim_dur=5.)

# This is a bit of a hack.
# Overwrite the first NIRS channel with the expected response.
# Rescale to be in expected units of uM.
hrf = raw_haemo.copy().pick(picks=[0])
hrf._data[0] = 1e-6 * (design_matrix['Tapping_Left'] +
                       design_matrix['Tapping_Right']).T
hrf.pick(picks='hbo').plot_psd(average=True, fmax=2, xscale='log',
                               color='r', show=False)

# %%
def individual_analysis(bids_path, ID):

    raw_intensity = read_raw_bids(bids_path=bids_path, verbose=False)
    # Delete annotations labeled 15, as these just signify the start and end
    # of the experiment.
    raw_intensity.annotations.delete(
        raw_intensity.annotations.description == '15.0')
    # Sanitize event names
    raw_intensity.annotations.description[:] = [
        d.replace('/', '_') for d in raw_intensity.annotations.description]

    # Convert signal to haemoglobin and resample
    raw_od = optical_density(raw_intensity)
    raw_haemo = beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo.resample(0.3)

    # Cut out just the short channels for creating a GLM regressor
    sht_chans = get_short_channels(raw_haemo)
    raw_haemo = get_long_channels(raw_haemo)

    # Create a design matrix
    design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=5.0)

    # Append short channels mean to design matrix
    design_matrix["ShortHbO"] = np.mean(
        sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
    design_matrix["ShortHbR"] = np.mean(
        sht_chans.copy().pick(picks="hbr").get_data(), axis=0)

    # Run GLM
    glm_est = run_glm(raw_haemo, design_matrix)

    # Define channels in each region of interest
    # List the channel pairs manually
    left = [[4, 3], [1, 3], [3, 3], [1, 2], [2, 3], [1, 1]]
    right = [[8, 7], [5, 7], [7, 7], [5, 6], [6, 7], [5, 5]]
    # Then generate the correct indices for each pair
    groups = dict(
        Left_Hemisphere=picks_pair_to_idx(raw_haemo, left,
                                          on_missing='ignore'),
        Right_Hemisphere=picks_pair_to_idx(raw_haemo, right,
                                           on_missing='ignore'))

    # Extract channel metrics
    cha = glm_est.to_dataframe()

    # Compute region of interest results from channel data
    roi = glm_est.to_dataframe_region_of_interest(groups,
                                                  design_matrix.columns,
                                                  demographic_info=True)

    # Define left vs right tapping contrast
    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['Tapping_Left'] - basic_conts['Tapping_Right']

    # Compute defined contrast
    contrast = glm_est.compute_contrast(contrast_LvR)
    con = contrast.to_dataframe()

    # Add the participant ID to the dataframes
    roi["ID"] = cha["ID"] = con["ID"] = ID

    # Convert to uM for nicer plotting below.
    cha["theta"] = [t * 1.e6 for t in cha["theta"]]
    roi["theta"] = [t * 1.e6 for t in roi["theta"]]
    con["effect"] = [t * 1.e6 for t in con["effect"]]

    return raw_haemo, roi, cha, con
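# A hypothetical loop over participants using the function above, pooling the
# per-participant dataframes (``subject_paths`` is an assumed list of
# BIDSPath objects, one per participant):
df_roi = pd.DataFrame()
df_cha = pd.DataFrame()
df_con = pd.DataFrame()
for bids_path in subject_paths:
    raw_haemo, roi, cha, con = individual_analysis(bids_path,
                                                   bids_path.subject)
    df_roi = pd.concat([df_roi, roi], ignore_index=True)
    df_cha = pd.concat([df_cha, cha], ignore_index=True)
    df_con = pd.concat([df_con, con], ignore_index=True)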
# brain region than other channels.
# Thus, when doing a region of interest analysis you may wish to give extra
# weight to channels with greater sensitivity to the desired ROI.
# This can be done by manually specifying the weights used in the region of
# interest function call.
# The details of the GLM analysis will not be described here; instead, view
# the :ref:`fNIRS GLM tutorial <tut-fnirs-hrf>`. Comments are provided
# for the weighted region of interest function call.

# Basic pipeline, simplified for example
raw_od = optical_density(raw)
raw_haemo = beer_lambert_law(raw_od)
raw_haemo.resample(0.3).pick("hbo")  # Speed increase for web server
sht_chans = get_short_channels(raw_haemo)
raw_haemo = get_long_channels(raw_haemo)
design_matrix = make_first_level_design_matrix(raw_haemo, stim_dur=13.0)
design_matrix["ShortHbO"] = np.mean(
    sht_chans.copy().pick(picks="hbo").get_data(), axis=0)
glm_est = run_glm(raw_haemo, design_matrix)

# First we create a dictionary for each region of interest.
# Here we include all channels in each ROI, as we will later be applying
# weights based on their specificity to the brain regions of interest.
rois = dict()
rois["Audio_weighted"] = range(len(glm_est.ch_names))
rois["Visual_weighted"] = range(len(glm_est.ch_names))

# Next we compute the specificity for each channel to the auditory and
# visual cortex.
spec_aud = fold_landmark_specificity(
    raw_haemo,
    '42 - Primary and Auditory Association Cortex',
    atlas="Brodmann")  # Brodmann-atlas landmark string, so select that atlas
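# The snippet above ends before the weighted call itself. A plausible sketch
# of that step follows: compute the specificity to a visual landmark in the
# same way, then pass the specificity vectors as per-ROI weights. The
# ``weighted`` dict form and the visual landmark string are assumptions;
# check the signature of ``to_dataframe_region_of_interest`` in your
# installed version.
spec_vis = fold_landmark_specificity(
    raw_haemo, '17 - Primary Visual Cortex (V1)', atlas="Brodmann")

df = glm_est.to_dataframe_region_of_interest(
    rois, design_matrix.columns,
    weighted={"Audio_weighted": spec_aud, "Visual_weighted": spec_vis})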
def _get_glm_result(tmax=60, tmin=0, noise_model='ar1'):
    raw = _get_minimal_haemo_data(tmin=tmin, tmax=tmax)
    design_matrix = make_first_level_design_matrix(raw, stim_dur=5.,
                                                   drift_order=1,
                                                   drift_model='polynomial')
    return run_glm(raw, design_matrix, noise_model=noise_model)
def test_io():
    num_chans = 6
    fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
    fnirs_raw_dir = os.path.join(fnirs_data_folder, 'Participant-1')
    raw_intensity = mne.io.read_raw_nirx(fnirs_raw_dir).load_data()
    raw_intensity.resample(0.2)
    raw_intensity.annotations.description[:] = [
        'e' + d.replace('.', 'p')
        for d in raw_intensity.annotations.description]
    raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
    raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
    raw_haemo = mne_nirs.channels.get_long_channels(raw_haemo)
    raw_haemo.pick(picks=range(num_chans))
    design_matrix = make_first_level_design_matrix(raw_intensity,
                                                   hrf_model='spm',
                                                   stim_dur=5.0,
                                                   drift_order=3,
                                                   drift_model='polynomial')
    glm_est = run_glm(raw_haemo, design_matrix)
    df = glm_to_tidy(raw_haemo, glm_est.data, design_matrix)
    assert df.shape == (48, 12)
    assert set(df.columns) == {'ch_name', 'Condition', 'df', 'mse',
                               'p_value', 't', 'theta', 'Source',
                               'Detector', 'Chroma', 'Significant', 'se'}
    num_conds = 8  # triggers (1, 2, 3, 15) + 3 drifts + constant
    assert df.shape[0] == num_chans * num_conds

    assert len(df["se"]) == 48
    assert sum(df["se"]) > 0  # Check isn't nan
    assert len(df["df"]) == 48
    assert sum(df["df"]) > 0  # Check isn't nan
    assert len(df["p_value"]) == 48
    assert sum(df["p_value"]) > 0  # Check isn't nan
    assert len(df["theta"]) == 48
    assert sum(df["theta"]) > 0  # Check isn't nan
    assert len(df["t"]) == 48
    assert sum(df["t"]) > -99999  # Check isn't nan

    contrast_matrix = np.eye(design_matrix.shape[1])
    basic_conts = dict([(column, contrast_matrix[i])
                        for i, column in enumerate(design_matrix.columns)])
    contrast_LvR = basic_conts['e2p0'] - basic_conts['e3p0']
    contrast = mne_nirs.statistics.compute_contrast(glm_est.data,
                                                    contrast_LvR)
    df = glm_to_tidy(raw_haemo, contrast, design_matrix)
    assert df.shape == (6, 10)
    assert set(df.columns) == {'ch_name', 'ContrastType', 'z_score', 'stat',
                               'p_value', 'effect', 'Source', 'Detector',
                               'Chroma', 'Significant'}

    contrast = mne_nirs.statistics.compute_contrast(glm_est.data,
                                                    contrast_LvR,
                                                    contrast_type='F')
    df = glm_to_tidy(raw_haemo, contrast, design_matrix, wide=False)
    df = _tidy_long_to_wide(df)
    assert df.shape == (6, 10)
    assert set(df.columns) == {'ch_name', 'ContrastType', 'z_score', 'stat',
                               'p_value', 'effect', 'Source', 'Detector',
                               'Chroma', 'Significant'}

    with pytest.raises(TypeError, match="Unknown statistic type"):
        glm_to_tidy(raw_haemo, [1, 2, 3], design_matrix, wide=False)
# and
# `design matrix examples <https://5712-1235740-gh.circle-artifacts.com/0/doc/_build/html/auto_examples/04_glm_first_level_models/plot_design_matrix.html>`_.
#
# Next we create a model to fit our data to.
# The model consists of various components to model different things we
# assume contribute to the measured signal.
# We model the expected neural response for each experimental condition
# using the SPM haemodynamic response
# function combined with the known stimulus event times and durations
# (as described above).
# We also include a third order polynomial drift to model slow fluctuations
# in the data, plus a constant to model the DC shift.

design_matrix = make_first_level_design_matrix(raw_intensity,
                                               hrf_model='spm',
                                               stim_dur=5.0,
                                               drift_order=3,
                                               drift_model='polynomial')

###############################################################################
#
# We also add the mean of the short channels to the design matrix.
# In theory these channels contain only systemic components, so including
# them in the design matrix allows us to estimate the neural component
# related to each experimental condition
# uncontaminated by systemic effects.

design_matrix["ShortHbO"] = np.mean(
    short_chs.copy().pick(picks="hbo").get_data(), axis=0)

design_matrix["ShortHbR"] = np.mean(
    short_chs.copy().pick(picks="hbr").get_data(), axis=0)
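###############################################################################
#
# It can be useful to inspect the assembled design matrix. Below is a short
# sketch using the nilearn plotting helper seen in the other examples here
# (figure size is arbitrary):

import matplotlib.pyplot as plt
from nilearn.plotting import plot_design_matrix

fig, ax1 = plt.subplots(figsize=(10, 6), nrows=1, ncols=1)
fig = plot_design_matrix(design_matrix, ax=ax1)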
# Next we create a model to fit our data to.
# The model consists of various components to model different things we
# assume contribute to the measured signal.
# We model the expected neural response for each experimental condition
# using the SPM haemodynamic response
# function (HRF) combined with the known stimulus event times and durations
# (as described above).
# We also include a cosine drift model with components up to the high pass
# parameter value. See the nilearn documentation for recommendations on
# setting these values. In short, they suggest `"The cutoff period
# (1/high_pass) should be set as the longest period between two trials of
# the same condition multiplied by 2. For instance, if the longest period is
# 32s, the high_pass frequency shall be 1/64 Hz ~ 0.016 Hz"`.

design_matrix = make_first_level_design_matrix(
    raw_haemo,
    drift_model='cosine',
    high_pass=0.005,  # Must be specified per experiment
    hrf_model='spm',
    stim_dur=5.0)

# %%
#
# We also add the mean of the short channels to the design matrix.
# In theory these channels contain only systemic components, so including
# them in the design matrix allows us to estimate the neural component
# related to each experimental condition
# uncontaminated by systemic effects.

design_matrix["ShortHbO"] = np.mean(
    short_chs.copy().pick(picks="hbo").get_data(), axis=0)

design_matrix["ShortHbR"] = np.mean(
    short_chs.copy().pick(picks="hbr").get_data(), axis=0)