def test_regression():
    """Test Ordinary Least Squares Regression."""
    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, aud_r=2)

    # Setup for reading the raw data
    raw = mne.io.read_raw_fif(raw_fname)
    events = mne.read_events(event_fname)[:10]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0))
    picks = np.arange(len(epochs.ch_names))
    evoked = epochs.average(picks=picks)

    design_matrix = epochs.events[:, 1:].astype(np.float64)
    # makes the intercept
    design_matrix[:, 0] = 1
    # creates contrast: aud_l=0, aud_r=1
    design_matrix[:, 1] -= 1

    # Fitting on all channels (including non-data ones) should warn.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        lm = linear_regression(epochs, design_matrix, ['intercept', 'aud'])
        assert_true(w[0].category == RuntimeWarning)
        assert_true('non-data' in '%s' % w[0].message)

    # Each fitted parameter is Evoked-shaped.
    for predictor, parameters in lm.items():
        for value in parameters:
            assert_equal(value.data.shape, evoked.data.shape)

    # A list of epochs objects is not a valid input.
    assert_raises(ValueError, linear_regression, [epochs, epochs],
                  design_matrix)

    # Source estimates: a list and a generator must give identical results.
    stc = read_source_estimate(stc_fname).crop(0, 0.02)
    stc_list = [stc, stc, stc]
    stc_gen = (s for s in stc_list)
    with warnings.catch_warnings(record=True):  # divide by zero
        warnings.simplefilter('always')
        lm1 = linear_regression(stc_list, design_matrix[:len(stc_list)])
        lm2 = linear_regression(stc_gen, design_matrix[:len(stc_list)])

    for val in lm2.values():
        # all p values are 0 < p <= 1 to start, but get stored in float32
        # data, so can actually be truncated to 0. Thus the mlog10_p_val
        # actually maintains better precision for tiny p-values.
        assert_true(np.isfinite(val.p_val.data).all())
        assert_true((val.p_val.data <= 1).all())
        assert_true((val.p_val.data >= 0).all())
        # all -log10(p) are non-negative (duplicate assertion removed)
        assert_true(np.isfinite(val.mlog10_p_val.data).all())
        assert_true((val.mlog10_p_val.data >= 0).all())

    for k in lm1:
        for v1, v2 in zip(lm1[k], lm2[k]):
            assert_array_equal(v1.data, v2.data)
def test_regression():
    """Test Ordinary Least Squares Regression."""
    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, aud_r=2)

    # Setup for reading the raw data.  mne.io.Raw(fname) is the old,
    # deprecated reader spelling; read_raw_fif is the supported API.
    raw = mne.io.read_raw_fif(raw_fname)
    events = mne.read_events(event_fname)[:10]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0))
    picks = np.arange(len(epochs.ch_names))
    evoked = epochs.average(picks=picks)

    design_matrix = epochs.events[:, 1:].astype(np.float64)
    # makes the intercept
    design_matrix[:, 0] = 1
    # creates contrast: aud_l=0, aud_r=1
    design_matrix[:, 1] -= 1

    # Fitting on all channels (including non-data ones) should warn.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        lm = linear_regression(epochs, design_matrix, ['intercept', 'aud'])
        assert_true(w[0].category == RuntimeWarning)
        assert_true('non-data' in '%s' % w[0].message)

    # Each fitted parameter is Evoked-shaped.
    for predictor, parameters in lm.items():
        for value in parameters:
            assert_equal(value.data.shape, evoked.data.shape)

    # A list of epochs objects is not a valid input.
    assert_raises(ValueError, linear_regression, [epochs, epochs],
                  design_matrix)

    # Source estimates: a list and a generator must give identical results.
    stc = read_source_estimate(stc_fname).crop(0, 0.02)
    stc_list = [stc, stc, stc]
    stc_gen = (s for s in stc_list)
    with warnings.catch_warnings(record=True):  # divide by zero
        warnings.simplefilter('always')
        lm1 = linear_regression(stc_list, design_matrix[:len(stc_list)])
        lm2 = linear_regression(stc_gen, design_matrix[:len(stc_list)])

    for val in lm2.values():
        # all p values are 0 < p <= 1 to start, but get stored in float32
        # data, so can actually be truncated to 0. Thus the mlog10_p_val
        # actually maintains better precision for tiny p-values.
        assert_true(np.isfinite(val.p_val.data).all())
        assert_true((val.p_val.data <= 1).all())
        assert_true((val.p_val.data >= 0).all())
        # all -log10(p) are non-negative (duplicate assertion removed)
        assert_true(np.isfinite(val.mlog10_p_val.data).all())
        assert_true((val.mlog10_p_val.data >= 0).all())

    for k in lm1:
        for v1, v2 in zip(lm1[k], lm2[k]):
            assert_array_equal(v1.data, v2.data)
def test_regression():
    """Test Ordinary Least Squares Regression."""
    data_path = sample.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
    event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, aud_r=2)

    # Load the raw data and epoch the first ten events.
    raw = mne.io.Raw(raw_fname)
    events = mne.read_events(event_fname)[:10]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0))
    picks = np.arange(len(epochs.ch_names))
    evoked = epochs.average(picks=picks)

    # Design matrix: column 0 becomes the intercept, column 1 the
    # aud_l=0 / aud_r=1 contrast.
    design_matrix = epochs.events[:, 1:].astype(np.float64)
    design_matrix[:, 0] = 1
    design_matrix[:, 1] -= 1

    # Fitting over all channels (data and non-data alike) should warn.
    with warnings.catch_warnings(record=True) as caught:
        lm = linear_regression(epochs, design_matrix, ['intercept', 'aud'])
        assert_true(caught[0].category == UserWarning)
        assert_true('non-data' in '%s' % caught[0].message)

    # Every fitted parameter must match the Evoked data shape.
    for params in lm.values():
        for param in params:
            assert_equal(param.data.shape, evoked.data.shape)

    # Passing a list of Epochs objects is rejected.
    assert_raises(ValueError, linear_regression, [epochs, epochs],
                  design_matrix)

    # A list of source estimates and an equivalent generator must agree.
    stc = read_source_estimate(stc_fname).crop(0, 0.02)
    stc_list = [stc] * 3
    stc_gen = (s for s in stc_list)
    with warnings.catch_warnings(record=True):  # divide by zero
        lm1 = linear_regression(stc_list, design_matrix[:len(stc_list)])
        lm2 = linear_regression(stc_gen, design_matrix[:len(stc_list)])

    for key in lm1:
        for from_list, from_gen in zip(lm1[key], lm2[key]):
            assert_array_equal(from_list.data, from_gen.data)
def sensor_least_squares(epochs):
    """Run a sensor-space least squares regression.

    Fits an OLS model across epochs, producing a regression coefficient
    for each combination of sensor and timepoint; p-values and t
    statistics are computed as well, and the 'trial-count' predictor's
    maps are plotted as topographies.
    """
    from mne.stats.regression import linear_regression

    names = ['intercept', 'trial-count']
    # np.float was a deprecated alias of the builtin float (removed in
    # NumPy 1.24); use float directly — identical dtype, no crash.
    intercept = np.ones((len(epochs),), dtype=float)
    design_matrix = np.column_stack([
        intercept,  # intercept
        np.linspace(0, 1, len(intercept)),
    ])

    # also accepts source estimates
    lm = linear_regression(epochs, design_matrix, names)

    def plot_topomap(x, unit):
        # Helper: topographic map of one statistic for EEG channels.
        x.plot_topomap(ch_type='eeg', scale=1, size=1.5, vmax=np.max,
                       unit=unit, times=np.linspace(0.1, 0.2, 5))

    trial_count = lm['trial-count']
    plot_topomap(trial_count.beta, unit='z (beta)')
    plot_topomap(trial_count.t_val, unit='t')
    plot_topomap(trial_count.mlog10_p_val, unit='-log10 p')
    plot_topomap(trial_count.stderr, unit='z (error)')
def test_regression():
    """Test Ordinary Least Squares Regression."""
    data_path = sample.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
    event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, aud_r=2)

    # Load the raw data and epoch the first ten events.
    raw = mne.io.Raw(raw_fname)
    events = mne.read_events(event_fname)[:10]
    epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                        baseline=(None, 0))
    picks = np.arange(len(epochs.ch_names))
    evoked = epochs.average(picks=picks)

    # Design matrix: column 0 becomes the intercept, column 1 the
    # aud_l=0 / aud_r=1 contrast.
    design_matrix = epochs.events[:, 1:].astype(np.float64)
    design_matrix[:, 0] = 1
    design_matrix[:, 1] -= 1

    # Fitting over all channels (data and non-data alike) should warn.
    with warnings.catch_warnings(record=True) as caught:
        lm = linear_regression(epochs, design_matrix, ['intercept', 'aud'])
        assert_true(caught[0].category == UserWarning)
        assert_true('non-data' in '%s' % caught[0].message)

    # Every fitted parameter must match the Evoked data shape.
    for params in lm.values():
        for param in params:
            assert_equal(param.data.shape, evoked.data.shape)

    # Passing a list of Epochs objects is rejected.
    assert_raises(ValueError, linear_regression, [epochs, epochs],
                  design_matrix)

    # A list of source estimates and an equivalent generator must agree.
    stc = read_source_estimate(stc_fname).crop(0, 0.02)
    stc_list = [stc] * 3
    stc_gen = (s for s in stc_list)
    lm1 = linear_regression(stc_list, design_matrix[:len(stc_list)])
    lm2 = linear_regression(stc_gen, design_matrix[:len(stc_list)])

    for key in lm1:
        for from_list, from_gen in zip(lm1[key], lm2[key]):
            assert_array_equal(from_list.data, from_gen.data)
preload=True, reject=reject) ############################################################################### # Run regression names = ['intercept', 'trial-count'] intercept = np.ones((len(epochs), ), dtype=np.float) design_matrix = np.column_stack([ intercept, # intercept np.linspace(0, 1, len(intercept)) ]) # also accepts source estimates lm = linear_regression(epochs, design_matrix, names) def plot_topomap(x, units): x.plot_topomap(ch_type='mag', scalings=1., size=1.5, vmax=np.max, units=units, times=np.linspace(0.1, 0.2, 5)) trial_count = lm['trial-count'] plot_topomap(trial_count.beta, units='z (beta)') plot_topomap(trial_count.t_val, units='t')
reject = dict(mag=5e-12)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=(None, 0),
                    preload=True, reject=reject)

###############################################################################
# Run regression
names = ['intercept', 'trial-count']

# np.float was a deprecated alias of the builtin float (removed in
# NumPy 1.24); use float directly — identical dtype, no crash.
intercept = np.ones((len(epochs),), dtype=float)
design_matrix = np.column_stack([intercept,  # intercept
                                 np.linspace(0, 1, len(intercept))])

# also accepts source estimates
lm = linear_regression(epochs, design_matrix, names)


def plot_topomap(x, unit):
    # Helper: topographic map of one statistic for magnetometers.
    # NOTE(review): 'scale'/'unit' are the old plot_topomap kwarg
    # spellings; newer MNE uses 'scalings'/'units' — confirm the MNE
    # version this script targets before renaming.
    x.plot_topomap(ch_type='mag', scale=1, size=1.5, vmax=np.max, unit=unit,
                   times=np.linspace(0.1, 0.2, 5))

trial_count = lm['trial-count']
plot_topomap(trial_count.beta, unit='z (beta)')
plot_topomap(trial_count.t_val, unit='t')
plot_topomap(trial_count.mlog10_p_val, unit='-log10 p')
plot_topomap(trial_count.stderr, unit='z (error)')