def load_data(self, filename=None, params=None, create_frame_seg=50):
    """Load BCI Competition IV 2a training data for all 9 subjects.

    Reads ``dataset/A0<i>T_slice.mat`` (HDF5) for i in 1..9, keeps the
    first 22 channels of each trial, zeroes NaN samples, and fills
    ``self.datadict``, ``self.dataframe`` (9 x 288 x 22 x 1000) and
    ``self.datalabel`` (9 x 288).  Every trial is then highpass-filtered
    at 4 Hz and standardized with braindecode's exponential running
    standardization.

    :param filename: unused -- kept for interface compatibility
    :param params: unused -- kept for interface compatibility
    :param create_frame_seg: if truthy, segment length passed to
        ``self.create_frame()`` after preprocessing
    """
    self.dataframe = np.zeros((9, 288, 22, 1000))
    self.datalabel = np.zeros((9, 288))
    for i in range(1, 10):
        # Context manager so the HDF5 handle is always closed
        # (previously one file per subject was left open).
        with h5py.File('dataset/A0' + str(i) + 'T_slice.mat', 'r') as AT_slice:
            X = np.copy(AT_slice['image'])
            y = np.copy(AT_slice['type'])
        X = X[:, :22, :]  # select first 22 channels (drop the EOG channels)
        y = y[0, 0:X.shape[0]:1]
        y = np.asarray(y, dtype=np.int32)
        # replace NaN as 0
        X[np.isnan(X)] = 0
        self.datadict['A0' + str(i) + 'T'] = (X, y)
        self.dataframe[i - 1, :, :, :] = X
        self.datalabel[i - 1, :] = y
    # Preprocessing using braindecode: 4 Hz highpass then exponential
    # running standardization, applied per trial in (time, channels) layout
    # (hence the transposes around each call).
    # BUG FIX: the inner loop previously ran range(228), silently leaving
    # trials 228..287 of every subject unfiltered; there are 288 trials.
    for i in range(9):
        for j in range(288):
            self.dataframe[i, j] = np.transpose(
                signalproc.highpass_cnt(
                    np.transpose(self.dataframe[i, j]), 4, 1000,
                    filt_order=3, axis=0))
            self.dataframe[i, j] = np.transpose(
                signalproc.exponential_running_standardize(
                    np.transpose(self.dataframe[i, j]),
                    factor_new=0.001, init_block_size=None, eps=0.0001))
    print("Data filtered")
    if create_frame_seg:
        self.create_frame(create_frame_seg)
    print("Data fully loaded!")
def load_bbci_data(filename, low_cut_hz):
    """Load one BBCI recording and return cleaned, preprocessed trials.

    Pipeline: load continuous data, mark trials whose absolute amplitude
    exceeds 800 inside [0, 4000] ms for removal, keep only motor-cortex
    ('C') sensors, resample to 250 Hz, highpass at ``low_cut_hz``,
    apply exponential running standardization, cut [-500, 4000] ms trials
    and drop the unclean ones.

    :param filename: path to the BBCI file understood by ``BBCIDataset``
    :param low_cut_hz: highpass cutoff frequency in Hz
    :return: tuple ``(X, y)`` of clean trial data and labels
    """
    load_sensor_names = None
    loader = BBCIDataset(filename, load_sensor_names=load_sensor_names)
    log.info("Loading data...")
    cnt = loader.load()

    # Cleaning: first find all trials that have absolute microvolt values
    # larger than +- 800 inside them and remember them for removal later.
    log.info("Cutting trials...")
    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2],),
                              ('Rest', [3]), ('Feet', [4])])
    clean_ival = [0, 4000]
    set_for_cleaning = create_signal_target_from_raw_mne(cnt, marker_def,
                                                         clean_ival)
    clean_trial_mask = np.max(np.abs(set_for_cleaning.X), axis=(1, 2)) < 800
    log.info("Clean trials: {:3d} of {:3d} ({:5.1f}%)".format(
        np.sum(clean_trial_mask), len(set_for_cleaning.X),
        np.mean(clean_trial_mask) * 100))

    # Now pick only sensors with C in their name, as they cover motor cortex.
    C_sensors = ['FC5', 'FC1', 'FC2', 'FC6', 'C3', 'C4', 'CP5', 'CP1', 'CP2',
                 'CP6', 'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3',
                 'CPz', 'CP4', 'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h',
                 'FCC3h', 'FCC4h', 'FCC6h', 'CCP5h', 'CCP3h', 'CCP4h',
                 'CCP6h', 'CPP5h', 'CPP3h', 'CPP4h', 'CPP6h', 'FFC1h',
                 'FFC2h', 'FCC1h', 'FCC2h', 'CCP1h', 'CCP2h', 'CPP1h',
                 'CPP2h']
    cnt = cnt.pick_channels(C_sensors)

    # Further preprocessing.
    log.info("Resampling...")
    cnt = resample_cnt(cnt, 250.0)
    # BUG FIX: removed stray debug print("REREFERENCING") here -- no
    # rereferencing step is actually performed in this pipeline and the
    # message was misleading (all other progress output uses log.info).
    log.info("Highpassing...")
    cnt = mne_apply(
        lambda a: highpass_cnt(a, low_cut_hz, cnt.info['sfreq'],
                               filt_order=3, axis=1), cnt)
    log.info("Standardizing...")
    cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, cnt)

    # Trial interval: start at -500 ms already, since that improved decoding
    # for the networks.
    ival = [-500, 4000]
    dataset = create_signal_target_from_raw_mne(cnt, marker_def, ival)
    dataset.X = dataset.X[clean_trial_mask]
    dataset.y = dataset.y[clean_trial_mask]
    return dataset.X, dataset.y
def load_bbci_data(filename, low_cut_hz, debug=False):
    """Load a BBCI recording and cut it into labelled [-500, 4000] ms trials.

    Loads the continuous signal, computes (and logs) a cleanliness mask for
    trials exceeding +-800 in [0, 4000] ms, restricts to motor-cortex ('C')
    sensors, scales the signal by 1e6, resamples to 250 Hz, highpasses at
    ``low_cut_hz`` and applies exponential running standardization.

    :param filename: path understood by ``BBCIDataset``
    :param low_cut_hz: highpass cutoff frequency in Hz
    :param debug: if True, load and keep only three channels for speed
    :return: the ``SignalAndTarget``-style dataset of all cut trials
    """
    sensor_subset = None
    if debug:
        sensor_subset = ['C3', 'C4', 'C2']
    loader = BBCIDataset(filename, load_sensor_names=sensor_subset)

    log.info("Loading data...")
    cnt = loader.load()

    log.info("Cutting trials...")
    class_names = ['Right Hand', 'Left Hand', 'Rest', 'Feet']
    marker_def = OrderedDict(
        (name, [code]) for code, name in enumerate(class_names, start=1))
    set_for_cleaning = create_signal_target_from_raw_mne(
        cnt, marker_def, [0, 4000])
    clean_trial_mask = np.max(np.abs(set_for_cleaning.X), axis=(1, 2)) < 800
    log.info("Clean trials: {:3d} of {:3d} ({:5.1f}%)".format(
        np.sum(clean_trial_mask), len(set_for_cleaning.X),
        np.mean(clean_trial_mask) * 100))
    # NOTE(review): clean_trial_mask is computed and logged but never applied
    # in this function -- presumably the caller filters the returned trials;
    # confirm before relying on this returning only clean trials.

    # Motor-cortex ('C') sensors only; in debug mode keep the tiny subset.
    motor_sensors = [
        'FC5', 'FC1', 'FC2', 'FC6', 'C3', 'C4', 'CP5', 'CP1', 'CP2', 'CP6',
        'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4',
        'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h', 'FCC3h', 'FCC4h',
        'FCC6h', 'CCP5h', 'CCP3h', 'CCP4h', 'CCP6h', 'CPP5h', 'CPP3h',
        'CPP4h', 'CPP6h', 'FFC1h', 'FFC2h', 'FCC1h', 'FCC2h', 'CCP1h',
        'CCP2h', 'CPP1h', 'CPP2h'
    ]
    cnt = cnt.pick_channels(sensor_subset if debug else motor_sensors)

    # Scale by 1e6 (volt -> microvolt) for numerical stability of the
    # following operations.
    cnt = mne_apply(lambda a: a * 1e6, cnt)

    log.info("Resampling...")
    cnt = resample_cnt(cnt, 250.0)

    log.info("Highpassing...")

    def _highpass(a):
        return highpass_cnt(
            a, low_cut_hz, cnt.info['sfreq'], filt_order=3, axis=1)

    cnt = mne_apply(_highpass, cnt)

    log.info("Standardizing...")

    def _standardize(a):
        return exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T

    cnt = mne_apply(_standardize, cnt)

    return create_signal_target_from_raw_mne(cnt, marker_def, [-500, 4000])
y_labels = raw_labels['y_label']
del raw_data, raw_labels
###########################################################################
### (3) Preprocessing #####################################################
###########################################################################
from braindecode.datautil.signalproc import lowpass_cnt, highpass_cnt, exponential_running_standardize

# Band-pass each trial to 8-13 Hz (lowpass at 13 Hz then highpass at 8 Hz)
# and apply exponential running standardization, all via braindecode.
# The filters assume a 200 Hz sampling rate -- TODO confirm against the
# recording. GENERALIZED: iterate over the actual number of trials in
# FeatVect instead of a hard-coded 60 (the old comment asked for manual
# editing of that constant).
for ii in range(FeatVect.shape[0]):
    # 1. Data reconstruction: filters expect (time, channels)
    temp_data = FeatVect[ii, :, :]
    temp_data = temp_data.transpose()
    # 2. Lowpass filtering at 13 Hz
    lowpassed_data = lowpass_cnt(temp_data, 13, 200, filt_order=3)
    # 3. Highpass filtering at 8 Hz -> effective 8-13 Hz band-pass
    bandpassed_data = highpass_cnt(lowpassed_data, 8, 200, filt_order=3)
    # 4. Exponential running standardization
    ExpRunStand_data = exponential_running_standardize(
        bandpassed_data, factor_new=0.001, init_block_size=None, eps=0.0001)
    # 5. Renewal of the preprocessed data, back in (channels, time) layout
    ExpRunStand_data = ExpRunStand_data.transpose()
    FeatVect[ii, :, :] = ExpRunStand_data
    del temp_data, lowpassed_data, bandpassed_data, ExpRunStand_data
###########################################################################
### (3) Convert data to braindecode format ################################
###########################################################################
# pytorch expects float 32 for input and int64 for labels.