Example #1
def get_data():
    import sys
    sys.path.append('/home/schirrmr/braindecode/code/braindecode/')
    from braindecode.datautil.trial_segment import create_signal_target_from_raw_mne
    from braindecode.datasets.bbci import BBCIDataset
    from braindecode.mne_ext.signalproc import mne_apply, resample_cnt
    from braindecode.datautil.signalproc import exponential_running_standardize
    subject_id = 4  # 1-14
    loader = BBCIDataset(
        '/data/schirrmr/schirrmr/HGD-public/reduced/train/{:d}.mat'.format(
            subject_id),
        load_sensor_names=['C3'])
    cnt = loader.load()
    cnt = cnt.drop_channels(['STI 014'])
    from collections import OrderedDict
    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2]),
                              ('Rest', [3]), ('Feet', [4])])
    # Here you can choose a larger sampling rate later
    # Right now chosen very small to allow fast initial experiments
    cnt = resample_cnt(cnt, new_fs=500)
    cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, cnt)
    ival = [0, 2000]  # ms to cut trial
    dataset = create_signal_target_from_raw_mne(cnt, marker_def, ival)
    return dataset.X, dataset.y
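A minimal usage sketch for the function above; the shapes follow from the snippet's settings (one sensor, 500 Hz, 2000 ms trials), while the trial count depends on the recording:

# Hypothetical usage of get_data() as defined above.
X, y = get_data()
print(X.shape, y.shape)  # e.g. (n_trials, 1, 1000) and (n_trials,)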
Example #2
    def load_data(filenames, sensor_names, name_to_start_codes,
                  name_to_stop_codes, trial_ival, break_ival,
                  min_break_length_ms, max_break_length_ms, input_time_length,
                  filename_to_extra_args):
        # capture the call arguments before defining other locals, so each
        # per-file copy starts from a clean set of arguments
        original_args = locals()
        all_sets = []
        for filename in filenames:
            kwargs = deepcopy(original_args)
            if filename in filename_to_extra_args:
                kwargs.update(filename_to_extra_args[filename])
            log.info("Loading {:s}...".format(filename))
            cnt = BBCIDataset(filename, load_sensor_names=sensor_names).load()
            cnt = cnt.drop_channels(['STI 014'])
            log.info("Resampling...")
            cnt = resample_cnt(cnt, 100)
            log.info("Standardizing...")
            cnt = mne_apply(
                lambda a: exponential_running_standardize(
                    a.T, init_block_size=50).T, cnt)

            log.info("Transform to set...")
            full_set = (create_signal_target_with_breaks_from_mne(
                cnt,
                kwargs['name_to_start_codes'],
                kwargs['trial_ival'],
                kwargs['name_to_stop_codes'],
                kwargs['min_break_length_ms'],
                kwargs['max_break_length_ms'],
                kwargs['break_ival'],
                prepad_trials_to_n_samples=kwargs['input_time_length'],
            ))
            all_sets.append(full_set)
        return all_sets
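A hedged sketch of how the per-file override argument is meant to be used; all paths, sensors, and marker codes below are placeholders, and load_data is assumed to be callable as a standalone function:

# Hypothetical call; every value here is illustrative.
sets = load_data(
    filenames=['subj1.BBCI.mat', 'subj2.BBCI.mat'],
    sensor_names=['C3', 'Cz', 'C4'],
    name_to_start_codes=OrderedDict([('Right Hand', [1])]),
    name_to_stop_codes=OrderedDict([('Right Hand', [10])]),
    trial_ival=[0, 4000], break_ival=[1000, -500],
    min_break_length_ms=6000, max_break_length_ms=8000,
    input_time_length=1200,
    # subj2 gets a shorter trial interval than the shared default
    filename_to_extra_args={'subj2.BBCI.mat': {'trial_ival': [0, 2000]}})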
Example #3
def load_bbci_data(filename, low_cut_hz):
    load_sensor_names = None
    loader = BBCIDataset(filename, load_sensor_names=load_sensor_names)

    log.info("Loading data...")
    cnt = loader.load()

    # Cleaning: first find all trials that contain absolute values larger
    # than +/- 800 microvolts and remember them for removal later
    log.info("Cutting trials...")

    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2]),
                              ('Rest', [3]), ('Feet', [4])])
    clean_ival = [0, 4000]

    set_for_cleaning = create_signal_target_from_raw_mne(cnt, marker_def,
                                                         clean_ival)

    clean_trial_mask = np.max(np.abs(set_for_cleaning.X), axis=(1, 2)) < 800

    log.info("Clean trials: {:3d}  of {:3d} ({:5.1f}%)".format(
        np.sum(clean_trial_mask),
        len(set_for_cleaning.X),
        np.mean(clean_trial_mask) * 100))

    # now pick only sensors with C in their name, as they cover motor cortex
    C_sensors = [
        'FC5', 'FC1', 'FC2', 'FC6', 'C3', 'C4', 'CP5', 'CP1', 'CP2', 'CP6',
        'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4',
        'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h', 'FCC3h', 'FCC4h', 'FCC6h',
        'CCP5h', 'CCP3h', 'CCP4h', 'CCP6h', 'CPP5h', 'CPP3h', 'CPP4h', 'CPP6h',
        'FFC1h', 'FFC2h', 'FCC1h', 'FCC2h', 'CCP1h', 'CCP2h', 'CPP1h', 'CPP2h'
    ]

    cnt = cnt.pick_channels(C_sensors)

    # Further preprocessing
    log.info("Resampling...")
    cnt = resample_cnt(cnt, 250.0)

    log.info("Highpassing...")
    cnt = mne_apply(
        lambda a: highpass_cnt(
            a, low_cut_hz, cnt.info['sfreq'], filt_order=3, axis=1), cnt)
    log.info("Standardizing...")
    cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, cnt)

    # Trial interval: start at -500 ms already, since this improved decoding
    # for the networks
    ival = [-500, 4000]

    dataset = create_signal_target_from_raw_mne(cnt, marker_def, ival)

    dataset.X = dataset.X[clean_trial_mask]
    dataset.y = dataset.y[clean_trial_mask]
    return dataset.X, dataset.y
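A usage sketch; the file path is a placeholder. The output shapes follow from the snippet: 44 C sensors and a [-500, 4000] ms interval at 250 Hz give about 1125 samples per trial:

# Hypothetical call; the file path is a placeholder.
X, y = load_bbci_data('./data/train/1.mat', low_cut_hz=4)
print(X.shape, y.shape)  # e.g. (n_clean_trials, 44, 1125) and (n_clean_trials,)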
Example #4
    def _create_examples(self):
        name_to_code = OrderedDict([('Right', 1), ('Left', 2), ('Rest', 3),
                                    ('Feet', 4)])

        data_list_list = []
        for file_name in self.file_names:
            cnt = BBCIDataset(file_name,
                              load_sensor_names=self.load_sensor_names).load()
            cnt = cnt.drop_channels(['STI 014'])
            cnt = resample_cnt(cnt, self.sampling_freq)
            if self.normalization_type == 'exponential':
                cnt = mne_apply(
                    lambda a: exponential_running_standardize(
                        a.T, init_block_size=1000, factor_new=0.001,
                        eps=1e-4).T, cnt)

            data = create_signal_target_from_raw_mne(cnt, name_to_code,
                                                     self.segment_ival_ms)
            data_list = [(d, l) for d, l in zip(data.X, data.y)]
            data_list = self.cv_split(data_list)

            # Normalize the data
            if self.normalization_type == 'standard':
                running_statistics = RunningStatistics(
                    dim=data_list[0][0].shape[0], time_dimension_first=False)
                for data, label in data_list:
                    running_statistics.append(data)

                mean = running_statistics.mean_vector()
                sdev = np.clip(np.sqrt(running_statistics.var_vector()), 1e-5,
                               None)

                logger.info('Normalize with \n mean: %s, \n sdev: %s' %
                            (mean, sdev))
                for i in range(len(data_list)):
                    data_list[i] = ((data_list[i][0] - mean) / sdev,
                                    data_list[i][1])

            data_list_list.append(data_list)

        # Create examples for 4 classes
        for i, data_list in enumerate(data_list_list):
            for label in range(4):

                class_data_list = [
                    data for data in data_list if data[1] == label
                ]

                self.examples.append([
                    BBCIDataReaderMulti.ExampleInfo(
                        example_id=str((i, label, j)),
                        random_mode=self.random_mode,
                        offset_size=self.offset_size,
                        label=label,
                        data=data,
                        context=i)
                    for (j, (data, label)) in enumerate(class_data_list)
                ])
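The 'standard' branch above amounts to per-channel z-scoring with statistics accumulated over the whole split; a rough numpy equivalent (assuming RunningStatistics accumulates per-channel mean and variance over time):

import numpy as np

# data_list: list of (channels x timesteps array, label) pairs
stacked = np.concatenate([d for d, _ in data_list], axis=1)
mean = stacked.mean(axis=1, keepdims=True)
sdev = np.clip(stacked.std(axis=1, keepdims=True), 1e-5, None)
data_list = [((d - mean) / sdev, l) for d, l in data_list]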
Example #5
def run_exp(filename, min_freq, max_freq, low_width, high_width, high_overlap,
            last_low_freq, low_overlap, n_top_bottom_csp_filters,
            n_selected_features, sensors):
    if sensors == 'all':
        cnt = BBCIDataset(filename).load()
    else:
        assert sensors == 'C_sensors'
        sensor_names = [
            'FC5', 'FC1', 'FC2', 'FC6', 'C3', 'Cz', 'C4', 'CP5', 'CP1', 'CP2',
            'CP6', 'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz',
            'CP4', 'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h', 'FCC3h',
            'FCC4h', 'FCC6h', 'CCP5h', 'CCP3h', 'CCP4h', 'CCP6h', 'CPP5h',
            'CPP3h', 'CPP4h', 'CPP6h', 'FFC1h', 'FFC2h', 'FCC1h', 'FCC2h',
            'CCP1h', 'CCP2h', 'CPP1h', 'CPP2h'
        ]
        cnt = BBCIDataset(filename, load_sensor_names=sensor_names).load()

    cnt = cnt.drop_channels(['STI 014'])
    name_to_start_codes = OrderedDict([('Left Hand', [1]), ('Foot', [2]),
                                       ('Right Hand', [3]), ('Word', [4]),
                                       ('Mental Rotation', [5]),
                                       ('Rest', [6])])
    name_to_stop_codes = OrderedDict([('Left Hand', [10]), ('Foot', [20]),
                                      ('Right Hand', [30]), ('Word', [40]),
                                      ('Mental Rotation', [50]),
                                      ('Rest', [60])])
    csp_experiment = CSPExperiment(
        cnt,
        name_to_start_codes,
        epoch_ival_ms=[500, 0],
        name_to_stop_codes=name_to_stop_codes,
        min_freq=min_freq,
        max_freq=max_freq,
        last_low_freq=last_low_freq,
        low_width=low_width,
        high_width=high_width,
        low_overlap=low_overlap,
        high_overlap=high_overlap,
        filt_order=4,
        n_folds=5,
        n_top_bottom_csp_filters=n_top_bottom_csp_filters,
        # this number times two will be the number of CSP filters per
        # filterband before feature selection
        n_selected_filterbands=None,  # how many filterbands to select
        n_selected_features=n_selected_features,
        # how many features to select with the feature selection
        forward_steps=2,  # feature selection param
        backward_steps=1,  # feature selection param
        stop_when_no_improvement=False,  # feature selection param
        only_last_fold=True,
        # split into n_folds, but only run the last fold
        # (i.e. use the last fold as the test fold)
        restricted_n_trials=None,  # restrict to a certain number of clean trials?
        shuffle=False,  # shuffle or do blockwise folds?
        low_bound=0.)
    csp_experiment.run()
    return csp_experiment
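A hedged call sketch for run_exp; every parameter value below is illustrative, not taken from the snippet:

# Hypothetical call with placeholder filter-bank settings.
csp_exp = run_exp(
    'subject1.BBCI.mat', min_freq=1, max_freq=118,
    low_width=6, high_width=8, high_overlap=4,
    last_low_freq=10, low_overlap=3,
    n_top_bottom_csp_filters=5, n_selected_features=20,
    sensors='C_sensors')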
Example #6
def load_file(filename, car=True, load_sensor_names=None):
    cnt = BBCIDataset(filename, load_sensor_names=load_sensor_names).load()
    cnt = cnt.drop_channels(["STI 014"])

    if car:
        # common average reference: subtract the mean across channels
        # at every timestep
        def apply_car(a):
            return a - np.mean(a, keepdims=True, axis=0)

        cnt = mne_apply(apply_car, cnt)
    return cnt
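Common average referencing subtracts the mean over channels at each timestep; a tiny standalone check on synthetic data (not from the snippet):

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # channels x timesteps
referenced = a - np.mean(a, keepdims=True, axis=0)
print(referenced.sum(axis=0))  # ~[0. 0.]: channels now average to zero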
Example #7
def load_cnt(file_path, channel_names, clean_on_all_channels=True):
    # if the cleaning procedure has to run on all channels, setting
    # load_sensor_names to None makes BBCIDataset load every available sensor
    if clean_on_all_channels:
        channel_names = None

    # create the loader object for BBCI standard
    loader = BBCIDataset(file_path, load_sensor_names=channel_names)

    # load data
    return loader.load()
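A usage sketch; path and channel names are placeholders. Note that the channel list is only honored when clean_on_all_channels is False:

# Hypothetical call; the path and channel names are placeholders.
cnt = load_cnt('./data/subject1.BBCI.mat',
               channel_names=['C3', 'Cz', 'C4'],
               clean_on_all_channels=False)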
Example #8
def load_bbci_data(filename, low_cut_hz, debug=False):
    load_sensor_names = None
    if debug:
        load_sensor_names = ['C3', 'C4', 'C2']
    loader = BBCIDataset(filename, load_sensor_names=load_sensor_names)

    log.info("Loading data...")
    cnt = loader.load()

    log.info("Cutting trials...")

    marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2]),
                              ('Rest', [3]), ('Feet', [4])])
    clean_ival = [0, 4000]

    set_for_cleaning = create_signal_target_from_raw_mne(
        cnt, marker_def, clean_ival)

    clean_trial_mask = np.max(np.abs(set_for_cleaning.X), axis=(1, 2)) < 800

    log.info("Clean trials: {:3d}  of {:3d} ({:5.1f}%)".format(
        np.sum(clean_trial_mask), len(set_for_cleaning.X),
        np.mean(clean_trial_mask) * 100))

    # now pick only sensors with C in their name, as they cover motor cortex
    C_sensors = [
        'FC5', 'FC1', 'FC2', 'FC6', 'C3', 'C4', 'CP5', 'CP1', 'CP2', 'CP6',
        'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4',
        'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h', 'FCC3h', 'FCC4h', 'FCC6h',
        'CCP5h', 'CCP3h', 'CCP4h', 'CCP6h', 'CPP5h', 'CPP3h', 'CPP4h', 'CPP6h',
        'FFC1h', 'FFC2h', 'FCC1h', 'FCC2h', 'CCP1h', 'CCP2h', 'CPP1h', 'CPP2h'
    ]
    if debug:
        C_sensors = load_sensor_names
    cnt = cnt.pick_channels(C_sensors)
    # convert from volt to microvolt for numerical stability of the
    # next operations
    cnt = mne_apply(lambda a: a * 1e6, cnt)
    log.info("Resampling...")
    cnt = resample_cnt(cnt, 250.0)
    log.info("Highpassing...")
    cnt = mne_apply(
        lambda a: highpass_cnt(
            a, low_cut_hz, cnt.info['sfreq'], filt_order=3, axis=1), cnt)
    log.info("Standardizing...")
    cnt = mne_apply(
        lambda a: exponential_running_standardize(
            a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, cnt)

    ival = [-500, 4000]

    dataset = create_signal_target_from_raw_mne(cnt, marker_def, ival)
    # apply the cleaning mask computed above
    dataset.X = dataset.X[clean_trial_mask]
    dataset.y = dataset.y[clean_trial_mask]
    return dataset
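A usage sketch; the path is a placeholder. With debug=True only the three sensors named above are loaded, which makes quick smoke tests cheap:

# Hypothetical call in debug mode.
dataset = load_bbci_data('./data/train/1.mat', low_cut_hz=4, debug=True)
print(dataset.X.shape)  # e.g. (n_clean_trials, 3, 1125) at 250 Hz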
Example #9
def import_EEGData_test(start=0, end=9, data_dir='../data_HGD/test/'):
    X, y = [], []
    for i in range(start, end):
        data_file = data_dir + str(i + 1) + '.mat'
        print("File:", data_file, "loading...")
        cnt = BBCIDataset(filename=data_file, load_sensor_names=None).load()
        marker_def = OrderedDict([('Right Hand', [1]), ('Left Hand', [2]),
                                  ('Rest', [3]), ('Feet', [4])])
        clean_ival = [0, 4000]

        set_for_cleaning = create_signal_target_from_raw_mne(
            cnt, marker_def, clean_ival)
        clean_trial_mask = np.max(np.abs(set_for_cleaning.X),
                                  axis=(1, 2)) < 800

        C_sensors = [
            'FC5', 'FC1', 'FC2', 'FC6', 'C3', 'C4', 'CP5', 'CP1', 'CP2', 'CP6',
            'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz', 'CP4',
            'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h', 'FCC3h', 'FCC4h',
            'FCC6h', 'CCP5h', 'CCP3h', 'CCP4h', 'CCP6h', 'CPP5h', 'CPP3h',
            'CPP4h', 'CPP6h', 'FFC1h', 'FFC2h', 'FCC1h', 'FCC2h', 'CCP1h',
            'CCP2h', 'CPP1h', 'CPP2h'
        ]

        cnt = cnt.pick_channels(C_sensors)
        cnt = resample_cnt(cnt, 250.0)
        cnt = mne_apply(
            lambda a: exponential_running_standardize(
                a.T, factor_new=1e-3, init_block_size=1000, eps=1e-4).T, cnt)
        ival = [-500, 4000]

        dataset = create_signal_target_from_raw_mne(cnt, marker_def, ival)
        dataset.X = dataset.X[clean_trial_mask]
        dataset.X = dataset.X[:, :, np.newaxis, :]
        dataset.y = dataset.y[clean_trial_mask]
        dataset.y = dataset.y[:, np.newaxis]

        X.extend(dataset.X)
        y.extend(dataset.y)

    X = data_in_one(np.array(X))
    y = np.array(y)
    print("X:", X.shape)
    print("y:", y.shape)
    dataset = EEGDataset(X, y)
    return dataset
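A usage sketch; the directory is the snippet's default, and data_in_one and EEGDataset are assumed to come from the surrounding project:

# Hypothetical call; loads subjects 1..9 from the test directory.
test_set = import_EEGData_test(start=0, end=9, data_dir='../data_HGD/test/')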
Example #10
            timeWindowDuration = 3661  # ms
            # deep4 stride 2: 1825
            # deep4 stride 3: 3661
        elif ResNet:
            timeWindowDuration = 1005  # ms
            saveAddonText = saveAddonText + '_ResNet'
        elif EEGNet_v4:
            timeWindowDuration = 1335  # ms
            saveAddonText = saveAddonText + '_EEGNetv4'

        for subjName in Subjects:

            train_filename = './data/BBCIformat/' + subjName + '_' + str(
                samplingRate) + 'Hz_CAR.BBCI.mat'

            sensor_names = BBCIDataset.get_all_sensors(train_filename,
                                                       pattern=None)
            sensor_names_aux = ['ECG', 'Respiration']
            sensor_names_robot = [
                'robotHandPos_x', 'robotHandPos_y', 'robotHandPos_z'
            ]
            sensor_names_robot_aux = [
                'robotHandPos_x', 'robotHandPos_y', 'robotHandPos_z', 'ECG',
                'Respiration'
            ]

            if onlyRobotData:
                cnt = BBCIDataset(
                    train_filename,
                    load_sensor_names=sensor_names_robot
                ).load()  # robot pos channels
            elif onlyEEGData:
                cnt = BBCIDataset(
Example #11
    timeWindowDuration = 1335
    saveAddonText = saveAddonText + '_EEGNetv4'

saveAddonText_tmp = saveAddonText_orig + '_onlyRobotData'

cnt = []
train_sets = []
test_sets = []

# load and process data
for iSubject, subjName in enumerate(Subjects):

    train_filename = './data/BBCIformat/' + subjName + '_' + str(
        samplingRate) + 'Hz_CAR.BBCI.mat'

    sensor_names = BBCIDataset.get_all_sensors(train_filename, pattern=None)
    sensor_names_aux = ['ECG', 'Respiration']
    sensor_names_robot = ['robotHandPos_x', 'robotHandPos_y', 'robotHandPos_z']
    sensor_names_robot_aux = [
        'robotHandPos_x', 'robotHandPos_y', 'robotHandPos_z', 'ECG',
        'Respiration'
    ]

    if onlyRobotData:
        cnt.append(
            BBCIDataset(train_filename,
                        load_sensor_names=sensor_names_robot).load()
        )  # robot pos channels
    elif onlyEEGData:
        cnt.append(
            BBCIDataset(train_filename,
                        load_sensor_names=sensor_names).load())  # all channels