コード例 #1
0
    def load_data(filenames, sensor_names, name_to_start_codes,
                  name_to_stop_codes, trial_ival, break_ival,
                  min_break_length_ms, max_break_length_ms, input_time_length,
                  filename_to_extra_args):
        """Load, preprocess and epoch each BBCI file into a dataset.

        For every file: load it (restricted to *sensor_names*), drop the
        'STI 014' stim channel, resample to 100 Hz, exponentially standardize
        each channel, then cut trials and breaks into a signal/target set.

        Parameters
        ----------
        filenames : list of str
            Paths of the BBCI files to load.
        sensor_names : list of str or None
            Channels to load; None presumably loads all -- see BBCIDataset.
        name_to_start_codes, name_to_stop_codes : OrderedDict
            Marker-name -> code mappings delimiting the trials.
        trial_ival, break_ival : sequence
            Epoch intervals (ms) for trials and breaks.
        min_break_length_ms, max_break_length_ms : number
            Accepted break-length range in milliseconds.
        input_time_length : int
            Trials are pre-padded to this many samples.
        filename_to_extra_args : dict
            Optional per-file overrides for any of the arguments above.

        Returns
        -------
        list
            One epoched dataset per input file.
        """
        # Snapshot the arguments *before* any other local is created.  The
        # original code defined `all_sets` first, which leaked it into the
        # locals() snapshot and needlessly deep-copied it for every file.
        original_args = locals()
        all_sets = []
        for filename in filenames:
            # Fresh deep copy each iteration so per-file overrides cannot
            # leak into later iterations.
            kwargs = deepcopy(original_args)
            if filename in filename_to_extra_args:
                kwargs.update(filename_to_extra_args[filename])
            log.info("Loading {:s}...".format(filename))
            cnt = BBCIDataset(filename, load_sensor_names=sensor_names).load()
            cnt = cnt.drop_channels(['STI 014'])
            log.info("Resampling...")
            cnt = resample_cnt(cnt, 100)
            log.info("Standardizing...")
            # exponential_running_standardize expects time-first data, hence
            # the transpose in and back out.
            cnt = mne_apply(
                lambda a: exponential_running_standardize(
                    a.T, init_block_size=50).T, cnt)

            log.info("Transform to set...")
            full_set = create_signal_target_with_breaks_from_mne(
                cnt,
                kwargs['name_to_start_codes'],
                kwargs['trial_ival'],
                kwargs['name_to_stop_codes'],
                kwargs['min_break_length_ms'],
                kwargs['max_break_length_ms'],
                kwargs['break_ival'],
                prepad_trials_to_n_samples=kwargs['input_time_length'],
            )
            all_sets.append(full_set)
        return all_sets
コード例 #2
0
    def _create_examples(self):
        """Load every BBCI file, preprocess it and fill ``self.examples``.

        For each file: load (restricted to ``self.load_sensor_names``), drop
        the 'STI 014' stim channel, resample to ``self.sampling_freq``,
        optionally standardize, epoch into (data, label) pairs, apply the
        cross-validation split, optionally z-score with running statistics,
        and finally group examples by class (4 classes) and by file index.
        """
        # Marker-name -> event-code mapping used to cut trials from the
        # continuous recording.
        name_to_code = OrderedDict([('Right', 1), ('Left', 2), ('Rest', 3),
                                    ('Feet', 4)])

        # One entry per file; each entry is that file's list of
        # (data, label) pairs after cv_split / normalization.
        data_list_list = []
        for file_name in self.file_names:
            cnt = BBCIDataset(file_name,
                              load_sensor_names=self.load_sensor_names).load()
            cnt = cnt.drop_channels(['STI 014'])
            cnt = resample_cnt(cnt, self.sampling_freq)
            if self.normalization_type == 'exponential':
                # Exponential running standardization; the helper expects
                # time-first data, hence the transpose in and back out.
                cnt = mne_apply(
                    lambda a: exponential_running_standardize(
                        a.T, init_block_size=1000, factor_new=0.001, eps=1e-4).
                    T, cnt)

            data = create_signal_target_from_raw_mne(cnt, name_to_code,
                                                     self.segment_ival_ms)
            data_list = [(d, l) for d, l in zip(data.X, data.y)]
            # NOTE(review): cv_split presumably selects this reader's fold
            # of the (data, label) pairs -- verify against its definition.
            data_list = self.cv_split(data_list)

            # Normalize the data
            if self.normalization_type == 'standard':
                # Accumulate mean/variance over all trials of this file.
                # time_dimension_first=False: statistics are per-channel
                # (axis 0 of each trial).
                running_statistics = RunningStatistics(
                    dim=data_list[0][0].shape[0], time_dimension_first=False)
                for data, label in data_list:
                    running_statistics.append(data)

                mean = running_statistics.mean_vector()
                # Clip sdev away from zero to avoid division blow-ups on
                # flat channels.
                sdev = np.clip(np.sqrt(running_statistics.var_vector()), 1e-5,
                               None)

                logger.info('Normalize with \n mean: %s, \n sdev: %s' %
                            (mean, sdev))
                for i in range(len(data_list)):
                    # Replace each pair with its z-scored data, same label.
                    data_list[i] = ((data_list[i][0] - mean) / sdev,
                                    data_list[i][1])

            data_list_list.append(data_list)

        # Create examples for 4 classes
        # Examples are grouped per (file, class): one sub-list per class per
        # file, with the file index used as the example context.
        for i, data_list in enumerate(data_list_list):
            for label in range(4):

                # NOTE(review): labels here are assumed to be 0..3 even
                # though name_to_code maps to 1..4 -- confirm that
                # create_signal_target_from_raw_mne emits zero-based labels.
                class_data_list = [
                    data for data in data_list if data[1] == label
                ]

                self.examples.append([
                    BBCIDataReaderMulti.ExampleInfo(
                        example_id=str((i, label, j)),
                        random_mode=self.random_mode,
                        offset_size=self.offset_size,
                        label=label,
                        data=data,
                        context=i)
                    for (j, (data, label)) in enumerate(class_data_list)
                ])
コード例 #3
0
ファイル: class_6_online.py プロジェクト: zhangys2019/fbcsp
def run_exp(filename, min_freq, max_freq, low_width, high_width, high_overlap,
            last_low_freq, low_overlap, n_top_bottom_csp_filters,
            n_selected_features, sensors):
    """Run a filter-bank CSP experiment on one BBCI recording.

    Parameters
    ----------
    filename : str
        Path to the BBCI recording.
    min_freq, max_freq, last_low_freq : number
        Frequency range parameters for the filterbands.
    low_width, high_width, low_overlap, high_overlap : number
        Width/overlap of the low and high filterbands.
    n_top_bottom_csp_filters : int
        This number times two is the number of CSP filters per filterband
        before feature selection.
    n_selected_features : int
        How many features the feature selection keeps.
    sensors : str
        'all' to load every channel, or 'C_sensors' for the central
        (motor-cortex) electrode subset.

    Returns
    -------
    CSPExperiment
        The experiment object after ``run()`` has completed.
    """
    if sensors == 'all':
        cnt = BBCIDataset(filename).load()
    else:
        assert sensors == 'C_sensors'
        sensor_names = [
            'FC5', 'FC1', 'FC2', 'FC6', 'C3', 'Cz', 'C4', 'CP5', 'CP1', 'CP2',
            'CP6', 'FC3', 'FCz', 'FC4', 'C5', 'C1', 'C2', 'C6', 'CP3', 'CPz',
            'CP4', 'FFC5h', 'FFC3h', 'FFC4h', 'FFC6h', 'FCC5h', 'FCC3h',
            'FCC4h', 'FCC6h', 'CCP5h', 'CCP3h', 'CCP4h', 'CCP6h', 'CPP5h',
            'CPP3h', 'CPP4h', 'CPP6h', 'FFC1h', 'FFC2h', 'FCC1h', 'FCC2h',
            'CCP1h', 'CCP2h', 'CPP1h', 'CPP2h'
        ]
        cnt = BBCIDataset(filename, load_sensor_names=sensor_names).load()

    cnt = cnt.drop_channels(['STI 014'])
    # Marker codes delimiting each trial.  Bug fix: 'Mental Rotation' used
    # bare ints (5 / 50) while every other class used a one-element list;
    # normalized to lists for consistency with the rest of the mapping.
    name_to_start_codes = OrderedDict([
        ('Left Hand', [1]), ('Foot', [2]), ('Right Hand', [3]),
        ('Word', [4]), ('Mental Rotation', [5]), ('Rest', [6])])
    name_to_stop_codes = OrderedDict([
        ('Left Hand', [10]), ('Foot', [20]), ('Right Hand', [30]),
        ('Word', [40]), ('Mental Rotation', [50]), ('Rest', [60])])
    csp_experiment = CSPExperiment(
        cnt,
        name_to_start_codes,
        # NOTE(review): [500, 0] presumably means 500 ms after the start
        # marker until the stop marker -- confirm against CSPExperiment.
        epoch_ival_ms=[500, 0],
        name_to_stop_codes=name_to_stop_codes,
        min_freq=min_freq,
        max_freq=max_freq,
        last_low_freq=last_low_freq,
        low_width=low_width,
        high_width=high_width,
        low_overlap=low_overlap,
        high_overlap=high_overlap,
        filt_order=4,
        n_folds=5,
        n_top_bottom_csp_filters=n_top_bottom_csp_filters,
        # this number times two will be number of csp filters per filterband before feature selection
        n_selected_filterbands=None,  # how many filterbands to select?
        n_selected_features=n_selected_features,
        # how many Features to select with the feature selection?
        forward_steps=2,  # feature selection param
        backward_steps=1,  # feature selection param
        stop_when_no_improvement=False,  # feature selection param
        only_last_fold=True,
        # Split into number of folds, but only run the last fold (i.e. last fold as test fold)?
        restricted_n_trials=None,
        # restrict to certain number of _clean_ trials?
        shuffle=False,  # shuffle or do blockwise folds?
        low_bound=0.)
    csp_experiment.run()
    return csp_experiment
コード例 #4
0
def load_file(filename, car=True, load_sensor_names=None):
    """Load a BBCI recording, drop the stim channel, optionally apply CAR.

    Parameters
    ----------
    filename : str
        Path to the BBCI recording.
    car : bool
        If True, apply a common average reference: subtract the mean over
        channels (axis 0) from every channel at each time point.
    load_sensor_names : list of str or None
        Channels to load; None presumably loads all -- see BBCIDataset.

    Returns
    -------
    The loaded (and possibly re-referenced) continuous recording.
    """
    cnt = BBCIDataset(filename, load_sensor_names=load_sensor_names).load()
    cnt = cnt.drop_channels(["STI 014"])

    if car:
        # Renamed from `car` -- the original inner function shadowed the
        # boolean parameter of the same name.
        def _common_average_reference(a):
            return a - np.mean(a, keepdims=True, axis=0)

        cnt = mne_apply(_common_average_reference, cnt)
    return cnt
コード例 #5
0
                'robotHandPos_x', 'robotHandPos_y', 'robotHandPos_z'
            ]
            sensor_names_robot_aux = [
                'robotHandPos_x', 'robotHandPos_y', 'robotHandPos_z', 'ECG',
                'Respiration'
            ]

            if onlyRobotData:
                cnt = BBCIDataset(train_filename,
                                  load_sensor_names=sensor_names_robot).load(
                                  )  # robot pos channels
            elif onlyEEGData:
                cnt = BBCIDataset(
                    train_filename,
                    load_sensor_names=sensor_names).load()  # all channels
                cnt = cnt.drop_channels(sensor_names_robot_aux)
            elif RobotEEG:
                cnt = BBCIDataset(
                    train_filename,
                    load_sensor_names=sensor_names).load()  # all channels
                cnt = cnt.drop_channels(sensor_names_aux)
            elif RobotEEGAux:
                cnt = BBCIDataset(
                    train_filename,
                    load_sensor_names=sensor_names).load()  # all channels
            elif onlyAux:
                cnt = BBCIDataset(
                    train_filename,
                    load_sensor_names=sensor_names_aux).load()  # aux channels
            elif RobotAux:
                cnt = BBCIDataset(train_filename,