Code Example #1
    def test_append_epo_with_different_class_names(self):
        """append_epo must raise a ValueError if the class_names differ."""
        a = self.dat.copy()
        a.class_names = a.class_names[:-1]
        # each call must raise on its own; inside a single assertRaises block
        # the second call would never be reached
        with self.assertRaises(ValueError):
            append_epo(a, self.dat)
        with self.assertRaises(ValueError):
            append_epo(self.dat, a)
Code Example #2
    def test_append_epo_with_extra(self):
        """append_epo with extra must work with lists and ndarrays."""
        self.dat.a = list(range(10))
        self.dat.b = np.arange(10)
        dat = append_epo(self.dat, self.dat, extra=['a', 'b'])
        self.assertEqual(dat.a, list(range(10)) + list(range(10)))
        np.testing.assert_array_equal(dat.b, np.concatenate([np.arange(10), np.arange(10)]))
Code Example #3
File: pipeline.py  Project: robintibor/braindecode
    def collect_features(self):
        n_folds = len(self.binary_csp.folds)
        n_class_pairs = len(self.binary_csp.class_pairs)
        result_shape = (n_folds, n_class_pairs)
        self.train_feature = np.empty(result_shape, dtype=object)
        self.train_feature_full_fold = np.empty(result_shape, dtype=object)
        self.test_feature = np.empty(result_shape, dtype=object)
        self.test_feature_full_fold = np.empty(result_shape, dtype=object)

        bincsp = self.binary_csp  # just to make code shorter
        filter_inds = self.selected_filter_inds
        # merge along featureaxis: axis 1
        merge_features = lambda fv1, fv2: append_epo(fv1, fv2, classaxis=1)
        for fold_i in range(n_folds):
            for class_i in range(n_class_pairs):
                self.train_feature[fold_i, class_i] = reduce(
                    merge_features, bincsp.train_feature[filter_inds, fold_i, class_i]
                )
                self.train_feature_full_fold[fold_i, class_i] = reduce(
                    merge_features, bincsp.train_feature_full_fold[filter_inds, fold_i, class_i]
                )
                self.test_feature[fold_i, class_i] = reduce(
                    merge_features, bincsp.test_feature[filter_inds, fold_i, class_i]
                )
                self.test_feature_full_fold[fold_i, class_i] = reduce(
                    merge_features, bincsp.test_feature_full_fold[filter_inds, fold_i, class_i]
                )
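
The method above relies on reduce, np, and append_epo being available in pipeline.py. Under Python 3, reduce is no longer a builtin, so the file needs roughly the following imports at the top. This is only a sketch of what the snippet appears to assume; the actual pipeline.py may organize its imports differently.

from functools import reduce

import numpy as np
from wyrm.processing import append_epo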
Code Example #4
    def test_append_epo(self):
        """append_epo."""
        dat = append_epo(self.dat, self.dat)
        self.assertEqual(dat.data.shape[0], 2 * self.dat.data.shape[0])
        self.assertEqual(len(dat.axes[0]), 2 * len(self.dat.axes[0]))
        np.testing.assert_array_equal(dat.data, np.concatenate([self.dat.data, self.dat.data], axis=0))
        np.testing.assert_array_equal(dat.axes[0], np.concatenate([self.dat.axes[0], self.dat.axes[0]]))
        self.assertEqual(dat.class_names, self.dat.class_names)
Code Example #5
def process(filename):
    data_dir = os.path.join("../Datasets/", filename)
    data_path = os.path.join(data_dir, filename + '_cnt.txt')
    label_path = os.path.join(data_dir, filename + '_mrk.txt')

    data_df = pd.read_table(data_path, header=None)
    label_df = pd.read_table(label_path, header=None)

    ## data overview
    print("data shape", data_df.shape)
    print("label shape", label_df.shape)

    ## build marker tuples (timestamp, class label) from the label file
    label_array = label_df.dropna().values
    train_markers = []
    for events in label_array:
        if events[1] != 0:
            for i in range(0, 400, 50):
                train_markers.append((float(events[0]) + i, str(int(events[1]))))

    markers_subject1_class_1 = [(float(events[0]), str(int(events[1]))) for events in train_markers if events[1] == '1']
    markers_subject1_class_2 = [(float(events[0]), str(int(events[1]))) for events in train_markers if events[1] == '2']

    # channels and md (the marker definition) are defined elsewhere in the script
    data_array = data_df.values
    cnt1 = convert_mushu_data(data_array, markers_subject1_class_1, 50, channels)
    cnt2 = convert_mushu_data(data_array, markers_subject1_class_2, 50, channels)

    epoch_subject1_class1 = segment_dat(cnt1, md, [0, 1000])  # 640x50x118
    epoch_subject1_class2 = segment_dat(cnt2, md, [0, 1000])  # 704x50x118
    final_epoch = append_epo(epoch_subject1_class1, epoch_subject1_class2)  # 1344x50x118
    targets = final_epoch.axes[0]

    methods = ['_csp', '_bandpowers', '_dct', '_wavelet']
    for i, func in enumerate(['_csp', utils.bandpowers, utils.dct_features, utils.wavelet_features]):
        if func == '_csp':
            from mne.decoding import CSP
            csp = CSP(n_components=50, reg=None, log=True, norm_trace=True)
            dictionary = csp.fit_transform(final_epoch.data, targets)
        else:
            dictionary = feature_transform(final_epoch, func)

        ## save the features together with the targets
        res = np.concatenate([dictionary, targets.reshape(-1, 1)], axis=1)
        res_df = pd.DataFrame(res)
        save_path = os.path.join(data_dir, filename + methods[i] + '.csv')
        res_df.to_csv(save_path, index=False)
        print("==> saved data at {}".format(save_path))
Code Example #6
File: pipeline.py  Project: robintibor/braindecode
    def collect_features_for_filter_selection(features, filters_for_filterband):
        n_filters_per_fb = features[0].data.shape[1] // 2
        n_filterbands = len(features)
        # keep the first and last n filter columns of each filterband;
        # list(range(...)) so the two index ranges concatenate under Python 3
        first_features = deepcopy(features[0])
        first_n_filters = filters_for_filterband[0]
        first_features.data = first_features.data[:, list(range(first_n_filters)) + list(range(-first_n_filters, 0))]

        all_features = first_features
        for i in range(1, n_filterbands):
            this_n_filters = min(n_filters_per_fb, filters_for_filterband[i])
            if this_n_filters > 0:
                next_features = deepcopy(features[i])
                next_features.data = next_features.data[:, list(range(this_n_filters)) + list(range(-this_n_filters, 0))]
                all_features = append_epo(all_features, next_features, classaxis=1)
        return all_features
Code Example #7
        cA, cD = pywt.dwt(epoch[i, :], 'coif1')
        cA_values.append(cA)
        cD_values.append(cD)  # collect approximation (cA) and detail (cD) wavelet coefficients
    # wavelet energy features: absolute sum of squared coefficients
    for x in range(len(epoch)):
        cA_Energy.append(abs(np.sum(np.square(cA_values[x]))))
        features.append(abs(np.sum(np.square(cA_values[x]))))

    for x in range(len(epoch)):
        cD_Energy.append(abs(np.sum(np.square(cD_values[x]))))
        features.append(abs(np.sum(np.square(cD_values[x]))))

    return features


final_epoch1 = append_epo(epoch_subject1_class1, epoch_subject1_class2)
final_epoch_ch1 = append_epo(epoch_subject1_ch1_class1,
                             epoch_subject1_ch1_class2)  # append the epochs of both classes
w1, a1, d1 = calculate_csp(final_epoch1)
# CSP needs trials from both classes at once, hence the append before calculate_csp
w2, a2, d2 = calculate_csp(final_epoch_ch1)
fil_epoch_subject1_class1 = apply_csp(epoch_subject1_class1, w1,
                                      [0, 1, 2, 3, 4, -5, -4, -3, -2, -1])  # indices of the CSP filter columns to keep
fil_epoch_subject1_class2 = apply_csp(epoch_subject1_class2, w1,
                                      [0, 1, 2, 3, 4, -5, -4, -3, -2, -1])
fil_final_epoch1 = append_epo(
    fil_epoch_subject1_class1,
Code Example #8
    def test_append_epo_copy(self):
        """append_epo must not modify its arguments."""
        cpy = self.dat.copy()
        append_epo(self.dat, self.dat)
        self.assertEqual(self.dat, cpy)
Code Example #9
    def test_append_epo_swapaxes(self):
        """append_epo must work with a nonstandard classaxis."""
        dat = append_epo(swapaxes(self.dat, 0, 2), swapaxes(self.dat, 0, 2), classaxis=2)
        dat = swapaxes(dat, 0, 2)
        dat2 = append_epo(self.dat, self.dat)
        self.assertEqual(dat, dat2)