Code example #1
 def test_segment_dat_swapaxes(self):
     """Segmentation must work with nonstandard axes."""
     epo = segment_dat(swapaxes(self.dat, 0, 1), self.mrk_def, [-400, 400], timeaxis=-1)
     # segment_dat added a new dimension
     epo = swapaxes(epo, 1, 2)
     epo2 = segment_dat(self.dat, self.mrk_def, [-400, 400])
     self.assertEqual(epo, epo2)
Code example #2
File: online_experiment.py  Project: awakenting/wyrm
def train(filename):
    cnt = io.load_bcicomp3_ds2(filename)

    fs_n = cnt.fs / 2

    b, a = proc.signal.butter(5, [30 / fs_n], btype='low')
    cnt = proc.lfilter(cnt, b, a)

    b, a = proc.signal.butter(5, [.4 / fs_n], btype='high')
    cnt = proc.lfilter(cnt, b, a)

    cnt = proc.subsample(cnt, 60)

    epo = proc.segment_dat(cnt, MARKER_DEF_TRAIN, SEG_IVAL)

    #from wyrm import plot
    #plot.plot_spatio_temporal_r2_values(proc.sort_channels(epo))
    #print JUMPING_MEANS_IVALS
    #plot.plt.show()

    fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
    fv = proc.create_feature_vectors(fv)

    cfy = proc.lda_train(fv)
    return cfy
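
train() above is a complete offline pipeline: it band-pass filters the continuous data, subsamples to 60 Hz, epochs around MARKER_DEF_TRAIN with segment_dat, reduces each epoch to jumping means, and trains an LDA classifier. A hedged usage sketch follows; the data path and pickle filename are placeholders, and io, MARKER_DEF_TRAIN, SEG_IVAL and JUMPING_MEANS_IVALS are assumed to be defined at module level as in the original script.

import pickle

cfy = train('data/Subject_A_Train.mat')   # placeholder path, not from the project
with open('classifier.pkl', 'wb') as fh:  # placeholder filename
    pickle.dump(cfy, fh)                  # lda_train returns picklable (w, b) weights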
Code example #3
def train(filename):
    cnt = io.load_bcicomp3_ds2(filename)

    fs_n = cnt.fs / 2

    b, a = proc.signal.butter(5, [30 / fs_n], btype='low')
    cnt = proc.lfilter(cnt, b, a)

    b, a = proc.signal.butter(5, [.4 / fs_n], btype='high')
    cnt = proc.lfilter(cnt, b, a)

    cnt = proc.subsample(cnt, 60)

    epo = proc.segment_dat(cnt, MARKER_DEF_TRAIN, SEG_IVAL)

    #from wyrm import plot
    #plot.plot_spatio_temporal_r2_values(proc.sort_channels(epo))
    #print JUMPING_MEANS_IVALS
    #plot.plt.show()

    fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
    fv = proc.create_feature_vectors(fv)

    cfy = proc.lda_train(fv)
    return cfy
Code example #4
 def test_segment_dat_with_nonexisting_markers(self):
     """Segmentation without result should return empty .data"""
     mrk_def = {'class 1': ['FUU1'],
                'class 2': ['FUU2', 'FUU3']
               }
     epo = segment_dat(self.dat, mrk_def, [-400, 400])
     self.assertEqual(epo.data.shape[0], 0)
Code example #5
def train(filename_):
    cnt = io.load_bcicomp3_ds2(filename_)

    fs_n = cnt.fs / 2

    b, a = proc.signal.butter(5, [HIGH_CUT / fs_n], btype='low')
    cnt = proc.lfilter(cnt, b, a)

    b, a = proc.signal.butter(5, [LOWER_CUT / fs_n], btype='high')
    cnt = proc.lfilter(cnt, b, a)
    print("Filtragem aplicada em [{} Hz ~ {} Hz]".format(LOWER_CUT, HIGH_CUT))

    cnt = proc.subsample(cnt, SUBSAMPLING)
    print("Sub-amostragem em {} Hz".format(SUBSAMPLING))

    epo = proc.segment_dat(cnt, MARKER_DEF_TRAIN, SEG_IVAL)
    print("Dados segmentados em intervalos de [{} ~ {}]".format(
        SEG_IVAL[0], SEG_IVAL[1]))

    fv = proc.jumping_means(epo, JUMPING_MEANS_INTERVALS)
    fv = proc.create_feature_vectors(fv)

    print("Iniciando treinamento da LDA...")
    cfy = proc.lda_train(fv)
    print("Treinamento concluido!")
    return cfy
Code example #6
 def test_segment_dat_with_unequally_sized_data(self):
     """Segmentation must ignore too short or too long chunks in the result."""
     # We create a marker that is too close to the beginning of the
     # data, so its cnt will not be of length [-400, 400] ms. It
     # should not appear in the resulting epo
     self.dat.markers.append([100, 'M1'])
     epo = segment_dat(self.dat, self.mrk_def, [-400, 400])
     self.assertEqual(epo.data.shape[0], 3)
Code example #7
 def test_segment_dat_with_unequally_sized_data(self):
     """Segmentation must ignore too short or too long chunks in the result."""
     # We create a marker that is too close to the beginning of the
     # data, so its cnt will not be of length [-400, 400] ms. It
     # should not appear in the resulting epo
     self.dat.markers.append([100, 'M1'])
     epo = segment_dat(self.dat, self.mrk_def, [-400, 400])
     self.assertEqual(epo.data.shape[0], 3)
Code example #8
def preprocessing_simple(dat, MRK_DEF, *args, **kwargs):
    """Simple preprocessing that reaches 97% accuracy.
    """
    fs_n = dat.fs / 2
    b, a = proc.signal.butter(5, [10 / fs_n], btype='low')
    dat = proc.filtfilt(dat, b, a)

    dat = proc.subsample(dat, 20)
    epo = proc.segment_dat(dat, MRK_DEF, SEG_IVAL)
    fv = proc.create_feature_vectors(epo)
    return fv, epo
Code example #9
def preprocessing_simple(dat, MRK_DEF, *args, **kwargs):
    """Simple preprocessing that reaches 97% accuracy.
    """
    fs_n = dat.fs / 2
    b, a = proc.signal.butter(5, [10 / fs_n], btype='low')
    dat = proc.filtfilt(dat, b, a)
   
    dat = proc.subsample(dat, 20)
    epo = proc.segment_dat(dat, MRK_DEF, SEG_IVAL)
    fv = proc.create_feature_vectors(epo)
    return fv, epo
Code example #10
 def test_segment_dat_with_restriction_to_new_data_ival_pos_pos(self):
     """Online Segmentation with ival +something..+something must work correctly."""
     data = np.ones((9, 3))
     time = np.linspace(0, 900, 9, endpoint=False)
     channels = 'a', 'b', 'c'
     markers = [[100, 'x'], [200, 'x'], [300, 'x']]
     dat = Data(data, [time, channels], ['time', 'channels'], ['ms', '#'])
     dat.fs = 10
     dat.markers = markers
     mrk_def = {'class 1': ['x']}
     # each tuple has (number of new samples, expected epochs)
     samples_epos = [(0, 0), (1, 0), (2, 1), (3, 2), (4, 3), (5, 3)]
     for s, e in samples_epos:
         epo = segment_dat(dat, mrk_def, [100, 500], newsamples=s)
         self.assertEqual(epo.data.shape[0], e)
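
The tests above build a small continuous Data object by hand before calling segment_dat. For reference, here is a minimal, self-contained sketch of the same call pattern outside a test case (assuming wyrm is installed; the data are synthetic and the marker names are arbitrary):

import numpy as np
from wyrm.types import Data
from wyrm.processing import segment_dat

# 9 samples x 3 channels of dummy data, time axis in ms, sampled at 10 Hz
data = np.ones((9, 3))
time = np.linspace(0, 900, 9, endpoint=False)
channels = ['a', 'b', 'c']
cnt = Data(data, [time, channels], ['time', 'channels'], ['ms', '#'])
cnt.fs = 10
cnt.markers = [[100, 'x'], [300, 'x']]

# cut one epoch per marker, covering 0..400 ms after each marker
epo = segment_dat(cnt, {'class 1': ['x']}, [0, 400])
print(epo.data.shape)  # (epochs, samples per epoch, channels), here (2, 4, 3)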
Code example #11
def preprocessing(dat, MRK_DEF, JUMPING_MEANS_IVALS):
    dat = proc.sort_channels(dat)
    
    fs_n = dat.fs / 2
    b, a = proc.signal.butter(5, [30 / fs_n], btype='low')
    dat = proc.lfilter(dat, b, a)
    b, a = proc.signal.butter(5, [.4 / fs_n], btype='high')
    dat = proc.lfilter(dat, b, a)
    
    dat = proc.subsample(dat, 60)
    epo = proc.segment_dat(dat, MRK_DEF, SEG_IVAL)
    
    fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
    fv = proc.create_feature_vectors(fv)
    return fv, epo
Code example #12
def preprocessing(dat, MRK_DEF, JUMPING_MEANS_IVALS):
    dat = proc.sort_channels(dat)

    fs_n = dat.fs / 2
    b, a = proc.signal.butter(5, [30 / fs_n], btype='low')
    dat = proc.lfilter(dat, b, a)
    b, a = proc.signal.butter(5, [.4 / fs_n], btype='high')
    dat = proc.lfilter(dat, b, a)

    dat = proc.subsample(dat, 60)
    epo = proc.segment_dat(dat, MRK_DEF, SEG_IVAL)

    fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
    fv = proc.create_feature_vectors(fv)
    return fv, epo
Code example #13
def segment_data(data, win_length):
    """Splits continuous signal into windows of variable length

    Mainly delegates the call to `wyrm.processing.segment_dat`.

    Params
    ------
        data : `Data`
            continuous signal
        win_length : int
            length of window in seconds

    Returns
    -------
        data : `Data`
            chunked data

    See also
    --------
        :func: wyrm.processing.segment_dat
    """
    label = data.label
    if not isinstance(label, str):
        label = str(label)
    marker_def = {label: ['M1']}
    ival = [0, win_length * 1000]
    data = segment_dat(dat=data, marker_def=marker_def, ival=ival, timeaxis=0)
    data.axes[0] = np.repeat(label,
                             data.axes[0].shape[0])  # update class names

    # segment_dat drops samples at the borders. Since markers are defined
    # starting at t0, it will always only drop samples at the end (if at all).
    # Drop associated BIS samples before reshaping.
    data.bis = data.bis[:data.data.shape[0] * data.data.shape[1]]
    data.bis = data.bis.reshape([data.data.shape[0], -1])

    # drop nan epochs
    # this is handled here, not earlier, as to not drop samples here and there,
    # but instead exclude whole windows if at least one sample is missing, thus
    # keeping meaningful distance between any two time points
    mask1 = np.all(~np.any(np.isnan(data.data), axis=1), axis=1)
    mask2 = ~np.any(np.isnan(data.bis), axis=1)
    mask = mask1 & mask2
    data.data = data.data[mask]
    data.axes[0] = data.axes[0][mask]
    data.bis = data.bis[mask]

    return data
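
A hedged usage sketch for the wrapper above; rec, its 'M1' markers, and its .label and .bis attributes are assumptions about the caller's data and are not defined in this snippet:

# win_length is given in seconds, so 30 maps to an ival of [0, 30000] ms
windows = segment_data(rec, win_length=30)
print(windows.data.shape)  # (windows, samples per window, channels)
print(windows.bis.shape)   # (windows, BIS samples per window)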
Code example #14
 def test_segment_dat(self):
     """Test conversion from Continuous to Epoched data."""
     epo = segment_dat(self.dat, self.mrk_def, [-400, 400])
     # test if basic info was transferred from cnt
     self.assertEqual(self.dat.markers, epo.markers)
     self.assertEqual(self.dat.fs, epo.fs)
     np.testing.assert_array_equal(self.dat.axes[-1], epo.axes[-1])
     # test if the actual data is correct
     self.assertEqual(list(epo.axes[0]), [0, 1, 1])
     np.testing.assert_array_equal(epo.class_names, np.array(['class 1', 'class 2']))
     self.assertEqual(epo.data.shape, (3, 80, 3))
     for i in range(3):
         e = epo.data[i, ...]
         self.assertEqual(np.average(e), i+1)
     # test if the epo.ival is the same we cut out
     self.assertEqual(epo.axes[-2][0], -400)
     self.assertEqual(epo.axes[-2][-1], 390)
Code example #15
 def test_segment_dat(self):
     """Test conversion from Continuous to Epoched data."""
     epo = segment_dat(self.dat, self.mrk_def, [-400, 400])
     # test if basic info was transferred from cnt
     self.assertEqual(self.dat.markers, epo.markers)
     self.assertEqual(self.dat.fs, epo.fs)
     np.testing.assert_array_equal(self.dat.axes[-1], epo.axes[-1])
     # test if the actual data is correct
     self.assertEqual(list(epo.axes[0]), [0, 1, 1])
     np.testing.assert_array_equal(epo.class_names,
                                   np.array(['class 1', 'class 2']))
     self.assertEqual(epo.data.shape, (3, 80, 3))
     for i in range(3):
         e = epo.data[i, ...]
         self.assertEqual(np.average(e), i + 1)
     # test if the epo.ival is the same we cut out
     self.assertEqual(epo.axes[-2][0], -400)
     self.assertEqual(epo.axes[-2][-1], 390)
Code example #16
def offline_experiment(filename_, cfy_, true_labels_):
    print("\n")
    cnt = io.load_bcicomp3_ds2(filename_)

    fs_n = cnt.fs / 2

    b, a = proc.signal.butter(5, [HIGH_CUT / fs_n], btype='low')
    cnt = proc.filtfilt(cnt, b, a)

    b, a = proc.signal.butter(5, [LOWER_CUT / fs_n], btype='high')
    cnt = proc.filtfilt(cnt, b, a)

    cnt = proc.subsample(cnt, SUBSAMPLING)

    epo = proc.segment_dat(cnt, MARKER_DEF_TEST, SEG_IVAL)

    fv = proc.jumping_means(epo, JUMPING_MEANS_INTERVALS)
    fv = proc.create_feature_vectors(fv)

    lda_out = proc.lda_apply(fv, cfy_)
    markers = [fv.class_names[cls_idx] for cls_idx in fv.axes[0]]
    result = zip(markers, lda_out)
    endresult = []
    markers_processed = 0
    letter_prob = {i: 0 for i in 'abcdefghijklmnopqrstuvwxyz123456789_'}
    for s, score in result:
        if markers_processed == 180:
            endresult.append(
                sorted(letter_prob.items(), key=lambda x: x[1])[-1][0])
            letter_prob = {
                i: 0
                for i in 'abcdefghijklmnopqrstuvwxyz123456789_'
            }
            markers_processed = 0
        for letter in s:
            letter_prob[letter] += score
        markers_processed += 1

    print('Letters found--: %s' % "".join(endresult))
    print('Correct letters: %s' % true_labels_)
    acc = np.count_nonzero(
        np.array(endresult) == np.array(
            list(true_labels_.lower()[:len(endresult)]))) / len(endresult)
    print("Acertividade Final : %d" % (acc * 100))
Code example #17
File: utils.py  Project: root-ua/bci-challenge
def load_epo_data(data_cat, n_before=-3, n_len=100, subjects=None):
    # loading 'data_cat' data
    data, channels, markers = load_data(FS, folder_name, data_cat, subjects)

    # converting plain data to continuous Data object
    cnt = convert_mushu_data(data, markers, FS, channels)

    # Define the markers belonging to class 1 and 2
    markers_definitions = None
    if data_cat == 'train':
        markers_definitions = {'class 1': (train_labels.query('Prediction == 0', engine='python')['IdFeedBack']).tolist(),
                               'class 2': (train_labels.query('Prediction == 1', engine='python')['IdFeedBack']).tolist()}
    else:
        # marker classes don't matter for test data
        markers_definitions = {'class 1': [m[1] for m in markers], 'class 2': []}

    # segmenting continuous Data object into epoched data
    # Epoch the data from n_before*5 ms to (n_before + n_len)*5 ms around the markers defined in markers_definitions
    return segment_dat(cnt, markers_definitions, [n_before*5, (n_before + n_len)*5])
Code example #18
 def test_segment_dat_with_restriction_to_new_data_ival_pos_pos(self):
     """Online Segmentation with ival +something..+something must work correctly."""
     # [   0.,  100.,  200.,  300.,  400.,  500.,  600.,  700.,  800.]
     #         M100    200                         600
     #                M200    300                         700
     #                       M299    399                         799
     #                       M300    400                         800
     #                       M301    401                         801
     data = np.ones((9, 3))
     time = np.linspace(0, 900, 9, endpoint=False)
     channels = 'a', 'b', 'c'
     markers = [[100, 'x'], [200, 'x'], [299, 'x'], [300, 'x'], [301, 'x']]
     dat = Data(data, [time, channels], ['time', 'channels'], ['ms', '#'])
     dat.fs = 10
     dat.markers = markers
     mrk_def = {'class 1': ['x']}
     # each tuple has (number of new samples, expected epochs)
     samples_epos = [(0, 0), (1, 1), (2, 3), (3, 4), (4, 5), (5, 5)]
     for s, e in samples_epos:
         epo = segment_dat(dat, mrk_def, [100, 500], newsamples=s)
         self.assertEqual(epo.data.shape[0], e)
Code example #19
File: test_segment_dat.py  Project: usmanayubsh/wyrm
 def test_segment_dat_with_restriction_to_new_data_ival_pos_pos(self):
     """Online Segmentation with ival +something..+something must work correctly."""
     # [   0.,  100.,  200.,  300.,  400.,  500.,  600.,  700.,  800.]
     #         M100    200                         600
     #                M200    300                         700
     #                       M299    399                         799
     #                       M300    400                         800
     #                       M301    401                         801
     data = np.ones((9, 3))
     time = np.linspace(0, 900, 9, endpoint=False)
     channels = "a", "b", "c"
     markers = [[100, "x"], [200, "x"], [299, "x"], [300, "x"], [301, "x"]]
     dat = Data(data, [time, channels], ["time", "channels"], ["ms", "#"])
     dat.fs = 10
     dat.markers = markers
     mrk_def = {"class 1": ["x"]}
     # each tuple has (number of new samples, expected epochs)
     samples_epos = [(0, 0), (1, 1), (2, 3), (3, 4), (4, 5), (5, 5)]
     for s, e in samples_epos:
         epo = segment_dat(dat, mrk_def, [100, 500], newsamples=s)
         self.assertEqual(epo.data.shape[0], e)
Code example #20
File: test_segment_dat.py  Project: usmanayubsh/wyrm
 def test_segment_dat_with_restriction_to_new_data_ival_neg_neg(self):
     """Online Segmentation with ival -something..-something must work correctly."""
     # [   0.,  100.,  200.,  300.,  400.,  500.,  600.,  700.,  800.]
     #          100                  400   M500
     #                 200                  500   M600
     #                        299                  599   M699
     #                        300                  600   M700
     #                        301                  600   M701
     data = np.ones((9, 3))
     time = np.linspace(0, 900, 9, endpoint=False)
     channels = "a", "b", "c"
     markers = [[500, "x"], [600, "x"], [699, "x"], [700, "x"], [701, "x"]]
     dat = Data(data, [time, channels], ["time", "channels"], ["ms", "#"])
     dat.fs = 10
     dat.markers = markers
     mrk_def = {"class 1": ["x"]}
     # each tuple has (number of new samples, expected epochs)
     samples_epos = [(0, 0), (1, 0), (2, 2), (3, 4), (4, 5), (5, 5)]
     for s, e in samples_epos:
         epo = segment_dat(dat, mrk_def, [-400, -100], newsamples=s)
         self.assertEqual(epo.data.shape[0], e)
Code example #21
 def test_segment_dat_with_restriction_to_new_data_ival_neg_neg(self):
     """Online Segmentation with ival -something..-something must work correctly."""
     # [   0.,  100.,  200.,  300.,  400.,  500.,  600.,  700.,  800.]
     #          100                  400   M500
     #                 200                  500   M600
     #                        299                  599   M699
     #                        300                  600   M700
     #                        301                  600   M701
     data = np.ones((9, 3))
     time = np.linspace(0, 900, 9, endpoint=False)
     channels = 'a', 'b', 'c'
     markers = [[500, 'x'], [600, 'x'], [699, 'x'], [700, 'x'], [701, 'x']]
     dat = Data(data, [time, channels], ['time', 'channels'], ['ms', '#'])
     dat.fs = 10
     dat.markers = markers
     mrk_def = {'class 1': ['x']}
     # each tuple has (number of new samples, expected epochs)
     samples_epos = [(0, 0), (1, 0), (2, 2), (3, 4), (4, 5), (5, 5)]
     for s, e in samples_epos:
         epo = segment_dat(dat, mrk_def, [-400, -100], newsamples=s)
         self.assertEqual(epo.data.shape[0], e)
Code example #22
def online_experiment(amp, clf):
    amp_fs = amp.get_sampling_frequency()
    amp_channels = amp.get_channels()

    buf = BlockBuffer(4)
    rb = RingBuffer(5000)

    fn = amp.get_sampling_frequency() / 2
    b_low, a_low = proc.signal.butter(16, [30 / fn], btype='low')
    b_high, a_high = proc.signal.butter(5, [.4 / fn], btype='high')

    zi_low = proc.lfilter_zi(b_low, a_low, len(amp_channels))
    zi_high = proc.lfilter_zi(b_high, a_high, len(amp_channels))

    amp.start()
    markers_processed = 0
    current_letter_idx = 0
    current_letter = TRUE_LABELS[current_letter_idx].lower()

    letter_prob = {i: 0 for i in 'abcdefghijklmnopqrstuvwxyz123456789_'}
    endresult = []
    while True:
        # turn on for 'real time'
        #time.sleep(0.01)

        # get fresh data from the amp
        data, markers = amp.get_data()

        # we should rather wait for a specific end-of-experiment marker
        if len(data) == 0:
            break

        # convert to cnt
        cnt = io.convert_mushu_data(data, markers, amp_fs, amp_channels)

        # enter the block buffer
        buf.append(cnt)
        cnt = buf.get()
        if not cnt:
            continue

        # band-pass and subsample
        cnt, zi_low = proc.lfilter(cnt, b_low, a_low, zi=zi_low)
        cnt, zi_high = proc.lfilter(cnt, b_high, a_high, zi=zi_high)

        cnt = proc.subsample(cnt, 60)

        newsamples = cnt.data.shape[0]

        # enter the ringbuffer
        rb.append(cnt)
        cnt = rb.get()

        # segment
        epo = proc.segment_dat(cnt,
                               MARKER_DEF_TEST,
                               SEG_IVAL,
                               newsamples=newsamples)
        if not epo:
            continue

        fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
        fv = proc.create_feature_vectors(fv)
        logger.debug(markers_processed)

        lda_out = proc.lda_apply(fv, clf)
        markers = [fv.class_names[cls_idx] for cls_idx in fv.axes[0]]
        result = zip(markers, lda_out)
        for s, score in result:
            if markers_processed == 180:
                endresult.append(
                    sorted(letter_prob.items(), key=lambda x: x[1])[-1][0])
                letter_prob = {
                    i: 0
                    for i in 'abcdefghijklmnopqrstuvwxyz123456789_'
                }
                markers_processed = 0
                current_letter_idx += 1
                current_letter = TRUE_LABELS[current_letter_idx].lower()
            for letter in s:
                letter_prob[letter] += score
            markers_processed += 1
        logger.debug("".join([
            i[0] for i in sorted(
                letter_prob.items(), key=lambda x: x[1], reverse=True)
        ]).replace(current_letter, " %s " % current_letter))
        logger.debug(TRUE_LABELS)
        logger.debug("".join(endresult))
        # calculate the current accuracy
        if len(endresult) > 0:
            acc = np.count_nonzero(
                np.array(endresult) == np.array(
                    list(TRUE_LABELS.lower()[:len(endresult)]))) / len(
                        endresult)
            print "Current accuracy:", acc * 100
        if len(endresult) == len(TRUE_LABELS):
            break
        #logger.debug("Result: %s" % result)

    acc = np.count_nonzero(
        np.array(endresult) == np.array(
            list(TRUE_LABELS.lower()[:len(endresult)]))) / len(endresult)
    print "Accuracy:", acc * 100

    amp.stop()
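
The online loops rely on a stateful-filtering idiom: proc.lfilter_zi builds per-channel initial conditions once, and the state returned by proc.lfilter is passed back in on the next chunk so the stream is filtered as one continuous signal. Here is a minimal self-contained sketch with synthetic chunks; the sampling rate, chunk size and channel names are arbitrary choices for illustration:

import numpy as np
from wyrm import processing as proc
from wyrm.types import Data

fs, n_channels, chunk = 100.0, 3, 10          # arbitrary values for this sketch
b, a = proc.signal.butter(5, [30 / (fs / 2)], btype='low')
zi = proc.lfilter_zi(b, a, n_channels)        # one filter state per channel

t0 = 0.0
for _ in range(5):                            # five incoming chunks
    ax_time = t0 + np.linspace(0, 1000 * chunk / fs, chunk, endpoint=False)
    cnt = Data(np.random.random((chunk, n_channels)),
               [ax_time, ['c1', 'c2', 'c3']],
               ['time', 'channels'], ['ms', '#'])
    cnt.fs = fs
    cnt.markers = []
    cnt, zi = proc.lfilter(cnt, b, a, zi=zi)  # state carries over to the next chunk
    t0 = ax_time[-1] + 1000 / fs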
Code example #23
 def test_segment_dat_with_negative_newsamples(self):
     """Raise an error when newsamples is not positive or None."""
     with self.assertRaises(AssertionError):
         segment_dat(self.dat, self.mrk_def, [-400, 400], newsamples=-1)
Code example #24
File: performance.py  Project: awakenting/wyrm
def online_erp(fs, n_channels, subsample):
    logger.debug('Running Online ERP with {fs}Hz, and {channels}channels'.format(fs=fs, channels=n_channels))

    target_fs = 100
    # blocklen in ms
    blocklen = 1000 * 1 / target_fs
    # blocksize given the original fs and blocklen
    blocksize = fs * (blocklen / 1000)


    MRK_DEF = {'target': 'm'}
    SEG_IVAL = [0, 700]
    JUMPING_MEANS_IVALS = [150, 220], [200, 260], [310, 360], [550, 660]
    RING_BUFFER_CAP = 1000

    cfy = [0, 0]

    fs_n = fs / 2

    b_l, a_l = proc.signal.butter(5, [30 / fs_n], btype='low')
    b_h, a_h = proc.signal.butter(5, [.4 / fs_n], btype='high')
    zi_l = proc.lfilter_zi(b_l, a_l, n_channels)
    zi_h = proc.lfilter_zi(b_h, a_h, n_channels)

    ax_channels = np.array([str(i) for i in range(n_channels)])

    names = ['time', 'channel']
    units = ['ms', '#']

    blockbuf = BlockBuffer(blocksize)
    ringbuf = RingBuffer(RING_BUFFER_CAP)

    times = []

    # time since the last data was acquired
    t_last = time.time()

    # time since the last marker
    t_last_marker = time.time()

    # time since the experiment started
    t_start = time.time()

    full_iterations = 0
    while full_iterations < 500:

        t0 = time.time()

        dt = time.time() - t_last
        samples = int(dt * fs)
        if samples == 0:
            continue
        t_last = time.time()

        # get data
        data = np.random.random((samples, n_channels))
        ax_times = np.linspace(0, 1000 * (samples / fs), samples, endpoint=False)
        if t_last_marker + .01 < time.time():
            t_last_marker = time.time()
            markers = [[ax_times[-1], 'm']]
        else:
            markers = []

        cnt = Data(data, axes=[ax_times, ax_channels], names=names, units=units)
        cnt.fs = fs
        cnt.markers = markers

        # blockbuffer
        blockbuf.append(cnt)
        cnt = blockbuf.get()
        if not cnt:
            continue

        # filter
        cnt, zi_l = proc.lfilter(cnt, b_l, a_l, zi=zi_l)
        cnt, zi_h = proc.lfilter(cnt, b_h, a_h, zi=zi_h)

        # subsample
        if subsample:
            cnt = proc.subsample(cnt, target_fs)
        newsamples = cnt.data.shape[0]

        # ringbuffer
        ringbuf.append(cnt)
        cnt = ringbuf.get()

        # epoch
        epo = proc.segment_dat(cnt, MRK_DEF, SEG_IVAL, newsamples=newsamples)
        if not epo:
            continue

        # feature vectors
        fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
        fv = proc.create_feature_vectors(fv)

        # classification
        proc.lda_apply(fv, cfy)

        # don't measure in the first second, where the ringbuffer is not
        # full yet.
        if time.time() - t_start < (RING_BUFFER_CAP / 1000):
            continue

        dt = time.time() - t0
        times.append(dt)

        full_iterations += 1

    return np.array(times)
Code example #25
def online_experiment(amp, cfy):
    amp_fs = amp.get_sampling_frequency()
    amp_channels = amp.get_channels()

    # buf = BlockBuffer(4)
    rb = RingBuffer(5000)
    fn = amp_fs / 2
    b_low, a_low = proc.signal.butter(5, [38 / fn], btype='low')
    b_high, a_high = proc.signal.butter(5, [.1 / fn], btype='high')
    zi_low = proc.lfilter_zi(b_low, a_low, len(amp_channels))
    zi_high = proc.lfilter_zi(b_high, a_high, len(amp_channels))

    amp.start()
    print("Iniciando simulacao em 5s...")
    for x in range(4, 0, -1):
        time.sleep(1)
        print("%ds" % x)
    markers_processed = 0
    current_letter_idx = 0
    current_letter = TRUE_LABELS[current_letter_idx].lower()

    letter_prob = {i: 0 for i in 'abcdefghijklmnopqrstuvwxyz123456789_'}
    endresult = []
    t0 = time.time()

    while True:
        t0 = time.time()

        # get fresh data from the amp
        data, markers = amp.get_data()
        if len(data) == 0:
            continue

        # we should rather wait for a specific end-of-experiment marker
        if len(data) == 0:
            break

        # convert to cnt
        cnt = io.convert_mushu_data(data, markers, amp_fs, amp_channels)

        # enter the block buffer
        # buf.append(cnt)
        # cnt = buf.get()
        # if not cnt:
        #    continue

        # band-pass and subsample
        cnt, zi_low = proc.lfilter(cnt, b_low, a_low, zi=zi_low)
        cnt, zi_high = proc.lfilter(cnt, b_high, a_high, zi=zi_high)

        cnt = proc.subsample(cnt, 60)

        newsamples = cnt.data.shape[0]

        # enter the ringbuffer
        rb.append(cnt)
        cnt = rb.get()

        # segment
        epo = proc.segment_dat(cnt,
                               MARKER_DEF_TEST,
                               SEG_IVAL,
                               newsamples=newsamples)
        if not epo:
            continue

        fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
        fv = proc.create_feature_vectors(fv)
        print("\n")
        logger.info('Step : %d' % markers_processed)

        lda_out = proc.lda_apply(fv, cfy)
        markers = [fv.class_names[cls_idx] for cls_idx in fv.axes[0]]
        result = zip(markers, lda_out)
        for s, score in result:
            if markers_processed == 180:
                endresult.append(
                    sorted(letter_prob.items(), key=lambda x: x[1])[-1][0])
                letter_prob = {
                    i: 0
                    for i in 'abcdefghijklmnopqrstuvwxyz123456789_'
                }
                markers_processed = 0
                current_letter_idx += 1
                current_letter = TRUE_LABELS[current_letter_idx].lower()
            for letter in s:
                letter_prob[letter] += score
            markers_processed += 1

        print('Current target letter:  %s  ' % current_letter)
        print("Letra provavel--: %s" % "".join([
            i[0] for i in sorted(
                letter_prob.items(), key=lambda x: x[1], reverse=True)
        ]).replace(current_letter, " '%s' " % current_letter))
        print('Correct letters----: %s' % TRUE_LABELS)
        # discovered = BuildDiscoveredString(endresult)
        # print('Letters found------: %s' % discovered)
        print('Letters found------: %s' % "".join(endresult))

        # calculate the current accuracy
        if len(endresult) > 0:
            acc = np.count_nonzero(
                np.array(endresult) == np.array(
                    list(TRUE_LABELS.lower()[:len(endresult)]))) / len(
                        endresult)
            print('Current accuracy : %d' % (acc * 100))

        if len(endresult) == len(TRUE_LABELS) - 1:
            break

        # logger.debug('Result : %s' % result)
        timeValue = 1000 * (time.time() - t0)
        print('Time per cycle (ms) : %d' % timeValue)

    acc = np.count_nonzero(
        np.array(endresult) == np.array(
            list(TRUE_LABELS.lower()[:len(endresult)]))) / len(endresult)
    print("Acertividade Final : %d" % (acc * 100))

    amp.stop()
Code example #26
 def test_segment_dat_copy(self):
     """segment_dat must not modify arguments."""
     cpy = self.dat.copy()
     segment_dat(self.dat, self.mrk_def, [-400, 400])
     self.assertEqual(cpy, self.dat)
Code example #27
 def test_equivalent_axes(self):
     """Segmentation must deal with equivalent axis indices correctly."""
     epo0 = segment_dat(self.dat, self.mrk_def, [-400, 400], timeaxis=-2)
     epo1 = segment_dat(self.dat, self.mrk_def, [-400, 400], timeaxis=0)
     self.assertEqual(epo0, epo1)
Code example #28
 def test_segment_dat_with_nonexisting_markers(self):
     """Segmentation without result should return empty .data"""
     mrk_def = {'class 1': ['FUU1'], 'class 2': ['FUU2', 'FUU3']}
     epo = segment_dat(self.dat, mrk_def, [-400, 400])
     self.assertEqual(epo.data.shape[0], 0)
Code example #29
File: test_segment_dat.py  Project: usmanayubsh/wyrm
 def test_segment_dat_with_nonexisting_markers(self):
     """Segmentation without result should return empty .data"""
     mrk_def = {"class 1": ["FUU1"], "class 2": ["FUU2", "FUU3"]}
     epo = segment_dat(self.dat, mrk_def, [-400, 400])
     self.assertEqual(epo.data.shape[0], 0)
Code example #30
 def test_segment_dat_copy(self):
     """segment_dat must not modify arguments."""
     cpy = self.dat.copy()
     segment_dat(self.dat, self.mrk_def, [-400, 400])
     self.assertEqual(cpy, self.dat)
Code example #31
 def test_segment_dat_with_negative_newsamples(self):
     """Raise an error when newsamples is not positive or None."""
     with self.assertRaises(AssertionError):
         segment_dat(self.dat, self.mrk_def, [-400, 400], newsamples=-1)
Code example #32
# print type(markers_subject1_class_1), type(markers_subject1_class_2), "markers_subject1_class1&2 data type"
cnt1 = convert_mushu_data(
    signal_array1, markers_subject1_class_1, 118,
    channels)  # convert data into continuous form for the 1st and 2nd classes
cnt2 = convert_mushu_data(signal_array1, markers_subject1_class_2, 118,
                          channels)
cnt_ch1 = convert_mushu_data(
    signal_channel1, markers_subject1_class_1, 30, main_channels
)  # convert data into continuous form for the 1st and 2nd classes
cnt_ch2 = convert_mushu_data(signal_channel1, markers_subject1_class_2, 30,
                             main_channels)
# print cnt1, "cnt1 shape"  # What kind of marker data should be there: should it contain the start as well as the end point, or is just the start point required?

md = {'class 1': ['1\n'], 'class 2': ['2\n']}

epoch_subject1_class1 = segment_dat(
    cnt1, md, [0, 1000])  # epoch is a 3-D data set: class x time x channel
epoch_subject1_class2 = segment_dat(cnt2, md, [0, 1000])
epoch_subject1_ch1_class1 = segment_dat(
    cnt_ch1, md, [0, 1000])  # epoch is a 3-D data set: class x time x channel
epoch_subject1_ch1_class2 = segment_dat(cnt_ch2, md, [0, 1000])


### ## print "epoch data",epoch_subject1_class1
def bandpowers(segment):
    features = []
    # print len(segment)
    for i in range(len(segment)):
        f, Psd = signal.welch(segment[i, :], 100)
        power1 = 0
        power2 = 0
        f1 = []
Code example #33
def online_erp(fs, n_channels, subsample):
    logger.debug('Running Online ERP with {fs}Hz, and {channels}channels'.format(fs=fs, channels=n_channels))

    target_fs = 100
    # blocklen in ms
    blocklen = 1000 * 1 / target_fs
    # blocksize given the original fs and blocklen
    blocksize = fs * (blocklen / 1000)


    MRK_DEF = {'target': 'm'}
    SEG_IVAL = [0, 700]
    JUMPING_MEANS_IVALS = [150, 220], [200, 260], [310, 360], [550, 660]
    RING_BUFFER_CAP = 1000

    cfy = [0, 0]

    fs_n = fs / 2

    b_l, a_l = proc.signal.butter(5, [30 / fs_n], btype='low')
    b_h, a_h = proc.signal.butter(5, [.4 / fs_n], btype='high')
    zi_l = proc.lfilter_zi(b_l, a_l, n_channels)
    zi_h = proc.lfilter_zi(b_h, a_h, n_channels)

    ax_channels = np.array([str(i) for i in range(n_channels)])

    names = ['time', 'channel']
    units = ['ms', '#']

    blockbuf = BlockBuffer(blocksize)
    ringbuf = RingBuffer(RING_BUFFER_CAP)

    times = []

    # time since the last data was acquired
    t_last = time.time()

    # time since the last marker
    t_last_marker = time.time()

    # time since the experiment started
    t_start = time.time()

    full_iterations = 0
    while full_iterations < 500:

        t0 = time.time()

        dt = time.time() - t_last
        samples = int(dt * fs)
        if samples == 0:
            continue
        t_last = time.time()

        # get data
        data = np.random.random((samples, n_channels))
        ax_times = np.linspace(0, 1000 * (samples / fs), samples, endpoint=False)
        if t_last_marker + .01 < time.time():
            t_last_marker = time.time()
            markers = [[ax_times[-1], 'm']]
        else:
            markers = []

        cnt = Data(data, axes=[ax_times, ax_channels], names=names, units=units)
        cnt.fs = fs
        cnt.markers = markers

        # blockbuffer
        blockbuf.append(cnt)
        cnt = blockbuf.get()
        if not cnt:
            continue

        # filter
        cnt, zi_l = proc.lfilter(cnt, b_l, a_l, zi=zi_l)
        cnt, zi_h = proc.lfilter(cnt, b_h, a_h, zi=zi_h)

        # subsample
        if subsample:
            cnt = proc.subsample(cnt, target_fs)
        newsamples = cnt.data.shape[0]

        # ringbuffer
        ringbuf.append(cnt)
        cnt = ringbuf.get()

        # epoch
        epo = proc.segment_dat(cnt, MRK_DEF, SEG_IVAL, newsamples=newsamples)
        if not epo:
            continue

        # feature vectors
        fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
        fv = proc.create_feature_vectors(fv)

        # classification
        proc.lda_apply(fv, cfy)

        # don't measure in the first second, where the ringbuffer is not
        # full yet.
        if time.time() - t_start < (RING_BUFFER_CAP / 1000):
            continue

        dt = time.time() - t0
        times.append(dt)

        full_iterations += 1

    return np.array(times)
Code example #34
File: online_experiment.py  Project: awakenting/wyrm
def online_experiment(amp, cfy):
    amp_fs = amp.get_sampling_frequency()
    amp_channels = amp.get_channels()

    #buf = BlockBuffer(4)
    rb = RingBuffer(5000)

    fn = amp_fs / 2
    b_low, a_low = proc.signal.butter(5, [30 / fn], btype='low')
    b_high, a_high = proc.signal.butter(5, [.4 / fn], btype='high')

    zi_low = proc.lfilter_zi(b_low, a_low, len(amp_channels))
    zi_high = proc.lfilter_zi(b_high, a_high, len(amp_channels))

    amp.start()
    markers_processed = 0
    current_letter_idx = 0
    current_letter = TRUE_LABELS[current_letter_idx].lower()

    letter_prob = {i : 0 for i in 'abcdefghijklmnopqrstuvwxyz123456789_'}
    endresult = []
    t0 = time.time()
    while True:
        t0 = time.time()

        # get fresh data from the amp
        data, markers = amp.get_data()
        if len(data) == 0:
            continue

        # we should rather wait for a specific end-of-experiment marker
        if len(data) == 0:
            break

        # convert to cnt
        cnt = io.convert_mushu_data(data, markers, amp_fs, amp_channels)

        ## enter the block buffer
        #buf.append(cnt)
        #cnt = buf.get()
        #if not cnt:
        #    continue

        # band-pass and subsample
        cnt, zi_low = proc.lfilter(cnt, b_low, a_low, zi=zi_low)
        cnt, zi_high = proc.lfilter(cnt, b_high, a_high, zi=zi_high)

        cnt = proc.subsample(cnt, 60)

        newsamples = cnt.data.shape[0]

        # enter the ringbuffer
        rb.append(cnt)
        cnt = rb.get()

        # segment
        epo = proc.segment_dat(cnt, MARKER_DEF_TEST, SEG_IVAL, newsamples=newsamples)
        if not epo:
            continue

        fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
        fv = proc.create_feature_vectors(fv)
        logger.debug(markers_processed)

        lda_out = proc.lda_apply(fv, cfy)
        markers = [fv.class_names[cls_idx] for cls_idx in fv.axes[0]]
        result = zip(markers, lda_out)
        for s, score in result:
            if markers_processed == 180:
                endresult.append(sorted(letter_prob.items(), key=lambda x: x[1])[-1][0])
                letter_prob = {i : 0 for i in 'abcdefghijklmnopqrstuvwxyz123456789_'}
                markers_processed = 0
                current_letter_idx += 1
                current_letter = TRUE_LABELS[current_letter_idx].lower()
            for letter in s:
                letter_prob[letter] += score
            markers_processed += 1
        logger.debug("".join([i[0] for i in sorted(letter_prob.items(), key=lambda x: x[1], reverse=True)]).replace(current_letter, " %s " % current_letter))
        logger.debug(TRUE_LABELS)
        logger.debug("".join(endresult))
        # calculate the current accuracy
        if len(endresult) > 0:
            acc = np.count_nonzero(np.array(endresult) == np.array(list(TRUE_LABELS.lower()[:len(endresult)]))) / len(endresult)
            print "Current accuracy:", acc * 100
        if len(endresult) == len(TRUE_LABELS):
            break
        #logger.debug("Result: %s" % result)
        print(1000 * (time.time() - t0))

    acc = np.count_nonzero(np.array(endresult) == np.array(list(TRUE_LABELS.lower()[:len(endresult)]))) / len(endresult)
    print "Accuracy:", acc * 100

    amp.stop()
Code example #35
File: test_segment_dat.py  Project: usmanayubsh/wyrm
 def test_equivalent_axes(self):
     """Segmentation must deal with equivalent axis indices correctly."""
     epo0 = segment_dat(self.dat, self.mrk_def, [-400, 400], timeaxis=-2)
     epo1 = segment_dat(self.dat, self.mrk_def, [-400, 400], timeaxis=0)
     self.assertEqual(epo0, epo1)