Example #1
def record(state, amp_name, amp_serial, record_dir, eeg_only):
    # set data file name
    filename = time.strftime(record_dir + "/%Y%m%d-%H%M%S-raw.pcl",
                             time.localtime())
    qc.print_c('>> Output file: %s' % (filename), 'W')

    # test writability
    try:
        qc.make_dirs(record_dir)
        open(filename, 'w').write('The data will be written when the recording is finished.')
    except Exception:
        raise RuntimeError('Problem writing to %s. Check permission.' % filename)

    # start a server for sending out the data file name when a software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',\
        source_id=filename, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(amp_name=amp_name,
                        amp_serial=amp_serial,
                        eeg_only=eeg_only)

    # start recording
    qc.print_c('\n>> Recording started (PID %d).' % os.getpid(), 'W')
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while state.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            print('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.01)

    # record stop
    qc.print_c('>> Stop requested. Copying buffer', 'G')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {
        'signals': signals,
        'timestamps': times,
        'events': events,
        'sample_rate': sr.get_sample_rate(),
        'channels': sr.get_num_channels(),
        'ch_names': sr.get_channel_names()
    }
    qc.print_c('Saving raw data ...', 'W')
    qc.save_obj(filename, data)
    print('Saved to %s\n' % filename)

    qc.print_c('Converting raw file into a fif format.', 'W')
    pcl2fif(filename)
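
A minimal sketch (not part of the original example) of how this record() could be driven from a parent process; the shared state flag and the 'Press Enter to stop' prompt suggest this pattern. The amplifier name and output directory are hypothetical.

import multiprocessing as mp

if __name__ == '__main__':
    state = mp.Value('i', 1)  # 1 = keep recording; the loop exits when it changes
    proc = mp.Process(target=record,
                      args=(state, 'openvibeSignal', None, './records', False))
    proc.start()
    input()          # record() prints '>> Press Enter to stop recording'
    state.value = 0  # ends the acquisition loop in record()
    proc.join()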
Example #2
def record(recordState, amp_name, amp_serial, record_dir, eeg_only, recordLogger=logger, queue=None):

    redirect_stdout_to_queue(recordLogger, queue, 'INFO')

    # set data file name
    timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
    pcl_file = "%s/%s-raw.pcl" % (record_dir, timestamp)
    eve_file = '%s/%s-eve.txt' % (record_dir, timestamp)
    recordLogger.info('>> Output file: %s' % (pcl_file))

    # test writability
    try:
        qc.make_dirs(record_dir)
        open(pcl_file, 'w').write('The data will be written when the recording is finished.')
    except Exception:
        raise RuntimeError('Problem writing to %s. Check permission.' % pcl_file)

    # start a server for sending out the event file name when a software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',\
        source_id=eve_file, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name, amp_serial=amp_serial, eeg_only=eeg_only)

    # start recording
    recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while recordState.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            recordLogger.info('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.001)

    # record stop
    recordLogger.info('>> Stop requested. Copying buffer')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {'signals':signals, 'timestamps':times, 'events':events,
            'sample_rate':sr.get_sample_rate(), 'channels':sr.get_num_channels(),
            'ch_names':sr.get_channel_names(), 'lsl_time_offset':sr.lsl_time_offset}
    recordLogger.info('Saving raw data ...')
    qc.save_obj(pcl_file, data)
    recordLogger.info('Saved to %s\n' % pcl_file)

    # automatically convert to fif and use event file if it exists (software trigger)
    if os.path.exists(eve_file):
        recordLogger.info('Found matching event file, adding events.')
    else:
        eve_file = None
    recordLogger.info('Converting raw file into fif.')
    pcl2fif(pcl_file, external_event=eve_file)
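
This recorder advertises the event file path as the source_id of an LSL outlet named 'StreamRecorderInfo'. A hedged sketch of how a software-trigger process might discover it; the event line format written here is hypothetical.

import pylsl

streams = pylsl.resolve_byprop('name', 'StreamRecorderInfo', timeout=5)
if streams:
    eve_file = streams[0].source_id()  # '<record_dir>/<timestamp>-eve.txt'
    with open(eve_file, 'a') as f:
        # hypothetical event line: LSL timestamp followed by an event code
        f.write('%.6f 1\n' % pylsl.local_clock())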
Example #3
        cls = RandomForestClassifier(n_estimators=RF['trees'],
                                     max_features='auto',
                                     max_depth=RF['maxdepth'],
                                     n_jobs=n_jobs)
        cls.fit(X, Y)
        cls.n_jobs = 1  # n_jobs should be 1 for online decoding
        print('Trained a Random Forest classifier with %d trees and %d maxdepth' %
              (RF['trees'], RF['maxdepth']))
        ch_names = [raw.info['ch_names'][c] for c in picks_feat]
        data = dict(sfreq=raw.info['sfreq'], ch_names=ch_names, picks=picks_feat,\
                    cls=cls, l_freq=l_freq, h_freq=h_freq, decim_factor=decim_factor)
        outdir = DATADIR + '/errp_classifier'
        qc.make_dirs(outdir)
        clsfile = outdir + '/errp_classifier.pcl'
        qc.save_obj(clsfile, data)
        print('Saved as %s' % clsfile)

    if True:
        # hoang's code
        label = epochs.events[:, 2]

        cls = rLDA_binary(0.3)

        train_data = epochs._data
        train_label = label

        ### Normalization
        (train_data_normalized, trainShiftFactor,
         trainScaleFactor) = normalizeAcrossEpoch(train_data, 'MinMax')
        '''
        data = dict(cls=cls, sfreq=raw.info['sfreq'], ch_names=ch_names, picks=picks_feat,
                w=w, b=b, l_freq=l_freq, h_freq=h_freq, decim_factor=decim_factor, pca=pca,
                shiftFactor=trainShiftFactor, scaleFactor=trainScaleFactor)
        '''
        # remember line 195:
        # t_lower = tmin-paddingLength
        # t_upper = tmax+paddingLength

        ##########################################################################
        data = dict(cls=cls, sfreq=raw.info['sfreq'], ch_names=ch_names, picks=picks_feat,\
                    l_freq=l_freq, h_freq=h_freq, decim_factor=decim_factor,\
                    shiftFactor=trainShiftFactor, scaleFactor=trainScaleFactor, pca=pca, threshold=best_threshold[0],
                    tmin=tmin, tmax=tmax, paddingIdx=paddingIdx, iir_params=dict(a=a, b=b))
        outdir = DATADIR + '/errp_classifier'
        qc.make_dirs(outdir)
        clsfile = outdir + '/errp_classifier.pcl'
        qc.save_obj(clsfile, data)
        print('Saved as %s' % clsfile)
print('Done')

#    def balance_idx(label):
#        labelsetWrong = np.where(label==3)[0]
#        labelsetCorrect = np.where(label==4)[0]
#
#        diff = len(labelsetCorrect) - len(labelsetWrong)
#
#        if diff > 0:
#            smallestSet = labelsetWrong
#            largestSet = labelsetCorrect
#        elif diff<0:
#            smallestSet = labelsetCorrect
#            largestSet = labelsetWrong
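
normalizeAcrossEpoch() is not shown in this snippet; a minimal sketch of what a 'MinMax' normalization returning (data, shiftFactor, scaleFactor) could look like, assuming a single global min/max across all epochs.

import numpy as np

def minmax_across_epochs(x):
    # x: [epochs] x [channels] x [samples]
    shift = x.min()
    scale = x.max() - shift
    if scale == 0:
        scale = 1.0
    return (x - shift) / scale, shift, scale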
Example #5
def train_decoder(cfg, featdata, feat_file=None):
    """
    Train the final decoder using all data
    """
    # Init a classifier
    selected_classifier = cfg.CLASSIFIER['selected']
    if selected_classifier == 'GB':
        cls = GradientBoostingClassifier(
            loss='deviance',
            learning_rate=cfg.CLASSIFIER[selected_classifier]['learning_rate'],
            n_estimators=cfg.CLASSIFIER[selected_classifier]['trees'],
            subsample=1.0,
            max_depth=cfg.CLASSIFIER[selected_classifier]['depth'],
            random_state=cfg.CLASSIFIER[selected_classifier]['seed'],
            max_features='sqrt',
            verbose=0,
            warm_start=False,
            presort='auto')
    elif selected_classifier == 'XGB':
        cls = XGBClassifier(
            loss='deviance',
            learning_rate=cfg.CLASSIFIER[selected_classifier]['learning_rate'],
            n_estimators=cfg.CLASSIFIER[selected_classifier]['trees'],
            subsample=1.0,
            max_depth=cfg.CLASSIFIER[selected_classifier]['depth'],
            random_state=cfg.CLASSIFIER[selected_classifier]['seed'],
            max_features='sqrt',
            verbose=0,
            warm_start=False,
            presort='auto')
    elif selected_classifier == 'RF':
        cls = RandomForestClassifier(
            n_estimators=cfg.CLASSIFIER[selected_classifier]['trees'],
            max_features='auto',
            max_depth=cfg.CLASSIFIER[selected_classifier]['depth'],
            n_jobs=cfg.N_JOBS,
            random_state=cfg.CLASSIFIER[selected_classifier]['seed'],
            oob_score=False,
            class_weight='balanced_subsample')
    elif selected_classifier == 'LDA':
        cls = LDA()
    elif selected_classifier == 'rLDA':
        cls = rLDA(cfg.CLASSIFIER[selected_classifier]['r_coeff'])
    else:
        logger.error('Unknown classifier %s' % selected_classifier)
        raise ValueError

    # Setup features
    X_data = featdata['X_data']
    Y_data = featdata['Y_data']
    wlen = featdata['wlen']
    if cfg.FEATURES['PSD']['wlen'] is None:
        cfg.FEATURES['PSD']['wlen'] = wlen
    w_frames = featdata['w_frames']
    ch_names = featdata['ch_names']
    X_data_merged = np.concatenate(X_data)
    Y_data_merged = np.concatenate(Y_data)
    if cfg.CV['BALANCE_SAMPLES']:
        X_data_merged, Y_data_merged = balance_samples(
            X_data_merged,
            Y_data_merged,
            cfg.CV['BALANCE_SAMPLES'],
            verbose=True)

    # Start training the decoder
    logger.info_green('Training the decoder')
    timer = qc.Timer()
    cls.n_jobs = cfg.N_JOBS
    cls.fit(X_data_merged, Y_data_merged)
    logger.info('Trained %d samples x %d dimension in %.1f sec' %\
          (X_data_merged.shape[0], X_data_merged.shape[1], timer.sec()))
    cls.n_jobs = 1  # always set n_jobs=1 for testing

    # Export the decoder
    classes = {c: cfg.tdef.by_value[c] for c in np.unique(Y_data)}
    if cfg.FEATURES['selected'] == 'PSD':
        data = dict(cls=cls,
                    ch_names=ch_names,
                    psde=featdata['psde'],
                    sfreq=featdata['sfreq'],
                    picks=featdata['picks'],
                    classes=classes,
                    epochs=cfg.EPOCH,
                    w_frames=w_frames,
                    w_seconds=cfg.FEATURES['PSD']['wlen'],
                    wstep=cfg.FEATURES['PSD']['wstep'],
                    spatial=cfg.SP_FILTER,
                    spatial_ch=featdata['picks'],
                    spectral=cfg.TP_FILTER[cfg.TP_FILTER['selected']],
                    spectral_ch=featdata['picks'],
                    notch=cfg.NOTCH_FILTER[cfg.NOTCH_FILTER['selected']],
                    notch_ch=featdata['picks'],
                    multiplier=cfg.MULTIPLIER,
                    ref_ch=cfg.REREFERENCE[cfg.REREFERENCE['selected']],
                    decim=cfg.FEATURES['PSD']['decim'])
    clsfile = '%s/classifier/classifier-%s.pkl' % (cfg.DATA_PATH,
                                                   platform.architecture()[0])
    qc.make_dirs('%s/classifier' % cfg.DATA_PATH)
    qc.save_obj(clsfile, data)
    logger.info('Decoder saved to %s' % clsfile)

    # Reverse-lookup frequency from FFT
    fq = 0
    if type(cfg.FEATURES['PSD']['wlen']) == list:
        fq_res = 1.0 / cfg.FEATURES['PSD']['wlen'][0]
    else:
        fq_res = 1.0 / cfg.FEATURES['PSD']['wlen']
    fqlist = []
    while fq <= cfg.FEATURES['PSD']['fmax']:
        if fq >= cfg.FEATURES['PSD']['fmin']:
            fqlist.append(fq)
        fq += fq_res

    # Show top distinctive features
    if cfg.FEATURES['selected'] == 'PSD':
        logger.info_green('Good features ordered by importance')
        if selected_classifier in ['RF', 'GB', 'XGB']:
            keys, values = qc.sort_by_value(list(cls.feature_importances_),
                                            rev=True)
        elif selected_classifier in ['LDA', 'rLDA']:
            keys, values = qc.sort_by_value(cls.w, rev=True)
        keys = np.array(keys)
        values = np.array(values)

        if cfg.EXPORT_GOOD_FEATURES:
            if feat_file is None:
                gfout = open('%s/classifier/good_features.txt' % cfg.DATA_PATH,
                             'w')
            else:
                gfout = open(feat_file, 'w')

        if type(wlen) is not list:
            ch_names = [ch_names[c] for c in featdata['picks']]
        else:
            # keep the original list while building the per-window names
            ch_names_picked = []
            for w in range(len(wlen)):
                for c in featdata['picks']:
                    ch_names_picked.append('w%d-%s' % (w, ch_names[c]))
            ch_names = ch_names_picked

        chlist, hzlist = features.feature2chz(keys, fqlist, ch_names=ch_names)
        valnorm = values[:cfg.FEAT_TOPN].copy()
        valsum = np.sum(valnorm)
        if valsum == 0:
            valsum = 1
        valnorm = valnorm / valsum * 100.0

        # show top-N features
        for i, (ch, hz) in enumerate(zip(chlist, hzlist)):
            if i >= cfg.FEAT_TOPN:
                break
            txt = '%-3s %5.1f Hz  normalized importance %-6s  raw importance %-6s  feature %-5d' %\
                  (ch, hz, '%.2f%%' % valnorm[i], '%.2f%%' % (values[i] * 100.0), keys[i])
            logger.info(txt)

        if cfg.EXPORT_GOOD_FEATURES:
            gfout.write('Importance(%) Channel Frequency Index\n')
            for i, (ch, hz) in enumerate(zip(chlist, hzlist)):
                gfout.write('%.3f\t%s\t%s\t%d\n' %
                            (values[i] * 100.0, ch, hz, keys[i]))
            gfout.close()
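
The cfg object is defined elsewhere; below is a hypothetical minimal configuration covering just the RF/PSD path of train_decoder(), with every field name inferred from the attribute accesses above (values are placeholders).

from types import SimpleNamespace

cfg = SimpleNamespace(
    CLASSIFIER={'selected': 'RF', 'RF': {'trees': 400, 'depth': 5, 'seed': 42}},
    N_JOBS=4,
    CV={'BALANCE_SAMPLES': False},
    FEATURES={'selected': 'PSD',
              'PSD': {'wlen': None, 'wstep': 32, 'fmin': 1, 'fmax': 40, 'decim': 1}},
    EPOCH=[0.5, 2.5],
    SP_FILTER='car',
    TP_FILTER={'selected': 'NONE', 'NONE': None},
    NOTCH_FILTER={'selected': 'NONE', 'NONE': None},
    MULTIPLIER=1,
    REREFERENCE={'selected': 'NONE', 'NONE': None},
    DATA_PATH='./fif',
    FEAT_TOPN=10,
    EXPORT_GOOD_FEATURES=True,
    tdef=None)  # tdef must map event values to labels via tdef.by_value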
Example #6
def raw2psd(rawfile=None,
            fmin=1,
            fmax=40,
            wlen=0.5,
            wstep=1,
            tmin=0.0,
            tmax=None,
            channel_picks=None,
            excludes=[],
            n_jobs=1):
    """
    Compute PSD features over a sliding window on the entire raw file.
    Leading edge of the window is the time reference, i.e. do not use future data.

    Input
    =====
    rawfile: fif file.
    channel_picks: None or list of channel names
    tmin (sec): start time of the PSD window relative to the event onset.
    tmax (sec): end time of the PSD window relative to the event onset. None = until the end.
    fmin (Hz): minimum PSD frequency
    fmax (Hz): maximum PSD frequency
    wlen (sec): sliding window length for computing PSD (sec)
    wstep (int): sliding window step (time samples)
    excludes (list): list of channels to exclude
    """

    raw, eve = pu.load_raw(rawfile)
    sfreq = raw.info['sfreq']
    wframes = int(round(sfreq * wlen))
    raw_eeg = raw.pick_types(meg=False, eeg=True, stim=False, exclude=excludes)
    if channel_picks is None:
        rawdata = raw_eeg._data
        # keep indices so the export step can map them back to channel names
        chlist = list(range(len(raw_eeg.ch_names)))
    else:
        chlist = []
        for ch in channel_picks:
            chlist.append(raw.ch_names.index(ch))
        rawdata = raw_eeg._data[np.array(chlist)]

    if tmax is None:
        t_end = rawdata.shape[1]
    else:
        t_end = int(round(tmax * sfreq))
    t_start = int(round(tmin * sfreq)) + wframes
    psde = mne.decoding.PSDEstimator(sfreq, fmin=fmin, fmax=fmax, n_jobs=1,\
        bandwidth=None, low_bias=True, adaptive=False, normalization='length',
        verbose=None)
    print('[PID %d] %s' % (os.getpid(), rawfile))
    psd_all = []
    evelist = []
    times = []
    t_len = t_end - t_start
    last_eve = 0
    y_i = 0
    t_last = t_start
    tm = qc.Timer()
    for t in range(t_start, t_end, wstep):
        # compute PSD
        window = rawdata[:, t - wframes:t]
        psd = psde.transform(
            window.reshape((1, window.shape[0], window.shape[1])))
        psd = psd.reshape(psd.shape[1], psd.shape[2])
        psd_all.append(psd)
        times.append(t)

        # matching events at the current window
        if y_i < eve.shape[0] and t >= eve[y_i][0]:
            last_eve = eve[y_i][2]
            y_i += 1
        evelist.append(last_eve)

        if tm.sec() >= 1:
            perc = (t - t_start) / t_len
            fps = (t - t_last) / wstep
            est = (t_end - t) / wstep / fps
            print('[PID %d] %.1f%% (%.1f FPS, %ds left)' %
                  (os.getpid(), perc * 100.0, fps, est))
            t_last = t
            tm.reset()
    print('Finished.')

    # export data
    try:
        chnames = [raw.ch_names[ch] for ch in chlist]
        psd_all = np.array(psd_all)
        [basedir, fname, fext] = qc.parse_path_list(rawfile)
        fout_header = '%s/psd-%s-header.pkl' % (basedir, fname)
        fout_psd = '%s/psd-%s-data.npy' % (basedir, fname)
        header = {
            'psdfile': fout_psd,
            'times': np.array(times),
            'sfreq': sfreq,
            'channels': chnames,
            'wframes': wframes,
            'events': evelist
        }
        qc.save_obj(fout_header, header)
        np.save(fout_psd, psd_all)
        print('Exported to:\n(header) %s\n(numpy array) %s' %
              (fout_header, fout_psd))
    except:
        import traceback
        print('(%s) Unexpected error occurred while exporting data. Dropping you into a shell for recovery.' %\
            os.path.basename(__file__))
        traceback.print_exc()
        from IPython import embed
        embed()
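
A hedged sketch of reading the export back; the file names follow the pattern printed by raw2psd(), and qc.load_obj is assumed to be the counterpart of the qc.save_obj call above.

header = qc.load_obj('psd-myrecording-header.pkl')  # hypothetical file name
psd_all = np.load(header['psdfile'])                # [windows] x [channels] x [freqs]
print(header['channels'], header['sfreq'], psd_all.shape)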
Example #7
def createClassifier(loadedraw,\
                     events,\
                     tmin,\
                     tmax,\
                     tlow,\
                     thigh,\
                     regcoeff,\
                     useLeaveOneOut,\
                     APPLY_CAR,\
                     APPLY_PCA,\
                     l_freq,\
                     h_freq,\
                     MAX_FPR,\
                     picks_feat,\
                     baselineRange,\
                     decim_factor,\
                     cv_container,\
                     FILTER_METHOD,\
                     best_threshold,\
                     verbose=False):
    tdef, sfreq, event_id, b, a, zi, t_lower, t_upper, epochs, wframes = preprocess(loadedraw=loadedraw,\
                                                                                    events=events,\
                                                                                    APPLY_CAR=APPLY_CAR,\
                                                                                    l_freq=l_freq,\
                                                                                    h_freq=h_freq,\
                                                                                    filter_method=FILTER_METHOD,\
                                                                                    tmin=tmin,\
                                                                                    tmax=tmax,\
                                                                                    tlow=tlow,\
                                                                                    thigh=thigh,\
                                                                                    n_jobs=n_jobs,\
                                                                                    picks_feat=picks_feat,\
                                                                                    baselineRange=baselineRange,
                                                                                    verbose=False)
    train_pcaed, pca, trainShiftFactor, trainScaleFactor = compute_features(signals=epochs._data,\
                                                                            dataset_type='train',\
                                                                            sfreq=sfreq,\
                                                                            l_freq=l_freq,\
                                                                            h_freq=h_freq,\
                                                                            decim_factor=decim_factor,\
                                                                            shiftFactor=None,\
                                                                            scaleFactor=None,\
                                                                            pca=None,\
                                                                            tmin=tmin,\
                                                                            tmax=tmax,\
                                                                            tlow=tlow,\
                                                                            thigh=thigh,\
                                                                            filter_method=FILTER_METHOD)

    cls = rLDA(regcoeff)
    label = epochs.events[:, 2]
    cls.fit(train_pcaed, label)
    ch_names = [loadedraw.info['ch_names'][c] for c in picks_feat]
    data = dict(apply_car=APPLY_CAR,
                sfreq=loadedraw.info['sfreq'],\
                picks=picks_feat,\
                decim_factor=decim_factor,\
                ch_names=ch_names,\
                tmin=tmin,\
                tmax=tmax,\
                tlow=tlow,\
                thigh=thigh,\
                l_freq=l_freq,\
                h_freq=h_freq,\
                baselineRange=baselineRange,\
                shiftFactor=trainShiftFactor,\
                scaleFactor=trainScaleFactor,\
                cls=cls,\
                pca=pca,\
                threshold=best_threshold[0],\
                filter_method=FILTER_METHOD,\
                wframes=wframes)
    outdir = DATADIR + '/errp_classifier'
    qc.make_dirs(outdir)
    clsfile = outdir + '/errp_classifier.pcl'
    qc.save_obj(clsfile, data)
    print('Saved as %s' % clsfile)
    print('Using ' + str(epochs._data.shape[0]) + ' epochs')
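
A hedged sketch of reloading the exported bundle for online use; qc.load_obj is assumed to mirror qc.save_obj, and every key comes from the dict saved above.

model = qc.load_obj(DATADIR + '/errp_classifier/errp_classifier.pcl')
cls = model['cls']  # the fitted rLDA instance
print(model['ch_names'], model['threshold'], model['wframes'])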
Example #8
def epochs2psd(rawfile,
               channel_picks,
               event_id,
               tmin,
               tmax,
               fmin,
               fmax,
               w_len,
               w_step,
               excludes=None):
    """
    Compute PSD features over a sliding window in epochs

    Exported data is 4D: [epochs] x [times] x [channels] x [freqs]

    Input
    =====
    rawfile: fif-format raw file
    channel_picks: None or list of channel indices
    event_id: { label(str) : event_id(int) }
    tmin: start time of the PSD window relative to the event onset
    tmax: end time of the PSD window relative to the event onset
    fmin: minimum PSD frequency
    fmax: maximum PSD frequency
    w_len: sliding window length for computing PSD
    w_step: sliding window step in time samples
    The result is exported next to the raw file in both formats:
    .pkl (pickled Python dict) and .mat (MATLAB).
    """

    rawfile = rawfile.replace('\\', '/')
    raw, events = pu.load_raw(rawfile)
    sfreq = raw.info['sfreq']

    if channel_picks is None:
        picks = mne.pick_types(raw.info,
                               meg=False,
                               eeg=True,
                               stim=False,
                               eog=False,
                               exclude=excludes)
    else:
        picks = channel_picks

    # Epoching
    epochs = mne.Epochs(raw,
                        events,
                        event_id,
                        tmin=tmin,
                        tmax=tmax,
                        proj=False,
                        picks=picks,
                        baseline=(tmin, tmax),
                        preload=True,
                        add_eeg_ref=False)
    # from IPython import embed; embed()

    # Compute psd vectors over a sliding window between tmin and tmax
    w_len = int(sfreq * w_len)  # window length
    psde = mne.decoding.PSDEstimator(sfreq,
                                     fmin=fmin,
                                     fmax=fmax,
                                     n_jobs=cpu_count(),
                                     adaptive=False)
    epochmat = {e: epochs[e]._data for e in event_id}
    psdmat = {}
    for e in event_id:
        # psd = [epochs] x [windows] x [channels] x [freqs]
        psd, _ = pu.get_psd(epochs[e], psde, w_len, w_step, flatten=False)
        psdmat[e] = psd
    # psdmat[e]= np.mean(psd, 3) # for freq-averaged

    data = dict(epochs=epochmat, psds=psdmat, tmin=tmin, tmax=tmax, sfreq=epochs.info['sfreq'],\
                fmin=fmin, fmax=fmax, w_step=w_step, w_len=w_len, labels=list(epochs.event_id.keys()))

    # Export
    [basedir, fname, fext] = qc.parse_path_list(rawfile)
    matfile = '%s/psd-%s.mat' % (basedir, fname)
    pklfile = '%s/psd-%s.pkl' % (basedir, fname)
    scipy.io.savemat(matfile, data)
    qc.save_obj(pklfile, data)
    print('Exported to %s' % matfile)
    print('Exported to %s' % pklfile)
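
A hedged sketch of inspecting the exported 4D PSD array; the event label and file name are hypothetical, and qc.load_obj is assumed to mirror qc.save_obj.

data = qc.load_obj('psd-myfile.pkl')
psd = data['psds']['LEFT_GO']    # [epochs] x [windows] x [channels] x [freqs]
print(psd.shape)
psd_freq_avg = psd.mean(axis=3)  # average across frequency bins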
Example #9
def log_decoding(decoder, logfile, amp_name=None, amp_serial=None, pklfile=True, matfile=False, autostop=False, prob_smooth=False):
    """
    Decode online and write results with event timestamps

    input
    -----
    decoder: Decoder or DecoderDaemon class object.
    logfile: File name to contain the result in Python pickle format.
    amp_name: LSL server name (if known).
    amp_serial: LSL server serial number (if known).
    pklfile: Export results to Python pickle format.
    matfile: Export results to Matlab .mat file if True.
    autostop: Automatically finish when no more data is received.
    prob_smooth: Use smoothed probability values according to decoder's smoothing parameter.
    """

    import cv2
    import scipy

    # run event acquisition process in the background
    state = mp.Value('i', 1)
    event_queue = mp.Queue()
    proc = mp.Process(target=log_decoding_helper, args=[state, event_queue, amp_name, amp_serial, autostop])
    proc.start()
    logger.info_green('Spawned event acquisition process.')

    # init variables and choose decoding function
    labels = decoder.get_label_names()
    probs = []
    prob_times = []
    if prob_smooth:
        decode_fn = decoder.get_prob_smooth_unread
    else:
        decode_fn = decoder.get_prob_unread
        
    # simple controller UI
    cv2.namedWindow("Decoding", cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow("Decoding", 1400, 50)
    img = np.zeros([100, 400, 3], np.uint8)
    cv2.putText(img, 'Press any key to start', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
    cv2.imshow("Decoding", img)
    cv2.waitKeyEx()
    img *= 0
    cv2.putText(img, 'Press ESC to stop', (40, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)
    cv2.imshow("Decoding", img)

    key = 0
    started = False
    tm_watchdog = qc.Timer(autoreset=True)
    tm_cls = qc.Timer()
    while key != 27:
        prob, prob_time = decode_fn(True)
        t_lsl = pylsl.local_clock()
        key = cv2.waitKeyEx(1)
        if prob is None:
            # watch dog
            if tm_cls.sec() > 5:
                if autostop and started:
                    logger.info('No more streaming data. Finishing.')
                    break
                tm_cls.reset()
            tm_watchdog.sleep_atleast(0.001)
            continue
        probs.append(prob)
        prob_times.append(prob_time)
        txt = '[%.3f] ' % prob_time
        txt += ', '.join(['%s: %.2f' % (l, p) for l, p in zip(labels, prob)])
        txt += ' (%d ms, LSL Diff = %.3f)' % (tm_cls.msec(), (t_lsl-prob_time))
        logger.info(txt)
        if not started:
            started = True
        tm_cls.reset()

    # finish up processes
    cv2.destroyAllWindows()
    logger.info('Cleaning up event acquisition process.')
    state.value = 0
    decoder.stop()
    event_times, event_values = event_queue.get()
    proc.join()

    # save values
    if len(prob_times) == 0:
        logger.error('No decoding result. Please debug.')
        import pdb
        pdb.set_trace()
    t_start = prob_times[0]
    probs = np.vstack(probs)
    event_times = np.array(event_times)
    event_times = event_times[np.where(event_times >= t_start)[0]] - t_start
    prob_times = np.array(prob_times) - t_start
    event_values = np.array(event_values)
    data = dict(probs=probs, prob_times=prob_times, event_times=event_times, event_values=event_values, labels=labels)
    if pklfile:
        qc.save_obj(logfile, data)
        logger.info('Saved to %s' % logfile)
    if matfile:
        pp = qc.parse_path(logfile)
        matout = '%s/%s.mat' % (pp.dir, pp.name)
        scipy.io.savemat(matout, data)
        logger.info('Saved to %s' % matout)
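
A hedged sketch of post-processing the saved log: all times are stored relative to the first decoded frame, so each event can be aligned to the nearest decoding output at or after it (file name hypothetical, qc.load_obj assumed to mirror qc.save_obj).

import numpy as np

log = qc.load_obj('decoding-log.pkl')
for t, v in zip(log['event_times'], log['event_values']):
    i = min(np.searchsorted(log['prob_times'], t), len(log['prob_times']) - 1)
    print('event %d at %.3fs -> %s' % (v, t, log['probs'][i]))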