def __init__(self, cfg, bar, tdef, trigger, logfile=None):
    """Initialize feedback state from the protocol config.

    Params
    ======
    cfg: protocol config object (PROB_ACC_ALPHA, REFRESH_RATE, BAR_STEP_*, ...)
    bar: visual feedback object; must support fill()
    tdef: trigger definition object
    trigger: trigger output device
    logfile: optional path to a text log, kept open for the object's lifetime
    """
    self.cfg = cfg
    self.tdef = tdef
    # complementary smoothing weights for probability accumulation
    self.alpha1 = self.cfg.PROB_ACC_ALPHA
    self.alpha2 = 1.0 - self.cfg.PROB_ACC_ALPHA
    self.trigger = trigger
    self.bar = bar
    self.bar.fill()
    # seconds between display refreshes
    self.refresh_delay = 1.0 / self.cfg.REFRESH_RATE
    self.bar_step_left = self.cfg.BAR_STEP_LEFT
    self.bar_step_right = self.cfg.BAR_STEP_RIGHT
    self.bar_step_up = self.cfg.BAR_STEP_UP
    self.bar_step_down = self.cfg.BAR_STEP_DOWN
    self.bar_step_both = self.cfg.BAR_STEP_BOTH
    self.bar_bias = self.cfg.BAR_BIAS
    # optionally end the trial as soon as the bar is full;
    # getattr() replaces the hasattr()/== True pair with identical semantics
    self.premature_end = getattr(self.cfg, 'BAR_REACH_FINISH', False) == True
    self.tm_trigger = qc.Timer()
    self.tm_display = qc.Timer()
    self.tm_watchdog = qc.Timer()
    # optional per-trial log file; intentionally left open (closed elsewhere)
    if logfile is not None:
        self.logf = open(logfile, 'w')
    else:
        self.logf = None
def sample_decoding(decoder):
    """ Decoding example """
    # class names, in the decoder's probability order, for labeling the output
    labels = decoder.get_label_names()
    pacer = qc.Timer(autoreset=True)
    t_last_cls = qc.Timer()
    while True:
        raw_probs = decoder.get_prob_unread()
        smooth_probs = decoder.get_prob_smooth()
        if raw_probs is None:
            # watch dog
            if t_last_cls.sec() > 5:
                logger.warning('No classification was done in the last 5 seconds. Are you receiving data streams?')
                t_last_cls.reset()
            pacer.sleep_atleast(0.001)
            continue
        # one console line per classification: latency, per-class probs, winner
        line = '[%8.1f msec]' % (t_last_cls.msec())
        for i, label in enumerate(labels):
            line += ' %s %.3f (raw %.3f)' % (label, smooth_probs[i], raw_probs[i])
        best = qc.get_index_max(smooth_probs)
        line += ' %s' % labels[best]
        print(line)
        t_last_cls.reset()
def __init__(self, cfg, viz, tdef, trigger, logfile=None):
    """
    Set up feedback state and connect optional STIMO / FES stimulation hardware.

    cfg: protocol config object; viz: visual feedback object; tdef: trigger
    definitions; trigger: trigger output device; logfile: optional log path.
    """
    self.cfg = cfg
    self.tdef = tdef
    self.trigger = trigger
    self.viz = viz
    self.viz.fill()
    # seconds between display refreshes
    self.refresh_delay = 1.0 / self.cfg.REFRESH_RATE
    self.bar_step_left = self.cfg.BAR_STEP['left']
    self.bar_step_right = self.cfg.BAR_STEP['right']
    self.bar_step_up = self.cfg.BAR_STEP['up']
    self.bar_step_down = self.cfg.BAR_STEP['down']
    self.bar_step_both = self.cfg.BAR_STEP['both']
    # keep the bias as a mutable list so it can be adjusted at runtime
    if type(self.cfg.BAR_BIAS) is tuple:
        self.bar_bias = list(self.cfg.BAR_BIAS)
    else:
        self.bar_bias = self.cfg.BAR_BIAS
    # New decoder: already smoothed by the decoder so bias after.
    #self.alpha_old = self.cfg.PROB_ACC_ALPHA
    #self.alpha_new = 1.0 - self.cfg.PROB_ACC_ALPHA
    # optionally end the trial as soon as the bar is full
    if hasattr(self.cfg, 'BAR_REACH_FINISH') and self.cfg.BAR_REACH_FINISH == True:
        self.premature_end = True
    else:
        self.premature_end = False
    self.tm_trigger = qc.Timer()
    self.tm_display = qc.Timer()
    self.tm_watchdog = qc.Timer()
    # optional per-trial log file; intentionally left open (closed elsewhere)
    if logfile is not None:
        self.logf = open(logfile, 'w')
    else:
        self.logf = None
    # STIMO only
    if self.cfg.WITH_STIMO is True:
        if self.cfg.STIMO_COMPORT is None:
            # auto-detect an ATEN USB-serial adapter among attached ports
            atens = [x for x in serial.tools.list_ports.grep('ATEN')]
            if len(atens) == 0:
                raise RuntimeError('No ATEN device found. Stop.')
            try:
                self.stimo_port = atens[0].device
            except AttributeError:  # depends on Python distribution
                self.stimo_port = atens[0][0]
        else:
            self.stimo_port = self.cfg.STIMO_COMPORT
        self.ser = serial.Serial(self.stimo_port, self.cfg.STIMO_BAUDRATE)
        logger.info('STIMO serial port %s is_open = %s' % (self.stimo_port, self.ser.is_open))
    # FES only
    if self.cfg.WITH_FES is True:
        self.stim = fes.Motionstim8()
        self.stim.OpenSerialPort(self.cfg.FES_COMPORT)
        self.stim.InitializeChannelListMode()
        logger.info('Opened FES serial port')
def __init__(self, cfg, viz, tdef, trigger, logfile=None):
    """
    Set up feedback state and connect optional STIMO stimulation hardware.

    cfg: protocol config object; viz: visual feedback object; tdef: trigger
    definitions; trigger: trigger output device; logfile: optional log path.
    """
    self.cfg = cfg
    self.tdef = tdef
    self.trigger = trigger
    self.viz = viz
    self.viz.fill()
    # seconds between display refreshes
    self.refresh_delay = 1.0 / self.cfg.REFRESH_RATE
    self.bar_step_left = self.cfg.BAR_STEP_LEFT
    self.bar_step_right = self.cfg.BAR_STEP_RIGHT
    self.bar_step_up = self.cfg.BAR_STEP_UP
    self.bar_step_down = self.cfg.BAR_STEP_DOWN
    self.bar_step_both = self.cfg.BAR_STEP_BOTH
    # keep the bias as a mutable list so it can be adjusted at runtime
    if type(self.cfg.BAR_BIAS) is tuple:
        self.bar_bias = list(self.cfg.BAR_BIAS)
    else:
        self.bar_bias = self.cfg.BAR_BIAS
    # New decoder: already smoothed by the decoder so bias after.
    #self.alpha_old = self.cfg.PROB_ACC_ALPHA
    #self.alpha_new = 1.0 - self.cfg.PROB_ACC_ALPHA
    # optionally end the trial as soon as the bar is full;
    # getattr() replaces the hasattr()/== True pair with identical semantics
    self.premature_end = getattr(self.cfg, 'BAR_REACH_FINISH', False) == True
    self.tm_trigger = qc.Timer()
    self.tm_display = qc.Timer()
    self.tm_watchdog = qc.Timer()
    # optional per-trial log file; intentionally left open (closed elsewhere)
    if logfile is not None:
        self.logf = open(logfile, 'w')
    else:
        self.logf = None
    # STIMO only
    if self.cfg.WITH_STIMO is True:
        if self.cfg.STIMO_COMPORT is None:
            # auto-detect an ATEN USB-serial adapter among attached ports
            atens = [x for x in serial.tools.list_ports.grep('ATEN')]
            if len(atens) == 0:
                raise RuntimeError('No ATEN device found. Stop.')
            try:
                self.stimo_port = atens[0].device
            except AttributeError:  # depends on Python distribution
                self.stimo_port = atens[0][0]
        else:
            self.stimo_port = self.cfg.STIMO_COMPORT
        self.ser = serial.Serial(self.stimo_port, self.cfg.STIMO_BAUDRATE)
        # use logger instead of print() for consistency with the sibling __init__
        logger.info('STIMO serial port %s is_open = %s' % (self.stimo_port, self.ser.is_open))
def __init__(self, window_size=1.0, buffer_size=0, amp_serial=None, eeg_only=False, amp_name=None):
    """
    Params:
        window_size (in seconds): keep the latest window_size seconds of the buffer.
        buffer_size (in seconds): keep everything if buffer_size=0.
        amp_name: connect to a server named 'amp_name'. None: no constraint.
        amp_serial: connect to a server with serial number 'amp_serial'. None: no constraint.
        eeg_only: ignore non-EEG servers
    """
    self.winsec = window_size
    self.bufsec = buffer_size
    self.amp_serial = amp_serial
    self.eeg_only = eeg_only
    self.amp_name = amp_name
    self.tr_channel = None  # trigger indx used by StreamReceiver class
    self.eeg_channels = []  # signal indx used by StreamReceiver class
    self._lsl_tr_channel = None  # raw trigger indx in pylsl.pull_chunk()
    self._lsl_eeg_channels = []  # raw signal indx in pylsl.pull_chunk()
    self.ready = False  # False until the buffer is filled for the first time
    self.bufsize = 0  # to be calculated using sampling rate
    self.connected = False
    self.buffers = []
    self.timestamps = []
    self.watchdog = qc.Timer()
    self.multiplier = 1  # 10**6 for uV unit (automatically updated for openvibe servers)
    # resolve and connect to the LSL stream immediately (blocks until found)
    self.connect()
def main():
    """Benchmark multitaper PSD computation speed on random data."""
    fmin, fmax = 1, 40
    channels = 64
    wlen = 0.5  # window length in seconds
    sfreq = 512
    num_iterations = 500
    # random signal of shape (channels, samples)
    signal = np.random.rand(channels, int(np.round(sfreq * wlen)))
    psde = mne.decoding.PSDEstimator(sfreq=sfreq, fmin=fmin, fmax=fmax,
                                     bandwidth=None, adaptive=False,
                                     low_bias=True, n_jobs=1,
                                     normalization='length', verbose=None)
    tm = qc.Timer()
    elapsed_ms = []
    for it in range(num_iterations):
        tm.reset()
        # PSDEstimator expects (n_epochs, n_channels, n_samples)
        psd = psde.transform(
            signal.reshape((1, signal.shape[0], signal.shape[1])))
        elapsed_ms.append(tm.msec())
        if it % 100 == 0:
            print('%d / %d' % (it, num_iterations))
    mean_ms = np.mean(elapsed_ms)
    fps = 1000 / mean_ms
    print('Average = %.1f ms (%.1f Hz)' % (mean_ms, fps))
def log_decoding_helper(state, event_queue, amp_name=None, amp_serial=None, autostop=False):
    """
    Helper function to run StreamReceiver object in background

    state: shared Value; 0 = waiting, 1 = acquiring (set to 0 to stop).
    event_queue: multiprocessing queue receiving (event_times, event_values).
    autostop: if True, stop automatically when samples cease after having started.
    """
    logger.info('Event acquisition subprocess started.')
    # wait for the start signal
    while state.value == 0:
        time.sleep(0.01)
    # acquire event values and returns event times and event values
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name, amp_serial=amp_serial)
    tm = qc.Timer(autoreset=True)
    started = False
    while state.value == 1:
        chunk, ts_list = sr.acquire()
        if autostop:
            if started is True:
                # stream went silent after having produced data: stop
                if len(ts_list) == 0:
                    state.value = 0
                    break
            elif len(ts_list) > 0:
                started = True
        tm.sleep_atleast(0.001)
    logger.info('Event acquisition subprocess finishing up ...')
    buffers, times = sr.get_buffer()
    events = buffers[:, 0]  # first channel is the trigger channel
    # non-zero trigger values mark events
    event_index = np.where(events != 0)[0]
    event_times = times[event_index].reshape(-1).tolist()
    event_values = events[event_index].tolist()
    assert len(event_times) == len(event_values)
    event_queue.put((event_times, event_values))
def __init__(self, mock=False):
    """
    Glass controller.

    mock: if True, run without real hardware (fake control object).
    """
    self.BUFFER_SIZE = 1024  # network receive buffer size in bytes
    self.last_dir = 'L'  # last direction sent
    self.timer = qc.Timer(autoreset=True)
    self.mock = mock
    if self.mock:
        # NOTE(review): self.print is presumably defined elsewhere on this
        # class — confirm; otherwise this raises AttributeError when mock=True
        self.print('Using a fake, mock Glass control object.')
def record(state, amp_name, amp_serial, record_dir, eeg_only):
    """Record an LSL EEG stream into a pickle file until state.value != 1.

    state: shared Value; recording loop runs while state.value == 1.
    amp_name / amp_serial: LSL server constraints (None = no constraint).
    record_dir: output directory for the timestamped -raw.pcl file.
    eeg_only: ignore non-EEG servers.
    """
    # set data file name
    filename = time.strftime(record_dir + "/%Y%m%d-%H%M%S-raw.pcl", time.localtime())
    qc.print_c('>> Output file: %s' % (filename), 'W')
    # test writability; close the handle and preserve the original cause
    try:
        qc.make_dirs(record_dir)
        with open(filename, 'w') as f:
            f.write('The data will be written when the recording is finished.')
    except Exception as e:
        raise RuntimeError('Problem writing to %s. Check permission.' % filename) from e
    # start a server for sending out data filename when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',
                          source_id=filename, stype='Markers')
    # connect to EEG stream server
    sr = StreamReceiver(amp_name=amp_name, amp_serial=amp_serial, eeg_only=eeg_only)
    # start recording
    qc.print_c('\n>> Recording started (PID %d).' % os.getpid(), 'W')
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while state.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            print('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.01)
    # record stop
    qc.print_c('>> Stop requested. Copying buffer', 'G')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None
    # channels = total channels from amp, including trigger channel
    data = {
        'signals': signals,
        'timestamps': times,
        'events': events,
        'sample_rate': sr.get_sample_rate(),
        'channels': sr.get_num_channels(),
        'ch_names': sr.get_channel_names()
    }
    qc.print_c('Saving raw data ...', 'W')
    qc.save_obj(filename, data)
    print('Saved to %s\n' % filename)
    qc.print_c('Converting raw file into a fif format.', 'W')
    pcl2fif(filename)
def record(recordState, amp_name, amp_serial, record_dir, eeg_only, recordLogger=logger, queue=None):
    """Record an LSL EEG stream into a pickle file until recordState.value != 1.

    recordState: shared Value; recording loop runs while recordState.value == 1.
    amp_name / amp_serial: LSL server constraints (None = no constraint).
    record_dir: output directory for the timestamped -raw.pcl / -eve.txt files.
    eeg_only: ignore non-EEG servers.
    recordLogger: logger to report progress to.
    queue: optional queue for redirecting stdout (GUI mode).
    """
    redirect_stdout_to_queue(recordLogger, queue, 'INFO')
    # set data file name
    timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
    pcl_file = "%s/%s-raw.pcl" % (record_dir, timestamp)
    eve_file = '%s/%s-eve.txt' % (record_dir, timestamp)
    recordLogger.info('>> Output file: %s' % (pcl_file))
    # test writability; close the handle and preserve the original cause
    try:
        qc.make_dirs(record_dir)
        with open(pcl_file, 'w') as f:
            f.write('The data will be written when the recording is finished.')
    except Exception as e:
        raise RuntimeError('Problem writing to %s. Check permission.' % pcl_file) from e
    # start a server for sending out data pcl_file when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',
                          source_id=eve_file, stype='Markers')
    # connect to EEG stream server
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name, amp_serial=amp_serial, eeg_only=eeg_only)
    # start recording
    recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while recordState.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            recordLogger.info('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.001)
    # record stop
    recordLogger.info('>> Stop requested. Copying buffer')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None
    # channels = total channels from amp, including trigger channel
    data = {'signals': signals,
            'timestamps': times,
            'events': events,
            'sample_rate': sr.get_sample_rate(),
            'channels': sr.get_num_channels(),
            'ch_names': sr.get_channel_names(),
            'lsl_time_offset': sr.lsl_time_offset}
    recordLogger.info('Saving raw data ...')
    qc.save_obj(pcl_file, data)
    recordLogger.info('Saved to %s\n' % pcl_file)
    # automatically convert to fif and use event file if it exists (software trigger)
    if os.path.exists(eve_file):
        recordLogger.info('Found matching event file, adding events.')
    else:
        eve_file = None
    recordLogger.info('Converting raw file into fif.')
    pcl2fif(pcl_file, external_event=eve_file)
def fit_predict_thres(cls, X_train, Y_train, X_test, Y_test, cnum, label_list, ignore_thres=None, decision_thres=None):
    """
    Any likelihood lower than a threshold is not counted as classification score
    Confusion matrix, accuracy and F1 score (macro average) are computed.

    Params
    ======
    ignore_thres:
        if not None or larger than 0, likelihood values lower than
        ignore_thres will be ignored while computing confusion matrix.

    Returns
    =======
    (score, cm, f1): accuracy, confusion matrix, macro-averaged F1 score.
    """
    timer = qc.Timer()
    cls.fit(X_train, Y_train)
    assert ignore_thres is None or ignore_thres >= 0
    if ignore_thres is None or ignore_thres == 0:
        Y_pred = cls.predict(X_test)
        score = skmetrics.accuracy_score(Y_test, Y_pred)
        # labels must be passed as a keyword: extra positional args were
        # deprecated in sklearn 0.25 and removed in 1.1
        cm = skmetrics.confusion_matrix(Y_test, Y_pred, labels=label_list)
        f1 = skmetrics.f1_score(Y_test, Y_pred, average='macro')
    else:
        if decision_thres is not None:
            logger.error(
                'decision threshold and ignore_thres cannot be set at the same time.'
            )
            raise ValueError
        Y_pred = cls.predict_proba(X_test)
        Y_pred_labels = np.argmax(Y_pred, axis=1)
        # winning-class likelihood for each test sample
        Y_pred_maxes = np.array([x[i] for i, x in zip(Y_pred_labels, Y_pred)])
        Y_index_overthres = np.where(Y_pred_maxes >= ignore_thres)[0]
        Y_index_underthres = np.where(Y_pred_maxes < ignore_thres)[0]
        Y_pred_overthres = np.array(
            [cls.classes_[x] for x in Y_pred_labels[Y_index_overthres]])
        Y_pred_underthres = np.array(
            [cls.classes_[x] for x in Y_pred_labels[Y_index_underthres]])
        Y_pred_underthres_count = np.array(
            [np.count_nonzero(Y_pred_underthres == c) for c in label_list])
        Y_test_overthres = Y_test[Y_index_overthres]
        score = skmetrics.accuracy_score(Y_test_overthres, Y_pred_overthres)
        cm = skmetrics.confusion_matrix(Y_test_overthres, Y_pred_overthres,
                                        labels=label_list)
        # append under-threshold counts as an extra column so rejected
        # samples remain visible in the confusion matrix
        cm = np.concatenate((cm, Y_pred_underthres_count[:, np.newaxis]),
                            axis=1)
        f1 = skmetrics.f1_score(Y_test_overthres, Y_pred_overthres,
                                average='macro')
    logger.info('Cross-validation %d (%.3f) - %.1f sec' % (cnum, score, timer.sec()))
    return score, cm, f1
def init_timer(self): self.tm = qc.Timer() # leeq QtCore.QCoreApplication.processEvents() QtCore.QCoreApplication.flush() self.timer = QtCore.QTimer(self) self.timer.timeout.connect(self.update_loop) self.timer.start(20)
def get_predict_proba(cls, X_train, Y_train, X_test, Y_test, cnum):
    """
    Fit on the training fold and collect class-0 likelihoods for the test fold.

    Likelihoods gathered across all cross-validation folds can later be used
    to derive a decision threshold balancing per-class true positive rates.
    Binary classification only.
    """
    tm_fold = qc.Timer()
    cls.fit(X_train, Y_train)
    probs = cls.predict_proba(X_test)
    logger.info('Cross-validation %d (%d tests) - %.1f sec'
                % (cnum, probs.shape[0], tm_fold.sec()))
    # likelihood of the first class for every test sample
    return probs[:, 0]
def __init__(self, window_size=1, buffer_size=1, amp_serial=None, eeg_only=False, amp_name=None):
    """
    Params:
        window_size (in seconds): keep the latest window_size seconds of the buffer.
        buffer_size (in seconds): 1-day is the maximum size. Large buffer may lead to a delay if not pulled frequently.
        amp_name: connect to a server named 'amp_name'. None: no constraint.
        amp_serial: connect to a server with serial number 'amp_serial'. None: no constraint.
        eeg_only: ignore non-EEG servers
    """
    _MAX_BUFFER_SIZE = 86400  # max buffer size allowed by StreamReceiver (24 hours)
    _MAX_PYLSL_STREAM_BUFSIZE = 360  # max buffer size for pylsl.StreamInlet
    if window_size <= 0:
        logger.error('Wrong window_size %d.' % window_size)
        raise ValueError()
    self.winsec = window_size
    # clamp the requested buffer length into [winsec, _MAX_BUFFER_SIZE];
    # 0 means "keep everything" (i.e. the maximum)
    if buffer_size == 0:
        buffer_size = _MAX_BUFFER_SIZE
    elif buffer_size < 0 or buffer_size > _MAX_BUFFER_SIZE:
        logger.error('Improper buffer size %.1f. Setting to %d.' % (buffer_size, _MAX_BUFFER_SIZE))
        buffer_size = _MAX_BUFFER_SIZE
    elif buffer_size < self.winsec:
        logger.error(
            'Buffer size %.1f is smaller than window size. Setting to %.1f.' % (buffer_size, self.winsec))
        buffer_size = self.winsec
    self.bufsec = buffer_size
    self.bufsize = 0  # to be calculated using sampling rate
    # pylsl's own inlet buffer is capped separately
    self.stream_bufsec = int(
        math.ceil(min(_MAX_PYLSL_STREAM_BUFSIZE, self.bufsec)))
    self.stream_bufsize = 0  # to be calculated using sampling rate
    self.amp_serial = amp_serial
    self.eeg_only = eeg_only
    self.amp_name = amp_name
    self.tr_channel = None  # trigger indx used by StreamReceiver class
    self.eeg_channels = []  # signal indx used by StreamReceiver class
    self._lsl_tr_channel = None  # raw trigger indx in pylsl.pull_chunk()
    self._lsl_eeg_channels = []  # raw signal indx in pylsl.pull_chunk()
    self.ready = False  # False until the buffer is filled for the first time
    self.connected = False
    self.buffers = []
    self.timestamps = []
    self.watchdog = qc.Timer()
    self.multiplier = 1  # 10**6 for uV unit (automatically updated for openvibe servers)
    # resolve and connect to the LSL stream immediately (blocks until found)
    self.connect()
def check_speed(decoder, max_count=float('inf')):
    """Measure the decoder's classification rate and print running statistics."""
    stopwatch = qc.Timer()
    n_done = 0
    latencies = []
    while n_done < max_count:
        # busy-wait until a fresh probability frame is available
        while decoder.get_prob_unread() is None:
            pass
        n_done += 1
        # report roughly once per second, then restart the window
        if stopwatch.sec() > 1:
            elapsed = stopwatch.sec()
            per_cls_ms = 1000.0 * elapsed / n_done
            print('%.0f ms/c %.1f Hz' % (per_cls_ms, n_done / elapsed))
            latencies.append(per_cls_ms)
            n_done = 0
            stopwatch.reset()
    print('mean = %.1f ms' % np.mean(latencies))
# Connect to the Arduino LED controller over USB serial.
arduino = ArduinoCommHandler(port_name='/dev/ttyACM0', baudrate=115200)
arduino.start_communication()
# One intensity value per LED on the strip.
leds_values = [0] * 191
leds_values_index_for_test = 0
mne.set_log_level('ERROR')  # actually improves performance for multitaper
os.environ['OMP_NUM_THREADS'] = '1'
# Find and connect to an EEG LSL stream.
amp_name, amp_serial = pu.search_lsl()
sr = StreamReceiver(
    window_size=1, buffer_size=1, amp_name=amp_name, amp_serial=amp_serial,
    eeg_only=True)
sfreq = sr.get_sample_rate()
watchdog = qc.Timer()
tm = qc.Timer(autoreset=True)
trg_ch = sr.get_trigger_channel()
last_ts = 0
# qc.print_c('Trigger channel: %d' % trg_ch, 'G')
# Multitaper PSD estimator over the 1-40 Hz band.
fmin = 1
fmax = 40
psde = mne.decoding.PSDEstimator(
    sfreq=sfreq, fmin=fmin, fmax=fmax, bandwidth=None, adaptive=False,
    low_bias=True, n_jobs=1, normalization='length', verbose=None)
# Main acquisition loop.
# NOTE(review): the loop body may continue beyond this chunk — confirm.
while True:
    sr.acquire()
def classify(self, decoder, true_label, title_text, bar_dirs, state='start', prob_history=None):
    """
    Run a single trial

    Drives the trial state machine: start -> gap_s -> gap -> cue -> dir_r ->
    dir -> feedback (-> return for BODY feedback), refreshing the display and
    accumulating decoder probabilities into the feedback bar.

    decoder: provides get_prob_smooth_unread() probability frames.
    true_label: cued direction for this trial ('L','R','U','D','B').
    title_text: text shown during the inter-trial gap.
    bar_dirs: list of directions, in the decoder's probability order.
    state: initial state of the state machine.
    prob_history: optional dict direction -> list to append raw probabilities.

    Returns the detected direction label, or None if aborted by ESC/stop.
    """
    true_label_index = bar_dirs.index(true_label)
    self.tm_trigger.reset()
    if self.bar_bias is not None:
        bias_idx = bar_dirs.index(self.bar_bias[0])
    if self.logf is not None:
        self.logf.write('True label: %s\n' % true_label)
    tm_classify = qc.Timer(autoreset=True)
    self.stimo_timer = qc.Timer()
    while True:
        # throttle to the configured refresh rate
        self.tm_display.sleep_atleast(self.refresh_delay)
        self.tm_display.reset()
        if state == 'start' and self.tm_trigger.sec() > self.cfg.TIMINGS['INIT']:
            state = 'gap_s'
            if self.cfg.TRIALS_PAUSE:
                self.viz.put_text('Press any key')
                self.viz.update()
                key = cv2.waitKeyEx()
                if key == KEYS['esc'] or not self.protocol_state.value:
                    return
            self.viz.fill()
            self.tm_trigger.reset()
            self.trigger.signal(self.tdef.INIT)
        elif state == 'gap_s':
            if self.cfg.TIMINGS['GAP'] > 0:
                self.viz.put_text(title_text)
            state = 'gap'
            self.tm_trigger.reset()
        elif state == 'gap' and self.tm_trigger.sec() > self.cfg.TIMINGS['GAP']:
            state = 'cue'
            self.viz.fill()
            self.viz.draw_cue()
            self.viz.glass_draw_cue()
            self.trigger.signal(self.tdef.CUE)
            self.tm_trigger.reset()
        elif state == 'cue' and self.tm_trigger.sec() > self.cfg.TIMINGS['READY']:
            state = 'dir_r'
            if self.cfg.SHOW_CUE is True:
                if self.cfg.FEEDBACK_TYPE == 'BAR':
                    self.viz.move(true_label, 100, overlay=False, barcolor='G')
                elif self.cfg.FEEDBACK_TYPE == 'BODY':
                    self.viz.put_text(DIRS[true_label], 'R')
                # emit the READY trigger matching the cued direction
                if true_label == 'L':  # left
                    self.trigger.signal(self.tdef.LEFT_READY)
                elif true_label == 'R':  # right
                    self.trigger.signal(self.tdef.RIGHT_READY)
                elif true_label == 'U':  # up
                    self.trigger.signal(self.tdef.UP_READY)
                elif true_label == 'D':  # down
                    self.trigger.signal(self.tdef.DOWN_READY)
                elif true_label == 'B':  # both hands
                    self.trigger.signal(self.tdef.BOTH_READY)
                else:
                    raise RuntimeError('Unknown direction %s' % true_label)
            self.tm_trigger.reset()
            '''
            if self.cfg.FEEDBACK_TYPE == 'BODY':
                self.viz.set_pc_feedback(False)
            self.viz.move(true_label, 100, overlay=False, barcolor='G')
            if self.cfg.FEEDBACK_TYPE == 'BODY':
                self.viz.set_pc_feedback(True)
            if self.cfg.SHOW_CUE is True:
                self.viz.put_text(dirs[true_label], 'R')
                if true_label == 'L':  # left
                    self.trigger.signal(self.tdef.LEFREADY)
                elif true_label == 'R':  # right
                    self.trigger.signal(self.tdef.RIGHT_READY)
                elif true_label == 'U':  # up
                    self.trigger.signal(self.tdef.UP_READY)
                elif true_label == 'D':  # down
                    self.trigger.signal(self.tdef.DOWN_READY)
                elif true_label == 'B':  # both hands
                    self.trigger.signal(self.tdef.BOTH_READY)
                else:
                    raise RuntimeError('Unknown direction %s' % true_label)
                self.tm_trigger.reset()
            '''
        elif state == 'dir_r' and self.tm_trigger.sec() > self.cfg.TIMINGS['DIR_CUE']:
            self.viz.fill()
            self.viz.draw_cue()
            self.viz.glass_draw_cue()
            state = 'dir'
            # initialize bar scores
            bar_label = bar_dirs[0]
            bar_score = 0
            probs = [1.0 / len(bar_dirs)] * len(bar_dirs)
            self.viz.move(bar_label, bar_score, overlay=False)
            probs_acc = np.zeros(len(probs))
            # emit the GO trigger matching the cued direction
            if true_label == 'L':  # left
                self.trigger.signal(self.tdef.LEFT_GO)
            elif true_label == 'R':  # right
                self.trigger.signal(self.tdef.RIGHT_GO)
            elif true_label == 'U':  # up
                self.trigger.signal(self.tdef.UP_GO)
            elif true_label == 'D':  # down
                self.trigger.signal(self.tdef.DOWN_GO)
            elif true_label == 'B':  # both
                self.trigger.signal(self.tdef.BOTH_GO)
            else:
                raise RuntimeError('Unknown truedirection %s' % true_label)
            self.tm_watchdog.reset()
            self.tm_trigger.reset()
        elif state == 'dir':
            # trial ends on timeout, or early if the bar is full and
            # premature_end is enabled
            if self.tm_trigger.sec() > self.cfg.TIMINGS['CLASSIFY'] or (
                    self.premature_end and bar_score >= 100):
                if not hasattr(
                        self.cfg, 'SHOW_RESULT') or self.cfg.SHOW_RESULT is True:
                    # show classfication result
                    if self.cfg.WITH_STIMO is True:
                        if self.cfg.STIMO_FULLGAIT_CYCLE is not None and bar_label == 'U':
                            res_color = 'G'
                        elif self.cfg.TRIALS_RETRY is False or bar_label == true_label:
                            res_color = 'G'
                        else:
                            res_color = 'Y'
                    else:
                        res_color = 'Y'
                    if self.cfg.FEEDBACK_TYPE == 'BODY':
                        self.viz.move(bar_label, bar_score, overlay=False,
                                      barcolor=res_color,
                                      caption=DIRS[bar_label],
                                      caption_color=res_color)
                    else:
                        self.viz.move(bar_label, 100, overlay=False,
                                      barcolor=res_color)
                else:
                    res_color = 'Y'
                    if self.cfg.FEEDBACK_TYPE == 'BODY':
                        self.viz.move(bar_label, bar_score, overlay=False,
                                      barcolor=res_color, caption='TRIAL END',
                                      caption_color=res_color)
                    else:
                        self.viz.move(bar_label, 0, overlay=False,
                                      barcolor=res_color)
                self.trigger.signal(self.tdef.FEEDBACK)
                # STIMO
                if self.cfg.WITH_STIMO is True and self.cfg.STIMO_CONTINUOUS is False:
                    if self.cfg.STIMO_FULLGAIT_CYCLE is not None:
                        # full gait cycle mode: two stimulation phases
                        if bar_label == 'U':
                            self.ser.write(self.cfg.STIMO_FULLGAIT_PATTERN[0])
                            logger.info('STIMO: Sent 1')
                            time.sleep(self.cfg.STIMO_FULLGAIT_CYCLE)
                            self.ser.write(self.cfg.STIMO_FULLGAIT_PATTERN[1])
                            logger.info('STIMO: Sent 2')
                            time.sleep(self.cfg.STIMO_FULLGAIT_CYCLE)
                    elif self.cfg.TRIALS_RETRY is False or bar_label == true_label:
                        if bar_label == 'L':
                            self.ser.write(b'1')
                            logger.info('STIMO: Sent 1')
                        elif bar_label == 'R':
                            self.ser.write(b'2')
                            logger.info('STIMO: Sent 2')
                if self.cfg.DEBUG_PROBS:
                    msg = 'DEBUG: Accumulated probabilities = %s' % qc.list2string(
                        probs_acc, '%.3f')
                    logger.info(msg)
                    if self.logf is not None:
                        self.logf.write(msg + '\n')
                if self.logf is not None:
                    self.logf.write('%s detected as %s (%d)\n\n' %
                                    (true_label, bar_label, bar_score))
                    self.logf.flush()
                # end of trial
                state = 'feedback'
                self.tm_trigger.reset()
            else:
                # classify
                probs_new = decoder.get_prob_smooth_unread()
                if probs_new is None:
                    if self.tm_watchdog.sec() > 3:
                        logger.warning(
                            'No classification being done. Are you receiving data streams?'
                        )
                        self.tm_watchdog.reset()
                else:
                    self.tm_watchdog.reset()
                    if prob_history is not None:
                        prob_history[true_label].append(
                            probs_new[true_label_index])
                    probs_acc += np.array(probs_new)
                    '''
                    New decoder: already smoothed by the decoder so bias after.
                    '''
                    probs = list(probs_new)
                    # apply the direction bias and renormalize
                    if self.bar_bias is not None:
                        probs[bias_idx] += self.bar_bias[1]
                        newsum = sum(probs)
                        probs = [p / newsum for p in probs]
                    '''
                    # Method 2: bias and smoothen
                    if self.bar_bias is not None:
                        # print('BEFORE: %.3f %.3f'% (probs_new[0], probs_new[1]) )
                        probs_new[bias_idx] += self.bar_bias[1]
                        newsum = sum(probs_new)
                        probs_new = [p / newsum for p in probs_new]
                        # print('AFTER: %.3f %.3f'% (probs_new[0], probs_new[1]) )
                    for i in range(len(probs_new)):
                        probs[i] = probs[i] * self.alpha_old + probs_new[i] * self.alpha_new
                    '''
                    '''
                    Original method
                    # Method 1: smoothen and bias
                    for i in range( len(probs_new) ):
                        probs[i] = probs[i] * self.alpha_old + probs_new[i] * self.alpha_new
                    # bias bar
                    if self.bar_bias is not None:
                        probs[bias_idx] += self.bar_bias[1]
                        newsum = sum(probs)
                        probs = [p/newsum for p in probs]
                    '''
                    # determine the direction
                    # TODO: np.argmax(probs)
                    max_pidx = qc.get_index_max(probs)
                    max_label = bar_dirs[max_pidx]
                    if self.cfg.POSITIVE_FEEDBACK is False or \
                            (self.cfg.POSITIVE_FEEDBACK and true_label == max_label):
                        # bar advance is probability times the per-direction step
                        dx = probs[max_pidx]
                        if max_label == 'R':
                            dx *= self.bar_step_right
                        elif max_label == 'L':
                            dx *= self.bar_step_left
                        elif max_label == 'U':
                            dx *= self.bar_step_up
                        elif max_label == 'D':
                            dx *= self.bar_step_down
                        elif max_label == 'B':
                            dx *= self.bar_step_both
                        else:
                            logger.debug('Direction %s using bar step %d' %
                                         (max_label, self.bar_step_left))
                            dx *= self.bar_step_left
                        # slow start
                        selected = self.cfg.BAR_SLOW_START['selected']
                        if self.cfg.BAR_SLOW_START[selected] and self.tm_trigger.sec(
                        ) < self.cfg.BAR_SLOW_START[selected]:
                            dx *= self.tm_trigger.sec(
                            ) / self.cfg.BAR_SLOW_START[selected][0]
                        # add likelihoods
                        if max_label == bar_label:
                            bar_score += dx
                        else:
                            bar_score -= dx
                            # change of direction
                            if bar_score < 0:
                                bar_score = -bar_score
                                bar_label = max_label
                        bar_score = int(bar_score)
                        if bar_score > 100:
                            bar_score = 100
                        if self.cfg.FEEDBACK_TYPE == 'BODY':
                            if self.cfg.SHOW_CUE:
                                self.viz.move(bar_label, bar_score,
                                              overlay=False,
                                              caption=DIRS[true_label],
                                              caption_color='G')
                            else:
                                self.viz.move(bar_label, bar_score,
                                              overlay=False)
                        else:
                            self.viz.move(bar_label, bar_score, overlay=False)
                    # send the confidence value continuously
                    if self.cfg.WITH_STIMO and self.cfg.STIMO_CONTINUOUS:
                        if self.stimo_timer.sec() >= self.cfg.STIMO_COOLOFF:
                            if bar_label == 'U':
                                stimo_code = bar_score
                            else:
                                stimo_code = 0
                            self.ser.write(bytes([stimo_code]))
                            logger.info('Sent STIMO code %d' % stimo_code)
                            self.stimo_timer.reset()
                    if self.cfg.DEBUG_PROBS:
                        if self.bar_bias is not None:
                            biastxt = '[Bias=%s%.3f] ' % (self.bar_bias[0],
                                                          self.bar_bias[1])
                        else:
                            biastxt = ''
                        msg = '%s%s prob %s acc %s bar %s%d (%.1f ms)' % \
                            (biastxt, bar_dirs, qc.list2string(probs_new, '%.2f'),
                             qc.list2string(probs, '%.2f'), bar_label, bar_score,
                             tm_classify.msec())
                        logger.info(msg)
                        if self.logf is not None:
                            self.logf.write(msg + '\n')
        elif state == 'feedback' and self.tm_trigger.sec() > self.cfg.TIMINGS['FEEDBACK']:
            self.trigger.signal(self.tdef.BLANK)
            if self.cfg.FEEDBACK_TYPE == 'BODY':
                state = 'return'
                self.tm_trigger.reset()
            else:
                state = 'gap_s'
                self.viz.fill()
                self.viz.update()
                return bar_label
        elif state == 'return':
            # BODY feedback: shrink the bar back to zero before ending
            self.viz.set_glass_feedback(False)
            if self.cfg.WITH_STIMO:
                self.viz.move(bar_label, bar_score, overlay=False, barcolor='B')
            else:
                self.viz.move(bar_label, bar_score, overlay=False, barcolor='Y')
            self.viz.set_glass_feedback(True)
            bar_score -= 5
            if bar_score <= 0:
                state = 'gap_s'
                self.viz.fill()
                self.viz.update()
                return bar_label
        self.viz.update()
        key = cv2.waitKeyEx(1)
        if key == KEYS['esc'] or not self.protocol_state.value:
            return
        elif key == KEYS['space']:
            # reset the bar and probabilities on SPACE
            dx = 0
            bar_score = 0
            probs = [1.0 / len(bar_dirs)] * len(bar_dirs)
            self.viz.move(bar_dirs[0], bar_score, overlay=False)
            self.viz.update()
            logger.info('probs and dx reset.')
            self.tm_trigger.reset()
        elif key in ARROW_KEYS and ARROW_KEYS[key] in bar_dirs:
            # change bias on the fly
            if self.bar_bias is None:
                self.bar_bias = [ARROW_KEYS[key], BIAS_INCREMENT]
            else:
                if ARROW_KEYS[key] == self.bar_bias[0]:
                    self.bar_bias[1] += BIAS_INCREMENT
                elif self.bar_bias[1] >= BIAS_INCREMENT:
                    self.bar_bias[1] -= BIAS_INCREMENT
                else:
                    self.bar_bias = [ARROW_KEYS[key], BIAS_INCREMENT]
            if self.bar_bias[1] == 0:
                self.bar_bias = None
            else:
                bias_idx = bar_dirs.index(self.bar_bias[0])
def cross_validate(cfg, featdata, cv_file=None):
    """ Perform cross validation """
    # Init a classifier
    if cfg.CLASSIFIER == 'GB':
        # NOTE(review): 'presort' was removed from scikit-learn (deprecated
        # 0.22, removed 0.24) — confirm the pinned sklearn version accepts it
        cls = GradientBoostingClassifier(loss='deviance',
                                         learning_rate=cfg.GB['learning_rate'],
                                         n_estimators=cfg.GB['trees'],
                                         subsample=1.0,
                                         max_depth=cfg.GB['max_depth'],
                                         random_state=cfg.GB['seed'],
                                         max_features='sqrt', verbose=0,
                                         warm_start=False, presort='auto')
    elif cfg.CLASSIFIER == 'XGB':
        # NOTE(review): these keyword names mirror the GB branch — confirm
        # XGBClassifier actually accepts them in the pinned xgboost version
        cls = XGBClassifier(loss='deviance',
                            learning_rate=cfg.GB['learning_rate'],
                            n_estimators=cfg.GB['trees'], subsample=1.0,
                            max_depth=cfg.GB['max_depth'],
                            random_state=cfg.GB['seed'], max_features='sqrt',
                            verbose=0, warm_start=False, presort='auto')
    elif cfg.CLASSIFIER == 'RF':
        cls = RandomForestClassifier(n_estimators=cfg.RF['trees'],
                                     max_features='auto',
                                     max_depth=cfg.RF['max_depth'],
                                     n_jobs=cfg.N_JOBS,
                                     random_state=cfg.RF['seed'],
                                     oob_score=True,
                                     class_weight='balanced_subsample')
    elif cfg.CLASSIFIER == 'LDA':
        cls = LDA()
    elif cfg.CLASSIFIER == 'rLDA':
        cls = rLDA(cfg.RLDA_REGULARIZE_COEFF)
    else:
        raise ValueError('Unknown classifier type %s' % cfg.CLASSIFIER)
    # Setup features
    X_data = featdata['X_data']
    Y_data = featdata['Y_data']
    wlen = featdata['wlen']
    if cfg.PSD['wlen'] is None:
        cfg.PSD['wlen'] = wlen
    # Choose CV type
    ntrials, nsamples, fsize = X_data.shape
    if cfg.CV_PERFORM == 'LeaveOneOut':
        print('\n>> %d-fold leave-one-out cross-validation' % ntrials)
        if SKLEARN_OLD:
            cv = LeaveOneOut(len(Y_data))
        else:
            cv = LeaveOneOut()
    elif cfg.CV_PERFORM == 'StratifiedShuffleSplit':
        print(
            '\n>> %d-fold stratified cross-validation with test set ratio %.2f'
            % (cfg.CV_FOLDS, cfg.CV_TEST_RATIO))
        if SKLEARN_OLD:
            cv = StratifiedShuffleSplit(Y_data[:, 0], cfg.CV_FOLDS,
                                        test_size=cfg.CV_TEST_RATIO,
                                        random_state=cfg.CV_RANDOM_SEED)
        else:
            cv = StratifiedShuffleSplit(n_splits=cfg.CV_FOLDS,
                                        test_size=cfg.CV_TEST_RATIO,
                                        random_state=cfg.CV_RANDOM_SEED)
    else:
        raise NotImplementedError('%s is not supported yet. Sorry.'
                                  % cfg.CV_PERFORM)
    print('%d trials, %d samples per trial, %d feature dimension' %
          (ntrials, nsamples, fsize))
    # Do it!
    timer_cv = qc.Timer()
    scores, cm_txt = crossval_epochs(cv, X_data, Y_data, cls,
                                     cfg.tdef.by_value, cfg.BALANCE_SAMPLES,
                                     n_jobs=cfg.N_JOBS,
                                     ignore_thres=cfg.CV_IGNORE_THRES,
                                     decision_thres=cfg.CV_DECISION_THRES)
    t_cv = timer_cv.sec()
    # Export results
    txt = '\n>> Cross validation took %d seconds.\n' % t_cv
    txt += '\n- Class information\n'
    txt += '%d epochs, %d samples per epoch, %d feature dimension (total %d samples)\n' %\
        (ntrials, nsamples, fsize, ntrials * nsamples)
    for ev in np.unique(Y_data):
        txt += '%s: %d trials\n' % (cfg.tdef.by_value[ev],
                                    len(np.where(Y_data[:, 0] == ev)[0]))
    if cfg.BALANCE_SAMPLES:
        txt += 'The number of samples was balanced across classes. Method: %s\n' % cfg.BALANCE_SAMPLES
    txt += '\n- Experiment conditions\n'
    # NOTE(review): SP_FILTER is printed twice here; the second %s looks like
    # it was meant to be the channel list — confirm against the original repo
    txt += 'Spatial filter: %s (channels: %s)\n' % (cfg.SP_FILTER,
                                                    cfg.SP_FILTER)
    txt += 'Spectral filter: %s\n' % cfg.TP_FILTER
    txt += 'Notch filter: %s\n' % cfg.NOTCH_FILTER
    txt += 'Channels: ' + ','.join(
        [str(featdata['ch_names'][p]) for p in featdata['picks']]) + '\n'
    txt += 'PSD range: %.1f - %.1f Hz\n' % (cfg.PSD['fmin'], cfg.PSD['fmax'])
    txt += 'Window step: %.2f msec\n' % (1000.0 * cfg.PSD['wstep'] /
                                         featdata['sfreq'])
    if type(wlen) is list:
        for i, w in enumerate(wlen):
            txt += 'Window size: %.1f msec\n' % (w * 1000.0)
            txt += 'Epoch range: %s sec\n' % (cfg.EPOCH[i])
    else:
        txt += 'Window size: %.1f msec\n' % (cfg.PSD['wlen'] * 1000.0)
        txt += 'Epoch range: %s sec\n' % (cfg.EPOCH)
    # Compute stats
    cv_mean, cv_std = np.mean(scores), np.std(scores)
    txt += '\n- Average CV accuracy over %d epochs (random seed=%s)\n' % (
        ntrials, cfg.CV_RANDOM_SEED)
    if cfg.CV_PERFORM in ['LeaveOneOut', 'StratifiedShuffleSplit']:
        txt += "mean %.3f, std: %.3f\n" % (cv_mean, cv_std)
    txt += 'Classifier: %s, ' % cfg.CLASSIFIER
    if cfg.CLASSIFIER == 'RF':
        txt += '%d trees, %s max depth, random state %s\n' % (
            cfg.RF['trees'], cfg.RF['max_depth'], cfg.RF['seed'])
    elif cfg.CLASSIFIER == 'GB' or cfg.CLASSIFIER == 'XGB':
        # NOTE(review): 'learing_rate' typo is part of the emitted report text
        txt += '%d trees, %s max depth, %s learing_rate, random state %s\n' % (
            cfg.GB['trees'], cfg.GB['max_depth'], cfg.GB['learning_rate'],
            cfg.GB['seed'])
    elif cfg.CLASSIFIER == 'rLDA':
        txt += 'regularization coefficient %.2f\n' % cfg.RLDA_REGULARIZE_COEFF
    if cfg.CV_IGNORE_THRES is not None:
        txt += 'Decision threshold: %.2f\n' % cfg.CV_IGNORE_THRES
    txt += '\n- Confusion Matrix\n' + cm_txt
    print(txt)
    # Export to a file
    if hasattr(
            cfg, 'CV_EXPORT_RESULT'
    ) and cfg.CV_EXPORT_RESULT is True and cfg.CV_PERFORM is not None:
        if cv_file is None:
            if cfg.EXPORT_CLS is True:
                qc.make_dirs('%s/classifier' % cfg.DATADIR)
                fout = open('%s/classifier/cv_result.txt' % cfg.DATADIR, 'w')
            else:
                fout = open('%s/cv_result.txt' % cfg.DATADIR, 'w')
        else:
            fout = open(cv_file, 'w')
        fout.write(txt)
        fout.close()
def get_psd(epochs, psde, wlen, wstep, picks=None, flatten=True, preprocess=None, decim=1, n_jobs=1):
    """
    Compute multi-taper PSDs over a sliding window.

    Input
    =====
    epochs: MNE Epochs object
    psde: MNE PSDEstimator object
    wlen: window length in frames
    wstep: window step in frames
    picks: channels to be used; use all if None
    flatten: boolean, see Returns section
    preprocess: optional per-window preprocessing, forwarded to slice_win
    decim: decimation factor (kept for interface compatibility)
    n_jobs: number of cores to use, None = use all cores

    Output
    ======
    if flatten==True:
        X_data: [epochs] x [windows] x [channels*freqs]
    else:
        X_data: [epochs] x [windows] x [channels] x [freqs]
    y_data: [epochs] x [windows]

    TODO:
        Accept input as numpy array as well, in addition to Epochs object
    """
    tm = qc.Timer()

    if n_jobs is None:
        n_jobs = mp.cpu_count()
    pool = None
    if n_jobs > 1:
        logger.info('Opening a pool of %d workers' % n_jobs)
        pool = mp.Pool(n_jobs)

    # compute PSD from sliding windows of each epoch
    labels = epochs.events[:, -1]
    epochs_data = epochs.get_data()
    w_starts = np.arange(0, epochs_data.shape[2] - wlen, wstep)

    results = []
    for ep in range(len(labels)):
        title = 'Epoch %d / %d, Frames %d-%d' % (
            ep + 1, len(labels), w_starts[0], w_starts[-1] + wlen - 1)
        if pool is None:
            # no multiprocessing
            results.append(
                slice_win(epochs_data[ep], w_starts, wlen, psde, picks, title, True, preprocess))
        else:
            # parallel psd computation
            results.append(
                pool.apply_async(slice_win, [
                    epochs_data[ep], w_starts, wlen, psde, picks, title, True, preprocess
                ]))

    # collect per-epoch results and concatenate once at the end
    # (avoids the quadratic cost of the previous repeated np.concatenate)
    X_chunks = []
    y_chunks = []
    for ep in range(len(results)):
        r = results[ep] if pool is None else results[ep].get()  # windows x features
        X_chunks.append(r.reshape((1, r.shape[0], r.shape[1])))  # 1 x windows x features
        y = np.empty((1, r.shape[0]))  # 1 x windows
        y.fill(labels[ep])
        y_chunks.append(y)
    X_data = np.concatenate(X_chunks, axis=0) if X_chunks else None
    y_data = np.concatenate(y_chunks, axis=0) if y_chunks else None

    # close pool
    if pool is not None:
        pool.close()
        pool.join()

    logger.info('Feature computation took %d seconds.' % tm.sec())

    # np.int was removed in NumPy 1.24; it was a plain alias of the builtin int,
    # so astype(int) is an exact behavioral equivalent.
    if flatten:
        return X_data, y_data.astype(int)
    else:
        xs = X_data.shape
        nch = len(epochs.ch_names)
        return X_data.reshape(xs[0], xs[1], nch, int(xs[2] / nch)), y_data.astype(int)
def cross_validate(cfg, featdata, cv_file=None):
    """
    Perform cross validation and write a textual report.

    Parameters
    ----------
    cfg : config module/object
        Uses cfg.CLASSIFIER (type and hyperparameters), cfg.CV_PERFORM
        (CV scheme), cfg.CV (balancing/thresholds), cfg.tdef (event
        definitions), and various filter/feature settings for the report.
    featdata : dict
        Precomputed features: 'X_data' [trials x samples x features],
        'Y_data', 'wlen', 'sfreq', 'ch_names', 'picks'.
    cv_file : str or None
        Output path of the result text file; None = derive from cfg.DATA_PATH.

    Raises
    ------
    ValueError: unknown classifier type.
    NotImplementedError: unsupported CV scheme.
    """
    # Init a classifier
    selected_classifier = cfg.CLASSIFIER['selected']
    if selected_classifier == 'GB':
        cls = GradientBoostingClassifier(
            loss='deviance',
            learning_rate=cfg.CLASSIFIER['GB']['learning_rate'],
            presort='auto',
            n_estimators=cfg.CLASSIFIER['GB']['trees'],
            subsample=1.0,
            max_depth=cfg.CLASSIFIER['GB']['depth'],
            random_state=cfg.CLASSIFIER['GB']['seed'],
            max_features='sqrt',
            verbose=0,
            warm_start=False)
    elif selected_classifier == 'XGB':
        cls = XGBClassifier(
            loss='deviance',
            learning_rate=cfg.CLASSIFIER['XGB']['learning_rate'],
            presort='auto',
            n_estimators=cfg.CLASSIFIER['XGB']['trees'],
            subsample=1.0,
            max_depth=cfg.CLASSIFIER['XGB']['depth'],
            # BUGFIX: was random_state=cfg.CLASSIFIER['XGB'] (the whole dict)
            random_state=cfg.CLASSIFIER['XGB']['seed'],
            max_features='sqrt',
            verbose=0,
            warm_start=False)
    elif selected_classifier == 'RF':
        cls = RandomForestClassifier(
            n_estimators=cfg.CLASSIFIER['RF']['trees'],
            max_features='auto',
            max_depth=cfg.CLASSIFIER['RF']['depth'],
            n_jobs=cfg.N_JOBS,
            random_state=cfg.CLASSIFIER['RF']['seed'],
            oob_score=False,
            class_weight='balanced_subsample')
    elif selected_classifier == 'LDA':
        cls = LDA()
    elif selected_classifier == 'rLDA':
        cls = rLDA(cfg.CLASSIFIER['rLDA']['r_coeff'])
    else:
        logger.error('Unknown classifier type %s' % selected_classifier)
        raise ValueError

    # Setup features
    X_data = featdata['X_data']
    Y_data = featdata['Y_data']
    wlen = featdata['wlen']

    # Choose CV type
    ntrials, nsamples, fsize = X_data.shape
    selected_cv = cfg.CV_PERFORM['selected']
    if selected_cv == 'LeaveOneOut':
        logger.info_green('%d-fold leave-one-out cross-validation' % ntrials)
        if SKLEARN_OLD:
            cv = LeaveOneOut(len(Y_data))
        else:
            cv = LeaveOneOut()
    elif selected_cv == 'StratifiedShuffleSplit':
        logger.info_green(
            '%d-fold stratified cross-validation with test set ratio %.2f' %
            (cfg.CV_PERFORM[selected_cv]['folds'],
             cfg.CV_PERFORM[selected_cv]['test_ratio']))
        if SKLEARN_OLD:
            cv = StratifiedShuffleSplit(
                Y_data[:, 0],
                cfg.CV_PERFORM[selected_cv]['folds'],
                test_size=cfg.CV_PERFORM[selected_cv]['test_ratio'],
                random_state=cfg.CV_PERFORM[selected_cv]['seed'])
        else:
            cv = StratifiedShuffleSplit(
                n_splits=cfg.CV_PERFORM[selected_cv]['folds'],
                test_size=cfg.CV_PERFORM[selected_cv]['test_ratio'],
                random_state=cfg.CV_PERFORM[selected_cv]['seed'])
    else:
        # BUGFIX: log the scheme name; the old code formatted the parameter dict
        logger.error('%s is not supported yet. Sorry.' % selected_cv)
        raise NotImplementedError
    logger.info('%d trials, %d samples per trial, %d feature dimension' %
                (ntrials, nsamples, fsize))

    # Run cross-validation
    timer_cv = qc.Timer()
    scores, cm_txt = crossval_epochs(cv, X_data, Y_data, cls, cfg.tdef.by_value,
                                     cfg.CV['BALANCE_SAMPLES'], n_jobs=cfg.N_JOBS,
                                     ignore_thres=cfg.CV['IGNORE_THRES'],
                                     decision_thres=cfg.CV['DECISION_THRES'])
    t_cv = timer_cv.sec()

    # Build the report
    txt = 'Cross validation took %d seconds.\n' % t_cv
    txt += '\n- Class information\n'
    txt += '%d epochs, %d samples per epoch, %d feature dimension (total %d samples)\n' %\
        (ntrials, nsamples, fsize, ntrials * nsamples)
    for ev in np.unique(Y_data):
        txt += '%s: %d trials\n' % (cfg.tdef.by_value[ev],
                                    len(np.where(Y_data[:, 0] == ev)[0]))
    if cfg.CV['BALANCE_SAMPLES']:
        # BUGFIX: read the same key that enabled balancing; the old code read
        # cfg.BALANCE_SAMPLES, which does not exist in the dict-style config
        txt += 'The number of samples was balanced using %ssampling.\n' %\
            cfg.CV['BALANCE_SAMPLES'].lower()
    txt += '\n- Experiment condition\n'
    txt += 'Sampling frequency: %.3f Hz\n' % featdata['sfreq']
    txt += 'Spatial filter: %s (channels: %s)\n' % (cfg.SP_FILTER, cfg.SP_CHANNELS)
    txt += 'Spectral filter: %s\n' % cfg.TP_FILTER[cfg.TP_FILTER['selected']]
    txt += 'Notch filter: %s\n' % cfg.NOTCH_FILTER[cfg.NOTCH_FILTER['selected']]
    txt += 'Channels: ' + ','.join(
        [str(featdata['ch_names'][p]) for p in featdata['picks']]) + '\n'
    txt += 'PSD range: %.1f - %.1f Hz\n' % (cfg.FEATURES['PSD']['fmin'],
                                            cfg.FEATURES['PSD']['fmax'])
    txt += 'Window step: %.2f msec\n' % (
        1000.0 * cfg.FEATURES['PSD']['wstep'] / featdata['sfreq'])
    if type(wlen) is list:
        for i, w in enumerate(wlen):
            txt += 'Window size: %.1f msec\n' % (w * 1000.0)
            txt += 'Epoch range: %s sec\n' % (cfg.EPOCH[i])
    else:
        txt += 'Window size: %.1f msec\n' % (cfg.FEATURES['PSD']['wlen'] * 1000.0)
        txt += 'Epoch range: %s sec\n' % (cfg.EPOCH)
    txt += 'Decimation factor: %d\n' % cfg.FEATURES['PSD']['decim']

    # Compute stats
    cv_mean, cv_std = np.mean(scores), np.std(scores)
    txt += '\n- Average CV accuracy over %d epochs (random seed=%s)\n' % (
        ntrials, cfg.CV_PERFORM[selected_cv]['seed'])
    # BUGFIX: compare the scheme *name*; the old code compared the parameter
    # dict against strings, so this line was never written
    if selected_cv in ['LeaveOneOut', 'StratifiedShuffleSplit']:
        txt += "mean %.3f, std: %.3f\n" % (cv_mean, cv_std)
    txt += 'Classifier: %s, ' % selected_classifier
    if selected_classifier == 'RF':
        txt += '%d trees, %s max depth, random state %s\n' % (
            cfg.CLASSIFIER['RF']['trees'],
            cfg.CLASSIFIER['RF']['depth'],
            cfg.CLASSIFIER['RF']['seed'])
    elif selected_classifier == 'GB' or selected_classifier == 'XGB':
        # BUGFIX: report the selected classifier's own settings
        # (XGB previously reported the GB settings)
        txt += '%d trees, %s max depth, %s learing_rate, random state %s\n' % (
            cfg.CLASSIFIER[selected_classifier]['trees'],
            cfg.CLASSIFIER[selected_classifier]['depth'],
            cfg.CLASSIFIER[selected_classifier]['learning_rate'],
            cfg.CLASSIFIER[selected_classifier]['seed'])
    elif selected_classifier == 'rLDA':
        txt += 'regularization coefficient %.2f\n' % cfg.CLASSIFIER['rLDA']['r_coeff']
    if cfg.CV['IGNORE_THRES'] is not None:
        txt += 'Decision threshold: %.2f\n' % cfg.CV['IGNORE_THRES']
    txt += '\n- Confusion Matrix\n' + cm_txt
    logger.info(txt)

    # Export to a file
    if 'export_result' in cfg.CV_PERFORM[selected_cv] and\
            cfg.CV_PERFORM[selected_cv]['export_result'] is True:
        if cv_file is None:
            if cfg.EXPORT_CLS is True:
                qc.make_dirs('%s/classifier' % cfg.DATA_PATH)
                out_path = '%s/classifier/cv_result.txt' % cfg.DATA_PATH
            else:
                out_path = '%s/cv_result.txt' % cfg.DATA_PATH
        else:
            out_path = cv_file
        # context manager guarantees the handle is closed even on write errors
        with open(out_path, 'w') as fout:
            fout.write(txt)
def classify(self, decoder, true_label, title_text, bar_dirs, state='start'):
    """
    Run a single online-feedback trial.

    Drives a timer-based state machine:
        start -> gap_s -> gap -> cue -> dir_r -> dir -> feedback [-> return]
    In the 'dir' state, decoder probabilities are smoothed, optionally biased,
    and converted into a feedback bar position until T_CLASSIFY elapses (or the
    bar reaches 100 when premature_end is set).

    Parameters
    ----------
    decoder : object exposing get_prob_unread() -> list of class probabilities
              (None when no new probability is available yet)
    true_label : str, one of 'L','R','U','D','B' (the cued direction)
    title_text : str shown during the inter-trial gap
    bar_dirs : list of direction codes, one per decoder class
    state : str, initial state of the trial state machine

    Returns
    -------
    The final bar direction label, or None if ESC was pressed.

    NOTE(review): `dirs` and `keys` are read from enclosing/module scope —
    not visible in this file chunk; presumably direction-name and key-code
    lookup tables. Confirm where they are defined.
    """
    self.tm_trigger.reset()
    # index of the class that receives the additive probability bias
    if self.bar_bias is not None:
        bias_idx = bar_dirs.index(self.bar_bias[0])

    if self.logf is not None:
        self.logf.write('True label: %s\n' % true_label)

    tm_classify = qc.Timer()
    while True:
        # throttle the loop to the configured refresh rate
        self.tm_display.sleep_atleast(self.refresh_delay)
        self.tm_display.reset()
        if state == 'start' and self.tm_trigger.sec() > self.cfg.T_INIT:
            state = 'gap_s'
            self.bar.fill()
            self.tm_trigger.reset()
            self.trigger.signal(self.tdef.INIT)

        elif state == 'gap_s':
            # show the trial title during the inter-trial gap
            self.bar.put_text(title_text)
            state = 'gap'
            self.tm_trigger.reset()

        elif state == 'gap' and self.tm_trigger.sec() > self.cfg.T_GAP:
            state = 'cue'
            self.bar.fill()
            self.bar.draw_cue()
            self.bar.glass_draw_cue()
            self.trigger.signal(self.tdef.CUE)
            self.tm_trigger.reset()

        elif state == 'cue' and self.tm_trigger.sec() > self.cfg.T_READY:
            state = 'dir_r'
            # suppress on-screen feedback while drawing the cue for BODY mode
            if self.cfg.FEEDBACK_TYPE == 'BODY':
                self.bar.set_pc_feedback(False)
            self.bar.move(true_label, 100, overlay=False, barcolor='G')
            if self.cfg.FEEDBACK_TYPE == 'BODY':
                self.bar.set_pc_feedback(True)
                if self.cfg.SHOW_CUE is True:
                    self.bar.put_text(dirs[true_label], 'R')
            # hardware trigger: which direction was cued
            if true_label == 'L':  # left
                self.trigger.signal(self.tdef.LEFT_READY)
            elif true_label == 'R':  # right
                self.trigger.signal(self.tdef.RIGHT_READY)
            elif true_label == 'U':  # up
                self.trigger.signal(self.tdef.UP_READY)
            elif true_label == 'D':  # down
                self.trigger.signal(self.tdef.DOWN_READY)
            elif true_label == 'B':  # both hands
                self.trigger.signal(self.tdef.BOTH_READY)
            else:
                raise RuntimeError('Unknown direction %s' % true_label)
            self.tm_trigger.reset()
            # disabled external Rex robot control, kept for reference:
            #qc.print_c('Executing Rex action %s' % 'N', 'W')
            #os.system('%s/Rex/RexControlSimple.exe %s %s' % (pycnbi.ROOT, 'COM3', 'N'))

        elif state == 'dir_r' and self.tm_trigger.sec() > self.cfg.T_DIR_CUE:
            self.bar.fill()
            self.bar.draw_cue()
            self.bar.glass_draw_cue()
            state = 'dir'

            # initialize bar scores
            bar_label = bar_dirs[0]
            bar_score = 0
            # uniform prior over all directions
            probs = [1.0 / len(bar_dirs)] * len(bar_dirs)
            self.bar.move(bar_label, bar_score, overlay=False)
            probs_acc = np.zeros(len(probs))

            # hardware trigger: classification period starts
            if true_label == 'L':  # left
                self.trigger.signal(self.tdef.LEFT_GO)
            elif true_label == 'R':  # right
                self.trigger.signal(self.tdef.RIGHT_GO)
            elif true_label == 'U':  # up
                self.trigger.signal(self.tdef.UP_GO)
            elif true_label == 'D':  # down
                self.trigger.signal(self.tdef.DOWN_GO)
            elif true_label == 'B':  # both
                self.trigger.signal(self.tdef.BOTH_GO)
            else:
                raise RuntimeError('Unknown truedirection %s' % true_label)

            self.tm_watchdog.reset()
            self.tm_trigger.reset()

        elif state == 'dir':
            # classification period over, or bar full with premature end enabled
            if self.tm_trigger.sec() > self.cfg.T_CLASSIFY or (
                    self.premature_end and bar_score >= 100):
                if not hasattr(self.cfg, 'SHOW_RESULT') or self.cfg.SHOW_RESULT is True:
                    # show classfication result
                    if self.cfg.FEEDBACK_TYPE == 'BODY':
                        self.bar.move(bar_label, bar_score, overlay=False,
                                      barcolor='Y', caption=dirs[bar_label],
                                      caption_color='Y')
                    else:
                        self.bar.move(bar_label, 100, overlay=False, barcolor='Y')
                else:
                    self.bar.move(bar_label, bar_score, overlay=False, barcolor='Y',
                                  caption='TRIAL END', caption_color='Y')
                ########################## TEST WITH BAR #############################
                '''
                if self.cfg.FEEDBACK_TYPE == 'BODY':
                    self.bar.move(bar_label, bar_score, overlay=False, barcolor='Y', caption='TRIAL END', caption_color='Y')
                else:
                    self.bar.move(bar_label, 0, overlay=False, barcolor='Y')
                '''
                self.trigger.signal(self.tdef.FEEDBACK)

                # normalize accumulated probabilities for the debug report
                probs_acc /= sum(probs_acc)
                if self.cfg.DEBUG_PROBS:
                    msg = 'DEBUG: Accumulated probabilities = %s' % qc.list2string(probs_acc, '%.3f')
                    logger.info(msg)
                    if self.logf is not None:
                        self.logf.write(msg + '\n')
                if self.logf is not None:
                    self.logf.write('%s detected as %s (%d)\n\n' % (true_label, bar_label, bar_score))
                    self.logf.flush()

                # end of trial
                state = 'feedback'
                self.tm_trigger.reset()
            else:
                # classify
                probs_new = decoder.get_prob_unread()
                if probs_new is None:
                    # no fresh probability yet; warn if the stream stalls > 3 s
                    if self.tm_watchdog.sec() > 3:
                        logger.warning('No classification being done. Are you receiving data streams?')
                        self.tm_watchdog.reset()
                else:
                    self.tm_watchdog.reset()

                    # bias and accumulate
                    if self.bar_bias is not None:
                        probs_new[bias_idx] += self.bar_bias[1]
                        newsum = sum(probs_new)
                        probs_new = [p / newsum for p in probs_new]
                    probs_acc += np.array(probs_new)
                    # exponential smoothing with alpha1/alpha2 from __init__
                    for i in range(len(probs_new)):
                        probs[i] = probs[i] * self.alpha1 + probs_new[i] * self.alpha2
                    ''' Original: accumulate and bias
                    # accumulate probs
                    for i in range( len(probs_new) ):
                        probs[i]= probs[i] * self.alpha1 + probs_new[i] * self.alpha2
                    # bias bar
                    if self.bar_bias is not None:
                        probs[bias_idx] += self.bar_bias[1]
                        newsum= sum(probs)
                        probs= [p/newsum for p in probs]
                    '''

                    # determine the direction
                    max_pidx = qc.get_index_max(probs)
                    max_label = bar_dirs[max_pidx]

                    # move the bar unless positive-feedback mode suppresses
                    # movement for wrongly classified directions
                    if self.cfg.POSITIVE_FEEDBACK is False or \
                            (self.cfg.POSITIVE_FEEDBACK and true_label == max_label):
                        dx = probs[max_pidx]
                        # per-direction gain on the winning probability
                        if max_label == 'R':
                            dx *= self.bar_step_right
                        elif max_label == 'L':
                            dx *= self.bar_step_left
                        elif max_label == 'U':
                            dx *= self.bar_step_up
                        elif max_label == 'D':
                            dx *= self.bar_step_down
                        elif max_label == 'B':
                            dx *= self.bar_step_both
                        else:
                            logger.debug('Direction %s using bar step %d' % (max_label, self.bar_step_left))
                            dx *= self.bar_step_left

                        # disabled: slower in the beginning
                        #if self.tm_trigger.sec() < 2.0:
                        #    dx *= self.tm_trigger.sec() * 0.5

                        # add likelihoods
                        if max_label == bar_label:
                            bar_score += dx
                        else:
                            bar_score -= dx
                            # change of direction
                            if bar_score < 0:
                                bar_score = -bar_score
                                bar_label = max_label
                        bar_score = int(bar_score)
                        if bar_score > 100:
                            bar_score = 100
                        if self.cfg.FEEDBACK_TYPE == 'BODY':
                            if self.cfg.SHOW_CUE:
                                self.bar.move(bar_label, bar_score, overlay=False,
                                              caption=dirs[true_label], caption_color='G')
                            else:
                                self.bar.move(bar_label, bar_score, overlay=False)
                        else:
                            self.bar.move(bar_label, bar_score, overlay=False)

                    if self.cfg.DEBUG_PROBS:
                        if self.bar_bias is not None:
                            biastxt = '[Bias=%s%.3f] ' % (self.bar_bias[0], self.bar_bias[1])
                        else:
                            biastxt = ''
                        msg = '%s%s raw %s acc %s bar %s%d (%.1f ms)' % \
                            (biastxt, bar_dirs, qc.list2string(probs_new, '%.2f'),
                             qc.list2string(probs, '%.2f'), bar_label, bar_score,
                             tm_classify.msec())
                        logger.info(msg)
                        if self.logf is not None:
                            self.logf.write(msg + '\n')
                    # restart the per-classification stopwatch
                    tm_classify.reset()

        elif state == 'feedback' and self.tm_trigger.sec() > self.cfg.T_FEEDBACK:
            self.trigger.signal(self.tdef.BLANK)
            if self.cfg.FEEDBACK_TYPE == 'BODY':
                # BODY feedback animates the bar back to zero first
                state = 'return'
                self.tm_trigger.reset()
            else:
                state = 'gap_s'
                self.bar.fill()
                self.bar.update()
                return bar_label

        elif state == 'return':
            # shrink the bar by 5 per frame until it reaches zero
            self.bar.set_glass_feedback(False)
            self.bar.move(bar_label, bar_score, overlay=False, barcolor='Y')
            self.bar.set_glass_feedback(True)
            bar_score -= 5
            if bar_score <= 0:
                state = 'gap_s'
                self.bar.fill()
                self.bar.update()
                return bar_label

        self.bar.update()
        key = 0xFF & cv2.waitKey(1)
        if key == keys['esc']:
            # abort the whole trial
            return None
        if key == keys['space']:
            # manual reset of the bar and probabilities
            dx = 0
            bar_score = 0
            probs = [1.0 / len(bar_dirs)] * len(bar_dirs)
            self.bar.move(bar_dirs[0], bar_score, overlay=False)
            self.bar.update()
            logger.info('probs and dx reset.')
def config_run(cfg_module):
    """
    Run the offline cue protocol defined in a config file.

    Shows a randomized sequence of direction cues with a growing feedback
    bar and sends hardware trigger codes at each protocol event.

    Parameters
    ----------
    cfg_module : str
        Path of the config .py file, loaded with imp.load_source.
    """
    cfg = imp.load_source(cfg_module, cfg_module)
    tdef = trigger_def(cfg.TRIGGER_DEF)
    refresh_delay = 1.0 / cfg.REFRESH_RATE

    # OpenCV key codes used by the visualizer
    keys = {'left':81, 'right':83, 'up':82, 'down':84, 'pgup':85, 'pgdn':86,
            'home':80, 'end':87, 'space':32, 'esc':27, ',':44, '.':46,
            's':115, 'c':99, '[':91, ']':93, '1':49, '!':33, '2':50,
            '@':64, '3':51, '#':35}

    # randomized cue order: each direction repeated TRIALS_EACH times
    dir_sequence = []
    for _ in range(cfg.TRIALS_EACH):
        dir_sequence.extend(cfg.DIRECTIONS)
    random.shuffle(dir_sequence)
    num_trials = len(cfg.DIRECTIONS) * cfg.TRIALS_EACH

    event = 'start'
    trial = 1

    # Hardware trigger; fall back to a mock trigger on connection failure
    if cfg.TRIGGER_DEVICE is None:
        input('\n** Warning: No trigger device set. Press Ctrl+C to stop or Enter to continue.')
    trigger = pyLptControl.Trigger(cfg.TRIGGER_DEVICE)
    if trigger.init(50) == False:
        print('\n** Error connecting to USB2LPT device. Use a mock trigger instead?')
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = pyLptControl.MockTrigger()
        trigger.init(50)

    timer_trigger = qc.Timer()
    timer_dir = qc.Timer()
    timer_refresh = qc.Timer()

    bar = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                    screen_size=cfg.SCREEN_SIZE)
    bar.fill()
    bar.glass_draw_cue()

    # state machine: start -> gap_s -> gap -> cue -> dir_r -> dir -> gap_s ...
    while trial <= num_trials:
        timer_refresh.sleep_atleast(refresh_delay)
        timer_refresh.reset()

        # segment= { 'cue':(s,e), 'dir':(s,e), 'label':0-4 } (zero-based)
        if event == 'start' and timer_trigger.sec() > cfg.T_INIT:
            event = 'gap_s'
            bar.fill()
            timer_trigger.reset()
            trigger.signal(tdef.INIT)
        elif event == 'gap_s':
            bar.put_text('Trial %d / %d' % (trial, num_trials))
            event = 'gap'
        elif event == 'gap' and timer_trigger.sec() > cfg.T_GAP:
            event = 'cue'
            bar.fill()
            bar.draw_cue()
            trigger.signal(tdef.CUE)
            timer_trigger.reset()
        elif event == 'cue' and timer_trigger.sec() > cfg.T_CUE:
            event = 'dir_r'
            # renamed from `dir` (shadowed the builtin)
            cue_dir = dir_sequence[trial - 1]
            if cue_dir == 'L':  # left
                bar.move('L', 100, overlay=True)
                trigger.signal(tdef.LEFT_READY)
            elif cue_dir == 'R':  # right
                bar.move('R', 100, overlay=True)
                trigger.signal(tdef.RIGHT_READY)
            elif cue_dir == 'U':  # up
                bar.move('U', 100, overlay=True)
                trigger.signal(tdef.UP_READY)
            elif cue_dir == 'D':  # down
                bar.move('D', 100, overlay=True)
                trigger.signal(tdef.DOWN_READY)
            elif cue_dir == 'B':  # both hands
                bar.move('L', 100, overlay=True)
                bar.move('R', 100, overlay=True)
                trigger.signal(tdef.BOTH_READY)
            else:
                # BUGFIX: was '%d' % dir with a string direction, which raised
                # TypeError instead of the intended RuntimeError message
                raise RuntimeError('Unknown direction %s' % cue_dir)
            timer_trigger.reset()
        elif event == 'dir_r' and timer_trigger.sec() > cfg.T_DIR_READY:
            bar.fill()
            bar.draw_cue()
            event = 'dir'
            timer_trigger.reset()
            timer_dir.reset()
            if cue_dir == 'L':  # left
                trigger.signal(tdef.LEFT_GO)
            elif cue_dir == 'R':  # right
                trigger.signal(tdef.RIGHT_GO)
            elif cue_dir == 'U':  # up
                trigger.signal(tdef.UP_GO)
            elif cue_dir == 'D':  # down
                trigger.signal(tdef.DOWN_GO)
            elif cue_dir == 'B':  # both
                trigger.signal(tdef.BOTH_GO)
            else:
                # BUGFIX: same '%d'-with-string format error as above
                raise RuntimeError('Unknown direction %s' % cue_dir)
        elif event == 'dir' and timer_trigger.sec() > cfg.T_DIR:
            event = 'gap_s'
            bar.fill()
            trial += 1
            print('trial ' + str(trial - 1) + ' done')
            trigger.signal(tdef.BLANK)
            timer_trigger.reset()

        # protocol: animate the bar filling up over T_DIR seconds
        if event == 'dir':
            dx = min(100, int(100.0 * timer_dir.sec() / cfg.T_DIR) + 1)
            if cue_dir == 'L':  # L
                bar.move('L', dx, overlay=True)
            elif cue_dir == 'R':  # R
                bar.move('R', dx, overlay=True)
            elif cue_dir == 'U':  # U
                bar.move('U', dx, overlay=True)
            elif cue_dir == 'D':  # D
                bar.move('D', dx, overlay=True)
            elif cue_dir == 'B':  # Both
                bar.move('L', dx, overlay=True)
                bar.move('R', dx, overlay=True)

        # wait for start
        if event == 'start':
            bar.put_text('Waiting to start')

        bar.update()
        key = 0xFF & cv2.waitKey(1)
        if key == keys['esc']:
            break
def __init__(self, image_path, use_glass=False, glass_feedback=True, pc_feedback=True, screen_pos=None, screen_size=None):
    """
    Image-based feedback visualizer.

    Loads left/right feedback image sequences (from a directory of images or
    a gzipped pickle), connects to the Glass controller, and opens a
    fullscreen OpenCV window.

    Input:
        image_path: directory containing 'left'/'right' image folders, or a
                    .pkl file with keys 'left_images'/'right_images'
        use_glass: if False, mock Glass will be used
        glass_feedback: show feedback to the user?
        pc_feedback: show feedback on the pc screen?
        screen_pos: screen position in (x,y)
        screen_size: screen size in (x,y)
    """
    # screen size and message setting
    if screen_size is None:
        # default: query the primary display on Windows, else 1024x768
        if sys.platform.startswith('win'):
            from win32api import GetSystemMetrics
            screen_width = GetSystemMetrics(0)
            screen_height = GetSystemMetrics(1)
        else:
            screen_width = 1024
            screen_height = 768
        screen_size = (screen_width, screen_height)
    else:
        screen_width, screen_height = screen_size
    if screen_pos is None:
        screen_x, screen_y = (0, 0)
    else:
        screen_x, screen_y = screen_pos

    self.text_size = 2
    # the frame buffer drawn into the OpenCV window (BGR)
    self.img = np.zeros((screen_height, screen_width, 3), np.uint8)
    self.glass = bgi_client.GlassControl(mock=not use_glass)
    self.glass.connect('127.0.0.1', 59900)
    self.set_glass_feedback(glass_feedback)
    self.set_pc_feedback(pc_feedback)
    self.set_cue_color(boxcol='B', crosscol='W')
    self.width = self.img.shape[1]
    self.height = self.img.shape[0]

    # bar geometry around the screen center
    # NOTE(review): self.barwidth is read here but never assigned in this
    # method — presumably a class attribute defined elsewhere; confirm.
    hw = int(self.barwidth / 2)
    self.cx = int(self.width / 2)
    self.cy = int(self.height / 2)
    self.xl1 = self.cx - hw
    self.xl2 = self.xl1 - self.barwidth
    self.xr1 = self.cx + hw
    self.xr2 = self.xr1 + self.barwidth
    self.yl1 = self.cy - hw
    self.yl2 = self.yl1 - self.barwidth
    self.yr1 = self.cy + hw
    self.yr2 = self.yr1 + self.barwidth

    if os.path.isdir(image_path):
        # load images from left/ and right/ subdirectories
        left_image_path = '%s/left' % image_path
        right_image_path = '%s/right' % image_path
        tm = qc.Timer()
        print('Reading images from %s' % left_image_path)
        self.left_images = read_images(left_image_path, screen_size)
        print('Reading images from %s' % right_image_path)
        self.right_images = read_images(right_image_path, screen_size)
        print('Took %.1f s' % tm.sec())
    else:
        # load pickled images
        # note: this is painfully slow in Python 2 even with cPickle (3s vs 27s)
        assert image_path[-4:] == '.pkl', 'The file must be of .pkl format'
        print('Loading image binary file %s ...' % image_path, end=' ')
        tm = qc.Timer()
        with gzip.open(image_path, 'rb') as fp:
            image_data = pickle.load(fp)
        self.left_images = image_data['left_images']
        self.right_images = image_data['right_images']
        # paste each image centered into a screen-sized black canvas
        feedback_w = self.left_images[0].shape[1] / 2
        feedback_h = self.left_images[0].shape[0] / 2
        loc_x = [int(self.cx - feedback_w), int(self.cx + feedback_w)]
        loc_y = [int(self.cy - feedback_h), int(self.cy + feedback_h)]
        img_fit = np.zeros((screen_height, screen_width, 3), np.uint8)
        # adjust to the current screen size
        print('Fitting image into the current screen size')
        for i, img in enumerate(self.left_images):
            img_fit = np.zeros((screen_height, screen_width, 3), np.uint8)
            img_fit[loc_y[0]:loc_y[1], loc_x[0]:loc_x[1]] = img
            self.left_images[i] = img_fit
        for i, img in enumerate(self.right_images):
            img_fit = np.zeros((screen_height, screen_width, 3), np.uint8)
            img_fit[loc_y[0]:loc_y[1], loc_x[0]:loc_x[1]] = img
            self.right_images[i] = img_fit
        print('Took %.1f s' % tm.sec())
    print('Done.')

    # fullscreen window at the requested screen position
    cv2.namedWindow("Protocol", cv2.WND_PROP_FULLSCREEN)
    cv2.moveWindow("Protocol", screen_x, screen_y)
    cv2.setWindowProperty("Protocol", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
def __init__(self):
    """Set up the TCP client socket and the direction/timing state."""
    # IPv4 stream socket; the connection itself is made elsewhere.
    self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Fixed receive-buffer size — presumably used by recv() calls in this
    # class (not visible here; confirm).
    self.BUFFER_SIZE = 1024
    # Timer for pacing/timestamping, starts immediately.
    self.timer = qc.Timer()
    # Last direction command, initialized to left.
    self.last_dir = 'L'
import gzip
import pycnbi.utils.q_common as qc
from pycnbi.protocols.viz_human import read_images

try:
    import cPickle as pickle  # Python 2 (cPickle = C version of pickle)
except ImportError:
    import pickle  # Python 3 (C version is the default)

if __name__ == '__main__':
    # Source image folders and export destination (Windows paths; edit to suit).
    LEFT_IMAGE_DIR = r'D:\work\pycnbi_protocols\BodyFeedback\left_behind'
    RIGHT_IMAGE_DIR = r'D:\work\pycnbi_protocols\BodyFeedback\right_behind'
    EXPORT_IMAGE_DIR = r'D:\work\pycnbi_protocols\BodyFeedback'

    # Require pickle protocol >= 4 (large-object support, faster loading).
    if pickle.HIGHEST_PROTOCOL >= 4:
        outfile = '%s/BodyVisuals.pkl' % EXPORT_IMAGE_DIR
        tm = qc.Timer()
        print('Reading images from %s' % LEFT_IMAGE_DIR)
        left_images = read_images(LEFT_IMAGE_DIR)
        print('Reading images from %s' % RIGHT_IMAGE_DIR)
        right_images = read_images(RIGHT_IMAGE_DIR)
        print('Took %.1f s. Start exporting ...' % tm.sec())
        # Bundle both image lists into one gzipped pickle.
        img_data = {'left_images': left_images, 'right_images': right_images}
        with gzip.open(outfile, 'wb') as fp:
            pickle.dump(img_data, fp)
        print('Exported to %s' % outfile)
    else:
        # NOTE(review): on protocol < 4 nothing is exported at all, even though
        # the message only warns about slow loading — confirm this is intended.
        print(
            'Your Python pickle protocol version is less than 4, which will be slower with loading a pickle object.'
        )
def test_receiver():
    """
    Manual smoke test of StreamReceiver: connects to an LSL stream and
    endlessly prints window timestamps, trigger events, selected-channel
    values, and (optionally) per-channel mean PSD. Runs until killed.
    """
    import mne
    import os

    CH_INDEX = [1]  # channel to monitor
    TIME_INDEX = None  # integer or None. None = average of raw values of the current window
    SHOW_PSD = False
    mne.set_log_level('ERROR')
    os.environ[
        'OMP_NUM_THREADS'] = '1'  # actually improves performance for multitaper

    # connect to LSL server
    amp_name, amp_serial = pu.search_lsl()
    sr = StreamReceiver(window_size=1, buffer_size=1, amp_serial=amp_serial,
                        eeg_only=False, amp_name=amp_name)
    sfreq = sr.get_sample_rate()
    trg_ch = sr.get_trigger_channel()
    logger.info('Trigger channel = %d' % trg_ch)

    # PSD init (only when SHOW_PSD is enabled above)
    if SHOW_PSD:
        psde = mne.decoding.PSDEstimator(sfreq=sfreq, fmin=1, fmax=50, bandwidth=None, \
            adaptive=False, low_bias=True, n_jobs=1, normalization='length', verbose=None)

    watchdog = qc.Timer()
    tm = qc.Timer(autoreset=True)
    last_ts = 0
    while True:
        sr.acquire()
        window, tslist = sr.get_window()  # window = [samples x channels]
        window = window.T  # channel x samples

        # elapsed time from the last sample's LSL timestamp to now
        qc.print_c('LSL Diff = %.3f' % (pylsl.local_clock() - tslist[-1]), 'G')

        # print event values of samples newer than the previous iteration
        tsnew = np.where(np.array(tslist) > last_ts)[0]
        if len(tsnew) == 0:
            logger.warning('There seems to be delay in receiving data.')
            time.sleep(1)
            continue
        trigger = np.unique(window[trg_ch, tsnew[0]:])

        # for Biosemi
        # if sr.amp_name=='BioSemi':
        #     trigger= set( [255 & int(x-1) for x in trigger ] )

        if len(trigger) > 0:
            logger.info('Triggers: %s' % np.array(trigger))

        logger.info('[%.1f] Receiving data...' % watchdog.sec())

        if TIME_INDEX is None:
            # average the monitored channels over the whole window
            datatxt = qc.list2string(np.mean(window[CH_INDEX, :], axis=1), '%-15.6f')
            print('[%.3f : %.3f]' % (tslist[0], tslist[-1]) + ' data: %s' % datatxt)
        else:
            # single-sample readout at TIME_INDEX
            datatxt = qc.list2string(window[CH_INDEX, TIME_INDEX], '%-15.6f')
            print('[%.3f]' % tslist[TIME_INDEX] + ' data: %s' % datatxt)

        # show PSD
        if SHOW_PSD:
            psd = psde.transform(
                window.reshape((1, window.shape[0], window.shape[1])))
            psd = psd.reshape((psd.shape[1], psd.shape[2]))
            psdmean = np.mean(psd, axis=1)
            for p in psdmean:
                print('%.1f' % p, end=' ')

        last_ts = tslist[-1]
        # cap the polling rate at ~20 Hz
        tm.sleep_atleast(0.05)
def record(self):
    """
    Continue recording data until the record stop button is pressed,
    the recorded data are firstly saved in a buffer which will be saved to
    a csv file when recording finishes.

    Loops while self.is_recording_running is True, acquiring from the
    stream receiver and logging elapsed duration once per second; on exit,
    dumps the whole buffer to CSV and flushes the receiver.

    TODO: Save data to file during recording
    """
    # NOTE(review): computed but never used in this method — dead local?
    timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())

    # start recording
    logger.info('\n>> Recording started (PID %d).' % os.getpid())
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while self.is_recording_running:
        self.streamReceiver.acquire("recorder using")
        # once per second of buffered data, log the running duration
        if self.streamReceiver.get_buflen() > next_sec:
            # print("\nbuffer length: ",self.streamReceiver.get_buflen())
            duration = str(
                datetime.timedelta(
                    seconds=int(self.streamReceiver.get_buflen())))
            logger.info('RECORDING %s' % duration)
            # logger.info('\nLSL clock: %s' % self.streamReceiver.get_lsl_clock())
            # logger.info('Server timestamp = %s' % self.streamReceiver.get_server_clock())
            # logger.info('offset {}'.format(self.streamReceiver.get_lsl_clock() - self.streamReceiver.get_server_clock()))
            #
            # self.lsl_time_list.append(self.streamReceiver.get_lsl_clock())
            # self.server_time_list.append(self.streamReceiver.get_server_clock())
            # self.offset_time_list.append(self.streamReceiver.get_lsl_offset())
            next_sec += 1
        # keep the latest MRCP-sized window available for online use
        self.streamReceiver.set_window_size(self.MRCP_window_size)
        self.current_window, self.current_time_stamps = self.streamReceiver.get_window()
        tm.sleep_atleast(0.001)

    # recording stopped: snapshot the full buffer and export it
    buffers, times = self.streamReceiver.get_buffer()
    signals = buffers
    events = None
    data = {
        'signals': signals,
        'timestamps': times,
        'events': events,
        'sample_rate': self.streamReceiver.get_sample_rate(),
        'channels': self.streamReceiver.get_num_channels(),
        'ch_names': self.streamReceiver.get_channel_names(),
        'lsl_time_offset': self.streamReceiver.lsl_time_offset
    }
    logger.info('Saving raw data ...')
    self.write_recorded_data_to_csv(data)

    # shifted copy of the LSL clock list (debug aid for inter-sample deltas)
    temp_lsl_list = self.lsl_time_list.copy()
    temp_lsl_list.insert(0, 0)
    temp_lsl_list.pop()
    # print("lsl clock", self.lsl_time_list)
    # print('temp lsl list', temp_lsl_list)
    # print(np.subtract(self.lsl_time_list, temp_lsl_list))
    # print("timestamp len before flush", len(self.streamReceiver.timestamps[0]))
    # print("buffer len before flushing: ", len(self.streamReceiver.buffers[0]))
    self.streamReceiver.flush_buffer()
    print("timestamp len after flush", len(self.streamReceiver.timestamps[0]))
    print("buffer len after flushing: ", len(self.streamReceiver.buffers[0]))
def raw2psd(rawfile=None, fmin=1, fmax=40, wlen=0.5, wstep=1, tmin=0.0,
            tmax=None, channel_picks=None, excludes=[], n_jobs=1):
    """
    Compute PSD features over a sliding window on the entire raw file.
    Leading edge of the window is the time reference, i.e. do not use future data.

    Input
    =====
    rawfile: fif file.
    channel_picks: None or list of channel names
    tmin (sec): start time of the PSD window relative to the event onset.
    tmax (sec): end time of the PSD window relative to the event onset. None = until the end.
    fmin (Hz): minimum PSD frequency
    fmax (Hz): maximum PSD frequency
    wlen (sec): sliding window length for computing PSD (sec)
    wstep (int): sliding window step (time samples)
    excludes (list): list of channels to exclude
        (mutable default is safe here: the list is only read, never mutated)

    Output: writes psd-<name>-header.pkl and psd-<name>-data.npy next to rawfile.
    """
    raw, eve = pu.load_raw(rawfile)
    sfreq = raw.info['sfreq']
    wframes = int(round(sfreq * wlen))
    raw_eeg = raw.pick_types(meg=False, eeg=True, stim=False, exclude=excludes)
    if channel_picks is None:
        rawdata = raw_eeg._data
        # BUGFIX: keep integer indices here. The export step below indexes
        # raw.ch_names with these values; the old code stored the channel
        # *names* themselves, which made the export crash (TypeError) whenever
        # channel_picks was None.
        chlist = list(range(len(raw.ch_names)))
    else:
        chlist = []
        for ch in channel_picks:
            chlist.append(raw.ch_names.index(ch))
        rawdata = raw_eeg._data[np.array(chlist)]

    if tmax is None:
        t_end = rawdata.shape[1]
    else:
        t_end = int(round(tmax * sfreq))
    t_start = int(round(tmin * sfreq)) + wframes
    psde = mne.decoding.PSDEstimator(sfreq, fmin=fmin, fmax=fmax, n_jobs=1,\
        bandwidth=None, low_bias=True, adaptive=False, normalization='length',
        verbose=None)
    print('[PID %d] %s' % (os.getpid(), rawfile))
    psd_all = []
    evelist = []
    times = []
    t_len = t_end - t_start
    last_eve = 0
    y_i = 0
    t_last = t_start
    tm = qc.Timer()
    for t in range(t_start, t_end, wstep):
        # compute PSD on the window ending at sample t (no future data)
        window = rawdata[:, t - wframes:t]
        psd = psde.transform(
            window.reshape((1, window.shape[0], window.shape[1])))
        psd = psd.reshape(psd.shape[1], psd.shape[2])
        psd_all.append(psd)
        times.append(t)

        # matching events at the current window: carry the last event code
        if y_i < eve.shape[0] and t >= eve[y_i][0]:
            last_eve = eve[y_i][2]
            y_i += 1
        evelist.append(last_eve)

        # progress report roughly once a second
        if tm.sec() >= 1:
            perc = (t - t_start) / t_len
            fps = (t - t_last) / wstep
            est = (t_end - t) / wstep / fps
            print('[PID %d] %.1f%% (%.1f FPS, %ds left)' %
                  (os.getpid(), perc * 100.0, fps, est))
            t_last = t
            tm.reset()
    print('Finished.')

    # export data
    try:
        chnames = [raw.ch_names[ch] for ch in chlist]
        psd_all = np.array(psd_all)
        [basedir, fname, fext] = qc.parse_path_list(rawfile)
        fout_header = '%s/psd-%s-header.pkl' % (basedir, fname)
        fout_psd = '%s/psd-%s-data.npy' % (basedir, fname)
        header = {
            'psdfile': fout_psd,
            'times': np.array(times),
            'sfreq': sfreq,
            'channels': chnames,
            'wframes': wframes,
            'events': evelist
        }
        qc.save_obj(fout_header, header)
        np.save(fout_psd, psd_all)
        print('Exported to:\n(header) %s\n(numpy array) %s' % (fout_header, fout_psd))
    except:
        # Deliberate catch-all: drop into an interactive shell so the
        # (potentially hours of) computed PSD data is not lost on an export
        # error. Flagged for review: a bare except also swallows
        # KeyboardInterrupt/SystemExit.
        import traceback
        print('(%s) Unexpected error occurred while exporting data. Dropping you into a shell for recovery.' %\
              os.path.basename(__file__))
        traceback.print_exc()
        from IPython import embed
        embed()
def train_decoder(cfg, featdata, feat_file=None):
    """
    Train the final decoder using all data and export it to disk.

    Input
    =====
    cfg: config object (CLASSIFIER, FEATURES, CV, DATA_PATH, ... attributes).
    featdata: dict of computed features (X_data, Y_data, wlen, w_frames,
        ch_names, picks, psde, sfreq, ...).
    feat_file: optional output path for the good-features report.
        None = '<DATA_PATH>/classifier/good_features.txt'.
    """
    # Init a classifier
    selected_classifier = cfg.CLASSIFIER['selected']
    if selected_classifier == 'GB':
        cls = GradientBoostingClassifier(
            loss='deviance',
            learning_rate=cfg.CLASSIFIER[selected_classifier]['learning_rate'],
            n_estimators=cfg.CLASSIFIER[selected_classifier]['trees'],
            subsample=1.0,
            max_depth=cfg.CLASSIFIER[selected_classifier]['depth'],
            random_state=cfg.CLASSIFIER[selected_classifier]['seed'],
            max_features='sqrt', verbose=0, warm_start=False, presort='auto')
    elif selected_classifier == 'XGB':
        cls = XGBClassifier(
            loss='deviance',
            learning_rate=cfg.CLASSIFIER[selected_classifier]['learning_rate'],
            n_estimators=cfg.CLASSIFIER[selected_classifier]['trees'],
            subsample=1.0,
            max_depth=cfg.CLASSIFIER[selected_classifier]['depth'],
            # BUGFIX: read the seed from this classifier's config section
            # (was cfg.GB['seed'], inconsistent with every other branch)
            random_state=cfg.CLASSIFIER[selected_classifier]['seed'],
            max_features='sqrt', verbose=0, warm_start=False, presort='auto')
    elif selected_classifier == 'RF':
        cls = RandomForestClassifier(
            n_estimators=cfg.CLASSIFIER[selected_classifier]['trees'],
            max_features='auto',
            max_depth=cfg.CLASSIFIER[selected_classifier]['depth'],
            n_jobs=cfg.N_JOBS,
            random_state=cfg.CLASSIFIER[selected_classifier]['seed'],
            oob_score=False, class_weight='balanced_subsample')
    elif selected_classifier == 'LDA':
        cls = LDA()
    elif selected_classifier == 'rLDA':
        # BUGFIX: 'r_coeff' must be a string key (was a bare undefined name -> NameError)
        cls = rLDA(cfg.CLASSIFIER[selected_classifier]['r_coeff'])
    else:
        logger.error('Unknown classifier %s' % selected_classifier)
        raise ValueError('Unknown classifier %s' % selected_classifier)

    # Setup features
    X_data = featdata['X_data']
    Y_data = featdata['Y_data']
    wlen = featdata['wlen']
    if cfg.FEATURES['PSD']['wlen'] is None:
        cfg.FEATURES['PSD']['wlen'] = wlen
    w_frames = featdata['w_frames']
    ch_names = featdata['ch_names']
    X_data_merged = np.concatenate(X_data)
    Y_data_merged = np.concatenate(Y_data)
    if cfg.CV['BALANCE_SAMPLES']:
        X_data_merged, Y_data_merged = balance_samples(
            X_data_merged, Y_data_merged, cfg.CV['BALANCE_SAMPLES'], verbose=True)

    # Start training the decoder
    logger.info_green('Training the decoder')
    timer = qc.Timer()
    cls.n_jobs = cfg.N_JOBS
    cls.fit(X_data_merged, Y_data_merged)
    logger.info('Trained %d samples x %d dimension in %.1f sec' %\
        (X_data_merged.shape[0], X_data_merged.shape[1], timer.sec()))
    cls.n_jobs = 1  # always set n_jobs=1 for testing

    # Export the decoder
    classes = {c: cfg.tdef.by_value[c] for c in np.unique(Y_data)}
    if cfg.FEATURES['selected'] == 'PSD':
        data = dict(cls=cls, ch_names=ch_names, psde=featdata['psde'],
                    sfreq=featdata['sfreq'], picks=featdata['picks'],
                    classes=classes, epochs=cfg.EPOCH, w_frames=w_frames,
                    w_seconds=cfg.FEATURES['PSD']['wlen'],
                    wstep=cfg.FEATURES['PSD']['wstep'],
                    spatial=cfg.SP_FILTER, spatial_ch=featdata['picks'],
                    spectral=cfg.TP_FILTER[cfg.TP_FILTER['selected']],
                    spectral_ch=featdata['picks'],
                    notch=cfg.NOTCH_FILTER[cfg.NOTCH_FILTER['selected']],
                    notch_ch=featdata['picks'],
                    multiplier=cfg.MULTIPLIER,
                    ref_ch=cfg.REREFERENCE[cfg.REREFERENCE['selected']],
                    decim=cfg.FEATURES['PSD']['decim'])
    clsfile = '%s/classifier/classifier-%s.pkl' % (cfg.DATA_PATH, platform.architecture()[0])
    qc.make_dirs('%s/classifier' % cfg.DATA_PATH)
    qc.save_obj(clsfile, data)
    logger.info('Decoder saved to %s' % clsfile)

    # Reverse-lookup frequency from FFT: bin resolution is 1/window-length
    fq = 0
    if type(cfg.FEATURES['PSD']['wlen']) == list:
        fq_res = 1.0 / cfg.FEATURES['PSD']['wlen'][0]
    else:
        fq_res = 1.0 / cfg.FEATURES['PSD']['wlen']
    fqlist = []
    while fq <= cfg.FEATURES['PSD']['fmax']:
        if fq >= cfg.FEATURES['PSD']['fmin']:
            fqlist.append(fq)
        fq += fq_res

    # Show top distinctive features
    if cfg.FEATURES['selected'] == 'PSD':
        logger.info_green('Good features ordered by importance')
        if selected_classifier in ['RF', 'GB', 'XGB']:
            keys, values = qc.sort_by_value(list(cls.feature_importances_), rev=True)
        elif selected_classifier in ['LDA', 'rLDA']:
            keys, values = qc.sort_by_value(cls.w, rev=True)
        keys = np.array(keys)
        values = np.array(values)
        if cfg.EXPORT_GOOD_FEATURES:
            if feat_file is None:
                gfout = open('%s/classifier/good_features.txt' % cfg.DATA_PATH, 'w')
            else:
                gfout = open(feat_file, 'w')
        if type(wlen) is not list:
            ch_names = [ch_names[c] for c in featdata['picks']]
        else:
            # BUGFIX: keep the original names around; the previous code
            # indexed the freshly-emptied ch_names list (IndexError).
            ch_names_org = ch_names
            ch_names = []
            for w in range(len(wlen)):
                for c in featdata['picks']:
                    ch_names.append('w%d-%s' % (w, ch_names_org[c]))
        chlist, hzlist = features.feature2chz(keys, fqlist, ch_names=ch_names)
        valnorm = values[:cfg.FEAT_TOPN].copy()
        valsum = np.sum(valnorm)
        if valsum == 0:
            valsum = 1
        valnorm = valnorm / valsum * 100.0

        # show top-N features
        for i, (ch, hz) in enumerate(zip(chlist, hzlist)):
            if i >= cfg.FEAT_TOPN:
                break
            txt = '%-3s %5.1f Hz normalized importance %-6s raw importance %-6s feature %-5d' %\
                (ch, hz, '%.2f%%' % valnorm[i], '%.2f%%' % (values[i] * 100.0), keys[i])
            logger.info(txt)

        if cfg.EXPORT_GOOD_FEATURES:
            gfout.write('Importance(%) Channel Frequency Index\n')
            for i, (ch, hz) in enumerate(zip(chlist, hzlist)):
                gfout.write('%.3f\t%s\t%s\t%d\n' % (values[i] * 100.0, ch, hz, keys[i]))
            gfout.close()
def config_run(cfg_module):
    """
    Run the cue-based training protocol defined by the given config module.

    Shows visual cues (bar or body image), sends a hardware trigger at every
    protocol state transition and optionally drives a STIMO stimulator.
    """
    cfg = load_cfg(cfg_module)

    # cv2.waitKey key codes for the visualizer
    keys = {'left': 81, 'right': 83, 'up': 82, 'down': 84, 'pgup': 85,
            'pgdn': 86, 'home': 80, 'end': 87, 'space': 32, 'esc': 27,
            ',': 44, '.': 46, 's': 115, 'c': 99, '[': 91, ']': 93,
            '1': 49, '!': 33, '2': 50, '@': 64, '3': 51, '#': 35}
    color = dict(G=(20, 140, 0), B=(210, 0, 0), R=(0, 50, 200),
                 Y=(0, 215, 235), K=(0, 0, 0), W=(255, 255, 255),
                 w=(200, 200, 200))

    # build a randomized sequence of cue directions
    dir_sequence = []
    for x in range(cfg.TRIALS_EACH):
        dir_sequence.extend(cfg.DIRECTIONS)
    random.shuffle(dir_sequence)
    num_trials = len(cfg.DIRECTIONS) * cfg.TRIALS_EACH

    tdef = trigger_def(cfg.TRIGGER_DEF)
    refresh_delay = 1.0 / cfg.REFRESH_RATE
    state = 'start'
    trial = 1

    # STIMO protocol
    if cfg.WITH_STIMO is True:
        print('Opening STIMO serial port (%s / %d bps)' % (cfg.STIMO_COMPORT, cfg.STIMO_BAUDRATE))
        import serial
        ser = serial.Serial(cfg.STIMO_COMPORT, cfg.STIMO_BAUDRATE)
        print('STIMO serial port %s is_open = %s' % (cfg.STIMO_COMPORT, ser.is_open))

    # init trigger; fall back to a mock trigger on connection failure
    if cfg.TRIGGER_DEVICE is None:
        input('\n** Warning: No trigger device set. Press Ctrl+C to stop or Enter to continue.')
    trigger = pyLptControl.Trigger(cfg.TRIGGER_DEVICE)
    if trigger.init(50) == False:
        print('\n# Error connecting to USB2LPT device. Use a mock trigger instead?')
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = pyLptControl.MockTrigger()
        trigger.init(50)

    # visual feedback
    if cfg.FEEDBACK_TYPE == 'BAR':
        from pycnbi.protocols.viz_bars import BarVisual
        visual = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                           screen_size=cfg.SCREEN_SIZE)
    elif cfg.FEEDBACK_TYPE == 'BODY':
        if not hasattr(cfg, 'IMAGE_PATH'):
            raise ValueError('IMAGE_PATH is undefined in your config.')
        from pycnbi.protocols.viz_human import BodyVisual
        visual = BodyVisual(cfg.IMAGE_PATH, use_glass=cfg.GLASS_USE,
                           screen_pos=cfg.SCREEN_POS, screen_size=cfg.SCREEN_SIZE)
    else:
        # BUGFIX: fail early with a clear message instead of a NameError on
        # 'visual' further below
        raise ValueError('Unknown FEEDBACK_TYPE %s' % cfg.FEEDBACK_TYPE)
    visual.put_text('Waiting to start ')

    timer_trigger = qc.Timer()
    timer_dir = qc.Timer()
    timer_refresh = qc.Timer()

    # start
    while trial <= num_trials:
        timer_refresh.sleep_atleast(refresh_delay)
        timer_refresh.reset()

        # segment= { 'cue':(s,e), 'dir':(s,e), 'label':0-4 } (zero-based)
        if state == 'start' and timer_trigger.sec() > cfg.T_INIT:
            state = 'gap_s'
            visual.fill()
            timer_trigger.reset()
            trigger.signal(tdef.INIT)
        elif state == 'gap_s':
            visual.put_text('Trial %d / %d' % (trial, num_trials))
            state = 'gap'
        elif state == 'gap' and timer_trigger.sec() > cfg.T_GAP:
            state = 'cue'
            visual.fill()
            visual.draw_cue()
            trigger.signal(tdef.CUE)
            timer_trigger.reset()
        elif state == 'cue' and timer_trigger.sec() > cfg.T_CUE:
            state = 'dir_r'
            # 'cur_dir' renamed from 'dir' to avoid shadowing the builtin
            cur_dir = dir_sequence[trial - 1]
            if cur_dir == 'L':  # left
                if cfg.FEEDBACK_TYPE == 'BAR':
                    visual.move('L', 100)
                else:
                    visual.put_text('LEFT')
                trigger.signal(tdef.LEFT_READY)
            elif cur_dir == 'R':  # right
                if cfg.FEEDBACK_TYPE == 'BAR':
                    visual.move('R', 100)
                else:
                    visual.put_text('RIGHT')
                trigger.signal(tdef.RIGHT_READY)
            elif cur_dir == 'U':  # up
                if cfg.FEEDBACK_TYPE == 'BAR':
                    visual.move('U', 100)
                else:
                    visual.put_text('UP')
                trigger.signal(tdef.UP_READY)
            elif cur_dir == 'D':  # down
                if cfg.FEEDBACK_TYPE == 'BAR':
                    visual.move('D', 100)
                else:
                    visual.put_text('DOWN')
                trigger.signal(tdef.DOWN_READY)
            elif cur_dir == 'B':  # both hands
                if cfg.FEEDBACK_TYPE == 'BAR':
                    visual.move('L', 100)
                    visual.move('R', 100)
                else:
                    visual.put_text('BOTH')
                trigger.signal(tdef.BOTH_READY)
            else:
                # BUGFIX: direction is a string, so %s (the original %d
                # raised TypeError instead of this intended error)
                raise RuntimeError('Unknown direction %s' % cur_dir)
            gait_steps = 1
            timer_trigger.reset()
        elif state == 'dir_r' and timer_trigger.sec() > cfg.T_DIR_READY:
            visual.draw_cue()
            state = 'dir'
            timer_trigger.reset()
            timer_dir.reset()
            t_step = cfg.T_DIR + random.random() * cfg.RANDOMIZE_LENGTH
            if cur_dir == 'L':  # left
                trigger.signal(tdef.LEFT_GO)
            elif cur_dir == 'R':  # right
                trigger.signal(tdef.RIGHT_GO)
            elif cur_dir == 'U':  # up
                trigger.signal(tdef.UP_GO)
            elif cur_dir == 'D':  # down
                trigger.signal(tdef.DOWN_GO)
            elif cur_dir == 'B':  # both
                trigger.signal(tdef.BOTH_GO)
            else:
                # BUGFIX: %s for the same reason as above
                raise RuntimeError('Unknown direction %s' % cur_dir)
        elif state == 'dir':
            if timer_trigger.sec() > t_step:
                if cfg.FEEDBACK_TYPE == 'BODY':
                    if cfg.WITH_STIMO is True:
                        if cur_dir == 'L':  # left
                            ser.write(b'1')
                            qc.print_c('STIMO: Sent 1', 'g')
                            trigger.signal(tdef.LEFT_STIMO)
                        elif cur_dir == 'R':  # right
                            ser.write(b'2')
                            qc.print_c('STIMO: Sent 2', 'g')
                            trigger.signal(tdef.RIGHT_STIMO)
                    else:
                        if cur_dir == 'L':  # left
                            trigger.signal(tdef.LEFT_RETURN)
                        elif cur_dir == 'R':  # right
                            trigger.signal(tdef.RIGHT_RETURN)
                else:
                    trigger.signal(tdef.FEEDBACK)
                state = 'return'
                timer_trigger.reset()
            else:
                # animate the bar moving toward the cued direction
                dx = min(100, int(100.0 * timer_dir.sec() / t_step) + 1)
                if cur_dir == 'L':  # L
                    visual.move('L', dx, overlay=True)
                elif cur_dir == 'R':  # R
                    visual.move('R', dx, overlay=True)
                elif cur_dir == 'U':  # U
                    visual.move('U', dx, overlay=True)
                elif cur_dir == 'D':  # D
                    visual.move('D', dx, overlay=True)
                elif cur_dir == 'B':  # Both
                    visual.move('L', dx, overlay=True)
                    visual.move('R', dx, overlay=True)
        elif state == 'return':
            if timer_trigger.sec() > cfg.T_RETURN:
                if gait_steps < cfg.GAIT_STEPS:
                    # gait protocol: alternate left/right for the next step
                    gait_steps += 1
                    state = 'dir'
                    visual.move('L', 0)
                    if cur_dir == 'L':
                        cur_dir = 'R'
                        trigger.signal(tdef.RIGHT_GO)
                    else:
                        cur_dir = 'L'
                        trigger.signal(tdef.LEFT_GO)
                    timer_dir.reset()
                    t_step = cfg.T_DIR + random.random() * cfg.RANDOMIZE_LENGTH
                else:
                    state = 'gap_s'
                    visual.fill()
                    trial += 1
                    print('trial ' + str(trial - 1) + ' done')
                    trigger.signal(tdef.BLANK)
                timer_trigger.reset()
            else:
                # animate the bar returning to center
                dx = max(0, int(100.0 * (cfg.T_RETURN - timer_trigger.sec()) / cfg.T_RETURN))
                if cur_dir == 'L':  # L
                    visual.move('L', dx, overlay=True)
                elif cur_dir == 'R':  # R
                    visual.move('R', dx, overlay=True)
                elif cur_dir == 'U':  # U
                    visual.move('U', dx, overlay=True)
                elif cur_dir == 'D':  # D
                    visual.move('D', dx, overlay=True)
                elif cur_dir == 'B':  # Both
                    visual.move('L', dx, overlay=True)
                    visual.move('R', dx, overlay=True)

        # wait for start
        if state == 'start':
            visual.put_text('Waiting to start ')

        visual.update()
        key = 0xFF & cv2.waitKey(1)
        if key == keys['esc']:
            break

    # STIMO protocol
    if cfg.WITH_STIMO is True:
        ser.close()
        print('Closed STIMO serial port %s' % cfg.STIMO_COMPORT)