def run(cfg, state=mp.Value('i', 1), queue=None):
    """Online protocol for Alpha/Theta neurofeedback.

    Parameters
    ----------
    cfg : python.module
        The loaded config module for this protocol.
    state : mp.Value
        Shared protocol state: 0 = stop, 1 = start, 2 = wait.
    queue : mp.Queue
        If not None, redirect sys.stdout to the GUI terminal.
    """
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Wait the recording to start (GUI)
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Protocol runs if state equals to 1
    if not state.value:
        sys.exit(-1)

    #----------------------------------------------------------------------
    # LSL stream connection
    #----------------------------------------------------------------------
    # choose amp
    amp_name, amp_serial = find_lsl_stream(cfg, state)

    # Connect to lsl stream
    sr = connect_lsl_stream(cfg, amp_name, amp_serial)

    # Get sampling rate
    sfreq = sr.get_sample_rate()

    # Get trigger channel
    trg_ch = sr.get_trigger_channel()

    #----------------------------------------------------------------------
    # Main
    #----------------------------------------------------------------------
    global_timer = qc.Timer(autoreset=False)
    internal_timer = qc.Timer(autoreset=True)

    # BUGFIX: last_ts was read inside the loop before ever being assigned
    # (NameError on the first iteration). Initialize to 0 so the whole
    # first window is accepted (LSL timestamps are positive).
    last_ts = 0

    while state.value == 1 and global_timer.sec() < cfg.GLOBAL_TIME:

        #------------------------------------------------------------------
        # Data acquisition
        #------------------------------------------------------------------
        sr.acquire()
        window, tslist = sr.get_window()    # window = [samples x channels]
        window = window.T                   # window = [channels x samples]

        # Check if proper real-time acquisition: keep only samples newer
        # than the last processed timestamp.
        tsnew = np.where(np.array(tslist) > last_ts)[0]
        if len(tsnew) == 0:
            logger.warning('There seems to be delay in receiving data.')
            time.sleep(1)
            continue

        #------------------------------------------------------------------
        # ADD YOUR CODE HERE
        #------------------------------------------------------------------

        last_ts = tslist[-1]
        internal_timer.sleep_atleast(cfg.TIMER_SLEEP)
def run(cfg, state=mp.Value('i', 1), queue=None):
    """Training protocol for Alpha/Theta neurofeedback."""
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Attach the trigger-event definitions to the config.
    cfg.tdef = trigger_def(cfg.TRIGGER_FILE)

    # Abort if the GUI requested a stop before we started.
    if not state.value:
        sys.exit(-1)

    # Frequency axis matching the PSD feature resolution.
    psd_cfg = cfg.FEATURES['PSD']
    freqs = np.arange(psd_cfg['fmin'],
                      psd_cfg['fmax'] + 0.5,
                      1 / psd_cfg['wlen'])

    featdata = features.compute_features(cfg)

    # PSD averaged across all windows.
    window_avg_psd = np.mean(np.squeeze(featdata['X_data']), 0)

    # Reference / threshold over the alpha band (>= 8 Hz).
    alpha_band = freqs >= 8
    alpha_ref = round(np.mean(window_avg_psd[alpha_band]))
    alpha_thr = round(alpha_ref - (0.5 * np.std(window_avg_psd[alpha_band])))

    # Reference / threshold over the theta band (< 8 Hz).
    theta_band = freqs < 8
    theta_ref = round(np.mean(window_avg_psd[theta_band]))
    theta_thr = round(theta_ref - (0.5 * np.std(window_avg_psd[theta_band])))

    logger.info('Theta ref = {}; alpha ref ={}' .format(theta_ref, alpha_ref))
    logger.info('Theta thr = {}; alpha thr ={}' .format(theta_thr, alpha_thr))
def run(cfg, state=mp.Value('i', 1), queue=None, logger=logger):
    '''
    Main function used to run the offline protocol.

    Parameters
    ----------
    cfg : python.module
        The loaded config module from the corresponding config_offline.py
    queue : mp.Queue
        If not None, redirect sys.stdout to GUI terminal
    logger : logging.logger
        The logger to use
    '''
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Map the integer trigger values to their event names.
    cfg.tdef = TriggerDef(cfg.TRIGGER_FILE)

    # A zero state means the GUI asked us to stop before starting.
    if not state.value:
        sys.exit(-1)

    #-------------------------------------
    # ADD YOUR CODE HERE
    #-------------------------------------
    # TO train a decoder, look at trainer_mi.py protocol

    # Signal completion back to the GUI.
    with state.get_lock():
        state.value = 0
def run(cfg, state=mp.Value('i', 1), queue=None, logger=logger):
    '''
    Main function used to run the offline protocol.

    Parameters
    ----------
    cfg : python.module
        The loaded config module from the corresponding config_offline.py
    queue : mp.Queue
        If not None, redirect sys.stdout to GUI terminal
    logger : logging.logger
        The logger to use
    '''
    # Redirect sys.stdout to the GUI terminal when launched from the GUI.
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Block until the GUI flips the state out of "wait".
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Anything but a running state here means "stop" was requested.
    if not state.value:
        sys.exit()

    # Trigger-event definitions (int <-> name mapping).
    cfg.tdef = TriggerDef(cfg.TRIGGER_FILE)

    # Seconds between two display refreshes.
    refresh_delay = 1.0 / cfg.REFRESH_RATE

    # Hardware trigger; fall back to a fake one if the device fails.
    trigger = Trigger(lpttype=cfg.TRIGGER_DEVICE, state=state)
    if trigger.init(50) == False:
        logger.error(
            '\n** Error connecting to the trigger device. Use a mock trigger instead?'
        )
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = Trigger(lpttype='FAKE')
        trigger.init(50)

    # Pacing timer.
    timer_refresh = Timer()

    trial = 1
    num_trials = cfg.TRIALS_NB

    # Trial loop, paced at the configured refresh rate.
    while trial <= num_trials:
        timer_refresh.sleep_atleast(refresh_delay)
        timer_refresh.reset()
        #-------------------------------------
        # ADD YOUR CODE HERE
        #-------------------------------------

    # Tell the GUI the protocol is done.
    with state.get_lock():
        state.value = 0
def run(cfg, state=mp.Value('i', 1), queue=None):
    """Online protocol for Alpha/Theta neurofeedback."""
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Block while the GUI keeps us in the "wait" state.
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Anything but a running state at this point means a stop request.
    if not state.value:
        sys.exit(-1)
def run_gui(recordState, protocolState, record_dir, recordLogger=logger,
            amp_name=None, amp_serial=None, eeg_only=False, queue=None):
    """Spawn the recorder process and keep it alive while a protocol runs."""
    redirect_stdout_to_queue(recordLogger, queue, 'INFO')

    # Resolve the LSL server name / device serial when not provided.
    if not amp_name:
        amp_name, amp_serial = pu.search_lsl(recordState, recordLogger,
                                             ignore_markers=True)

    recordLogger.info('\nOutput directory: %s' % (record_dir))

    # Spawn the recorder as a child process.
    recordLogger.info('\n>> Recording started.')
    proc = mp.Process(target=record,
                      args=[recordState, amp_name, amp_serial, record_dir,
                            eeg_only, recordLogger, queue])
    proc.start()

    # Wait until the recorder flags that it is actually recording.
    while not recordState.value:
        pass

    # Launch the protocol (shared variable).
    with protocolState.get_lock():
        protocolState.value = 1

    # Keep recording until the shared variable drops back to 0.
    while recordState.value:
        time.sleep(1)

    recordLogger.info('(main) Waiting for recorder process to finish.')
    proc.join(10)
    if proc.is_alive():
        recordLogger.error(
            'Recorder process not finishing. Are you running from Spyder?')
        recordLogger.error('Dropping into a shell')
        qc.shell()
    sys.stdout.flush()
    recordLogger.info('Recording finished.')
def redirect_stdout(self):
    """
    Create Queue and redirect sys.stdout to this queue.

    Create thread that will listen on the other end of the queue, and
    send the text to the textedit_terminal.
    """
    queue = mp.Queue()

    self.thread = QThread()

    self.my_receiver = MyReceiver(queue)
    # Forward every string popped from the queue to the GUI text widget.
    self.my_receiver.mysignal.connect(self.on_terminal_append)
    # Move the receiver to the worker thread BEFORE starting it, so its
    # run() loop executes off the GUI thread.
    self.my_receiver.moveToThread(self.thread)
    self.thread.started.connect(self.my_receiver.run)
    self.thread.start()

    # From here on, logger/stdout output is pushed into the queue.
    redirect_stdout_to_queue(logger, self.my_receiver.queue, 'INFO')
def __init__(self, amp_name, amp_serial, state=mp.Value('i', 1), queue=None):
    """Build the scope window and start visualizing the given amplifier.

    `state` is the shared protocol state; `queue`, when not None,
    receives the logger output for the GUI terminal.
    """
    super(Scope, self).__init__()
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)

    redirect_stdout_to_queue(logger, queue, 'INFO')
    logger.info('Viewer launched')

    # Keep the amplifier identity and shared state for init_scope().
    self.amp_name = amp_name
    self.amp_serial = amp_serial
    self.state = state

    self.init_scope()
def __init__(self, amp_name, amp_serial, state=mp.Value('i', 1), queue=None):
    """Build the fixed-size scope window and start visualizing the amplifier."""
    super(Scope, self).__init__()
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)

    # Place the window at (100, 100) and lock it to its designed size.
    self.setGeometry(100, 100,
                     self.geometry().width(),
                     self.geometry().height())
    self.setFixedSize(self.geometry().width(),
                      self.geometry().height())

    redirect_stdout_to_queue(logger, queue, 'INFO')
    logger.info('Viewer launched')

    # Keep the amplifier identity and shared state for init_scope().
    self.amp_name = amp_name
    self.amp_serial = amp_serial
    self.state = state
    # Separate shared flag for the recorder (0 until recording starts).
    self.recordState = mp.Value('i', 0)

    self.init_scope()
def run(cfg, state=mp.Value('i', 1), queue=None, interactive=False,
        cv_file=None, feat_file=None, logger=logger):
    """Train a decoder: feature extraction, cross-validation, export.

    Parameters
    ----------
    cfg : python.module
        The loaded training config module.
    state : mp.Value
        Shared protocol state (0: stop, 1: start).
    queue : mp.Queue
        If not None, redirect sys.stdout to the GUI terminal.
    interactive : bool
        Kept for interface compatibility (unused here).
    cv_file : str
        Optional output path for cross-validation results.
    feat_file : str
        Optional output path for exported feature importances.
    logger : logging.logger
        The logger to use.
    """
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # add tdef object
    cfg.tdef = trigger_def(cfg.TRIGGER_FILE)

    # Extract features
    if not state.value:
        sys.exit(-1)
    featdata = features.compute_features(cfg)

    # Find optimal threshold for TPR balancing
    #balance_tpr(cfg, featdata)

    # Perform cross validation
    if not state.value:
        sys.exit(-1)
    if cfg.CV_PERFORM[cfg.CV_PERFORM['selected']] is not None:
        cross_validate(cfg, featdata, cv_file=cv_file)

    # Train a decoder
    if not state.value:
        sys.exit(-1)
    # BUGFIX/idiom: truth-test the flag instead of 'is True' — the
    # identity test silently skipped truthy config values such as 1.
    if cfg.EXPORT_CLS:
        train_decoder(cfg, featdata, feat_file=feat_file)

    with state.get_lock():
        state.value = 0
def record(recordState, amp_name, amp_serial, record_dir, eeg_only,
           recordLogger=logger, queue=None):
    """Record an LSL stream to a raw .pcl file, then convert it to FIF.

    Parameters
    ----------
    recordState : mp.Value
        Shared flag: set to 1 when recording starts; the loop runs until
        an external process resets it to 0.
    amp_name, amp_serial : str
        Identity of the LSL amplifier stream to record.
    record_dir : str
        Output directory for the data files.
    eeg_only : bool
        Passed through to StreamReceiver.
    recordLogger : logging.logger
        The logger to use.
    queue : mp.Queue
        If not None, redirect sys.stdout to the GUI terminal.
    """
    redirect_stdout_to_queue(recordLogger, queue, 'INFO')

    # set data file name
    timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
    pcl_file = "%s/%s-raw.pcl" % (record_dir, timestamp)
    eve_file = '%s/%s-eve.txt' % (record_dir, timestamp)
    recordLogger.info('>> Output file: %s' % (pcl_file))

    # test writability
    # BUGFIX: close the probe file handle (was leaked), chain the original
    # error for diagnosis, and fix the typo in the placeholder message.
    try:
        qc.make_dirs(record_dir)
        with open(pcl_file, 'w') as f:
            f.write('The data will be written when the recording is finished.')
    except Exception as e:
        raise RuntimeError('Problem writing to %s. Check permission.' %
                           pcl_file) from e

    # start a server for sending out data pcl_file when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',
                          source_id=eve_file, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name,
                        amp_serial=amp_serial, eeg_only=eeg_only)

    # start recording
    recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
    with recordState.get_lock():
        recordState.value = 1
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while recordState.value == 1:
        sr.acquire()
        # Log elapsed duration once per buffered second.
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            recordLogger.info('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.001)

    # record stop
    recordLogger.info('>> Stop requested. Copying buffer')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {'signals': signals,
            'timestamps': times,
            'events': events,
            'sample_rate': sr.get_sample_rate(),
            'channels': sr.get_num_channels(),
            'ch_names': sr.get_channel_names(),
            'lsl_time_offset': sr.lsl_time_offset}
    recordLogger.info('Saving raw data ...')
    qc.save_obj(pcl_file, data)
    recordLogger.info('Saved to %s\n' % pcl_file)

    # automatically convert to fif and use event file if it exists (software trigger)
    if os.path.exists(eve_file):
        recordLogger.info('Found matching event file, adding events.')
    else:
        eve_file = None
    recordLogger.info('Converting raw file into fif.')
    pcl2fif(pcl_file, external_event=eve_file)
def run(cfg, state=mp.Value('i', 1), queue=None):
    """Online MI protocol: decode in real time, give feedback, log results.

    Parameters
    ----------
    cfg : python.module
        The loaded online config module.
    state : mp.Value
        Shared protocol state: 0 = stop, 1 = start, 2 = wait.
    queue : mp.Queue
        If not None, redirect sys.stdout to the GUI terminal.
    """

    def confusion_matrix(Y_true, Y_pred, label_len=6):
        """
        Generate confusion matrix in a string format

        Parameters
        ----------
        Y_true : list
            The true labels
        Y_pred : list
            The test labels
        label_len : int
            The maximum label text length displayed (minimum length: 6)

        Returns
        -------
        cfmat : str
            The confusion matrix in str format (X-axis: prediction, Y-axis: ground truth)
        acc : float
            The accuracy
        """
        import numpy as np
        from sklearn.metrics import confusion_matrix as sk_confusion_matrix

        # find labels
        if type(Y_true) == np.ndarray:
            Y_labels = np.unique(Y_true)
        else:
            Y_labels = list(set(Y_true))

        # Check the provided label name length
        if label_len < 6:
            label_len = 6
            logger.warning('label_len < 6. Setting to 6.')
        label_tpl = '%' + '-%ds' % label_len
        col_tpl = '%' + '-%d.2f' % label_len

        # sanity check
        if len(Y_pred) > len(Y_true):
            raise RuntimeError('Y_pred has more items than Y_true')
        elif len(Y_pred) < len(Y_true):
            Y_true = Y_true[:len(Y_pred)]

        # BUGFIX: 'labels' is keyword-only in scikit-learn >= 1.0; the old
        # positional call raised a TypeError there.
        cm = sk_confusion_matrix(Y_true, Y_pred, labels=Y_labels)

        # compute confusion matrix (row-normalized rates, in place)
        cm_rate = cm.copy().astype('float')
        cm_sum = np.sum(cm, axis=1)
        for r, s in zip(cm_rate, cm_sum):
            if s > 0:
                r /= s

        # Fill confusion string
        cm_txt = label_tpl % 'gt\dt'
        for l in Y_labels:
            cm_txt += label_tpl % str(l)[:label_len]
        cm_txt += '\n'
        for l, r in zip(Y_labels, cm_rate):
            cm_txt += label_tpl % str(l)[:label_len]
            for c in r:
                cm_txt += col_tpl % c
            cm_txt += '\n'

        # compute accuracy
        correct = 0.0
        for c in range(cm.shape[0]):
            correct += cm[c][c]
        cm_sum = cm.sum()
        if cm_sum > 0:
            acc = correct / cm.sum()
        else:
            acc = 0.0
        return cm_txt, acc

    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Wait the recording to start (GUI)
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Protocol runs if state equals to 1
    if not state.value:
        sys.exit(-1)

    if cfg.FAKE_CLS is None:
        # choose amp
        if cfg.AMP_NAME is None:
            amp_name = search_lsl(ignore_markers=True, state=state)
        else:
            amp_name = cfg.AMP_NAME
        fake_dirs = None
    else:
        amp_name = None
        fake_dirs = [v for (k, v) in cfg.DIRECTIONS]

    # events and triggers
    tdef = TriggerDef(cfg.TRIGGER_FILE)
    #if cfg.TRIGGER_DEVICE is None:
    #    input('\n** Warning: No trigger device set. Press Ctrl+C to stop or Enter to continue.')
    trigger = Trigger(cfg.TRIGGER_DEVICE, state)
    if trigger.init(50) == False:
        logger.error(
            'Cannot connect to USB2LPT device. Use a mock trigger instead?')
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = Trigger('FAKE', state)
        trigger.init(50)

    # For adaptive (need to share the actual true label accross process)
    label = mp.Value('i', 0)

    # init classification
    decoder = BCIDecoderDaemon(
        amp_name, cfg.DECODER_FILE, buffer_size=1.0,
        fake=(cfg.FAKE_CLS is not None), fake_dirs=fake_dirs,
        parallel=cfg.PARALLEL_DECODING[cfg.PARALLEL_DECODING['selected']],
        alpha_new=cfg.PROB_ALPHA_NEW, label=label)

    # OLD: requires trigger values to be always defined
    #labels = [tdef.by_value[x] for x in decoder.get_labels()]
    # NEW: events can be mapped into integers:
    labels = []
    dirdata = set([d[1] for d in cfg.DIRECTIONS])
    for x in decoder.get_labels():
        if x not in dirdata:
            labels.append(tdef.by_value[x])
        else:
            labels.append(x)

    # map class labels to bar directions
    bar_def = {label: str(dir) for dir, label in cfg.DIRECTIONS}
    bar_dirs = [bar_def[l] for l in labels]
    dir_seq = []
    for x in range(cfg.TRIALS_EACH):
        dir_seq.extend(bar_dirs)

    logger.info('Initializing decoder.')
    while decoder.is_running() == 0:
        time.sleep(0.01)

    # bar visual object
    if cfg.FEEDBACK_TYPE == 'BAR':
        from neurodecode.protocols.viz_bars import BarVisual
        visual = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                           screen_size=cfg.SCREEN_SIZE)
    elif cfg.FEEDBACK_TYPE == 'BODY':
        assert hasattr(cfg, 'FEEDBACK_IMAGE_PATH'), \
            'FEEDBACK_IMAGE_PATH is undefined in your config.'
        from neurodecode.protocols.viz_human import BodyVisual
        visual = BodyVisual(cfg.FEEDBACK_IMAGE_PATH,
                            use_glass=cfg.GLASS_USE,
                            screen_pos=cfg.SCREEN_POS,
                            screen_size=cfg.SCREEN_SIZE)
    visual.put_text('Waiting to start')

    if cfg.LOG_PROBS:
        logdir = io.parse_path(cfg.DECODER_FILE).dir
        probs_logfile = time.strftime(logdir + "probs-%Y%m%d-%H%M%S.txt",
                                      time.localtime())
    else:
        probs_logfile = None
    feedback = Feedback(cfg, state, visual, tdef, trigger, probs_logfile)

    # If adaptive classifier
    if cfg.ADAPTIVE[cfg.ADAPTIVE['selected']]:
        nb_runs = cfg.ADAPTIVE[cfg.ADAPTIVE['selected']][0]
        adaptive = True
    else:
        nb_runs = 1
        adaptive = False

    run = 1
    while run <= nb_runs:
        if cfg.TRIALS_RANDOMIZE:
            random.shuffle(dir_seq)
        else:
            dir_seq = [d[0] for d in cfg.DIRECTIONS] * cfg.TRIALS_EACH
        num_trials = len(dir_seq)

        # For adaptive, retrain the classifier between runs.
        if run > 1:
            # Allow to retrain classifier
            with decoder.label.get_lock():
                decoder.label.value = 1
            # Wait that the retraining is done
            while decoder.label.value == 1:
                time.sleep(0.01)
            feedback.viz.put_text('Press any key')
            feedback.viz.update()
            cv2.waitKeyEx()
            feedback.viz.fill()

        # start
        trial = 1
        dir_detected = []
        prob_history = {c: [] for c in bar_dirs}
        while trial <= num_trials:
            if cfg.SHOW_TRIALS:
                title_text = 'Trial %d / %d' % (trial, num_trials)
            else:
                title_text = 'Ready'
            true_label = dir_seq[trial - 1]

            result = feedback.classify(decoder, true_label, title_text,
                                       bar_dirs, prob_history=prob_history,
                                       adaptive=adaptive)

            if result is None:
                decoder.stop()
                return
            else:
                pred_label = result
            dir_detected.append(pred_label)

            if cfg.WITH_REX is True and pred_label == true_label:
                # if cfg.WITH_REX is True:
                if pred_label == 'U':
                    rex_dir = 'N'
                elif pred_label == 'L':
                    rex_dir = 'W'
                elif pred_label == 'R':
                    rex_dir = 'E'
                elif pred_label == 'D':
                    rex_dir = 'S'
                else:
                    logger.warning('Rex cannot execute undefined action %s'
                                   % pred_label)
                    rex_dir = None
                if rex_dir is not None:
                    visual.move(pred_label, 100, overlay=False, barcolor='B')
                    visual.update()
                    logger.info('Executing Rex action %s' % rex_dir)
                    os.system('%s/Rex/RexControlSimple.exe %s %s' %
                              (os.environ['NEUROD_ROOT'], cfg.REX_COMPORT,
                               rex_dir))
                    time.sleep(8)

            if true_label == pred_label:
                msg = 'Correct'
            else:
                msg = 'Wrong'
            # Without TRIALS_RETRY, always advance; with it, repeat on error.
            if cfg.TRIALS_RETRY is False or true_label == pred_label:
                logger.info('Trial %d: %s (%s -> %s)' %
                            (trial, msg, true_label, pred_label))
                trial += 1

        if len(dir_detected) > 0:
            # write performance and log results
            fdir = io.parse_path(cfg.DECODER_FILE).dir
            logfile = time.strftime(fdir + "/online-%Y%m%d-%H%M%S.txt",
                                    time.localtime())
            with open(logfile, 'w') as fout:
                fout.write('Ground-truth,Prediction\n')
                for gt, dt in zip(dir_seq, dir_detected):
                    fout.write('%s,%s\n' % (gt, dt))
                cfmat, acc = confusion_matrix(dir_seq, dir_detected)
                fout.write('\nAccuracy %.3f\nConfusion matrix\n' % acc)
                fout.write(cfmat)
            logger.info('Log exported to %s' % logfile)
            print('\nAccuracy %.3f\nConfusion matrix\n' % acc)
            print(cfmat)

        run += 1

    visual.finish()
    with state.get_lock():
        state.value = 0
    if decoder.is_running():
        decoder.stop()

    logger.info('Finished.')
def run(cfg, state=mp.Value('i', 1), queue=None):
    '''
    Main function used to run the online protocol.

    Parameters
    ----------
    cfg : python.module
        The loaded config module from the corresponding config_offline.py
    queue : mp.Queue
        If not None, redirect sys.stdout to GUI terminal
    logger : logging.logger
        The logger to use
    '''
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Wait the recording to start (GUI)
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Protocol runs if state equals to 1
    if not state.value:
        sys.exit(-1)

    # events and triggers
    cfg.tdef = TriggerDef(cfg.TRIGGER_FILE)

    # To send trigger events
    trigger = Trigger(cfg.TRIGGER_DEVICE, state)
    if trigger.init(50) == False:
        logger.error(
            'Cannot connect to trigger device. Use a mock trigger instead?')
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = Trigger('FAKE', state)
        trigger.init(50)

    # Instance a stream receiver
    sr = StreamReceiver(bufsize=1, winsize=0.5, stream_name=None,
                        eeg_only=True)

    # Timer for acquisition rate, here 20 Hz
    tm = Timer(autoreset=True)

    # Refresh rate
    refresh_delay = 1.0 / cfg.REFRESH_RATE

    # BUGFIX: loop on the shared state instead of 'while True' so a GUI
    # stop request (state -> 0) actually terminates the protocol; the
    # epilogue below was unreachable before.
    while state.value == 1:
        # Acquire data from all the connected LSL streams by filling each associated buffers.
        sr.acquire()

        # Extract the latest window from the buffer of the chosen stream.
        window, tslist = sr.get_window()    # window = [samples x channels], tslist = [samples]

        #-------------------------------------
        # ADD YOUR CODE HERE
        #-------------------------------------
        # To run a trained BCI decoder, look at online_mi.py protocol

        tm.sleep_atleast(refresh_delay)

    with state.get_lock():
        state.value = 0

    logger.info('Finished.')
def run(cfg, state=mp.Value('i', 1), queue=None):
    """Offline MI protocol: show cued directions and emit hardware triggers.

    Parameters
    ----------
    cfg : python.module
        The loaded offline config module.
    state : mp.Value
        Shared protocol state: 0 = stop, 1 = start, 2 = wait.
    queue : mp.Queue
        If not None, redirect sys.stdout to the GUI terminal.
    """
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Wait the recording to start (GUI)
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Protocol start if equals to 1
    if not state.value:
        sys.exit()

    refresh_delay = 1.0 / cfg.REFRESH_RATE

    cfg.tdef = trigger_def(cfg.TRIGGER_FILE)

    # visualizer: OpenCV key codes used by the cue window
    keys = {'left': 81, 'right': 83, 'up': 82, 'down': 84, 'pgup': 85,
            'pgdn': 86, 'home': 80, 'end': 87, 'space': 32, 'esc': 27,
            ',': 44, '.': 46, 's': 115, 'c': 99, '[': 91, ']': 93,
            '1': 49, '!': 33, '2': 50, '@': 64, '3': 51, '#': 35}
    color = dict(G=(20, 140, 0), B=(210, 0, 0), R=(0, 50, 200),
                 Y=(0, 215, 235), K=(0, 0, 0), w=(200, 200, 200))

    # Randomized sequence of cue directions, TRIALS_EACH repetitions each.
    dir_sequence = []
    for x in range(cfg.TRIALS_EACH):
        dir_sequence.extend(cfg.DIRECTIONS)
    random.shuffle(dir_sequence)
    num_trials = len(cfg.DIRECTIONS) * cfg.TRIALS_EACH

    event = 'start'
    trial = 1

    # Hardware trigger
    if cfg.TRIGGER_DEVICE is None:
        logger.warning(
            'No trigger device set. Press Ctrl+C to stop or Enter to continue.'
        )
        #input()
    trigger = pyLptControl.Trigger(state, cfg.TRIGGER_DEVICE)
    if trigger.init(50) == False:
        logger.error(
            '\n** Error connecting to USB2LPT device. Use a mock trigger instead?'
        )
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = pyLptControl.MockTrigger()
        trigger.init(50)

    # timers
    timer_trigger = qc.Timer()
    timer_dir = qc.Timer()
    timer_refresh = qc.Timer()
    # Jittered per-trial durations.
    t_dir = cfg.TIMINGS['DIR'] + random.uniform(
        -cfg.TIMINGS['DIR_RANDOMIZE'], cfg.TIMINGS['DIR_RANDOMIZE'])
    t_dir_ready = cfg.TIMINGS['READY'] + random.uniform(
        -cfg.TIMINGS['READY_RANDOMIZE'], cfg.TIMINGS['READY_RANDOMIZE'])

    bar = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                    screen_size=cfg.SCREEN_SIZE)
    bar.fill()
    bar.glass_draw_cue()

    # start: state machine start -> gap_s -> gap -> cue -> dir_r -> dir
    while trial <= num_trials:
        timer_refresh.sleep_atleast(refresh_delay)
        timer_refresh.reset()

        # segment= { 'cue':(s,e), 'dir':(s,e), 'label':0-4 } (zero-based)
        if event == 'start' and timer_trigger.sec() > cfg.TIMINGS['INIT']:
            event = 'gap_s'
            bar.fill()
            timer_trigger.reset()
            trigger.signal(cfg.tdef.INIT)
        elif event == 'gap_s':
            if cfg.TRIAL_PAUSE:
                bar.put_text('Press any key')
                bar.update()
                key = cv2.waitKey()
                if key == keys['esc'] or not state.value:
                    break
                bar.fill()
            bar.put_text('Trial %d / %d' % (trial, num_trials))
            event = 'gap'
            timer_trigger.reset()
        elif event == 'gap' and timer_trigger.sec() > cfg.TIMINGS['GAP']:
            event = 'cue'
            bar.fill()
            bar.draw_cue()
            trigger.signal(cfg.tdef.CUE)
            timer_trigger.reset()
        elif event == 'cue' and timer_trigger.sec() > cfg.TIMINGS['CUE']:
            event = 'dir_r'
            dir = dir_sequence[trial - 1]
            if dir == 'L':  # left
                bar.move('L', 100, overlay=True)
                trigger.signal(cfg.tdef.LEFT_READY)
            elif dir == 'R':  # right
                bar.move('R', 100, overlay=True)
                trigger.signal(cfg.tdef.RIGHT_READY)
            elif dir == 'U':  # up
                bar.move('U', 100, overlay=True)
                trigger.signal(cfg.tdef.UP_READY)
            elif dir == 'D':  # down
                bar.move('D', 100, overlay=True)
                trigger.signal(cfg.tdef.DOWN_READY)
            elif dir == 'B':  # both hands
                bar.move('L', 100, overlay=True)
                bar.move('R', 100, overlay=True)
                trigger.signal(cfg.tdef.BOTH_READY)
            else:
                # BUGFIX: direction codes are strings; '%d' raised a
                # TypeError instead of showing the intended message.
                raise RuntimeError('Unknown direction %s' % dir)
            timer_trigger.reset()
        elif event == 'dir_r' and timer_trigger.sec() > t_dir_ready:
            bar.fill()
            bar.draw_cue()
            event = 'dir'
            timer_trigger.reset()
            timer_dir.reset()
            if dir == 'L':  # left
                trigger.signal(cfg.tdef.LEFT_GO)
            elif dir == 'R':  # right
                trigger.signal(cfg.tdef.RIGHT_GO)
            elif dir == 'U':  # up
                trigger.signal(cfg.tdef.UP_GO)
            elif dir == 'D':  # down
                trigger.signal(cfg.tdef.DOWN_GO)
            elif dir == 'B':  # both
                trigger.signal(cfg.tdef.BOTH_GO)
            else:
                # BUGFIX: same '%d'-with-string defect as above.
                raise RuntimeError('Unknown direction %s' % dir)
        elif event == 'dir' and timer_trigger.sec() > t_dir:
            event = 'gap_s'
            bar.fill()
            trial += 1
            logger.info('trial ' + str(trial - 1) + ' done')
            trigger.signal(cfg.tdef.BLANK)
            timer_trigger.reset()
            # Re-jitter the durations for the next trial.
            t_dir = cfg.TIMINGS['DIR'] + random.uniform(
                -cfg.TIMINGS['DIR_RANDOMIZE'], cfg.TIMINGS['DIR_RANDOMIZE'])
            t_dir_ready = cfg.TIMINGS['READY'] + random.uniform(
                -cfg.TIMINGS['READY_RANDOMIZE'],
                cfg.TIMINGS['READY_RANDOMIZE'])

        # protocol: animate the feedback bar during the 'dir' segment
        if event == 'dir':
            dx = min(100, int(100.0 * timer_dir.sec() / t_dir) + 1)
            if dir == 'L':  # L
                bar.move('L', dx, overlay=True)
            elif dir == 'R':  # R
                bar.move('R', dx, overlay=True)
            elif dir == 'U':  # U
                bar.move('U', dx, overlay=True)
            elif dir == 'D':  # D
                bar.move('D', dx, overlay=True)
            elif dir == 'B':  # Both
                bar.move('L', dx, overlay=True)
                bar.move('R', dx, overlay=True)

        # wait for start
        if event == 'start':
            bar.put_text('Waiting to start')

        bar.update()
        key = 0xFF & cv2.waitKey(1)
        if key == keys['esc'] or not state.value:
            break

    bar.finish()
    with state.get_lock():
        state.value = 0
def run(cfg, state=mp.Value('i', 1), queue=None):
    """ Offline protocol """
    # visualizer: OpenCV key codes used by the cue window
    keys = {'left': 81, 'right': 83, 'up': 82, 'down': 84, 'pgup': 85,
            'pgdn': 86, 'home': 80, 'end': 87, 'space': 32, 'esc': 27,
            ',': 44, '.': 46, 's': 115, 'c': 99, '[': 91, ']': 93,
            '1': 49, '!': 33, '2': 50, '@': 64, '3': 51, '#': 35}

    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Wait the recording to start (GUI)
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Protocol runs if state equals to 1
    if not state.value:
        sys.exit(-1)

    global_timer = qc.Timer(autoreset=False)

    # Init trigger communication
    cfg.tdef = trigger_def(cfg.TRIGGER_FILE)
    trigger = pyLptControl.Trigger(state, cfg.TRIGGER_DEVICE)
    if trigger.init(50) == False:
        logger.error('\n** Error connecting to trigger device.')
        raise RuntimeError

    # Preload the starting voice
    pgmixer.init()
    pgmixer.music.load(cfg.START_VOICE)

    # Init feedback
    viz = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                    screen_size=cfg.SCREEN_SIZE)
    viz.fill()
    viz.put_text('Close your eyes and relax')
    viz.update()

    # Play the start voice
    pgmixer.music.play()

    # Wait a key press
    key = 0xFF & cv2.waitKey(0)
    if key == keys['esc'] or not state.value:
        sys.exit(-1)

    viz.fill()
    viz.put_text('Recording in progress')
    viz.update()

    #----------------------------------------------------------------------
    # Main
    #----------------------------------------------------------------------
    trigger.signal(cfg.tdef.INIT)

    # Record until ESC, GUI stop, or the global time budget runs out.
    while state.value == 1 and global_timer.sec() < cfg.GLOBAL_TIME:
        # BUGFIX: mask to the low byte as done for the first waitKey call
        # above; on some platforms waitKey sets high bits and ESC was
        # never matched here.
        key = 0xFF & cv2.waitKey(1)
        if key == keys['esc']:
            with state.get_lock():
                state.value = 0

    trigger.signal(cfg.tdef.END)

    # Remove the text
    viz.fill()
    viz.put_text('Recording is finished')
    viz.update()

    # Ending voice
    pgmixer.music.load(cfg.END_VOICE)
    pgmixer.music.play()
    time.sleep(5)

    # Close cv2 window
    viz.finish()
def run(cfg, state=mp.Value('i', 1), queue=None):
    """Online MI protocol: decode in real time, give feedback, log results.

    Parameters
    ----------
    cfg : python.module
        The loaded online config module.
    state : mp.Value
        Shared protocol state: 0 = stop, 1 = start, 2 = wait.
    queue : mp.Queue
        If not None, redirect sys.stdout to the GUI terminal.
    """
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # Wait the recording to start (GUI)
    while state.value == 2:     # 0: stop, 1:start, 2:wait
        pass

    # Protocol runs if state equals to 1
    if not state.value:
        sys.exit(-1)

    if cfg.FAKE_CLS is None:
        # choose amp
        if cfg.AMP_NAME is None and cfg.AMP_SERIAL is None:
            amp_name, amp_serial = pu.search_lsl(state, ignore_markers=True)
        else:
            amp_name = cfg.AMP_NAME
            amp_serial = cfg.AMP_SERIAL
        fake_dirs = None
    else:
        amp_name = None
        amp_serial = None
        fake_dirs = [v for (k, v) in cfg.DIRECTIONS]

    # events and triggers
    tdef = trigger_def(cfg.TRIGGER_FILE)
    #if cfg.TRIGGER_DEVICE is None:
    #    input('\n** Warning: No trigger device set. Press Ctrl+C to stop or Enter to continue.')
    trigger = pyLptControl.Trigger(state, cfg.TRIGGER_DEVICE)
    if trigger.init(50) == False:
        logger.error(
            'Cannot connect to USB2LPT device. Use a mock trigger instead?')
        input('Press Ctrl+C to stop or Enter to continue.')
        trigger = pyLptControl.MockTrigger()
        trigger.init(50)

    # init classification
    decoder = BCIDecoderDaemon(
        cfg.DECODER_FILE,
        buffer_size=1.0,
        fake=(cfg.FAKE_CLS is not None),
        amp_name=amp_name,
        amp_serial=amp_serial,
        fake_dirs=fake_dirs,
        parallel=cfg.PARALLEL_DECODING[cfg.PARALLEL_DECODING['selected']],
        alpha_new=cfg.PROB_ALPHA_NEW)

    # OLD: requires trigger values to be always defined
    #labels = [tdef.by_value[x] for x in decoder.get_labels()]
    # NEW: events can be mapped into integers:
    labels = []
    dirdata = set([d[1] for d in cfg.DIRECTIONS])
    for x in decoder.get_labels():
        if x not in dirdata:
            labels.append(tdef.by_value[x])
        else:
            labels.append(x)

    # map class labels to bar directions
    bar_def = {label: str(dir) for dir, label in cfg.DIRECTIONS}
    bar_dirs = [bar_def[l] for l in labels]
    dir_seq = []
    for x in range(cfg.TRIALS_EACH):
        dir_seq.extend(bar_dirs)
    if cfg.TRIALS_RANDOMIZE:
        random.shuffle(dir_seq)
    else:
        dir_seq = [d[0] for d in cfg.DIRECTIONS] * cfg.TRIALS_EACH
    num_trials = len(dir_seq)

    logger.info('Initializing decoder.')
    # BUGFIX: 'is 0' compared object identity with an int literal
    # (SyntaxWarning on CPython 3.8+, implementation-dependent result);
    # value equality is what is meant.
    while decoder.is_running() == 0:
        time.sleep(0.01)

    # bar visual object ('COLORS' joined to the elif chain; same outcome,
    # consistent structure)
    if cfg.FEEDBACK_TYPE == 'BAR':
        from neurodecode.protocols.viz_bars import BarVisual
        visual = BarVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                           screen_size=cfg.SCREEN_SIZE)
    elif cfg.FEEDBACK_TYPE == 'COLORS':
        from neurodecode.protocols.viz_colors import ColorVisual
        visual = ColorVisual(cfg.GLASS_USE, screen_pos=cfg.SCREEN_POS,
                             screen_size=cfg.SCREEN_SIZE)
    elif cfg.FEEDBACK_TYPE == 'BODY':
        assert hasattr(cfg, 'FEEDBACK_IMAGE_PATH'), \
            'FEEDBACK_IMAGE_PATH is undefined in your config.'
        from neurodecode.protocols.viz_human import BodyVisual
        visual = BodyVisual(cfg.FEEDBACK_IMAGE_PATH,
                            use_glass=cfg.GLASS_USE,
                            screen_pos=cfg.SCREEN_POS,
                            screen_size=cfg.SCREEN_SIZE)
    visual.put_text('Waiting to start')

    if cfg.LOG_PROBS:
        logdir = qc.parse_path_list(cfg.DECODER_FILE)[0]
        probs_logfile = time.strftime(logdir + "probs-%Y%m%d-%H%M%S.txt",
                                      time.localtime())
    else:
        probs_logfile = None
    feedback = Feedback(cfg, state, visual, tdef, trigger, probs_logfile)

    # start
    trial = 1
    dir_detected = []
    prob_history = {c: [] for c in bar_dirs}
    while trial <= num_trials:
        if cfg.SHOW_TRIALS:
            title_text = 'Trial %d / %d' % (trial, num_trials)
        else:
            title_text = 'Ready'
        true_label = dir_seq[trial - 1]

        result = feedback.classify(decoder, true_label, title_text, bar_dirs,
                                   prob_history=prob_history)

        if result is None:
            break
        else:
            pred_label = result
        dir_detected.append(pred_label)

        if cfg.WITH_REX is True and pred_label == true_label:
            # if cfg.WITH_REX is True:
            if pred_label == 'U':
                rex_dir = 'N'
            elif pred_label == 'L':
                rex_dir = 'W'
            elif pred_label == 'R':
                rex_dir = 'E'
            elif pred_label == 'D':
                rex_dir = 'S'
            else:
                logger.warning('Rex cannot execute undefined action %s'
                               % pred_label)
                rex_dir = None
            if rex_dir is not None:
                visual.move(pred_label, 100, overlay=False, barcolor='B')
                visual.update()
                logger.info('Executing Rex action %s' % rex_dir)
                os.system('%s/Rex/RexControlSimple.exe %s %s' %
                          (pycnbi.ROOT, cfg.REX_COMPORT, rex_dir))
                time.sleep(8)

        if true_label == pred_label:
            msg = 'Correct'
        else:
            msg = 'Wrong'
        # Without TRIALS_RETRY, always advance; with it, repeat on error.
        if cfg.TRIALS_RETRY is False or true_label == pred_label:
            logger.info('Trial %d: %s (%s -> %s)' %
                        (trial, msg, true_label, pred_label))
            trial += 1

    if len(dir_detected) > 0:
        # write performance and log results
        fdir, _, _ = qc.parse_path_list(cfg.DECODER_FILE)
        logfile = time.strftime(fdir + "/online-%Y%m%d-%H%M%S.txt",
                                time.localtime())
        with open(logfile, 'w') as fout:
            fout.write('Ground-truth,Prediction\n')
            for gt, dt in zip(dir_seq, dir_detected):
                fout.write('%s,%s\n' % (gt, dt))
            cfmat, acc = qc.confusion_matrix(dir_seq, dir_detected)
            fout.write('\nAccuracy %.3f\nConfusion matrix\n' % acc)
            fout.write(cfmat)
        logger.info('Log exported to %s' % logfile)
        print('\nAccuracy %.3f\nConfusion matrix\n' % acc)
        print(cfmat)

    visual.finish()
    with state.get_lock():
        state.value = 0
    if decoder:
        decoder.stop()

    logger.info('Finished.')