def record(state, amp_name, amp_serial, record_dir, eeg_only):
    """Record an LSL EEG stream to disk until *state* is cleared.

    Continuously acquires samples into the StreamReceiver buffer, then on
    stop dumps the whole buffer to a pickled dict and converts it to FIF.

    :param state: shared flag (e.g. multiprocessing.Value); recording runs
        while ``state.value == 1``
    :param amp_name: amplifier stream name forwarded to StreamReceiver
    :param amp_serial: amplifier serial forwarded to StreamReceiver
    :param record_dir: output directory for the ``-raw.pcl`` file
    :param eeg_only: forwarded to StreamReceiver (restrict to EEG channels)
    :raises RuntimeError: if *record_dir* is not writable
    """
    # set data file name from the current local time
    filename = time.strftime(record_dir + "/%Y%m%d-%H%M%S-raw.pcl", time.localtime())
    qc.print_c('>> Output file: %s' % (filename), 'W')

    # test writability by writing a placeholder; it is overwritten on stop.
    # Use a context manager so the handle is closed, and only trap OS-level
    # errors -- the previous bare `except:` also hid programming errors.
    try:
        qc.make_dirs(record_dir)
        with open(filename, 'w') as f:
            f.write('The data will written when the recording is finished.')
    except OSError as e:
        raise RuntimeError('Problem writing to %s. Check permission.' % filename) from e

    # start a server for sending out data filename when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',
                          source_id=filename, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(amp_name=amp_name, amp_serial=amp_serial, eeg_only=eeg_only)

    # start recording; print progress roughly once per buffered second
    qc.print_c('\n>> Recording started (PID %d).' % os.getpid(), 'W')
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while state.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            print('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.01)

    # record stop: copy out the full acquisition buffer
    qc.print_c('>> Stop requested. Copying buffer', 'G')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {
        'signals': signals,
        'timestamps': times,
        'events': events,
        'sample_rate': sr.get_sample_rate(),
        'channels': sr.get_num_channels(),
        'ch_names': sr.get_channel_names()
    }
    qc.print_c('Saving raw data ...', 'W')
    qc.save_obj(filename, data)
    print('Saved to %s\n' % filename)

    qc.print_c('Converting raw file into a fif format.', 'W')
    pcl2fif(filename)
def record(recordState, amp_name, amp_serial, record_dir, eeg_only, recordLogger=logger, queue=None):
    """Record an LSL EEG stream to disk until *recordState* is cleared.

    Acquires into the StreamReceiver buffer, dumps it to a pickled dict on
    stop, and converts it to FIF (merging a matching software-trigger event
    file when one exists).

    :param recordState: shared flag; recording runs while ``recordState.value == 1``
    :param amp_name: amplifier stream name forwarded to StreamReceiver
    :param amp_serial: amplifier serial forwarded to StreamReceiver
    :param record_dir: output directory for the ``-raw.pcl`` / ``-eve.txt`` files
    :param eeg_only: forwarded to StreamReceiver (restrict to EEG channels)
    :param recordLogger: logger used for progress messages
    :param queue: optional queue for stdout redirection (GUI mode)
    :raises RuntimeError: if *record_dir* is not writable
    """
    redirect_stdout_to_queue(recordLogger, queue, 'INFO')

    # set data file names from the current local time
    timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
    pcl_file = "%s/%s-raw.pcl" % (record_dir, timestamp)
    eve_file = '%s/%s-eve.txt' % (record_dir, timestamp)
    recordLogger.info('>> Output file: %s' % (pcl_file))

    # test writability by writing a placeholder; it is overwritten on stop.
    # Use a context manager so the handle is closed, and only trap OS-level
    # errors -- the previous bare `except:` also hid programming errors.
    try:
        qc.make_dirs(record_dir)
        with open(pcl_file, 'w') as f:
            f.write('The data will written when the recording is finished.')
    except OSError as e:
        raise RuntimeError('Problem writing to %s. Check permission.' % pcl_file) from e

    # start a server for sending out data pcl_file when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',
                          source_id=eve_file, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name, amp_serial=amp_serial, eeg_only=eeg_only)

    # start recording; log progress roughly once per buffered second
    recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while recordState.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            recordLogger.info('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.001)

    # record stop: copy out the full acquisition buffer
    recordLogger.info('>> Stop requested. Copying buffer')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {'signals': signals, 'timestamps': times, 'events': events,
            'sample_rate': sr.get_sample_rate(), 'channels': sr.get_num_channels(),
            'ch_names': sr.get_channel_names(), 'lsl_time_offset': sr.lsl_time_offset}
    recordLogger.info('Saving raw data ...')
    qc.save_obj(pcl_file, data)
    recordLogger.info('Saved to %s\n' % pcl_file)

    # automatically convert to fif and use event file if it exists (software trigger)
    if os.path.exists(eve_file):
        recordLogger.info('Found matching event file, adding events.')
    else:
        eve_file = None
    recordLogger.info('Converting raw file into fif.')
    pcl2fif(pcl_file, external_event=eve_file)
if __name__ == '__main__':
    # Script entry point: connect to the amplifier stream and prepare a PSD
    # visualization window. Names such as w_seconds, amp_name, amp_serial,
    # fmin, fmax, channel_picks, screen_offset_x/y, get_psd and cv2 are
    # presumably defined/imported earlier in this file -- TODO confirm.
    sr = StreamReceiver(window_size=w_seconds, amp_name=amp_name, amp_serial=amp_serial)
    sfreq = sr.sample_rate
    # multitaper PSD estimator over the [fmin, fmax] band
    psde = PSDEstimator(sfreq=sfreq, fmin=fmin, fmax=fmax, bandwidth=None,
                        adaptive=False, low_bias=True, n_jobs=1,
                        normalization='length', verbose=None)
    ch_names = sr.get_channel_names()
    # frequency resolution of a window of w_seconds
    fq_res = 1 / w_seconds
    # build the list of frequency-bin centers from fmin up to (excluding) fmax
    # NOTE(review): repeated float addition accumulates rounding error, so the
    # final bin count is checked against the PSD shape below.
    hz_list = []
    f = fmin
    while f < fmax:
        hz_list.append(f)
        f += fq_res
    # channel indices (into the stream) of the channels to display
    picks = [ch_names.index(ch) for ch in channel_picks]
    psd = get_psd(sr, psde, picks).T  # freq x ch
    assert len(hz_list) == psd.shape[0], (len(hz_list), psd.shape[0])
    # OpenCV window used as the display surface
    cv2.namedWindow("img", cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow("img", screen_offset_x, screen_offset_y)
    #cv2.setWindowProperty("img", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN);
    # layout constants: pixels per frequency column / per channel row, and
    # width reserved for the frequency labels
    mul_x = 50
    mul_y = 20
    fq_width = 55
class MainView(QMainWindow, SubjectInfo, TaskManager, SequenceManager, ExpProtocol, EventNumber, FilePathManager,
               ChannelScaleManager, ChannelSelector, ChannelFilter, BadEpochMonitor, MRCPExtractor, MainSwitch,
               ScopeSwitch, RecordSwitch, TaskSwitch, EventPlot, SSVEPExpProtocol, EyeTracker, RunTimer, GUITimer):
    """
    MainView class controls the GUI frontend interaction.

    Aggregates many mixin classes (protocol, scope, recording, eye tracker,
    timers, ...); each mixin presumably contributes the slot methods wired up
    in init_event_functions -- TODO confirm against the mixin modules.
    """

    def __init__(self, amp_name, amp_serial, state=mp.Value('i', 1), queue=None):
        """
        Initialize experimenter window GUI and subject view window GUI.

        :amp_name: amplifier name passed from LSL
        :amp_serial: amplifier serial passed from LSL
        :state: shared flag (multiprocessing.Value) controlling the main loop
        :queue: optional queue for stdout redirection (currently unused here)
        """
        super(MainView, self).__init__()
        self.router = Router()
        # experimenter (main) window
        self.ui = main_layout.Ui_MainWindow()
        self.ui.setupUi(self)
        # subject view window
        self.window = QMainWindow()
        self.SV_window = subject_layout.Ui_SV()
        self.SV_window.setupUi(self.window)
        # eye tracker calibration dialog
        self.eye_tracker_dialog = QDialog()
        self.eye_tracker_window = eye_tracker_layout.Ui_Dialog()
        self.eye_tracker_window.setupUi(self.eye_tracker_dialog)
        # redirect_stdout_to_queue(logger, queue, 'INFO')
        logger.info('Viewer launched')
        self.amp_name = amp_name
        self.amp_serial = amp_serial
        self.state = state
        self.init_all()

    def init_all(self):
        """
        Initialize specialized functions inside GUI.

        Order matters: init_config_file/init_loop set up the stream and
        self.config, which the later GUI initializers read.
        """
        self.init_config_file()
        self.init_loop()
        self.init_panel_GUI()
        self.init_event_functions()
        self.init_SV_GUI()
        self.init_scope_GUI()
        self.init_timer()  # timer for scope refreshing
        self.init_Runtimer()  # timer for record, train and test
        self.init_eye_tracker()

    def init_config_file(self):
        """
        Initialize config file.

        Derives the device name from the first command-line argument
        (gtec / biosemi / hiamp) and reads .scope_settings.ini from the
        current working directory.
        """
        self.scope_settings = RawConfigParser(allow_no_value=True,
                                             inline_comment_prefixes=('#', ';'))
        if (len(sys.argv) == 1):
            self.show_channel_names = 0
            self.device_name = ""
        else:
            if (sys.argv[1].find("gtec") > -1):
                self.device_name = "gtec"
                self.show_channel_names = 1
            elif (sys.argv[1].find("biosemi") > -1):
                self.device_name = "biosemi"
                self.show_channel_names = 1
            elif (sys.argv[1].find("hiamp") > -1):
                self.device_name = "hiamp"
                self.show_channel_names = 1
            else:
                self.device_name = ""
                self.show_channel_names = 0
        # self.scope_settings.read(os.getenv("HOME") + "/.scope_settings.ini")
        self.scope_settings.read('.scope_settings.ini')

    def init_loop(self):
        """
        Initialize loop related variables like StreamReceiver and self.eeg.
        """
        self.updating = False
        logger.info("init_loop runs")
        self.sr = StreamReceiver(window_size=1, buffer_size=10,
                                 amp_serial=Variables.get_amp_serial(),
                                 amp_name=Variables.get_amp_name())
        srate = int(self.sr.sample_rate)
        # n_channels= self.sr.channels

        # 12 unsigned ints (4 bytes)
        ########## TODO: assumkng 32 samples chunk => make it read from LSL header
        # NOTE(review): data[7] is never mapped into self.config below --
        # eeg_type starts at data[8]; confirm this offset is intentional.
        data = ['EEG', srate, ['L', 'R'], 32, len(self.sr.get_eeg_channels()),
                0, self.sr.get_trigger_channel(), None, None, None, None, None]
        logger.info('Trigger channel is %d' % self.sr.get_trigger_channel())

        self.config = {
            'id': data[0],
            'sf': data[1],
            'labels': data[2],
            'samples': data[3],
            'eeg_channels': data[4],
            'exg_channels': data[5],
            'tri_channels': data[6],
            'eeg_type': data[8],
            'exg_type': data[9],
            'tri_type': data[10],
            'lbl_type': data[11],
            'tim_size': 1,
            'idx_size': 1
        }

        # per-chunk buffers: trigger values, EEG block, EXG block
        self.tri = np.zeros(self.config['samples'])
        self.last_tri = 0
        # NOTE(review): np.float is removed in NumPy >= 1.24 -- confirm the
        # pinned NumPy version still provides this alias.
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=np.float)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=np.float)
        self.ts_list = []
        self.ts_list_tri = []

    def init_event_functions(self):
        """
        Initialize event listeners for widgets in GUI.

        Wires Qt signals to slot methods; the slots are presumably provided
        by the mixin classes listed in the class definition -- TODO confirm.
        """
        # Control buttons
        self.ui.pushButton_Main_switch.clicked.connect(
            self.onClicked_button_Main_switch)
        self.ui.pushButton_start_SV.clicked.connect(
            self.onClicked_button_start_SV)
        self.ui.pushButton_scope_switch.clicked.connect(
            self.onClicked_button_scope_switch)
        self.ui.pushButton_rec.clicked.connect(self.onClicked_button_rec)
        # self.ui.pushButton_start_train.clicked.connect(self.onClicked_button_train)
        # self.ui.pushButton_start_test.clicked.connect(self.onClicked_button_test)

        # Subject information
        self.ui.pushButton_save.clicked.connect(
            self.onClicked_button_save_subject_information)

        # Experimental protocol
        self.ui.pushButton_define_task_done.clicked.connect(
            self.onClicked_button_define_task_done)
        self.ui.pushButton_define_task_add.clicked.connect(
            self.onClicked_button_define_task_add)
        self.ui.pushButton_create_sequence.clicked.connect(
            self.onClicked_button_create_sequence)
        self.ui.pushButton_randomize.clicked.connect(
            self.onClicked_button_randomize)
        self.ui.toolButton_choose_image_task.clicked.connect(
            self.onClicked_toolButton_choose_image_task)
        self.ui.toolButton_choose_sound_task.clicked.connect(
            self.onClicked_toolButton_choose_sound_task)
        self.ui.pushButton_experimental_protocol_finish.clicked.connect(
            self.onClicked_experimental_protocol_finish)
        self.ui.pushButton_save_protocol.clicked.connect(
            self.onClicked_button_save_protocol)
        self.ui.toolButton_load_protocol.clicked.connect(
            self.onClicked_toolButton_load_protocol)

        # Event management tab
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)

        # Oscilloscope
        self.ui.comboBox_scale.activated.connect(
            self.onActivated_combobox_scale)
        self.ui.spinBox_time.valueChanged.connect(
            self.onValueChanged_spinbox_time)
        self.ui.checkBox_car.stateChanged.connect(
            self.onActivated_checkbox_car)
        self.ui.checkBox_bandpass.stateChanged.connect(
            self.onActivated_checkbox_bandpass)
        self.ui.checkBox_notch.stateChanged.connect(
            self.onActivated_checkbox_notch)
        self.ui.pushButton_bp.clicked.connect(self.onClicked_button_bp)
        self.ui.pushButton_apply_notch.clicked.connect(
            self.onClicked_button_notch)
        self.ui.table_channels.itemSelectionChanged.connect(
            self.onSelectionChanged_table)
        self.ui.table_channels.doubleClicked.connect(
            self.onDoubleClicked_channel_table)
        self.ui.pushButton_update_channel_name.clicked.connect(
            self.onClicked_button_update_channel_name)
        # install self.eventFilter for right-click channel selection
        self.ui.table_channels.viewport().installEventFilter(self)

        # SSVEP
        self.ui.pushButton_ssvep_task.clicked.connect(
            self.onClicked_pushButton_ssvep_task)

        # eye tracker
        self.ui.pushButton_open_eye_tracker_ui.clicked.connect(
            self.onClicked_pushButton_open_eye_tracker_ui)

        # MRCP tab
        self.ui.pushButton_temp_clear.clicked.connect(
            self.onClicked_button_temp_clear)
        self.ui.pushButton_temp_mean.clicked.connect(
            self.onClicked_button_temp_mean)
        self.ui.pushButton_temp_view.clicked.connect(
            self.onClicked_button_temp_view)
        self.ui.pushButton_temp_remove.clicked.connect(
            self.onClicked_button_temp_remove)

    def init_panel_GUI(self):
        """
        Initialize experimenter GUI.

        Resets all per-run state (protocol, events, MRCP buffers), fills the
        channel selection table and shows the main window.
        """
        # Tabs
        self.ui.tab_experimental_protocol.setEnabled(False)
        self.ui.tab_subjec_information.setEnabled(False)
        self.ui.tab_event_and_file_management.setEnabled(False)
        # self.ui.tab_Oscilloscope.setEnabled(False)
        self.ui.tab_experiment_type.setEnabled(False)

        # Experimental protocol
        self.task_list = []
        self.new_task_list = []
        self.task_descriptor_list = []
        self.task_image_path = ""
        self.task_image_path_list = []
        self.task_sound_path = ""
        self.task_sound_path_list = []
        self.task_table = np.ndarray([])
        self.new_task_table = np.ndarray([])
        self.task_counter = 0
        self.protocol_path = ""
        # Button
        self.init_task_name_table()
        self.ui.groupBox_sequence_manager.setEnabled(False)

        # Event management tab
        self.event_timestamp_list = []
        self.init_task_event_number_table()
        self.event_list = []
        # Button
        # NOTE(review): this signal is also connected in init_event_functions;
        # Qt will then invoke the slot twice per click -- confirm intended.
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        self.event_file_path = ""
        self.mrcp_template_file_path = ""
        self.raw_eeg_file_path = ""
        self.raw_mrcp_file_path = ""
        self.subject_file_path = ""

        # Oscilloscope
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_notch.setChecked(True)
        # self.ui.checkBox_car.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_car_filter")))
        # self.ui.checkBox_bandpass.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        self.ui.pushButton_apply_notch.setEnabled(True)
        self.ui.doubleSpinBox_lc_notch.setEnabled(True)
        self.ui.doubleSpinBox_hc_notch.setEnabled(True)

        # initialize channel selection panel in main view GUI:
        # the table is filled column-major (4 columns x NUM_X_CHANNELS rows);
        # cells beyond the channel count are disabled "N/A" placeholders.
        self.channels_to_show_idx = []
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if idx < self.config['eeg_channels']:
                    # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
                    self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
                    # self.table_channels.setItemSelected(self.table_channels.item(x, y), True)  # Qt4 only
                    self.channels_to_show_idx.append(idx)
                else:
                    self.ui.table_channels.setItem(x, y,
                                                   QTableWidgetItem("N/A"))
                    self.ui.table_channels.item(x, y).setFlags(
                        QtCore.Qt.NoItemFlags)
                    self.ui.table_channels.item(x, y).setTextAlignment(
                        QtCore.Qt.AlignCenter)
                idx += 1
        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        # -1 means "no single channel selected for individual scaling"
        self.channel_to_scale_row_index = -1
        self.channel_to_scale_column_index = -1
        self.selected_channel_row_index = 0
        self.selected_channel_column_index = 0
        self.single_channel_scale = 1

        # MRCP tab
        self.init_class_epoch_counter_table()
        self.init_class_bad_epoch_table()
        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False
        self.raw_trial_MRCP = np.ndarray([])
        self.processed_trial_MRCP = np.ndarray([])
        self.total_trials_MRCP = []
        self.total_trials_raw_MRCP = []
        self.total_MRCP_inds = []
        self.temp_counter = 0
        self.temp_counter_list = []
        self.input_temp_list = []
        self.display_temp_list = []
        self.selected_temp = ""
        self.list_selected_temp = []
        # 6-second rolling buffer for MRCP template extraction
        self.template_buffer = np.zeros(
            (6 * int(self.sr.sample_rate), self.config['eeg_channels']),
            dtype=float)
        # 3 Hz low-pass / 0.05 Hz high-pass Butterworth filters (order 2)
        self.b_lp, self.a_lp = Utils.butter_lowpass(3, int(self.sr.sample_rate), 2)
        self.b_hp, self.a_hp = Utils.butter_highpass(0.05, int(self.sr.sample_rate), 2)
        self.initial_condition_list_lp = Utils.construct_initial_condition_list(
            self.b_lp, self.a_lp, self.config['eeg_channels'])
        self.initial_condition_list_hp = Utils.construct_initial_condition_list(
            self.b_hp, self.a_hp, self.config['eeg_channels'])
        self.ui.pushButton_bad_epoch.clicked.connect(
            self.onClicked_button_bad_epoch)
        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        logger.info('GUI show')
        self.show()

    def init_panel_GUI_stop_recording(self):
        """
        Initialize experimenter GUI when stop recording button pressed. This
        is used to prepare for next run.

        Near-duplicate of init_panel_GUI, except the channel selection table
        is not rebuilt and the notch spin boxes are disabled.
        """
        # Tabs
        self.ui.tab_experimental_protocol.setEnabled(False)
        self.ui.tab_subjec_information.setEnabled(False)
        self.ui.tab_event_and_file_management.setEnabled(False)
        # self.ui.tab_Oscilloscope.setEnabled(False)
        self.ui.tab_experiment_type.setEnabled(False)

        # Experimental protocol
        self.task_list = []
        self.new_task_list = []
        self.task_descriptor_list = []
        self.task_image_path = ""
        self.task_image_path_list = []
        self.task_sound_path = ""
        self.task_sound_path_list = []
        self.task_table = np.ndarray([])
        self.new_task_table = np.ndarray([])
        self.task_counter = 0
        self.protocol_path = ""
        # Button
        self.init_task_name_table()
        self.ui.groupBox_sequence_manager.setEnabled(False)

        # Event management tab
        self.event_timestamp_list = []
        self.init_task_event_number_table()
        self.event_list = []
        # Button
        # NOTE(review): reconnecting on every stop adds another connection of
        # the same slot (Qt allows duplicates) -- confirm intended.
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        self.event_file_path = ""
        self.mrcp_template_file_path = ""
        self.raw_eeg_file_path = ""
        self.raw_mrcp_file_path = ""
        self.subject_file_path = ""

        # Oscilloscope
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_notch.setChecked(True)
        # self.ui.checkBox_car.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_car_filter")))
        # self.ui.checkBox_bandpass.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        # self.ui.pushButton_apply_notch.setEnabled(False)
        self.ui.doubleSpinBox_lc_notch.setEnabled(False)
        self.ui.doubleSpinBox_hc_notch.setEnabled(False)
        # # initialize channel selection panel in main view GUI
        # self.channels_to_show_idx = []
        # idx = 0
        # for y in range(0, 4):
        #     for x in range(0, NUM_X_CHANNELS):
        #         if idx < self.config['eeg_channels']:
        #             # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
        #             self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
        #             # self.table_channels.setItemSelected(self.table_channels.item(x, y), True)  # Qt4 only
        #             self.channels_to_show_idx.append(idx)
        #         else:
        #             self.ui.table_channels.setItem(x, y,
        #                                            QTableWidgetItem("N/A"))
        #             self.ui.table_channels.item(x, y).setFlags(
        #                 QtCore.Qt.NoItemFlags)
        #             self.ui.table_channels.item(x, y).setTextAlignment(
        #                 QtCore.Qt.AlignCenter)
        #         idx += 1
        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        self.channel_to_scale_row_index = -1
        self.channel_to_scale_column_index = -1
        self.selected_channel_row_index = 0
        self.selected_channel_column_index = 0
        self.single_channel_scale = 1

        # MRCP tab
        self.init_class_epoch_counter_table()
        self.init_class_bad_epoch_table()
        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False
        self.raw_trial_MRCP = np.ndarray([])
        self.processed_trial_MRCP = np.ndarray([])
        self.total_trials_MRCP = []
        self.total_trials_raw_MRCP = []
        self.total_MRCP_inds = []
        self.temp_counter = 0
        self.temp_counter_list = []
        self.input_temp_list = []
        self.display_temp_list = []
        self.selected_temp = ""
        self.list_selected_temp = []
        self.template_buffer = np.zeros(
            (6 * int(self.sr.sample_rate), self.config['eeg_channels']),
            dtype=float)
        self.b_lp, self.a_lp = Utils.butter_lowpass(3, int(self.sr.sample_rate), 2)
        self.b_hp, self.a_hp = Utils.butter_highpass(0.05, int(self.sr.sample_rate), 2)
        self.initial_condition_list_lp = Utils.construct_initial_condition_list(
            self.b_lp, self.a_lp, self.config['eeg_channels'])
        self.initial_condition_list_hp = Utils.construct_initial_condition_list(
            self.b_hp, self.a_hp, self.config['eeg_channels'])
        self.ui.pushButton_bad_epoch.clicked.connect(
            self.onClicked_button_bad_epoch)
        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        self.show()

    def init_SV_GUI(self):
        """
        Initialize subject view GUI.

        Computes cumulative cue timings (each phase end time builds on the
        previous one) from the line edits in the experimenter window.
        """
        self.SVStatus = 0
        self.starttime = 0
        self.SV_time = 0
        self.idle_time = int(self.ui.idleTimeLineEdit.text())
        self.focus_time = self.idle_time + int(
            self.ui.focusTimeLineEdit.text())
        self.prepare_time = self.focus_time + int(
            self.ui.prepareTimeLineEdit.text())
        self.two_time = self.prepare_time + int(self.ui.twoTimeLineEdit.text())
        self.one_time = self.two_time + int(self.ui.oneTimeLineEdit.text())
        self.task_time = self.one_time + int(self.ui.taskTimeLineEdit.text())
        # fixed 2-second relax phase at the end of each cycle
        self.relax_time = self.task_time + 2
        self.cycle_time = self.relax_time
        self.is_experiment_on = False

    def init_scope_GUI(self):
        """
        Initialize oscilloscope GUI.

        Builds the pyqtgraph scope window, axis ticks and per-channel curves,
        and initializes CAR/bandpass/notch filter state.
        """
        self.bool_parser = {True: '1', False: '0'}

        # PyQTGraph plot initialization
        self.win = pg.GraphicsWindow()
        self.win.setWindowTitle('EEG Scope')
        self.win.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        self.win.keyPressEvent = self.keyPressEvent
        # self.win.show()
        self.main_plot_handler = self.win.addPlot()
        self.win.resize(1280, 800)

        # Scales available in the GUI. If you change the options in the GUI
        # you should change them here as well
        self.scales_range = [1, 10, 25, 50, 100, 250, 500, 1000, 2500, 100000]
        self.single_scales_range = [
            0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.5, 1.7, 1.8, 2
        ]

        # Scale in uV
        self.scale = 100
        # Time window to show in seconds
        self.seconds_to_show = 10

        # Y Tick labels. Use values from the config file.
        self.channel_labels = []
        values = []
        '''
        For non-LSL systems having no channel names
        for x in range(0, self.config['eeg_channels']):
            if (self.show_channel_names):
                self.channel_labels.append("(" + str(x + 1) + ") " +
                    self.scope_settings.get("internal",
                        "channel_names_" + self.device_name + str(
                            self.config['eeg_channels'])).split(', ')[x])
            else:
                self.channel_labels.append('CH ' + str(x + 1))
        '''
        ch_names = np.array(self.sr.get_channel_names())
        self.channel_labels = ch_names[self.sr.get_eeg_channels()]
        # one (y-offset, label) tick per displayed channel
        for x in range(0, len(self.channels_to_show_idx)):
            values.append((-x * self.scale,
                           self.channel_labels[self.channels_to_show_idx[x]]))
        values_axis = []
        values_axis.append(values)
        values_axis.append([])

        # Update table labels with current names
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if (idx < self.config['eeg_channels']):
                    self.ui.table_channels.item(x, y).setText(
                        self.channel_labels[idx])
                idx += 1

        # Plot initialization
        # Plotting colors. If channels > 16, colors will roll back to the beginning
        self.colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255],
                                [255, 255, 0], [0, 255, 255], [255, 0, 255],
                                [128, 100, 100], [0, 128, 0], [0, 128, 128],
                                [128, 128, 0], [255, 128, 128], [128, 0, 128],
                                [128, 255, 0], [255, 128, 0], [0, 255, 128],
                                [128, 0, 255]])
        # pen = pg.mkColor(self.colors)
        # self.main_plot_handler.getAxis('left').setTextPen('b')
        self.main_plot_handler.getAxis('left').setTicks(values_axis)
        self.main_plot_handler.setRange(
            xRange=[0, self.seconds_to_show],
            yRange=[
                +1.5 * self.scale,
                -0.5 * self.scale - self.scale * self.config['eeg_channels']
            ])
        self.main_plot_handler.disableAutoRange()
        self.main_plot_handler.showGrid(y=True)
        self.main_plot_handler.setLabel(axis='left',
                                        text='Scale (uV): ' + str(self.scale))
        self.main_plot_handler.setLabel(axis='bottom', text='Time (s)')

        # X axis: one tick per sample over the visible window, in seconds
        self.x_ticks = np.zeros(self.config['sf'] * self.seconds_to_show)
        for x in range(0, self.config['sf'] * self.seconds_to_show):
            self.x_ticks[x] = (x * 1) / float(self.config['sf'])

        # We want a lightweight scope, so we downsample the plotting to 64 Hz
        self.subsampling_value = self.config['sf'] / 64

        # EEG data for plotting
        self.data_plot = np.zeros((self.config['sf'] * self.seconds_to_show,
                                   self.config['eeg_channels']))
        print('self.data plot shape: ', self.data_plot.shape)
        self.curve_eeg = []
        for x in range(0, len(self.channels_to_show_idx)):
            self.curve_eeg.append(
                self.main_plot_handler.plot(
                    x=self.x_ticks,
                    y=self.data_plot[:, self.channels_to_show_idx[x]],
                    pen=pg.mkColor(self.colors[self.channels_to_show_idx[x] %
                                               16, :])))
            # self.curve_eeg[-1].setDownsampling(ds=self.subsampling_value, auto=False, method="mean")

        # Events data
        self.events_detected = []
        self.events_curves = []
        self.events_text = []

        # CAR initialization: common average reference matrix
        self.apply_car = False
        self.matrix_car = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        self.matrix_car[:, :] = -1 / float(self.config['eeg_channels'])
        np.fill_diagonal(self.matrix_car,
                         1 - (1 / float(self.config['eeg_channels'])))

        # Laplacian initialization. TO BE DONE
        # NOTE(review): [0, 2] is assigned twice with the same value and
        # [2, 0] = -1 is asymmetric -- looks unfinished, matches "TO BE DONE".
        self.matrix_lap = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        np.fill_diagonal(self.matrix_lap, 1)
        self.matrix_lap[2, 0] = -1
        self.matrix_lap[0, 2] = -0.25
        self.matrix_lap[0, 2] = -0.25

        # BP initialization
        self.apply_bandpass = 1
        if (self.apply_bandpass):
            self.ui.doubleSpinBox_lp.setValue(40.0)
            self.ui.doubleSpinBox_hp.setValue(1.0)
            self.ui.doubleSpinBox_lp.setMinimum(0)
            self.ui.doubleSpinBox_lp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_lp.setSingleStep(1)
            self.ui.doubleSpinBox_hp.setMinimum(0)
            self.ui.doubleSpinBox_hp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_hp.setSingleStep(1)
            self.ui.pushButton_bp.click()

        # notch initialization (defaults target 60 Hz mains)
        self.apply_notch = 1
        if (self.apply_notch):
            self.ui.doubleSpinBox_lc_notch.setValue(58.0)
            self.ui.doubleSpinBox_hc_notch.setValue(62.0)
            self.ui.doubleSpinBox_lc_notch.setMinimum(0.1)
            self.ui.doubleSpinBox_lc_notch.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_lc_notch.setSingleStep(1)
            self.ui.doubleSpinBox_hc_notch.setMinimum(0.1)
            self.ui.doubleSpinBox_hc_notch.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_hc_notch.setSingleStep(1)
            self.ui.pushButton_apply_notch.click()

        self.ui.checkBox_bandpass.setChecked(self.apply_bandpass)

        # refilter state starts as a copy of the current filter state
        # (b/a coefficients and zi presumably set by the button clicks above
        # via the filter slots -- TODO confirm)
        self.b_bandpass_scope_refilter = self.b_bandpass_scope
        self.a_bandpass_scope_refilter = self.a_bandpass_scope
        self.zi_bandpass_scope_refilter = self.zi_bandpass_scope
        self.b_notch_scope_refilter = self.b_notch_scope
        self.a_notch_scope_refilter = self.a_notch_scope
        self.zi_notch_scope_refilter = self.zi_notch_scope

        self.update_title_scope()

        # Help variables
        self.show_help = 0
        self.help = pg.TextItem(
            "CNBI EEG Scope v0.3 \n" +
            "----------------------------------------------------------------------------------\n" +
            "C: De/activate CAR Filter\n" +
            "B: De/activate Bandpass Filter (with current settings)\n" +
            "T: Show/hide TiD events\n" +
            "L: Show/hide LPT events\n" +
            "K: Show/hide Key events. If not shown, they are NOT recorded!\n" +
            "0-9: Add a user-specific Key event. Do not forget to write down why you marked it.\n" +
            "Up, down arrow keys: Increase/decrease the scale, steps of 10 uV\n" +
            "Left, right arrow keys: Increase/decrease the time to show, steps of 1 s\n" +
            "Spacebar: Stop the scope plotting, whereas data acquisition keeps running (EXPERIMENTAL)\n" +
            "Esc: Exits the scope",
            anchor=(0, 0),
            border=(70, 70, 70),
            fill=pg.mkColor(20, 20, 20, 200),
            color=(255, 255, 255))

        # Stop plot functionality
        self.stop_plot = 0

        # Force repaint even when we shouldn't repaint.
        self.force_repaint = 1

    def init_timer(self):
        """
        Initialize main timer used for refreshing oscilloscope window. This
        refreshes every 20ms.
        """
        self.os_time_list1 = []
        QtCore.QCoreApplication.processEvents()
        QtCore.QCoreApplication.flush()
        self.timer = QtCore.QTimer(self)
        self.timer.setTimerType(QtCore.Qt.PreciseTimer)
        self.timer.timeout.connect(self.update_loop)
        self.timer.start(20)

    def init_Runtimer(self):
        """
        Initialize task related timer which controls the timing for visual cues.
        """
        self.time_show = 0
        self.os_time_list = []
        # LoopingCall periodically invokes self.Time (defined in a mixin)
        self.Runtimer = task.LoopingCall(self.Time)

    def init_eye_tracker(self):
        """
        Initialize the eye tracker calibration dialog: wire the nine
        calibration-point buttons plus gaze/record controls, and reset
        gaze-related state.
        """
        self.eye_tracker_window.tableWidget.setRowCount(9)
        self.eye_tracker_window.pushButton_1.clicked.connect(self.update_cal1)
        self.eye_tracker_window.pushButton_2.clicked.connect(self.update_cal2)
        self.eye_tracker_window.pushButton_3.clicked.connect(self.update_cal3)
        self.eye_tracker_window.pushButton_4.clicked.connect(self.update_cal4)
        self.eye_tracker_window.pushButton_5.clicked.connect(self.update_cal5)
        self.eye_tracker_window.pushButton_6.clicked.connect(self.update_cal6)
        self.eye_tracker_window.pushButton_7.clicked.connect(self.update_cal7)
        self.eye_tracker_window.pushButton_8.clicked.connect(self.update_cal8)
        self.eye_tracker_window.pushButton_9.clicked.connect(self.update_cal9)
        self.eye_tracker_window.pushButton_12.clicked.connect(
            self.update_current_gaze_loc)
        self.eye_tracker_window.pushButton_13.clicked.connect(
            self.recording_data)
        self.eye_tracker_window.pushButton_14.clicked.connect(
            self.recording_stop)
        self.rec_time = int(self.eye_tracker_window.LineEdit_rec.text())
        # self.LineEdit_rec.clicked.conntect(self.update_rec_time(int(self.LineEdit_rec.text())))
        self.gaze_x = 0
        self.gaze_y = 0
        self.table_row = 0
        self.table_col = 0
        # print(self.gaze_x, self.gaze_y)
        self.UTC_time = 0
        # List of values in 9 points
        self.points = np.zeros((9, 2))
        self.gaze_loc = 0

    def trigger_help(self):
        """Shows / hide help in the scope window"""
        if self.show_help:
            self.help.setPos(0, self.scale)
            self.main_plot_handler.addItem(self.help)
            self.help.setZValue(1)
        else:
            self.main_plot_handler.removeItem(self.help)

    def eventFilter(self, source, event):
        """
        Select single channel to scale by right clicking.

        :param source: channel table content
        :param event: right mouse button press
        :return: the base-class eventFilter result (the event is never
            swallowed here); the selected cell is stored in
            channel_to_scale_row_index / channel_to_scale_column_index
        """
        if (event.type() == QtCore.QEvent.MouseButtonPress
                and event.buttons() == QtCore.Qt.RightButton
                and source is self.ui.table_channels.viewport()):
            item = self.ui.table_channels.itemAt(event.pos())
            # print('Global Pos:', event.globalPos())
            if item is not None:
                self.channel_to_scale_row_index = item.row()
                self.channel_to_scale_column_index = item.column()
                print("RRRRRRRRR", self.channel_to_scale_row_index,
                      self.channel_to_scale_column_index)
                # print('Table Item:', item.row(), item.column())
                # self.menu = QMenu(self)
                # self.menu.addAction(item.text())  # (QAction('test'))
                # menu.exec_(event.globalPos())
        return super(MainView, self).eventFilter(source, event)
class RecordingFromHardWare():
    """Background LSL recorder driven by a boolean flag.

    startRecording() launches record() in a worker thread; record() keeps
    acquiring until stopRecording() clears the flag, then saves the full
    buffer as a pickled dict and converts it to FIF.
    """

    def __init__(self):
        # recording flag polled by the worker thread
        self.start_recording = False
        # filled with the recorded signal array when a run finishes
        self.new_data = []
        self.sr = StreamReceiver(buffer_size=0)
        self.record_dir = '%s/records' % os.getcwd()
        # latest sliding window and its timestamps (updated while recording)
        self.current_window = np.ndarray([])
        self.current_time_stamps = np.ndarray([])
        # sliding window length in seconds for MRCP extraction
        self.MRCP_window_size = 6
        # NOTE(review): leftover debug output -- consider removing
        print("I RUMNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN")

    def startRecording(self):
        """Set the recording flag and launch record() in a daemonless thread."""
        self.start_recording = True
        thread = threading.Thread(target=self.record)
        thread.start()

    def stopRecording(self):
        """Clear the flag; the worker thread then saves and exits."""
        self.start_recording = False

    def isRecordingIsRunning(self):
        """Return True while a recording run is active."""
        return self.start_recording

    def set_MRCP_window_size(self, MRCP_window_size):
        """Set the sliding-window length (seconds) used during acquisition."""
        self.MRCP_window_size = MRCP_window_size

    def record(self):
        """Worker loop: acquire until the flag clears, then save pcl + fif.

        :raises RuntimeError: if self.record_dir is not writable
        """
        recordLogger = logger

        # set data file names from the current local time
        timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
        pcl_file = "%s/%s-raw.pcl" % (self.record_dir, timestamp)
        eve_file = '%s/%s-eve.txt' % (self.record_dir, timestamp)
        recordLogger.info('>> Output file: %s' % (pcl_file))

        # test writability by writing a placeholder; it is overwritten on
        # stop. Use a context manager so the handle is closed, and only trap
        # OS-level errors -- a bare `except:` would also hide bugs.
        try:
            qc.make_dirs(self.record_dir)
            with open(pcl_file, 'w') as f:
                f.write('The data will written when the recording is finished.')
        except OSError as e:
            raise RuntimeError('Problem writing to %s. Check permission.' % pcl_file) from e

        # start a server for sending out data pcl_file when software trigger is used
        outlet = start_server('StreamRecorderInfo', channel_format='string',
                              source_id=eve_file, stype='Markers')

        # start recording; refresh the sliding window on every second of data
        recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
        qc.print_c('\n>> Press Enter to stop recording', 'G')
        tm = qc.Timer(autoreset=True)
        next_sec = 1
        while self.start_recording:
            self.sr.acquire()
            if self.sr.get_buflen() > next_sec:
                next_sec += 1
                self.sr.set_window_size(self.MRCP_window_size)
                self.current_window, self.current_time_stamps = self.sr.get_window()
            tm.sleep_atleast(0.001)

        # record stop: copy out the full acquisition buffer
        recordLogger.info('>> Stop requested. Copying buffer')
        buffers, times = self.sr.get_buffer()
        signals = buffers
        events = None

        # channels = total channels from amp, including trigger channel
        data = {
            'signals': signals,
            'timestamps': times,
            'events': events,
            'sample_rate': self.sr.get_sample_rate(),
            'channels': self.sr.get_num_channels(),
            'ch_names': self.sr.get_channel_names(),
            'lsl_time_offset': self.sr.lsl_time_offset
        }
        print("data length : {}".format(data['signals'].shape))
        self.new_data = data['signals']
        recordLogger.info('Saving raw data ...')
        qc.save_obj(pcl_file, data)
        recordLogger.info('Saved to %s\n' % pcl_file)

        # automatically convert to fif and use event file if it exists (software trigger)
        if os.path.exists(eve_file):
            recordLogger.info('Found matching event file, adding events.')
        else:
            eve_file = None
        recordLogger.info('Converting raw file into fif.')
        pcl2fif(pcl_file, external_event=eve_file)
class BCIDecoder(object):
    """
    Online decoder that classifies the most recent signal window from an
    amplifier stream.

    The label order of self.labels and self.label_names matches the
    likelihood order computed by get_prob().
    """

    def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
        """
        Params
        ------
        classifier: path of the classifier model file (read with qc.load_obj)
        buffer_size: length of the PSD buffer in seconds
        fake: if True, produce random likelihoods without touching the amplifier
        amp_serial: amplifier serial number
        amp_name: amplifier stream name
        """
        self.classifier = classifier
        self.buffer_sec = buffer_size
        self.fake = fake
        self.amp_serial = amp_serial
        self.amp_name = amp_name

        if not self.fake:
            model = qc.load_obj(self.classifier)
            if model is None:
                # report the file that failed to load, not the (None) model object
                self.print('Error loading %s' % self.classifier)
                sys.exit(-1)
            self.cls = model['cls']
            self.psde = model['psde']
            self.labels = list(self.cls.classes_)
            self.label_names = [model['classes'][k] for k in self.labels]
            self.spatial = model['spatial']
            self.spectral = model['spectral']
            self.notch = model['notch']
            self.w_seconds = model['w_seconds']
            self.w_frames = model['w_frames']
            self.wstep = model['wstep']
            self.sfreq = model['sfreq']
            # sanity check: frames per window must equal sfreq * window seconds
            if not int(self.sfreq * self.w_seconds) == self.w_frames:
                raise RuntimeError('sfreq * w_sec %d != w_frames %d' % (int(self.sfreq * self.w_seconds), self.w_frames))
            if 'multiplier' in model:
                self.multiplier = model['multiplier']
            else:
                self.multiplier = 1  # older models may not carry a multiplier entry

            # Stream Receiver: window length must match the trained model
            self.sr = StreamReceiver(window_size=self.w_seconds, amp_name=self.amp_name, amp_serial=self.amp_serial)
            if self.sfreq != self.sr.sample_rate:
                raise RuntimeError('Amplifier sampling rate (%.1f) != model sampling rate (%.1f). Stop.' % (self.sr.sample_rate, self.sfreq))

            # Map channel indices based on channel names of the streaming server
            self.spatial_ch = model['spatial_ch']
            self.spectral_ch = model['spectral_ch']
            self.notch_ch = model['notch_ch']
            self.ref_new = model['ref_new']
            self.ref_old = model['ref_old']
            self.ch_names = self.sr.get_channel_names()
            mc = model['ch_names']
            self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
            if self.spatial_ch is not None:
                self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
            if self.spectral_ch is not None:
                self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
            if self.notch_ch is not None:
                self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]
            if self.ref_new is not None:
                self.ref_new = self.ch_names.index(mc[model['ref_new']])
            if self.ref_old is not None:
                self.ref_old = self.ch_names.index(mc[model['ref_old']])

            # PSD buffer, shaped from a dummy transform of one zero window
            psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames)))
            self.psd_shape = psd_temp.shape
            self.psd_size = psd_temp.size
            self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
            self.ts_buffer = []
        else:
            # Fake left-right decoder
            model = None
            self.psd_shape = None
            self.psd_size = None
            # TODO: parameterize directions using fake_dirs
            self.labels = [11, 9]
            self.label_names = ['LEFT_GO', 'RIGHT_GO']

    def print(self, *args):
        """Print with a '[BCIDecoder] ' prefix (prefix skipped for empty calls)."""
        if len(args) > 0:
            print('[BCIDecoder] ', end='')
        print(*args)

    def get_labels(self):
        """
        Returns
        -------
        Class label numbers in the same order as the likelihoods returned by get_prob()
        """
        return self.labels

    def get_label_names(self):
        """
        Returns
        -------
        Class label names in the same order as get_labels()
        """
        return self.label_names

    def start(self):
        pass

    def stop(self):
        pass

    def get_prob(self):
        """
        Read and classify the latest window.

        Returns
        -------
        The likelihood P(X|C), where X=window, C=model
        """
        if self.fake:
            # fake decoder: biased likelihood for the first class;
            # the remaining classes share the rest equally
            probs = [random.uniform(0.0, 1.0)]
            p_others = (1 - probs[0]) / (len(self.labels) - 1)
            for x in range(1, len(self.labels)):
                probs.append(p_others)
            time.sleep(0.0625)  # simulated delay for PSD + RF
        else:
            self.sr.acquire()
            w, ts = self.sr.get_window()  # w = times x channels
            w = w.T  # -> channels x times
            # apply filters. Important: maintain the original channel order at this point.
            # NOTE(review): return value ignored — assumes pu.preprocess() filters
            # w in place; confirm against the preprocess implementation.
            pu.preprocess(w, sfreq=self.sfreq, spatial=self.spatial, spatial_ch=self.spatial_ch,
                          spectral=self.spectral, spectral_ch=self.spectral_ch,
                          notch=self.notch, notch_ch=self.notch_ch, multiplier=self.multiplier)
            # select the same channels used for training
            w = w[self.picks]
            # debug: show max - min
            # c=1; print( '### %d: %.1f - %.1f = %.1f'% ( self.picks[c], max(w[c]), min(w[c]), max(w[c])-min(w[c]) ) )
            # psd = channels x freqs
            psd = self.psde.transform(w.reshape((1, w.shape[0], w.shape[1])))
            # update psd buffer ( < 1 msec overhead )
            self.psd_buffer = np.concatenate((self.psd_buffer, psd), axis=0)
            self.ts_buffer.append(ts[0])
            if ts[0] - self.ts_buffer[0] > self.buffer_sec:
                # trim by the configured buffer length (was a hard-coded 1.0 s,
                # which silently ignored buffer_size != 1.0)
                # search speed comparison for ordered arrays:
                # http://stackoverflow.com/questions/16243955/numpy-first-occurence-of-value-greater-than-existing-value
                t_index = np.searchsorted(self.ts_buffer, ts[0] - self.buffer_sec)
                self.ts_buffer = self.ts_buffer[t_index:]
                self.psd_buffer = self.psd_buffer[t_index:, :, :]  # numpy delete is slower
            # assert ts[0] - self.ts_buffer[0] <= self.buffer_sec
            # make a feature vector and classify
            feats = np.concatenate(psd[0]).reshape(1, -1)
            # compute likelihoods
            probs = self.cls.predict_proba(feats)[0]
        return probs

    def get_prob_unread(self):
        """Alias of get_prob(); kept for API compatibility."""
        return self.get_prob()

    def get_psd(self):
        """
        Returns
        -------
        The latest computed PSD, flattened to shape (1, -1)
        """
        return self.psd_buffer[-1].reshape((1, -1))

    def is_ready(self):
        """Ready to decode? Returns True if the receiver buffer is not empty."""
        return self.sr.is_ready()
class BCIDecoder(object):
    """
    Online decoder that classifies the most recent signal window from an
    amplifier stream.

    The label order of self.labels and self.label_names matches the
    likelihood order computed by get_prob().
    """

    def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
        """
        Params
        ------
        classifier: path of the classifier model file (read with qc.load_obj)
        buffer_size: length of the signal buffer in seconds
        fake: if True, produce random likelihoods without touching the amplifier
        amp_serial: amplifier serial number
        amp_name: amplifier stream name
        """
        self.classifier = classifier
        self.buffer_sec = buffer_size
        self.fake = fake
        self.amp_serial = amp_serial
        self.amp_name = amp_name

        if not self.fake:
            model = qc.load_obj(self.classifier)
            if model is None:
                logger.error('Classifier model is None.')
                # carry the offending path in the exception, not only in the log
                raise ValueError('Classifier model is None: %s' % self.classifier)
            self.cls = model['cls']
            self.psde = model['psde']
            self.labels = list(self.cls.classes_)
            self.label_names = [model['classes'][k] for k in self.labels]
            self.spatial = model['spatial']
            self.spectral = model['spectral']
            self.notch = model['notch']
            self.w_seconds = model['w_seconds']
            self.w_frames = model['w_frames']
            self.wstep = model['wstep']
            self.sfreq = model['sfreq']
            # older models carry no decimation entry; default to no decimation
            if 'decim' not in model:
                model['decim'] = 1
            self.decim = model['decim']
            # sanity check: frames per window must equal sfreq * window seconds
            if not int(round(self.sfreq * self.w_seconds)) == self.w_frames:
                logger.error('sfreq * w_sec %d != w_frames %d' % (int(round(self.sfreq * self.w_seconds)), self.w_frames))
                raise RuntimeError('Window length (w_frames) does not match sfreq * w_seconds.')
            if 'multiplier' in model:
                self.multiplier = model['multiplier']
            else:
                self.multiplier = 1

            # Stream Receiver: window length must match the trained model
            self.sr = StreamReceiver(window_size=self.w_seconds, amp_name=self.amp_name, amp_serial=self.amp_serial)
            if self.sfreq != self.sr.sample_rate:
                logger.error('Amplifier sampling rate (%.3f) != model sampling rate (%.3f). Stop.' % (self.sr.sample_rate, self.sfreq))
                raise RuntimeError('Amplifier and model sampling rates differ.')

            # Map channel indices based on channel names of the streaming server
            self.spatial_ch = model['spatial_ch']
            self.spectral_ch = model['spectral_ch']
            self.notch_ch = model['notch_ch']
            #self.ref_ch = model['ref_ch'] # not supported yet
            self.ch_names = self.sr.get_channel_names()
            mc = model['ch_names']
            self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
            if self.spatial_ch is not None:
                self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
            if self.spectral_ch is not None:
                self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
            if self.notch_ch is not None:
                self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]

            # PSD buffer (disabled while under testing; see get_psd)
            #psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames // self.decim)))
            #self.psd_shape = psd_temp.shape
            #self.psd_size = psd_temp.size
            #self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
            #self.psd_buffer = None
            self.ts_buffer = []
            logger.info_green('Loaded classifier %s (sfreq=%.3f, decim=%d)' % (' vs '.join(self.label_names), self.sfreq, self.decim))
        else:
            # Fake left-right decoder
            model = None
            self.psd_shape = None
            self.psd_size = None
            # TODO: parameterize directions using fake_dirs
            self.labels = [11, 9]
            self.label_names = ['LEFT_GO', 'RIGHT_GO']

    def get_labels(self):
        """
        Returns
        -------
        Class label numbers in the same order as the likelihoods returned by get_prob()
        """
        return self.labels

    def get_label_names(self):
        """
        Returns
        -------
        Class label names in the same order as get_labels()
        """
        return self.label_names

    def start(self):
        pass

    def stop(self):
        pass

    def get_prob(self, timestamp=False):
        """
        Read and classify the latest window.

        Input
        -----
        timestamp: If True, also return the LSL timestamp of the leading edge
                   of the window used for decoding.

        Returns
        -------
        The likelihood P(X|C), where X=window, C=model;
        (probs, t_prob) when timestamp is True.
        """
        if self.fake:
            # fake decoder: biased likelihood for the first class;
            # the remaining classes share the rest equally
            probs = [random.uniform(0.0, 1.0)]
            p_others = (1 - probs[0]) / (len(self.labels) - 1)
            for x in range(1, len(self.labels)):
                probs.append(p_others)
            time.sleep(0.0625)  # simulated delay
            t_prob = pylsl.local_clock()
        else:
            self.sr.acquire(blocking=True)
            w, ts = self.sr.get_window()  # w = times x channels
            t_prob = ts[-1]
            w = w.T  # -> channels x times

            # re-reference channels
            # TODO: add re-referencing function to preprocess()

            # apply filters. Important: maintain the original channel order at this point.
            w = pu.preprocess(w, sfreq=self.sfreq, spatial=self.spatial, spatial_ch=self.spatial_ch,
                              spectral=self.spectral, spectral_ch=self.spectral_ch,
                              notch=self.notch, notch_ch=self.notch_ch,
                              multiplier=self.multiplier, decim=self.decim)

            # select the same channels used for training
            w = w[self.picks]

            # debug: show max - min
            # c=1; print( '### %d: %.1f - %.1f = %.1f'% ( self.picks[c], max(w[c]), min(w[c]), max(w[c])-min(w[c]) ) )

            # psd = channels x freqs
            psd = self.psde.transform(w.reshape((1, w.shape[0], w.shape[1])))
            # make a feature vector and classify
            feats = np.concatenate(psd[0]).reshape(1, -1)
            # compute likelihoods
            probs = self.cls.predict_proba(feats)[0]

            # update psd buffer ( < 1 msec overhead ) -- disabled while under testing
            '''
            if self.psd_buffer is None:
                self.psd_buffer = psd
            else:
                self.psd_buffer = np.concatenate((self.psd_buffer, psd), axis=0)
                # TODO: CHECK THIS BLOCK
                self.ts_buffer.append(ts[0])
                if ts[0] - self.ts_buffer[0] > self.buffer_sec:
                    # search speed comparison for ordered arrays:
                    # http://stackoverflow.com/questions/16243955/numpy-first-occurence-of-value-greater-than-existing-value
                    #t_index = np.searchsorted(self.ts_buffer, ts[0] - 1.0)
                    t_index = np.searchsorted(self.ts_buffer, ts[0] - self.buffer_sec)
                    self.ts_buffer = self.ts_buffer[t_index:]
                    self.psd_buffer = self.psd_buffer[t_index:, :, :] # numpy delete is slower
                    # assert ts[0] - self.ts_buffer[0] <= self.buffer_sec
            '''

        if timestamp:
            return probs, t_prob
        else:
            return probs

    def get_prob_unread(self, timestamp=False):
        """Alias of get_prob(); kept for API compatibility."""
        return self.get_prob(timestamp)

    def get_psd(self):
        """
        Returns
        -------
        The latest computed PSD (currently disabled; always raises)
        """
        raise NotImplementedError('Sorry! PSD buffer is under testing.')
        # re-enable once the PSD buffer is restored:
        # return self.psd_buffer[-1].reshape((1, -1))

    def is_ready(self):
        """Ready to decode? Returns True if the receiver buffer is not empty."""
        return self.sr.is_ready()