Example #1
def log_decoding_helper(state, event_queue, amp_name=None, amp_serial=None, autostop=False):
    """
    Helper function to run a StreamReceiver object in the background
    """
    logger.info('Event acquisition subprocess started.')

    # wait for the start signal
    while state.value == 0:
        time.sleep(0.01)
    
    # acquire data in the background; event times and values are extracted at the end
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name, amp_serial=amp_serial)
    tm = qc.Timer(autoreset=True)
    started = False
    while state.value == 1:
        chunk, ts_list = sr.acquire()
        if autostop:
            if started is True:
                if len(ts_list) == 0:
                    state.value = 0
                    break
            elif len(ts_list) > 0:
                started = True
        tm.sleep_atleast(0.001)
    logger.info('Event acquisition subprocess finishing up ...')

    buffers, times = sr.get_buffer()
    events = buffers[:, 0] # first channel is the trigger channel
    event_index = np.where(events != 0)[0]
    event_times = times[event_index].reshape(-1).tolist()
    event_values = events[event_index].tolist()
    assert len(event_times) == len(event_values)
    event_queue.put((event_times, event_values))
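
A minimal sketch of how this helper might be launched from a parent process. The multiprocessing wiring below is standard library; the amplifier name 'MyAmp' is a placeholder, and the helper is assumed to be importable from the module shown above.

import multiprocessing as mp

if __name__ == '__main__':
    state = mp.Value('i', 0)      # 0 = wait for the start signal, 1 = acquire
    event_queue = mp.Queue()
    proc = mp.Process(target=log_decoding_helper,
                      args=(state, event_queue),
                      kwargs=dict(amp_name='MyAmp', amp_serial=None, autostop=True))
    proc.start()
    state.value = 1               # release the start signal
    # blocks until the helper finishes and puts (event_times, event_values)
    event_times, event_values = event_queue.get()
    proc.join()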
Example #2
    def init_loop(self):

        self.updating = False

        self.sr = StreamReceiver(window_size=1, buffer_size=10,
            amp_serial=self.amp_serial, amp_name=self.amp_name)
        srate = int(self.sr.sample_rate)
        # n_channels= self.sr.channels

        # 12 unsigned ints (4 bytes)
        ########## TODO: assuming a 32-sample chunk => read it from the LSL header instead
        data = ['EEG', srate, ['L', 'R'], 32, len(self.sr.get_eeg_channels()),
            0, self.sr.get_trigger_channel(), None, None, None, None, None]
        logger.info('Trigger channel is %d' % self.sr.get_trigger_channel())

        self.config = {'id':data[0], 'sf':data[1], 'labels':data[2],
            'samples':data[3], 'eeg_channels':data[4], 'exg_channels':data[5],
            'tri_channels':data[6], 'eeg_type':data[8], 'exg_type':data[9],
            'tri_type':data[10], 'lbl_type':data[11], 'tim_size':1,
            'idx_size':1}

        self.tri = np.zeros(self.config['samples'])
        self.last_tri = 0
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=float)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=float)
        self.ts_list = []
        self.ts_list_tri = []
Example #3
def record(state, amp_name, amp_serial, record_dir, eeg_only):
    # set data file name
    filename = time.strftime(record_dir + "/%Y%m%d-%H%M%S-raw.pcl",
                             time.localtime())
    qc.print_c('>> Output file: %s' % (filename), 'W')

    # test writability
    try:
        qc.make_dirs(record_dir)
        open(
            filename,
            'w').write('The data will be written when the recording is finished.')
    except:
        raise RuntimeError('Problem writing to %s. Check permission.' %
                           filename)

    # start a server for sending out data filename when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',\
        source_id=filename, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(amp_name=amp_name,
                        amp_serial=amp_serial,
                        eeg_only=eeg_only)

    # start recording
    qc.print_c('\n>> Recording started (PID %d).' % os.getpid(), 'W')
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while state.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            print('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.01)

    # record stop
    qc.print_c('>> Stop requested. Copying buffer', 'G')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {
        'signals': signals,
        'timestamps': times,
        'events': events,
        'sample_rate': sr.get_sample_rate(),
        'channels': sr.get_num_channels(),
        'ch_names': sr.get_channel_names()
    }
    qc.print_c('Saving raw data ...', 'W')
    qc.save_obj(filename, data)
    print('Saved to %s\n' % filename)

    qc.print_c('Converting raw file into a fif format.', 'W')
    pcl2fif(filename)

Example #4
def record(recordState, amp_name, amp_serial, record_dir, eeg_only, recordLogger=logger, queue=None):

    redirect_stdout_to_queue(recordLogger, queue, 'INFO')

    # set data file name
    timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
    pcl_file = "%s/%s-raw.pcl" % (record_dir, timestamp)
    eve_file = '%s/%s-eve.txt' % (record_dir, timestamp)
    recordLogger.info('>> Output file: %s' % (pcl_file))

    # test writability
    try:
        qc.make_dirs(record_dir)
        open(pcl_file, 'w').write('The data will be written when the recording is finished.')
    except:
        raise RuntimeError('Problem writing to %s. Check permission.' % pcl_file)

    # start a server for sending out data pcl_file when software trigger is used
    outlet = start_server('StreamRecorderInfo', channel_format='string',\
        source_id=eve_file, stype='Markers')

    # connect to EEG stream server
    sr = StreamReceiver(buffer_size=0, amp_name=amp_name, amp_serial=amp_serial, eeg_only=eeg_only)

    # start recording
    recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
    qc.print_c('\n>> Press Enter to stop recording', 'G')
    tm = qc.Timer(autoreset=True)
    next_sec = 1
    while recordState.value == 1:
        sr.acquire()
        if sr.get_buflen() > next_sec:
            duration = str(datetime.timedelta(seconds=int(sr.get_buflen())))
            recordLogger.info('RECORDING %s' % duration)
            next_sec += 1
        tm.sleep_atleast(0.001)

    # record stop
    recordLogger.info('>> Stop requested. Copying buffer')
    buffers, times = sr.get_buffer()
    signals = buffers
    events = None

    # channels = total channels from amp, including trigger channel
    data = {'signals':signals, 'timestamps':times, 'events':events,
            'sample_rate':sr.get_sample_rate(), 'channels':sr.get_num_channels(),
            'ch_names':sr.get_channel_names(), 'lsl_time_offset':sr.lsl_time_offset}
    recordLogger.info('Saving raw data ...')
    qc.save_obj(pcl_file, data)
    recordLogger.info('Saved to %s\n' % pcl_file)

    # automatically convert to fif and use event file if it exists (software trigger)
    if os.path.exists(eve_file):
        recordLogger.info('Found matching event file, adding events.')
    else:
        eve_file = None
    recordLogger.info('Converting raw file into fif.')
    pcl2fif(pcl_file, external_event=eve_file)
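
A hedged sketch of how this recorder could be driven from a parent process; the shared recordState value mirrors the loop condition above, while the amplifier name and output directory are placeholders.

import multiprocessing as mp

if __name__ == '__main__':
    recordState = mp.Value('i', 1)             # 1 = recording, 0 = stop requested
    proc = mp.Process(target=record,
                      args=(recordState, 'MyAmp', None, './records', False))
    proc.start()
    input('Press Enter to stop recording.\n')  # the prompt printed by record() expects this
    recordState.value = 0                      # the acquire loop exits, saves and converts the data
    proc.join()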
Example #5
 def __init__(self):
     self.start_recording = False
     self.new_data = []
     self.sr = StreamReceiver(buffer_size=0)
     self.record_dir = '%s/records' % os.getcwd()
     self.current_window = np.ndarray([])
     self.current_time_stamps = np.ndarray([])
     self.MRCP_window_size = 6
     print("I RUMNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN")
Example #6
 def start_recording_data(self):
     """
     Start recording data, set the flag to True and create a new thread
     """
     self.is_recording_running = True
     self.streamReceiver = StreamReceiver(buffer_size=0,
                                          amp_serial=Variables.get_amp_serial(), amp_name=Variables.get_amp_name())
     self.thread = threading.Thread(target=self.record)
     self.thread.start()
Example #7
    # select the same channels used for training
    w = w[picks]

    # debug: show max - min
    # c=1; print( '### %d: %.1f - %.1f = %.1f'% ( picks[c], max(w[c]), min(w[c]), max(w[c])-min(w[c]) ) )

    # psde.transform = [channels x freqs]
    psd = psde.transform(w)

    return psd


if __name__ == '__main__':
    sr = StreamReceiver(window_size=w_seconds,
                        amp_name=amp_name,
                        amp_serial=amp_serial)
    sfreq = sr.sample_rate
    psde = PSDEstimator(sfreq=sfreq,
                        fmin=fmin,
                        fmax=fmax,
                        bandwidth=None,
                        adaptive=False,
                        low_bias=True,
                        n_jobs=1,
                        normalization='length',
                        verbose=None)
    ch_names = sr.get_channel_names()
    fq_res = 1 / w_seconds
    hz_list = []
    f = fmin
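
The snippet is truncated here; it presumably goes on to fill hz_list with the frequency bins of the PSD. A sketch of that loop, consistent with the resolution fq_res = 1 / w_seconds computed above:

# build the list of frequency bins between fmin and fmax
while f <= fmax:
    hz_list.append(f)
    f += fq_res
print('%d frequency bins between %.1f and %.1f Hz' % (len(hz_list), fmin, fmax))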
Example #8
class MainView(QMainWindow, SubjectInfo, TaskManager, SequenceManager, ExpProtocol, EventNumber, FilePathManager,\
               ChannelScaleManager, ChannelSelector, ChannelFilter, BadEpochMonitor, MRCPExtractor, MainSwitch,\
               ScopeSwitch, RecordSwitch, TaskSwitch, EventPlot, SSVEPExpProtocol, EyeTracker, RunTimer, GUITimer):
    """
    MainView class controls the GUI frontend interaction
    """
    def __init__(self,
                 amp_name,
                 amp_serial,
                 state=mp.Value('i', 1),
                 queue=None):
        """
        Initialize experimenter window GUI and subject view window GUI

        :amp_name: amplifier name passed from LSL
        :amp_serial: amplifier serial passed from LSL
        """
        super(MainView, self).__init__()
        self.router = Router()
        self.ui = main_layout.Ui_MainWindow()
        self.ui.setupUi(self)

        self.window = QMainWindow()
        self.SV_window = subject_layout.Ui_SV()
        self.SV_window.setupUi(self.window)

        self.eye_tracker_dialog = QDialog()
        self.eye_tracker_window = eye_tracker_layout.Ui_Dialog()
        self.eye_tracker_window.setupUi(self.eye_tracker_dialog)

        # redirect_stdout_to_queue(logger, queue, 'INFO')
        logger.info('Viewer launched')

        self.amp_name = amp_name
        self.amp_serial = amp_serial
        self.state = state
        self.init_all()

    def init_all(self):
        """
        Initialize specialized functions inside GUI
        """

        self.init_config_file()
        self.init_loop()
        self.init_panel_GUI()
        self.init_event_functions()
        self.init_SV_GUI()
        self.init_scope_GUI()
        self.init_timer()  # timer for scope refreshing
        self.init_Runtimer()  # timer for record, train and test
        self.init_eye_tracker()

    def init_config_file(self):
        """
        Initialize config file
        """
        self.scope_settings = RawConfigParser(allow_no_value=True,
                                              inline_comment_prefixes=('#',
                                                                       ';'))
        if (len(sys.argv) == 1):
            self.show_channel_names = 0
            self.device_name = ""
        else:
            if (sys.argv[1].find("gtec") > -1):
                self.device_name = "gtec"
                self.show_channel_names = 1
            elif (sys.argv[1].find("biosemi") > -1):
                self.device_name = "biosemi"
                self.show_channel_names = 1
            elif (sys.argv[1].find("hiamp") > -1):
                self.device_name = "hiamp"
                self.show_channel_names = 1
            else:
                self.device_name = ""
                self.show_channel_names = 0
        # self.scope_settings.read(os.getenv("HOME") + "/.scope_settings.ini")
        self.scope_settings.read('.scope_settings.ini')

    def init_loop(self):
        """
        Initialize loop related variables like StreamReceiver and self.eeg
        """
        self.updating = False
        logger.info("init_loop runs")
        self.sr = StreamReceiver(window_size=1,
                                 buffer_size=10,
                                 amp_serial=Variables.get_amp_serial(),
                                 amp_name=Variables.get_amp_name())
        srate = int(self.sr.sample_rate)
        # n_channels= self.sr.channels

        # 12 unsigned ints (4 bytes)
        ########## TODO: assuming a 32-sample chunk => read it from the LSL header instead
        data = [
            'EEG', srate, ['L', 'R'], 32,
            len(self.sr.get_eeg_channels()), 0,
            self.sr.get_trigger_channel(), None, None, None, None, None
        ]

        logger.info('Trigger channel is %d' % self.sr.get_trigger_channel())

        self.config = {
            'id': data[0],
            'sf': data[1],
            'labels': data[2],
            'samples': data[3],
            'eeg_channels': data[4],
            'exg_channels': data[5],
            'tri_channels': data[6],
            'eeg_type': data[8],
            'exg_type': data[9],
            'tri_type': data[10],
            'lbl_type': data[11],
            'tim_size': 1,
            'idx_size': 1
        }

        self.tri = np.zeros(self.config['samples'])
        self.last_tri = 0
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=float)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=float)
        self.ts_list = []
        self.ts_list_tri = []

    def init_event_functions(self):
        """
        Initialize event listeners for widgets in GUI
        """
        # Control buttons
        self.ui.pushButton_Main_switch.clicked.connect(
            self.onClicked_button_Main_switch)
        self.ui.pushButton_start_SV.clicked.connect(
            self.onClicked_button_start_SV)
        self.ui.pushButton_scope_switch.clicked.connect(
            self.onClicked_button_scope_switch)
        self.ui.pushButton_rec.clicked.connect(self.onClicked_button_rec)
        # self.ui.pushButton_start_train.clicked.connect(self.onClicked_button_train)
        # self.ui.pushButton_start_test.clicked.connect(self.onClicked_button_test)

        # Subject information
        self.ui.pushButton_save.clicked.connect(
            self.onClicked_button_save_subject_information)

        # Experimental protocol
        self.ui.pushButton_define_task_done.clicked.connect(
            self.onClicked_button_define_task_done)
        self.ui.pushButton_define_task_add.clicked.connect(
            self.onClicked_button_define_task_add)
        self.ui.pushButton_create_sequence.clicked.connect(
            self.onClicked_button_create_sequence)
        self.ui.pushButton_randomize.clicked.connect(
            self.onClicked_button_randomize)
        self.ui.toolButton_choose_image_task.clicked.connect(
            self.onClicked_toolButton_choose_image_task)
        self.ui.toolButton_choose_sound_task.clicked.connect(
            self.onClicked_toolButton_choose_sound_task)
        self.ui.pushButton_experimental_protocol_finish.clicked.connect(
            self.onClicked_experimental_protocol_finish)
        self.ui.pushButton_save_protocol.clicked.connect(
            self.onClicked_button_save_protocol)
        self.ui.toolButton_load_protocol.clicked.connect(
            self.onClicked_toolButton_load_protocol)

        # Event management tab
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)

        # Oscilloscope
        self.ui.comboBox_scale.activated.connect(
            self.onActivated_combobox_scale)
        self.ui.spinBox_time.valueChanged.connect(
            self.onValueChanged_spinbox_time)
        self.ui.checkBox_car.stateChanged.connect(
            self.onActivated_checkbox_car)
        self.ui.checkBox_bandpass.stateChanged.connect(
            self.onActivated_checkbox_bandpass)
        self.ui.checkBox_notch.stateChanged.connect(
            self.onActivated_checkbox_notch)

        self.ui.pushButton_bp.clicked.connect(self.onClicked_button_bp)
        self.ui.pushButton_apply_notch.clicked.connect(
            self.onClicked_button_notch)

        self.ui.table_channels.itemSelectionChanged.connect(
            self.onSelectionChanged_table)
        self.ui.table_channels.doubleClicked.connect(
            self.onDoubleClicked_channel_table)
        self.ui.pushButton_update_channel_name.clicked.connect(
            self.onClicked_button_update_channel_name)
        self.ui.table_channels.viewport().installEventFilter(self)

        # SSVEP
        self.ui.pushButton_ssvep_task.clicked.connect(
            self.onClicked_pushButton_ssvep_task)

        # eye tracker
        self.ui.pushButton_open_eye_tracker_ui.clicked.connect(
            self.onClicked_pushButton_open_eye_tracker_ui)

        # MRCP tab
        self.ui.pushButton_temp_clear.clicked.connect(
            self.onClicked_button_temp_clear)
        self.ui.pushButton_temp_mean.clicked.connect(
            self.onClicked_button_temp_mean)
        self.ui.pushButton_temp_view.clicked.connect(
            self.onClicked_button_temp_view)
        self.ui.pushButton_temp_remove.clicked.connect(
            self.onClicked_button_temp_remove)

    def init_panel_GUI(self):
        """
        Initialize experimenter GUI
        """
        # Tabs
        self.ui.tab_experimental_protocol.setEnabled(False)
        self.ui.tab_subjec_information.setEnabled(False)
        self.ui.tab_event_and_file_management.setEnabled(False)
        # self.ui.tab_Oscilloscope.setEnabled(False)
        self.ui.tab_experiment_type.setEnabled(False)

        # Experimental protocol
        self.task_list = []
        self.new_task_list = []
        self.task_descriptor_list = []
        self.task_image_path = ""
        self.task_image_path_list = []
        self.task_sound_path = ""
        self.task_sound_path_list = []
        self.task_table = np.ndarray([])
        self.new_task_table = np.ndarray([])
        self.task_counter = 0
        self.protocol_path = ""
        # Button
        self.init_task_name_table()
        self.ui.groupBox_sequence_manager.setEnabled(False)

        # Event management tab
        self.event_timestamp_list = []
        self.init_task_event_number_table()
        self.event_list = []
        # Button
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        self.event_file_path = ""
        self.mrcp_template_file_path = ""
        self.raw_eeg_file_path = ""
        self.raw_mrcp_file_path = ""
        self.subject_file_path = ""

        # Oscilloscope
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_notch.setChecked(True)
        # self.ui.checkBox_car.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_car_filter")))
        # self.ui.checkBox_bandpass.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        self.ui.pushButton_apply_notch.setEnabled(True)
        self.ui.doubleSpinBox_lc_notch.setEnabled(True)
        self.ui.doubleSpinBox_hc_notch.setEnabled(True)

        # initialize channel selection panel in main view GUI
        self.channels_to_show_idx = []
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if idx < self.config['eeg_channels']:
                    # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
                    self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
                    # self.table_channels.setItemSelected(self.table_channels.item(x, y), True) # Qt4 only
                    self.channels_to_show_idx.append(idx)
                else:
                    self.ui.table_channels.setItem(x, y,
                                                   QTableWidgetItem("N/A"))
                    self.ui.table_channels.item(x, y).setFlags(
                        QtCore.Qt.NoItemFlags)
                    self.ui.table_channels.item(x, y).setTextAlignment(
                        QtCore.Qt.AlignCenter)
                idx += 1

        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        self.channel_to_scale_row_index = -1
        self.channel_to_scale_column_index = -1

        self.selected_channel_row_index = 0
        self.selected_channel_column_index = 0
        self.single_channel_scale = 1

        # MRCP tab
        self.init_class_epoch_counter_table()
        self.init_class_bad_epoch_table()
        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False
        self.raw_trial_MRCP = np.ndarray([])
        self.processed_trial_MRCP = np.ndarray([])
        self.total_trials_MRCP = []
        self.total_trials_raw_MRCP = []
        self.total_MRCP_inds = []
        self.temp_counter = 0
        self.temp_counter_list = []
        self.input_temp_list = []
        self.display_temp_list = []
        self.selected_temp = ""
        self.list_selected_temp = []
        self.template_buffer = np.zeros(
            (6 * int(self.sr.sample_rate), self.config['eeg_channels']),
            dtype=float)

        self.b_lp, self.a_lp = Utils.butter_lowpass(3,
                                                    int(self.sr.sample_rate),
                                                    2)
        self.b_hp, self.a_hp = Utils.butter_highpass(0.05,
                                                     int(self.sr.sample_rate),
                                                     2)
        self.initial_condition_list_lp = Utils.construct_initial_condition_list(
            self.b_lp, self.a_lp, self.config['eeg_channels'])
        self.initial_condition_list_hp = Utils.construct_initial_condition_list(
            self.b_hp, self.a_hp, self.config['eeg_channels'])
        self.ui.pushButton_bad_epoch.clicked.connect(
            self.onClicked_button_bad_epoch)
        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        logger.info('GUI show')
        self.show()

    def init_panel_GUI_stop_recording(self):
        """
        Initialize experimenter GUI when the stop-recording button is pressed, to
        prepare for the next run.

        """
        # Tabs
        self.ui.tab_experimental_protocol.setEnabled(False)
        self.ui.tab_subjec_information.setEnabled(False)
        self.ui.tab_event_and_file_management.setEnabled(False)
        # self.ui.tab_Oscilloscope.setEnabled(False)
        self.ui.tab_experiment_type.setEnabled(False)

        # Experimental protocol
        self.task_list = []
        self.new_task_list = []
        self.task_descriptor_list = []
        self.task_image_path = ""
        self.task_image_path_list = []
        self.task_sound_path = ""
        self.task_sound_path_list = []
        self.task_table = np.ndarray([])
        self.new_task_table = np.ndarray([])
        self.task_counter = 0
        self.protocol_path = ""
        # Button
        self.init_task_name_table()
        self.ui.groupBox_sequence_manager.setEnabled(False)

        # Event management tab
        self.event_timestamp_list = []
        self.init_task_event_number_table()
        self.event_list = []
        # Button
        self.ui.pushButton_save_event_number.clicked.connect(
            self.onClicked_button_save_event_number)
        self.event_file_path = ""
        self.mrcp_template_file_path = ""
        self.raw_eeg_file_path = ""
        self.raw_mrcp_file_path = ""
        self.subject_file_path = ""

        # Oscilloscope
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_notch.setChecked(True)
        # self.ui.checkBox_car.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_car_filter")))
        # self.ui.checkBox_bandpass.setChecked(
        #     int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        # self.ui.pushButton_apply_notch.setEnabled(False)
        self.ui.doubleSpinBox_lc_notch.setEnabled(False)
        self.ui.doubleSpinBox_hc_notch.setEnabled(False)

        # # initialize channel selection panel in main view GUI
        # self.channels_to_show_idx = []
        # idx = 0
        # for y in range(0, 4):
        #     for x in range(0, NUM_X_CHANNELS):
        #         if idx < self.config['eeg_channels']:
        #             # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
        #             self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
        #             # self.table_channels.setItemSelected(self.table_channels.item(x, y), True) # Qt4 only
        #             self.channels_to_show_idx.append(idx)
        #         else:
        #             self.ui.table_channels.setItem(x, y,
        #                                            QTableWidgetItem("N/A"))
        #             self.ui.table_channels.item(x, y).setFlags(
        #                 QtCore.Qt.NoItemFlags)
        #             self.ui.table_channels.item(x, y).setTextAlignment(
        #                 QtCore.Qt.AlignCenter)
        #         idx += 1

        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        self.channel_to_scale_row_index = -1
        self.channel_to_scale_column_index = -1

        self.selected_channel_row_index = 0
        self.selected_channel_column_index = 0
        self.single_channel_scale = 1

        # MRCP tab
        self.init_class_epoch_counter_table()
        self.init_class_bad_epoch_table()
        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False
        self.raw_trial_MRCP = np.ndarray([])
        self.processed_trial_MRCP = np.ndarray([])
        self.total_trials_MRCP = []
        self.total_trials_raw_MRCP = []
        self.total_MRCP_inds = []
        self.temp_counter = 0
        self.temp_counter_list = []
        self.input_temp_list = []
        self.display_temp_list = []
        self.selected_temp = ""
        self.list_selected_temp = []
        self.template_buffer = np.zeros(
            (6 * int(self.sr.sample_rate), self.config['eeg_channels']),
            dtype=float)

        self.b_lp, self.a_lp = Utils.butter_lowpass(3,
                                                    int(self.sr.sample_rate),
                                                    2)
        self.b_hp, self.a_hp = Utils.butter_highpass(0.05,
                                                     int(self.sr.sample_rate),
                                                     2)
        self.initial_condition_list_lp = Utils.construct_initial_condition_list(
            self.b_lp, self.a_lp, self.config['eeg_channels'])
        self.initial_condition_list_hp = Utils.construct_initial_condition_list(
            self.b_hp, self.a_hp, self.config['eeg_channels'])
        self.ui.pushButton_bad_epoch.clicked.connect(
            self.onClicked_button_bad_epoch)
        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        self.show()

    def init_SV_GUI(self):
        """
        Initialize subject view GUI
        """
        self.SVStatus = 0
        self.starttime = 0
        self.SV_time = 0

        self.idle_time = int(self.ui.idleTimeLineEdit.text())
        self.focus_time = self.idle_time + int(
            self.ui.focusTimeLineEdit.text())
        self.prepare_time = self.focus_time + int(
            self.ui.prepareTimeLineEdit.text())
        self.two_time = self.prepare_time + int(self.ui.twoTimeLineEdit.text())
        self.one_time = self.two_time + int(self.ui.oneTimeLineEdit.text())
        self.task_time = self.one_time + int(self.ui.taskTimeLineEdit.text())
        self.relax_time = self.task_time + 2
        self.cycle_time = self.relax_time
        self.is_experiment_on = False

    def init_scope_GUI(self):
        """
        Initialize oscilloscope GUI
        """
        self.bool_parser = {True: '1', False: '0'}

        # PyQTGraph plot initialization
        self.win = pg.GraphicsWindow()
        self.win.setWindowTitle('EEG Scope')
        self.win.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        self.win.keyPressEvent = self.keyPressEvent
        # self.win.show()
        self.main_plot_handler = self.win.addPlot()
        self.win.resize(1280, 800)

        # Scales available in the GUI. If you change the options in the GUI
        # you should change them here as well
        self.scales_range = [1, 10, 25, 50, 100, 250, 500, 1000, 2500, 100000]
        self.single_scales_range = [
            0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.5, 1.7, 1.8, 2
        ]

        # Scale in uV
        self.scale = 100
        # Time window to show in seconds
        self.seconds_to_show = 10

        # Y Tick labels. Use values from the config file.
        self.channel_labels = []
        values = []
        ''' For non-LSL systems having no channel names
        for x in range(0, self.config['eeg_channels']):
            if (self.show_channel_names):
                self.channel_labels.append("(" + str(x + 1) + ") " +
                    self.scope_settings.get("internal",
                    "channel_names_" + self.device_name + str(
                    self.config['eeg_channels'])).split(', ')[x])
            else:
                self.channel_labels.append('CH ' + str(x + 1))
        '''
        ch_names = np.array(self.sr.get_channel_names())
        self.channel_labels = ch_names[self.sr.get_eeg_channels()]
        for x in range(0, len(self.channels_to_show_idx)):
            values.append((-x * self.scale,
                           self.channel_labels[self.channels_to_show_idx[x]]))

        values_axis = []
        values_axis.append(values)
        values_axis.append([])

        # Update table labels with current names
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if (idx < self.config['eeg_channels']):
                    self.ui.table_channels.item(x, y).setText(
                        self.channel_labels[idx])
                idx += 1

        # Plot initialization
        # Plotting colors. If channels > 16, colors will roll back to the beginning
        self.colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255],
                                [255, 255, 0], [0, 255, 255], [255, 0, 255],
                                [128, 100, 100], [0, 128, 0], [0, 128, 128],
                                [128, 128, 0], [255, 128, 128], [128, 0, 128],
                                [128, 255, 0], [255, 128, 0], [0, 255, 128],
                                [128, 0, 255]])

        # pen = pg.mkColor(self.colors)
        # self.main_plot_handler.getAxis('left').setTextPen('b')

        self.main_plot_handler.getAxis('left').setTicks(values_axis)

        self.main_plot_handler.setRange(
            xRange=[0, self.seconds_to_show],
            yRange=[
                +1.5 * self.scale,
                -0.5 * self.scale - self.scale * self.config['eeg_channels']
            ])
        self.main_plot_handler.disableAutoRange()
        self.main_plot_handler.showGrid(y=True)
        self.main_plot_handler.setLabel(axis='left',
                                        text='Scale (uV): ' + str(self.scale))
        self.main_plot_handler.setLabel(axis='bottom', text='Time (s)')

        # X axis
        self.x_ticks = np.zeros(self.config['sf'] * self.seconds_to_show)
        for x in range(0, self.config['sf'] * self.seconds_to_show):
            self.x_ticks[x] = (x * 1) / float(self.config['sf'])

        # We want a lightweight scope, so we downsample the plotting to 64 Hz
        self.subsampling_value = self.config['sf'] / 64

        # EEG data for plotting
        self.data_plot = np.zeros((self.config['sf'] * self.seconds_to_show,
                                   self.config['eeg_channels']))

        print('self.data plot shape: ', self.data_plot.shape)

        self.curve_eeg = []
        for x in range(0, len(self.channels_to_show_idx)):
            self.curve_eeg.append(
                self.main_plot_handler.plot(
                    x=self.x_ticks,
                    y=self.data_plot[:, self.channels_to_show_idx[x]],
                    pen=pg.mkColor(self.colors[self.channels_to_show_idx[x] %
                                               16, :])))
        # self.curve_eeg[-1].setDownsampling(ds=self.subsampling_value, auto=False, method="mean")

        # Events data
        self.events_detected = []
        self.events_curves = []
        self.events_text = []

        # CAR initialization
        self.apply_car = False
        self.matrix_car = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        self.matrix_car[:, :] = -1 / float(self.config['eeg_channels'])
        np.fill_diagonal(self.matrix_car,
                         1 - (1 / float(self.config['eeg_channels'])))

        # Laplacian initialization. TO BE DONE
        self.matrix_lap = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        np.fill_diagonal(self.matrix_lap, 1)
        self.matrix_lap[2, 0] = -1
        self.matrix_lap[0, 2] = -0.25
        self.matrix_lap[0, 2] = -0.25

        # BP initialization
        self.apply_bandpass = 1
        if (self.apply_bandpass):
            self.ui.doubleSpinBox_lp.setValue(40.0)
            self.ui.doubleSpinBox_hp.setValue(1.0)
            self.ui.doubleSpinBox_lp.setMinimum(0)
            self.ui.doubleSpinBox_lp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_lp.setSingleStep(1)
            self.ui.doubleSpinBox_hp.setMinimum(0)
            self.ui.doubleSpinBox_hp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_hp.setSingleStep(1)
            self.ui.pushButton_bp.click()

        # notch initialization
        self.apply_notch = 1
        if (self.apply_notch):
            self.ui.doubleSpinBox_lc_notch.setValue(58.0)
            self.ui.doubleSpinBox_hc_notch.setValue(62.0)
            self.ui.doubleSpinBox_lc_notch.setMinimum(0.1)
            self.ui.doubleSpinBox_lc_notch.setMaximum(self.sr.sample_rate / 2 -
                                                      0.1)
            self.ui.doubleSpinBox_lc_notch.setSingleStep(1)
            self.ui.doubleSpinBox_hc_notch.setMinimum(0.1)
            self.ui.doubleSpinBox_hc_notch.setMaximum(self.sr.sample_rate / 2 -
                                                      0.1)
            self.ui.doubleSpinBox_hc_notch.setSingleStep(1)
            self.ui.pushButton_apply_notch.click()

        self.ui.checkBox_bandpass.setChecked(self.apply_bandpass)

        self.b_bandpass_scope_refilter = self.b_bandpass_scope
        self.a_bandpass_scope_refilter = self.a_bandpass_scope
        self.zi_bandpass_scope_refilter = self.zi_bandpass_scope
        self.b_notch_scope_refilter = self.b_notch_scope
        self.a_notch_scope_refilter = self.a_notch_scope
        self.zi_notch_scope_refilter = self.zi_notch_scope

        self.update_title_scope()

        # Help variables
        self.show_help = 0
        self.help = pg.TextItem(
            "CNBI EEG Scope v0.3 \n" +
            "----------------------------------------------------------------------------------\n"
            + "C: De/activate CAR Filter\n" +
            "B: De/activate Bandpass Filter (with current settings)\n" +
            "T: Show/hide TiD events\n" + "L: Show/hide LPT events\n" +
            "K: Show/hide Key events. If not shown, they are NOT recorded!\n" +
            "0-9: Add a user-specific Key event. Do not forget to write down why you marked it.\n"
            +
            "Up, down arrow keys: Increase/decrease the scale, steps of 10 uV\n"
            +
            "Left, right arrow keys: Increase/decrease the time to show, steps of 1 s\n"
            +
            "Spacebar: Stop the scope plotting, whereas data acquisition keeps running (EXPERIMENTAL)\n"
            + "Esc: Exits the scope",
            anchor=(0, 0),
            border=(70, 70, 70),
            fill=pg.mkColor(20, 20, 20, 200),
            color=(255, 255, 255))

        # Stop plot functionality
        self.stop_plot = 0

        # Force repaint even when we shouldn't repaint.
        self.force_repaint = 1

    def init_timer(self):
        """
        Initialize main timer used for refreshing oscilloscope window. This refreshes every 20ms.
        """
        self.os_time_list1 = []
        QtCore.QCoreApplication.processEvents()
        QtCore.QCoreApplication.flush()
        self.timer = QtCore.QTimer(self)
        self.timer.setTimerType(QtCore.Qt.PreciseTimer)
        self.timer.timeout.connect(self.update_loop)
        self.timer.start(20)

    def init_Runtimer(self):
        """
        Initialize task related timer which controls the timing for visual cues
        """
        self.time_show = 0
        self.os_time_list = []

        self.Runtimer = task.LoopingCall(self.Time)

    def init_eye_tracker(self):
        self.eye_tracker_window.tableWidget.setRowCount(9)

        self.eye_tracker_window.pushButton_1.clicked.connect(self.update_cal1)
        self.eye_tracker_window.pushButton_2.clicked.connect(self.update_cal2)
        self.eye_tracker_window.pushButton_3.clicked.connect(self.update_cal3)
        self.eye_tracker_window.pushButton_4.clicked.connect(self.update_cal4)
        self.eye_tracker_window.pushButton_5.clicked.connect(self.update_cal5)
        self.eye_tracker_window.pushButton_6.clicked.connect(self.update_cal6)
        self.eye_tracker_window.pushButton_7.clicked.connect(self.update_cal7)
        self.eye_tracker_window.pushButton_8.clicked.connect(self.update_cal8)
        self.eye_tracker_window.pushButton_9.clicked.connect(self.update_cal9)

        self.eye_tracker_window.pushButton_12.clicked.connect(
            self.update_current_gaze_loc)
        self.eye_tracker_window.pushButton_13.clicked.connect(
            self.recording_data)
        self.eye_tracker_window.pushButton_14.clicked.connect(
            self.recording_stop)

        self.rec_time = int(self.eye_tracker_window.LineEdit_rec.text())
        # self.LineEdit_rec.clicked.conntect(self.update_rec_time(int(self.LineEdit_rec.text())))

        self.gaze_x = 0
        self.gaze_y = 0
        self.table_row = 0
        self.table_col = 0
        # print(self.gaze_x, self.gaze_y)

        self.UTC_time = 0

        # List of values in 9 points
        self.points = np.zeros((9, 2))

        self.gaze_loc = 0

    def trigger_help(self):
        """Shows / hide help in the scope window"""
        if self.show_help:
            self.help.setPos(0, self.scale)
            self.main_plot_handler.addItem(self.help)
            self.help.setZValue(1)
        else:
            self.main_plot_handler.removeItem(self.help)

    def eventFilter(self, source, event):
        """
        Select single channel to scale by right clicking
        :param source: channel table content
        :param event: right mouse button press
        :return: ID of the selected channel
        """
        if (event.type() == QtCore.QEvent.MouseButtonPress
                and event.buttons() == QtCore.Qt.RightButton
                and source is self.ui.table_channels.viewport()):
            item = self.ui.table_channels.itemAt(event.pos())
            # print('Global Pos:', event.globalPos())
            if item is not None:
                self.channel_to_scale_row_index = item.row()
                self.channel_to_scale_column_index = item.column()
                print("RRRRRRRRR", self.channel_to_scale_row_index,
                      self.channel_to_scale_column_index)

                # print('Table Item:', item.row(), item.column())
                # self.menu = QMenu(self)
                # self.menu.addAction(item.text())         #(QAction('test'))
                # menu.exec_(event.globalPos())
        return super(MainView, self).eventFilter(source, event)
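
The common average reference (CAR) matrix built in init_scope_GUI subtracts the mean of all EEG channels from every channel. A small standalone sketch of the same construction, applied to a random window to verify the equivalence (the channel count is arbitrary):

import numpy as np

n_ch = 8
matrix_car = np.zeros((n_ch, n_ch), dtype=float)
matrix_car[:, :] = -1 / float(n_ch)
np.fill_diagonal(matrix_car, 1 - (1 / float(n_ch)))

window = np.random.randn(n_ch, 100)        # channels x samples
car_window = np.dot(matrix_car, window)    # same as window - window.mean(axis=0)
print(np.allclose(car_window, window - window.mean(axis=0)))   # True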
Example #9
    def __init__(self, lpttype='USB2LPT', portaddr=None, verbose=True):
        self.evefile = None
        self.lpttype = lpttype
        self.verbose = verbose

        if self.lpttype in ['USB2LPT', 'DESKTOP']:
            if self.lpttype == 'USB2LPT':
                if ctypes.sizeof(ctypes.c_voidp) == 4:
                    dllname = 'LptControl_USB2LPT32.dll'  # 32 bit
                else:
                    dllname = 'LptControl_USB2LPT64.dll'  # 64 bit
                if portaddr not in [0x278, 0x378]:
                    self.print('Warning: LPT port address %d is unusual.' %
                               portaddr)

            elif self.lpttype == 'DESKTOP':
                if ctypes.sizeof(ctypes.c_voidp) == 4:
                    dllname = 'LptControl_Desktop32.dll'  # 32 bit
                else:
                    dllname = 'LptControl_Desktop64.dll'  # 64 bit
                if portaddr not in [0x278, 0x378]:
                    self.print('Warning: LPT port address %d is unusual.' %
                               portaddr)

            self.portaddr = portaddr
            search = []
            search.append(os.path.dirname(__file__) + '/' + dllname)
            search.append(os.path.dirname(__file__) + '/libs/' + dllname)
            search.append(os.getcwd() + '/' + dllname)
            search.append(os.getcwd() + '/libs/' + dllname)
            for f in search:
                if os.path.exists(f):
                    dllpath = f
                    break
            else:
                self.print('ERROR: Cannot find the required library %s' %
                           dllname)
                raise RuntimeError

            self.print('Loading %s' % dllpath)
            self.lpt = ctypes.cdll.LoadLibrary(dllpath)

        elif self.lpttype == 'ARDUINO':
            import serial, serial.tools.list_ports
            BAUD_RATE = 115200

            # portaddr should be None or in the form of 'COM1', 'COM2', etc.
            if portaddr is None:
                arduinos = [x for x in serial.tools.list_ports.grep('Arduino')]
                if len(arduinos) == 0:
                    print('No Arduino found. Stop.')
                    sys.exit()

                for i, a in enumerate(arduinos):
                    print('Found', a[0])
                try:
                    com_port = arduinos[0].device
                except AttributeError:  # depends on Python distribution
                    com_port = arduinos[0][0]
            else:
                com_port = portaddr

            self.ser = serial.Serial(com_port, BAUD_RATE)
            time.sleep(1)  # doesn't work without this delay. why?
            print('Connected to %s.' % com_port)

        elif self.lpttype == 'SOFTWARE':
            from pycnbi.stream_receiver.stream_receiver import StreamReceiver
            self.print('Using software trigger')

            # get data file location
            LSL_SERVER = 'StreamRecorderInfo'
            inlet = cnbi_lsl.start_client(LSL_SERVER)
            fname = inlet.info().source_id()
            if fname[-4:] != '.pcl':
                self.print('ERROR: Received wrong record file name format %s' %
                           fname)
                sys.exit(-1)
            evefile = fname[:-8] + '-eve.txt'
            eveoffset_file = fname[:-8] + '-eve-offset.txt'
            self.print('Event file is: %s' % evefile)
            self.evefile = open(evefile, 'a')

            # check server LSL time server integrity
            self.print(
                "Checking LSL server's timestamp integrity for logging software triggers."
            )
            amp_name, amp_serial = pu.search_lsl()
            sr = StreamReceiver(window_size=1,
                                buffer_size=1,
                                amp_serial=amp_serial,
                                eeg_only=False,
                                amp_name=amp_name)
            local_time = pylsl.local_clock()
            server_time = sr.get_window_list()[1][-1]
            lsl_time_offset = local_time - server_time
            with open(eveoffset_file, 'a') as f:
                f.write('Local time: %.6f, Server time: %.6f, Offset: %.6f\n' %
                        (local_time, server_time, lsl_time_offset))
            self.print('LSL timestamp offset (%.3f) saved to %s' %
                       (lsl_time_offset, eveoffset_file))

        elif self.lpttype == 'FAKE' or self.lpttype is None or self.lpttype is False:
            self.print('WARNING: Using a fake trigger.')
            self.lpttype = 'FAKE'
            self.lpt = None

        else:
            self.print('ERROR: Unknown LPT port type %s' % lpttype)
            sys.exit(-1)
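
The 32-bit versus 64-bit DLL selection above relies on the size of a C void pointer in the running interpreter. A quick standalone check of that idiom:

import ctypes

# 4 bytes under a 32-bit Python interpreter, 8 bytes under a 64-bit one
bits = ctypes.sizeof(ctypes.c_voidp) * 8
print('Running under a %d-bit Python interpreter' % bits)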
Example #10
TIME_INDEX = None  # integer or None. None = average of raw values of the current window
SHOW_PSD = False

import os
import mne
import numpy as np
import matplotlib.pyplot as plt
from pycnbi.stream_receiver.stream_receiver import StreamReceiver
import pycnbi.utils.pycnbi_utils as pu
import pycnbi.utils.q_common as qc
mne.set_log_level('ERROR')
# actually improves performance for multitaper
os.environ['OMP_NUM_THREADS'] = '1'

amp_name, amp_serial = pu.search_lsl()
stream_receiver = StreamReceiver(
    window_size=1, buffer_size=1, amp_serial=amp_serial, eeg_only=False, amp_name=amp_name)
sfreq = stream_receiver.get_sample_rate()
watchdog = qc.Timer()
tm = qc.Timer(autoreset=True)
trg_ch = stream_receiver.get_trigger_channel()
last_ts = 0
qc.print_c('Trigger channel: %d' % trg_ch, 'G')

if SHOW_PSD:
    psde = mne.decoding.PSDEstimator(sfreq=sfreq, fmin=1, fmax=50, bandwidth=None,
                                     adaptive=False, low_bias=True, n_jobs=1, normalization='length', verbose=None)

sfreq = 512
interval = 1. / sfreq
fig, ax = plt.subplots(1, 1)
time_range = 1
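
The script is truncated here; a hedged sketch of the acquisition loop it presumably continues with, reusing only calls that appear elsewhere in these examples (acquire, get_window, PSDEstimator.transform). The sleep interval is an arbitrary choice.

while True:
    stream_receiver.acquire()
    window, tslist = stream_receiver.get_window()   # window = times x channels
    window = window.T                               # -> channels x times

    if SHOW_PSD:
        # PSDEstimator expects (epochs, channels, times)
        psd = psde.transform(window.reshape((1, window.shape[0], window.shape[1])))
        print('PSD shape:', psd.shape)

    tm.sleep_atleast(0.05)                          # throttle the loop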
Example #11
class Hardware(_hardware.HardwareAdditionalMethods):
    """
    Hardware class controls recording data in a different thread
    """
    def __init__(self):
        logger.info('Hardware recorder initialized.')
        self.is_recording_running = False
        self.recorded_data = np.array([])
        self.current_window = np.ndarray([])
        self.current_time_stamps = np.ndarray([])
        self.MRCP_window_size = 6
        self.finished_recording = False
        self.lsl_time_list = []
        self.server_time_list = []
        self.offset_time_list = []
        self.eeg_file_path = ''



    def connect_with_hardware(self):
        pass

    def is_recording_running(self):
        """
        Check if record start or stop button pressed
        """
        return self.is_recording_running

    def start_recording_data(self):
        """
        Start recording data, set the flag to True and create a new thread
        """
        self.is_recording_running = True
        self.streamReceiver = StreamReceiver(buffer_size=0,
                                             amp_serial=Variables.get_amp_serial(), amp_name=Variables.get_amp_name())
        self.thread = threading.Thread(target=self.record)
        self.thread.start()


    def stop_recording_data(self):
        """
        Stop recording by setting the flag to False
        """
        self.is_recording_running = False


    def cancel_recording_data(self):
        """
        Interrupt recording and delete current buffer
        """
        self.is_recording_running = False
        self.recorded_data = np.array([])

    def get_current_window(self):
        """
        Get one window of data
        :return: window data
        """
        return self.current_window

    def get_LSL_clock(self):
        """
        Get timestamps of LSL
        :return: current LSL timestamps
        """
        return self.streamReceiver.get_lsl_clock()

    def get_lsl_offset(self):
        """
        Get offset calculated by LSL, which is the delay of transmission
        :return: LSL offset
        """
        return self.streamReceiver.get_lsl_offset()

    def get_server_clock(self):
        """
        Get timestamps from instrument
        :return: timestamps of instrument
        """
        return self.streamReceiver.get_server_clock()

    def set_eeg_file_path(self):
        """
        Set recording directory by reading path from Variable class
        """
        self.eeg_file_path = Variables.get_raw_eeg_file_path()
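
The record method used as the thread target is not part of this listing. A hypothetical sketch of such a loop, built only from calls and attributes that appear in the class above (acquire, get_buffer, the recording flag); the exact behavior of the real method may differ.

import time

def record(self):
    # hypothetical thread body: keep pulling new chunks while the flag is set
    while self.is_recording_running:
        self.streamReceiver.acquire()
        time.sleep(0.001)
    # once stopped, copy everything accumulated in the receiver buffer
    buffers, timestamps = self.streamReceiver.get_buffer()
    self.recorded_data = buffers
    self.finished_recording = True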
Example #12
    def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
        """
        Params
        ------
        classifier: classifier file
        spatial: spatial filter to use
        buffer_size: length of the signal buffer in seconds
        """

        self.classifier = classifier
        self.buffer_sec = buffer_size
        self.fake = fake
        self.amp_serial = amp_serial
        self.amp_name = amp_name

        if self.fake == False:
            model = qc.load_obj(self.classifier)
            if model is None:
                self.print('Error loading %s' % model)
                sys.exit(-1)
            self.cls = model['cls']
            self.psde = model['psde']
            self.labels = list(self.cls.classes_)
            self.label_names = [model['classes'][k] for k in self.labels]
            self.spatial = model['spatial']
            self.spectral = model['spectral']
            self.notch = model['notch']
            self.w_seconds = model['w_seconds']
            self.w_frames = model['w_frames']
            self.wstep = model['wstep']
            self.sfreq = model['sfreq']
            if not int(self.sfreq * self.w_seconds) == self.w_frames:
                raise RuntimeError('sfreq * w_sec %d != w_frames %d' % (int(self.sfreq * self.w_seconds), self.w_frames))

            if 'multiplier' in model:
                self.multiplier = model['multiplier']
            else:
                self.multiplier = 1

            # Stream Receiver
            self.sr = StreamReceiver(window_size=self.w_seconds, amp_name=self.amp_name, amp_serial=self.amp_serial)
            if self.sfreq != self.sr.sample_rate:
                raise RuntimeError('Amplifier sampling rate (%.1f) != model sampling rate (%.1f). Stop.' % (
                    self.sr.sample_rate, self.sfreq))

            # Map channel indices based on channel names of the streaming server
            self.spatial_ch = model['spatial_ch']
            self.spectral_ch = model['spectral_ch']
            self.notch_ch = model['notch_ch']
            self.ref_new = model['ref_new']
            self.ref_old = model['ref_old']
            self.ch_names = self.sr.get_channel_names()
            mc = model['ch_names']
            self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
            if self.spatial_ch is not None:
                self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
            if self.spectral_ch is not None:
                self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
            if self.notch_ch is not None:
                self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]
            if self.ref_new is not None:
                self.ref_new = self.ch_names.index(mc[model['ref_new']])
            if self.ref_old is not None:
                self.ref_old = self.ch_names.index(mc[model['ref_old']])

            # PSD buffer
            psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames)))
            self.psd_shape = psd_temp.shape
            self.psd_size = psd_temp.size
            self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
            self.ts_buffer = []

        else:
            # Fake left-right decoder
            model = None
            self.psd_shape = None
            self.psd_size = None
            # TODO: parameterize directions using fake_dirs
            self.labels = [11, 9]
            self.label_names = ['LEFT_GO', 'RIGHT_GO']
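
The index remapping in the constructor resolves channel names stored with the model into indices of the live stream. A toy illustration of the same list-comprehension pattern with made-up channel lists:

# channel names as seen on the running amplifier (live stream)
ch_names = ['TRIGGER', 'Fz', 'Cz', 'Pz', 'Oz']
# channel names saved with the classifier, and picks stored as indices into that list
model_ch_names = ['Fz', 'Cz', 'Pz']
model_picks = [0, 1, 2]

# same pattern as self.picks above: model index -> name -> live stream index
picks = [ch_names.index(model_ch_names[p]) for p in model_picks]
print(picks)   # [1, 2, 3]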
Example #13
class BCIDecoder(object):
    """
    Decoder class

    The label order of self.labels and self.label_names match likelihood orders computed by get_prob()

    """

    def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
        """
        Params
        ------
        classifier: classifier file
        spatial: spatial filter to use
        buffer_size: length of the signal buffer in seconds
        """

        self.classifier = classifier
        self.buffer_sec = buffer_size
        self.fake = fake
        self.amp_serial = amp_serial
        self.amp_name = amp_name

        if self.fake == False:
            model = qc.load_obj(self.classifier)
            if model is None:
                self.print('Error loading %s' % model)
                sys.exit(-1)
            self.cls = model['cls']
            self.psde = model['psde']
            self.labels = list(self.cls.classes_)
            self.label_names = [model['classes'][k] for k in self.labels]
            self.spatial = model['spatial']
            self.spectral = model['spectral']
            self.notch = model['notch']
            self.w_seconds = model['w_seconds']
            self.w_frames = model['w_frames']
            self.wstep = model['wstep']
            self.sfreq = model['sfreq']
            if not int(self.sfreq * self.w_seconds) == self.w_frames:
                raise RuntimeError('sfreq * w_sec %d != w_frames %d' % (int(self.sfreq * self.w_seconds), self.w_frames))

            if 'multiplier' in model:
                self.multiplier = model['multiplier']
            else:
                self.multiplier = 1

            # Stream Receiver
            self.sr = StreamReceiver(window_size=self.w_seconds, amp_name=self.amp_name, amp_serial=self.amp_serial)
            if self.sfreq != self.sr.sample_rate:
                raise RuntimeError('Amplifier sampling rate (%.1f) != model sampling rate (%.1f). Stop.' % (
                    self.sr.sample_rate, self.sfreq))

            # Map channel indices based on channel names of the streaming server
            self.spatial_ch = model['spatial_ch']
            self.spectral_ch = model['spectral_ch']
            self.notch_ch = model['notch_ch']
            self.ref_new = model['ref_new']
            self.ref_old = model['ref_old']
            self.ch_names = self.sr.get_channel_names()
            mc = model['ch_names']
            self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
            if self.spatial_ch is not None:
                self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
            if self.spectral_ch is not None:
                self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
            if self.notch_ch is not None:
                self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]
            if self.ref_new is not None:
                self.ref_new = self.ch_names.index(mc[model['ref_new']])
            if self.ref_old is not None:
                self.ref_old = self.ch_names.index(mc[model['ref_old']])

            # PSD buffer
            psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames)))
            self.psd_shape = psd_temp.shape
            self.psd_size = psd_temp.size
            self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
            self.ts_buffer = []

        else:
            # Fake left-right decoder
            model = None
            self.psd_shape = None
            self.psd_size = None
            # TODO: parameterize directions using fake_dirs
            self.labels = [11, 9]
            self.label_names = ['LEFT_GO', 'RIGHT_GO']

    def print(self, *args):
        if len(args) > 0: print('[BCIDecoder] ', end='')
        print(*args)

    def get_labels(self):
        """
        Returns
        -------
        Class label numbers in the same order as the likelihoods returned by get_prob()
        """
        return self.labels

    def get_label_names(self):
        """
        Returns
        -------
        Class label names in the same order as get_labels()
        """
        return self.label_names

    def start(self):
        pass

    def stop(self):
        pass

    def get_prob(self):
        """
        Read the latest window

        Returns
        -------
        The likelihood P(X|C), where X=window, C=model
        """
        if self.fake:
            # fake decoder: biased likelihood for the first class
            probs = [random.uniform(0.0, 1.0)]
            # the remaining classes share the rest equally
            p_others = (1 - probs[0]) / (len(self.labels) - 1)
            for x in range(1, len(self.labels)):
                probs.append(p_others)
            time.sleep(0.0625)  # simulated delay for PSD + RF
        else:
            self.sr.acquire()
            w, ts = self.sr.get_window()  # w = times x channels
            w = w.T  # -> channels x times

            # apply filters. Important: maintain the original channel order at this point.
            pu.preprocess(w, sfreq=self.sfreq, spatial=self.spatial, spatial_ch=self.spatial_ch,
                          spectral=self.spectral, spectral_ch=self.spectral_ch, notch=self.notch,
                          notch_ch=self.notch_ch, multiplier=self.multiplier)

            # select the same channels used for training
            w = w[self.picks]

            # debug: show max - min
            # c=1; print( '### %d: %.1f - %.1f = %.1f'% ( self.picks[c], max(w[c]), min(w[c]), max(w[c])-min(w[c]) ) )

            # psd = channels x freqs
            psd = self.psde.transform(w.reshape((1, w.shape[0], w.shape[1])))

            # update psd buffer ( < 1 msec overhead )
            self.psd_buffer = np.concatenate((self.psd_buffer, psd), axis=0)
            self.ts_buffer.append(ts[0])
            if ts[0] - self.ts_buffer[0] > self.buffer_sec:
                # search speed comparison for ordered arrays:
                # http://stackoverflow.com/questions/16243955/numpy-first-occurence-of-value-greater-than-existing-value
                t_index = np.searchsorted(self.ts_buffer, ts[0] - self.buffer_sec)
                self.ts_buffer = self.ts_buffer[t_index:]
                self.psd_buffer = self.psd_buffer[t_index:, :, :]  # numpy delete is slower
            # assert ts[0] - self.ts_buffer[0] <= self.buffer_sec

            # make a feature vector and classify
            feats = np.concatenate(psd[0]).reshape(1, -1)

            # compute likelihoods
            probs = self.cls.predict_proba(feats)[0]

        return probs

    def get_prob_unread(self):
        return self.get_prob()

    def get_psd(self):
        """
        Returns
        -------
        The latest computed PSD
        """
        return self.psd_buffer[-1].reshape((1, -1))

    def is_ready(self):
        """
        Ready to decode? Returns True if buffer is not empty.
        """
        return self.sr.is_ready()
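
# --- Usage sketch (added; not part of the original example) ---
# A minimal, hypothetical polling loop for the decoder above. The classifier
# path 'classifier.pkl' and the number of polled windows are assumptions for
# illustration; the usual module imports (time, etc.) are assumed, and in
# practice amp_name/amp_serial would come from an LSL stream search.
if __name__ == '__main__':
    decoder = BCIDecoder(classifier='classifier.pkl', buffer_size=1.0, fake=False)
    labels = decoder.get_label_names()
    while not decoder.is_ready():
        time.sleep(0.1)  # wait until the stream buffer has data
    for _ in range(20):
        probs = decoder.get_prob()  # one likelihood per class, same order as labels
        print(' | '.join('%s %.2f' % (n, p) for n, p in zip(labels, probs)))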
Ejemplo n.º 14
0
    def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
        """
        Params
        ------
        classifier: classifier file
        buffer_size: length of the signal buffer in seconds
        fake: if True, decode randomly without a classifier
        amp_name, amp_serial: LSL stream to receive from
        """

        self.classifier = classifier
        self.buffer_sec = buffer_size
        self.fake = fake
        self.amp_serial = amp_serial
        self.amp_name = amp_name

        if not self.fake:
            model = qc.load_obj(self.classifier)
            if model is None:
                logger.error('Classifier model is None.')
                raise ValueError
            self.cls = model['cls']
            self.psde = model['psde']
            self.labels = list(self.cls.classes_)
            self.label_names = [model['classes'][k] for k in self.labels]
            self.spatial = model['spatial']
            self.spectral = model['spectral']
            self.notch = model['notch']
            self.w_seconds = model['w_seconds']
            self.w_frames = model['w_frames']
            self.wstep = model['wstep']
            self.sfreq = model['sfreq']
            if 'decim' not in model:
                model['decim'] = 1
            self.decim = model['decim']
            if not int(round(self.sfreq * self.w_seconds)) == self.w_frames:
                logger.error('sfreq * w_sec %d != w_frames %d' % (int(round(self.sfreq * self.w_seconds)), self.w_frames))
                raise RuntimeError

            if 'multiplier' in model:
                self.multiplier = model['multiplier']
            else:
                self.multiplier = 1

            # Stream Receiver
            self.sr = StreamReceiver(window_size=self.w_seconds, amp_name=self.amp_name, amp_serial=self.amp_serial)
            if self.sfreq != self.sr.sample_rate:
                logger.error('Amplifier sampling rate (%.3f) != model sampling rate (%.3f). Stop.' % (self.sr.sample_rate, self.sfreq))
                raise RuntimeError

            # Map channel indices based on channel names of the streaming server
            self.spatial_ch = model['spatial_ch']
            self.spectral_ch = model['spectral_ch']
            self.notch_ch = model['notch_ch']
            #self.ref_ch = model['ref_ch'] # not supported yet
            self.ch_names = self.sr.get_channel_names()
            mc = model['ch_names']
            self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
            if self.spatial_ch is not None:
                self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
            if self.spectral_ch is not None:
                self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
            if self.notch_ch is not None:
                self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]

            # PSD buffer
            #psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames // self.decim)))
            #self.psd_shape = psd_temp.shape
            #self.psd_size = psd_temp.size
            #self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
            #self.psd_buffer = None

            self.ts_buffer = []

            logger.info_green('Loaded classifier %s (sfreq=%.3f, decim=%d)' % (' vs '.join(self.label_names), self.sfreq, self.decim))
        else:
            # Fake left-right decoder
            model = None
            self.psd_shape = None
            self.psd_size = None
            # TODO: parameterize directions using fake_dirs
            self.labels = [11, 9]
            self.label_names = ['LEFT_GO', 'RIGHT_GO']
Ejemplo n.º 15
0
class BCIDecoder(object):
    """
    Decoder class

    The label order of self.labels and self.label_names match likelihood orders computed by get_prob()

    """

    def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
        """
        Params
        ------
        classifier: classifier file
        buffer_size: length of the signal buffer in seconds
        fake: if True, decode randomly without a classifier
        amp_name, amp_serial: LSL stream to receive from
        """

        self.classifier = classifier
        self.buffer_sec = buffer_size
        self.fake = fake
        self.amp_serial = amp_serial
        self.amp_name = amp_name

        if not self.fake:
            model = qc.load_obj(self.classifier)
            if model is None:
                logger.error('Classifier model is None.')
                raise ValueError
            self.cls = model['cls']
            self.psde = model['psde']
            self.labels = list(self.cls.classes_)
            self.label_names = [model['classes'][k] for k in self.labels]
            self.spatial = model['spatial']
            self.spectral = model['spectral']
            self.notch = model['notch']
            self.w_seconds = model['w_seconds']
            self.w_frames = model['w_frames']
            self.wstep = model['wstep']
            self.sfreq = model['sfreq']
            if 'decim' not in model:
                model['decim'] = 1
            self.decim = model['decim']
            if not int(round(self.sfreq * self.w_seconds)) == self.w_frames:
                logger.error('sfreq * w_sec %d != w_frames %d' % (int(round(self.sfreq * self.w_seconds)), self.w_frames))
                raise RuntimeError

            if 'multiplier' in model:
                self.multiplier = model['multiplier']
            else:
                self.multiplier = 1

            # Stream Receiver
            self.sr = StreamReceiver(window_size=self.w_seconds, amp_name=self.amp_name, amp_serial=self.amp_serial)
            if self.sfreq != self.sr.sample_rate:
                logger.error('Amplifier sampling rate (%.3f) != model sampling rate (%.3f). Stop.' % (self.sr.sample_rate, self.sfreq))
                raise RuntimeError

            # Map channel indices based on channel names of the streaming server
            self.spatial_ch = model['spatial_ch']
            self.spectral_ch = model['spectral_ch']
            self.notch_ch = model['notch_ch']
            #self.ref_ch = model['ref_ch'] # not supported yet
            self.ch_names = self.sr.get_channel_names()
            mc = model['ch_names']
            self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
            if self.spatial_ch is not None:
                self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
            if self.spectral_ch is not None:
                self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
            if self.notch_ch is not None:
                self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]

            # PSD buffer
            #psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames // self.decim)))
            #self.psd_shape = psd_temp.shape
            #self.psd_size = psd_temp.size
            #self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
            #self.psd_buffer = None

            self.ts_buffer = []

            logger.info_green('Loaded classifier %s (sfreq=%.3f, decim=%d)' % (' vs '.join(self.label_names), self.sfreq, self.decim))
        else:
            # Fake left-right decoder
            model = None
            self.psd_shape = None
            self.psd_size = None
            # TODO: parameterize directions using fake_dirs
            self.labels = [11, 9]
            self.label_names = ['LEFT_GO', 'RIGHT_GO']

    def get_labels(self):
        """
        Returns
        -------
        Class label numbers in the same order as the likelihoods returned by get_prob()
        """
        return self.labels

    def get_label_names(self):
        """
        Returns
        -------
        Class label names in the same order as get_labels()
        """
        return self.label_names

    def start(self):
        pass

    def stop(self):
        pass

    def get_prob(self, timestamp=False):
        """
        Read the latest window

        Input
        -----
        timestamp: If True, returns LSL timestamp of the leading edge of the window used for decoding.

        Returns
        -------
        The likelihood P(X|C), where X=window, C=model
        """
        if self.fake:
            # fake decoder: biased likelihood for the first class
            probs = [random.uniform(0.0, 1.0)]
            # the remaining classes share the rest equally
            p_others = (1 - probs[0]) / (len(self.labels) - 1)
            for x in range(1, len(self.labels)):
                probs.append(p_others)
            time.sleep(0.0625)  # simulated delay
            t_prob = pylsl.local_clock()
        else:
            self.sr.acquire(blocking=True)
            w, ts = self.sr.get_window()  # w = times x channels
            t_prob = ts[-1]
            w = w.T  # -> channels x times

            # re-reference channels
            # TODO: add re-referencing function to preprocess()

            # apply filters. Important: maintain the original channel order at this point.
            w = pu.preprocess(w, sfreq=self.sfreq, spatial=self.spatial, spatial_ch=self.spatial_ch,
                          spectral=self.spectral, spectral_ch=self.spectral_ch, notch=self.notch,
                          notch_ch=self.notch_ch, multiplier=self.multiplier, decim=self.decim)

            # select the same channels used for training
            w = w[self.picks]

            # debug: show max - min
            # c=1; print( '### %d: %.1f - %.1f = %.1f'% ( self.picks[c], max(w[c]), min(w[c]), max(w[c])-min(w[c]) ) )

            # psd = channels x freqs
            psd = self.psde.transform(w.reshape((1, w.shape[0], w.shape[1])))

            # make a feature vector and classify
            feats = np.concatenate(psd[0]).reshape(1, -1)

            # compute likelihoods
            probs = self.cls.predict_proba(feats)[0]

            # update psd buffer ( < 1 msec overhead )
            '''
            if self.psd_buffer is None:
                self.psd_buffer = psd
            else:
                self.psd_buffer = np.concatenate((self.psd_buffer, psd), axis=0)
                # TODO: CHECK THIS BLOCK
                self.ts_buffer.append(ts[0])
                if ts[0] - self.ts_buffer[0] > self.buffer_sec:
                    # search speed comparison for ordered arrays:
                    # http://stackoverflow.com/questions/16243955/numpy-first-occurence-of-value-greater-than-existing-value
                    #t_index = np.searchsorted(self.ts_buffer, ts[0] - 1.0)
                    t_index = np.searchsorted(self.ts_buffer, ts[0] - self.buffer_sec)
                    self.ts_buffer = self.ts_buffer[t_index:]
                    self.psd_buffer = self.psd_buffer[t_index:, :, :]  # numpy delete is slower
                # assert ts[0] - self.ts_buffer[0] <= self.buffer_sec
            '''

        if timestamp:
            return probs, t_prob
        else:
            return probs

    def get_prob_unread(self, timestamp=False):
        return self.get_prob(timestamp)

    def get_psd(self):
        """
        Returns
        -------
        The latest computed PSD
        """
        raise NotImplementedError('Sorry! PSD buffer is under testing.')
        return self.psd_buffer[-1].reshape((1, -1))

    def is_ready(self):
        """
        Ready to decode? Returns True if buffer is not empty.
        """
        return self.sr.is_ready()
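
# --- Usage sketch (added; not part of the original example) ---
# Hypothetical illustration of the timestamp option: get_prob(timestamp=True)
# also returns the LSL timestamp of the last sample of the decoded window, so
# comparing it with pylsl.local_clock() gives a rough acquisition-plus-decoding
# delay. The classifier path and the module imports are assumptions.
if __name__ == '__main__':
    decoder = BCIDecoder(classifier='classifier.pkl', buffer_size=1.0, fake=False)
    while not decoder.is_ready():
        time.sleep(0.1)
    probs, t_prob = decoder.get_prob(timestamp=True)
    delay = pylsl.local_clock() - t_prob
    logger.info('probs=%s, decoded %.3f s after the last sample' % (probs, delay))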
Ejemplo n.º 16
0
if __name__ == '__main__':
    brainhack = BrainHackEEGProcessing(sampling_frequency=300,
                                       eeg_ch_names=EEG_CH_NAMES.copy())
    arduino = ArduinoCommHandler(port_name='/dev/ttyACM0', baudrate=115200)
    arduino.start_communication()
    leds_values = [0] * 191
    leds_values_index_for_test = 0

    mne.set_log_level('ERROR')
    # actually improves performance for multitaper
    os.environ['OMP_NUM_THREADS'] = '1'

    amp_name, amp_serial = pu.search_lsl()
    sr = StreamReceiver(
        window_size=1, buffer_size=1, amp_name=amp_name,
        amp_serial=amp_serial, eeg_only=True
    )
    sfreq = sr.get_sample_rate()
    watchdog = qc.Timer()
    tm = qc.Timer(autoreset=True)
    trg_ch = sr.get_trigger_channel()
    last_ts = 0
    # qc.print_c('Trigger channel: %d' % trg_ch, 'G')

    fmin = 1
    fmax = 40
    psde = mne.decoding.PSDEstimator(
        sfreq=sfreq, fmin=fmin, fmax=fmax, bandwidth=None,
        adaptive=False, low_bias=True, n_jobs=1,
        normalization='length', verbose=None
    )
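
    # --- Continuation sketch (added; not part of the original example) ---
    # The snippet stops after building the PSD estimator. A minimal follow-up
    # loop, assuming the same acquire()/get_window() API used in the other
    # examples, could look like this; the 50 ms pacing and the channels-first
    # transpose are assumptions for illustration.
    while True:
        sr.acquire()
        window, tslist = sr.get_window()   # samples x channels
        window = window.T                  # channels x samples
        psd = psde.transform(window.reshape((1, window.shape[0], window.shape[1])))
        # psd has shape (1, n_channels, n_freqs); band powers could be reduced
        # here and forwarded to the LED handler (its API is not shown above).
        tm.sleep_atleast(0.05)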
Ejemplo n.º 17
0
class Scope(QMainWindow):
    def __init__(self,
                 amp_name,
                 amp_serial,
                 state=mp.Value('i', 1),
                 queue=None):
        super(Scope, self).__init__()

        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        redirect_stdout_to_queue(logger, queue, 'INFO')
        logger.info('Viewer launched')

        self.amp_name = amp_name
        self.amp_serial = amp_serial
        self.state = state

        self.init_scope()

    #
    # 	Main init function
    #
    def init_scope(self):

        # pg.setConfigOption('background', 'w')
        # pg.setConfigOption('foreground', 'k')

        self.init_config_file()
        self.init_loop()
        self.init_panel_GUI()
        self.init_scope_GUI()
        self.init_timer()

    #
    #	Initializes config file
    #
    def init_config_file(self):
        self.scope_settings = RawConfigParser(allow_no_value=True,
                                              inline_comment_prefixes=('#',
                                                                       ';'))
        if (len(sys.argv) == 1):
            self.show_channel_names = 0
            self.device_name = ""
        else:
            if (sys.argv[1].find("gtec") > -1):
                self.device_name = "gtec"
                self.show_channel_names = 1
            elif (sys.argv[1].find("biosemi") > -1):
                self.device_name = "biosemi"
                self.show_channel_names = 1
            elif (sys.argv[1].find("hiamp") > -1):
                self.device_name = "hiamp"
                self.show_channel_names = 1
            else:
                self.device_name = ""
                self.show_channel_names = 0
        # self.scope_settings.read(os.getenv("HOME") + "/.scope_settings.ini")
        self.scope_settings.read(
            str(path2_viewerFolder / '.scope_settings.ini'))

    #
    # 	Initialize control panel parameter
    #
    def init_panel_GUI(self):

        self.show_TID_events = False
        self.show_LPT_events = False
        self.show_Key_events = False

        # Event handler
        self.ui.comboBox_scale.activated.connect(
            self.onActivated_combobox_scale)
        self.ui.spinBox_time.valueChanged.connect(
            self.onValueChanged_spinbox_time)
        self.ui.checkBox_car.stateChanged.connect(
            self.onActivated_checkbox_car)
        self.ui.checkBox_bandpass.stateChanged.connect(
            self.onActivated_checkbox_bandpass)
        self.ui.checkBox_showTID.stateChanged.connect(
            self.onActivated_checkbox_TID)
        self.ui.checkBox_showLPT.stateChanged.connect(
            self.onActivated_checkbox_LPT)
        self.ui.checkBox_showKey.stateChanged.connect(
            self.onActivated_checkbox_Key)
        self.ui.pushButton_bp.clicked.connect(self.onClicked_button_bp)
        self.ui.pushButton_rec.clicked.connect(self.onClicked_button_rec)
        self.ui.pushButton_stoprec.clicked.connect(
            self.onClicked_button_stoprec)

        self.ui.pushButton_stoprec.setEnabled(False)
        self.ui.comboBox_scale.setCurrentIndex(4)
        self.ui.checkBox_car.setChecked(
            int(self.scope_settings.get("filtering", "apply_car_filter")))
        self.ui.checkBox_bandpass.setChecked(
            int(self.scope_settings.get("filtering", "apply_bandpass_filter")))
        self.ui.checkBox_showTID.setChecked(
            int(self.scope_settings.get("plot", "show_TID_events")))
        self.ui.checkBox_showLPT.setChecked(
            int(self.scope_settings.get("plot", "show_LPT_events")))
        self.ui.checkBox_showKey.setChecked(
            int(self.scope_settings.get("plot", "show_KEY_events")))
        self.ui.statusBar.showMessage("[Not recording]")

        self.channels_to_show_idx = []
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if (idx < self.config['eeg_channels']):
                    # self.table_channels.item(x,y).setTextAlignment(QtCore.Qt.AlignCenter)
                    self.ui.table_channels.item(x, y).setSelected(True)  # Qt5
                    #self.table_channels.setItemSelected(self.table_channels.item(x, y), True) # Qt4 only
                    self.channels_to_show_idx.append(idx)
                else:
                    self.ui.table_channels.setItem(x, y,
                                                   QTableWidgetItem("N/A"))
                    self.ui.table_channels.item(x, y).setFlags(
                        QtCore.Qt.NoItemFlags)
                    self.ui.table_channels.item(x, y).setTextAlignment(
                        QtCore.Qt.AlignCenter)
                idx += 1

        self.ui.table_channels.verticalHeader().setStretchLastSection(True)
        self.ui.table_channels.horizontalHeader().setStretchLastSection(True)
        self.ui.table_channels.itemSelectionChanged.connect(
            self.onSelectionChanged_table)

        self.screen_width = 522
        self.screen_height = 160
        # self.setGeometry(100,100, self.screen_width, self.screen_height)
        # self.setFixedSize(self.screen_width, self.screen_height)
        self.setWindowTitle('EEG Scope Panel')
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setFocus()
        self.show()

    #
    #	Initialize scope parameters
    #
    def init_scope_GUI(self):

        self.bool_parser = {True: '1', False: '0'}

        # PyQTGraph plot initialization
        self.win = pg.GraphicsWindow()
        self.win.setWindowTitle('EEG Scope')
        self.win.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        self.win.keyPressEvent = self.keyPressEvent
        self.win.show()
        self.main_plot_handler = self.win.addPlot()
        self.win.resize(1280, 800)

        # Scales available in the GUI. If you change the options in the GUI
        # you should change them here as well
        self.scales_range = [1, 10, 25, 50, 100, 250, 500, 1000, 2500, 100000]

        # Scale in uV
        self.scale = int(self.scope_settings.get("plot", "scale_plot"))
        # Time window to show in seconds
        self.seconds_to_show = int(self.scope_settings.get(
            "plot", "time_plot"))

        # Y Tick labels. Use values from the config file.
        self.channel_labels = []
        values = []
        ''' For non-LSL systems having no channel names
        for x in range(0, self.config['eeg_channels']):
            if (self.show_channel_names):
                self.channel_labels.append("(" + str(x + 1) + ") " +
                    self.scope_settings.get("internal",
                    "channel_names_" + self.device_name + str(
                    self.config['eeg_channels'])).split(', ')[x])
            else:
                self.channel_labels.append('CH ' + str(x + 1))
        '''
        ch_names = np.array(self.sr.get_channel_names())
        self.channel_labels = ch_names[self.sr.get_eeg_channels()]
        for x in range(0, len(self.channels_to_show_idx)):
            values.append((-x * self.scale,
                           self.channel_labels[self.channels_to_show_idx[x]]))

        values_axis = []
        values_axis.append(values)
        values_axis.append([])

        # Update table labels with current names
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if (idx < self.config['eeg_channels']):
                    self.ui.table_channels.item(x, y).setText(
                        self.channel_labels[idx])
                idx += 1

        # Plot initialization
        self.main_plot_handler.getAxis('left').setTicks(values_axis)
        self.main_plot_handler.setRange(
            xRange=[0, self.seconds_to_show],
            yRange=[
                +1.5 * self.scale,
                -0.5 * self.scale - self.scale * self.config['eeg_channels']
            ])
        self.main_plot_handler.disableAutoRange()
        self.main_plot_handler.showGrid(y=True)
        self.main_plot_handler.setLabel(axis='left',
                                        text='Scale (uV): ' + str(self.scale))
        self.main_plot_handler.setLabel(axis='bottom', text='Time (s)')

        # X axis
        self.x_ticks = np.zeros(self.config['sf'] * self.seconds_to_show)
        for x in range(0, self.config['sf'] * self.seconds_to_show):
            self.x_ticks[x] = (x * 1) / float(self.config['sf'])

        # Plotting colors. If channels > 16, colors will roll back to the beginning
        self.colors = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255],
                                [255, 255, 0], [0, 255, 255], [255, 0, 255],
                                [128, 100, 100], [0, 128, 0], [0, 128, 128],
                                [128, 128, 0], [255, 128, 128], [128, 0, 128],
                                [128, 255, 0], [255, 128, 0], [0, 255, 128],
                                [128, 0, 255]])

        # We want a lightweight scope, so we downsample the plotting to 64 Hz
        self.subsampling_value = max(1, int(self.config['sf'] / 64))

        # EEG data for plotting
        self.data_plot = np.zeros((self.config['sf'] * self.seconds_to_show,
                                   self.config['eeg_channels']))
        self.curve_eeg = []
        for x in range(0, len(self.channels_to_show_idx)):
            self.curve_eeg.append(
                self.main_plot_handler.plot(
                    x=self.x_ticks,
                    y=self.data_plot[:, self.channels_to_show_idx[x]],
                    pen=pg.mkColor(self.colors[self.channels_to_show_idx[x] %
                                               16, :])))
        # self.curve_eeg[-1].setDownsampling(ds=self.subsampling_value, auto=False, method="mean")

        # Events data
        self.events_detected = []
        self.events_curves = []
        self.events_text = []

        # CAR initialization
        self.apply_car = int(
            self.scope_settings.get("filtering", "apply_car_filter"))
        self.matrix_car = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        self.matrix_car[:, :] = -1 / float(self.config['eeg_channels'])
        np.fill_diagonal(self.matrix_car,
                         1 - (1 / float(self.config['eeg_channels'])))
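        # Worked example (added comment): for 3 EEG channels the CAR matrix is
        #   [ 2/3 -1/3 -1/3]
        #   [-1/3  2/3 -1/3]
        #   [-1/3 -1/3  2/3]
        # so multiplying it with a (channels x samples) block, as done in
        # filter_signal(), subtracts the common average from every channel.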

        # Laplacian initialization. TO BE DONE
        self.matrix_lap = np.zeros(
            (self.config['eeg_channels'], self.config['eeg_channels']),
            dtype=float)
        np.fill_diagonal(self.matrix_lap, 1)
        '''
        self.matrix_lap[2, 0] = -1
        self.matrix_lap[0, 2] = -0.25
        self.matrix_lap[0, 2] = -0.25
        '''

        # BP initialization
        self.apply_bandpass = int(
            self.scope_settings.get("filtering", "apply_bandpass_filter"))
        if (self.apply_bandpass):
            self.ui.doubleSpinBox_hp.setValue(
                float(
                    self.scope_settings.get(
                        "filtering",
                        "bandpass_cutoff_frequency").split(' ')[0]))
            self.ui.doubleSpinBox_lp.setValue(
                float(
                    self.scope_settings.get(
                        "filtering",
                        "bandpass_cutoff_frequency").split(' ')[1]))
            self.ui.doubleSpinBox_lp.setMinimum(0.1)
            self.ui.doubleSpinBox_lp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_lp.setDecimals(1)
            self.ui.doubleSpinBox_lp.setSingleStep(1)
            self.ui.doubleSpinBox_hp.setMinimum(0.1)
            self.ui.doubleSpinBox_hp.setMaximum(self.sr.sample_rate / 2 - 0.1)
            self.ui.doubleSpinBox_hp.setDecimals(1)
            self.ui.doubleSpinBox_hp.setSingleStep(1)
            self.ui.pushButton_bp.click()

        self.ui.checkBox_car.setChecked(self.apply_car)
        self.ui.checkBox_bandpass.setChecked(self.apply_bandpass)

        self.update_title_scope()

        # Help variables
        self.show_help = 0
        self.help = pg.TextItem(
            "CNBI EEG Scope v0.3 \n" +
            "----------------------------------------------------------------------------------\n"
            + "C: De/activate CAR Filter\n" +
            "B: De/activate Bandpass Filter (with current settings)\n" +
            "T: Show/hide TiD events\n" + "L: Show/hide LPT events\n" +
            "K: Show/hide Key events. If not shown, they are NOT recorded!\n" +
            "0-9: Add a user-specific Key event. Do not forget to write down why you marked it.\n"
            +
            "Up, down arrow keys: Increase/decrease the scale, steps of 10 uV\n"
            +
            "Left, right arrow keys: Increase/decrease the time to show, steps of 1 s\n"
            +
            "Spacebar: Stop the scope plotting, whereas data acquisition keeps running (EXPERIMENTAL)\n"
            + "Esc: Exits the scope",
            anchor=(0, 0),
            border=(70, 70, 70),
            fill=pg.mkColor(20, 20, 20, 200),
            color=(255, 255, 255))

        # Stop plot functionality
        self.stop_plot = 0

        # Force repaint even when we shouldn't repaint.
        self.force_repaint = 0

    # For some strange reason when the width is > 1 px the scope runs slow.
    # self.pen_plot = []
    # for x in range(0, self.config['eeg_channels']):
    # 	self.pen_plot.append(pg.mkPen(self.colors[x%16,:], width=3))

    #
    # 	Initializes the BCI loop parameters
    #
    def init_loop_cnbiloop(self):

        self.fin = open(self.scope_settings.get("internal", "path_pipe"), 'r')

        # 12 unsigned ints (4 bytes)
        data = struct.unpack("<12I", self.fin.read(4 * 12))

        self.config = {
            'id': data[0],
            'sf': data[1],
            'labels': data[2],
            'samples': data[3],
            'eeg_channels': data[4],
            'exg_channels': data[5],
            'tri_channels': data[6],
            'eeg_type': data[8],
            'exg_type': data[9],
            'tri_type': data[10],
            'lbl_type': data[11],
            'tim_size': 1,
            'idx_size': 1
        }

        self.tri = np.zeros(self.config['samples'])
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=float)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=float)

        # TID initialization
        self.bci = BCI.BciInterface()

    #
    # 	Initializes the BCI loop parameters
    #
    def init_loop(self):

        self.updating = False

        self.sr = StreamReceiver(window_size=1,
                                 buffer_size=10,
                                 amp_serial=self.amp_serial,
                                 amp_name=self.amp_name)
        srate = int(self.sr.sample_rate)
        # n_channels= self.sr.channels

        # 12 unsigned ints (4 bytes)
        ########## TODO: assuming a 32-sample chunk => read it from the LSL header instead
        data = [
            'EEG', srate, ['L', 'R'], 32,
            len(self.sr.get_eeg_channels()), 0,
            self.sr.get_trigger_channel(), None, None, None, None, None
        ]
        logger.info('Trigger channel is %d' % self.sr.get_trigger_channel())

        self.config = {
            'id': data[0],
            'sf': data[1],
            'labels': data[2],
            'samples': data[3],
            'eeg_channels': data[4],
            'exg_channels': data[5],
            'tri_channels': data[6],
            'eeg_type': data[8],
            'exg_type': data[9],
            'tri_type': data[10],
            'lbl_type': data[11],
            'tim_size': 1,
            'idx_size': 1
        }

        self.tri = np.zeros(self.config['samples'])
        self.last_tri = 0
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=float)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=float)
        self.ts_list = []
        self.ts_list_tri = []

    #
    # 	Initializes the QT timer, which will call the update function every 20 ms
    #
    def init_timer(self):

        self.tm = qc.Timer()  # leeq

        QtCore.QCoreApplication.processEvents()
        QtCore.QCoreApplication.flush()
        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.update_loop)
        self.timer.start(20)

    # QtCore.QTimer.singleShot( 20, self.update_loop )

    #
    #	Main update function (connected to the timer)
    #
    def update_loop(self):

        #  Sharing variable to stop at the GUI level
        if not self.state.value:
            logger.info('Viewer stopped')
            sys.exit()

        try:
            # assert self.updating==False, 'thread destroyed?'
            # self.updating= True

            # self.handle_tobiid_input()	# Read TiDs
            self.read_eeg()  # Read new chunk
            if len(self.ts_list) > 0:
                self.filter_signal()  # Filter acquired data
                self.update_ringbuffers()  # Update the plotting information
                if (not self.stop_plot):
                    self.repaint()  # Call paint event
        except:
            logger.exception('Exception. Dropping into a shell.')
            pdb.set_trace()
        finally:
            # self.updating= False
            # using singleShot instead
            # QtCore.QTimer.singleShot( 20, self.update_loop )
            pass

    #
    #	Read EEG
    #
    def read_eeg(self):

        # if self.updating==True: print( '##### ERROR: thread destroyed ? ######' )
        # self.updating= True

        try:
            # data, self.ts_list= self.sr.inlets[0].pull_chunk(max_samples=self.config['sf']) # [frames][channels]
            data, self.ts_list = self.sr.acquire(blocking=False)

            # TODO: check and change to these two lines
            #self.sr.acquire(blocking=False, decim=DECIM)
            #data, self.ts_list = self.sr.get_window()

            if len(self.ts_list) == 0:
                # self.eeg= None
                # self.tri= None
                return

            n = self.config['eeg_channels']
            '''
            x= np.array( data )
            trg_ch= self.config['tri_channels']
            if trg_ch is not None:
                self.tri= np.reshape( x[:,trg_ch], (-1,1) ) # samples x 1
            self.eeg= np.reshape( x[:,self.sr.eeg_channels], (-1,n) ) # samples x channels
            '''
            trg_ch = self.config['tri_channels']
            if trg_ch is not None:
                self.tri = np.reshape(data[:, trg_ch], (-1, 1))  # samples x 1
            self.eeg = np.reshape(data[:, self.sr.eeg_channels],
                                  (-1, n))  # samples x channels

            if DEBUG_TRIGGER:
                # show trigger value
                try:
                    trg_value = max(self.tri)
                    if trg_value > 0:
                        logger.info('Received trigger %s' % trg_value)
                except:
                    logger.exception('Error! self.tri = %s' % self.tri)

                    # Read exg. self.config.samples*self.config.exg_ch, type float
                    # bexg = np.random.rand( 1, self.config['samples'] * self.config['exg_channels'] )
                    # self.exg = np.reshape(list(bexg), (self.config['samples'],self.config['exg_channels']))
        except WindowsError:
            # print('**** Access violation in read_eeg():\n%s\n%s'% (sys.exc_info()[0], sys.exc_info()[1]))
            pass
        except:
            logger.exception('Exception in read_eeg():')
            pdb.set_trace()

    #
    #	Read EEG
    #
    def read_eeg_cnbiloop(self):

        # Reading in Python is blocking, so it waits until the required amount of data is available
        # Read timestamp. 1 value, type double
        timestamp = struct.unpack("<d", self.fin.read(8 * 1))
        # Read index. 1 value, type uint64
        index = struct.unpack("<Q", self.fin.read(8 * 1))
        # Read labels. self.config.labels, type double
        labels = struct.unpack("<" + str(self.config['labels']) + "I",
                               self.fin.read(4 * self.config['labels']))
        # Read eeg. self.config.samples*self.config.eeg_ch, type float
        beeg = struct.unpack(
            "<" + str(self.config['samples'] * self.config['eeg_channels']) +
            "f",
            self.fin.read(4 * self.config['samples'] *
                          self.config['eeg_channels']))
        self.eeg = np.reshape(
            list(beeg), (self.config['samples'], self.config['eeg_channels']))
        # Read exg. self.config.samples*self.config.exg_ch, type float
        bexg = struct.unpack(
            "<" + str(self.config['samples'] * self.config['exg_channels']) +
            "f",
            self.fin.read(4 * self.config['samples'] *
                          self.config['exg_channels']))
        self.exg = np.reshape(
            list(bexg), (self.config['samples'], self.config['exg_channels']))
        # Read tri. self.config.samples*self.config.tri_ch, type float
        self.tri = struct.unpack(
            "<" + str(self.config['samples'] * self.config['tri_channels']) +
            "i",
            self.fin.read(4 * self.config['samples'] *
                          self.config['tri_channels']))

    #
    #	Bandpas + CAR filtering
    #
    def filter_signal(self):

        if (self.apply_bandpass):
            for x in range(0, self.eeg.shape[1]):
                self.eeg[:, x], self.zi[:,
                                        x] = lfilter(self.b, self.a,
                                                     self.eeg[:, x], -1,
                                                     self.zi[:, x])

        # We only apply CAR if selected AND there are at least 2 channels. Otherwise it makes no sense
        if (self.apply_car) and (len(self.channels_to_show_idx) > 1):
            self.eeg = np.dot(self.matrix_car, np.transpose(self.eeg))
            self.eeg = np.transpose(self.eeg)

    #
    #	Update ringbuffers and events for plotting
    #
    def update_ringbuffers(self):
        # leeq
        self.data_plot = np.roll(self.data_plot, -len(self.ts_list), 0)
        self.data_plot[-len(self.ts_list):, :] = self.eeg

        # We have to remove those indexes that reached time = 0
        delete_indices_e = []
        delete_indices_c = []
        for x in range(0, len(self.events_detected), 2):
            xh = int(x / 2)
            self.events_detected[x] -= len(self.ts_list)  # leeq
            if (self.events_detected[x] < 0) and (not self.stop_plot):
                delete_indices_e.append(x)
                delete_indices_e.append(x + 1)
                delete_indices_c.append(xh)
                self.events_curves[xh].clear()
                self.main_plot_handler.removeItem(self.events_text[xh])

        self.events_detected = [
            i for j, i in enumerate(self.events_detected)
            if j not in delete_indices_e
        ]
        self.events_curves = [
            i for j, i in enumerate(self.events_curves)
            if j not in delete_indices_c
        ]
        self.events_text = [
            i for j, i in enumerate(self.events_text)
            if j not in delete_indices_c
        ]

        # Find LPT events and add them
        if (self.show_LPT_events) and (not self.stop_plot):
            for x in range(len(self.tri)):
                tri = int(self.tri[x])
                if tri != 0 and (tri > self.last_tri):
                    self.addEventPlot("LPT", tri)
                    logger.info('Trigger %d received' % tri)
                self.last_tri = tri

    #
    #	Called by repaint()
    #
    def paintEvent(self, e):
        # Distinguish paint events triggered by the timer from Qt widget events such as resizing or clicking (in those cases the sender is None)
        # We should only paint when the timer triggered the event.
        # Just in case, there's a flag to force a repaint even when we shouldn't repaint
        sender = self.sender()
        if 'force_repaint' not in self.__dict__.keys():
            logger.warning('force_repaint is not set! Is it a Qt bug?')
            self.force_repaint = 0
        if (sender is None) and (not self.force_repaint):
            pass
        else:
            self.force_repaint = 0
            qp = QPainter()
            qp.begin(self)
            # Update the interface
            self.paintInterface(qp)
            qp.end()

    #
    #	Update stuff on the interface. Only graphical updates should be added here
    #
    def paintInterface(self, qp):

        # Update EEG channels
        for x in range(0, len(self.channels_to_show_idx)):
            self.curve_eeg[x].setData(
                x=self.x_ticks,
                y=self.data_plot[:, self.channels_to_show_idx[x]] -
                x * self.scale)

        # Update events
        for x in range(0, len(self.events_detected), 2):
            xh = int(x / 2)
            self.events_curves[xh].setData(
                x=np.array([
                    self.x_ticks[self.events_detected[x]],
                    self.x_ticks[self.events_detected[x]]
                ]),
                y=np.array([
                    +1.5 * self.scale, -0.5 * self.scale -
                    self.scale * self.config['eeg_channels']
                ]))
            self.events_text[xh].setPos(self.x_ticks[self.events_detected[x]],
                                        self.scale)

    #
    #	Do necessary stuff when scale has changed
    #
    def update_plot_scale(self, new_scale):

        if (new_scale < 1):
            new_scale = 1
        # commented out by dbdq.
        # else:
        #	new_scale = new_scale - new_scale%10

        self.scale = new_scale

        # Y Tick labels
        values = []
        for x in range(0, len(self.channels_to_show_idx)):
            values.append((-x * self.scale,
                           self.channel_labels[self.channels_to_show_idx[x]]))

        values_axis = []
        values_axis.append(values)
        values_axis.append([])

        self.main_plot_handler.getAxis('left').setTicks(values_axis)
        self.main_plot_handler.setRange(
            yRange=[+self.scale, -self.scale * len(self.channels_to_show_idx)])
        self.main_plot_handler.setLabel(axis='left',
                                        text='Scale (uV): ' + str(self.scale))
        self.trigger_help()

        # We force an immediate repaint to avoid "shakiness".
        if (not self.stop_plot):
            self.force_repaint = 1
            self.repaint()

    #
    #	Do necessary stuff when seconds to show have changed
    #
    def update_plot_seconds(self, new_seconds):

        # Do nothing unless...
        if (new_seconds != self.seconds_to_show) and (new_seconds > 0) and (
                new_seconds < 100):
            self.ui.spinBox_time.setValue(new_seconds)
            self.main_plot_handler.setRange(xRange=[0, new_seconds])
            self.x_ticks = np.zeros(self.config['sf'] * new_seconds)
            for x in range(0, self.config['sf'] * new_seconds):
                self.x_ticks[x] = (x * 1) / float(self.config['sf'])

            if (new_seconds > self.seconds_to_show):
                padded_signal = np.zeros((self.config['sf'] * new_seconds,
                                          self.config['eeg_channels']))
                padded_signal[padded_signal.shape[0] -
                              self.data_plot.shape[0]:, :] = self.data_plot
                for x in range(0, len(self.events_detected), 2):
                    self.events_detected[x] += padded_signal.shape[0] - \
                                               self.data_plot.shape[0]
                self.data_plot = padded_signal

            else:
                for x in range(0, len(self.events_detected), 2):
                    self.events_detected[x] -= self.data_plot.shape[0] - \
                                               self.config['sf'] * new_seconds
                self.data_plot = self.data_plot[self.data_plot.shape[0] -
                                                self.config['sf'] *
                                                new_seconds:, :]

            self.seconds_to_show = new_seconds
            self.trigger_help()

            # We force an immediate repaint to avoid "shakiness".
            if (not self.stop_plot):
                self.force_repaint = 1
                self.repaint()

    #
    # Handle TOBI iD events
    #
    def handle_tobiid_input(self):

        data = None
        try:
            data = self.bci.iDsock_bus.recv(512)
            self.bci.idStreamer_bus.Append(data)
        except:
            self.nS = False
            self.dec = 0
            pass

        # deserialize ID message
        if data:
            if self.bci.idStreamer_bus.Has("<tobiid", "/>"):
                msg = self.bci.idStreamer_bus.Extract("<tobiid", "/>")
                self.bci.id_serializer_bus.Deserialize(msg)
                self.bci.idStreamer_bus.Clear()
                tmpmsg = int(self.bci.id_msg_bus.GetEvent())
                if (self.show_TID_events) and (not self.stop_plot):
                    self.addEventPlot("TID", tmpmsg)

            elif self.bci.idStreamer_bus.Has("<tcstatus", "/>"):
                MsgNum = self.bci.idStreamer_bus.Count("<tcstatus")
                for i in range(1, MsgNum - 1):
                    # Extract most of these messages and trash them
                    msg_useless = self.bci.idStreamer_bus.Extract(
                        "<tcstatus", "/>")

    #
    # 	Add an event to the scope
    #
    def addEventPlot(self, event_name, event_id):
        if (event_name == "TID"):
            color = pg.mkColor(0, 0, 255)
        elif (event_name == "KEY"):
            color = pg.mkColor(255, 0, 0)
        elif (event_name == "LPT"):
            color = pg.mkColor(0, 255, 0)
        else:
            color = pg.mkColor(255, 255, 255)

        self.events_detected.append(self.data_plot.shape[0] - 1)
        self.events_detected.append(event_id)
        self.events_curves.append(
            self.main_plot_handler.plot(
                pen=color,
                x=np.array([self.x_ticks[-1], self.x_ticks[-1]]),
                y=np.array([
                    +1.5 * self.scale,
                    -1.5 * self.scale * self.config['eeg_channels']
                ])))
        # text = pg.TextItem(event_name + "(" + str(self.events_detected[-1]) + ")", anchor=(1.1,0), fill=(0,0,0), color=color)
        text = pg.TextItem(str(self.events_detected[-1]),
                           anchor=(1.1, 0),
                           fill=(0, 0, 0),
                           color=color)
        text.setPos(self.x_ticks[-1], self.scale)
        self.events_text.append(text)
        self.main_plot_handler.addItem(self.events_text[-1])

    #
    #	Calculation of bandpass coefficients.
    #	The filter order is fixed to 2 (automatic order computation via buttord is disabled).
    #	Note that if the filter is unstable this function crashes (TODO: handle problems)
    #
    def butter_bandpass(self, highcut, lowcut, fs, num_ch):
        low = lowcut / (0.5 * fs)
        high = highcut / (0.5 * fs)
        # get the order. TO BE DONE: Sometimes it fails
        #ord = buttord(high, low, 2, 40)
        #b, a = butter(ord[0], [high, low], btype='band')
        b, a = butter(2, [high, low], btype='band')
        zi = np.zeros([a.shape[0] - 1, num_ch])
        return b, a, zi
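
    # Note (added): for the fixed 2nd-order Butterworth bandpass above, `a` has
    # 5 coefficients, so `zi` is a (4, num_ch) array holding one filter state
    # per channel; filter_signal() feeds zi[:, x] back into lfilter() so the
    # state carries over between chunks instead of restarting at every call.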

    #
    #	Updates the title shown in the scope
    #
    def update_title_scope(self):
        if (hasattr(self, 'main_plot_handler')):
            self.main_plot_handler.setTitle(
                title='TLK: ' + self.bool_parser[self.show_TID_events] +
                self.bool_parser[self.show_LPT_events] +
                self.bool_parser[self.show_Key_events] + ', CAR: ' +
                self.bool_parser[self.apply_car] + ', BP: ' +
                self.bool_parser[self.apply_bandpass] + ' [' +
                str(self.ui.doubleSpinBox_hp.value()) + '-' +
                str(self.ui.doubleSpinBox_lp.value()) + '] Hz')
            # ', BP: ' + self.bool_parser[self.apply_bandpass] + (' [' + str(self.doubleSpinBox_hp.value()) + '-' + str(self.doubleSpinBox_lp.value()) + '] Hz' if self.apply_bandpass else ''))

    #
    #	Shows / hide help in the scope window
    #
    def trigger_help(self):
        if self.show_help:
            self.help.setPos(0, self.scale)
            self.main_plot_handler.addItem(self.help)
            self.help.setZValue(1)
        else:
            self.main_plot_handler.removeItem(self.help)

    # ----------------------------------------------------------------------------------------------------
    # 			EVENT HANDLERS
    # ----------------------------------------------------------------------------------------------------
    def onClicked_button_rec(self):
        # Simply call cl_rpc for this.
        if (len(self.ui.lineEdit_recFilename.text()) > 0):
            if ".gdf" in self.ui.lineEdit_recFilename.text():
                self.ui.pushButton_stoprec.setEnabled(True)
                self.ui.pushButton_rec.setEnabled(False)
                # Popen is more efficient than os.open, since it is non-blocking
                subprocess.Popen([
                    "cl_rpc", "openxdf",
                    str(self.ui.lineEdit_recFilename.text()), "dummy_log",
                    "dummy_log"
                ],
                                 close_fds=True)
                self.ui.statusBar.showMessage(
                    "Recording file " +
                    str(self.ui.lineEdit_recFilename.text()))
            elif ".bdf" in self.ui.lineEdit_recFilename.text():
                self.ui.pushButton_stoprec.setEnabled(True)
                self.ui.pushButton_rec.setEnabled(False)
                subprocess.Popen([
                    "cl_rpc", "openxdf",
                    str(self.ui.lineEdit_recFilename.text()), "dummy_log",
                    "dummy_log"
                ],
                                 close_fds=True)
                self.ui.statusBar.showMessage(
                    "Recording file " +
                    str(self.ui.lineEdit_recFilename.text()))
            else:
                pass

    def onClicked_button_stoprec(self):
        subprocess.Popen(["cl_rpc", "closexdf"], close_fds=True)
        self.ui.pushButton_rec.setEnabled(True)
        self.ui.pushButton_stoprec.setEnabled(False)
        self.ui.statusBar.showMessage("Not recording")

    def onActivated_checkbox_bandpass(self):
        self.apply_bandpass = False
        self.ui.pushButton_bp.setEnabled(self.ui.checkBox_bandpass.isChecked())
        self.ui.doubleSpinBox_hp.setEnabled(
            self.ui.checkBox_bandpass.isChecked())
        self.ui.doubleSpinBox_lp.setEnabled(
            self.ui.checkBox_bandpass.isChecked())
        self.update_title_scope()

    def onActivated_checkbox_car(self):
        self.apply_car = self.ui.checkBox_car.isChecked()
        self.update_title_scope()

    def onActivated_checkbox_TID(self):
        self.show_TID_events = self.ui.checkBox_showTID.isChecked()
        self.update_title_scope()

    def onActivated_checkbox_LPT(self):
        self.show_LPT_events = self.ui.checkBox_showLPT.isChecked()
        self.update_title_scope()

    def onActivated_checkbox_Key(self):
        self.show_Key_events = self.ui.checkBox_showKey.isChecked()
        self.update_title_scope()

    def onValueChanged_spinbox_time(self):
        self.update_plot_seconds(self.ui.spinBox_time.value())

    def onActivated_combobox_scale(self):
        self.update_plot_scale(
            self.scales_range[self.ui.comboBox_scale.currentIndex()])

    def onClicked_button_bp(self):
        if (self.ui.doubleSpinBox_lp.value() >
                self.ui.doubleSpinBox_hp.value()):
            self.apply_bandpass = True
            self.b, self.a, self.zi = self.butter_bandpass(
                self.ui.doubleSpinBox_hp.value(),
                self.ui.doubleSpinBox_lp.value(), self.config['sf'],
                self.config['eeg_channels'])
        self.update_title_scope()

    def onSelectionChanged_table(self):

        # Remove current plot
        for x in range(0, len(self.channels_to_show_idx)):
            self.main_plot_handler.removeItem(self.curve_eeg[x])

        # Which channels should I plot?
        self.channels_to_show_idx = []
        self.channels_to_hide_idx = []
        idx = 0
        for y in range(0, 4):
            for x in range(0, NUM_X_CHANNELS):
                if (idx < self.config['eeg_channels']):
                    #if (self.table_channels.isItemSelected( # Qt4 only
                    if (QTableWidgetItem.isSelected(  # Qt5
                            self.ui.table_channels.item(x, y))):
                        self.channels_to_show_idx.append(idx)
                    else:
                        self.channels_to_hide_idx.append(idx)
                    idx += 1

        # Add new plots
        self.curve_eeg = []
        for x in range(0, len(self.channels_to_show_idx)):
            self.curve_eeg.append(
                self.main_plot_handler.plot(
                    x=self.x_ticks,
                    y=self.data_plot[:, self.channels_to_show_idx[x]],
                    pen=self.colors[self.channels_to_show_idx[x] %
                                    NUM_X_CHANNELS, :]))
            self.curve_eeg[-1].setDownsampling(ds=self.subsampling_value,
                                               auto=False,
                                               method="mean")

        # Update CAR so it's computed based only on the shown channels
        if (len(self.channels_to_show_idx) > 1):
            self.matrix_car = np.zeros(
                (self.config['eeg_channels'], self.config['eeg_channels']),
                dtype=float)
            self.matrix_car[:, :] = -1 / float(len(self.channels_to_show_idx))
            np.fill_diagonal(self.matrix_car,
                             1 - (1 / float(len(self.channels_to_show_idx))))
            for x in range(0, len(self.channels_to_hide_idx)):
                self.matrix_car[self.channels_to_hide_idx[x], :] = 0
                self.matrix_car[:, self.channels_to_hide_idx[x]] = 0

        # Refresh the plot
        self.update_plot_scale(self.scale)

    def keyPressEvent(self, event):
        key = event.key()
        if (key == QtCore.Qt.Key_Escape):
            self.closeEvent(None)
        if (key == QtCore.Qt.Key_H):
            self.show_help = not self.show_help
            self.trigger_help()
        if (key == QtCore.Qt.Key_Up):
            # Python's log(x, 10) has a rounding bug. Use log10(x) instead.
            new_scale = self.scale + max(1, 10**int(math.log10(self.scale)))
            self.update_plot_scale(new_scale)
        if (key == QtCore.Qt.Key_Space):
            self.stop_plot = not self.stop_plot
        if (key == QtCore.Qt.Key_Down):
            if self.scale >= 2:
                # Python's log(x, 10) has a rounding bug. Use log10(x) instead.
                new_scale = self.scale - max(
                    1, 10**int(math.log10(self.scale - 1)))
                self.update_plot_scale(new_scale)
        if (key == QtCore.Qt.Key_Left):
            self.update_plot_seconds(self.seconds_to_show - 1)
        if (key == QtCore.Qt.Key_Right):
            self.update_plot_seconds(self.seconds_to_show + 1)
        if (key == QtCore.Qt.Key_L):
            self.ui.checkBox_showLPT.setChecked(
                not self.ui.checkBox_showLPT.isChecked())
        if (key == QtCore.Qt.Key_T):
            self.ui.checkBox_showTID.setChecked(
                not self.ui.checkBox_showTID.isChecked())
        if (key == QtCore.Qt.Key_K):
            self.ui.checkBox_showKey.setChecked(
                not self.ui.checkBox_showKey.isChecked())
        if (key == QtCore.Qt.Key_C):
            self.ui.checkBox_car.setChecked(
                not self.ui.checkBox_car.isChecked())
        if (key == QtCore.Qt.Key_B):
            self.ui.checkBox_bandpass.setChecked(
                not self.ui.checkBox_bandpass.isChecked())
            if self.ui.checkBox_bandpass.isChecked():
                self.ui.pushButton_bp.click()
        if ((key >= QtCore.Qt.Key_0) and (key <= QtCore.Qt.Key_9)):
            if (self.show_Key_events) and (not self.stop_plot):
                self.addEventPlot("KEY", 990 + key - QtCore.Qt.Key_0)
                # self.bci.id_msg_bus.SetEvent(990 + key - QtCore.Qt.Key_0)
                # self.bci.iDsock_bus.sendall(self.bci.id_serializer_bus.Serialize());
                # 666

    #
    #	Function called when a closing event was triggered.
    #
    def closeEvent(self, event):
        '''
        reply = QtGui.QMessageBox.question(self, "Quit", "Are you sure you want to quit?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.Yes)
        if (reply == QtGui.QMessageBox.Yes):
            if (self.pushButton_stoprec.isEnabled()):
                subprocess.Popen(["cl_rpc", "closexdf"], close_fds=True)
            self.fin.close()
            exit()
        '''

        # leeq
        if (self.ui.pushButton_stoprec.isEnabled()):
            subprocess.Popen(["cl_rpc", "closexdf"], close_fds=True)
        with self.state.get_lock():
            self.state.value = 0
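        # Setting the shared state to 0 signals any helper process polling
        # state.value (e.g. a multiprocessing.Value shared with an acquisition
        # subprocess) that it should finish up and exit.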
Ejemplo n.º 18
0
class RecordingFromHardWare():
    def __init__(self):
        self.start_recording = False
        self.new_data = []
        self.sr = StreamReceiver(buffer_size=0)
        self.record_dir = '%s/records' % os.getcwd()
        self.current_window = np.array([])
        self.current_time_stamps = np.array([])
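        # MRCP = movement-related cortical potential; the window size below is
        # presumably in seconds (a ~6 s epoch is a common choice for MRCP analysis).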
        self.MRCP_window_size = 6
        print("I RUMNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN")

# =============================================================================
#     def runInfinitecording(self):
#         while self.start_recording:
#                self.sr.acquire()
#                if self.sr.get_buflen() > 20:
#                    duration = str(datetime.timedelta(seconds=int(self.sr.get_buflen())))
#                    #recordLogger.info('RECORDING %s' % duration)
#                    #next_sec += 1
#                buffers, times = self.sr.get_buffer()
#                signals = buffers
#                events = None
#
#                 # channels = total channels from amp, including trigger channel
#                data = {'signals':signals, 'timestamps':times, 'events':events,
#                         'sample_rate':self.sr.get_sample_rate(), 'channels':self.sr.get_num_channels(),
#                         'ch_names':self.sr.get_channel_names(), 'lsl_time_offset':self.sr.lsl_time_offset}
#                print(data)
#                self.new_data.append(data)
# =============================================================================

    def startRecording(self):
        # do some stuff
        self.start_recording = True
        #download_thread = threading.Thread(target=self.runInfinitecording)
        recordLogger = logger
        thread = threading.Thread(target=self.record)
        thread.start()

    def stopRecording(self):
        self.start_recording = False
        #Write into file
        #self.new_data = []

    def isRecordingIsRunning(self):
        return self.start_recording

    def set_MRCP_window_size(self, MRCP_window_size):
        self.MRCP_window_size = MRCP_window_size

    def record(self):
        recordLogger = logger
        #redirect_stdout_to_queue(recordLogger, 'INFO')

        # set data file name
        timestamp = time.strftime('%Y%m%d-%H%M%S', time.localtime())
        pcl_file = "%s/%s-raw.pcl" % (self.record_dir, timestamp)
        eve_file = '%s/%s-eve.txt' % (self.record_dir, timestamp)
        recordLogger.info('>> Output file: %s' % (pcl_file))

        # test writability
        try:
            qc.make_dirs(self.record_dir)
            with open(pcl_file, 'w') as f:
                f.write('The data will be written when the recording is finished.')
        except Exception:
            raise RuntimeError('Problem writing to %s. Check permission.' %
                               pcl_file)

        # start a server for sending out data pcl_file when software trigger is used
        outlet = start_server('StreamRecorderInfo', channel_format='string',\
            source_id=eve_file, stype='Markers')
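        # The outlet's source_id carries the event file path, presumably so
        # that a software trigger running in another process can resolve this
        # 'StreamRecorderInfo' stream and append its event timestamps to that
        # file; if such a file exists, it is merged during the fif conversion
        # at the end of record().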

        # connect to EEG stream server
        #sr = StreamReceiver(buffer_size=0, eeg_only=eeg_only)

        # start recording
        recordLogger.info('\n>> Recording started (PID %d).' % os.getpid())
        qc.print_c('\n>> Call stopRecording() to stop recording', 'G')
        tm = qc.Timer(autoreset=True)
        next_sec = 1
        while self.start_recording:
            self.sr.acquire()
            if self.sr.get_buflen() > next_sec:
                duration = str(
                    datetime.timedelta(seconds=int(self.sr.get_buflen())))
                #recordLogger.info('RECORDING %s' % duration)
                next_sec += 1

            #current_buffer, current_times = self.sr.get_buffer()
            self.sr.set_window_size(self.MRCP_window_size)
            self.current_window, self.current_time_stamps = self.sr.get_window()
            #print("window shape: {} time stamps shape: {}".format(self.current_window.shape, self.current_time_stamps.shape))
            tm.sleep_atleast(0.001)

        # record stop
        recordLogger.info('>> Stop requested. Copying buffer')
        buffers, times = self.sr.get_buffer()
        signals = buffers
        events = None

        # channels = total channels from amp, including trigger channel
        data = {
            'signals': signals,
            'timestamps': times,
            'events': events,
            'sample_rate': self.sr.get_sample_rate(),
            'channels': self.sr.get_num_channels(),
            'ch_names': self.sr.get_channel_names(),
            'lsl_time_offset': self.sr.lsl_time_offset
        }
        print("data length : {}".format(data['signals'].shape))
        self.new_data = data['signals']
        recordLogger.info('Saving raw data ...')
        qc.save_obj(pcl_file, data)
        recordLogger.info('Saved to %s\n' % pcl_file)

        # automatically convert to fif and use event file if it exists (software trigger)
        if os.path.exists(eve_file):
            recordLogger.info('Found matching event file, adding events.')
        else:
            eve_file = None
        recordLogger.info('Converting raw file into fif.')
        pcl2fif(pcl_file, external_event=eve_file)
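
# A minimal usage sketch (assumption: an LSL amplifier stream is already online
# when the StreamReceiver in __init__ connects):
#   rec = RecordingFromHardWare()
#   rec.startRecording()      # spawns the acquisition thread running record()
#   time.sleep(10)            # keep recording for ~10 seconds
#   rec.stopRecording()       # clears the flag; record() then saves the buffer
#                             # to -raw.pcl and converts it to fif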