def event_timestamps_to_indices(sigfile, eventfile, offset=0):
    """
    Convert LSL timestamps to sample indices for separately recorded events.

    Parameters:
    sigfile: raw signal file (Python Pickle) recorded with stream_recorder.py.
    eventfile: event file where events are indexed with LSL timestamps.
    offset: value in seconds added to each event timestamp before matching
            (default 0 keeps the original behavior; callers such as pcl2fif
            pass a third positional argument, which the old 2-parameter
            signature rejected with a TypeError).

    Returns:
    events list, which can be used as an input to mne.io.RawArray.add_events().
    """
    raw = qc.load_obj(sigfile)
    ts = raw['timestamps'].reshape(-1)
    ts_min = min(ts)
    ts_max = max(ts)
    events = []

    with open(eventfile) as f:
        for l in f:
            data = l.strip().split('\t')
            event_ts = float(data[0]) + offset
            event_value = int(data[2])
            # find the first index not smaller than ts
            next_index = np.searchsorted(ts, event_ts)
            if next_index >= len(ts):
                logger.warning(
                    'Event %d at time %.3f is out of time range (%.3f - %.3f).'
                    % (event_value, event_ts, ts_min, ts_max))
            else:
                events.append([next_index, 0, event_value])
    return events
def get_decoder_info(classifier):
    """
    Get only the classifier information without connecting to a server.

    Params
    ------
    classifier: model file (Python Pickle)

    Returns
    -------
    info dictionary with keys: labels, cls, psde, w_seconds, w_frames,
    wstep, sfreq, psd_shape, psd_size.
    """
    model = qc.load_obj(classifier)
    if model is None:
        # Report the file name, not the loaded (None) object, so the
        # message actually identifies the failing file.
        logger.error('Error loading %s' % classifier)
        sys.exit(-1)

    cls = model['cls']
    psde = model['psde']
    labels = list(cls.classes_)
    w_seconds = model['w_seconds']
    w_frames = model['w_frames']
    wstep = model['wstep']
    sfreq = model['sfreq']
    # Probe the PSD estimator once with a zero window to learn the
    # feature dimensions without touching any real data.
    psd_temp = psde.transform(np.zeros((1, len(model['picks']), w_frames)))
    psd_shape = psd_temp.shape
    psd_size = psd_temp.size
    info = dict(labels=labels, cls=cls, psde=psde, w_seconds=w_seconds,
                w_frames=w_frames, wstep=wstep, sfreq=sfreq,
                psd_shape=psd_shape, psd_size=psd_size)
    return info
def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None,
             amp_name=None, fake_dirs=None, parallel=None, alpha_new=None):
    """
    Params
    ------
    classifier: file name of the classifier
    buffer_size: buffer window size in seconds
    fake:
        False: Connect to an amplifier LSL server and decode
        True: Create a mock decoder (fake probabilities biased to 1.0)
    fake_dirs: list of trigger keys used to build the mock decoder's classes
               (required when fake is True)
    parallel: dict(period, stride, num_strides)
        period: Decoding period length for a single decoder in seconds.
        stride: Time step between decoders in seconds.
        num_strides: Number of decoders to run in parallel.
    alpha_new: exponential smoothing factor, real value in [0, 1].
        p_new = p_new * alpha_new + p_old * (1 - alpha_new)

    Example: If the decoder runs 32ms per cycle, we can set
             period=0.04, stride=0.01, num_strides=4 to achieve 100 Hz decoding.
    """
    self.classifier = classifier
    self.buffer_sec = buffer_size
    self.startmsg = 'Decoder daemon started.'
    self.stopmsg = 'Decoder daemon stopped.'
    self.fake = fake
    self.amp_serial = amp_serial
    self.amp_name = amp_name
    self.parallel = parallel

    # alpha_new defaults to 1: the new probability fully replaces the old one
    # (no smoothing).
    if alpha_new is None:
        alpha_new = 1
    if not 0 <= alpha_new <= 1:
        raise ValueError('alpha_new must be a real number between 0 and 1.')
    self.alpha_new = alpha_new
    self.alpha_old = 1 - alpha_new

    if fake == False or fake is None:
        # Real decoder: load the trained model from disk.
        self.model = qc.load_obj(self.classifier)
        if self.model is None:
            # Report the file name, not the loaded (None) object.
            raise IOError('Error loading %s' % self.classifier)
        else:
            self.labels = self.model['cls'].classes_
            self.label_names = [self.model['classes'][k] for k in self.labels]
    else:
        # create a fake decoder with LEFT/RIGHT classes
        self.model = None
        tdef = trigger_def('triggerdef_16.ini')
        if type(fake_dirs) is not list:
            raise RuntimeError('Decoder(): wrong argument type of fake_dirs: %s.'
                               % type(fake_dirs))
        self.labels = [tdef.by_key[t] for t in fake_dirs]
        self.label_names = [tdef.by_value[v] for v in self.labels]
        self.startmsg = '** WARNING: FAKE ' + self.startmsg
        self.stopmsg = 'FAKE ' + self.stopmsg

    # Lock protecting the PSD buffer shared with the daemon process.
    self.psdlock = mp.Lock()
    self.reset()
    self.start()
def on_new_decoder_file(self, key, filePath):
    """Refresh the event list from the classes of a newly selected decoder file."""
    decoder = qc.load_obj(filePath)
    # Events on which the decoder has been trained.
    trained_events = decoder['cls'].classes_
    self.events = [int(ev) for ev in trained_events]
    self.nb_directions = len(trained_events)
    if self.tdef:
        self.on_update_VBoxLayout()
def pcl2fif(filename, interactive=False, outdir=None, external_event=None,
            offset=0, overwrite=False, precision='single'):
    """
    Convert a PyCNBI Python pickle recording into an MNE fif file.

    Params
    ------
    filename: PyCNBI pickle file recorded with stream_recorder.py.
    interactive: kept for backward compatibility (currently unused here).
    outdir: If None, it will be the 'fif' subdirectory of the input file.
    external_event: Event file in text format. Each row should be:
        "SAMPLE_INDEX 0 EVENT_TYPE"
    offset: offset in seconds applied to external event timestamps.
    overwrite: overwrite an existing fif file.
    precision: Data matrix format. 'single' improves backward compatability.

    Returns
    -------
    True on success.
    """
    fdir, fname, fext = qc.parse_path_list(filename)
    if outdir is None:
        outdir = fdir + 'fif/'
    elif outdir[-1] != '/':
        outdir += '/'

    data = qc.load_obj(filename)
    if isinstance(data['signals'], list):
        signals_raw = np.array(data['signals'][0]).T  # to channels x samples
    else:
        signals_raw = data['signals'].T  # to channels x samples
    sample_rate = data['sample_rate']
    if 'ch_names' not in data:
        ch_names = ['CH%d' % (x + 1) for x in range(signals_raw.shape[0])]
    else:
        ch_names = data['ch_names']

    # search for the trigger (event) channel
    trig_ch = pu.find_event_channel(signals_raw, ch_names)

    # move trigger channel to index 0
    if trig_ch is None:
        # No event channel exists; add a blank one at index 0 for consistency.
        logger.warning('No event channel was found. Adding a blank event channel to index 0.')
        eventch = np.zeros([1, signals_raw.shape[1]])
        signals = np.concatenate((eventch, signals_raw), axis=0)
        # data['channels'] is not reliable any more
        num_eeg_channels = signals_raw.shape[0]
        trig_ch = 0
        ch_names = ['TRIGGER'] + ['CH%d' % (x + 1) for x in range(num_eeg_channels)]
    elif trig_ch == 0:
        signals = signals_raw
        num_eeg_channels = data['channels'] - 1
    else:
        # move event channel to 0
        logger.info('Moving event channel %d to 0.' % trig_ch)
        signals = np.concatenate((signals_raw[[trig_ch]],
                                  signals_raw[:trig_ch],
                                  signals_raw[trig_ch + 1:]), axis=0)
        assert signals_raw.shape == signals.shape
        num_eeg_channels = data['channels'] - 1
        ch_names.pop(trig_ch)
        trig_ch = 0
        ch_names.insert(trig_ch, 'TRIGGER')
        logger.info('New channel list:')
        for c in ch_names:
            logger.info('%s' % c)

    ch_info = ['stim'] + ['eeg'] * num_eeg_channels
    info = mne.create_info(ch_names, sample_rate, ch_info)

    # create Raw object
    raw = mne.io.RawArray(signals, info)
    raw._times = data['timestamps']  # seems to have no effect

    if external_event is not None:
        raw._data[0] = 0  # erase current events
        events_index = event_timestamps_to_indices(filename, external_event, offset)
        if len(events_index) == 0:
            logger.warning('No events were found in the event file')
        else:
            logger.info('Found %d events' % len(events_index))
            # only add events when there is at least one; an empty list
            # would make add_events fail.
            raw.add_events(events_index, stim_channel='TRIGGER')

    qc.make_dirs(outdir)
    fiffile = outdir + fname + '.fif'
    raw.save(fiffile, verbose=False, overwrite=overwrite, fmt=precision)
    logger.info('Saved to %s' % fiffile)

    saveChannels2txt(outdir, ch_names)
    return True
def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
    """
    Params
    ------
    classifier: classifier file
    buffer_size: length of the signal buffer in seconds
    fake:
        False: connect to an amplifier LSL server and decode
        True: create a mock LEFT/RIGHT decoder
    amp_serial, amp_name: identify the amplifier LSL stream to connect to
    """
    self.classifier = classifier
    self.buffer_sec = buffer_size
    self.fake = fake
    self.amp_serial = amp_serial
    self.amp_name = amp_name

    if self.fake == False:
        model = qc.load_obj(self.classifier)
        if model is None:
            # Report the file name, not the loaded (None) object.
            self.print('Error loading %s' % self.classifier)
            sys.exit(-1)
        self.cls = model['cls']
        self.psde = model['psde']
        self.labels = list(self.cls.classes_)
        self.label_names = [model['classes'][k] for k in self.labels]
        self.spatial = model['spatial']
        self.spectral = model['spectral']
        self.notch = model['notch']
        self.w_seconds = model['w_seconds']
        self.w_frames = model['w_frames']
        self.wstep = model['wstep']
        self.sfreq = model['sfreq']
        # sanity check: the window length in frames must match sfreq * seconds
        if not int(self.sfreq * self.w_seconds) == self.w_frames:
            raise RuntimeError('sfreq * w_sec %d != w_frames %d'
                               % (int(self.sfreq * self.w_seconds), self.w_frames))

        if 'multiplier' in model:
            self.multiplier = model['multiplier']
        else:
            self.multiplier = 1

        # Stream Receiver
        self.sr = StreamReceiver(window_size=self.w_seconds,
                                 amp_name=self.amp_name,
                                 amp_serial=self.amp_serial)
        if self.sfreq != self.sr.sample_rate:
            raise RuntimeError('Amplifier sampling rate (%.1f) != model sampling rate (%.1f). Stop.'
                               % (self.sr.sample_rate, self.sfreq))

        # Map channel indices based on channel names of the streaming server
        self.spatial_ch = model['spatial_ch']
        self.spectral_ch = model['spectral_ch']
        self.notch_ch = model['notch_ch']
        self.ref_new = model['ref_new']
        self.ref_old = model['ref_old']
        self.ch_names = self.sr.get_channel_names()
        mc = model['ch_names']
        self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
        if self.spatial_ch is not None:
            self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
        if self.spectral_ch is not None:
            self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
        if self.notch_ch is not None:
            self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]
        if self.ref_new is not None:
            self.ref_new = self.ch_names.index(mc[model['ref_new']])
        if self.ref_old is not None:
            self.ref_old = self.ch_names.index(mc[model['ref_old']])

        # PSD buffer: probe the estimator once to learn feature dimensions.
        psd_temp = self.psde.transform(np.zeros((1, len(self.picks), self.w_frames)))
        self.psd_shape = psd_temp.shape
        self.psd_size = psd_temp.size
        self.psd_buffer = np.zeros((0, self.psd_shape[1], self.psd_shape[2]))
        self.ts_buffer = []
    else:
        # Fake left-right decoder
        model = None
        self.psd_shape = None
        self.psd_size = None
        # TODO: parameterize directions using fake_dirs
        self.labels = [11, 9]
        self.label_names = ['LEFT_GO', 'RIGHT_GO']
def __init__(self, classifier=None, buffer_size=1.0, fake=False, amp_serial=None, amp_name=None):
    """
    Params
    ------
    classifier: classifier file
    buffer_size: length of the signal buffer in seconds
    fake:
        False: connect to an amplifier LSL server and decode
        True: create a mock LEFT/RIGHT decoder
    amp_serial, amp_name: identify the amplifier LSL stream to connect to
    """
    self.classifier = classifier
    self.buffer_sec = buffer_size
    self.fake = fake
    self.amp_serial = amp_serial
    self.amp_name = amp_name

    if self.fake == False:
        model = qc.load_obj(self.classifier)
        if model is None:
            # Raise with the same message that is logged so callers see it too.
            logger.error('Classifier model is None.')
            raise ValueError('Classifier model is None.')
        self.cls = model['cls']
        self.psde = model['psde']
        self.labels = list(self.cls.classes_)
        self.label_names = [model['classes'][k] for k in self.labels]
        self.spatial = model['spatial']
        self.spectral = model['spectral']
        self.notch = model['notch']
        self.w_seconds = model['w_seconds']
        self.w_frames = model['w_frames']
        self.wstep = model['wstep']
        self.sfreq = model['sfreq']
        # Older models do not store 'decim'; default to no decimation.
        if 'decim' not in model:
            model['decim'] = 1
        self.decim = model['decim']
        # sanity check: window length in frames must match sfreq * seconds
        if not int(round(self.sfreq * self.w_seconds)) == self.w_frames:
            msg = 'sfreq * w_sec %d != w_frames %d' % (int(round(self.sfreq * self.w_seconds)), self.w_frames)
            logger.error(msg)
            raise RuntimeError(msg)
        if 'multiplier' in model:
            self.multiplier = model['multiplier']
        else:
            self.multiplier = 1

        # Stream Receiver
        self.sr = StreamReceiver(window_size=self.w_seconds,
                                 amp_name=self.amp_name,
                                 amp_serial=self.amp_serial)
        if self.sfreq != self.sr.sample_rate:
            msg = 'Amplifier sampling rate (%.3f) != model sampling rate (%.3f). Stop.' % (self.sr.sample_rate, self.sfreq)
            logger.error(msg)
            raise RuntimeError(msg)

        # Map channel indices based on channel names of the streaming server
        self.spatial_ch = model['spatial_ch']
        self.spectral_ch = model['spectral_ch']
        self.notch_ch = model['notch_ch']
        #self.ref_ch = model['ref_ch'] # not supported yet
        self.ch_names = self.sr.get_channel_names()
        mc = model['ch_names']
        self.picks = [self.ch_names.index(mc[p]) for p in model['picks']]
        if self.spatial_ch is not None:
            self.spatial_ch = [self.ch_names.index(mc[p]) for p in model['spatial_ch']]
        if self.spectral_ch is not None:
            self.spectral_ch = [self.ch_names.index(mc[p]) for p in model['spectral_ch']]
        if self.notch_ch is not None:
            self.notch_ch = [self.ch_names.index(mc[p]) for p in model['notch_ch']]

        # PSD buffer
        # NOTE(review): psd_shape / psd_size / psd_buffer initialization was
        # commented out upstream; in this branch those attributes are never
        # set. Downstream code must not rely on them here — confirm before
        # re-enabling any PSD buffering.
        self.ts_buffer = []
        logger.info_green('Loaded classifier %s (sfreq=%.3f, decim=%d)'
                          % (' vs '.join(self.label_names), self.sfreq, self.decim))
    else:
        # Fake left-right decoder
        model = None
        self.psd_shape = None
        self.psd_size = None
        # TODO: parameterize directions using fake_dirs
        self.labels = [11, 9]
        self.label_names = ['LEFT_GO', 'RIGHT_GO']
def disp_params(self, cfg_template_module, cfg_module):
    """
    Displays the parameters in the corresponding UI scrollArea.

    cfg_template_module: template config module declaring the parameters and
        their possible values (one members-class per tab: 'Basic', 'Advanced').
    cfg_module: the subject-specific config module holding the chosen values.
    """
    self.clear_params()
    # Extract the parameters and their possible values from the template modules.
    params = inspect.getmembers(cfg_template_module)
    # Extract the chosen values from the subject's specific module.
    all_chosen_values = inspect.getmembers(cfg_module)
    filePath = self.ui.lineEdit_pathSearch.text()

    # Load channels
    if self.modality == 'train':
        subjectDataPath = '%s/%s/fif' % (os.environ['PYCNBI_DATA'], filePath.split('/')[-1])
        self.channels = read_params_from_txt(subjectDataPath, 'channelsList.txt')
    self.directions = ()

    # Iterates over the classes
    for par in range(2):
        param = inspect.getmembers(params[par][1])
        # Create layouts
        layout = QFormLayout()

        # Iterates over the list
        for p in param:
            # Remove useless attributes
            if '__' in p[0]:
                continue

            # Iterates over the dict
            for key, values in p[1].items():
                chosen_value = self.extract_value_from_module(key, all_chosen_values)

                # For the feedback directions [offline and online].
                if 'DIRECTIONS' in key:
                    self.directions = values
                    # NOTE: string comparison must use '==', not 'is'
                    # (identity comparison on str literals is unreliable).
                    if self.modality == 'offline':
                        nb_directions = 4
                        directions = Connect_Directions(key, chosen_value, values, nb_directions)
                    elif self.modality == 'online':
                        cls_path = self.paramsWidgets['DECODER_FILE'].lineEdit_pathSearch.text()
                        cls = qc.load_obj(cls_path)
                        # Finds the events on which the decoder has been trained on
                        events = cls['cls'].classes_
                        events = list(map(int, events))
                        nb_directions = len(events)
                        chosen_events = [event[1] for event in chosen_value]
                        chosen_value = [val[0] for val in chosen_value]
                        # Need tdef to convert int to str trigger values
                        try:
                            [tdef.by_value(i) for i in events]
                        except Exception:
                            trigger_file = self.extract_value_from_module('TRIGGER_FILE', all_chosen_values)
                            tdef = trigger_def(trigger_file)
                            # self.on_guichanges('tdef', tdef)
                        events = [tdef.by_value[i] for i in events]
                        directions = Connect_Directions_Online(key, chosen_value, values,
                                                               nb_directions, chosen_events, events)
                    directions.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: directions})
                    layout.addRow(key, directions.l)

                # For providing a folder path.
                elif 'PATH' in key:
                    pathfolderfinder = PathFolderFinder(key, DEFAULT_PATH, chosen_value)
                    pathfolderfinder.signal_pathChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: pathfolderfinder})
                    layout.addRow(key, pathfolderfinder.layout)
                    continue

                # For providing a file path.
                elif 'FILE' in key:
                    pathfilefinder = PathFileFinder(key, chosen_value)
                    pathfilefinder.signal_pathChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: pathfilefinder})
                    layout.addRow(key, pathfilefinder.layout)
                    continue

                # For the special case of choosing the trigger classes to train on
                elif 'TRIGGER_DEF' in key:
                    trigger_file = self.extract_value_from_module('TRIGGER_FILE', all_chosen_values)
                    tdef = trigger_def(trigger_file)
                    # self.on_guichanges('tdef', tdef)
                    nb_directions = 4
                    # Convert 'None' to real None (real None is removed when selected in the GUI)
                    tdef_values = [None if i == 'None' else i for i in list(tdef.by_name)]
                    directions = Connect_Directions(key, chosen_value, tdef_values, nb_directions)
                    directions.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: directions})
                    layout.addRow(key, directions.l)
                    continue

                # To select specific electrodes
                elif '_CHANNELS' in key or 'CHANNELS_' in key:
                    ch_select = Channel_Select(key, self.channels, chosen_value)
                    ch_select.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: ch_select})
                    layout.addRow(key, ch_select.layout)

                elif 'BIAS' in key:
                    # Add None to the list in case of no bias wanted
                    self.directions = tuple([None] + list(self.directions))
                    bias = Connect_Bias(key, self.directions, chosen_value)
                    bias.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: bias})
                    layout.addRow(key, bias.l)

                # For all the int values.
                elif values is int:
                    spinBox = Connect_SpinBox(key, chosen_value)
                    spinBox.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: spinBox})
                    layout.addRow(key, spinBox.w)
                    continue

                # For all the float values.
                elif values is float:
                    doublespinBox = Connect_DoubleSpinBox(key, chosen_value)
                    doublespinBox.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: doublespinBox})
                    layout.addRow(key, doublespinBox.w)
                    continue

                # For parameters with multiple non-fixed values in a list (user can modify them)
                elif values is list:
                    modifiable_list = Connect_Modifiable_List(key, chosen_value)
                    modifiable_list.signal_paramChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: modifiable_list})
                    layout.addRow(key, modifiable_list.frame)
                    continue

                # For parameters containing a string to modify
                elif values is str:
                    lineEdit = Connect_LineEdit(key, chosen_value)
                    lineEdit.signal_paramChanged[str, str].connect(self.on_guichanges)
                    lineEdit.signal_paramChanged[str, type(None)].connect(self.on_guichanges)
                    self.paramsWidgets.update({key: lineEdit})
                    layout.addRow(key, lineEdit.w)
                    continue

                # For parameters with multiple fixed values.
                elif type(values) is tuple:
                    comboParams = Connect_ComboBox(key, chosen_value, values)
                    comboParams.signal_paramChanged.connect(self.on_guichanges)
                    comboParams.signal_additionalParamChanged.connect(self.on_guichanges)
                    self.paramsWidgets.update({key: comboParams})
                    layout.addRow(key, comboParams.layout)
                    continue

                # For parameters with multiple non-fixed values in a dict (user can modify them)
                elif type(values) is dict:
                    try:
                        # Probe for a 'selected' entry: its presence marks a
                        # combo-box style dict; its absence (KeyError) or a
                        # non-dict chosen_value (TypeError) falls through to
                        # the modifiable-dict widget.
                        chosen_value['selected']
                        comboParams = Connect_ComboBox(key, chosen_value, values)
                        comboParams.signal_paramChanged.connect(self.on_guichanges)
                        comboParams.signal_additionalParamChanged.connect(self.on_guichanges)
                        self.paramsWidgets.update({key: comboParams})
                        layout.addRow(key, comboParams.layout)
                    except Exception:
                        modifiable_dict = Connect_Modifiable_Dict(key, chosen_value, values)
                        modifiable_dict.signal_paramChanged.connect(self.on_guichanges)
                        self.paramsWidgets.update({key: modifiable_dict})
                        layout.addRow(key, modifiable_dict.frame)
                    continue

                # Add a horizontal line to separate parameters' type.
                if p != param[-1]:
                    separator = QFrame()
                    separator.setFrameShape(QFrame.HLine)
                    separator.setFrameShadow(QFrame.Sunken)
                    layout.addRow(separator)

        # Display the parameters according to their types.
        if params[par][0] == 'Basic':
            self.ui.scrollAreaWidgetContents_Basics.setLayout(layout)
        elif params[par][0] == 'Advanced':
            self.ui.scrollAreaWidgetContents_Adv.setLayout(layout)