Code example #1
def run(cfg, state=mp.Value('i', 1), queue=None):
    """
    Training protocol for Alpha/Theta neurofeedback.
    """
    redirect_stdout_to_queue(logger, queue, 'INFO')

    # add tdef object
    cfg.tdef = trigger_def(cfg.TRIGGER_FILE)

    # Extract features
    if not state.value:
        sys.exit(-1)
        
    freqs = np.arange(cfg.FEATURES['PSD']['fmin'], cfg.FEATURES['PSD']['fmax']+0.5, 1/cfg.FEATURES['PSD']['wlen'])
    featdata = features.compute_features(cfg)
    
    # Average the PSD over the windows
    window_avg_psd = np.mean(np.squeeze(featdata['X_data']), 0)
    
    # Alpha ref over the alpha band
    alpha_ref = round(np.mean(window_avg_psd[freqs>=8]))
    alpha_thr = round(alpha_ref - (0.5 * np.std(window_avg_psd[freqs>=8]) ))
    
    # Theta ref over Theta band
    theta_ref = round(np.mean(window_avg_psd[freqs<8]))
    theta_thr = round(theta_ref - (0.5 * np.std(window_avg_psd[freqs<8]) ))
        
    logger.info('Theta ref = {}; alpha ref = {}'.format(theta_ref, alpha_ref))
    logger.info('Theta thr = {}; alpha thr = {}'.format(theta_thr, alpha_thr))
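A minimal self-contained sketch of the band split and threshold rule used above, with synthetic PSD values and hypothetical fmin/fmax/wlen settings (mirroring the structure of cfg.FEATURES['PSD']; the original additionally rounds the values):

import numpy as np

fmin, fmax, wlen = 1.0, 30.0, 2.0                 # hypothetical PSD settings (Hz, Hz, s)
freqs = np.arange(fmin, fmax + 0.5, 1.0 / wlen)   # PSD resolution is 1/wlen, as above
window_avg_psd = np.random.rand(len(freqs))       # stands in for the averaged featdata['X_data']

# Same split as above: theta below 8 Hz, alpha at 8 Hz and above
alpha = window_avg_psd[freqs >= 8]
theta = window_avg_psd[freqs < 8]
alpha_ref, alpha_thr = np.mean(alpha), np.mean(alpha) - 0.5 * np.std(alpha)
theta_ref, theta_thr = np.mean(theta), np.mean(theta) - 0.5 * np.std(theta)
print(theta_ref, theta_thr, alpha_ref, alpha_thr)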
Code example #2
def main():
    fmin = 1
    fmax = 40
    channels = 64
    wlen = 0.5  # window length in seconds
    sfreq = 512
    num_iterations = 500

    signal = np.random.rand(channels, int(np.round(sfreq * wlen)))
    psde = mne.decoding.PSDEstimator(sfreq=sfreq, fmin=fmin,\
        fmax=fmax, bandwidth=None, adaptive=False, low_bias=True,\
        n_jobs=1, normalization='length', verbose=None)

    tm = qc.Timer()
    times = []
    for i in range(num_iterations):
        tm.reset()
        psd = psde.transform(
            signal.reshape((1, signal.shape[0], signal.shape[1])))
        times.append(tm.msec())
        if i % 100 == 0:
            logger.info('%d / %d' % (i, num_iterations))
    ms = np.mean(times)
    fps = 1000 / ms
    logger.info('Average = %.1f ms (%.1f Hz)' % (ms, fps))
Code example #3
File: epochs2mat.py Project: vferat/NeuroDecode
def epochs2mat(data_dir,
               channel_picks,
               event_id,
               tmin,
               tmax,
               merge_epochs=False,
               spfilter=None,
               spchannels=None):
    if merge_epochs:
        # load all raw files in the directory and merge epochs
        fiflist = []
        for data_file in qc.get_file_list(data_dir, fullpath=True):
            if data_file[-4:] != '.fif':
                continue
            fiflist.append(data_file)
        raw, events = pu.load_multi(fiflist,
                                    spfilter=spfilter,
                                    spchannels=spchannels)
        matfile = data_dir + '/epochs_all.mat'
        save_mat(raw, events, channel_picks, event_id, tmin, tmax, matfile)
    else:
        # process individual raw file separately
        for data_file in qc.get_file_list(data_dir, fullpath=True):
            if data_file[-4:] != '.fif':
                continue
            [base, fname, fext] = qc.parse_path_list(data_file)
            matfile = '%s/%s-epochs.mat' % (base, fname)
            raw, events = pu.load_raw(data_file)
            save_mat(raw, events, channel_picks, event_id, tmin, tmax, matfile)

    logger.info('Exported to %s' % matfile)
Code example #4
    def update_loop(self):

        # Shared variable used to stop the viewer from the GUI
        if not self.state.value:
            logger.info('Viewer stopped')
            sys.exit()

        try:
            # assert self.updating==False, 'thread destroyed?'
            # self.updating= True

            # self.handle_tobiid_input()	# Read TiDs
            self.read_eeg()  # Read new chunk
            if len(self.ts_list) > 0:
                self.filter_signal()  # Filter acquired data
                self.update_ringbuffers()  # Update the plotting info
                if (not self.stop_plot):
                    self.repaint()  # Call paint event
        except:
            logger.exception('Exception. Dropping into a shell.')
            pdb.set_trace()
        finally:
            # self.updating= False
            # using singleShot instead
            # QtCore.QTimer.singleShot( 20, self.update_loop )
            pass
Code example #5
File: convert2fif.py Project: vferat/NeuroDecode
def any2fif(filename, interactive=False, outdir=None, channel_file=None):
    """
    Generic file format converter
    """
    p = qc.parse_path(filename)
    if outdir is not None:
        qc.make_dirs(outdir)

    if p.ext == 'pcl':
        eve_file = '%s/%s.txt' % (p.dir, p.name.replace('raw', 'eve'))
        if os.path.exists(eve_file):
            logger.info('Adding events from %s' % eve_file)
        else:
            eve_file = None
        pcl2fif(filename,
                interactive=interactive,
                outdir=outdir,
                external_event=eve_file)
    elif p.ext == 'eeg':
        eeg2fif(filename, interactive=interactive, outdir=outdir)
    elif p.ext in ['edf', 'bdf']:
        bdf2fif(filename, interactive=interactive, outdir=outdir)
    elif p.ext == 'gdf':
        gdf2fif(filename,
                interactive=interactive,
                outdir=outdir,
                channel_file=channel_file)
    elif p.ext == 'xdf':
        xdf2fif(filename, interactive=interactive, outdir=outdir)
    else:  # unknown format
        logger.error(
            'Ignored unrecognized file extension %s. It should be one of [.pcl | .eeg | .edf | .bdf | .gdf | .xdf]'
            % p.ext)
Code example #6
def config_run(featfile=None):
    if featfile is None or len(featfile.strip()) == 0:
        if os.path.exists('good_features.txt'):
            featfile = os.path.realpath('good_features.txt').replace('\\', '/')
            logger.info('Found %s in the current folder.' % featfile)
        else:
            featfile = input('Feature file path? ')
    feature_info(featfile)
Code example #7
    def __init__(self, cfg, viz, tdef, trigger, logfile=None):
        self.cfg = cfg
        self.tdef = tdef
        self.trigger = trigger
        self.viz = viz
        self.viz.fill()
        self.refresh_delay = 1.0 / self.cfg.REFRESH_RATE
        self.bar_step_left = self.cfg.BAR_STEP['left']
        self.bar_step_right = self.cfg.BAR_STEP['right']
        self.bar_step_up = self.cfg.BAR_STEP['up']
        self.bar_step_down = self.cfg.BAR_STEP['down']
        self.bar_step_both = self.cfg.BAR_STEP['both']

        if type(self.cfg.BAR_BIAS) is tuple:
            self.bar_bias = list(self.cfg.BAR_BIAS)
        else:
            self.bar_bias = self.cfg.BAR_BIAS

        # New decoder: output is already smoothed by the decoder, so apply the bias afterwards.
        #self.alpha_old = self.cfg.PROB_ACC_ALPHA
        #self.alpha_new = 1.0 - self.cfg.PROB_ACC_ALPHA

        if hasattr(self.cfg,
                   'BAR_REACH_FINISH') and self.cfg.BAR_REACH_FINISH == True:
            self.premature_end = True
        else:
            self.premature_end = False

        self.tm_trigger = qc.Timer()
        self.tm_display = qc.Timer()
        self.tm_watchdog = qc.Timer()
        if logfile is not None:
            self.logf = open(logfile, 'w')
        else:
            self.logf = None

        # STIMO only
        if self.cfg.WITH_STIMO is True:
            if self.cfg.STIMO_COMPORT is None:
                atens = [x for x in serial.tools.list_ports.grep('ATEN')]
                if len(atens) == 0:
                    raise RuntimeError('No ATEN device found. Stop.')
                try:
                    self.stimo_port = atens[0].device
                except AttributeError:  # depends on Python distribution
                    self.stimo_port = atens[0][0]
            else:
                self.stimo_port = self.cfg.STIMO_COMPORT
            self.ser = serial.Serial(self.stimo_port, self.cfg.STIMO_BAUDRATE)
            logger.info('STIMO serial port %s is_open = %s' %
                        (self.stimo_port, self.ser.is_open))

        # FES only
        if self.cfg.WITH_FES is True:
            self.stim = fes.Motionstim8()
            self.stim.OpenSerialPort(self.cfg.FES_COMPORT)
            self.stim.InitializeChannelListMode()
            logger.info('Opened FES serial port')
Code example #8
File: trainer_mi.py Project: vferat/NeuroDecode
def fit_predict_thres(cls,
                      X_train,
                      Y_train,
                      X_test,
                      Y_test,
                      cnum,
                      label_list,
                      ignore_thres=None,
                      decision_thres=None):
    """
    Any likelihood lower than a threshold is not counted as classification score
    Confusion matrix, accuracy and F1 score (macro average) are computed.

    Params
    ======
    ignore_thres:
    if not None or larger than 0, likelihood values lower than ignore_thres will be ignored
    while computing confusion matrix.

    """
    timer = qc.Timer()
    cls.fit(X_train, Y_train)
    assert ignore_thres is None or ignore_thres >= 0
    if ignore_thres is None or ignore_thres == 0:
        Y_pred = cls.predict(X_test)
        score = skmetrics.accuracy_score(Y_test, Y_pred)
        cm = skmetrics.confusion_matrix(Y_test, Y_pred, label_list)
        f1 = skmetrics.f1_score(Y_test, Y_pred, average='macro')
    else:
        if decision_thres is not None:
            logger.error(
                'decision threshold and ignore_thres cannot be set at the same time.'
            )
            raise ValueError
        Y_pred = cls.predict_proba(X_test)
        Y_pred_labels = np.argmax(Y_pred, axis=1)
        Y_pred_maxes = np.array([x[i] for i, x in zip(Y_pred_labels, Y_pred)])
        Y_index_overthres = np.where(Y_pred_maxes >= ignore_thres)[0]
        Y_index_underthres = np.where(Y_pred_maxes < ignore_thres)[0]
        Y_pred_overthres = np.array(
            [cls.classes_[x] for x in Y_pred_labels[Y_index_overthres]])
        Y_pred_underthres = np.array(
            [cls.classes_[x] for x in Y_pred_labels[Y_index_underthres]])
        Y_pred_underthres_count = np.array(
            [np.count_nonzero(Y_pred_underthres == c) for c in label_list])
        Y_test_overthres = Y_test[Y_index_overthres]
        score = skmetrics.accuracy_score(Y_test_overthres, Y_pred_overthres)
        cm = skmetrics.confusion_matrix(Y_test_overthres, Y_pred_overthres,
                                        label_list)
        cm = np.concatenate((cm, Y_pred_underthres_count[:, np.newaxis]),
                            axis=1)
        f1 = skmetrics.f1_score(Y_test_overthres,
                                Y_pred_overthres,
                                average='macro')

    logger.info('Cross-validation %d (%.3f) - %.1f sec' %
                (cnum, score, timer.sec()))
    return score, cm, f1
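For intuition, the ignore_thres branch boils down to scoring only the test samples whose top class likelihood reaches the threshold. A stand-alone sketch with toy Gaussian data and a hypothetical threshold of 0.6 (plain scikit-learn, not the project's classifier):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn import metrics as skmetrics

rng = np.random.RandomState(0)
Y_train = rng.randint(0, 2, 200)
X_train = rng.randn(200, 5) + 2.0 * Y_train[:, None]   # toy, roughly separable data
Y_test = rng.randint(0, 2, 100)
X_test = rng.randn(100, 5) + 2.0 * Y_test[:, None]
ignore_thres = 0.6

cls = LogisticRegression().fit(X_train, Y_train)
proba = cls.predict_proba(X_test)
pred_labels = np.argmax(proba, axis=1)
pred_maxes = proba[np.arange(len(proba)), pred_labels]

keep = pred_maxes >= ignore_thres                       # confident predictions only
score = skmetrics.accuracy_score(Y_test[keep], cls.classes_[pred_labels[keep]])
print('Scored on %d / %d confident samples: %.3f' % (keep.sum(), len(Y_test), score))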
Code example #9
File: trainer_mi.py Project: vferat/NeuroDecode
def balance_samples(X, Y, balance_type, verbose=False):
    if balance_type == 'OVER':
        """
        Oversample from classes that lack samples
        """
        label_set = np.unique(Y)
        max_set = []
        X_balanced = np.array(X)
        Y_balanced = np.array(Y)

        # find a class with maximum number of samples
        for c in label_set:
            yl = np.where(Y == c)[0]
            if len(max_set) == 0 or len(yl) > max_set[1]:
                max_set = [c, len(yl)]
        for c in label_set:
            if c == max_set[0]: continue
            yl = np.where(Y == c)[0]
            extra_samples = max_set[1] - len(yl)
            extra_idx = np.random.choice(yl, extra_samples)
            X_balanced = np.append(X_balanced, X[extra_idx], axis=0)
            Y_balanced = np.append(Y_balanced, Y[extra_idx], axis=0)
    elif balance_type == 'UNDER':
        """
        Undersample from classes that are excessive
        """
        label_set = np.unique(Y)
        min_set = []

        # find a class with minimum number of samples
        for c in label_set:
            yl = np.where(Y == c)[0]
            if len(min_set) == 0 or len(yl) < min_set[1]:
                min_set = [c, len(yl)]
        yl = np.where(Y == min_set[0])[0]
        X_balanced = np.array(X[yl])
        Y_balanced = np.array(Y[yl])
        for c in label_set:
            if c == min_set[0]: continue
            yl = np.where(Y == c)[0]
            reduced_idx = np.random.choice(yl, min_set[1])
            X_balanced = np.append(X_balanced, X[reduced_idx], axis=0)
            Y_balanced = np.append(Y_balanced, Y[reduced_idx], axis=0)
    elif balance_type is None or balance_type is False:
        return X, Y
    else:
        logger.error('Unknown balancing type %s' % balance_type)
        raise ValueError

    logger.info_green('\nNumber of samples after %ssampling' %
                      balance_type.lower())
    for c in label_set:
        logger.info(
            '%s: %d -> %d' %
            (c, len(np.where(Y == c)[0]), len(np.where(Y_balanced == c)[0])))

    return X_balanced, Y_balanced
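The 'OVER' branch amounts to drawing extra samples, with replacement, from each minority class until it matches the majority class count. A minimal numpy sketch with toy data:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(14, 3)
Y = np.array([0] * 10 + [1] * 4)                  # class 0: 10 samples, class 1: 4

X_bal, Y_bal = np.array(X), np.array(Y)
counts = {c: int(np.sum(Y == c)) for c in np.unique(Y)}
n_max = max(counts.values())
for c, n in counts.items():
    if n < n_max:                                  # oversample each minority class
        extra_idx = rng.choice(np.where(Y == c)[0], n_max - n)
        X_bal = np.append(X_bal, X[extra_idx], axis=0)
        Y_bal = np.append(Y_bal, Y[extra_idx], axis=0)

print({c: int(np.sum(Y_bal == c)) for c in np.unique(Y)})   # both classes now have 10 samples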
Code example #10
def raw2mat(infile, outfile):
    '''
    Convert raw data file to MATLAB file
    '''
    raw, events = load_raw(infile)
    header = dict(bads=raw.info['bads'], ch_names=raw.info['ch_names'],\
                  sfreq=raw.info['sfreq'], events=events)
    scipy.io.savemat(outfile, dict(signals=raw._data, header=header))
    logger.info('Exported to %s' % outfile)
Code example #11
 def set_pin(self, pin):
     if self.lpttype == 'SOFTWARE':
         logger.error('set_pin() not supported for software trigger.')
         return False
     elif self.lpttype == 'FAKE':
         logger.info('FAKE trigger pin %s' % pin)
         return True
     else:
         self.set_data(2**(pin - 1))
Code example #12
 def signal_off(self):
     if self.lpttype == 'SOFTWARE':
         return self.write_event(0)
     elif self.lpttype == 'FAKE':
         logger.info('FAKE trigger off')
         return True
     else:
         self.set_data(0)
         self.offtimer = threading.Timer(self.delay, self.signal_off)
Code example #13
 def __del__(self):
     # STIMO only
     if self.cfg.WITH_STIMO is True:
         self.ser.close()
         logger.info('Closed STIMO serial port %s' % self.stimo_port)
     # FES only
     if self.cfg.WITH_FES is True:
         stim_code = [0, 0, 0, 0, 0, 0, 0, 0]
         self.stim.UpdateChannelSettings(stim_code)
         self.stim.CloseSerialPort()
         logger.info('Closed FES serial port')
Code example #14
File: trainer_mi.py Project: vferat/NeuroDecode
def get_predict_proba(cls, X_train, Y_train, X_test, Y_test, cnum):
    """
    All likelihoods will be collected from every fold of a cross-validaiton. Based on these likelihoods,
    a threshold will be computed that will balance the true positive rate of each class.
    Available with binary classification scenario only.
    """
    timer = qc.Timer()
    cls.fit(X_train, Y_train)
    Y_pred = cls.predict_proba(X_test)
    logger.info('Cross-validation %d (%d tests) - %.1f sec' %
                (cnum, Y_pred.shape[0], timer.sec()))
    return Y_pred[:, 0]
Code example #15
def config_run(featfile=None, topo_layout_file=None):
    if featfile is None or len(featfile.strip()) == 0:
        if os.path.exists('good_features.txt'):
            featfile = os.path.realpath('good_features.txt').replace('\\', '/')
            logger.info('Found %s in the current folder.' % featfile)
        else:
            featfile = input('Feature file path? ')

    if topo_layout_file is None or len(topo_layout_file.strip()) == 0:
        topo_layout_file = 'antneuro_64ch.lay'

    feature_importances(featfile, topo_layout_file)
Code example #16
def cva_features(datadir):
    """
    (DEPRECATED FUNCTION)
    """
    for fin in qc.get_file_list(datadir, fullpath=True):
        if fin[-4:] != '.gdf': continue
        fout = fin + '.cva'
        if os.path.exists(fout):
            logger.info('Skipping %s' % fout)
            continue
        logger.info("cva_features('%s')" % fin)
        qc.matlab("cva_features('%s')" % fin)
Code example #17
def fif_resample(fif_dir, sfreq_target):
    out_dir = fif_dir + '/fif_resample%d' % sfreq_target
    qc.make_dirs(out_dir)
    for f in qc.get_file_list(fif_dir):
        pp = qc.parse_path(f)
        if pp.ext != 'fif':
            continue
        logger.info('Resampling %s' % f)
        raw, events = pu.load_raw(f)
        raw.resample(sfreq_target)
        fif_out = '%s/%s.fif' % (out_dir, pp.name)
        raw.save(fif_out)
        logger.info('Exported to %s' % fif_out)
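The resampling step itself is plain MNE; a small self-contained sketch with a synthetic recording and hypothetical channel names (no .fif files required):

import numpy as np
import mne

sfreq = 512
info = mne.create_info(['C3', 'C4', 'Cz', 'TRIGGER'], sfreq=sfreq, ch_types='eeg')
raw = mne.io.RawArray(np.random.randn(4, sfreq * 10), info)  # 10 s of synthetic data

raw.resample(128)                    # in-place resampling, as in fif_resample() above
print(raw.info['sfreq'])             # 128.0
# raw.save('synthetic-128-raw.fif')  # exporting would mirror the loop above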
Code example #18
    def read_eeg(self):

        # if self.updating==True: print( '##### ERROR: thread destroyed ? ######' )
        # self.updating= True

        try:
            # data, self.ts_list= self.sr.inlets[0].pull_chunk(max_samples=self.config['sf']) # [frames][channels]
            data, self.ts_list = self.sr.acquire(blocking=False)

            # TODO: check and change to these two lines
            #self.sr.acquire(blocking=False, decim=DECIM)
            #data, self.ts_list = self.sr.get_window()

            if len(self.ts_list) == 0:
                # self.eeg= None
                # self.tri= None
                return

            n = self.config['eeg_channels']
            '''
            x= np.array( data )
            trg_ch= self.config['tri_channels']
            if trg_ch is not None:
                self.tri= np.reshape( x[:,trg_ch], (-1,1) ) # samples x 1
            self.eeg= np.reshape( x[:,self.sr.eeg_channels], (-1,n) ) # samples x channels
            '''
            trg_ch = self.config['tri_channels']
            if trg_ch is not None:
                self.tri = np.reshape(data[:, trg_ch], (-1, 1))  # samples x 1
            self.eeg = np.reshape(data[:, self.sr.eeg_channels],
                                  (-1, n))  # samples x channels

            if DEBUG_TRIGGER:
                # show trigger value
                try:
                    trg_value = max(self.tri)
                    if trg_value > 0:
                        logger.info('Received trigger %s' % trg_value)
                except:
                    logger.exception('Error! self.tri = %s' % self.tri)

                    # Read exg. self.config.samples*self.config.exg_ch, type float
                    # bexg = np.random.rand( 1, self.config['samples'] * self.config['exg_channels'] )
                    # self.exg = np.reshape(list(bexg), (self.config['samples'],self.config['exg_channels']))
        except WindowsError:
            # print('**** Access violation in read_eeg():\n%s\n%s'% (sys.exc_info()[0], sys.exc_info()[1]))
            pass
        except:
            logger.exception('Exception in read_eeg()')
            pdb.set_trace()
Code example #19
File: convert2fif.py Project: vferat/NeuroDecode
def main(input_dir, channel_file=None):
    count = 0
    for f in qc.get_file_list(input_dir, fullpath=True, recursive=True):
        p = qc.parse_path(f)
        outdir = p.dir + '/fif/'
        if p.ext in ['pcl', 'bdf', 'edf', 'gdf', 'eeg', 'xdf']:
            logger.info('Converting %s' % f)
            any2fif(f,
                    interactive=True,
                    outdir=outdir,
                    channel_file=channel_file)
            count += 1

    logger.info('%d files converted.' % count)
Code example #20
File: convert2fif.py Project: vferat/NeuroDecode
def convert2mat(filename, matfile):
    """
    Convert to mat using MATLAB BioSig sload().
    """
    basename = '.'.join(filename.split('.')[:-1])
    # extension= filename.split('.')[-1]
    matfile = basename + '.mat'
    if not os.path.exists(matfile):
        logger.info('Converting input to mat file')
        run = "[sig,header]=sload('%s'); save('%s.mat','sig','header');" % (
            filename, basename)
        qc.matlab(run)
        if not os.path.exists(matfile):
            logger.error('mat file conversion error.')
            sys.exit()
Code example #21
File: decoder.py Project: fcbg-hnp/NeuroDecode
 def stop(self):
     """
     Stop the daemon
     """
     if self.is_running() == 0:
         logger.warning('Decoder already stopped.')
         return
     for running in self.running:
         running.value = 0
     for proc in self.procs:
         proc.join(10)
         if proc.is_alive():
             logger.warning('Process %s did not die properly.' % proc.pid)
     self.reset()
     logger.info(self.stopmsg)
Code example #22
 def set_data(self, value):
     if self.lpttype == 'SOFTWARE':
         logger.error('set_data() not supported for software trigger.')
         return False
     elif self.lpttype == 'FAKE':
         logger.info('FAKE trigger value %s' % value)
         return True
     else:
         if self.lpttype == 'USB2LPT':
             self.lpt.setdata(value)
         elif self.lpttype == 'DESKTOP':
             self.lpt.setdata(self.portaddr, value)
         elif self.lpttype == 'ARDUINO':
             self.ser.write(bytes([value]))
         else:
             raise RuntimeError('Wrong trigger device')
Code example #23
File: decoder.py Project: fcbg-hnp/NeuroDecode
 def start(self):
     """
     Start the daemon
     """
     if self.is_running() > 0:
         msg = 'Cannot start. Daemon already running. (PID' + ', '.join(
             ['%d' % proc.pid for proc in self.procs]) + ')'
         logger.error(msg)
         return
     for proc in self.procs:
         proc.start()
     if self.wait_init:
         for running in self.running:
             while running.value == 0:
                 time.sleep(0.001)
     logger.info(self.startmsg)
Code example #24
    def init_loop(self):

        self.updating = False

        self.sr = StreamReceiver(window_size=1,
                                 buffer_size=10,
                                 amp_serial=self.amp_serial,
                                 amp_name=self.amp_name)
        srate = int(self.sr.sample_rate)
        # n_channels= self.sr.channels

        # 12 unsigned ints (4 bytes)
        ########## TODO: assuming a 32-sample chunk => make it read from the LSL header
        data = [
            'EEG', srate, ['L', 'R'], 32,
            len(self.sr.get_eeg_channels()), 0,
            self.sr.get_trigger_channel(), None, None, None, None, None
        ]
        logger.info('Trigger channel is %d' % self.sr.get_trigger_channel())

        self.config = {
            'id': data[0],
            'sf': data[1],
            'labels': data[2],
            'samples': data[3],
            'eeg_channels': data[4],
            'exg_channels': data[5],
            'tri_channels': data[6],
            'eeg_type': data[8],
            'exg_type': data[9],
            'tri_type': data[10],
            'lbl_type': data[11],
            'tim_size': 1,
            'idx_size': 1
        }

        self.tri = np.zeros(self.config['samples'])
        self.last_tri = 0
        self.eeg = np.zeros(
            (self.config['samples'], self.config['eeg_channels']),
            dtype=np.float64)
        self.exg = np.zeros(
            (self.config['samples'], self.config['exg_channels']),
            dtype=np.float64)
        self.ts_list = []
        self.ts_list_tri = []
Code example #25
File: fif2mat.py Project: vferat/NeuroDecode
def fif2mat(data_dir):
    out_dir = '%s/mat_files' % data_dir
    qc.make_dirs(out_dir)
    for rawfile in qc.get_file_list(data_dir, fullpath=True):
        if rawfile[-4:] != '.fif': continue
        raw, events = pu.load_raw(rawfile)
        events[:, 0] += 1  # MATLAB uses 1-based indexing
        sfreq = raw.info['sfreq']
        data = dict(signals=raw._data,
                    events=events,
                    sfreq=sfreq,
                    ch_names=raw.ch_names)
        fname = qc.parse_path(rawfile).name
        matfile = '%s/%s.mat' % (out_dir, fname)
        scipy.io.savemat(matfile, data)
        logger.info('Exported to %s' % matfile)
    logger.info('Finished exporting.')
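As a reference for the exported layout, a toy scipy.io.savemat call with synthetic signals and events (note the shift to MATLAB's 1-based sample indices, as above):

import numpy as np
import scipy.io

signals = np.random.randn(4, 1000)                     # channels x samples
events = np.array([[100, 0, 1], [500, 0, 2]])          # MNE-style [sample, 0, value]
events[:, 0] += 1                                      # MATLAB uses 1-based indexing
scipy.io.savemat('toy_export.mat',
                 dict(signals=signals, events=events, sfreq=512.0,
                      ch_names=['C3', 'C4', 'Cz', 'Fz']))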
Code example #26
File: q_common.py Project: vferat/NeuroDecode
def shell():
    """
    Enter interactive shell within the caller's scope
    """
    logger.info('*** Entering interactive shell. Ctrl+D to return. ***')
    stack = inspect.stack()
    try:  # globals are first loaded, then overwritten by locals
        globals_ = {}
        globals_.update(
            {key: value
             for key, value in stack[1][0].f_globals.items()})
        globals_.update(
            {key: value
             for key, value in stack[1][0].f_locals.items()})
    finally:
        del stack
    code.InteractiveConsole(globals_).interact()
Code example #27
    def __init__(self,
                 amp_name,
                 amp_serial,
                 state=mp.Value('i', 1),
                 queue=None):
        super(Scope, self).__init__()

        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        redirect_stdout_to_queue(logger, queue, 'INFO')
        logger.info('Viewer launched')

        self.amp_name = amp_name
        self.amp_serial = amp_serial
        self.state = state

        self.init_scope()
Code example #28
File: decoder.py Project: fcbg-hnp/NeuroDecode
def _log_decoding_helper(state, event_queue, amp_name=None, autostop=False):
    """
    Helper function to run StreamReceiver object in background

    Parameters
    ----------
    state : mp.Value
        The multiprocessing sharing variable
    event_queue : mp.Queue
        The queue used to share new events
    amp_name : str
        The stream name to connect to
    autostop : bool
        If True, automatically finish when no more data is received.
    """
    logger.info('Event acquisition subprocess started.')

    # wait for the start signal
    while state.value == 0:
        time.sleep(0.01)

    # keep acquiring until stopped; event times and values are extracted below
    sr = StreamReceiver(bufsize=0, stream_name=amp_name)
    tm = Timer(autoreset=True)
    started = False
    while state.value == 1:
        chunk, ts_list = sr.acquire()
        if autostop:
            if started is True:
                if len(ts_list) == 0:
                    state.value = 0
                    break
            elif len(ts_list) > 0:
                started = True
        tm.sleep_atleast(0.001)
    logger.info('Event acquisition subprocess finishing up ...')

    buffers, times = sr.get_buffer()
    events = buffers[:, 0]  # first channel is the trigger channel
    event_index = np.where(events != 0)[0]
    event_times = times[event_index].reshape(-1).tolist()
    event_values = events[event_index].tolist()
    assert len(event_times) == len(event_values)
    event_queue.put((event_times, event_values))
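The event extraction at the end of the helper can be checked in isolation; a toy numpy sketch assuming, as above, that the first buffer column is the trigger channel:

import numpy as np

times = np.arange(11, dtype=float)        # toy timestamps (seconds)
buffers = np.zeros((11, 2))               # [samples x channels], column 0 = trigger
buffers[3, 0] = 5                         # hypothetical event value 5 at t = 3 s
buffers[8, 0] = 7                         # hypothetical event value 7 at t = 8 s

events = buffers[:, 0]
event_index = np.where(events != 0)[0]
event_times = times[event_index].reshape(-1).tolist()
event_values = events[event_index].tolist()
print(event_times, event_values)          # [3.0, 8.0] [5.0, 7.0]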
Code example #29
File: q_common.py Project: vferat/NeuroDecode
def run_multi(cmd_list, cores=0, quiet=False):
    """
    Input
    -----
    cmd_list: list of shell commands, exactly as you would type them in bash
    cores: number of cores to use (use all cores if 0)
    Logging tip: "command args > log.txt 2>&1"
    """
    if cores == 0: cores = mp.cpu_count()
    pool = mp.Pool(cores)
    processes = []
    for cmd in cmd_list:
        if not quiet:
            logger.info(cmd)
        processes.append(pool.apply_async(os.system, [cmd]))
    for proc in processes:
        proc.get()
    pool.close()
    pool.join()
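A stand-alone sketch of the same pool/apply_async pattern with hypothetical echo commands, handy for checking the flow before passing real jobs to run_multi():

import multiprocessing as mp
import os

if __name__ == '__main__':
    cmd_list = ['echo job-%d' % i for i in range(4)]   # hypothetical commands
    pool = mp.Pool(mp.cpu_count())
    results = [pool.apply_async(os.system, [cmd]) for cmd in cmd_list]
    for r in results:
        r.get()                                        # block until each command finishes
    pool.close()
    pool.join()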
Code example #30
def export_topo(data,
                pos,
                pngfile,
                xlabel='',
                vmin=None,
                vmax=None,
                chan_vis=None,
                res=64,
                contours=0):
    mne.viz.plot_topomap(data,
                         pos,
                         names=chan_vis,
                         show_names=True,
                         res=res,
                         contours=contours,
                         show=False)
    plt.suptitle(xlabel)
    plt.savefig(pngfile)
    logger.info('Exported %s' % pngfile)