Example #1
def main():
    alpha = False
    print("Looking for stream")
    
    inlet = StreamInlet(resolve_stream('type', 'EEG')[0])

    chunk, timestamp = inlet.pull_chunk(timeout=3, max_samples=chunk_size)
    buffer = np.array(chunk).transpose()

    print("Filling buffer")
    for i in range(window_size - 1):
        chunk, timestamp = inlet.pull_chunk(timeout=3, max_samples=chunk_size)
        chunk = np.array(chunk).transpose()
        buffer = np.hstack((buffer, chunk))
        print(i)

    print(buffer.shape)
    print("Processing")
    while True:
        chunk, timestamp = inlet.pull_chunk(timeout=3, max_samples=chunk_size)
        chunk = np.array(chunk).transpose()

        buffer = np.hstack((buffer, chunk))

        freqs, psd = welch(buffer, fs=250., nperseg=4*250)
        psd = np.average(psd, axis=0)

        freq_res = freqs[1] - freqs[0] 

        total_power = simps(psd, dx=freq_res)

        band_power = []
        for band in eeg_bands:
            idx = np.logical_and(freqs >= eeg_bands[band][0], freqs <= eeg_bands[band][1])
            tmp = simps(psd[idx], dx=freq_res)
            band_power.append(tmp)
            print(band, tmp, tmp/total_power)

        print()

        if alpha:
            if band_power[2] < band_power[1] or band_power[2] < band_power[3]:
                alpha = False
            else:
                print("Meditação")
        else:
            if band_power[2] / band_power[1] >= 1.3 and band_power[2] / band_power[3] >= 1.3:
                alpha = True

        buffer = np.delete(buffer, slice(250), axis=1)
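
Example #1 relies on module-level names that the snippet does not show. Below is a minimal sketch of that assumed setup; the values of chunk_size, window_size and the band limits are guesses, not the original author's.

import numpy as np
from pylsl import StreamInlet, resolve_stream
from scipy.signal import welch
from scipy.integrate import simps  # scipy.integrate.simpson on newer SciPy

chunk_size = 250   # samples pulled per chunk; assumed 1 s at 250 Hz, matching fs=250 above
window_size = 4    # chunks kept in the sliding window; 4 * 250 samples matches nperseg=4*250
eeg_bands = {      # conventional band limits in Hz; the order matters for the band_power indices
    'delta': (1, 4),
    'theta': (4, 8),
    'alpha': (8, 12),
    'beta': (12, 30),
}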
Example #2
class BetaInlet(object):
    def __init__(self):
        print("looking for an EEG stream...")
        streams = resolve_byprop("type", "EEG")

        # create a new inlet to read from the stream
        proc_flags = proc_clocksync | proc_dejitter | proc_monotonize
        self.inlet = StreamInlet(streams[0], processing_flags=proc_flags)

        stream_info = self.inlet.info()
        stream_xml = stream_info.desc()
        chans_xml = stream_xml.child("channels")
        self.channel_list = []
        ch = chans_xml.child("channel")
        while ch.name() == "channel":
            self.channel_list.append(ch)
            ch = ch.next_sibling("channel")

    def update(self):
        max_samps = 3276 * 2
        data = np.nan * np.ones(
            (max_samps, len(self.channel_list)), dtype=np.float32)
        _, timestamps = self.inlet.pull_chunk(max_samples=max_samps,
                                              dest_obj=data)
        data = data[:len(timestamps), :]
        return data, np.asarray(timestamps)

    def sampling_rate(self):
        return self.inlet.info().nominal_srate()
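
A possible way to drive the BetaInlet above is to poll update() in a loop; the 0.1 s sleep is an arbitrary choice, not part of the original.

import time

beta = BetaInlet()
print("sampling rate:", beta.sampling_rate())
while True:
    data, timestamps = beta.update()   # data has shape (n_samples, n_channels)
    if timestamps.size:
        print("pulled", data.shape[0], "samples")
    time.sleep(0.1)                    # arbitrary polling interval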
Example #3
def product():
    streams = resolve_stream('type', 'EEG')
    inlet = StreamInlet(streams[0])
    while True:
        # sample, timestamp = inlet.pull_sample()
        sample, timestamp = inlet.pull_chunk(timeout=1.0)
        eeg_queue.put(sample)
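
product() pushes raw chunks into an eeg_queue defined elsewhere; a minimal sketch of the assumed producer/consumer wiring (the queue and thread are illustrative, not from the original):

import queue
import threading

eeg_queue = queue.Queue()   # assumed shared queue that product() writes to

threading.Thread(target=product, daemon=True).start()
while True:
    chunk = eeg_queue.get()            # list of samples from one pull_chunk() call
    if chunk:                          # pull_chunk() can return an empty list
        print("received chunk of", len(chunk), "samples")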
class BetaInlet(object):
    def __init__(self):
        print("looking for an EEG stream...")
        streams = resolve_byprop("type", "EEG")

        # create a new inlet to read from the stream
        proc_flags = proc_clocksync | proc_dejitter | proc_monotonize
        self.inlet = StreamInlet(streams[0], processing_flags=proc_flags)

        # The following is an example of how to read stream info
        stream_info = self.inlet.info()
        stream_Fs = stream_info.nominal_srate()
        stream_xml = stream_info.desc()
        chans_xml = stream_xml.child("channels")
        chan_xml_list = []
        ch = chans_xml.child("channel")
        while ch.name() == "channel":
            chan_xml_list.append(ch)
            ch = ch.next_sibling("channel")
        self.channel_names = [ch_xml.child_value("label") for ch_xml in chan_xml_list]
        print("Reading from inlet named {} with channels {} sending data at {} Hz".format(stream_info.name(),
                                                                                          self.channel_names, stream_Fs))

    def update(self):
        max_samps = 3276*2
        data = np.nan * np.ones((max_samps, len(self.channel_names)), dtype=np.float32)
        _, timestamps = self.inlet.pull_chunk(max_samples=max_samps, dest_obj=data)
        data = data[:len(timestamps), :]
        print("Beta inlet retrieved {} samples.".format(len(timestamps)))
        return data, np.asarray(timestamps)
Example #5
def recordeeg(duration):
    warnings.filterwarnings('ignore')

    BUFFER_LENGTH = 5
    EPOCH_LENGTH = 1
    OVERLAP_LENGTH = 0.8
    SHIFT_LENGTH = EPOCH_LENGTH - OVERLAP_LENGTH
    streams = resolve_byprop('type', 'EEG', timeout=2)
    if len(streams) == 0:
        raise RuntimeError('Can\'t find EEG stream.')
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()
    info = inlet.info()
    description = info.desc()
    fs = int(info.nominal_srate())
    eeg_buffer = np.zeros((int(fs * BUFFER_LENGTH), 1))
    filter_state = None
    n_win_test = int(
        np.floor((BUFFER_LENGTH - EPOCH_LENGTH) / SHIFT_LENGTH + 1))

    band_buffer = np.zeros((n_win_test, 4))
    musedata = []

    while True:
        eeg_data, timestamp = inlet.pull_chunk(timeout=1,
                                               max_samples=int(SHIFT_LENGTH *
                                                               fs))
        musedata += eeg_data
        if len(musedata) > duration * fs:
            return musedata
            break
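
A short usage sketch for recordeeg(): the 10-second duration is arbitrary, and the returned list of samples is converted to an array of shape (n_samples, n_channels).

import numpy as np

samples = recordeeg(duration=10)   # list of per-sample channel value lists
eeg = np.array(samples)            # shape: (n_samples, n_channels)
print(eeg.shape)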
class BetaInlet(object):
    def __init__(self):
        print("looking for an EEG stream...")
        streams = resolve_byprop("type", "EEG")

        # create a new inlet to read from the stream
        proc_flags = proc_clocksync | proc_dejitter | proc_monotonize
        self.inlet = StreamInlet(streams[0], processing_flags=proc_flags)

        # The following is an example of how to read stream info
        stream_info = self.inlet.info()
        stream_Fs = stream_info.nominal_srate()
        stream_xml = stream_info.desc()
        chans_xml = stream_xml.child("channels")
        chan_xml_list = []
        ch = chans_xml.child("channel")
        while ch.name() == "channel":
            chan_xml_list.append(ch)
            ch = ch.next_sibling("channel")
        self.channel_names = [
            ch_xml.child_value("label") for ch_xml in chan_xml_list
        ]
        print(
            "Reading from inlet named {} with channels {} sending data at {} Hz"
            .format(stream_info.name(), self.channel_names, stream_Fs))

    def update(self):
        max_samps = 3276 * 2
        data = np.nan * np.ones(
            (max_samps, len(self.channel_names)), dtype=np.float32)
        _, timestamps = self.inlet.pull_chunk(max_samples=max_samps,
                                              dest_obj=data)
        data = data[:len(timestamps), :]
        print("Beta inlet retrieved {} samples.".format(len(timestamps)))
        return data, np.asarray(timestamps)
Example #7
def main():
    model = load_model('RNN')
    while True:
        try:
            print("looking for an EEG stream...")
            streams = resolve_stream('name', 'NER_2015_BCI_Challenge_Samples')
            # create a new inlet to read from the stream
            inlet = StreamInlet(streams[0])
            # Pred stream
            info_pred = StreamInfo('NER_2015_BCI_Challenge_PRED', 'PRED', 1,
                                   IRREGULAR_RATE, 'float32', 'PREDID')
            outlet_pred = StreamOutlet(info_pred)
            while True:
                sample = np.empty((0, 56))
                timestamps = []
                while len(sample) < 2001:
                    chunk, ts = inlet.pull_chunk()
                    if ts:
                        timestamps = timestamps + ts
                        sample = np.concatenate([sample, chunk], axis=0)
                pred = model.predict(
                    np.expand_dims(sample[:2001].transpose(), axis=0))
                outlet_pred.push_chunk(pred[0, :, 0].tolist())
                print('Pushed inference results')
        except LostError:
            pass
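
main() above publishes its inference results on an LSL stream named 'NER_2015_BCI_Challenge_PRED'; a hedged sketch of a matching consumer:

from pylsl import StreamInlet, resolve_stream

pred_inlet = StreamInlet(resolve_stream('name', 'NER_2015_BCI_Challenge_PRED')[0])
while True:
    preds, ts = pred_inlet.pull_chunk(timeout=1.0)
    if ts:
        print("received predictions:", preds)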
Example #8
def pull_lsl_stream_chunk(stream: pylsl.StreamInlet, timeout: float = 0.0):
    try:
        sample, timestamps = stream.pull_chunk(timeout)
        return sample, timestamps, None
    except Exception as e:
        logger.info('LSL connection lost')
        return None, None, e
def pull_chunk(stream: StreamInlet, timeout: float):
    try:
        sample, timestamps = stream.pull_chunk(timeout)
        return sample, timestamps, None
    except Exception as e:
        logger.debug(f'LSL connection lost')
        return None, None, e
def record_eeg_filtered(r_length, freq, channel_i, notch=False, filter_state=None):
    """ Records EEG data from headset
    Arguments:
    r_length  -- how many seconds of data to record
    freq      -- sample rate
    channel_i -- channels to keep data from
    Returns:
    data      -- array of recorded data [sample, channel]
    """
    streams = resolve_byprop('type', 'EEG', timeout=2)
    inlet = StreamInlet(streams[0], max_chunklen=12)

    data, timestamps = inlet.pull_chunk(
        timeout=r_length + 1,
        max_samples=int(freq * r_length))

    data = np.array(data)[:, channel_i]

    if notch:
        if filter_state is None:
            filter_state = np.tile(lfilter_zi(NOTCH_B, NOTCH_A),
                                   (data.shape[1], 1)).T

        data, filter_state = lfilter(NOTCH_B, NOTCH_A, data, axis=0,
                                     zi=filter_state)

    return data
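
record_eeg_filtered() depends on NOTCH_B and NOTCH_A coefficients defined elsewhere; one way they could be built with scipy.signal.iirnotch (the 256 Hz rate, 50 Hz mains frequency and Q factor are assumptions):

from scipy.signal import iirnotch

fs = 256.0     # assumed headset sampling rate
mains = 50.0   # assumed mains frequency (use 60.0 in the Americas)
NOTCH_B, NOTCH_A = iirnotch(mains, Q=30.0, fs=fs)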
Example #11
class Stream (Thread):
    def __init__(self):
        Thread.__init__(self)
        # Enough for 1 sec at 256 Hz                                                                               
        self.BUFFER = 256
        print("looking for an EEG stream...")
        self.streams = resolve_byprop('type', 'EEG', timeout=2)
        
        if len(self.streams) == 0:
            raise RuntimeError("Can't find EEG stream")
        print("Start acquiring data")

        self.stream = self.streams[0]

        self.inlet = StreamInlet(self.stream, max_chunklen=self.BUFFER)
        self.times = []
        self.count = 0
        self.chunks = 3
        self.ave_len = 10
        self.ave = [0.0, 0.0]
        self.buf = CircularBuffer(self.chunks)
        self.state = 'noise'


    def run(self):

        while True:
            # Sample is a 2d array of [ [channel_i]*5 ] * BUFFER
            samples, timestamps = self.inlet.pull_chunk(timeout=2.0, max_samples=self.BUFFER)
            if timestamps:
                data = np.vstack(samples)
                data = np.transpose(data)
                print(np.shape(data))
                self.buf.write(data)

            # Check so that the buffer is filled before any filtering
            if self.count >= self.chunks:
                channels = list([0])
                lower_freq = 8
                higher_freq = 12
                filtered_data = mne.filter.filter_data(self.buf.window, 256, lower_freq, higher_freq, filter_length=256*self.chunks-1, fir_design='firwin')
            #for channel in filtered_data:
            #    print(alpha(channel))
                if(self.count < self.chunks+self.ave_len):
                    self.ave[0] += alpha(filtered_data[1]) * (1./self.ave_len)
                    self.ave[1] += alpha(filtered_data[2]) * (1./self.ave_len)
                
                elif(alpha(filtered_data[2]) > self.ave[1]*0.90):
                    if(alpha(filtered_data[1]) > self.ave[0]*1.05):
                        #print('Noise')
                        self.state = 'noise'
                    else:
                        self.state = 'yes'
                        
                else:
                    self.state = 'no'
                
            self.count += 1
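
The Stream thread relies on a CircularBuffer and an alpha() helper that are not shown. A minimal sketch of the interface the code uses: write() appends a (channels, samples) chunk and window concatenates the stored chunks along time; alpha() is assumed here to return the power of one channel.

import numpy as np

class CircularBuffer:
    """Keeps the most recent n_chunks chunks, concatenated along the time axis."""
    def __init__(self, n_chunks):
        self.n_chunks = n_chunks
        self._chunks = []

    def write(self, chunk):
        # chunk: array of shape (n_channels, n_samples)
        self._chunks.append(chunk)
        if len(self._chunks) > self.n_chunks:
            self._chunks.pop(0)

    @property
    def window(self):
        return np.hstack(self._chunks)

def alpha(channel):
    # Assumed: mean power of an already band-pass-filtered channel.
    return float(np.mean(np.square(channel)))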
Example #12
class AIYVoiceInterface:
    def __init__(self, lsl_data_type,
                 num_channels):  # default board_id 2 for Cyton
        self.lsl_data_type = lsl_data_type
        self.lsl_num_channels = num_channels

        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        # TO-DO: fix this, we need to re-stream this since sometimes unity doesn't pick up AIY data for some reason
        info = StreamInfo('VoiceBox', 'Voice', num_channels, 0.0, 'string',
                          'voice')
        self.outlet = StreamOutlet(info)
        pass

    def start_sensor(self):
        # connect to the sensor
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    self.lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        self.inlet.open_stream()
        print(
            'LSLInletInterface: resolved, created and opened inlet for lsl stream with type '
            + self.lsl_data_type)

        # read the channel names if there's any
        # tell the sensor to start sending frames

    def process_frames(self):
        # return one or more frames of the sensor
        try:
            frames, timestamps = self.inlet.pull_chunk()
            if len(frames) > 0:
                self.outlet.push_sample(frames[0])  # TO-DO: see above
        except LostError:
            frames, timestamps = [], []
            pass  # TODO handle stream lost
        return np.transpose(frames), timestamps

    def stop_sensor(self):
        if self.inlet:
            self.inlet.close_stream()
        print('LSLInletInterface: inlet stream closed.')

    def info(self):
        return self.inlet.info()

    def get_num_chan(self):
        return self.lsl_num_channels

    def get_nominal_srate(self):
        return self.streams[0].nominal_srate()
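
A hedged usage sketch for AIYVoiceInterface: the stream name 'AIYVoice' and the single channel are placeholders, and the loop assumes such a stream is already being published.

import time

voice = AIYVoiceInterface(lsl_data_type='AIYVoice', num_channels=1)  # names assumed
voice.start_sensor()
try:
    while True:
        frames, timestamps = voice.process_frames()
        time.sleep(0.05)               # arbitrary polling interval
except KeyboardInterrupt:
    voice.stop_sensor()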
Example #13
async def listen(inlet: pylsl.StreamInlet):
    while True:
        samples, timestamps = inlet.pull_chunk()
        for sample, timestamp in zip(samples, timestamps):
            print('\t'.join(
                map(lambda x: str(x).ljust(20),
                    [inlet.info().type(), timestamp, sample])),
                  flush=True)
        await asyncio.sleep(0.1)
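
One way the listen() coroutine above could be driven: resolve an inlet and hand it to asyncio; the 'EEG' type is an assumption.

import asyncio
import pylsl

streams = pylsl.resolve_byprop('type', 'EEG', timeout=5)  # assumed stream type
asyncio.run(listen(pylsl.StreamInlet(streams[0])))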
class LSLInlet:
    def __init__(self,
                 name=LSL_STREAM_NAMES[2],
                 max_chunklen=8,
                 n_channels=20):
        streams = resolve_byprop('name', name, timeout=LSL_RESOLVE_TIMEOUT)
        self.inlet = None
        self.dtype = 'float64'
        if len(streams) > 0:
            self.inlet = StreamInlet(streams[0],
                                     max_buflen=1,
                                     max_chunklen=max_chunklen)
            # self.dtype = fmt2string[self.inlet.info().channel_format()]
            print(self.dtype)
        self.n_channels = n_channels if n_channels else self.inlet.info(
        ).channel_count()

    def get_next_chunk(self, mode=0):
        # get next chunk
        chunk, timestamp = self.inlet.pull_chunk()
        # convert to numpy array
        chunk = np.array(chunk, dtype=self.dtype)
        # return first n_channels channels or None if empty chunk
        return chunk[:, :self.n_channels] if chunk.shape[0] > 0 else None

    def update_action(self):
        pass

    def save_info(self, file):
        with open(file, 'w', encoding="utf-8") as f:
            f.write(self.inlet.info().as_xml())

    def get_frequency(self):
        return self.inlet.info().nominal_srate()

    def get_n_channels(self):
        return self.n_channels

    def get_channels_labels_bad(self):
        time.sleep(0.001)
        labels = []
        ch = self.inlet.info().desc().child("channels").child("channel")
        for k in range(self.get_n_channels()):
            labels.append(ch.child_value("label"))
            ch = ch.next_sibling()
        return labels

    def get_channels_labels(self):
        return ch_names[:self.n_channels]

    def disconnect(self):
        del self.inlet
        self.inlet = None
Example #15
    def receive(self):
        streams = resolve_byprop('type',
                                 self.settings.type,
                                 timeout=LSL_SCAN_TIMEOUT)

        if len(streams) == 0:
            print("Can't find %s stream." % self.settings.type)
            return

        print("Started acquiring data.")
        inlet = StreamInlet(streams[0], max_chunklen=self.settings.chunk)

        info = inlet.info()
        description = info.desc()
        n_channels = info.channel_count()

        ch = description.child('channels').first_child()
        ch_names = [ch.child_value('label')]
        for i in range(1, n_channels):
            ch = ch.next_sibling()
            ch_names.append(ch.child_value('label'))

        channel_descriptor = ChannelDescriptor(self.settings.type, ch_names,
                                               info.nominal_srate())

        res = []
        timestamps = []
        t_init = time()
        time_correction = inlet.time_correction()
        print('Start recording at time t=%.3f' % t_init)
        print('Time correction: ', time_correction)

        while (time() - t_init) < 5000:
            try:
                data, timestamp = inlet.pull_chunk(
                    timeout=1.0, max_samples=self.settings.chunk)

                if timestamp:
                    res.append(data)
                    self.subscription.notify_all_subscribers(
                        self.settings.type, data, timestamp,
                        channel_descriptor)
                    timestamps.extend(timestamp)
            except KeyboardInterrupt:
                break

        time_correction = inlet.time_correction()
        print('Time correction: ', time_correction)

        res = np.concatenate(res, axis=0)
        timestamps = np.array(timestamps) + time_correction

        print('Done.')
Example #16
def testing():
    dummy_streamer = ble2lsl.Dummy(muse2016)  #

    streams = resolve_byprop(
        "type", "EEG", timeout=5
    )  #type: EEG, minimum return streams = 1, timeout after 5 seconds

    streamIn = StreamInlet(
        streams[0], max_chunklen=12, recover=True
    )  #Grab first stream from streams, MUSE chunk 12, drop lost stream
    print(streamIn)
    print(streamIn.info().channel_count())
    # This isn't strictly required: pull_sample() and pull_chunk() implicitly
    # open the stream, but being explicit makes the code clearer.
    streamIn.open_stream()
    print("Pull Sample")
    # pull_sample() returns a tuple with the values we want: the list of channel
    # values and a timestamp, i.e. a snapshot of the stream at a point in time.
    print(streamIn.pull_sample())
    print("Pull Chunk")
    ts = time.time()
    while (1):
        x = streamIn.pull_chunk()
        if all(x):
            #if not np.shape(x) == (2, 0):
            print(np.shape(x))
            print(np.shape(x[1]))
            t = [t - ts for t in x[1]]
            print(t)
            print(t[-1] - t[0])

        # for y in x:
        #     for z in y:
        #         print(z)
        #print("\n")

    plt.style.use('ggplot')

    # data first then time stamps, sick

    pprint(streamIn.info().as_xml())  #what
    timeC = streamIn.time_correction()
    print(timeC)

    #Clean up time

    streams.clear()
    streamIn.close_stream()  #calls lsl_close_stream
    streamIn.__del__()  #Not throwing errors
    dummy_streamer.stop()
Example #17
class LSLInletInterface:
    def __init__(self, lsl_data_type):
        self.streams = resolve_byprop('name', lsl_data_type, timeout=0.1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    lsl_data_type))
        self.inlet = StreamInlet(self.streams[0])
        self.lsl_data_type = lsl_data_type
        self.lsl_num_channels = self.inlet.channel_count
        pass

    def start_sensor(self):
        # connect to the sensor
        self.streams = resolve_byprop('name', self.lsl_data_type, timeout=0.1)
        if len(self.streams) < 1:
            raise AttributeError(
                'Unable to find LSL Stream with given type {0}'.format(
                    self.lsl_data_type))
        if not self.inlet:
            self.inlet = StreamInlet(self.streams[0])
        self.inlet.open_stream()
        print(
            'LSLInletInterface: resolved, created and opened inlet for lsl stream with type '
            + self.lsl_data_type)

        # read the channel names if there's any
        # tell the sensor to start sending frames

    def process_frames(self):
        # return one or more frames of the sensor
        try:
            frames, timestamps = self.inlet.pull_chunk()
        except LostError:
            frames, timestamps = [], []
            pass  # TODO handle stream lost
        return np.transpose(frames), timestamps

    def stop_sensor(self):
        if self.inlet:
            self.inlet.close_stream()
        print('LSLInletInterface: inlet stream closed.')

    def info(self):
        return self.inlet.info()

    def get_num_chan(self):
        return self.lsl_num_channels

    def get_nominal_srate(self):
        return self.streams[0].nominal_srate()
Example #18
def main():
    # first resolve an EEG stream on the lab network
    print("looking for an EEG stream...")
    streams = resolve_stream('type', 'EEG')

    # create a new inlet to read from the stream
    inlet = StreamInlet(streams[0])

    while True:
        # get a new sample (you can also omit the timestamp part if you're not
        # interested in it)
        chunk, timestamps = inlet.pull_chunk()
        if timestamps:
            print(timestamps, chunk)
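
For testing Example #18 without hardware, a hedged sketch of a matching sender: an outlet pushing random samples (the stream name, channel count and rate are arbitrary).

import time
import numpy as np
from pylsl import StreamInfo, StreamOutlet

info = StreamInfo('TestEEG', 'EEG', 8, 250, 'float32', 'testeeg1234')  # all values assumed
outlet = StreamOutlet(info)
while True:
    outlet.push_sample(np.random.randn(8).tolist())
    time.sleep(1 / 250)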
Example #19
class LSLClient(QThread):
    """docstring for LSLClient"""

    StoppedState = 0
    PausedState = 1
    RecordingState = 2

    def __init__(self):
        super(LSLClient, self).__init__()
        self.D = list()
        self.T = list()
        self.state = self.StoppedState
        self.streams = None
        self.inlet = None

    def setStream(self, tag, value):
        self.streams = resolve_stream(tag, value)
        self.inlet = StreamInlet(self.streams[0])
        self.D.clear()
        self.T.clear()

    def getData(self):
        return self.D

    def getState(self):
        return self.state

    def record(self):
        self.state = self.RecordingState
        self.start()

    def pause(self):
        self.state = self.PausedState

    def toggle(self):
        self.state = self.PausedState if self.state == self.RecordingState else self.RecordingState

    def stop(self):
        self.state = self.StoppedState
        self.quit()

    def run(self):
        while True:
            if self.state == self.RecordingState:
                chunk, timestamps = self.inlet.pull_chunk()
                self.D.extend(chunk)
            elif self.state == self.PausedState:
                pass
            else:
                break
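
A usage sketch for the LSLClient thread, assuming the surrounding Qt application is already running; the stream tag and value are placeholders.

client = LSLClient()
client.setStream('type', 'EEG')   # assumed tag/value pair
client.record()                   # starts the QThread, which pulls chunks into D
# ... later, e.g. from a UI slot ...
client.stop()
recorded = client.getData()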
Example #20
    def _muse_get_recent(self, max_samples=100, restart_inlet=False):
        
        if self._muse_recent_inlet and restart_inlet == False:
            inlet = self._muse_recent_inlet

        else:
            # Initiate a new lsl stream
            streams = resolve_byprop('type', 'EEG', timeout=mlsl_cnsts.LSL_SCAN_TIMEOUT)
            inlet = StreamInlet(streams[0], max_chunklen=mlsl_cnsts.LSL_EEG_CHUNK)

        self._muse_recent_inlet = inlet

        _ = inlet.pull_chunk() # seems to be necessary to do this first...
        time.sleep(1)
        samples, timestamps = inlet.pull_chunk(timeout=0.0, max_samples=max_samples)

        samples = np.array(samples)
        timestamps = np.array(timestamps)

        info = inlet.info()
        description = info.desc()
        sfreq = info.nominal_srate()
        #window = 10
        #n_samples = int(self.sfreq * window)
        n_chans = info.channel_count()
        ch = description.child('channels').first_child()
        ch_names = [ch.child_value('label')]
        for i in range(n_chans):
            ch = ch.next_sibling()
            lab = ch.child_value('label')
            if lab != '':
                ch_names.append(lab)
        df = pd.DataFrame(samples, index=timestamps, columns=ch_names) 
        
        
        return df
def acquire_eeg(duration,
                callback=print_eeg_callback,
                eeg_chunck=LSL_EEG_CHUNK):

    DATA_SOURCE = "EEG"

    print("Looking for a %s stream..." % (DATA_SOURCE))
    streams = resolve_byprop('type', DATA_SOURCE, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (DATA_SOURCE))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=eeg_chunck)

    info = inlet.info()
    description = info.desc()
    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    timestamps = []
    t_init = time()
    time_correction = inlet.time_correction()

    print('Start acquiring at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)

    while (time() - t_init) < duration:
        try:
            chunk, timestamps = inlet.pull_chunk(timeout=1.0,
                                                 max_samples=eeg_chunck)

            if timestamps:
                samples = {
                    key: [sample[i] for sample in chunk]
                    for i, key in enumerate(ch_names)
                }
                callback(timestamps, samples)
        except KeyboardInterrupt:
            break

    print('Acquisition is done')
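
acquire_eeg() defaults to a print_eeg_callback defined elsewhere; a hedged sketch of supplying your own callback (the 10-second duration is arbitrary):

def my_callback(timestamps, samples):
    # samples maps each channel label to the list of values in this chunk
    first_ch = next(iter(samples))
    print(len(timestamps), "samples; first channel:", first_ch)

acquire_eeg(duration=10, callback=my_callback)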
Example #22
class LSLInlet:
    def __init__(self, name=LSL_STREAM_NAMES[2], max_chunklen=8, n_channels=20):
        streams = resolve_byprop('name', name, timeout=LSL_RESOLVE_TIMEOUT)
        self.inlet = None
        self.dtype = 'float64'
        if len(streams) > 0:
            self.inlet = StreamInlet(streams[0], max_buflen=1, max_chunklen=max_chunklen)
            # self.dtype = fmt2string[self.inlet.info().channel_format()]
            print(self.dtype)
        self.n_channels = n_channels if n_channels else self.inlet.info().channel_count()

    def get_next_chunk(self):
        # get next chunk
        chunk, timestamp = self.inlet.pull_chunk()
        # convert to numpy array
        chunk = np.array(chunk, dtype=self.dtype)
        # return first n_channels channels or None if empty chunk
        return chunk[:, :self.n_channels] if chunk.shape[0] > 0 else None

    def update_action(self):
        pass

    def save_info(self, file):
        with open(file, 'w') as f:
            f.write(self.inlet.info().as_xml())

    def get_frequency(self):
        return self.inlet.info().nominal_srate()

    def get_n_channels(self):
        return self.n_channels

    def get_channels_labels_bad(self):
        time.sleep(0.001)
        labels = []
        ch = self.inlet.info().desc().child("channels").child("channel")
        for k in range(self.get_n_channels()):
            labels.append(ch.child_value("label"))
            ch = ch.next_sibling()
        return labels

    def get_channels_labels(self):
        return ch_names[:self.n_channels]

    def disconnect(self):
        del self.inlet
        self.inlet = None
Example #23
    def stream_from_lsl(self):
        """
        Streaming operation which pulls individual samples from LSL
        """
        stream = self.find_given_stream()
        stream_inlet = StreamInlet(stream)

        chunk = np.zeros((32, stream.channel_count()), dtype=np.float32)

        while True:
            _, timestamp = stream_inlet.pull_chunk(max_samples=32, dest_obj=chunk, timeout=FOREVER)

            if not self.timestamp_stored and self.store_first_timestamp_to is not None:
                np.save(self.store_first_timestamp_to, np.array([timestamp[0]]))
                self.timestamp_stored = True

            self.output_data(chunk[:, self.mask])
Example #24
class BetaInlet(object):
    def __init__(self):
        print("looking for an EEG stream...")
        streams = resolve_byprop("type", "EEG")

        # create a new inlet to read from the stream
        proc_flags = proc_clocksync | proc_dejitter | proc_monotonize
        self.inlet = StreamInlet(streams[0], processing_flags=proc_flags)

    def update(self):
        max_samps = 3276*2
        data = np.nan * np.ones((max_samps, 25), dtype=np.float32)
        _, timestamps = self.inlet.pull_chunk(max_samples=max_samps, dest_obj=data)
        data = data[:len(timestamps), :]
        return data, np.asarray(timestamps)

    def sampling_rate(self):
        return self.inlet.info().nominal_srate()
Example #25
    def _muse_get_recent(self,
                         n_samples: int = 256,
                         restart_inlet: bool = False):
        if self._muse_recent_inlet and not restart_inlet:
            inlet = self._muse_recent_inlet
        else:
            # Initiate a new lsl stream
            streams = resolve_byprop("type",
                                     "EEG",
                                     timeout=mlsl_cnsts.LSL_SCAN_TIMEOUT)
            if not streams:
                raise Exception(
                    "Couldn't find any stream, is your device connected?")
            inlet = StreamInlet(streams[0],
                                max_chunklen=mlsl_cnsts.LSL_EEG_CHUNK)
            self._muse_recent_inlet = inlet

        info = inlet.info()
        sfreq = info.nominal_srate()
        description = info.desc()
        n_chans = info.channel_count()

        self.sfreq = sfreq
        self.info = info
        self.n_chans = n_chans

        timeout = (n_samples / sfreq) + 0.5
        samples, timestamps = inlet.pull_chunk(timeout=timeout,
                                               max_samples=n_samples)

        samples = np.array(samples)
        timestamps = np.array(timestamps)

        ch = description.child("channels").first_child()
        ch_names = [ch.child_value("label")]
        for i in range(n_chans):
            ch = ch.next_sibling()
            lab = ch.child_value("label")
            if lab != "":
                ch_names.append(lab)

        df = pd.DataFrame(samples, index=timestamps, columns=ch_names)
        return df
class MarkerInlet(object):
    def __init__(self):
        self.task = {'phase': 'precue', 'class': 1, 'target': 1}
        print("Looking for stream with type Markers")
        streams = resolve_bypred("type='Markers'", minimum=1)
        proc_flags = 0  # Marker events are relatively rare. No need to post-process.
        self.inlet = StreamInlet(streams[0], processing_flags=proc_flags)
        # The following is an example of how to read stream info
        stream_info = self.inlet.info()
        stream_Fs = stream_info.nominal_srate()
        stream_xml = stream_info.desc()
        chans_xml = stream_xml.child("channels")
        chan_xml_list = []
        ch = chans_xml.child("channel")
        while ch.name() == "channel":
            chan_xml_list.append(ch)
            ch = ch.next_sibling("channel")
        stream_ch_names = [
            ch_xml.child_value("label") for ch_xml in chan_xml_list
        ]
        print("Reading from inlet named {} with channels {}".format(
            stream_info.name(), stream_ch_names))

    def update(self):
        marker_samples, marker_timestamps = self.inlet.pull_chunk(timeout=0.0)
        if (marker_timestamps):
            [phase_str, class_str,
             targ_str] = marker_samples[-1][0].split(', ')
            if phase_str in ['TargetCue']:
                self.task['phase'] = 'cue'
            elif phase_str in ['GoCue']:
                self.task['phase'] = 'go'
            elif phase_str in ['Miss', 'Hit']:
                self.task['phase'] = 'evaluate'
            elif phase_str[:8] == 'NewTrial':
                self.task['phase'] = 'precue'
            else:
                print(phase_str)
            self.task['class'] = int(class_str.split(' ')[1])
            self.task['target'] = int(targ_str.split(' ')[1])
            print("Marker inlet updated with task {}".format(self.task))
Example #27
class MuseHelmet(Helmet):
    def __init__(self):
        from muselsl import stream, list_muses
        from pylsl import StreamInlet, resolve_byprop

        muses = list_muses()

        # stream module connects with Muse, but doesn't start streaming data
        self.muse_ble_connect = multiprocessing.Process(
            target=stream, args=(muses[0]["address"], ))
        self.muse_ble_connect.start()

        time.sleep(10)

        self.streams = resolve_byprop("type", "EEG", timeout=2)
        self.inlet = StreamInlet(self.streams[0], max_chunklen=1)

        Helmet.__init__(self)

        self.channels_number = 4
        self.sampling_rate = 256

    # Muse LSL streams data in chunks with predefined timestamps,
    # so the data is queued inside streaming() itself, bypassing insert_sample_record()
    def streaming(self):
        # channels: TP9, AF7, AF8, TP10, Right Aux (not used)
        while True:

            eeg_data, timestamp = self.inlet.pull_chunk(timeout=0)

            # todo put chunk, concat eeg and time arrays
            for i in range(len(timestamp)):
                self.q.put(eeg_data[i][0:4] + [timestamp[i]] +
                           [int(timestamp[i])])

            time.sleep(0.01)

    # todo make unique signature with inlet.pull_sample
    def insert_sample_record(self, sample):
        pass
Example #28
def getData(classifier, mu_ft, std_ft):
    print('looking for an EEG stream...')
    streams = resolve_stream('type', 'EEG')
    if len(streams) == 0:
        raise RuntimeError("Can't find EEG stream :(")
    window = 1
    inlet = StreamInlet(streams[0])
    info = inlet.info()
    descriptions = info.desc()
    sfreq = info.nominal_srate()
    n_samples = int(window * sfreq)
    n_chan = info.channel_count()
    print('Acquiring data...')
    data = np.zeros((n_samples, n_chan))
    times = np.arange(-window, 0, 1. / sfreq)
    timer = time.time()
    while True:
        samples, timestamps = inlet.pull_chunk(timeout=1.0, max_samples=12)
        if timestamps:
            timestamps = np.float64(
                np.arange(len(timestamps))
            )  # creates an array of indices from 0 to len(timestamps) - 1
            timestamps /= sfreq  # divides that array by our sampling freq
            timestamps += times[-1] + 1 / sfreq  # offset so they follow on from the previous times
            times = np.concatenate(
                [times,
                 timestamps])  #adds timestamps to the end of the times array
            times = times[-n_samples:]  #takes the last n_samples from times
            data = np.vstack([data, samples
                              ])  #adds our new samples to the data array
            data = data[-n_samples:]
            timer = time.time()
            n_samples, n_chan = data.shape
            epochs, remainder = tools.epoching(data,
                                               n_samples,
                                               samples_overlap=0)
            feature_matrix = tools.compute_feature_matrix(epochs, sfreq)
            y_hat = tools.classifier_test(classifier, feature_matrix, mu_ft,
                                          std_ft)
            print(y_hat)
Example #29
def imp_check_pauser(imp_limit):
    from pylsl import StreamInlet, resolve_stream
    # first resolve an EEG stream on the lab network
    print('Searching for Impedances Stream...')
    streams = resolve_stream('type', 'Impeadance')  # type='Impeadance')
    # create a new inlet to read from the stream
    inlet = StreamInlet(streams[0])
    # Keyword Parameters.
    imps = []
    channels = ['Fz', 'Cz', 'Pz', 'P4', 'P3', 'O1', 'O2']
    controller = 0
    num_chans = 7
    if imp_limit is None:
        imp_limit = 15
    # Timed Imp Check Controller Operation.
    while controller == 0:
        # Grab Impedances Data Chunks.
        chunk, timestamps = inlet.pull_chunk(timeout=2, max_samples=500)
        if timestamps:
            imps = np.asarray(chunk)
            imps = imps[:, 0:num_chans]
            print(imps.shape)
            imps = zero_mean(imps)
            for j in range(1):
                con_list = np.zeros(num_chans)  # , dtype=int
                for i in range(num_chans):
                    # Range value of channel during pause.
                    r_val = np.amax(imps[:, i]) - np.amin(imps[:, i])
                    if r_val > imp_limit:
                        print(
                            '----Channel Impedances Awaiting Stabilization: {0}  |  Range Value: {1}'
                            .format(channels[i], r_val))
                    elif r_val < imp_limit:
                        con_list[i] = 1
                        print(
                            '----Channel Impedances Stabilised: {0}  |  Range Value: {1}'
                            .format(channels[i], r_val))
                    if np.sum(con_list) == num_chans:
                        controller = 1
    def startRecording(self):
        self.recording = True

        try:
            while (self.recording == True):
                streams = resolve_byprop('type', 'EEG', timeout=2)
                inlet = StreamInlet(streams[0], max_chunklen=12)
                # Obtain EEG data from the LSL stream
                eeg_data, timestamp = inlet.pull_chunk(
                    timeout=1, max_samples=int(self.SHIFT_LENGTH * self.freq))

                ch_data = np.array(eeg_data)[:, self.INDEX_CHANNEL]

                # Update EEG data and apply filter
                self.eeg_raw, self.filter_state = BCI.update_buffer(
                    self.eeg_raw,
                    ch_data,
                    notch=True,
                    filter_state=self.filter_state)

        except KeyboardInterrupt:
            print("Exception")
Example #31
class BciThread (threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.__streamsEEG = resolve_byprop('type', 'EEG', timeout=TIMEOUT)
        if len(self.__streamsEEG) == 0:
            raise RuntimeError("Can't find EEG stream.")
        self.__inlet = StreamInlet(self.__streamsEEG[0], max_chunklen=12)
        self.__eeg_time_correction = self.__inlet.time_correction()
        self.__info = self.__inlet.info()
        self.__fs = int(self.__info.nominal_srate())
        self.name = name
        self.__lock = threading.Lock()
        self.__work = False
        self.__observers = set()

    def attach(self, observer: Observer):
        self.__observers.add(observer)

    def event(self, info):
        for observer in self.__observers:
            observer.update_data(info)

    def detach(self, observer: Observer):
        self.__observers.remove(observer)

    def run(self):
        self.__work = True
        while self.__work:
            eeg_data, timestamp = self.__inlet.pull_chunk(timeout=1, max_samples=int(SHIFT_LENGTH * self.__fs))
            info = np.column_stack((timestamp, eeg_data))
            self.event(info)

    def get_fs(self):
        return self.__fs

    def stop(self):
        self.__lock.acquire()
        self.__work = False
        self.__lock.release()
def dataReaderLSLChunk(streamName, q):
    while True:
        print("Waiting for LSL stream")
        try:
            results = resolve_byprop(prop='name', value=streamName)
            while len(results) == 0:
                time.sleep(0.25)
            info = results[0]
            inlet = StreamInlet(info, recover=False)
            print("Streaming...")
            # Read data in forever
            try:
                while True:
                    chunk, timestamps = inlet.pull_chunk()
                    if len(chunk) > 0:
                        q.put(np.array(chunk[len(chunk) - 1]))
                    time.sleep(1 / 120)
            except Exception as e:
                print(e)
                pass
        except Exception as e:
            print(e)
            pass
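
A usage sketch for dataReaderLSLChunk(): run it in a background thread and consume the queue (the stream name is a placeholder).

import queue
import threading

q = queue.Queue()
threading.Thread(target=dataReaderLSLChunk, args=('MyEEGStream', q),
                 daemon=True).start()
while True:
    print(q.get())   # newest sample of the most recently pulled chunk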
class MarkerInlet(object):
    def __init__(self):
        self.task = {'phase':'precue', 'class':1, 'target':1}
        print("Looking for stream with type Markers")
        streams = resolve_bypred("type='Markers'", minimum=1)
        proc_flags = 0  # Marker events are relatively rare. No need to post-process.
        self.inlet = StreamInlet(streams[0], processing_flags=proc_flags)
        # The following is an example of how to read stream info
        stream_info = self.inlet.info()
        stream_Fs = stream_info.nominal_srate()
        stream_xml = stream_info.desc()
        chans_xml = stream_xml.child("channels")
        chan_xml_list = []
        ch = chans_xml.child("channel")
        while ch.name() == "channel":
            chan_xml_list.append(ch)
            ch = ch.next_sibling("channel")
        stream_ch_names = [ch_xml.child_value("label") for ch_xml in chan_xml_list]
        print("Reading from inlet named {} with channels {}".format(stream_info.name(), stream_ch_names))

    def update(self):
        marker_samples, marker_timestamps = self.inlet.pull_chunk(timeout=0.0)
        if (marker_timestamps):
            [phase_str, class_str, targ_str] = marker_samples[-1][0].split(', ')
            if phase_str in ['TargetCue']:
                self.task['phase'] = 'cue'
            elif phase_str in ['GoCue']:
                self.task['phase'] = 'go'
            elif phase_str in ['Miss', 'Hit']:
                self.task['phase'] = 'evaluate'
            elif phase_str[:8] == 'NewTrial':
                self.task['phase'] = 'precue'
            else:
                print(phase_str)
            self.task['class'] = int(class_str.split(' ')[1])
            self.task['target'] = int(targ_str.split(' ')[1])
            print("Marker inlet updated with task {}".format(self.task))
Example #34

# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type', 'Data')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])

globalstart = time.time()

while (time.time() - globalstart < exptime):
    
    startwhile = time.time()

    chunk, timestamp = inlet.pull_chunk()
    np_ar_chunk = np.asarray(chunk)
    chunk_size = np_ar_chunk.shape[0]
    
    if chunk_size > 0:
        
        data_chunk_test = np_ar_chunk.T
        
        received_data_buf[:,pos:(pos+chunk_size)] = data_chunk_test
        pos = pos + chunk_size + 1
        
        [data_chunk_test,Zlast_pre_high[0,:,:]] = spsig.lfilter(b_pre_high[0], a_pre_high[0], data_chunk_test, 1, Zlast_pre_high[0,:,:])
        [data_chunk_test,Zlast_pre_low[0,:,:]] = spsig.lfilter(b_pre_low[0], a_pre_low[0], data_chunk_test, 1, Zlast_pre_low[0,:,:])
        
        data_chunk_test = data_chunk_test[without_emp_mask,:]
        #chan_names_test_used = chan_names_test[:,without_emp_mask]
    # Name of our channel for plotting purposes
    ch_names = [ch_names[i] for i in index_channel]
    n_channels = len(index_channel)

    # Get names of features
    # ex. ['delta - CH1', 'pwr-theta - CH1', 'pwr-alpha - CH1',...]
    feature_names = BCIw.get_feature_names(ch_names)

    # Number of seconds to collect training data for (one class)
    training_length = 20

    """ 3. RECORD TRAINING DATA """

    # Record data for mental activity 0
    BCIw.beep()
    eeg_data0, timestamps0 = inlet.pull_chunk(
            timeout=training_length+1, max_samples=fs * training_length)
    eeg_data0 = np.array(eeg_data0)[:, index_channel]

    print('\nClose your eyes!\n')

    # Record data for mental activity 1
    BCIw.beep()  # Beep sound
    eeg_data1, timestamps1 = inlet.pull_chunk(
            timeout=training_length+1, max_samples=fs * training_length)
    eeg_data1 = np.array(eeg_data1)[:, index_channel]

    # Divide data into epochs
    eeg_epochs0 = BCIw.epoch(eeg_data0, epoch_length * fs,
                             overlap_length * fs)
    eeg_epochs1 = BCIw.epoch(eeg_data1, epoch_length * fs,
                             overlap_length * fs)

    """ 3. GET DATA """

    # The try/except structure allows to quit the while loop by aborting the
    # script with <Ctrl-C>
    print('Press Ctrl-C in the console to break the while loop.')

    try:
        # The following loop does what we see in the diagram of Exercise 1:
        # acquire data, compute features, visualize raw EEG and the features
        while True:

            """ 3.1 ACQUIRE DATA """
            # Obtain EEG data from the LSL stream
            eeg_data, timestamp = inlet.pull_chunk(
                    timeout=1, max_samples=int(shift_length * fs))

            # Only keep the channel we're interested in
            ch_data = np.array(eeg_data)[:, index_channel]

            # Update EEG buffer
            eeg_buffer, filter_state = BCIw.update_buffer(
                    eeg_buffer, ch_data, notch=True,
                    filter_state=filter_state)

            """ 3.2 COMPUTE FEATURES """
            # Get newest samples from the buffer
            data_epoch = BCIw.get_last_data(eeg_buffer,
                                            epoch_length * fs)

            # Compute features
    print('GUI source discovered')

    #print(experiment_info, bci_info)

    experiment_inlet = StreamInlet(experiment_info)
    bci_inlet = StreamInlet(bci_info)

    bci_results = [[], []]
    experiment_results = [[], []]

    print('Recording')
    while True:
        # get a new sample (you can also omit the timestamp part if you're not
        # interested in it)
        bci_values, bci_timestamps = bci_inlet.pull_chunk(max_samples=device.sfreq * 60 *6)
        bci_results[0].extend(bci_values)
        bci_results[1].extend(bci_timestamps)

        experiment_values, experiment_timestamps = experiment_inlet.pull_chunk()
        experiment_results[0].extend(experiment_values)
        experiment_results[1].extend(experiment_timestamps)

        if len(experiment_results[0]) > 0 and experiment_results[0][-1][0] < 0:
            break

    experiment_results = np.hstack([np.array(experiment_results[1])[:, None],
                                    experiment_results[0]])
    np.savetxt(filename+'_experiment'+'.csv', experiment_results, delimiter=';')

    bci_results = np.hstack([np.array(bci_results[1])[:, None],