def record(
    duration: int,
    filename=None,
    dejitter=False,
    data_source="EEG",
    continuous: bool = True,
) -> None:
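    # Pick the LSL chunk size that matches the requested data source.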
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    if not filename:
        filename = os.path.join(
            os.getcwd(), "%s_recording_%s.csv" %
            (data_source, strftime('%Y-%m-%d-%H.%M.%S', gmtime())))

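    # Sliding-window classifier setup: buffer incoming samples and run the
    # pretrained Keras model (Epilepsy.h5) on one channel, 178 samples at a time.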
    channel_idx = 1
    num_of_data = 178
    existing = pd.DataFrame()
    model = load_model(
        "/home/pi/.virtualenvs/muse_lsl_env/lib/python3.7/site-packages/muselsl/Epilepsy.h5"
    )
    print("Initialized Variables")

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop('type', data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop('name',
                                    'Markers',
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    last_written_timestamp = None
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                # print("Data: " + str(data))
                new_arr = pd.DataFrame(data)
                combine = [existing, new_arr]
                existing = pd.concat(combine).reset_index(drop=True)
                # print(len(existing))

                if len(existing) >= num_of_data:
                    row = existing[0:num_of_data]
                    row = row[channel_idx]
                    row = row.values.reshape(-1, 178, 1)
                    existing.drop(existing.index[0:num_of_data], inplace=True)
                    existing = existing.reset_index(drop=True)
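                    # Downsample by 4 and z-score the 178-sample window before prediction.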
                    predictions = model.predict(
                        (row[:, ::4] - row.mean()) / row.std())
                    result = np.argmax(predictions[0]) + 1
                    print("Result: " + str(result))
                res.append(data)
                timestamps.extend(timestamp)
                tr = time()
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])

            # Save every 5s
            if continuous and (last_written_timestamp is None
                               or last_written_timestamp + 5 < timestamps[-1]):
                _save(
                    filename,
                    res,
                    timestamps,
                    time_correction,
                    dejitter,
                    inlet_marker,
                    markers,
                    ch_names,
                    last_written_timestamp=last_written_timestamp,
                )
                last_written_timestamp = timestamps[-1]

        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print("Time correction: ", time_correction)

    _save(
        filename,
        res,
        timestamps,
        time_correction,
        dejitter,
        inlet_marker,
        markers,
        ch_names,
    )

    print("Done - wrote file: {}".format(filename))
Example 2
def record(duration, filename=None, dejitter=False, data_source="EEG"):
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    if not filename:
        filename = os.path.join(
            os.getcwd(),
            "%s_recording_%s.csv" %
            (data_source, strftime("%Y-%m-%d-%H.%M.%S", gmtime())),
        )

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop("type", data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop("name",
                                    "Markers",
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child("channels").first_child()
    ch_names = [ch.child_value("label")]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value("label"))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    print("Start recording at time t=%.3f" % t_init)
    print("Time correction: ", time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print("Time correction: ", time_correction)

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps) + time_correction

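    # Dejitter: regress the timestamps on the sample index and use the fitted,
    # evenly spaced values instead of the raw timestamps.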
    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=["timestamps"] + ch_names)

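    # Align each marker to the nearest EEG timestamp and store it in its own Marker column.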
    if inlet_marker and markers:
        n_markers = len(markers[0][0])
        for ii in range(n_markers):
            data["Marker%d" % ii] = 0
        # process markers:
        for marker in markers:
            # find index of markers
            ix = np.argmin(np.abs(marker[1] - timestamps))
            for ii in range(n_markers):
                data.loc[ix, "Marker%d" % ii] = marker[0][ii]

    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    data.to_csv(filename, float_format="%.3f", index=False)

    print("Done - wrote file: " + filename + ".")
Example 3
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
counter = 0

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
plt.xlabel('Time')
plt.ylabel('Micro Volts')
plt.title('EEG Channel 1')

data = []
time = []

print("pull samples...")
while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    sample = inlet.pull_sample()
    counter += 1

    data.append(sample[0][0])
    time.append(counter)

    ax1.clear()
    ax1.plot(time, data)
    plt.pause(0.05)

    print(sample)
plt.show()
Example 4
# resolve an EMG stream on the lab network and notify the user
print("Looking for an EMG stream...")
streams = resolve_stream('type', 'EEG')
inlet = StreamInlet(streams[0])
#inlet_ch2 = StreamInlet(streams[1])
print("EMG stream found!")

# initialize time threshold and variables for storing time
time_thres = 500
prev_time = 0
flex_thres = 0.7

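# Debounce: after a detected spike, ignore further triggers for time_thres milliseconds.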
while True:

    samples, timestamp = inlet.pull_sample(
    )  # get EMG data sample and its timestamp

    curr_time = int(round(time.time() *
                          1000))  # get current time in milliseconds

    if ((samples[0] >= flex_thres) & (curr_time - time_thres > prev_time)
        ):  # if an EMG spike is detected from the cheek muscles send 'G'
        prev_time = int(round(time.time() * 1000))  # update time
        ser.write(b'Y')

    elif ((samples[1] >= flex_thres) & (curr_time - time_thres > prev_time)
          ):  # if an EMG spike is detected from the eyebrow muscles send 'R'
        prev_time = int(round(time.time() * 1000))  # update time
        ser.write(b'R')

    elif (curr_time - time_thres >
Example 5
"""Example program to show how to read a multi-channel time series from LSL."""

from pylsl import StreamInlet, resolve_stream

# first resolve an EEG stream on the lab network
print("looking for EEG stream...")
streams = resolve_stream('type', 'EEG')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
rows, cols = (8, 125)
samples = [[0 for i in range(cols)] for j in range(rows)]

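# Each pass pulls 8 samples, then averages entries 100-124 of each pulled sample and prints the result.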
while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    for i in range(0, 8):
        samples[i], timestamp = inlet.pull_sample()
    sums = [0] * 8
    for y in range(0, 8):
        for x in range(100, 125):
            sums[y] += samples[y][x]
    for j in range(0, 8):
        sums[j] /= 25
        print(sums[j])
Example 6
#saves data in a constantly updating 5 second window in file called eegdata.npy

from pylsl import StreamInlet, resolve_stream
import time
import numpy as np

streams = resolve_stream('type', 'EEG')  #initialize connection to lsl stream

data = []

inlet = StreamInlet(streams[0])

while 1:
    sample, timestamp = inlet.pull_sample()  #grab data from lsl
    if sample:
        data.append(sample)
        if len(data) > 1250:
            #if data reaches a certain length (1250 lines), remove the oldest data point when adding a new one
            data.pop(0)
            a = np.array(data)
            np.save("eegdata", a)
Example 7
thresh_msg = Quaternion()
thresh_msg.x = threshes[0]
thresh_msg.y = threshes[1]
thresh_msg.z = threshes[2]
thresh_msg.w = threshes[3]
time.sleep(1)
thresh_pub.publish(thresh_msg)

#accel_msg = Imu()
eeg_msg = Quaternion()

t_old = 0

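# Drain any samples already buffered in the inlet so streaming starts from fresh data.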
clear = False
while not clear:
    val = eeg_inlet.pull_sample(0.0)
    if val[0] is None:
        clear = True

print("Sending EEG data...")
while not rospy.is_shutdown():
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    #accel_sample, timestamp = accel_inlet.pull_sample()
    #print(timestamp, sample)

    eeg_sample, eeg_tstamp = eeg_inlet.pull_sample(0.0)
    if eeg_sample is None:
        continue
    #print eeg_sample
Example 8
# first resolve an EEG stream on the lab network
print("Looking for an EEG stream")
streams = resolve_stream("type","EEG",)
inlet = StreamInlet(streams[0])
print("Stream Found")


datastream = []
time.sleep(ignore_first_secs)
timeout = time.time() + float(sys.argv[2]) - ignore_last_secs
while True:
  if time.time() > timeout:
    break
  #sample[0] has the data, sample[1] has a timestamp
  sample = inlet.pull_sample()
  datastream.append(sample[0])

#Build folder structure
zpad = 6
path = os.path.abspath(os.path.join(__file__,sys.argv[1]))
custompath =  "/" + sys.argv[3] + "/" + sys.argv[4] + "/" + "id_" + sys.argv[5].zfill(zpad)
fullpath = path + custompath
#Create folder to save data into
if not os.path.exists(os.path.dirname(fullpath)):
  try:
    os.makedirs(os.path.dirname(fullpath))
  except OSError as exc:
    if exc.errno != errno.EEXIST:
      raise
Example 9
def record(duration, filename=None, dejitter=False, data_source="EEG", exp=None):
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    experiments = ["baseline","auditory_oddball","visual_oddball","posture"]

    if exp not in experiments:
        print("Please enter either baseline or oddball for the experiment.")
        return

# The code below also writes a Stop_EEG2.csv file so muselsl knows when to start/stop recording.
    if not filename:
        filename = os.path.join(os.getcwd(
        ), "%s_recording_%s.csv" % (data_source, strftime('%Y-%m-%d-%H.%M.%S', gmtime())))

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop('type', data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)

    stop_eeg = [0]
    np.savetxt("/Users/mathlab/muse_exp/Experiments/Stop_EEG2.csv", (stop_eeg), delimiter=',',fmt="%s")
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop(
        'name', 'Markers', timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

# The loop below keeps recording for as long as the Stop_EEG2.csv file exists.
    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while os.path.isfile("/Users/mathlab/muse_exp/Experiments/Stop_EEG2.csv") == True:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print('Time correction: ', time_correction)

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps) + time_correction

    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)

# Build the output file name for this experiment and pick the next free participant
# number (001, 002, ...): start at '001' and increment while a file with that number already exists.
    if inlet_marker and markers:
        n_markers = len(markers[0][0])
        for ii in range(n_markers):
            data['Marker%d' % ii] = 0
        # process markers:
        for marker in markers:
            # find index of markers
            ix = np.argmin(np.abs(marker[1] - timestamps))
            for ii in range(n_markers):
                data.loc[ix, 'Marker%d' % ii] = marker[0][ii]

    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    if exp == 'baseline':
        exp_loc = 'Baseline'
    elif exp == 'auditory_oddball':
        exp_loc = 'Auditory_P3'
    elif exp == 'visual_oddball':
        exp_loc = 'Visual_P3'
    elif exp == 'posture':
        exp_loc = 'Posture_EKG'

    partnum = '001'
    while os.path.isfile('/Users/mathlab/Desktop/MUSE_STROKE_ STUDY/Muse_Baseline_Data/EEG_data/' + partnum + '_' + data_source + '_' + exp + '_stroke_study_updated.csv') == True:
        if int(partnum) >= 9:
            partnum = '0' + str(int(partnum) + 1)
        else:
            partnum = '00' + str(int(partnum) + 1)

    filename = '/Users/mathlab/Desktop/MUSE_STROKE_ STUDY/Muse_Baseline_Data/EEG_data/' + partnum + '_' + data_source + '_' + exp + '_stroke_study_updated.csv'

    data.to_csv(filename, float_format='%.3f', index=False)

    print('Done - wrote file: ' + filename + '.')
Example 10
    def getDataO(self,tm,disp):
        if disp == 1 :
            fs = 128.0    # Sampling frequency
            N = fs*tm     # Number of samples
            ct = 0        # Counter
            dt = []       # Data buffer
            with Emotiv(display_output=False, verbose=True) as headset:
                while ct < N:
                    packet = headset.dequeue()
                    if packet is not None:
                        # print packet.sensors
                        # print "########################" 
                        dic = {}
                        for key, value in packet.sensors.iteritems():
                            value = packet.sensors[key]['value']
                            quality = packet.sensors[key]['quality']
                            dic[key] = (value,quality)                
                        dt.append(dic)                
                        ct+=1
                    time.sleep(0.007)
            ldic = dt[:]
            dicx = ldic[0].copy()
            for key,value in dicx.iteritems():
                dicx[key] = []

            for i in ldic:
                for key, value in i.iteritems():
                    value = i[key][0]
                    quality = i[key][1]
                    dicx[key].append((quality,value))
                    pass
            return dicx
        
        if disp == 0 : 
            stream_name = 'NIC'
            streams = resolve_stream('type', 'EEG')
            fs = 500  # Sampling frequency
            N = fs*tm  # Number of samples
            c = 0
            muestras = []
            try:
                for i in range (len(streams)):

                    if (streams[i].name() == stream_name):
                        index = i
                        print ("NIC stream available")

                print ("Connecting to NIC stream... \n")
                inlet = StreamInlet(streams[index])   

            except NameError:
                print ("Error: NIC stream not available\n\n\n")

            while c<N:
                sample, timestamp = inlet.pull_sample()
                muestras.append(sample)
                c+=1
                
            # Dictionary with the data from each electrode
            dic = {} 
            for electrodos in range(0,len(sample)):
                dic[electrodos+1] = []
                for muestra in muestras:
                    dic[electrodos+1].append(muestra[electrodos])

            return dic
Example 11
# first create a new stream info (here we set the name to 'python', the
# content-type to EEG, and 2 channels). An extra value could be the serial
# number of the device or some other locally unique identifier for the stream
# (you could also omit it, but interrupted connections wouldn't auto-recover)
fs = 1000
info = StreamInfo('python', 'EEG', 2)

# next make an outlet
outlet = StreamOutlet(info)

from pylsl import StreamInlet, resolve_stream
print('resolving stream')
streams = resolve_stream('name', 'matlab')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
print('resolved')

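# Round-trip check: push a sample on the outlet, pull one back from the 'matlab'
# stream, and report the running mean of the elapsed local_clock() time.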
t = 0
mean_time = 0
while True:
    #time.sleep(0.002)
    t += 1
    clock = local_clock()
    outlet.push_sample([0, 1])
    sample, timestamp = inlet.pull_sample(timeout=1)
    dt = local_clock() - clock
    mean_time += dt
    print(mean_time / t, dt)
    #time.sleep(0.001)
Example 12
    streams = resolve_stream('type', 'EEG')
    # create an inlet to read data from the stream
    inlet = StreamInlet(streams[0])

    csp = CSP(n_components=4, reg=None, log=False, norm_trace=False)
    csp_name = "1621049839"
    csp.filters_ = np.load(
        os.path.join("csp_estimator", csp_name, "filters.npy"))
    csp.patterns_ = np.load(
        os.path.join("csp_estimator", csp_name, "patterns.npy"))
    csp.mean_ = np.load(os.path.join("csp_estimator", csp_name, "mean.npy"))
    csp.std_ = np.load(os.path.join("csp_estimator", csp_name, "std.npy"))

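    # Control loop: keep the most recent CONTROL_SLOT samples, apply the saved
    # CSP filters, classify with MODEL, and send the decided action over BLT.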
    datas = []
    while True:
        sample, _ = inlet.pull_sample()
        datas.append(sample)
        l = len(datas)
        if l >= CONTROL_SLOT:
            head = l - CONTROL_SLOT
            raw_data = np.array(datas).reshape(-1, 8)[head:]
            cut_data = load_rawdata.cutData(raw_data)
            input_data = csp.transform(cut_data)
            _, c = input_data.shape
            input_data = input_data.reshape(-1, c, 1)
            outputs = MODEL.predict(input_data)
            action = decide(outputs)
            BLT.send(action)
        else:
            BLT.send("none")
Example 13
class Graph(object):
    def __init__(self, size=(600, 350)):
        self.running = True
        self.ProcessedSig = []
        self.SecondTimes = []
        self.count = -1

        plt.ion()
        plt.hold(False)
        self.lineHandle = plt.plot(self.SecondTimes, self.ProcessedSig)
        plt.title("Streaming Live EMG Data")
        plt.xlabel('Time (s)')
        plt.ylabel('Volts')
        plt.show()

    def _graph_lsl(self):
        print('checking if the stream has been initialized')
        self.streams = resolve_byprop('name', 'bci', timeout=2.5)
        try:
            self.inlet = StreamInlet(self.streams[0])
        except IndexError:
            raise ValueError('Make sure stream name=bci is opened first.')
        while self.running:
            # initial run
            self.sample, self.timestamp = self.inlet.pull_sample(timeout=5)
            # time correction to sync to local_clock()
            try:
                if self.timestamp is not None and self.sample is not None:
                    self.timestamp = self.timestamp + self.inlet.time_correction(
                        timeout=5)

            except TimeoutError:
                pass
            self.SecondTimes.append(
                self.sample[1])  #add time stamps to array 'timeValSeconds'
            self.ProcessedSig.append(
                self.sample[0])  #add processed signal values to 'processedSig'

            self.count = self.count + 1

            if ((self.count % 20 == 0) and
                (self.count !=
                 0)):  #every 20 samples (ie ~ 0.10 s) is when plot updates
                self.lineHandle[0].set_ydata(self.ProcessedSig)
                self.lineHandle[0].set_xdata(self.SecondTimes)
                #plt.xlim(0, 5)
                plt.xlim(self.SecondTimes[0], self.SecondTimes[-1])
                plt.ylim(0, 10)
                plt.pause(0.01)

            if (self.count >= 399):
                self.ProcessedSig.pop(0)
                self.SecondTimes.pop(0)

        plt.pause(0.01)
        print('closing graphing utility')
        self.inlet.close_stream()

    def start(self):
        self.lsl_data = threading.Thread(target=random_lsl.start)
        #self.lsl_thread = threading.Thread(target=self._graph_lsl)
        self.lsl_data.start()
        print('lsl data stream has started')
        time.sleep(6)
        #self.lsl_thread.start()
        print('graphing will begin')
        self._graph_lsl()

    def stop(self):
        self.running = False
        if hasattr(self, 'lsl_thread'):
            self.lsl_thread.join(5)
Example 14
from pylsl import StreamInlet, resolve_stream
"""EEG"""
# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type', 'EEG')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
"""MARKERS"""
# first resolve a marker stream on the lab network
print("looking for a marker stream...")
streamsMARKER = resolve_stream('type', 'Markers')

# create a new inlet to read from the stream
inletMARKER = StreamInlet(streamsMARKER[0])

while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    chunk, timestamps = inlet.pull_chunk()
    if timestamps:
        print('**********************INLET_DATA*******************')
        print('SAMPLE_STAMP:' + str(timestamps))
        print('SAMPLE:' + str(chunk))

    marker, timestampM = inletMARKER.pull_sample()
    if timestampM:
        print('MARKER_STAMP:' + str(timestampM))
        print('MARKER:' + str(marker))
        print(timestampM, marker)
Example 15
class OWLSLDataReceiver(OWWidget):
    name = "LSL Data Receiver"
    description = "Receives EEG LSL data"
    icon = "icons/icon_datareceiver.svg"
    priority = 10

    class Information(OWWidget.Information):
        streams_found = Msg("Streams were found, collecting data.")
        data_loaded = Msg("Data was successfully loaded.")
        stopped_collecting = Msg("No longer collecting data.")

    class Error(OWWidget.Error):
        no_markers = Msg("Unable to find markers stream with these settings.")
        no_data = Msg("Unable to find data stream with these settings.")
        no_streams = Msg(
            "Unable to find either of the streams with these settings.")

    class Warning(OWWidget.Warning):
        not_valid_timeout = Msg(
            "{} is not a valid timeout value, continuing with {}.")

    class Outputs:
        data = Output("Raw data", mne.io.Raw)

    want_main_area = False
    resizing_enabled = True

    def __init__(self):
        super().__init__()
        self.data = None
        self.data_inlet = None
        self.marker_inlet = None
        self.data_pointer = -1
        self.events = []
        self.running = True
        self.ch_names = None
        self.ch_types = None
        self.has_stim = False
        self.timeout = 2
        self.main_thread = threading.current_thread()

        self.data_stream_property = "type"
        self.data_stream_value = "EEG"
        self.markers_stream_property = "type"
        self.markers_stream_value = "Markers"

        elements_width = 135

        # GUI
        layout = QGridLayout()
        box = gui.widgetBox(self.controlArea, "Info", orientation=layout)
        box.setAlignment(Qt.AlignCenter)
        self.info_data_stream = QLabel("Data stream: no stream yet.")
        self.info_markers_stream = QLabel("Markers stream: no stream yet.")
        layout.addWidget(self.info_data_stream)
        layout.addWidget(self.info_markers_stream)

        stream_box = QVBoxLayout()
        box = gui.widgetBox(self.controlArea,
                            "Streams settings",
                            orientation=stream_box)
        box.setAlignment(Qt.AlignCenter)

        data_stream_box = QHBoxLayout()

        data_label_box = QVBoxLayout()
        data_field_box = QVBoxLayout()

        data_stream_property_label = QLabel("Data stream property: ")
        data_stream_property_label.setToolTip(
            "The StreamInfo property that should have a specific value (e.g., 'name', 'type', 'source_id' 'desc/manufaturer'."
        )
        data_label_box.addWidget(data_stream_property_label)
        self.data_stream_property_field = style.create_field(
            self.data_stream_property, width=elements_width)
        data_field_box.addWidget(self.data_stream_property_field)

        data_stream_value_label = QLabel("Data stream property value: ")
        data_stream_value_label.setToolTip(
            "The string value that the property should have (e.g., 'EEG' as the type property)."
        )
        data_label_box.addWidget(data_stream_value_label)
        self.data_stream_value_field = style.create_field(
            self.data_stream_value, width=elements_width)
        data_field_box.addWidget(self.data_stream_value_field)

        data_stream_box.addLayout(data_label_box)
        data_stream_box.addLayout(data_field_box)

        markers_stream_box = QHBoxLayout()

        markers_label_box = QVBoxLayout()
        markers_field_box = QVBoxLayout()

        markers_stream_property_label = QLabel("Markers stream property: ")
        markers_stream_property_label.setToolTip(
            "The StreamInfo property that should have a specific value (e.g., 'name', 'type', 'source_id' 'desc/manufaturer'."
        )
        markers_label_box.addWidget(markers_stream_property_label)
        self.markers_stream_property_field = style.create_field(
            self.markers_stream_property, width=elements_width)
        markers_field_box.addWidget(self.markers_stream_property_field)

        markers_stream_value_label = QLabel("Markers stream property value: ")
        markers_stream_value_label.setToolTip(
            "The string value that the property should have (e.g., 'Markers' as the type property)."
        )
        markers_label_box.addWidget(markers_stream_value_label)
        self.markers_stream_value_field = style.create_field(
            self.markers_stream_value, width=elements_width)
        markers_field_box.addWidget(self.markers_stream_value_field)

        markers_stream_box.addLayout(markers_label_box)
        markers_stream_box.addLayout(markers_field_box)

        stream_box.setSpacing(10)
        stream_box.addLayout(data_stream_box)
        stream_box.addLayout(markers_stream_box)

        btn_box = QVBoxLayout()

        timeout_box = QHBoxLayout()
        timeout_label = QLabel("Timeout: ")
        timeout_label.setToolTip(
            "How long to look for the streams in seconds.")
        timeout_box.addWidget(timeout_label)
        btn_box.addLayout(timeout_box)

        self.timeout_field = style.create_field(str(self.timeout),
                                                width=elements_width)
        timeout_box.addWidget(self.timeout_field)

        btn_box.setAlignment(Qt.AlignCenter)
        save_settings_btn = QPushButton("Save stream settings")
        save_settings_btn.setStyleSheet(style.btn_success)
        save_settings_btn.clicked.connect(self.save_stream_settings)
        save_settings_btn.setSizePolicy(Policy.Maximum, Policy.Fixed)
        save_settings_btn.setMinimumSize(QSize(elements_width, 25))
        btn_box.addWidget(save_settings_btn)
        stream_box.addLayout(btn_box)

        layout = QGridLayout()
        gui.widgetBox(self.controlArea, orientation=layout)

        self.find_stream_btn = QPushButton("Find streams")
        self.find_stream_btn.setStyleSheet(style.btn_primary)
        self.find_stream_btn.clicked.connect(self.find_stream)
        self.find_stream_btn.setSizePolicy(Policy.Maximum, Policy.Fixed)
        self.find_stream_btn.setMinimumSize(QSize(elements_width, 25))
        layout.addWidget(self.find_stream_btn)

        output = QPushButton("Send data")
        output.setToolTip(
            "Sends the accumulated data from the streams on the output.")
        output.setStyleSheet(style.btn_primary)
        output.clicked.connect(self.commit)
        output.setSizePolicy(Policy.Maximum, Policy.Fixed)
        output.setMinimumSize(QSize(elements_width, 25))
        layout.addWidget(output)

        self.stop_collecting_btn = QPushButton("Stop")
        self.stop_collecting_btn.setStyleSheet(style.btn_warning)
        self.stop_collecting_btn.clicked.connect(self.stop_collecting)
        self.stop_collecting_btn.setSizePolicy(Policy.Maximum, Policy.Fixed)
        self.stop_collecting_btn.setMinimumSize(QSize(elements_width, 25))
        layout.addWidget(self.stop_collecting_btn)

        self.setMinimumSize(self.layout().sizeHint())

    def onDeleteWidget(self):
        super().onDeleteWidget()
        self.running = False

    def save_stream_settings(self):
        self.data_stream_property = self.data_stream_property_field.text()
        self.data_stream_value = self.data_stream_value_field.text()

        self.markers_stream_property = self.markers_stream_property_field.text(
        )
        self.markers_stream_value = self.markers_stream_value_field.text()

        try:
            self.timeout = int(self.timeout_field.text())
            self.Warning.clear()
        except ValueError:
            self.Warning.not_valid_timeout(self.timeout_field.text(),
                                           str(self.timeout))

    def find_stream(self):
        """Tries to find the data and markers stream, when the streams are found starts collecting the data."""

        if self.data_inlet is None:
            data_streams = resolve_byprop(self.data_stream_property,
                                          self.data_stream_value, 1,
                                          self.timeout)
            markers_streams = resolve_byprop(self.markers_stream_property,
                                             self.markers_stream_value, 1,
                                             self.timeout)

            if data_streams and markers_streams:
                self.Error.clear()
                self.Information.clear()
                self.data_inlet = StreamInlet(data_streams[0])
                self.info_data_stream.setText(
                    "Data stream: data stream found.")
                self.marker_inlet = StreamInlet(markers_streams[0])
                self.info_markers_stream.setText(
                    "Markers stream: markers stream found.")
                self.Information.streams_found()

                thread = threading.Thread(target=self.first_sample_data)
                thread.start()
                self.find_stream_btn.setEnabled(False)
                self.find_stream_btn.setStyleSheet(None)
            else:
                self.Error.clear()
                if data_streams:
                    self.Error.no_markers()
                elif markers_streams:
                    self.Error.no_data()
                else:
                    self.Error.no_streams()

    def create_raw_array(self):
        """Creates a new RawArray from the data collected by the LSL data stream, if no data was found returns None."""

        sample, timestamp = self.data_inlet.pull_sample()

        if sample is None or len(sample) == 0:
            return None

        if self.ch_names is None:
            # Get the channel info from the data_inlet
            ch_names = []
            ch_types = []
            try:
                ch = self.data_inlet.info().desc().child("channels").child(
                    "channel")
                for k in range(self.data_inlet.info().channel_count()):
                    ch_names.append(ch.child_value("label"))
                    ch_types.append(str(ch.child_value('type')).lower())
                    ch = ch.next_sibling()
            except (OSError, UnicodeDecodeError):
                return None

            if len(ch_names) != len(sample) or len(ch_types) != len(sample):
                return None

            for i in range(len(ch_types)):
                if ch_types[i] == "stim":
                    self.has_stim = True

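            # If the stream has no stim channel, append a synthetic "STI 014"
            # channel so marker events can be written into the data later.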
            if not self.has_stim:
                ch_names.append("STI 014")
                ch_types.append("stim")

            self.ch_names = ch_names
            self.ch_types = ch_types

        if not self.has_stim:
            sample.append(0)

        info = mne.create_info(self.ch_names,
                               self.data_inlet.info().nominal_srate(),
                               ch_types=self.ch_types)

        # Create an array from the data of the channels
        data = []
        for i in range(len(sample)):
            row = [sample[i]]
            data.append(row)
        self.data_pointer += 1

        return RawArray(data, info, verbose=False)

    def first_sample_data(self):
        """Collects the first data sample setting the main data instance."""

        while self.data is None:
            self.data = self.create_raw_array()
        self.data = self.data.load_data()
        self.sample()

    def sample(self):
        """Collects data from the LSL data stream until the user clicks on the Stop button or when the main
		application is closed."""

        while self.running:
            # End sampling when the app closes
            if not self.main_thread.is_alive():
                break

            next_raw = self.create_raw_array()
            if next_raw is not None:
                #strings, timestamp = self.marker_inlet.pull_chunk()
                strings, timestamp = self.marker_inlet.pull_sample()

                self.data.append(next_raw, preload=True)
                events = []

                if len(strings) != 0:

                    event_ids = []
                    for i in range(len(strings)):
                        string = strings[i]  #[0]
                        event_id = int(
                            [s for s in string.split(" ") if s != ""][1])
                        event_ids.append(event_id)

                    for i in range(len(strings)):
                        row = [self.data_pointer, 0, event_ids[i]]
                        events.append(row)

                # If any events were received add them to the data
                if events:
                    for i in range(len(events)):
                        self.events.append(events[i])

                    self.data.add_events(self.events, replace=True)

                self.data.load_data()

    def stop_collecting(self):
        """Sets the self.running attribute to false, stopping the collecting thread."""

        self.Information.clear()
        self.running = False
        self.Information.stopped_collecting()

    def commit(self):
        """Sends the read data on the output."""

        if self.data is not None:
            self.Outputs.data.send(self.data.copy())
Example 16
"""Example program to show how to read a multi-channel time series from LSL."""

from pylsl import StreamInlet, resolve_stream

# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type', 'EEG')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])

while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    sample, timestamp = inlet.pull_sample()
    print(timestamp, sample)
Example 17
    def run(self):
        # self.senal = signalemg()
        # self.boolean = False
        # Resolve an available OpenSignals stream
        print("# Looking for an available OpenSignals stream...")
        os_stream = resolve_stream("name", "OpenSignals")

        # Create an inlet to receive signal samples from the stream
        inlet = StreamInlet(os_stream[0])

        # Get information about the stream
        stream_info = inlet.info()

        # Get individual attributes
        stream_name = stream_info.name()
        stream_mac = stream_info.type()
        stream_host = stream_info.hostname()
        stream_n_channels = stream_info.channel_count()

        # Store sensor channel info & units in dictionary
        stream_channels = dict()
        channels = stream_info.desc().child("channels").child("channel")

        # Loop through all available channels
        for i in range(stream_n_channels - 1):
            # Get the channel number (e.g. 1)
            channel = i + 1

            # Get the channel type (e.g. ECG)
            sensor = channels.child_value("sensor")

            # Get the channel unit (e.g. mV)
            unit = channels.child_value("unit")

            # Store the information in the stream_channels dictionary
            stream_channels.update({channel: [sensor, unit]})
            channels = channels.next_sibling()

        while True:
            # Receive samples
            samplei, timestamp = inlet.pull_sample()

            sample = list(samplei)
            # print(samplei)
            try:
                # if sample is not None:
                Ui_Form.devices.emgdev.add_sample(samplechunk=sample[1])
            #  senal.add_sample(samplechunk=sample[0])

            except Exception as e:
                print(e)
            sleep(0.2)
            # print(Ui_Form.devices.emgdev.signal)
            Ui_Form.devices.emgdev.limit_chunk()
            if Ui_Form.devices.emgdev.calibration_threshold != 0:
                print(samplei[1])
                if samplei[1] > Ui_Form.devices.emgdev.calibration_threshold:
                    print("llegooooooo")
                    # Ui_Form.devices.emgdev.activate_boolean()
                    # speakerthread = SThread()
                    # speakerthread.run()
                    sleep(2)  # Security rest time before allowing the next trigger?
Example 18
def mlproducer(queue):

    BUFFER_SIZE_SECONDS = 0.5
    BUFFER_DIST_SECONDS = 0.5
    OPENBCI_HERTZ = 250
    BUFFER_SIZE = round(OPENBCI_HERTZ * BUFFER_SIZE_SECONDS)
    BUFFER_DIST = round(OPENBCI_HERTZ * BUFFER_DIST_SECONDS)
    FEATURES = ['var']
    DEBUG = True

    # model_file = 'NeuroTech-ML/models/model_windows_date_all_subject_all_mode_1_2_4_groups_ok_good.pkl'
    model_file = 'NeuroTech-ML/models/knn_final_500ms.pkl'
    bci_buffer = np.zeros([8, 1])
    # predictor = Prediction(model_filename=model_file, shift=BUFFER_DIST/BUFFER_SIZE)
    predictor = Prediction(model_filename=model_file,
                           shift=BUFFER_DIST_SECONDS)

    print(
        "Attempting to connect to OpenBCI. Please make sure OpenBCI is open with LSL enabled."
    )

    # Set up streaming over lsl from OpenBCI. streams[0] picks up the first of the three available streams.
    streams = resolve_stream('type', 'EEG')
    inlet = StreamInlet(streams[0])

    while True:
        # Pull and append sample from OpenBCI to buffer
        sample, timestamp = inlet.pull_sample()
        sample_np = np.array([sample]).transpose()
        bci_buffer = np.append(bci_buffer, sample_np, axis=1)

        # Check if buffer is large enough to make a prediction
        if (bci_buffer.shape[1] == BUFFER_SIZE):
            # Build filter buffer
            timestamp = round(time.time() * 1000)

            filter_buffer, feature_dict, finger_probs = predictor.get_filtered_features_prediction(
                np.array(bci_buffer))

            # Predict finger pressed
            finger_index = np.argmax(finger_probs)
            formatted_feature_dict = {}
            formatted_feature_dict["timestamp"] = timestamp

            # construct feature dictionary for frontend
            for feature in FEATURES:
                feature_array = []
                for i in range(1, 9):
                    # [0] because elements of feature_dict array of length 1
                    feature_array.append(feature_dict["channel " + str(i) +
                                                      "_" + feature][0])

                formatted_feature_dict[feature] = feature_array

            # Push predictions to queue
            queue.put({
                'Finger': int(finger_index),
                'FingerProbs': str(finger_probs[0].tolist()),
                'Feature_Data': formatted_feature_dict,
                'Filtered_Signal_Data': {
                    "data": str(filter_buffer[:, (BUFFER_SIZE - 1)].tolist()),
                    "timestamp": timestamp
                }
            })

            if (DEBUG):
                print(finger_probs[0])

            # Remove BUFFER_DIST from beginning of buffer
            bci_buffer = np.delete(bci_buffer, np.arange(0, BUFFER_DIST, 1), 1)
Example 19
def plotNodes(i):
    global data

    start_time = time.time()
    inlet = StreamInlet(streams[0])

    # get a new sample
    sample = inlet.pull_sample()
    newdata = np.asarray(sample[0][:n])
    # print(newdata)

    # delete first row of data
    data = np.delete(data, 0, 0)

    # add newdata as a row at the end of data. columns=electrodes rows=timestep
    data = np.vstack([data, newdata])

    # compute power spectrum of data (transpose a copy so welch runs per electrode)
    f, ps = sps.welch(np.transpose(data), fs=26)


    # get the amplitudes associated with the various bands of frequencies
    extractAmplitudeDelta = getAmplitudesByFrequencyBand(ps, 0)
    extractAmplitudeTheta = getAmplitudesByFrequencyBand(ps, 1)
    extractAmplitudeAlpha = getAmplitudesByFrequencyBand(ps, 2)
    tempDelta = np.asarray(extractAmplitudeDelta)
    tempTheta = np.asarray(extractAmplitudeTheta)
    tempAlpha = np.asarray(extractAmplitudeAlpha)

    # temp holds mean of each row in extractAmplitude
    tempDelta = np.mean(tempDelta, axis=1)
    tempTheta = np.mean(tempTheta, axis=1)
    tempAlpha = np.mean(tempAlpha, axis=1)

    # square all values so they are non-negative
    tempDelta = np.square(tempDelta)
    tempTheta = np.square(tempTheta)
    tempAlpha = np.square(tempAlpha)

    # calculate zscores for the array
    zscoreArrayDelta = stats.zscore(tempDelta)
    zscoreArrayTheta = stats.zscore(tempTheta)
    zscoreArrayAlpha = stats.zscore(tempAlpha)

    sumOfZscores = zscoreArrayDelta + zscoreArrayTheta + zscoreArrayAlpha

    zscoreArrayDelta = np.divide(zscoreArrayDelta, sumOfZscores)
    zscoreArrayTheta = np.divide(zscoreArrayTheta, sumOfZscores)
    zscoreArrayAlpha = np.divide(zscoreArrayAlpha, sumOfZscores)

    # next line creates positive and negative zscores, so if the value was between 0 to 0.5, it is
    # scaled to between -1 and 0, and if the value was between 0.5 and 1, it is scaled to between
    # 0 and 1
    zscoreArrayDelta = (
        (zscoreArrayDelta / np.amax(zscoreArrayDelta)) / 2) + 0.5
    zscoreArrayTheta = (
        (zscoreArrayTheta / np.amax(zscoreArrayTheta)) / 2) + 0.5
    zscoreArrayAlpha = (
        (zscoreArrayAlpha / np.amax(zscoreArrayAlpha)) / 2) + 0.5

    print(zscoreArrayAlpha)
    # define vectors for plot colors and opacity
    # altColors = freqs / 33
    colorsDelta = cmap(zscoreArrayDelta)
    colorsTheta = cmap(zscoreArrayTheta)
    colorsAlpha = cmap(zscoreArrayAlpha)

    # colors.astype(float)
    # colors[:, -1] = maxes / maxes.max()
    # print(altColors)
    # print(colors)

    ax1.set_xlim(-6, 6)
    ax1.set_ylim(-6, 6)
    ax2.set_xlim(-6, 6)
    ax2.set_ylim(-6, 6)
    ax3.set_xlim(-6, 6)
    ax3.set_ylim(-6, 6)
    # ax1.scatter(x, y, s = 100, c = altColors, cmap = plt.cm.jet_r)
    ax1.scatter(x, y, s=100, c=colorsDelta)
    ax2.scatter(x, y, s=100, c=colorsTheta)
    ax3.scatter(x, y, s=100, c=colorsAlpha)

    elapsed_time = time.time() - start_time
Example 20
def record(
    duration: int,
    filename=None,
    dejitter=False,
    data_source="EEG",
    continuous: bool = True,
) -> None:
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    if not filename:
        filename = os.path.join(
            os.getcwd(), "%s_recording_%s.csv" %
            (data_source, strftime('%Y-%m-%d-%H.%M.%S', gmtime())))

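    # Sliding-window emotion classifier setup: buffer incoming samples and run
    # the pretrained PyTorch model on one channel, 750 samples at a time.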
    channel_idx = 0
    num_of_data = 750
    existing = pd.DataFrame()
    index = index_list
    model = torch.load("Emotion_Detector.pt")

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop('type', data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop('name',
                                    'Markers',
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    last_written_timestamp = None
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                # print("Data: " + str(data))
                new_arr = pd.DataFrame(data)
                combine = [existing, new_arr]
                existing = pd.concat(combine).reset_index(drop=True)
                print(len(existing))
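                # Once a full 750-sample window is buffered, classify it and
                # drop those samples from the buffer.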
                if len(existing) >= num_of_data:
                    row = existing[0:num_of_data]
                    row = row[channel_idx]
                    row = pd.DataFrame(row)
                    row = row.T
                    row.columns = index
                    existing.drop(existing.index[0:num_of_data], inplace=True)
                    existing = existing.reset_index(drop=True)
                    row = row.iloc[0, :]
                    row, clas, probs = model.predict(row)
                    if clas.int() == 2:
                        print("Negative emotion predicted!")
                    elif clas.int() == 1:
                        print("Positive emotion predicted!")
                    elif clas.int() == 0:
                        print("Neutral emotion")
                    else:
                        pass

                res.append(data)
                timestamps.extend(timestamp)
                tr = time()
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])

            # Save every 5s
            if continuous and (last_written_timestamp is None
                               or last_written_timestamp + 5 < timestamps[-1]):
                _save(
                    filename,
                    res,
                    timestamps,
                    time_correction,
                    dejitter,
                    inlet_marker,
                    markers,
                    ch_names,
                    last_written_timestamp=last_written_timestamp,
                )
                last_written_timestamp = timestamps[-1]

        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print("Time correction: ", time_correction)

    _save(
        filename,
        res,
        timestamps,
        time_correction,
        dejitter,
        inlet_marker,
        markers,
        ch_names,
    )

    print("Done - wrote file: {}".format(filename))
Example 21
def record(duration, filename=None, dejitter=False):
    if not filename:
        filename = os.path.join(
            os.getcwd(),
            ("recording_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime())))

    print("Looking for an EEG stream...")
    streams = resolve_byprop('type', 'EEG', timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        raise (RuntimeError("Can't find EEG stream."))

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=LSL_CHUNK)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop('name',
                                    'Markers',
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
        # marker_time_correction = inlet_marker.time_correction()
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    # freq = info.nominal_srate()
    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=LSL_CHUNK)

            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print('Time correction: ', time_correction)

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps) + time_correction

    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)

    if inlet_marker and markers:
        n_markers = len(markers[0][0])
        for ii in range(n_markers):
            data['Marker%d' % ii] = 0
        # process markers:
        for marker in markers:
            # find index of markers
            ix = np.argmin(np.abs(marker[1] - timestamps))
            for ii in range(n_markers):
                data.loc[ix, 'Marker%d' % ii] = marker[0][ii]

    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    data.to_csv(filename, float_format='%.3f', index=False)

    print('Done - wrote file: ' + filename + '.')
Example 22
class Recorder():
    def __init__(self, config, q_from_display_to_recorder = None):
        
        # initialize basic configuration
        self.config = config
        self.q_from_display_to_recorder = q_from_display_to_recorder
        
        # variables, changed by commands form q
        
        # inlet_state:
        # 0 - recording disabled
        # 1 - recording enabled
        self.inlet_state = 0
        
        # patient states:
        # -1 - none (not recording)
        # 0 - rest
        # 1 - objects
        # 2 - actions
        self.patient_state = 0
        
        # picture state:
        # 0 - none
        # 1 - start
        # 2 - stop
        self.picture_state = 0
        
        self.pause = 0
        #self.patient_state_paused = -1
        
        # initialize memory variables
        self.memory = [[], [], []]
        self.picture_indices = [[], [], []]
        
        # 
        self.picture_pause = 0
        self.picture_end = 0
        self.index_picture_start = (-1, -1)
        self.index_picture_stop = (-1, -1)
        self.index_pause = (-1, -1)        
        
        # resolve lsl stream
        stream_name = self.config['recorder']['lsl_stream_name']
        #stream_name = "Debug"
        streams = resolve_stream('name', stream_name)
        self._printm('Resolving stream \'{}\', {} streams found'.format(stream_name, len(streams)))
        self.inlet = StreamInlet(streams[0], 2048)
        self._printm('Stream resolved')

    



    def record(self):
        self._printm('Start recording, if \'Recording...\' progress bar is not filling, check lsl input stream')
        
        self._resolve_q()
        
        channel_sample = 69
        channel_timestemp = 70
        channel_patient_state = 71
        channel_picture_pause = 72
        channel_picture_state = 73
        dataset_width = self.config['recorder'].getint('dataset_width')
        
        with Bar('Recording...', max=1000) as bar:
            while self.inlet_state:
                self._resolve_q()
                sample, timestamp = self.inlet.pull_sample()
                if bar.index < 999:
                    bar.next()
                elif bar.index == 999:
                    bar.next()
                    bar.finish()
                                
                # if patient state is 'none' (-1) - skip
                if self.patient_state == -1:
                    continue
                elif self.pause:
                    self.picture_pause = 1
                    continue
                
                # if timestamp exists, concatenate sample with previous data
                if timestamp:
                    sample_index = len(self.memory[self.patient_state])
                    
                    big_sample = np.zeros(dataset_width)
                    # add ecog data
                    big_sample[0:channel_sample] = np.asarray(sample)
                    # add timestamp
                    big_sample[channel_timestemp-1] = timestamp
                    # add patient_state
                    big_sample[channel_patient_state-1] = self.patient_state
                    # add picture_pause
                    big_sample[channel_picture_pause-1] = self.picture_pause
                    # add picture_state
                    big_sample[channel_picture_state-1] = self.picture_state
                    # put big_sample into the memory
                    self.memory[self.patient_state].append(big_sample)
                    
                    if self.picture_pause:
                        self.index_pause = (sample_index - 1, self.patient_state)
                        self.picture_pause = 0
                    if self.picture_state == 1:
                        self.index_picture_start = (sample_index, self.patient_state)
                    elif self.picture_state == 2:
                        self.index_picture_stop = (sample_index, self.patient_state)
                        if self._good_picture():
                            self.picture_indices[self.patient_state].append((self.index_picture_start[0], self.index_picture_stop[0]))
                    self.picture_state = 0     
                        
        self._printm('Stop recording')
        t = time.time()
        self._save()
        self._printm('Data saved: {}s:'.format(time.time()-t))


    def _good_picture(self):
        current_state = self.patient_state == self.index_picture_stop[1]
        same_patient_state = self.index_picture_start[1] == self.index_picture_stop[1]
        pause_not_inside_picture = not (self.index_picture_start[1] == self.index_pause[1] and \
                                self.index_picture_start[0] <= self.index_pause[0])
        return current_state and same_patient_state and pause_not_inside_picture


    def _save(self):
        experiment_data_path = Path(self.config['paths']['experiment_data_path'])
        dataset_width = self.config['recorder'].getint('dataset_width')
        groups = self.config['recorder']['group_names'].split(' ')
        with h5py.File(experiment_data_path, 'a') as file:
            for i in range(len(self.memory)):
                if len(self.memory[i]) > 0:
                    stacked_data = np.vstack(self.memory[i])
                    if len(self.picture_indices[i]) > 0:
                        stacked_indices = np.vstack(self.picture_indices[i])
                    elif i == 0:
                        stacked_indices = np.array([0, stacked_data.shape[0]-1]).reshape((1,2))
                    else:
                        stacked_indices = np.array(()).reshape((0,2))
                    file[groups[i]+'/raw_data'] = stacked_data
                    file[groups[i]+'/picture_indices'] = stacked_indices
                    self.memory[i] = []
                    self.picture_indices[i] = []
                    self._printm('Saved {}, {}, {} pictures'.format(groups[i], stacked_data.shape, stacked_indices.shape[0]))
                else:
                    empty_shape = (0, dataset_width)
                    file.create_dataset(groups[i]+'/raw_data', empty_shape)
                    file.create_dataset(groups[i]+'/picture_indices', (0, 2))
                    self._printm('Saved {}, {}'.format(groups[i], empty_shape))
            file.create_dataset('fs', data=np.array(self.config['recorder'].getint('fs')))
            

    # resolve commands from Display object to navigate recording of data
    def _resolve_q(self):
        while not self.q_from_display_to_recorder.empty():
            key, value = self.q_from_display_to_recorder.get()
            #if self.config['general'].getboolean('debug_mode'):
            #    self._printm('key: {}, value: {}'.format(key, value))
            if key == 'inlet_state':
                self.inlet_state = value
            elif key == 'patient_state':
                self.patient_state = value
            elif key == 'picture_state':
                self.picture_state = value
            elif key == 'pause':
                self.pause = value
            else:
                self._printm('wrong key in queue: {}'.format(key))


    def _printm(self, message):
        print('{} {}: '.format(time.strftime('%H:%M:%S'), type(self).__name__) + message)
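
A minimal driver sketch for the Recorder class above, assuming a ConfigParser-style file named experiment_config.ini that provides the [recorder] and [paths] keys the class reads; the file name and queued values are illustrative, not part of the original example.

from configparser import ConfigParser
from multiprocessing import Queue

config = ConfigParser()
config.read('experiment_config.ini')   # assumed to define lsl_stream_name, dataset_width, group_names, fs, experiment_data_path

q = Queue()
recorder = Recorder(config, q_from_display_to_recorder=q)

# commands are normally sent by the display process; queued here up front for brevity
q.put(('inlet_state', 1))     # enable the acquisition loop
q.put(('patient_state', 0))   # record into the 'rest' group

recorder.record()             # blocks until another process sends ('inlet_state', 0)
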
Example no. 23
def record(duration, recording_path):
    data_source = "EEG"
    dejitter = False
    filename = recording_path
    print("looking for an EEG stream...")
    streams = resolve_byprop('type', 'EEG', timeout=2)

    if len(streams) == 0:
        raise RuntimeError("Can't find EEG stream")

    print("Start aquiring data")
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()

    print("looking for a Markers stream...")
    marker_streams = resolve_byprop('type', 'Markers', timeout=2)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
        marker_time_correction = inlet_marker.time_correction()
    else:
        inlet_marker = False
        print("Cant find Markers stream")

    info = inlet.info()
    description = info.desc()

    freq = info.nominal_srate()
    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    print('Start recording at time t=%.3f' % t_init)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0, max_samples=12)
            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps)

    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)

    data['Marker'] = 0
    # process markers:
    for marker in markers:
        # find index of markers
        ix = np.argmin(np.abs(marker[1] - timestamps))
        val = timestamps[ix]
        data.loc[ix, 'Marker'] = marker[0][0]

    data.to_csv(filename, float_format='%.3f', index=False)
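
The dejitter branch above is worth isolating: LSL timestamps carry driver and network jitter, so regressing them on the sample index and using the fitted line yields an evenly spaced time axis. A small standalone sketch of that step (it assumes no samples were dropped):

import numpy as np
from sklearn.linear_model import LinearRegression

def dejitter_timestamps(timestamps):
    y = np.asarray(timestamps)
    X = np.arange(len(y)).reshape(-1, 1)   # sample index as the only regressor
    lr = LinearRegression().fit(X, y)
    return lr.predict(X)                   # linearly spaced, jitter-free timestamps
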
Example no. 24
    if time() >= t_word:
        currentTerm = random.choice(termBank)
        print(
            "\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n"
            + str(currentWord) + ": " + currentTerm)
        currentWord += 1
        t_word = time() + 1 * 2
    try:
        data, timestamp = inlet.pull_chunk(timeout=1.0, max_samples=12)
        if timestamp:
            res.append(data)
            timestamps.extend(timestamp)
            words.extend([currentWord] * len(timestamp))
            terms.extend([currentTerm] * len(timestamp))
        if inlet_marker:
            marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
            if timestamp:
                markers.append([marker, timestamp])
    except KeyboardInterrupt:  # Ctrl-C
        break

res = np.concatenate(res, axis=0)
timestamps = np.array(timestamps)

if dejitter:
    y = timestamps
    X = np.atleast_2d(np.arange(0, len(y))).T
    lr = LinearRegression()
    lr.fit(X, y)
    timestamps = lr.predict(X)
Example no. 25
# n_lines = int(sys.argv[1])

# first resolve an Mouse stream on the lab network
print("looking for an mouse stream...")
streams = resolve_stream('type', 'mouse')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])

# time = datetime.datetime.now().strftime("-%Y-%m-%d-%H%M%S")

f = ""

n = 0
while (True):
    sample, timestamp = inlet.pull_sample(timeout=1)
    if (sample is None):
        break
    string = "{},{},{},{}\n".format(timestamp, sample[0], sample[1],
                                    task[sample[2]])
    f += string
    n += 1
    # print(timestamp, sample)
# print(f)
jsonString = {
    "type": "raw",
    "device": "mouse",
    "apiUrl": "none,",
    "id": "test",
    "attributes": "timestamp,xpos,ypos,task",
    "data": f
filename = str(participant_id) + '_ETdata.csv'

# first resolve an EEG stream on the lab network
print("looking for EyeTracking LSL stream...")
streams = resolve_stream('name', 'Tobii')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
print("...found. Pulling data:")

#create CSV file and write received data to it
os.chdir(os.path.join(os.getcwd(), "data"))  # change cwd to the data folder
with open(
        filename, 'wb'
) as csvfile:  #mode needs to be "wb" in python 2 to remove blank rows
    csvwriter = csv.writer(csvfile)

    #main loop
    while True:
        ETdata, timestamp = inlet.pull_sample()
        ETdata.append(
            timestamp *
            1000)  #change timestamp to ms format and add to ET data list
        csvwriter.writerow(ETdata)

        sts = ETdata[31]  #local timestamp

        if sts > last_report + 250:  # print pupillometry data to screen every 250 ms
            print([round(ETdata[29], 3), round(ETdata[30], 3)])
            last_report = sts
Example no. 27
    def record_multiple(self, filename=None):
        self.processing = False
        print("Looking for streams")
        # Gets all LSL streams within the system
        streams = resolve_streams()
        # print(len(streams))
        print(filename)
        if len(streams) < 3:
            raise ValueError("Insufficient Streams")
        # Assign each used stream to an inlet
        for stream in streams:
            if stream.type() == 'EEG':
                inlet_eeg = StreamInlet(stream, max_chunklen=LSL_EEG_CHUNK)
            elif stream.type() == 'PPG':
                inlet_ppg = StreamInlet(stream, max_chunklen=LSL_PPG_CHUNK)
            elif stream.type() == 'Markers':
                inlet_markers = StreamInlet(stream)

        # Get info and description of channels names for data dumping
        # Info for EEG
        info_eeg = inlet_eeg.info()
        description_eeg = info_eeg.desc()
        nchan_eeg = info_eeg.channel_count()
        ch_eeg = description_eeg.child('channels').first_child()
        ch_names_eeg = [ch_eeg.child_value('label')]
        for i in range(1, nchan_eeg):
            ch_eeg = ch_eeg.next_sibling()
            ch_names_eeg.append(ch_eeg.child_value('label'))

        # Info for PPG
        info_ppg = inlet_ppg.info()
        description_ppg = info_ppg.desc()
        nchan_ppg = info_ppg.channel_count()
        ch_ppg = description_ppg.child('channels').first_child()
        ch_names_ppg = [ch_ppg.child_value('label')]
        for i in range(1, nchan_ppg):
            ch_ppg = ch_ppg.next_sibling()
            ch_names_ppg.append(ch_ppg.child_value('label'))

        res_eeg = []
        timestamps_eeg = []
        res_ppg = []
        timestamps_ppg = []
        markers = []
        # ppgs = []
        # timestamp_markers = []
        t_init = time.time()
        last_timestamp = 0
        time_correction_eeg = inlet_eeg.time_correction()
        time_correction_ppg = inlet_ppg.time_correction()

        print("Start recording")
        while self.recording:
            # print(last_timestamp - t_init)
            try:
                chunk_eeg, ts_eeg = inlet_eeg.pull_chunk(
                    max_samples=LSL_EEG_CHUNK)
                chunk_ppg, ts_ppg = inlet_ppg.pull_chunk(
                    max_samples=LSL_PPG_CHUNK)
                marker, timestamp_markers = inlet_markers.pull_sample()
                # print("Seconds elapsed %.4f" % (time.time() - t_init))
                # if timestamp_markers and ts_eeg and ts_ppg:
                if ts_eeg:
                    # print('I am here')
                    res_eeg.append(chunk_eeg)
                    timestamps_eeg.extend(ts_eeg)
                if ts_ppg:
                    res_ppg.append(chunk_ppg)
                    timestamps_ppg.extend(ts_ppg)
                if timestamp_markers:
                    markers.append([marker, timestamp_markers])
                    last_timestamp = timestamp_markers
                    # print(last_timestamp)
                # progress = (last_timestamp - t_init)/(duration+1.4)*100
                # print(progress)
                if time.time() - t_init + 1.2 > (10 * 60.0):
                    self.recording = False

            except KeyboardInterrupt:
                break

        self.processing = True
        time_correction_eeg = inlet_eeg.time_correction()
        time_correction_ppg = inlet_ppg.time_correction()
        print("Time corrections: EEG {}, PPG {}".format(
            time_correction_eeg, time_correction_ppg))

        res_eeg = np.concatenate(res_eeg, axis=0)
        res_ppg = np.concatenate(res_ppg, axis=0)
        timestamps_ppg = np.array(timestamps_ppg) + time_correction_ppg
        timestamps_eeg = np.array(timestamps_eeg) + time_correction_eeg

        ts_df_eeg = pd.DataFrame(np.c_[timestamps_eeg - timestamps_eeg[0]],
                                 columns=['timestamps'])
        ts_df_ppg = pd.DataFrame(np.c_[timestamps_ppg - timestamps_ppg[0]],
                                 columns=['timestamps'])

        res_eeg = np.c_[timestamps_eeg, res_eeg]
        res_ppg = np.c_[timestamps_ppg, res_ppg]
        data_eeg = pd.DataFrame(data=res_eeg,
                                columns=['timestamps'] + ch_names_eeg)
        data_ppg = pd.DataFrame(data=res_ppg,
                                columns=['timestamps'] + ch_names_ppg)

        n_markers = len(markers[0][0])
        t = time.time()
        n = 0
        for ii in range(n_markers):
            data_eeg['Marker%d' % ii] = "NaN"
            data_ppg['Marker%d' % ii] = 'NaN'
            # Process markers
            for marker in markers:
                ix_eeg = np.argmin(np.abs(marker[1] - timestamps_eeg))
                ix_ppg = np.argmin(np.abs(marker[1] - timestamps_ppg))
                self.progress = int(n / len(markers) * 100)
                n += 1
                for i in range(n_markers):
                    # print("Time elapsed: {0} (s)".format(time.time()-t))
                    data_eeg.loc[ix_eeg, 'Marker%d' % i] = marker[0][i]
                    data_ppg.loc[ix_ppg, 'Marker%d' % i] = marker[0][i]
        print("Process took {0} seconds to complete".format(time.time() - t))
        data_eeg.update(ts_df_eeg)
        data_ppg.update(ts_df_ppg)

        recordings_path = os.path.join(os.getcwd(), 'recordings')
        if not os.path.exists(recordings_path):
            os.mkdir(recordings_path)
        # Change to the directory
        os.chdir(recordings_path)
        print(recordings_path)

        data_ppg.to_csv('PPG_' + filename + '.csv',
                        float_format='%.3f',
                        index=False)
        data_eeg.to_csv('EEG_' + filename + '.csv',
                        float_format='%.3f',
                        index=False)
        self.processing = False

        print("Success! Both files written")
        os.chdir('..')
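
A hedged sketch of how record_multiple might be driven; the owning class is not shown in this example, so SessionRecorder and its recording/processing/progress attributes are assumptions taken from the method body, and it presumes that EEG, PPG and Markers streams are all live.

import threading
import time

rec = SessionRecorder()           # hypothetical owner of record_multiple
rec.recording = True
worker = threading.Thread(target=rec.record_multiple, kwargs={'filename': 'session01'})
worker.start()

time.sleep(60)                    # record for a minute (the method also stops itself after ~10 min)
rec.recording = False             # ends the acquisition loop; marker alignment and CSV writing follow
worker.join()
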
Example no. 28
class MyOVBox(OVBox):
  def __init__(self):
    OVBox.__init__(self)

    
  # the initialize method reads settings and outputs the first header
  def initialize(self):
    self.initLabel = 0
    self.debug=self.setting['debug'] == "true"
    print "Debug: ", self.debug
    self.stream_type=self.setting['Stream type']
    self.stream_name=self.setting['Stream name'] 
    # total channels for all streams
    self.channelCount = 0
    #self.stream_name=self.setting['Stream name'] # in case !all_streams
    print "Looking for streams of type: " + self.stream_type
    streams = resolve_stream('type',self.stream_type)
    print "Nb streams: " + str( len(streams))
    self.nb_streams = len(streams)
    if self.nb_streams == 0:
      raise Exception("Error: no stream found.")
    self.inlet = StreamInlet(streams[0], max_buflen=1)
    self.info = self.inlet.info()
    self.channelCount = self.info.channel_count()
    print "Stream name: " + self.info.name()
    stream_freq = self.info.nominal_srate()
    if stream_freq != 0:
	  raise Exception("Error: no irregular stream found.")
    # we append to the box output a stimulation header. This is just a header, dates are 0.
    self.output[0].append(OVStimulationHeader(0., 0.))
    self.init = False
  # The process method will be called by openvibe on every clock tick
  def process(self):
    # A stimulation set is a chunk which starts at current time and end time is the time step between two calls
    # init here and filled within triger()
    self.stimSet = OVStimulationSet(self.getCurrentTime(), self.getCurrentTime()+1./self.getClock())
    if self.init == False :
     local_time = local_clock()
     initSecond=int(local_time) 
     initMillis=int((local_time-initSecond)*1000)
     self.stimSet.append(OVStimulation(self.initLabel, self.getCurrentTime(), 0.))
     self.stimSet.append(OVStimulation(initSecond, self.getCurrentTime(), 0.))
     self.stimSet.append(OVStimulation(initMillis, self.getCurrentTime(), 0.))
     self.init=True
    # read all available samples from the stream
    samples=[]
    sample,timestamp = self.inlet.pull_sample(0)
    while sample != None:
     samples += sample
     sample,timestamp = self.inlet.pull_sample(0)
    # every value will be converted to an openvibe code and a stim will be created
    for label in samples: 
      label = str(label)
      if self.debug:
        print "Got label: ", label
      self.stimSet.append(OVStimulation(float(label), self.getCurrentTime(), 0.))	
    # even if it's empty we have to send stim list to keep the rest in sync
    self.output[0].append(self.stimSet)

  def uninitialize(self):
    # we send a stream end.
    end = self.getCurrentTime()
    self.output[0].append(OVStimulationEnd(end, end))
    self.inlet.close_stream()
Example no. 29
class Graph(object):
  def __init__(self, size=(600,350)):
    streams = resolve_byprop('name', 'bci', timeout=2.5)
    try:
      self.inlet = StreamInlet(streams[0])
    except IndexError:
      raise ValueError('Make sure stream name=bci is opened first.')
    
    self.running = True
    
    self.frequency = 250.0
    self.sampleinterval = (1/self.frequency)
    self.timewindow = 10
    self._bufsize = int(self.timewindow/self.sampleinterval)
    self.dataBuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
    self.timeBuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
    self.x = np.empty(self._bufsize,dtype='float64')
    self.y = np.empty(self._bufsize,dtype='float64')
    self.app = QtGui.QApplication([])
    self.plt = pg.plot(title='EEG data from OpenBCI')
    self.plt.resize(*size)
    self.plt.showGrid(x=True,y=True)
    self.plt.setLabel('left','Amplitude','V')
    self.plt.setLabel('bottom','Time','s')
    self.curve = self.plt.plot(self.x,self.y,pen=(255,0,0))
    self.sample = np.zeros(8)
    self.timestamp = 0.0

    #QTimer
    self.timer = QtCore.QTimer()
    self.timer.timeout.connect(self.update)
    self.timer.start(int(self.sampleinterval * 1000))  # QTimer.start() takes milliseconds

  def _graph_lsl(self):
    while self.running:
      # initial run
      self.sample, self.timestamp = self.inlet.pull_sample(timeout=5)
      if self.timeBuffer[0] == 0.0:
        self.timeBuffer = collections.deque([self.timestamp] * self._bufsize, self._bufsize)

      # time correction to sync to local_clock()
      try:
        if self.timestamp is not None and self.sample is not None:
          self.timestamp = self.timestamp + self.inlet.time_correction(timeout=5) 

      except TimeoutError:
        pass

    print('closing graphing utility')
    self.inlet.close_stream()

  def update(self):
    self.dataBuffer.append(self.sample[3])
    self.y[:] = self.dataBuffer
    self.timeBuffer.append(self.timestamp)
    self.x[:] = self.timeBuffer

    if len(self.x):
      print(self.x[0])
    else:
      print('no data yet')

    self.curve.setData(self.x,self.y)
    self.app.processEvents()

  def start(self):
    self.lsl_thread = threading.Thread(target=self._graph_lsl)
    self.lsl_thread.start()
  
  def stop(self):
    self.running = False
    self.lsl_thread.join(5)
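
A minimal usage sketch for the Graph class above; it assumes an LSL stream named 'bci' is already being published, otherwise the constructor raises ValueError.

if __name__ == '__main__':
    graph = Graph()
    graph.start()          # background thread keeps pulling samples into self.sample
    graph.app.exec_()      # Qt event loop; the QTimer drives update() and redraws the curve
    graph.stop()           # stop the pulling thread once the window is closed
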
Example no. 30
class Recorder():
    def __init__(self,
                 config,
                 maxbuffer_size,
                 q_from_display_to_recorder=None):

        # initialize basic configuration
        self.config = config
        self.maxbuffer_size = maxbuffer_size
        self.q_from_display_to_recorder = q_from_display_to_recorder
        #self.q_from_recorder_to_decoder = Queue()

        # private variables, changed by commands from the queue
        self.inlet_state = False
        # patient states:
        # -1 - none (not recording)
        # 0 - rest
        # 1 - objects
        # 2 - actions
        self.patient_state = 0
        # picture state:
        # 0 - none
        # 1 - start
        # 2 - stop
        self.picture_state = 0
        self.pause = 0
        self.patient_state_paused = -1
        self.picture_pause = 0

        # initialize configuration based on saving method (though buffer or without it)
        self.memory = [[], [], []]
        self.picture_indices = [[], [], []]
        self.pause_mark = False

        self.index_picture_start = -1
        self.index_picture_stop = -1
        self.index_pause = -1
        self.picture_end = False

        # resolve lsl stream
        stream_name = self.config['general']['lsl_stream_name']
        streams = resolve_stream('name', stream_name)
        self._printm('Resolving stream \'{}\', {} streams found'.format(
            stream_name, len(streams)))
        self.inlet = StreamInlet(streams[0], self.maxbuffer_size)
        self._printm('Stream resolved')

    # not used
    def record(self):
        self._printm(
            'Start recording, if \'Recording...\' progress bar is not filling, check lsl input stream'
        )

        self._resolve_q()
        with Bar('Recording...', max=1000) as bar:
            while self.inlet_state:
                self._resolve_q()
                sample, timestamp = self.inlet.pull_sample(timeout=0.0)
                if bar.index < 999:
                    bar.next()
                elif bar.index == 999:
                    bar.next()
                    bar.finish()

                # if patient state is 'none' - skip
                if self.patient_state == -1:
                    continue
                elif self.pause:
                    self.pause_mark = True
                    continue

                # if timestamp exists, concatenate sample with previous data
                if timestamp:
                    sample_index = len(self.memory[self.patient_state])
                    if self.picture_state == 1:
                        self.index_picture_start = (sample_index,
                                                    self.picture_state)
                    elif self.picture_state == 2:
                        self.index_picture_stop = (sample_index,
                                                   self.picture_state)
                        self.picture_end = True
                    if self.pause_mark:
                        self.index_pause = (sample_index - 1,
                                            self.picture_state)
                        self.pause_mark = False

                    sample = np.reshape(np.asarray(sample), (1, 69))
                    timestamp = np.array([[timestamp]])
                    picture_type_array = np.array([[self.patient_state]])
                    picture_state = np.array([[self.picture_state]])
                    self.picture_state = 0
                    picture_pause = np.array([[self.picture_pause]])
                    self.picture_pause = 0
                    big_sample = np.concatenate(
                        (sample, timestamp, picture_type_array, picture_pause,
                         picture_state),
                        axis=1)
                    self.memory[self.patient_state].append(big_sample)

                    if self.picture_end:
                        if self._good_picture():
                            self.picture_indices.append(
                                (self.index_picture_start,
                                 self.index_picture_stop))
                            #ecog_picture = np.vstack(self.memory[self.patient_state][self.indx_picture_begining:])

        self._printm('Stop recording')
        t1 = time.time()
        self._save()
        t2 = time.time()
        self._printm('Data saved: {}s:'.format(t2 - t1))

    def _good_picture(self):
        same_patient_state = self.index_picture_start[
            1] == self.index_picture_stop[1]
        pause_not_inside_picture = not (self.index_picture_start[1] == self.index_pause[1] and \
                                self.index_picture_start[0] <= self.index_pause[0])
        return same_patient_state and pause_not_inside_picture

    # not used
    def _save(self):
        experiment_data_path = Path(
            self.config['paths']['experiment_data_path'])
        dataset_width = self.config['recorder'].getint('dataset_width')
        groups = self.config['recorder']['group_names'].split(' ')
        with h5py.File(experiment_data_path, 'a') as file:
            for i in range(len(self.memory)):
                if len(self.memory[i]) > 0:
                    stacked_data = np.vstack(self.memory[i])
                    stacked_indices = np.vstack(self.picture_indices[i])
                    file[groups[i] + '/raw_data'] = stacked_data
                    file[groups[i] + '/picture_indices'] = stacked_indices
                    self.memory[i] = []
                    self.picture_indices[i] = []
                    self._printm('Saved {}, {}, {} pictures'.format(
                        groups[i], stacked_data.shape,
                        stacked_indices.shape[0]))
                else:
                    empty_shape = (0, dataset_width)
                    file.create_dataset(groups[i] + '/raw_data', empty_shape)
                    file.create_dataset(groups[i] + '/picture_indices', (0, 2))
                    self._printm('Saved {}, {}'.format(groups[i], empty_shape))
            file.create_dataset('fs',
                                data=np.array(
                                    self.config['recorder'].getint('fs')))

    # resolve commands from Display object to navigate recording of data
    def _resolve_q(self):
        while not self.q_from_display_to_recorder.empty():
            key, value = self.q_from_display_to_recorder.get()
            if self.config['general'].getboolean('debug_mode'):
                pass
                #self._printm('key: {}, value: {}'.format(key, value))
            if key == 'inlet_state':
                self.inlet_state = value
            elif key == 'patient_state':
                self.patient_state = value
            elif key == 'picture_state':
                self.picture_state = value
            elif key == 'pause':
                self.pause = value
            else:
                self._printm('wrong key in queue: {}'.format(key))

    def _printm(self, message):
        print('{} {}: '.format(time.strftime('%H:%M:%S'),
                               type(self).__name__) + message)
Example no. 31
class LslStream(object):
    '''
    This class creates the basic connection between the computer and a Lab Streaming
    Layer data stream. It makes connecting simpler and makes pulling and processing
    information straightforward.

    METHODS:
        __init__(**stream_info): Initiates a connection when the class is called
        connect(**stream_info): Connects to a data stream in the network given
                defined by the keyword args
        pull(**kwargs): Pulls a sample from the connected data stream
        chunk(**kwargs): Pulls a chunk of samples from the data stream

    ATTRIBUTES:
        streams: List of found LSL streams in the network
        inlet: Stream inlet used to pull data from the stream
        metainfo: Metadata from the stream
    '''
    def __init__(self, **stream_info):
        self.connect(**stream_info)

    def connect(self, **stream_info):
        '''
        This method connects to a LSL data stream. It accepts keyword arguments that define
        the data stream we are searching. Normally this would be (use keywords given between
        quotes as key for the argument) 'name' (e.g. 'Cognionics Quick-20'), 'type' (e.g. 'EEG'),
        'channels' (e.g. 8), 'freq' (from frequency, e.g. 500), 'dtype' (type of data, e.g.
        'float32'), 'serialn' (e.g. 'quick_20').

        After receiving the information of the stream, the script searches for it in the network
        and resolves it, and then connects to it (or the first one in case there are many, that's
        the reason why one has to be as specific as possible if many instances of LSL are being used
        in the lab). It prints some of the metadata of the data stream to the screen so the user
        can check if it is right, and returns the inlet to be used in other routines.

        INPUT:
            **kwargs: Keyword arguments defining the data stream

        RELATED ATTRIBUTES:
            streams, inlet, metainfo
        '''
        # Put the known information of the stream in a tuple. It is better to know as much
        # as possible if more than one kit is running LSL at the same time.
        stream_info_list = []
        for key, val in stream_info.items():
            stream_info_list.append(key)
            stream_info_list.append(val)

        # Resolve the stream from the lab network
        self.streams = resolve_stream(*stream_info_list)

        # Create a new inlet to read from the stream
        self.inlet = StreamInlet(self.streams[0])

        # Get stream information (including custom meta-data) and break it down
        self.metainfo = self.inlet.info()

    def pull(self, **kwargs):
        '''
        This method pulls data from the connected stream (using more information
        for the pull as given by kwargs).

        INPUT:
            kwargs: Extra specifications for the data pull from the stream

        OUTPUT:
            the data from the stream
        '''
        # Retrieve data from the data stream
        return self.inlet.pull_sample(**kwargs)

    def chunk(self, **kwargs):
        '''
        This method pulls chunks. It uses the same formatting as .pull
        '''
        # chunk, timestamp = self.inlet.pull_chunk(**kwargs)
        return self.inlet.pull_chunk(**kwargs)
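
A short usage sketch for LslStream; the stream properties are illustrative, and resolve_stream will block until a matching stream appears on the network.

stream = LslStream(type='EEG')                       # resolve and connect to the first EEG stream
print(stream.metainfo.name(), stream.metainfo.channel_count())

sample, timestamp = stream.pull(timeout=1.0)         # a single sample (None on timeout)
chunk, timestamps = stream.chunk(max_samples=32)     # or a chunk of up to 32 samples
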
Example no. 32
class EEGStateAdapter:

    # n_timepoints
    def __init__(self,
                 n_freq=30,
                 n_chan=8,
                 eeg_feed_rate=250,
                 samples_per_output=1,
                 spectrogram_timespan=10,
                 n_spectrogram_timepoints=10):
        self.num_channels = n_chan
        self.num_freqs = n_freq
        self.n_spectrogram_timepoints = n_spectrogram_timepoints
        self.eeg_fifo_len = spectrogram_timespan * eeg_feed_rate  #assuming spectrogram_timespan is in seconds

        # Verify this is an int, then cast to int
        assert self.eeg_fifo_len.is_integer(), "Spectrogram timespan (" + str(
            spectrogram_timespan) + ") * SPS (" + str(
                eeg_feed_rate) + ") must be an integer, is: " + str(
                    self.eeg_fifo_len)
        self.eeg_fifo_len = int(self.eeg_fifo_len)

        self.cache_interval = int(samples_per_output)
        self.eeg_thread_event = Event()
        self.eeg_data_cache = list()
        self.eeg_fifo = deque([], maxlen=self.eeg_fifo_len)

        # Init other adapters
        self.imprintAdapter = ImprintAdapter(dbg=False)
        self.rpvAdapter = RewardPunishAdapter(dbg=False)

        self.rpv_data_dict = dict()
        self.imprint_data_dict = dict()

    def sync_state_labels(self):
        '''
        Grab all available labels data from imprint and rpv adapters and sync 
        timestamps.
        '''
        '''
        syncing data is basically a matter of comparing timestamps and associating the 
        closest ones. This should be easy if we are storing lots of data...
        
        we can collect it all to be able to associate the closest, then we can 
        clear cache after a second or so.
        '''

        # Sync rpv data
        self.rpv_data_dict = self.sync_data(self.eeg_data_cache,
                                            self.rpvAdapter.get_data())

        # Sync Imprint data
        self.imprint_data_dict = self.sync_data(self.eeg_data_cache,
                                                self.imprintAdapter.get_data())

    def retrieve_latest_data(self):
        '''
        For V,F - T,F - V,B - PI,B
        '''
        if len(self.eeg_data_cache) > 0:

            #==============================================================================
            #             data = (
            #              np.asarray([d[0] for d in self.eeg_data_cache]),
            #              np.ones((1,1,10,240)), #np.empty(0),#[], #
            #              np.asarray([(1,0)]),# np.empty(0),#[], #np.ones((1,1,10,240)),
            #              np.ones((1,1,10,240)),#np.empty(0),#[], #np.ones((1,1,10,240)),
            #              np.asarray([44])
            #                      #np.empty(0)#[]
            #              )
            #             self.clear_caches()
            #             return data
            #==============================================================================

            # Sync timestamps
            self.sync_state_labels()

            # remove timestamps from eeg data
            eeg_data = np.asarray([d[0] for d in self.eeg_data_cache])

            # setup structure for tensorflow model

            rpv_inputs = np.asarray(self.rpv_data_dict['inputs'])
            rpv_labels = np.asarray(self.rpv_data_dict['labels'])

            assert rpv_inputs.shape[0] == rpv_labels.shape[
                0]  #TODO error string here

            imp_inputs = np.asarray(self.imprint_data_dict['inputs'])
            imp_labels = np.asarray(
                self.imprint_data_dict['labels'])  #TODO error

            assert imp_inputs.shape[0] == imp_labels.shape[0]

            data = eeg_data, rpv_inputs, rpv_labels, imp_inputs, imp_labels

            # clear caches
            self.clear_caches()

            return data
        else:
            return ([], [], [], [], [])

    def launch_eeg_adapter(self, manual_stream_select=True):

        self.imprintAdapter.launch_imprint_adapter()
        self.rpvAdapter.launch_rpv_adapter()

        print("Resolving EEG marker stream...")
        streams = resolve_stream('type', 'PERIODO')
        snum = 0
        if manual_stream_select:
            for i, s in enumerate(streams):
                print(i, s.name())
            snum = input("Select EEGStateAdapter stream: ")
        self.inlet = StreamInlet(streams[int(snum)])
        # launch thread
        self.eeg_thread_event.set()
        thread = Thread(target=self.eeg_rx_thread)
        thread.start()

    def stop_eeg_thread(self):
        self.imprintAdapter.stop_imprint_thread()
        self.rpvAdapter.stop_rpv_thread()
        self.eeg_thread_event.clear()

    def clear_caches(self, clear_subadapters=False):
        self.rpv_data_dict = dict()
        self.imprint_data_dict = dict()
        self.eeg_data_cache = list()
        if clear_subadapters:
            self.rpvAdapter.get_data()
            self.imprintAdapter.get_data()

    def eeg_rx_thread(self):
        '''
        Receiver will need to select correct stream, then continuously accept and 
        process commands as they arrive.
        '''

        rx_counter = 0
        fifo_idx = np.linspace(0, self.eeg_fifo_len - 1,
                               self.n_spectrogram_timepoints).astype(int)
        while self.eeg_thread_event.isSet():

            # get command
            eeg_periodo, timestamp = self.inlet.pull_sample(timeout=1)
            if eeg_periodo is None:
                continue  # if timed out, check whether the thread is still alive

            assert len(
                eeg_periodo
            ) == self.num_channels * self.num_freqs  #lsl output is flattened periodo

            # add new periodogram to fifo
            self.eeg_fifo.append(eeg_periodo)

            # inc rx count
            rx_counter += 1

            # cache if apt.
            if (len(self.eeg_fifo)
                    == self.eeg_fifo_len) and (rx_counter % self.cache_interval
                                               == 0):
                self.eeg_data_cache += [
                    (np.asarray(self.eeg_fifo)[fifo_idx, :], timestamp)
                ]

    def sync_data(self, _inputs, labels):
        '''
        assume inputs and labels are both lists of (value, timestamp) tuples,

        and that we have many more inputs than labels.

        the strategy is to find the closest input for each label; each input can be
        used for at most one label, so if the closest input was already taken the
        label is skipped. timestamp-difference statistics are collected for each
        matched pair.
        
        '''

        ts_diffs = []
        synced_inputs = []
        synced_outputs = []

        # copy inputs
        inputs = list(_inputs)
        previously_used = list()

        # loop over labels to find closest input
        for label in labels:

            # extract timestamps to array
            inputs_ts = np.asarray([i[1] for i in inputs])

            # extract label ts
            label_ts = label[1]

            # find nearest input to label
            inputs_ts = abs(inputs_ts - label_ts)
            amin = np.argmin(inputs_ts)

            # check if the value at this index has been used before; if so, skip the label (undefined behaviour)
            if amin in previously_used: continue
            previously_used.append(amin)

            closest_input = inputs[amin][0]

            # collect metrics
            ts_diffs += [inputs_ts[amin]]

            # add pair
            #synced_pairs += [(closest_input, label[0])]
            synced_inputs += [closest_input]
            synced_outputs += [label[0]]

            # delete used input


#            del inputs[amin]

        return {
            'inputs': synced_inputs,
            'labels': synced_outputs,
            'diffs': ts_diffs
        }
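
A tiny illustration of the nearest-timestamp matching done by sync_data; the values are made up, and sync_data is called unbound because it never touches self.

inputs = [('in_a', 0.10), ('in_b', 0.52), ('in_c', 1.03)]   # (value, timestamp) pairs
labels = [(1, 0.50), (0, 1.00)]

synced = EEGStateAdapter.sync_data(None, inputs, labels)
# synced['inputs'] -> ['in_b', 'in_c']
# synced['labels'] -> [1, 0]
# synced['diffs']  -> roughly [0.02, 0.03], the absolute timestamp mismatch per pair
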
Example no. 33
                    events.append(event)
                    events_read += 1
                    if events_read == 1:
                        first_epoch_time = event[0]
                    elif events_read == 12:
                        last_epoch_time = event[0]
        except:
            pass

        #==========================================================#
        #                 Read Sample From LSL                     #
        #==========================================================#

        if args.live_mode or args.training_mode:
            # Get sample and time_stamp from data stream
            sample, time_stamp = inlet.pull_sample(timeout=0.01)
            
            if sample is not None:
                # Store the sample in the data history
                sample_row = np.append(time_stamp, np.append(eeg_event_codes.NO_EVENT, sample[0:len(config.EEG_CHANNELS)]))
                data_history = np.vstack((data_history, sample_row))
        

        #==========================================================#
        #          Processing the End of the Sequence              #
        #==========================================================#
        
        # Sequence complete
        if events_read == 12:
            sequences_complete += 1
            events_read = 0
Example no. 34
import sys; sys.path.append('..') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream

# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type','EEG')

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])

while True:
	# get a new sample (you can also omit the timestamp part if you're not interested in it)
	sample,timestamp = inlet.pull_sample()
	print(timestamp, sample)
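
The same minimal reader can pull whole chunks instead of single samples; the timeout and max_samples values below are illustrative, not requirements of the stream.

from pylsl import StreamInlet, resolve_stream

streams = resolve_stream('type', 'EEG')
inlet = StreamInlet(streams[0])

while True:
    chunk, timestamps = inlet.pull_chunk(timeout=1.0, max_samples=32)
    if timestamps:
        print(len(timestamps), 'samples, first at', timestamps[0])
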
Example no. 35
class Renderer(app.Canvas):
	def __init__(self):
		app.Canvas.__init__(self, title='Use your wheel to zoom!',
							keys='interactive')

		
		# first resolve an EEG stream on the lab network
		print("looking for an EEG stream...")
		streams = resolve_stream('name', 'RandomSpehricalData')
		streamInfo = streams[0]
		# create a new inlet to read from the stream
		self.inlet = StreamInlet(streamInfo)
		# Number of cols and rows in the table.
		self.nrows = streamInfo.channel_count()
		
		n = streamInfo.nominal_srate()
		ncols = 1

		# Number of signals.
		m = self.nrows*ncols

		# Various signal amplitudes.
		amplitudes = .1 + .2 * np.random.rand(m, 1).astype(np.float32)

		# Generate the signals as a (m, n) array.
		self.y = amplitudes * np.random.randn(m, n).astype(np.float32)
		
		color = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9),
						  n, axis=0).astype(np.float32)


		# Signal 2D index of each vertex (row and col) and x-index (sample index
		# within each signal).
		index = np.c_[np.repeat(np.repeat(np.arange(ncols), self.nrows), n),
					  np.repeat(np.tile(np.arange(self.nrows), ncols), n),
					  np.tile(np.arange(n), m)].astype(np.float32)


		self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
		self.program['a_position'] = self.y.reshape(-1, 1)
		self.program['a_color'] = color
		self.program['a_index'] = index
		self.program['u_scale'] = (1., 1.)
		self.program['u_size'] = (self.nrows, ncols)
		self.program['u_n'] = n

		gloo.set_viewport(0, 0, *self.physical_size)

		self._timer = app.Timer('auto', connect=self.on_timer, start=True)

		gloo.set_state(clear_color='black', blend=True,
					   blend_func=('src_alpha', 'one_minus_src_alpha'))
		
		self.sampleFromLSL = None
		
		self.show()

	def on_resize(self, event):
		gloo.set_viewport(0, 0, *event.physical_size)

	def on_mouse_wheel(self, event):
		dx = np.sign(event.delta[1]) * .05
		scale_x, scale_y = self.program['u_scale']
		scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx),
									scale_y * math.exp(0.0*dx))
		self.program['u_scale'] = (max(1, scale_x_new), max(1, scale_y_new))
		self.update()

	def on_timer(self, event):
		"""Add some data at the end of each signal (real-time signals)."""
		samples = []
		sample, timestamp = self.inlet.pull_sample(0.0)
		while sample is not None:
			samples.append(sample)
			sample, timestamp = self.inlet.pull_sample(0.0)

		k = len(samples)  # number of samples received on this timer call
		if k > 0:
			# scroll the buffer left by k and append the new samples as columns
			sampleSet = np.array(samples, dtype=np.float32).T
			self.y[:, :-k] = self.y[:, k:]
			self.y[:, -k:] = sampleSet

		self.program['a_position'].set_data(self.y.ravel().astype(np.float32))
		self.update()

	def on_draw(self, event):
		gloo.clear()
		self.program.draw('line_strip')