Example 1
    def receive(self):
        streams = resolve_byprop('type',
                                 self.settings.type,
                                 timeout=LSL_SCAN_TIMEOUT)

        if len(streams) == 0:
            print("Can't find %s stream." % self.settings.type)
            return

        print("Started acquiring data.")
        inlet = StreamInlet(streams[0], max_chunklen=self.settings.chunk)

        info = inlet.info()
        description = info.desc()
        n_channels = info.channel_count()

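        # Walk the stream's XML description to collect the channel labels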
        ch = description.child('channels').first_child()
        ch_names = [ch.child_value('label')]
        for i in range(1, n_channels):
            ch = ch.next_sibling()
            ch_names.append(ch.child_value('label'))

        channel_descriptor = ChannelDescriptor(self.settings.type, ch_names,
                                               info.nominal_srate())

        res = []
        timestamps = []
        t_init = time()
        time_correction = inlet.time_correction()
        print('Start recording at time t=%.3f' % t_init)
        print('Time correction: ', time_correction)

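        # Acquisition loop: pull chunks, store them, and notify subscribers until the timeout elapses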
        while (time() - t_init) < 5000:
            try:
                data, timestamp = inlet.pull_chunk(
                    timeout=1.0, max_samples=self.settings.chunk)

                if timestamp:
                    res.append(data)
                    self.subscription.notify_all_subscribers(
                        self.settings.type, data, timestamp,
                        channel_descriptor)
                    timestamps.extend(timestamp)
            except KeyboardInterrupt:
                break

        time_correction = inlet.time_correction()
        print('Time correction: ', time_correction)

        res = np.concatenate(res, axis=0)
        timestamps = np.array(timestamps) + time_correction

        print('Done.')
Example 2
def recordeeg(duration):
    warnings.filterwarnings('ignore')

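    # Buffer/epoch lengths are in seconds; SHIFT_LENGTH determines how much new data each pull requests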
    BUFFER_LENGTH = 5
    EPOCH_LENGTH = 1
    OVERLAP_LENGTH = 0.8
    SHIFT_LENGTH = EPOCH_LENGTH - OVERLAP_LENGTH
    streams = resolve_byprop('type', 'EEG', timeout=2)
    if len(streams) == 0:
        raise RuntimeError('Can\'t find EEG stream.')
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()
    info = inlet.info()
    description = info.desc()
    fs = int(info.nominal_srate())
    eeg_buffer = np.zeros((int(fs * BUFFER_LENGTH), 1))
    filter_state = None
    n_win_test = int(
        np.floor((BUFFER_LENGTH - EPOCH_LENGTH) / SHIFT_LENGTH + 1))

    band_buffer = np.zeros((n_win_test, 4))
    musedata = []

    while True:
        eeg_data, timestamp = inlet.pull_chunk(timeout=1,
                                               max_samples=int(SHIFT_LENGTH *
                                                               fs))
        musedata += eeg_data
        if len(musedata) > duration * fs:
            return musedata
Example 3
    def __init__(self):

        print('Connecting...')
        streams = resolve_byprop('type', 'EEG', timeout=2)
        if len(streams) == 0:
            raise RuntimeError('Can\'t find EEG stream.')

        # set up Inlet
        inlet = StreamInlet(streams[0], max_chunklen=12)
        eeg_time_correction = inlet.time_correction()

        # Pull relevant information
        info            = inlet.info()
        self.desc       = info.desc()
        self.freq       = int(info.nominal_srate())

        ## TRAIN DATASET
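        # Record baseline EEG, epoch it, compute a band-power feature matrix, and derive baseline statistics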
        print('Recording Baseline')
        eeg_data_baseline = BCI.record_eeg_filtered(
                                    self.TRAINING_LENGTH,
                                    self.freq,
                                    self.INDEX_CHANNEL,
                                    True, )
        eeg_epochs_baseline = BCI.epoch_array(
                                    eeg_data_baseline,
                                    self.EPOCH_LENGTH,
                                    self.OVERLAP_LENGTH * self.freq,
                                    self.freq)
        feat_matrix_baseline = BCI.compute_feature_matrix(
                                    eeg_epochs_baseline,
                                    self.freq)
        self.baseline = BCI.calc_baseline(feat_matrix_baseline)
Example 4
    def get_Stream_Info(self, streams):
        inlet = StreamInlet(
            streams[0], max_chunklen=12, recover=False
        )  # create a getter to change the chunk length based on the device
        timecorrect = inlet.time_correction()  # time offset between the stream clock and the local clock
        info = inlet.info()
        description = info.desc()
        fs = int(info.nominal_srate())
        num_channels = info.channel_count()
        return inlet, timecorrect, info, description, fs, num_channels
Example 5
def testing():
    dummy_streamer = ble2lsl.Dummy(muse2016)  # stream simulated Muse 2016 data over LSL

    streams = resolve_byprop(
        "type", "EEG", timeout=5
    )  #type: EEG, minimum return streams = 1, timeout after 5 seconds

    streamIn = StreamInlet(
        streams[0], max_chunklen=12, recover=True
    )  #Grab first stream from streams, MUSE chunk 12, drop lost stream
    print(streamIn)
    print(streamIn.info().channel_count())
    streamIn.open_stream(
    )  #This actually isn't required: pull_sample() and pull_chunk() implicitly open the stream.
    #But it's good to be explicit because it makes the code clearer
    print("Pull Sample")
    print(streamIn.pull_sample()
          )  #Returns a tuple with the actual values we want.
    #The first element is the list of channel values, the second element is a timestamp. This is a snapshot of our stream
    #at a certain point in time.
    print("Pull Chunk")
    ts = time.time()
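    # NOTE: this loop has no exit condition, so the cleanup code below is unreachable as written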
    while (1):
        x = streamIn.pull_chunk()
        if all(x):
            #if not np.shape(x) == (2, 0):
            print(np.shape(x))
            print(np.shape(x[1]))
            t = [t - ts for t in x[1]]
            print(t)
            print(t[-1] - t[0])

        # for y in x:
        #     for z in y:
        #         print(z)
        #print("\n")

    plt.style.use('ggplot')

    # data is returned first, then the timestamps

    pprint(streamIn.info().as_xml())  # print the full stream metadata as XML
    timeC = streamIn.time_correction()
    print(timeC)

    #Clean up time

    streams.clear()
    streamIn.close_stream()  #calls lsl_close_stream
    streamIn.__del__()  #Not throwing errors
    dummy_streamer.stop()
Example 6
def main():
    # first resolve an EEG stream on the lab network
    print("looking for an EEG stream...")
    streams = resolve_stream('type', 'EEG')
    info = streams[0]

    # create a new inlet to read from the stream
    inlet = StreamInlet(info)

    print('Connected to outlet ' + info.name() + '@' + info.hostname())
    while True:
        offset = inlet.time_correction()
        print('Offset: ' + str(offset))
        time.sleep(1)
Example 7
class Consumer:

    def __init__(self):
        streams = resolve_byprop('type', 'EEG', timeout=30)
        if len(streams) == 0:
            raise RuntimeError('Could not find EEG stream')
        elif len(streams) > 1:
            raise RuntimeError('Found multiple EEG streams')
        self._inlet = StreamInlet(streams[0])

    def pull_data(self):
        samples, timestamp = self._inlet.pull_sample()
        time_correction = self._inlet.time_correction()
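        # time_correction() returns the offset to add to the remote timestamp to express it on the local clock; it is not applied here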

        return (samples, timestamp)
Example 8
def acquire_eeg(duration,
                callback=print_eeg_callback,
                eeg_chunck=LSL_EEG_CHUNK):

    DATA_SOURCE = "EEG"

    print("Looking for a %s stream..." % (DATA_SOURCE))
    streams = resolve_byprop('type', DATA_SOURCE, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (DATA_SOURCE))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=eeg_chunck)

    info = inlet.info()
    description = info.desc()
    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    timestamps = []
    t_init = time()
    time_correction = inlet.time_correction()

    print('Start acquiring at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)

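    # Pull chunks for `duration` seconds and hand each one to the callback as a dict keyed by channel label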
    while (time() - t_init) < duration:
        try:
            chunk, timestamps = inlet.pull_chunk(timeout=1.0,
                                                 max_samples=eeg_chunck)

            if timestamps:
                samples = {
                    key: [sample[i] for sample in chunk]
                    for i, key in enumerate(ch_names)
                }
                callback(timestamps, samples)
        except KeyboardInterrupt:
            break

    print('Acquisition is done')
Example 9
class BciThread (threading.Thread):
    def __init__(self, name):
        threading.Thread.__init__(self)
        self.__streamsEEG = resolve_byprop('type', 'EEG', timeout=TIMEOUT)
        if len(self.__streamsEEG) == 0:
            raise RuntimeError("Can't find EEG stream.")
        self.__inlet = StreamInlet(self.__streamsEEG[0], max_chunklen=12)
        self.__eeg_time_correction = self.__inlet.time_correction()
        self.__info = self.__inlet.info()
        self.__fs = int(self.__info.nominal_srate())
        self.name = name
        self.__lock = threading.Lock()
        self.__work = False
        self.__observers = set()

    def attach(self, observer: Observer):
        self.__observers.add(observer)

    def event(self, info):
        for observer in self.__observers:
            observer.update_data(info)

    def detach(self, observer: Observer):
        self.__observers.remove(observer)

    def run(self):
        self.__work = True
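        # Pull chunks and broadcast rows of [timestamp, channel values] to the attached observers until stop() is called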
        while self.__work:
            eeg_data, timestamp = self.__inlet.pull_chunk(timeout=1, max_samples=int(SHIFT_LENGTH * self.__fs))
            info = np.column_stack((timestamp, eeg_data))
            self.event(info)

    def get_fs(self):
        return self.__fs

    def stop(self):
        self.__lock.acquire()
        self.__work = False
        self.__lock.release()
Example 10
def record(duration, filename=None, dejitter=False):
    if not filename:
        filename = os.path.join(
            os.getcwd(),
            ("recording_%s.csv" % strftime("%Y-%m-%d-%H.%M.%S", gmtime())))

    print("Looking for an EEG stream...")
    streams = resolve_byprop('type', 'EEG', timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        raise RuntimeError("Can't find EEG stream.")

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=LSL_CHUNK)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop('name',
                                    'Markers',
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=LSL_CHUNK)

            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print('Time correction: ', time_correction)

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps) + time_correction

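    # Optionally dejitter: replace the irregular LSL timestamps with a linear fit over the sample index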
    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)

    if inlet_marker and markers:
        n_markers = len(markers[0][0])
        for ii in range(n_markers):
            data['Marker%d' % ii] = 0
        # process markers:
        for marker in markers:
            # find index of markers
            ix = np.argmin(np.abs(marker[1] - timestamps))
            for ii in range(n_markers):
                data.loc[ix, 'Marker%d' % ii] = marker[0][ii]

    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    data.to_csv(filename, float_format='%.3f', index=False)

    print('Done - wrote file: ' + filename + '.')
Example 11
class P300Window(object):
    def __init__(self, master: Tk):
        self.master = master
        master.title('P300 speller')

        #Parameters
        self.imagesize = 125
        self.images_folder_path = '../utils/images/'  #use utils/char_generator to generate any image you want
        self.flash_image_path = '../utils/images/flash_images/einstein.jpg'
        self.number_of_rows = 6
        self.number_of_columns = 6  #make sure you have 6 x 6 amount of images in the images_folder_path
        self.flash_mode = 2  # 2 = flash single elements; 1 = flash rows/columns (currently not working)
        self.flash_duration = 100  # flash duration (ms)
        self.break_duration = 150  # inter-stimulus interval (ms)

        self.trials = 6  #number of letters
        self.delay = 1000  #interval between trial
        self.letter_idx = 0

        # Param for creating sequence
        self.num_sequence = 100

        #Parameter for random
        # self.number_of_symbols =36
        # self.fashed_per_iteration = 2
        # self.number_of_iterations = 6
        # self.stimuli_per_iteration = self.number_of_symbols * self.fashed_per_iteration / self.number_of_iterations

        #did not include numbers yet!
        self.random_letter = random.choices(
            string.ascii_lowercase,
            k=self.trials)  #randomize [self.trials] number letters
        self.word = ''.join(self.random_letter)

        # Variables
        self.usable_images = []
        self.image_labels = []
        self.flash_sequence = []
        self.flash_image = None
        self.sequence_number = 0
        self.lsl_output = None

        # self.running = 0  #for pause

        self.image_frame = Frame(self.master)
        self.image_frame.grid(row=0,
                              column=0,
                              rowspan=self.number_of_rows,
                              columnspan=self.number_of_columns)

        self.start_btn_text = StringVar()
        self.start_btn_text.set('Start')
        self.start_btn = Button(self.master,
                                textvariable=self.start_btn_text,
                                command=self.start)
        self.start_btn.grid(row=self.number_of_rows + 3,
                            column=self.number_of_columns - 1)

        # self.pause_btn = Button(self.master, text='Pause', command=self.pause)
        # self.pause_btn.grid(row=self.number_of_rows + 3, column=self.number_of_columns - 4)  #-4 for center
        # self.pause_btn.configure(state='disabled')

        self.close_btn = Button(self.master, text='Close', command=master.quit)
        self.close_btn.grid(row=self.number_of_rows + 3, column=0)

        fontStyle = tkFont.Font(family="Courier", size=40)

        self.output = Text(root, height=1, font=fontStyle)
        self.output.tag_configure("red", foreground="red")
        self.output.tag_configure("green", foreground="green")
        self.output.configure(width=10)
        self.output.insert("end", "  ")
        self.output.grid(row=self.number_of_rows + 2,
                         column=self.number_of_columns - 4)

        self.outputlabel = Label(root, text="Output: ", font=fontStyle)
        self.outputlabel.grid(row=self.number_of_rows + 2,
                              column=self.number_of_columns - 5)

        self.targetlabel = Label(root, text="Target: ", font=fontStyle)
        self.targetlabel.grid(row=self.number_of_rows + 1,
                              column=self.number_of_columns - 5)

        self.show_highlight_letter(0)

        # Initialization
        self.show_images()
        self.create_flash_sequence()
        self.lsl_output = self.create_lsl_output()

    def open_images(self):
        self.usable_images = []
        self.highlight_letter_images = []

        letter_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path, 'letter_images/*.png')))

        # numbers are not flashed yet
        number_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path, 'number_images/*.png')))
        letter_highlight_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path,
                             'letter_highlight_images/*.png')))
        number_highlight_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path,
                             'number_highlight_images/*.png')))

        for number_image in number_images:
            letter_images.append(number_image)
        #print("Paths: ", letter_images)
        min_number_of_images = self.number_of_columns * self.number_of_rows
        if len(letter_images) < min_number_of_images:
            print('Too few images in folder: ' + self.images_folder_path)
            return

        # Convert and resize images
        for image_path in letter_images:
            image = Image.open(image_path)
            resized = image.resize((self.imagesize, self.imagesize),
                                   Image.BICUBIC)
            Tkimage = ImageTk.PhotoImage(resized)
            self.usable_images.append(Tkimage)

        # Convert and resize images
        for image_path in letter_highlight_images:
            image = Image.open(image_path)
            resized = image.resize((self.imagesize, self.imagesize),
                                   Image.BICUBIC)
            Tkimage = ImageTk.PhotoImage(resized)
            self.highlight_letter_images.append(Tkimage)

        flash_img = Image.open(self.flash_image_path)
        flash_img_res = flash_img.resize((self.imagesize, self.imagesize),
                                         Image.BICUBIC)
        self.flash_image = ImageTk.PhotoImage(flash_img_res)

    def show_images(self):
        self.open_images()

        if self.usable_images == []:
            print('No images opened')
            return

        num_rows = self.number_of_rows
        num_cols = self.number_of_columns

        # Arrange images
        for r in range(0, num_rows):
            for c in range(0, num_cols):
                current_image = self.usable_images[r * num_cols + c]
                label = Label(self.image_frame, image=current_image)
                label.image = current_image
                label.grid(row=r, column=c)
                self.image_labels.append(label)

    def create_lsl_output(self):
        """Creates an LSL Stream outlet"""
        info = StreamInfo(name='LetterMarkerStream',
                          type='LetterFlashMarkers',
                          channel_count=1,
                          channel_format='int8',
                          nominal_srate=IRREGULAR_RATE,
                          source_id='lettermarker_stream',
                          handle=None)

        return StreamOutlet(info)  #for sending the predicted classes

    # def create_flash_sequence_old(self):
    #     num_rows = 6
    #     num_cols = 6
    #     maximum_number = num_rows * num_cols
    #     number_count = np.array([0] * maximum_number)
    #     seq = []
    #     next_number = [random.randint(0, maximum_number)]

    #     count = 1
    #     while (len(seq) / 6 < 100):
    #         if (len(seq) > 0):
    #             neighbor_list = seq[-count:]   #the same set, with size of -1 to -6
    #             left_list = [x-1 for x in neighbor_list]   # neighbor of each neighbor in neighbor_list
    #             right_list = [x+1 for x in neighbor_list]
    #             up_list = [x-6 for x in neighbor_list]
    #             bot_list = [x+6 for x in neighbor_list]
    #             combine_list =[]
    #             combine_list.extend(left_list)
    #             combine_list.extend(right_list)
    #             combine_list.extend(up_list)
    #             combine_list.extend(bot_list)
    #             #should not be same element as previous set (*2), and should not be same element in the combine_list
    #             if (next_number not in seq[-CONCURRENT_ELEMENTS*2:] and next_number not in combine_list):
    #                 seq.extend(next_number)
    #                 count = ((count) % 6) + 1  #this count makes sure we have one set of 6
    #         else:
    #             seq.extend(next_number)
    #         left_over = np.argwhere(number_count == np.argmin(number_count))
    #         selectMax = len(left_over) - 1
    #         next_number = left_over[random.randint(0, selectMax)]
    #     self.flash_sequence = seq

    def find_replacement(self, nextelem, seq):
        length = CONCURRENT_ELEMENTS if len(
            self.flash_sequence) > CONCURRENT_ELEMENTS - 1 else len(
                self.flash_sequence)
        for i in range(1, length + 1):
            prev_seq = self.flash_sequence[-i:][0]
            for j in range(len(prev_seq)):
                previous_seq_elem = self.flash_sequence[-i:][0][j]
                prev_seq_without_prevelem = [
                    x for x in prev_seq if x != previous_seq_elem
                ]
                nl_prev = self.get_neighbors(prev_seq_without_prevelem)
                nl_current = self.get_neighbors(seq)
                if (nextelem not in prev_seq and  #a
                        nextelem not in nl_prev and  #b
                        nextelem not in self.flash_sequence[-i - 1:][0]
                        and  #c1  left consecutive
                        nextelem not in self.flash_sequence[-i + 1:][
                            0]  #c2  right consecutive
                        and previous_seq_elem not in seq and  #a
                        previous_seq_elem not in nl_current and  #b  
                        previous_seq_elem
                        not in self.flash_sequence[-1:][0]):  #c
                    self.flash_sequence[-i:][0][j] = nextelem
                    seq.append(previous_seq_elem)
                    return seq
        print("Can't swap... Restarting everything...")
        self.create_flash_sequence()

    def get_neighbors(self, seq):
        right = [x + 1 for x in seq]
        left = [x - 1 for x in seq]
        top = [x - 6 for x in seq]
        bottom = [x + 6 for x in seq]
        neighbors = list(chain(right, left, top, bottom))
        return neighbors

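    # Build `num_sequence` groups of CONCURRENT_ELEMENTS cells such that no two cells in a group are grid neighbours
    # and no cell repeats the immediately preceding group; the groups are then flattened into one flash sequence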
    def create_flash_sequence(self):
        num_rows = self.number_of_rows
        num_cols = self.number_of_columns
        total = num_rows * num_cols
        num_sequence = self.num_sequence

        li = list(range(total))

        while len(self.flash_sequence) < num_sequence:
            seq = []
            failcount = 0
            if not li:
                #if li is exhausted
                li = list(range(36))
            while len(seq) < CONCURRENT_ELEMENTS:
                nextelem = random.choice(li)
                if len(seq) > 0:
                    nl = self.get_neighbors(seq)
                    #prevent stuck
                    if failcount > 20:
                        seq = self.find_replacement(nextelem, seq)
                        li.remove(nextelem)
                        failcount = 0
                    elif nextelem not in nl and nextelem not in seq:
                        if len(self.flash_sequence):
                            if nextelem not in self.flash_sequence[-1:][
                                    0]:  #0 remove the outer list [[]] becomes []
                                seq.append(nextelem)
                                li.remove(nextelem)
                            else:
                                failcount += 1
                        else:
                            seq.append(nextelem)
                            li.remove(nextelem)
                    else:
                        failcount += 1
                else:  #first element, just insert
                    if len(self.flash_sequence):
                        if nextelem not in self.flash_sequence[-1:][0]:
                            seq.append(nextelem)
                            li.remove(nextelem)
                    else:
                        seq.append(nextelem)
                        li.remove(nextelem)
            self.flash_sequence.append(seq)

        self.flash_sequence = list(chain(*self.flash_sequence))

    def start(self):
        if not (TEST_UI):
            self.read_lsl_marker()
        self.running = 1
        letter = self.word[0]
        image_index = string.ascii_lowercase.index(letter)
        self.highlight_image(image_index)
        self.start_btn.configure(state='disabled')
        # self.pause_btn.configure(state='normal')  # the pause button is commented out above

    # def pause(self):
    #     self.running = 0
    #     self.start_btn_text.set('Resume')
    #     self.start_btn.configure(state='normal')
    #     self.pause_btn.configure(state='disabled')

    # def check_pause(self):
    #     if self.running == 0:
    #         print('Flashing paused at sequence number ' + str(self.sequence_number))
    #         return

    def check_sequence_end(self):
        if self.sequence_number == len(
                self.flash_sequence
        ):  #stop flashing if all generated sequence number runs out
            print('All elements had flashed - run out of juice')
            self.running = 0
            self.sequence_number = 0
            return

    def get_marker_result(self):
        result = self.marker_result()
        if (result):
            print("Marker received: ", result[0][0])
            receive = result[0][0]
        else:
            receive = 0

        return receive

    def output_letter(self, receive, image_index):
        # receive = [9,10,13]
        if not receive:
            if FLASH_CONCURRENT:
                self.master.after(self.break_duration,
                                  self.start_concurrent_flashing)
            else:
                self.master.after(self.break_duration, self.start_flashing)

        elif isinstance(receive, int):
            if (image_index + 1) == receive:
                self.output.insert("end", self.pos_to_char(receive), "green")
            else:
                self.output.insert("end", self.pos_to_char(receive), "red")

            self.letter_idx += 1
            if self.letter_idx == len(self.word):
                return
            letter = self.word[self.letter_idx]
            image_index = string.ascii_lowercase.index(letter)
            self.master.after(self.break_duration, self.highlight_target,
                              image_index)

        else:
            if FLASH_CONCURRENT:
                self.candidate_flash_sequence(receive)
                self.master.after(self.break_duration,
                                  self.start_concurrent_flashing)
            else:
                self.master.after(self.break_duration, self.start_flashing)

    def candidate_flash_sequence(self, candidates):
        print("got candidate")
        num_rows = self.number_of_rows
        num_col = self.number_of_columns
        current_elements = self.flash_sequence[self.sequence_number:self.
                                               sequence_number +
                                               CONCURRENT_ELEMENTS]
        next_elements = self.flash_sequence[self.sequence_number +
                                            CONCURRENT_ELEMENTS:self.
                                            sequence_number +
                                            CONCURRENT_ELEMENTS * 2]

        # Step 1 check if the candidate is included in the next list
        # Step 2 check if there is multiple candidate in the next list ~ else just continue
        count_contain_candidate = 0
        for target_candidate in candidates:
            if target_candidate in next_elements:
                count_contain_candidate += 1

        if count_contain_candidate > 1 or count_contain_candidate == 0:
            # Step 3 check the previous i-length stretch of the sequence to find the least-used candidate, where i = candidate length
            start_check_index = 0
            previous_flashes_count = []
            if CONCURRENT_ELEMENTS * len(candidates) > self.sequence_number:
                start_check_index = self.sequence_number - CONCURRENT_ELEMENTS * len(
                    candidates) - 1
            for target_candidate in candidates:
                previous_flashes = self.flash_sequence[start_check_index:self.
                                                       sequence_number - 1]
                previous_flashes_count.append([
                    target_candidate,
                    previous_flashes.count(target_candidate)
                ])
            least_index = np.argmin(previous_flashes_count, axis=0)[1]
            choosen_candidate = previous_flashes_count[least_index][0]

            # generate new flash_sequence
            new_sequence = [choosen_candidate]
            li = list(range(num_rows * num_col))

            print("new_sequence1 :", new_sequence)
            #generate sequence
            while len(new_sequence) < 6:
                nextelem = random.choice(li)
                print("nextelem :", nextelem)
                nl = self.get_neighbors(new_sequence)
                # keep drawing until the element is not a neighbour and not in the current or next group
                while (nextelem in nl or nextelem in current_elements
                       or nextelem in next_elements):
                    nextelem = random.choice(li)
                    print("nextelem :", nextelem)
                new_sequence.append(nextelem)
            print("new_sequence2: ", new_sequence)

            print("self.sequence_number: ", self.sequence_number)

            # update flashing sequences
            print(self.sequence_number)

            self.flash_sequence = self.flash_sequence[
                0:self.sequence_number +
                CONCURRENT_ELEMENTS] + new_sequence + self.flash_sequence[
                    self.sequence_number + CONCURRENT_ELEMENTS:]

        # Step 4 generate new list where
        #       1 no duplicate
        #       2 no multiple candidate
        #       3 no neighbors
        #       4 no previous list if possible (maybe all candidate is in the previous)
        #       5 no next list if possible (maybe all candidate is in the next)
        #       6 use the least use candidate

    def start_concurrent_flashing(self):

        self.check_sequence_end()
        # self.check_pause()
        receive = self.get_marker_result()

        element_to_flash = self.flash_sequence[self.sequence_number:self.
                                               sequence_number +
                                               CONCURRENT_ELEMENTS]
        letter = self.word[self.letter_idx]
        image_index = string.ascii_lowercase.index(letter)

        #pushed markers to LSL stream

        print("Letter: ", image_index, " Element flash: ",
              [x + 1 for x in element_to_flash])
        for e in element_to_flash:
            self.lsl_output.push_sample([e + 1
                                         ])  # add 1 to prevent 0 in markers

        self.flash_multiple_elements(element_to_flash)
        self.output_letter(receive, image_index)

        self.sequence_number = self.sequence_number + CONCURRENT_ELEMENTS  #change flash position

    def start_flashing(self):
        self.check_sequence_end()
        # self.check_pause()
        receive = self.get_marker_result()

        element_to_flash = self.flash_sequence[self.sequence_number]
        letter = self.word[self.letter_idx]
        image_index = string.ascii_lowercase.index(letter)

        #pushed markers to LSL stream

        print("Letter: ", image_index, " Element flash: ",
              [element_to_flash + 1])
        self.lsl_output.push_sample([element_to_flash + 1
                                     ])  # add 1 to prevent 0 in markers
        self.flash_single_element(element_to_flash)

        self.output_letter(receive, image_index)

        self.sequence_number = self.sequence_number + 1  #change flash position

    def pos_to_char(self, pos):
        return chr(pos + 97)

    def highlight_target(self, image_index):
        self.show_highlight_letter(self.letter_idx)
        self.highlight_image(image_index)

    def change_image(self, label, img):
        label.configure(image=img)
        label.image = img

    def highlight_image(self, element_no):
        self.change_image(self.image_labels[element_no],
                          self.highlight_letter_images[element_no])
        self.master.after(self.delay, self.unhighlight_image, element_no)

    def unhighlight_image(self, element_no):
        self.change_image(self.image_labels[element_no],
                          self.usable_images[element_no])

        if (FLASH_CONCURRENT):
            self.master.after(self.flash_duration,
                              self.start_concurrent_flashing)
        else:
            self.master.after(self.flash_duration, self.start_flashing)

    def show_highlight_letter(self, pos):

        fontStyle = tkFont.Font(family="Courier", size=40)
        fontStyleBold = tkFont.Font(family="Courier bold", size=40)

        text = Text(root, height=1, font=fontStyle)
        text.tag_configure("bold", font=fontStyleBold)
        text.tag_configure("center", justify='center')

        for i in range(0, len(self.word)):
            if (i != pos):
                text.insert("end", self.word[i])
            else:
                text.insert("end", self.word[i], "bold")

        text.configure(state="disabled", width=10)
        text.tag_add("center", "1.0", "end")

        text.grid(row=self.number_of_rows + 1,
                  column=self.number_of_columns - 4)

    def flash_row_or_col(self, rc_number):
        num_rows = self.number_of_rows
        num_cols = self.number_of_columns

        if rc_number < num_rows:
            for c in range(0, num_cols):  #flash row
                cur_idx = rc_number * num_cols + c
                self.change_image(self.image_labels[cur_idx], self.flash_image)
        else:
            current_column = rc_number - num_rows
            for r in range(0, num_rows):  #flash column
                cur_idx = current_column + r * num_cols
                self.change_image(self.image_labels[cur_idx], self.flash_image)

        self.master.after(self.flash_duration, self.unflash_row_or_col,
                          rc_number)

    def unflash_row_or_col(self, rc_number):
        num_rows = self.number_of_rows
        num_cols = self.number_of_columns
        if rc_number < num_rows:
            for c in range(0, num_cols):  #flash row
                cur_idx = rc_number * num_cols + c
                self.change_image(self.image_labels[cur_idx],
                                  self.usable_images[cur_idx])
        else:
            current_column = rc_number - num_rows
            for r in range(0, num_rows):  #flash column
                cur_idx = current_column + r * num_cols
                self.change_image(self.image_labels[cur_idx],
                                  self.usable_images[cur_idx])

    def flash_multiple_elements(self, element_array):
        for element_no in element_array:
            self.change_image(self.image_labels[element_no], self.flash_image)

        self.master.after(self.flash_duration, self.unflash_multiple_elements,
                          element_array)

    def unflash_multiple_elements(self, element_array):
        for element_no in element_array:
            self.change_image(self.image_labels[element_no],
                              self.usable_images[element_no])

    def flash_single_element(self, element_no):
        self.change_image(self.image_labels[element_no], self.flash_image)
        self.master.after(self.flash_duration, self.unflash_single_element,
                          element_no)

    def unflash_single_element(self, element_no):
        self.change_image(self.image_labels[element_no],
                          self.usable_images[element_no])

    def marker_result(self):
        if not (TEST_UI):
            marker, timestamp = self.inlet_marker.pull_chunk()
            return marker
        else:
            return 0

    def read_lsl_marker(self):
        print("looking for a Markers stream...")
        marker_streams = resolve_byprop('name', 'ResultMarkerStream')
        if marker_streams:
            self.inlet_marker = StreamInlet(marker_streams[0])
            marker_time_correction = self.inlet_marker.time_correction()
            print("Found Markers stream")
Example 12
def record(duration, filename=None, dejitter=False, data_source="EEG"):
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    if not filename:
        filename = os.path.join(
            os.getcwd(),
            "%s_recording_%s.csv" %
            (data_source, strftime("%Y-%m-%d-%H.%M.%S", gmtime())),
        )

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop("type", data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop("name",
                                    "Markers",
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child("channels").first_child()
    ch_names = [ch.child_value("label")]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value("label"))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    print("Start recording at time t=%.3f" % t_init)
    print("Time correction: ", time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print("Time correction: ", time_correction)

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps) + time_correction

    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=["timestamps"] + ch_names)

    if inlet_marker and markers:
        n_markers = len(markers[0][0])
        for ii in range(n_markers):
            data["Marker%d" % ii] = 0
        # process markers:
        for marker in markers:
            # find index of markers
            ix = np.argmin(np.abs(marker[1] - timestamps))
            for ii in range(n_markers):
                data.loc[ix, "Marker%d" % ii] = marker[0][ii]

    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    data.to_csv(filename, float_format="%.3f", index=False)

    print("Done - wrote file: " + filename + ".")
Example 13
class Graph(object):
  def __init__(self, size=(600,350)):
    streams = resolve_byprop('name', 'bci', timeout=2.5)
    try:
      self.inlet = StreamInlet(streams[0])
    except IndexError:
      raise ValueError('Make sure stream name=bci is opened first.')
    
    self.running = True
    
    self.frequency = 250.0
    self.sampleinterval = (1/self.frequency)
    self.timewindow = 10
    self._bufsize = int(self.timewindow/self.sampleinterval)
    self.dataBuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
    self.timeBuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
    self.x = np.empty(self._bufsize,dtype='float64')
    self.y = np.empty(self._bufsize,dtype='float64')
    self.app = QtGui.QApplication([])
    self.plt = pg.plot(title='EEG data from OpenBCI')
    self.plt.resize(*size)
    self.plt.showGrid(x=True,y=True)
    self.plt.setLabel('left','Amplitude','V')
    self.plt.setLabel('bottom','Time','s')
    self.curve = self.plt.plot(self.x,self.y,pen=(255,0,0))
    self.sample = np.zeros(8)
    self.timestamp = 0.0

    #QTimer
    self.timer = QtCore.QTimer()
    self.timer.timeout.connect(self.update)
    self.timer.start(int(self.sampleinterval * 1000))  # QTimer.start() expects milliseconds

  def _graph_lsl(self):
    while self.running:
      # initial run
      self.sample, self.timestamp = self.inlet.pull_sample(timeout=5)
      if self.timeBuffer[0] == 0.0:
        self.timeBuffer = collections.deque([self.timestamp] * self._bufsize, self._bufsize)

      # time correction to sync to local_clock()
      try:
        if self.timestamp is not None and self.sample is not None:
          self.timestamp = self.timestamp + self.inlet.time_correction(timeout=5) 

      except TimeoutError:
        pass

    print('closing graphing utility')
    self.inlet.close_stream()

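  # update() runs on the QTimer: append the latest sample/timestamp to the ring buffers and redraw the curve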
  def update(self):
    self.dataBuffer.append(self.sample[3])
    self.y[:] = self.dataBuffer
    self.timeBuffer.append(self.timestamp)
    self.x[:] = self.timeBuffer

    if len(self.x):
      print(self.x[0])
    else:
      print('no data yet')

    self.curve.setData(self.x,self.y)
    self.app.processEvents()

  def start(self):
    self.lsl_thread = threading.Thread(target=self._graph_lsl)
    self.lsl_thread.start()
  
  def stop(self):
    self.running = False
    self.lsl_thread.join(5)
# 0 = left ear, 1 = left forehead, 2 = right forehead, 3 = right ear
INDEX_CHANNEL = [0]

if __name__ == "__main__":
    """ 1. CONNECT TO EEG STREAM """

    # Search for active LSL streams
    print('Looking for an EEG stream...')
    streams = resolve_byprop('type', 'EEG', timeout=2)
    if len(streams) == 0:
        raise RuntimeError('Can\'t find EEG stream.')

    # Set active EEG stream to inlet and apply time correction
    print("Start acquiring data")
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()

    # Get the stream info and description
    info = inlet.info()
    description = info.desc()

    # Get the sampling frequency
    # This is an important value that represents how many EEG data points are
    # collected in a second. This influences our frequency band calculation.
    # for the Muse 2016, this should always be 256
    fs = int(info.nominal_srate())
    """ 2. INITIALIZE BUFFERS """

    # Initialize raw EEG data buffer
    eeg_buffer = np.zeros((int(fs * BUFFER_LENGTH), 1))
    filter_state = None  # for use with the notch filter
Example 15
import numpy as np
import pandas as pd
import time
from psychopy import visual, core, event
from glob import glob
from random import choice, random
from psychopy.visual import ShapeStim
from pylsl import StreamInlet, resolve_byprop
import threading

#%%
print("looking for a Markers stream...")
marker_streams = resolve_byprop('type', 'Markers')
if marker_streams:
    inlet_marker = StreamInlet(marker_streams[0])
    marker_time_correction = inlet_marker.time_correction()
    print("Found Markers stream")

# %%
import math


def getFrames(freq):
    framerate = 60  # mywin.getActualFrameRate()
    frame = int(round(framerate / freq))
    frame_on = math.ceil(frame / 2)
    frame_off = math.floor(frame / 2)
    return frame_on, frame_off

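# For example, getFrames(10) returns (3, 3): a 10 Hz flicker at the assumed 60 Hz refresh rate is 3 frames on, 3 frames off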

# %%
Example 16
def record(duration, filename=None, dejitter=False, data_source="EEG", exp=None):
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    experiments = ["baseline","auditory_oddball","visual_oddball","posture"]

    if exp not in experiments:
        print("Please enter one of: baseline, auditory_oddball, visual_oddball, posture.")
        return

#    if exp != "baseline":
#        if exp != "oddball":
#            if exp != "posture":
#                print("Please enter either baseline or oddball for the experiment.")
#                return
# line 44 creates another file ("Stop_EEG2.csv") so muselsl knows when to start/stop recording.
    if not filename:
        filename = os.path.join(os.getcwd(
        ), "%s_recording_%s.csv" % (data_source, strftime('%Y-%m-%d-%H.%M.%S', gmtime())))

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop('type', data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)

    stop_eeg = [0]
    np.savetxt("/Users/mathlab/muse_exp/Experiments/Stop_EEG2.csv", (stop_eeg), delimiter=',',fmt="%s")
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop(
        'name', 'Markers', timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

# lines 77-90 will constantly check to see if the above file exists
    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while os.path.isfile("/Users/mathlab/muse_exp/Experiments/Stop_EEG2.csv") == True:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print('Time correction: ', time_correction)

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps) + time_correction

    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)

# lines 120-143 will create the file name for our recorded data based on our experiment, and it will also check to see what participant number (ie. 001, 002, etc) should be used. It starts at "001" and if a file already exists with that number, increases it by 1 and checks again
    if inlet_marker and markers:
        n_markers = len(markers[0][0])
        for ii in range(n_markers):
            data['Marker%d' % ii] = 0
        # process markers:
        for marker in markers:
            # find index of markers
            ix = np.argmin(np.abs(marker[1] - timestamps))
            for ii in range(n_markers):
                data.loc[ix, 'Marker%d' % ii] = marker[0][ii]

    directory = os.path.dirname(filename)
    if not os.path.exists(directory):
        os.makedirs(directory)

    if exp == 'baseline':
        exp_loc = 'Baseline'
    elif exp == 'auditory_oddball':
        exp_loc = 'Auditory_P3'
    elif exp == 'visual_oddball':
        exp_loc = 'Visual_P3'
    elif exp == 'posture':
        exp_loc = 'Posture_EKG'

    partnum = '001'
    while os.path.isfile('/Users/mathlab/Desktop/MUSE_STROKE_ STUDY/Muse_Baseline_Data/EEG_data/' + partnum + '_' + data_source + '_' + exp + '_stroke_study_updated.csv') == True:
        if int(partnum) >= 9:
            partnum = '0' + str(int(partnum) + 1)
        else:
            partnum = '00' + str(int(partnum) + 1)

    filename = '/Users/mathlab/Desktop/MUSE_STROKE_ STUDY/Muse_Baseline_Data/EEG_data/' + partnum + '_' + data_source + '_' + exp + '_stroke_study_updated.csv'

    data.to_csv(filename, float_format='%.3f', index=False)

    print('Done - wrote file: ' + filename + '.')
Example 17
def recordData():
    global timeReadings
    """ 1. CONNECT TO EEG STREAM """

    # Search for active LSL streams
    print('Looking for an EEG stream...')
    streams = resolve_byprop('type', 'EEG', timeout=2)
    if len(streams) == 0:
        raise RuntimeError('Can\'t find EEG stream.')

    # Set active EEG stream to inlet and apply time correction
    print("Start acquiring data")
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()

    # Get the stream info and description
    info = inlet.info()
    description = info.desc()

    # Get the sampling frequency
    # This is an important value that represents how many EEG data points are
    # collected in a second. This influences our frequency band calculation.
    # for the Muse 2016, this should always be 256
    fs = int(info.nominal_srate())
    """ 2. INITIALIZE BUFFERS """

    # Initialize raw EEG data buffer
    eeg_buffer = np.zeros((int(fs * BUFFER_LENGTH), 1))
    filter_state = None  # for use with the notch filter

    # Compute the number of epochs in "buffer_length"
    n_win_test = int(
        np.floor((BUFFER_LENGTH - EPOCH_LENGTH) / SHIFT_LENGTH + 1))

    # Initialize the band power buffer (for plotting)
    # bands will be ordered: [delta, theta, alpha, beta]
    band_buffer = np.zeros((n_win_test, 4))
    """ 3. GET DATA """

    # The try/except structure allows to quit the while loop by aborting the
    # script with <Ctrl-C>
    # print('Press Ctrl-C in the console to break the while loop.')
    i = 0

    try:
        #     # The following loop acquires data, computes band powers, and calculates neurofeedback metrics based on those band powers
        while temp:
            """ 3.1 ACQUIRE DATA """
            # Obtain EEG data from the LSL stream
            eeg_data, timestamp = inlet.pull_chunk(timeout=1,
                                                   max_samples=int(
                                                       SHIFT_LENGTH * fs))

            # Only keep the channel we're interested in
            ch_data = np.array(eeg_data)[:, INDEX_CHANNEL]

            # Update EEG buffer with the new data
            eeg_buffer, filter_state = utils.update_buffer(
                eeg_buffer, ch_data, notch=True, filter_state=filter_state)
            """ 3.2 COMPUTE BAND POWERS """
            # Get newest samples from the buffer
            data_epoch = utils.get_last_data(eeg_buffer, EPOCH_LENGTH * fs)

            # Compute band powers
            band_powers = utils.compute_band_powers(data_epoch, fs)
            band_buffer, _ = utils.update_buffer(band_buffer,
                                                 np.asarray([band_powers]))
            # Compute the average band powers for all epochs in buffer
            # This helps to smooth out noise
            smooth_band_powers = np.mean(band_buffer, axis=0)

            # print('Delta: ', band_powers[Band.Delta], ' Theta: ', band_powers[Band.Theta],
            #       ' Alpha: ', band_powers[Band.Alpha], ' Beta: ', band_powers[Band.Beta])
            """ 3.3 COMPUTE NEUROFEEDBACK METRICS """
            # These metrics could also be used to drive brain-computer interfaces

            # Alpha Protocol:
            # Simple readout of alpha power, divided by delta power in order to rule out noise
            alpha_metric = smooth_band_powers[Band.Alpha] / \
                smooth_band_powers[Band.Delta]
            alphaReadings.append(alpha_metric)
            # print('Alpha Relaxation: ', alpha_metric)

            # Beta Protocol:
            # Beta waves have been used as a measure of mental activity and concentration
            # This beta over theta ratio is commonly used as neurofeedback for ADHD
            beta_metric = smooth_band_powers[Band.Beta] / \
                smooth_band_powers[Band.Theta]
            betaReadings.append(beta_metric)
            # print('Beta Concentration: ', beta_metric)

            # Alpha/Theta Protocol:
            # This is another popular neurofeedback metric for stress reduction
            # Higher theta over alpha is supposedly associated with reduced anxiety
            theta_metric = smooth_band_powers[Band.Theta] / \
                smooth_band_powers[Band.Alpha]
            thetaReadings.append(theta_metric)

            dt = datetime.datetime.now().strftime("%x %X")
            if dt in timeReadings.keys():
                tempDt = timeReadings[dt]
                aph = tempDt['alpha']
                bth = tempDt['beta']
                tth = tempDt['theta']
                aph.append(alpha_metric)
                bth.append(beta_metric)
                tth.append(theta_metric)
            else:
                timeReadings = {
                    dt: {
                        "alpha": [alpha_metric],
                        "beta": [beta_metric],
                        "theta": [theta_metric]
                    }
                }
                result.append(timeReadings)
            i = i + 1

    except KeyboardInterrupt:
        print('Closing!')
Example 18
class Lsl_receiver:

    def __init__(self):
        self.inlet = None
        self.stream = None
        self.data_buffer = deque(maxlen=125*5) #efficiently implemented list object, thread safe; 125*5 samples == 5 seconds at a 125 Hz sampling rate
        self.time_buffer = deque(maxlen=125*5) #buffer for timestamps
        self.offset = 0.0
        self.recording = False
        self.collection_thread = Thread(target=self.grab_data)
        self.lock = Lock() # lock buffer so that only one process is working on it

        self.cl_min = 0
        self.cl_max = 0
        self.threshold = 0
        self.threshold_calculated = False


    def auto_resolve(self):
        streams = resolve_stream() # get all current streams
        for stream in streams:
            print(stream)
            if stream.name() == "obci_eeg1": #looking for stream with name ...
                self.stream = stream
                print("Found stream!!!!!!!")

        if self.stream is None:
            print("No stream found")
            return False

        self.inlet = StreamInlet(self.stream) #inlet for handling data from the stream
        print("Sampling rate:", self.inlet.info().nominal_srate())

        self.offset = self.inlet.time_correction(timeout=3)

    def grab_data(self):
        while self.recording:
            data, times = self.inlet.pull_chunk(timeout=0.0) # each element of data is one sample (e.g. 16 values for 16 channels); if the CPU is too slow, a chunk can contain several samples (list of lists)

            if data: # if at least one sample arrived
                #print(data)
                # f = open("eeg_eyes_closed.txt", "a")
                # np.savetxt(f, data)
                # f.write("\n")
                # f.close()

                with self.lock: # use the lock to protect the buffers from concurrent access
                    self.data_buffer.extend(data) # extend: puts data into data_buffer
                    self.time_buffer.extend(np.array(times) + self.offset)

            time.sleep(0.0001) # brief sleep so the loop does not peg the CPU

    def cut_segment(self, timestamp, segment_size_in_sec): #timestamp we are interested in, second argument: segment_size
        data_segment = None
        time_segment = None
        with self.lock:
            diffs = np.abs([timestamp-ts for ts in self.time_buffer])  # create list of differences from timestamp
            index = int(diffs.argmin()) #index of timestamp with smallest difference
            segment_size = int(self.inlet.info().nominal_srate() * segment_size_in_sec)


            #print("index of timestamp with smallest difference: ", index)
            #print("current segment_size: ", segment_size)
            #print("length of data_buffer: ", len(self.data_buffer))
            #sanity checks: segment_size not bigger than buffer_size, index+segment_length not bigger than buffer

            data_segment = list(islice(self.data_buffer, index, index + segment_size))
            time_segment = list(islice(self.time_buffer, index, index + segment_size))

        return data_segment, time_segment


    def start_recording(self):
        self.recording = True
        self.collection_thread.daemon = True # daemon thread ends automatically when the main thread finishes
        self.collection_thread.start()

    def stop_recording(self):
        self.recording = False
        self.collection_thread.join() #clean up threads


    def cl_index(self, segment_powers, alpha_channel, theta_channel):
        # alpha_channel, theta_channel -> choosing channel of interest for each band

        segment_powers = np.array(segment_powers)
        theta = segment_powers[theta_channel, 0]  # row index selects channel(s), column 0 = theta band power
        alpha = segment_powers[alpha_channel, 1]  # column 1 = alpha band power (may select several channels)

        cli_all_channel = np.atleast_1d(theta / alpha)  # cognitive load index per selected channel
        cli_mean_of_channel = np.mean(cli_all_channel)  # mean works for a single channel as well
        return cli_mean_of_channel, cli_all_channel
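    # Worked example (illustrative values, not from a recording): with
    # segment_powers = [[4.0, 2.0], [6.0, 3.0]] (rows = channels, column 0 = theta,
    # column 1 = alpha), cl_index(segment_powers, [0, 1], [0, 1]) yields a per-channel
    # CLI of theta/alpha = [2.0, 2.0] and a mean CLI of 2.0.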

    ####################################################################################################################
    #  Calculates Cognitive Load adapted from Pfurtscheller, 1999
    def erd_ers(self, absolute_power, alpha_channel, theta_channel, baseline):
        baseline = np.array(baseline)
        absolute_power = np.array(absolute_power)
        theta = absolute_power[theta_channel, 0]
        alpha = absolute_power[alpha_channel, 1]

        abs_power = np.zeros(2)  # array holding [theta, alpha]
        # abs_power_ = [theta, alpha]
        abs_power[0] = theta
        abs_power[1] = alpha

        cog_load = np.zeros(len(abs_power))

        for i in range(2):

            # ERD/ERS% percentage of power decrease or increase
            cog_load[i] = ((baseline[i]- abs_power[i])/ baseline[i])*100

        #print("Cog Load", cog_load)

        return cog_load
    ####################################################################################################################
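    # Worked example for erd_ers (illustrative numbers): with baseline = [2.0, 4.0]
    # (theta, alpha) and a segment whose absolute power gives theta = 3.0 and alpha = 2.0,
    # ERD/ERS% = ((baseline - power) / baseline) * 100, i.e.
    # theta: ((2.0 - 3.0) / 2.0) * 100 = -50 %  (theta power increased),
    # alpha: ((4.0 - 2.0) / 4.0) * 100 = +50 %  (alpha power decreased).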


    # plot of Cog Load CLi
    def plot_cli(self, cli_list):
        plt.figure(1)
        plt.plot(cli_list)
        plt.show()



    def send_hint(self, cli_list):
        # send a hint if the mean of the last CLI values exceeds the threshold

        threshold = 4
        num_of_last_values = 10
        if (do_pretest):
            threshold = lsl.threshold

        cli_last_values = cli_list[-num_of_last_values:-1] # the most recent CLI values, excluding the newest one
        if (len(cli_last_values) >= 2):
            cli_mean = np.mean(cli_last_values)
            print("mean of last cli values: ", cli_mean)

            if (cli_mean > threshold): # choose threshold as hard coded, lsl.threshold as calculated from pretest
                print("Show hint!!!")

                requests.post("http://localhost:25080", json.dumps({'hint': 1}))

    ####################################################################################################################
    def plot_cogload(self, cog_load_theta):

        plt.figure(2)
        plt.plot(cog_load_theta)
        plt.xlabel('time in sec')
        plt.ylabel('ERD/ERS %')
        plt.show()
    
    ####################################################################################################################

    # calculates mean of first test phase
    def calculate_cl_min(self, cli_list):
        num_last_values = 30
        cli_list = cli_list[-num_last_values:-1]
        cli_list = np.asarray(cli_list)
        min_value = np.mean(cli_list)
        self.cl_min = min_value

    ####################################################################################################################
    # Calculates mean of theta and alpha of 29 last values in selected channel for first test phase
    def calculate_baseline(self, baseline_list):
        baseline_list = np.array(baseline_list)
        # 34 values
        num_last_values = 30
        # theta_baseline = baseline_list
        baseline_list = baseline_list[-num_last_values:-1]  # --> 2x29 values
        baseline_list = np.asanyarray(baseline_list)
        baseline = np.mean(baseline_list, axis=0)

        self.cl_baseline = baseline

        return baseline
    ####################################################################################################################

    def calculate_cl_max(self, cli_list):
        num_last_values = 40
        cli_list = cli_list[-num_last_values:-1]
        cli_list = np.asarray(cli_list)
        max_value = max(cli_list)
        self.cl_max = max_value

    def calculate_threshold(self):
        self.threshold = 0.9 * self.cl_max

    def start_task2(self):
        requests.post("http://localhost:25080",
                      json.dumps({'brainTeaser': "Please remember the sequence of numbers and speak it out loud, when the numbers disappear"}))
        time.sleep(5)

        requests.post("http://localhost:25080",
                      json.dumps({'numbers': [["2","392"], ["2", "5346"],["2,5", "28975"], ["3", "640901"], ["3,5", "8475132"], ["4","10738295"], ["4","923717562"], ["5", "28461053042"]]}))


    def end_task2(self):

        requests.post("http://localhost:25080",
                      json.dumps({'brainTeaser': "Thank you"}))
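A minimal usage sketch for the receiver above (assumptions: an LSL stream named "obci_eeg1" is running, and the 2-second segment length is arbitrary):

from pylsl import local_clock
import time

lsl = Lsl_receiver()
if lsl.auto_resolve() is not False:          # a stream was found
    lsl.start_recording()
    time.sleep(5)                            # let the ring buffer fill up
    # grab the most recent 2 seconds relative to "now" on the local LSL clock
    data_segment, time_segment = lsl.cut_segment(local_clock() - 2, 2)
    lsl.stop_recording()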
Example n. 19
        help='channel number to use. If not specified, all the channels are used')

    args = parser.parse_args()

    """ 1. CONNECT TO EEG STREAM """

    # Search for active LSL stream
    print('Looking for an EEG stream...')
    streams = resolve_byprop('type', 'EEG', timeout=2)
    if len(streams) == 0:
        raise RuntimeError('Can\'t find EEG stream.')

    # Set active EEG stream to inlet and apply time correction
    print("Start acquiring data")
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()

    # Get the stream info, description, sampling frequency, number of channels
    info = inlet.info()
    description = info.desc()
    fs = int(info.nominal_srate())
    n_channels = info.channel_count()

    # Get names of all channels
    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, n_channels):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    """ 2. SET EXPERIMENTAL PARAMETERS """
Example n. 20
                  help="Name of the recording file.")

# dejitter timestamps
dejitter = False

(options, args) = parser.parse_args()

print("looking for an EEG stream...")
streams = resolve_byprop('type', 'EEG', timeout=10)

if len(streams) == 0:
    raise RuntimeError("Can't find EEG stream")

print("Start aquiring data")
inlet = StreamInlet(streams[0], max_chunklen=12)
eeg_time_correction = inlet.time_correction()

print("looking for a Markers stream...")
marker_streams = resolve_byprop('name', 'Markers', timeout=20)

if marker_streams:
    inlet_marker = StreamInlet(marker_streams[0])
    marker_time_correction = inlet_marker.time_correction()
else:
    inlet_marker = False
    print("Cant find Markers stream")

info = inlet.info()
description = info.desc()

freq = info.nominal_srate()
Example n. 21
def record(
    duration: int,
    filename=None,
    dejitter=False,
    data_source="EEG",
    continuous: bool = True,
) -> None:
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    if not filename:
        filename = os.path.join(
            os.getcwd(), "%s_recording_%s.csv" %
            (data_source, strftime('%Y-%m-%d-%H.%M.%S', gmtime())))

    channel_idx = 0
    num_of_data = 750
    existing = pd.DataFrame()
    index = index_list
    model = torch.load("Emotion_Detector.pt")

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop('type', data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop('name',
                                    'Markers',
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    last_written_timestamp = None
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                # print("Data: " + str(data))
                new_arr = pd.DataFrame(data)
                combine = [existing, new_arr]
                existing = pd.concat(combine).reset_index(drop=True)
                print(len(existing))
                if len(existing) >= num_of_data:
                    row = existing[0:num_of_data]
                    row = row[channel_idx]
                    row = pd.DataFrame(row)
                    row = row.T
                    row.columns = index
                    existing.drop(existing.index[0:num_of_data], inplace=True)
                    existing = existing.reset_index(drop=True)
                    row = row.iloc[0, :]
                    row, clas, probs = model.predict(row)
                    if clas.int() == 2:
                        print("Negative emotion predicted!")
                    elif clas.int() == 1:
                        print("Positive emotion predicted!")
                    elif clas.int() == 0:
                        print("Neutral emotion")
                    else:
                        pass

                res.append(data)
                timestamps.extend(timestamp)
                tr = time()
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])

            # Save every 5s
            if continuous and (last_written_timestamp is None
                               or last_written_timestamp + 5 < timestamps[-1]):
                _save(
                    filename,
                    res,
                    timestamps,
                    time_correction,
                    dejitter,
                    inlet_marker,
                    markers,
                    ch_names,
                    last_written_timestamp=last_written_timestamp,
                )
                last_written_timestamp = timestamps[-1]

        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print("Time correction: ", time_correction)

    _save(
        filename,
        res,
        timestamps,
        time_correction,
        dejitter,
        inlet_marker,
        markers,
        ch_names,
    )

    print("Done - wrote file: {}".format(filename))
Example n. 22
class Lslrecorder:
    def __init__(self):
        self.mutex = QMutex()
        self.timeStamps = None
        self.stream = None
        self.inlet = None
        self.info = None
        self.channelCount = None
        self.doRec = False
        self.srate = None
        self.data = None
        self.bufferUpdateThread = None

    def findStream(self, hostname=None, timeout=1):
        # Resolve an LSL stream, create the corresponding inlet and buffer, and return the stream's channel count
        print("Searching for streams with a timeout of " + str(timeout) +
              " seconds")
        streams = resolve_stream(timeout)
        if len(streams) < 1:
            print("No stream found - exiting")
            return -1
        else:
            print("Found " + str(len(streams)) + " streams")
        if hostname is None:
            print(
                "No stream hostname has been specified - selecting first stream"
            )
            self.stream = streams[0]
        else:
            for stream in streams:
                if stream.hostname() == hostname:
                    self.stream = stream
                    print("Selected stream with hostname " + str(hostname))
                    break
            if self.stream is None:
                print("No stream with hostname " + str(hostname) +
                      " has been found - exiting")
                return -1

        self.inlet = StreamInlet(self.stream)
        self.info = self.inlet.info()
        self.channelCount = self.info.channel_count()
        self.srate = self.info.nominal_srate()
        self.data = np.empty((0, self.channelCount))
        try:
            self.offset = self.inlet.time_correction(timeout=3)
            print("Offset: " + str(self.offset))
        except TimeoutError:
            self.offset = 0
            print("Offset Retrieval Timed Out")

        #print("Stream Meta Info:")
        #print(self.info.as_xml())
        return self.channelCount

    def startRec(self):
        # Create and Start buffer update thread as daemon so that it gets terminated automatically when program exits
        self.doRec = True
        self.bufferUpdateThread = Thread(target=self.grabData, args=())
        self.bufferUpdateThread.daemon = True
        self.bufferUpdateThread.start()

    def stopRec(self):
        self.doRec = False
        self.bufferUpdateThread.join()
        print("Stopped recording")

    def grabData(self):
        print("Starting recording")
        while self.doRec:
            c, t = self.inlet.pull_chunk(timeout=0.0)
            if c:
                # add offset to timestamps and transform timestamps to ms and round to int
                tmp_t = np.array([int((ts + self.offset) * 1000) for ts in t])

                self.mutex.lock()
                self.data = np.concatenate((self.data, c), axis=0)
                if self.timeStamps is None:
                    self.timeStamps = np.array(tmp_t)
                else:
                    self.timeStamps = np.concatenate((self.timeStamps, tmp_t))
                self.mutex.unlock()
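A minimal usage sketch for the recorder above (assumptions: at least one LSL stream is visible on the network, and the 5-second recording window is arbitrary):

import time

rec = Lslrecorder()
if rec.findStream(timeout=2) > 0:   # findStream returns the channel count, or -1 on failure
    rec.startRec()
    time.sleep(5)                   # record for roughly 5 seconds
    rec.stopRec()
    print(rec.data.shape)           # samples x channels gathered so far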
Example n. 23
class PupilTracker(object):
  def __init__(self):
    pupil_queue = Queue()
    self.pupil_proc = Process(target=pupil_capture.alternate_launch,
                              args=((pupil_queue), ))
    self.pupil_proc.start()

    while True:
      pupil_msg = pupil_queue.get()
      print(pupil_msg)
      if 'tcp' in pupil_msg:
        self.ipc_sub_url = pupil_msg
      if 'EYE_READY' in pupil_msg:
        break

    context = zmq.Context()
    self.socket = zmq.Socket(context, zmq.SUB)
    monitor = self.socket.get_monitor_socket()

    self.socket.connect(self.ipc_sub_url)
    while True:
      status = recv_monitor_message(monitor)
      if status['event'] == zmq.EVENT_CONNECTED:
        break
      elif status['event'] == zmq.EVENT_CONNECT_DELAYED:
        pass
    print('Capturing from pupil on url %s.' % self.ipc_sub_url)
    self.socket.subscribe('pupil')

    # setup LSL
    streams = resolve_byprop('name', LSL_STREAM_NAME, timeout=2.5)
    try:
      self.inlet = StreamInlet(streams[0])
    except IndexError:
      raise ValueError('Make sure stream name="%s", is opened first.'
          % LSL_STREAM_NAME)

    self.running = True
    self.samples = []

  # LSL and pupil samples are synchronized to local_clock(), which is the
  # runtime on this slave, not the host
  def _record_lsl(self):
    while self.running:
      sample, timestamp = self.inlet.pull_sample(timeout=5)

      # time correction to sync to local_clock()
      try:
        if timestamp is not None and sample is not None:
          timestamp = timestamp + self.inlet.time_correction(timeout=5) 

          samples_lock.acquire()
          self.samples.append(('STIM', timestamp, sample))
          samples_lock.release()

      except TimeoutError:
        pass

    print('closing lsl on the pupil side')
    self.inlet.close_stream()

  def _record_pupil(self):
    while self.running:
      topic = self.socket.recv_string()
      payload = serializer.loads(self.socket.recv(), encoding='utf-8')

      samples_lock.acquire()
      self.samples.append(('pupil', local_clock(), payload['diameter']))
      samples_lock.release()

    print('Terminating pupil tracker recording.')
      
  def capture(self):
    self.pupil_thread = threading.Thread(target=self._record_pupil)
    self.lsl_thread = threading.Thread(target=self._record_lsl)
    self.pupil_thread.start()
    self.lsl_thread.start()

  def export_data(self):
    self.running = False

    self.pupil_thread.join(5)
    self.lsl_thread.join(5)
    print('Joined threads, now outputting pupil data.')

    i = 0
    while os.path.exists("data/pupil/data-%s.csv" % i):
      i += 1

    # csv writer with stim_type, msg, and timestamp, then data
    with open('data/pupil/data-%s.csv' % i, 'w+') as f:
      writer = csv.writer(f)
      writer.writerow(('Signal Type', 'Msg', 'Time', 'Channel 1', 'Channel 2', 'Channel 3', 'Channel 4', 'Channel 5', 'Channel 6', 'Channel 7', 'Channel 8' ))
      for sample in self.samples:
        signal_type, timestamp, datas = sample
        out = (signal_type, 'msg', timestamp)
        for data in datas:
          out = out + (data,)
        writer.writerow(out)

  def __str__(self):
    return 'Pupil tracker listening to %s' % self.ipc_sub_url

  def __del__(self):
    try:
      self.inlet.close_stream()
    except AttributeError:
      raise AttributeError('self.inlet does not exist. Most likely the LSL stimuli stream was not opened yet.')

    self.pupil_proc.terminate()
"""Example program to show how to read a multi-channel time series from LSL."""

from pylsl import StreamInlet, resolve_stream
import time

# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
streams = resolve_stream('type', 'EEG')
info = streams[0]

# create a new inlet to read from the stream
inlet = StreamInlet(info)

print('Connected to outlet ' + info.name() + '@' + info.hostname())
while True:
    offset = inlet.time_correction()
    print('Offset: ' + str(offset))
    time.sleep(1)
Example n. 25
"""Example program to show how to read a multi-channel time series from LSL."""

from pylsl import StreamInlet, resolve_stream
import time

# first resolve an EEG stream on the lab network
print("looking for an EEG stream...")
# streams = resolve_stream('type', 'EEG')
streams = resolve_stream()

print(streams[0].source_id())
# print(streams[1].source_id())

# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])

i=0
while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    sample, timestamp = inlet.pull_sample()
    timeCorr = inlet.time_correction()
    print(timestamp, '\n',sample, '\n', timeCorr, '\n')
    # if i==10:
    #     break
    # break
    i = i + 1
    time.sleep(0.01)
Example n. 26
def record(duration, recording_path):
    data_source="EEG"
    dejitter=False
    filename = recording_path
    print("looking for an EEG stream...")
    streams = resolve_byprop('type', 'EEG', timeout=2)

    if len(streams) == 0:
        raise RuntimeError("Cant find EEG stream")

    print("Start aquiring data")
    inlet = StreamInlet(streams[0], max_chunklen=12)
    eeg_time_correction = inlet.time_correction()

    print("looking for a Markers stream...")
    marker_streams = resolve_byprop('type', 'Markers', timeout=2)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
        marker_time_correction = inlet_marker.time_correction()
    else:
        inlet_marker = False
        print("Cant find Markers stream")

    info = inlet.info()
    description = info.desc()

    freq = info.nominal_srate()
    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    print('Start recording at time t=%.3f' % t_init)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                            max_samples=12)
            if timestamp:
                res.append(data)
                timestamps.extend(timestamp)
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])
        except KeyboardInterrupt:
            break

    res = np.concatenate(res, axis=0)
    timestamps = np.array(timestamps)

    if dejitter:
        y = timestamps
        X = np.atleast_2d(np.arange(0, len(y))).T
        lr = LinearRegression()
        lr.fit(X, y)
        timestamps = lr.predict(X)

    res = np.c_[timestamps, res]
    data = pd.DataFrame(data=res, columns=['timestamps'] + ch_names)

    data['Marker'] = 0
    # process markers:
    for marker in markers:
        # find index of markers
        ix = np.argmin(np.abs(marker[1] - timestamps))
        val = timestamps[ix]
        data.loc[ix, 'Marker'] = marker[0][0]

    data.to_csv(filename, float_format='%.3f', index=False)
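A minimal usage sketch for the function above (assumptions: an EEG LSL stream is already being broadcast, e.g. via muselsl, and the file name is arbitrary):

if __name__ == '__main__':
    record(60, 'eeg_session.csv')  # record 60 seconds of EEG plus markers to CSV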
Example n. 27
    help="Name of the recording file.")

# dejitter timestamps
dejitter = False

(options, args) = parser.parse_args()

print("looking for an EEG stream...")
streams = resolve_byprop('type', 'EEG', timeout=2)

if len(streams) == 0:
    raise RuntimeError("Can't find EEG stream")

print("Start aquiring data")
inlet = StreamInlet(streams[0], max_chunklen=12)
eeg_time_correction = inlet.time_correction(
)  # Retrieve an estimated time correction offset for the given stream

print("looking for a Markers stream...")
marker_streams = resolve_byprop('type', 'Markers', timeout=2)

if marker_streams:
    inlet_marker = StreamInlet(marker_streams[0])
    marker_time_correction = inlet_marker.time_correction()
else:
    inlet_marker = False
    print("Cant find Markers stream")

info = inlet.info()
description = info.desc()

freq = info.nominal_srate()
Example n. 28
def record(
    duration: int,
    filename=None,
    dejitter=False,
    data_source="EEG",
    continuous: bool = True,
) -> None:
    chunk_length = LSL_EEG_CHUNK
    if data_source == "PPG":
        chunk_length = LSL_PPG_CHUNK
    if data_source == "ACC":
        chunk_length = LSL_ACC_CHUNK
    if data_source == "GYRO":
        chunk_length = LSL_GYRO_CHUNK

    if not filename:
        filename = os.path.join(
            os.getcwd(), "%s_recording_%s.csv" %
            (data_source, strftime('%Y-%m-%d-%H.%M.%S', gmtime())))

    channel_idx = 1
    num_of_data = 178
    existing = pd.DataFrame()
    model = load_model(
        "/home/pi/.virtualenvs/muse_lsl_env/lib/python3.7/site-packages/muselsl/Epilepsy.h5"
    )
    print("Initialized Variables")

    print("Looking for a %s stream..." % (data_source))
    streams = resolve_byprop('type', data_source, timeout=LSL_SCAN_TIMEOUT)

    if len(streams) == 0:
        print("Can't find %s stream." % (data_source))
        return

    print("Started acquiring data.")
    inlet = StreamInlet(streams[0], max_chunklen=chunk_length)
    # eeg_time_correction = inlet.time_correction()

    print("Looking for a Markers stream...")
    marker_streams = resolve_byprop('name',
                                    'Markers',
                                    timeout=LSL_SCAN_TIMEOUT)

    if marker_streams:
        inlet_marker = StreamInlet(marker_streams[0])
    else:
        inlet_marker = False
        print("Can't find Markers stream.")

    info = inlet.info()
    description = info.desc()

    Nchan = info.channel_count()

    ch = description.child('channels').first_child()
    ch_names = [ch.child_value('label')]
    for i in range(1, Nchan):
        ch = ch.next_sibling()
        ch_names.append(ch.child_value('label'))

    res = []
    timestamps = []
    markers = []
    t_init = time()
    time_correction = inlet.time_correction()
    last_written_timestamp = None
    print('Start recording at time t=%.3f' % t_init)
    print('Time correction: ', time_correction)
    while (time() - t_init) < duration:
        try:
            data, timestamp = inlet.pull_chunk(timeout=1.0,
                                               max_samples=chunk_length)

            if timestamp:
                # print("Data: " + str(data))
                new_arr = pd.DataFrame(data)
                combine = [existing, new_arr]
                existing = pd.concat(combine).reset_index(drop=True)
                # print(len(existing))

                if len(existing) >= num_of_data:
                    row = existing[0:num_of_data]
                    row = row[channel_idx]
                    row = row.values.reshape(-1, 178, 1)
                    existing.drop(existing.index[0:num_of_data], inplace=True)
                    existing = existing.reset_index(drop=True)
                    predictions = model.predict(
                        (row[:, ::4] - row.mean()) / row.std())
                    result = np.argmax(predictions[0]) + 1
                    print("Result: " + str(result))
                res.append(data)
                timestamps.extend(timestamp)
                tr = time()
            if inlet_marker:
                marker, timestamp = inlet_marker.pull_sample(timeout=0.0)
                if timestamp:
                    markers.append([marker, timestamp])

            # Save every 5s
            if continuous and (last_written_timestamp is None
                               or last_written_timestamp + 5 < timestamps[-1]):
                _save(
                    filename,
                    res,
                    timestamps,
                    time_correction,
                    dejitter,
                    inlet_marker,
                    markers,
                    ch_names,
                    last_written_timestamp=last_written_timestamp,
                )
                last_written_timestamp = timestamps[-1]

        except KeyboardInterrupt:
            break

    time_correction = inlet.time_correction()
    print("Time correction: ", time_correction)

    _save(
        filename,
        res,
        timestamps,
        time_correction,
        dejitter,
        inlet_marker,
        markers,
        ch_names,
    )

    print("Done - wrote file: {}".format(filename))
Example n. 29
class P300Window(object):
    def __init__(self, master: Tk):
        self.master = master
        master.title('P300 speller')

        #Parameters
        self.imagesize = 125
        self.images_folder_path = '../utils/images/'  #use utils/char_generator to generate any image you want
        self.flash_image_path = '../utils/images/flash_images/einstein.jpg'
        self.number_of_rows = 6
        self.number_of_columns = 6  #make sure you have 6 x 6 amount of images in the images_folder_path
        self.flash_mode = 2  # 2 = flash single elements; 1 = flash rows and columns (row/column mode is not working yet)
        self.flash_duration = 100  # SOA (stimulus onset asynchrony), in ms
        self.break_duration = 125  # ITI (inter-trial interval), in ms

        self.trials = 6  #number of letters
        self.delay = 2500  # interval between trials, in ms
        self.letter_idx = 0

        # numbers are not included yet!
        self.random_letter = random.choices(
            string.ascii_lowercase,
            k=self.trials)  # pick self.trials random letters
        self.word = ''.join(self.random_letter)

        # Variables
        self.usable_images = []
        self.image_labels = []
        self.flash_sequence = []
        self.flash_image = None
        self.sequence_number = 0
        self.lsl_output = None

        self.running = 0  #for pause

        self.image_frame = Frame(self.master)
        self.image_frame.grid(row=0,
                              column=0,
                              rowspan=self.number_of_rows,
                              columnspan=self.number_of_columns)

        self.start_btn_text = StringVar()
        self.start_btn_text.set('Start')
        self.start_btn = Button(self.master,
                                textvariable=self.start_btn_text,
                                command=self.start)
        self.start_btn.grid(row=self.number_of_rows + 3,
                            column=self.number_of_columns - 1)

        self.pause_btn = Button(self.master, text='Pause', command=self.pause)
        self.pause_btn.grid(row=self.number_of_rows + 3,
                            column=self.number_of_columns - 4)  #-4 for center
        self.pause_btn.configure(state='disabled')

        self.close_btn = Button(self.master, text='Close', command=master.quit)
        self.close_btn.grid(row=self.number_of_rows + 3, column=0)

        fontStyle = tkFont.Font(family="Courier", size=40)

        self.output = Text(root, height=1, font=fontStyle)
        self.output.tag_configure("red", foreground="red")
        self.output.tag_configure("green", foreground="green")
        self.output.configure(width=10)
        self.output.insert("end", "  ")
        self.output.grid(row=self.number_of_rows + 2,
                         column=self.number_of_columns - 4)

        self.outputlabel = Label(root, text="Output: ", font=fontStyle)
        self.outputlabel.grid(row=self.number_of_rows + 2,
                              column=self.number_of_columns - 5)

        self.targetlabel = Label(root, text="Target: ", font=fontStyle)
        self.targetlabel.grid(row=self.number_of_rows + 1,
                              column=self.number_of_columns - 5)

        self.show_highlight_letter(0)

        # Initialization
        self.show_images()
        self.create_flash_sequence()
        self.lsl_output = self.create_lsl_output()

    def open_images(self):
        self.usable_images = []
        self.highlight_letter_images = []

        letter_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path, 'letter_images/*.png')))

        # number images are currently not flashed yet!
        number_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path, 'number_images/*.png')))
        letter_highlight_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path,
                             'letter_highlight_images/*.png')))
        number_highlight_images = sorted(
            glob.glob(
                os.path.join(self.images_folder_path,
                             'number_highlight_images/*.png')))

        for number_image in number_images:
            letter_images.append(number_image)
        #print("Paths: ", letter_images)
        min_number_of_images = self.number_of_columns * self.number_of_rows
        if len(letter_images) < min_number_of_images:
            print('Too few images in folder: ' + self.images_folder_path)
            return

        # Convert and resize images
        for image_path in letter_images:
            image = Image.open(image_path)
            resized = image.resize((self.imagesize, self.imagesize),
                                   Image.BICUBIC)
            Tkimage = ImageTk.PhotoImage(resized)
            self.usable_images.append(Tkimage)

        # Convert and resize images
        for image_path in letter_highlight_images:
            image = Image.open(image_path)
            resized = image.resize((self.imagesize, self.imagesize),
                                   Image.BICUBIC)
            Tkimage = ImageTk.PhotoImage(resized)
            self.highlight_letter_images.append(Tkimage)

        flash_img = Image.open(self.flash_image_path)
        flash_img_res = flash_img.resize((self.imagesize, self.imagesize),
                                         Image.BICUBIC)
        self.flash_image = ImageTk.PhotoImage(flash_img_res)

    def show_images(self):
        self.open_images()

        if self.usable_images == []:
            print('No images opened')
            return

        num_rows = self.number_of_rows
        num_cols = self.number_of_columns

        # Arrange images
        for r in range(0, num_rows):
            for c in range(0, num_cols):
                current_image = self.usable_images[r * num_cols + c]
                label = Label(self.image_frame, image=current_image)
                label.image = current_image
                label.grid(row=r, column=c)
                self.image_labels.append(label)

    def create_lsl_output(self):
        """Creates an LSL Stream outlet"""
        info = StreamInfo(name='LetterMarkerStream',
                          type='LetterFlashMarkers',
                          channel_count=1,
                          channel_format='int8',
                          nominal_srate=IRREGULAR_RATE,
                          source_id='lettermarker_stream',
                          handle=None)

        return StreamOutlet(info)  #for sending the predicted classes

    def create_flash_sequence(self):
        self.flash_sequence = []
        num_rows = self.number_of_rows
        num_cols = self.number_of_columns
        maximum_number = num_rows * num_cols

        flash_sequence = []

        for i in range(10000):
            seq = list(range(maximum_number))  # generate 0 .. maximum_number - 1
            random.shuffle(seq)  #shuffle
            flash_sequence.extend(seq)

        self.flash_sequence = flash_sequence

    def start(self):
        self.read_lsl_marker()
        self.running = 1
        letter = self.word[0]
        image_index = string.ascii_lowercase.index(letter)
        self.highlight_image(image_index)
        self.start_btn.configure(state='disabled')
        self.pause_btn.configure(state='normal')

    def pause(self):
        self.running = 0
        self.start_btn_text.set('Resume')
        self.start_btn.configure(state='normal')
        self.pause_btn.configure(state='disabled')

    def start_flashing(self):
        if self.sequence_number == len(
                self.flash_sequence
        ):  #stop flashing if all generated sequence number runs out
            print('All elements have been flashed - flash sequence exhausted')
            self.running = 0
            self.sequence_number = 0
            return

        if self.running == 0:
            print('Flashing paused at sequence number ' +
                  str(self.sequence_number))
            return

        result = self.marker_result()
        if (result):
            print("Marker received: ", result[0][0])
            receive = result[0][0]
        else:
            receive = 0

        element_to_flash = self.flash_sequence[self.sequence_number]
        letter = self.word[self.letter_idx]
        image_index = string.ascii_lowercase.index(letter)

        #pushed markers to LSL stream
        timestamp = local_clock()
        print("Letter: ", image_index, " Element flash: ",
              [element_to_flash + 1], timestamp)
        self.lsl_output.push_sample([element_to_flash + 1],
                                    timestamp)  # add 1 to prevent 0 in markers
        self.flash_single_element(element_to_flash)

        if not (receive):
            self.master.after(self.break_duration, self.start_flashing)
        else:
            if ((image_index + 1) == receive):
                self.output.insert("end", self.pos_to_char(receive), "green")
            else:
                self.output.insert("end", self.pos_to_char(receive), "red")

            self.letter_idx += 1
            if (self.letter_idx == len(self.word)):
                return
            letter = self.word[self.letter_idx]
            image_index = string.ascii_lowercase.index(letter)
            self.master.after(self.break_duration, self.highlight_target,
                              image_index)

        self.sequence_number = self.sequence_number + 1  #change flash position

    def pos_to_char(self, pos):
        return chr(pos - 1 + 97)

    def highlight_target(self, image_index):
        self.show_highlight_letter(self.letter_idx)
        self.highlight_image(image_index)

    def change_image(self, label, img):
        label.configure(image=img)
        label.image = img

    def highlight_image(self, element_no):
        self.change_image(self.image_labels[element_no],
                          self.highlight_letter_images[element_no])
        self.master.after(self.delay, self.unhighlight_image, element_no)

    def unhighlight_image(self, element_no):
        self.change_image(self.image_labels[element_no],
                          self.usable_images[element_no])
        self.master.after(self.flash_duration, self.start_flashing)

    def show_highlight_letter(self, pos):

        fontStyle = tkFont.Font(family="Courier", size=40)
        fontStyleBold = tkFont.Font(family="Courier bold", size=40)

        text = Text(root, height=1, font=fontStyle)
        text.tag_configure("bold", font=fontStyleBold)
        text.tag_configure("center", justify='center')

        for i in range(0, len(self.word)):
            if (i != pos):
                text.insert("end", self.word[i])
            else:
                text.insert("end", self.word[i], "bold")

        text.configure(state="disabled", width=10)
        text.tag_add("center", "1.0", "end")

        text.grid(row=self.number_of_rows + 1,
                  column=self.number_of_columns - 4)

    def flash_row_or_col(self, rc_number):
        num_rows = self.number_of_rows
        num_cols = self.number_of_columns

        if rc_number < num_rows:
            for c in range(0, num_cols):  #flash row
                cur_idx = rc_number * num_cols + c
                self.change_image(self.image_labels[cur_idx], self.flash_image)
        else:
            current_column = rc_number - num_rows
            for r in range(0, num_rows):  #flash column
                cur_idx = current_column + r * num_cols
                self.change_image(self.image_labels[cur_idx], self.flash_image)

        self.master.after(self.flash_duration, self.unflash_row_or_col,
                          rc_number)

    def unflash_row_or_col(self, rc_number):
        num_rows = self.number_of_rows
        num_cols = self.number_of_columns
        if rc_number < num_rows:
            for c in range(0, num_cols):  #flash row
                cur_idx = rc_number * num_cols + c
                self.change_image(self.image_labels[cur_idx],
                                  self.usable_images[cur_idx])
        else:
            current_column = rc_number - num_rows
            for r in range(0, num_rows):  #flash column
                cur_idx = current_column + r * num_cols
                self.change_image(self.image_labels[cur_idx],
                                  self.usable_images[cur_idx])

    def flash_single_element(self, element_no):
        self.change_image(self.image_labels[element_no], self.flash_image)
        self.master.after(self.flash_duration, self.unflash_single_element,
                          element_no)

    def unflash_single_element(self, element_no):
        self.change_image(self.image_labels[element_no],
                          self.usable_images[element_no])

    def marker_result(self):
        marker, timestamp = self.inlet_marker.pull_chunk()
        return marker

    def read_lsl_marker(self):
        print("looking for a Markers stream...")
        marker_streams = resolve_byprop('name', 'ResultMarkerStream')
        if marker_streams:
            self.inlet_marker = StreamInlet(marker_streams[0])
            marker_time_correction = self.inlet_marker.time_correction()
            print("Found Markers stream")
Example n. 30
class Receive(Node):
    """Receive from a LSL stream.

    Attributes:
        o (Port): Default output, provides DataFrame and meta.

    Args:
        prop (string): The property to look for during stream resolution (e.g., ``name``, ``type``, ``source_id``).
        value (string): The value that the property should have (e.g., ``EEG`` for the type property).
        timeout (float): The resolution timeout, in seconds.
        unit (string): Unit of the timestamps (e.g., ``s``, ``ms``, ``us``, ``ns``). The LSL library uses seconds by default. Timeflux uses nanoseconds. Default: ``s``.
        sync (string, None): The method used to synchronize timestamps. Use ``local`` if you receive the stream from another application on the same computer. Use ``network`` if you receive from another computer. Use ``None`` if you receive from a Timeflux instance on the same computer.
        channels (list, None): Override the channel names. If ``None``, the names defined in the LSL stream will be used.
        max_samples (int): The maximum number of samples to return per call.

    Example:
        .. literalinclude:: /../examples/lsl_multiple.yaml
           :language: yaml

    """
    def __init__(
        self,
        prop="name",
        value=None,
        timeout=1.0,
        unit="s",
        sync="local",
        channels=None,
        max_samples=1024,
    ):
        if not value:
            raise ValueError(
                "Please specify a stream name or a property and value.")
        self._prop = prop
        self._value = value
        self._inlet = None
        self._labels = None
        self._unit = unit
        self._sync = sync
        self._channels = channels
        self._timeout = timeout
        self._max_samples = max_samples
        self._offset = np.timedelta64(
            int((time() - pylsl.local_clock()) * 1e9), "ns")

    def update(self):
        if not self._inlet:
            self.logger.debug(
                f"Resolving stream with {self._prop} {self._value}")
            streams = resolve_byprop(self._prop,
                                     self._value,
                                     timeout=self._timeout)
            if not streams:
                return
            self.logger.debug("Stream acquired")
            self._inlet = StreamInlet(streams[0])
            info = self._inlet.info()
            self._meta = {
                "name": info.name(),
                "type": info.type(),
                "rate": info.nominal_srate(),
                "info": str(info.as_xml()).replace("\n", "").replace("\t", ""),
            }
            if isinstance(self._channels, list):
                self._labels = self._channels
            else:
                description = info.desc()
                channel = description.child("channels").first_child()
                self._labels = [channel.child_value("label")]
                for _ in range(info.channel_count() - 1):
                    channel = channel.next_sibling()
                    self._labels.append(channel.child_value("label"))
        if self._inlet:
            values, stamps = self._inlet.pull_chunk(
                max_samples=self._max_samples)
            if stamps:
                stamps = pd.to_datetime(stamps, format=None, unit=self._unit)
                if self._sync == "local":
                    stamps += self._offset
                elif self._sync == "network":
                    stamps = (stamps + np.timedelta64(
                        int(self._inlet.time_correction() * 1e9), "ns") +
                              self._offset)
            self.o.set(values, stamps, self._labels, self._meta)
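The _offset computed in __init__ above bridges the LSL clock and the system clock: LSL timestamps are expressed on pylsl.local_clock(), so converting them to wall-clock datetimes means adding the difference between time() and local_clock(). A standalone sketch of that idea (assumes only that pylsl, numpy and pandas are installed):

from time import time

import numpy as np
import pandas as pd
import pylsl

# difference between the Unix clock and the LSL local clock, in nanoseconds
offset = np.timedelta64(int((time() - pylsl.local_clock()) * 1e9), "ns")

# convert an LSL timestamp (seconds on local_clock) into an absolute datetime
lsl_stamp = pylsl.local_clock()
print(pd.to_datetime([lsl_stamp], unit="s") + offset)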
Example n. 31
    def record_multiple(self, filename=None):
        self.processing = False
        print("Looking for streams")
        # Gets all LSL streams within the system
        streams = resolve_streams()
        # print(len(streams))
        print(filename)
        if len(streams) < 3:
            raise ValueError("Insufficient Streams")
        # Assign each used stream to an inlet
        for stream in streams:
            if stream.type() == 'EEG':
                inlet_eeg = StreamInlet(stream, max_chunklen=LSL_EEG_CHUNK)
            elif stream.type() == 'PPG':
                inlet_ppg = StreamInlet(stream, max_chunklen=LSL_PPG_CHUNK)
            elif stream.type() == 'Markers':
                inlet_markers = StreamInlet(stream)

        # Get info and description of channels names for data dumping
        # Info for EEG
        info_eeg = inlet_eeg.info()
        description_eeg = info_eeg.desc()
        nchan_eeg = info_eeg.channel_count()
        ch_eeg = description_eeg.child('channels').first_child()
        ch_names_eeg = [ch_eeg.child_value('label')]
        for i in range(1, nchan_eeg):
            ch_eeg = ch_eeg.next_sibling()
            ch_names_eeg.append(ch_eeg.child_value('label'))

        # Info for PPG
        info_ppg = inlet_ppg.info()
        description_ppg = info_ppg.desc()
        nchan_ppg = info_ppg.channel_count()
        ch_ppg = description_ppg.child('channels').first_child()
        ch_names_ppg = [ch_ppg.child_value('label')]
        for i in range(1, nchan_ppg):
            ch_ppg = ch_ppg.next_sibling()
            ch_names_ppg.append(ch_ppg.child_value('label'))

        res_eeg = []
        timestamps_eeg = []
        res_ppg = []
        timestamps_ppg = []
        markers = []
        # ppgs = []
        # timestamp_markers = []
        t_init = time.time()
        last_timestamp = 0
        time_correction_eeg = inlet_eeg.time_correction()
        time_correction_ppg = inlet_ppg.time_correction()

        print("Start recording")
        while self.recording:
            # print(last_timestamp - t_init)
            try:
                chunk_eeg, ts_eeg = inlet_eeg.pull_chunk(
                    max_samples=LSL_EEG_CHUNK)
                chunk_ppg, ts_ppg = inlet_ppg.pull_chunk(
                    max_samples=LSL_PPG_CHUNK)
                marker, timestamp_markers = inlet_markers.pull_sample()
                # print("Seconds elapsed %.4f" % (time.time() - t_init))
                # if timestamp_markers and ts_eeg and ts_ppg:
                if ts_eeg:
                    # print('I am here')
                    res_eeg.append(chunk_eeg)
                    timestamps_eeg.extend(ts_eeg)
                if ts_ppg:
                    res_ppg.append(chunk_ppg)
                    timestamps_ppg.extend(ts_ppg)
                if timestamp_markers:
                    markers.append([marker, timestamp_markers])
                    last_timestamp = timestamp_markers
                    # print(last_timestamp)
                # progress = (last_timestamp - t_init)/(duration+1.4)*100
                # print(progress)
                if time.time() - t_init + 1.2 > (10 * 60.0):
                    self.recording = False

            except KeyboardInterrupt:
                break

        self.processing = True
        time_correction_eeg = inlet_eeg.time_correction()
        time_correction_ppg = inlet_ppg.time_correction()
        print("Time corrections: EEG {}, PPG {}".format(
            time_correction_eeg, time_correction_ppg))

        res_eeg = np.concatenate(res_eeg, axis=0)
        res_ppg = np.concatenate(res_ppg, axis=0)
        timestamps_ppg = np.array(timestamps_ppg) + time_correction_ppg
        timestamps_eeg = np.array(timestamps_eeg) + time_correction_eeg

        ts_df_eeg = pd.DataFrame(np.c_[timestamps_eeg - timestamps_eeg[0]],
                                 columns=['timestamps'])
        ts_df_ppg = pd.DataFrame(np.c_[timestamps_ppg - timestamps_ppg[0]],
                                 columns=['timestamps'])

        res_eeg = np.c_[timestamps_eeg, res_eeg]
        res_ppg = np.c_[timestamps_ppg, res_ppg]
        data_eeg = pd.DataFrame(data=res_eeg,
                                columns=['timestamps'] + ch_names_eeg)
        data_ppg = pd.DataFrame(data=res_ppg,
                                columns=['timestamps'] + ch_names_ppg)

        n_markers = len(markers[0][0])
        t = time.time()
        n = 0
        for ii in range(n_markers):
            data_eeg['Marker%d' % ii] = "NaN"
            data_ppg['Marker%d' % ii] = 'NaN'
            # Process markers
            for marker in markers:
                ix_eeg = np.argmin(np.abs(marker[1] - timestamps_eeg))
                ix_ppg = np.argmin(np.abs(marker[1] - timestamps_ppg))
                self.progress = int(n / len(markers) * 100)
                n += 1
                for i in range(n_markers):
                    # print("Time elapsed: {0} (s)".format(time.time()-t))
                    data_eeg.loc[ix_eeg, 'Marker%d' % i] = marker[0][i]
                    data_ppg.loc[ix_ppg, 'Marker%d' % i] = marker[0][i]
        print("Process took {0} seconds to complete".format(time.time() - t))
        data_eeg.update(ts_df_eeg)
        data_ppg.update(ts_df_ppg)

        recordings_path = os.path.join(os.getcwd(), 'recordings')
        if not os.path.exists(recordings_path):
            os.mkdir(recordings_path)
        # Change to the directory
        os.chdir(recordings_path)
        print(recordings_path)

        data_ppg.to_csv('PPG_' + filename + '.csv',
                        float_format='%.3f',
                        index=False)
        data_eeg.to_csv('EEG_' + filename + '.csv',
                        float_format='%.3f',
                        index=False)
        self.processing = False

        print("Success! Both files written")
        os.chdir('..')