Example #1
0
def main ():
    """Demo: stream from the synthetic board, print the newest samples, and
    round-trip the data through BrainFlow's own file serialization.

    Side effects: writes/overwrites 'test.csv' in the working directory.
    """
    BoardShim.enable_dev_board_logger ()

    # use synthetic board for demo
    params = BrainFlowInputParams ()
    board = BoardShim (BoardIds.SYNTHETIC_BOARD.value, params)
    board.prepare_session ()
    board.start_stream ()
    BoardShim.log_message (LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep (10)
    data = board.get_current_board_data (20) # get 20 latest data points dont remove them from internal buffer
    board.stop_stream ()
    board.release_session ()

    # demo how to convert it to pandas DF and plot data
    # (the original also fetched eeg_channels here but never used it; the
    # unused lookup has been removed)
    df = pd.DataFrame (np.transpose (data))
    print ('Data From the Board')
    print (df.head (10))

    # demo for data serialization using brainflow API, we recommend to use it instead pandas.to_csv()
    DataFilter.write_file (data, 'test.csv', 'w') # use 'a' for append mode
    restored_data = DataFilter.read_file ('test.csv')
    restored_df = pd.DataFrame (np.transpose (restored_data))
    print ('Data From the File')
    print (restored_df.head (10))
	def read_data(self):
		"""Poll the board for fresh samples, persist and buffer them, and
		trigger classification once enough samples have accumulated."""
		if self.board.get_board_data_count() > 0:
			raw_data = self.board.get_board_data()
			raw_eeg_data = utils.extract_eeg_data(raw_data, global_config.BOARD_ID)

			# Append the raw samples to disk when an output directory is set.
			if self.root_directory_label.text() != "":
				full_path = self.root_directory_label.text() + "/" + global_config.EEG_DATA_FILE_NAME
				DataFilter.write_file(raw_eeg_data, full_path, "a")
				self.slice_generator.write_to_file(self.root_directory_label.text())

			# Make room for new samples, discard the oldest
			self.data_buffer = np.roll(self.data_buffer, shift=-raw_eeg_data.shape[1], axis=1)

			# Insert new samples
			# Electrode numbers appear to be 1-based, hence the -1 — TODO confirm.
			first_index = self.feature_extraction_info.first_electrode() - 1
			last_index = self.feature_extraction_info.last_electrode()  # Not including
			self.data_buffer[:, self.data_buffer.shape[1] - raw_eeg_data.shape[1]:] = raw_eeg_data[first_index:last_index, :]

			# Each column of raw_eeg_data is one sample.
			self.samples_push_count += raw_eeg_data.shape[1]
			self.sample_count += raw_eeg_data.shape[1]
			# Online-training samples only count while no task-switch timer is pending.
			if self.online_training and self.online_training_timer is None:
				self.online_training_samples_push_count += raw_eeg_data.shape[1]

			# Classify once repetition_interval seconds' worth of samples arrived.
			if self.samples_push_count >= self.config.repetition_interval * self.feature_extraction_info.sampling_rate:
				self.classify_data()
				self.samples_push_count = 0

			# In online training, classify a full feature window, then schedule
			# the next mental task after MENTAL_TASK_DELAY ms.
			if self.online_training_samples_push_count >= self.config.feature_window_size * self.feature_extraction_info.sampling_rate:
				self.classify_data(online_training=True)
				self.online_training_samples_push_count = 0
				self.online_training_timer = QTimer()
				self.online_training_timer.singleShot(self.MENTAL_TASK_DELAY, self.next_mental_task)
				self.clear_highlight_tile()
Example #3
0
    def read_eeg_data(self):
        """Refresh the countdown label and persist any newly available EEG samples."""
        elapsed = time.time() - self.timer_start_time
        remaining = math.floor(self.configurations.trial_duration +
                               self.configurations.relaxation_period -
                               elapsed)
        self.timer_label.setText("{} sec".format(remaining))

        # Nothing buffered yet — wait for the next poll.
        if self.board.get_board_data_count() <= 0:
            return

        raw_data = self.board.get_board_data()
        raw_eeg_data = utils.extract_eeg_data(raw_data,
                                              global_config.BOARD_ID)

        # Each column is one sample; grow the running count accordingly.
        self.sample_count += raw_eeg_data.shape[1]

        if self.configurations.validate_saving_info():
            full_path = self.configurations.root_directory + "/" + global_config.EEG_DATA_FILE_NAME

            # Append mode: repeated runs against the same file accumulate
            # data rather than overwriting it.
            DataFilter.write_file(raw_eeg_data, full_path, "a")
Example #4
0
def end_test():
    """Stop the stream, tear down the session, and dump all buffered samples
    to a timestamped CSV file in the current directory."""
    recorded = board.get_board_data()
    board.stop_stream()
    board.release_session()

    stamp = datetime.now().strftime("%d.%m.%Y_%H.%M.%S")
    DataFilter.write_file(recorded, stamp + "_eeg_data.csv", "w")
    print("\nTest ended.")
def main ():
    """Connect to a Cyton-based board, switch it to analog mode mid-stream,
    and dump the captured data (including raw end bytes) to a text file."""
    parser = argparse.ArgumentParser ()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
    parser.add_argument ('--ip-port', type = int, help  = 'ip port', required = False, default = 0)
    parser.add_argument ('--ip-protocol', type = int, help  = 'ip protocol, check IpProtocolType enum', required = False, default = 0)
    parser.add_argument ('--ip-address', type = str, help  = 'ip address', required = False, default = '')
    parser.add_argument ('--serial-port', type = str, help  = 'serial port', required = False, default = '')
    parser.add_argument ('--mac-address', type = str, help  = 'mac address', required = False, default = '')
    parser.add_argument ('--other-info', type = str, help  = 'other info', required = False, default = '')
    # BUG FIX: help text previously read 'other info' (copy-paste slip).
    parser.add_argument ('--streamer-params', type = str, help  = 'streamer params', required = False, default = '')
    parser.add_argument ('--board-id', type = int, help  = 'board id, check docs to get a list of supported boards', required = True)
    parser.add_argument ('--log', action = 'store_true')
    args = parser.parse_args ()

    params = BrainFlowInputParams ()
    params.ip_port = args.ip_port
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info
    params.ip_address = args.ip_address
    params.ip_protocol = args.ip_protocol
    # NOTE(review): args.streamer_params is parsed but never forwarded to
    # start_stream — confirm whether it should be.

    if (args.log):
        BoardShim.enable_dev_board_logger ()
    else:
        BoardShim.disable_board_logger ()

    board = BoardShim (args.board_id, params)
    board.prepare_session ()

    board.start_stream ()
    print('Session Started')
    for _ in range(2):
        time.sleep (5)
        board.config_board ('/2') # enable analog mode only for Cyton Based Boards!
    time.sleep (5)
    data = board.get_board_data ()
    board.stop_stream ()
    board.release_session ()

    """
    data[BoardShim.get_other_channels(args.board_id)[0]] contains cyton end byte
    data[BoardShim.get_other_channels(args.board_id)[1....]] contains unprocessed bytes
    if end byte is 0xC0 there are accel data in data[BoardShim.get_accel_channels(args.board_id)[....]] else there are zeros
    if end byte is 0xC1 there are analog data in data[BoardShim.get_analog_channels(args.board_id)[....]] else there are zeros
    """
    print (data[BoardShim.get_other_channels(args.board_id)[0]][0:5]) # should be standard end byte 0xC0
    print (data[BoardShim.get_other_channels(args.board_id)[0]][-5:]) # should be analog end byte 0xC1

    DataFilter.write_file (data, 'cyton_data_new.txt', 'w')
Example #6
0
def main ():
    """Connect to the board named on the command line, record for ten seconds,
    print the newest samples, and round-trip them through a CSV file."""
    parser = argparse.ArgumentParser ()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
    int_options = [
        ('--timeout', 'timeout for device discovery or connection'),
        ('--ip-port', 'ip port'),
        ('--ip-protocol', 'ip protocol, check IpProtocolType enum'),
    ]
    str_options = [
        ('--ip-address', 'ip address'),
        ('--serial-port', 'serial port'),
        ('--mac-address', 'mac address'),
        ('--other-info', 'other info'),
        ('--streamer-params', 'streamer params'),
        ('--serial-number', 'serial number'),
    ]
    for flag, text in int_options:
        parser.add_argument (flag, type = int, help = text, required = False, default = 0)
    for flag, text in str_options:
        parser.add_argument (flag, type = str, help = text, required = False, default = '')
    parser.add_argument ('--board-id', type = int, help = 'board id, check docs to get a list of supported boards', required = True)
    parser.add_argument ('--log', action = 'store_true')
    args = parser.parse_args ()

    params = BrainFlowInputParams ()
    params.timeout = args.timeout
    params.ip_port = args.ip_port
    params.ip_protocol = args.ip_protocol
    params.ip_address = args.ip_address
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info
    params.serial_number = args.serial_number

    BoardShim.enable_dev_board_logger ()

    board = BoardShim (args.board_id, params)
    board.prepare_session ()
    board.start_stream ()
    BoardShim.log_message (LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep (10)
    # Peek at the 20 newest samples without draining the internal buffer.
    data = board.get_current_board_data (20)
    board.stop_stream ()
    board.release_session ()

    # Show the samples as a pandas DataFrame (channels become columns).
    frame = pd.DataFrame (np.transpose (data))
    print ('Data From the Board')
    print (frame.head (10))

    # Round-trip through BrainFlow's own serialization, which is preferred
    # over pandas.to_csv() for board data.
    DataFilter.write_file (data, 'test.csv', 'w') # use 'a' for append mode
    reloaded = DataFilter.read_file ('test.csv')
    reloaded_frame = pd.DataFrame (np.transpose (reloaded))
    print ('Data From the File')
    print (reloaded_frame.head (10))
 def poll(self, sample_num):
     """Block until at least ``sample_num`` samples are buffered, then drain
     the board, append the raw samples to disk, and return them as a frame.

     The original wrapped the wait loop in ``try/except Exception: raise (e)``,
     a no-op that only muddied tracebacks; it has been removed.
     """
     # Busy-wait (20 ms granularity) until enough samples have accumulated.
     while self.board.get_board_data_count() < sample_num:
         time.sleep(0.02)
     board_data = self.board.get_board_data()
     # Raw string: the original '.\Data\...' literal relied on '\D' and '\c'
     # being unrecognized escapes (a DeprecationWarning); the value is identical.
     DataFilter.write_file(board_data, r'.\Data\cyton_data_new.txt',
                           'a')  # 'a' appends; 'w' overwrites
     # Could add check to see if file already exists, adding a 1, 2, etc. on the end to avoid conflict
     # Could use date function for generating names based on date-time.
     return board_2_df(np.transpose(board_data))
	def read_data(self):
		"""Poll the board while a recording is active, stream samples to disk,
		and stop once the requested number of samples has been captured."""
		# Nothing to do unless a data or reference recording is running.
		if not self.recording and not self.recording_reference:
			return

		# The progress dialog's maximum doubles as the target sample count.
		recording_duration_in_samples = self.recording_progress_dialog.maximum()

		if self.recording_reference:
			# Stop when the reference buffer is full or the user cancelled.
			if self.reference_eeg_data.get_channel_data(0).shape[0] > recording_duration_in_samples or\
					self.recording_progress_dialog.wasCanceled():
				self.stop_recording(True)
				return

		if self.recording:
			# Same termination conditions for the main data recording.
			if self.recording_progress_dialog.wasCanceled() or\
					self.eeg_data_buffer.get_channel_data(0).shape[0] > recording_duration_in_samples:
				self.stop_recording(self.recording_reference)
				return

		if self.board.get_board_data_count() > 0:
			raw_data = self.board.get_board_data()
			raw_eeg_data = utils.extract_eeg_data(raw_data, global_config.BOARD_ID)

			# Each column of raw_eeg_data is one sample.
			self.eeg_sample_count += raw_eeg_data.shape[1]

			path = self.root_directory_label.text()

			# Persist the raw samples (append mode) when a directory is chosen.
			if path != "":
				full_path = path + "/" + global_config.RESONANCE_DATA_FILE_NAME
				DataFilter.write_file(raw_eeg_data, full_path, "a")

			# Route the samples to whichever buffer is being recorded and
			# advance the progress dialog by the buffered sample count.
			if self.recording_reference:
				self.reference_eeg_data.append_data(raw_eeg_data)
				print(f"reference size: {self.reference_eeg_data.sample_count()}")
				self.recording_progress_dialog.setValue(self.reference_eeg_data.get_channel_data(0).shape[0])
			else:
				self.eeg_data_buffer.append_data(raw_eeg_data)
				print(f"data size: {self.eeg_data_buffer.sample_count()}")
				self.recording_progress_dialog.setValue(self.eeg_data_buffer.get_channel_data(0).shape[0])
Example #9
0
	def read_data(self):
		"""
		If available, read data from the board, append it to the capture file,
		and push the new samples into the channel widgets and graphs.
		"""
		if self.boardShim.get_board_data_count() > 0:
			raw_data = self.boardShim.get_board_data()

			raw_eeg_data = utils.extract_eeg_data(raw_data, global_config.BOARD_ID)

			# Append the raw samples to the chosen .csv capture file, if any.
			if self.save_to_file and self.file_path != "" and self.file_path.endswith(".csv"):
				DataFilter.write_file(raw_eeg_data, self.file_path, "a")

			# One row of raw_eeg_data per channel widget — assumes the two are
			# index-aligned; TODO confirm against channel construction.
			for channel_index in range(len(self.channels)):
				self.channels[channel_index].add_data_points(raw_eeg_data[channel_index])

			# Refresh the FFT graph and recompute band power for the selected
			# channel (its filtered data reshaped into a 1-row matrix).
			self.fft_graph.update_graph()
			self.band_power_graph.update_values(
				self.channels[self.band_power_channel_index]
				.filtered_data.reshape(1, self.channels[self.band_power_channel_index].filtered_data.shape[0]),
				float(self.fft_window_size_combo.currentText()))
Example #10
0
def animate(i, board):
    """Matplotlib animation callback: drain the board, optionally log to CSV,
    and redraw the time-domain, frequency-domain, and (optionally) averaged
    frequency plots.

    Relies on module-level configuration (output_file, create_csv, colsused,
    leftbord, rightbord, avg_plot, legend, fft_type, label, label_thres,
    min_dis, avg_thres, avg_min_dis, welch_left, welch_right) and helpers
    (bandpass_data, process_data, find_lims).
    """
    data = board.get_board_data().T
    # Plain statement replaces the original `expr if create_csv else True` trick.
    if create_csv:
        DataFilter.write_file(data.T, output_file, 'a')

    # Restrict to the configured columns (channels) when a selection is given.
    data = data[:,
                colsused] if isinstance(colsused, np.ndarray) else data[:, :]
    cols = colsused if isinstance(colsused, np.ndarray) else np.arange(
        data[0].size)
    maxdat = data[:, 0].size if len(data.shape) > 1 else data.size
    leftborder = 0 if math.isnan(leftbord) else leftbord
    rightborder = maxdat if math.isnan(rightbord) else rightbord
    sum_welch = np.zeros(maxdat)

    # time domain plot
    ax1 = plt.subplot(3 if avg_plot and cols.size > 1 else 2, 1, 1)
    plt.cla()
    ax1.set_title("Time Domain")
    ax1.set_xlabel("Sample Index")
    ax1.set_ylabel("Voltage (mV)")
    # Loop variable renamed to `ch`: the original shadowed the frame-index
    # parameter `i` in all three channel loops.
    for ch in range(cols.size):
        newdat = bandpass_data(data[:, ch] if len(data.shape) > 1 else data,
                               leftborder, rightborder)
        index = leftborder + np.arange(newdat.size)
        ax1.plot(index, newdat)
        if (legend and cols.size > 1):
            if ((cols.size < 12 and not avg_plot)
                    or (cols.size < 6 and avg_plot)):
                ax1.legend(cols)

    # fft/welch plot for all channels
    ax2 = plt.subplot(3 if avg_plot and cols.size > 1 else 2, 1, 2)
    plt.cla()
    ax2.set_title("Frequency Domain using " + fft_type)
    ax2.set_xlabel("Frequency (Hz)")
    ax2.set_ylabel("Amplitude")
    # plot
    for ch in range(cols.size):
        newdat = bandpass_data(data[:, ch] if len(data.shape) > 1 else data,
                               leftborder, rightborder)
        welchfreq, welch_data = process_data(newdat)
        # BUG FIX: the original tested `'left-lim' and 'right_lim' not in
        # locals()` — the string literal 'left-lim' is always truthy, so only
        # right_lim was ever checked. Both names are assigned together, so the
        # observable behavior is unchanged by this correction.
        if 'left_lim' not in locals() and 'right_lim' not in locals():
            left_lim, right_lim = find_lims(welchfreq, welch_data, welch_left,
                                            welch_right)
        if 'avg_freq' not in locals():
            avg_freq = welchfreq
        if 'max_welch' not in locals():
            max_welch = welch_data[left_lim]
        elif max(welch_data[left_lim:right_lim]) > max_welch:
            max_welch = max(welch_data[left_lim:right_lim])
        ax2.plot(welchfreq[left_lim:right_lim], welch_data[left_lim:right_lim])
        if (legend and cols.size > 1):
            if ((cols.size < 12 and not avg_plot)
                    or (cols.size < 6 and avg_plot)):
                ax2.legend(cols)
            else:
                # Too many channels for a legend: annotate each curve's peak.
                max_y = max(welch_data[left_lim:right_lim])
                max_x = welchfreq[left_lim:right_lim][
                    welch_data[left_lim:right_lim].argmax()]
                ax2.annotate(cols[ch], (max_x, max_y),
                             color="red",
                             verticalalignment="bottom",
                             weight="bold")
    # plotting peaks
    for ch in range(cols.size):
        newdat = bandpass_data(data[:, ch] if len(data.shape) > 1 else data,
                               leftborder, rightborder)
        welchfreq, welch_data = process_data(newdat)

        # Accumulate per-channel spectra for the averaged plot below
        # (vectorized; the original used an element-wise Python loop).
        sum_welch[:welch_data.size] += welch_data

        if (label):
            # NOTE(review): left_lim is an array index from find_lims but is
            # compared against frequency values here — looks suspect; behavior
            # preserved as-is pending confirmation.
            index = peakutils.indexes(
                welch_data[left_lim:right_lim],
                thres=0,
                min_dist=math.ceil(
                    np.where(welchfreq > left_lim + min_dis)[0][0] -
                    np.where(welchfreq > left_lim)[0][0]))
            index = [
                x for x in index
                if welch_data[left_lim:right_lim][x] > label_thres * max_welch
            ]
            for value in index:
                ax2.plot(welchfreq[left_lim:right_lim][value],
                         welch_data[left_lim:right_lim][value],
                         marker="o",
                         ls="",
                         ms=3)
                ax2.annotate("{:.2f}".format(
                    welchfreq[left_lim:right_lim][value]),
                             (welchfreq[left_lim:right_lim][value],
                              welch_data[left_lim:right_lim][value]),
                             verticalalignment="top")

    # average fft/welch plot
    if (avg_plot and cols.size > 1):
        ax3 = plt.subplot(313)
        plt.cla()
        ax3.set_title("Average Frequency Domain using " + fft_type)
        ax3.set_xlabel("Frequency (Hz)")
        ax3.set_ylabel("Amplitude")
        ax3.plot(avg_freq[left_lim:right_lim],
                 sum_welch[left_lim:right_lim] / cols.size)
        index = peakutils.indexes(
            sum_welch[left_lim:right_lim] / cols.size,
            thres=avg_thres,
            min_dist=math.ceil(
                np.where(welchfreq > left_lim + avg_min_dis)[0][0] -
                np.where(welchfreq > left_lim)[0][0]))
        if (label):
            for value in index:
                ax3.plot(avg_freq[left_lim:right_lim][value],
                         sum_welch[left_lim:right_lim][value] / cols.size,
                         marker="o",
                         ls="",
                         ms=3)
                ax3.annotate(
                    "{:.2f}".format(avg_freq[left_lim:right_lim][value]),
                    (avg_freq[left_lim:right_lim][value],
                     sum_welch[left_lim:right_lim][value] / cols.size),
                    verticalalignment="top")

    plt.tight_layout()
Example #11
0
    def run(self):
        """Worker loop: watch the user's Spotify playback and, whenever a song
        finishes after playing long enough, dump the buffered board data and
        the song's audio features to per-board CSV files."""
        current_song_id = None
        prev_song_id = None
        is_playing = False
        is_end = False
        # handle the case of skipping songs
        needed_duration = 10  # in seconds
        time_sleep = 1
        counter_for_duration = 0
        counter_max = int(needed_duration / time_sleep)
        current_time = str(time.time())
        # One output folder per board id, created on demand.
        folder_name = str(self.board.get_board_id())
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        brainflow_output_file = os.path.join(
            folder_name, 'brainflow' + current_time + '.csv')
        song_features_output_file = os.path.join(
            folder_name, 'songs' + current_time + '.csv')

        while self.keep_alive:
            time.sleep(time_sleep)
            track = self.spotify.current_user_playing_track()
            if track is not None:
                # despite the check above track obj can become None in case of ads
                try:
                    # A song-id change means the previous song ended.
                    if track.get('item', {}).get(
                            'id'
                    ) != current_song_id and current_song_id is not None:
                        is_end = True
                    if track.get('is_playing', False) and not is_end:
                        try:
                            # despite the check above track object can become None, no idea how
                            current_song_id = track.get('item', {}).get('id')
                            counter_for_duration = counter_for_duration + 1
                            is_end = False
                            is_playing = True
                        except BaseException:
                            pass
                    elif not track.get('is_playing', True):
                        is_end = True
                except AttributeError as e:
                    BoardShim.log_message(
                        LogLevels.LEVEL_WARN.value,
                        'Exception occured, more likely because of ads(its ok): %s'
                        % str(e))
            else:
                is_end = True

            if is_end:
                prev_song_id = current_song_id
                current_song_id = None
                data = self.board.get_board_data()
                # store data when a song ends
                # Only keep songs that played at least needed_duration seconds
                # and actually produced more than one buffered sample.
                if is_playing and counter_for_duration >= counter_max and prev_song_id is not None and data.shape[
                        1] > 1:
                    DataFilter.write_file(data, brainflow_output_file, 'a')
                    features = self.spotify.audio_features(prev_song_id)
                    BoardShim.log_message(
                        LogLevels.LEVEL_DEBUG.value,
                        'adding info about song: %s' % prev_song_id)
                    features_df = pd.DataFrame.from_dict(features)
                    music_feature = features_df[[
                        'danceability', 'energy', 'loudness', 'speechiness',
                        'acousticness', 'instrumentalness', 'liveness',
                        'valence', 'tempo', 'id'
                    ]]
                    # Replicate the single feature row once per EEG sample so
                    # the two CSVs stay row-aligned (tab-separated, append mode).
                    music_features_replicated = pd.concat([music_feature] *
                                                          (data.shape[1] - 1),
                                                          ignore_index=True)
                    music_features_replicated.to_csv(song_features_output_file,
                                                     sep='\t',
                                                     mode='a')
                # Reset per-song state for the next track.
                is_playing = False
                counter_for_duration = 0
                is_end = False
def main():
    """End-to-end demo: record ten seconds of data from the requested board,
    plot and persist it, then run a few BrainFlow filters over the EEG
    channels in place and plot the result."""
    parser = argparse.ArgumentParser()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port,
    option_specs = [
        ('--ip-port', int, 'ip port', 0),
        ('--ip-protocol', int, 'ip protocol, check IpProtocolType enum', 0),
        ('--ip-address', str, 'ip address', ''),
        ('--serial-port', str, 'serial port', ''),
        ('--mac-address', str, 'mac address', ''),
        ('--other-info', str, 'other info', ''),
    ]
    for flag, flag_type, text, fallback in option_specs:
        parser.add_argument(flag,
                            type=flag_type,
                            help=text,
                            required=False,
                            default=fallback)
    parser.add_argument(
        '--board-id',
        type=int,
        help='board id, check docs to get a list of supported boards',
        required=True)
    parser.add_argument('--log', action='store_true')
    args = parser.parse_args()

    params = BrainFlowInputParams()
    params.ip_port = args.ip_port
    params.ip_protocol = args.ip_protocol
    params.ip_address = args.ip_address
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info

    if args.log:
        BoardShim.enable_dev_board_logger()
    else:
        BoardShim.disable_board_logger()

    # demo how to read data as 2d numpy array
    board = BoardShim(args.board_id, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value,
                          'start sleeping in the main thread')
    time.sleep(10)
    # Drain everything from the internal buffer in one call.
    data = board.get_board_data()
    board.stop_stream()
    board.release_session()

    # demo how to convert it to pandas DF and plot data
    eeg_channels = BoardShim.get_eeg_channels(args.board_id)
    sampling_rate = BoardShim.get_sampling_rate(args.board_id)
    df = pd.DataFrame(np.transpose(data))
    print('Data From the Board')
    print(df.head())
    plt.figure()
    df[eeg_channels].plot(subplots=True)
    plt.savefig('before_processing.png')

    # demo for data serialization
    DataFilter.write_file(data, 'test.csv', 'w')
    restored_data = DataFilter.read_file('test.csv')
    restored_df = pd.DataFrame(np.transpose(restored_data))
    print('Data From the File')
    print(restored_df.head())

    # demo how to perform signal processing: one filter kind per channel,
    # expressed as a dispatch table instead of an if/elif ladder
    filter_steps = [
        lambda ch: DataFilter.perform_bandpass(
            data[ch], sampling_rate, 15.0, 6.0, 4,
            FilterTypes.BESSEL.value, 0),
        lambda ch: DataFilter.perform_bandstop(
            data[ch], sampling_rate, 5.0, 1.0, 3,
            FilterTypes.BUTTERWORTH.value, 0),
        lambda ch: DataFilter.perform_lowpass(
            data[ch], sampling_rate, 9.0, 5,
            FilterTypes.CHEBYSHEV_TYPE_1.value, 1),
        lambda ch: DataFilter.perform_highpass(
            data[ch], sampling_rate, 3.0, 4,
            FilterTypes.BUTTERWORTH.value, 0),
    ]
    for count, channel in enumerate(eeg_channels):
        if count < len(filter_steps):
            filter_steps[count](channel)

    df = pd.DataFrame(np.transpose(data))
    print('Data After Processing')
    print(df.head())
    plt.figure()
    df[eeg_channels].plot(subplots=True)
    plt.savefig('after_processing.png')
Example #13
0
def liveStream():
    """Continuously read 250-sample chunks from the synthetic board, print the
    band-power feature vector plus concentration/relaxation predictions, and
    overwrite 'data.csv' with each chunk.

    Fixes over the original: the cleanup after ``while True`` was unreachable,
    so it now runs in a ``finally`` block (e.g. on KeyboardInterrupt), and the
    two ML models are prepared once instead of on every iteration.
    """
    BoardShim.enable_dev_board_logger()
    # use synthetic board for demo
    params = BrainFlowInputParams()
    board_id = BoardIds.SYNTHETIC_BOARD.value
    board = BoardShim(board_id, params)
    eeg_channels = BoardShim.get_eeg_channels(board_id)
    sampling_rate = BoardShim.get_sampling_rate(board_id)
    board.prepare_session()
    board.start_stream()

    # Prepare both classifiers once, outside the loop: the original re-created
    # and released them on every iteration, which is needlessly expensive.
    concentration_params = BrainFlowModelParams(
        BrainFlowMetrics.CONCENTRATION.value,
        BrainFlowClassifiers.KNN.value)
    concentration = MLModel(concentration_params)
    concentration.prepare()
    relaxation_params = BrainFlowModelParams(
        BrainFlowMetrics.RELAXATION.value,
        BrainFlowClassifiers.REGRESSION.value)
    relaxation = MLModel(relaxation_params)
    relaxation.prepare()

    try:
        while True:
            # get_board_data() drains the buffer, so wait for >= 250 samples first.
            while board.get_board_data_count() < 250:
                time.sleep(0.005)
            data = board.get_board_data()

            # Brainflow ML Model: average band powers form the feature vector.
            bands = DataFilter.get_avg_band_powers(data, eeg_channels,
                                                   sampling_rate, True)
            feature_vector = np.concatenate((bands[0], bands[1]))
            print(feature_vector)

            # calc concentration
            print('Concentration: %f' % concentration.predict(feature_vector))

            # calc relaxation
            print('Relaxation: %f' % relaxation.predict(feature_vector))

            DataFilter.write_file(data, 'data.csv', 'w')  #writing data to csv file
    finally:
        # Runs when the loop is interrupted; in the original these calls sat
        # after `while True` and could never execute.
        concentration.release()
        relaxation.release()
        board.stop_stream()
        board.release_session()
Example #14
0
# Stream from either a synthetic or a Cyton board (per CLI args), repeatedly
# grab buffered data, high-pass filter one EEG channel in place, and append
# the selected channels to the raw-data file.
BUFFER_TIME = args.buffer
params = BrainFlowInputParams() if args.synth else fill_in_cyton_params()
board = BoardShim(board_id=board_id, input_params=params)
board.prepare_session()
board.start_stream()

# Let the internal buffer fill before the first read.
time.sleep(BUFFER_TIME)
for i in range(
        3
):  #TODO: make while(True) and elegantly shutdown when board is shutdown or keyboard interrupt
    data = board.get_board_data()
    channel = eegs[1]
    print(data.shape)
    # Mean before filtering — compared below to demonstrate that the filter
    # mutates data[channel] in place.
    foo = np.mean(data[channel])
    log_info(
        f"eeg type is {type(data[channel])} with shape {data[eegs[1]].shape}")
    DataFilter.perform_highpass(data[channel],
                                BoardShim.get_sampling_rate(board_id), 20.0, 5,
                                FilterTypes.CHEBYSHEV_TYPE_1.value, 1)
    # DataFilter.perform_bandstop (data[channel], BoardShim.get_sampling_rate (board_id), 30.0, 1.0, 3, FilterTypes.BUTTERWORTH.value, 0)
    print(f"is foo the same as data[channel]? {foo == np.mean(data[channel])}")
    # Keep only timestamp + EEG + other channels when writing out.
    indexes = [timestamp, *eegs, *others]
    data_to_write = data[indexes]
    DataFilter.write_file(data=data_to_write,
                          file_name=raw_data_path,
                          file_mode='a')
    log_debug(f"sample {i}:: data is {data} or len {len(data)}")
    time.sleep(1)

board.stop_stream()
board.release_session()
Example #15
0
def main():
    """Trim an OpenBCI raw recording down to its 7 EXG channels, then run a
    demo processing pipeline on it.

    Pipeline: plot the raw signal, apply an 8th-order Butterworth low-pass
    at 7 Hz, clean the result with ICA, and L2-normalize the cleaned data.

    Side effects: rewrites ``filename`` in place while removing channels and
    opens several blocking mne/matplotlib plot windows. Returns ``None``.
    """
    # Raw OpenBCI recording (CSV-like text file readable by brainflow).
    filename = 'OpenBCI-RAW-2020-11-11_08-42-41YES.txt'
    restored_data = DataFilter.read_file(filename)
    print(restored_data.shape)

    if restored_data.shape[0] > 9:
        # More than 9 rows means the timestamp/aux rows have not been
        # stripped yet. Remove rows 9..23 one at a time, rewriting and
        # re-reading the file after each delete so the on-disk copy stays
        # in sync (preserves the original's trimming behaviour exactly).
        for _ in range(9, 24):
            new_data = np.delete(restored_data, 9, 0)
            DataFilter.write_file(new_data, filename, 'w')
            restored_data = DataFilter.read_file(filename)

        # Drop row 0 (presumably a sample-index channel — TODO confirm).
        new_data = np.delete(restored_data, 0, 0)
        DataFilter.write_file(new_data, filename, 'w')
        restored_data = DataFilter.read_file(filename)

        # Drop row 7, leaving the 7 EXG channels used below.
        new_data = np.delete(restored_data, 7, 0)
        restored_df = pd.DataFrame(np.transpose(new_data))
        DataFilter.write_file(new_data, filename, 'w')
        restored_data = DataFilter.read_file(filename)

    else:
        # File was already trimmed; use it as-is.
        restored_df = pd.DataFrame(np.transpose(restored_data))

    ##############################################################
    # Raw Data                                                   #
    ##############################################################

    print('Data From the File')
    print(restored_df.head(10))

    # Reload the trimmed file as a channels-by-samples float matrix.
    data = np.loadtxt(filename, delimiter=',')
    data = np.transpose(data)

    ch_names = ['EXG Channel 0', 'EXG Channel 1', 'EXG Channel 2', 'EXG Channel 3', 'EXG Channel 4', 'EXG Channel 5',
                'EXG Channel 6']

    sfreq = 250  # sampling rate in Hz (assumed OpenBCI rate — TODO confirm)
    info = mne.create_info(ch_names, sfreq, ch_types='emg')

    data = data.astype(float)

    raw = mne.io.RawArray(data, info)
    print(raw)
    print(raw.info)

    # Blocking plot of the unfiltered signal.
    raw.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
     emg=1e2, ref_meg=1e-12, misc=1e-3, stim=1,
     resp=1, chpi=1e-4, whitened=1e2))

    ##############################################################
    # Butterworth Filter                                         #
    ##############################################################

    # 8th-order Butterworth low-pass at 7 Hz (SOS form), applied in place
    # (copy=False mutates `data`).
    f_p = 7
    iirs_params = dict(order = 8, ftype = 'butter', output = 'sos')
    iir_params = mne.filter.construct_iir_filter(iirs_params, f_p, None, sfreq, btype='lowpass', return_copy = False, verbose = True)

    filtered_raw = mne.filter.filter_data(data, sfreq = sfreq, l_freq = None, h_freq = f_p, picks = None, method = 'iir', iir_params = iir_params, copy = False, verbose = True)

    filtered_data = mne.io.RawArray(filtered_raw, info)
    print(filtered_data.info)

    # Blocking plot of the low-passed signal.
    filtered_data.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
     emg=1e2, ref_meg=1e-12, misc=1e-3, stim=1,
     resp=1, chpi=1e-4, whitened=1e2))
    print(type(filtered_data))

    ##############################################################
    # ICA Preprocessing                                          #
    ##############################################################

    # Rebuild the channel info with ch_types='eeg' so ICA treats the
    # channels as EEG data.
    ica_info = mne.create_info(ch_names, sfreq, ch_types='eeg')
    ica_data = mne.io.RawArray(filtered_raw, ica_info)

    # Fit ICA on the filtered data and apply it in place.
    ica = mne.preprocessing.ICA(verbose = True)
    ica.fit(inst = ica_data)
    ica.apply(ica_data)

    # Blocking plot of the ICA-cleaned signal.
    ica_data.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=1e2, eog=150e-6, ecg=5e-4,
     emg=1e2, ref_meg=1e-12, misc=1e-3, stim=1,
     resp=1, chpi=1e-4, whitened=1e2))

    ##############################################################
    # Normalization                                              #
    ##############################################################

    # L2-normalize the ICA-cleaned signal row-wise (per channel, sklearn's
    # default axis). NOTE(review): the original computed this twice with
    # identical inputs and immediately discarded the first result; the
    # duplicate was removed.
    preprocessed_raw = ica_data[:][0]
    normalized_raw = sk.normalize(preprocessed_raw, norm='l2')
    print(normalized_raw)

    normalized_data = mne.io.RawArray(normalized_raw, info)

    # Blocking plot of the normalized signal.
    normalized_data.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
    emg=5e-3, ref_meg=1e-12, misc=1e-3, stim=1,
    resp=1, chpi=1e-4, whitened=1e2))