Пример #1
0
def main ():
    """Demo: stream 10 s from the synthetic board, show the data in pandas,
    and round-trip it through BrainFlow's file serialization."""
    BoardShim.enable_dev_board_logger ()

    # use synthetic board for demo; no connection parameters are needed
    params = BrainFlowInputParams ()
    board = BoardShim (BoardIds.SYNTHETIC_BOARD.value, params)
    board.prepare_session ()
    board.start_stream ()
    BoardShim.log_message (LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep (10)
    data = board.get_current_board_data (20) # get 20 latest data points dont remove them from internal buffer
    board.stop_stream ()
    board.release_session ()

    # demo how to convert it to pandas DF
    # (removed an unused eeg_channels lookup that was dead code here)
    df = pd.DataFrame (np.transpose (data))
    print ('Data From the Board')
    print (df.head (10))

    # demo for data serialization using brainflow API, we recommend to use it instead pandas.to_csv()
    DataFilter.write_file (data, 'test.csv', 'w') # use 'a' for append mode
    restored_data = DataFilter.read_file ('test.csv')
    restored_df = pd.DataFrame (np.transpose (restored_data))
    print ('Data From the File')
    print (restored_df.head (10))
Пример #2
0
    def _load_session_data(self, subject_name, run):
        """Load the data and event files for a single session of one subject.

        The first 5 seconds of every session is a baseline that was used to
        wait for the signal to settle, so those samples are removed from the
        returned data.

        Parameters:
            subject_name: subject identifier used in the recording file names
            run: session/run number used in the recording file names

        Returns:
            data: 2-D array of samples with the first 5 seconds stripped
            events: pandas DataFrame read from the session's events CSV
        """
        # both files share the same "<subject>_<erp_type>_<run>" stem
        base_name = f'{subject_name}_{self.erp_type}_{run}'
        data_path = os.path.join('data', base_name + '.csv')
        event_path = os.path.join('data', base_name + '_EVENTS.csv')
        data = DataFilter.read_file(data_path)

        # remove beginning 5 seconds where signal settles;
        # eeg_info[1] is assumed to be the sampling rate in Hz -- TODO confirm
        idx = 5 * self.eeg_info[1]
        data = data[:, idx:]

        events = pd.read_csv(event_path)
        return data, events
Пример #3
0
def load_data(root_directories: 'list[str]') -> 'list[np.ndarray]':
    """Read the raw EEG data file from each of the given directories.

    :param root_directories: directories, each expected to contain a file
        named ``global_config.EEG_DATA_FILE_NAME``
    :return: one 2-D array per directory, in the same order as the input

    Note: the original annotations ``[str]`` / ``[np.ndarray]`` were literal
    one-element lists, not types; string annotations express the intent
    without requiring ``typing`` imports or Python 3.9+.
    """
    raw_unsliced_data = []

    for path in root_directories:
        data_file_path = path + f"/{global_config.EEG_DATA_FILE_NAME}"

        print(data_file_path)  # progress indicator for long loads

        raw_unsliced_data.append(DataFilter.read_file(data_file_path))

    return raw_unsliced_data
Пример #4
0
def main ():
    """Stream 10 s of data from a user-selected board, then demo pandas
    conversion and BrainFlow file serialization.

    All connection parameters come from the command line; check the BrainFlow
    docs for which ones a given board requires.
    """
    parser = argparse.ArgumentParser ()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
    parser.add_argument ('--timeout', type = int, help  = 'timeout for device discovery or connection', required = False, default = 0)
    parser.add_argument ('--ip-port', type = int, help  = 'ip port', required = False, default = 0)
    parser.add_argument ('--ip-protocol', type = int, help  = 'ip protocol, check IpProtocolType enum', required = False, default = 0)
    parser.add_argument ('--ip-address', type = str, help  = 'ip address', required = False, default = '')
    parser.add_argument ('--serial-port', type = str, help  = 'serial port', required = False, default = '')
    parser.add_argument ('--mac-address', type = str, help  = 'mac address', required = False, default = '')
    parser.add_argument ('--other-info', type = str, help  = 'other info', required = False, default = '')
    parser.add_argument ('--streamer-params', type = str, help  = 'streamer params', required = False, default = '')
    parser.add_argument ('--serial-number', type = str, help  = 'serial number', required = False, default = '')
    parser.add_argument ('--board-id', type = int, help  = 'board id, check docs to get a list of supported boards', required = True)
    parser.add_argument ('--log', action = 'store_true')
    args = parser.parse_args ()

    params = BrainFlowInputParams ()
    params.ip_port = args.ip_port
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info
    params.serial_number = args.serial_number
    params.ip_address = args.ip_address
    params.ip_protocol = args.ip_protocol
    params.timeout = args.timeout

    # honor --log: it was parsed but previously ignored and logging was always on
    if args.log:
        BoardShim.enable_dev_board_logger ()
    else:
        BoardShim.disable_board_logger ()

    board = BoardShim (args.board_id, params)
    board.prepare_session ()
    # pass --streamer-params through (it was parsed but previously unused)
    board.start_stream (45000, args.streamer_params)
    BoardShim.log_message (LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep (10)
    data = board.get_current_board_data (20) # get 20 latest data points dont remove them from internal buffer
    board.stop_stream ()
    board.release_session ()

    # demo how to convert it to pandas DF
    df = pd.DataFrame (np.transpose (data))
    print ('Data From the Board')
    print (df.head (10))

    # demo for data serialization using brainflow API, we recommend to use it instead pandas.to_csv()
    DataFilter.write_file (data, 'test.csv', 'w') # use 'a' for append mode
    restored_data = DataFilter.read_file ('test.csv')
    restored_df = pd.DataFrame (np.transpose (restored_data))
    print ('Data From the File')
    print (restored_df.head (10))
Пример #5
0
def prepare_data():
    """Build feature/label lists from recorded CSV sessions.

    Walks ``data/<relaxed|focused>/<board_id>/*.csv``, slices each recording
    into overlapping windows of several sizes (a light form of data
    augmentation), and extracts BrainFlow average band-power features per
    window.  Labels: 0 = relaxed, 1 = focused.

    Results are pickled to dataset_x.pickle / dataset_y.pickle and returned
    as ``(dataset_x, dataset_y)``.
    """
    # use different windows, its kinda data augmentation
    window_sizes = [4.0, 6.0, 8.0, 10.0]
    overlaps = [0.5, 0.45, 0.4, 0.35]  # step fraction of window_size per slide
    dataset_x = list()
    dataset_y = list()
    for data_type in ('relaxed', 'focused'):
        for file in glob.glob(os.path.join('data', data_type, '*', '*.csv')):
            print(file)
            # the parent directory name encodes the numeric board id
            board_id = os.path.basename(os.path.dirname(file))
            try:
                board_id = int(board_id)
                data = DataFilter.read_file(file)
                sampling_rate = BoardShim.get_sampling_rate(board_id)
                eeg_channels = get_eeg_channels(board_id)
                for num, window_size in enumerate(window_sizes):
                    if data_type == 'focused':
                        cur_pos = sampling_rate * 10  # skip a little more for focus
                    else:
                        cur_pos = sampling_rate * 3
                    # hoist the loop-invariant window length in samples
                    window_len = int(window_size * sampling_rate)
                    while cur_pos + window_len < data.shape[1]:
                        data_in_window = data[:, cur_pos:cur_pos + window_len]
                        bands = DataFilter.get_avg_band_powers(
                            data_in_window, eeg_channels, sampling_rate, True)
                        feature_vector = np.concatenate((bands[0], bands[1]))
                        dataset_x.append(feature_vector)
                        dataset_y.append(1 if data_type == 'focused' else 0)
                        cur_pos = cur_pos + int(
                            window_size * overlaps[num] * sampling_rate)
            except Exception as e:
                # best-effort: skip directories that are not numeric board ids
                # and files that fail to load -- but say which file failed
                print('failed to process %s: %s' % (file, str(e)))

    print('Class 1: %d Class 0: %d' % (dataset_y.count(1),
                                       dataset_y.count(0)))

    with open('dataset_x.pickle', 'wb') as f:
        pickle.dump(dataset_x, f, protocol=3)
    with open('dataset_y.pickle', 'wb') as f:
        pickle.dump(dataset_y, f, protocol=3)

    return dataset_x, dataset_y
Пример #6
0
def main(i):
    """Plot-refresh callback (the frame index ``i`` is unused here;
    the signature suggests matplotlib FuncAnimation -- confirm at the call
    site): re-read 'data.csv' and unpack EEG channels and timestamps.

    NOTE(review): this function is truncated in this view -- it ends in an
    unterminated triple-quoted string, so the plotting tail is missing.
    """

    board_id = BoardIds.SYNTHETIC_BOARD.value
    eeg_channels = BoardShim.get_eeg_channels(board_id)
    timestamp = BoardShim.get_timestamp_channel(board_id)

    style.use('fivethirtyeight')
    plt.title("Live EEG Datastream from Brainflow", fontsize=15)
    plt.ylabel("Data in millivolts", fontsize=15)
    plt.xlabel("\nTime", fontsize=10)

    data = DataFilter.read_file('data.csv') 
    # NOTE(review): data[eeg_channels, timestamp] mixes a list index with a
    # scalar, which in numpy selects one sample per channel (a 1-D result);
    # the 16 column names assigned below suggest the intent was something
    # like data[eeg_channels + [timestamp]] -- confirm before relying on this.
    eegdf = pd.DataFrame(np.transpose(data[eeg_channels, timestamp])) 
    #timedf = pd.DataFrame(np.transpose(data[timestamp])) #to keep it simple, making another dataframe for the timestamps to access later
    
    eegdf_col_names = ["ch1","ch2","ch3","ch4","ch5","ch6","ch7","ch8","ch9","ch10","ch11","ch12","ch13","ch14","ch15","ch16"]
    eegdf.columns = eegdf_col_names

    print("EEG Dataframe")
    print(eegdf) #easy way to check what data is being streamed and if program is working

    #print(eegdf)
    # only the first four channels are unpacked below (Ganglion has four),
    # while column 15 is treated as the timestamp column
    eeg1 = eegdf.iloc[:, 0].values #I am using OpenBCI Ganglion board, so only have four channels.
    eeg2 = eegdf.iloc[:, 1].values 
    eeg3 = eegdf.iloc[:, 2].values  
    eeg4 = eegdf.iloc[:, 3].values
    timex= eegdf.iloc[:, 15].values #timestamps
    #print(timex) #use this to see what the UNIX timestamps look like
    print("EEG Channel 1")
    print(eeg1)
    #print("Time DF")
    #print(timedf)
    print("Timestamp")
    print(timex)

    """plt.cla()
Пример #7
0
def load_slice_and_filter_resonance_data(
        root_directory: str,
        filter_settings: FilterSettings) -> ([float], [EegData], EegData):
    """
    Loads the eeg data recorded from a run of the resonance finder GUI.
    The data should be formatted according to the specification in the
    frequency_index_format.txt.

    :param root_directory: the path to the root directory containing the files
    :param filter_settings: filters to be applied on the data
    :return: a tuple of (frequencies, their corresponding averaged eeg data
        objects, reference eeg data object); the reference data is marked in
        the index file with frequency 0
    """
    raw_eeg_data = DataFilter.read_file(root_directory + "/" +
                                        global_config.RESONANCE_DATA_FILE_NAME)

    filtered_data = filter_settings.apply(raw_eeg_data)

    frequencies = []
    averages = []

    # use a context manager so the index file is always closed
    # (it was previously opened and never closed)
    with open(root_directory + "/" + global_config.FREQUENCY_INDEX_FILE_NAME,
              "r") as index_file:
        # the first line holds the sampling rate; it is not used here but
        # must be consumed so the slice records can be read
        int(index_file.readline())

        # each remaining record is "frequency,start_index,end_index"
        for line in index_file:
            if not line.strip():
                continue  # tolerate trailing blank lines

            elements = line.split(",")

            freq = float(elements[0])
            start_index = int(elements[1])
            end_index = int(elements[2])

            current_slice = filtered_data[:, start_index:end_index]

            # accumulate repeated measurements of the same frequency
            if freq in frequencies:
                averages[frequencies.index(freq)].add_values(current_slice)
            else:
                average = AccumulatingAverages()
                average.add_values(current_slice)
                frequencies.append(freq)
                averages.append(average)

    averaged_eeg_data = []
    updated_frequencies = []

    reference_data = EegData()

    for freq, average in zip(frequencies, averages):
        if freq == 0:  # Reference data
            reference_data = EegData(average.compute_average())
        else:
            averaged_eeg_data.append(EegData(average.compute_average()))
            updated_frequencies.append(freq)

    return updated_frequencies, averaged_eeg_data, reference_data
def main():
    """Stream 10 s of data from a user-selected board, serialize it to CSV,
    then demo per-channel IIR filtering, plotting the EEG channels before and
    after processing (before_processing.png / after_processing.png).
    """
    parser = argparse.ArgumentParser()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port,
    parser.add_argument('--ip-port',
                        type=int,
                        help='ip port',
                        required=False,
                        default=0)
    parser.add_argument('--ip-protocol',
                        type=int,
                        help='ip protocol, check IpProtocolType enum',
                        required=False,
                        default=0)
    parser.add_argument('--ip-address',
                        type=str,
                        help='ip address',
                        required=False,
                        default='')
    parser.add_argument('--serial-port',
                        type=str,
                        help='serial port',
                        required=False,
                        default='')
    parser.add_argument('--mac-address',
                        type=str,
                        help='mac address',
                        required=False,
                        default='')
    parser.add_argument('--other-info',
                        type=str,
                        help='other info',
                        required=False,
                        default='')
    parser.add_argument(
        '--board-id',
        type=int,
        help='board id, check docs to get a list of supported boards',
        required=True)
    parser.add_argument('--log', action='store_true')
    args = parser.parse_args()

    params = BrainFlowInputParams()
    params.ip_port = args.ip_port
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info
    params.ip_address = args.ip_address
    params.ip_protocol = args.ip_protocol

    # dev-board logging is opt-in via --log
    if (args.log):
        BoardShim.enable_dev_board_logger()
    else:
        BoardShim.disable_board_logger()

    # demo how to read data as 2d numpy array
    board = BoardShim(args.board_id, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value,
                          'start sleeping in the main thread')
    time.sleep(10)
    # data = board.get_current_board_data (256) # get latest 256 packages or less, doesnt remove them from internal buffer
    data = board.get_board_data(
    )  # get all data and remove it from internal buffer
    board.stop_stream()
    board.release_session()

    # demo how to convert it to pandas DF and plot data
    eeg_channels = BoardShim.get_eeg_channels(args.board_id)
    df = pd.DataFrame(np.transpose(data))
    print('Data From the Board')
    print(df.head())
    plt.figure()
    df[eeg_channels].plot(subplots=True)
    plt.savefig('before_processing.png')

    # demo for data serialization
    DataFilter.write_file(data, 'test.csv', 'w')
    restored_data = DataFilter.read_file('test.csv')
    restored_df = pd.DataFrame(np.transpose(restored_data))
    print('Data From the File')
    print(restored_df.head())

    # demo how to perform signal processing (filters operate in place on the
    # channel rows of `data`)
    # NOTE(review): the positional filter arguments below follow an older
    # BrainFlow signature (e.g. bandpass/bandstop taking a center frequency
    # and band width, with a trailing ripple value); newer brainflow releases
    # take start/stop frequencies and enum-typed filters instead -- confirm
    # against the installed brainflow version.
    for count, channel in enumerate(eeg_channels):
        if count == 0:
            DataFilter.perform_bandpass(
                data[channel], BoardShim.get_sampling_rate(args.board_id),
                15.0, 6.0, 4, FilterTypes.BESSEL.value, 0)
        elif count == 1:
            DataFilter.perform_bandstop(
                data[channel], BoardShim.get_sampling_rate(args.board_id), 5.0,
                1.0, 3, FilterTypes.BUTTERWORTH.value, 0)
        elif count == 2:
            DataFilter.perform_lowpass(
                data[channel], BoardShim.get_sampling_rate(args.board_id), 9.0,
                5, FilterTypes.CHEBYSHEV_TYPE_1.value, 1)
        elif count == 3:
            DataFilter.perform_highpass(
                data[channel], BoardShim.get_sampling_rate(args.board_id), 3.0,
                4, FilterTypes.BUTTERWORTH.value, 0)
        # channels beyond the fourth are left unfiltered

    df = pd.DataFrame(np.transpose(data))
    print('Data After Processing')
    print(df.head())
    plt.figure()
    df[eeg_channels].plot(subplots=True)
    plt.savefig('after_processing.png')
Пример #9
0
def main():
    """Score concentration and relaxation on a previously recorded session
    using BrainFlow's bundled ML models.

    Reads the recording from 'test.txt', computes average band-power
    features, then runs the KNN concentration model and the regression
    relaxation model on the feature vector.
    """
    DataFilter.enable_data_logger()
    MLModel.enable_ml_logger()

    # read a previously recorded session
    # (a large commented-out live-board acquisition block was removed, along
    # with leftover 'hello'/'world' debug prints)
    data = DataFilter.read_file('test.txt')
    # NOTE(review): hard-coded board id -- confirm it matches the board that
    # recorded test.txt, since channel layout and sampling rate depend on it
    master_board_id = 0
    eeg_channels = BoardShim.get_eeg_channels(master_board_id)
    sampling_rate = BoardShim.get_sampling_rate(master_board_id)
    # bands is assumed to be (averages, stddevs) of band powers -- confirm
    # against the brainflow get_avg_band_powers docs for the installed version
    bands = DataFilter.get_avg_band_powers(data, eeg_channels, sampling_rate,
                                           True)
    feature_vector = np.concatenate((bands[0], bands[1]))
    print(feature_vector)

    # calc concentration
    concentration_params = BrainFlowModelParams(
        BrainFlowMetrics.CONCENTRATION.value, BrainFlowClassifiers.KNN.value)
    concentration = MLModel(concentration_params)
    concentration.prepare()
    print('Concentration: %f' % concentration.predict(feature_vector))
    concentration.release()

    # calc relaxation
    relaxation_params = BrainFlowModelParams(
        BrainFlowMetrics.RELAXATION.value,
        BrainFlowClassifiers.REGRESSION.value)
    relaxation = MLModel(relaxation_params)
    relaxation.prepare()
    print('Relaxation: %f' % relaxation.predict(feature_vector))
    relaxation.release()
Пример #10
0
def main ():
    """Offline processing pipeline for one OpenBCI recording: strip unwanted
    channel rows from the file in place, then plot raw data, a Butterworth
    lowpass, ICA-cleaned data, and L2-normalized data with MNE.

    NOTE(review): this rewrites the input file on disk several times and
    blocks on every plot window (block=True).
    """


    #Convert text files to csv file. May possibly remove this part later. It's useless right now.
    directory = os.path.dirname(os.path.abspath(__file__))

    # renames .txt files to .csv and then prints its contents
    filename = 'OpenBCI-RAW-2020-11-11_08-42-41YES.txt'
    restored_data = DataFilter.read_file(filename)
    print(restored_data.shape)
    if (restored_data.shape[0] > 9):  # If the timestamp has not already been removed then we will remove it
        #Removing the first 5 lines

        # Deleting Time channel and all the other 'unneccessary' channels
        # NOTE(review): each iteration deletes row 9 and round-trips the whole
        # file through disk; a single np.delete(restored_data, range(9, 24), 0)
        # followed by one write would be equivalent and far cheaper.  The
        # restored_df computed inside the loop is never used.
        for i in range(9,24):
            new_data = np.delete(restored_data, 9, 0)
            restored_df = pd.DataFrame(np.transpose(new_data))
            DataFilter.write_file(new_data, filename, 'w')
            restored_data = DataFilter.read_file(filename)

        # drop the first row (presumably a sample counter -- confirm)
        new_data = np.delete(restored_data, 0, 0)
        restored_df = pd.DataFrame(np.transpose(new_data))
        DataFilter.write_file(new_data, filename, 'w')
        restored_data = DataFilter.read_file(filename)

        # drop row 7 so exactly the 7 channels named below remain
        new_data = np.delete(restored_data, 7, 0)
        restored_df = pd.DataFrame(np.transpose(new_data))
        DataFilter.write_file(new_data, filename, 'w')
        restored_data = DataFilter.read_file(filename)


    else:
        restored_df = pd.DataFrame(np.transpose(restored_data))

    # new_data = np.delete(restored_data, 7, 0)
    # restored_df = pd.DataFrame(np.transpose(new_data))
    # DataFilter.write_file(new_data, filename, 'w')

    ##############################################################
    # Raw Data                                                   #
    ##############################################################
    
    print('Data From the File')
    print(restored_df.head(10))


    data = np.loadtxt(filename, delimiter=',')  # remember to remove the first five lines
    data = np.transpose(data)


    ch_names = ['EXG Channel 0', 'EXG Channel 1', 'EXG Channel 2', 'EXG Channel 3', 'EXG Channel 4', 'EXG Channel 5',
                'EXG Channel 6']

    # 250 Hz is assumed to be the board's sampling rate -- TODO confirm
    sfreq = 250
    info = mne.create_info(ch_names, sfreq, ch_types='emg')

    data = data.astype(float)

    raw = mne.io.RawArray(data, info)
    print(raw)
    print(raw.info)

    raw.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
     emg=1e2, ref_meg=1e-12, misc=1e-3, stim=1,
     resp=1, chpi=1e-4, whitened=1e2))


    ##############################################################
    # Butterworth Filter                                         #
    ##############################################################

    sfreq = 250
    f_p = 7  # lowpass cutoff frequency in Hz

    #Applying butterworth filter
    iirs_params = dict(order = 8, ftype = 'butter', output = 'sos')
    iir_params = mne.filter.construct_iir_filter(iirs_params, f_p, None, sfreq, btype='lowpass', return_copy = False, verbose = True)

    filtered_raw = mne.filter.filter_data(data, sfreq = sfreq, l_freq = None, h_freq = f_p, picks = None, method = 'iir', iir_params = iir_params, copy = False, verbose = True)

    filtered_data = mne.io.RawArray(filtered_raw, info)
    print(filtered_data.info)

    #Plotting filtered data
    filtered_data.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
     emg=1e2, ref_meg=1e-12, misc=1e-3, stim=1,
     resp=1, chpi=1e-4, whitened=1e2))
    print(type(filtered_data))


    ##############################################################
    # ICA Preprocessing                                          #
    ##############################################################

    #Setting up data for fitting (ICA requires eeg-type channels)
    ica_info = mne.create_info(ch_names, sfreq, ch_types='eeg') 
    ica_data = mne.io.RawArray(filtered_raw, ica_info)
    
    #Fitting and applying ICA
    ica = mne.preprocessing.ICA(verbose = True)
    ica.fit(inst = ica_data)
    ica.apply(ica_data)

    #Plotting data
    ica_data.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=1e2, eog=150e-6, ecg=5e-4,
     emg=1e2, ref_meg=1e-12, misc=1e-3, stim=1,
     resp=1, chpi=1e-4, whitened=1e2))


    ##############################################################
    # Normalization                                              #
    ##############################################################

    # NOTE(review): the first two lines below are duplicated by the next two
    # and are immediately overwritten; `sk` is presumably
    # sklearn.preprocessing -- verify the import alias at the top of the file.
    filtered_raw_numpy = ica_data[:][0]
    normalized_raw = sk.normalize(filtered_raw_numpy, norm='l2')
    preprocessed_raw = ica_data[:][0]
    normalized_raw = sk.normalize(preprocessed_raw, norm='l2')
    print((normalized_raw))
    
    normalized_data = mne.io.RawArray(normalized_raw, info)
    
    normalized_data.plot(block = True, scalings=dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
    emg=5e-3, ref_meg=1e-12, misc=1e-3, stim=1,
    resp=1, chpi=1e-4, whitened=1e2))