Example n. 1
0
def main():
    """Stream 10 s of data from the synthetic board, print a preview, and
    round-trip the data through a CSV file via the brainflow serialization API.
    """
    BoardShim.enable_dev_board_logger()

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board = BoardShim(BoardIds.SYNTHETIC_BOARD.value, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep(10)
    # get the 20 latest data points without removing them from the internal buffer
    data = board.get_current_board_data(20)
    board.stop_stream()
    board.release_session()

    # demo how to convert the data to a pandas DataFrame
    # (the original also fetched eeg_channels here but never used it)
    df = pd.DataFrame(np.transpose(data))
    print('Data From the Board')
    print(df.head(10))

    # demo for data serialization using the brainflow API; preferred over pandas.to_csv()
    DataFilter.write_file(data, 'test.csv', 'w')  # use 'a' for append mode
    restored_data = DataFilter.read_file('test.csv')
    restored_df = pd.DataFrame(np.transpose(restored_data))
    print('Data From the File')
    print(restored_df.head(10))
Example n. 2
0
def main():
    """Stream 10 s of synthetic-board data and demonstrate downsampling with
    different aggregation operations per channel."""
    BoardShim.enable_dev_board_logger()

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board = BoardShim(BoardIds.SYNTHETIC_BOARD.value, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value,
                          'start sleeping in the main thread')
    time.sleep(10)
    # 20 latest points; they stay in the internal buffer
    data = board.get_current_board_data(20)
    board.stop_stream()
    board.release_session()

    eeg_channels = BoardShim.get_eeg_channels(BoardIds.SYNTHETIC_BOARD.value)
    # demo for downsampling, it just aggregates data
    for count, channel in enumerate(eeg_channels):
        print('Original data for channel %d:' % channel)
        print(data[channel])
        # pick a (period, aggregation) pair per channel position
        if count == 0:
            period, operation = 3, AggOperations.MEDIAN.value
        elif count == 1:
            period, operation = 2, AggOperations.MEAN.value
        else:
            period, operation = 2, AggOperations.EACH.value
        downsampled_data = DataFilter.perform_downsampling(
            data[channel], period, operation)
        print('Downsampled data for channel %d:' % channel)
        print(downsampled_data)
Example n. 3
0
def main():
    """Stream from the synthetic board and print the alpha/beta band-power
    ratio for each EEG channel."""
    BoardShim.enable_dev_board_logger()

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board_id = BoardIds.SYNTHETIC_BOARD.value
    sampling_rate = BoardShim.get_sampling_rate(board_id)
    board = BoardShim(board_id, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value,
                          'start sleeping in the main thread')
    time.sleep(10)
    # get_psd needs a power-of-two number of samples
    data = board.get_current_board_data(
        DataFilter.get_nearest_power_of_two(sampling_rate))
    board.stop_stream()
    board.release_session()

    eeg_channels = BoardShim.get_eeg_channels(board_id)
    for channel in eeg_channels:
        # optional: subtract mean or detrend
        psd = DataFilter.get_psd(data[channel], sampling_rate,
                                 WindowFunctions.BLACKMAN_HARRIS.value)
        band_power_alpha = DataFilter.get_band_power(psd, 7.0, 13.0)
        band_power_beta = DataFilter.get_band_power(psd, 14.0, 30.0)
        # bug fix: the original passed the ratio as a second print() argument,
        # so the %f format was never applied and a tuple was printed
        print("alpha/beta:%f" % (band_power_alpha / band_power_beta))
Example n. 4
0
def main():
    """Parse board-connection options from the command line, stream for 10 s,
    print a preview of the data, and round-trip it through a CSV file."""
    parser = argparse.ArgumentParser()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
    parser.add_argument('--timeout', type=int, required=False, default=0,
                        help='timeout for device discovery or connection')
    parser.add_argument('--ip-port', type=int, required=False, default=0,
                        help='ip port')
    parser.add_argument('--ip-protocol', type=int, required=False, default=0,
                        help='ip protocol, check IpProtocolType enum')
    parser.add_argument('--ip-address', type=str, required=False, default='',
                        help='ip address')
    parser.add_argument('--serial-port', type=str, required=False, default='',
                        help='serial port')
    parser.add_argument('--mac-address', type=str, required=False, default='',
                        help='mac address')
    parser.add_argument('--other-info', type=str, required=False, default='',
                        help='other info')
    parser.add_argument('--streamer-params', type=str, required=False, default='',
                        help='streamer params')
    parser.add_argument('--serial-number', type=str, required=False, default='',
                        help='serial number')
    parser.add_argument('--board-id', type=int, required=True,
                        help='board id, check docs to get a list of supported boards')
    parser.add_argument('--log', action='store_true')
    args = parser.parse_args()

    # copy the CLI options onto the brainflow connection parameters
    params = BrainFlowInputParams()
    params.ip_port = args.ip_port
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info
    params.serial_number = args.serial_number
    params.ip_address = args.ip_address
    params.ip_protocol = args.ip_protocol
    params.timeout = args.timeout

    BoardShim.enable_dev_board_logger()

    board = BoardShim(args.board_id, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
    time.sleep(10)
    # 20 latest points; they remain in the internal buffer
    data = board.get_current_board_data(20)
    board.stop_stream()
    board.release_session()

    # convert to a pandas DataFrame and show a preview
    df = pd.DataFrame(np.transpose(data))
    print('Data From the Board')
    print(df.head(10))

    # brainflow serialization is preferred over pandas.to_csv()
    DataFilter.write_file(data, 'test.csv', 'w')  # use 'a' for append mode
    restored_data = DataFilter.read_file('test.csv')
    restored_df = pd.DataFrame(np.transpose(restored_data))
    print('Data From the File')
    print(restored_df.head(10))
Example n. 5
0
def main():
    """Stream from the synthetic board, then demo wavelet and FFT round-trips
    on every EEG channel."""
    BoardShim.enable_dev_board_logger()

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board_id = BoardIds.SYNTHETIC_BOARD.value
    sampling_rate = BoardShim.get_sampling_rate(board_id)
    board = BoardShim(board_id, params)
    board.prepare_session()
    board.start_stream()
    BoardShim.log_message(LogLevels.LEVEL_INFO.value,
                          'start sleeping in the main thread')
    time.sleep(10)
    # grab a power-of-two number of samples so the FFT demo below is valid
    num_samples = DataFilter.get_nearest_power_of_two(sampling_rate)
    data = board.get_current_board_data(num_samples)
    board.stop_stream()
    board.release_session()

    eeg_channels = BoardShim.get_eeg_channels(board_id)
    # demo for transforms
    for count, channel in enumerate(eeg_channels):
        print('Original data for channel %d:' % channel)
        print(data[channel])
        # wavelet_coeffs layout: [A(J) D(J) D(J-1) ... D(1)] where J is the
        # decomposition level, A = approximation, D = detail coefficients;
        # lengths holds the size of each block
        wavelet_coeffs, lengths = DataFilter.perform_wavelet_transform(
            data[channel], 'db5', 3)
        approx_block = wavelet_coeffs[0:lengths[0]]
        first_detail_block = wavelet_coeffs[lengths[0]:lengths[1]]
        # coefficients could be thresholded here, e.g. for denoising
        restored_data = DataFilter.perform_inverse_wavelet_transform(
            (wavelet_coeffs, lengths), data[channel].shape[0], 'db5', 3)
        print('Restored data after wavelet transform for channel %d:' %
              channel)
        print(restored_data)

        # FFT requires a power-of-two input length; output has N / 2 + 1 bins
        fft_data = DataFilter.perform_fft(data[channel],
                                          WindowFunctions.NO_WINDOW.value)
        restored_fft_data = DataFilter.perform_ifft(fft_data)
        print('Restored data after fft for channel %d:' % channel)
        print(restored_fft_data)
def main():
    """Print brainflow component versions, then exercise a short
    synthetic-board streaming session and a few buffer reads."""
    print('BoardShim version: ' + BoardShim.get_version())
    print('DataFilter version: ' + DataFilter.get_version())
    print('MLModel version: ' + MLModel.get_version())

    BoardShim.enable_dev_board_logger()
    board = BoardShim(BoardIds.SYNTHETIC_BOARD.value, BrainFlowInputParams())
    board.prepare_session()
    board.start_stream()
    time.sleep(10)
    board.stop_stream()

    # first read drains the internal buffer; report stddev of row index 2
    data = board.get_board_data()
    print(DataFilter.calc_stddev(data[2]))
    # subsequent reads observe only samples that arrived after the drain
    print(board.get_board_data())
    print(board.get_current_board_data(10))
    board.release_session()
Example n. 7
0
class Neural_Feedback:
    """Neurofeedback driver: streams EEG from an OpenBCI Cyton Daisy board and
    feeds metric values back to the user by modulating the brightness of a
    video and the volume of its audio track in worker threads.

    NOTE(review): cv2_video_thread() reads self.is_last_signal_delta_high, but
    that attribute is first assigned inside main()'s loop after the thread has
    already started -- confirm it cannot be read before it is set.
    """
    def cv2_video_thread(self):
        """Render video frames in sync with the audio clock, brightening or
        dimming them according to the current feedback signal."""
        cv2.namedWindow(self.windowName, cv2.WINDOW_GUI_EXPANDED)
        video = cv2.VideoCapture(self.video_path)
        signal_timestamp = time.time()
        old_signal = self.positive_signal
        try:
            brightness = 0
            brightness_delta = 5
            while self.player_is_playing:
                grabbed, frame = video.read()
                video_msec = video.get(cv2.CAP_PROP_POS_MSEC)
                audio_current_time = time.time() - self.audio_start_time_sec
                # video lags audio by more than 200 ms: skip 10 frames ahead
                if audio_current_time * 1000 > video_msec + 200:
                    frame_pos = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_pos + 10)
                    #print(f'audio_current_time_ms {audio_current_time}  video_time {video_msec/1000} frame {frame_pos}')
                # video runs ahead of audio: sleep a fraction of the lead
                elif audio_current_time * 1000 < video_msec - 100:
                    t = (video_msec - audio_current_time * 1000) / 1500
                    time.sleep(t)
                    #print(f'video is too fast {t}')
                if not grabbed:
                    break
                if cv2.waitKey(28) & 0xFF == ord("q"):
                    self.player_is_playing = False
                    break
                signal = self.positive_signal
                # on a signal flip, choose how fast brightness ramps up/down
                if old_signal != signal:  # and (self.is_last_signal_delta_high or time.time() - signal_timestamp > 2):
                    signal_timestamp = time.time()
                    if signal:
                        brightness_delta = 30 if self.is_last_signal_delta_high else 10
                    else:
                        brightness_delta = -2
                    old_signal = signal
                brightness += brightness_delta
                # clamp brightness to [50, 255]
                if brightness > 254:
                    brightness = 255
                if brightness < 50:
                    brightness = 50

                cv2.normalize(frame, frame, 0, brightness,
                              cv2.NORM_MINMAX)  # 0 - 255
                cv2.imshow(self.windowName, frame)

        except Exception as e:
            print(e)
        finally:
            # stop the session on any error or end of video
            self.player_is_playing = False
            video.release()
            cv2.destroyAllWindows()

    def audio_thread(self):
        """Play the audio track ('vn' disables video in ffpyplayer), lowering
        the volume after the feedback signal stays negative for > 2 s."""
        player = MediaPlayer(self.video_path, ff_opts={'vn': True})
        old_signal = self.positive_signal
        signal_timestamp = time.time()
        try:
            player.set_volume(1.0)
            self.audio_start_time_sec = time.time()
            while self.player_is_playing:
                signal = self.positive_signal
                if old_signal != signal:
                    if signal:
                        player.set_volume(1.0)
                        signal_timestamp = time.time()
                    elif time.time() - signal_timestamp > 2:
                        player.set_volume(0.4)
                        signal_timestamp = time.time()
                    old_signal = signal
                time.sleep(0.1)
        except Exception as e:
            print(e)
        finally:
            self.player_is_playing = False
            player.close_player()

    def __init__(self, video_path):
        """Set up board connection parameters and playback state.

        video_path: path of the video file used for feedback playback.
        """
        self.windowName = "Neurofeedback"
        self.video_path = video_path
        self.audio_start_time_sec = time.time()
        BoardShim.enable_dev_board_logger()
        params = BrainFlowInputParams()
        # assumes the board dongle enumerates as /dev/ttyUSB0 -- TODO confirm
        params.serial_port = "/dev/ttyUSB0"
        self.board_id = BoardIds.CYTON_DAISY_BOARD.value
        self.sampling_rate = BoardShim.get_sampling_rate(self.board_id)
        self.board = BoardShim(self.board_id, params)
        self.player_is_playing = False
        self.positive_signal = True
        self.last_signal_delta = 0
        self.metrics = {}
        self.channels = {}

    def dispose(self):
        """Stop streaming and release the board session."""
        self.board.stop_stream()
        self.board.release_session()

    def config_protocol(self, protocol):
        """Store a (components, metrics) protocol and group components by the
        EEG channel they watch; prints a summary of the configuration."""
        (components, self.metrics) = protocol
        for component in components.values():
            if (component.channel not in self.channels):
                self.channels[component.channel] = []
            self.channels[component.channel].append(component)
        print(f'config {len(self.channels)} channels')
        for channel in self.channels.keys():
            print(f'ch {channel}')
            for component in self.channels[channel]:
                print(f'{component.name}')
        for metric in self.metrics:
            print(
                f'metric {metric.metric_type} {metric.component1.name} {metric.component2.name}'
            )

    def main(self):
        """Run the session: start streaming, launch the A/V threads, and
        update the feedback signal by comparing the current metric sum against
        a rolling average of the last 50 readings, logging each step to CSV."""
        self.board.prepare_session()
        self.board.start_stream()
        try:
            nfft = DataFilter.get_nearest_power_of_two(self.sampling_rate)
            eeg_channels = BoardShim.get_eeg_channels(self.board_id)
            time.sleep(3)
            cv2_thread = threading.Thread(target=self.cv2_video_thread)
            audio_thread = threading.Thread(target=self.audio_thread)
            cv2_thread.start()
            audio_thread.start()
            self.player_is_playing = True
            signal_freq_coeff = 1.1  # auto adjustable coefficient?
            high_signal_freq_coeff = 1.5
            data_log_file = open(f'log2-{time.time()}.csv', 'a')
            print_bands = []
            for channel in self.channels.keys():
                print_bands.append(','.join(
                    [b.name for b in self.channels[channel]]))
            data_log_file.write(
                f'time,metrics_sum,signal,high_signal,{",".join(print_bands)}')
            metrics_hist = []
            while self.player_is_playing:
                time.sleep(.3)
                self.on_next(eeg_channels, nfft)
                metrics_sum = 0.0
                for metric in self.metrics:
                    metrics_sum += metric.get_metric()
                # keep a rolling window of the last 50 metric sums
                metrics_hist.append(metrics_sum)
                if len(metrics_hist) > 50:
                    metrics_hist.pop(0)

                avg_metrics_hist = sum(metrics_hist) / len(metrics_hist)
                self.positive_signal = avg_metrics_hist < metrics_sum * signal_freq_coeff

                self.is_last_signal_delta_high = False
                if self.positive_signal and avg_metrics_hist < metrics_sum * high_signal_freq_coeff:
                    self.is_last_signal_delta_high = True

                print(
                    f'{self.positive_signal} {avg_metrics_hist} < {metrics_sum*signal_freq_coeff}'
                )
                print_bands = []
                for channel in self.channels.keys():
                    print_bands.append(','.join([
                        str(b.band_current_power)
                        for b in self.channels[channel]
                    ]))

                log_line = f'\n{time.asctime(time.gmtime(time.time()))},{metrics_sum},{self.positive_signal},{self.is_last_signal_delta_high},{",".join(print_bands)}'

                data_log_file.write(log_line)

            data_log_file.close()
            audio_thread.join()
            cv2_thread.join()
        except Exception as e:
            print(e)
            self.player_is_playing = False
        return

    def on_next(self, eeg_channels, nfft):
        """Fetch ~1 s of the newest samples, detrend each configured channel,
        and feed its Welch PSD to that channel's components."""
        data = self.board.get_current_board_data(
            max(self.sampling_rate, nfft) +
            1)  #get_board_data () we are taking ~1 sec data ~10 times a sec
        for channel in self.channels.keys():
            channel_data = data[channel]
            DataFilter.detrend(channel_data, DetrendOperations.LINEAR.value)
            psd = DataFilter.get_psd_welch(
                channel_data, nfft, nfft // 2, self.sampling_rate,
                WindowFunctions.BLACKMAN_HARRIS.value)
            for component in self.channels[channel]:
                component.add_band_power_value(psd)
Example n. 8
0
class Neural_Feedback:
    """Neurofeedback driver (band-protocol variant): streams EEG from an
    OpenBCI Cyton Daisy board and modulates video brightness / audio volume
    from summed per-band inhibit/enhance signals.

    NOTE(review): cv2_video_thread() reads self.is_last_signal_delta_high,
    which is first assigned inside main()'s loop after the thread has already
    started -- confirm it cannot be read before it is set.
    """
    def cv2_video_thread(self):
        """Render video frames in sync with the audio clock, brightening or
        dimming them according to the current feedback signal."""
        cv2.namedWindow(self.windowName, cv2.WINDOW_GUI_EXPANDED)
        video = cv2.VideoCapture(self.video_path)
        signal_timestamp = time.time()
        old_signal = self.positive_signal
        try:
            brightness = 0
            brightness_delta = 5
            while self.player_is_playing:
                grabbed, frame = video.read()
                video_msec = video.get(cv2.CAP_PROP_POS_MSEC)
                audio_current_time = time.time() - self.audio_start_time_sec
                # video lags audio by more than 200 ms: skip 10 frames ahead
                if audio_current_time * 1000 > video_msec + 200:
                    frame_pos = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_pos + 10)
                    #print(f'audio_current_time_ms {audio_current_time}  video_time {video_msec/1000} frame {frame_pos}')
                # video runs ahead of audio: sleep a fraction of the lead
                elif audio_current_time * 1000 < video_msec - 100:
                    t = (video_msec - audio_current_time * 1000) / 1500
                    time.sleep(t)
                    #print(f'video is too fast {t}')
                if not grabbed:
                    break
                if cv2.waitKey(28) & 0xFF == ord("q"):
                    self.player_is_playing = False
                    break
                signal = self.positive_signal
                # on a signal flip, choose how fast brightness ramps up/down
                if old_signal != signal:  # and (self.is_last_signal_delta_high or time.time() - signal_timestamp > 2):
                    signal_timestamp = time.time()
                    if signal:
                        brightness_delta = 30 if self.is_last_signal_delta_high else 10
                    else:
                        brightness_delta = -2
                    old_signal = signal
                brightness += brightness_delta
                # clamp brightness to [50, 255]
                if brightness > 254:
                    brightness = 255
                if brightness < 50:
                    brightness = 50

                cv2.normalize(frame, frame, 0, brightness,
                              cv2.NORM_MINMAX)  # 0 - 255
                cv2.imshow(self.windowName, frame)

        except Exception as e:
            print(e)
        finally:
            # stop the session on any error or end of video
            self.player_is_playing = False
            video.release()
            cv2.destroyAllWindows()

    def audio_thread(self):
        """Play the audio track ('vn' disables video in ffpyplayer), lowering
        the volume after the feedback signal stays negative for > 2 s."""
        player = MediaPlayer(self.video_path, ff_opts={'vn': True})
        old_signal = self.positive_signal
        signal_timestamp = time.time()
        try:
            player.set_volume(1.0)
            self.audio_start_time_sec = time.time()
            while self.player_is_playing:
                signal = self.positive_signal
                if old_signal != signal:
                    if signal:
                        player.set_volume(1.0)
                        signal_timestamp = time.time()
                    elif time.time() - signal_timestamp > 2:
                        player.set_volume(0.4)
                        signal_timestamp = time.time()
                    old_signal = signal
                time.sleep(0.1)
        except Exception as e:
            print(e)
        finally:
            self.player_is_playing = False
            player.close_player()

    def __init__(self, video_path):
        """Set up board connection parameters and playback state.

        video_path: path of the video file used for feedback playback.
        """
        self.windowName = "Neurofeedback"
        self.video_path = video_path
        self.audio_start_time_sec = time.time()
        BoardShim.enable_dev_board_logger()
        params = BrainFlowInputParams()
        # assumes the board dongle enumerates as /dev/ttyUSB0 -- TODO confirm
        params.serial_port = "/dev/ttyUSB0"
        self.board_id = BoardIds.CYTON_DAISY_BOARD.value
        self.sampling_rate = BoardShim.get_sampling_rate(self.board_id)
        self.board = BoardShim(self.board_id, params)
        self.player_is_playing = False
        self.positive_signal = True
        self.last_signal_delta = 0
        self.signals = []

    def dispose(self):
        """Stop streaming and release the board session."""
        self.board.stop_stream()
        self.board.release_session()

    def config_protocol(self, protocol):
        """Store the protocol (a sequence of channel configs, each with a list
        of band definitions) and print a summary."""
        self.protocol = protocol
        print(f'config {len(protocol)} channels')
        for channel in protocol:
            print(f'channel {channel.channel_inx}')
            for band in channel.bands:
                print(
                    f'band {band.band_range_min}-{band.band_range_max} {"inhibit" if band.is_inhibit else "enchance"}'
                )

    def main(self):
        """Run the session: start streaming, launch the A/V threads, and set
        the feedback signal by weighing positive band signals against negative
        ones, logging each step to CSV."""
        self.board.prepare_session()
        self.board.start_stream()
        try:
            nfft = DataFilter.get_nearest_power_of_two(self.sampling_rate)
            eeg_channels = BoardShim.get_eeg_channels(self.board_id)
            time.sleep(3)
            cv2_thread = threading.Thread(target=self.cv2_video_thread)
            audio_thread = threading.Thread(target=self.audio_thread)
            cv2_thread.start()
            audio_thread.start()
            self.player_is_playing = True
            signal_freq_coeff = .5  # auto adjustable coefficient?
            high_signal_freq_coeff = .15
            data_log_file = open(f'log-{time.time()}.csv', 'a')
            while self.player_is_playing:
                time.sleep(.1)
                bands_signals = self.on_next(eeg_channels, nfft)
                # split the per-band signals into positive and negative sums
                positive_signals_sum = 0.0
                negative_signals_sum = 0.0
                for i in bands_signals.keys():
                    if bands_signals[i] > 0:
                        positive_signals_sum += bands_signals[i]
                    else:
                        negative_signals_sum += bands_signals[i]

                self.positive_signal = abs(
                    negative_signals_sum
                ) < positive_signals_sum * signal_freq_coeff

                self.is_last_signal_delta_high = False
                if self.positive_signal and positive_signals_sum * high_signal_freq_coeff > abs(
                        negative_signals_sum):
                    self.is_last_signal_delta_high = True

                print_bands = []
                for proto in self.protocol:
                    print_bands.append(
                        f'{eeg_channels[proto.channel_inx]},' + ",".join(
                            [str(b.band_current_power) for b in proto.bands]))

                log_line = f'\n{time.asctime(time.gmtime(time.time()))},{positive_signals_sum},{negative_signals_sum},{self.positive_signal},{self.is_last_signal_delta_high},{",".join(print_bands)}'

                data_log_file.write(log_line)

            data_log_file.close()
            audio_thread.join()
            cv2_thread.join()
        except Exception as e:
            print(e)
            self.player_is_playing = False
        return

    def on_next(self, eeg_channels, nfft):
        """Fetch ~1 s of the newest samples, detrend each protocol channel,
        feed its Welch PSD to the band definitions, and return the summed
        signal per band name."""
        data = self.board.get_current_board_data(
            max(self.sampling_rate, nfft) +
            1)  #get_board_data () we are taking ~1 sec data ~10 times a sec
        bands_sum = collections.defaultdict(float)
        for channel in self.protocol:
            channel_data = data[eeg_channels[channel.channel_inx]]
            DataFilter.detrend(channel_data, DetrendOperations.LINEAR.value)
            psd = DataFilter.get_psd_welch(
                channel_data, nfft, nfft // 2, self.sampling_rate,
                WindowFunctions.BLACKMAN_HARRIS.value)
            for band in channel.bands:
                band.add_band_power_value(psd)
                bands_sum[band.name] += band.get_signal()
        return bands_sum
Example n. 9
0
class Neural_Feedback:
    """Neurofeedback driver (Cyton/COM4 variant): streams EEG from an OpenBCI
    Cyton board and modulates video brightness / audio volume from the count
    of positive per-band signals, smoothed over the last three readings."""
    def cv2_video_read_thread(self):
        """Render video frames in sync with the audio clock, brightening or
        dimming them according to the current feedback signal."""
        cv2.namedWindow(self.windowName, cv2.WINDOW_GUI_EXPANDED)
        video = cv2.VideoCapture(self.video_path)
        signal_timestamp = time.time()
        old_signal = self.positive_signal
        try:
            brightness = 0
            brightness_delta = 5
            while self.player_is_playing:
                grabbed, frame = video.read()
                video_msec = video.get(cv2.CAP_PROP_POS_MSEC)
                audio_current_time = time.time() - self.audio_start_time_sec
                # video lags audio by more than 200 ms: skip 10 frames ahead
                if audio_current_time * 1000 > video_msec + 200:
                    frame_pos = video.get(cv2.CAP_PROP_POS_FRAMES)
                    video.set(cv2.CAP_PROP_POS_FRAMES, frame_pos + 10)
                    #print(f'audio_current_time_ms {audio_current_time}  video_time {video_msec/1000} frame {frame_pos}')
                # video runs ahead of audio: sleep a fraction of the lead
                elif audio_current_time * 1000 < video_msec - 100:
                    t = (video_msec - audio_current_time * 1000) / 1500
                    time.sleep(t)
                    #print(f'video is too fast {t}')
                if not grabbed:
                    break
                if cv2.waitKey(28) & 0xFF == ord("q"):
                    self.player_is_playing = False
                    break
                signal = self.positive_signal
                # react only to strong deltas or after a 3 s debounce
                if old_signal != signal and (
                        self.last_signal_delta > 2.0
                        or time.time() - signal_timestamp > 3):
                    signal_timestamp = time.time()
                    if signal:
                        brightness_delta = 10
                    else:
                        brightness_delta = -5
                    old_signal = signal
                brightness += brightness_delta
                # clamp brightness to [50, 255]
                if brightness > 254:
                    brightness = 255
                if brightness < 50:
                    brightness = 50

                cv2.normalize(frame, frame, 0, brightness,
                              cv2.NORM_MINMAX)  # 0 - 255
                cv2.imshow(self.windowName, frame)

        except Exception as e:
            print(e)
        finally:
            # stop the session on any error or end of video
            self.player_is_playing = False
            video.release()
            cv2.destroyAllWindows()

    def audio_thread(self):
        """Play the audio track ('vn' disables video in ffpyplayer), reducing
        volume on a negative signal with the same delta/debounce rule as the
        video thread."""
        player = MediaPlayer(self.video_path, ff_opts={'vn': True})
        old_signal = self.positive_signal
        signal_timestamp = time.time()
        try:
            player.set_volume(1.0)
            self.audio_start_time_sec = time.time()
            while self.player_is_playing:
                signal = self.positive_signal
                if old_signal != signal and (
                        self.last_signal_delta > 2.0
                        or time.time() - signal_timestamp > 3):
                    signal_timestamp = time.time()
                    if signal:
                        player.set_volume(1.0)
                    else:
                        player.set_volume(0.5)
                    old_signal = signal
                time.sleep(0.1)
        except Exception as e:
            print(e)
        finally:
            self.player_is_playing = False
            player.close_player()

    def __init__(self, video_path):
        """Set up board connection parameters and playback state.

        video_path: path of the video file used for feedback playback.
        """
        self.windowName = "Neurofeedback"
        self.video_path = video_path
        self.audio_start_time_sec = time.time()
        BoardShim.enable_dev_board_logger()
        params = BrainFlowInputParams()
        # assumes the board dongle enumerates as COM4 (Windows) -- TODO confirm
        params.serial_port = "COM4"
        self.board_id = BoardIds.CYTON_BOARD.value
        self.sampling_rate = BoardShim.get_sampling_rate(self.board_id)
        self.board = BoardShim(self.board_id, params)
        self.player_is_playing = False
        self.positive_signal = True
        self.last_signal_delta = 0
        self.signals = []

    def dispose(self):
        """Stop streaming and release the board session."""
        self.board.stop_stream()
        self.board.release_session()

    def config_protocol(self, protocol):
        """Store the protocol (a sequence of channel configs, each with a list
        of band definitions) and print a summary."""
        self.protocol = protocol
        print(f'config {len(protocol)} channels')
        for channel in protocol:
            print(f'channel {channel.channel_inx}')
            for band in channel.bands:
                print(
                    f'band {band.band_range_min}-{band.band_range_max} {"inhibit" if band.is_inhibit else "enchance"}'
                )

    def main(self):
        """Run the session: start streaming, launch the A/V threads, and set
        the feedback signal by comparing the latest positive-signal count with
        the average of the last three readings."""
        self.board.prepare_session()
        self.board.start_stream()
        try:
            nfft = DataFilter.get_nearest_power_of_two(self.sampling_rate)
            eeg_channels = BoardShim.get_eeg_channels(self.board_id)
            time.sleep(3)
            signals = []
            cv2_thread = threading.Thread(target=self.cv2_video_read_thread)
            audio_thread = threading.Thread(target=self.audio_thread)
            cv2_thread.start()
            audio_thread.start()
            self.player_is_playing = True
            while self.player_is_playing:
                # keep only the last 3 readings for the rolling average
                signals.append(self.on_next(eeg_channels, nfft))
                if len(signals) > 3:
                    signals.pop(0)
                avg_signal = sum(signals) / len(signals)
                self.positive_signal = avg_signal < signals[-1]
                self.last_signal_delta = abs(avg_signal - signals[-1])
                if signals[-1] > 9:  # min positive signals
                    self.positive_signal = True
                print(
                    f'up {self.last_signal_delta}' if avg_signal < signals[-1]
                    else f'down {self.last_signal_delta}')  # enable it later
            audio_thread.join()
            cv2_thread.join()
        except Exception as e:
            print(e)
        return

    def on_next(self, eeg_channels, nfft):
        """Fetch ~1 s of the newest samples (~3 times a second), detrend each
        protocol channel, feed its Welch PSD to the band definitions, and
        return the total count of positive signals across the protocol."""
        time.sleep(.3)
        data = self.board.get_current_board_data(
            max(self.sampling_rate, nfft) +
            1)  #get_board_data () we are taking ~1 sec data ~3 times a sec
        for channel in self.protocol:
            channel_data = data[eeg_channels[channel.channel_inx]]
            DataFilter.detrend(channel_data, DetrendOperations.LINEAR.value)
            psd = DataFilter.get_psd_welch(
                channel_data, nfft, nfft // 2, self.sampling_rate,
                WindowFunctions.BLACKMAN_HARRIS.value)
            for band in channel.bands:
                band.add_band_power_value(psd, 30)
            #print(f'channel: {channel.channel_inx} positive_signals: {channel.get_positive_signals_count()} avg_power: {channel.get_avg_bands()}')
        return sum([i.get_positive_signals_count() for i in self.protocol])
Example n. 10
0
    def return_prediction():
        """Record a short EEG session via brainflow, feed it to a pre-trained
        random-forest classifier, and return the most frequent predicted
        command as a string.

        Adapted from https://brainflow.readthedocs.io/en/stable/Examples.html
        """
        # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
        parser = argparse.ArgumentParser()
        parser.add_argument('--ip-port', type=int, required=False, default=0,
                            help='ip port')
        parser.add_argument('--ip-protocol', type=int, required=False,
                            default=0,
                            help='ip protocol, check IpProtocolType enum')
        parser.add_argument('--ip-address', type=str, required=False,
                            default='', help='ip address')
        parser.add_argument('--serial-port', type=str, required=False,
                            default='', help='serial port')
        parser.add_argument('--mac-address', type=str, required=False,
                            default='', help='mac address')
        parser.add_argument('--other-info', type=str, required=False,
                            default='', help='other info')
        parser.add_argument('--streamer-params', type=str, required=False,
                            default='', help='other info')
        parser.add_argument(
            '--board-id',
            type=int,
            help='board id, check docs to get a list of supported boards',
            required=True)
        parser.add_argument('--log', action='store_true')
        args = parser.parse_args()

        params = BrainFlowInputParams()
        params.ip_port = args.ip_port
        params.serial_port = args.serial_port
        params.mac_address = args.mac_address
        params.other_info = args.other_info
        params.ip_address = args.ip_address
        params.ip_protocol = args.ip_protocol

        if (args.log):
            BoardShim.enable_dev_board_logger()
        else:
            BoardShim.disable_board_logger()

        board = BoardShim(args.board_id, params)
        board.prepare_session()

        # bug fix: the original additionally called
        # board.start_stream(ms, args.streamer_params), but `ms` was undefined
        # (its assignment was commented out) and raised a NameError.
        board.start_stream()  # use this for default options
        time.sleep(3)
        # get all data and remove it from the internal buffer
        # (the original first fetched 256 samples and immediately overwrote them)
        data = board.get_board_data()
        board.stop_stream()
        board.release_session()

        # CONNECT THE DATA TO THE MODEL FOR PREDICTIONS

        column_names = [
            'index', 'channel1', 'channel2', 'channel3', 'channel4', 'accel1',
            'accel2', 'accel3', 'timestamp', 'aux'
        ]
        # discard the first samples, which may be noisy at stream start
        dropped_row_indices = [0, 1, 2, 3, 4, 5]

        # bug fix: pd.read_csv cannot parse an in-memory array; build the
        # DataFrame directly (rows = samples, columns = named channels).
        # assumes the board emits exactly len(column_names) rows -- TODO confirm
        df = pd.DataFrame(data.T, columns=column_names)
        df = df.drop(dropped_row_indices, axis=0).reset_index()
        # 'level_0' is the column reset_index adds when 'index' already exists
        df = df.drop(['level_0', 'index', 'timestamp'], axis=1)
        df = df.dropna(axis=0)

        model = joblib.load('flask_test/rfc.joblib')

        commands_pred = model.predict(df)

        commands_df = pd.DataFrame({
            'index': df.index,
            'predictions': commands_pred
        })
        commands_df['predictions'] = commands_df['predictions'].astype('int64')
        # value_counts() sorts descending, so the first index is the modal command
        command_count = commands_df['predictions'].value_counts()
        ccdf = pd.DataFrame({
            'index': command_count.index,
            'predictions': command_count
        })
        preds = ccdf['index'].values
        command_pred = preds[0]

        return str(command_pred)
Example n. 11
0
def main():
    """Stream EEG data from a BrainFlow board and print P300 predictions.

    Board selection and connection details come from the command line
    (see the BrainFlow docs for which flags a specific board requires).
    Runs until Ctrl-C, then shuts the streaming session down.
    """
    parser = argparse.ArgumentParser ()
    # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port
    parser.add_argument ('--timeout', type = int, help  = 'timeout for device discovery or connection', required = False, default = 0)
    parser.add_argument ('--ip-port', type = int, help  = 'ip port', required = False, default = 0)
    parser.add_argument ('--ip-protocol', type = int, help  = 'ip protocol, check IpProtocolType enum', required = False, default = 0)
    parser.add_argument ('--ip-address', type = str, help  = 'ip address', required = False, default = '')
    parser.add_argument ('--serial-port', type = str, help  = 'serial port', required = False, default = '')
    parser.add_argument ('--mac-address', type = str, help  = 'mac address', required = False, default = '')
    parser.add_argument ('--other-info', type = str, help  = 'other info', required = False, default = '')
    parser.add_argument ('--streamer-params', type = str, help  = 'streamer params', required = False, default = '')
    parser.add_argument ('--serial-number', type = str, help  = 'serial number', required = False, default = '')
    parser.add_argument ('--board-id', type = int, help  = 'board id, check docs to get a list of supported boards', required = True)
    parser.add_argument ('--log', action = 'store_true')
    args = parser.parse_args ()

    params = BrainFlowInputParams ()
    params.ip_port = args.ip_port
    params.serial_port = args.serial_port
    params.mac_address = args.mac_address
    params.other_info = args.other_info
    params.serial_number = args.serial_number
    params.ip_address = args.ip_address
    params.ip_protocol = args.ip_protocol
    params.timeout = args.timeout

    if (args.log):
        BoardShim.enable_dev_board_logger ()
    else:
        BoardShim.disable_board_logger ()

    board = BoardShim (args.board_id, params)
    board.prepare_session ()

    board.start_stream (45000, args.streamer_params)

    model = P300ClassifierLDA()
    model.load("test-model")

    figlet = Figlet(font='slant')

    time.sleep(3)  # let the ring buffer fill before the first prediction

    try:
        # Loop forever; the inner handler turns Ctrl-C into a clean break.
        while True:
            try:
                with mne.utils.use_log_level('error'):
                    data = board.get_current_board_data(SAMPLES_TOTAL) # TODO constant from model
                    raw = create_raw(data, model)
                    prediction = prepare_raw(raw, model)
                    print_prediction(figlet, prediction)
            except KeyboardInterrupt:
                print("Got keyboard interrupt, stopping...")
                break
    finally:
        # Release the session even if an unexpected exception escapes the
        # loop, so the board is never left streaming.
        board.stop_stream ()
        board.release_session ()
Esempio n. 12
0
def main(i):
    """Pull a 250-sample window from the synthetic board, filter the first
    four EEG channels, score concentration/relaxation with BrainFlow's ML
    models, and plot the channels against their timestamps.

    ``i`` is accepted (e.g. from a matplotlib animation callback) but unused.
    """
    BoardShim.enable_dev_board_logger()
    BoardShim.disable_board_logger(
    )  #optional. take this out for initial setup for your board.

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board_id = BoardIds.SYNTHETIC_BOARD.value
    board = BoardShim(board_id, params)
    eeg_channels = BoardShim.get_eeg_channels(board_id)
    sampling_rate = BoardShim.get_sampling_rate(board_id)
    timestamp = BoardShim.get_timestamp_channel(board_id)

    board.prepare_session()
    board.start_stream()
    style.use('fivethirtyeight')
    plt.title("Live EEG stream from Brainflow", fontsize=15)
    plt.ylabel("Data in millivolts", fontsize=15)
    plt.xlabel("\nTime", fontsize=10)
    keep_alive = True

    eeg1 = []  #lists to store eeg data
    eeg2 = []
    eeg3 = []
    eeg4 = []
    timex = []  #list to store timestamp

    while keep_alive == True:

        # wait for a full window so the data shape is always the same
        while board.get_board_data_count() < 250:
            time.sleep(0.005)
        data = board.get_current_board_data(250)

        # creating a dataframe of the eeg data to extract eeg values later
        eegdf = pd.DataFrame(np.transpose(data[eeg_channels]))
        eegdf_col_names = [
            "ch1", "ch2", "ch3", "ch4", "ch5", "ch6", "ch7", "ch8", "ch9",
            "ch10", "ch11", "ch12", "ch13", "ch14", "ch15", "ch16"
        ]
        eegdf.columns = eegdf_col_names

        # to keep it simple, making another dataframe for the timestamps to access later
        timedf = pd.DataFrame(np.transpose(data[timestamp]))

        print(
            "EEG Dataframe"
        )  #easy way to check what data is being streamed and if program is working
        print(eegdf)  #isn't neccesary.

        # filters work in-place (Check Brainflow docs for more filters).
        # The first four channels -- the ones plotted below -- all get the
        # identical bandstop 58-62 Hz + bandpass 11-31 Hz treatment, so one
        # branch replaces the four duplicated copies.
        for count, channel in enumerate(eeg_channels):
            if count < 4:
                DataFilter.perform_bandstop(data[channel], sampling_rate, 60.0,
                                            4.0, 4,
                                            FilterTypes.BUTTERWORTH.value,
                                            0)  # bandstop 58 - 62
                DataFilter.perform_bandpass(data[channel], sampling_rate, 21.0,
                                            20.0, 4, FilterTypes.BESSEL.value,
                                            0)  # bandpass 11 - 31

        # Brainflow ML Model
        bands = DataFilter.get_avg_band_powers(data, eeg_channels,
                                               sampling_rate, True)
        feature_vector = np.concatenate((bands[0], bands[1]))

        # calc concentration -- run the classifier once and reuse the value
        concentration_params = BrainFlowModelParams(
            BrainFlowMetrics.CONCENTRATION.value,
            BrainFlowClassifiers.KNN.value)
        concentration = MLModel(concentration_params)
        concentration.prepare()
        concentrated_measure = concentration.predict(feature_vector)
        print('Concentration: %f' % concentrated_measure)
        concentration.release()

        # calc relaxation -- same pattern as concentration
        relaxation_params = BrainFlowModelParams(
            BrainFlowMetrics.RELAXATION.value, BrainFlowClassifiers.KNN.value)
        relaxation = MLModel(relaxation_params)
        relaxation.prepare()
        relaxed_measure = relaxation.predict(feature_vector)
        print('Relaxation: %f' % relaxed_measure)
        relaxation.release()

        #appending eeg data to lists
        eeg1.extend(
            eegdf.iloc[:, 0].values
        )  # I am using OpenBCI Ganglion board, so I only have four channels.
        eeg2.extend(
            eegdf.iloc[:, 1].values
        )  # If you have a different board, you should be able to copy paste
        eeg3.extend(eegdf.iloc[:,
                               2].values)  # these commands for more channels.
        eeg4.extend(eegdf.iloc[:, 3].values)
        timex.extend(timedf.iloc[:, 0].values)  # timestamps

        plt.cla()
        #plotting eeg data
        plt.plot(timex, eeg1, label="Channel 1", color="red")
        plt.plot(timex, eeg2, label="Channel 2", color="blue")
        plt.plot(timex, eeg3, label="Channel 3", color="orange")
        plt.plot(timex, eeg4, label="Channel 4", color="purple")
        plt.tight_layout()
        keep_alive = False  #resetting stream so that matplotlib can plot data

        if concentrated_measure >= 0.5:
            print(
                "GOOD KEEP CONCENTRATING"
            )  #a program screaming at you to concentrate should do the trick :)
        else:
            print("WHERE IS THE CONCENTRATION??")

        if relaxed_measure >= 0.5:
            print("YES RELAX MORE")
        else:
            print("NO, START RELAXING")

    board.stop_stream()
    board.release_session()
Esempio n. 13
0
    def return_prediction():
        """Capture a short EEG window from the board, run the saved classifier
        on it, and return the most frequent predicted command as a string.
        """
        #ms = post_id
        # Lines 27-65 copied from https://brainflow.readthedocs.io/en/stable/Examples.html
        # use docs to check which parameters are required for specific board, e.g. for Cyton - set serial port

        params = BrainFlowInputParams()
        # NOTE(review): 'board_id' does not look like a standard
        # BrainFlowInputParams field -- the board id is what is passed to
        # BoardShim below. Confirm whether this assignment has any effect.
        params.board_id = 1
        params.serial_port = "COM4"
        params.mac_address = "DC:3D:46:E5:10:99"

        board = BoardShim(1, params)
        board.prepare_session()

        board.start_stream()  # use this for default options
        data = board.get_current_board_data(
            256
        )  # get latest 256 packages or less, doesnt remove them from internal buffer
        board.stop_stream()
        board.release_session()

        # CONNECT THE DATA TO THE MODEL FOR PREDICTIONS
        # Build the feature frame the model expects. Row 7 is deliberately
        # skipped (presumably the timestamp channel -- TODO confirm against
        # the board's channel layout); data[8] is treated as 'aux'.
        df = pd.DataFrame({
            'channel1': data[0],
            'channel2': data[1],
            'channel3': data[2],
            'channel4': data[3],
            'accel1': data[4],
            'accel2': data[5],
            'accel3': data[6],
            'aux': data[8]
        })

        df = df.dropna(axis=0)

        model = joblib.load('flask_test/rfc.joblib')

        # Per-sample class predictions for the whole window. (A previous
        # version also computed predict_proba and discarded the result --
        # that dead call has been removed.)
        commands_pred = model.predict(df)

        commands_df = pd.DataFrame({
            'index': df.index,
            'predictions': commands_pred
        })
        commands_df['predictions'] = commands_df['predictions'].astype('int64')
        # value_counts() sorts by frequency descending, so the first index
        # entry is the most common predicted command in this window.
        command_pred = commands_df['predictions'].value_counts().index[0]

        return str(command_pred)
class Neural_Feedback:
    def overlay_window_control_thread(self):
        signal_timestamp = time.time()
        old_signal = self.positive_signal
        try:
            brightness = 0.0
            brightness_delta = 0.01
            while self.player_is_playing:
                signal = self.positive_signal
                if old_signal != signal:  # and (self.is_last_signal_delta_high or time.time() - signal_timestamp > 2):
                    signal_timestamp = time.time()
                    if signal:
                        brightness_delta = -0.07 if self.is_last_signal_delta_high else -0.02
                    else:
                        brightness_delta = 0.02
                    old_signal = signal
                brightness += brightness_delta
                if brightness > .7:
                    brightness = .7
                if brightness < .01:
                    brightness = .01

                self.tk_window.wm_attributes('-alpha', brightness)
                self.tk_window.update()
                time.sleep(.05)

        except Exception as e:
            print(e)
        finally:
            self.player_is_playing = False

    def __init__(self):
        """Set up board parameters and default runtime state.

        The board session itself is opened later, in board_read_thread.
        """
        BoardShim.enable_dev_board_logger()
        params = BrainFlowInputParams()
        params.serial_port = "/dev/ttyUSB0"
        self.board_id = BoardIds.CYTON_DAISY_BOARD.value
        self.sampling_rate = BoardShim.get_sampling_rate(self.board_id)
        self.board = BoardShim(self.board_id, params)
        self.player_is_playing = False
        self.positive_signal = True
        # Must be initialized here: overlay_window_control_thread reads this
        # attribute, and previously it was first assigned only inside
        # board_read_thread -- so the overlay thread could race it and die
        # with an AttributeError before the first board iteration.
        self.is_last_signal_delta_high = False
        self.last_signal_delta = 0
        self.metrics = {}
        self.channels = {}

    def dispose(self):
        # Tear down the BrainFlow session; call once streaming has finished.
        self.board.stop_stream()
        self.board.release_session()

    def config_protocol(self, protocol):
        (components, self.metrics) = protocol
        for component in components.values():
            if (component.channel not in self.channels):
                self.channels[component.channel] = []
            self.channels[component.channel].append(component)
        print(f'config {len(self.channels)} channels')
        for channel in self.channels.keys():
            print(f'ch {channel}')
            for component in self.channels[channel]:
                print(f'{component.name}')
        for metric in self.metrics:
            if metric.component2 is None:
                print(f'metric {metric.metric_type} {metric.component1.name}')
            else:
                print(
                    f'metric {metric.metric_type} {metric.component1.name} {metric.component2.name}'
                )

    def board_read_thread(self):
        """Stream from the board, update band powers roughly 10x/sec, derive
        the positive/negative feedback signal, and append a CSV log line per
        iteration.

        Any exception stops playback and destroys the Tk window.
        """
        try:
            self.board.prepare_session()
            self.board.start_stream()
            nfft = DataFilter.get_nearest_power_of_two(self.sampling_rate)
            eeg_channels = BoardShim.get_eeg_channels(self.board_id)
            time.sleep(3)  # let the buffer fill before the first read
            self.player_is_playing = True
            signal_freq_coeff = 1.05  # auto adjustable coefficient?
            high_signal_freq_coeff = 1.5
            # 'with' guarantees the log file is closed even if an iteration
            # raises (previously it leaked on any mid-loop exception).
            with open(f'log2-{time.time()}.csv', 'a') as data_log_file:
                print_bands = []
                for channel in self.channels.keys():
                    print_bands.append(','.join(
                        [b.name for b in self.channels[channel]]))
                data_log_file.write(
                    f'time,metrics_sum,signal,high_signal,{",".join(print_bands)}')
                metrics_hist = []
                while self.player_is_playing:
                    time.sleep(.1)
                    self.on_next(eeg_channels, nfft)
                    metrics_sum = 0.0
                    for metric in self.metrics:
                        metrics_sum += metric.get_metric()
                    # Sliding window of the last 150 sums (~15 s at 10 Hz).
                    metrics_hist.append(metrics_sum)
                    if len(metrics_hist) > 150:
                        metrics_hist.pop(0)

                    avg_metrics_hist = sum(metrics_hist) / len(metrics_hist)
                    # Positive when the current sum beats the (scaled) recent average.
                    self.positive_signal = avg_metrics_hist < metrics_sum * signal_freq_coeff

                    self.is_last_signal_delta_high = False
                    if self.positive_signal and avg_metrics_hist < metrics_sum * high_signal_freq_coeff:
                        self.is_last_signal_delta_high = True

                    print(
                        f'{self.positive_signal} {avg_metrics_hist} < {metrics_sum*signal_freq_coeff}'
                    )
                    print_bands = []
                    for channel in self.channels.keys():
                        print_bands.append(','.join([
                            str(b.band_current_power)
                            for b in self.channels[channel]
                        ]))

                    log_line = f'\n{time.asctime(time.gmtime(time.time()))},{metrics_sum},{self.positive_signal},{self.is_last_signal_delta_high},{",".join(print_bands)}'

                    data_log_file.write(log_line)
        except Exception as e:
            print(e)
            self.player_is_playing = False
            self.tk_window.destroy()
        return

    def main(self):
        """Show the fullscreen black overlay and run the board-reader and
        overlay-control threads until the Tk main loop exits (press 'q')."""
        self.player_is_playing = True
        window = Tk()
        self.tk_window = window
        window.attributes('-fullscreen', True)
        window.configure(background='black')
        window.wait_visibility(window)
        # 'q' closes the window and thereby ends mainloop below.
        window.bind('q', lambda k: window.destroy())
        reader = Thread(target=self.board_read_thread)
        overlay = threading.Thread(
            target=self.overlay_window_control_thread)
        reader.start()
        overlay.start()
        window.mainloop()
        self.player_is_playing = False
        #overlay_control_thread.join()
        reader.join()

    def on_next(self, eeg_channels, nfft):
        """Refresh every configured component's band power from the newest
        board samples (reads the buffer without consuming it)."""
        # ~1 sec of data, fetched ~10 times a second.
        window = max(self.sampling_rate, nfft) + 1
        data = self.board.get_current_board_data(window)
        for ch, components in self.channels.items():
            samples = data[ch]
            DataFilter.detrend(samples, DetrendOperations.LINEAR.value)
            psd = DataFilter.get_psd_welch(
                samples, nfft, nfft // 2, self.sampling_rate,
                WindowFunctions.BLACKMAN_HARRIS.value)
            for component in components:
                component.add_band_power_value(psd)