def main():
    """Demo: stream data from the synthetic board, then round-trip it through
    wavelet and FFT transforms, printing original and restored signals.

    Side effects: opens/closes a board session, sleeps 10 s while streaming,
    and prints per-channel data to stdout.
    """
    BoardShim.enable_dev_board_logger()

    # use synthetic board for demo
    params = BrainFlowInputParams()
    board_id = BoardIds.SYNTHETIC_BOARD.value
    sampling_rate = BoardShim.get_sampling_rate(board_id)
    board = BoardShim(board_id, params)
    board.prepare_session()
    try:
        board.start_stream()
        BoardShim.log_message(LogLevels.LEVEL_INFO.value, 'start sleeping in the main thread')
        time.sleep(10)
        # grab a power-of-two number of samples so perform_fft below is valid
        data = board.get_current_board_data(
            DataFilter.get_nearest_power_of_two(sampling_rate))
        board.stop_stream()
    finally:
        # fix: always release the session, even if streaming raised,
        # so the board resource is not leaked
        board.release_session()

    eeg_channels = BoardShim.get_eeg_channels(board_id)
    # demo for transforms
    for count, channel in enumerate(eeg_channels):
        print('Original data for channel %d:' % channel)
        print(data[channel])
        # demo for wavelet transforms
        # wavelet_coeffs format is [A(J) D(J) D(J-1) ..... D(1)] where J is
        # decomposition level, A - app coeffs, D - detailed coeffs
        # lengths array stores lengths for each block
        wavelet_coeffs, lengths = DataFilter.perform_wavelet_transform(
            data[channel], 'db5', 3)
        app_coefs = wavelet_coeffs[0:lengths[0]]
        # fix: lengths holds per-block lengths (not cumulative offsets), so the
        # first detailed block ends at lengths[0] + lengths[1]
        detailed_coeffs_first_block = wavelet_coeffs[lengths[0]:lengths[0] + lengths[1]]
        # you can do smth with wavelet coeffs here, for example denoising works
        # via thresholds for wavelets coefficients
        restored_data = DataFilter.perform_inverse_wavelet_transform(
            (wavelet_coeffs, lengths), data[channel].shape[0], 'db5', 3)
        print('Restored data after wavelet transform for channel %d:' % channel)
        print(restored_data)

        # demo for fft, len of data must be a power of 2
        fft_data = DataFilter.perform_fft(data[channel], WindowFunctions.NO_WINDOW.value)
        # len of fft_data is N / 2 + 1
        restored_fft_data = DataFilter.perform_ifft(fft_data)
        print('Restored data after fft for channel %d:' % channel)
        print(restored_fft_data)
def fft(self, window_size: float = 0) -> (np.ndarray, np.ndarray):
    """Compute the FFT of this channel's data.

    The window size (in seconds) specifies how to divide the data; the FFT is
    computed for each window and all results are averaged. Consecutive windows
    overlap by half their size. With the default window size of zero, no
    windowing is used and a single FFT over the whole signal is returned.

    :param window_size: window length in seconds; 0 disables windowing
    :return: two arrays of equal length — the frequencies and their
             corresponding power values
    """
    if window_size != 0:
        window_size_in_samples = math.floor(self.sampling_rate * window_size)
        if window_size_in_samples >= self.channel_data.shape[0]:
            # fix: typo in the user-facing message ("to big" -> "too big")
            print("Window too big for data, not using!")
            return self.fft(window_size=0)
        # perform_fft requires a power-of-two input length
        window_length = pow(2, closest_power_of_two(window_size_in_samples))
        half_window_size_in_samples = math.floor(window_size_in_samples / 2)
        windows_count = math.floor(self.channel_data.shape[0] / window_size_in_samples)
        # Add overlaps. Each non-last window has an overlapping window
        windows_count += windows_count - 1
        fft_average = AccumulatingAverages()
        for window_index in range(0, windows_count):
            # windows advance by half a window, producing 50% overlap
            start_index = half_window_size_in_samples * window_index
            end_index = start_index + window_size_in_samples
            window_data = self.channel_data[start_index:end_index]
            # TODO: Find out whether a window function should be used
            amplitudes = abs(
                DataFilter.perform_fft(window_data[:window_length],
                                       WindowFunctions.NO_WINDOW.value))
            # normalize by window length so averaged windows are comparable
            amplitudes = amplitudes * (1 / window_length)
            fft_average.add_values(amplitudes)
        base_freq = self.sampling_rate / window_length
        frequencies = np.linspace(0, window_length - 1, window_length) * base_freq
        # FFT of a real signal yields window_length / 2 + 1 usable bins
        frequencies = frequencies[0:int(window_length / 2 + 1)]
        return frequencies, fft_average.compute_average()
    else:
        # no windowing: one FFT over the largest power-of-two prefix
        length = pow(2, closest_power_of_two(self.channel_data.shape[0]))
        base_freq = self.sampling_rate / length
        frequencies = np.linspace(0, length - 1, length) * base_freq
        frequencies = frequencies[0:int(length / 2 + 1)]
        powers = abs(
            DataFilter.perform_fft(self.channel_data[:length],
                                   WindowFunctions.NO_WINDOW.value))
        return frequencies, powers * (1 / length)
def fft(self, data, parameter):
    """Thin wrapper: forward *data* and the window *parameter* straight to
    DataFilter.perform_fft and return its result unchanged."""
    return DataFilter.perform_fft(data, parameter)