def generate_data(system):
  """Generate training, validation and test DataSets for identifying `system`.

  The training set excites the system with Gaussian white noise, the
  validation set with a pool of multi-tone signals of growing complexity,
  and the test set with a single multi-tone signal at TEST_FREQS.

  :param system: a callable mapping an input signal to the system's response
  :return: a (training_set, validation_set, test_set) tuple of DataSets
  """
  # --- Training set: NOISE_NUM white-noise records of intensity A ---
  A = 1
  noises, noise_responses = [], []
  for _ in range(NOISE_NUM):
    excitation = gaussian_white_noise(A, NOISE_LEN, NOISE_LEN)
    noises.append(excitation)
    noise_responses.append(system(excitation))
  training_set = DataSet(
    noises, noise_responses, memory_depth=NN_MEM_DEPTH, intensity=A)

  # --- Validation set: multi-tone signals with 1 to 5 components ---
  fs = 20000
  freq_pool = [[8000],
               [1200, 7690],
               [560, 1400, 8000],
               [3200, 4550, 6710, 8190],
               [1100, 3200, 5210, 7019, 7200]]
  duration = 2
  noise_power = 1e-3
  val_signals, val_responses = [], []
  for freqs in freq_pool:
    tone = multi_tone(freqs, fs, duration, noise_power=noise_power)
    val_signals.append(tone)
    val_responses.append(system(tone))
  validation_set = DataSet(
    val_signals, val_responses, memory_depth=NN_MEM_DEPTH)

  # --- Test set: one multi-tone signal at the frequencies under test ---
  test_signal = multi_tone(TEST_FREQS, fs, duration, noise_power=noise_power)
  test_set = DataSet(
    test_signal, system(test_signal), memory_depth=NN_MEM_DEPTH)

  return training_set, validation_set, test_set
def wrap(x, y, filename=None):
  """Wrap raw signals into a DataSet and optionally persist it.

  :param x: input signal(s)
  :param y: corresponding target/response signal(s)
  :param filename: if given, the data set is saved to this path
  """
  from signals.utils import DataSet

  data_set = DataSet(x, y, name='train_set')

  # Save only when a destination was supplied
  if filename is None:
    return
  data_set.save(filename)
  print('>> Data set saved to {}'.format(filename))
def generate_data():
  """Generate train/val/test DataSets from a synthetic Volterra system.

  A 9th-order Volterra model is excited with Gaussian white noise; the first
  80% of the record forms the training set and the remainder the validation
  set.  A two-tone signal is used for the test set.

  :return: a (train_set, val_set, test_set) tuple of DataSets
  """
  system = Volterra()
  # Kernel coefficients, keyed by lag tuple, grouped by order.
  # order 1
  system.set_kernel((0, ), 1.0)
  system.set_kernel((10, ), 0.2)
  system.set_kernel((20, ), 0.03)
  # order 2
  system.set_kernel((0, 0), 0.03)
  system.set_kernel((10, 20), 0.01)
  # order 3
  system.set_kernel((10, 20, 10), 0.001)
  system.set_kernel((0, 20, 10), 0.0005)
  # order 4
  system.set_kernel((0, 20, 10, 30), 0.0001)
  system.set_kernel((0, 10, 10, 10), 0.0001)
  # order 5
  system.set_kernel((0, 10, 20, 10, 30), 0.00001)
  system.set_kernel((0, 10, 20, 20, 30), 0.00001)
  # order 6
  system.set_kernel((0, 10, 20, 20, 30, 20), 0.000001)
  system.set_kernel((0, 10, 20, 20, 30, 30), 0.000001)
  # order 7
  system.set_kernel((0, 10, 20, 20, 30, 30, 20), 0.0000001)
  system.set_kernel((0, 10, 20, 20, 30, 30, 10), 0.0000001)
  # order 8
  system.set_kernel((0, 10, 20, 20, 30, 30, 10, 10), 0.0000001)
  system.set_kernel((0, 10, 20, 20, 30, 30, 10, 0), 0.0000001)
  # order 9
  # NOTE(review): both order-9 calls use the SAME lag tuple; the second one
  # either overwrites or accumulates onto the first depending on set_kernel's
  # semantics.  One of the tuples was probably meant to differ — confirm.
  system.set_kernel((0, 10, 20, 20, 30, 30, 10, 0, 10), 0.0000001)
  system.set_kernel((0, 10, 20, 20, 30, 30, 10, 0, 10), 0.0000001)

  sig_length = 100000
  split = 80000  # 80/20 train/validation split point
  intensity = 1
  fs = 20000

  # Excite the system with one long white-noise record
  noise = gaussian_white_noise(intensity, sig_length, fs)
  noise_response = system(noise)

  # First 80% for training, remainder for validation
  tra_features, val_features = noise[:split], noise[split:]
  tra_targets, val_targets = noise_response[:split], noise_response[split:]

  # Test signal: two-tone excitation (typos 'mulit_tone_signal' /
  # 'systemoutput_test' fixed)
  freqs = [5000, 6000]
  multi_tone_signal = multi_tone(freqs, fs, 3)
  test_response = system(multi_tone_signal)

  train_set = DataSet(tra_features, tra_targets, memory_depth=30)
  val_set = DataSet(val_features, val_targets, memory_depth=30)
  test_set = DataSet(multi_tone_signal, test_response, memory_depth=30)
  return train_set, val_set, test_set
def psudo_load_data(file_path, depth):
  """Load a saved DataSet and reuse it as train, validation and test set.

  NOTE: the misspelled name ('psudo' for 'pseudo') is kept intentionally so
  that existing callers continue to work.

  :param file_path: path of a previously saved DataSet
  :param depth: memory depth passed to init_tfdata
  :return: the same DataSet instance three times (train, val, test)
  """
  loaded = DataSet.load(file_path)
  loaded.init_tfdata(depth)
  return loaded, loaded, loaded
# ============================================================================= # Identification # ============================================================================= # region : Generate data sets num = 2 A = 1 N = 50000 noises = [] noise_responses = [] for i in range(num): noise = gaussian_white_noise(A, N, N) noise_response = system(noise) noises.append(noise) noise_responses.append(noise_response) train_set = DataSet(noises, noise_responses, memory_depth=3, intensity=A) # endregion : Generate data sets # Prepare test signal freqs = [120, 310] fs = 1000 duration = 2 vrms = [1, 0.7] noise_power = 1e-3 signal = multi_tone(freqs, fs, duration, vrms, noise_power=noise_power) system_output = system(signal) val_set = DataSet(signal, system_output, memory_depth=3) # Wiener degree = 3
def step_3():
  """Load the previously saved data set and visually inspect it."""
  from signals.utils import DataSet

  loaded = DataSet.load('./hello_nls_data.tfd')
  # Sanity check before plotting: load must yield a DataSet instance
  assert isinstance(loaded, DataSet)
  loaded.plot()