def sqrt_spectra(examples):
    """Return the single-sided magnitude spectrum of each example.

    Each element of the result is a ``(freq, magnitude)`` pair, where
    ``freq`` is the shared single-sided frequency axis derived from the
    length of the first example.
    """
    # All examples are assumed to share the first example's length
    # (TODO confirm against callers).
    sample_count = examples[0].shape[0]
    freq = get_single_side_frequency(sample_count)
    return [(freq, np.absolute(coeffs)) for coeffs in fft_data(examples)]
def log_spectra(examples):
    """Return the single-sided log-power spectrum of each example, in dB.

    Each element of the result is a ``(freq, db)`` pair where ``db`` is
    ``20 * log10(|FFT| / peak)`` — i.e. normalised so the spectral peak
    of each example sits at 0 dB.
    """
    # All examples are assumed to share the first example's length
    # (TODO confirm against callers).
    n = examples[0].shape[0]
    freq = get_single_side_frequency(n)
    spectras = []
    for coeffs in fft_data(examples):
        # Hoisted: the original computed np.absolute(coeffs) twice.
        magnitude = np.absolute(coeffs)
        peak = np.amax(magnitude)
        # NOTE(review): peak == 0 (all-zero example) would divide by zero
        # here, exactly as in the original — left unchanged.
        spectras.append((freq, 20 * np.log10(magnitude / peak)))
    return spectras
def draw_spectra_of(path):
    """Load one file, standardize it, plot its FFT magnitude, and return it.

    The plot is shown with the default index-based x axis (no frequency
    axis is computed here).
    """
    signal = _read_one_file(path)
    signal, _, _ = standardize_data(signal, data_mean(), data_std())
    spectrum = np.absolute(fft_data([signal])[0])
    plt.plot(spectrum)
    plt.show()
    return spectrum
def example(self):
    """Run one random example through the network and return the residual.

    Draws a single random example, transforms it to the flattened complex
    FFT representation the model expects, runs a forward pass, inverts the
    transform, and returns ``reconstruction - original`` as a numpy array.
    """
    raw = get_random_example(1, config.TRIM_LENGTH)
    features = flatten_complex_data(fft_data(raw))
    # Place the input tensor where the model lives; the CPU path keeps
    # the original's defaulted dtype.
    if self.in_cpu:
        batch = torch.tensor(features)
    else:
        batch = torch.tensor(features, device=self.device, dtype=torch.float32)
    output = np.array(self.forward(batch).detach().cpu())
    reconstruction = ifft_data(iflatten_complex_data(output))[0]
    original = np.array(raw[0])
    return reconstruction - original
from dynamic_reporter import stop_dynamic_report
from dynamic_reporter import report
from data_reader import write_one_file
from multiprocessing import set_start_method
import random
import os

# time.sleep(13500)

# Prepare the training set for this model:
# trim -> Hann window -> FFT -> low-pass dimension reduction -> flatten
# complex values into separate real/imag components.
print('Preparing the training set...')
if config.TRIM_LENGTH is None:
    set_trim_length(300)
train_set = trim_data(standardize_all_data())
# WIN is the window curve returned alongside the windowed data.
train_set, WIN = window(train_set, 'hann')
print(WIN.shape)
train_set = fft_data(train_set)
# Keep only components up to 10 (presumably Hz — TODO confirm units);
# dim is the reduced per-example dimension.
train_set, dim = lpf_dimension_reduction(train_set, frequency=10)
train_set = flatten_complex_data(train_set)
print(dim)
print('Training set is ready!')


class Complex_Fully_Connected_Linear_Discriminator_LPF(nn.Module):
    """Fully connected discriminator operating on LPF-reduced,
    flattened complex spectra. (Definition continues beyond this chunk.)"""

    def __init__(self, dimension):
        super(Complex_Fully_Connected_Linear_Discriminator_LPF, self).__init__()
        # real part and imaginary part are separated, hence the factor 2;
        # the factor 6 presumably matches the number of data axes — TODO confirm.
        self.n_in = dimension * 2 * 6
        # hidden linear layers
        self.linear1 = nn.Linear(self.n_in, self.n_in * 12)
        self.linear2 = nn.Linear(self.n_in * 12, self.n_in * 12)
def _draw(i):
    """Matplotlib animation callback: refresh the loss, score, spectrum and
    time-domain panels from the latest message in QUEUE.

    ``i`` is the frame index supplied by the animation driver; it is unused
    (and shadowed by the loop indices below).
    """
    global AX1
    global AX2
    global AX3
    global AX4
    global FIGURE
    global QUEUE
    global DATA
    # Pull the newest payload if one is waiting; otherwise redraw the
    # previously received DATA.
    if not QUEUE.empty():
        DATA = QUEUE.get()
    # 'close' is the shutdown sentinel sent by the producer process.
    if DATA == 'close':
        plt.close(FIGURE)
        return
    example_length = DATA['example'].shape[0]
    freq = get_single_side_frequency(example_length)
    # Expected DATA schema (filled in by the reporter):
    # DATA['loss_title'] = data['loss_title']
    # DATA['losses'].append(data['loss']),
    # DATA['loss_labels'] = data['loss_labels']
    # DATA['score_title'] = data['score_title']
    # DATA['scores'].append(data['score'])
    # DATA['score_labels'] = data['score_labels']
    # DATA['interval'] = data['interval']
    n = len(DATA['losses'])
    interval = DATA['interval']
    # x axis: one point every `interval` training steps.
    x = np.arange(interval, interval * n + 1, interval)
    loss_Y = np.array(DATA['losses']).T   # one row per loss series
    score_Y = np.array(DATA['scores']).T  # one row per score series
    AX1.clear()
    AX2.clear()
    AX3.clear()
    AX4.clear()
    # Panel 1: loss curves.
    AX1.set_title(DATA['loss_title'])
    AX1.set_xlabel('Training Steps')
    AX1.set_ylabel('Model Loss')
    for i in range(loss_Y.shape[0]):
        AX1.plot(x, loss_Y[i].flatten(), label=DATA['loss_labels'][i])
    AX1.legend()
    # Panel 2: score curves.
    AX2.set_title(DATA['score_title'])
    AX2.set_xlabel('Training Steps')
    AX2.set_ylabel('Score')
    for i in range(score_Y.shape[0]):
        AX2.plot(x, score_Y[i].flatten(), label=DATA['score_labels'][i])
    AX2.legend()
    # Panel 3: magnitude spectrum of the latest example, peak-normalised
    # to 0 dB.
    AX3.set_title('Spectra')
    AX3.set_xlabel('Frequency')
    AX3.set_ylabel('Power (dB)')
    example = DATA['example']
    spectra = np.absolute(fft_data([DATA['example']])[0]).real
    m = np.amax(spectra)
    spectra = 20 * np.log10(spectra / m)
    AX3.plot(freq, spectra)
    # Panel 4: the example itself in the time domain.
    AX4.set_title('Example')
    AX4.set_xlabel('Time')
    AX4.set_ylabel('Axis')
    AX4.plot(example.real)
    return
import random
import os

# time.sleep(13500)

# Prepare the training set for this model:
# trim -> concatenate adjacent pairs -> Hann window -> FFT -> low-pass
# reduction -> flatten complex values -> group into triples.
print('Preparing the training set...')
if config.TRIM_LENGTH is None:
    set_trim_length(300)
data = trim_data(standardize_all_data(), 150)
data_set = []
# Concatenate each trimmed segment with its successor along axis 0.
# NOTE(review): range(len(data) - 2) skips the final adjacent pair
# (data[-2], data[-1]) — confirm this is intentional.
for i in range(len(data) - 2):
    d = np.concatenate((data[i], data[i + 1]), 0)
    data_set.append(d)
# WIN is the window curve returned alongside the windowed data.
data_set, WIN = window(data_set, 'hann')
data_set = fft_data(data_set)
data_set, dim = lpf_dimension_reduction(data_set, frequency=10)
data_set = flatten_complex_data(data_set)
# Group flattened examples into non-overlapping triples.
train_set = []
for i in range(0, len(data_set) - 2, 3):
    d = [data_set[i], data_set[i + 1], data_set[i + 2]]
    train_set.append(d)
x = torch.tensor(train_set)
# Sanity prints: shape of the third element of the first ten triples,
# and the triple size.
print(x[0:10, 2, :].shape)
print(x.shape[1])
print('Training set is ready!')
from dynamic_reporter import stop_dynamic_report
from dynamic_reporter import report
from data_reader import write_one_file
from multiprocessing import set_start_method
import random
import os
from data_processor import window
from data_processor import iwindow

# Prepare the training set for this model:
# trim -> Hamming window -> FFT -> flatten complex values.
print('Preparing the training set...')
if config.TRIM_LENGTH is None:
    set_trim_length(300)
origin = trim_data(standardize_all_data())
# win is the window curve (needed later by iwindow to undo the windowing).
windowed, win = window(origin, 'hamming')
data = fft_data(windowed)
train_set = flatten_complex_data(data)
print('Training set is ready!')


class Complex_Fully_Connected_Linear_Discriminator(nn.Module):
    """Fully connected discriminator on full flattened complex spectra.
    (Definition continues beyond this chunk.)"""

    def __init__(self, dimension):
        super(Complex_Fully_Connected_Linear_Discriminator, self).__init__()
        # TRIM_LENGTH // 2 + 1 single-sided FFT bins per axis;
        # real part and imaginary part are separated, hence the factor 2.
        self.n_in = dimension * (
            config.TRIM_LENGTH // 2 + 1) * 2
        # hidden linear layers
        self.linear1 = nn.Linear(self.n_in, self.n_in)
        self.linear2 = nn.Linear(self.n_in, self.n_in)
        self.linear3 = nn.Linear(self.n_in, self.n_in)
from data_processor import trim_data
from dynamic_reporter import init_dynamic_report
from dynamic_reporter import stop_dynamic_report
from dynamic_reporter import report
from data_reader import write_one_file
from multiprocessing import set_start_method
import random
import os

# Prepare the training set for this model:
# trim -> PCA to 4 components -> FFT -> flatten complex values.
print('Preparing the training set...')
if config.TRIM_LENGTH is None:
    set_trim_length(300)
origin = trim_data(standardize_all_data())
data_p = pca_data(origin, 4)
data_pf = fft_data(data_p)
train_set = flatten_complex_data(data_pf)
print(train_set.shape)
print('Training set is ready!')


class Complex_Fully_Connected_Linear_Discriminator(nn.Module):
    """Fully connected discriminator on flattened complex spectra of
    PCA-reduced data. (Definition continues beyond this chunk.)"""

    def __init__(self, dimension):
        super(Complex_Fully_Connected_Linear_Discriminator, self).__init__()
        # TRIM_LENGTH // 2 + 1 single-sided FFT bins per axis;
        # real part and imaginary part are separated, hence the factor 2.
        self.n_in = dimension * (
            config.TRIM_LENGTH // 2 + 1) * 2
        # hidden linear layers
        self.linear1 = nn.Linear(self.n_in, self.n_in)
        self.linear2 = nn.Linear(self.n_in, self.n_in)
lpf_compare(samples) # %% WGAN = load_model(Complex_Fully_Connected_WGAN, Choosed_WGAN, (6, )) # %% samples = sample_net(WGAN, 6) # %% draw_frequency_analysis(samples) # %% draw_frequency_analysis_lpf(samples) # %% lpf_compare(samples) # %% origin = get_random_example(6, 300) originf = fft_data(origin) yf, _ = lpf_dimension_reduction(originf, 5) yf = pad_data_zeros(yf, 151) y = ifft_data(yf) # %% draw_frequency_analysis(origin) # %% draw_frequency_analysis(y) # %% compare(origin, y) # %% originw, win = window(origin, 'hamming') originfw = fft_data(originw) yfw, _ = lpf_dimension_reduction(originfw, 5) yfw = pad_data_zeros(yfw, 151) yw = ifft_data(yfw)
# %% from data_processor import fft_data from data_processor import ifft_data import numpy as np import config from data_reader import get_trajectory from data_processor import trim_data from scipy.fft import fft, fftfreq, ifft # %% data = get_trajectory('Adam') data = trim_data(data, 300) # %% data[0] # %% data_f = fft_data(data) # %% data_f[0].shape # %% idata = ifft_data(data_f) # %% idata[0] # %% data[0] - idata[0] # %% x = data[0].iloc[:, 0] # %% x # %% fx = fft(np.array(x))