# Example no. 1 (scraped snippet separator)
# score: 0
    def example(self):
        """Push one random sample through the network and return the
        reconstruction residual (network output minus the raw input).

        NOTE(review): depends on module-level helpers (get_random_example,
        fft_data, flatten_complex_data, iflatten_complex_data, ifft_data)
        and on ``self.in_cpu`` / ``self.device`` being set elsewhere.
        """
        raw = get_random_example(1, config.TRIM_LENGTH)

        # Frequency-domain, real/imag-flattened representation for the net.
        spectrum = flatten_complex_data(fft_data(raw))
        if self.in_cpu:
            batch = torch.tensor(spectrum)
        else:
            batch = torch.tensor(spectrum, device=self.device,
                                 dtype=torch.float32)

        # Forward pass, then bring the result back to a host NumPy array.
        output = np.array(self.forward(batch).detach().cpu())

        # Invert the flatten + FFT so the comparison is in the time domain.
        reconstructed = ifft_data(iflatten_complex_data(output))[0]
        original = np.array(raw[0])

        return reconstructed - original
from data_reader import write_one_file
from multiprocessing import set_start_method
import random
import os

# time.sleep(13500)
# Prepare the training set for this model
print('Preparing the training set...')
# Fall back to a default trim length when the config does not set one.
if config.TRIM_LENGTH is None:
    set_trim_length(300)
# Pipeline: standardize -> trim -> window -> FFT -> low-pass reduce -> flatten.
train_set = trim_data(standardize_all_data())
# Apply a Hann window to every example; WIN is the window array itself.
train_set, WIN = window(train_set, 'hann')
print(WIN.shape)
train_set = fft_data(train_set)
# Keep only low-frequency components up to 10 (presumably Hz or a bin
# index — confirm against lpf_dimension_reduction); dim is the reduced
# per-example dimension consumed by the model definition below.
train_set, dim = lpf_dimension_reduction(train_set, frequency=10)
# Split real and imaginary parts into separate real-valued features.
train_set = flatten_complex_data(train_set)
print(dim)
print('Training set is ready!')


class Complex_Fully_Connected_Linear_Discriminator_LPF(nn.Module):
    """Fully-connected discriminator over flattened complex LPF features.

    The input width is ``dimension * 2 * 6``: factor 2 because real and
    imaginary parts are separated; the factor 6 presumably matches the
    number of channels produced upstream — confirm against the pipeline.
    """

    def __init__(self, dimension):
        super().__init__()
        # Real and imaginary parts are separated, hence the factor of 2.
        self.n_in = dimension * 2 * 6
        hidden = self.n_in * 12

        # Three hidden linear layers plus a single-unit output layer.
        self.linear1 = nn.Linear(self.n_in, hidden)
        self.linear2 = nn.Linear(hidden, hidden)
        self.linear3 = nn.Linear(hidden, hidden)
        self.linear4 = nn.Linear(hidden, 1)
# Example no. 3 (scraped snippet separator)
# score: 0
# time.sleep(13500)
# Prepare the training set for this model
print('Preparing the training set...')
# Fall back to a default trim length when the config does not set one.
if config.TRIM_LENGTH is None:
    set_trim_length(300)
data = trim_data(standardize_all_data(), 150)
# Build samples from consecutive pairs: each element is segments i and
# i + 1 concatenated along axis 0.
data_set = []
for i in range(len(data) - 2):
    # NOTE(review): range stops at len(data) - 2, so the final (i, i + 1)
    # pair is never produced; len(data) - 1 would cover it — confirm intent.
    d = np.concatenate((data[i], data[i + 1]), 0)
    data_set.append(d)

# Window -> FFT -> low-pass dimension reduction -> split real/imag parts.
data_set, WIN = window(data_set, 'hann')
data_set = fft_data(data_set)
data_set, dim = lpf_dimension_reduction(data_set, frequency=10)
data_set = flatten_complex_data(data_set)

# Group the processed samples into non-overlapping triples (stride 3);
# any trailing remainder of fewer than three samples is dropped.
train_set = []
for i in range(0, len(data_set) - 2, 3):
    d = [data_set[i], data_set[i + 1], data_set[i + 2]]
    train_set.append(d)

# Sanity prints: slice shape of the first ten triples' third element,
# and the triple size (expected 3).
x = torch.tensor(train_set)
print(x[0:10, 2, :].shape)
print(x.shape[1])

print('Training set is ready!')


class Complex_Fully_Connected_Linear_Discriminator_LPF(nn.Module):
    def __init__(self, dimension):
from dynamic_reporter import init_dynamic_report
from dynamic_reporter import stop_dynamic_report
from dynamic_reporter import report
from data_reader import write_one_file
from multiprocessing import set_start_method
import random
import os

# Prepare the training set for this model
print('Preparing the training set...')
# Fall back to a default trim length when the config does not set one.
if config.TRIM_LENGTH is None:
    set_trim_length(300)
origin = trim_data(standardize_all_data())
# Reduce each example to 4 principal components before the FFT.
data_p = pca_data(origin, 4)
data_pf = fft_data(data_p)
# Split real and imaginary parts into separate real-valued features.
train_set = flatten_complex_data(data_pf)
print(train_set.shape)
print('Training set is ready!')


class Complex_Fully_Connected_Linear_Discriminator(nn.Module):
    def __init__(self, dimension):
        super(Complex_Fully_Connected_Linear_Discriminator, self).__init__()
        self.n_in = dimension * (
            config.TRIM_LENGTH // 2 +
            1) * 2  # real part and imaginary part are saperated

        # hidden linear layers
        self.linear1 = nn.Linear(self.n_in, self.n_in)
        self.linear2 = nn.Linear(self.n_in, self.n_in)
        self.linear3 = nn.Linear(self.n_in, self.n_in)