Example #1
def stft_spectrogram(data):
    """
    Compute a spectrogram using the STFT with a Hamming window.
    """
    # parameters -------------------------------------------------------------
    config_ini = read_config.read_config()
    window_length = config_ini.getint('STFT', 'window_length')
    slide_length = config_ini.getfloat('STFT', 'slide_length')
    high_freq = config_ini.getint('STFT', 'frequency_high')
    low_freq = config_ini.getint('STFT', 'frequency_low')
    bins = config_ini.getint('STFT', 'bins')
    fs = config_ini.getint('SAMPLING_FREQUENCY', 'fs')
    # ------------------------------------------------------------------------
    power_list = []
    for i in range(0, int(len(data) - window_length * fs),
                   int(slide_length * fs)):
        power, freq = util_func.power_spectrum(
            util_func.hamming_window(data[i:i + int(fs * window_length)]), fs)
        index_low = int(low_freq / freq[1])
        index_high = int(high_freq / freq[1])
        power = power[index_low:index_high]
        power_mean = [
            np.array(power[j:j + bins]).mean()
            for j in range(0, len(power) - bins, bins)
        ]
        power_list.append(power_mean)
    return util_func.min_max_image(power_list)
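
For reference, a minimal self-contained sketch of the same sliding-window idea, assuming util_func.hamming_window applies np.hamming and util_func.power_spectrum returns an FFT power spectrum (both assumptions, since util_func is not shown here):

import numpy as np

def sliding_power_spectra(data, fs, window_length=1.0, slide_length=0.5):
    """Hamming-windowed power spectra over sliding windows (illustrative only)."""
    win = int(window_length * fs)
    hop = int(slide_length * fs)
    spectra = []
    for i in range(0, len(data) - win, hop):
        frame = data[i:i + win] * np.hamming(win)        # taper each frame
        spectra.append(np.abs(np.fft.rfft(frame)) ** 2)  # power spectrum
    return np.array(spectra)  # shape: (frames, win // 2 + 1)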
Example #2
def anomaly_score(real_image, fake_img, D):
    """
    """
    # parameters-----------------------------------
    config_ini = read_config.read_config()
    lam = config_ini.getfloat('ANOMALY', 'lambda')
    # ---------------------------------------------

    # pixel difference
    residual_loss = torch.abs(real_image - fake_img)
    residual_loss = residual_loss.view(residual_loss.size()[0], -1)
    residual_loss = torch.sum(residual_loss, dim=1)

    # fetch discriminator feature
    _, real_feature = D(real_image)
    _, G_feature = D(fake_img)

    # feature difference
    discrimination_loss = torch.abs(real_feature-G_feature)
    discrimination_loss = discrimination_loss.view(discrimination_loss.size()[0], -1)
    discrimination_loss = torch.sum(discrimination_loss, dim=1)

    # calculate anomaly score
    loss_each = (1-lam)*residual_loss + lam*discrimination_loss
    total_loss = torch.sum(loss_each)

    return total_loss, loss_each
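
A minimal usage sketch with dummy tensors; StubD is a hypothetical stand-in for the real Discriminator, which anomaly_score assumes returns a (logit, feature) pair:

import torch
import torch.nn as nn

class StubD(nn.Module):
    """Hypothetical discriminator returning (logit, intermediate feature)."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(1, 8, 4, stride=2, padding=1)
        self.head = nn.Linear(8 * 32 * 32, 1)

    def forward(self, x):
        feature = self.conv(x)
        return self.head(feature.flatten(1)), feature

# real = torch.rand(4, 1, 64, 64)
# fake = torch.rand(4, 1, 64, 64)
# total, per_image = anomaly_score(real, fake, StubD())  # needs ANOMALY/lambda in the config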
Example #3
def run():
    """
    """
    # parameters----------------------------------------------------
    config_ini = read_config.read_config()
    z_dim = config_ini.getint('GENERATOR', 'z_dim')
    g_img_size = config_ini.getint('GENERATOR', 'image_size')
    d_img_size = config_ini.getint('DISCRIMINATOR', 'image_size')
    g_path = config_ini.get('PATH', 'g_save')
    d_path = config_ini.get('PATH', 'd_save')
    # --------------------------------------------------------------
    # set up data
    image_list = read_data()
    data_loader = make_dataset(image_list)

    # set up model
    G = Generator(z_dim, g_img_size)
    D = Discriminator(d_img_size)
    G.apply(weights_init)
    D.apply(weights_init)

    G_learned, D_learned = train_model(G, D, data_loader)

    # save model
    torch.save(D_learned, d_path)
    torch.save(G_learned, g_path)
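
weights_init is not shown in these examples; a common DCGAN-style initializer looks like the sketch below (an assumption, not necessarily the project's actual code):

import torch.nn as nn

def weights_init_sketch(m):
    """Normal(0, 0.02) for conv layers, Normal(1, 0.02) for batch norms."""
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)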
Example #4
def cut_overlap(raw_data):
    """
    Cut each signal to its central 30 seconds, then slice it into overlapping
    windows to augment the data.
    :param raw_data: list of arrays of raw data.
    :return: array of shape (batch, fs * window_length)
    """
    # parameters ------------------------------------------------------------------
    config_ini = read_config.read_config()
    window_length = config_ini.getint('PRE_PROCESSING', 'window_length')  # sec
    slide_length = config_ini.getint('PRE_PROCESSING', 'slide_length')  # sec
    cut_length = config_ini.getint('PRE_PROCESSING',
                                   'cut_length')  # sec (30/2)
    fs = config_ini.getint('SAMPLING_FREQUENCY', 'fs')  # Hz
    # ------------------------------------------------------------------------------
    cut_list = [
        sig[int(len(sig) / 2 - fs * cut_length):
            int(len(sig) / 2 + fs * cut_length)]
        for sig in raw_data
    ]
    overlapped = []
    for sig in cut_list:
        sig_over = [
            sig[i:i + int(fs * window_length)]
            for i in range(0, len(sig) - int(window_length * fs),
                           int(slide_length * fs))
        ]
        overlapped.append(sig_over)
    overlapped = np.array(overlapped)
    return overlapped.reshape(-1, overlapped.shape[2])
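
A self-contained demonstration of the same center-cut and overlap on synthetic data (the numbers are illustrative; the real ones come from the config):

import numpy as np

fs, cut_length, window_length, slide_length = 100, 15, 10, 5
sig = np.arange(40 * fs)                                  # fake 40 s recording
mid = len(sig) // 2
cut = sig[mid - fs * cut_length:mid + fs * cut_length]    # central 30 s
windows = np.array([cut[i:i + fs * window_length]
                    for i in range(0, len(cut) - fs * window_length,
                                   fs * slide_length)])
print(windows.shape)  # (4, 1000): four overlapping 10 s chunks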
Example #5
def read_model():
    """
    Load the trained Generator and Discriminator from their saved paths.
    """
    # parameters-----------------------------------
    config_ini = read_config.read_config()
    g_path = config_ini.get('PATH', 'g_save')
    d_path = config_ini.get('PATH', 'd_save')
    # ---------------------------------------------
    return torch.load(g_path), torch.load(d_path)
Example #6
def read_data(path_type=True):
    """
    :param path_type: True selects the noise directory, False the normal one.
    :return: list of matching image paths.
    """
    # parameters----------------------------------------------
    config_ini = read_config.read_config()
    if path_type is True:
        path = config_ini.get('PATH', 'test_noise_images')
    else:
        path = config_ini.get('PATH', 'test_normal_images')
    # ---------------------------------------------------------
    return glob.glob(path)
Example #7
def make_dataset(data_list):
    # parameters ------------------------------------------
    config_ini = read_config.read_config()
    mean = eval(config_ini.get('DATA', 'mean'))  # the config stores a tuple literal; ast.literal_eval would be safer
    std = eval(config_ini.get('DATA', 'std'))
    batch_size = config_ini.getint('DATA', 'batch_size')
    shuffle = config_ini.getboolean('DATA', 'shuffle')
    # -----------------------------------------------------
    train_dataset = GAN_Img_Dataset(file_list=data_list,
                                    transform=ImageTransform(mean, std))
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=shuffle)
    return train_dataloader
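
GAN_Img_Dataset and ImageTransform are not shown in these examples; a minimal sketch of what they are assumed to look like:

from PIL import Image
import torch.utils.data as data
from torchvision import transforms

class GAN_Img_Dataset_Sketch(data.Dataset):
    """Assumed shape of GAN_Img_Dataset: open a file path, apply a transform."""
    def __init__(self, file_list, transform):
        self.file_list = file_list
        self.transform = transform

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, index):
        return self.transform(Image.open(self.file_list[index]))

class ImageTransform_Sketch:
    """Assumed shape of ImageTransform: to tensor, then normalize."""
    def __init__(self, mean, std):
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)])

    def __call__(self, img):
        return self.transform(img)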
Example #8
def read_audio(directory_path=None):
    """
    :param directory_path: glob pattern for the audio files; defaults to the
        config value.
    :return: list of arrays of raw audio data.
    """
    if directory_path is None:
        # parameters ------------------------------------------------
        config_ini = read_config.read_config()
        directory_path = config_ini.get('PATH', 'data_directory')
        # -----------------------------------------------------------
    file_path = glob.glob(directory_path)
    file_path.sort()
    if not file_path:
        # fail fast before attempting to read anything
        print('FileNotFoundError: No such file or directory:', directory_path,
              file=sys.stderr)
        sys.exit(1)
    raw_data = [sf.read(path)[0] for path in file_path]
    return raw_data
Example #9
def spectrogram_image(data):
    """
    Pad spectrograms to a square and save each one as an image.
    :param data: list of raw signals; each is converted with stft_spectrogram.
    :return: None; images are written to save_path.
    """
    # parameters ------------------------------------------
    config_ini = read_config.read_config()
    save_path = config_ini.get('PATH', 'train_images')
    # -----------------------------------------------------
    spectrogram = [stft_spectrogram(i) for i in data]
    spectrogram = np.array(spectrogram)
    spectrogram = padding(spectrogram)
    for i in range(len(spectrogram)):
        spe_array = spectrogram[i].T
        image = Image.fromarray(spe_array.astype(np.uint8))
        image.save(save_path + "spectrogram{}.png".format(i))
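
Image.fromarray expects uint8 pixel values in 0-255, which is presumably why the spectrograms pass through min_max_image first; a minimal illustration:

import numpy as np
from PIL import Image

arr = (np.random.rand(64, 64) * 255).astype(np.uint8)  # must be scaled to 0-255 first
Image.fromarray(arr).save('example_spectrogram.png')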
Example #10
def optimize_z(path_type=True):
    """
    """
    # parameters----------------------------------------------
    config_ini = read_config.read_config()
    z_dim = config_ini.getint('Z', 'z_dim')
    z_lr = config_ini.getfloat('Z', 'z_lr')
    epoch = config_ini.getint('Z', 'epoch')
    if path_type is True:
        batch_size = config_ini.getint('DATALOADER', 'batch_noise')
    else:
        batch_size = config_ini.getint('DATALOADER', 'batch_normal')
    # ----------------------------------------------------------

    # Check GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Use device:", device)

    # dataset to iterator
    test_dataset = make_test_dataset(path_type)
    batch_iterator = iter(test_dataset)
    test_images = next(batch_iterator)
    test_images = test_images.to(device)

    # define z
    z = torch.randn(batch_size, z_dim).to(device)
    z = z.view(z.size(0), z.size(1), 1, 1)
    z.requires_grad = True
    z_optimizer = torch.optim.Adam([z], lr=z_lr)

    G, D = read_model()

    for i in range(epoch):
        fake_img = G(z)
        loss, _ = anomaly_score(test_images, fake_img, D)

        z_optimizer.zero_grad()
        loss.backward()
        z_optimizer.step()

        if i % 1000 == 0:
            print('epoch {} || loss_total:{:.0f} '.format(i, loss.item()))

    return z
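
The core trick above is optimizing the latent z itself while G and D stay fixed; a toy, self-contained version of the same pattern:

import torch

target = torch.tensor([1.0, -2.0, 3.0])
z = torch.randn(3, requires_grad=True)   # the input, not the model, is trainable
opt = torch.optim.Adam([z], lr=0.1)
for step in range(500):
    loss = torch.sum((z - target) ** 2)
    opt.zero_grad()
    loss.backward()
    opt.step()
print(z.detach())  # converges toward the target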
Example #11
def padding(spectrogram_array):
    """
    Zero-pad a batch of spectrograms so each becomes square.
    :param spectrogram_array: array of shape (batch, rows, cols)
    :return: the padded array
    """
    # parameters -------------------------------------
    config_ini = read_config.read_config()
    image_len = config_ini.getint('IMAGE', 'shape')
    # ------------------------------------------------
    diff_row = spectrogram_array.shape[1] - image_len
    diff_col = spectrogram_array.shape[2] - image_len
    try:
        if diff_row <= 0 and diff_col <= 0:
            spectrogram = np.pad(spectrogram_array, [(0, 0),
                                                     (0, abs(diff_row)),
                                                     (0, abs(diff_col))],
                                 'constant')

        elif diff_row <= 0 and diff_col >= 0:
            spectrogram = np.pad(spectrogram_array,
                                 [(0, 0), (0, abs(diff_row) + diff_col),
                                  (0, 0)], 'constant')

        elif diff_row >= 0 and diff_col <= 0:
            spectrogram = np.pad(spectrogram_array,
                                 [(0, 0), (0, 0),
                                  (0, abs(diff_col) + diff_row)], 'constant')

        elif diff_row >= 0 and diff_col >= 0:
            diff = diff_row - diff_col
            if diff <= 0:
                spectrogram = np.pad(spectrogram_array,
                                     [(0, 0), (0, abs(diff)),
                                      (0, 0)], 'constant')
            elif diff >= 0:
                spectrogram = np.pad(spectrogram_array, [(0, 0), (0, 0),
                                                         (0, diff)],
                                     'constant')
    except Exception:
        print("Error: cannot pad spectrogram", file=sys.stderr)
        sys.exit(1)
    return spectrogram
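
Every branch above reduces to np.pad with zeros on the trailing edges; a minimal demonstration of the first case:

import numpy as np

batch = np.ones((2, 60, 64))   # (batch, rows, cols)
image_len = 64                 # illustrative; the real value is in the config
padded = np.pad(batch, [(0, 0), (0, image_len - 60), (0, 0)], 'constant')
print(padded.shape)  # (2, 64, 64): zero-padded to a square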
Example #12
def make_test_dataset(path_type=True):
    """
    """
    # parameters------------------------------------------
    config_ini = read_config.read_config()
    mean = eval(config_ini.get('DATALOADER', 'mean'))
    std = eval(config_ini.get('DATALOADER', 'std'))
    shuffle = config_ini.getboolean('DATALOADER', 'shuffle')
    # -----------------------------------------------------
    if path_type is True:
        batch_size = config_ini.getint('DATALOADER', 'batch_noise')
    else:
        batch_size = config_ini.getint('DATALOADER', 'batch_normal')

    data_list = read_data(path_type)  # pass path_type so the data matches batch_size
    test_dataset = GAN_Img_Dataset(file_list=data_list,
                                   transform=ImageTransform(mean, std))
    test_dataset = torch.utils.data.DataLoader(test_dataset,
                                               batch_size=batch_size,
                                               shuffle=shuffle)

    return test_dataset
Example #13
from scipy import signal
import numpy as np

from src.utils import read_config

# parameters ---------------------------------------------
config_ini = read_config.read_config()
amplitude = config_ini.getint('NOISE', 'amplitude_h')
high_h = config_ini.getint('BANDPASS', 'high_h')
high_l = config_ini.getint('BANDPASS', 'high_l')
side = config_ini.getint('BANDPASS', 'side_w')
b_gstop = config_ini.getint('BANDPASS', 'gstop')
b_gpass = config_ini.getint('BANDPASS', 'gpass')
fs = config_ini.getint("SAMPLING_FREQUENCY", 'fs')
# --------------------------------------------------------


def band_pass(data):
    gpass = b_gpass
    gstop = b_gstop
    Wp1 = high_l / (fs / 2)
    Wp2 = high_h / (fs / 2)
    Ws1 = (high_l - side) / (fs / 2)  # stopband edges must lie outside the passband
    Ws2 = (high_h + side) / (fs / 2)
    N1, Wn1 = signal.buttord([Wp1, Wp2], [Ws1, Ws2], gpass, gstop)
    b1, a1 = signal.butter(N1, Wn1, "bandpass")
    return signal.filtfilt(b1, a1, data)
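
A self-contained sketch of the same Butterworth band-pass design with explicit example values (the real edges and attenuations come from the config):

import numpy as np
from scipy import signal

fs = 1000                              # Hz, illustrative
wp = [100 / (fs / 2), 200 / (fs / 2)]  # normalized passband edges
ws = [80 / (fs / 2), 220 / (fs / 2)]   # stopband edges outside the passband
N, Wn = signal.buttord(wp, ws, gpass=3, gstop=40)
b, a = signal.butter(N, Wn, 'bandpass')

t = np.arange(0, 1, 1 / fs)
x = np.sin(2 * np.pi * 150 * t) + np.sin(2 * np.pi * 400 * t)
y = signal.filtfilt(b, a, x)           # keeps the 150 Hz tone, removes 400 Hz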


def run(data):
    """
Example #14
def train_model(generator, discriminator, dataloader):
    """
    """
    # Parameters -------------------------------------------------------
    config_ini = read_config.read_config()
    g_lr = config_ini.getfloat('TRAIN', 'g_learning_rate')
    d_lr = config_ini.getfloat('TRAIN', 'd_learning_rate')
    beta1_g = config_ini.getfloat('TRAIN', 'g_beta_1')
    beta2_g = config_ini.getfloat('TRAIN', 'g_beta_2')
    beta1_d = config_ini.getfloat('TRAIN', 'd_beta_1')
    beta2_d = config_ini.getfloat('TRAIN', 'd_beta_2')
    z_dim = config_ini.getint('GENERATOR', 'z_dim')
    num_epochs = config_ini.getint('TRAIN', 'num_epoch')
    # ------------------------------------------------------------------

    # Check GPU
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Use device:", device)

    # Define optimizer and loss function
    g_optimizer = torch.optim.Adam(generator.parameters(), g_lr,
                                   [beta1_g, beta2_g])
    d_optimizer = torch.optim.Adam(discriminator.parameters(), d_lr,
                                   [beta1_d, beta2_d])
    criterion = nn.BCEWithLogitsLoss(reduction='mean')

    # To GPU and train mode
    generator.to(device)
    discriminator.to(device)
    generator.train()
    discriminator.train()

    # Accelerator
    torch.backends.cudnn.benchmark = True

    # num_train_imgs = len(dataloader.dataset)
    batch_size = dataloader.batch_size
    iteration = 1
    for epoch in range(num_epochs):
        t_epoch_start = time.time()
        epoch_g_loss = 0.0
        epoch_d_loss = 0.0

        print('-------------')
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-------------')
        print('(train)')

        for images in dataloader:

            # Discriminator ---------------------------------------------------
            # a mini-batch of size 1 causes an error here, so skip it
            if images.size(0) == 1:
                continue

            images = images.to(device)
            mini_batch_size = images.size(0)

            # make soft labels; cast to float32 to match the model outputs
            real_l = np.random.randint(7, 12, mini_batch_size) / 10
            fake_l = np.random.randint(0, 2, mini_batch_size) / 10
            label_real = torch.from_numpy(real_l).float().to(device)
            label_fake = torch.from_numpy(fake_l).float().to(device)

            # judge real images
            d_out_real, _ = discriminator(images)

            # judge fake image
            input_z = torch.randn(mini_batch_size, z_dim).to(device)
            input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
            fake_images = generator(input_z)
            d_out_fake, _ = discriminator(fake_images)

            # loss
            d_loss_real = criterion(d_out_real.view(-1), label_real)
            d_loss_fake = criterion(d_out_fake.view(-1), label_fake)
            d_loss = d_loss_real + d_loss_fake

            g_optimizer.zero_grad()
            d_optimizer.zero_grad()
            d_loss.backward()
            d_optimizer.step()

            # Generator-------------------------------------------------------------------------------------------------

            # judge fake image
            input_z = torch.randn(mini_batch_size, z_dim).to(device)
            input_z = input_z.view(input_z.size(0), input_z.size(1), 1, 1)
            fake_images = generator(input_z)
            d_out_fake, _ = discriminator(fake_images)

            # loss
            g_loss = criterion(d_out_fake.view(-1), label_real)

            g_optimizer.zero_grad()
            d_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()

            # save loss
            epoch_d_loss += d_loss.item()
            epoch_g_loss += g_loss.item()
            iteration += 1

        # print loss
        t_epoch_finish = time.time()
        print('-------------')
        print('epoch {} || Epoch_D_Loss:{:.4f} || Epoch_G_Loss:{:.4f}'.format(
            epoch, epoch_d_loss / batch_size, epoch_g_loss / batch_size))
        print('timer:  {:.4f} sec.'.format(t_epoch_finish - t_epoch_start))
        t_epoch_start = time.time()

    print("Iteration:", iteration)

    return generator, discriminator
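
The soft labels above (0.7-1.1 for real, 0.0-0.1 for fake) are a label-smoothing trick; BCEWithLogitsLoss accepts such non-binary targets, as a quick check shows:

import torch
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss(reduction='mean')
logits = torch.tensor([2.0, -1.0, 0.5])
soft_targets = torch.tensor([0.9, 1.1, 0.7])   # smoothed "real" labels as above
print(criterion(logits, soft_targets).item())  # a finite, well-defined loss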
Example #15
def read_data():
    """
    :return: list of training image paths.
    """
    config_ini = read_config.read_config()
    train_path = config_ini.get('PATH', 'train_images')
    return glob.glob(train_path)