Example #1
def gan_data_selfnoise_1d_perf(data_epoch_num, cuda=True):
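    """Run the pretrained self-noise 1D generator over `data_epoch_num` random
    draws from each of the 5 classes and save the results to
    save_path + "gen_1d/data_selfnoise.npz" as an array of shape
    (n_class, data_epoch_num, 1024). Relies on module-level imports of
    numpy as np, torch and random, plus the project helpers G_D_Module,
    read_csv_data and save_path."""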
    n_class = 5
    if cuda:
        FloatTensor = torch.cuda.FloatTensor
        LongTensor = torch.cuda.LongTensor
    else:
        FloatTensor = torch.FloatTensor
        LongTensor = torch.LongTensor

    generator = G_D_Module.GeneratorSelfNoise1D()
    generator.load_state_dict(
        torch.load('GANParameters/SELFNOISE1DGAN/generator.pt'))

    if cuda:
        generator.cuda()

    datas = np.empty((n_class, data_epoch_num, 1024), dtype=float)
    data_list = read_csv_data()
    for i in range(data_epoch_num):
        data = np.empty((n_class, 1, 1024), dtype=float)
        for j in range(n_class):
            index = random.randint(0, data_list[j].shape[0] - 1)
            data[j][0] = data_list[j][index]
        data = FloatTensor(data)
        gen_data = generator(data).cpu().detach().numpy()
        for j in range(n_class):
            datas[j][i] = gen_data[j][0]
        print('[%d/%d]' % (i + 1, data_epoch_num))  # 1-based progress

    np.savez(save_path + "gen_1d/data_selfnoise.npz", datas)
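A minimal usage sketch, not part of the original source: it assumes the project helpers above are importable, the pretrained checkpoint exists, and 200 is an arbitrary number of augmentation passes; np.savez stores the unnamed array under the key "arr_0".

gan_data_selfnoise_1d_perf(200, cuda=torch.cuda.is_available())
gen = np.load(save_path + "gen_1d/data_selfnoise.npz")["arr_0"]
print(gen.shape)  # (5, 200, 1024): n_class x data_epoch_num x signal length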
Example #2
def show_1d_data():
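    """Plot `weight` random real samples per class, the corresponding outputs of
    the pretrained conditional conv 1D generator, and the outputs of the
    self-noise 1D generator, saving the three figures under caches/."""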
    real_data = read_csv_data()
    n_class = len(real_data)
    weight = 2
    # pick `weight` random sample indices per class; 450 assumes every class
    # holds at least 450 samples
    index = np.random.randint(450, size=(n_class, weight))
    for i in range(n_class):
        for j in range(weight):
            plt.subplot(n_class, weight, i * weight + j + 1)
            plt.xticks([])
            # index = random.randint(0, real_data[i].shape[0] - 1)
            plt.plot(real_data[i][index[i][j]])
    plt.savefig('caches/real_1d_data.jpg', bbox_inches='tight')
    plt.close()

    cuda = torch.cuda.is_available()
    FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
    LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
    latent_dim = 50
    generator = G_D_Module.GeneratorConv1D(latent_dim, n_class)
    generator.load_state_dict(
        torch.load('GANParameters/CONV1DGAN/generator.pt'))
    if cuda:
        generator.cuda()
    noise = FloatTensor(
        np.random.normal(0, 1, (n_class * weight, 1, latent_dim)))
    labels = LongTensor(list(range(0, n_class)) * weight)
    gen_data = generator(noise, labels)
    gen_data = gen_data.cpu().detach().numpy()
    for i in range(gen_data.shape[0]):
        plt.subplot(n_class, weight, i + 1)
        plt.xticks([])
        # plt.xlabel(str(i // weight))
        # labels repeat 0..n_class-1 `weight` times, so the two samples of
        # class (i // weight) sit n_class apart in gen_data
        plt.plot(gen_data[(i % weight) * n_class + i // weight][0])
    plt.savefig('caches/gen_conv_1d_data.jpg', bbox_inches='tight')
    plt.close()

    generator = G_D_Module.GeneratorSelfNoise1D()
    # load the pretrained weights (same checkpoint as Example #1); without this
    # the self-noise plots would come from an untrained generator
    generator.load_state_dict(
        torch.load('GANParameters/SELFNOISE1DGAN/generator.pt'))
    if cuda:
        generator.cuda()
    inputs = np.empty((n_class * weight, 1, 1024), dtype=float)
    for i in range(inputs.shape[0]):
        # index = random.randint(0, real_data[i // weight].shape[0] - 1)
        inputs[i][0] = real_data[i // weight][index[i // weight][i % weight]]
    inputs = FloatTensor(inputs)
    gen_data = generator(inputs)
    gen_data = gen_data.cpu().detach().numpy()
    for i in range(gen_data.shape[0]):
        plt.subplot(n_class, weight, i + 1)
        plt.xticks([])
        # plt.xlabel(str(i // weight))
        plt.plot(gen_data[i][0])
    plt.savefig('caches/gen_selfnoise_1d_data.jpg', bbox_inches='tight')
    plt.close()
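Note that the same `index` array drives both the real-data figure and the self-noise generator's inputs, so matching rows of real_1d_data.jpg and gen_selfnoise_1d_data.jpg show the generator applied to the very same real samples.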
Example #3
def ex_selfnoise_1d_gan():
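    """Build the 1D data loader and train the self-noise GAN end to end; relies
    on the module-level `opt` hyperparameter namespace and `cuda` flag."""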
    data_sets = ReWrite.load_data_in_seq_1d('data')
    data_sets = ReWrite.MyDataSet1D(data_sets)
    data_loader = DataLoader(
        data_sets,
        batch_size=128,
        shuffle=True,
    )
    generator = G_D_Module.GeneratorSelfNoise1D()
    discriminator = G_D_Module.DiscriminatorSelfNoise1D()

    TrainFunction.train_selfnoise_1d_gan(generator,
                                         discriminator,
                                         data_loader,
                                         opt.n_epochs,
                                         opt.lr,
                                         opt.b1,
                                         opt.b2,
                                         -1,
                                         opt.n_classes,
                                         cuda,
                                         first_train=False)
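Example #3 depends on a module-level `opt` namespace and `cuda` flag. Below is a minimal sketch of how they might be defined; the attribute names are taken from the call above, while the flag spellings and default values are purely illustrative and not taken from the original project.

import argparse

import torch

parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200)   # illustrative default
parser.add_argument('--lr', type=float, default=0.0002)    # illustrative default
parser.add_argument('--b1', type=float, default=0.5)       # illustrative Adam beta1
parser.add_argument('--b2', type=float, default=0.999)     # illustrative Adam beta2
parser.add_argument('--n_classes', type=int, default=5)    # 5 classes, as in Example #1
opt = parser.parse_args()
cuda = torch.cuda.is_available()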