Ejemplo n.º 1
0
def ex_info_gan():
    """Train an InfoGAN (latent_dim=50, 5 classes, 2 latent codes) on the
    sequence dataset loaded from ``source_files``."""
    loader = DataLoader(
        ReWrite.MyDataSet(ReWrite.load_data_in_seq(source_files)),
        batch_size=256,
        shuffle=True,
    )

    gen = G_D_Module.GeneratorInfo(
        latent_dim=50, n_classes=5, code_dim=2, img_shape=img_shape)
    disc = G_D_Module.DiscriminatorInfo(
        n_classes=5, code_dim=2, img_shape=img_shape)

    # Resume-style run: first_train=False (presumably loads saved weights —
    # confirm against TrainFunction).
    TrainFunction.train_info_gan(gen, disc, loader,
                                 opt.n_epochs, opt.lr, opt.b1, opt.b2,
                                 latent_dim=50, n_classes=5, code_dim=2,
                                 cuda=cuda, first_train=False)
Ejemplo n.º 2
0
def ex_ponodcwcgan():
    """Train the PONO-DCWCGAN variant on the sequence dataset."""
    loader = DataLoader(
        ReWrite.MyDataSet(ReWrite.load_data_in_seq(source_files)),
        batch_size=256,
        shuffle=True,
    )

    latent_dim = 100  # latent_dim should be 20 (original author's note)
    gen = G_D_Module.GeneratorPONODCWCGAN(latent_dim, opt.n_classes, img_shape)
    disc = G_D_Module.DiscriminatorPONODCWCGAN(opt.n_classes, img_shape)

    # NOTE(review): sibling trainers spell this keyword 'first_train';
    # 'fist_train' is kept as-is because the callee's signature is not
    # visible here — confirm and align both sides.
    TrainFunction.train_ponodcwcgan(gen, disc, loader,
                                    opt.n_epochs, opt.lr, opt.b1, opt.b2,
                                    latent_dim, opt.n_classes, cuda,
                                    fist_train=False)
Ejemplo n.º 3
0
def cal_classify_img(i, j, min_epoch, max_epoch):
    """Train an image classifier on a dataset sized by (i, j) and return
    its average test accuracy.

    (i, j) is forwarded to MyDataSetForImgLoad as ``num`` — presumably the
    counts of real vs. generated images; confirm against ReWrite.
    Returns 0 immediately when i == j == 0 (no data to train on).
    """
    os.makedirs('Classify_img', exist_ok=True)
    if i == 0 and j == 0:
        return 0  # degenerate case: empty training set

    train_loader = DataLoader(
        ReWrite.MyDataSetForImgLoad('imgdatas', mode='train', num=(i, j)),
        batch_size=512,
        shuffle=True,
    )
    test_loader = DataLoader(
        ReWrite.MyDataSetForImgLoad('imgdatas', mode='test'),
        batch_size=1000,
        shuffle=True,
    )

    use_cuda = torch.cuda.is_available()
    float_t = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
    long_t = torch.cuda.LongTensor if use_cuda else torch.LongTensor

    accuracy_ave, _classifier = train_function_img(
        min_epoch, max_epoch, train_loader, test_loader,
        float_t, long_t, use_cuda)
    return accuracy_ave
Ejemplo n.º 4
0
def test():
    """Visual smoke test: plot the first image of every batch in the
    sequence dataset."""
    loader = DataLoader(
        ReWrite.MyDataSet(ReWrite.load_data_in_seq(source_files)),
        batch_size=16,
        shuffle=True,
    )
    for imag, labels in loader:
        plt.contourf(imag[0])
        plt.show()

    print('done')
Ejemplo n.º 5
0
def cal_classify(i, j, min_epoch, max_epoch, data_train, data_test):
    """Train the 1-D classifier on a real/generated mix and return its
    average test accuracy.

    :param i: real data num (rows taken from the tail of each sample)
    :param j: gen data num (rows taken from the head of each sample)
    :param min_epoch: minimum training epochs
    :param max_epoch: maximum training epochs
    :param data_train: 3-D np
    :param data_test: 3-D np
    :return: average accuracy; 0.2 when i == j == 0 (no training data)
    """
    os.makedirs('Classify_1d', exist_ok=True)
    para_list = os.listdir('Classify_1d')  # kept for parity; result unused
    if i == 0 and j == 0:
        return 0.2

    data_test_in = ReWrite.MyDataSet1D(data_test)

    mixed = []
    for sample in data_train:
        gen_part = sample[0:j]  # first j rows: generated samples
        # NOTE(review): -(i+1):-1 yields i rows but always skips the last
        # row of the sample — confirm that is intentional.
        real_part = sample[-(i + 1): -1]
        mixed.append(np.concatenate((gen_part, real_part), axis=0))
    data_train_in = ReWrite.MyDataSet1D(mixed)

    train_loader = DataLoader(
        data_train_in,
        batch_size=256,
        shuffle=True
    )
    test_loader = DataLoader(
        data_test_in,
        batch_size=256,
        shuffle=True
    )

    use_cuda = torch.cuda.is_available()
    float_t = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
    long_t = torch.cuda.LongTensor if use_cuda else torch.LongTensor

    accuracy_ave, _classifier = train_function_1d(
        min_epoch, max_epoch, train_loader, test_loader,
        float_t, long_t, use_cuda, eps=5e-3)
    return accuracy_ave
Ejemplo n.º 6
0
def ex_self_noise_gan():
    """Train the self-noise GAN on the sequence dataset."""
    loader = DataLoader(
        ReWrite.MyDataSet(ReWrite.load_data_in_seq(source_files)),
        batch_size=256,
        shuffle=True,
    )

    gen = G_D_Module.GeneratorSelfNoise(img_shape)
    disc = G_D_Module.DiscriminatorSelfNoise(img_shape)

    TrainFunction.train_self_noise_gan(
        gen, disc, loader,
        opt.n_epochs, opt.lr, opt.b1, opt.b2,
        cuda, first_train=False)
Ejemplo n.º 7
0
def ex_selfnoise_1d_gan():
    """Train the 1-D self-noise GAN on data loaded from the 'data' directory."""
    loader = DataLoader(
        ReWrite.MyDataSet1D(ReWrite.load_data_in_seq_1d('data')),
        batch_size=128,
        shuffle=True,
    )

    gen = G_D_Module.GeneratorSelfNoise1D()
    disc = G_D_Module.DiscriminatorSelfNoise1D()

    TrainFunction.train_selfnoise_1d_gan(
        gen, disc, loader,
        opt.n_epochs, opt.lr, opt.b1, opt.b2,
        -1,  # latent-dim slot; -1 presumably means "unused" — confirm in TrainFunction
        opt.n_classes, cuda,
        first_train=False)
Ejemplo n.º 8
0
def ex_linear1d_gan():
    """Train the fully-connected 1-D GAN (latent dim 50) on data loaded
    from the 'data' directory."""
    loader = DataLoader(
        ReWrite.MyDataSet1D(ReWrite.load_data_in_seq_1d('data')),
        batch_size=512,
        shuffle=True,
    )

    noise_dim = 50
    gen = G_D_Module.GeneratorLinear1D(noise_dim, opt.n_classes)
    disc = G_D_Module.DiscriminatorLinear1D(opt.n_classes)

    TrainFunction.train_linear_1d_gan(
        gen, disc, loader,
        opt.n_epochs, opt.lr, opt.b1, opt.b2,
        noise_dim, opt.n_classes, cuda,
        first_train=False)
Ejemplo n.º 9
0
def train_gan():
    """Vanilla GAN training loop over the sequence dataset.

    Alternates a generator step (fool the discriminator) and a
    discriminator step (separate real from generated) using BCE loss,
    for opt.n_epochs epochs. Every 20 epochs, saves a plot of one
    generated sample into the 'images' directory.
    """
    datasets = ReWrite.load_data_in_seq(source_files)
    datasets = ReWrite.MyDataSet(datasets)
    data_loader = DataLoader(
        datasets,
        batch_size=16,
        shuffle=True,
    )
    generator = Generator()
    discriminator = Discriminator()
    loss = nn.BCELoss()  # binary real/fake adversarial criterion

    if cuda:
        generator.cuda()
        discriminator.cuda()
        loss.cuda()

    # Independent Adam optimizers so G and D parameters update separately.
    optimizer_G = torch.optim.Adam(generator.parameters(),
                                   lr=opt.lr,
                                   betas=(opt.b1, opt.b2))
    optimizer_D = torch.optim.Adam(discriminator.parameters(),
                                   lr=opt.lr,
                                   betas=(opt.b1, opt.b2))

    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor

    for epoch in range(opt.n_epochs):
        for i, data in enumerate(data_loader):
            # Label tensors sized to the current batch: 1.0 = real, 0.0 = fake.
            valid = Variable(Tensor(data.size(0), 1).fill_(1.0),
                             requires_grad=False)
            fake = Variable(Tensor(data.size(0), 1).fill_(0.0),
                            requires_grad=False)

            real_data = Variable(data.type(Tensor))

            # ---- Generator step ----
            optimizer_G.zero_grad()

            # Sample noise as generator input
            z = Variable(
                Tensor(np.random.normal(0, 1,
                                        (data.size(0), opt.data_length))))

            gen_data = generator(z)

            # G's objective: make D label generated data as real.
            g_loss = loss(discriminator(gen_data), valid)

            g_loss.backward()
            optimizer_G.step()

            # ---- Discriminator step ----
            optimizer_D.zero_grad()

            # Measure discriminator's ability to classify real from generated samples
            real_loss = loss(discriminator(real_data), valid)
            # detach() stops the D update from backpropagating into G.
            fake_loss = loss(discriminator(gen_data.detach()), fake)
            d_loss = (real_loss + fake_loss) / 2

            d_loss.backward()
            optimizer_D.step()

            # Periodic snapshot: plot one generated sample and log losses.
            if epoch % 20 == 0 and i == 1:
                if cuda:
                    gen_data = gen_data.cpu()
                # NOTE(review): gen_data[1] assumes the batch holds >= 2
                # samples; a short final batch would raise — confirm.
                plt.plot(range(opt.data_length), gen_data[1].detach().numpy())
                # NOTE(review): backslash path is Windows-only and assumes
                # the 'images' directory already exists — os.path.join and
                # os.makedirs would make this portable.
                plt.savefig("images\\" + str(epoch) + '.jpg')
                plt.close()
                print("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
                      (epoch, opt.n_epochs, i, len(data_loader), d_loss.item(),
                       g_loss.item()))