Example #1
        D_optimizer.step()

        '''
        Train the generator
        '''
        Net_G.zero_grad()
        Noise_var = Variable(torch.randn(BATCH_SIZE, NOISE_DIM))
        image_fake = Net_G(Noise_var,label_var)
        D_fake = Net_D(image_fake,label_var)

        G_loss = BCELoss()(D_fake, label_true_var)

        G_loss.backward()
        G_optimizer.step()

        proBar.show(D_loss.data[0], G_loss.data[0])
    Noise_var = Variable(torch.randn(BATCH_SIZE, NOISE_DIM))
    y = (torch.ones(BATCH_SIZE) * 7).long()
    y = one_hot(y)
    y = Variable(y.cuda() if GPU_NUMS > 1 else y)
    samples = Net_G(Noise_var,y)[:24]
    img = torchvision.utils.make_grid(samples.data)
    npimg = img.cpu().numpy()
    plt.imshow(np.transpose(npimg, (1,2,0)))

    if not os.path.exists('out/'):
        os.makedirs('out/')

    plt.savefig('out/{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight')
    plt.close()
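
The snippet above calls a one_hot helper that is not part of the excerpt. A minimal sketch of such a helper, assuming 10 classes (the name and signature follow the call site; the body is an assumption):

import torch

def one_hot(labels, num_classes=10):
    # labels: LongTensor of shape [N] -> FloatTensor of shape [N, num_classes]
    out = torch.zeros(labels.size(0), num_classes)
    out.scatter_(1, labels.view(-1, 1), 1.0)  # hypothetical helper, not the original
    return out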
Example #2
        error_real=criterion(output.squeeze(),label)
        error_real.backward()
        D_x=output.data.mean()
        ## train netd with fake img
        fake_pic=netg(noise).detach()
        output2=netd(fake_pic)
        label.data.fill_(0) # 0 for fake
        error_fake=criterion(output2.squeeze(),label)
        error_fake.backward()
        D_x2=output2.data.mean()
        error_D=error_real+error_fake
        optimizerD.step()

        # ------ train netg -------
        netg.zero_grad()
        label.data.fill_(1)
        noise.data.normal_(0,1)
        fake_pic=netg(noise)
        output=netd(fake_pic)
        error_G=criterion(output.squeeze(),label)
        error_G.backward()
        optimizerG.step()
        D_G_z2=output.data.mean()
        bar.show(error_D.data[0], error_G.data[0])

    fake_u=netg(fix_noise)
    imgs = make_grid(fake_u.data*0.5+0.5).cpu() # CHW
    plt.imsave("output/%02d.png" % epoch, imgs.permute(1,2,0).numpy())
    plt.close()

t.save(netg.state_dict(), "output/NetG_Cifar.pth")
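
This excerpt starts mid-loop; the tensors it reuses are created once before training. A minimal sketch of that setup, with assumed shapes (batch size and noise dimension are placeholders):

import torch as t
from torch.autograd import Variable

batch_size, nz = 64, 100  # assumed hyperparameters
noise = Variable(t.randn(batch_size, nz, 1, 1))      # resampled in place each step
label = Variable(t.ones(batch_size))                 # refilled with 1 (real) or 0 (fake)
fix_noise = Variable(t.randn(batch_size, nz, 1, 1))  # fixed batch for periodic sampling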
Example #3
                # errord_meter.add(error_d.data[0])

            if ii % config.G_EVERY == 0:
                # Train the generator
                optimizer_generator.zero_grad()
                noises.data.copy_(
                    t.randn(config.BATCH_SIZE, config.NOISE_Z, 1, 1))
                fake_img = netG(noises)
                output = netD(fake_img)
                error_g = criterion(output, true_labels)
                error_g.backward()
                optimizer_generator.step()
                # errorg_meter.add(error_g.data[0])

            proBar.show(error_d.data[0], error_g.data[0])
            fix_fake_imgs = netG(fix_noises)
            # if opt.vis and ii%opt.plot_every == opt.plot_every-1:
            #     ## Visualization
            #     if os.path.exists(opt.debug_file):
            #         ipdb.set_trace()
            #     fix_fake_imgs = netg(fix_noises)
            #     vis.images(fix_fake_imgs.data.cpu().numpy()[:64]*0.5+0.5,win='fixfake')
            #     vis.images(real_img.data.cpu().numpy()[:64]*0.5+0.5,win='real')
            #     vis.plot('errord',errord_meter.value()[0])
            #     vis.plot('errorg',errorg_meter.value()[0])

        if epoch % config.DECAY_EVERY == 0:
            # Save the model and sample images
            tv.utils.save_image(fix_fake_imgs.data[:8],
                                '%s/%s.png' % (config.SAVE_PATH, epoch),
Example #4
        Net_D.zero_grad()
        D_loss.backward()
        D_optimizer.step()

        # Train generator
        gen_image = Net_G(x_)
        D_fake_decision = Net_D(x_, gen_image).squeeze()
        G_fake_loss = BCE_loss(D_fake_decision, real_)

        # L1 loss
        l1_loss = 100 * L1_loss(gen_image, y_)

        # Back propagation
        G_loss = G_fake_loss + l1_loss
        Net_G.zero_grad()
        G_loss.backward()
        G_optimizer.step()

        bar.show(D_loss.data[0], G_loss.data[0])

    gen_image = Net_G(
        Variable(test_input.cuda() if GPU_NUMS > 1 else test_input))
    gen_image = gen_image.cpu().data
    plot_test_result(test_input,
                     test_target,
                     gen_image,
                     epoch,
                     save=True,
                     save_dir='output/')

torch.save(Net_G.state_dict(), "output/Net_G_20.pth")
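
The loop above assumes BCE_loss and L1_loss were constructed earlier; a minimal sketch of that setup (the 100 weighting on the L1 term appears in the loop itself):

import torch.nn as nn

BCE_loss = nn.BCELoss()  # adversarial term on the discriminator's decision
L1_loss = nn.L1Loss()    # pixel-wise reconstruction term between G(x) and the target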
Example #5
        if c2_len:
            c2 = Q_con(hid)
            loss_q += 0.5 * mse(c2, z_dict['con']) # Multiply by 0.5 because for a unit-variance Gaussian, -log p(x|mu) = 0.5 * (x - mu)^2 + const
            Q_con.zero_grad() # Zero gradient buffers before the backward pass
        if c3_len:
            c3 = Sigmoid()(Q_bin(hid))
            loss_q += bce(c3, z_dict['bin'])
            Q_bin.zero_grad() # Zero gradient buffers before the backward pass

        # Backward pass for latent code objective
        loss_q.backward()

        # Do the updates for everything
        d_optim.step()
        g_optim.step()
        qcat_optim.step()

        if c2_len:
            qcon_optim.step()
        if c3_len:
            qbin_optim.step()

        bar.show(loss_dis.cpu().data.numpy()[0], loss_gen.cpu().data.numpy()[0], loss_q.cpu().data.numpy()[0])

    sample_images(epoch)

torch.save(Net_G.state_dict(),"output/Net_G_cifar_%d.pth" % epoch)

out_test = run_dis(Variable(x_test_th).cuda().float() / 255 if GPU_NUMS > 1 else Variable(x_test_th).float() / 255)[1]
out_test = np.argmax(out_test.data.cpu().numpy(), axis = 1)
print(np.mean(out_test == np.argmax(y_test, axis = 1)))
Example #6
    # KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.sum(KLD_element).mul_(-0.5)
    # KL divergence
    return BCE + KLD


optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(EPOCH):
    model.train()
    train_loss = 0
    for batch_idx, data in enumerate(train_loader):
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img)
        if torch.cuda.is_available():
            img = img.cuda()
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(img)
        loss = loss_function(recon_batch, img, mu, logvar)
        loss.backward()
        train_loss += loss.data[0]
        optimizer.step()
        proBar.show(loss.data[0] / len(img))

    if epoch % 10 == 0:
        save = to_img(recon_batch.cpu().data)
        save_image(save, 'vae_img/image_{}.png'.format(epoch))

torch.save(model.state_dict(), 'vae.pth')
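
The excerpt shows only the KL term of loss_function; the reconstruction term is computed above the cut. A self-contained sketch of the usual formulation (the BCE line is an assumption, matching the 0.3-era API used here):

import torch
import torch.nn.functional as F

def loss_function(recon_x, x, mu, logvar):
    # reconstruction term, summed over the batch; assumes inputs scaled to [0, 1]
    BCE = F.binary_cross_entropy(recon_x, x, size_average=False)
    # closed-form KL(q(z|x) || N(0, I))
    KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return BCE + KLD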
Example #7
        if GPU_NUMS > 0:
            g_gen_input = g_gen_input.cuda()
        g_fake_data = generator(g_gen_input)
        g_fake_decision = discriminator(g_fake_data)
        labels = Variable(torch.ones(bs))
        if GPU_NUMS > 0:
            labels = labels.cuda()
        g_loss = criterion(
            g_fake_decision,
            labels)  # we want to fool, so pretend it's all genuine

        g_loss.backward()
        g_optimizer.step()  # Only optimizes G's parameters

    loss_d_real = d_real_loss.item()
    loss_d_fake = d_fake_loss.item()
    loss_g = g_loss.item()

    progBar.show(loss_d_real, loss_d_fake, loss_g)
    if train_iter == 1 or train_iter % 100 == 0:
        msg = 'Iteration {}: D_loss(real/fake): {:.6g}/{:.6g} G_loss: {:.6g}'.format(
            train_iter, loss_d_real, loss_d_fake, loss_g)

        gen_samples = g_fake_data.data.cpu().numpy(
        ) if GPU_NUMS > 0 else g_fake_data.data.numpy()

        visualizer.draw(real_samples, gen_samples, msg, show=False)
        visualizer.savefig('output/Pytorch_Z_%04d.png' % train_iter)

torch.save(generator.state_dict(), "output/GAN_Z_Pytorch_Generator.pth")
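
Only the generator step survives in this excerpt; the discriminator update that produces d_real_loss and d_fake_loss would look roughly like this (a sketch reusing the snippet's names where possible; d_optimizer, real_batch, and noise_input are assumptions):

d_optimizer.zero_grad()
d_real_decision = discriminator(real_batch)
d_real_loss = criterion(d_real_decision, Variable(torch.ones(bs)))   # real -> 1
d_fake_data = generator(noise_input).detach()                        # block grads into G
d_fake_decision = discriminator(d_fake_data)
d_fake_loss = criterion(d_fake_decision, Variable(torch.zeros(bs)))  # fake -> 0
(d_real_loss + d_fake_loss).backward()
d_optimizer.step()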
Example #8
# Prepare the data
json = next(cfg for cfg in MODEL_LIST if cfg["name"] == MODEL)  # select the config whose "name" matches MODEL
train_data = Cifar10DataSet(train=True, transform=json["transform"])
train_loader = DataLoader(dataset=train_data,
                          batch_size=BATCH_SIZE,
                          shuffle=True)

# Prepare the network
model = json["model"](json["pretrained"])
model = torch.nn.DataParallel(model).cuda()
optimizer = Adam(model.parameters(), lr=LR)
loss_func = CrossEntropyLoss().cuda()

# Train the model
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f,acc:%.3f")
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        data = Variable(x)
        label = Variable(torch.squeeze(y, dim=1).type(torch.LongTensor))
        output = model(data)
        loss = loss_func(output, label)
        optimizer.zero_grad()  # clear accumulated gradients before the backward pass
        loss.backward()
        optimizer.step()

        prediction = torch.max(softmax(output), 1)[1]
        pred_label = prediction.data.cpu().numpy().squeeze()  # move off the GPU before converting
        target_y = label.data.numpy()
        accuracy = sum(pred_label == target_y) / len(target_y)

        proBar.show(loss.data[0], accuracy)
Example #9

model = autoencoder().cuda() if torch.cuda.is_available() else autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(),
                             lr=learning_rate,
                             weight_decay=1e-5)

proBar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")

for epoch in range(EPOCH):
    for data in train_loader:
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img.cuda() if torch.cuda.is_available() else img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(loss.data[0])
    # ===================log========================

    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
        save_image(pic, 'mlp_img/image_{}.png'.format(epoch))

torch.save(model.state_dict(), 'sim_autoencoder.pth')
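
to_img above reshapes the flat output vectors back into image tensors. A minimal sketch, assuming 28x28 single-channel inputs scaled to [0, 1] (the body is an assumption):

def to_img(x):
    x = x.clamp(0, 1)
    return x.view(x.size(0), 1, 28, 28)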
Example #10
        noise = np.random.normal(-1, 1, size=[bs, z_dim])
        generateImage = generator.predict(noise)

        discriminator.trainable = True
        yDis = np.zeros(2 * bs)
        yDis[:bs] = 1
        d_loss = discriminator.train_on_batch(
            np.concatenate((real_samples, generateImage)), yDis)
    for index in range(1):
        noise = np.random.normal(-1, 1, size=[bs, z_dim])
        generateImage = generator.predict(noise)
        yGen = np.ones(bs)
        discriminator.trainable = False
        g_loss = gan.train_on_batch(noise, yGen)

    progBar.show(d_loss, g_loss)

    if epoch_iter % 100 == 0:

        loss_g = g_loss

        msg = ""

        visualizer.draw(real_samples, generateImage, msg, show=False)

        if True:
            filename = input_path.split(os.sep)[-1]
            output_dir = 'gan_training_{}'.format(
                filename[:filename.rfind('.')])
            os.system('mkdir -p {}'.format(output_dir))
            export_filepath = os.sep.join(
Example #11
            # Labels for generated and real data
            yDis = np.zeros(2 * batchSize)
            # One-sided label smoothing
            yDis[:batchSize] = 0.9

            # Train discriminator
            discriminator.trainable = True
            dloss = discriminator.train_on_batch(X, yDis)

            # Train generator
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)
            progBar.show(dloss[0], dloss[1], gloss[0], gloss[1])

        dLosses.append(dloss)
        gLosses.append(gloss)
        if e == 1 or e % 5 == 0:
            noise = np.random.normal(0, 1, size=[100, randomDim])
            generatedImages = generator.predict(noise)
            generatedImages = generatedImages.reshape(100, 28, 28)
            # samples_image.append(generatedImages)
            fig, axs = plt.subplots(10, 10)
            fig.suptitle("Generated digits", fontsize=12)
            cnt = 0
            for i in range(10):
                for j in range(10):
                    axs[i, j].imshow(generatedImages[cnt, :, :], plt.cm.gray)
                    axs[i, j].axis('off')
                    cnt += 1
Example #12
            imageBatch = x_train[np.random.randint(0,
                                                   x_train.shape[0],
                                                   size=batchSize)]

            combined_images = np.concatenate([generatedImages, imageBatch])

            labels = np.concatenate(
                [np.ones((batchSize, 1)),
                 np.zeros((batchSize, 1))])
            labels += 0.05 * np.random.random(labels.shape)

            d_loss = discriminator.train_on_batch(combined_images, labels)

            noise = np.random.normal(size=[batchSize, 100])
            yGen = np.zeros(batchSize)
            aloss = dcgan.train_on_batch(noise, yGen)
            progBar.show(d_loss, aloss)

        dLosses.append(d_loss)
        aLosses.append(aloss)
        if epoch == 1 or epoch % 5 == 0:
            samples_image.append(generatedImages)
            img = image.array_to_img(generatedImages[0] * 255., scale=False)
            img.save('generated_airplane' + str(epoch) + '.png')

    with open('train_samples.pkl', 'wb') as f:
        pickle.dump(samples_image, f)

    generator.save('stl_generator.h5')
else:
    generator = load_model("cifar_generator.h5")
    noise = np.random.normal(size=[batchSize, 100])
Example #13
    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        (X_train, _), (_, _) = read_mnist()

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)

        half_batch = int(batch_size / 2)
        proBar = ProgressBar(1, epochs,
                             "d loss:%.3f,d acc:%.3f;g loss:%.3f,g acc:%.3f")

        for epoch in range(epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            # Generate a half batch of embedded images
            latent_fake = self.encoder.predict(imgs)

            latent_real = np.random.normal(size=(half_batch, self.encoded_dim))

            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(latent_real, valid)
            d_loss_fake = self.discriminator.train_on_batch(latent_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Generator wants the discriminator to label the generated representations as valid
            valid_y = np.ones((batch_size, 1))

            # Train the generator
            g_loss = self.adversarial_autoencoder.train_on_batch(
                imgs, [imgs, valid_y])

            # Plot the progress
            # print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
            proBar.show(d_loss[0], d_loss[1], g_loss[0], g_loss[1])

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                # Select a random half batch of images
                idx = np.random.randint(0, X_train.shape[0], 25)
                imgs = X_train[idx]
                self.save_imgs(epoch, imgs)
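
save_imgs above is not part of the excerpt. One plausible implementation reconstructs the sampled images through the autoencoder and writes a 5x5 grid (a sketch; the decoder attribute and the file name are assumptions):

import matplotlib.pyplot as plt

def save_imgs(self, epoch, imgs):
    latent = self.encoder.predict(imgs)
    gen_imgs = self.decoder.predict(latent)  # 'decoder' is an assumed attribute
    fig, axs = plt.subplots(5, 5)
    cnt = 0
    for i in range(5):
        for j in range(5):
            axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            cnt += 1
    fig.savefig('aae_mnist_%d.png' % epoch)
    plt.close(fig)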
Example #14
        G_result = Net_G(img_fake_var, label_fake_G_var)
        D_result = Net_D(G_result, label_fake_D_var).squeeze()
        D_fake_loss = BCELoss()(D_result, label_false_var)

        D_train_loss = D_real_loss + D_fake_loss
        D_train_loss.backward()
        D_optimizer.step()
        '''
        Train the generator
        '''
        Net_G.zero_grad()
        img_fake = torch.randn((BATCH_SIZE, 100)).view(-1, 100, 1, 1)
        label_fake = (torch.rand(BATCH_SIZE, 1) * 10).type(
            torch.LongTensor).squeeze()  # [BATCH_SIZE]
        img_fake_var = Variable(img_fake.cuda() if GPU_NUMS > 1 else img_fake)
        label_fake_G_var = Variable(onehot[label_fake].cuda() if GPU_NUMS > 1
                                    else onehot[label_fake])  #[BATCH,10,1,1]
        label_fake_D_var = Variable(
            fill[label_fake].cuda() if GPU_NUMS > 1 else
            fill[label_fake])  #[BATCH,10,IMAGE_SIZE,IMAGE_SIZE]
        G_result = Net_G(img_fake_var, label_fake_G_var)
        D_result = Net_D(G_result, label_fake_D_var).squeeze()
        G_train_loss = BCELoss()(D_result, label_true_var)
        G_train_loss.backward()
        G_optimizer.step()

        bar.show(D_train_loss.data[0], G_train_loss.data[0])

    fixed_p = 'Fixed_results/' + str(epoch + 1) + '.png'
    show_result((epoch + 1), save=True, path=fixed_p)
    torch.save(Net_G.state_dict(), 'Fixed_results/netg_%s.pth' % epoch)
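
onehot and fill used above are precomputed label-conditioning tensors. A minimal sketch, assuming 10 classes (IMAGE_SIZE must match the discriminator's input resolution):

import torch

IMAGE_SIZE = 32  # assumed
onehot = torch.zeros(10, 10)
onehot = onehot.scatter_(1, torch.arange(10).long().view(10, 1), 1).view(10, 10, 1, 1)  # [10,10,1,1] input to G
fill = torch.zeros(10, 10, IMAGE_SIZE, IMAGE_SIZE)  # [10,10,H,W] label maps for D
for i in range(10):
    fill[i, i, :, :] = 1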
Example #15
        loss_d.backward()
        optimizer_D.step()

        ############################
        # (2) Update G network: maximize log(D(x,G(x))) + L1(y,G(x))
        ############################
        optimizer_G.zero_grad()
        # First, G(A) should fake the discriminator
        fake_ab = torch.cat((real_a, fake_b), 1)
        pred_fake = Net_D(fake_ab)
        loss_g_gan = lossGAN(pred_fake, True)

        # Second, G(A) = B
        loss_g_l1 = lossL1(fake_b, real_b) * 10
        loss_g = loss_g_gan + loss_g_l1
        loss_g.backward()
        optimizer_G.step()

        bar.show(loss_d.data[0], loss_g.data[0])

    gen_image = Net_G(
        Variable(test_input.cuda() if GPU_NUMS > 1 else test_input))
    gen_image = gen_image.cpu().data
    plot_test_result(test_input,
                     test_target,
                     gen_image,
                     epoch,
                     save=True,
                     save_dir='output/')
    torch.save(Net_G.state_dict(), "output/Net_G_%s.pth" % epoch)
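
lossGAN(pred, True) above takes a boolean target flag; such a wrapper typically builds a matching tensor of ones or zeros internally. A minimal sketch (an assumption, not the original class):

import torch
import torch.nn as nn

class GANLoss(nn.Module):
    def __init__(self):
        super(GANLoss, self).__init__()
        self.loss = nn.BCELoss()

    def forward(self, prediction, target_is_real):
        # build a target tensor of the same shape as the prediction
        target = torch.ones_like(prediction) if target_is_real else torch.zeros_like(prediction)
        return self.loss(prediction, target)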
Example #16
Start training
'''
half_batch = int(BATCH_SIZE / 2)
proBar = ProgressBar(1, EPOCH, "D loss: %f, acc.: %.2f%%; G loss: %f")
for epoch in range(1, EPOCH + 1):
    idx = np.random.randint(0, x_train.shape[0], size=half_batch)
    image, label = x_train[idx], y_train[idx]

    noise = np.random.normal(0, 1, (half_batch, 100))
    generate_image = Generator.predict([noise, label])

    valid = np.ones((half_batch, 1))
    fake = np.zeros((half_batch, 1))

    d_loss_real = Discriminator.train_on_batch([image, label], valid)
    d_loss_fake = Discriminator.train_on_batch([generate_image, label], fake)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

    noise = np.random.normal(0, 1, (BATCH_SIZE, 100))

    valid = np.ones((BATCH_SIZE, 1))

    sampled_labels = np.random.randint(0, 10, BATCH_SIZE).reshape(-1, 1)

    # Train the generator
    g_loss = Gan.train_on_batch([noise, sampled_labels], valid)
    proBar.show(d_loss[0], 100 * d_loss[1], g_loss)
    if epoch % 100 == 0:
        save_images(Generator)

Generator.save("output/keras_mnist_generator.h5")
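
save_images above is not shown. A minimal sketch that samples one digit per class and writes a row of images (the path and layout are assumptions):

import numpy as np
import matplotlib.pyplot as plt

def save_images(generator, path='output/cgan_sample.png'):
    noise = np.random.normal(0, 1, (10, 100))
    labels = np.arange(10).reshape(-1, 1)  # one label per class
    imgs = generator.predict([noise, labels])
    fig, axs = plt.subplots(1, 10, figsize=(10, 1))
    for i in range(10):
        axs[i].imshow(imgs[i, :, :, 0], cmap='gray')
        axs[i].axis('off')
    fig.savefig(path)
    plt.close(fig)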