Example #1
def train(discriminator,
          generator,
          show_every=25,
          num_epochs=20,
          save_every=2000):
    start_t = time.time()
    iter_count = 0
    discriminator.add_optimizer()
    generator.add_optimizer()
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            discriminator.optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator.forward(real_data).type(
                torch.FloatTensor)

            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)
            logits_fake = discriminator.forward(fake_images.detach())

            d_total_error = discriminator.loss(logits_real, logits_fake)
            d_total_error.backward()
            discriminator.optimizer.step()

            generator.optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)

            gen_logits_fake = discriminator.forward(fake_images)
            g_loss = generator.loss(gen_logits_fake)
            g_loss.backward()
            generator.optimizer.step()

            if (iter_count % show_every == 0):
                checkpt_t = time.time()
                print("time : {:.2f} sec".format(checkpt_t - start_t))
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                print("real logits average ", torch.mean(logits_real).data)
                print("average output generator : ",
                      torch.mean(fake_images).data)
                print("fake logits average ", torch.mean(gen_logits_fake).data)
                imgs = fake_images[:16].data.numpy()
                show_images(imgs,
                            iter_num=iter_count,
                            save=True,
                            show=False,
                            model=generator.label)
            iter_count += 1
            if iter_count % save_every == 0:
                torch.save(
                    discriminator, 'results/weights/discriminator' +
                    discriminator.label + '.pt')
                torch.save(
                    generator,
                    'results/weights/generator' + generator.label + '.pt')
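Example #1 relies on helpers the snippet does not define: sample_noise and the loss functions behind discriminator.loss / generator.loss. A minimal sketch of what a vanilla GAN setup like this typically assumes — the definitions below are illustrative, not the original author's code:

import torch
import torch.nn.functional as F

def sample_noise(batch_size, dim):
    # Uniform noise in [-1, 1] is the usual seed for these loops (assumed).
    return torch.rand(batch_size, dim) * 2 - 1

def discriminator_loss(logits_real, logits_fake):
    # Vanilla GAN objective on raw logits: real labeled 1, fake labeled 0.
    loss_real = F.binary_cross_entropy_with_logits(
        logits_real, torch.ones_like(logits_real))
    loss_fake = F.binary_cross_entropy_with_logits(
        logits_fake, torch.zeros_like(logits_fake))
    return loss_real + loss_fake

def generator_loss(logits_fake):
    # The generator tries to make the discriminator output 1 on fakes.
    return F.binary_cross_entropy_with_logits(
        logits_fake, torch.ones_like(logits_fake))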
Example #2
def train(discriminator,
          generator,
          show_every=25,
          num_epochs=10,
          resume=False,
          save_every=1500):
    iter_count = 0
    dis_optimizer = get_optimizer(discriminator)
    gen_optimizer = get_optimizer(generator)
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            dis_optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            # noise = Variable(sample_noise(batch_size, 32**2*3)).type(torch.FloatTensor)
            logits_real = discriminator(real_data.view([-1, 32**2 * 3])).type(
                torch.FloatTensor)

            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator(g_fake_seed)
            # Detach the generator output (not the logits) so the
            # discriminator still gets gradients from the fake branch.
            logits_fake = discriminator(fake_images.detach().view(
                [-1, 32**2 * 3]))

            d_total_error = discriminator_loss(logits_real, logits_fake)
            d_total_error.backward()
            dis_optimizer.step()

            gen_optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator(g_fake_seed)

            gen_logits_fake = discriminator(fake_images)
            g_loss = generator_loss(gen_logits_fake)
            g_loss.backward()
            gen_optimizer.step()

            if (iter_count % show_every == 0):
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                fake_images = fake_images.view([-1, 3, 32, 32])
                imgs = fake_images[:64].data
                show_cifar(imgs,
                           iter_num=iter_count,
                           save=True,
                           show=False,
                           name=generator.label)
            iter_count += 1
            if iter_count % save_every == 0:
                torch.save(
                    discriminator, '../../results/weights/discriminator' +
                    discriminator.label + '.pt')
                torch.save(
                    generator, '../../results/weights/generator' +
                    generator.label + '.pt')
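Example #2 builds its optimizers with a get_optimizer helper that is not shown. A plausible definition, using the Adam settings common in GAN recipes (the learning rate and betas here are assumptions):

import torch.optim as optim

def get_optimizer(model):
    # Adam with beta1 = 0.5 is the customary choice for GAN training.
    return optim.Adam(model.parameters(), lr=1e-3, betas=(0.5, 0.999))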
Example #3
def train(discriminator, generator, show_every=25, num_epochs=10, resume=False,
          save_every=2000):
    iter_count = 0
    dis_optimizer = optimizer_discriminator(discriminator)
    gen_optimizer = optimizer_generator(generator)
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            if iter_count == 0:
                weights_average = {}
                weights_average_gen = {}
                for name, value in discriminator.state_dict().items():
                    weights_average[name] = torch.mean(value)
                for name, value in generator.state_dict().items():
                    weights_average_gen[name] = torch.mean(value)
                print("Average value of initialized weights dis : \n", weights_average)
                print("Average value of initialized weights gen : \n", weights_average_gen)

            dis_optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator(real_data).type(torch.FloatTensor)

            print("Average logits real :", torch.mean(logits_real).data.numpy())
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(torch.FloatTensor)
            fake_images = generator(g_fake_seed)
            print("average output generator :", torch.mean(fake_images).data.numpy())
            logits_fake = discriminator(fake_images.detach())
            print("Avarage logits fake :", torch.mean(logits_fake).data.numpy())

            d_total_error = discriminator_loss(logits_real, logits_fake)
            d_total_error.backward()
            dis_optimizer.step()

            gen_optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(torch.FloatTensor)
            fake_images = generator(g_fake_seed)

            gen_logits_fake = discriminator(fake_images)
            g_loss = generator_loss(gen_logits_fake)
            g_loss.backward()
            gen_optimizer.step()

            if (iter_count % show_every == 0):
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(iter_count, d_total_error.data[0],
                                                           g_loss.data[0]))
                imgs = fake_images[:64].data
                show_cifar(imgs, iter_num=iter_count, save=True, show=False, name=generator.label)
            iter_count += 1
            if iter_count % save_every == 0:
                torch.save(discriminator, 'results/weights/discriminator' +
                           discriminator.label + '.pt')
                torch.save(generator, 'results/weights/generator' + generator.label
                           + '.pt')
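The iteration-0 block in Example #3 doubles as an initialization sanity check: it prints the mean of every parameter tensor before training starts. Factored into a standalone helper (a sketch, not part of the original):

import torch

def report_weight_means(model, tag):
    # Mean of each parameter tensor right after initialization.
    means = {name: torch.mean(value).item()
             for name, value in model.state_dict().items()}
    print("Average value of initialized weights {} :\n".format(tag), means)

Called once as report_weight_means(discriminator, "dis") and report_weight_means(generator, "gen"), it reproduces the two prints above.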
Example #4
def train(discriminator, generator, show_every=250, num_epochs=10):
    iter_count = 0
    dis_optimizer = optimizer_dis(discriminator)
    gen_optimizer = optimizer_gen(generator)
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            dis_optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator(
                2 * (real_data.view(batch_size, -1) - 0.5)).type(
                    torch.FloatTensor)

            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator(g_fake_seed)
            logits_fake = discriminator(fake_images.detach().view(
                batch_size, -1))

            d_total_error = discriminator_loss(logits_real, logits_fake)

            # Update the discriminator only on every other iteration so it
            # does not overpower the generator.
            if iter_count % 2 == 0:
                d_total_error.backward()
                dis_optimizer.step()

            gen_optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator(g_fake_seed)

            gen_logits_fake = discriminator(fake_images.view(batch_size, -1))
            g_loss = generator_loss(gen_logits_fake)
            g_loss.backward()
            gen_optimizer.step()

            if (iter_count % show_every == 0):
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                imgs_numpy = fake_images.data.cpu().numpy()
                plot_batch_images(imgs_numpy[0:16])
                print()
            iter_count += 1
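Example #4 rescales real pixels with 2 * (x - 0.5), mapping [0, 1] to [-1, 1] so real and generated images share the range of a tanh output layer. The rescaling and its inverse, written out (an assumed convention, matching the expression above):

def to_tanh_range(x):
    # [0, 1] pixels -> [-1, 1], the output range of a tanh generator.
    return 2 * (x - 0.5)

def from_tanh_range(x):
    # Inverse map, e.g. for displaying generated samples as images.
    return x * 0.5 + 0.5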
Example #5
def train(discriminator,
          generator,
          show_every=100,
          num_epochs=20,
          save_every=2000):
    start_t = time.time()
    iter_count = 0
    optimizer_dis = optimizer_discriminator(discriminator)
    generator.add_optimizer()
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            # weights_init = discriminator.state_dict()["7.weight"][:]
            if iter_count == 0:
                weights_average = {}
                weights_average_gen = {}
                for name, value in discriminator.state_dict().items():
                    weights_average[name] = torch.mean(value)
                for name, value in generator.state_dict().items():
                    weights_average_gen[name] = torch.mean(value)
                print("Average value of initialized weights dis : \n",
                      weights_average)
                print("Average value of initialized weights gen : \n",
                      weights_average_gen)

            optimizer_dis.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator(real_data).type(torch.FloatTensor)

            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)
            logits_fake = discriminator(fake_images.detach())

            # analysed = batch_size
            # fooled = np.sum(logits_fake.data.numpy() > 0.5)
            # print("average logits real ", torch.mean(logits_real))
            print("average logits fake ", torch.mean(logits_fake))
            # print("fooled : ", fooled)
            # print("analysed ", analysed)
            # print("guess ratio {0:.4f}" .format(fooled/analysed))
            # if fooled/analysed > 0.5 or iter_count == 0:
            d_total_error = discriminator_loss(logits_real, logits_fake)
            d_total_error.backward()
            optimizer_dis.step()

            generator.optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)
            print("average output generator :",
                  torch.mean(fake_images).data.numpy())

            gen_logits_fake = discriminator(fake_images)
            g_loss = generator.loss(gen_logits_fake)
            g_loss.backward()
            generator.optimizer.step()

            # weights_after = discriminator.state_dict()["7.weight"]
            # if iter_count > 0:
            # assert weights_after.numpy().all() == weights_init.numpy().all(), "Discriminator has " \
            #                                                           "trained"

            if (iter_count % show_every == 0):
                checkpt_t = time.time()
                print("time : {:.2f} sec".format(checkpt_t - start_t))
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                print("real logits average ", torch.mean(logits_real).data)
                print("average output generator : ",
                      torch.mean(fake_images).data)
                print("fake logits average ", torch.mean(gen_logits_fake).data)
                imgs = fake_images[:64].data
                show_cifar(imgs,
                           iter_num=iter_count,
                           save=True,
                           show=False,
                           name=generator.label)
            iter_count += 1
            if iter_count % save_every == 0:
                torch.save(
                    discriminator, 'results/weights/discriminator' +
                    discriminator.label + '.pt')
                torch.save(
                    generator,
                    'results/weights/generator' + generator.label + '.pt')
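The commented-out block in Example #5 sketches a "fooled ratio" diagnostic: the fraction of fakes the discriminator scores above 0.5. Restored as a helper (an assumption based on those comments; it presumes sigmoid outputs rather than raw logits):

import numpy as np

def fooled_ratio(logits_fake):
    # Fraction of generated samples the discriminator classifies as real.
    return np.mean(logits_fake.data.numpy() > 0.5)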
Example #6
def train(discriminator,
          generator,
          show_every=250,
          num_epochs=100,
          save_every=2000):
    iter_count = 0
    dis_optimizer = optimizer_dis(discriminator)
    gen_optimizer = optimizer_gen(generator)
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            dis_optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator(
                2 * (real_data.view(batch_size, -1) - 0.5)).type(
                    torch.FloatTensor)

            # g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(torch.FloatTensor)
            # fake_images = generator(g_fake_seed)

            for _ in range(5):
                # Zero the critic gradients on each of the five inner steps
                # so successive updates don't accumulate into one another.
                dis_optimizer.zero_grad()
                g_fake_seed = Variable(sample_noise(
                    batch_size, noise_dim)).type(torch.FloatTensor)
                fake_images = generator(g_fake_seed)
                logits_fake = discriminator(fake_images.detach().view(
                    batch_size, -1))

                d_total_error = discriminator_loss(logits_real, logits_fake)

                d_total_error.backward(retain_graph=True)
                dis_optimizer.step()

                # Weight clipping (WGAN): keep the critic approximately
                # Lipschitz by clamping every parameter to [-0.01, 0.01].
                for p in discriminator.parameters():
                    p.data.clamp_(-0.01, 0.01)

            gen_optimizer.zero_grad()
            gen_logits_fake = discriminator(fake_images.view(batch_size, -1))
            g_loss = generator_loss(gen_logits_fake)
            g_loss.backward()
            gen_optimizer.step()

            if iter_count % show_every == 0:
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                imgs_numpy = fake_images.data.cpu().numpy()
                show_images(imgs_numpy[0:16],
                            iter_count,
                            save=True,
                            model=generator.label)

            iter_count += 1
            if iter_count % save_every == 0:
                torch.save(
                    discriminator, 'results/weights/discriminator' +
                    discriminator.label + '.pt')
                torch.save(
                    generator,
                    'results/weights/generator' + generator.label + '.pt')
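Example #6 is a WGAN-style critic: five discriminator updates per generator update plus weight clipping. The snippet does not show the matching losses; with clipping they would normally be the Wasserstein estimates below (a sketch, assuming the critic outputs unbounded scores):

import torch

def discriminator_loss(logits_real, logits_fake):
    # WGAN critic loss: push real scores up and fake scores down.
    return torch.mean(logits_fake) - torch.mean(logits_real)

def generator_loss(logits_fake):
    # WGAN generator loss: raise the critic's score on fakes.
    return -torch.mean(logits_fake)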
Example #7
def train(discriminator,
          generator,
          show_every=50,
          num_epochs=20,
          save_every=4000):
    start_t = time.time()
    iter_count = 0
    discriminator.add_optimizer()
    generator.add_optimizer()
    for epoch in range(num_epochs):
        for x, _ in loader_train:
            if len(x) != batch_size:
                continue
            discriminator.optimizer.zero_grad()
            real_data = Variable(x).type(torch.FloatTensor)
            logits_real = discriminator.forward(real_data.view(
                batch_size, -1)).type(torch.FloatTensor)

            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed).detach()
            logits_fake = discriminator.forward(fake_images)

            d_total_error = discriminator.loss(logits_real, logits_fake)
            d_total_error.backward()
            discriminator.optimizer.step()

            generator.optimizer.zero_grad()
            g_fake_seed = Variable(sample_noise(batch_size, noise_dim)).type(
                torch.FloatTensor)
            fake_images = generator.forward(g_fake_seed)

            gen_logits_fake = discriminator.forward(fake_images)
            g_loss = generator.loss(gen_logits_fake)
            g_loss.backward()
            generator.optimizer.step()

            if (iter_count % show_every == 0):
                checkpt_t = time.time()
                print("Mean deconv W1 {0}, var {1}".format(
                    np.mean(generator.W1_deconv.data.numpy()),
                    np.std(generator.W1_deconv.data.numpy())))
                print("Mean deconv W2 {0}, var {1}".format(
                    np.mean(generator.W2_deconv.data.numpy()),
                    np.std(generator.W2_deconv.data.numpy())))
                print("Mean conv W1 {0}, var {1}".format(
                    np.mean(discriminator.W1_conv.data.numpy()),
                    np.std(discriminator.W1_conv.data.numpy())))
                print("Mean conv W2 {0}, var {1}".format(
                    np.mean(discriminator.W2_conv.data.numpy()),
                    np.std(discriminator.W2_conv.data.numpy())))
                print("time : {:.2f} sec".format(checkpt_t - start_t))
                print('Iter: {}, D: {:.4}, G:{:.4}'.format(
                    iter_count, d_total_error.data[0], g_loss.data[0]))
                print("real logits average ",
                      torch.mean(logits_real).data.numpy())
                print("variance fake images : ",
                      torch.std(fake_images).data.numpy())
                print("variance real images : ",
                      torch.std(real_data).data.numpy())
                print("real image :", real_data.data.numpy())
                print("fake image :", fake_images.data.numpy())
                print("fake logits average ",
                      torch.mean(gen_logits_fake).data.numpy())
                fake_images = fake_images.view(batch_size, 3, 32, 32)
                imgs = fake_images[:64].data
                # imgs_real = real_data[:64].data
                show_cifar(imgs,
                           iter_num=iter_count,
                           save=True,
                           show=False,
                           name=generator.label)
                # show_cifar(imgs_real, iter_num=iter_count, save=True, show=False,
                #            name=generator.label + "real")

            iter_count += 1
            if iter_count % save_every == 0:
                torch.save(
                    discriminator, 'results/weights/discriminator' +
                    discriminator.label + '.pt')
                torch.save(
                    generator,
                    'results/weights/generator' + generator.label + '.pt')
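All seven examples target pre-0.4 PyTorch: Variable wrappers, .type(torch.FloatTensor) casts, and .data[0] for scalar losses. On current PyTorch the same discriminator step reads roughly as follows (a sketch; discriminator, generator, dis_optimizer, discriminator_loss, batch_size and noise_dim are as in the examples above):

import torch

def discriminator_step(discriminator, generator, dis_optimizer, x,
                       batch_size, noise_dim):
    dis_optimizer.zero_grad()
    logits_real = discriminator(x.float())        # no Variable wrapper needed
    noise = torch.rand(batch_size, noise_dim) * 2 - 1
    logits_fake = discriminator(generator(noise).detach())
    d_total_error = discriminator_loss(logits_real, logits_fake)
    d_total_error.backward()
    dis_optimizer.step()
    return d_total_error.item()                   # .item() replaces .data[0]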