Example #1
def train():
    logger = Logger(model_name="DCGAN", data_name="CIFAR10")
    num_epochs = 200
    num_batches = len(data_loader)
    for epoch in range(num_epochs):
        for n_batch, (real_batch, _) in enumerate(data_loader):
            N = real_batch.size(0)

            # TRAIN DISCRIMINATOR
            # generate real data from data loader
            real_data = Variable(real_batch)
            if torch.cuda.is_available(): real_data = real_data.cuda()

            # generate fake data and detach gradient
            fake_data = generator(data.noise(N)).detach()

            # train discriminator
            d_error, d_pred_real, d_pred_fake = train_discriminator(
                discriminator, loss, d_optimizer, real_data, fake_data)

            # TRAIN GENERATOR

            # generate fake data
            fake_data = generator(data.noise(N))

            # train generator
            g_error = train_generator(discriminator, loss, g_optimizer,
                                      fake_data)

            # LOG BATCH ERROR
            logger.log(d_error, g_error, epoch, n_batch, num_batches)
            # Display progress every few batches
            if n_batch % 10 == 0:
                display.clear_output(True)
                test_images = generator(test_noise).data

                logger.log_images(test_images, num_test_samples, epoch,
                                  n_batch, num_batches)
                logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                      d_error, g_error, d_pred_real,
                                      d_pred_fake)

            # Model Checkpoints
            logger.save_models(generator, discriminator, epoch)
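None of these excerpts show the train_discriminator and train_generator helpers they call. A minimal sketch matching Example #1's call signature, assuming a BCE-style loss on a sigmoid discriminator output (an assumption, not something the excerpt states):

import torch

def train_discriminator(discriminator, loss, optimizer, real_data, fake_data):
    N = real_data.size(0)
    optimizer.zero_grad()

    # Real pass: targets are ones
    pred_real = discriminator(real_data)
    error_real = loss(pred_real, torch.ones(N, 1, device=real_data.device))
    error_real.backward()

    # Fake pass: targets are zeros (the caller already detached fake_data)
    pred_fake = discriminator(fake_data)
    error_fake = loss(pred_fake, torch.zeros(N, 1, device=fake_data.device))
    error_fake.backward()

    optimizer.step()
    return error_real + error_fake, pred_real, pred_fake

def train_generator(discriminator, loss, optimizer, fake_data):
    optimizer.zero_grad()
    # Non-saturating generator objective: label fakes as real
    pred = discriminator(fake_data)
    error = loss(pred, torch.ones(fake_data.size(0), 1, device=fake_data.device))
    error.backward()
    optimizer.step()
    return error

Note that Examples #5 and #6 pass the same arguments in a different order (optimizer first), so the signature there would be train_discriminator(optimizer, discriminator, loss, real_data, fake_data).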
Example #2
# Iterate through epochs
for epoch in range(num_epochs):
    for n_batch, (batch, _) in enumerate(data_loader):

        # 1. Train Discriminator
        X_batch = images_to_vectors(batch.permute(0, 2, 3, 1).numpy())
        feed_dict = {X: X_batch, Z: noise(BATCH_SIZE, NOISE_SIZE)}
        _, d_error, d_pred_real, d_pred_fake = session.run(
            [D_opt, D_loss, D_real, D_fake], feed_dict=feed_dict)

        # 2. Train Generator
        feed_dict = {Z: noise(BATCH_SIZE, NOISE_SIZE)}
        _, g_error = session.run([G_opt, G_loss], feed_dict=feed_dict)

        if n_batch % 100 == 0:
            display.clear_output(True)
            # Generate images from test noise
            test_images = session.run(G_sample, feed_dict={Z: test_noise})
            test_images = vectors_to_images(test_images)
            # Log Images
            logger.log_images(test_images,
                              num_test_samples,
                              epoch,
                              n_batch,
                              num_batches,
                              format='NHWC')
            # Log Status
            logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                  d_error, g_error, d_pred_real, d_pred_fake)
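This loop leans on three helpers that the excerpt never defines: noise, images_to_vectors, and vectors_to_images. A plausible NumPy sketch (the latent distribution and the MNIST-like NHWC image shape are assumptions):

import numpy as np

def noise(batch_size, noise_size):
    # Latent samples; the excerpt does not pin down the distribution
    return np.random.normal(0.0, 1.0, size=(batch_size, noise_size))

def images_to_vectors(images):
    # Flatten an NHWC image batch to (N, H*W*C) for a dense discriminator
    return images.reshape(images.shape[0], -1)

def vectors_to_images(vectors, shape=(-1, 28, 28, 1)):
    # Inverse of images_to_vectors; the shape is an assumed default
    return vectors.reshape(shape)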
Example #3
        d_x = d_real_out.mean().item()

        # Discriminator, fake pass: detach so no gradients flow to the generator
        fake_data = generator(torch.randn(batch_size, nz, 1, 1, device=device))
        label.fill_(fake_label)
        d_fake_out = discriminator(fake_data.detach())
        d_fake_err = loss(d_fake_out, label)
        d_fake_err.backward()
        D_G_z1 = d_fake_out.mean().item()

        d_tot_err = d_real_err + d_fake_err
        d_optim.step()

        # Generator pass: label the fakes as real (non-saturating loss)
        g_optim.zero_grad()
        label.fill_(real_label)
        g_out = discriminator(fake_data)
        g_err = loss(g_out, label)
        g_err.backward()
        D_G_z2 = g_out.mean().item()
        g_optim.step()

        logger.log(d_tot_err, g_err, epoch, n_batch, num_batches)

        if n_batch % 100 == 0:
            test_images = generator(test_noise).data.cpu()
            logger.log_images(test_images, int(opt.numSamples), epoch, n_batch,
                              num_batches)
            logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                  d_tot_err, g_err, d_real_out, d_fake_out)
        pbar.update(1)
        logger.save_models(generator, discriminator, epoch)
pbar.close()
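Example #3's excerpt starts mid-loop: the real-data half of the discriminator update (everything that produces d_real_out, d_real_err, and label) is cut off. Judging from the variable names it uses, the omitted lines presumably resemble the standard DCGAN real pass. A hypothetical reconstruction, not the original code (the loop variable batch and the 1.0/0.0 values of real_label/fake_label are assumptions):

        # Hypothetical reconstruction of the omitted real-data pass
        d_optim.zero_grad()
        real_data = batch.to(device)
        batch_size = real_data.size(0)
        label = torch.full((batch_size,), real_label, dtype=torch.float, device=device)
        d_real_out = discriminator(real_data)
        d_real_err = loss(d_real_out, label)
        d_real_err.backward()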
Example #4
        gradients_of_d = d_tape.gradient(d_loss, discriminator.variables)
        gradients_of_c = c_tape.gradient(c_loss,
                                         classifier.layers[1].variables)

        g_optimizer.apply_gradients(zip(gradients_of_g, generator.variables))
        d_optimizer.apply_gradients(
            zip(gradients_of_d, discriminator.variables))
        c_optimizer.apply_gradients(
            zip(gradients_of_c, classifier.layers[1].variables))

        logger.log(d_loss.numpy(), g_loss.numpy(), epoch, n_batch, num_batches)

        # Display Progress every few batches
        if n_batch % 10 == 0:
            now_time = time.time()
            elapsed = now_time - batch_start_time
            batch_start_time = now_time
            print("Batches took {:.3f} ms".format(elapsed * 1000))

            test_images = generator(test_noise).numpy()

            logger.log_images(test_images, num_test_samples, epoch, n_batch,
                              num_batches)
            # Display status Logs
            logger.display_status(epoch, num_epochs, n_batch, num_batches,
                                  d_loss.numpy(), g_loss.numpy(),
                                  c_loss.numpy(), real_output.numpy(),
                                  generated_output.numpy())
            # Save progress
            checkpoint.save(file_prefix=checkpoint_prefix)
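The tapes g_tape, d_tape, and c_tape are opened earlier in the function and not shown here. For reference, the tape -> gradient -> apply_gradients pattern this excerpt relies on, reduced to a toy variable so it runs standalone (every name in this sketch is illustrative):

import tensorflow as tf

# Toy quadratic: minimize (w - 5)^2 with one SGD step
w = tf.Variable(2.0)
optimizer = tf.optimizers.SGD(learning_rate=0.1)

with tf.GradientTape() as tape:
    loss = (w - 5.0) ** 2

grads = tape.gradient(loss, [w])            # differentiate loss w.r.t. w
optimizer.apply_gradients(zip(grads, [w]))  # same call pattern as above
print(w.numpy())  # 2.6: one step toward the minimum at 5.0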
Example #5
def main(batch_size, epochs, data_file, num_test_samples, num_workers,
         learning_rate, beta1, beta2):

    # Create loader with data, so that we can iterate over it
    data_loader = kmnist_dataloader(data_file, batch_size, num_workers)

    # Num batches
    num_batches = len(data_loader)

    generator, discriminator = create_model()

    # Optimizers
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=learning_rate,
                             betas=(beta1, beta2))
    g_optimizer = optim.Adam(generator.parameters(),
                             lr=learning_rate,
                             betas=(beta1, beta2))

    # Loss function
    loss = nn.BCELoss()

    test_noise = noise(num_test_samples)

    logger = Logger(model_name='DCGAN', data_name='KMNIST')
    nn_output = []

    for epoch in range(epochs):
        d_loss, g_loss = 0, 0
        cnt = 0
        for n_batch, real_batch in enumerate(data_loader):

            # 1. Train Discriminator

            # ============================================
            #            TRAIN THE DISCRIMINATOR
            # ============================================

            real_data = Variable(real_batch)
            if torch.cuda.is_available(): real_data = real_data.cuda()
            # Generate fake data
            fake_data = generator(noise(real_data.size(0))).detach()
            # Train D
            d_error, d_pred_real, d_pred_fake = train_discriminator(
                d_optimizer, discriminator, loss, real_data, fake_data)

            # 2. Train Generator

            # ============================================
            #            TRAIN THE GENERATOR
            # ============================================

            # Generate fake data
            fake_data = generator(noise(real_batch.size(0)))
            # Train G
            g_error = train_generator(g_optimizer, discriminator, loss,
                                      fake_data)
            # Log error
            logger.log(d_error, g_error, epoch, n_batch, num_batches)

            # Display Progress
            if n_batch % 100 == 0:
                # display.clear_output(True)
                # Display Images
                test_images = generator(test_noise).data.cpu()
                logger.log_images(test_images, num_test_samples, epoch,
                                  n_batch, num_batches)
                # Display status Logs
                (epoch, temp_d_loss, temp_g_loss) = logger.display_status(
                    epoch, epochs, n_batch, num_batches, d_error, g_error,
                    d_pred_real, d_pred_fake)
                d_loss += temp_d_loss
                g_loss += temp_g_loss
                cnt += 1

            # Model Checkpoints
            logger.save_models(generator, discriminator, epoch)

        d_loss, g_loss = d_loss / cnt, g_loss / cnt
        nn_output.append([epoch, d_loss, g_loss])

    pd_results = pd.DataFrame(nn_output, columns=['epoch', 'd_loss', 'g_loss'])
    print(pd_results)

    fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(12, 4))
    axes.plot(pd_results['epoch'],
              pd_results['d_loss'],
              label='discriminative loss')
    axes.plot(pd_results['epoch'],
              pd_results['g_loss'],
              label='generative loss')
    # axes[0].plot(pd_results['epoch'],pd_results['test_loss'], label='test_loss')

    axes.legend()
    plt.show()
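Examples #5 and #6 both call a noise helper that neither excerpt defines. A minimal sketch, assuming a 100-dimensional latent vector (the usual DCGAN choice) and a generator with a dense input; a convolutional generator would instead expect shape (n, nz, 1, 1):

import torch

def noise(n, nz=100):
    # n latent vectors; nz=100 is an assumption, not taken from the excerpts
    z = torch.randn(n, nz)
    return z.cuda() if torch.cuda.is_available() else z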
Example #6
def main(batch_size, epochs, num_test_samples, num_workers):

    # Create loader with data, so that we can iterate over it
    data_loader = mnist_dataloader(batch_size, num_workers)
    # Num batches
    num_batches = len(data_loader)

    generator, discriminator = create_model()

    # Optimizers
    d_optimizer = optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    g_optimizer = optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))

    # Loss function
    loss = nn.BCELoss()

    test_noise = noise(num_test_samples)

    logger = Logger(model_name='DCGAN', data_name='MNIST')

    for epoch in range(epochs):
        for n_batch, (real_batch, _) in enumerate(data_loader):
            
            # 1. Train Discriminator

            # ============================================
            #            TRAIN THE DISCRIMINATOR
            # ============================================

            real_data = Variable(real_batch)
            if torch.cuda.is_available(): real_data = real_data.cuda()
            # Generate fake data
            fake_data = generator(noise(real_data.size(0))).detach()
            # Train D
            d_error, d_pred_real, d_pred_fake = train_discriminator(d_optimizer, discriminator, loss,
                                                                    real_data, fake_data)

            # 2. Train Generator

            # ============================================
            #            TRAIN THE GENERATOR
            # ============================================

            # Generate fake data
            fake_data = generator(noise(real_batch.size(0)))
            # Train G
            g_error = train_generator(g_optimizer, discriminator, loss, fake_data)
            # Log error
            logger.log(d_error, g_error, epoch, n_batch, num_batches)
            
            # Display Progress
            if n_batch % 100 == 0:
                # display.clear_output(True)
                # Display Images
                test_images = generator(test_noise).data.cpu()
                logger.log_images(test_images, num_test_samples, epoch, n_batch, num_batches)
                # Display status Logs
                logger.display_status(
                    epoch, epochs, n_batch, num_batches,
                    d_error, g_error, d_pred_real, d_pred_fake
                )
            # Model Checkpoints
            logger.save_models(generator, discriminator, epoch)
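The excerpt does not show how main is invoked. An illustrative call, with placeholder hyperparameter values that are not taken from the source:

if __name__ == '__main__':
    # Placeholder values for demonstration only
    main(batch_size=100, epochs=200, num_test_samples=16, num_workers=2)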