# Ejemplo n.º 1 (Example 1 — scraped snippet separator; original vote count: 0)
def train(autoencoder, optimizer, start_epoch):
    """Train the autoencoder, logging progress and checkpointing every epoch.

    Args:
        autoencoder: model (possibly wrapped in ``nn.DataParallel``) whose
            forward pass returns ``(latent, reconstruction)``.
        optimizer: optimizer over the autoencoder's parameters.
        start_epoch: epoch index to resume from (exclusive upper bound is the
            module-level ``epochs``).

    Relies on module-level globals: ``epochs``, ``dataloader``, ``device``,
    ``criterion``, ``losses``, ``out_channels``, ``show_data``.
    """
    for epoch in range(start_epoch, epochs):
        for i, data in enumerate(dataloader):
            autoencoder.zero_grad()
            img = data.to(device)
            _, fake_img = autoencoder(img)
            loss = criterion(img, fake_img)
            loss.backward()
            optimizer.step()

            # Store a plain float: appending the tensor itself would keep the
            # whole autograd graph alive and leak memory across iterations.
            losses.append(loss.item())

            if i % 10 == 0:
                print('[%d/%d][%d/%d]\tLoss: %.4f' %
                      (epoch, epochs, i, len(dataloader), loss.item()))

        # Sample images from random latent codes at the end of each epoch.
        test_noise = torch.randn(64, out_channels, 1, 1, device=device)
        with torch.no_grad():
            # Unwrap DataParallel when needed. The original handled only the
            # wrapped case, leaving `fake` undefined (NameError) otherwise.
            if isinstance(autoencoder, torch.nn.DataParallel):
                decoder = autoencoder.module.decoder
            else:
                decoder = autoencoder.decoder
            fake = decoder(test_noise).detach().cpu()
        show_data(fake, 'epoch %d: generated images' % epoch)

        torch.save(
            {
                'epoch': epoch,
                'loss': losses,
                'autoencoder_state_dict': autoencoder.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, './checkpoints/AE/epoch_%d.pth' % epoch)
# Ejemplo n.º 2 (Example 2 — scraped snippet separator; original vote count: 0)
def train(vae, optimizer, start_epoch):
    """Train the VAE, logging progress and checkpointing every epoch.

    Args:
        vae: model (possibly wrapped in ``nn.DataParallel``) whose forward
            pass returns ``(reconstruction, mu, log_var)``.
        optimizer: optimizer over the VAE's parameters.
        start_epoch: epoch index to resume from.

    Relies on module-level globals: ``epochs``, ``dataloader``, ``device``,
    ``loss_function``, ``len_dataset``, ``losses``, ``out_channels``,
    ``show_data``.
    """
    for epoch in range(start_epoch, epochs):
        for i, data in enumerate(dataloader):
            vae.zero_grad()
            img = data.to(device)
            fake_img, mu, log_var = vae(img)
            # Weight the KL term by the fraction of the dataset in this batch.
            kld_weight = len(fake_img) / len_dataset
            loss = loss_function(img, fake_img, mu, log_var, kld_weight)
            loss.backward()
            optimizer.step()
            # Store a plain float: appending the tensor itself would keep the
            # whole autograd graph alive and leak memory across iterations.
            losses.append(loss.item())

            if i % 10 == 0:
                print('[%d/%d][%d/%d]\tLoss: %.4f' %
                      (epoch, epochs, i, len(dataloader), loss.item()))

        # Sample latent codes z = mu + sigma * eps from the posterior of the
        # last batch of the epoch.
        # NOTE(review): slicing [:64] assumes the final batch holds at least
        # 64 samples — confirm against the dataloader's drop_last setting.
        sigma = log_var.mul(0.5).exp_()
        # (64, 100, 1, 1)
        test_noise = torch.randn(64, out_channels, 1, 1, device=device)
        test_noise = test_noise.mul_(sigma[:64]).add_(mu[:64])

        with torch.no_grad():
            # Unwrap DataParallel when needed. The original handled only the
            # wrapped case, leaving `fake` undefined (NameError) otherwise.
            if isinstance(vae, torch.nn.DataParallel):
                decoder = vae.module.decoder
            else:
                decoder = vae.decoder
            fake = decoder(test_noise).detach().cpu()
        show_data(fake, 'epoch %d: generated images' % epoch)

        torch.save(
            {
                'epoch': epoch,
                'loss': losses,
                'vae_state_dict': vae.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, './checkpoints/VAE/epoch_%d.pth' % epoch)
# Ejemplo n.º 3 (Example 3 — scraped snippet separator; original vote count: 0)
def train(discriminator, generator, optimizerD, optimizerG, start_epoch):
    """Run the DCGAN training loop: alternate D and G updates each batch.

    Args:
        discriminator: D network mapping images to realness scores.
        generator: G network mapping (batch, in_channels, 1, 1) noise to images.
        optimizerD: optimizer over the discriminator's parameters.
        optimizerG: optimizer over the generator's parameters.
        start_epoch: epoch index to resume from.

    Relies on module-level globals: ``epochs``, ``dataloader``, ``device``,
    ``criterion``, ``batch_size``, ``in_channels``, ``discriminator_losses``,
    ``generator_losses``, ``show_data``.
    """
    for epoch in range(start_epoch, epochs):
        for i, data in enumerate(dataloader):
            # --- Train the discriminator: real batch, then fake batch. ---
            discriminator.zero_grad()
            real_img = data.to(device)
            real_out = discriminator(real_img).view(-1)
            # Labels are sized from D's output so the (possibly smaller)
            # final batch is handled correctly.
            real_labels = torch.ones(len(real_out), device=device)
            real_loss = criterion(real_out, real_labels)
            real_loss.backward()

            noise = torch.randn(batch_size, in_channels, 1, 1, device=device)
            # Detach so this backward pass leaves G's parameters untouched.
            fake_img = generator(noise).detach()
            fake_out = discriminator(fake_img).view(-1)
            fake_labels = torch.zeros(len(fake_out), device=device)
            fake_loss = criterion(fake_out, fake_labels)
            fake_loss.backward()

            discriminator_loss = real_loss + fake_loss

            optimizerD.step()

            # --- Train the generator: make D label fresh fakes as real. ---
            generator.zero_grad()
            noise = torch.randn(batch_size, in_channels, 1, 1, device=device)
            fake_img = generator(noise)
            fake_out = discriminator(fake_img).view(-1)
            real_labels = torch.ones(len(fake_out), device=device)
            fake_loss = criterion(fake_out, real_labels)
            fake_loss.backward()

            generator_loss = fake_loss

            optimizerG.step()

            if i % 10 == 0:
                print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f' %
                      (epoch, epochs, i, len(dataloader),
                       discriminator_loss.item(), generator_loss.item()))
                # Store plain floats: appending the tensors would keep them
                # (and their device memory) alive for the whole run.
                discriminator_losses.append(discriminator_loss.item())
                generator_losses.append(generator_loss.item())

        # Sample a fixed-size grid of images at the end of each epoch.
        test_noise = torch.randn(64, in_channels, 1, 1, device=device)
        with torch.no_grad():
            fake = generator(test_noise).detach().cpu()
        show_data(fake, 'epoch %d: generated images' % epoch)

        torch.save(
            {
                'epoch': epoch,
                'g_loss': generator_losses,
                'd_loss': discriminator_losses,
                'generator_state_dict': generator.state_dict(),
                'discriminator_state_dict': discriminator.state_dict(),
                'optimizerG_state_dict': optimizerG.state_dict(),
                'optimizerD_state_dict': optimizerD.state_dict(),
            }, './checkpoints/DCGAN/epoch_%d.pth' % epoch)
# Ejemplo n.º 4 (Example 4 — scraped snippet separator; original vote count: 0)
                # NOTE(review): truncated fragment — this is the tail of an
                # AE training loop (it duplicates Example 1's ending); the
                # enclosing function's start is not in this excerpt.
                fake = autoencoder.module.decoder(test_noise).detach().cpu()
        show_data(fake, 'epoch %d: generated images' % epoch)

        torch.save(
            {
                'epoch': epoch,
                'loss': losses,  # running list of per-batch losses
                'autoencoder_state_dict': autoencoder.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }, './checkpoints/AE/epoch_%d.pth' % epoch)


if __name__ == "__main__":

    batch = next(iter(dataloader))
    show_data(batch[:64], description='training images')

    autoencoder = AutoEncoder(in_channels, out_channels).to(device)
    autoencoder.apply(weights_init)
    print(autoencoder)

    # 是否使用多块GPU
    if (device.type == 'cuda') and (gpu_num > 1):
        print("使用%d块GPU训练!" % gpu_num)
        autoencoder = nn.DataParallel(autoencoder)

    optimizer = optim.Adam(autoencoder.parameters(),
                           lr=lr,
                           betas=(beta1, 0.999))

    start_epoch = 0