Example No. 1
def create_discriminator(params):
    """Build a Discriminator and its Adam optimizer from a params dict."""
    disc = Discriminator(params).to(params['device'])
    disc_opt = torch.optim.Adam(disc.parameters(),
                                lr=params['lr'],
                                betas=(params['beta_1'], params['beta_2']))
    # Apply the custom weight initialization to every submodule.
    disc = disc.apply(weights_init)
    return disc, disc_opt
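This snippet assumes that Discriminator and weights_init (the latter is shown in Example No. 2) are already in scope. A minimal usage sketch, with an illustrative params dict whose keys match the ones the function reads:

import torch

# Illustrative hyperparameter values; only the keys are dictated by the function.
params = {
    'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
    'lr': 0.0002,     # Adam learning rate
    'beta_1': 0.5,    # a common choice for GAN training
    'beta_2': 0.999,
}
disc, disc_opt = create_discriminator(params)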
Example No. 2
optimizer = torch.optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.99))
# Binary cross-entropy for the real/fake decision; the discriminator is
# expected to end in a sigmoid, which BCELoss requires.
criterion = torch.nn.BCELoss()

def weights_init(m):
    # DCGAN-style initialization: conv weights from N(0, 0.02),
    # batch-norm weights from N(1, 0.02) with zero bias.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

netG = Generator()
netG.apply(weights_init)
netD = Discriminator()
netD.apply(weights_init)
print(netG)
print(netD)

optimizerD = optim.Adam(netD.parameters(), lr=0.0001, betas=(0.9, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=0.0001, betas=(0.9, 0.999))

real_label = 1
fake_label = 0
# Fixed latent batch used to track generator progress; nz is the latent dimension.
fixed_noise = torch.randn(batch_size, nz, 1, 1)

for epoch in range(100):
    for i, data in enumerate(train_loader, 0):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
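        # The original example is truncated at this point. What follows is a
        # minimal sketch of a typical DCGAN loop body using the names defined
        # above; it assumes netD ends in a sigmoid, as BCELoss requires.

        # Train the discriminator on real images
        netD.zero_grad()
        real = data[0]
        label = torch.full((real.size(0),), real_label, dtype=torch.float)
        output = netD(real).view(-1)
        errD_real = criterion(output, label)
        errD_real.backward()

        # Train the discriminator on fake images (detached so G is not updated)
        noise = torch.randn(real.size(0), nz, 1, 1)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        errD_fake.backward()
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        optimizerG.step()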
Example No. 3
def train():
    """Train DCGAN and save the generator and discrinator."""

    torch.manual_seed(7)
    epochs = 200
    z_dim = 100
    batch_size = 256

    lr = 0.0003  # A learning rate of 0.0002 works well on DCGAN
    beta_1 = 0.5
    beta_2 = 0.999

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261])
    ])

    dataset = datasets.ImageFolder(os.path.join(DATA_DIR, "train"), transform)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            shuffle=True,
                            num_workers=4,
                            drop_last=True)

    gen = Generator(z_dim).to(device)
    gen_optimizer = torch.optim.Adam(gen.parameters(),
                                     lr=lr,
                                     betas=(beta_1, beta_2))

    disc = Discriminator().to(device)
    disc_optimizer = torch.optim.Adam(disc.parameters(),
                                      lr=lr * 1.5,
                                      betas=(beta_1, beta_2))

    gen = gen.apply(weights_init)
    disc = disc.apply(weights_init)

    c_lambda = 10  # weight of gradient penalty in loss

    for epoch in range(epochs):
        print("Epoch:   ", epoch + 1, end='\n')
        total_discriminator_loss = 0
        total_generator_loss = 0
        display_fake = None

        for i, (real, _) in enumerate(dataloader):
            real = real.to(device)

            # UPDATE DISCRIMINATOR (CRITIC): five critic steps per generator step
            for _ in range(5):
                disc_optimizer.zero_grad()
                noise = torch.randn(batch_size, z_dim, device=device)
                fake = gen(noise)

                disc_fake_pred = disc(fake.detach())
                disc_real_pred = disc(real)

                # epsilon: random interpolation coefficient between real and
                # fake images, used to mix them for the gradient penalty
                epsilon = torch.rand(len(real),
                                     1,
                                     1,
                                     1,
                                     device=device,
                                     requires_grad=True)

                gradient = get_gradient(disc, real, fake.detach(), epsilon)
                gp = grad_penalty(gradient)

                # The critic should score real images higher and fakes lower;
                # minimizing the negated difference (plus the penalty) does that.
                disc_loss = (-torch.mean(disc_real_pred)
                             + torch.mean(disc_fake_pred)
                             + c_lambda * gp)

                total_discriminator_loss += disc_loss.item()
                disc_loss.backward(retain_graph=True)
                disc_optimizer.step()

            # UPDATE GENERATOR
            gen_optimizer.zero_grad()

            noise = torch.randn(batch_size, z_dim, device=device)
            fake = gen(noise)
            display_fake = fake
            disc_fake_pred = disc(fake)  # Notice no detach

            # for generator, critic prediction should be higher
            gen_loss = -torch.mean(disc_fake_pred)
            gen_loss.backward()
            gen_optimizer.step()

            total_generator_loss += gen_loss.item()

            print(
                'Discriminator Loss: {:.4f} \t Generator Loss: {:.4f} \t Done: {:.4f}'
                .format(total_discriminator_loss / (i + 1),
                        total_generator_loss / (i + 1), i / len(dataloader)),
                end='\r')

        if (epoch + 1) % 5 == 0:
            show_tensor_images(display_fake, epoch=epoch)
            torch.save(gen.state_dict(),
                       "saved_gen/wgan_gp_gen_{}.pth".format(epoch))
Example No. 4
def train():
    """Train DCGAN and save the generator and discrinator."""

    torch.manual_seed(1)
    epochs = 200
    z_dim = 100
    batch_size = 128

    lr = 0.0002
    beta_1 = 0.5
    beta_2 = 0.999

    criterion = nn.BCEWithLogitsLoss()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.Resize(64),
        transforms.ToTensor(),
        # Normalizing to [-1, 1] seems to work better here than the
        # per-channel CIFAR-10 statistics used in Example No. 3.
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    dataset = datasets.ImageFolder(os.path.join(DATA_DIR, "train"), transform)
    dataloader = DataLoader(dataset, batch_size, shuffle=True, num_workers=4, drop_last=True)

    # initialize generator and discriminator
    gen = Generator(z_dim).to(device)
    gen_optimizer = torch.optim.Adam(gen.parameters(), lr=lr, betas=(beta_1, beta_2))

    disc = Discriminator().to(device)
    disc_optimizer = torch.optim.Adam(disc.parameters(), lr=lr, betas=(beta_1, beta_2))

    gen = gen.apply(weights_init)
    disc = disc.apply(weights_init)

    # to show generated image examples and improvement over training
    fixed_noise = torch.randn(64, z_dim, device=device)

    for epoch in range(epochs):
        print("Epoch:   ", epoch + 1, end='\n')
        total_discriminator_loss = 0
        total_generator_loss = 0
        display_fake = None

        for i, (real, _) in enumerate(dataloader):
            real = real.to(device)

            # UPDATE DISCRIMINATOR
            disc_optimizer.zero_grad()
            
            noise = torch.randn(batch_size, z_dim, device=device)
            fake = gen(noise)
            # discriminator predictions on generated images
            disc_fake_pred = disc(fake.detach())
            disc_fake_loss = criterion(disc_fake_pred, torch.zeros_like(disc_fake_pred))
            disc_fake_loss.backward()
            # discriminator predictions on real images
            disc_real_pred = disc(real)
            disc_real_loss = criterion(disc_real_pred, torch.ones_like(disc_real_pred))
            disc_real_loss.backward()

            disc_loss = disc_fake_loss + disc_real_loss
            total_discriminator_loss += disc_loss.item()            
            
            disc_optimizer.step()

            # UPDATE GENERATOR
            gen_optimizer.zero_grad()

            noise = torch.randn(batch_size, z_dim, device=device)
            fake = gen(noise)
            display_fake = fake
            disc_fake_pred = disc(fake)   # Notice no detach

            gen_loss = criterion(disc_fake_pred, torch.ones_like(disc_fake_pred))
            gen_loss.backward()
            gen_optimizer.step()

            total_generator_loss += gen_loss.item()

            print('Discriminator Loss: {:.4f} \t Generator Loss: {:.4f} \t Done: {:.4f}'
                  .format(total_discriminator_loss / (i + 1),
                          total_generator_loss / (i + 1),
                          i / len(dataloader)),
                  end='\r')

        if (epoch + 1) % 5 == 0:
            fixed_output = gen(fixed_noise)
            show_tensor_images(fixed_output, id_num=epoch)
            torch.save(gen.state_dict(), "saved_gen/gen_{}.pth".format(epoch))
        elif (epoch + 1) % 5 == 1:
            show_tensor_images(display_fake, id_num=epoch)
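Both training examples call show_tensor_images, which is not shown here. Below is a minimal sketch built on torchvision's make_grid; the epoch/id_num keyword arguments mirror the call sites above, and the output path is illustrative, not taken from the original code.

import torch
from torchvision.utils import make_grid
import matplotlib.pyplot as plt

def show_tensor_images(image_tensor, epoch=None, id_num=None, num_images=25):
    # Map images from the [-1, 1] range produced by a tanh generator back
    # to [0, 1], tile them into a grid, and save the grid to disk.
    images = (image_tensor.detach().cpu() + 1) / 2
    grid = make_grid(images[:num_images], nrow=5)
    plt.imshow(grid.permute(1, 2, 0).squeeze())
    plt.axis('off')
    tag = epoch if epoch is not None else id_num
    plt.savefig("images/samples_{}.png".format(tag))  # illustrative output path
    plt.close()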