Example #1
optimizerD = optim.Adam(D.parameters(), lr=args.lr, betas=(args.beta1, 0.999))
optimizerG = optim.Adam(G.parameters(), lr=args.lr, betas=(args.beta1, 0.999))

model_name = f'gan_warmup_samples{len(train_idx)}_seed{args.manualSeed}_nG{args.nG}'
save_dir = os.path.join('runs', model_name)
writer = SummaryWriter(save_dir)
n_iter = 0

for epoch in range(args.epochs):
    pbar = tqdm(dataloader)
    for data in pbar:
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real
        D.zero_grad()
        image = data[0].to(device)
        batch_size = image.size(0)
        label = torch.full((batch_size,), real_label, dtype=torch.float, device=device)  # BCE targets must be float

        output = D(image)
        errD_real = criterion(output, label)
        errD_real.backward()  # accumulate real-batch gradients before the fake pass
        D_x = output.mean().item()

        # train with fake
        z = torch.randn(batch_size, nz, 1, 1, device=device)
        fake = G(z)
        label = torch.full((batch_size,), fake_label, dtype=torch.float, device=device)
        output = D(fake.detach())
        errD_fake = criterion(output, label)
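        # --- The excerpt ends here. Below is a minimal sketch of how a
        # --- standard DCGAN-style loop typically completes both updates;
        # --- this is an assumed continuation, not this project's actual code.
        errD_fake.backward()
        errD = errD_real + errD_fake
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        G.zero_grad()
        label.fill_(real_label)  # fakes are labeled "real" for the generator loss
        output = D(fake)
        errG = criterion(output, label)
        errG.backward()
        optimizerG.step()

        writer.add_scalar('loss/D', errD.item(), n_iter)
        writer.add_scalar('loss/G', errG.item(), n_iter)
        n_iter += 1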
Example #2
for epoch in range(num_epochs):
    for batch_idx, (real, _) in enumerate(loader):
        real = real.view(-1, 784)  # .to(device)
        batch_size = real.shape[0]

        # Train Discriminator: max log(D(real)) + log(1 - D(G(z)))
        noise = torch.randn(batch_size, z_dim)  # .to(device)
        fake = gen(noise)
        disc_real = disc(real).view(-1)
        lossD_real = criterion(disc_real, torch.ones_like(disc_real))
        disc_fake = disc(fake).view(-1)
        lossD_fake = criterion(disc_fake, torch.zeros_like(disc_fake))

        lossD = (lossD_real + lossD_fake) / 2
        disc.zero_grad()
        lossD.backward(retain_graph=True)  # keep the graph: 'fake' is reused in the generator step
        opt_disc.step()

        # Train Generator: min log(1 - D(G(z))) -> max log(D(G(z)))
        output = disc(fake).view(-1)
        lossG = criterion(output, torch.ones_like(output))
        gen.zero_grad()
        lossG.backward()
        opt_gen.step()

        if batch_idx % 200 == 0:
            print("Epoch {}, Loss D: {}, Loss G: {}".format(
                epoch, lossD, lossG))

            with torch.no_grad():
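                # The excerpt ends mid-block. A typical body renders a grid
                # of generated samples for logging; 'fixed_noise' and
                # 'writer' are hypothetical names not defined in this
                # snippet (an assumed continuation, not the source's code):
                fake = gen(fixed_noise).reshape(-1, 1, 28, 28)
                img_grid = torchvision.utils.make_grid(fake, normalize=True)
                writer.add_image("fake samples", img_grid, global_step=epoch)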
Example #3
File: main_W.py Project: fenoy/Deepmoon
    'imgs': [],
    'epoch': []
}

for epoch in range(n_epochs):
    for i, data in enumerate(loader['train']):

        x_real = data['moves'].type(Tensor)

        if epoch == 0 and i == 0:
            viz.images(x_real[:16].flip(2),
                       opts=dict(title='Epoch' + str(epoch),
                                 width=1000,
                                 height=250))

        netD.zero_grad()

        z = torch.randn(x_real.size(0), latent_size, 1, 1, device=device)
        x_fake = netG(z)

        out_real = netD(x_real)
        out_fake = netD(x_fake)

        lossD = torch.mean(out_fake) - torch.mean(out_real) \
                + lmbda * gradient_penalty(netD, x_real.data, x_fake.data)

        lossD.backward(retain_graph=True)  # x_fake's graph is reused for the generator step below
        optimizerD.step()

        if i % n_critic == 0:
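            # The excerpt ends here. In WGAN training the generator is
            # updated once per n_critic critic steps; a minimal sketch of
            # the usual update (assumed, not the project's actual code):
            netG.zero_grad()
            z = torch.randn(x_real.size(0), latent_size, 1, 1, device=device)
            lossG = -torch.mean(netD(netG(z)))
            lossG.backward()
            optimizerG.step()

The snippet calls gradient_penalty without showing its definition. A self-contained sketch of the standard WGAN-GP penalty (Gulrajani et al.), matching the three-argument call above, might look like the following; this is an assumption about the project's implementation, not its actual code:

import torch

def gradient_penalty(netD, real, fake):
    # Random interpolation points between real and fake batches;
    # alpha broadcasts over all non-batch dimensions
    alpha = torch.rand(real.size(0), *([1] * (real.dim() - 1)), device=real.device)
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    out = netD(interp)
    # Gradient of the critic output w.r.t. the interpolated inputs
    grads = torch.autograd.grad(outputs=out, inputs=interp,
                                grad_outputs=torch.ones_like(out),
                                create_graph=True, retain_graph=True)[0]
    grads = grads.view(grads.size(0), -1)
    # Penalize deviation of the per-sample gradient norm from 1
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()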