Example no. 1
0
# Initialize the generator's weights and optionally restore a checkpoint.
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

# Build the discriminator, initialize its weights, optionally restore.
# NOTE(review): this rebinds the name `netD` from the class to the
# instance — kept as-is since later code uses the instance.
netD = netD(nc, ndf, dfs, ngpu=1)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

# Binary cross-entropy loss for the real/fake discriminator targets.
criterion = nn.BCELoss()

# Optimizers — same hyper-parameters for both networks.
optimizerD = optim.Adam(netD.parameters(),
                        lr=opt.lr,
                        betas=(opt.beta1, 0.999),
                        weight_decay=opt.l2_fac)
optimizerG = optim.Adam(netG.parameters(),
                        lr=opt.lr,
                        betas=(opt.beta1, 0.999),
                        weight_decay=opt.l2_fac)

# Set distribution of the latent noise.
uniform = False

if uniform:
    # Uniform noise scaled from [0, 1) to [-1, 1).
    input_noise = torch.rand(batch_size, nz, zx, zy, device=device) * 2 - 1
    fixed_noise = torch.rand(1, nz, zx_sample, zy_sample,
                             device=device) * 2 - 1
else:
    # BUG FIX: the original only defined input_noise/fixed_noise inside the
    # `if uniform:` branch, so with uniform=False they were never created and
    # any later use raised NameError. Fall back to standard-normal noise of
    # the same shapes (the conventional non-uniform GAN prior — confirm this
    # matches the intended distribution).
    input_noise = torch.randn(batch_size, nz, zx, zy, device=device)
    fixed_noise = torch.randn(1, nz, zx_sample, zy_sample, device=device)
Example no. 2
0
# --- Generator: initialize weights, optionally load a checkpoint ---
netG.apply(weights_init)
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)

# --- Discriminator: build, initialize weights, optionally load ---
netD = netD(nc, ndf, dfs, ngpu=1)
netD.apply(weights_init)
if opt.netD != '':
    netD.load_state_dict(torch.load(opt.netD))
print(netD)

# Binary cross-entropy loss for real/fake classification.
criterion = nn.BCELoss()

# One Adam optimizer per network, sharing identical hyper-parameters.
adam_kwargs = dict(lr=opt.lr,
                   betas=(opt.beta1, 0.999),
                   weight_decay=opt.l2_fac)
optimizerD = optim.Adam(netD.parameters(), **adam_kwargs)
optimizerG = optim.Adam(netG.parameters(), **adam_kwargs)

# Uniform latent noise rescaled from [0, 1) onto [-1, 1):
# a batch of training noise plus one fixed sample for monitoring.
input_noise = 2 * torch.rand(batch_size, nz, zx, zy, device=device) - 1
fixed_noise = 2 * torch.rand(1, nz, zx_sample, zy_sample, device=device) - 1
real_label = 1
fake_label = 0

if opt.cuda:
    # Move both networks, the loss, and the noise tensors onto the GPU.
    netD.cuda()
    netG.cuda()
    criterion.cuda()
    input_noise, fixed_noise = input_noise.cuda(), fixed_noise.cuda()

summary(netD, (1, npx, npy))
#