Example #1
import torch
import torch.nn as nn

# DataLoader treats a custom sampler and shuffle=True as mutually exclusive,
# so only shuffle when no sampler is supplied.
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batchSize, sampler=sampler,
                                          shuffle=(sampler is None), num_workers=int(args.workers))

device = torch.device("cuda" if args.cuda else "cpu")

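# DCGAN-style hyperparameters: latent vector size and generator/discriminator feature-map widths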
nz = args.nz
ngf = args.ngf
ndf = args.ndf

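# build the generator, apply the custom weight init, and optionally resume from a checkpoint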
G = Generator().to(device)
G.apply(weights_init)
if args.G != '':
    G.load_state_dict(clean_state_dict(torch.load(args.G)))

D = Discriminator().to(device)
D.apply(weights_init)
if args.D != '':
    D.load_state_dict(clean_state_dict(torch.load(args.D)))

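# replicate both models across GPUs with DataParallel when more than one is available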
if torch.cuda.device_count() > 1 and args.cuda:
    print("Let's use {} GPUs".format(torch.cuda.device_count()))
    G = nn.DataParallel(G)
    D = nn.DataParallel(D)

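# binary cross-entropy loss over the real/fake predictions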
criterion = nn.BCELoss()

# fixed noise & label
fixed_noise = torch.randn(args.batchSize, nz, 1, 1, device=device)
real_label = 1
fake_label = 0
Example #2
import torch

netD = Discriminator(n_features)
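# move both networks onto the selected device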
netG.to(device)
netD.to(device)


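# DCGAN-style initialization: Conv and BatchNorm weights drawn from N(0, 0.02), BatchNorm biases zeroed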
def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find("BatchNorm2d") != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)


netG.apply(weights_init_normal)
netD.apply(weights_init_normal)

criterion = torch.nn.BCELoss()

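# separate Adam optimizers for the discriminator and the generator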
optimizerD = torch.optim.Adam(netD.parameters(), lr=lr, betas=betas)
optimizerG = torch.optim.Adam(netG.parameters(), lr=lr, betas=betas)


# Define penalties
class RoundNoGradient(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        return x.round()

    @staticmethod
    def backward(ctx, g):