return self.coder.forward(network) NetD = Discriminator() NetG = Generator() optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) dataset = epfd.Cifar10DataSetForPytorch( root=DATA_PATH, transform=tv.transforms.Compose([ # tv.transforms.Resize(CONFIG["IMAGE_SIZE"]), tv.transforms.ToTensor(), tv.transforms.Normalize([0.5] * 3, [0.5] * 3) ])) train_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True) fix_noise = torch.FloatTensor(100, NOISE_DIM).normal_(0, 1) fix_noise_var = torch.autograd.Variable(fix_noise) if torch.cuda.is_available() > 0: NetG = NetG.cuda() NetD = NetD.cuda() fix_noise_var = fix_noise_var.cuda()
# NOTE(review): tail of a truncated method defined above this chunk — confirm
# the enclosing class/def and its indentation against the full file.
        return x

# ---------------------------------------------------------------------------
# Module-level training setup and the start of the adversarial training loop.
# Generator/Discriminator, BATCH_SIZE, NOISE_DIM, LEARNING_RATE, EPOCH,
# epfd and eup are defined elsewhere in this file/project.
# ---------------------------------------------------------------------------

# Dataset transform: only ToTensor (pixels stay in [0, 1] — no normalization here).
trans = tv.transforms.Compose([tv.transforms.ToTensor()])

NetG = Generator()
NetD = Discriminator()

# Binary cross-entropy for the real/fake discriminator targets.
criterion = torch.nn.BCELoss()

# Fixed 4-D noise batch (B, NOISE_DIM, 1, 1) for monitoring generator output.
# torch.autograd.Variable is deprecated in modern torch (no-op wrapper).
fix_noise = torch.autograd.Variable(torch.FloatTensor(BATCH_SIZE, NOISE_DIM, 1, 1).normal_(0, 1))

# Move everything to GPU when available.
if torch.cuda.is_available():
    NetG = NetG.cuda()
    NetD = NetD.cuda()
    fix_noise = fix_noise.cuda()
    criterion.cuda()  # NOTE(review): return value unused — relies on in-place module move

dataset = epfd.Cifar10DataSetForPytorch(train=True, transform=trans)
dataloader = torch.utils.data.DataLoader(dataset, BATCH_SIZE, shuffle=True)

optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))

# Progress display: per-epoch bar showing both adversarial losses.
bar = eup.ProgressBar(EPOCH, len(dataloader), "D Loss:%.3f;G Loss:%.3f")

for epoch in range(1, EPOCH + 1):
    # Step-decay: divide both learning rates by 10 every 30 epochs.
    if epoch % 30 == 0:
        optimizerD.param_groups[0]['lr'] /= 10
        optimizerG.param_groups[0]['lr'] /= 10
    for ii, data in enumerate(dataloader, 0):
        input, _ = data  # NOTE(review): shadows the builtin `input`; kept — later (unseen) loop code uses this name
        input = torch.autograd.Variable(input)
        # "Real" target labels, one per sample in the batch.
        # NOTE(review): loop body continues beyond this chunk (truncated here).
        label = torch.ones(input.size(0))
return output NetD = Discriminator() NetG = Generator() MSE_LOSS = torch.nn.MSELoss() optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) dataset = epfd.Cifar10DataSetForPytorch(root=DATA_PATH, transform=tv.transforms.Compose([ tv.transforms.ToTensor(), ])) train_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True) fix_noise = torch.randn(100, NOISE_DIM) fix_noise_var = torch.autograd.Variable(fix_noise) if torch.cuda.is_available() > 0: NetG = NetG.cuda() NetD = NetD.cuda() MSE_LOSS = MSE_LOSS.cuda() fix_noise_var = fix_noise_var.cuda() bar = eup.ProgressBar(EPOCH, len(train_loader), "D Loss:%.3f;G Loss:%.3f")
return x def one_hot(target): y = torch.zeros(target.size()[0], 10) for i in range(target.size()[0]): y[i, target[i]] = 1 return y trans = tv.transforms.Compose([tv.transforms.ToTensor()]) dataset = epfd.Cifar10DataSetForPytorch(root=DATA_PATH, train=True, transform=trans) dataLoader = torch.utils.data.DataLoader(dataset, BATCH_SIZE, shuffle=True) NetG = Generator() NetD = Discriminator() optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) criterion = torch.nn.BCELoss() predict_noise = torch.randn(100, NOISE_DIM) temp_z_ = torch.randn(10, 100)