output[:, CC_DIM + 1:CC_DIM + 1 + DC_DIM] = torch.nn.functional.softmax( output[:, CC_DIM + 1:CC_DIM + 1 + DC_DIM].clone()) return output NetG = Generator() NetD = Discriminator() optimizerD = torch.optim.Adam(NetD.parameters(), lr=0.0002, betas=(0.5, 0.999)) optimizerG = torch.optim.Adam(NetG.parameters(), lr=0.001, betas=(0.5, 0.999)) trans = tv.transforms.Compose( [tv.transforms.ToTensor(), tv.transforms.Normalize([0.5], [0.5])]) dataset = epfd.FashionMnistPytorchData(root=DATA_PATH, train=True, transform=trans) dataLoader = torch.utils.data.DataLoader(dataset, BATCH_SIZE, shuffle=True) fixed_noise = torch.Tensor(np.zeros((NOISE_DIM, Z_DIM))) tmp = np.zeros((NOISE_DIM, CC_DIM)) for k in range(10): tmp[k * 10:(k + 1) * 10, 0] = np.linspace(-2, 2, 10) fixed_cc = torch.Tensor(tmp) tmp = np.zeros((NOISE_DIM, DC_DIM)) for k in range(10): tmp[k * 10:(k + 1) * 10, k] = 1 fixed_dc = torch.Tensor(tmp) if torch.cuda.is_available(): NetG = NetG.cuda()
# NOTE(review): this chunk begins mid-method — the tail of a forward() whose
# `def` line is outside this view; indentation below is reconstructed.
        output = self.conv2(output)
        # Flatten conv features; assumes conv2 emits (N, 128, 7, 7) —
        # TODO confirm against the layer definitions above this view.
        output = output.view(-1, 128 * 7 * 7)
        output = self.fc1(output)
        output = self.fc2(output)
        return output


NetD = Discriminator()
NetG = Generator()
optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE,
                              betas=(0.5, 0.999))
optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE,
                              betas=(0.5, 0.999))
dataset = epfd.FashionMnistPytorchData(root=DATA_PATH,
                                       transform=tv.transforms.Compose([
                                           tv.transforms.ToTensor(),
                                       ]))
train_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE,
                                           shuffle=True)
MSE_LOSS = torch.nn.MSELoss()

# Presumably conditional-GAN label tensors for the 10 classes:
# `fill[i]` is a (10, IMAGE_SIZE, IMAGE_SIZE) stack with plane i set to 1.
fill = torch.zeros([10, 10, IMAGE_SIZE, IMAGE_SIZE])
for i in range(10):
    fill[i, i, :, :] = 1
# `onehot[i]` is the one-hot vector for class i, reshaped to (10, 1, 1).
onehot = torch.zeros(10, 10)
onehot = onehot.scatter_(
    1, torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).view(10, 1),
    1).view(10, 10, 1, 1)

# Fixed noise/label inputs, presumably reused for visualization snapshots.
temp_z_ = torch.randn(10, 100)
fixed_z_ = temp_z_
fixed_y_ = torch.zeros(10, 1)
return network NetD = Discriminator() NetG = Generator() optimizerD = torch.optim.Adam(NetD.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) optimizerG = torch.optim.Adam(NetG.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999)) dataset = epfd.FashionMnistPytorchData( root=DATA_PATH, transform=tv.transforms.Compose([ # tv.transforms.Resize(CONFIG["IMAGE_SIZE"]), tv.transforms.ToTensor(), # tv.transforms.Normalize([0.5] * 3, [0.5] * 3) ])) train_loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True) fix_noise = torch.randn(100, NOISE_DIM) fix_noise_var = torch.autograd.Variable(fix_noise) if torch.cuda.is_available() > 0: NetG = NetG.cuda() NetD = NetD.cuda() fix_noise_var = fix_noise_var.cuda()
import torch
import torchvision as tv
import ELib.utils.progressbar as eup
import ELib.pyt.nuwa.dataset as epfd

# Training hyperparameters.
num_epochs = 20
batch_size = 64
learning_rate = 0.001

# NOTE(review): Normalize is given 3-channel means/stds but FashionMNIST
# images are single-channel — presumably the dataset wrapper emits 3-channel
# tensors, or this raises at load time; verify epfd.FashionMnistPytorchData.
img_transform = tv.transforms.Compose([
    tv.transforms.ToTensor(),
    tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
dataset = epfd.FashionMnistPytorchData(train=True, transform=img_transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True)
# Raw (untransformed) test split, accessed via its `test_data` attribute.
test_images = epfd.FashionMnistPytorchData(train=False).test_data


class autoencoder(torch.nn.Module):
    # Fully-connected autoencoder over flattened 28x28 inputs.
    # NOTE(review): the encoder definition is truncated at the end of this
    # chunk — the remainder of the class is outside this view.
    def __init__(self):
        super(autoencoder, self).__init__()
        self.encoder = torch.nn.Sequential(torch.nn.Linear(28 * 28, 128),
                                           torch.nn.ReLU(True),
                                           torch.nn.Linear(128, 64),
                                           torch.nn.ReLU(True),
                                           torch.nn.Linear(64, 32),
x = torch.nn.Dropout(p=0.25, inplace=True)(x) x = torch.nn.ReLU(inplace=True)(x) x = self.lin2(x) x = torch.nn.Dropout(p=0.25, inplace=True)(x) x = torch.nn.ReLU(inplace=True)(x) return torch.nn.functional.sigmoid(self.lin3(x)) img_transform = tv.transforms.Compose([ tv.transforms.ToTensor(), tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) batch_size = 128 num_epochs = 100 dataset = epfd.FashionMnistPytorchData(train=True, transform=img_transform) dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True) EPS = 1e-15 gen_lr = 0.0001 reg_lr = 0.00005 z_red_dims = 120 Q = Q_net(784,1000,z_red_dims) P = P_net(784,1000,z_red_dims) D_gauss = D_net_gauss(500,z_red_dims) if torch.cuda.is_available(): Q = Q.cuda() P = P.cuda() D_gauss = D_gauss.cuda() optim_P = torch.optim.Adam(P.parameters(), lr=gen_lr)