import torch
import torch.optim as optim

# Config, Traffic_Dataset, Generator, Discriminator, build_input_indices,
# pre_training, training and visualize come from this project's own modules.


def run_seq_gan():
    # Read all hyperparameters and data settings from the project config.
    config = Config()
    n_samples = config.get('n_samples')
    batch_size = config.get('batch_size')
    gen_embedding_dim = config.get('gen_embedding_dim')
    gen_hidden_dim = config.get('gen_hidden_dim')
    dis_embedding_dim = config.get('dis_embedding_dim')
    dis_hidden_dim = config.get('dis_hidden_dim')
    dataset_features = config.get('dataset_features')
    dataset_dtypes = config.get('dataset_dtypes')
    generated_features = config.get('generated_features')
    service_list = config.get('service_list')
    protocol_service_dict = config.get('protocol_service_dict')
    service_port_dict = config.get('service_port_dict')
    file_path = config.get('file_path')

    CUDA = torch.cuda.is_available()

    # The dataset maps raw traffic records to token indices via the transform.
    dataset = Traffic_Dataset(file_path, dataset_features, dataset_dtypes,
                              generated_features, batch_size=batch_size,
                              transform=build_input_indices)
    vocab_dim = dataset.vocabulary_length
    max_seq_len = dataset.max_seq_length
    train_epochs = 100

    g = Generator(gen_embedding_dim, gen_hidden_dim, vocab_dim, max_seq_len, CUDA)
    d = Discriminator(dis_embedding_dim, dis_hidden_dim, vocab_dim, max_seq_len, CUDA)
    if CUDA:
        g.cuda()
        d.cuda()

    g_opt = optim.Adam(g.parameters())
    d_opt = optim.Adagrad(d.parameters())

    # MLE pre-training, then adversarial training, then plotting of results.
    pre_training(g, d, g_opt, d_opt, dataset, n_samples, batch_size, CUDA)
    training(g, d, g_opt, d_opt, dataset, train_epochs, n_samples, batch_size,
             CUDA, service_list, protocol_service_dict, service_port_dict)
    visualize(dataset_features, generated_features)
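# pre_training above is project-specific and not shown in this excerpt. A
# minimal, self-contained sketch of the usual SeqGAN-style generator
# pre-training step (maximum likelihood on real sequences) follows; the
# ToyGenerator class, vocabulary size and sequence length are illustrative
# assumptions, not the project's actual classes.
import torch
import torch.nn as nn
import torch.optim as optim

class ToyGenerator(nn.Module):
    def __init__(self, vocab_dim=32, embedding_dim=16, hidden_dim=32):
        super().__init__()
        self.embed = nn.Embedding(vocab_dim, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, vocab_dim)

    def forward(self, x):
        h, _ = self.lstm(self.embed(x))
        return self.out(h)  # logits over the vocabulary at every timestep

gen = ToyGenerator()
opt = optim.Adam(gen.parameters())
loss_fn = nn.CrossEntropyLoss()

# One MLE step on a random toy batch: predict token t+1 from tokens <= t.
seqs = torch.randint(0, 32, (8, 20))        # (batch, seq_len) token indices
logits = gen(seqs[:, :-1])                  # next-token logits
loss = loss_fn(logits.reshape(-1, 32), seqs[:, 1:].reshape(-1))
opt.zero_grad()
loss.backward()
opt.step()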
import torch
import torch.nn as nn
from torch.autograd import Variable

# Discriminator, Generator and the parsed `args` namespace come from this
# project's own modules and CLI setup.

discriminator = Discriminator(args)
generator = Generator(args)

print("* Loss Initialization")
loss_fn = nn.BCELoss()
print(loss_fn)

# Fixed label tensors: 0 for fake, 1 for real, and a one-sided smoothed
# "real" label (1 - smoothing) used to regularize the discriminator.
zeroClass = Variable(torch.Tensor(args.batchSize).fill_(0), requires_grad=False)
oneClass = Variable(torch.Tensor(args.batchSize).fill_(1), requires_grad=False)
smoothedOneClass = Variable(torch.Tensor(args.batchSize).fill_(1 - args.smoothing),
                            requires_grad=False)

if args.gpuid >= 0:
    with torch.cuda.device(args.gpuid):
        generator = generator.cuda()
        discriminator = discriminator.cuda()
        zeroClass = zeroClass.cuda()
        oneClass = oneClass.cuda()
        smoothedOneClass = smoothedOneClass.cuda()

learningRate = args.learningRate

# -------------------------------------------------------
# TRAINING
print("* Start Training")
valids = []
stopCondition = False
it = 1
while it <= args.nEpochs and not stopCondition:
    genLoss = 0
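# The body of the epoch loop is not included in this excerpt. A minimal,
# self-contained sketch of how zeroClass / oneClass / smoothedOneClass are
# typically consumed with nn.BCELoss follows; the stand-in D and G modules
# and the sizes are illustrative assumptions, not the project's classes.
import torch
import torch.nn as nn

batchSize, latent = 4, 8
D = nn.Sequential(nn.Linear(16, 1), nn.Sigmoid())  # stand-in discriminator
G = nn.Linear(latent, 16)                          # stand-in generator
loss_fn = nn.BCELoss()
zeroClass = torch.zeros(batchSize)                    # "fake" target
oneClass = torch.ones(batchSize)                      # "real" target
smoothedOneClass = torch.full((batchSize,), 1 - 0.1)  # smoothing = 0.1

real = torch.randn(batchSize, 16)
fake = G(torch.randn(batchSize, latent))

# Discriminator: smoothed "real" target on real data, 0 on generated data.
d_loss = loss_fn(D(real).view(-1), smoothedOneClass) + \
         loss_fn(D(fake.detach()).view(-1), zeroClass)

# Generator: unsmoothed "real" target, pushing D's output toward 1.
g_loss = loss_fn(D(fake).view(-1), oneClass)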
import numpy as np
import pickle as pkl
import torch
import torch.optim as optim
import wandb
from tqdm import tqdm

# Generator, Discriminator, datamaker, real_loss and fake_loss come from
# this project's own modules.


def train(batch_size=1, latent_size=100, learning_rate=2e-3, num_epochs=100):
    cuda = torch.cuda.is_available()
    dataloader = datamaker(batch_size=batch_size)

    # A fixed latent batch (despite the name, not an image), reused every
    # epoch so generated samples stay comparable across training.
    fixed_img = np.random.uniform(-1, 1, size=(batch_size, latent_size))
    fixed_img = torch.from_numpy(fixed_img).float()
    gen_imgs = []

    G = Generator(input_size=latent_size)
    D = Discriminator()
    if cuda:
        print('Using CUDA')
        fixed_img = fixed_img.cuda()
        G.cuda()
        D.cuda()

    g_optimizer = optim.Adam(G.parameters(), lr=learning_rate)
    d_optimizer = optim.Adam(D.parameters(), lr=learning_rate)
    wandb.watch(G)
    wandb.watch(D)

    for epoch in range(num_epochs):
        D.train()
        G.train()
        for idx, (real_images, _) in enumerate(tqdm(dataloader)):
            if cuda:
                real_images = real_images.cuda()
            batch_size = real_images.size(0)
            real_images = real_images * 2 - 1  # rescale [0, 1] -> [-1, 1]

            # ---- train the discriminator ----
            # generate fake images from a latent vector
            latent_vector = np.random.uniform(-1, 1, size=(batch_size, latent_size))
            latent_vector = torch.from_numpy(latent_vector).float()
            if cuda:
                latent_vector = latent_vector.cuda()
            fake_images = G(latent_vector)

            d_optimizer.zero_grad()
            # discriminator loss on real images
            d_real = D(real_images)
            d_real_loss = real_loss(d_real, smooth=True)
            # discriminator loss on fake images; detach so this update
            # does not backpropagate into the generator
            d_fake = D(fake_images.detach())
            d_fake_loss = fake_loss(d_fake)
            # total loss, backprop, optimize and update weights
            d_loss = d_real_loss + d_fake_loss
            d_loss_value = d_loss.item()
            d_loss.backward()
            d_optimizer.step()

            # ---- train the generator ----
            latent_vector = np.random.uniform(-1, 1, size=(batch_size, latent_size))
            latent_vector = torch.from_numpy(latent_vector).float()
            if cuda:
                latent_vector = latent_vector.cuda()
            fake_images = G(latent_vector)

            g_optimizer.zero_grad()
            d_fake = D(fake_images)
            g_loss = real_loss(d_fake)  # generator wants D to call these real
            g_loss_value = g_loss.item()
            g_loss.backward()
            g_optimizer.step()

            if idx % 100 == 0:
                wandb.log({'G Loss': g_loss_value, 'D Loss': d_loss_value})

        wandb.log({'G Epoch Loss': g_loss_value,
                   'D Epoch Loss': d_loss_value}, step=epoch)

        # test performance on the fixed latent batch
        G.eval()
        with torch.no_grad():
            gen_img = G(fixed_img)
        gen_imgs.append(gen_img)

    # dump generated images
    with open('gen_imgs.pkl', 'wb') as f:
        pkl.dump(gen_imgs, f)
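# real_loss and fake_loss above come from the project's own helpers and are
# not shown. A common implementation (an assumption here, not the project's
# verified code) uses BCEWithLogitsLoss with one-sided label smoothing on
# the "real" targets:
import torch
import torch.nn as nn

def real_loss(d_out, smooth=False):
    # 0.9 instead of 1.0 when smoothing, to keep the discriminator from
    # becoming overconfident on real samples
    labels = torch.full_like(d_out, 0.9 if smooth else 1.0)
    return nn.BCEWithLogitsLoss()(d_out, labels)

def fake_loss(d_out):
    labels = torch.zeros_like(d_out)  # generated samples target 0
    return nn.BCEWithLogitsLoss()(d_out, labels)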
import torch
from net import Discriminator, Generator
from torchvision.utils import save_image
from utils import to_img

z_dimension = 100
batch_size = 64

D = Discriminator()
G = Generator(z_dimension)
D.load_state_dict(torch.load("./model/discriminator.pth"))
G.load_state_dict(torch.load("./model/generator.pth"))

# eval() unconditionally; move to the GPU only when one is available
D.eval()
G.eval()
if torch.cuda.is_available():
    D = D.cuda()
    G = G.cuda()

with torch.no_grad():
    z = torch.randn(batch_size, z_dimension)
    if torch.cuda.is_available():
        z = z.cuda()
    fake_img = G(z)
    # Variable/.data are unnecessary under no_grad
    fake_img = to_img(fake_img.cpu())
    save_image(fake_img, "./result/fake_test.png")
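# to_img comes from the project's utils module and is not shown. For a
# generator with a tanh output over flattened 28x28 grayscale images, a
# plausible implementation looks like the following; the value range and
# the (1, 28, 28) shape are assumptions, not the project's actual helper.
import torch

def to_img(x: torch.Tensor) -> torch.Tensor:
    x = (x + 1) * 0.5          # map tanh output [-1, 1] -> [0, 1]
    x = x.clamp(0, 1)
    return x.view(x.size(0), 1, 28, 28)  # restore NCHW image shape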
import os

import torch
import torch.nn as nn
from torch.autograd import Variable
from net import Discriminator, Generator  # assumed, matching the test script above

if not os.path.exists("./result"):
    os.mkdir("./result")
if not os.path.exists("./model"):
    os.mkdir("./model")

batch_size = 64
z_dimension = 100
num_epoch = 1000

D = Discriminator()
G = Generator(z_dimension)
# NOTE: torch.cuda.is_available is a function and must be called; the loop
# below still moves every tensor with .cuda(), so this script assumes a GPU.
if torch.cuda.is_available():
    D = D.cuda()
    G = G.cuda()

criterion = nn.BCELoss()
d_optimizer = torch.optim.Adam(D.parameters(), lr=0.001)
g_optimizer = torch.optim.Adam(G.parameters(), lr=0.001)

# `dataloader` is assumed to be defined earlier in this script (not shown).
for epoch in range(num_epoch):
    for i, (img, _) in enumerate(dataloader):
        num_img = img.size(0)
        real_img = Variable(img).cuda()
        real_label = Variable(torch.ones(num_img)).cuda()
        fake_label = Variable(torch.zeros(num_img)).cuda()
        real_out = D(real_img)
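        # --- sketch: the excerpt stops at real_out = D(real_img); the lines
        # --- below show the usual continuation, assumed from the labels and
        # --- optimizers set up above rather than taken from the project.
        # discriminator: push real images toward 1 and generated toward 0
        d_loss_real = criterion(real_out, real_label)
        z = Variable(torch.randn(num_img, z_dimension)).cuda()
        fake_img = G(z)
        fake_out = D(fake_img.detach())  # detach: no backprop into G here
        d_loss_fake = criterion(fake_out, fake_label)
        d_loss = d_loss_real + d_loss_fake
        d_optimizer.zero_grad()
        d_loss.backward()
        d_optimizer.step()

        # generator: try to make D output 1 on generated images
        fake_out = D(fake_img)
        g_loss = criterion(fake_out, real_label)
        g_optimizer.zero_grad()
        g_loss.backward()
        g_optimizer.step()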