def main():
    """Keep the Telegram profile photo in sync with the current time.

    Regenerates the clock image and replaces the profile photo whenever
    get_current_time() reports a new value.  Runs forever; intended to be
    launched as a script.
    """
    import time  # stdlib; local import keeps this fix self-contained

    previous_time = ''
    with TelegramClient(config.session_name, config.api_id, config.api_hash) as client:
        while True:
            # Read the clock once per iteration: the original called
            # get_current_time() twice, which could straddle a minute
            # rollover and skip an update.
            current_time = get_current_time()
            if previous_time != current_time:
                previous_time = current_time
                generate_image(current_time)
                image = client.upload_file('time_image.jpg')
                # Remove the stale photo(s), then publish the fresh one.
                client(DeletePhotosRequest(client.get_profile_photos('me')))
                client(UploadProfilePhotoRequest(image))
            # Throttle the loop: without this the original busy-waited,
            # pegging a CPU core and hammering the Telegram API.
            time.sleep(1)
async def command(ctx, stat_name, stats_folder, player_cache, stats_list):
    """Look up one statistic across every player's stats file and post a chart.

    Fuzzy-matches ``stat_name`` against ``stats_list``, reads the matched
    stat out of each ``<uuid>.json`` file in ``stats_folder``, resolves
    UUIDs to player names (memoized in ``player_cache``), renders the
    collected values with ``utils.generate_image`` and sends the result.
    """
    matches = difflib.get_close_matches(stat_name, stats_list, 1)
    if not matches:
        await ctx.send("`Stat not found`")
        return
    stat_name = matches[0]
    stats = {}
    cache_dirty = False
    for file in os.listdir(stats_folder):
        # Only <uuid>.json files are stats files; the original sliced
        # file[:-5] off *every* directory entry and would have derived a
        # bogus "uuid" from any unrelated filename.
        if not file.endswith(".json"):
            continue
        uuid = file[:-5]
        if uuid not in player_cache:
            player_name = utils.uuid_to_name(uuid)
            # Fall back to a placeholder when the UUID cannot be resolved.
            player_cache[uuid] = player_name if player_name else "Yeeted gamer"
            cache_dirty = True
        with open(os.path.join(stats_folder, file), "r") as f:
            try:
                # KeyError: this player has no entry for the stat.
                # JSONDecodeError: corrupt or partially written stats file.
                value = json.load(f)[stat_name]
            except (KeyError, json.decoder.JSONDecodeError):
                continue
        stats[player_cache[uuid]] = value
    if cache_dirty:
        # Persist the refreshed cache back onto the cog once, rather than
        # reassigning it inside the loop on every miss.
        ctx.cog.player_cache = player_cache
    # Compact long dotted stat ids for the chart title,
    # e.g. "stat.useItem.minecraft.stick" -> "useItem stick".
    name_split = stat_name.split(".")
    if len(name_split) > 3:
        stat_name = name_split[1] + " " + name_split[3]
    image = utils.generate_image(stat_name, stats)
    await ctx.send(file=image)
def main():
    """Demonstrate associative recall with a Hopfield network.

    Generates a batch of three patterns, corrupts them with 20% noise,
    trains the network on the clean patterns, and returns the network's
    reconstruction of the noisy input.
    """
    patterns = generate_image(batchsize=3)
    corrupted = add_noise(patterns, 0.2)
    network = HopfieldNeuralNetwork()
    network.train(patterns)
    recalled = network.recall(corrupted)
    return recalled
def main():
    """Telegram self-bot main loop.

    Keeps the profile photo showing the current time and the profile bio
    showing the day's progress.  Runs forever, polling roughly once per
    second.
    """
    previous_time = ''
    previous_progress_of_the_day = ''
    with TelegramClient(config.session_name, config.api_id, config.api_hash) as client:
        while True:
            # Refresh the avatar only when the displayed time changes.
            if not previous_time == get_current_time():
                current_time = get_current_time()
                previous_time = current_time
                generate_image(current_time)
                image = client.upload_file(config.image_filename)
                # Upload the new photo first, then delete the old one, so
                # the account is never left without an avatar.
                client(UploadProfilePhotoRequest(image))
                # NOTE(review): [-1] is presumably the oldest photo in the
                # list returned by get_profile_photos — confirm ordering.
                client(
                    DeletePhotosRequest([client.get_profile_photos('me')[-1]]))
                delete_image()
            # Throttle the polling loop.
            time.sleep(1)
            # Update the bio whenever the day-progress value changes.
            if not previous_progress_of_the_day == get_progress_of_the_day():
                current_progress_of_the_day = get_progress_of_the_day()
                previous_progress_of_the_day = current_progress_of_the_day
                profile_bio = config.profile_bio.format(
                    current_progress_of_the_day)
                client(UpdateProfileRequest(about=profile_bio))
async def command(ctx, objective_name, data_folder, objectives):
    """Post a chart of every player's score for one scoreboard objective.

    Fuzzy-matches ``objective_name`` against the known ``objectives``,
    reads ``scoreboard.dat`` from ``data_folder``, collects per-player
    scores (skipping the synthetic "Total" entry), renders them and sends
    the image to the channel.
    """
    matches = difflib.get_close_matches(objective_name, objectives, 1)
    if not matches:
        await ctx.send("`Scoreboard not found`")
        return
    objective_name = matches[0]

    scoreboard = nbt.NBTFile(os.path.join(data_folder, "scoreboard.dat"))["data"]
    scores = {}
    for entry in scoreboard["PlayerScores"]:
        name = entry["Name"].value
        if entry["Objective"].value == objective_name and name != "Total":
            scores[name] = entry["Score"].value

    image = utils.generate_image(objective_name, scores)
    await ctx.send(file=image)
def train():
    """WGAN-GP training loop with an auxiliary autoencoder (netE + netG).

    Per batch: (1) one autoencoder reconstruction step on E and G,
    (2) five critic (netD) steps with gradient penalty, (3) one generator
    step, then LR-scheduler steps, plot logging, and periodic sampling and
    checkpointing.

    NOTE(review): written against an old PyTorch API — ``autograd.Variable``,
    ``volatile=True`` and ``loss.backward(tensor)`` are all deprecated.
    """
    args = load_args()
    train_gen, test_gen = load_data(args)
    torch.manual_seed(1)
    netG, netD, netE = load_models(args)
    if args.use_spectral_norm:
        # With spectral norm some critic parameters have requires_grad
        # disabled; only hand the trainable ones to the optimizer.
        optimizerD = optim.Adam(filter(lambda p: p.requires_grad,
                                       netD.parameters()),
                                lr=2e-4, betas=(0.0, 0.9))
    else:
        optimizerD = optim.Adam(netD.parameters(), lr=2e-4, betas=(0.5, 0.9))
    optimizerG = optim.Adam(netG.parameters(), lr=2e-4, betas=(0.5, 0.9))
    optimizerE = optim.Adam(netE.parameters(), lr=2e-4, betas=(0.5, 0.9))
    schedulerD = optim.lr_scheduler.ExponentialLR(optimizerD, gamma=0.99)
    schedulerG = optim.lr_scheduler.ExponentialLR(optimizerG, gamma=0.99)
    schedulerE = optim.lr_scheduler.ExponentialLR(optimizerE, gamma=0.99)
    ae_criterion = nn.MSELoss()
    # Constant gradient seeds for backward(): +1 descends, -1 ascends.
    one = torch.FloatTensor([1]).cuda()
    mone = (one * -1).cuda()
    iteration = 0
    for epoch in range(args.epochs):
        for i, (data, targets) in enumerate(train_gen):
            start_time = time.time()
            """ Update AutoEncoder """
            # Freeze the critic while training the E/G autoencoder pair.
            for p in netD.parameters():
                p.requires_grad = False
            netG.zero_grad()
            netE.zero_grad()
            real_data_v = autograd.Variable(data).cuda()
            real_data_v = real_data_v.view(args.batch_size, -1)
            encoding = netE(real_data_v)
            fake = netG(encoding)
            ae_loss = ae_criterion(fake, real_data_v)
            ae_loss.backward(one)
            optimizerE.step()
            optimizerG.step()
            """ Update D network """
            for p in netD.parameters():
                p.requires_grad = True
            # Five critic updates per generator update (standard WGAN
            # practice).  NOTE(review): this inner loop reuses the same
            # batch `data` all five times and shadows the outer index `i`.
            for i in range(5):
                real_data_v = autograd.Variable(data).cuda()
                # train with real data
                # NOTE(review): unlike the AE step, the batch is not
                # flattened with .view() here — confirm netD's input shape.
                netD.zero_grad()
                D_real = netD(real_data_v)
                D_real = D_real.mean()
                D_real.backward(mone)
                # train with fake data
                noise = torch.randn(args.batch_size, args.dim).cuda()
                noisev = autograd.Variable(noise, volatile=True)
                # Re-wrap to detach the generator from the critic's graph.
                fake = autograd.Variable(netG(noisev).data)
                inputv = fake
                D_fake = netD(inputv)
                D_fake = D_fake.mean()
                D_fake.backward(one)
                # train with gradient penalty
                gradient_penalty = ops.calc_gradient_penalty(
                    args, netD, real_data_v.data, fake.data)
                gradient_penalty.backward()
                D_cost = D_fake - D_real + gradient_penalty
                Wasserstein_D = D_real - D_fake
                optimizerD.step()
            # Update generator network (GAN)
            noise = torch.randn(args.batch_size, args.dim).cuda()
            noisev = autograd.Variable(noise)
            fake = netG(noisev)
            G = netD(fake)
            G = G.mean()
            # Maximize D(G(z)) by descending along -1.
            G.backward(mone)
            G_cost = -G
            optimizerG.step()
            schedulerD.step()
            schedulerG.step()
            schedulerE.step()
            # Write logs and save samples
            save_dir = './plots/' + args.dataset
            plot.plot(save_dir, '/disc cost', D_cost.cpu().data.numpy())
            plot.plot(save_dir, '/gen cost', G_cost.cpu().data.numpy())
            plot.plot(save_dir, '/w1 distance', Wasserstein_D.cpu().data.numpy())
            plot.plot(save_dir, '/ae cost', ae_loss.data.cpu().numpy())
            # Calculate dev loss and generate samples every 100 iters
            if iteration % 100 == 99:
                dev_disc_costs = []
                for i, (images, targets) in enumerate(test_gen):
                    imgs_v = autograd.Variable(images, volatile=True).cuda()
                    D = netD(imgs_v)
                    _dev_disc_cost = -D.mean().cpu().data.numpy()
                    dev_disc_costs.append(_dev_disc_cost)
                plot.plot(save_dir, '/dev disc cost', np.mean(dev_disc_costs))
                utils.generate_image(iteration, netG, save_dir, args)
                # utils.generate_ae_image(iteration, netE, netG, save_dir, args, real_data_v)
            # Save logs every 100 iters
            if (iteration < 5) or (iteration % 100 == 99):
                plot.flush()
            plot.tick()
            if iteration % 100 == 0:
                utils.save_model(netG, optimizerG, iteration,
                                 'models/{}/G_{}'.format(args.dataset, iteration))
                utils.save_model(netD, optimizerD, iteration,
                                 'models/{}/D_{}'.format(args.dataset, iteration))
            iteration += 1
def train(args):
    """WGAN-GP training on MNIST with flattened (784-dim) inputs.

    Alternates ``args.disc_iters`` critic updates with one generator update
    per outer iteration.  Prints costs every 100 iterations; every 300
    iterations it evaluates the critic on the test set and saves generated
    samples via ``utils.generate_image``.
    """
    torch.manual_seed(8734)
    netG = Generator(args).cuda()
    netD = Discriminator(args).cuda()
    print(netG, netD)
    optimG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9),
                        weight_decay=1e-4)
    optimD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9),
                        weight_decay=1e-4)
    mnist_train, mnist_test = datagen.load_mnist(args)
    train = inf_gen(mnist_train)
    print('saving reals')
    reals, _ = next(train)
    if not os.path.exists('results/'):
        os.makedirs('results')
    save_image(reals, 'results/reals.png')
    # Gradient seeds: backward(mone) ascends the objective, backward(one)
    # descends it.
    one = torch.tensor(1.).cuda()
    mone = (one * -1)
    print('==> Begin Training')
    # NOTE(review): the loop variable `iter` shadows the builtin.
    for iter in range(args.epochs):
        ops.batch_zero_grad([netG, netD])
        # Critic phase: unfreeze D and run several updates per G update.
        for p in netD.parameters():
            p.requires_grad = True
        for _ in range(args.disc_iters):
            data, targets = next(train)
            # Flatten each 28x28 image to a 784-vector.
            data = data.view(args.batch_size, 28*28).cuda()
            netD.zero_grad()
            d_real = netD(data).mean()
            d_real.backward(mone, retain_graph=True)
            noise = torch.randn(args.batch_size, args.z,
                                requires_grad=True).cuda()
            # Sample fakes without building G's graph, then re-enable grad
            # so the penalty term can differentiate w.r.t. the fake input.
            with torch.no_grad():
                fake = netG(noise)
            fake.requires_grad_(True)
            d_fake = netD(fake)
            d_fake = d_fake.mean()
            d_fake.backward(one, retain_graph=True)
            # Gradient penalty keeps D approximately 1-Lipschitz.
            gp = ops.grad_penalty_1dim(args, netD, data, fake)
            gp.backward()
            d_cost = d_fake - d_real + gp
            wasserstein_d = d_real - d_fake
            optimD.step()
        # Generator phase: freeze D and maximize D(G(z)).
        for p in netD.parameters():
            p.requires_grad = False
        netG.zero_grad()
        noise = torch.randn(args.batch_size, args.z,
                            requires_grad=True).cuda()
        fake = netG(noise)
        G = netD(fake)
        G = G.mean()
        G.backward(mone)
        g_cost = -G
        optimG.step()
        if iter % 100 == 0:
            print('iter: ', iter, 'train D cost', d_cost.cpu().item())
            print('iter: ', iter, 'train G cost', g_cost.cpu().item())
        if iter % 300 == 0:
            # NOTE(review): the collected validation costs are never
            # reported or returned — dead except for the side effect of
            # running the critic over the test set.
            val_d_costs = []
            for i, (data, target) in enumerate(mnist_test):
                data = data.cuda()
                d = netD(data)
                val_d_cost = -d.mean().item()
                val_d_costs.append(val_d_cost)
            utils.generate_image(args, iter, netG)
def train():
    """Third-layer training of a stacked three-layer GAN/autoencoder stack.

    Loads pretrained first/second-layer models (frozen — no optimizer is
    created for them) and trains ThirdE/ThirdG/ThirdD on HD batches: one
    autoencoder step, five critic steps with gradient penalty, and one
    generator step per iteration.  Checkpoints and sample images every
    1000 iterations; Inception score printed every 2000.

    NOTE(review): relies on deprecated ``autograd.Variable`` /
    ``volatile=True`` / ``tensor.data[0]`` APIs.  "Thrid" is a typo for
    "Third" preserved throughout for checkpoint-name compatibility.
    """
    with torch.cuda.device(1):
        args = load_args()
        train_gen, dev_gen, test_gen = utils.dataset_iterator(args)
        torch.manual_seed(1)
        netG = first_layer.FirstG(args).cuda()
        SecondG = second_layer.SecondG(args).cuda()
        SecondE = second_layer.SecondE(args).cuda()
        ThridG = third_layer.ThirdG(args).cuda()
        ThridE = third_layer.ThirdE(args).cuda()
        ThridD = third_layer.ThirdD(args).cuda()
        # Resume all layers from fixed checkpoints.
        netG.load_state_dict(torch.load('./1stLayer/1stLayerG71999.model'))
        SecondG.load_state_dict(torch.load('./2ndLayer/2ndLayerG71999.model'))
        SecondE.load_state_dict(torch.load('./2ndLayer/2ndLayerE71999.model'))
        ThridE.load_state_dict(torch.load('./3rdLayer/3rdLayerE10999.model'))
        ThridG.load_state_dict(torch.load('./3rdLayer/3rdLayerG10999.model'))
        optimizerD = optim.Adam(ThridD.parameters(), lr=1e-4, betas=(0.5, 0.9))
        optimizerG = optim.Adam(ThridG.parameters(), lr=1e-4, betas=(0.5, 0.9))
        optimizerE = optim.Adam(ThridE.parameters(), lr=1e-4, betas=(0.5, 0.9))
        ae_criterion = nn.MSELoss()
        # Constant gradient seeds for backward().
        one = torch.FloatTensor([1]).cuda()
        mone = (one * -1).cuda()
        dataLoader = BSDDataLoader(args.dataset, args.batch_size, args)
        for iteration in range(args.epochs):
            start_time = time.time()
            """ Update AutoEncoder """
            for p in ThridD.parameters():
                p.requires_grad = False
            ThridG.zero_grad()
            ThridE.zero_grad()
            real_data = dataLoader.getNextHDBatch().cuda()
            real_data_v = autograd.Variable(real_data)
            encoding = ThridE(real_data_v)
            fake = ThridG(encoding)
            ae_loss = ae_criterion(fake, real_data_v)
            ae_loss.backward(one)
            optimizerE.step()
            optimizerG.step()
            """ Update D network """
            for p in ThridD.parameters():
                p.requires_grad = True
            # Five critic updates per generator update.
            for i in range(5):
                real_data = dataLoader.getNextHDBatch().cuda()
                real_data_v = autograd.Variable(real_data)
                # train with real data
                ThridD.zero_grad()
                D_real = ThridD(real_data_v)
                D_real = D_real.mean()
                D_real.backward(mone)
                # train with fake data: push noise through the frozen lower
                # layers, then the third-layer E/G pair; re-wrap .data to
                # detach the generators from the critic's graph.
                noise = generateTensor(args.batch_size).cuda()
                noisev = autograd.Variable(noise, volatile=True)
                fake = autograd.Variable(
                    ThridG(ThridE(SecondG(SecondE(netG(noisev, True), True)),
                                  True)).data)
                inputv = fake
                D_fake = ThridD(inputv)
                D_fake = D_fake.mean()
                D_fake.backward(one)
                # train with gradient penalty
                gradient_penalty = ops.calc_gradient_penalty(
                    args, ThridD, real_data_v.data, fake.data)
                gradient_penalty.backward()
                optimizerD.step()
            # Update generator network (GAN)
            noise = generateTensor(args.batch_size).cuda()
            noisev = autograd.Variable(noise)
            fake = ThridG(ThridE(SecondG(SecondE(netG(noisev, True), True)),
                                 True))
            G = ThridD(fake)
            G = G.mean()
            G.backward(mone)
            G_cost = -G
            optimizerG.step()
            # Write logs and save samples
            save_dir = './plots/' + args.dataset
            # Calculate dev loss and generate samples every 100 iters
            if iteration % 1000 == 999:
                torch.save(ThridE.state_dict(),
                           './3rdLayer/3rdLayerE%d.model' % iteration)
                torch.save(ThridG.state_dict(),
                           './3rdLayer/3rdLayerG%d.model' % iteration)
                utils.generate_image(iteration, netG, save_dir, args)
                utils.generate_MidImage(iteration, netG, SecondE, SecondG,
                                        save_dir, args)
                utils.generate_HDImage(iteration, netG, SecondE, SecondG,
                                       ThridE, ThridG, save_dir, args)
            if iteration % 2000 == 1999:
                # Periodic quality probe: Inception score of a fresh batch.
                noise = generateTensor(args.batch_size).cuda()
                noisev = autograd.Variable(noise, volatile=True)
                fake = autograd.Variable(
                    ThridG(ThridE(SecondG(SecondE(netG(noisev, True), True)),
                                  True)).data)
                print(inception_score(fake.data.cpu().numpy(),
                                      resize=True, batch_size=5)[0])
            endtime = time.time()
            print('iter:', iteration,
                  'total time %4f' % (endtime - start_time),
                  'ae loss %4f' % ae_loss.data[0],
                  'G cost %4f' % G_cost.data[0])
def train(args):
    """Train a hypernetwork: an encoder netE emits layer codes, generator
    heads (E1..E2, D1..D3) map each code to the weights of one layer of a
    target classifier, and netD regularizes the codes toward a prior.

    Optional encoder pretraining matches code mean/covariance to the prior;
    the main loop then alternates (a) adversarial code regularization and
    (b) classifier-loss updates through the generated weights, with a test
    pass and image generation every ~199 batches.
    """
    from torch import optim
    #torch.manual_seed(8734)
    netE = models.Encoderz(args).cuda()
    netD = models.DiscriminatorZ(args).cuda()
    E1 = models.GeneratorE1(args).cuda()
    E2 = models.GeneratorE2(args).cuda()
    #E3 = models.GeneratorE3(args).cuda()
    #E4 = models.GeneratorE4(args).cuda()
    #D1 = models.GeneratorD1(args).cuda()
    # NOTE(review): D1/D2/D3 are deliberately bound to GeneratorD2/D3/D4
    # (GeneratorD1 is commented out above) — confirm this offset is intended.
    D1 = models.GeneratorD2(args).cuda()
    D2 = models.GeneratorD3(args).cuda()
    D3 = models.GeneratorD4(args).cuda()
    print(netE, netD)
    print(E1, E2, D1, D2, D3)
    optimE = optim.Adam(netE.parameters(), lr=5e-4, betas=(0.5, 0.9),
                        weight_decay=1e-4)
    optimD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9),
                        weight_decay=1e-4)
    # One optimizer per weight-generator head.
    Eoptim = [
        optim.Adam(E1.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4),
        optim.Adam(E2.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4),
        #optim.Adam(E3.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4),
        #optim.Adam(E4.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4)
    ]
    Doptim = [
        #optim.Adam(D1.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4),
        optim.Adam(D1.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4),
        optim.Adam(D2.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4),
        optim.Adam(D3.parameters(), lr=1e-4, betas=(0.5, 0.9), weight_decay=1e-4)
    ]
    Enets = [E1, E2]
    Dnets = [D1, D2, D3]
    best_test_loss = np.inf
    args.best_loss = best_test_loss
    mnist_train, mnist_test = datagen.load_mnist(args)
    # Sampling distributions for the encoder input (ze) and code prior (z).
    x_dist = utils.create_d(args.ze)
    z_dist = utils.create_d(args.z)
    one = torch.FloatTensor([1]).cuda()
    mone = (one * -1).cuda()
    print("==> pretraining encoder")
    j = 0
    final = 100.
    e_batch_size = 1000
    if args.pretrain_e:
        # Pretrain netE so each emitted code matches the prior's first two
        # moments (mean/covariance); stop early once the loss is small.
        for j in range(100):
            x = utils.sample_d(x_dist, e_batch_size)
            z = utils.sample_d(z_dist, e_batch_size)
            codes = netE(x)
            for i, code in enumerate(codes):
                code = code.view(e_batch_size, args.z)
                mean_loss, cov_loss = ops.pretrain_loss(code, z)
                loss = mean_loss + cov_loss
                loss.backward(retain_graph=True)
            optimE.step()
            netE.zero_grad()
            print('Pretrain Enc iter: {}, Mean Loss: {}, Cov Loss: {}'.format(
                j, mean_loss.item(), cov_loss.item()))
            final = loss.item()
            if loss.item() < 0.1:
                print('Finished Pretraining Encoder')
                break
    print('==> Begin Training')
    for _ in range(args.epochs):
        for batch_idx, (data, target) in enumerate(mnist_train):
            netE.zero_grad()
            # NOTE(review): the loop variable `optim` shadows the
            # `torch.optim` module imported at the top of this function.
            for optim in Eoptim:
                optim.zero_grad()
            for optim in Doptim:
                optim.zero_grad()
            z = utils.sample_d(x_dist, args.batch_size)
            codes = netE(z)
            # Discriminator step: push codes toward the prior (GAN-style,
            # with explicit -1 gradient seeds to ascend the log terms).
            for code in codes:
                noise = utils.sample_z_like((args.batch_size, args.z))
                d_real = netD(noise)
                d_fake = netD(code)
                d_real_loss = torch.log((1 - d_real).mean())
                d_fake_loss = torch.log(d_fake.mean())
                d_real_loss.backward(
                    torch.tensor(-1, dtype=torch.float).cuda(),
                    retain_graph=True)
                d_fake_loss.backward(
                    torch.tensor(-1, dtype=torch.float).cuda(),
                    retain_graph=True)
                d_loss = d_real_loss + d_fake_loss
            optimD.step()
            netD.zero_grad()
            # Generate one full set of classifier weights per code.
            z = utils.sample_d(x_dist, args.batch_size)
            codes = netE(z)
            Eweights, Dweights = [], []
            i = 0
            for net in Enets:
                Eweights.append(net(codes[i]))
                i += 1
            for net in Dnets:
                Dweights.append(net(codes[i]))
                i += 1
            d_real = []
            for code in codes:
                d = netD(code)
                d_real.append(d)
            netD.zero_grad()
            d_loss = torch.stack(d_real).log().mean() * 10.
            # Classifier step: evaluate each sampled weight-set on the batch
            # and backprop the scaled loss into the generator heads.
            for layers in zip(*(Eweights + Dweights)):
                loss, _ = train_clf(args, layers, data, target)
                scaled_loss = args.beta * loss
                scaled_loss.backward(retain_graph=True)
            d_loss.backward(torch.tensor(-1, dtype=torch.float).cuda(),
                            retain_graph=True)
            optimE.step()
            for optim in Eoptim:
                optim.step()
            for optim in Doptim:
                optim.step()
            loss = loss.item()
            if batch_idx % 50 == 0:
                print('**************************************')
                print('AE MNIST Test, beta: {}'.format(args.beta))
                print('MSE Loss: {}'.format(loss))
                print('D loss: {}'.format(d_loss))
                print('best test loss: {}'.format(args.best_loss))
                print('**************************************')
            if batch_idx > 1 and batch_idx % 199 == 0:
                test_acc = 0.
                test_loss = 0.
                # NOTE(review): `i` is the enumerate index here but is
                # immediately reset to 0 and reused as the codes index, so
                # the `if i == 10: break` below keys off the codes counter,
                # not the batch count — confirm this is intentional.
                for i, (data, y) in enumerate(mnist_test):
                    z = utils.sample_d(x_dist, args.batch_size)
                    codes = netE(z)
                    Eweights, Dweights = [], []
                    i = 0
                    for net in Enets:
                        Eweights.append(net(codes[i]))
                        i += 1
                    for net in Dnets:
                        Dweights.append(net(codes[i]))
                        i += 1
                    for layers in zip(*(Eweights + Dweights)):
                        loss, out = train_clf(args, layers, data, y)
                        test_loss += loss.item()
                    if i == 10:
                        break
                test_loss /= 10 * len(y) * args.batch_size
                print('Test Loss: {}'.format(test_loss))
                if test_loss < best_test_loss:
                    print('==> new best stats, saving')
                    #utils.save_clf(args, z_test, test_acc)
                    if test_loss < best_test_loss:
                        best_test_loss = test_loss
                        args.best_loss = test_loss
                # Materialize one randomly chosen sampled weight-set into
                # concrete classifier modules and render sample images.
                archE = sampleE(args).cuda()
                archD = sampleD(args).cuda()
                rand = np.random.randint(args.batch_size)
                eweight = list(zip(*Eweights))[rand]
                dweight = list(zip(*Dweights))[rand]
                modelE = utils.weights_to_clf(eweight, archE,
                                              args.statE['layer_names'])
                modelD = utils.weights_to_clf(dweight, archD,
                                              args.statD['layer_names'])
                utils.generate_image(args, batch_idx, modelE, modelD,
                                     data.cuda())
def train(args):
    """WGAN-GP training on CelebA with an additional consistency term.

    Alternates ``args.disc_iters`` critic updates with one generator update
    per iteration; prints costs every 100 iterations, renders samples every
    500, and checkpoints both networks every 5000.
    """
    torch.manual_seed(8734)
    netG = Generator(args).cuda()
    netD = Discriminator(args).cuda()
    print(netG, netD)
    optimG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9),
                        weight_decay=1e-4)
    optimD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9),
                        weight_decay=1e-4)
    celeba_train = datagen.load_celeba_50k(args)
    train = inf_gen(celeba_train)
    print('saving reals')
    reals, _ = next(train)
    utils.create_if_empty('results')
    utils.create_if_empty('results/celeba')
    utils.create_if_empty('saved_models')
    utils.create_if_empty('saved_models/celeba')
    save_image(reals, 'results/celeba/reals.png')
    # Gradient seeds: backward(mone) ascends the objective, backward(one)
    # descends it.
    one = torch.tensor(1.).cuda()
    mone = one * -1
    total_batches = 0
    print('==> Begin Training')
    # NOTE(review): the loop variable `iter` shadows the builtin.
    for iter in range(args.epochs):
        total_batches += 1
        ops.batch_zero_grad([netG, netD])
        # Critic phase: unfreeze D and run several updates per G update.
        for p in netD.parameters():
            p.requires_grad = True
        for _ in range(args.disc_iters):
            data, targets = next(train)
            netD.zero_grad()
            d_real = netD(data).mean()
            d_real.backward(mone, retain_graph=True)
            noise = torch.randn(args.batch_size, args.z,
                                requires_grad=True).cuda()
            # Sample fakes without building G's graph, then re-enable grad
            # so the penalty can differentiate w.r.t. the fake input.
            with torch.no_grad():
                fake = netG(noise)
            fake.requires_grad_(True)
            d_fake = netD(fake)
            d_fake = d_fake.mean()
            d_fake.backward(one, retain_graph=True)
            gp = ops.grad_penalty_3dim(args, netD, data, fake)
            ct = ops.consistency_term(args, netD, data)
            # NOTE(review): only gp.backward() is called — `ct` is folded
            # into the *reported* d_cost below but its gradient is never
            # applied to the critic.  Confirm whether ct should also be
            # backpropagated.
            gp.backward()
            d_cost = d_fake - d_real + gp + (2 * ct)
            wasserstein_d = d_real - d_fake
            optimD.step()
        # Generator phase: freeze D and maximize D(G(z)).
        for p in netD.parameters():
            p.requires_grad = False
        netG.zero_grad()
        noise = torch.randn(args.batch_size, args.z,
                            requires_grad=True).cuda()
        fake = netG(noise)
        G = netD(fake)
        G = G.mean()
        G.backward(mone)
        g_cost = -G
        optimG.step()
        if iter % 100 == 0:
            print('iter: ', iter, 'train D cost', d_cost.cpu().item())
            print('iter: ', iter, 'train G cost', g_cost.cpu().item())
        if iter % 500 == 0:
            # NOTE(review): val_d_costs is created but never filled or used.
            val_d_costs = []
            path = 'results/celeba/iter_{}.png'.format(iter)
            utils.generate_image(args, netG, path)
        if iter % 5000 == 0:
            utils.save_model('saved_models/celeba/netG_{}'.format(iter),
                             netG, optimG)
            utils.save_model('saved_models/celeba/netD_{}'.format(iter),
                             netD, optimD)
} #存放风格图形的特征(VGG不同层的特征值) S_FEATURES = {} C_FEATURES = {} CONTINUE = True #是否是中继训练 C_img = tf.placeholder(tf.float32, [None, 224, 224, 3], "C_image") #内容原图 S_img = tf.placeholder(tf.float32, [None, 224, 224, 3], "S_image") #风格原图 if CONTINUE: #加载训练过的图片 _img = np.load(OUTPUT_NP_PATH) print("中继训练 加载最后保存的 训练过的图片数据") X_img = tf.Variable(_img, name='X_image') else: X_img = tf.Variable(utils.generate_image(C_IMG_PATH, 0.3), name='X_image') C_vgg = VGG19(C_img) S_vgg = VGG19(S_img, reuse=True) X_vgg = VGG19(X_img, reuse=True) """compute loss""" loss_style = 0.0 loss_content = 0.0 use_layers = tuple(C_LAYERS.keys()) + tuple(S_LAYERS.keys()) #目标图片要获取特征值的层 for layer in use_layers: X = eval('X_vgg.' + layer) #目标图片的layer层输出 shape = X.get_shape() #输出维度 size = tf.cast(np.prod(shape[1:]), tf.float32) #累乘各维度 if layer in S_LAYERS.keys(): # #风格图片的layer层特征值(gram)
def train():
    """WGAN-GP training loop with an auxiliary autoencoder (netE + netG).

    Per iteration: one autoencoder reconstruction step on E and G, five
    critic steps with gradient penalty, one generator step, then plot
    logging and a periodic dev-set critic evaluation with sample images.

    NOTE(review): uses the deprecated ``autograd.Variable`` /
    ``volatile=True`` API.
    """
    args = load_args()
    train_gen, dev_gen, test_gen = utils.dataset_iterator(args)
    torch.manual_seed(1)
    netG, netD, netE = load_models(args)
    optimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))
    optimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))
    optimizerE = optim.Adam(netE.parameters(), lr=1e-4, betas=(0.5, 0.9))
    ae_criterion = nn.MSELoss()
    # Constant gradient seeds for backward().
    one = torch.FloatTensor([1]).cuda()
    mone = (one * -1).cuda()
    gen = utils.inf_train_gen(train_gen)
    # NOTE(review): `preprocess` is built but never applied in this
    # function — confirm whether it is dead code.
    preprocess = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    for iteration in range(args.epochs):
        start_time = time.time()
        """ Update AutoEncoder """
        # Freeze the critic while training the E/G autoencoder pair.
        for p in netD.parameters():
            p.requires_grad = False
        netG.zero_grad()
        netE.zero_grad()
        _data = next(gen)
        real_data = stack_data(args, _data)
        real_data_v = autograd.Variable(real_data)
        encoding = netE(real_data_v)
        fake = netG(encoding)
        ae_loss = ae_criterion(fake, real_data_v)
        ae_loss.backward(one)
        optimizerE.step()
        optimizerG.step()
        """ Update D network """
        for p in netD.parameters():
            p.requires_grad = True
        # Five critic updates per generator update.
        for i in range(5):
            _data = next(gen)
            real_data = stack_data(args, _data)
            real_data_v = autograd.Variable(real_data)
            # train with real data
            netD.zero_grad()
            D_real = netD(real_data_v)
            D_real = D_real.mean()
            D_real.backward(mone)
            # train with fake data
            noise = torch.randn(args.batch_size, args.dim).cuda()
            noisev = autograd.Variable(noise, volatile=True)
            # Re-wrap .data to detach the generator from the critic's graph.
            fake = autograd.Variable(netG(noisev).data)
            inputv = fake
            D_fake = netD(inputv)
            D_fake = D_fake.mean()
            D_fake.backward(one)
            # train with gradient penalty
            gradient_penalty = ops.calc_gradient_penalty(
                args, netD, real_data_v.data, fake.data)
            gradient_penalty.backward()
            D_cost = D_fake - D_real + gradient_penalty
            Wasserstein_D = D_real - D_fake
            optimizerD.step()
        # Update generator network (GAN)
        noise = torch.randn(args.batch_size, args.dim).cuda()
        noisev = autograd.Variable(noise)
        fake = netG(noisev)
        G = netD(fake)
        G = G.mean()
        # Maximize D(G(z)) by descending along -1.
        G.backward(mone)
        G_cost = -G
        optimizerG.step()
        # Write logs and save samples
        save_dir = './plots/' + args.dataset
        plot.plot(save_dir, '/disc cost', D_cost.cpu().data.numpy())
        plot.plot(save_dir, '/gen cost', G_cost.cpu().data.numpy())
        plot.plot(save_dir, '/w1 distance', Wasserstein_D.cpu().data.numpy())
        plot.plot(save_dir, '/ae cost', ae_loss.data.cpu().numpy())
        # Calculate dev loss and generate samples every 100 iters
        if iteration % 100 == 99:
            dev_disc_costs = []
            for images, _ in dev_gen():
                imgs = stack_data(args, images)
                imgs_v = autograd.Variable(imgs, volatile=True)
                D = netD(imgs_v)
                _dev_disc_cost = -D.mean().cpu().data.numpy()
                dev_disc_costs.append(_dev_disc_cost)
            plot.plot(save_dir, '/dev disc cost', np.mean(dev_disc_costs))
            utils.generate_image(iteration, netG, save_dir, args)
            # utils.generate_ae_image(iteration, netE, netG, save_dir, args, real_data_v)
        # Save logs every 100 iters
        if (iteration < 5) or (iteration % 100 == 99):
            plot.flush()
        plot.tick()
def generate_new_background(self):
    """Regenerate the background by delegating to generate_image().

    Thin wrapper: calls the module-level generate_image() helper with no
    arguments and returns nothing.  Reads no instance state.
    """
    generate_image()
def train():
    """First-layer training of a stacked GAN/autoencoder pipeline.

    Per iteration: one autoencoder step on FirstE/FirstG (with an extra
    MSE term pulling encodings toward zero), five critic (FirstD) steps
    with gradient penalty, and one generator step.  Checkpoints and sample
    images every 1000 iterations; Inception score refreshed every 2000.

    NOTE(review): uses the deprecated ``autograd.Variable`` /
    ``volatile=True`` / ``tensor.data[0]`` APIs.
    """
    args = load_args()
    torch.manual_seed(1)
    netG = first_layer.FirstG(args).cuda()
    netD = first_layer.FirstD(args).cuda()
    netE = first_layer.FirstE(args).cuda()
    optimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))
    optimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))
    optimizerE = optim.Adam(netE.parameters(), lr=1e-4, betas=(0.5, 0.9))
    ae_criterion = nn.MSELoss()
    # Constant gradient seeds for backward().
    one = torch.FloatTensor([1]).cuda()
    mone = (one * -1).cuda()
    dataLoader = BSDDataLoader(args.dataset, args.batch_size, args)
    incep_score = 0
    # Zero target used to regularize encodings toward the origin;
    # 4*4*5 presumably matches FirstE's flattened output size — confirm.
    zeros = autograd.Variable(torch.zeros(args.batch_size, 4 * 4 * 5).cuda())
    for iteration in range(args.epochs):
        start_time = time.time()
        """ Update AutoEncoder """
        # Freeze the critic while training the E/G autoencoder pair.
        for p in netD.parameters():
            p.requires_grad = False
        netG.zero_grad()
        netE.zero_grad()
        real_data = dataLoader.getNextLoBatch().cuda()
        real_data_v = autograd.Variable(real_data)
        encoding = netE(real_data_v)
        fake = netG(encoding)
        # Reconstruction loss plus the zero-encoding regularizer.
        ae_loss = ae_criterion(fake, real_data_v) + ae_criterion(
            encoding, zeros)
        ae_loss.backward(one)
        optimizerE.step()
        optimizerG.step()
        """ Update D network """
        for p in netD.parameters():
            p.requires_grad = True
        # Five critic updates per generator update.
        for i in range(5):
            real_data = dataLoader.getNextLoBatch().cuda()
            real_data_v = autograd.Variable(real_data)
            # train with real data
            netD.zero_grad()
            D_real = netD(real_data_v)
            D_real = D_real.mean()
            D_real.backward(mone)
            # train with fake data; re-wrap .data to detach the generator
            # from the critic's graph.
            noise = generateTensor(args.batch_size).cuda()
            noisev = autograd.Variable(noise, volatile=True)
            fake = autograd.Variable(netG(noisev, True).data)
            inputv = fake
            D_fake = netD(inputv)
            D_fake = D_fake.mean()
            D_fake.backward(one)
            # train with gradient penalty
            gradient_penalty = ops.calc_gradient_penalty(
                args, netD, real_data_v.data, fake.data)
            gradient_penalty.backward()
            optimizerD.step()
        # Update generator network (GAN)
        noise = generateTensor(args.batch_size).cuda()
        noisev = autograd.Variable(noise)
        fake = netG(noisev, True)
        G = netD(fake)
        G = G.mean()
        G.backward(mone)
        G_cost = -G
        optimizerG.step()
        # Write logs and save samples
        save_dir = './plots/' + args.dataset
        # Calculate dev loss and generate samples every 100 iters
        if iteration % 1000 == 999:
            torch.save(netE.state_dict(),
                       './1stLayer/1stLayerE%d.model' % iteration)
            torch.save(netG.state_dict(),
                       './1stLayer/1stLayerG%d.model' % iteration)
            utils.generate_image(iteration, netG, save_dir, args)
        endtime = time.time()
        if iteration % 2000 == 1999:
            # Periodic quality probe: Inception score of 1000 fresh samples.
            noise = generateTensor(1000).cuda()
            noisev = autograd.Variable(noise, volatile=True)
            fake = autograd.Variable(netG(noisev, True).data)
            incep_score = (inception_score(fake.data.cpu().numpy(),
                                           resize=True, batch_size=5))[0]
        print('iter:', iteration,
              'total time %4f' % (endtime - start_time),
              'ae loss %4f' % ae_loss.data[0],
              'G cost %4f' % G_cost.data[0],
              'inception score %4f' % incep_score)