# ---- Train the discriminator ----
NetD.zero_grad()
output = NetD(input)                      # real images; label is filled with 1 beforehand
error_real = criterion(output.squeeze(), label)
error_real.backward()
D_x = output.data.mean()

fake_pic = NetG(noise).detach()           # detach so no gradients flow back into the generator
output2 = NetD(fake_pic)
label.data.fill_(0)                       # 0 for fake
error_fake = criterion(output2.squeeze(), label)
error_fake.backward()
D_x2 = output2.data.mean()
error_D = error_real + error_fake
optimizerD.step()

# ---- Train the generator ----
NetG.zero_grad()
label.data.fill_(1)                       # generator wants its fakes classified as real
noise.data.normal_(0, 1)
fake_pic = NetG(noise)
output = NetD(fake_pic)
error_G = criterion(output.squeeze(), label)
error_G.backward()
optimizerG.step()
D_G_z2 = output.data.mean()

bar.show(epoch, error_D.item(), error_G.item())

# save a grid of samples generated from the fixed noise
fake_u = NetG(fix_noise)
torchvision.utils.save_image(fake_u.data[:64], "outputs/MNIST_%03d.png" % epoch,
                             normalize=True, range=(-1, 1))
noises.data.copy_(
    t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))
fake_img = netG(noises).detach()          # generate fake images from the noise
output = netD(fake_img)
error_d_fake = criterion(output, fake_labels)
error_d_fake.backward()
optimizer_discriminator.step()
error_d = error_d_fake + error_d_real

if ii % 1 == 0:                           # train the generator (this condition is always true)
    netG.zero_grad()
    noises.data.copy_(
        t.randn(CONFIG["BATCH_SIZE"], CONFIG["NOISE_DIM"], 1, 1))
    fake_img = netG(noises)
    output = netD(fake_img)
    error_g = criterion(output, true_labels)
    error_g.backward()
    optimizer_generator.step()

proBar.show(epoch, error_d.item(), error_g.item())

# save the model and sample images
fix_fake_imgs = netG(fix_noises)
tv.utils.save_image(fix_fake_imgs.data[:64],
                    'outputs/Pytorch_AnimateFace_%03d.png' % epoch,
                    normalize=True, range=(-1, 1))
t.save(netG.state_dict(), "outputs/DCGAN_AnimateFace_Pytorch_Generator.pth")
latent_samples = torch.randn(bs, z_dim)
g_gen_input = Variable(latent_samples)
if GPU_NUMS > 0:
    g_gen_input = g_gen_input.cuda()
g_fake_data = generator(g_gen_input)
g_fake_decision = discriminator(g_fake_data)
labels = Variable(torch.ones(bs))
if GPU_NUMS > 0:
    labels = labels.cuda()
g_loss = criterion(g_fake_decision, labels)   # we want to fool D, so pretend it's all genuine
g_loss.backward()
g_optimizer.step()                             # only optimizes G's parameters

loss_d_real = d_real_loss.item()
loss_d_fake = d_fake_loss.item()
loss_g = g_loss.item()
progBar.show(loss_d_real, loss_d_fake, loss_g)

if train_iter == 1 or train_iter % 100 == 0:
    msg = 'Iteration {}: D_loss(real/fake): {:.6g}/{:.6g} G_loss: {:.6g}'.format(
        train_iter, loss_d_real, loss_d_fake, loss_g)
    gen_samples = g_fake_data.data.cpu().numpy()
    visualizer.draw(real_samples, gen_samples, msg, show=False)
    visualizer.savefig('outputs/Pytorch_Batman_%04d' % train_iter)

torch.save(generator.state_dict(), "outputs/GAN_Batman_Pytorch_Generator.pth")
label_fake_G_var = Variable(onehot[label_fake].cuda() if CONFIG["GPU_NUMS"] > 0 else onehot[label_fake])
label_fake_D_var = Variable(fill[label_fake].cuda() if CONFIG["GPU_NUMS"] > 0 else fill[label_fake])
g_result = NetG(img_fake_var, label_fake_G_var)
d_result = NetD(g_result, label_fake_D_var)
d_result = d_result.squeeze()
D_LOSS_FAKE = BCELoss()(d_result, label_false_var)

D_train_loss = D_LOSS_REAL + D_LOSS_FAKE
D_train_loss.backward()
D_optimizer.step()

# ---- Train the generator ----
NetG.zero_grad()
img_fake = torch.randn((mini_batch, 100)).view(-1, 100, 1, 1)
label_fake = (torch.rand(mini_batch, 1) * 10).type(torch.LongTensor).squeeze()
img_fake_var = Variable(img_fake.cuda() if CONFIG["GPU_NUMS"] > 0 else img_fake)
label_fake_G_var = Variable(onehot[label_fake].cuda() if CONFIG["GPU_NUMS"] > 0 else onehot[label_fake])
label_fake_D_var = Variable(fill[label_fake].cuda() if CONFIG["GPU_NUMS"] > 0 else fill[label_fake])
g_result = NetG(img_fake_var, label_fake_G_var)
d_result = NetD(g_result, label_fake_D_var)
d_result = d_result.squeeze()
G_train_loss = BCELoss()(d_result, label_true_var)
G_train_loss.backward()
G_optimizer.step()

bar.show(epoch, D_train_loss.item(), G_train_loss.item())

# save a 10x10 grid generated from the fixed noise / label pairs
test_images = NetG(fixed_z_, fixed_y_label_)
torchvision.utils.save_image(test_images.data[:100], 'outputs/mnist_%03d.png' % epoch,
                             nrow=10, normalize=True, range=(-1, 1), padding=0)
b_x = Variable(x.cuda() if GPU_NUMS > 0 else x)
b_y = Variable(y.type(torch.LongTensor).cuda() if GPU_NUMS > 0
               else y.type(torch.LongTensor)).squeeze_()

output = cnn(b_x)
loss = loss_func(output, b_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()

prediction = torch.max(F.softmax(output, dim=1), 1)[1]
pred_y = prediction.cpu().data.numpy().squeeze()
target_y = b_y.cpu().data.numpy()
accuracy = sum(pred_y == target_y) / len(target_y)
proBar.show(epoch, loss.item(), accuracy)

test_x = Variable(torch.unsqueeze(torch.FloatTensor(train_data.test_data), dim=1).cuda()
                  if GPU_NUMS > 0
                  else torch.unsqueeze(torch.FloatTensor(train_data.test_data), dim=1))
test_y = Variable(torch.LongTensor(train_data.test_labels).cuda()
                  if GPU_NUMS > 0
                  else torch.LongTensor(train_data.test_labels))
test_y = test_y.squeeze()

test_output = cnn(test_x)
pred_y = torch.max(F.softmax(test_output, dim=1), 1)[1].cpu().data.numpy().squeeze()
target_y = test_y.cpu().data.numpy()
accuracy = sum(pred_y == target_y) / len(target_y)
print("test accuracy is %.3f" % accuracy)
for step in range(STEPS):
    out = net(x)                      # input x and predict based on x
    loss = loss_func(out, y)          # must be (1. nn output, 2. target); the target label is NOT one-hot
    optimizer.zero_grad()             # clear gradients for the next train step
    loss.backward()                   # backpropagation, compute gradients
    optimizer.step()                  # apply gradients

    _, prediction = torch.max(F.softmax(out, dim=1), 1)
    pred_y = prediction.data.numpy().squeeze()
    target_y = y.data.numpy()
    accuracy = sum(pred_y == target_y) / x.shape[0]
    bar.show(1, loss.item(), accuracy)

    if (step + 1) % DECAY_STEP == 0:
        out = net(Variable(torch.FloatTensor(x_show)))
        _, prediction = torch.max(F.softmax(out, dim=1), 1)
        pred_y = prediction.data.numpy().squeeze()
        predict.append(pred_y)
        myloss.append(loss.item())

fig, axes = plt.subplots()
plt.xlabel("X1", fontsize=15)
plt.ylabel("X2", fontsize=15)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.suptitle("Pytorch")
time_template = 'step = %d, train loss=%.9f'
for step in range(STEPS):
    out = net(x)                      # input x and predict based on x
    loss = loss_func(out, y)          # must be (1. nn output, 2. target); the target label is NOT one-hot
    optimizer.zero_grad()             # clear gradients for the next train step
    loss.backward()                   # backpropagation, compute gradients
    optimizer.step()                  # apply gradients

    _, prediction = torch.max(F.softmax(out, dim=1), 1)
    pred_y = prediction.data.numpy().squeeze()
    target_y = y.data.numpy()
    accuracy = sum(pred_y == target_y) / x.shape[0]
    bar.show(step, loss.item(), accuracy)

    if (step + 1) % DECAY_STEP == 0:
        out = net(Variable(torch.FloatTensor(x_show)))
        _, prediction = torch.max(F.softmax(out, dim=1), 1)
        pred_y = prediction.data.numpy().squeeze()
        predict.append(pred_y)
        myloss.append(loss.item())

fig, axes = plt.subplots()
plt.xlabel(iris_feature[0], fontsize=15)
plt.ylabel(iris_feature[1], fontsize=15)
plt.xlim(x1_min, x1_max)
plt.ylim(x2_min, x2_max)
plt.suptitle("Pytorch")
time_template = 'step = %d, train loss=%.9f'
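# Side note (not part of the original listing): because softmax is monotonic, the predicted
# class indices in the loops above can equivalently be taken straight from the raw logits,
# skipping the explicit softmax. A minimal sketch, assuming the same `out` tensor:
pred_y = torch.max(out, 1)[1].data.numpy().squeeze()   # same argmax as softmax(out) would give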
    BCE = reconstruction_function(recon_x, x)          # mse loss
    # KL divergence: KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.sum(KLD_element).mul_(-0.5)
    return BCE + KLD

optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(EPOCH):
    model.train()
    train_loss = 0
    for batch_idx, data in enumerate(train_loader):
        img, _ = data
        img = img.view(img.size(0), -1)                # flatten each image to a vector
        img = Variable(img)
        if torch.cuda.is_available():
            img = img.cuda()

        optimizer.zero_grad()
        recon_batch, mu, logvar = model(img)
        loss = loss_function(recon_batch, img, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        proBar.show(loss.item() / len(img))

    if epoch % 10 == 0:
        save = to_img(recon_batch.cpu().data)
        save_image(save, 'output/image_{}.png'.format(epoch))

torch.save(model.state_dict(), 'output/vae.pth')
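# Side note (not from the original listing): the in-place chain above computes the standard
# closed-form KL term against a standard normal prior. A minimal equivalent sketch, assuming
# `mu` and `logvar` are the encoder outputs:
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())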
    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

model = autoencoder().cuda() if torch.cuda.is_available() else autoencoder()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

proBar = ProgressBar(num_epochs, len(dataloader), "loss:%.3f")
for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = Variable(img).cuda() if torch.cuda.is_available() else Variable(img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(loss.item())
    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
        save_image(pic, './dc_img/image_{}.png'.format(epoch))
loss = tf.reduce_mean(tf.square(y - prediction))
train_step = tf.train.MomentumOptimizer(0.0001, momentum=0.1).minimize(loss)

predict = []
myloss = []
bar = ProgressBar(1, STEPS, "train_loss:%.9f")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(STEPS):
        _, train_loss, prediction_value = sess.run(
            [train_step, loss, prediction],
            feed_dict={x: x_data, y: y_data})
        bar.show(1, train_loss)
        if (step + 1) % DECAY_STEP == 0:
            predict.append(prediction_value)
            myloss.append(train_loss)

fig, ax = plt.subplots()
xdata, ydata = [], []
ln, = ax.plot([], [], 'r-', animated=False)
plt.scatter(x_data, y_data)
time_template = 'step = %d, train loss=%.9f'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
plt.title('Tensorflow', fontsize=18)
plt.grid(True)

def init():
train_data = Cifar10DataSetForPytorch(train=True, transform=json["transform"])
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# build the network
model = json["model"](json["pretrained"])
model = torch.nn.DataParallel(model).cuda() if GPU_NUMS > 1 else torch.nn.DataParallel(model)
optimizer = Adam(model.parameters(), lr=LR)
loss_func = CrossEntropyLoss().cuda() if GPU_NUMS > 0 else CrossEntropyLoss()

# training loop
proBar = ProgressBar(EPOCH, len(train_loader), "loss:%.3f,acc:%.3f")
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        data = Variable(x.cuda() if GPU_NUMS > 0 else x)
        label = Variable(torch.squeeze(y, dim=1).type(torch.LongTensor).cuda()
                         if GPU_NUMS > 0
                         else torch.squeeze(y, dim=1).type(torch.LongTensor))
        optimizer.zero_grad()
        output = model(data)
        loss = loss_func(output, label)
        loss.backward()
        optimizer.step()

        prediction = torch.max(softmax(output, dim=1), 1)[1]
        pred_label = prediction.data.cpu().numpy().squeeze()
        target_y = label.data.cpu().numpy()
        accuracy = sum(pred_label == target_y) / len(target_y)
        proBar.show(loss.item(), accuracy)

torch.save(model.state_dict(), "%s.pth" % MODEL)
dataset = ImageFolder(root='/input/face/64_crop', transform=Compose([ToTensor()]))
train_loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)

bar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")
model.train()
train_loss = 0
for epoch in range(EPOCH):
    for ii, (image, label) in enumerate(train_loader):
        mini_batch = image.shape[0]
        data = Variable(image.cuda() if GPU_NUMS > 0 else image)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        loss = loss_function(recon_batch, data, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
        bar.show(loss.item() / mini_batch)

    # sample from the prior, decode, and save a grid of generated faces
    model.eval()
    z = torch.randn(BATCH_SIZE, model.latent_variable_size)
    z = Variable(z.cuda() if GPU_NUMS > 0 else z, volatile=True)
    recon = model.decode(z)
    torchvision.utils.save_image(recon.data, 'output/Face64_%02d.png' % (epoch + 1))

torch.save(model.state_dict(), "output/VAE_64_Face_Pytorch_Generator.pth")
        x = self.encoder(x)
        x = self.decoder(x)
        return x

model = autoencoder().cuda() if GPU_NUMS > 0 else autoencoder()
criterion = MSELoss()
optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

proBar = ProgressBar(EPOCH, len(train_loader), "Loss:%.3f")
for epoch in range(1, EPOCH):
    for data in train_loader:
        img, _ = data
        img = img.view(img.size(0), -1)
        img = Variable(img).cuda() if GPU_NUMS > 0 else Variable(img)
        # ===================forward=====================
        output = model(img)
        loss = criterion(output, img)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        proBar.show(epoch, loss.item())
    # ===================log========================
    if epoch % 10 == 0:
        pic = to_img(output.cpu().data)
        save_image(pic, 'output/image_{}.png'.format(epoch))

torch.save(model.state_dict(), 'sim_autoencoder.pth')
image_fake = NetG(Noise_var, label_var)
D_fake = NetD(image_fake, label_var)
D_fake_loss = BCE_LOSS(D_fake, label_false_var)
D_loss = D_real_loss + D_fake_loss
D_loss.backward()
D_optimizer.step()

# ---- Train the generator ----
NetG.zero_grad()
Noise_var = Variable(torch.randn(mini_batch, CONFIG["NOISE_DIM"]).cuda()
                     if CONFIG["GPU_NUMS"] > 0
                     else torch.randn(mini_batch, CONFIG["NOISE_DIM"]))
image_fake = NetG(Noise_var, label_var)
D_fake = NetD(image_fake, label_var)
G_loss = BCE_LOSS(D_fake, label_true_var)
G_loss.backward()
G_optimizer.step()

bar.show(epoch, D_loss.item(), G_loss.item())

# save a 10x10 grid generated from the fixed prediction noise and labels
test_images = NetG(Predict_Noise_var, Predict_y)
torchvision.utils.save_image(test_images.data[:100], 'outputs/JData_%03d.png' % epoch,
                             nrow=10, normalize=True, range=(-1, 1), padding=0)
D_real = y_pred.data.mean()

z.data.resize_(current_batch_size, CONFIG["NOISE_DIM"], 1, 1).normal_(0, 1)
x_fake = NetG(z)
y.data.resize_(current_batch_size).fill_(0)
y_pred_fake = NetD(x_fake.detach())        # detach so the generator receives no gradient here
errD_fake = criterion(y_pred_fake, y)
errD_fake.backward()
D_fake = y_pred_fake.data.mean()
errD = errD_real + errD_fake
optimizerD.step()

# freeze the discriminator while the generator is updated
for p in NetD.parameters():
    p.requires_grad = False

NetG.zero_grad()
y.data.resize_(current_batch_size).fill_(1)
y_pred_fake = NetD(x_fake)
errG = criterion(y_pred_fake, y)
errG.backward(retain_graph=True)
D_G = y_pred_fake.data.mean()
optimizerG.step()

bar.show(epoch, errD.item(), errG.item())

fake_test = NetG(z_test)
tv.utils.save_image(fake_test.data, 'outputs/Cat_%03d.png' % epoch, nrow=10, normalize=True)
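# Side note (not shown in the excerpt above): since the discriminator's parameters are set to
# requires_grad = False before the generator step, they presumably need to be switched back on
# before the next discriminator update. A minimal sketch of that toggle, assuming the same NetD:
for p in NetD.parameters():
    p.requires_grad = True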
output_dc = d_output_fake[:, 1 + CONFIG["CC_DIM"]:]
d_loss_cc = torch.mean((((output_cc - 0.0) / 0.5) ** 2))
d_loss_dc = -(torch.mean(torch.sum(dc * output_dc, 1)) + torch.mean(torch.sum(dc * dc, 1)))
d_loss = d_loss_a + 0.5 * d_loss_cc + 1.0 * d_loss_dc

# Optimization
NetD.zero_grad()
d_loss.backward(retain_graph=True)
d_optimizer.step()

# ===================== Train G =====================
# Fake -> Real
g_loss_a = -torch.mean(torch.log(d_output_fake[:, 0]))
g_loss = g_loss_a + CONFIG["CONTINUOUS_WEIGHT"] * d_loss_cc + 1.0 * d_loss_dc

# Optimization
NetG.zero_grad()
g_loss.backward()
g_optimizer.step()

bar.show(epoch, d_loss.item(), g_loss.item())

fake_images = NetG(torch.cat((fixed_noise_var, fixed_cc_var, fixed_dc_var), 1))
torchvision.utils.save_image(fake_images.data, "outputs/cifar10_%03d.png" % epoch, nrow=10)
loss_func = MSELoss()
x_data, y_data = Variable(x_data), Variable(y_data)

bar = ProgressBar(1, STEPS, "train_loss:%.9f")
predict = []
myloss = []
for step in range(STEPS):
    prediction = Net(x_data)
    loss = loss_func(prediction, y_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    bar.show(1, loss.item())
    if (step + 1) % DECAY_STEP == 0:
        predict.append(prediction.data.numpy())
        myloss.append(loss.item())

fig, ax = plt.subplots()
xdata, ydata = [], []
ln, = ax.plot([], [], 'r-', animated=False)
plt.scatter(x_data, y_data)
plt.grid(True)
time_template = 'step = %d, train loss=%.9f'
time_text = ax.text(0.05, 0.9, '', transform=ax.transAxes)
plt.title('Pytorch', fontsize=18)

def init():