# --- GAN training script (fragment): hyper-parameters, data, models, loop start ---
# NOTE(review): this chunk ends part-way through the discriminator step; the rest
# of the training loop (fake-sample loss, generator update) continues past this view.

batch_size = 100   # samples per mini-batch; MNIST train (60k) divides evenly,
                   # so every batch matches the fixed-size label tensors below
train_epoch = 30   # number of full passes over the training set
input_size = 196   # dimensionality of the generator's latent input vector

# MNIST training split, upscaled to 56x56 and normalized to [-1, 1].
data_set = DataLoader(
    datasets.MNIST(
        "..\\data\\",   # expects the data set already on disk (download=False)
        train=True,
        download=False,
        transform=transforms.Compose(
            [
                transforms.Resize((56, 56)),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        ),
    ),
    batch_size=batch_size,
    shuffle=True,
)
batch_number = len(data_set)  # idiomatic form of data_set.__len__()

# Models (Generator / Discriminator are project-defined) and their optimizers.
# The discriminator uses a smaller learning rate than the generator.
G = Generator(input_size).cuda()
D = Discriminator().cuda()
gopt = optim.Adam(G.parameters(), lr=4e-3)
dopt = optim.Adam(D.parameters(), lr=5e-4)
lossFunc = nn.BCELoss()

# Constant target tensors reused for every batch.
# NOTE(review): `Var` is presumably torch.autograd.Variable — deprecated in
# modern PyTorch (plain tensors carry autograd) — TODO confirm the alias.
real_labels = Var(torch.ones((batch_size, 1))).cuda()
fake_labels = Var(torch.zeros((batch_size, 1))).cuda()

for epoch in range(train_epoch):
    for k, (bx, _) in enumerate(data_set):
        dopt.zero_grad()
        bx = bx.cuda()
        # =============== Discriminator training =================
        real_out = D(bx)
        d_loss_real = lossFunc(real_out, real_labels)
# --- Autoencoder training-loop fragment ---
# NOTE(review): the enclosing epoch/batch loop headers are outside this view and
# the paste flattened all indentation.  From the logic, everything through the
# loss accumulation is per-batch, while the append / print / save lines look
# per-epoch — TODO confirm nesting against the full file.

optim.zero_grad()
predict = model_AE.forward(batch_data)
output = criterion(predict, batch_label)
output.backward()
optim.step()

# Accumulate the *scalar* batch loss.  The original added `output` (a tensor
# carrying autograd state) directly, which chains a graph node onto loss_mean
# every iteration and steadily grows memory; `.item()` detaches the value.
loss_mean += output.item() / len(data_loader)

# Per-epoch bookkeeping: record and report the mean loss.
loss_mean_list.append(loss_mean)
print(loss_mean)

# NOTE(review): saving the full module object ties the checkpoint to this exact
# code layout; `model_AE.state_dict()` is the recommended format — left
# unchanged here because the loading side may depend on the current format.
torch.save(model_AE, saved_model_path + "body_tracking_model.pth")

# train
print(cnt)