        batch_realdata = batch_realdata.view(opt.Seq_Len, opt.BATCH_SIZE, 1)

        # The discriminator scores the generated data and the real data.
        # batch_gdata is detached so the discriminator update does not backprop into G.
        prob_fake = D(batch_gdata.detach())
        prob_real = D(batch_realdata)
        d_loss = -torch.mean(torch.log(prob_real) + torch.log(1 - prob_fake))

        # Update the discriminator.
        opt_D.zero_grad()
        d_loss.backward()
        opt_D.step()

        # Update the generator with a fresh pass through the updated discriminator,
        # so gradients flow back into G without reusing the old graph.
        g_loss = -torch.mean(torch.log(D(batch_gdata)))
        opt_G.zero_grad()
        g_loss.backward()
        opt_G.step()

        print('Epoch: {} / Batch: {}, d_loss: {}, g_loss: {}'.format(
            epoch, batch, d_loss.item(), g_loss.item()))
        dloss_list.append(d_loss.item())
        gloss_list.append(g_loss.item())

    dloss_avg = listsumavg(dloss_list)
    gloss_avg = listsumavg(gloss_list)
    outputWriter.writerow([epoch, dloss_avg, gloss_avg])

outputFile.close()
torch.save(G.state_dict(), 'modelsaved/generatormlp.pkl')
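Both training loops log a per-epoch average through `listsumavg`, which is not shown in this excerpt. If it is not already defined earlier in the script, a minimal sketch (name and behavior are assumptions) could be:

def listsumavg(values):
    # Average a list of per-batch loss values; guard against an empty list.
    return sum(values) / len(values) if values else 0.0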
NUM = int(listlength / opt.Seq_Len / opt.BATCH_SIZE)

Filename = 'resultdata/rnn_auto.csv'
outputFile = open(Filename, 'w')
outputWriter = csv.writer(outputFile)
outputWriter.writerow(['Epoch', 'Loss'])

criterion.cuda()
for epoch in range(500):
    loss_list = []
    for batch in range(NUM):
        # Copy one batch of points into a (BATCH_SIZE, Seq_Len, 1) tensor.
        inputs = torch.randn(opt.BATCH_SIZE, opt.Seq_Len, 1)
        for i in range(opt.BATCH_SIZE):
            for j in range(opt.Seq_Len):
                inputs[i][j][0] = pointlistx[batch * opt.Seq_Len * opt.BATCH_SIZE + i * opt.Seq_Len + j]
        inputs = inputs.view(opt.Seq_Len, opt.BATCH_SIZE, 1).cuda()

        # Reconstruction loss of the autoencoder.
        output = net(inputs)
        loss = criterion(output, inputs)
        print('epoch: {}, batch: {}, loss: {}'.format(epoch, batch, loss.item()))
        loss_list.append(loss.item())

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    lossavg = listsumavg(loss_list)
    outputWriter.writerow([epoch, lossavg])

outputFile.close()
torch.save(net.state_dict(), 'modelsaved/rnn_auto.pkl')
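The element-by-element copy from `pointlistx` into `inputs` can also be written as a single slice-and-reshape that produces the same memory layout; a minimal sketch, assuming `pointlistx` is a flat Python list or 1-D array of floats (as the indexing above suggests):

        # Equivalent to the nested loops above: the batch slice is contiguous,
        # so a flat copy followed by the same view reproduces the original tensor.
        start = batch * opt.Seq_Len * opt.BATCH_SIZE
        end = start + opt.Seq_Len * opt.BATCH_SIZE
        inputs = torch.tensor(pointlistx[start:end], dtype=torch.float32)
        inputs = inputs.view(opt.Seq_Len, opt.BATCH_SIZE, 1).cuda()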