Example #1
		inputs = train_set[i]
		labels = train_labels[i]
		if torch.cuda.is_available():
			# Move the batch to the GPU when one is present.
			inputs = inputs.cuda()
			labels = labels.cuda()
		# Standard training step: reset gradients, forward pass,
		# compute the loss, backpropagate, and update the weights.
		optimizer.zero_grad()
		outputs = model(inputs)
		loss = criterion(outputs, labels)
		loss.backward()
		optimizer.step()
		running_loss += loss.item()
	if (epoch + 1) % args.save_frequency == 0:
		# Periodically checkpoint the model and optimizer state.
		path = os.path.join(args.results_dir, "{0}-{1:04d}.tar".format(args.checkpoint_prefix, epoch + 1))
		torch.save({
			'epoch': epoch,
			'model_state_dict': model.state_dict(),
			'optimizer_state_dict': optimizer.state_dict(),
			'running_loss': running_loss
		}, path)
		print('\nModel saved to %s' % path)
	if (epoch + 1) % args.display_frequency == 0:
		print('\nEpoch %d:\n\tLoss = %.3f' % (epoch + 1, running_loss / args.display_frequency))
		running_loss = 0.0
		# Measure accuracy on the training set.
		model.eval()
		correct = 0.0
		total = 0.0
		for k in range(len(train_set)):
			inputs = train_set[k]
			labels = train_labels[k]
			if torch.cuda.is_available():
				inputs = inputs.cuda()
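
The example breaks off inside the evaluation loop. A plausible completion, following the usual classification pattern (everything below the cuda() call is an assumption, not part of the source):

				labels = labels.cuda()
			# Accumulate accuracy without tracking gradients (assumed continuation).
			with torch.no_grad():
				outputs = model(inputs)
				_, predicted = torch.max(outputs, 1)
			total += labels.size(0)
			correct += (predicted == labels).sum().item()
		print('\tAccuracy = %.2f%%' % (100.0 * correct / total))
		model.train()  # switch back to training mode
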
Example #2
    ax1.set_xticks(range(len(index_list)))
    ax1.set_xticklabels(range(len(index_list)))
    # Plot the second DataFrame on the right panel; keep tick labels upright.
    df2.plot(ax=ax2, kind='line', rot=0, grid=True)
    ax2.set_xticks(range(Epoch))
    ax2.set_xticklabels(range(Epoch))
    plt.show()
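
The fragment above refers to names (ax1, ax2, df2, index_list, Epoch) defined earlier in the script. A minimal, self-contained setup under which it runs (all the values and column names here are placeholder assumptions) could be:

import matplotlib.pyplot as plt
import pandas as pd

Epoch = 10                   # assumed number of epochs
index_list = list(range(5))  # assumed x-axis index for the left panel
df1 = pd.DataFrame({'acc': [0.2, 0.4, 0.6, 0.7, 0.8]}, index=index_list)
df2 = pd.DataFrame({'loss': [1.0 / (e + 1) for e in range(Epoch)]})

# One row, two panels: df1 on the left axis, df2 on the right.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
df1.plot(ax=ax1, kind='line', grid=True)
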


"""
USE_Bi=True
"""
w2v = EmbeddingModel(vocab_size, embedding_dim)
checkpoint = torch.load('Model/checkpoint.pth2.tar')
w2v.load_state_dict(checkpoint['state_dict'])  # restore the trained embedding weights

# Sanity check: inspect the loaded embedding matrix.
print(w2v.state_dict()["in_embed.weight"])
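
If this checkpoint was saved from a GPU run and is later loaded on a CPU-only machine, torch.load needs a map_location to remap the tensors; a common safeguard:

checkpoint = torch.load('Model/checkpoint.pth2.tar',
                        map_location=torch.device('cpu'))
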
"""
if USE_Bi:
    print("Using BiLSTM")
    model = BiLSTM_Match(w2v,embedding_dim, hidden_dim, vocab_size, target, Batchsize, stringlen)
    model_path = "./Model/BiLSTMmodel.pth"
else:
    print("Using LSTM")
    model = LSTM_Match(embedding_dim, hidden_dim, vocab_size,target,Batchsize,stringlen)
    model_path = "./Model/LSTMmodel.pth"

# `==` between nn.Module objects falls back to identity, so this prints True
# only if the model reuses the very same Embedding instance.
print(w2v.in_embed == model.word_embeddings)
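
To compare the stored weights themselves rather than object identity (assuming both modules are nn.Embedding layers exposing a .weight tensor), torch.equal is the direct check:

same = torch.equal(w2v.in_embed.weight, model.word_embeddings.weight)
print('embedding weights identical:', same)
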


print(model)
if USE_CUDA:
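
The listing breaks off here; the usual body of this guard (assumed, not shown in the source) moves the model to the GPU:

    model = model.cuda()  # assumed continuation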