# NOTE(review): this chunk arrived whitespace-mangled — every statement was
# collapsed onto one physical line. The line breaks below are reconstructed
# from Python statement syntax; the token stream is unchanged. The chunk is
# also truncated mid-`if` at the very end (see final note) — restore the full
# file from version control before running.

# Select the embedding model variant from the CLI flag; any other value is fatal.
if args.embedding_type == 'linear': model = EmbeddingModel(len(args.classes))
elif args.embedding_type == 'conv': model = ConvolutionalEmbeddingModel(len(args.classes))
else:
    print("Model type [{0}] not supported".format(args.embedding_type))
    exit(1)

# Build the evaluation data. The 1.0 split ratio and the discarded trailing
# return values (_, __, ___) suggest everything goes into the eval set —
# TODO confirm against generateData's definition (not visible here).
eval_dataset, eval_labels, _, __, ___ = generateData(
    args.eval_file, eval_list, 1.0, args.load_embedding_dict_from_file,
    args.save_embedding_dict, args.verbose,
    'embedding_dicts/animal_embedding_dict.pkl', False, args.classes)

embedding_dict = {}
if len(args.model_checkpoint) > 0:
    # Restore trained weights on CPU, then seed embedding_dict with one row of
    # the 'l2.weight' matrix per class: class name -> ([weight row], [index]).
    checkpoint = torch.load(args.model_checkpoint, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    embedding = checkpoint['model_state_dict']['l2.weight']
    for i in range(len(args.classes)):
        embedding_dict[args.classes[i]] = ([embedding[i, :]], [i])

x = eval_dataset
y = eval_labels
classes = eval_list
for i in range(len(x)):
    inputs = x[i]
    label = y[i]
    # Skips samples whose second dimension is < 2; presumably inputs is a
    # 2-D tensor — TODO confirm what that dimension represents.
    if inputs.shape[1] < 2: continue
    # Per-sample forward passes: the intermediate embedding and the class
    # prediction (argmax over the class dimension of the raw outputs).
    embedding = model.embedding(inputs)
    output = model(inputs)
    _, predicted = torch.max(output.data, 1)
    if classes[label] in embedding_dict:
    # NOTE(review): chunk is truncated here — the body of this `if` continues
    # outside the visible source. Do not guess at the missing code.
# NOTE(review): whitespace-mangled chunk (newlines lost) — left byte-identical
# because a safe reconstruction is NOT possible here:
#   1. The line contains three `"""` markers, so the string/code boundaries are
#      ambiguous once newlines are gone. The first pair appears to wrap
#      `USE_Bi=True`, and the third opens a triple-quoted string with no closing
#      marker inside this chunk — depending on where the original newlines fell,
#      the trailing if/else model selection may be live code or commented out.
#      Reflowing it would silently change which statements execute.
#   2. The inline `# 模型参数` ("model parameters") comment, once collapsed onto
#      this single physical line, comments out everything after it — so in the
#      file's current state most of this line is dead text.
# What the text describes, if the obvious line breaks are restored: plot df1/df2
# on ax1/ax2 with per-epoch x ticks; load word-embedding weights from
# 'Model/checkpoint.pth2.tar' into an EmbeddingModel (w2v); then pick
# BiLSTM_Match or LSTM_Match based on USE_Bi and compare w2v.in_embed with the
# chosen model's word_embeddings. Recover the original line breaks from version
# control before editing.
df2 = pd.DataFrame(tempc, index=tempb, columns=['Test']) df1.plot(ax=ax1, kind='line', rot=360, grid='on') ax1.set_xticks(range(len(index_list))) ax1.set_xticklabels(range(len(index_list))) df2.plot(ax=ax2, kind='line', rot=360, grid='on') ax2.set_xticks(range(Epoch)) ax2.set_xticklabels(range(Epoch)) plt.show() """ USE_Bi=True """ w2v = EmbeddingModel(vocab_size, embedding_dim) checkpoint = torch.load('Model/checkpoint.pth2.tar') w2v.load_state_dict(checkpoint['state_dict']) # 模型参数 print(w2v.state_dict()["in_embed.weight"]) """ if USE_Bi: print("Using BiLSTM") model = BiLSTM_Match(w2v,embedding_dim, hidden_dim, vocab_size, target, Batchsize, stringlen) model_path = "./Model/BiLSTMmodel.pth" else: print("Using LSTM") model = LSTM_Match(embedding_dim, hidden_dim, vocab_size,target,Batchsize,stringlen) model_path = "./Model/LSTMmodel.pth" print(w2v.in_embed==model.word_embeddings)