# Evaluation loop: load trained weights, then for every example embed both
# sentences, build the InferSent-style feature vector (u, v, |u-v|, u*v),
# and run the classifier.
model.load_state_dict(torch.load(PATH, map_location=device))
model = model.to(device)
print(model)
for i in range(len(labels)):
    s1_embed, s1_len = get_batch_from_idx(s1[i].split(), embeddings, config)
    s2_embed, s2_len = get_batch_from_idx(s2[i].split(), embeddings, config)
    u = torch.sum(s1_embed, 0).to(device)
    # BUG FIX: v must be the sum of the SECOND sentence's embeddings; the
    # original summed s1_embed twice, leaving s2_embed computed but unused.
    v = torch.sum(s2_embed, 0).to(device)
    feats = torch.cat((u, v, torch.abs(u - v), u * v), 0).to(device)
    with torch.no_grad():
        out = model(feats).to(device)
    # argmax over the class dimension -> predicted label index
    pred = torch.max(out, 0)[1]
    # BUG FIX: the loop iterates over `labels`, but the original read the
    # undefined name `label[i]`. The source was also truncated mid-statement
    # here ("... tot1 += 1 if label"); completed as a per-class counter.
    # NOTE(review): confirm the truncated tail's intent against the original
    # script — it may have also counted other classes or correct predictions.
    if labels[i] == 0:
        tot1 += 1
# Model selection: instantiate the requested architecture, load its trained
# weights from FLAGS.model_path, and (for 'base') run a single forward pass
# on the pre-computed sentence embeddings.
print("\n\n Hi I am the " + str(FLAGS.model_name) + " model...!!")
print("\n Hhhhmmmmm....lemme think...\n")
if config['model_name'] == 'base':
    model = Classifier(config).to(device)
    PATH = FLAGS.model_path
    model.load_state_dict(torch.load(PATH, map_location=device))
    model = model.to(device)
    u = torch.sum(s1_embed, 0).to(device)
    # BUG FIX: v must come from the second sentence's embeddings; the
    # original summed s1_embed twice (same defect as the evaluation loop).
    v = torch.sum(s2_embed, 0).to(device)
    feats = torch.cat((u, v, torch.abs(u - v), u * v), 0).to(device)
    with torch.no_grad():
        out = model(feats).to(device)
    pred = torch.max(out, 0)[1]
else:
    # Every LSTM variant loads from the same flag: the original if/elif
    # chain assigned the identical PATH in each branch. As before, an
    # unrecognized model_name leaves PATH unset (NameError on load).
    if config['model_name'] in ('lstm', 'bilstm', 'bilstm_pool'):
        PATH = FLAGS.model_path
    model = LSTM_main(config).to(device)
    model.load_state_dict(torch.load(PATH, map_location=device))
    model = model.to(device)