def evaluate_conv(args, unique_entities):
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    model_conv = nn.DataParallel(model_conv)
    if CUDA:
        model_conv.load_state_dict(torch.load(
            '{0}conv/trained_{1}.pth'.format(args.output_folder, args.epochs_conv - 1)))
        model_conv.cuda()
    else:
        model_conv.load_state_dict(torch.load(
            '{0}conv/trained_{1}.pth'.format(args.output_folder, args.epochs_conv - 1),
            map_location=torch.device('cpu')))
    model_conv.eval()
    with torch.no_grad():
        # original code is get_validation_pred
        if isinstance(model_conv, nn.DataParallel):
            Corpus_.get_validation_pred_relation(args, model_conv.module, unique_entities)
        else:
            Corpus_.get_validation_pred_relation(args, model_conv, unique_entities)
        if isinstance(model_conv, nn.DataParallel):
            Corpus_.get_validation_pred(args, model_conv.module, unique_entities)
        else:
            Corpus_.get_validation_pred(args, model_conv, unique_entities)
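
# Because model_conv is wrapped in nn.DataParallel just above, the isinstance
# checks there always take the .module branch. A minimal sketch (not from the
# original source; the name unwrap_data_parallel is hypothetical) of the usual
# unwrapping helper:
import torch.nn as nn

def unwrap_data_parallel(model):
    # nn.DataParallel proxies forward() but not custom methods or attributes,
    # so code that needs them must reach through .module.
    return model.module if isinstance(model, nn.DataParallel) else model
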
def evaluate_conv(args, unique_entities, load_model):
    CUDA = torch.cuda.is_available()
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    model_conv = nn.DataParallel(model_conv)
    if CUDA:
        model_conv.load_state_dict(torch.load(load_model))
        model_conv.cuda()
    else:
        # map_location is an argument of torch.load, not load_state_dict
        model_conv.load_state_dict(
            torch.load(load_model, map_location=torch.device('cpu')))
    model_conv.eval()
    with torch.no_grad():
        if isinstance(model_conv, nn.DataParallel):
            Corpus_.get_validation_pred_relation(args, model_conv.module, unique_entities)
        else:
            Corpus_.get_validation_pred_relation(args, model_conv, unique_entities)
        if isinstance(model_conv, nn.DataParallel):
            Corpus_.get_validation_pred(args, model_conv.module, unique_entities)
        else:
            Corpus_.get_validation_pred(args, model_conv, unique_entities)
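
# A minimal sketch of the device-agnostic loading pattern used above (the
# helper name load_checkpoint is hypothetical; the checkpoint is assumed to be
# a plain state dict saved with torch.save). Note again that map_location
# belongs to torch.load, not to load_state_dict.
import torch

def load_checkpoint(model, path, use_cuda):
    # Map GPU-saved tensors onto whichever device is actually available.
    device = torch.device('cuda' if use_cuda else 'cpu')
    model.load_state_dict(torch.load(path, map_location=device))
    return model.to(device)
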
def evaluate_conv(args, unique_entities_train, unique_entities_test):
    global initial_entity_emb_params
    global entity_embeddings
    model_gat = SpKBGATModified(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                args.drop_GAT, args.alpha, args.nheads_GAT,
                                initial_entity_emb_params)
    model_gat.load_state_dict(
        torch.load('{0}trained_{1}.pth'.format(args.output_folder, 0)))
    model_entity_embedding = EntityEmbedding(
        initial_entity_emb_params['entity_embed_dim_in'],
        initial_entity_emb_params['hidden_dim_entity'],
        initial_entity_emb_params['num_encoder_layers_entity'],
        initial_entity_emb_params['is_bidirectional'],
        initial_entity_emb_params['drop_out_rate'],
        initial_entity_emb_params['hidden_dim_entity'],
        initial_entity_emb_params['entity_embed_dim_out'],
        initial_entity_emb_params['entity_conv_filter_size'],
        initial_entity_emb_params['word_vocab'],
        initial_entity_emb_params['word_embed_dim'],
        initial_entity_emb_params['char_embed_dim'],
        initial_entity_emb_params['word_embed_matrix'],
        initial_entity_emb_params['char_feature_size'],
        initial_entity_emb_params['conv_filter_size'],
        initial_entity_emb_params['max_word_len_entity'],
        initial_entity_emb_params['char_vocab'])
    model_entity_embedding.load_state_dict(
        torch.load('{0}{1}/trained_{2}.pth'.format(args.output_folder, 'entity_embeddings', 0)))
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    model_conv.load_state_dict(
        torch.load('{0}conv/trained_{1}.pth'.format(args.output_folder, 0)))
    if CUDA:
        model_conv.cuda()
        model_gat.cuda()
        model_entity_embedding.cuda()
    model_conv.eval()
    model_gat.eval()
    model_entity_embedding.eval()
    with torch.no_grad():
        Corpus_.get_validation_cnfmat(args, model_gat, model_entity_embedding, model_conv,
                                      unique_entities_train, unique_entities_test,
                                      reuse=False, gat_only=False)
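
# The load / cuda / eval sequence above repeats once per model. A minimal
# sketch (helper name prepare_for_eval is hypothetical) of factoring that
# pattern out:
import torch

def prepare_for_eval(model, checkpoint_path, use_cuda):
    # map_location lets CPU-only hosts read checkpoints saved on GPU.
    map_location = None if use_cuda else torch.device('cpu')
    model.load_state_dict(torch.load(checkpoint_path, map_location=map_location))
    if use_cuda:
        model.cuda()
    model.eval()
    return model
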
def evaluate_conv(args, unique_entities):
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    model_conv.load_state_dict(torch.load(
        '{0}conv/trained_{1}.pth'.format(args.output_folder, args.epochs_conv - 1)))
    model_conv.cuda()
    model_conv.eval()
    with torch.no_grad():
        Corpus_.get_validation_pred(args, model_conv, unique_entities)
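
# Hypothetical driver for the variant above. parse_args and the Corpus_
# attribute are assumptions of this sketch; in this code base the corpus and
# embedding tensors are prepared before evaluate_conv is called.
def run_evaluation():
    args = parse_args()                              # hypothetical argument parser
    unique_entities = Corpus_.unique_entities_train  # assumed corpus attribute
    evaluate_conv(args, unique_entities)
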
def evaluate_conv(args, unique_entities):
    model_conv = SpKBGATConvOnly(entity_embeddings, relation_embeddings, args.entity_out_dim, args.entity_out_dim,
                                 args.drop_GAT, args.drop_conv, args.alpha, args.alpha_conv,
                                 args.nheads_GAT, args.out_channels)
    model_conv.load_state_dict(
        torch.load('{0}conv/trained_{1}.pth'.format(args.output_folder, args.epochs_conv - 1)))
    model_conv.cuda()
    model_conv.eval()
    with torch.no_grad():
        preds = Corpus_.get_validation_pred(args, model_conv, unique_entities)
        data_path = "/home/harsha1/Desktop/pLogicNet/data/wn18rr/"
        entity2id = read_entity_from_id(data_path + 'entity2id.txt')
        relation2id = read_relation_from_id(data_path + 'relation2id.txt')
        id2entity = {v: k for k, v in entity2id.items()}
        id2relation = {v: k for k, v in relation2id.items()}
        is_unweigted = False
        directed = True
        work_path = args.work_path
        cand_triples, train_adjacency_mat, unique_entities_test = load_data1(
            os.path.join(work_path, 'hidden.txt'), entity2id, relation2id, is_unweigted, directed)
        scores = infer_step(model_conv, cand_triples, args)
        with open(work_path + '/annotation.txt', 'w') as fo:
            for (h, r, t), s in zip(cand_triples, scores):
                fo.write('{}\t{}\t{}\t{}\n'.format(id2entity[h], id2relation[r], id2entity[t], s))
        with open(work_path + '/pred_kge.txt', 'w') as fo:
            print(len(preds))
            i = 0
            for value in preds:
                if len(value) == 3:
                    continue
                else:
                    i = i + 1
                    h, r, t, f, rk, l = value
                    fo.write('{}\t{}\t{}\t{}\t{}\n'.format(id2entity[h], id2relation[r], id2entity[t], f, rk))
                    # print(i)
                    for e, val in l:
                        fo.write('{}:{:.12f} '.format(id2entity[e], val))
                    fo.write('\n')
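
# read_entity_from_id / read_relation_from_id are called above but not defined
# in this section. A minimal sketch, assuming entity2id.txt stores one
# whitespace-separated "name id" pair per line (the common KB layout):
def read_entity_from_id(filename):
    entity2id = {}
    with open(filename, 'r') as f:
        for line in f:
            parts = line.strip().split()
            if len(parts) > 1:
                entity2id[parts[0]] = int(parts[1])
    return entity2id
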
def evaluate_conv(args, unique_entities):
    model_conv = SpKBGATConvOnly(
        initial_entity_emb=entity_embeddings,
        initial_relation_emb=relation_embeddings,
        entity_out_dim=args.entity_out_dim,
        relation_out_dim=args.entity_out_dim,
        drop_GAT=args.drop_GAT,
        drop_conv=args.drop_conv,
        alpha=args.alpha,
        alpha_conv=args.alpha_conv,
        nheads_GAT=args.nheads_GAT,
        conv_out_channels=args.out_channels,
    )
    model_conv.load_state_dict(
        torch.load("{0}conv/trained_{1}.pth".format(args.output_folder, args.epochs_conv - 1)),
        strict=False,
    )
    if CUDA:
        model_conv.cuda()
    model_conv.eval()
    with torch.no_grad():
        Corpus_.get_validation_pred(args, model_conv, unique_entities)
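
# strict=False above silently tolerates checkpoint/model key mismatches. A
# minimal sketch (helper name load_and_report is hypothetical) that surfaces
# them instead: load_state_dict returns the missing and unexpected keys.
import torch

def load_and_report(model, path):
    result = model.load_state_dict(torch.load(path, map_location='cpu'), strict=False)
    if result.missing_keys:
        print('missing keys:', result.missing_keys)
    if result.unexpected_keys:
        print('unexpected keys:', result.unexpected_keys)
    return model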