"""

import sys
import json
import os
from collections import defaultdict
from entity_align.utils.Config import Config

if __name__ == "__main__":
    file_of_scores = sys.argv[1]
    only_best = len(sys.argv) > 2 and sys.argv[2] == "True"
    score_objs = []
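    # Each input line is one JSON record; judging from the keys read below, it is
    # shaped roughly like (values illustrative):
    # {"config": {...}, "map": 0.83, "hits_at_1": 0.70, "hits_at_10": 0.91, "hits_at_50": 0.95}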
    with open(file_of_scores, 'r') as fin:
        for line in fin:
            js = json.loads(line.strip())
            c = Config()
            c.__dict__ = js['config']
            js['config'] = c
            score_objs.append(js)
    for js in score_objs:
        print("{}\t{}\t{}\t{}".format(js['config'].model_name,
                                      js['config'].dataset_name, "MAP",
                                      js['map']))
        print("{}\t{}\t{}\t{}".format(js['config'].model_name,
                                      js['config'].dataset_name, "HITS@1",
                                      js['hits_at_1']))
        print("{}\t{}\t{}\t{}".format(js['config'].model_name,
                                      js['config'].dataset_name, "HITS@10",
                                      js['hits_at_10']))
        print("{}\t{}\t{}\t{}".format(js['config'].model_name,
                                      js['config'].dataset_name, "HITS@50",
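

# Fragment of train_model: tail of the training loop, where dev scores are
# written to disk and the model is checkpointed whenever dev MAP improves.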
            with open(
                    os.path.join(output_dir,
                                 "dev.scores.{}.tsv".format(counter)),
                    "w") as fout:
                fout.write(scores)
            if map_score > best_map:
                print("New best MAP!")
                print("Saving Model.....")
                torch.save(
                    model,
                    os.path.join(
                        output_dir,
                        "model_{}_{}_{}.torch".format(config.model_name,
                                                      config.dataset_name,
                                                      counter),
                    ),
                )
                best_map = map_score
            sys.stdout.flush()
        if counter == config.num_minibatches:
            break


if __name__ == "__main__":

    # Set up the config
    config = Config(sys.argv[1])
    dataset_name = sys.argv[2]
    model_name = sys.argv[3]
    train_model(config, dataset_name, model_name)
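

# Test-set evaluation script: load a trained model, write predictions for the
# test split, and report MAP and Hits@K.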
import os
import sys

import torch

from entity_align.eval.Predict import write_predictions
from entity_align.model.Vocab import Vocab
from entity_align.utils.Config import Config
from entity_align.utils.DevTestBatcher import TestBatcher
from entity_align.eval.EvalHitsAtK import eval_hits_at_k_file
from entity_align.eval.EvalMap import eval_map_file
from entity_align.utils.Util import save_dict_to_json

if __name__ == "__main__":
    configfile = sys.argv[1]
    modelfile = sys.argv[2]

    config = Config(configfile)
    vocab = Vocab(config.vocab_file, config.max_string_len)
    model = torch.load(modelfile).cuda()
    test_batcher = TestBatcher(config, vocab)
    prediction_filename = os.path.join(config.experiment_out_dir, "test.predictions")
    write_predictions(model, test_batcher, prediction_filename)

    # score
    scores = ""
    map_score = float(eval_map_file(prediction_filename))
    hits_at_1 = float(eval_hits_at_k_file(prediction_filename, 1))
    hits_at_10 = float(eval_hits_at_k_file(prediction_filename, 10))
    hits_at_50 = float(eval_hits_at_k_file(prediction_filename, 50))
    scores += "{}\t{}\t{}\tMAP\t{}\n".format(config.model_name, config.dataset_name, "TEST", map_score)
    scores += "{}\t{}\t{}\tHits@1\t{}\n".format(config.model_name, config.dataset_name, "TEST", hits_at_1)
    scores += "{}\t{}\t{}\tHits@10\t{}\n".format(config.model_name, config.dataset_name, "TEST", hits_at_10)
    def copy_config(self):
        c = Config()
        c.__dict__ = self.__dict__.copy()
        return c
Example 5
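        # Tail of load_data (head elided): pack the token ids and lengths into numpy arrays.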
        sources = np.asarray(sources)
        targets = np.asarray(targets)
        sources_lengths = np.asarray(sources_lengths)
        targets_lengths = np.asarray(targets_lengths)
        return sources, targets, sources_lengths, targets_lengths

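# Match-matrix inspection script. This fragment assumes the original file's
# top-level imports (argparse, numpy as np, torch, Variable from
# torch.autograd) plus Config, Vocab, and the load_data above, all elided here.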
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--test_file", required=True)
    parser.add_argument("-m", "--model_path", default=True)
    parser.add_argument("-v", "--vocab", default=True)
    parser.add_argument("-c", "--config", default=True)

    args = parser.parse_args()
    tfp = args.test_file
    config = Config(args.config)
    vocab = Vocab(args.vocab, config.max_string_len)
    sources, targets, sources_lengths, targets_lengths = load_data(tfp, vocab, config)
    num_directions = 2 if config.bidirectional else 1
    model = torch.load(args.model_path)
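    # Fresh zero initial hidden/cell states for a single-layer (bi)LSTM,
    # shaped (num_directions, batch_size, rnn_hidden_size).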
    model.h0_dev = Variable(torch.zeros(num_directions, len(sources), config.rnn_hidden_size).cuda(), requires_grad=False)
    model.c0_dev = Variable(torch.zeros(num_directions, len(sources), config.rnn_hidden_size).cuda(), requires_grad=False)
    scores = model.print_mm(sources, targets, sources_lengths, targets_lengths).cpu().data.numpy()
    max_scores = np.max(scores)
    min_scores = np.min(scores)
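    # Pin the global max/min just past each source's true length, presumably so
    # a later heatmap of the match matrix uses a consistent color scale per row.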
    for idx in range(0, len(scores)):
        scores[idx][sources_lengths[idx]] = max_scores
        scores[idx][sources_lengths[idx] + 1] = min_scores