Esempio n. 1
0
def eval_model(db: FeverDocDB, args) -> Model:
    """Evaluate an archived FEVER model on the dataset named by ``args.in_file``.

    Parameters
    ----------
    db : ``FeverDocDB``
        Evidence database handed to the ``FEVERReader``.
    args : argparse-style namespace
        Must provide ``archive_file``, ``cuda_device``, ``in_file`` and
        ``log`` (``None`` disables per-instance JSON logging).

    Returns
    -------
    The loaded model; accuracy / classification-report / confusion-matrix
    are printed when gold labels are present in the data.
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device)

    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(
                             ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(
                             ds_params.pop('claim_tokenizer', {})),
                         token_indexers=TokenIndexer.dict_from_params(
                             ds_params.pop('token_indexers', {})))

    logger.info("Reading training data from %s", args.in_file)
    data = reader.read(args.in_file).instances

    actual = []
    predicted = []

    # Plain write mode is sufficient (the log is never read back here), and
    # try/finally guarantees the handle is closed even if prediction raises —
    # the original opened "w+" and leaked the handle on any exception.
    log_file = open(args.log, "w") if args.log is not None else None
    try:
        for item in tqdm(data):
            # Instances with no retrieved evidence cannot be classified.
            if item.fields["premise"] is None or item.fields[
                    "premise"].sequence_length() == 0:
                cls = "NOT ENOUGH INFO"
            else:
                prediction = model.forward_on_instance(item, args.cuda_device)
                cls = model.vocab._index_to_token["labels"][np.argmax(
                    prediction["label_probs"])]

            if "label" in item.fields:
                actual.append(item.fields["label"].label)
            predicted.append(cls)

            if log_file is not None:
                if "label" in item.fields:
                    log_file.write(
                        json.dumps({
                            "actual": item.fields["label"].label,
                            "predicted": cls
                        }) + "\n")
                else:
                    log_file.write(json.dumps({"predicted": cls}) + "\n")
    finally:
        if log_file is not None:
            log_file.close()

    # Metrics are only meaningful when gold labels were available.
    if len(actual) > 0:
        print(accuracy_score(actual, predicted))
        print(classification_report(actual, predicted))
        print(confusion_matrix(actual, predicted))

    return model
Esempio n. 2
0
 def from_params(cls, params: Params):
     """
     Build a ``SquadSentenceSelectionReader`` from a ``Params`` object.

     Parameters
     ----------
     squad_filename : ``str``
     negative_sentence_selection : ``str``, optional (default=``"paragraph"``)
     tokenizer : ``Params``, optional
     token_indexers: ``List[Params]``, optional
     """
     selection = params.pop('negative_sentence_selection', 'paragraph')
     tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
     indexer_specs = params.pop('token_indexers', Params({}))
     token_indexers = {
         key: TokenIndexer.from_params(spec)
         for key, spec in indexer_specs.items()
     }
     # The reader class carries its own defaults, so an empty mapping is
     # signalled to it with ``None``.
     if not token_indexers:
         token_indexers = None
     params.assert_empty(cls.__name__)
     return SquadSentenceSelectionReader(
         negative_sentence_selection=selection,
         tokenizer=tokenizer,
         token_indexers=token_indexers)
Esempio n. 3
0
def tokenizer_dict_from_params(
        params: Params) -> 'Dict[str, Tokenizer]':  # type: ignore
    """
    Read a ``Params`` object of the shape ``{"name" -> {tokenizer_params}}``
    and build one ``Tokenizer`` per named entry, returning a dictionary
    suitable for use in a ``TextField``.

    Callers typically handle defaults by checking for ``None``, so when the
    given ``params`` specify no tokenizers at all this returns ``None``
    rather than an empty dictionary.
    """
    built = {
        name: Tokenizer.from_params(sub_params)
        for name, sub_params in params.items()
    }
    # An empty dict is falsy, so `or None` maps {} -> None and leaves any
    # non-empty mapping untouched.
    return built or None
Esempio n. 4
0
 def from_params(cls, params: Params):
     """
     Build an ``SnliReader`` from a ``Params`` object.

     Parameters
     ----------
     filename : ``str``
     tokenizer : ``Params``, optional
     token_indexers: ``List[Params]``, optional
     """
     tokenizer = Tokenizer.from_params(params.pop('tokenizer', {}))
     indexer_specs = params.pop('token_indexers', Params({}))
     token_indexers = {
         key: TokenIndexer.from_params(spec)
         for key, spec in indexer_specs.items()
     }
     # The reader class carries its own defaults, so an empty mapping is
     # signalled to it with ``None``.
     if not token_indexers:
         token_indexers = None
     params.assert_empty(cls.__name__)
     return SnliReader(tokenizer=tokenizer, token_indexers=token_indexers)
Esempio n. 5
0
    def test_no_constructor(self):
        """A spec containing only a ``type`` key builds without extra args."""
        Tokenizer.from_params(Params({"type": "just_spaces"}))
Esempio n. 6
0
def train_model(db: FeverDocDB, params: Union[Params, Dict[str, Any]],
                cuda_device: int, serialization_dir: str,
                filtering: str) -> Model:
    """
    This function can be used as an entry point to running models in AllenNLP
    directly from a JSON specification using a :class:`Driver`. Note that if
    you care about reproducibility, you should avoid running code using Pytorch
    or numpy which affect the reproducibility of your experiment before you
    import and use this function, these libraries rely on random seeds which
    can be set in this function via a JSON specification file. Note that this
    function performs training and will also evaluate the trained model on
    development and test sets if provided in the parameter json.

    Parameters
    ----------
    db: FeverDocDB, required.
        Evidence database passed through to the ``FEVERReader``.
    params: Params, required.
        A parameter object specifying an AllenNLP Experiment.
    cuda_device: int, required.
        When not ``None``, overrides the ``cuda_device`` entry of the
        trainer section in ``params``.
    serialization_dir: str, required
        The directory in which to save results and logs.
    filtering: str, required.
        Filtering mode forwarded to the ``FEVERReader``.
    """

    # Fix random seeds before any data reading / model construction.
    SimpleRandom.set_seeds()

    os.makedirs(serialization_dir, exist_ok=True)
    # Mirror stdout/stderr into log files inside the serialization directory
    # (note: replaces the process-wide streams for the rest of the run).
    sys.stdout = TeeLogger(os.path.join(serialization_dir, "stdout.log"),
                           sys.stdout)  # type: ignore
    sys.stderr = TeeLogger(os.path.join(serialization_dir, "stderr.log"),
                           sys.stderr)  # type: ignore
    handler = logging.FileHandler(
        os.path.join(serialization_dir, "python_logging.log"))
    handler.setLevel(logging.INFO)
    handler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(levelname)s - %(name)s - %(message)s'))
    logging.getLogger().addHandler(handler)
    # Persist the full experiment specification for reproducibility.
    serialization_params = deepcopy(params).as_dict(quiet=True)

    with open(os.path.join(serialization_dir, "model_params.json"),
              "w") as param_file:
        json.dump(serialization_params, param_file, indent=4)

    # Now we begin assembling the required parts for the Trainer.
    ds_params = params.pop('dataset_reader', {})
    dataset_reader = FEVERReader(db,
                                 sentence_level=ds_params.pop(
                                     "sentence_level", False),
                                 wiki_tokenizer=Tokenizer.from_params(
                                     ds_params.pop('wiki_tokenizer', {})),
                                 claim_tokenizer=Tokenizer.from_params(
                                     ds_params.pop('claim_tokenizer', {})),
                                 token_indexers=TokenIndexer.dict_from_params(
                                     ds_params.pop('token_indexers', {})),
                                 filtering=filtering)

    train_data_path = params.pop('train_data_path')
    logger.info("Reading training data from %s", train_data_path)
    train_data = dataset_reader.read(train_data_path)

    all_datasets = [train_data]
    datasets_in_vocab = ["train"]

    # Validation data is optional; when present it also contributes to the
    # vocabulary built below.
    validation_data_path = params.pop('validation_data_path', None)
    if validation_data_path is not None:
        logger.info("Reading validation data from %s", validation_data_path)
        validation_data = dataset_reader.read(validation_data_path)
        all_datasets.append(validation_data)
        datasets_in_vocab.append("validation")
    else:
        validation_data = None

    logger.info("Creating a vocabulary using %s data.",
                ", ".join(datasets_in_vocab))
    vocab = Vocabulary.from_params(
        params.pop("vocabulary", {}),
        Dataset([
            instance for dataset in all_datasets
            for instance in dataset.instances
        ]))
    vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))

    model = Model.from_params(vocab, params.pop('model'))
    iterator = DataIterator.from_params(params.pop("iterator"))

    # Index every instance against the freshly built vocabulary.
    train_data.index_instances(vocab)
    if validation_data:
        validation_data.index_instances(vocab)

    trainer_params = params.pop("trainer")
    # A cuda_device given on the command line wins over the specification.
    if cuda_device is not None:
        trainer_params["cuda_device"] = cuda_device
    trainer = Trainer.from_params(model, serialization_dir, iterator,
                                  train_data, validation_data, trainer_params)

    trainer.train()

    # Now tar up results
    archive_model(serialization_dir)

    return model
Esempio n. 7
0
def eval_model_fnc_data(db: FeverDocDB, args, mithun_logger,
                        name_of_trained_model_to_use,
                        path_to_trained_models_folder, cuda_device, operation,
                        path_to_fnc_annotated_data) -> Model:
    """Evaluate a trained FEVER model on pyprocessors-annotated FNC data.

    Loads the archived model, reads annotated FNC data through
    ``reader.read_annotated_fnc_and_do_ner_replacement``, dumps the resulting
    instances to ``fever_dev_dataset_format.pkl``, predicts a label per
    instance (counting branch hits in ``if_ctr``/``else_ctr`` and per-label
    totals in ``pred_dict``), and prints accuracy metrics when gold labels
    are present. Returns the loaded model.
    """

    print("got inside eval_model_fnc_data")
    archive = load_archive(
        path_to_trained_models_folder + name_of_trained_model_to_use,
        cuda_device)
    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(
                             ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(
                             ds_params.pop('claim_tokenizer', {})),
                         token_indexers=TokenIndexer.dict_from_params(
                             ds_params.pop('token_indexers', {})))

    # On-the-fly annotation with pyprocessors (NER/POS tagging) is very slow,
    # so it is done once offline and loaded from disk: keep this False.
    do_annotation_live = False

    data = reader.read_annotated_fnc_and_do_ner_replacement(
        args, operation, do_annotation_live, mithun_logger,
        path_to_fnc_annotated_data).instances
    # Cache the instances so later runs can skip the expensive read above.
    joblib.dump(data, "fever_dev_dataset_format.pkl")

    actual = []
    predicted = []

    if_ctr, else_ctr = 0, 0
    pred_dict = defaultdict(int)

    # Plain write mode is sufficient (the log is never read back), and
    # try/finally guarantees the handle is closed even if prediction fails —
    # the original opened "w+" and leaked the handle on any exception.
    log_file = open(args.log, "w") if args.log is not None else None
    try:
        for item in tqdm(data):
            # Instances with no retrieved evidence cannot be classified.
            if item.fields["premise"] is None or item.fields[
                    "premise"].sequence_length() == 0:
                cls = "NOT ENOUGH INFO"
                if_ctr += 1
            else:
                else_ctr += 1

                prediction = model.forward_on_instance(item, args.cuda_device)
                cls = model.vocab._index_to_token["labels"][np.argmax(
                    prediction["label_probs"])]

            if "label" in item.fields:
                actual.append(item.fields["label"].label)
            predicted.append(cls)
            pred_dict[cls] += 1

            if log_file is not None:
                if "label" in item.fields:
                    log_file.write(
                        json.dumps({
                            "actual": item.fields["label"].label,
                            "predicted": cls
                        }) + "\n")
                else:
                    log_file.write(json.dumps({"predicted": cls}) + "\n")
    finally:
        if log_file is not None:
            log_file.close()

    print(f'if_ctr = {if_ctr}')
    print(f'else_ctr = {else_ctr}')
    print(f'pred_dict = {pred_dict}')

    if len(actual) > 0:
        print(accuracy_score(actual, predicted))
        print(classification_report(actual, predicted))
        print(confusion_matrix(actual, predicted))

    return model
Esempio n. 8
0
def eval_model(db: FeverDocDB, args) -> Model:
    """Interactive demo: retrieve evidence for a typed claim and classify it.

    Repeatedly prompts for a claim (``q`` quits), retrieves the 5 closest
    pages with a TF-IDF ranker, scores individual sentences against the
    claim, prints the top-5 evidence sentences, and prints the model's
    predicted label for (evidence, claim).

    Parameters
    ----------
    db : ``FeverDocDB``
        Evidence database used both by the reader and for page lookups.
    args : argparse-style namespace
        Must provide ``archive_file``, ``cuda_device``, ``overrides`` and
        ``model`` (path to the TF-IDF retrieval model).
    """
    archive = load_archive(args.archive_file,
                           cuda_device=args.cuda_device,
                           overrides=args.overrides)

    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(
                             ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(
                             ds_params.pop('claim_tokenizer', {})),
                         token_indexers=TokenIndexer.dict_from_params(
                             ds_params.pop('token_indexers', {})))

    # The TF-IDF ranker is loop-invariant: build it once instead of reloading
    # the retrieval model for every claim that is entered.
    ranker = retriever.get_class('tfidf')(tfidf_path=args.model)

    while True:

        claim = input("enter claim (or q to quit) >>")
        if claim.lower() == "q":
            break

        p_lines = []
        pages, _ = ranker.closest_docs(claim, 5)

        for page in pages:
            doc_lines = db.get_doc_lines(page)
            lines = []
            for line in doc_lines.split("\n"):
                fields = line.split("\t")
                # Lines without a TAB-separated text field used to raise
                # IndexError; treat them as empty sentences instead.
                if len(fields) > 1 and len(fields[1]) > 1:
                    lines.append(fields[1])
                else:
                    lines.append("")

            p_lines.extend(zip(lines, [page] * len(lines), range(len(lines))))

        scores = tf_idf_sim(claim, [pl[0] for pl in p_lines])
        scores = list(
            zip(scores, [pl[1] for pl in p_lines], [pl[2] for pl in p_lines],
                [pl[0] for pl in p_lines]))
        # Drop empty sentences, then rank best-first by similarity score.
        scores = list(filter(lambda score: len(score[3].strip()), scores))
        sentences_l = list(
            sorted(scores, reverse=True, key=lambda elem: elem[0]))

        sentences = [s[3] for s in sentences_l[:5]]
        evidence = " ".join(sentences)

        print("Best pages: {0}".format(repr(pages)))

        print("Evidence:")
        for idx, sentence in enumerate(sentences_l[:5]):
            print("{0}\t{1}\t\t{2}\t{3}".format(idx + 1, sentence[0],
                                                sentence[1], sentence[3]))

        item = reader.text_to_instance(evidence, claim)

        prediction = model.forward_on_instance(item, args.cuda_device)
        cls = model.vocab._index_to_token["labels"][np.argmax(
            prediction["label_probs"])]
        print("PREDICTED: {0}".format(cls))
        print()
Esempio n. 9
0
 def from_params(cls, params):
     """Build a ``TokenCharactersIndexer`` whose character tokenizer is
     replaced by the one described in ``params['tokenizer']``."""
     # Pop the tokenizer spec first so the remaining params are valid for
     # the base TokenCharactersIndexer constructor.
     char_tokenizer = Tokenizer.from_params(params.pop("tokenizer"))
     indexer = TokenCharactersIndexer.from_params(params)
     indexer._character_tokenizer = char_tokenizer
     return indexer
Esempio n. 10
0
    logger.info("Init Retriever")
    evidence_retriever = EvidenceRetrieval(db, args.drqa_model, args.max_page,
                                           args.max_sent)

    config = archive.config
    ds_params = config["dataset_reader"]
    model = archive.model
    model.eval()

    reader = FEVERReader(
        db,
        sentence_level=ds_params.pop("sentence_level", False),
        wiki_tokenizer=Tokenizer.from_params(
            ds_params.pop('wiki_tokenizer',
                          {"word_splitter": {
                              "type": "indexed_spaces"
                          }})),
        claim_tokenizer=Tokenizer.from_params(
            ds_params.pop('claim_tokenizer',
                          {"word_splitter": {
                              "type": "indexed_spaces"
                          }})),
        token_indexers=TokenIndexer.dict_from_params(
            ds_params.pop('token_indexers',
                          {'tokens': SingleIdTokenIndexer()})))

    print("")
    print("")
    print("")
    while True:
Esempio n. 11
0
def eval_model(db: FeverDocDB, args) -> Model:
    """Evaluate an archived FEVER model with optional NER-missing handling.

    Beyond the plain evaluation loop, ``args.ner_missing`` enables post-hoc
    corrections for instances whose metadata flags ``ner_missing``:

    * ``'oracle'`` – overwrite a wrong prediction with the gold label.
    * ``'naive'``  – for a SUPPORTS prediction, demote the top probability
      and fall back to the runner-up class when its logit is within 3.0
      of the original winner.

    Parameters
    ----------
    db : ``FeverDocDB``
        Evidence database handed to the ``FEVERReader``.
    args : argparse-style namespace
        Must provide ``archive_file``, ``cuda_device``, ``in_file``,
        ``log``, ``ner_facts`` and ``ner_missing``.
    """
    archive = load_archive(args.archive_file, cuda_device=args.cuda_device)

    config = archive.config
    ds_params = config["dataset_reader"]

    model = archive.model
    model.eval()

    reader = FEVERReader(db,
                         sentence_level=ds_params.pop("sentence_level", False),
                         wiki_tokenizer=Tokenizer.from_params(
                             ds_params.pop('wiki_tokenizer', {})),
                         claim_tokenizer=Tokenizer.from_params(
                             ds_params.pop('claim_tokenizer', {})),
                         token_indexers=FEVERReader.custom_dict_from_params(
                             ds_params.pop('token_indexers', {})),
                         ner_facts=args.ner_facts)

    logger.info("Reading training data from %s", args.in_file)
    data = reader.read(args.in_file)

    actual = []
    predicted = []

    # Plain write mode plus try/finally: the log file is closed even when an
    # exception interrupts the loop (the original opened "w+" and leaked it).
    log_file = open(args.log, "w") if args.log is not None else None
    try:
        for item in tqdm(data):
            # Instances with no retrieved evidence cannot be classified.
            if item.fields["premise"] is None or item.fields[
                    "premise"].sequence_length() == 0:
                cls = "NOT ENOUGH INFO"
            else:
                prediction = model.forward_on_instance(item)
                cls = model.vocab._index_to_token["labels"][np.argmax(
                    prediction["label_probs"])]

            if "label" in item.fields:
                gold = item.fields["label"].label
                actual.append(gold)
                if args.ner_missing is not None:
                    # The three original 'oracle' branches differed only in
                    # the gold label they compared against, so they collapse
                    # into a single equivalent check.
                    if (args.ner_missing == 'oracle'
                            and gold in ("NOT ENOUGH INFO", "SUPPORTS",
                                         "REFUTES")
                            and cls != gold
                            and item.fields["metadata"].metadata["ner_missing"]):
                        cls = gold

                    if args.ner_missing == 'naive' and cls == 'SUPPORTS':
                        # 'SUPPORTS' can only come from the prediction branch
                        # above, so `prediction` is guaranteed to be defined.
                        if item.fields["metadata"].metadata["ner_missing"]:
                            highest = np.argmax(prediction["label_probs"])
                            lowest = np.argmin(prediction["label_probs"])
                            # Demote the winning class to the lowest
                            # probability so argmax picks the runner-up.
                            demoted = list(prediction["label_probs"])
                            demoted[highest] = prediction["label_probs"][lowest]

                            original_logits = prediction["label_logits"][
                                highest]
                            chosen_logits = prediction["label_logits"][
                                np.argmax(demoted)]
                            difference_logits = original_logits - chosen_logits

                            # Only switch labels when the runner-up is close.
                            if difference_logits < 3.0:
                                cls = model.vocab._index_to_token["labels"][
                                    np.argmax(demoted)]

            predicted.append(cls)

            if log_file is not None:
                if "label" in item.fields:
                    log_file.write(
                        json.dumps({
                            "actual": item.fields["label"].label,
                            "predicted": cls
                        }) + "\n")
                else:
                    log_file.write(json.dumps({"predicted": cls}) + "\n")
    finally:
        if log_file is not None:
            log_file.close()

    if len(actual) > 0:
        print(accuracy_score(actual, predicted))
        print(classification_report(actual, predicted))
        print(confusion_matrix(actual, predicted))

    return model
                FeverDocDB, path_to_trained_models, mithun_logger, cuda_device,
                path_to_pyproc_annotated_data_folder)

        db = FeverDocDB(path_to_saved_db)
        archive = load_archive(
            path_to_trained_models_folder + name_of_trained_model_to_use,
            cuda_device)
        config = archive.config
        ds_params = config["dataset_reader"]
        model = archive.model
        model.eval()
        mithun_logger.info(f"going to initiate FEVERReaderUofa.")
        fever_reader = FEVERReaderUofa(
            db,
            sentence_level=ds_params.pop("sentence_level", False),
            wiki_tokenizer=Tokenizer.from_params(
                ds_params.pop('wiki_tokenizer', {})),
            claim_tokenizer=Tokenizer.from_params(
                ds_params.pop('claim_tokenizer', {})),
            token_indexers=TokenIndexer.dict_from_params(
                ds_params.pop('token_indexers', {})))

        cwd = os.getcwd()
        mithun_logger.info(f"going to start reading data.")
        zipped_annotated_data, length_data = fever_reader.read(
            mithun_logger, cwd + path_to_pyproc_annotated_data_folder)

        mithun_logger.info(
            f"done with reading data. going to generate features.")

        data = None
        for feature in features: