Example #1
0
def print_sparql_queries():
    """Print the SPARQL queries of evaluated candidates for a dataset.

    Reads the configuration named by --config, evaluates the hard-coded
    dataset and, for every candidate that has notable answer types, prints
    the candidate's graph and SPARQL query to stdout.

    NOTE(review): the --output option is parsed but never used; results
    always go to stdout. The dataset name is hard-coded below.
    """
    import argparse

    parser = argparse.ArgumentParser(description="Dump qa entity pairs.")
    parser.add_argument("--config",
                        default="config.cfg",
                        help="The configuration file to use.")
    parser.add_argument("--output",
                        help="The file to dump results to.")
    args = parser.parse_args()
    globals.read_configuration(args.config)
    scorer_globals.init()

    # Relax matching so more candidates survive translation.
    parameters = translator.TranslatorParameters()
    parameters.require_relation_match = False
    parameters.restrict_answer_type = False

    dataset = "webquestions_test_filter"

    sparql_backend = globals.get_sparql_backend(globals.config)
    queries = get_evaluated_queries(dataset, True, parameters)
    for index, query in enumerate(queries):
        print "--------------------------------------------"
        print query.utterance
        # Entities identified in the first candidate's underlying query.
        print "\n".join([str((entity.__class__, entity.entity)) for entity in query.eval_candidates[0].query_candidate.query.identified_entities])
        for eval_candidate in query.eval_candidates:
            query_candidate = eval_candidate.query_candidate
            # Candidates need a live backend to resolve notable types.
            query_candidate.sparql_backend = sparql_backend
            notable_types = query_candidate.get_answers_notable_types()
            if notable_types:
                print notable_types
                print query_candidate.graph_as_simple_string().encode("utf-8")
                print query_candidate.to_sparql_query().encode("utf-8")
                print "\n\n"
 def init_from_config():
     """Build a QueryTranslator wired together from the global configuration."""
     cfg = globals.config
     backend = globals.get_sparql_backend(cfg)
     extender = QueryCandidateExtender.init_from_config()
     linker = EntityLinker.init_from_config()
     nlp_parser = CoreNLPParser.init_from_config()
     scorer = ranker.SimpleScoreRanker('DefaultScorer')
     return QueryTranslator(backend, extender, linker, nlp_parser, scorer)
Example #3
0
 def init_from_config():
     """Create a QueryTranslator using settings from globals.config."""
     params = globals.config
     # Components are constructed in the same order as before.
     translator_parts = (
         globals.get_sparql_backend(params),
         QueryCandidateExtender.init_from_config(),
         EntityLinker.init_from_config(),
         CoreNLPParser.init_from_config(),
         ranker.SimpleScoreRanker('DefaultScorer'),
     )
     return QueryTranslator(*translator_parts)
Example #4
0
def init_from_config(args):
    """Initialise the module-level singletons from the global configuration.

    Populates w2v, sparql_backend, wiki_url, entity_linker, facts_ranker
    and facts_extractor. The ``args`` parameter is accepted for interface
    compatibility but is not used here.
    """
    global w2v, sparql_backend, entity_linker, facts_ranker, facts_extractor
    global wiki_url
    cfg = globals.config

    w2v = Word2Vec.init_from_config(cfg)
    sparql_backend = globals.get_sparql_backend(cfg)
    # wiki_url must exist before the entity linker, which consumes it.
    wiki_url = WikiUrl(cfg)
    entity_linker = EntityLinker.init_from_config(cfg, wiki_url)
    facts_ranker = Ranker.init_from_config(cfg)
    facts_extractor = FactExtractor.init_from_config(cfg)
Example #5
0
    def get_from_config(cls, config_params):
        """Construct a SparqlQueryTranslator from a parsed configuration.

        Optionally loads a pickled ngram -> notable-type NPMI model when the
        configured path exists; a read failure is logged and treated as
        "no model".
        """
        sparql_backend = globals.get_sparql_backend(config_params)
        query_extender = QueryCandidateExtender.init_from_config()
        entity_linker = globals.get_entity_linker()
        parser = globals.get_parser()
        scorer_obj = ranker.SimpleScoreRanker('DefaultScorer')
        npmi_path = config_params.get('QueryCandidateExtender', 'ngram-notable-types-npmi', '')
        npmi_threshold = float(config_params.get('QueryCandidateExtender', 'notable-types-npmi-threshold'))
        npmi_model = None
        if npmi_path and os.path.exists(npmi_path):
            import cPickle as pickle
            try:
                with open(npmi_path, 'rb') as model_file:
                    logger.info("Loading types model from disk...")
                    npmi_model = pickle.load(model_file)
            except IOError as exc:
                logger.error("Error reading types model: %s" % str(exc))
                npmi_model = None

        return SparqlQueryTranslator(sparql_backend, query_extender,
                                     entity_linker, parser, scorer_obj,
                                     npmi_model, npmi_threshold)
Example #6
0
import globals
import sys

if __name__ == "__main__":
    # Interactive SPARQL console: reads multi-line queries from stdin,
    # terminated by a line starting with "END", and prints the backend's
    # result. Loops forever; exit with Ctrl-C/EOF.
    import argparse

    parser = argparse.ArgumentParser(description="Dump qa entity pairs.")
    parser.add_argument("--config",
                        default="config.cfg",
                        help="The configuration file to use.")
    args = parser.parse_args()
    globals.read_configuration(args.config)
    sparql_backend = globals.get_sparql_backend(globals.config)
    while True:
        print "Please enter query: "
        query_str = ""
        while True:
            # Accumulate lines until a sentinel line beginning with "END".
            query_str_line = sys.stdin.readline()
            if query_str_line.startswith("END"):
                break
            query_str += query_str_line
        print sparql_backend.query(query_str)
 def init_from_config():
     """
     Build a QueryCandidateExtender from the global configuration.

     Resource file locations come from the 'QueryCandidateExtender' and
     'Alignment' sections of globals.config; each file is then parsed by
     the corresponding reader in the ``data`` module.
     :return: a fully wired QueryCandidateExtender instance.
     """
     cfg = globals.config

     def qce_opt(key):
         # Shorthand for options in the QueryCandidateExtender section.
         return cfg.get('QueryCandidateExtender', key)

     sparql_backend = globals.get_sparql_backend(cfg)
     relation_counts_file = qce_opt('relation-counts')
     mediator_names_file = qce_opt('mediator-names')
     reverse_relations_file = qce_opt('reverse-relations')
     expected_types_file = qce_opt('relation-expected-types')
     tt_distributions_file = qce_opt('relation-target-type-distributions')
     mediator_relations_file = qce_opt('mediator-relations')
     rel_lemmas_file = qce_opt('relation-lemmas')
     relation_words_file = qce_opt('relation-words')
     mediated_relation_words_file = qce_opt('mediated-relation-words')
     word_type_counts_file = qce_opt('word-type-counts')
     word_type_counts = data.read_word_type_distributions(
         word_type_counts_file)
     embeddings_model = cfg.get('Alignment', 'word-embeddings')
     word_deriv_file = cfg.get('Alignment', 'word-derivations')
     we_synonyms = WordembeddingSynonyms(embeddings_model)
     word_derivations = WordDerivations(word_deriv_file)
     mediator_relations = data.read_mediator_relations(
         mediator_relations_file)
     relation_counts = data.read_relation_counts(relation_counts_file)
     mediator_names = data.read_mediator_names(mediator_names_file)
     mediator_index = MediatorIndexFast.init_from_config()
     reverse_relations = data.read_reverse_relations(reverse_relations_file)
     relation_expected_types = data.read_relation_expected_types(
         expected_types_file)
     relation_words = data.read_relation_words(relation_words_file,
                                               n_top_words=1000)
     mediated_relation_words = data.read_mediated_relation_words(
         mediated_relation_words_file, n_top_words=1000)
     rel_tt_distributions = data.read_relation_target_type_distributions(
         tt_distributions_file)
     rel_lemmas = data.read_relation_lemmas(rel_lemmas_file)
     return QueryCandidateExtender(mediator_index, relation_counts,
                                   mediator_names, mediator_relations,
                                   reverse_relations,
                                   relation_expected_types,
                                   sparql_backend, relation_words,
                                   mediated_relation_words,
                                   rel_tt_distributions, we_synonyms,
                                   word_derivations, word_type_counts,
                                   rel_lemmas)