Example #1
from neuralcoref import Coref


def resolve_pronouns(doc):
    # Run the coreference model once over the full document text.
    coref = Coref()
    coref.one_shot_coref(utterances=doc.text)
    mentions = coref.get_mentions()
    #print(mentions, coref.get_scores())
    clusters = coref.get_clusters(remove_singletons=True)
    alias_groups = []
    for cluster in clusters[0].values():
        # cluster here is a list of mention indices
        aliases = []
        indices = []
        for mention_index in cluster:
            mention = mentions[mention_index]
            aliases.append(mention.text)
            indices.append((mention.start, mention.end))
        alias_groups.append((aliases, indices))
    return alias_groups
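The function above only reads `doc.text`, so any object exposing a `text` attribute can stand in for a spaCy `Doc`. A minimal usage sketch, assuming the old standalone `neuralcoref` package (the one providing the `Coref` class) is installed; the `Doc` namedtuple is just a hypothetical stand-in:

from collections import namedtuple

# Hypothetical stand-in for a spaCy Doc: resolve_pronouns only reads doc.text.
Doc = namedtuple("Doc", ["text"])

doc = Doc(text=u"John met Mary at the station. He waved to her.")
for aliases, spans in resolve_pronouns(doc):
    # Each group pairs the surface strings of coreferring mentions
    # with their (start, end) positions.
    print(aliases, spans)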
Example #2
from neuralcoref import Coref

coref = Coref()
# Run the model over a short multi-sentence conversation in a single call.
clusters = coref.continuous_coref(utterances=u"John wanted to marry Mary. He was a doctor and she was a nurse. She went to the market one day when Henry met her and proposed to her. He was a local don.")
print(clusters)

mentions = coref.get_mentions()
#print(mentions)

utterances = coref.get_utterances()
print(utterances)

# Utterances with each coreferring mention replaced by its cluster's representative mention.
resolved_utterance_text = coref.get_resolved_utterances()
print(resolved_utterance_text)

print(coref.get_scores())
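In the old standalone API, `continuous_coref` appears to differ from `one_shot_coref` in that it keeps the previously seen utterances as dialogue history rather than starting fresh. A small follow-up sketch under that assumption, reusing the `coref` object from above:

# Assumption: the earlier sentences are still in the history, so the new
# pronouns can be resolved against mentions introduced above.
coref.continuous_coref(utterances=u"He eventually married her.")
print(coref.get_resolved_utterances())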
Example #3
from nltk.tokenize import word_tokenize
from neuralcoref import Coref
import sys
import os

# Silence the library's console output while the model runs.
sys.stdout = open(os.devnull, "w")

resolved_sentence = []

# Context and utterance arrive as underscore-separated command-line arguments.
con = sys.argv[1].replace("_", " ")
utt = sys.argv[2].replace("_", " ")

coref = Coref()

# Note: `unicode` is the Python 2 builtin, so this script targets Python 2.
clusters = coref.one_shot_coref(utterances=unicode(utt), context=unicode(con))

candidates = coref.get_mentions()

utterances = coref.get_utterances()

resolved_utterance_text = coref.get_resolved_utterances()

# Restore stdout so anything printed from here on is visible again.
sys.stdout = sys.__stdout__
First = str(candidates[0]).split()

# Collect the string form of every detected mention.
one = []
for item in candidates:
    one.append(str(item))

two = []
Three = []
for item in one: