Example #1
def compute_pmi(story):
    """Sum the PMI scores of every entity's dependency chain in a story."""
    total = 0  # running total, returned at the end
    # extract_dependency_pairs returns a tuple: the story id (a number) and a
    # dict mapping each entity id to a list of (verb, dependency) tuples
    story_id, deps = chains.extract_dependency_pairs(story)
    for dependency_pairs in deps.values():
        total += entity_pmi(dependency_pairs)
    return total
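
compute_pmi relies on an entity_pmi helper that is not shown in this excerpt. A minimal sketch of what it might look like, assuming the table.pmi(verb1, dep1, verb2, dep2) interface and the adjacent-pair scoring used in Example #2, with table a chains.ProbabilityTable loaded elsewhere:

def entity_pmi(dependency_pairs):
    # Illustrative only: score one entity's chain by summing the PMI of every
    # adjacent (verb, dependency) pair, mirroring the loops in Example #2.
    score = 0
    for first, second in zip(dependency_pairs, dependency_pairs[1:]):
        score += table.pmi(first[0], first[1], second[0], second[1])
    return score
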
Example #2
        table = chains.ProbabilityTable(json.load(fp))

    total, correct = 0, 0

    # load testing data
    test = chains.load_data("test.csv")

    with open("answers.txt", "w") as f:
        # header for the csv
        f.write("InputStoryid,AnswerRightEnding\n")

        test_tqdm = tqdm.tqdm(test)
        for t in test_tqdm:
            one, two = parse_test_instance(t)

            _, one_deps = chains.extract_dependency_pairs(one)
            _, two_deps = chains.extract_dependency_pairs(two)

            # logic to choose between one and two
            prog_one = chains.protagonist(one)
            prog_two = chains.protagonist(two)

            pmi_one, pmi_two = 0, 0

            for entity_id, deps in one_deps.items():
                for first, second in zip(deps, deps[1:]):
                    pmi_one += table.pmi(first[0], first[1], second[0],
                                         second[1])

            for entity_id, deps in two_deps.items():
                for first, second in zip(deps, deps[1:]):
                    pmi_two += table.pmi(first[0], first[1], second[0],
                                         second[1])
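
Example #2 breaks off here. A hedged sketch, not the original code, of how the decision and output step might conclude inside the same loop, assuming the answers.txt header written above and that test rows expose InputStoryid as Example #3's story_answer does:

            # pick the ending whose dependency chains score the higher total PMI
            answer = 1 if pmi_one >= pmi_two else 2
            f.write(f"{t.InputStoryid},{answer}\n")
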
Example #3
    return [
        chains.ParsedStory(id, id, chains.nlp(" ".join(story[2:6] + [a])),
                           *(sentences + [chains.nlp(a)]))
        for a in alternatives
    ]


def story_answer(story):
    """Tells you the correct answer. Return (storyid, index). 1 for the first ending, 2 for the second ending"""
    #obviously you can't use this information until you've chosen your answer!
    return story.InputStoryid, story.AnswerRightEnding


# Load training data and build the model
#data, table = chains.process_corpus("train.csv", 100)
#print(table.pmi("move", "nsubj", "move", "nsubj"))

# load the pre-built model
with open("all.json") as fp:
    table = chains.ProbabilityTable(json.load(fp))

# load testing data
test = chains.load_data("val.csv")
for t in test:
    one, two = parse_test_instance(t)
    _, one_deps = chains.extract_dependency_pairs(one)  # drop the story id, keep the deps dict
    pprint(one[2:])
    pprint(two[2:])
    # logic to choose between one and two
    pprint("answer:" + str(story_answer(t)))
Example #4
    for verb in verbs:
        for child in verb.children:
            # record the (verb lemma, dependency label) pair for this entity
            pairs.append((verb.lemma_, child.dep_))
    return pairs
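
For reference, a small standalone illustration of the (verb.lemma_, child.dep_) pairs collected above, assuming chains.nlp wraps a spaCy English pipeline (which the .lemma_, .dep_ and .children usage suggests):

doc = chains.nlp("The dog chased the ball.")
verbs = [tok for tok in doc if tok.pos_ == "VERB"]  # assumed way of building the verbs argument
for verb in verbs:
    for child in verb.children:
        print(verb.lemma_, child.dep_)  # e.g. "chase nsubj", "chase dobj"
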


# load testing data

test = chains.load_data("val.csv")
result = [0, 0]
for t in test:
    one, two = parse_test_instance2(t)  # versions that include the ending
    three, four = parse_test_instance(t)  # versions without the ending
    one_deps = chains.extract_dependency_pairs(three)
    two_deps = chains.extract_dependency_pairs(four)
    one_deps1 = chains.extract_dependency_pairs(one)
    two_deps1 = chains.extract_dependency_pairs(two)
    one_pmi = []
    two_pmi = []
    decision_verb = extract_dependency_five(one.five)
    decision_verb2 = extract_dependency_five(two.five)

    correct_ans = '0'

    if len(decision_verb) == 0 or len(decision_verb2) == 0:
        continue

    for choice in decision_verb:
        if (choice in one_deps1[1][0]) & (len(one_deps[1][0]) != 0):
            one_pmi.append(