Example no. 1
0
def analyze_result(target_col, result_obj, queue_state_rows, dataset,
                   state_macro_f1s):
    """Scores each predicted span against the target-column entry with a
    lemma-level macro F1 and appends the score to its state's list."""
    for robj, (sid, row) in zip(result_obj, queue_state_rows):
        # No candidate span was produced for this state: score it 0.
        if len(robj) == 0:
            state_macro_f1s[sid].append(0.0)
            continue

        # Lemmatize the target-column entry text and the top-ranked span.
        entry_tokens = [
            token["lemma"] for token in
            annotate(dataset[row][target_col].text,
                     properties=NLP_PROPERTIES_LEMMA)["sentences"][0]["tokens"]
        ]
        span_tokens = [
            token["lemma"]
            for sent in annotate(robj[0]["span"],
                                 properties=NLP_PROPERTIES_LEMMA)["sentences"]
            for token in sent["tokens"]
        ]

        # Treat every lemma in either side as one binary instance: label 1
        # if it occurs in the span (true) or in the entry (pred).
        union_set = set(entry_tokens) | set(span_tokens)
        sorted_entries = sorted(union_set)
        true_labels = [
            1 if key in span_tokens else 0 for key in sorted_entries
        ]
        pred_labels = [
            1 if key in entry_tokens else 0 for key in sorted_entries
        ]

        state_macro_f1s[sid].append(
            f1_score(true_labels, pred_labels, average="macro"))
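
A minimal call sketch. The state/row pairs and the single ranked span below are made up, and `dataset` is assumed to be the same TEntry table used throughout these examples; only `analyze_result` itself comes from the code above.

from collections import defaultdict

queue_state_rows = [(0, 3), (1, 7)]                 # (state id, dataset row)
result_obj = [[{"span": "the 2008 Summer Olympics"}], []]

state_macro_f1s = defaultdict(list)                 # state id -> macro-F1 list
analyze_result(target_col=2,
               result_obj=result_obj,
               queue_state_rows=queue_state_rows,
               dataset=dataset,                     # assumed TEntry table
               state_macro_f1s=state_macro_f1s)
# State 1 produced no span, so state_macro_f1s[1] == [0.0].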
Example no. 2
0
def search_question_template(dataset,
                             name="candidate_templates",
                             total_steps=2,
                             external=False,
                             ks=None):
    num_rows, num_cols = len(dataset), len(dataset[0])

    # For every column, collect up to 15 candidate phrases drawn from the
    # noun phrases of its top-3 category labels.
    column_phrases = {}
    for j in xrange(num_cols):
        current = []
        if j not in dataset.attrs["column_category_scores"]:
            column_phrases[j] = current
            continue

        for cate_name, _ in dataset.attrs["column_category_scores"][
                j].most_common(3):
            current.extend([
                remove_parenthesis_on_tokens_text([
                    token["word"]
                    for token in annotate(text)["sentences"][0]["tokens"]
                ])
                for text in extract_noun_phrases(cate_name.replace("_", " "))
            ])

        # Deduplicate and keep a random sample of at most 15 phrases.
        current = list(set(current))
        random.shuffle(current)
        current = current[:15]
        column_phrases[j] = current

    # Augment each column's phrases with its NER label (mapped through
    # NER_MAP when a mapping exists), then deduplicate again.
    for j in xrange(num_cols):
        if j not in dataset.attrs["ner_annotation"]:
            continue

        ner_annotation = dataset.attrs["ner_annotation"][j]
        current = NER_MAP.get(ner_annotation, [ner_annotation.lower()])
        column_phrases[j] += current
        column_phrases[j] = list(set(column_phrases[j]))

    dataset.attrs["column_phrases"] = column_phrases

    # Normalize ks into one value per search step (default 20).
    if ks is None:
        ks = [20 for _ in xrange(total_steps)]
    elif isinstance(ks, int):
        ks = [ks for _ in xrange(total_steps)]

    # Run the per-column template search and collect the candidates.
    candidate_templates = {}
    for j in xrange(num_cols):
        candidate_templates[j] = search_template_for_columns(
            dataset, j, total_steps, external, ks)

    dataset.attrs[name] = candidate_templates
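
A hypothetical call site, assuming `dataset` is the same annotated table object used across these examples:

# A single int for ks is broadcast to one value per step; both the phrases
# and the template candidates end up in dataset.attrs.
search_question_template(dataset, name="candidate_templates",
                         total_steps=2, ks=10)
templates_per_column = dataset.attrs["candidate_templates"]
phrases_per_column = dataset.attrs["column_phrases"]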
Example no. 3
0
def mark_relations_and_corefs(row, name="rel_triplets"):
    """Extracts relation triplets that mention the row's entries from the
    documents linked to those entries and stores them on the row."""
    # Collect non-empty TEntry cells, remember their columns, and load every
    # document named in each entry's doc scores.
    docs = {}
    entry_cols, entry_texts, entry_tokens = [], [], []
    for i, entry in enumerate(row):
        if isinstance(entry, TEntry):
            if len(entry.text.strip()) == 0:
                continue

            entry_texts.append(entry.text)
            entry_cols.append(i)

            for doc_name in entry.attributes["doc_scores"][0]:
                docs[doc_name] = read_doc(title=doc_name)[0][1]

    # Lower-cased token lists of the entry texts (first sentence only).
    for text in entry_texts:
        entry_tokens.append(extract_sentences(annotate(text.lower()))[0])

    # Annotate every paragraph of every linked document, extract relation
    # triplets around the entry texts, and merge overlapping relations.
    triplets = []
    for doc_name, text in docs.items():
        # Skip the leading "\n\n"-separated block of the document.
        paragraphs = text.split("\n\n")[1:]

        for paraid, paragraph in enumerate(paragraphs):
            jobj = annotate(paragraph, properties=NLP_PROPERTIES_ADVANCED)
            sentences = extract_sentences(jobj, lower=True)

            # cumlen[k] = number of tokens up to and including sentence k.
            cumlen = [0]
            for sent in sentences:
                cumlen.append(cumlen[-1] + len(sent))
            cumlen = cumlen[1:]

            cur_triplets = _get_relations(jobj, cumlen, entry_texts, doc_name,
                                          paraid)
            triplets.extend(
                _merge_relations(jobj, cumlen, entry_texts, cur_triplets))

    row.attrs[name] = triplets
Example no. 4
0
def mark_literal_ner(dataset, name="ner_annotation"):
    """Assigns each column the majority NER tag of the entries whose full
    text is recognized as a single token, skipping columns whose winner is
    'O' (no entity)."""
    num_rows, num_cols = len(dataset), len(dataset[0])

    ner_annotations = {}
    for j in xrange(num_cols):
        votes = Counter()

        for i in xrange(num_rows):
            if not isinstance(dataset[i][j], TEntry):
                continue

            text = dataset[i][j].text
            annotated_obj = annotate(text, properties=NLP_PROPERTIES_NER)

            # Only count tokens whose surface form equals the entire entry
            # text, i.e. literal single-token entries.
            for sentence in annotated_obj["sentences"]:
                for token in sentence["tokens"]:
                    if token["word"] == text:
                        votes[token["ner"]] += 1

        # Keep the winning tag unless the column is dominated by 'O'.
        if len(votes) > 0 and votes.most_common(1)[0][0] != 'O':
            ner_annotations[j] = votes.most_common(1)[0][0]

    dataset.attrs[name] = ner_annotations
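
The per-column vote can be illustrated on its own with hypothetical tag counts (the tag strings below are made up; only `Counter` is the real dependency):

from collections import Counter

# Tags observed for one column's single-token cells.
votes = Counter(["DATE", "DATE", "O", "DATE", "NUMBER"])
tag = votes.most_common(1)[0][0]
if tag != "O":          # the column is kept only when the winner is a real tag
    print(tag)          # DATE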
Example no. 5
0
def annotate_entities(row, name="entity_ranges"):
    """For every linked document of the row, records the token positions that
    refer to each entry, extending direct mentions via coreference chains."""
    entity_ranges = {}
    entry_tokens, entry_titles = [], []

    # Tokenize each TEntry cell; the cell text doubles as the entity title.
    for entry in row:
        if isinstance(entry, TEntry):
            tokens = [
                token["word"]
                for token in annotate(entry.text)["sentences"][0]["tokens"]
            ]
            entry_tokens.append(tokens)
            entry_titles.append(entry.text)

    for entry in row:
        # Entries linked to a document carry the document name and its
        # pre-annotated object.
        if "best_candidate_doc" in entry.attrs:
            doc_name = entry["best_candidate_doc"]
            annotated_doc = entry["annotated_doc"]
            doc_tokens = [
                token["word"] for sent in annotated_doc["sentences"]
                for token in sent["tokens"]
            ]

            # Direct mentions: token ranges in the document that literally
            # match an entry's tokens (case-insensitive).
            entity_ranges[doc_name] = {title: set() for title in entry_titles}
            for tokens, title in zip(entry_tokens, entry_titles):
                mark_ranges = mark_tokens(doc_tokens, tokens, lower=True)
                for mark_range in mark_ranges:
                    entity_ranges[doc_name][title].update(xrange(*mark_range))

            # sent_start[k] = document-level index of the first token of
            # sentence k (used to convert coref mention offsets below).
            cumsum, sent_start = 0, []
            for sentence_obj in annotated_doc["sentences"]:
                sent_start.append(cumsum)
                cumsum += len(sentence_obj["tokens"])

            # Coreference expansion: a chain's token indices are attributed to
            # an entry when any mention in the chain overlaps the entry's
            # direct ranges, closely matches its tokens, or contains them.
            for chain in annotated_doc["corefs"].values():
                accepted = {title: False for title in entry_titles}
                to_add = {title: set() for title in entry_titles}

                for item in chain:
                    # Sentence-local, 1-based mention offsets converted to
                    # document-level token indices.
                    start_index = sent_start[item["sentNum"] -
                                             1] + item["startIndex"] - 1
                    end_index = sent_start[item["sentNum"] -
                                           1] + item["endIndex"] - 1

                    # Mention token indices, with parenthesized tokens dropped.
                    index_set = {
                        start_index + idx
                        for idx in remove_parenthesis_on_tokens(
                            doc_tokens[start_index:end_index])
                    }
                    tokens = [
                        doc_tokens[idx].lower() for idx in sorted(index_set)
                    ]

                    for title, title_tokens in zip(entry_titles, entry_tokens):
                        int_set = index_set & entity_ranges[doc_name][title]
                        to_add[title].update(index_set)
                        # Accept if >25% of the mention overlaps the entry's
                        # ranges, the token sequences are >60% similar, or the
                        # entry tokens occur inside the mention.
                        if len(int_set) > 0.25 * len(index_set):
                            accepted[title] = True
                        elif SequenceMatcher(
                                None, tokens,
                            [token.lower()
                             for token in title_tokens]).ratio() > 0.6:
                            accepted[title] = True
                        elif mark_tokens(tokens, title_tokens, lower=True):
                            accepted[title] = True

                # Commit the chain's token indices only for accepted entries.
                for title in entry_titles:
                    if accepted[title]:
                        entity_ranges[doc_name][title].update(to_add[title])

    row.attrs[name] = entity_ranges
Example no. 6
0
def mark_entry_collocations(row, name="collocations", limit=60):
    """Finds paragraphs in the linked documents where two of the row's
    entries occur within `limit` tokens of each other and records the
    covering text span for each such pair."""
    # Collect non-empty TEntry cells, remember their columns, and load every
    # document named in each entry's doc scores.
    docs = {}
    entry_cols, entry_texts, entry_tokens = [], [], []
    for i, entry in enumerate(row):
        if isinstance(entry, TEntry):
            if len(entry.text.strip()) == 0:
                continue

            entry_texts.append(entry.text)
            entry_cols.append(i)

            for doc_name in entry.attributes["doc_scores"][0]:
                docs[doc_name] = read_doc(title=doc_name)[0][1]

    # Lower-cased token lists of the entry texts (first sentence only).
    for text in entry_texts:
        entry_tokens.append(extract_sentences(annotate(text.lower()))[0])

    # Annotate every paragraph once up front and cache the result.
    doc_paragraphs = {}
    for doc_name, text in docs.items():
        doc_paragraphs[doc_name] = []
        paragraphs = text.split("\n\n")[1:]

        for paragraph in paragraphs:
            jobj = annotate(paragraph)
            doc_paragraphs[doc_name].append((paragraph, jobj))

    collocations = []
    for doc_name in docs:
        # For each unordered pair of entries, find the first paragraph of
        # this document in which both occur close together.
        for i, j in combinations(range(len(entry_tokens)), r=2):
            found = False

            for paragraph, jobj in doc_paragraphs[doc_name]:
                if found:
                    break

                # Reuse the cached annotation instead of re-annotating.
                sentences = extract_sentences(jobj, lower=True)
                doc_tokens = [
                    token for sentence in sentences for token in sentence
                ]

                # Token ranges at which each entry occurs in this paragraph.
                marks = []
                for tokens in entry_tokens:
                    marks.append(mark_tokens(doc_tokens, tokens))

                # Shortest token window covering one occurrence of entry i
                # and one occurrence of entry j, in either order.
                bestl, bestb, beste = limit + 1, -1, -1
                for b1, e1 in marks[i]:
                    for b2, e2 in marks[j]:
                        if 0 <= e2 - b1 < bestl:
                            bestb, beste = b1, e2
                            bestl = e2 - b1
                        elif 0 <= e1 - b2 < bestl:
                            bestb, beste = b2, e1
                            bestl = e1 - b2

                if bestl <= limit:
                    # Find the sentences containing the window's first and
                    # last tokens.
                    sbeg, send, csum = 0, 0, 0
                    for sent in jobj["sentences"]:
                        csum += len(sent["tokens"])
                        if csum > bestb:
                            break
                        sbeg += 1

                    csum = 0
                    for sent in jobj["sentences"]:
                        csum += len(sent["tokens"])
                        if csum >= beste:
                            break
                        send += 1

                    # Character offsets of the covering sentence span, used to
                    # slice the collocation text out of the paragraph.
                    obeg, oend = (jobj["sentences"][sbeg]["tokens"][0]
                                  ['characterOffsetBegin'], jobj["sentences"]
                                  [send]["tokens"][-1]['characterOffsetEnd'])
                    pair = ((entry_cols[i], entry_cols[j])
                            if entry_cols[i] < entry_cols[j] else
                            (entry_cols[j], entry_cols[i]))
                    collocations.append(
                        dict(pair=pair,
                             text=paragraph[obeg:oend],
                             doc_name=doc_name))
                    found = True

    row.attrs[name] = collocations
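
The window search inside the pair loop can be illustrated in isolation with hypothetical mark ranges, i.e. the `(begin, end)` pairs `mark_tokens` would return for two entries in one paragraph:

limit = 60
marks = [[(5, 7), (40, 42)],      # hypothetical occurrences of entry i
         [(18, 20), (90, 92)]]    # hypothetical occurrences of entry j

bestl, bestb, beste = limit + 1, -1, -1
for b1, e1 in marks[0]:
    for b2, e2 in marks[1]:
        if 0 <= e2 - b1 < bestl:          # entry i appears before entry j
            bestb, beste, bestl = b1, e2, e2 - b1
        elif 0 <= e1 - b2 < bestl:        # entry j appears before entry i
            bestb, beste, bestl = b2, e1, e1 - b2

print(bestb, beste, bestl)                # 5 20 15: tokens 5..20 cover both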