# NOTE: these DeepDive UDF snippets assume "import os", "import sys", and
# "import ddlib" at the top of the module.
def extract(
    mention_id="text",
    doc_begin_index="int",
    doc_end_index="int",
    doc_id="text",
    position="text",
    sentence_index="int",
    tokens="text[]",
    pos_tags="text[]",
):
    # Load keyword dictionaries using ddlib, for domain-specific features
    # Words in the "crime" dictionary are indicative of crime
    # Words in the "non_crime" dictionary are indicative of non-crime
    APP_HOME = os.environ['APP_HOME']
    ddlib.load_dictionary(APP_HOME + "/udf/dicts/kw_crime.txt",
                          dict_id="crime")
    ddlib.load_dictionary(APP_HOME + "/udf/dicts/kw_non_crime.txt",
                          dict_id="non_crime")

    WINDOW_SIZE = 10
    MAX_PHRASE_LENGTH = 5

    # Get all phrases of up to MAX_PHRASE_LENGTH tokens in the left window
    low_tokens = [token.lower() for token in tokens]
    left_window = get_left_window(doc_begin_index, low_tokens, WINDOW_SIZE)
    # (Computed for keyword-signal features; see the full version in example #10.)
    phrases_in_sentence_left = list(
        get_all_phrases_in_sentence(left_window, MAX_PHRASE_LENGTH))

    # Create a DDLIB sentence object, which is just a list of DDLIB Word objects
    sent = []
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=t.lower(),  # lemma for Vietnamese: lowercase
                pos=pos_tags[i],
                ner=None,
                dep_par=-1,  # no dependency parse available; DDLIB uses -1 for ROOT
                dep_label=None))

    # Create DDLIB Span for penalty candidate
    penalty_span = ddlib.Span(begin_word_id=doc_begin_index,
                              length=(doc_end_index - doc_begin_index + 1))

    # Generate the generic features using DDLIB on left and right window
    for feature in ddlib.get_generic_features_mention(sent, penalty_span):
        yield [mention_id, feature]
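
This example (and example #10 below) calls two helpers, get_left_window and get_all_phrases_in_sentence, that are defined elsewhere in the UDF module. A minimal sketch of what they are assumed to do (phrases must be space-joined strings, since the callers test membership of strings like "phạt tù"):

def get_left_window(begin_index, tokens, window_size):
    # Up to window_size tokens immediately to the left of the mention.
    return tokens[max(0, begin_index - window_size):begin_index]

def get_all_phrases_in_sentence(tokens, max_phrase_length):
    # Every contiguous phrase of 1..max_phrase_length tokens, space-joined.
    for start in range(len(tokens)):
        for end in range(start + 1,
                         min(start + max_phrase_length, len(tokens)) + 1):
            yield " ".join(tokens[start:end])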
Example #2
def extract(
    organization_id="text",
    begin_index="int",
    end_index="int",
    doc_id="text",
    sentence_index="int",
    tokens="text[]",
    pos_tags="text[]",
    dep_types="text[]",
    dep_heads="int[]",
):
    sent = []
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=tokens[i],  # no lemmas available; fall back to the token
                pos=pos_tags[i],
                ner=None,
                dep_par=dep_heads[i] - 1,  # CoreNLP stores ROOT as 0; DDLIB uses -1
                dep_label=dep_types[i]))
    # Create DDLIB Span for the organization mention
    org_span = ddlib.Span(begin_word_id=begin_index,
                          length=(end_index - begin_index + 1))
    for feature in ddlib.get_generic_features_mention(sent, org_span):
        yield [organization_id, feature]
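
A hypothetical invocation of the extractor above on a toy four-token sentence (all values made up; in DeepDive these rows come from the database):

tokens = ["John", "joined", "Acme", "Corp"]
pos_tags = ["NNP", "VBD", "NNP", "NNP"]
dep_types = ["nsubj", "root", "compound", "dobj"]
dep_heads = [2, 0, 4, 2]  # 1-based heads as stored by CoreNLP; 0 = ROOT
# The mention "Acme Corp" spans word indices 2..3 (inclusive).
for organization_id, feature in extract("org-1", 2, 3, "doc-1", 0, tokens,
                                        pos_tags, dep_types, dep_heads):
    print(organization_id, feature)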
Example #3
def create_ddlib_sentence(row):
    """Create a list of ddlib.Word objects from input row."""
    sentence = []
    for i, word in enumerate(row.words):
        sentence.append(
            ddlib.Word(begin_char_offset=None,
                       end_char_offset=None,
                       word=word,
                       lemma=row.lemmas[i],
                       pos=row.poses[i],
                       ner=row.ners[i],
                       dep_par=row.dep_parents[i],
                       dep_label=row.dep_paths[i]))
    return sentence
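
For illustration, row can be any object exposing those parallel-array attributes; a hypothetical namedtuple stands in for the framework's row type:

from collections import namedtuple

# Hypothetical Row type; real rows come from the surrounding extractor.
Row = namedtuple("Row", "words lemmas poses ners dep_parents dep_paths")
row = Row(words=["Aspirin", "helps"], lemmas=["aspirin", "help"],
          poses=["NN", "VBZ"], ners=["CHEMICAL", "O"],
          dep_parents=[1, -1], dep_paths=["nsubj", "root"])
sentence = create_ddlib_sentence(row)  # -> list of two ddlib.Word objects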
Example #4
def unpack_(begin_char_offsets, end_char_offsets, words, lemmas, poses,
            ners, dep_parents, dep_paths):
    wordobjs = []
    for i in range(len(words)):
        wordobjs.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=words[i],
                lemma=lemmas[i],
                pos=poses[i],
                ner='',  # NER is noisy on medical docs
                dep_par=dep_parents[i],
                dep_label=dep_paths[i]))
    return wordobjs
Example #5
def extract(
    p_id="text",
    e_id="text",
    p_begin_index="int",
    p_end_index="int",
    e_begin_index="int",
    e_end_index="int",
    doc_id="text",
    sent_index="int",
    tokens="text[]",
    lemmas="text[]",
    pos_tags="text[]",
    ner_tags="text[]",
    dep_types="text[]",
    dep_parents="int[]",
):
    """
    Uses DDLIB to generate features for the spouse relation.
    """
    ddlib.load_dictionary(os.path.abspath("../../../job_employ_keyword.txt"),
                          dict_id="has_employment")
    ddlib.load_dictionary(
        os.path.abspath("../../../job_no_employ_keyword.txt"),
        dict_id="no_employment")
    # Create a DDLIB sentence object, which is just a list of DDLIB Word objects
    sent = []
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=lemmas[i],
                pos=pos_tags[i],
                ner=ner_tags[i],
                dep_par=dep_parents[i] - 1,  # CoreNLP stores ROOT as 0; DDLIB uses -1
                dep_label=dep_types[i]))

    # Create DDLIB Spans for the two mentions
    p_span = ddlib.Span(begin_word_id=p_begin_index,
                        length=(p_end_index - p_begin_index + 1))
    e_span = ddlib.Span(begin_word_id=e_begin_index,
                        length=(e_end_index - e_begin_index + 1))

    # Generate the generic features using DDLIB
    for feature in ddlib.get_generic_features_relation(sent, p_span, e_span):
        yield [p_id, e_id, feature]
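
The dictionary files passed to ddlib.load_dictionary are assumed in these examples to be plain text with one keyword or phrase per line; a toy file with hypothetical contents could be produced like this:

# Hypothetical keyword file in the one-entry-per-line format these
# examples assume ddlib.load_dictionary reads.
with open("job_employ_keyword.txt", "w") as f:
    f.write("hired\nemployed by\nworks for\n")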
Example #6
def extract(
        chemical_id             = "text",
        disease_id              = "text",
        chemical_begin_index    = "int",
        chemical_end_index      = "int",
        disease_begin_index     = "int",
        disease_end_index       = "int",
        doc_id                  = "text",
        sent_index              = "int",
        tokens                  = "text[]",
        lemmas                  = "text[]",
        pos_tags                = "text[]",
        ner_tags                = "text[]",
        my_ner_tags             = "text[]",
        my_ner_tags_token_ids   = "int[]",
        dep_types               = "text[]",
        dep_parents             = "int[]",
    ):
    """
    Uses DDLIB to generate features for the chemical-disease relation candidates.
    """

    # creates a dictionary of tags from the sparse my_ner_tags array
    my_ner_tags_dict = { i:tag for i,tag in zip(my_ner_tags_token_ids, my_ner_tags) }

    sent = []
    for i,t in enumerate(tokens):
        sent.append(ddlib.Word(
            begin_char_offset=None,
            end_char_offset=None,
            word=t,
            lemma=lemmas[i],
            pos=pos_tags[i],
            # use the override NER tag if one exists for this token:
            ner=my_ner_tags_dict.get(i, ner_tags[i]),
            dep_par=dep_parents[i] - 1,  # Note that as stored from CoreNLP 0 is ROOT, but for DDLIB -1 is ROOT
            dep_label=dep_types[i]))

    # Create DDLIB Spans for the chemical and disease mentions
    chemical_span = ddlib.Span(begin_word_id=chemical_begin_index, length=(chemical_end_index-chemical_begin_index+1))
    disease_span = ddlib.Span(begin_word_id=disease_begin_index, length=(disease_end_index-disease_begin_index+1))

    # Generate the generic features using DDLIB
    for feature in ddlib.get_generic_features_relation(sent, chemical_span, disease_span):
        yield [chemical_id, disease_id, feature]
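
A toy check (made-up values) of the sparse NER override built above:

my_ner_tags_token_ids = [2]
my_ner_tags = ["CHEMICAL"]
my_ner_tags_dict = {i: tag for i, tag in zip(my_ner_tags_token_ids, my_ner_tags)}
assert my_ner_tags_dict == {2: "CHEMICAL"}  # only token 2's tag is overridden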
Example #7
def extract(
    gene_id="text",
    variation_id="text",
    gene_begin_index="int",
    gene_end_index="int",
    var_begin_index="int",
    var_end_index="int",
    doc_id="text",
    sent_index="int",
    tokens="text[]",
    lemmas="text[]",
    pos_tags="text[]",
    ner_tags="text[]",
    dep_types="text[]",
    dep_parents="int[]",
):
    """
    Uses DDLIB to generate features for the spouse relation.
    """
    # Create a DDLIB sentence object, which is just a list of DDLIB Word objects
    sent = []
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=lemmas[i],
                pos=pos_tags[i],
                ner=ner_tags[i],
                dep_par=dep_parents[i] - 1,  # CoreNLP stores ROOT as 0; DDLIB uses -1
                dep_label=dep_types[i]))

    # Create DDLIB Spans for the gene and variation mentions
    # NOTE: no "+ 1" here, so this assumes *_end_index is exclusive;
    # the other examples treat end_index as inclusive (end - begin + 1).
    gene_span = ddlib.Span(begin_word_id=gene_begin_index,
                           length=gene_end_index - gene_begin_index)
    variation_span = ddlib.Span(begin_word_id=var_begin_index,
                                length=var_end_index - var_begin_index)

    # Generate the generic features using DDLIB
    for feature in ddlib.get_generic_features_relation(sent, gene_span,
                                                       variation_span):
        yield [gene_id, variation_id, feature]
Example #8
def extract(
    p1_id="text",
    p2_id="text",
    p1_begin_index="int",
    p1_end_index="int",
    p2_begin_index="int",
    p2_end_index="int",
    doc_id="text",
    sent_index="int",
    tokens="text[]",
    lemmas="text[]",
    pos_tags="text[]",
    ner_tags="text[]",
    dep_types="text[]",
    dep_parents="int[]",
):
    """
    Uses DDLIB to generate features for the relation of MED and ARD.
    """
    # Create a DDLIB sentence object, which is just a list of DDLIB Word objects
    sent = []
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=lemmas[i],
                pos=pos_tags[i],
                ner=ner_tags[i],
                dep_par=dep_parents[i] - 1,  # CoreNLP stores ROOT as 0; DDLIB uses -1
                dep_label=dep_types[i]))

    # Create DDLIB Spans for the two mentions
    p1_span = ddlib.Span(begin_word_id=p1_begin_index,
                         length=(p1_end_index - p1_begin_index + 1))
    p2_span = ddlib.Span(begin_word_id=p2_begin_index,
                         length=(p2_end_index - p2_begin_index + 1))

    # Generate the generic features using DDLIB
    for feature in ddlib.get_generic_features_relation(sent, p1_span, p2_span):
        yield [p1_id, p2_id, feature]
Example #9
def extract(S_id="text",
            O_id="text",
            S_begin_index="int",
            S_end_index="int",
            O_begin_index="int",
            O_end_index="int",
            sent_id="text",
            tokens="text[]",
            pos_tags="text[]",
            ner_tags="text[]",
            dep_types="text[]",
            dep_tokens="int[]"):
    """
    Uses DDLIB to generate features for relation.
    """
    # Create a DDLIB sentence object, which is just a list of DDLIB Word objects
    sent = []
    if len(tokens) != len(pos_tags):
        # Log mismatched array lengths to stderr (requires "import sys")
        sys.stderr.write('===>>> %s %d %d\n' %
                         (sent_id, len(tokens), len(pos_tags)))
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=tokens[i],  # no lemmas available; fall back to the token
                pos=pos_tags[i],
                ner=ner_tags[i],
                dep_par=dep_tokens[i] - 1,  # CoreNLP stores ROOT as 0; DDLIB uses -1
                dep_label=dep_types[i]))

    # Create DDLIB Spans for the subject and object mentions
    # (the original subtracted in the wrong order, giving negative lengths)
    S_span = ddlib.Span(begin_word_id=S_begin_index,
                        length=(S_end_index - S_begin_index + 1))
    O_span = ddlib.Span(begin_word_id=O_begin_index,
                        length=(O_end_index - O_begin_index + 1))

    # Generate the generic features using DDLIB
    for feature in ddlib.get_generic_features_relation(sent, S_span, O_span):
        yield [S_id, O_id, feature]
Example #10
def extract(
    p_id="text",
    p_begin_index="int",
    p_end_index="int",
    doc_id="text",
    sent_index="int",
    tokens="text[]",
    pos_tags="text[]",
    ner_tags="text[]",
    dep_types="text[]",
    dep_parents="int[]",
):
    """
    Uses DDLIB to generate features for the legal penalty mention
    """
    # Load keyword dictionaries using ddlib, for domain-specific features
    # Words in the "legal_penalty" dictionary are indicative of a legal penalty
    # Words in the "non_legal_penalty" dictionary are indicative of a non-penalty
    APP_HOME = os.environ['APP_HOME']
    ddlib.load_dictionary(APP_HOME + "/udf/dicts/kw_legal_penalty.txt",
                          dict_id="legal_penalty")
    ddlib.load_dictionary(APP_HOME + "/udf/dicts/kw_non_legal_penalty.txt",
                          dict_id="non_legal_penalty")

    # Non-penalty signals to look for on the left of the candidate mention
    with open(APP_HOME + "/udf/dicts/kw_non_legal_penalty.txt") as f:
        NON_PENAL_SIGNALS_LEFT = frozenset(word.strip() for word in f)

    WINDOW_SIZE = 10
    MAX_PHRASE_LENGTH = 5

    # Get all phrases of up to MAX_PHRASE_LENGTH tokens in the left window
    low_tokens = [token.lower() for token in tokens]
    left_window = get_left_window(p_begin_index, low_tokens, WINDOW_SIZE)
    phrases_in_sentence_left = list(
        get_all_phrases_in_sentence(left_window, MAX_PHRASE_LENGTH))

    # Create a DDLIB sentence object, which is just a list of DDLIB Word objects
    sent = []
    for i, t in enumerate(tokens):
        sent.append(
            ddlib.Word(
                begin_char_offset=None,
                end_char_offset=None,
                word=t,
                lemma=t.lower(),  # lemma for vietnamese: lowercase
                pos=pos_tags[i],
                ner=ner_tags[i],
                dep_par=dep_parents[i] - 1,  # CoreNLP stores ROOT as 0; DDLIB uses -1
                dep_label=dep_types[i]))

    # Create DDLIB Span for penalty candidate
    penalty_span = ddlib.Span(begin_word_id=p_begin_index,
                              length=(p_end_index - p_begin_index + 1))

    # Generate the generic features using DDLIB on left and right window
    for feature in ddlib.get_generic_features_mention(sent, penalty_span):
        yield [p_id, feature]

    # A non_legal_penalty keyword appears to the left of the mention
    if len(NON_PENAL_SIGNALS_LEFT.intersection(phrases_in_sentence_left)) > 0:
        yield [p_id, 'APPEAR_LEFT_KW_NON_LEGAL_PENALTY']

    # "phạt tù" appear on the left of mention
    if "phạt tù" in phrases_in_sentence_left:
        yield [p_id, 'APPEAR_LEFT_PHAT_TU']
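
A toy check (made-up Vietnamese tokens) of the left-window keyword logic above, using the helper sketches shown after the first example:

tokens = ["bị", "phạt", "tù", "ba", "năm"]  # "sentenced to three years in prison"
low_tokens = [t.lower() for t in tokens]
left_window = get_left_window(3, low_tokens, 10)  # tokens left of index 3
phrases = list(get_all_phrases_in_sentence(left_window, 5))
assert "phạt tù" in phrases  # would yield APPEAR_LEFT_PHAT_TU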