import re

# Candidate inspection helpers; the import path assumes the Snorkel 0.6-era
# lf_helpers module that these label functions were written against.
from snorkel.lf_helpers import (
    get_left_tokens,
    get_right_tokens,
    get_between_tokens,
    get_text_between,
    get_tagged_text,
)


def LF_DEBUG(c):
    """
    This label function is for debugging purposes. Feel free to ignore.

    keyword arguments:
    c - the candidate object to be labeled
    """
    print(c)
    print()
    print("Left Tokens")
    print(list(get_left_tokens(c[0], window=5)))
    print()
    print("Right Tokens")
    print(list(get_right_tokens(c[0])))
    print()
    print("Between Tokens")
    print(list(get_between_tokens(c)))
    print()
    print("Tagged Text")
    print(get_tagged_text(c))
    print(re.search(r'{{B}} .* is a .* {{A}}', get_tagged_text(c)))
    print()
    print("Get between Text")
    print(get_text_between(c))
    print(len(get_text_between(c)))
    print()
    print("Parent Text")
    print(c.get_parent())
    print()
    return 0


def LF_abbreviation(c):
    """
    If "{{B}} {{A}}" (or vice versa) appear directly next to each other,
    then this is not a valid relationship.
    """
    if len(get_text_between(c)) < 3:
        return -1
    return 0


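# The regex-based label functions below call an `ltp` ("list to pattern") helper that is
# defined elsewhere in the project. A minimal sketch, assuming it only needs to collapse
# a keyword collection into a single regex alternation group:
def ltp(tokens):
    """Join a collection of keywords/phrases into one regex alternation group."""
    return '(' + '|'.join(tokens) + ')'


# Hypothetical usage: ltp(["binds", "interacts with"]) returns '(binds|interacts with)'

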
def LF_GiG_REGULATION(c):
    if LF_GiG_UPREGULATES(c) or LF_GiG_DOWNREGULATES(c):
        return -1
    elif re.search(ltp(regulation_identifiers), get_text_between(c), flags=re.I):
        return -1
    else:
        return 0


def LF_GiG_ASSOCIATION(c):
    if re.search(ltp(association_identifiers), " ".join(get_right_tokens(c[0], window=2)), flags=re.I):
        return -1
    elif re.search(ltp(association_identifiers), " ".join(get_right_tokens(c[1], window=2)), flags=re.I):
        return -1
    elif re.search(ltp(association_identifiers), get_text_between(c), flags=re.I):
        return -1
    else:
        return 0


def LF_GiG_DOWNREGULATES(c):
    if re.search(ltp(downregulates_identifiers), " ".join(get_right_tokens(c[0], window=2)), flags=re.I):
        return -1
    elif re.search(ltp(downregulates_identifiers), " ".join(get_right_tokens(c[1], window=2)), flags=re.I):
        return -1
    elif re.search(ltp(downregulates_identifiers), get_text_between(c), flags=re.I):
        return -1
    else:
        return 0


def LF_GiG_COMPOUND_IDENTIFICATIONS(c):
    if re.search(ltp(compound_indications), " ".join(get_right_tokens(c[0], window=2)), flags=re.I):
        return -1
    elif re.search(ltp(compound_indications), " ".join(get_right_tokens(c[1], window=2)), flags=re.I):
        return -1
    elif re.search(ltp(compound_indications), get_text_between(c), flags=re.I):
        return -1
    else:
        return 0


def LF_CG_WEAK_BINDING(c):
    """
    This label function is designed to look for phrases
    that could imply a compound binding to a gene/protein.
    """
    if re.search(ltp(weak_binding_indications), get_text_between(c), flags=re.I):
        return 1
    else:
        return 0


def LF_CD_TREATS(c):
    if re.search(ltp(treat_indication), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(treat_indication), " ".join(get_left_tokens(c[0], window=5)), flags=re.I):
        return 1
    elif re.search(ltp(treat_indication), " ".join(get_right_tokens(c[0], window=5)), flags=re.I):
        return 1
    else:
        return 0


def LF_CG_UPREGULATES(c):
    """
    This label function is designed to look for phrases
    that imply a compound increasing the activity of a gene/protein.
    """
    if re.search(ltp(upregulates), get_text_between(c), flags=re.I):
        return 1
    elif upregulates.intersection(get_left_tokens(c[1], window=2)):
        return 1
    else:
        return 0


def LF_CG_DOWNREGULATES(c):
    """
    This label function is designed to look for phrases
    that imply a compound decreasing the activity of a gene/protein.
    """
    if re.search(ltp(downregulates), get_text_between(c), flags=re.I):
        return 1
    elif downregulates.intersection(get_right_tokens(c[1], window=2)):
        return 1
    else:
        return 0


def LF_GiG_BINDING_IDENTIFICATIONS(c):
    gene1_tokens = list(get_left_tokens(c[0], window=5)) + list(get_right_tokens(c[0], window=5))
    gene2_tokens = list(get_left_tokens(c[1], window=5)) + list(get_right_tokens(c[1], window=5))

    if re.search(ltp(binding_identifiers), " ".join(gene1_tokens), flags=re.I):
        return 1
    elif re.search(ltp(binding_identifiers), " ".join(gene2_tokens), flags=re.I):
        return 1
    elif re.search(ltp(binding_identifiers), get_text_between(c), flags=re.I):
        return 1
    else:
        return 0


def LF_CD_PALLIATES(c):
    """
    This label function is designed to look for phrases
    that could imply a compound palliating a disease.
    """
    if re.search(ltp(palliates_indication), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(palliates_indication), " ".join(get_left_tokens(c[0], window=5)), flags=re.I):
        return 1
    elif re.search(ltp(palliates_indication), " ".join(get_right_tokens(c[0], window=5)), flags=re.I):
        return 1
    else:
        return 0


def LF_CD_WEAKLY_TREATS(c):
    """
    This label function is designed to look for phrases
    that weakly imply a compound treating a disease.
    """
    if re.search(ltp(weak_treatment_indications), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(weak_treatment_indications), " ".join(get_left_tokens(c[0], window=5)), flags=re.I):
        return 1
    elif re.search(ltp(weak_treatment_indications), " ".join(get_right_tokens(c[0], window=5)), flags=re.I):
        return 1
    else:
        return 0


def LF_DEBUG(c):
    print("Left Tokens")
    print(get_left_tokens(c, window=3))
    print()
    print("Right Tokens")
    print(get_right_tokens(c))
    print()
    print("Between Tokens")
    print(get_between_tokens(c))
    print()
    print("Tagged Text")
    print(get_tagged_text(c))
    print(re.search(r'{{B}} .* is a .* {{A}}', get_tagged_text(c)))
    print()
    print("Get between Text")
    print(get_text_between(c))
    print(len(get_text_between(c)))
    print()
    print("Parent Text")
    print(c.get_parent())
    print()
    return 0


def LF_CD_COMPOUND_INDICATION(c):
    """
    This label function is designed to look for phrases
    that imply a compound is indicated for a disease.
    """
    if re.search(ltp(compound_indications), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(compound_indications), " ".join(get_left_tokens(c[0], window=5)), flags=re.I):
        return 1
    elif re.search(ltp(compound_indications), " ".join(get_right_tokens(c[0], window=5)), flags=re.I):
        return 1
    else:
        return 0


def LF_DG_GENETIC_ABNORMALITIES(c):
    """
    This LF searches for key phrases that indicate a genetic abnormality.
    """
    left_window = " ".join(get_left_tokens(c[0], window=10)) + " " + " ".join(get_left_tokens(c[1], window=10))
    right_window = " ".join(get_right_tokens(c[0], window=10)) + " " + " ".join(get_right_tokens(c[1], window=10))

    if re.search(ltp(genetic_abnormalities), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(genetic_abnormalities), left_window, flags=re.I):
        return 1
    elif re.search(ltp(genetic_abnormalities), right_window, flags=re.I):
        return 1
    return 0


def LF_DG_WEAK_ASSOCIATION(c):
    """
    This label function is designed to search for phrases that indicate
    a weak association between the disease and gene.
    """
    if re.search(ltp(weak_association), get_text_between(c), flags=re.I):
        return -1
    elif re.search(ltp(weak_association) + r".*({{B}}|{{A}})", get_tagged_text(c), flags=re.I):
        return -1
    elif re.search(r"({{B}}|{{A}}).*" + ltp(weak_association), get_tagged_text(c), flags=re.I):
        return -1
    else:
        return 0


def LF_DG_NO_ASSOCIATION(c):
    """
    This LF is designed to test if there is a key phrase that
    suggests a d-g pair is not an association.
    """
    if re.search(ltp(no_direct_association), get_text_between(c), flags=re.I):
        return -1
    elif re.search(ltp(no_direct_association) + r".*({{B}}|{{A}})", get_tagged_text(c), flags=re.I):
        return -1
    elif re.search(r"({{B}}|{{A}}).*" + ltp(no_direct_association), get_tagged_text(c), flags=re.I):
        return -1
    else:
        return 0


def _get_search_func(self, c):
    """
    Enumerate the token search space for pattern matching.

    :param c: candidate object
    :return: tokens or text to search over
    """
    if self.search == "sentence":
        return c.get_parent().__dict__[self.attrib]

    elif self.search == "between":
        return get_text_between(c).strip().split()

    elif self.search == "left":
        # use left-most Span
        span = c[0] if c[0].char_start < c[1].char_start else c[1]
        return get_left_tokens(span, window=self.window, attrib=self.attrib)

    elif self.search == "right":
        # use right-most Span
        span = c[0] if c[0].char_start > c[1].char_start else c[1]
        return get_right_tokens(span, window=self.window, attrib=self.attrib)


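# `_get_search_func` above is a method of a larger matcher class that is not shown in
# this section. A minimal sketch of how such a class might use it: the attribute names
# (`search`, `window`, `attrib`) come from the method body, while the class name,
# constructor, and labeling rule are hypothetical illustrations only.
class RegexLF(object):
    def __init__(self, pattern, search="between", window=3, attrib="words"):
        self.rgx = re.compile(pattern, flags=re.I)
        self.search = search
        self.window = window
        self.attrib = attrib

    def __call__(self, c):
        # Delegate to the module-level _get_search_func defined above,
        # then label 1 if the pattern matches anywhere in that token window.
        tokens = list(_get_search_func(self, c))
        return 1 if self.rgx.search(" ".join(tokens)) else 0

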
def LF_DaG_NO_ASSOCIATION(c):
    """
    This LF is designed to test if there is a key phrase that
    suggests a d-g pair is not an association.
    """
    left_window = " ".join(get_left_tokens(c[0], window=10)) + " " + " ".join(get_left_tokens(c[1], window=10))
    right_window = " ".join(get_right_tokens(c[0], window=10)) + " " + " ".join(get_right_tokens(c[1], window=10))

    if LF_DG_METHOD_DESC(c) or LF_DG_TITLE(c):
        return 0
    elif re.search(ltp(no_direct_association), get_text_between(c), flags=re.I):
        return -1
    elif re.search(ltp(no_direct_association), left_window, flags=re.I):
        return -1
    elif re.search(ltp(no_direct_association), right_window, flags=re.I):
        return -1
    else:
        return 0


def LF_DaG_WEAK_ASSOCIATION(c):
    """
    This label function is designed to search for phrases that indicate
    a weak association between the disease and gene.
    """
    left_window = " ".join(get_left_tokens(c[0], window=10)) + " " + " ".join(get_left_tokens(c[1], window=10))
    right_window = " ".join(get_right_tokens(c[0], window=10)) + " " + " ".join(get_right_tokens(c[1], window=10))

    if LF_DG_METHOD_DESC(c) or LF_DG_TITLE(c):
        return 0
    elif re.search(ltp(weak_association), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(weak_association), left_window, flags=re.I):
        return 1
    elif re.search(ltp(weak_association), right_window, flags=re.I):
        return 1
    else:
        return 0


def LF_DaG_ASSOCIATION(c):
    """
    This LF is designed to test if there is a key phrase that
    suggests a d-g pair is an association.
    """
    left_window = " ".join(get_left_tokens(c[0], window=10)) + " " + " ".join(get_left_tokens(c[1], window=10))
    right_window = " ".join(get_right_tokens(c[0], window=10)) + " " + " ".join(get_right_tokens(c[1], window=10))

    # True when the left window does NOT contain a negation word ("not"/"no")
    no_negation = not re.search(r'\b(not|no)\b', left_window, flags=re.I)

    if LF_DG_METHOD_DESC(c) or LF_DG_TITLE(c):
        return 0
    elif re.search(r'(?<!not )(?<!no )' + ltp(direct_association), get_text_between(c), flags=re.I) and no_negation:
        return 1
    elif re.search(r'(?<!not )(?<!no )' + ltp(direct_association), left_window, flags=re.I) and no_negation:
        return 1
    elif re.search(r'(?<!not )(?<!no )' + ltp(direct_association), right_window, flags=re.I) and no_negation:
        return 1
    else:
        return 0


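# A small, self-contained illustration (not part of the project) of the negative
# lookbehinds used in LF_DaG_ASSOCIATION: the pattern skips association keywords
# that are immediately preceded by "not " or "no ".
def _demo_negation_lookbehind():
    pattern = r'(?<!not )(?<!no )association'
    assert re.search(pattern, "a reported association between gene and disease", flags=re.I)
    assert re.search(pattern, "no association between gene and disease", flags=re.I) is None

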
def LF_CtD_WEAKLY_TREATS(c):
    """
    This label function is designed to look for phrases that have
    a weak implication towards a compound treating a disease.
    """
    if re.search(ltp(weak_treatment_indications), get_text_between(c), flags=re.I):
        return 1
    elif re.search(ltp(weak_treatment_indications), " ".join(get_left_tokens(c[0], window=5)), flags=re.I):
        return 1
    elif re.search(ltp(weak_treatment_indications), " ".join(get_right_tokens(c[0], window=5)), flags=re.I):
        return 1
    else:
        return 0


def LF_DaG_ASSOCIATION(c):
    """
    This LF is designed to test if there is a key phrase that
    suggests a d-g pair is an association.
    """
    if LF_DG_METHOD_DESC(c) or LF_DG_TITLE(c):
        return 0
    elif re.search(r'(?<!not )(?<!no )' + ltp(direct_association), get_text_between(c), flags=re.I):
        return 1
    elif re.search(r'(?<!not )(?<!no )' + ltp(direct_association) + r".*({{B}}|{{A}})", get_tagged_text(c), flags=re.I):
        return 1
    elif re.search(r"({{B}}|{{A}}).*(?<!not )(?<!no )" + ltp(direct_association), get_tagged_text(c), flags=re.I):
        return 1
    else:
        return 0


def LF_DG_CONTEXT_SWITCH(c):
    if re.search(ltp(context_change_keywords), get_text_between(c), flags=re.I):
        return -1
    return 0


def LF_GiG_DIAGNOSIS_IDENTIFIERS(c):
    if re.search(ltp(diagnosis_indication), get_text_between(c), flags=re.I):
        return -1
    else:
        return 0


def LF_NO_ASSOCIATION(c):
    """
    This LF is designed to test if there is a key phrase that
    suggests a d-g pair is not an association.
    """
    if re.search(ltp(direct_assocation_complement), get_text_between(c), flags=re.I):
        return -1
    return 0


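# Minimal sketch of how a set of these label functions could be run over candidates to
# build a label matrix by hand (1 = positive label, -1 = negative label, 0 = abstain).
# In the real pipeline a Snorkel annotator performs this step; `candidates` below is a
# hypothetical iterable of candidate objects, used for illustration only.
def apply_lfs(candidates, lfs):
    """Return one row of label-function votes per candidate."""
    return [[lf(c) for lf in lfs] for c in candidates]


# Hypothetical usage:
# votes = apply_lfs(candidates, [LF_abbreviation, LF_DaG_ASSOCIATION, LF_DaG_NO_ASSOCIATION])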