def calculate_belief(stmts):
    scorer = SimpleScorer(subtype_probs={
        'biopax': {'pc11': 0.2, 'phosphosite': 0.01},
        })
    be = BeliefEngine(scorer=scorer)
    be.set_prior_probs(stmts)
    be.set_hierarchy_probs(stmts)
    return {str(s.get_hash()): s.belief for s in stmts}
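# The subtype_probs passed to SimpleScorer above override the random-error
# rate for specific sub-sources of the 'biopax' source. A rough, illustrative
# sketch of the resulting prior for a single piece of evidence, assuming a
# systematic error rate of 0.05 (that rate is an assumption for illustration
# only, not something stated in the snippet above):
syst = 0.05
print(1 - (syst + 0.2))   # one biopax/pc11 evidence        -> 0.75
print(1 - (syst + 0.01))  # one biopax/phosphosite evidence -> 0.94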
def test_hierarchy_probs1():
    be = BeliefEngine()
    st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    st2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    st2.supports = [st1]
    st1.supported_by = [st2]
    be.set_hierarchy_probs([st1, st2])
    assert_close_enough(st1.belief, 1 - 0.35)
    assert_close_enough(st2.belief, 1 - 0.35 * 0.35)
def test_cycle():
    st1 = Phosphorylation(Agent('B'), Agent('A1'))
    st2 = Phosphorylation(None, Agent('A1'))
    st1.supports = [st2]
    st1.supported_by = [st2]
    st2.supports = [st1]
    st2.supported_by = [st1]
    engine = BeliefEngine()
    engine.set_hierarchy_probs([st1, st2])
def test_hierarchy_probs1():
    be = BeliefEngine()
    st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    st2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    st2.supports = [st1]
    st1.supported_by = [st2]
    st1.belief = 0.5
    st2.belief = 0.8
    be.set_hierarchy_probs([st1, st2])
    assert st1.belief == 0.5
    assert st2.belief == 0.9
def test_belief_calc_up_to_hierarchy():
    be = BeliefEngine()
    test_stmts = [
        MockStatement(1, [MockEvidence('sparser'), MockEvidence('reach')]),
        MockStatement(2, MockEvidence('biopax')),
        MockStatement(3, MockEvidence('signor')),
        MockStatement(4, MockEvidence('biogrid')),
        MockStatement(5, MockEvidence('bel')),
        MockStatement(6, [MockEvidence('phosphosite'), MockEvidence('trips')]),
        ]
    be.set_prior_probs(test_stmts)
    init_results = {s.matches_key(): s.belief for s in test_stmts}
    print(init_results)
    supp_links = [(1, 2), (1, 3), (2, 3), (1, 5), (4, 3)]
    populate_support(test_stmts, supp_links)
    be.set_hierarchy_probs(test_stmts)
    results = {s.matches_key(): s.belief for s in test_stmts}
    print(results)

    # Test a couple very simple properties.
    assert len(results) == len(test_stmts), (len(results), len(test_stmts))
    assert all([0 < b < 1 for b in results.values()]), 'Beliefs out of range.'

    # Test the change from the initial.
    all_deltas_correct = True
    deltas_dict = {}
    for s in test_stmts:
        h = s.matches_key()
        b = s.belief
        # Get results
        res = {'actual': b - init_results[h]}
        # Define expectations.
        if s.supports:
            res['expected'] = 'increase'
            if res['actual'] <= 0:
                all_deltas_correct = False
        else:
            res['expected'] = 'no change'
            if res['actual'] != 0:
                all_deltas_correct = False
        deltas_dict[h] = res
    assert all_deltas_correct, deltas_dict
def test_hierarchy_probs4():
    be = BeliefEngine()
    st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    st2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    st3 = Phosphorylation(None, Agent('c'), evidence=[ev1])
    st4 = Phosphorylation(None, Agent('d'), evidence=[ev1])
    st4.supports = [st1, st2, st3]
    st3.supports = [st1]
    st2.supports = [st1]
    st1.supported_by = [st2, st3, st4]
    st2.supported_by = [st4]
    st3.supported_by = [st4]
    be.set_hierarchy_probs([st1, st2, st3, st4])
    assert_close_enough(st1.belief, 1 - 0.35)
    assert_close_enough(st2.belief, 1 - 0.35 * 0.35)
    assert_close_enough(st3.belief, 1 - (0.05 + 0.3 * 0.3))
    assert_close_enough(st4.belief, 1 - 0.35 * (0.05 + 0.3 * 0.3 * 0.3))
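# Sketch of the arithmetic behind the expected values above (and in
# test_hierarchy_probs1), assuming ev1 and ev2 come from two sources that each
# have a random error rate of 0.3 and a systematic error rate of 0.05; those
# rates are an assumption about how the test module's evidence fixtures are
# configured. Evidences from different sources multiply their total error
# terms, while n evidences from the same source share a single systematic
# term, giving syst + rand**n.
rand, syst = 0.3, 0.05
err1 = syst + rand                    # one evidence from one source: 0.35
print(1 - err1)                       # st1: own evidence only
print(1 - err1 * err1)                # st2: two evidences, different sources
print(1 - (syst + rand ** 2))         # st3: two evidences, same source
print(1 - err1 * (syst + rand ** 3))  # st4: three same-source + one other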
def run_preassembly(statements, hierarchies):
    print('%d total statements' % len(statements))
    # Filter to grounded only
    statements = ac.filter_grounded_only(statements, score_threshold=0.4)
    # Make a Preassembler with the Eidos and TRIPS ontology
    pa = Preassembler(hierarchies, statements)
    # Make a BeliefEngine and run combine duplicates
    be = BeliefEngine()
    unique_stmts = pa.combine_duplicates()
    print('%d unique statements' % len(unique_stmts))
    be.set_prior_probs(unique_stmts)
    # Run combine related
    related_stmts = pa.combine_related(return_toplevel=False)
    be.set_hierarchy_probs(related_stmts)
    # Filter to top-level Statements
    top_stmts = ac.filter_top_level(related_stmts)
    print('%d top-level statements' % len(top_stmts))
    return top_stmts
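# A minimal usage sketch for run_preassembly above; it assumes an older INDRA
# version that still exposes the `hierarchies` object from hierarchy_manager,
# and a hypothetical input pickle name.
import indra.tools.assemble_corpus as ac
from indra.preassembler.hierarchy_manager import hierarchies

raw_stmts = ac.load_statements('raw_statements.pkl')  # hypothetical file name
top_stmts = run_preassembly(raw_stmts, hierarchies)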
def run_preassembly(statements, hierarchies):
    print('%d total statements' % len(statements))
    # Filter to grounded only
    statements = map_onto(statements)
    ac.dump_statements(statements, 'pi_mtg_demo_unfiltered.pkl')
    statements = ac.filter_grounded_only(statements, score_threshold=0.7)
    #statements = ac.filter_by_db_refs(statements, 'UN',
    #    ['conflict', 'food_security', 'precipitation'], policy='one',
    #    match_suffix=True)
    statements = ac.filter_by_db_refs(
        statements, 'UN',
        ['conflict', 'food_security', 'flooding', 'food_production',
         'human_migration', 'drought', 'food_availability', 'market',
         'food_insecurity'],
        policy='all', match_suffix=True)
    assume_polarity(statements)
    statements = filter_has_polarity(statements)

    # Make a Preassembler with the Eidos and TRIPS ontology
    pa = Preassembler(hierarchies, statements)
    # Make a BeliefEngine and run combine duplicates
    be = BeliefEngine()
    unique_stmts = pa.combine_duplicates()
    print('%d unique statements' % len(unique_stmts))
    be.set_prior_probs(unique_stmts)
    # Run combine related
    related_stmts = pa.combine_related(return_toplevel=False)
    be.set_hierarchy_probs(related_stmts)
    #related_stmts = ac.filter_belief(related_stmts, 0.8)
    # Filter to top-level Statements
    top_stmts = ac.filter_top_level(related_stmts)
    pa.stmts = top_stmts
    print('%d top-level statements' % len(top_stmts))
    conflicts = pa.find_contradicts()
    top_stmts = remove_contradicts(top_stmts, conflicts)
    ac.dump_statements(top_stmts, 'pi_mtg_demo.pkl')
    return top_stmts
def test_hierarchy_probs4():
    be = BeliefEngine()
    st1 = Phosphorylation(None, Agent('a'), evidence=[ev1])
    st2 = Phosphorylation(None, Agent('b'), evidence=[ev2])
    st3 = Phosphorylation(None, Agent('c'), evidence=[ev3])
    st4 = Phosphorylation(None, Agent('d'), evidence=[ev1])
    st4.supports = [st1, st2, st3]
    st3.supports = [st1]
    st2.supports = [st1]
    st1.supported_by = [st2, st3, st4]
    st2.supported_by = [st4]
    st3.supported_by = [st4]
    st1.belief = 0.5
    st2.belief = 0.8
    st3.belief = 0.2
    st4.belief = 0.6
    # st4 must be included here, otherwise its belief is never recomputed
    # and the final assertion below cannot hold.
    be.set_hierarchy_probs([st1, st2, st3, st4])
    assert st1.belief == 0.5
    assert st2.belief == 0.9
    assert st3.belief == 0.6
    assert st4.belief == 0.968
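# The expected values above follow if the engine combines a statement's own
# pre-set belief with the beliefs of all statements it supports as independent
# probabilities, i.e. 1 - prod(1 - b). That combination rule is inferred from
# the numbers in this test rather than stated by it, so treat this as a sketch.
def combine(beliefs):
    prob = 1.0
    for b in beliefs:
        prob *= (1 - b)
    return 1 - prob

print(combine([0.8, 0.5]))            # st2 combined with st1 -> 0.9
print(combine([0.2, 0.5]))            # st3 combined with st1 -> 0.6
print(combine([0.6, 0.5, 0.8, 0.2]))  # st4 with st1, st2, st3 -> 0.968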
def run_assembly(stmts, folder, pmcid, background_assertions=None):
    '''Run assembly on a list of statements, for a given PMCID.'''
    # Folder for index card output (scored submission)
    indexcard_prefix = folder + '/index_cards/' + pmcid
    # Folder for other outputs (for analysis, debugging)
    otherout_prefix = folder + '/other_outputs/' + pmcid

    # Do grounding mapping here
    # Load the TRIPS-specific grounding map and add to the default
    # (REACH-oriented) grounding map:
    trips_gm = load_grounding_map('trips_grounding_map.csv')
    default_grounding_map.update(trips_gm)
    gm = GroundingMapper(default_grounding_map)

    mapped_agent_stmts = gm.map_agents(stmts)
    renamed_agent_stmts = gm.rename_agents(mapped_agent_stmts)

    # Filter for grounding
    grounded_stmts = []
    for st in renamed_agent_stmts:
        if all([is_protein_or_chemical(a) for a in st.agent_list()]):
            grounded_stmts.append(st)

    # Instantiate the Preassembler
    pa = Preassembler(hierarchies)
    pa.add_statements(grounded_stmts)
    print('== %s ====================' % pmcid)
    print('%d statements collected in total.' % len(pa.stmts))

    # Combine duplicates
    unique_stmts = pa.combine_duplicates()
    print('%d statements after combining duplicates.' % len(unique_stmts))

    # Run BeliefEngine on unique statements
    epe = BeliefEngine()
    epe.set_prior_probs(pa.unique_stmts)

    # Build statement hierarchy
    related_stmts = pa.combine_related()
    # Run BeliefEngine on hierarchy
    epe.set_hierarchy_probs(related_stmts)
    print('%d statements after combining related.' % len(related_stmts))

    # Instantiate the mechanism linker
    ml = MechLinker(related_stmts)
    # Link statements
    linked_stmts = ml.link_statements()
    # Run BeliefEngine on linked statements
    epe.set_linked_probs(linked_stmts)
    # Print linked statements for debugging purposes
    print('Linked\n=====')
    for ls in linked_stmts:
        print(ls.inferred_stmt.belief, ls.inferred_stmt)
    print('=============')

    # Combine all statements including linked ones
    all_statements = ml.statements + [ls.inferred_stmt for ls in linked_stmts]

    # Instantiate a new preassembler
    pa = Preassembler(hierarchies, all_statements)
    # Build hierarchy again
    pa.combine_duplicates()
    # Choose the top-level statements
    related_stmts = pa.combine_related()

    # Remove top-level statements that came only from the prior
    if background_assertions is not None:
        nonbg_stmts = [stmt for stmt in related_stmts
                       if stmt not in background_assertions]
    else:
        nonbg_stmts = related_stmts

    # Dump top-level statements in a pickle
    with open(otherout_prefix + '.pkl', 'wb') as fh:
        pickle.dump(nonbg_stmts, fh, protocol=2)

    # Flatten evidence for statements
    flattened_evidence_stmts = flatten_evidence(nonbg_stmts)

    # Start a card counter
    card_counter = 1
    # We don't limit the number of cards reported in this round
    card_lim = float('inf')
    top_stmts = []
    ###############################################
    # The belief cutoff for statements
    belief_cutoff = 0.3
    ###############################################
    # Sort by amount of evidence
    for st in sorted(flattened_evidence_stmts,
                     key=lambda x: x.belief, reverse=True):
        if st.belief >= belief_cutoff:
            print(st.belief, st)
        if st.belief < belief_cutoff:
            print('SKIP', st.belief, st)

        # If it's background knowledge, we skip the statement
        if is_background_knowledge(st):
            print('This statement is background knowledge - skipping.')
            continue

        # Assemble IndexCards
        ia = IndexCardAssembler([st], pmc_override=pmcid)
        ia.make_model()
        # If the index card was actually made
        # (not all statements can be assembled into index cards so
        # this is often not the case)
        if ia.cards:
            # Save the index card json
            ia.save_model(indexcard_prefix + '-%d.json' % card_counter)
            card_counter += 1
            top_stmts.append(st)
            if card_counter > card_lim:
                break

    # Print the English-assembled model for debugging purposes
    ea = EnglishAssembler(top_stmts)
    print('=======================')
    print(ea.make_model())
    print('=======================')

    # Print the statement graph
    graph = render_stmt_graph(nonbg_stmts)
    graph.draw(otherout_prefix + '_graph.pdf', prog='dot')
    # Print statement diagnostics
    print_stmts(pa.stmts, otherout_prefix + '_statements.tsv')
    print_stmts(related_stmts, otherout_prefix + '_related_statements.tsv')
def run_assembly(stmts, folder, pmcid, background_assertions=None):
    '''Run assembly on a list of statements, for a given PMCID.'''
    # Folder for index card output (scored submission)
    indexcard_prefix = folder + '/index_cards/' + pmcid
    # Folder for other outputs (for analysis, debugging)
    otherout_prefix = folder + '/other_outputs/' + pmcid

    # Do grounding mapping here
    # Load the TRIPS-specific grounding map and add to the default
    # (REACH-oriented) grounding map:
    trips_gm = load_grounding_map('trips_grounding_map.csv')
    default_grounding_map.update(trips_gm)
    gm = GroundingMapper(default_grounding_map)

    mapped_agent_stmts = gm.map_agents(stmts)
    renamed_agent_stmts = gm.rename_agents(mapped_agent_stmts)

    # Filter for grounding
    grounded_stmts = []
    for st in renamed_agent_stmts:
        if all([is_protein_or_chemical(a) for a in st.agent_list()]):
            grounded_stmts.append(st)

    # Instantiate the Preassembler
    pa = Preassembler(hierarchies)
    pa.add_statements(grounded_stmts)
    print('== %s ====================' % pmcid)
    print('%d statements collected in total.' % len(pa.stmts))

    # Combine duplicates
    unique_stmts = pa.combine_duplicates()
    print('%d statements after combining duplicates.' % len(unique_stmts))

    # Run BeliefEngine on unique statements
    epe = BeliefEngine()
    epe.set_prior_probs(pa.unique_stmts)

    # Build statement hierarchy
    related_stmts = pa.combine_related()
    # Run BeliefEngine on hierarchy
    epe.set_hierarchy_probs(related_stmts)
    print('%d statements after combining related.' % len(related_stmts))

    # Instantiate the mechanism linker
    # Link statements
    linked_stmts = MechLinker.infer_active_forms(related_stmts)
    linked_stmts += MechLinker.infer_modifications(related_stmts)
    linked_stmts += MechLinker.infer_activations(related_stmts)
    # Run BeliefEngine on linked statements
    epe.set_linked_probs(linked_stmts)
    # Print linked statements for debugging purposes
    print('Linked\n=====')
    for ls in linked_stmts:
        print(ls.inferred_stmt.belief, ls.inferred_stmt)
    print('=============')

    # Combine all statements including linked ones
    all_statements = related_stmts + [ls.inferred_stmt for ls in linked_stmts]

    # Instantiate a new preassembler
    pa = Preassembler(hierarchies, all_statements)
    # Build hierarchy again
    pa.combine_duplicates()
    # Choose the top-level statements
    related_stmts = pa.combine_related()

    # Remove top-level statements that came only from the prior
    if background_assertions is not None:
        nonbg_stmts = [stmt for stmt in related_stmts
                       if stmt not in background_assertions]
    else:
        nonbg_stmts = related_stmts

    # Dump top-level statements in a pickle
    with open(otherout_prefix + '.pkl', 'wb') as fh:
        pickle.dump(nonbg_stmts, fh)

    # Flatten evidence for statements
    flattened_evidence_stmts = flatten_evidence(nonbg_stmts)

    # Start a card counter
    card_counter = 1
    # We don't limit the number of cards reported in this round
    card_lim = float('inf')
    top_stmts = []
    ###############################################
    # The belief cutoff for statements
    belief_cutoff = 0.3
    ###############################################
    # Sort by amount of evidence
    for st in sorted(flattened_evidence_stmts,
                     key=lambda x: x.belief, reverse=True):
        if st.belief >= belief_cutoff:
            print(st.belief, st)
        if st.belief < belief_cutoff:
            print('SKIP', st.belief, st)

        # If it's background knowledge, we skip the statement
        if is_background_knowledge(st):
            print('This statement is background knowledge - skipping.')
            continue

        # Assemble IndexCards
        ia = IndexCardAssembler([st], pmc_override=pmcid)
        ia.make_model()
        # If the index card was actually made
        # (not all statements can be assembled into index cards so
        # this is often not the case)
        if ia.cards:
            # Save the index card json
            ia.save_model(indexcard_prefix + '-%d.json' % card_counter)
            card_counter += 1
            top_stmts.append(st)
            if card_counter > card_lim:
                break

    # Print the English-assembled model for debugging purposes
    ea = EnglishAssembler(top_stmts)
    print('=======================')
    print(ea.make_model().encode('utf-8'))
    print('=======================')

    # Print the statement graph
    graph = render_stmt_graph(nonbg_stmts)
    graph.draw(otherout_prefix + '_graph.pdf', prog='dot')
    # Print statement diagnostics
    print_stmts(pa.stmts, otherout_prefix + '_statements.tsv')
    print_stmts(related_stmts, otherout_prefix + '_related_statements.tsv')
def preassemble(self, filters=None):
    """Preassemble the Statements collected in the model.

    Use INDRA's GroundingMapper, Preassembler and BeliefEngine
    on the IncrementalModel and save the unique statements and
    the top level statements in class attributes.

    Currently the following filter options are implemented:
    - grounding: require that all Agents in statements are grounded
    - model_one: require that at least one Agent is in the incremental model
    - model_all: require that all Agents are in the incremental model
    - prior_one: require that at least one Agent is in the prior model
    - prior_all: require that all Agents are in the prior model
    Note that model_one -> prior_all are increasingly more restrictive
    options.

    Parameters
    ----------
    filters : Optional[list[str]]
        A list of filter options to apply when choosing the statements.
        See description above for more details. Default: None
    """
    stmts = self.get_statements()
    logger.info("%d raw Statements in total" % len(stmts))

    # Fix grounding
    logger.info("Running grounding map")
    twg = gm.agent_texts_with_grounding(stmts)
    prot_map = gm.protein_map_from_twg(twg)
    gm.default_grounding_map.update(prot_map)
    gmap = gm.GroundingMapper(gm.default_grounding_map)
    stmts = gmap.map_agents(stmts, do_rename=True)
    logger.info("%d Statements after grounding map" % len(stmts))

    # Fix sites
    sm = SiteMapper(default_site_map)
    stmts, _ = sm.map_sites(stmts)
    logger.info("%d Statements with valid sequence" % len(stmts))

    if filters:
        if "grounding" in filters:
            # Filter out ungrounded statements
            logger.info("Running grounding filter")
            stmts = self._relevance_filter(stmts, ["grounding"])
            logger.info("%s Statements after filter" % len(stmts))
        if "human_only" in filters:
            # Filter out non-human proteins
            logger.info("Running non-human protein filter")
            stmts = self._relevance_filter(stmts, ["human_only"])
            logger.info("%s Statements after filter" % len(stmts))
        for rel_key in ("prior_one", "model_one", "prior_all", "model_all"):
            if rel_key in filters:
                logger.info("Running %s relevance filter" % rel_key)
                stmts = self._relevance_filter(stmts, [rel_key])
                logger.info("%s Statements after filter" % len(stmts))

    # Combine duplicates
    logger.info("Preassembling %d Statements" % len(stmts))
    pa = Preassembler(hierarchies, stmts)
    self.unique_stmts = pa.combine_duplicates()
    logger.info("%d unique Statements" % len(self.unique_stmts))

    # Run BeliefEngine on unique statements
    be = BeliefEngine()
    be.set_prior_probs(self.unique_stmts)

    # Build statement hierarchy
    self.unique_stmts = pa.combine_related(return_toplevel=False)
    self.toplevel_stmts = [st for st in self.unique_stmts
                           if not st.supports]
    logger.info("%d top-level Statements" % len(self.toplevel_stmts))
    # Run BeliefEngine on hierarchy
    be.set_hierarchy_probs(self.unique_stmts)
def calculate_belief(stmts):
    be = BeliefEngine()
    be.set_prior_probs(stmts)
    be.set_hierarchy_probs(stmts)
    return {s.matches_key(): s.belief for s in stmts}
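# Hypothetical usage of calculate_belief above: rank preassembled statements
# (whose supports/supported_by links have already been populated) by belief.
beliefs = calculate_belief(stmts)
for key, belief in sorted(beliefs.items(), key=lambda kv: kv[1], reverse=True):
    print('%.3f %s' % (belief, key))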