def test_association_contradicts():
    """Check Association.contradicts across member-order and polarity
    combinations using the world ontology."""
    neg = 'wm/concept/causal_factor/food_insecurity/food_unavailability'
    pos = 'wm/concept/causal_factor/food_security/food_availability'
    # NOTE(review): food_avail_neg is grounded to the *positive* concept
    # (pos) but carries polarity -1, mirroring food_unavail (negative
    # concept, polarity +1) -- presumably intentional; confirm.
    food_avail_neg = Event(Concept('food security', db_refs={'WM': pos}),
                           delta=QualitativeDelta(polarity=-1))
    food_avail_pos = Event(Concept('food security', db_refs={'WM': pos}),
                           delta=QualitativeDelta(polarity=1))
    food_unavail = Event(Concept('food insecurity', db_refs={'WM': neg}),
                         delta=QualitativeDelta(polarity=1))
    # Ungrounded production events with opposite polarities
    prp = Event(Concept('production'), delta=QualitativeDelta(polarity=1))
    prn = Event(Concept('production'), delta=QualitativeDelta(polarity=-1))
    # Contradiction via opposed groundings with matching deltas
    assert Association([food_avail_neg, prp]).contradicts(
        Association([food_unavail, prn]), world_ontology)
    # Contradiction via same groundings with opposite polarity
    assert Association([food_avail_neg, prp]).contradicts(
        Association([food_avail_neg, prn]), world_ontology)
    # Member order must not matter
    assert Association([prp, food_avail_neg]).contradicts(
        Association([food_avail_neg, prn]), world_ontology)
    assert Association([prn, food_avail_neg]).contradicts(
        Association([food_avail_pos, prn]), world_ontology)
    assert Association([food_avail_neg, food_avail_pos]).contradicts(
        Association([food_unavail, food_avail_neg]), world_ontology)
    assert Association([food_unavail, food_avail_pos]).contradicts(
        Association([food_avail_pos, food_avail_pos]), world_ontology)
    assert Association([food_unavail, food_avail_pos]).contradicts(
        Association([food_avail_neg, food_avail_neg]), world_ontology)
def get_event(event_entry):
    """Build an Event statement from a single Sofia event entry."""
    # Concept named after the 'Relation' field; raw text kept in db_refs
    relation = event_entry['Relation']
    concept = Concept(relation, db_refs={'TEXT': relation})
    event_type = event_entry['Event_Type']
    if event_type:
        concept.db_refs['SOFIA'] = event_type
    # World context with optional time and location
    context = WorldContext()
    time_str = event_entry.get('Time')
    if time_str:
        context.time = TimeContext(text=time_str.strip())
    location = event_entry.get('Location')
    if location:
        context.geo_location = RefContext(name=location)
    # Record agent/patient roles as evidence annotations when present
    annotations = {}
    for ann_key, entry_key in (('agent', 'Agent'), ('patient', 'Patient')):
        value = event_entry.get(entry_key)
        if value:
            annotations[ann_key] = value
    evidence = Evidence(source_api='sofia',
                        pmid=event_entry.get('Source'),
                        text=event_entry.get('Text'),
                        annotations=annotations,
                        source_id=event_entry['Event Index'])
    polarity = event_entry.get('Polarity')
    return Event(concept, context=context, evidence=[evidence],
                 delta=QualitativeDelta(polarity=polarity, adjectives=None))
def test_association_refinement():
    """Associations over more specific groundings refine more general ones."""
    health = 'UN/entities/human/health'
    food = 'UN/entities/human/food'
    food_security = 'UN/entities/human/food/food_security'
    eh = Event(Concept('health', db_refs={'UN': [(health, 1.0)]}))
    ef = Event(Concept('food', db_refs={'UN': [(food, 1.0)]}))
    efs = Event(
        Concept('food security', db_refs={'UN': [(food_security, 1.0)]}))
    # st1 and st2 are member-order permutations of the same Association
    st1 = Association([eh, ef], evidence=[Evidence(source_api='eidos1')])
    st2 = Association([ef, eh], evidence=[Evidence(source_api='eidos2')])
    st3 = Association([eh, efs], evidence=[Evidence(source_api='eidos3')])
    st4 = Association([ef, efs], evidence=[Evidence(source_api='eidos4')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hm = HierarchyManager(eidos_ont, True, True)
    hierarchies = {'entity': hm}
    pa = Preassembler(hierarchies, [st1, st2, st3, st4])
    unique_stmts = pa.combine_duplicates()  # debugging
    # st1/st2 collapse into one, leaving three unique statements
    assert len(unique_stmts) == 3
    rel_stmts = pa.combine_related()
    assert len(rel_stmts) == 2
    # Find the top-level (health, food security) statement
    eh_efs_stmt = [
        st for st in rel_stmts
        if (st.members[0].concept.name in {'health', 'food security'}
            and st.members[1].concept.name in {'health', 'food security'})
    ][0]
    # It should be supported by the more general (food, health) statement
    assert len(eh_efs_stmt.supported_by) == 1
    assert (eh_efs_stmt.supported_by[0].members[0].concept.name
            in {'food', 'health'})
    assert (eh_efs_stmt.supported_by[0].members[1].concept.name
            in {'food', 'health'})
def get_event_compositional(self, event_entry: Dict[str, str]) -> Event:
    """Return an Event built with compositional WM grounding.

    Falls back to the old-style flat Sofia grounding when no usable
    compositional grounding is available.

    Parameters
    ----------
    event_entry :
        The event to process

    Returns
    -------
    event :
        An Event statement
    """
    comp_name, comp_grnd = self.get_compositional_grounding(event_entry)
    # The compositional grounding is usable only if its first (theme)
    # slot is actually filled in
    usable = (comp_name is not None and comp_grnd[0] is not None
              and comp_grnd[0][0] is not None)
    if usable:
        concept = Concept(comp_name,
                          db_refs={'TEXT': comp_name, 'WM': [comp_grnd]})
    else:
        rel_name = event_entry['Relation']
        concept = Concept(rel_name, db_refs={'TEXT': rel_name})
        if event_entry['Event_Type']:
            concept.db_refs['SOFIA'] = event_entry['Event_Type']
    # World context with optional time and location
    context = WorldContext()
    time_str = event_entry.get('Time')
    if time_str:
        context.time = TimeContext(text=time_str.strip())
    location = event_entry.get('Location')
    if location:
        context.geo_location = RefContext(name=location)
    # Evidence with DART text ref and agent/patient annotations
    annotations = {}
    for ann_key, entry_key in (('agent', 'Agent'), ('patient', 'Patient')):
        value = event_entry.get(entry_key)
        if value:
            annotations[ann_key] = value
    evidence = Evidence(source_api='sofia',
                        text_refs={'DART': event_entry.get('Source')},
                        text=event_entry.get('Text'),
                        annotations=annotations,
                        source_id=event_entry['Event Index'])
    polarity = event_entry.get('Polarity')
    return Event(concept, context=context, evidence=[evidence],
                 delta=QualitativeDelta(polarity=polarity, adjectives=None))
def _make_wm_stmts():
    """Build a fixed set of Influence statements for WM belief tests."""
    ev1 = Evidence(source_api='eidos', text='A',
                   annotations={'found_by': 'ported_syntax_1_verb-Causal'})
    ev2 = Evidence(source_api='eidos', text='B',
                   annotations={'found_by': 'dueToSyntax2-Causal'})
    ev3 = Evidence(source_api='hume', text='C')
    ev4 = Evidence(source_api='cwms', text='D')
    ev5 = Evidence(source_api='sofia', text='E')
    ev6 = Evidence(source_api='sofia', text='F')
    x = Event(Concept('x', db_refs={'TEXT': 'dog'}))
    y = Event(Concept('y', db_refs={'TEXT': 'cat'}))
    stmt1 = Influence(x, y, evidence=[ev1, ev2])
    stmt2 = Influence(x, y, evidence=[ev1, ev3])
    stmt3 = Influence(x, y, evidence=[ev3, ev4, ev5])
    stmt4 = Influence(x, y, evidence=[ev5])
    stmt5 = Influence(x, y, evidence=[ev6])
    # Fixed UUIDs keep the fixture deterministic across runs
    stmt1.uuid = '1'
    stmt2.uuid = '2'
    stmt3.uuid = '3'
    stmt4.uuid = '4'
    stmt5.uuid = '5'
    # NOTE(review): stmt5 is constructed and given a UUID but excluded
    # from the returned list -- confirm whether the omission is
    # intentional.
    stmts = [stmt1, stmt2, stmt3, stmt4]
    return stmts
def test_assemble_influence():
    """A single Influence yields a two-node, one-edge Bayesian network."""
    influence = Influence(Event(Concept('rainfall')),
                          Event(Concept('crop_yields')))
    assembler = FigaroAssembler([influence])
    assembler.make_model()
    network = assembler.BN
    assert network is not None
    assert len(network.nodes()) == 2
    assert len(network.edges()) == 1
def _make_path_stmts(self, stmts, merge=False):
    """Return (link, sentence) pairs rendering path statements in English.

    If merge is True, statements are first grouped via
    group_and_sort_statements and each group is collapsed into a single
    representative statement.
    """
    sentences = []
    if merge:
        groups = group_and_sort_statements(stmts)
        new_stmts = []
        for group in groups:
            # group[0] is the sort key: [-1] is the statement type name,
            # [1] the tuple of agent names
            stmt_type = group[0][-1]
            agent_names = group[0][1]
            if len(agent_names) != 2:
                # Only binary statements can be represented here
                continue
            if stmt_type == 'Influence':
                stmt = get_class_from_name(stmt_type, Statement)(
                    Event(Concept(agent_names[0])),
                    Event(Concept(agent_names[1])))
            else:
                try:
                    # Most statement types take two Agent arguments
                    stmt = get_class_from_name(stmt_type, Statement)(Agent(
                        agent_names[0]), Agent(agent_names[1]))
                except ValueError:
                    # Fall back to types taking a list of Agents
                    stmt = get_class_from_name(stmt_type, Statement)(
                        [Agent(ag_name) for ag_name in agent_names])
            new_stmts.append(stmt)
        stmts = new_stmts
    for stmt in stmts:
        ea = EnglishAssembler([stmt])
        sentence = ea.make_model()
        if self.make_links:
            # Link to an HTML view of the statement's evidence
            link = get_statement_queries([stmt])[0] + '&format=html'
            sentences.append((link, sentence))
        else:
            sentences.append(('', sentence))
    return sentences
def get_event(event_entry):
    """Build an Event statement from a Sofia event entry (dict delta)."""
    relation = event_entry['Relation']
    concept = Concept(relation, db_refs={'TEXT': relation})
    event_type = event_entry['Event_Type']
    if event_type:
        concept.db_refs['SOFIA'] = event_type
    # World context with optional time and location
    context = WorldContext()
    time_str = event_entry.get('Time')
    if time_str:
        context.time = TimeContext(text=time_str.strip())
    location = event_entry.get('Location')
    if location:
        context.geo_location = RefContext(name=location)
    evidence = Evidence(source_api='sofia',
                        pmid=event_entry.get('Source'),
                        text=event_entry.get('Text'))
    # Older-style delta: a plain dict rather than a QualitativeDelta
    delta = {'polarity': event_entry.get('Polarity'), 'adjectives': []}
    return Event(concept, context=context, evidence=[evidence], delta=delta)
def _process_row(header, row):
    """Build an Influence statement from one Sofia table row.

    Returns None when the row lacks an agent or patient, or when the
    relation type is not recognized as positive/negative/neutral.
    """
    row_dict = {h: v for h, v in zip(header, row)}
    subj = row_dict.get('Agent')
    obj = row_dict.get('Patient')
    if not obj or not subj:
        return None
    rel = row_dict.get('Relation')
    # Map the relation type onto an overall polarity
    if _in_rels(rel, pos_rels):
        pol = 1
    elif _in_rels(rel, neg_rels):
        pol = -1
    elif _in_rels(rel, neu_rels):
        pol = None
    else:
        # Unrecognized relation: no statement
        return None
    subj_concept = Concept(subj, db_refs={'TEXT': subj, 'SOFIA': subj})
    # FIX: the object's db_refs previously used the *subject's* text for
    # both the TEXT and SOFIA entries; they must refer to the object.
    obj_concept = Concept(obj, db_refs={'TEXT': obj, 'SOFIA': obj})
    text = row_dict.get('Sentence')
    annot_keys = ['Relation', 'Event_Type', 'Location', 'Time']
    annots = {k: row_dict.get(k) for k in annot_keys}
    ref = row_dict.get('Source_File')
    ev = Evidence(source_api='sofia', pmid=ref, annotations=annots,
                  text=text)
    stmt = Influence(subj_concept, obj_concept, evidence=[ev])
    # The overall polarity is recorded on the object delta
    stmt.obj_delta['polarity'] = pol
    return stmt
def _process_relations(relation_rows, event_dict):
    """Build Influence statements from Sofia relation spreadsheet rows.

    relation_rows is an iterator of spreadsheet rows (header row first);
    event_dict maps event indices to their event entries.
    """
    header = [cell.value for cell in next(relation_rows)]
    stmts = []
    for row in relation_rows:
        row_values = [r.value for r in row]
        row_dict = {h: v for h, v in zip(header, row_values)}
        cause_entries = row_dict.get('Cause Index')
        effect_entries = row_dict.get('Effect Index')
        # FIXME: Handle cases in which there is a missing cause/effect
        if not cause_entries or not effect_entries:
            continue
        # Multiple causes/effects come comma-separated in a single cell
        causes = [c.strip() for c in cause_entries.split(',')]
        effects = [e.strip() for e in effect_entries.split(',')]
        rel = row_dict.get('Relation')
        # Map the relation type onto an overall polarity
        if _in_rels(rel, pos_rels):
            pol = 1
        elif _in_rels(rel, neg_rels):
            pol = -1
        elif _in_rels(rel, neu_rels):
            pol = None
        # If we don't recognize this relation, we don't get any statements
        else:
            continue
        text = row_dict.get('Sentence')
        #annot_keys = ['Relation', 'Event_Type', 'Location', 'Time']
        #annots = {k: row_dict.get(k) for k in annot_keys}
        annot_keys = ['Relation']
        annots = {k: row_dict.get(k) for k in annot_keys}
        ref = row_dict.get('Source_File')
        ev = Evidence(source_api='sofia', pmid=ref, annotations=annots,
                      text=text)
        # One statement per (cause, effect) pair, all sharing the same
        # evidence object
        for cause_index, effect_index in itertools.product(
                causes, effects):
            cause_name = event_dict[cause_index]['Relation']
            cause_grounding = event_dict[cause_index]['Event_Type']
            effect_name = event_dict[effect_index]['Relation']
            effect_grounding = event_dict[effect_index]['Event_Type']
            cause_concept = Concept(cause_name,
                                    db_refs={'TEXT': cause_name,
                                             'SOFIA': cause_grounding})
            effect_concept = Concept(effect_name,
                                     db_refs={'TEXT': effect_name,
                                              'SOFIA': effect_grounding})
            stmt = Influence(cause_concept, effect_concept, evidence=[ev])
            # Assume unknown polarity on the subject, put the overall
            # polarity in the sign of the object
            stmt.subj_delta['polarity'] = None
            stmt.obj_delta['polarity'] = pol
            stmts.append(stmt)
    return stmts
def test_curations():
    """Curations submitted for a project can be retrieved via the API."""
    # Use a fresh in-memory DB so the test is isolated
    sc.db = DbManager(url='sqlite:///:memory:')
    sc.db.create_all()
    _call_api('post', 'assembly/new_project',
              json=dict(project_id='p1', project_name='Project 1'))
    # Now add a record just on the back-end
    sc.db.add_records_for_project('p1', ['r1'])
    # And now add a statement for that record so we can "curate" it
    stmt = Influence(Event(Concept('x')), Event(Concept('y')))
    # Precomputed matches hash for the statement above
    stmt_hash = -11334164755554266
    sc.db.add_statements_for_record('r1', [stmt], '1.0')
    curation = {'project_id': 'p1', 'statement_id': 'abcdef',
                'update_type': 'reverse_relation'}
    mappings = _call_api('post', 'assembly/submit_curations',
                         json=dict(project_id='p1',
                                   curations={stmt_hash: curation}))
    assert mappings
    res = _call_api('get', 'assembly/get_project_curations',
                    json=dict(project_id='p1'))
    assert len(res) == 1
    # JSON round-tripping turns the integer hash key into a string
    assert res[str(stmt_hash)] == curation, res
def test_agent_name_custom_preassembly():
    """Events whose agent names are word permutations combine under the
    custom matches function."""
    events = [Event(Concept('price oil')), Event(Concept('oil price'))]
    stmts_out = ac.run_preassembly(
        events, matches_fun=agent_name_stmt_type_matches)
    assert len(stmts_out) == 1
def get_model_checker(statements):
    """Assemble a PySB model from statements and return a ModelChecker
    querying a fixed crop_production -> food_security Influence."""
    assembler = PysbAssembler()
    assembler.add_statements(statements)
    model = assembler.make_model()
    query = Influence(Concept('crop_production'),
                      Concept('food_security'))
    checker = ModelChecker(model, [query])
    checker.prune_influence_map()
    return checker
def from_uncharted_json_serialized_dict(
        cls, _dict, minimum_evidence_pieces_required: int = 1):
    """Construct an analysis graph from an Uncharted-serialized dict.

    Statements with fewer than minimum_evidence_pieces_required pieces
    of evidence, or with an ungrounded subject/object, are skipped.
    """
    sts = _dict["statements"]
    G = nx.DiGraph()
    for s in sts:
        if len(s["evidence"]) >= minimum_evidence_pieces_required:
            subj, obj = s["subj"], s["obj"]
            if (subj["db_refs"]["concept"] is not None and
                    obj["db_refs"]["concept"] is not None):
                subj_name, obj_name = [
                    # NOTE(review): split("/")[:] followed by "/".join is
                    # a no-op identity on the concept string -- likely a
                    # leftover from truncating the path; confirm.
                    "/".join(s[x]["db_refs"]["concept"].split("/")[:])
                    for x in ["subj", "obj"]
                ]
                G.add_edge(subj_name, obj_name)
                subj_delta = s["subj_delta"]
                obj_delta = s["obj_delta"]
                for delta in (subj_delta, obj_delta):
                    # TODO : Ensure that all the statements provided by
                    # Uncharted have unambiguous polarities.
                    if delta["polarity"] is None:
                        delta["polarity"] = 1
                influence_stmt = Influence(
                    Concept(subj_name, db_refs=subj["db_refs"]),
                    Concept(obj_name, db_refs=obj["db_refs"]),
                    subj_delta=s["subj_delta"],
                    obj_delta=s["obj_delta"],
                    evidence=[
                        Evidence(
                            source_api=ev["source_api"],
                            annotations=ev["annotations"],
                            text=ev["text"],
                            epistemics=ev.get("epistemics"),
                        )
                        for ev in s["evidence"]
                    ],
                )
                # Accumulate all Influence statements on the edge
                influence_sts = G.edges[subj_name, obj_name].get(
                    "InfluenceStatements", [])
                influence_sts.append(influence_stmt)
                G.edges[subj_name, obj_name][
                    "InfluenceStatements"] = influence_sts
    # Attach indicators to concepts that made it into the graph
    for concept, indicator in _dict["concept_to_indicator_mapping"].items(
            ):
        if indicator is not None:
            # The indicator source is its first path component
            indicator_source, indicator_name = (
                indicator.split("/")[0],
                indicator,
            )
            if concept in G:
                if G.nodes[concept].get("indicators") is None:
                    G.nodes[concept]["indicators"] = {}
                G.nodes[concept]["indicators"][indicator_name] = Indicator(
                    indicator_name, indicator_source)
    self = cls(G)
    self.assign_uuids_to_nodes_and_edges()
    return self
def _make_path_stmts(self, stmts, merge=False):
    """Return (link, sentence, '') triples rendering path statements in
    English.

    If merge is True, statements are grouped and each group is collapsed
    into a single representative statement whose evidence link covers
    all statement hashes in the group.
    """
    sentences = []
    if merge:
        groups = group_and_sort_statements(stmts)
        for group in groups:
            # group[-1] holds the grouped statements; group[0] is the
            # sort key whose [-1] is the statement type name and [1] the
            # tuple of agent names
            group_stmts = group[-1]
            stmt_type = group[0][-1]
            agent_names = group[0][1]
            if len(agent_names) < 2:
                continue
            if stmt_type == 'Influence':
                stmt = get_class_from_name(stmt_type, Statement)(
                    Event(Concept(agent_names[0])),
                    Event(Concept(agent_names[1])))
            elif stmt_type == 'Conversion':
                # Conversion takes a subject plus from/to agent lists
                stmt = get_class_from_name(stmt_type, Statement)(
                    Agent(agent_names[0]),
                    [Agent(ag) for ag in agent_names[1]],
                    [Agent(ag) for ag in agent_names[2]])
            else:
                try:
                    # Most statement types take two Agent arguments
                    stmt = get_class_from_name(stmt_type, Statement)(Agent(
                        agent_names[0]), Agent(agent_names[1]))
                except ValueError:
                    # Fall back to types taking a list of Agents
                    stmt = get_class_from_name(stmt_type, Statement)(
                        [Agent(ag_name) for ag_name in agent_names])
            ea = EnglishAssembler([stmt])
            sentence = ea.make_model()
            # Link to the evidence page for all hashes in the group
            stmt_hashes = [gr_st.get_hash() for gr_st in group_stmts]
            url_param = parse.urlencode(
                {'stmt_hash': stmt_hashes, 'source': 'model_statement',
                 'model': self.model.name}, doseq=True)
            link = f'/evidence?{url_param}'
            sentences.append((link, sentence, ''))
    else:
        for stmt in stmts:
            if isinstance(stmt, PybelEdge):
                # PyBEL edges have no evidence page, so no link
                sentence = pybel_edge_to_english(stmt)
                sentences.append(('', sentence, ''))
            else:
                ea = EnglishAssembler([stmt])
                sentence = ea.make_model()
                stmt_hashes = [stmt.get_hash()]
                url_param = parse.urlencode(
                    {'stmt_hash': stmt_hashes,
                     'source': 'model_statement',
                     'model': self.model.name}, doseq=True)
                link = f'/evidence?{url_param}'
                sentences.append((link, sentence, ''))
    return sentences
def test_wm_scorer():
    """The Eidos WM scorer keeps prior probabilities for other sources."""
    scorer = get_eidos_scorer()
    influence = Influence(Concept('a'), Concept('b'),
                          evidence=[Evidence(source_api='eidos')])
    # Make sure other sources are still in the map
    assert 'hume' in scorer.prior_probs['rand']
    assert 'biopax' in scorer.prior_probs['syst']
    engine = BeliefEngine(scorer)
    engine.set_prior_probs([influence])
def test_wm_map():
    """UN groundings map onto BBN and SOFIA namespaces (non-symmetric)."""
    subj = Concept('x', db_refs={'UN': [('UN/properties/price', 1.0)]})
    obj = Concept('y',
                  db_refs={'UN': [('UN/entities/human/education', 1.0)]})
    om = OntologyMapper([Influence(subj, obj)], wm_ontomap, symmetric=False)
    om.map_statements()
    mapped = om.statements[0]
    for namespace in ('BBN', 'SOFIA'):
        assert namespace in mapped.subj.db_refs
        assert namespace in mapped.obj.db_refs
def test_map():
    """Symmetric mapping adds the counterpart namespace grounding."""
    un_concept = Concept('x', db_refs={'UN': [('entities/x', 1.0)]})
    hume_concept = Concept('y', db_refs={'HUME': [('entities/y', 1.0)]})
    target = Concept('z')
    om = OntologyMapper([Influence(un_concept, target),
                         Influence(hume_concept, target)])
    om.map_statements()
    assert len(om.statements) == 2
    first, second = om.statements
    assert first.subj.db_refs['HUME'] == [('entities/y', 1.0)], \
        first.subj.db_refs
    assert second.subj.db_refs['UN'] == [('entities/x', 1.0)], \
        second.subj.db_refs
def test_normalize_equals_opposites():
    """Preassembly normalizes equivalent and opposite WM groundings."""
    ont = _get_extended_wm_hierarchy()
    flooding1 = 'wm/a/b/c/flooding'
    flooding2 = 'wm/x/y/z/flooding'
    # Note that as of 5/15/2020 food_insecurity and food_security aren't
    # explicitly opposites in the ontology
    food_insec = 'wm/concept/causal_factor/food_insecurity/food_nonaccess'
    food_sec = 'wm/concept/causal_factor/food_security/food_access'
    # Top grounding: flooding1
    dbr = {'WM': [(flooding1, 1.0), (flooding2, 0.5), (food_insec, 0.1)]}
    ev1 = Event(Concept('x', db_refs=dbr))
    # Top grounding: food security
    dbr = {'WM': [(food_sec, 1.0), (flooding2, 0.5)]}
    ev2 = Event(Concept('x', db_refs=dbr),
                delta=QualitativeDelta(polarity=1))
    # Make sure that by default, things don't get normalized out
    stmts = ac.run_preassembly([ev1, ev2], ontology=ont)
    assert stmts[0].concept.db_refs['WM'][0][0] != \
        stmts[0].concept.db_refs['WM'][1][0]
    # Now we turn on equivalence normalization and expect
    # that flooding1 and flooding2 have been normalized out
    # in ev1's db_refs
    stmts = ac.run_preassembly([ev1, ev2], normalize_equivalences=True,
                               normalize_ns='WM', ontology=ont)
    assert stmts[0].concept.db_refs['WM'][0][0] == \
        stmts[0].concept.db_refs['WM'][1][0], \
        stmts[0].concept.db_refs['WM']
    # Now we turn on opposite normalization and expect that food
    # security and insecurity will get normalized out
    stmts = ac.run_preassembly([ev1, ev2], normalize_equivalences=True,
                               normalize_opposites=True,
                               normalize_ns='WM', ontology=ont)
    assert len(stmts) == 2
    # Sort so the event with more groundings (ev1) comes first
    stmts = sorted(stmts, key=lambda x: len(x.concept.db_refs['WM']),
                   reverse=True)
    assert len(stmts[0].concept.db_refs['WM']) == 3, stmts[0].concept.db_refs
    # This is to check that food_insecurity was normalized to food_security
    assert stmts[0].concept.db_refs['WM'][2][0] == \
        stmts[1].concept.db_refs['WM'][0][0], \
        (stmts[0].concept.db_refs['WM'], stmts[1].concept.db_refs['WM'])
def influence_stmt_from_dict(d: Dict) -> Influence:
    """Reconstruct an Influence statement from its dict serialization."""
    evidence = [
        Evidence(e["source_api"], text=e["text"],
                 annotations=e["annotations"])
        for e in d["evidence"]
    ]
    stmt = Influence(
        Concept(d["subj"]["name"], db_refs=d["subj"]["db_refs"]),
        Concept(d["obj"]["name"], db_refs=d["obj"]["db_refs"]),
        d.get("subj_delta"),
        d.get("obj_delta"),
        evidence,
    )
    stmt.belief = d["belief"]
    return stmt
def test_concept_isa_eid():
    """A child WM grounding refines its parent, but not vice versa."""
    child = Concept('b', db_refs={
        'WM': [('wm/concept/entity/organization', 1.0)]})
    parent = Concept('a', db_refs={'WM': [('wm/concept/entity', 1.0)]})
    print(child.get_grounding())
    print(parent.get_grounding())
    assert child.refinement_of(parent, world_ontology)
    assert not parent.refinement_of(child, world_ontology)
def get_concept(entity):
    """Return Concept from an Eidos entity."""
    # The canonical name becomes the name of the Concept
    return Concept(entity['canonicalName'],
                   db_refs=EidosProcessor.get_groundings(entity))
def _make_concept(self, entity):
    """Return Concept from a Hume entity."""
    # Use the canonical name as the Concept name. A trigger head text
    # would sometimes be cleaner but proved too minimal for some
    # concepts (e.g. just "security" for "food security"), so it is
    # deliberately not used here.
    name = self._sanitize(entity['canonicalName'])
    # Keep the raw text; add Hume scored groundings only when the
    # grounding list is non-empty
    db_refs = {'TEXT': entity['text']}
    hume_grounding = _get_hume_grounding(entity)
    if hume_grounding:
        db_refs['HUME'] = hume_grounding
    return Concept(name, db_refs=db_refs)
def _make_concept(self, entity):
    """Return (Concept, metadata) from a Hume entity."""
    # Use the canonical name as the Concept name. A trigger head text
    # would sometimes be cleaner but proved too minimal for some
    # concepts (e.g. just "security" for "food security"), so it is
    # deliberately not used here.
    name = self._sanitize(entity['canonicalName'])
    concept = Concept(name, db_refs=_get_grounding(entity))
    # Map each argument's type to the ID of its value
    metadata = {arg['type']: arg['value']['@id']
                for arg in entity['arguments']}
    return concept, metadata
def get_event_flat(self, event_entry: Dict[str, str]) -> Event:
    """Get an Event with flattened grounding

    Parameters
    ----------
    event_entry :
        The event to process

    Returns
    -------
    event :
        An Event statement
    """
    # Concept named after the 'Relation' field; raw text kept in db_refs
    name = event_entry['Relation']
    concept = Concept(name, db_refs={'TEXT': name})
    grounding = event_entry['Event_Type']
    if grounding:
        concept.db_refs['SOFIA'] = grounding
    # World context with optional time and location
    context = WorldContext()
    time = event_entry.get('Time')
    if time:
        context.time = TimeContext(text=time.strip())
    loc = event_entry.get('Location')
    if loc:
        context.geo_location = RefContext(name=loc)
    text = event_entry.get('Text')
    ref = event_entry.get('Source')
    agent = event_entry.get('Agent')
    patient = event_entry.get('Patient')
    # Record agent/patient roles as evidence annotations when present
    anns = {}
    if agent:
        anns['agent'] = agent
    if patient:
        anns['patient'] = patient
    text_refs = {'DART': ref}
    ev = Evidence(source_api='sofia', text_refs=text_refs, text=text,
                  annotations=anns, source_id=event_entry['Event Index'])
    pol = event_entry.get('Polarity')
    event = Event(concept, context=context, evidence=[ev],
                  delta=QualitativeDelta(polarity=pol, adjectives=None))
    return event
def test_influence_duplicate():
    """Duplicate Influences combine; evidence pools on the unique copy."""
    gov = 'UN/entities/human/government/government_entity'
    agr = 'UN/entities/natural/crop_technology'
    cgov = Event(Concept('government', db_refs={'UN': [(gov, 1.0)]}))
    cagr = Event(Concept('agriculture', db_refs={'UN': [(agr, 1.0)]}))
    # stmt1 and stmt3 are duplicates; stmt2 has the reverse direction
    stmt1 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos1')])
    stmt2 = Influence(cagr, cgov, evidence=[Evidence(source_api='eidos2')])
    stmt3 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos3')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hm = HierarchyManager(eidos_ont, True, True)
    hierarchies = {'entity': hm}
    pa = Preassembler(hierarchies, [stmt1, stmt2, stmt3])
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 2
    # The duplicated statement carries both pieces of evidence
    assert len(unique_stmts[0].evidence) == 2
    assert len(unique_stmts[1].evidence) == 1
    sources = [e.source_api for e in unique_stmts[0].evidence]
    assert set(sources) == set(['eidos1', 'eidos3'])
def make_event(concept, attrs):
    """Build a UN-grounded Event from a concept text and attribute dict."""
    grounding = attrs["grounding"]
    delta_attrs = attrs["delta"]
    grounded_concept = Concept(
        grounding,
        db_refs={"TEXT": concept, "UN": [(grounding, 0.8)]},
    )
    return Event(
        grounded_concept,
        delta=QualitativeDelta(delta_attrs["polarity"],
                               delta_attrs["adjective"]),
    )
def test_run_preassembly_concepts():
    """Equivalent WM groundings are normalized so Influences combine."""
    ont = _get_extended_wm_hierarchy()
    # NOTE(review): these 'WM' db_refs are bare strings rather than the
    # usual lists of (grounding, score) tuples -- presumably supported
    # by the normalization code path; confirm.
    rainfall = Event(
        Concept('rain', db_refs={
            'WM': ('wm/concept/causal_factor/environmental/meteorologic/'
                   'precipitation/rainfall')}))
    flooding_1 = Event(Concept('flood',
                               db_refs={'WM': 'wm/x/y/z/flooding'}))
    flooding_2 = Event(Concept('flooding',
                               db_refs={'WM': 'wm/a/b/c/flooding'}))
    # The two flooding groundings are equivalent in the extended
    # hierarchy, so the two Influences collapse into one
    st_out = ac.run_preassembly(
        [Influence(rainfall, flooding_1), Influence(rainfall, flooding_2)],
        normalize_ns='WM', normalize_equivalences=True, ontology=ont)
    assert len(st_out) == 1, st_out
def _make_concept(entity):
    """Return Concept from an Eidos entity."""
    # Canonical name becomes the Concept name; the raw text and any
    # Eidos scored groundings are kept as db_refs
    db_refs = {'TEXT': entity['text']}
    db_refs.update(_get_groundings(entity))
    return Concept(entity['canonicalName'], db_refs=db_refs)
def _make_concept(self, entity):
    """Return Concept from a BBN entity.

    FIX: the original signature omitted ``self`` even though the body
    calls ``self._sanitize``, which raised NameError on every call; the
    sibling ``_make_concept`` implementations also take ``self``.
    """
    # Use the sanitized canonical name as the name of the Concept
    name = self._sanitize(entity['canonicalName'])
    # Save raw text and BBN scored groundings as db_refs
    db_refs = {
        'TEXT': entity['text'],
        'BBN': _get_bbn_grounding(entity)
    }
    concept = Concept(name, db_refs=db_refs)
    return concept
def get_event(event_entry):
    """Build an Event statement from a Sofia event entry."""
    relation = event_entry['Relation']
    concept = Concept(relation, db_refs={'TEXT': relation})
    if event_entry['Event_Type']:
        concept.db_refs['SOFIA'] = event_entry['Event_Type']
    # World context with optional time and location
    context = WorldContext()
    time_str = event_entry.get('Time')
    if time_str:
        context.time = TimeContext(text=time_str.strip())
    location = event_entry.get('Location')
    if location:
        context.geo_location = RefContext(name=location)
    evidence = Evidence(source_api='sofia',
                        pmid=event_entry.get('Source'),
                        text=event_entry.get('Text'))
    polarity = event_entry.get('Polarity')
    return Event(concept, context=context, evidence=[evidence],
                 delta=QualitativeDelta(polarity=polarity, adjectives=None))