def test_association_contradicts():
    neg = 'wm/concept/causal_factor/food_insecurity/food_unavailability'
    pos = 'wm/concept/causal_factor/food_security/food_availability'
    food_avail_neg = Event(Concept('food security', db_refs={'WM': pos}),
                           delta=QualitativeDelta(polarity=-1))
    food_avail_pos = Event(Concept('food security', db_refs={'WM': pos}),
                           delta=QualitativeDelta(polarity=1))
    food_unavail = Event(Concept('food insecurity', db_refs={'WM': neg}),
                         delta=QualitativeDelta(polarity=1))
    prp = Event(Concept('production'), delta=QualitativeDelta(polarity=1))
    prn = Event(Concept('production'), delta=QualitativeDelta(polarity=-1))
    assert Association([food_avail_neg, prp]).contradicts(
        Association([food_unavail, prn]), world_ontology)
    assert Association([food_avail_neg, prp]).contradicts(
        Association([food_avail_neg, prn]), world_ontology)
    assert Association([prp, food_avail_neg]).contradicts(
        Association([food_avail_neg, prn]), world_ontology)
    assert Association([prn, food_avail_neg]).contradicts(
        Association([food_avail_pos, prn]), world_ontology)
    assert Association([food_avail_neg, food_avail_pos]).contradicts(
        Association([food_unavail, food_avail_neg]), world_ontology)
    assert Association([food_unavail, food_avail_pos]).contradicts(
        Association([food_avail_pos, food_avail_pos]), world_ontology)
    assert Association([food_unavail, food_avail_pos]).contradicts(
        Association([food_avail_neg, food_avail_neg]), world_ontology)
def _get_event_and_context(self, event, eid=None, arg_type=None,
                           evidence=None):
    """Return an INDRA Event based on an event entry."""
    if not eid:
        eid = _choose_id(event, arg_type)
    ev = self.concept_dict[eid]
    concept, metadata = self._make_concept(ev)

    is_migration_event = False
    hume_grounding = {x[0] for x in concept.db_refs['HUME']}
    for grounding_en in hume_grounding:
        if "wm/concept/causal_factor/social_and_political/migration" in \
                grounding_en:
            is_migration_event = True
    if is_migration_event:
        movement_context, quantitative_state = \
            self._make_movement_context(ev)
        event_obj = Migration(concept, delta=quantitative_state,
                              context=movement_context, evidence=evidence)
    else:
        ev_delta = QualitativeDelta(polarity=get_polarity(ev),
                                    adjectives=None)
        context = self._make_world_context(ev)
        event_obj = Event(concept, delta=ev_delta, context=context,
                          evidence=evidence)
    return event_obj
def get_event(event_entry):
    name = event_entry['Relation']
    concept = Concept(name, db_refs={'TEXT': name})
    grounding = event_entry['Event_Type']
    if grounding:
        concept.db_refs['SOFIA'] = grounding
    context = WorldContext()
    time = event_entry.get('Time')
    if time:
        context.time = TimeContext(text=time.strip())
    loc = event_entry.get('Location')
    if loc:
        context.geo_location = RefContext(name=loc)
    text = event_entry.get('Text')
    ref = event_entry.get('Source')
    agent = event_entry.get('Agent')
    patient = event_entry.get('Patient')
    anns = {}
    if agent:
        anns['agent'] = agent
    if patient:
        anns['patient'] = patient
    ev = Evidence(source_api='sofia', pmid=ref, text=text,
                  annotations=anns, source_id=event_entry['Event Index'])
    pol = event_entry.get('Polarity')
    event = Event(concept, context=context, evidence=[ev],
                  delta=QualitativeDelta(polarity=pol, adjectives=None))
    return event
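# Illustrative sketch (not part of the original reader): a minimal,
# hypothetical event_entry dict using only the keys that get_event reads
# above; all field values here are made up for demonstration.
_example_entry = {
    'Relation': 'increase',
    'Event_Type': 'Crop_Production',  # hypothetical Sofia event type
    'Time': '2019',
    'Location': 'South Sudan',
    'Text': 'Crop production increased in 2019.',
    'Source': 'doc-123',
    'Agent': 'rainfall',
    'Patient': 'crop production',
    'Event Index': 'E1',
    'Polarity': 1,
}
# _example_event = get_event(_example_entry)  # would yield an INDRA Event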
def get_event_compositional(self, event_entry: Dict[str, str]) -> Event:
    """Get an Event with compositional grounding

    Parameters
    ----------
    event_entry :
        The event to process

    Returns
    -------
    event :
        An Event statement
    """
    # Get the compositional grounding
    comp_name, comp_grnd = self.get_compositional_grounding(event_entry)
    if comp_name is not None and \
            comp_grnd[0] is not None and \
            comp_grnd[0][0] is not None:
        concept = Concept(comp_name,
                          db_refs={'TEXT': comp_name, 'WM': [comp_grnd]})
    # If not, fall back to the old-style Sofia grounding
    else:
        name = event_entry['Relation']
        concept = Concept(name, db_refs={'TEXT': name})
        if event_entry['Event_Type']:
            concept.db_refs['SOFIA'] = event_entry['Event_Type']

    context = WorldContext()
    time = event_entry.get('Time')
    if time:
        context.time = TimeContext(text=time.strip())
    loc = event_entry.get('Location')
    if loc:
        context.geo_location = RefContext(name=loc)
    text = event_entry.get('Text')
    ref = event_entry.get('Source')
    agent = event_entry.get('Agent')
    patient = event_entry.get('Patient')
    anns = {}
    if agent:
        anns['agent'] = agent
    if patient:
        anns['patient'] = patient
    text_refs = {'DART': ref}
    ev = Evidence(source_api='sofia', text_refs=text_refs, text=text,
                  annotations=anns, source_id=event_entry['Event Index'])
    pol = event_entry.get('Polarity')
    event = Event(concept, context=context, evidence=[ev],
                  delta=QualitativeDelta(polarity=pol, adjectives=None))
    return event
def make_event(concept, attrs):
    return Event(
        Concept(
            attrs["grounding"],
            db_refs={"TEXT": concept, "UN": [(attrs["grounding"], 0.8)]},
        ),
        delta=QualitativeDelta(
            attrs["delta"]["polarity"], attrs["delta"]["adjective"]
        ),
    )
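# Illustrative sketch (not part of the original code): calling make_event with
# a hypothetical concept string and an attrs dict that matches the keys
# accessed above; the grounding path and adjective are made up.
_example_event = make_event(
    "rainfall",
    {
        "grounding": "UN/events/weather/precipitation",  # hypothetical grounding
        "delta": {"polarity": 1, "adjective": ["heavy"]},
    },
)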
def get_event(self, event):
    concept = self.get_concept(event)
    states = event.get('states', [])
    extracted_states = self.extract_entity_states(states)
    polarity = extracted_states.get('polarity')
    adjectives = extracted_states.get('adjectives')
    delta = QualitativeDelta(polarity=polarity, adjectives=adjectives)
    timex = extracted_states.get('time_context', None)
    geo = extracted_states.get('geo_context', None)
    context = WorldContext(time=timex, geo_location=geo) \
        if timex or geo else None
    stmt = Event(concept, delta=delta, context=context)
    return stmt
def test_normalize_equals_opposites():
    ont = _get_extended_wm_hierarchy()
    flooding1 = 'wm/a/b/c/flooding'
    flooding2 = 'wm/x/y/z/flooding'
    # Note that as of 5/15/2020 food_insecurity and food_security aren't
    # explicitly opposites in the ontology
    food_insec = 'wm/concept/causal_factor/food_insecurity/food_nonaccess'
    food_sec = 'wm/concept/causal_factor/food_security/food_access'

    # Top grounding: flooding1
    dbr = {'WM': [(flooding1, 1.0), (flooding2, 0.5), (food_insec, 0.1)]}
    ev1 = Event(Concept('x', db_refs=dbr))
    # Top grounding: food security
    dbr = {'WM': [(food_sec, 1.0), (flooding2, 0.5)]}
    ev2 = Event(Concept('x', db_refs=dbr),
                delta=QualitativeDelta(polarity=1))

    # Make sure that by default, things don't get normalized out
    stmts = ac.run_preassembly([ev1, ev2], ontology=ont)
    assert stmts[0].concept.db_refs['WM'][0][0] != \
        stmts[0].concept.db_refs['WM'][1][0]

    # Now we turn on equivalence normalization and expect
    # that flooding1 and flooding2 have been normalized out
    # in ev1's db_refs
    stmts = ac.run_preassembly([ev1, ev2],
                               normalize_equivalences=True,
                               normalize_ns='WM',
                               ontology=ont)
    assert stmts[0].concept.db_refs['WM'][0][0] == \
        stmts[0].concept.db_refs['WM'][1][0], \
        stmts[0].concept.db_refs['WM']

    # Now we turn on opposite normalization and expect that food
    # security and insecurity will get normalized out
    stmts = ac.run_preassembly([ev1, ev2],
                               normalize_equivalences=True,
                               normalize_opposites=True,
                               normalize_ns='WM',
                               ontology=ont)
    assert len(stmts) == 2
    stmts = sorted(stmts,
                   key=lambda x: len(x.concept.db_refs['WM']),
                   reverse=True)
    assert len(stmts[0].concept.db_refs['WM']) == 3, stmts[0].concept.db_refs
    # This is to check that food_insecurity was normalized to food_security
    assert stmts[0].concept.db_refs['WM'][2][0] == \
        stmts[1].concept.db_refs['WM'][0][0], \
        (stmts[0].concept.db_refs['WM'], stmts[1].concept.db_refs['WM'])
def get_event_flat(self, event_entry: Dict[str, str]) -> Event:
    """Get an Event with flattened grounding

    Parameters
    ----------
    event_entry :
        The event to process

    Returns
    -------
    event :
        An Event statement
    """
    name = event_entry['Relation']
    concept = Concept(name, db_refs={'TEXT': name})
    grounding = event_entry['Event_Type']
    if grounding:
        concept.db_refs['SOFIA'] = grounding
    context = WorldContext()
    time = event_entry.get('Time')
    if time:
        context.time = TimeContext(text=time.strip())
    loc = event_entry.get('Location')
    if loc:
        context.geo_location = RefContext(name=loc)
    text = event_entry.get('Text')
    ref = event_entry.get('Source')
    agent = event_entry.get('Agent')
    patient = event_entry.get('Patient')
    anns = {}
    if agent:
        anns['agent'] = agent
    if patient:
        anns['patient'] = patient
    text_refs = {'DART': ref}
    ev = Evidence(source_api='sofia', text_refs=text_refs, text=text,
                  annotations=anns, source_id=event_entry['Event Index'])
    pol = event_entry.get('Polarity')
    event = Event(concept, context=context, evidence=[ev],
                  delta=QualitativeDelta(polarity=pol, adjectives=None))
    return event
def _get_event_and_context(self, event, eid=None, arg_type=None,
                           evidence=None):
    """Return an INDRA Event based on an event entry."""
    if not eid:
        eid = _choose_id(event, arg_type)
    ev = self.concept_dict[eid]
    concept, metadata = self._make_concept(ev)
    ev_delta = QualitativeDelta(polarity=get_polarity(ev), adjectives=None)
    context = self._make_context(ev)
    event_obj = Event(concept, delta=ev_delta, context=context,
                      evidence=evidence)
    return event_obj
def from_uncharted_json_serialized_dict(
        cls, _dict, minimum_evidence_pieces_required: int = 1):
    """ Construct an AnalysisGraph object from a dict of INDRA statements
    exported by Uncharted's CauseMos webapp.

    Args:
        _dict: A dict of INDRA statements exported by Uncharted's CauseMos
            HMI.
        minimum_evidence_pieces_required: The minimum number of evidence
            pieces required to consider a statement for assembly.
    """
    sts = _dict["statements"]
    G = nx.DiGraph()
    for s in sts:
        if len(s["evidence"]) >= minimum_evidence_pieces_required:
            subj, obj = s["subj"], s["obj"]
            if (subj["db_refs"]["concept"] is not None
                    and obj["db_refs"]["concept"] is not None):
                subj_name, obj_name = [
                    "/".join(s[x]["db_refs"]["concept"].split("/")[:])
                    for x in ["subj", "obj"]
                ]
                G.add_edge(subj_name, obj_name)
                subj_delta = s["subj_delta"]
                obj_delta = s["obj_delta"]

                for delta in (subj_delta, obj_delta):
                    # TODO: Ensure that all the statements provided by
                    # Uncharted have unambiguous polarities.
                    if delta["polarity"] is None:
                        delta["polarity"] = 1

                influence_stmt = Influence(
                    Event(
                        Concept(subj_name, db_refs=subj["db_refs"]),
                        delta=QualitativeDelta(
                            s["subj_delta"]["polarity"],
                            s["subj_delta"]["adjectives"],
                        ),
                    ),
                    Event(
                        Concept(obj_name, db_refs=obj["db_refs"]),
                        delta=QualitativeDelta(
                            s["obj_delta"]["polarity"],
                            s["obj_delta"]["adjectives"],
                        ),
                    ),
                    evidence=[
                        INDRAEvidence(
                            source_api=ev["source_api"],
                            annotations=ev["annotations"],
                            text=ev["text"],
                            epistemics=ev.get("epistemics"),
                        )
                        for ev in s["evidence"]
                    ],
                )
                influence_sts = G.edges[subj_name, obj_name].get(
                    "InfluenceStatements", [])
                influence_sts.append(influence_stmt)
                G.edges[subj_name, obj_name][
                    "InfluenceStatements"] = influence_sts

    func_dict = {
        "mean": np.mean,
        "median": np.median,
        "max": max,
        "min": min,
        "raw": lambda x: x,
    }

    for concept, indicator in _dict["conceptIndicators"].items():
        indicator_source, indicator_name = (
            indicator["name"].split("/")[0],
            "/".join(indicator["name"].split("/")[1:]),
        )

        G.nodes[concept]["indicators"] = {
            indicator_name: Indicator(indicator_name, indicator_source)
        }

        values = [x["value"] for x in indicator["values"]]
        indicator["mean"] = func_dict[indicator["func"]](values)
        # indicator.source = indicator["source"]

    self = cls(G)
    self.assign_uuids_to_nodes_and_edges()
    return self
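# Illustrative sketch (not part of the class): a minimal, hypothetical input
# dict containing only the keys this method reads; the concept paths,
# indicator name, and values below are made up for demonstration.
_example_causemos_dict = {
    "statements": [
        {
            "subj": {"db_refs": {"concept": "wm/concept/rainfall"}},
            "obj": {"db_refs": {"concept": "wm/concept/crop_production"}},
            "subj_delta": {"polarity": 1, "adjectives": ["heavy"]},
            "obj_delta": {"polarity": None, "adjectives": []},
            "evidence": [
                {
                    "source_api": "eidos",
                    "annotations": {},
                    "text": "Heavy rainfall increased crop production.",
                }
            ],
        }
    ],
    "conceptIndicators": {
        "wm/concept/crop_production": {
            "name": "FAO/Crop production index",
            "func": "mean",
            "values": [{"value": 95.0}, {"value": 102.5}],
        }
    },
}
# G = AnalysisGraph.from_uncharted_json_serialized_dict(_example_causemos_dict)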
def test_merge_deltas():
    def add_annots(stmt):
        for ev in stmt.evidence:
            ev.annotations['subj_adjectives'] = stmt.subj.delta.adjectives
            ev.annotations['obj_adjectives'] = stmt.obj.delta.adjectives
            ev.annotations['subj_polarity'] = stmt.subj.delta.polarity
            ev.annotations['obj_polarity'] = stmt.obj.delta.polarity
        return stmt

    # d1 = {'adjectives': ['a', 'b', 'c'], 'polarity': 1}
    # d2 = {'adjectives': [], 'polarity': -1}
    # d3 = {'adjectives': ['g'], 'polarity': 1}
    # d4 = {'adjectives': ['d', 'e', 'f'], 'polarity': -1}
    # d5 = {'adjectives': ['d'], 'polarity': None}
    # d6 = {'adjectives': [], 'polarity': None}
    # d7 = {'adjectives': [], 'polarity': 1}
    d1 = QualitativeDelta(polarity=1, adjectives=['a', 'b', 'c'])
    d2 = QualitativeDelta(polarity=-1, adjectives=None)
    d3 = QualitativeDelta(polarity=1, adjectives=['g'])
    d4 = QualitativeDelta(polarity=-1, adjectives=['d', 'e', 'f'])
    d5 = QualitativeDelta(polarity=None, adjectives=['d'])
    d6 = QualitativeDelta(polarity=None, adjectives=None)
    d7 = QualitativeDelta(polarity=1, adjectives=None)

    def make_ev(name, delta):
        return Event(Concept(name), delta=delta)

    stmts = [
        add_annots(
            Influence(make_ev('a', sd), make_ev('b', od),
                      evidence=[Evidence(source_api='eidos',
                                         text='%d' % idx)]))
        for idx, (sd, od) in enumerate([(d1, d2), (d3, d4)])
    ]
    stmts = ac.run_preassembly(stmts, return_toplevel=True)
    stmts = merge_deltas(stmts)
    assert stmts[0].subj.delta.polarity == 1, stmts[0].subj.delta
    assert stmts[0].obj.delta.polarity == -1, stmts[0].obj.delta
    assert set(stmts[0].subj.delta.adjectives) == {'a', 'b', 'c', 'g'}, \
        stmts[0].subj.delta
    assert set(stmts[0].obj.delta.adjectives) == {'d', 'e', 'f'}, \
        stmts[0].obj.delta

    stmts = [
        add_annots(
            Influence(make_ev('a', sd), make_ev('b', od),
                      evidence=[Evidence(source_api='eidos',
                                         text='%d' % idx)]))
        for idx, (sd, od) in enumerate([(d1, d5), (d6, d7), (d6, d7)])
    ]
    stmts = ac.run_preassembly(stmts, return_toplevel=True)
    stmts = merge_deltas(stmts)
    assert stmts[0].subj.delta.polarity is None, stmts[0].subj.delta
    assert stmts[0].obj.delta.polarity == 1, stmts[0].obj.delta
    assert set(stmts[0].subj.delta.adjectives) == {'a', 'b', 'c'}, \
        stmts[0].subj.delta
    assert set(stmts[0].obj.delta.adjectives) == {'d'}, \
        stmts[0].obj.delta