Example #1
def test_assemble_influence():
    stmt = Influence(Event(Concept('rainfall')), Event(Concept('crop_yields')))
    fa = FigaroAssembler([stmt])
    fa.make_model()
    assert fa.BN is not None
    assert len(fa.BN.nodes()) == 2
    assert len(fa.BN.edges()) == 1
def test_agent_name_custom_preassembly():
    e1 = Event(Concept('price oil'))
    e2 = Event(Concept('oil price'))
    stmts = [e1, e2]
    stmts_out = ac.run_preassembly(stmts,
                                   matches_fun=agent_name_stmt_type_matches)
    assert len(stmts_out) == 1
def make_stmt_from_sort_key(key, verb, agents=None):
    """Make a Statement from the sort key.

    Specifically, the sort key used by `group_and_sort_statements`.
    """
    def make_agent(name):
        if name == 'None' or name is None:
            return None
        return Agent(name)

    StmtClass = get_statement_by_name(verb)
    inps = list(key[1])
    if agents is None:
        agents = []
    if verb == 'Complex':
        agents.extend([make_agent(name) for name in inps])
        stmt = StmtClass(agents[:])
    elif verb == 'Conversion':
        names_from = [make_agent(name) for name in inps[1]]
        names_to = [make_agent(name) for name in inps[2]]
        agents.extend(names_from + names_to)
        stmt = StmtClass(make_agent(inps[0]), names_from, names_to)
    elif verb == 'ActiveForm' or verb == 'HasActivity':
        agents.extend([make_agent(inps[0])])
        stmt = StmtClass(agents[0], inps[1], inps[2])
    elif verb == 'Influence':
        agents.extend([make_agent(inp) for inp in inps[:2]])
        stmt = Influence(*[Event(ag) for ag in agents])
    elif verb == 'Association':
        agents.extend([make_agent(inp) for inp in inps])
        stmt = StmtClass([Event(ag) for ag in agents])
    else:
        agents.extend([make_agent(name) for name in inps])
        stmt = StmtClass(*agents)
    return stmt
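A minimal usage sketch of the helper above (the key layout is an assumption inferred from the function body, where `key[1]` carries the agent names):

# Hypothetical sort key: only key[1] (the agent names) is read above.
key = (0, ('rainfall', 'crop_yields'))
stmt = make_stmt_from_sort_key(key, 'Influence')
# stmt is an Influence between two Events built from Agent objects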
Example #4
def test_curations():
    sc.db = DbManager(url='sqlite:///:memory:')
    sc.db.create_all()

    _call_api('post',
              'assembly/new_project',
              json=dict(project_id='p1', project_name='Project 1'))

    # Now add a record just on the back-end
    sc.db.add_records_for_project('p1', ['r1'])
    # And now add a statement for that record so we can "curate" it
    stmt = Influence(Event(Concept('x')), Event(Concept('y')))
    stmt_hash = -11334164755554266
    sc.db.add_statements_for_record('r1', [stmt], '1.0')

    curation = {
        'project_id': 'p1',
        'statement_id': 'abcdef',
        'update_type': 'reverse_relation'
    }
    mappings = _call_api('post',
                         'assembly/submit_curations',
                         json=dict(project_id='p1',
                                   curations={stmt_hash: curation}))
    assert mappings
    res = _call_api('get',
                    'assembly/get_project_curations',
                    json=dict(project_id='p1'))
    assert len(res) == 1
    assert res[str(stmt_hash)] == curation, res
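As a hedged aside, the hard-coded hash above should correspond to the statement's own hash; in a sketch it could be computed directly (the exact value depends on the matches function in use):

# Sketch only: compute the hash rather than hard-coding it.
computed_hash = stmt.get_hash()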
Example #5
def test_association_contradicts():
    neg = 'wm/concept/causal_factor/food_insecurity/food_unavailability'
    pos = 'wm/concept/causal_factor/food_security/food_availability'
    food_avail_neg = Event(Concept('food security', db_refs={'WM': pos}),
                           delta=QualitativeDelta(polarity=-1))
    food_avail_pos = Event(Concept('food security', db_refs={'WM': pos}),
                           delta=QualitativeDelta(polarity=1))
    food_unavail = Event(Concept('food insecurity', db_refs={'WM': neg}),
                         delta=QualitativeDelta(polarity=1))
    prp = Event(Concept('production'), delta=QualitativeDelta(polarity=1))
    prn = Event(Concept('production'), delta=QualitativeDelta(polarity=-1))

    assert Association([food_avail_neg,
                        prp]).contradicts(Association([food_unavail, prn]),
                                          world_ontology)
    assert Association([food_avail_neg,
                        prp]).contradicts(Association([food_avail_neg, prn]),
                                          world_ontology)
    assert Association([prp, food_avail_neg
                        ]).contradicts(Association([food_avail_neg, prn]),
                                       world_ontology)
    assert Association([prn, food_avail_neg
                        ]).contradicts(Association([food_avail_pos, prn]),
                                       world_ontology)
    assert Association([food_avail_neg, food_avail_pos]).contradicts(
        Association([food_unavail, food_avail_neg]), world_ontology)
    assert Association([food_unavail, food_avail_pos]).contradicts(
        Association([food_avail_pos, food_avail_pos]), world_ontology)
    assert Association([food_unavail, food_avail_pos]).contradicts(
        Association([food_avail_neg, food_avail_neg]), world_ontology)
Example #6
def _make_wm_stmts():
    ev1 = Evidence(source_api='eidos',
                   text='A',
                   annotations={'found_by': 'ported_syntax_1_verb-Causal'})
    ev2 = Evidence(source_api='eidos',
                   text='B',
                   annotations={'found_by': 'dueToSyntax2-Causal'})
    ev3 = Evidence(source_api='hume', text='C')
    ev4 = Evidence(source_api='cwms', text='D')
    ev5 = Evidence(source_api='sofia', text='E')
    ev6 = Evidence(source_api='sofia', text='F')
    x = Event(Concept('x', db_refs={'TEXT': 'dog'}))
    y = Event(Concept('y', db_refs={'TEXT': 'cat'}))
    stmt1 = Influence(x, y, evidence=[ev1, ev2])
    stmt2 = Influence(x, y, evidence=[ev1, ev3])
    stmt3 = Influence(x, y, evidence=[ev3, ev4, ev5])
    stmt4 = Influence(x, y, evidence=[ev5])
    stmt5 = Influence(x, y, evidence=[ev6])
    stmt1.uuid = '1'
    stmt2.uuid = '2'
    stmt3.uuid = '3'
    stmt4.uuid = '4'
    stmt5.uuid = '5'
    stmts = [stmt1, stmt2, stmt3, stmt4]
    return stmts
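A hedged follow-up sketch (assuming `ac` is `indra.tools.assemble_corpus`, as in other examples on this page): all four returned statements share the same subject and object, so preassembly should collapse them into one Influence with merged evidence.

from indra.tools import assemble_corpus as ac

# Hypothetical usage of the fixture above, not part of the original code.
stmts = _make_wm_stmts()
assembled = ac.run_preassembly(stmts)
assert len(assembled) == 1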
Example #7
def test_association_refinement():
    health = 'UN/entities/human/health'
    food = 'UN/entities/human/food'
    food_security = 'UN/entities/human/food/food_security'
    eh = Event(Concept('health', db_refs={'UN': [(health, 1.0)]}))
    ef = Event(Concept('food', db_refs={'UN': [(food, 1.0)]}))
    efs = Event(
        Concept('food security', db_refs={'UN': [(food_security, 1.0)]}))
    st1 = Association([eh, ef], evidence=[Evidence(source_api='eidos1')])
    st2 = Association([ef, eh], evidence=[Evidence(source_api='eidos2')])
    st3 = Association([eh, efs], evidence=[Evidence(source_api='eidos3')])
    st4 = Association([ef, efs], evidence=[Evidence(source_api='eidos4')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hm = HierarchyManager(eidos_ont, True, True)
    hierarchies = {'entity': hm}
    pa = Preassembler(hierarchies, [st1, st2, st3, st4])
    unique_stmts = pa.combine_duplicates()  # debugging
    assert len(unique_stmts) == 3
    rel_stmts = pa.combine_related()
    assert len(rel_stmts) == 2
    eh_efs_stmt = [
        st for st in rel_stmts
        if (st.members[0].concept.name in {'health', 'food security'}
            and st.members[1].concept.name in {'health', 'food security'})
    ][0]
    assert len(eh_efs_stmt.supported_by) == 1
    assert (eh_efs_stmt.supported_by[0].members[0].concept.name
            in {'food', 'health'})
    assert (eh_efs_stmt.supported_by[0].members[1].concept.name
            in {'food', 'health'})
Example #8
 def _make_path_stmts(self, stmts, merge=False):
     sentences = []
     if merge:
         groups = group_and_sort_statements(stmts)
         new_stmts = []
         for group in groups:
             stmt_type = group[0][-1]
             agent_names = group[0][1]
             if len(agent_names) != 2:
                 continue
             if stmt_type == 'Influence':
                 stmt = get_class_from_name(stmt_type, Statement)(
                     Event(Concept(agent_names[0])),
                     Event(Concept(agent_names[1])))
             else:
                 try:
                     stmt = get_class_from_name(stmt_type, Statement)(Agent(
                         agent_names[0]), Agent(agent_names[1]))
                 except ValueError:
                     stmt = get_class_from_name(stmt_type, Statement)(
                         [Agent(ag_name) for ag_name in agent_names])
             new_stmts.append(stmt)
         stmts = new_stmts
     for stmt in stmts:
         ea = EnglishAssembler([stmt])
         sentence = ea.make_model()
         if self.make_links:
             link = get_statement_queries([stmt])[0] + '&format=html'
             sentences.append((link, sentence))
         else:
             sentences.append(('', sentence))
     return sentences
Example #9
 def _make_path_stmts(self, stmts, merge=False):
     sentences = []
     if merge:
         groups = group_and_sort_statements(stmts)
         for group in groups:
             group_stmts = group[-1]
             stmt_type = group[0][-1]
             agent_names = group[0][1]
             if len(agent_names) < 2:
                 continue
             if stmt_type == 'Influence':
                 stmt = get_class_from_name(stmt_type, Statement)(
                     Event(Concept(agent_names[0])),
                     Event(Concept(agent_names[1])))
             elif stmt_type == 'Conversion':
                 stmt = get_class_from_name(stmt_type, Statement)(
                     Agent(agent_names[0]),
                     [Agent(ag) for ag in agent_names[1]],
                     [Agent(ag) for ag in agent_names[2]])
             else:
                 try:
                     stmt = get_class_from_name(stmt_type, Statement)(Agent(
                         agent_names[0]), Agent(agent_names[1]))
                 except ValueError:
                     stmt = get_class_from_name(stmt_type, Statement)(
                         [Agent(ag_name) for ag_name in agent_names])
             ea = EnglishAssembler([stmt])
             sentence = ea.make_model()
             stmt_hashes = [gr_st.get_hash() for gr_st in group_stmts]
             url_param = parse.urlencode(
                 {
                     'stmt_hash': stmt_hashes,
                     'source': 'model_statement',
                     'model': self.model.name
                 },
                 doseq=True)
             link = f'/evidence?{url_param}'
             sentences.append((link, sentence, ''))
     else:
         for stmt in stmts:
             if isinstance(stmt, PybelEdge):
                 sentence = pybel_edge_to_english(stmt)
                 sentences.append(('', sentence, ''))
             else:
                 ea = EnglishAssembler([stmt])
                 sentence = ea.make_model()
                 stmt_hashes = [stmt.get_hash()]
                 url_param = parse.urlencode(
                     {
                         'stmt_hash': stmt_hashes,
                         'source': 'model_statement',
                         'model': self.model.name
                     },
                     doseq=True)
                 link = f'/evidence?{url_param}'
                 sentences.append((link, sentence, ''))
     return sentences
Example #10
def test_normalize_equals_opposites():
    ont = _get_extended_wm_hierarchy()
    flooding1 = 'wm/a/b/c/flooding'
    flooding2 = 'wm/x/y/z/flooding'
    # Note that as of 5/15/2020 food_insecurity and food_security aren't
    # explicitly opposites in the ontology
    food_insec = 'wm/concept/causal_factor/food_insecurity/food_nonaccess'
    food_sec = 'wm/concept/causal_factor/food_security/food_access'

    # Top grounding: flooding1
    dbr = {'WM': [(flooding1, 1.0), (flooding2, 0.5), (food_insec, 0.1)]}
    ev1 = Event(Concept('x', db_refs=dbr))

    # Top grounding: food security
    dbr = {'WM': [(food_sec, 1.0), (flooding2, 0.5)]}
    ev2 = Event(Concept('x', db_refs=dbr), delta=QualitativeDelta(polarity=1))

    # Make sure that by default, things don't get normalized out
    stmts = ac.run_preassembly([ev1, ev2], ontology=ont)
    assert stmts[0].concept.db_refs['WM'][0][0] != \
           stmts[0].concept.db_refs['WM'][1][0]

    # Now we turn on equivalence normalization and expect
    # that flooding1 and flooding2 have been normalized out
    # in ev1's db_refs
    stmts = ac.run_preassembly([ev1, ev2],
                               normalize_equivalences=True,
                               normalize_ns='WM',
                               ontology=ont)
    assert stmts[0].concept.db_refs['WM'][0][0] == \
           stmts[0].concept.db_refs['WM'][1][0], \
        stmts[0].concept.db_refs['WM']

    # Now we turn on opposite normalization and expect that food
    # security and insecurity will get normalized out
    stmts = ac.run_preassembly([ev1, ev2],
                               normalize_equivalences=True,
                               normalize_opposites=True,
                               normalize_ns='WM',
                               ontology=ont)
    assert len(stmts) == 2
    stmts = sorted(stmts,
                   key=lambda x: len(x.concept.db_refs['WM']),
                   reverse=True)
    assert len(stmts[0].concept.db_refs['WM']) == 3, stmts[0].concept.db_refs
    # This is to check that food_insecurity was normalized to food_security
    assert stmts[0].concept.db_refs['WM'][2][0] == \
           stmts[1].concept.db_refs['WM'][0][0], \
        (stmts[0].concept.db_refs['WM'],
         stmts[1].concept.db_refs['WM'])
Example #11
    def get_event(event_entry):
        name = event_entry['Relation']
        concept = Concept(name, db_refs={'TEXT': name})
        grounding = event_entry['Event_Type']
        if grounding:
            concept.db_refs['SOFIA'] = grounding
        context = WorldContext()
        time = event_entry.get('Time')
        if time:
            context.time = TimeContext(text=time.strip())
        loc = event_entry.get('Location')
        if loc:
            context.geo_location = RefContext(name=loc)

        text = event_entry.get('Text')
        ref = event_entry.get('Source')
        agent = event_entry.get('Agent')
        patient = event_entry.get('Patient')
        anns = {}
        if agent:
            anns['agent'] = agent
        if patient:
            anns['patient'] = patient
        ev = Evidence(source_api='sofia', pmid=ref, text=text,
                      annotations=anns, source_id=event_entry['Event Index'])
        pol = event_entry.get('Polarity')
        event = Event(concept, context=context, evidence=[ev],
                      delta=QualitativeDelta(polarity=pol, adjectives=None))

        return event
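A sketch of the kind of record this function expects; the field values are invented, but the key names are exactly those read in the body above.

# Hypothetical Sofia-style event entry (values are placeholders).
event_entry = {
    'Relation': 'flooding',
    'Event_Type': 'Weather/Flooding',
    'Time': 'March 2020',
    'Location': 'South Sudan',
    'Text': 'Flooding displaced thousands of people.',
    'Source': '12345',
    'Agent': 'heavy rains',
    'Patient': 'communities',
    'Event Index': 'E1',
    'Polarity': 1,
}
event = get_event(event_entry)
assert event.concept.db_refs['SOFIA'] == 'Weather/Flooding'
assert event.delta.polarity == 1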
Example #12
    def _get_event_and_context(self,
                               event,
                               eid=None,
                               arg_type=None,
                               evidence=None):
        """Return an INDRA Event based on an event entry."""
        if not eid:
            eid = _choose_id(event, arg_type)
        ev = self.concept_dict[eid]
        concept, metadata = self._make_concept(ev)

        is_migration_event = False
        hume_grounding = {x[0] for x in concept.db_refs['HUME']}
        for grounding_en in hume_grounding:
            if "wm/concept/causal_factor/social_and_political/migration" in \
                    grounding_en:
                is_migration_event = True
        if is_migration_event:
            movement_context, quantitative_state = (
                self._make_movement_context(ev))
            event_obj = Migration(concept,
                                  delta=quantitative_state,
                                  context=movement_context,
                                  evidence=evidence)
        else:
            ev_delta = QualitativeDelta(polarity=get_polarity(ev),
                                        adjectives=None)
            context = self._make_world_context(ev)
            event_obj = Event(concept,
                              delta=ev_delta,
                              context=context,
                              evidence=evidence)
        return event_obj
Example #13
    def get_event_compositional(self, event_entry: Dict[str, str]) -> Event:
        """Get an Event with compositional grounding

        Parameters
        ----------
        event_entry :
            The event to process

        Returns
        -------
        event :
            An Event statement
        """
        # Get the compositional grounding
        comp_name, comp_grnd = self.get_compositional_grounding(event_entry)
        if comp_name is not None and \
                comp_grnd[0] is not None and \
                comp_grnd[0][0] is not None:
            concept = Concept(comp_name,
                              db_refs={
                                  'TEXT': comp_name,
                                  'WM': [comp_grnd]
                              })
        # If not try to get old style Sofia grounding
        else:
            name = event_entry['Relation']
            concept = Concept(name, db_refs={'TEXT': name})
            if event_entry['Event_Type']:
                concept.db_refs['SOFIA'] = event_entry['Event_Type']

        context = WorldContext()
        time = event_entry.get('Time')
        if time:
            context.time = TimeContext(text=time.strip())
        loc = event_entry.get('Location')
        if loc:
            context.geo_location = RefContext(name=loc)

        text = event_entry.get('Text')
        ref = event_entry.get('Source')
        agent = event_entry.get('Agent')
        patient = event_entry.get('Patient')
        anns = {}
        if agent:
            anns['agent'] = agent
        if patient:
            anns['patient'] = patient
        text_refs = {'DART': ref}
        ev = Evidence(source_api='sofia',
                      text_refs=text_refs,
                      text=text,
                      annotations=anns,
                      source_id=event_entry['Event Index'])
        pol = event_entry.get('Polarity')
        event = Event(concept,
                      context=context,
                      evidence=[ev],
                      delta=QualitativeDelta(polarity=pol, adjectives=None))

        return event
Example #14
    def get_event(event_entry):
        name = event_entry['Relation']
        concept = Concept(name, db_refs={'TEXT': name})
        grounding = event_entry['Event_Type']
        if grounding:
            concept.db_refs['SOFIA'] = grounding
        context = WorldContext()
        time = event_entry.get('Time')
        if time:
            context.time = TimeContext(text=time.strip())
        loc = event_entry.get('Location')
        if loc:
            context.geo_location = RefContext(name=loc)

        text = event_entry.get('Text')
        ref = event_entry.get('Source')
        ev = Evidence(source_api='sofia', pmid=ref, text=text)
        pol = event_entry.get('Polarity')
        event = Event(concept,
                      context=context,
                      evidence=[ev],
                      delta={
                          'polarity': pol,
                          'adjectives': []
                      })

        return event
Example #15
def test_event_assemble_location():
    rainfall = Concept('rainfall')
    loc1 = RefContext(name='x', db_refs={'GEOID': '1'})
    loc2 = RefContext(name='x', db_refs={'GEOID': '2'})
    ev1 = Event(rainfall, context=WorldContext(geo_location=loc1))
    ev2 = Event(rainfall, context=WorldContext(geo_location=loc2))

    pa = Preassembler(ontology=world_ontology,
                      stmts=[ev1, ev2],
                      matches_fun=None)
    unique_stmts = pa.combine_duplicates()

    assert len(unique_stmts) == 1
    pa = Preassembler(ontology=world_ontology,
                      stmts=[ev1, ev2],
                      matches_fun=location_matches)
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 2
def test_apply_grounding_curation():
    gr1 = [('theme1', 0.8), None, ('process', 0.7), None]
    gr2 = ['theme2', 'property2', None, None]
    cur = {
        "before": {"subj": {"factor": 'x',
                            "concept": gr1},
                   "obj": {"factor": 'y',
                           "concept": 'z'}},
        "after": {"subj": {"factor": 'x',
                           "concept": gr2},
                  "obj": {"factor": 'y',
                          "concept": 'z'}},
    }
    c1 = Concept('x', db_refs={'WM': [gr1]})
    stmt = Influence(Event(c1), Event(Concept('y')))
    IncrementalAssembler.apply_grounding_curation(stmt, cur)
    assert stmt.subj.concept.db_refs['WM'][0] == \
        [('theme2', 1.0), ('property2', 1.0), None, None]
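For orientation, the four slots in these compositional grounding lists appear to be theme, theme property, process, and process property (inferred from the variable names in the test); a minimal sketch:

from indra.statements import Concept

# Sketch only; the slot order is an assumption based on the names above.
comp_grounding = [('theme1', 0.8),   # theme
                  None,              # theme property
                  ('process', 0.7),  # process
                  None]              # process property
concept = Concept('x', db_refs={'WM': [comp_grounding]})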
Example #17
def make_event(concept, attrs):
    return Event(
        Concept(
            attrs["grounding"],
            db_refs={"TEXT": concept, "UN": [(attrs["grounding"], 0.8)]},
        ),
        delta=QualitativeDelta(
            attrs["delta"]["polarity"], attrs["delta"]["adjective"]
        ),
    )
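A hypothetical call matching the keys this helper reads (the grounding string and delta values are made up):

attrs = {'grounding': 'UN/events/weather/precipitation',
         'delta': {'polarity': 1, 'adjective': ['significant']}}
event = make_event('rainfall', attrs)
assert event.concept.db_refs['UN'] == [(attrs['grounding'], 0.8)]
assert event.delta.polarity == 1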
Example #18
def test_run_preassembly_concepts():
    ont = _get_extended_wm_hierarchy()
    rainfall = Event(
        Concept('rain',
                db_refs={
                    'WM':
                    ('wm/concept/causal_factor/environmental/meteorologic/'
                     'precipitation/rainfall')
                }))
    flooding_1 = Event(Concept('flood', db_refs={'WM': 'wm/x/y/z/flooding'}))
    flooding_2 = Event(Concept('flooding', db_refs={'WM':
                                                    'wm/a/b/c/flooding'}))
    st_out = ac.run_preassembly(
        [Influence(rainfall, flooding_1),
         Influence(rainfall, flooding_2)],
        normalize_ns='WM',
        normalize_equivalences=True,
        ontology=ont)
    assert len(st_out) == 1, st_out
Example #19
def test_influence_duplicate():
    gov = 'UN/entities/human/government/government_entity'
    agr = 'UN/entities/natural/crop_technology'
    cgov = Event(Concept('government', db_refs={'UN': [(gov, 1.0)]}))
    cagr = Event(Concept('agriculture', db_refs={'UN': [(agr, 1.0)]}))
    stmt1 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos1')])
    stmt2 = Influence(cagr, cgov, evidence=[Evidence(source_api='eidos2')])
    stmt3 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos3')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hm = HierarchyManager(eidos_ont, True, True)
    hierarchies = {'entity': hm}
    pa = Preassembler(hierarchies, [stmt1, stmt2, stmt3])
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 2
    assert len(unique_stmts[0].evidence) == 2
    assert len(unique_stmts[1].evidence) == 1
    sources = [e.source_api for e in unique_stmts[0].evidence]
    assert set(sources) == set(['eidos1', 'eidos3'])
Example #20
def test_print_model():
    stmt1 = Influence(Event(Concept('rainfall')),
                      Event(Concept('crop_yields')))
    stmt2 = Influence(Event(Concept('irrigation')),
                      Event(Concept('crop_yields')))
    stmt3 = Influence(Event(Concept('temperature')),
                      Event(Concept('crop_yields')))
    stmt4 = Influence(Event(Concept('rainfall')),
                      Event(Concept('temperature')))
    stmts = [stmt1, stmt2, stmt3, stmt4]
    fa = FigaroAssembler(stmts)
    fa.make_model()
    txt = fa.print_model()
    assert txt is not None
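A hedged follow-up sketch: the generated model text can simply be written out for later use (the `.scala` extension is an assumption about FigaroAssembler's output format).

txt = fa.print_model()
with open('figaro_model.scala', 'w') as fh:
    fh.write(txt)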
Example #21
 def get_event(self, event):
     concept = self.get_concept(event)
     states = event.get('states', [])
     extracted_states = self.extract_entity_states(states)
     polarity = extracted_states.get('polarity')
     adjectives = extracted_states.get('adjectives')
     delta = QualitativeDelta(polarity=polarity, adjectives=adjectives)
     timex = extracted_states.get('time_context', None)
     geo = extracted_states.get('geo_context', None)
     context = WorldContext(time=timex, geo_location=geo) \
         if timex or geo else None
     stmt = Event(concept, delta=delta, context=context)
     return stmt
Example #22
def test_association_duplicate():
    ev1 = Event(Concept('a'))
    ev2 = Event(Concept('b'))
    ev3 = Event(Concept('c'))
    # Order of members does not matter
    st1 = Association([ev1, ev2], evidence=[Evidence(source_api='eidos1')])
    st2 = Association([ev1, ev3], evidence=[Evidence(source_api='eidos2')])
    st3 = Association([ev2, ev1], evidence=[Evidence(source_api='eidos3')])
    st4 = Association([ev2, ev3], evidence=[Evidence(source_api='eidos4')])
    st5 = Association([ev2, ev3], evidence=[Evidence(source_api='eidos5')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hm = HierarchyManager(eidos_ont, True, True)
    hierarchies = {'entity': hm}
    pa = Preassembler(hierarchies, [st1, st2, st3, st4, st5])
    unique_stmts = pa.combine_duplicates()
    assert len(unique_stmts) == 3
    assert len(unique_stmts[0].evidence) == 2
    assert len(unique_stmts[1].evidence) == 1
    assert len(unique_stmts[2].evidence) == 2
    sources = [e.source_api for e in unique_stmts[0].evidence]
    assert set(sources) == set(['eidos1', 'eidos3'])
Example #23
def test_compositional_grounding_filter():
    # Test property filtered out based on score
    wm = [[('x', 0.5), ('y', 0.8), None, None]]
    concept = Concept('x', db_refs={'WM': wm})
    stmt = Event(concept)
    stmt_out = compositional_grounding_filter_stmt(stmt, 0.7, [])
    concept = stmt_out.concept
    assert concept.db_refs['WM'][0][0] == ('y', 0.8), concept.db_refs
    assert concept.db_refs['WM'][0][1] is None

    # Test property being promoted to theme
    wm = [[None, ('y', 0.8), None, None]]
    concept.db_refs['WM'] = wm
    stmt = Event(concept)
    stmt_out = compositional_grounding_filter_stmt(stmt, 0.7, [])
    concept = stmt_out.concept
    assert concept.db_refs['WM'][0][0] == ('y', 0.8), concept.db_refs
    assert concept.db_refs['WM'][0][1] is None

    # Test score threshold being equal to score
    wm = [[('x', 0.7), ('y', 0.7), None, None]]
    concept.db_refs['WM'] = wm
    stmt = Event(concept)
    stmt_out = compositional_grounding_filter_stmt(stmt, 0.7, [])
    concept = stmt_out.concept
    assert concept.db_refs['WM'][0][0] == ('x', 0.7), concept.db_refs
    assert concept.db_refs['WM'][0][1] == ('y', 0.7), concept.db_refs

    # Score filter combined with groundings to exclude plus promoting
    # a property to a theme
    wm = [[('wm_compositional/entity/geo-location', 0.7), ('y', 0.7), None,
           None]]
    concept.db_refs['WM'] = wm
    stmt = Event(concept)
    stmt_out = compositional_grounding_filter_stmt(
        stmt, 0.7, ['wm_compositional/entity/geo-location'])
    concept = stmt_out.concept
    assert concept.db_refs['WM'][0][0] == ('y', 0.7), concept.db_refs
Example #24
def test_influence_refinement():
    tran = 'UN/entities/human/infrastructure/transportation'
    truck = 'UN/entities/human/infrastructure/transportation/' + \
        'transportation_methods'
    agr = 'UN/entities/human/livelihood'
    ctran = Event(Concept('transportation', db_refs={'UN': [(tran, 1.0)]}))
    ctruck = Event(Concept('trucking', db_refs={'UN': [(truck, 1.0)]}))
    cagr = Event(Concept('agriculture', db_refs={'UN': [(agr, 1.0)]}))
    stmt1 = Influence(ctran, cagr, evidence=[Evidence(source_api='eidos1')])
    stmt2 = Influence(ctruck, cagr, evidence=[Evidence(source_api='eidos2')])
    stmt3 = Influence(cagr, ctran, evidence=[Evidence(source_api='eidos3')])
    eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             '../sources/eidos/eidos_ontology.rdf')
    hm = HierarchyManager(eidos_ont, True, True)
    hierarchies = {'entity': hm}
    pa = Preassembler(hierarchies, [stmt1, stmt2, stmt3])
    rel_stmts = pa.combine_related()
    assert len(rel_stmts) == 2
    truck_stmt = [
        st for st in rel_stmts if st.subj.concept.name == 'trucking'
    ][0]
    assert len(truck_stmt.supported_by) == 1
    assert truck_stmt.supported_by[0].subj.concept.name == 'transportation'
Example #25
def test_influence_event_hash_reference():
    rainfall = Concept('rainfall')
    loc1 = RefContext(name='x', db_refs={'GEOID': '1'})
    loc2 = RefContext(name='x', db_refs={'GEOID': '2'})
    ev1 = Event(rainfall, context=WorldContext(geo_location=loc1))
    ev2 = Event(rainfall, context=WorldContext(geo_location=loc2))
    infl = Influence(ev1, ev2)

    h1 = ev1.get_hash(refresh=True)
    h2 = ev2.get_hash(refresh=True)
    hl1 = ev1.get_hash(refresh=True, matches_fun=location_matches)
    hl2 = ev2.get_hash(refresh=True, matches_fun=location_matches)

    assert h1 == h2, (h1, h2)
    assert hl1 != hl2, (hl1, hl2)

    ij = infl.to_json(matches_fun=location_matches)
    ev1j = ev1.to_json(matches_fun=location_matches)
    assert ev1j['matches_hash'] == ij['subj']['matches_hash'], \
        (print(json.dumps(ev1j, indent=1)),
         print(json.dumps(ij, indent=1)))
Example #26
    def get_event_flat(self, event_entry: Dict[str, str]) -> Event:
        """Get an Event with flattened grounding

        Parameters
        ----------
        event_entry :
            The event to process

        Returns
        -------
        event :
            An Event statement
        """
        name = event_entry['Relation']
        concept = Concept(name, db_refs={'TEXT': name})
        grounding = event_entry['Event_Type']
        if grounding:
            concept.db_refs['SOFIA'] = grounding
        context = WorldContext()
        time = event_entry.get('Time')
        if time:
            context.time = TimeContext(text=time.strip())
        loc = event_entry.get('Location')
        if loc:
            context.geo_location = RefContext(name=loc)

        text = event_entry.get('Text')
        ref = event_entry.get('Source')
        agent = event_entry.get('Agent')
        patient = event_entry.get('Patient')
        anns = {}
        if agent:
            anns['agent'] = agent
        if patient:
            anns['patient'] = patient
        text_refs = {'DART': ref}
        ev = Evidence(source_api='sofia',
                      text_refs=text_refs,
                      text=text,
                      annotations=anns,
                      source_id=event_entry['Event Index'])
        pol = event_entry.get('Polarity')
        event = Event(concept,
                      context=context,
                      evidence=[ev],
                      delta=QualitativeDelta(polarity=pol, adjectives=None))
        return event
Example #27
 def _get_event_and_context(self,
                            event,
                            eid=None,
                            arg_type=None,
                            evidence=None):
     """Return an INDRA Event based on an event entry."""
     if not eid:
         eid = _choose_id(event, arg_type)
     ev = self.concept_dict[eid]
     concept, metadata = self._make_concept(ev)
     ev_delta = QualitativeDelta(polarity=get_polarity(ev), adjectives=None)
     context = self._make_context(ev)
     event_obj = Event(concept,
                       delta=ev_delta,
                       context=context,
                       evidence=evidence)
     return event_obj
Example #28
    def from_uncharted_json_serialized_dict(
            cls, _dict, minimum_evidence_pieces_required: int = 1):
        """ Construct an AnalysisGraph object from a dict of INDRA statements
        exported by Uncharted's CauseMos webapp.

        Args:
            _dict: A dict of INDRA statements exported by Uncharted's CauseMos
                HMI.
            minimum_evidence_pieces_required: The minimum number of evidence
                pieces required to consider a statement for assembly.
        """
        sts = _dict["statements"]
        G = nx.DiGraph()
        for s in sts:
            if len(s["evidence"]) >= minimum_evidence_pieces_required:
                subj, obj = s["subj"], s["obj"]
                if (subj["db_refs"]["concept"] is not None
                        and obj["db_refs"]["concept"] is not None):
                    subj_name, obj_name = [
                        "/".join(s[x]["db_refs"]["concept"].split("/")[:])
                        for x in ["subj", "obj"]
                    ]
                    G.add_edge(subj_name, obj_name)
                    subj_delta = s["subj_delta"]
                    obj_delta = s["obj_delta"]

                    for delta in (subj_delta, obj_delta):
                        # TODO : Ensure that all the statements provided by
                        # Uncharted have unambiguous polarities.
                        if delta["polarity"] is None:
                            delta["polarity"] = 1

                    influence_stmt = Influence(
                        Event(
                            Concept(subj_name, db_refs=subj["db_refs"]),
                            delta=QualitativeDelta(
                                s["subj_delta"]["polarity"],
                                s["subj_delta"]["adjectives"],
                            ),
                        ),
                        Event(
                            Concept(obj_name, db_refs=obj["db_refs"]),
                            delta=QualitativeDelta(
                                s["obj_delta"]["polarity"],
                                s["obj_delta"]["adjectives"],
                            ),
                        ),
                        evidence=[
                            INDRAEvidence(
                                source_api=ev["source_api"],
                                annotations=ev["annotations"],
                                text=ev["text"],
                                epistemics=ev.get("epistemics"),
                            ) for ev in s["evidence"]
                        ],
                    )
                    influence_sts = G.edges[subj_name, obj_name].get(
                        "InfluenceStatements", [])
                    influence_sts.append(influence_stmt)
                    G.edges[subj_name,
                            obj_name]["InfluenceStatements"] = influence_sts

        func_dict = {
            "mean": np.mean,
            "median": np.median,
            "max": max,
            "min": min,
            "raw": lambda x: x,
        }

        for concept, indicator in _dict["conceptIndicators"].items():
            indicator_source, indicator_name = (
                indicator["name"].split("/")[0],
                "/".join(indicator["name"].split("/")[1:]),
            )

            G.nodes[concept]["indicators"] = {
                indicator_name: Indicator(indicator_name, indicator_source)
            }
            values = [x["value"] for x in indicator["values"]]
            indicator["mean"] = func_dict[indicator["func"]](values)
            # indicator.source = indicator["source"]

        self = cls(G)
        self.assign_uuids_to_nodes_and_edges()
        return self
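A minimal, made-up input illustrating only the fields this method reads (assuming the enclosing class is `AnalysisGraph`, as the docstring suggests):

# Hypothetical CauseMos-style export; all names and values are placeholders.
example_dict = {
    "statements": [{
        "subj": {"db_refs": {"concept": "UN/events/weather/rainfall"}},
        "obj": {"db_refs": {"concept": "UN/entities/crop_production"}},
        "subj_delta": {"polarity": 1, "adjectives": []},
        "obj_delta": {"polarity": None, "adjectives": []},
        "evidence": [{"source_api": "eidos",
                      "annotations": {},
                      "text": "Rainfall increased crop production.",
                      "epistemics": {}}],
    }],
    "conceptIndicators": {
        "UN/events/weather/rainfall": {
            "name": "WDI/Average precipitation in depth (mm per year)",
            "func": "mean",
            "values": [{"value": 900.0}, {"value": 1100.0}],
        },
        "UN/entities/crop_production": {
            "name": "FAO/Cereal yield (kg per hectare)",
            "func": "mean",
            "values": [{"value": 2100.0}],
        },
    },
}
G = AnalysisGraph.from_uncharted_json_serialized_dict(example_dict)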
Example #29
 def make_event(comp_grounding):
     scored_grounding = [(gr, 1.0) if gr else None for gr in comp_grounding]
     name = '_'.join([gr.split('/')[-1] for gr in comp_grounding if gr])
     concept = Concept(name=name, db_refs={'WM': [scored_grounding]})
     event = Event(concept)
     return event
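A hypothetical call with a compositional grounding (theme and process filled, the other two slots empty; the grounding strings are invented):

comp_grounding = ['wm/concept/agriculture/crop_production', None,
                  'wm/process/decrease', None]
event = make_event(comp_grounding)
assert event.concept.name == 'crop_production_decrease'
assert event.concept.db_refs['WM'][0][0] == \
    ('wm/concept/agriculture/crop_production', 1.0)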
Example #30
 def make_ev(name, delta):
     return Event(Concept(name), delta=delta)
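A hypothetical call pairing a concept name with an explicit qualitative delta:

from indra.statements import QualitativeDelta

ev = make_ev('rainfall', QualitativeDelta(polarity=-1, adjectives=['low']))
assert ev.delta.polarity == -1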