Code example #1
File: inference.py Project: festrade/assembl
 def get_inference(self, graph):
     network = self.network
     network.reset()
     network.feedFactsToAdd(generateTokenSet(self.ontology))
     print "ontology loaded"
     network.feedFactsToAdd(generateTokenSet(graph))
     return network.inferredFacts
Code example #2
File: inference.py Project: mapofemergence/edgesense
 def get_inference(self, graph):
     network = self.network
     network.reset()
     network.feedFactsToAdd(generateTokenSet(self.ontology))
     logging.info("InferenceStore ontology loaded")
     network.feedFactsToAdd(generateTokenSet(graph))
     return network.inferredFacts
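Both get_inference methods above assume an object that already holds a Rete network and an ontology graph. The sketch below shows a minimal version of that setup, assuming FuXi is installed; the class name follows the log message in example #2, and the constructor arguments are hypothetical.

from rdflib import Graph
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.Rete.Util import generateTokenSet
from FuXi.Horn.HornRules import HornFromN3

class InferenceStore(object):
    def __init__(self, ontology_path, rules_path):
        # ontology graph re-fed to the network on every inference run
        self.ontology = Graph().parse(ontology_path, format='n3')
        # build the Rete network once from the N3 rules
        _, _, self.network = SetupRuleStore(makeNetwork=True)
        for rule in HornFromN3(rules_path):
            self.network.buildNetworkFromClause(rule)

    def get_inference(self, graph):
        network = self.network
        network.reset()
        network.feedFactsToAdd(generateTokenSet(self.ontology))
        network.feedFactsToAdd(generateTokenSet(graph))
        return network.inferredFacts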
Code example #3
 def setUp(self):
     ruleStore,ruleGraph,network=SetupRuleStore(makeNetwork=True)
     self.network= network
     self.factGraph = Graph().parse(StringIO(SKOLEM_MACHINE_FACTS),format='n3')
     for rule in HornFromN3(StringIO(SKOLEM_MACHINE_RULES)):
         self.network.buildNetworkFromClause(rule)
     self.network.feedFactsToAdd(generateTokenSet(self.factGraph))
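SKOLEM_MACHINE_FACTS and SKOLEM_MACHINE_RULES are module constants not shown here. A hypothetical stand-in pair that exercises the same machinery (a blank node in the rule head, which the engine skolemizes into a fresh BNode on each firing, as example #37 below asserts):

# hypothetical stand-ins for the constants used by setUp above
SKOLEM_MACHINE_FACTS = """
@prefix ex: <http://example.org/> .
ex:alice a ex:Person .
"""

SKOLEM_MACHINE_RULES = """
@prefix ex: <http://example.org/> .
# the blank node in the head forces the engine to mint a new node
{ ?p a ex:Person } => { ?p ex:hasParent [ a ex:Person ] } .
"""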
Code example #4
File: testOWL.py Project: KiranAjayakumar/python-dlp
    def calculateEntailments(self, factGraph):
        start = time.time()  
        self.network.feedFactsToAdd(generateTokenSet(factGraph))                    
        sTime = time.time() - start
        if sTime > 1:
            sTimeStr = "%s seconds"%sTime
        else:
            sTime = sTime * 1000
            sTimeStr = "%s milli seconds"%sTime
        print "Time to calculate closure on working memory: ",sTimeStr
        print self.network
        
        tNodeOrder = [tNode 
                        for tNode in self.network.terminalNodes 
                            if self.network.instanciations.get(tNode,0)]
        tNodeOrder.sort(key=lambda x:self.network.instanciations[x],reverse=True)
        for termNode in tNodeOrder:
            print termNode
            print "\t", termNode.clause
            print "\t\t%s instanciations"%self.network.instanciations[termNode]
    #                    for c in AllClasses(factGraph):
    #                        print CastClass(c,factGraph)
        print "=============="
        self.network.inferredFacts.namespace_manager = factGraph.namespace_manager
#        if self.network.inferredFacts:
#            print "Implicit facts: "
#            print self.network.inferredFacts.serialize(format='turtle')
#        print "ruleset after MST:"                    
#        pprint(list(self.network.rules))
#        print "rate of reduction in the size of the program: ", len len(self.network.rules)
        return sTimeStr
Code example #5
File: test_owl.py Project: koo5/new_shit
def test_04_dl():
    ontology = Graph()
    data = Graph()

    scotland = Country(DATA.scotland, graph=data)
    scotland.label = "Scotland"
    
    edinburgh = City(DATA.edinburgh, graph=data)
    edinburgh.label = "Edinburgh"
    edinburgh.country = scotland

    place = PlaceClass(graph=ontology, factoryGraph=data)
    country = CountryClass(graph=ontology, factoryGraph=data)
    city = CityClass(graph=ontology, factoryGraph=data)

    from FuXi.Rete.RuleStore import SetupRuleStore
    from FuXi.Rete.Util import generateTokenSet

    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    network.reset(data)
    network.setupDescriptionLogicProgramming(ontology)
    network.feedFactsToAdd(generateTokenSet(data))

    assert place.get(DATA.scotland) == scotland
    assert place.get(DATA.edinburgh) == edinburgh
Code example #6
File: insert_worm.py Project: travs/OpenWormData
def infer():
    from rdflib import Graph
    from FuXi.Rete.RuleStore import SetupRuleStore
    from FuXi.Rete.Util import generateTokenSet
    from FuXi.Horn.HornRules import HornFromN3

    try:
        w = P.Worm()
        semnet = w.rdf #fetches the entire worm.db graph

        rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
        closureDeltaGraph = Graph()
        network.inferredFacts = closureDeltaGraph

        #build a network of rules
        for rule in HornFromN3('testrules.n3'):
            network.buildNetworkFromClause(rule)

        network.feedFactsToAdd(generateTokenSet(semnet)) # apply rules to original facts to infer new facts

        # combine original facts with inferred facts
        for x in closureDeltaGraph:
            w.rdf.add(x)

    ### uncomment the next 4 lines to print inferred facts to a human-readable file (demo purposes)
        #inferred_facts = closureDeltaGraph.serialize(format='n3') #format inferred facts to notation 3
        #inferred = open('what_was_inferred.n3', 'w')
        #inferred.write(inferred_facts)
        #inferred.close()

    except Exception, e:
        traceback.print_exc()
Code example #7
File: testOWL.py Project: baojie/FuXi-1
    def calculateEntailments(self, factGraph):
        start = time.time()
        self.network.feedFactsToAdd(generateTokenSet(factGraph))
        sTime = time.time() - start
        if sTime > 1:
            sTimeStr = "%s seconds" % sTime
        else:
            sTime = sTime * 1000
            sTimeStr = "%s milli seconds" % sTime
        print("Time to calculate closure on working memory: %s" % sTimeStr)
        print(self.network)

        tNodeOrder = [tNode
                        for tNode in self.network.terminalNodes
                            if self.network.instantiations.get(tNode, 0)]
        tNodeOrder.sort(key=lambda x:
                            self.network.instantiations[x], reverse=True)
        for termNode in tNodeOrder:
            print(termNode)
            print("\t", termNode.rules)
            print("\t\t%s instantiations" % self.network.instantiations[termNode])
            # for c in AllClasses(factGraph):
            #     print(CastClass(c,factGraph))
        print("==============")
        self.network.inferredFacts.namespace_manager = factGraph.namespace_manager
        return sTimeStr
Code example #8
def build_network(rules):
    if isinstance(rules, basestring):
        rules = StringIO(rules)
    graph = ConjunctiveGraph()
    graph.load(rules, publicID="test", format="n3")
    network = NetworkFromN3(graph, additionalBuiltins={STRING_NS.startsWith: StringStartsWith})
    network.feedFactsToAdd(generateTokenSet(extractBaseFacts(graph)))
    return network
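A hedged usage sketch for build_network: the N3 input is hypothetical, and it assumes that extractBaseFacts (a helper not shown above) pulls the ground triples out of the parsed rule graph so the rule can fire against them.

rules = """
@prefix ex: <http://example.org/> .
ex:socrates a ex:Man .
{ ?x a ex:Man } => { ?x a ex:Mortal } .
"""
network = build_network(rules)
# ex:socrates a ex:Mortal should now be among the inferred facts
print(network.inferredFacts.serialize(format='n3'))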
Code example #9
 def expand(self):
     abox = Graph()
     i = 0
     for t in self._classes:
         abox.add( (URIRef("http://el%i"%(i)), RDF.type, t) )
         i += 1
     
     _, _, network = SetupRuleStore(makeNetwork=True)
     NormalFormReduction(self.tbox)
     
     # Warning: The use of pD-rules is a memory killer!
     for rule in HornFromN3('http://www.agfa.com/w3c/euler/rdfs-rules.n3'): #'../lib/python-dlp/fuxi/test/pD-rules.n3'): #HornFromDL(self.tBoxGraph):
         network.buildNetworkFromClause(rule)
     network.feedFactsToAdd(generateTokenSet(self.tbox))
     network.feedFactsToAdd(generateTokenSet(abox))
     
     self._classes = self._classes.union( self._extractRdfTypes(network.inferredFacts) )
Code example #10
File: Negation.py Project: Web5design/FuXi
def CalculateStratifiedModel(network, ontGraph, derivedPreds, edb=None):
    posRules, ignored = MapDLPtoNetwork(network,
                               ontGraph,
                               constructNetwork=False,
                               derivedPreds=derivedPreds,
                               ignoreNegativeStratus=True)
    for rule in posRules:
        network.buildNetworkFromClause(rule)
    network.feedFactsToAdd(generateTokenSet(edb and edb or ontGraph))
    for i in ignored:
        # Evaluate the graph pattern and instantiate the head of the rule
        # with the solutions returned
        sel, compiler = StratifiedSPARQL(i)
        query = compiler.compile(sel)
        i.stratifiedQuery = query
        vars = sel.projection
        for rt in (edb and edb or ontGraph).query(query):
            solutions = {}
            if isinstance(rt, tuple):
                solutions.update(dict(
                    [(vars[idx], i) for idx, i in enumerate(rt)]))
            else:
                solutions[vars[0]] = rt
            i.solutions = solutions
            head = copy.deepcopy(i.formula.head)
            head.ground(solutions)
            fact = head.toRDFTuple()
            network.inferredFacts.add(fact)
            network.feedFactsToAdd(generateTokenSet([fact]))

    # Now we need to clear assertions that cross the individual,
    # concept, relation divide
    # toRemove=[]
    for s, p, o in network.inferredFacts.triples((None,
                                                RDF.type,
                                                None)):
        if s in (edb and edb or ontGraph).predicates() or\
           s in [_s for _s, _p, _o in
                    (edb and edb or ontGraph).triples_choices(
                                        (None,
                                         RDF.type,
                                         [OWL_NS.Class,
                                          OWL_NS.Restriction]))]:
            network.inferredFacts.remove((s, p, o))
    return posRules, ignored
Code example #11
File: testReteAction.py Project: carnotip/FuXi
 def testReteActionTest(self):
     factGraph = Graph().parse(StringIO(N3_FACTS),format='n3')
     rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
     for rule in HornFromN3(StringIO(N3_PROGRAM),additionalBuiltins=None):
         network.buildNetworkFromClause(rule)
     network.registerReteAction(matchingHeadTriple,False,encodeAction)
     network.feedFactsToAdd(generateTokenSet(factGraph))
     print network.inferredFacts.serialize(format='n3')
     self.failUnless(resultingTriple in network.inferredFacts)
Code example #12
    def testReasoning(self):
        print 'setting up DLP...'
        self.network.setupDescriptionLogicProgramming(self.tBoxGraph)
        pprint(list(self.network.rules))
        print self.network

        print 'feeding TBox... '
        self.network.feedFactsToAdd(generateTokenSet(self.tBoxGraph))
        print 'feeding ABox...'
        self.network.feedFactsToAdd(generateTokenSet(self.aBoxGraph))

        self.network.inferredFacts.bind('ex',EX)
        self.network.inferredFacts.bind('exterms',EX_TERMS)
        print self.network.inferredFacts.serialize(format='n3')

        print 'Checking...'
        for triple in expected_triples:
            self.failUnless(triple in self.network.inferredFacts,"Missing %s"%(repr(triple)))
Code example #13
 def setUp(self):
     ruleStore, ruleGraph, network = SetupRuleStore(makeNetwork=True)
     self.network = network
     self.factGraph = Graph().parse(
             data=SKOLEM_MACHINE_FACTS, format='n3')
     _debug("Factgraph:\n{}".format(self.factGraph.serialize()))
     for rule in HornFromN3(StringIO(SKOLEM_MACHINE_RULES)):
         self.network.buildNetworkFromClause(rule)
     self.network.feedFactsToAdd(generateTokenSet(self.factGraph))
Code example #14
File: test_builtin_ordering.py Project: NanduBC/FuXi
def build_network(rules):
    import sys
    if isinstance(rules, basestring if sys.version < '3' else str):
        rules = StringIO(rules)
    graph = ConjunctiveGraph()
    graph.load(rules, publicID='test', format='n3')
    network = NetworkFromN3(
        graph, additionalBuiltins={STRING_NS.startsWith: StringStartsWith})
    network.feedFactsToAdd(generateTokenSet(extractBaseFacts(graph)))
    return network
Code example #15
 def test_hornfromn3_inferencing(self):
     # https://groups.google.com/d/msg/fuxi-discussion/4r1Nt_o1Hco/4QQ7BaqBCH8J
     from io import StringIO
     rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
     for rule in HornFromN3(StringIO(rules)):
         network.buildNetworkFromClause(rule)
     g = Graph()
     g.parse(data=facts, format="n3")
     network.feedFactsToAdd(generateTokenSet(g))
     print(network.inferredFacts.serialize(format="n3").decode('utf-8'))
Code example #16
File: test_hornfromn3.py Project: gjhiggins/FuXi
 def test_hornfromn3_inferencing(self):
     # https://groups.google.com/d/msg/fuxi-discussion/4r1Nt_o1Hco/4QQ7BaqBCH8J
     from io import StringIO
     rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
     for rule in HornFromN3(StringIO(rules)):
         network.buildNetworkFromClause(rule)
     g = Graph()
     g.parse(data=facts, format="n3")
     network.feedFactsToAdd(generateTokenSet(g))
     print(network.inferredFacts.serialize(format="n3").decode('utf-8'))
Code example #17
def build_network2(rules):
    graph = ConjunctiveGraph()
    graph.load(StringIO(rules), publicID='test', format='n3')
    rule_store, rule_graph = SetupRuleStore(
        StringIO(rules),
        additionalBuiltins={STRING_NS.startsWith: StringStartsWith})
    from FuXi.Rete.Network import ReteNetwork
    network = ReteNetwork(rule_store)
    network.feedFactsToAdd(generateTokenSet(extractBaseFacts(graph)))
    return network
Code example #18
def build_network(rules):
    import sys
    if isinstance(rules, basestring if sys.version < '3' else str):
        rules = StringIO(rules)
    graph = ConjunctiveGraph()
    graph.load(rules, publicID='test', format='n3')
    network = NetworkFromN3(
        graph, additionalBuiltins={STRING_NS.startsWith: StringStartsWith})
    network.feedFactsToAdd(generateTokenSet(extractBaseFacts(graph)))
    return network
Code example #19
def CalculateStratifiedModel(network, ontGraph, derivedPreds, edb=None):
    posRules, ignored = MapDLPtoNetwork(network,
                                        ontGraph,
                                        constructNetwork=False,
                                        derivedPreds=derivedPreds,
                                        ignoreNegativeStratus=True)
    for rule in posRules:
        network.buildNetworkFromClause(rule)
    network.feedFactsToAdd(generateTokenSet(edb and edb or ontGraph))
    for i in ignored:
        # Evaluate the graph pattern and instantiate the head of the rule
        # with the solutions returned
        sel, compiler = StratifiedSPARQL(i)
        query = compiler.compile(sel)
        i.stratifiedQuery = query
        vars = sel.projection
        for rt in (edb and edb or ontGraph).query(query):
            solutions = {}
            if isinstance(rt, tuple):
                solutions.update(
                    dict([(vars[idx], i) for idx, i in enumerate(rt)]))
            else:
                solutions[vars[0]] = rt
            i.solutions = solutions
            head = copy.deepcopy(i.formula.head)
            head.ground(solutions)
            fact = head.toRDFTuple()
            network.inferredFacts.add(fact)
            network.feedFactsToAdd(generateTokenSet([fact]))

    # Now we need to clear assertions that cross the individual,
    # concept, relation divide
    # toRemove=[]
    for s, p, o in network.inferredFacts.triples((None, RDF.type, None)):
        if s in (edb and edb or ontGraph).predicates() or\
           s in [_s for _s, _p, _o in
                    (edb and edb or ontGraph).triples_choices(
                                        (None,
                                         RDF.type,
                                         [OWL_NS.Class,
                                          OWL_NS.Restriction]))]:
            network.inferredFacts.remove((s, p, o))
    return posRules, ignored
Code example #20
File: test_builtin_ordering.py Project: NanduBC/FuXi
def build_network2(rules):
    graph = ConjunctiveGraph()
    graph.load(StringIO(rules), publicID='test', format='n3')
    rule_store, rule_graph = SetupRuleStore(
        StringIO(rules),
        additionalBuiltins={STRING_NS.startsWith: StringStartsWith})
    from FuXi.Rete.Network import ReteNetwork
    network = ReteNetwork(rule_store)
    network.feedFactsToAdd(generateTokenSet(extractBaseFacts(graph)))
    return network
Code example #21
File: data.py Project: mwatts15/YAROM
            def infer(graph, new_data):
                """ Fire FuXi rule engine to infer triples """
                # apply rules to original facts to infer new facts
                closureDeltaGraph = Graph()
                network.inferredFacts = closureDeltaGraph

                network.feedFactsToAdd(generateTokenSet(new_data))
                # combine original facts with inferred facts
                if graph:
                    for x in closureDeltaGraph:
                        graph.add(x)
Code example #22
File: test_cmpuri.py Project: pombredanne/curate
 def setUp(self):
     ruleStore, ruleGraph, self.network = makeRuleStore([fixture("test_cmpuri.n3")])
     g = Graph()
     g.parse(StringIO("""
     <http://example.org/> a _:x .
     <http://EXAMPLE.ORG/> a _:x .
     <HTTP://example.org:80/> a _:x .
     <http://example.com/> a _:x .
     """), format="n3")
     self.network.feedFactsToAdd(generateTokenSet(g))
     logging.debug("Inferred Facts:\n%s" % self.network.inferredFacts.serialize(format="n3"))
Code example #23
File: test_hornfromn3.py Project: gjhiggins/FuXi
 def test_hornfromn3(self):
     self.rule_store, self.rule_graph, self.network = SetupRuleStore(
         makeNetwork=True)
     closureDeltaGraph = Graph()
     self.network.inferredFacts = closureDeltaGraph
     for rule in HornFromN3(
             'http://www.agfa.com/w3c/euler/rdfs-rules.n3',
             additionalBuiltins=None):
         self.network.buildNetworkFromClause(rule)
         print("{} {}".format(self.network, rule))
         # state_before_inferencing = str(self.network)
         self.network.feedFactsToAdd(generateTokenSet(self.network.inferredFacts))
Code example #24
File: test_curlreq.py Project: pombredanne/curate
    def test_unresolvable_host(self):
        g = Graph()
        g.parse(fixture("cap-uk-payments-2009.rdf"))
        resource = URIRef("http://cap-payments.defra.../2008_All_CAP_Search_Results.xls")

        self.network.feedFactsToAdd(generateTokenSet(g))
        closureDelta = self.network.inferredFacts
        logging.debug("Inferred Triples:\n%s" % closureDelta.serialize(format="n3"))
        req, resp = self.getreq(closureDelta, resource)

        assert (resp, CURL["status"], CURL["Failure"]) in closureDelta
        assert (resp, HTTP["statusCodeNumber"], Literal("0")) in closureDelta
Code example #25
 def test_hornfromn3(self):
     self.rule_store, self.rule_graph, self.network = SetupRuleStore(
         makeNetwork=True)
     closureDeltaGraph = Graph()
     self.network.inferredFacts = closureDeltaGraph
     for rule in HornFromN3('http://www.agfa.com/w3c/euler/rdfs-rules.n3',
                            additionalBuiltins=None):
         self.network.buildNetworkFromClause(rule)
         print("{} {}".format(self.network, rule))
         # state_before_inferencing = str(self.network)
         self.network.feedFactsToAdd(
             generateTokenSet(self.network.inferredFacts))
Code example #26
File: test_curlreq.py Project: pombredanne/curate
    def test_404(self):
        g = Graph()
        g.parse(fixture("dbpedia_broken.rdf"))
        resource = URIRef("http://eris.okfn.org/nonexistent")

        self.network.feedFactsToAdd(generateTokenSet(g))
        closureDelta = self.network.inferredFacts
        logging.debug("Inferred Triples:\n%s" % closureDelta.serialize(format="n3"))
        req, resp = self.getreq(closureDelta, resource)

        assert (resp, CURL["status"], CURL["Failure"]) in closureDelta
        assert (resp, HTTP["statusCodeNumber"], Literal("404")) in closureDelta
Code example #27
File: test_regexp.py Project: pombredanne/curate
 def setUp(self):
     ruleStore, ruleGraph, self.network = makeRuleStore([fixture("test_regexp.n3")])
     g = Graph()
     g.parse(StringIO("""
     @prefix dc: <http://purl.org/dc/terms/>.
     @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
     
     <http://example.org/> dc:title "abc/123" .
     <http://example.org/> rdfs:seeAlso <http://example.org/12345.txt>.
     """), format="n3")
     self.network.feedFactsToAdd(generateTokenSet(g))
     logging.debug("Inferred Facts:\n%s" % self.network.inferredFacts.serialize(format="n3"))
Code example #28
    def test_multi_lang_inference(self):

        graph = Graph().parse(StringIO(N3_FACTS), format='n3')
        rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

        for rule in HornFromN3(StringIO(N3_PROGRAM), additionalBuiltins=None):
            network.buildNetworkFromClause(rule)

        network.feedFactsToAdd(generateTokenSet(graph))

        for expected_triple in resulting_triples:
            self.assertIn(expected_triple, network.inferredFacts)
Code example #29
    def testReasoning(self):
        _debug('setting up DLP...')
        self.network.setupDescriptionLogicProgramming(self.tBoxGraph)
        _debug("Rules:\n %s" % pformat(list(self.network.rules)))
        _debug(self.network)

        _debug('feeding TBox... ')
        self.network.feedFactsToAdd(generateTokenSet(self.tBoxGraph))
        _debug('feeding ABox...')
        self.network.feedFactsToAdd(generateTokenSet(self.aBoxGraph))

        self.network.inferredFacts.bind('ex', EX)
        self.network.inferredFacts.bind('exterms', EX_TERMS)
        _debug("Facts:\n%s" % (
            self.network.inferredFacts.serialize(format='n3')))

        for triple in expected_triples:
            self.failUnless(
                triple in self.network.inferredFacts, \
                    "Missing %s" % (repr(triple)))

        self.failUnless(3 == 2)
Code example #30
File: inference.py Project: drewp/homeauto
def infer(graph, rules):
    """
    Returns a new graph of inferred statements. The plain Rete API seems
    to alter rules.formulae and rules.rules, but this function does not
    alter the incoming rules object, so you can cache it.
    """
    # based on fuxi/tools/rdfpipe.py
    target = Graph()
    tokenSet = generateTokenSet(graph)
    with _dontChangeRulesStore(rules):
        network = ReteNetwork(rules, inferredTarget=target)
        network.feedFactsToAdd(tokenSet)
    
    return target
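Because infer restores the rule store's state, the store can be parsed once and reused across calls, which is the caching the docstring describes. A minimal sketch assuming FuXi's N3RuleStore and hypothetical file names:

from rdflib import Graph
from FuXi.Rete.RuleStore import N3RuleStore

ruleStore = N3RuleStore()
Graph(ruleStore).parse('rules.n3', format='n3')  # hypothetical rule file

# the same cached store serves repeated calls
for source in ('facts1.n3', 'facts2.n3'):  # hypothetical fact files
    g = Graph().parse(source, format='n3')
    print(infer(g, ruleStore).serialize(format='n3'))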
Code example #31
 def setUp(self):
     from FuXi.Rete.RuleStore import N3RuleStore
     from FuXi.Rete import ReteNetwork
     from FuXi.Rete.Util import generateTokenSet
     self.testGraph = Graph()
     self.ruleStore=N3RuleStore()
     self.ruleGraph = Graph(self.ruleStore)           
     self.ruleGraph.parse(StringIO(testN3),format='n3')
     self.testGraph.parse(StringIO(testN3),format='n3')        
     self.closureDeltaGraph = Graph()
     self.network = ReteNetwork(self.ruleStore,
                                initialWorkingMemory=generateTokenSet(self.testGraph),
                                inferredTarget = self.closureDeltaGraph,
                                nsMap = {})
Code example #32
def infer(graph, rules):
    """
    Returns a new graph of inferred statements. The plain Rete API seems
    to alter rules.formulae and rules.rules, but this function does not
    alter the incoming rules object, so you can cache it.
    """
    # based on fuxi/tools/rdfpipe.py
    target = Graph()
    tokenSet = generateTokenSet(graph)
    with _dontChangeRulesStore(rules):
        network = ReteNetwork(rules, inferredTarget=target)
        network.feedFactsToAdd(tokenSet)

    return target
Code example #33
 def setUp(self):
     from FuXi.Rete.RuleStore import N3RuleStore
     from FuXi.Rete import ReteNetwork
     from FuXi.Rete.Util import generateTokenSet
     self.testGraph = Graph()
     self.ruleStore = N3RuleStore()
     self.ruleGraph = Graph(self.ruleStore)
     self.ruleGraph.parse(StringIO(testN3), format='n3')
     self.testGraph.parse(StringIO(testN3), format='n3')
     self.closureDeltaGraph = Graph()
     self.network = ReteNetwork(self.ruleStore,
                                initialWorkingMemory=generateTokenSet(
                                    self.testGraph),
                                inferredTarget=self.closureDeltaGraph,
                                nsMap={})
Code example #34
def infer(graph):
    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    rules = HornFromN3(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rules.n3'))

    closure_delta = Graph()
    network.inferredFacts = closure_delta
    for rule in rules:
        network.buildNetworkFromClause(rule)

    network.feedFactsToAdd(generateTokenSet(graph))

    new_graph = graph + closure_delta

    # Send to ingest
    http.post('http://localhost:5200/', new_graph.serialize(format='json-ld'))
Code example #35
File: infer.py Project: avengerpenguin/ldsearch
def infer(graph):
    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    rules = HornFromN3(os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'rules.n3'))

    closure_delta = Graph()
    network.inferredFacts = closure_delta
    for rule in rules:
        network.buildNetworkFromClause(rule)

    network.feedFactsToAdd(generateTokenSet(graph))

    new_graph = graph + closure_delta

    # Send to ingest
    http.post('http://localhost:5200/', new_graph.serialize(format='json-ld'))
Code example #36
    def answers(self, debug=False, solutionCallback=NoopCallbackFn):
        """
        Takes a conjunctive query and a SIP collection and initiates the
        meta-interpreter for one goal at a time, explicitly propagating
        evaluate procedures if no bindings are given from the query to
        trigger subsequent subqueries via EDB predicates.

        @TODO:
        Add a PRD externally defined action to the production of rules
        that produce answers for the query predicate. The action is a
        user-specified callback that can be used to signal InferredGoal
        and halt RETE/UL evaluation prematurely; otherwise, the network
        is left to reach a stable state and the answers collected along
        the way are returned.
        """
        # solutions = []

        # queryOp = GetOp(self.goal)
        if self.goal.isGround():
            # Mark the ground goal so the production rule engine
            # halts when the goal is inferred
            self.metaInterpNetwork.goal = self.goal.toRDFTuple()

        adornment = ["f" if isinstance(v, Variable) else "b" for v in GetArgs(self.goal, secondOrder=True)]
        adornment = reduce(lambda x, y: x + y, adornment)
        adornedQuery = AdornedUniTerm(self.goal, adornment)
        bfpTopQuery = self.makeDerivedQueryPredicate(adornedQuery)
        if debug:
            print >> sys.stderr, "Asserting initial BFP query ", bfpTopQuery

        assert bfpTopQuery.isGround()
        # Add BFP query atom to working memory, triggering procedure
        try:
            self.metaInterpNetwork.feedFactsToAdd(
                generateTokenSet([bfpTopQuery.toRDFTuple()], debugTriples=[bfpTopQuery.toRDFTuple()] if debug else [])
            )
        except InferredGoal:
            if debug:
                print >> sys.stderr, "Reached ground goal. Terminated BFP!"
            return True
        else:
            if self.goal.isGround():
                # Ground goal, but didn't trigger it, response must be negative
                return False
Code example #37
 def testExistentials(self):
     store = plugin.get("IOMemory", Store)()
     store.open("")
     ruleStore = N3RuleStore()
     ruleGraph = Graph(ruleStore)
     ruleGraph.parse(StringIO(N3_PROGRAM), format="n3")
     factGraph = Graph(store)
     factGraph.parse(StringIO(N3_FACTS), format="n3")
     deltaGraph = Graph(store)
     network = ReteNetwork(ruleStore, initialWorkingMemory=generateTokenSet(factGraph), inferredTarget=deltaGraph)
     inferenceCount = 0
     for inferredFact in network.inferredFacts.subjects(
         predicate=RDF.type, object=URIRef("http://example.com/#Inference")
     ):
         inferenceCount = inferenceCount + 1
     print(network.inferredFacts.serialize(format="n3"))
     self.failUnless(inferenceCount > 1, "Each rule firing should introduce a new BNode!")
Code example #38
def infer(ntriples):
    graph = Graph()
    graph.parse(data=ntriples, format='nt')
    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    rules = HornFromN3(os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'rules.n3'))

    closure_delta = Graph()
    network.inferredFacts = closure_delta
    for rule in rules:
        network.buildNetworkFromClause(rule)

    network.feedFactsToAdd(generateTokenSet(graph))

    new_graph = graph + closure_delta

    return new_graph.serialize(format='nt').decode('utf-8')
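A quick usage sketch for the N-Triples round trip above, with a hypothetical input triple; the result contains the original fact plus whatever rules.n3 derives from it.

data = '<http://example.org/a> <http://example.org/p> <http://example.org/b> .'
print(infer(data))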
Code example #39
def main():
    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    closureDeltaGraph = Graph()
    network.inferredFacts = closureDeltaGraph

    print("N0", network)

    hornrules = HornFromN3(
        'http://fuxi.googlecode.com/hg/test/sameAsTestRules.n3')

    for rule in hornrules:
        network.buildNetworkFromClause(rule)

    print("N1", network)

    factGraph = Graph().parse(
        'http://fuxi.googlecode.com/hg/test/sameAsTestFacts.n3', format='n3')
    network.feedFactsToAdd(generateTokenSet(factGraph))
    print(closureDeltaGraph.serialize(format='n3'))
Code example #40
    def infer(self):
        """ Fire FuXi rule engine to infer triples """

        from FuXi.Rete.RuleStore import SetupRuleStore
        from FuXi.Rete.Util import generateTokenSet
        from FuXi.Horn.HornRules import HornFromN3
        # fetch the derived object's graph
        semnet = self.rdf
        rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
        closureDeltaGraph = Graph()
        network.inferredFacts = closureDeltaGraph
        # build a network of rules
        for rule in HornFromN3('testrules.n3'):
            network.buildNetworkFromClause(rule)
        # apply rules to original facts to infer new facts
        network.feedFactsToAdd(generateTokenSet(semnet))
        # combine original facts with inferred facts
        for x in closureDeltaGraph:
            self.rdf.add(x)
Code example #41
 def testExistentials(self):
     store = plugin.get('IOMemory', Store)()
     store.open('')
     ruleStore = N3RuleStore()
     ruleGraph = Graph(ruleStore)
     ruleGraph.parse(StringIO(N3_PROGRAM), format='n3')
     factGraph = Graph(store)
     factGraph.parse(StringIO(N3_FACTS), format='n3')
     deltaGraph = Graph(store)
     network = ReteNetwork(ruleStore,
                           initialWorkingMemory=generateTokenSet(factGraph),
                           inferredTarget=deltaGraph)
     inferenceCount = 0
     for inferredFact in network.inferredFacts.subjects(
             predicate=RDF.type,
             object=URIRef('http://example.com/#Inference')):
         inferenceCount = inferenceCount + 1
     print(network.inferredFacts.serialize(format='n3'))
     self.failUnless(inferenceCount > 1,
                     'Each rule firing should introduce a new BNode!')
Code example #42
def main():
    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    closureDeltaGraph = Graph()
    network.inferredFacts = closureDeltaGraph

    print("N0", network)

    hornrules = HornFromN3(
        'https://raw.githubusercontent.com/RDFLib/FuXi/master/test/sameAsTestRules.n3'
    )

    for rule in hornrules:
        network.buildNetworkFromClause(rule)

    print("N1", network)

    factGraph = Graph().parse(
        'https://raw.githubusercontent.com/RDFLib/FuXi/master/test/sameAsTestFacts.n3',
        format='n3')
    network.feedFactsToAdd(generateTokenSet(factGraph))
    print(closureDeltaGraph.serialize(format='n3'))
Code example #43
    def test_make_inference(self):
        """
        Tests that the rule engine is able to fire and make an inference.
        This passes if any inference at all is generated.
        """

        rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

        closureDeltaGraph = Graph()

        network.inferredFacts = closureDeltaGraph

        for rule in HornFromN3('tests/fuxi_test_files/rules.n3'):
            network.buildNetworkFromClause(rule)

        factGraph = Graph().parse('tests/fuxi_test_files/facts.n3',
                                  format='n3')

        network.feedFactsToAdd(generateTokenSet(factGraph))

        inferred_facts = list(closureDeltaGraph.objects())

        assert len(inferred_facts) > 0
Code example #44
File: rdfpipe.py Project: slitayem/fuxi
def main():
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option(
        '--stdin',
        type="choice",
        choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
        help='Parse RDF from STDIN (useful for piping) with given format')
    parser.add_option('-x',
                      '--xml',
                      action='append',
                      help='Append to the list of RDF/XML documents to parse')
    parser.add_option('-t',
                      '--trix',
                      action='append',
                      help='Append to the list of TriX documents to parse')
    parser.add_option('-n',
                      '--n3',
                      action='append',
                      help='Append to the list of N3 documents to parse')
    parser.add_option('--nt',
                      action='append',
                      help='Append to the list of NT documents to parse')
    parser.add_option('-a',
                      '--rdfa',
                      action='append',
                      help='Append to the list of RDFa documents to parse')

    parser.add_option(
        '-o',
        '--output',
        type="choice",
        choices=['n3', 'xml', 'pretty-xml', 'TriX', 'turtle', 'nt'],
        help='Format of the final serialized RDF graph')

    parser.add_option(
        '-m',
        '--ns',
        action='append',
        help='Register a namespace binding (QName prefix to a base URI)')

    parser.add_option(
        '-r',
        '--rules',
        action='append',
        help='Append to the list of fact files to use to perform reasoning')
    parser.add_option(
        '-i',
        '--inferred',
        help='URI to use for the graph containing any inferred triples')

    parser.set_defaults(xml=[],
                        trix=[],
                        n3=[],
                        nt=[],
                        rdfa=[],
                        ns=[],
                        output='n3')

    (options, args) = parser.parse_args()

    store = plugin.get(RDFLIB_STORE, Store)()
    store.open(RDFLIB_CONNECTION)

    namespace_manager = NamespaceManager(Graph())
    for prefixDef in options.ns:
        prefix, uri = prefixDef.split('=')
        namespace_manager.bind(prefix, uri, override=False)

    factGraph = ConjunctiveGraph(store)
    for graphRef in options.xml:
        factGraph.parse(graphRef,
                        publicID=Uri.OsPathToUri(graphRef),
                        format='xml')
    for graphRef in options.trix:
        factGraph.parse(graphRef,
                        publicID=Uri.OsPathToUri(graphRef),
                        format='trix')
    for graphRef in options.n3:
        factGraph.parse(graphRef,
                        publicID=Uri.OsPathToUri(graphRef),
                        format='n3')
    for graphRef in options.nt:
        factGraph.parse(graphRef,
                        publicID=Uri.OsPathToUri(graphRef),
                        format='nt')
    for graphRef in options.rdfa:
        factGraph.parse(graphRef,
                        publicID=Uri.OsPathToUri(graphRef),
                        format='rdfa')
    if options.stdin:
        factGraph.parse(sys.stdin, format=options.stdin)

    if options.inferred and len(options.rules) > 0:
        inferredURI = URIRef(options.inferred)
        ruleStore = N3RuleStore()
        ruleGraph = Graph(ruleStore)
        for ruleFile in options.rules:
            ruleGraph.parse(ruleFile, format='n3')
        tokenSet = generateTokenSet(factGraph)
        deltaGraph = Graph(store=factGraph.store, identifier=inferredURI)
        network = ReteNetwork(ruleStore, inferredTarget=deltaGraph)
        network.feedFactsToAdd(tokenSet)

    print factGraph.serialize(destination=None,
                              format=options.output,
                              base=None)
    store.rollback()
Code example #45
File: example6.py Project: roisevege/FuXi
from FuXi.Horn.HornRules import HornFromN3
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.Rete.Util import generateTokenSet
from rdflib import Graph

rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

closureDeltaGraph = Graph()
network.inferredFacts = closureDeltaGraph
print(network)

for rule in HornFromN3('http://www.agfa.com/w3c/euler/rdfs-rules.n3',
                       additionalBuiltins=None):
    network.buildNetworkFromClause(rule)
print(network)
network.feedFactsToAdd(generateTokenSet(network.inferredFacts))
print(network)
Code example #46
File: test_fuxi.py Project: travs/PyOpenWorm
from rdflib import Graph
from FuXi.Rete.RuleStore import SetupRuleStore

from FuXi.Rete.Util import generateTokenSet
from FuXi.Horn.HornRules import HornFromN3

rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

closureDeltaGraph = Graph()

network.inferredFacts = closureDeltaGraph

for rule in HornFromN3('rules.n3'):
    network.buildNetworkFromClause(rule)

factGraph = Graph().parse('facts.n3', format='n3')

network.feedFactsToAdd(generateTokenSet(factGraph))

inferred_facts = closureDeltaGraph.serialize(format='n3')

#write inferred facts to file
inferred = open('inferred.n3', 'w')
inferred.write(inferred_facts)
inferred.close()
Code example #47
File: example8.py Project: roisevege/FuXi
from io import StringIO

rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

rules = u"""
@prefix owl: <http://www.w3.org/2002/07/owl#> .
{ ?x owl:sameAs ?y } => { ?y owl:sameAs ?x } .
{ ?x owl:sameAs ?y . ?x ?p ?o } => { ?y ?p ?o } .
"""

for rule in HornFromN3(StringIO(rules)):
    network.buildNetworkFromClause(rule)

facts = """
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix ex: <http://example.org/> .
@prefix exterms: <http://example.org/terms/> .
ex:foo
        a exterms:Something ;
        exterms:hasX "blah blah" ;
        owl:sameAs ex:bar .
ex:bar
        exterms:hasY "yyyy" .
"""
g = Graph()
g.parse(data=facts, format="n3")

network.feedFactsToAdd(generateTokenSet(g))

print(network.inferredFacts.serialize(format="n3"))
Code example #48
def main():
    from optparse import OptionParser
    op = OptionParser(
        'usage: %prog [options] factFile1 factFile2 ... factFileN')

    op.add_option(
        '--why',
        default=None,
        help='Specifies the goals to solve for using the non-naive methods; ' +
        'see --method')

    op.add_option(
        '--closure',
        action='store_true',
        default=False,
        help='Whether or not to serialize the inferred triples' +
        ' along with the original triples.  Otherwise ' +
        '(the default behavior), serialize only the inferred triples')

    op.add_option(
        '--imports',
        action='store_true',
        default=False,
        help='Whether or not to follow owl:imports in the fact graph')

    op.add_option(
        '--output',
        default='n3',
        metavar='RDF_FORMAT',
        choices=[
            'xml', 'TriX', 'n3', 'pml', 'proof-graph', 'nt', 'rif', 'rif-xml',
            'conflict', 'man-owl'
        ],
        help=
        "Serialize the inferred triples and/or original RDF triples to STDOUT "
        +
        "using the specified RDF syntax ('xml', 'pretty-xml', 'nt', 'turtle', "
        +
        "or 'n3') or to print a summary of the conflict set (from the RETE " +
        "network) if the value of this option is 'conflict'.  If the the " +
        " value is 'rif' or 'rif-xml', Then the rules used for inference " +
        "will be serialized as RIF.  If the value is 'pml' and --why is used, "
        + " then the PML RDF statements are serialized.  If output is " +
        "'proof-graph then a graphviz .dot file of the proof graph is printed. "
        +
        "Finally if the value is 'man-owl', then the RDF facts are assumed " +
        "to be OWL/RDF and serialized via Manchester OWL syntax. The default is %default"
    )

    op.add_option(
        '--class',
        dest='classes',
        action='append',
        default=[],
        metavar='QNAME',
        help='Used with --output=man-owl to determine which ' +
        'classes within the entire OWL/RDF are targeted for serialization' +
        '.  Can be used more than once')

    op.add_option(
        '--hybrid',
        action='store_true',
        default=False,
        help='Used with --method=bfp to determine whether or not to ' +
        'peek into the fact graph to identify predicates that are both ' +
        'derived and base.  This is expensive for large fact graphs ' +
        'and is explicitly not used against SPARQL endpoints')

    op.add_option(
        '--property',
        action='append',
        dest='properties',
        default=[],
        metavar='QNAME',
        help='Used with --output=man-owl or --extract to determine which ' +
        'properties are serialized / extracted.  Can be used more than once')

    op.add_option(
        '--normalize',
        action='store_true',
        default=False,
        help=
        "Used with --output=man-owl to attempt to determine if the ontology is 'normalized' [Rector, A. 2003]"
        + "The default is %default")

    op.add_option(
        '--ddlGraph',
        default=False,
        help=
        "The location of a N3 Data Description document describing the IDB predicates"
    )

    op.add_option(
        '--input-format',
        default='xml',
        dest='inputFormat',
        metavar='RDF_FORMAT',
        choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
        help=
        "The format of the RDF document(s) which serve as the initial facts " +
        " for the RETE network. One of 'xml', 'n3', 'trix', 'nt', " +
        "or 'rdfa'.  The default is %default")

    op.add_option(
        '--safety',
        default='none',
        metavar='RULE_SAFETY',
        choices=['loose', 'strict', 'none'],
        help="Determines how to handle RIF Core safety.  A value of 'loose' " +
        " means that unsafe rules will be ignored.  A value of 'strict' " +
        " will cause a syntax exception upon any unsafe rule.  A value of " +
        "'none' (the default) does nothing")

    op.add_option(
        '--pDSemantics',
        action='store_true',
        default=False,
        help=
        'Used with --dlp to add the pD semantics ruleset for semantics not covered '
        + 'by DLP but expressible in definite Datalog Logic Programming.' +
        ' The default is %default')

    op.add_option(
        '--stdin',
        action='store_true',
        default=False,
        help=
        'Parse STDIN as an RDF graph to contribute to the initial facts. The default is %default '
    )

    op.add_option(
        '--ns',
        action='append',
        default=[],
        metavar="PREFIX=URI",
        help='Register a namespace binding (QName prefix to a base URI).  This '
        + 'can be used more than once')

    op.add_option(
        '--rules',
        default=[],
        action='append',
        metavar='PATH_OR_URI',
        help='The Notation 3 documents to use as rulesets for the RETE network'
        + '.  Can be specified more than once')

    op.add_option('-d',
                  '--debug',
                  action='store_true',
                  default=True,
                  help='Include debugging output')

    op.add_option(
        '--strictness',
        default='defaultBase',
        metavar='DDL_STRICTNESS',
        choices=['loose', 'defaultBase', 'defaultDerived', 'harsh'],
        help=
        'Used with --why to specify whether to: *not* check if predicates are '
        +
        ' both derived and base (loose), if they are, mark as derived (defaultDerived) '
        +
        'or as base (defaultBase) predicates, else raise an exception (harsh)')

    op.add_option(
        '--method',
        default='naive',
        metavar='reasoning algorithm',
        choices=['gms', 'bfp', 'naive'],
        help='Used with --why to specify how to evaluate answers for query.  '
        + 'One of: gms, bfp, naive')

    op.add_option(
        '--firstAnswer',
        default=False,
        action='store_true',
        help=
        'Used with --why to determine whether to fetch all answers or just ' +
        'the first')

    op.add_option(
        '--edb',
        default=[],
        action='append',
        metavar='EXTENSIONAL_DB_PREDICATE_QNAME',
        help=
        'Used with --why/--strictness=defaultDerived to specify which clashing '
        + 'predicate will be designated as a base predicate')

    op.add_option(
        '--idb',
        default=[],
        action='append',
        metavar='INTENSIONAL_DB_PREDICATE_QNAME',
        help=
        'Used with --why/--strictness=defaultBase to specify which clashing ' +
        'predicate will be designated as a derived predicate')

    op.add_option(
        '--hybridPredicate',
        default=[],
        action='append',
        metavar='PREDICATE_QNAME',
        help=
        'Used with --why to explicitly specify a hybrid predicate (in both ' +
        ' IDB and EDB) ')

    op.add_option(
        '--noMagic',
        default=[],
        action='append',
        metavar='DB_PREDICATE_QNAME',
        help='Used with --why to specify that the predicate should not have its '
        + 'magic sets calculated')

    op.add_option(
        '--filter',
        action='append',
        default=[],
        metavar='PATH_OR_URI',
        help=
        'The Notation 3 documents to use as a filter (entailments do not participate in network)'
    )

    op.add_option(
        '--ruleFacts',
        action='store_true',
        default=False,
        help="Determines whether or not to attempt to parse initial facts from "
        + "the rule graph.  The default is %default")

    op.add_option(
        '--builtins',
        default=False,
        metavar='PATH_TO_PYTHON_MODULE',
        help="The path to a python module with function definitions (and a " +
        "dicitonary called ADDITIONAL_FILTERS) to use for builtins implementations"
    )

    op.add_option(
        '--dlp',
        action='store_true',
        default=False,
        help=
        'Use Description Logic Programming (DLP) to extract rules from OWL/RDF.  The default is %default'
    )

    op.add_option(
        '--sparqlEndpoint',
        action='store_true',
        default=False,
        help=
        'Indicates that the sole argument is the URI of a SPARQL endpoint to query'
    )

    op.add_option(
        '--ontology',
        action='append',
        default=[],
        metavar='PATH_OR_URI',
        help=
        'The path to an OWL RDF/XML graph to use DLP to extract rules from ' +
        '(otherwise, fact graph(s) are used)')

    op.add_option(
        '--ontologyFormat',
        default='xml',
        dest='ontologyFormat',
        metavar='RDF_FORMAT',
        choices=['xml', 'trix', 'n3', 'nt', 'rdfa'],
        help=
        "The format of the OWL RDF/XML graph specified via --ontology.  The default is %default"
    )

    op.add_option(
        '--builtinTemplates',
        default=None,
        metavar='N3_DOC_PATH_OR_URI',
        help=
        'The path to an N3 document associating SPARQL FILTER templates to ' +
        'rule builtins')

    op.add_option('--negation',
                  action='store_true',
                  default=False,
                  help='Extract negative rules?')

    op.add_option(
        '--normalForm',
        action='store_true',
        default=False,
        help='Whether or not to reduce DL axioms & LP rules to a normal form')
    (options, facts) = op.parse_args()

    nsBinds = {'iw': 'http://inferenceweb.stanford.edu/2004/07/iw.owl#'}
    for nsBind in options.ns:
        pref, nsUri = nsBind.split('=')
        nsBinds[pref] = nsUri

    namespace_manager = NamespaceManager(Graph())
    if options.sparqlEndpoint:
        factGraph = Graph(plugin.get('SPARQLStore', Store)(facts[0]))
        options.hybrid = False
    else:
        factGraph = Graph()
    ruleSet = Ruleset()

    for fileN in options.rules:
        if options.ruleFacts and not options.sparqlEndpoint:
            factGraph.parse(fileN, format='n3')
            print("Parsing RDF facts from ", fileN)
        if options.builtins:
            import imp
            userFuncs = imp.load_source('builtins', options.builtins)
            rs = HornFromN3(fileN,
                            additionalBuiltins=userFuncs.ADDITIONAL_FILTERS)
        else:
            rs = HornFromN3(fileN)
        nsBinds.update(rs.nsMapping)
        ruleSet.formulae.extend(rs)
        #ruleGraph.parse(fileN, format='n3')

    ruleSet.nsMapping = nsBinds

    for prefix, uri in list(nsBinds.items()):
        namespace_manager.bind(prefix, uri, override=False)
    closureDeltaGraph = Graph()
    closureDeltaGraph.namespace_manager = namespace_manager
    factGraph.namespace_manager = namespace_manager

    if not options.sparqlEndpoint:
        for fileN in facts:
            factGraph.parse(fileN, format=options.inputFormat)
            if options.imports:
                for owlImport in factGraph.objects(predicate=OWL_NS.imports):
                    factGraph.parse(owlImport)
                    print("Parsed Semantic Web Graph.. ", owlImport)

    if not options.sparqlEndpoint and facts:
        for pref, uri in factGraph.namespaces():
            nsBinds[pref] = uri

    if options.stdin:
        assert not options.sparqlEndpoint, "Cannot use --stdin with --sparqlEndpoint"
        factGraph.parse(sys.stdin, format=options.inputFormat)

    #Normalize namespace mappings
    #prune redundant, rdflib-allocated namespace prefix mappings
    newNsMgr = NamespaceManager(factGraph)
    from FuXi.Rete.Util import CollapseDictionary
    for k, v in list(
            CollapseDictionary(
                dict([(k, v) for k, v in factGraph.namespaces()])).items()):
        newNsMgr.bind(k, v)
    factGraph.namespace_manager = newNsMgr

    if options.normalForm:
        NormalFormReduction(factGraph)

    if not options.sparqlEndpoint:
        workingMemory = generateTokenSet(factGraph)
    if options.builtins:
        import imp
        userFuncs = imp.load_source('builtins', options.builtins)
        rule_store, rule_graph, network = SetupRuleStore(
            makeNetwork=True, additionalBuiltins=userFuncs.ADDITIONAL_FILTERS)
    else:
        rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    network.inferredFacts = closureDeltaGraph
    network.nsMap = nsBinds

    if options.dlp:
        from FuXi.DLP.DLNormalization import NormalFormReduction
        if options.ontology:
            ontGraph = Graph()
            for fileN in options.ontology:
                ontGraph.parse(fileN, format=options.ontologyFormat)
                for prefix, uri in ontGraph.namespaces():
                    nsBinds[prefix] = uri
                    namespace_manager.bind(prefix, uri, override=False)
                    if options.sparqlEndpoint:
                        factGraph.store.bind(prefix, uri)
        else:
            ontGraph = factGraph
        NormalFormReduction(ontGraph)
        dlp = network.setupDescriptionLogicProgramming(
            ontGraph,
            addPDSemantics=options.pDSemantics,
            constructNetwork=False,
            ignoreNegativeStratus=options.negation,
            safety=safetyNameMap[options.safety])
        ruleSet.formulae.extend(dlp)
    if options.output == 'rif' and not options.why:
        for rule in ruleSet:
            print(rule)
        if options.negation:
            for nRule in network.negRules:
                print(nRule)

    elif options.output == 'man-owl':
        cGraph = network.closureGraph(factGraph, readOnly=False)
        cGraph.namespace_manager = namespace_manager
        Individual.factoryGraph = cGraph
        if options.classes:
            mapping = dict(namespace_manager.namespaces())
            for c in options.classes:
                pref, uri = c.split(':')
                print(Class(URIRef(mapping[pref] + uri)).__repr__(True))
        elif options.properties:
            mapping = dict(namespace_manager.namespaces())
            for p in options.properties:
                pref, uri = p.split(':')
                print(Property(URIRef(mapping[pref] + uri)))
        else:
            for p in AllProperties(cGraph):
                print(p.identifier, first(p.label))
                print(repr(p))
            for c in AllClasses(cGraph):
                if options.normalize:
                    if c.isPrimitive():
                        primAnc = [
                            sc for sc in c.subClassOf if sc.isPrimitive()
                        ]
                        if len(primAnc) > 1:
                            warnings.warn(
                                "Branches of primitive skeleton taxonomy" +
                                " should form trees: %s has %s primitive parents: %s"
                                % (c.qname, len(primAnc), primAnc),
                                UserWarning, 1)
                        children = [desc for desc in c.subSumpteeIds()]
                        for child in children:
                            for otherChild in [
                                    o for o in children if o is not child
                            ]:
                                if not otherChild in [
                                        c.identifier
                                        for c in Class(child).disjointWith
                                ]:  # and \
                                    warnings.warn(
                                        "Primitive children (of %s) " % (c.qname) + \
                                        "must be mutually disjoint: %s and %s" % (
                                    Class(child).qname, Class(otherChild).qname), UserWarning, 1)
                # if not isinstance(c.identifier, BNode):
                print(c.__repr__(True))

    if not options.why:
        # Naive construction of graph
        for rule in ruleSet:
            network.buildNetworkFromClause(rule)

    magicSeeds = []
    if options.why:
        builtinTemplateGraph = Graph()
        if options.builtinTemplates:
            builtinTemplateGraph = Graph().parse(options.builtinTemplates,
                                                 format='n3')
        factGraph.templateMap = \
            dict([(pred, template)
                      for pred, _ignore, template in
                            builtinTemplateGraph.triples(
                                (None,
                                 TEMPLATES.filterTemplate,
                                 None))])
        goals = []
        query = ParseSPARQL(options.why)
        network.nsMap['pml'] = PML
        network.nsMap['gmp'] = GMP_NS
        network.nsMap['owl'] = OWL_NS
        nsBinds.update(network.nsMap)
        network.nsMap = nsBinds
        if not query.prologue:
            query.prologue = Prologue(None, [])
            query.prologue.prefixBindings.update(nsBinds)
        else:
            for prefix, nsInst in list(nsBinds.items()):
                if prefix not in query.prologue.prefixBindings:
                    query.prologue.prefixBindings[prefix] = nsInst
        print("query.prologue", query.prologue)
        print("query.query", query.query)
        print("query.query.whereClause", query.query.whereClause)
        print("query.query.whereClause.parsedGraphPattern",
              query.query.whereClause.parsedGraphPattern)
        goals.extend([(s, p, o) for s, p, o, c in ReduceGraphPattern(
            query.query.whereClause.parsedGraphPattern,
            query.prologue).patterns])
        # dPreds=[]# p for s, p, o in goals ]
        # print("goals", goals)
        magicRuleNo = 0
        bottomUpDerivedPreds = []
        # topDownDerivedPreds  = []
        defaultBasePreds = []
        defaultDerivedPreds = set()
        hybridPredicates = []
        mapping = dict(newNsMgr.namespaces())
        for edb in options.edb:
            pref, uri = edb.split(':')
            defaultBasePreds.append(URIRef(mapping[pref] + uri))
        noMagic = []
        for pred in options.noMagic:
            pref, uri = pred.split(':')
            noMagic.append(URIRef(mapping[pref] + uri))
        if options.ddlGraph:
            ddlGraph = Graph().parse(options.ddlGraph, format='n3')
            # @TODO: should also get hybrid predicates from DDL graph
            defaultDerivedPreds = IdentifyDerivedPredicates(
                ddlGraph, Graph(), ruleSet)
        else:
            for idb in options.idb:
                pref, uri = idb.split(':')
                defaultDerivedPreds.add(URIRef(mapping[pref] + uri))
            defaultDerivedPreds.update(
                set([o if p == RDF.type else p for s, p, o in goals]))
            for hybrid in options.hybridPredicate:
                pref, uri = hybrid.split(':')
                hybridPredicates.append(URIRef(mapping[pref] + uri))

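        # Two query-answering strategies follow: 'gms' rewrites the ruleset
        # with the Generalized Magic Sets transformation and evaluates it
        # bottom-up over the RETE network, while 'bfp' solves the goal
        # top-down through a SPARQL-entailing store.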
        if options.method == 'gms':
            for goal in goals:
                goalSeed = AdornLiteral(goal).makeMagicPred()
                print("Magic seed fact (used in bottom-up evaluation)",
                      goalSeed)
                magicSeeds.append(goalSeed.toRDFTuple())
            if noMagic:
                print("Predicates whose magic sets will not be calculated")
                for p in noMagic:
                    print("\t", factGraph.qname(p))
            for rule in MagicSetTransformation(
                    factGraph,
                    ruleSet,
                    goals,
                    derivedPreds=bottomUpDerivedPreds,
                    strictCheck=nameMap[options.strictness],
                    defaultPredicates=(defaultBasePreds, defaultDerivedPreds),
                    noMagic=noMagic):
                magicRuleNo += 1
                network.buildNetworkFromClause(rule)
            if len(list(ruleSet)):
                print("reduction in size of program: %s (%s -> %s clauses)" %
                      (100 -
                       (float(magicRuleNo) / float(len(list(ruleSet)))) * 100,
                       len(list(ruleSet)), magicRuleNo))
            start = time.time()
            network.feedFactsToAdd(generateTokenSet(magicSeeds))
            if not [
                    rule for rule in factGraph.adornedProgram if len(rule.sip)
            ]:
                warnings.warn(
                    "Using GMS sideways information passing strategy with "
                    "no information to pass from query. Falling back to "
                    "naive method over given facts and rules")
                network.feedFactsToAdd(workingMemory)
            sTime = time.time() - start
            if sTime > 1:
                sTimeStr = "%s seconds" % sTime
            else:
                sTime = sTime * 1000
                sTimeStr = "%s milliseconds" % sTime
            print("Time to calculate closure on working memory: ", sTimeStr)

            if options.output == 'rif':
                print("Rules used for bottom-up evaluation")
                if network.rules:
                    for clause in network.rules:
                        print(clause)
                else:
                    for clause in factGraph.adornedProgram:
                        print(clause)
            if options.output == 'conflict':
                network.reportConflictSet()

        elif options.method == 'bfp':
            topDownDPreds = defaultDerivedPreds
            if options.builtinTemplates:
                builtinTemplateGraph = Graph().parse(options.builtinTemplates,
                                                     format='n3')
                builtinDict = dict([
                    (pred, template) for pred, _ignore, template in
                    builtinTemplateGraph.triples((None,
                                                  TEMPLATES.filterTemplate,
                                                  None))
                ])
            else:
                builtinDict = None
            topDownStore = TopDownSPARQLEntailingStore(
                factGraph.store,
                factGraph,
                idb=ruleSet,
                DEBUG=options.debug,
                derivedPredicates=topDownDPreds,
                templateMap=builtinDict,
                nsBindings=network.nsMap,
                identifyHybridPredicates=options.hybrid,
                hybridPredicates=hybridPredicates)
            targetGraph = Graph(topDownStore)
            for pref, nsUri in list(network.nsMap.items()):
                targetGraph.bind(pref, nsUri)
            start = time.time()
            # queryLiteral = EDBQuery([BuildUnitermFromTuple(goal) for goal in goals],
            #                         targetGraph)
            # query = queryLiteral.asSPARQL()
            # print("Goal to solve ", query)
            sTime = time.time() - start
            result = targetGraph.query(options.why, initNs=network.nsMap)
            if result.askAnswer:
                sTime = time.time() - start
                if sTime > 1:
                    sTimeStr = "%s seconds" % sTime
                else:
                    sTime = sTime * 1000
                    sTimeStr = "%s milliseconds" % sTime
                print("Time to reach ground goal answer of %s: %s" %
                      (result.askAnswer[0], sTimeStr))
            else:
                for rt in result:
                    sTime = time.time() - start
                    if sTime > 1:
                        sTimeStr = "%s seconds" % sTime
                    else:
                        sTime = sTime * 1000
                        sTimeStr = "%s milliseconds" % sTime
                    print(
                        "Time to reach answer %s via top-down SPARQL sip strategy: %s"
                        % (rt, sTimeStr))
                    if options.firstAnswer:
                        break
            if options.output == 'conflict' and options.method == 'bfp':
                for _network, _goal in topDownStore.queryNetworks:
                    print(_network, _goal)
                    _network.reportConflictSet(options.debug)
                for query in topDownStore.edbQueries:
                    print(query.asSPARQL())

    elif options.method == 'naive':
        start = time.time()
        network.feedFactsToAdd(workingMemory)
        sTime = time.time() - start
        if sTime > 1:
            sTimeStr = "%s seconds" % sTime
        else:
            sTime = sTime * 1000
            sTimeStr = "%s milliseconds" % sTime
        print("Time to calculate closure on working memory: ", sTimeStr)
        print(network)
        if options.output == 'conflict':
            network.reportConflictSet()

    for fileN in options.filter:
        for rule in HornFromN3(fileN):
            network.buildFilterNetworkFromClause(rule)

    if options.negation and network.negRules and options.method in [
            'both', 'bottomUp'
    ]:
        now = time.time()
        rt = network.calculateStratifiedModel(factGraph)
        print(
            "Time to calculate stratified, stable model (inferred %s facts): %s"
            % (rt, time.time() - now))
    if options.filter:
        print("Applying filter to entailed facts")
        network.inferredFacts = network.filteredFacts

    if options.closure and options.output in RDF_SERIALIZATION_FORMATS:
        cGraph = network.closureGraph(factGraph)
        cGraph.namespace_manager = namespace_manager
        print(
            cGraph.serialize(destination=None,
                             format=options.output,
                             base=None))
    elif options.output and options.output in RDF_SERIALIZATION_FORMATS:
        print(
            network.inferredFacts.serialize(destination=None,
                                            format=options.output,
                                            base=None))
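The driver above supports three evaluation methods ('gms', 'bfp' and 'naive'). Stripped of option handling, the naive bottom-up path reduces to a handful of FuXi calls. The sketch below is not part of the original script; the file names rules.n3 and facts.n3 are assumptions, but every call appears elsewhere in these examples.

from rdflib import Graph
from FuXi.Horn.HornRules import HornFromN3
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.Rete.Util import generateTokenSet

rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

# Compile each Horn rule into the RETE network, as the naive branch does
for rule in HornFromN3('rules.n3'):  # hypothetical rule file
    network.buildNetworkFromClause(rule)

# Feed every fact at once and let the network compute the closure
factGraph = Graph().parse('facts.n3', format='n3')  # hypothetical fact file
network.feedFactsToAdd(generateTokenSet(factGraph))

# Entailed triples accumulate in network.inferredFacts
print(network.inferredFacts.serialize(format='n3'))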
コード例 #49
        factGraph.parse(sys.stdin, format=options.inputFormat)

    # Normalize namespace mappings:
    # prune redundant, rdflib-allocated namespace prefix mappings
    newNsMgr = NamespaceManager(factGraph)
    from FuXi.Rete.Util import CollapseDictionary
    for k, v in CollapseDictionary(
            dict([(k, v) for k, v in factGraph.namespaces()])).items():
        newNsMgr.bind(k, v)
    factGraph.namespace_manager = newNsMgr

    if options.normalForm:
        NormalFormReduction(factGraph)

    if not options.sparqlEndpoint and options.naive:
        workingMemory = generateTokenSet(factGraph)
    if options.builtins:
        import imp
        userFuncs = imp.load_source('builtins', options.builtins)
        rule_store, rule_graph, network = SetupRuleStore(
            makeNetwork=True, additionalBuiltins=userFuncs.ADDITIONAL_FILTERS)
    else:
        rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
    network.inferredFacts = closureDeltaGraph
    network.nsMap = nsBinds

    if options.dlp:
        from FuXi.DLP.DLNormalization import NormalFormReduction
        if options.ontology:
            ontGraph = Graph()
            for fileN in options.ontology:
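Example #49 breaks off inside the options.dlp branch, just as it starts iterating over options.ontology. Below is a hedged sketch of how that loop plausibly continues, reusing only calls visible in these examples; the N3 input format and the loop body are assumptions, not the original source.

# Sketch of the truncated loop's likely continuation (an assumption):
# parse each ontology file, normalize it, and compile it into rules
# via description logic programming.
ontGraph = Graph()
for fileN in options.ontology:
    ontGraph.parse(fileN, format='n3')  # assumed N3 input, as elsewhere
NormalFormReduction(ontGraph)
network.setupDescriptionLogicProgramming(ontGraph)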
コード例 #50
def load_graph(schema, data, additional=None, debug=False):
    """
    Load the specified schema, extract rules,
    load data and any additional ontologies,
    and infer facts.

    :param schema: Schema Graph
    :type schema: `rdflib.ConjunctiveGraph`
    :param data: Data Graph
    :type data: `rdflib.ConjunctiveGraph`
    :param additional: Optional iterable of additional ontology Graphs
    :type additional: iterable of `rdflib.ConjunctiveGraph`, or None
    :param debug: Whether to print debugging information
    :type debug: bool

    :returns: (Graph w/ inferred facts, Graph w/o inferred facts)
    :rtype: tuple(`rdflib.ConjunctiveGraph`,
                  `rdflib.ConjunctiveGraph`)

    """
    import copy
    import datetime

    from rdflib import ConjunctiveGraph
    from FuXi.Rete.Util import generateTokenSet
    from FuXi.DLP.DLNormalization import NormalFormReduction
    from FuXi.Rete.RuleStore import SetupRuleStore
    rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)

    additional_schema = ConjunctiveGraph()
    len_additional = 0
    if additional:
        for g in additional:
            additional_schema += g
            len_additional += len(g)

    len_schema = len(schema)
    len_data = len(data)

    schema += additional_schema

    NormalFormReduction(schema)
    network.setupDescriptionLogicProgramming(schema)
    network.feedFactsToAdd(generateTokenSet(schema))
    network.feedFactsToAdd(generateTokenSet(data))

    if debug:
        print network

        print dir(network)

        for r in network.rules:
            print r

        for f in network.inferredFacts:
            print f

    len_inferred = len(network.inferredFacts)

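    # rest_list_table_row is assumed to be a project-local helper that
    # renders one row of a reStructuredText list-table; it is not part
    # of FuXi or rdflib.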
    print "==================="
    print "Component Reference"
    print "==================="
    print "Report Information"
    print "=================="
    print "Generated by Bobcat @ %s" % datetime.datetime.now()
    print ""
    print ".. list-table::"
    print "   :header-rows: 1"
    print ""
    print rest_list_table_row(["Graph", "Triple Count"])
    print rest_list_table_row(["Schema", len_schema])
    print rest_list_table_row(["Additional", len_additional])
    print rest_list_table_row(["Data", len_data])
    print rest_list_table_row(["", ""])
    print rest_list_table_row(["Inferred", len_inferred])
    print rest_list_table_row(["", ""])
    print rest_list_table_row(
        ["Subtotal", len_schema + len_data + len_inferred + len_additional])
    print rest_list_table_row(["", ""])

    gall = schema
    gall += data
    if additional:
        for g in additional:
            gall += g

    gall_inferred = copy.deepcopy(gall)

    for f in network.inferredFacts:
        gall_inferred.add(f)

    print rest_list_table_row(["Union Total", len(gall_inferred)])

    return gall_inferred, gall
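A hedged usage sketch for load_graph; the file names and N3 format are assumptions, and rest_list_table_row must be supplied by the calling project.

from rdflib import ConjunctiveGraph

schema = ConjunctiveGraph()
schema.parse('schema.n3', format='n3')  # hypothetical schema file
data = ConjunctiveGraph()
data.parse('data.n3', format='n3')      # hypothetical data file

# Returns (union graph with inferred facts, union graph without them)
inferred, asserted = load_graph(schema, data, debug=False)
print("%d facts were inferred" % (len(inferred) - len(asserted)))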