def __init__(self, name):
    """Wrap *name* in a Variable and initialize the base expression.

    Re-entrant: if this instance was already initialized (instances may be
    cached/interned), the call is a no-op.
    """
    if self._initialized:
        return
    super(VariableExpression, self).__init__(Variable(name))
    self._initialized = True
def canonicalize(gp, shorten_varnames=True):
    """Returns a canonical basic graph pattern (BGP) with canonical var names.

    :param gp: a GraphPattern in form of a list of triples with Variables
    :param shorten_varnames: If True (default) long sha256 based var-names
        will be renamed to short enumerated ones.
    :return: A canonical GraphPattern with Variables renamed.

    >>> U = URIRef
    >>> V = Variable
    >>> gp1 = GraphPattern([
    ...     (V('blub'), V('bar'), U('blae')),
    ...     (V('foo'), V('bar'), U('bla')),
    ...     (SOURCE_VAR, V('poo'), TARGET_VAR),
    ... ])
    >>> cgp = canonicalize(gp1)
    >>> v_poo = cgp[[t[2] for t in cgp].index(TARGET_VAR)][1]
    >>> v_foo = cgp[[t[2] for t in cgp].index(U('bla'))][0]
    >>> v_bar = cgp[[t[2] for t in cgp].index(U('bla'))][1]
    >>> v_blub = cgp[[t[2] for t in cgp].index(U('blae'))][0]
    >>> expected = GraphPattern([
    ...     (SOURCE_VAR, v_poo, TARGET_VAR),
    ...     (v_foo, v_bar, U('bla')),
    ...     (v_blub, v_bar, U('blae')),
    ... ])
    >>> cgp == expected
    True

    And again in a different order:
    >>> gp2 = GraphPattern([
    ...     (SOURCE_VAR, V('bla'), TARGET_VAR),
    ...     (V('blub'), V('bli'), U('bla')),
    ...     (V('bluub'), V('bli'), U('blae')),
    ... ])
    >>> cgp == canonicalize(gp2)
    True
    """
    assert isinstance(gp, GraphPattern)
    # SOURCE_VAR / TARGET_VAR keep their names; all other vars get canonical
    # 'cb...' names from the BGP canonicalization.
    cbgp = canonicalize_sparql_bgp(gp, fixed_vars={SOURCE_VAR, TARGET_VAR})
    mapping = {}
    if shorten_varnames:
        # rename the long hash-based 'cb...' vars to enumerated 'vcb<i>' vars
        vars_ = set(chain.from_iterable(cbgp))
        vars_ = sorted([
            v for v in vars_
            if isinstance(v, Variable) and v.startswith('cb')
        ])
        for i, v in enumerate(vars_):
            mapping[v] = Variable('vcb%d' % i)
    cgp = GraphPattern(cbgp, mapping=mapping)
    # sanity check: canonicalization must preserve triple/node/edge counts and
    # the identifier-count multiset
    if not (len(gp) == len(cbgp) == len(cgp) and
            len(gp.nodes) == len(cgp.nodes) and
            len(gp.edges) == len(cgp.edges) and
            sorted(gp.identifier_counts().values()) ==
            sorted(cgp.identifier_counts().values())):
        # canonicalization should never change any of the features above, but
        # it did before (e.g., https://github.com/RDFLib/rdflib/issues/494 ).
        # this is a last resort safety-net
        logger.warning(
            'GraphPattern canonicalization failed, returning original:\n%r\n'
            'Canonicalized RDF Graph:\n%r\n'
            'Canonicalized Graph Pattern:\n%r\n',
            gp, cbgp, cgp)
        return gp
    return cgp
assert isinstance(queryStringOrObj, basestring) #self.setNamespaceBindings(initNs) if len(initNs) > 0: raise Exception("initNs not supported.") if len(initBindings) > 0: raise Exception("initBindings not supported.") self.setQuery(queryStringOrObj) return SPARQLResult(SPARQLWrapper.query(self).response) def triples(self, (subject, predicate, obj), context=None): """ SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj } """ subjVar = Variable('subj') predVar = Variable('pred') objVar = Variable('obj') termsSlots = {} selectVars = [] if subject is not None: termsSlots[subjVar] = subject else: selectVars.append(subjVar) if predicate is not None: termsSlots[predVar] = predicate else: selectVars.append(predVar) if obj is not None: termsSlots[objVar] = obj
def test_graph_pattern():
    """Exercise GraphPattern: replace, SPARQL rendering, +/-, only_with,
    triples_by_identifier / _nodes / _edges."""
    g = Graph()
    g.add((URIRef('foo'), URIRef('bar'), Literal('bla')))
    g.add((URIRef('foo'), URIRef('baa'), Literal('bla')))
    g.add((URIRef('faa'), URIRef('boo'), Literal('blub')))
    gp = GraphPattern(g)
    gp = gp.replace({
        URIRef('foo'): Variable('a'),
        Literal('bla'): Variable('l'),
    })
    sparql = gp.to_sparql_select_query()
    expected = 'SELECT ?a ?l WHERE {\n' \
        ' ?a <baa> ?l .\n' \
        ' ?a <bar> ?l .\n' \
        ' <faa> <boo> "blub" .\n' \
        '}\n'
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    # rendering must be deterministic / repeatable
    sparql = gp.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    gp2 = gp.replace({URIRef('baa'): Variable('b')})
    sparql = gp2.to_sparql_select_query(bind={Variable('a'): URIRef('bound')})
    expected = 'SELECT ?a ?b ?l WHERE {\n' \
        ' ?a ?b ?l .\n' \
        ' ?a <bar> ?l .\n' \
        ' <faa> <boo> "blub" .\n' \
        ' FILTER(\n' \
        ' ?a=<bound>\n' \
        ' )\n' \
        '}\n'
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    gp3 = GraphPattern(g, source_node=URIRef('foo'), target_node=Literal('bla'))
    expected = 'SELECT ?source ?target WHERE {\n' \
        ' ?source <baa> ?target .\n' \
        ' ?source <bar> ?target .\n' \
        ' <faa> <boo> "blub" .\n' \
        '}\n'
    sparql = gp3.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    gp4 = gp3.only_with([TARGET_VAR])
    expected = 'SELECT ?source ?target WHERE {\n' \
        ' ?source <baa> ?target .\n' \
        ' ?source <bar> ?target .\n' \
        '}\n'
    sparql = gp4.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    gp4_red = gp4.replace({URIRef('baa'): URIRef('bar')})
    assert len(gp4) > len(gp4_red), \
        "double edge should've been reduced: %s" % (gp4_red,)
    gp5 = gp3.only_with([URIRef('bar')])
    expected = 'SELECT ?source ?target WHERE {\n' \
        ' ?source <bar> ?target .\n' \
        '}\n'
    sparql = gp5.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    # pattern arithmetic: union
    gp6 = gp + gp2
    expected = 'SELECT ?a ?b ?l WHERE {\n' \
        ' ?a ?b ?l .\n' \
        ' ?a <baa> ?l .\n' \
        ' ?a <bar> ?l .\n' \
        ' <faa> <boo> "blub" .\n' \
        '}\n'
    sparql = gp6.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    # pattern arithmetic: difference
    gp7 = gp - gp2
    expected = 'SELECT ?a ?l WHERE {\n' \
        ' ?a <baa> ?l .\n' \
        '}\n'
    sparql = gp7.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    gp8 = gp + ((TARGET_VAR, TARGET_VAR, TARGET_VAR), )
    expected = 'SELECT ?a ?l ?target WHERE {\n' \
        ' ?a <baa> ?l .\n' \
        ' ?a <bar> ?l .\n' \
        ' ?target ?target ?target .\n' \
        ' <faa> <boo> "blub" .\n' \
        '}\n'
    sparql = gp8.to_sparql_select_query()
    assert sparql == expected, "expected: %s\ngot: %s" % (expected, sparql)
    gp9 = gp - gp
    assert not bool(gp9), 'gp9 was not empty'
    gp9 = gp - list(gp)
    assert not bool(gp9), 'gp9 - list(gp9) was not empty'
    # test triples by identifier:
    tbi = gp8.triples_by_identifier()
    expected = {
        Variable('a'): {
            (Variable('a'), URIRef('baa'), Variable('l')),
            (Variable('a'), URIRef('bar'), Variable('l')),
        },
        Variable('l'): {
            (Variable('a'), URIRef('baa'), Variable('l')),
            (Variable('a'), URIRef('bar'), Variable('l')),
        },
        URIRef('baa'): {
            (Variable('a'), URIRef('baa'), Variable('l')),
        },
        URIRef('bar'): {
            (Variable('a'), URIRef('bar'), Variable('l')),
        },
        Variable('target'): {
            (Variable('target'), Variable('target'), Variable('target')),
        },
        URIRef('faa'): {
            (URIRef('faa'), URIRef('boo'), Literal('blub')),
        },
        URIRef('boo'): {
            (URIRef('faa'), URIRef('boo'), Literal('blub')),
        },
        Literal('blub'): {
            (URIRef('faa'), URIRef('boo'), Literal('blub')),
        },
    }
    assert tbi == expected, 'triples_by_identifier %s != %s' % (tbi, expected)
    # unknown nodes map to empty sets
    tbn = gp8.triples_by_nodes({
        Variable('a'), Variable('target'), URIRef('notthere'),
        URIRef('faa'), URIRef('boo')
    })
    expected = {
        Variable('a'): {
            (Variable('a'), URIRef('baa'), Variable('l')),
            (Variable('a'), URIRef('bar'), Variable('l')),
        },
        Variable('target'): {
            (Variable('target'), Variable('target'), Variable('target')),
        },
        URIRef('faa'): {
            (URIRef('faa'), URIRef('boo'), Literal('blub')),
        },
        URIRef('notthere'): set(),
        URIRef('boo'): set(),
    }
    assert tbn == expected, 'triples_by_nodes %s != %s' % (tbn, expected)
    # Variable('?target') normalizes to Variable('target')
    tbe = gp8.triples_by_edges(
        {URIRef('baa'), Variable('a'), Variable('?target')})
    expected = {
        URIRef('baa'): {
            (Variable('a'), URIRef('baa'), Variable('l')),
        },
        Variable('target'): {
            (Variable('target'), Variable('target'), Variable('target')),
        },
        Variable('a'): set(),
    }
    assert tbe == expected, 'triples_by_edges %s != %s' % (tbe, expected)
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a prepared SPARQL query against *graph*.

    initBindings are injected as a VALUES clause joined into the right spot
    of the algebra tree; FROM / FROM NAMED dataset clauses are loaded into a
    cloned context before evaluation.
    """
    ctx = QueryContext(graph)
    ctx.prologue = query.prologue
    main = query.algebra
    if initBindings:
        # add initBindings as a values clause
        values = {}  # no dict comprehension in 2.6 :(
        for k, v in initBindings.iteritems():
            if not isinstance(k, Variable):
                k = Variable(k)
            values[k] = v
        main = main.clone()  # clone to not change prepared q
        main['p'] = main.p.clone()
        # Find the right place to insert MultiSet join: descend past
        # Slice/Distinct/OrderBy/Extend wrappers, cloning each node touched
        # so the prepared query stays unmodified.
        repl = main.p
        if repl.name == 'Slice':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.name == 'Distinct':
            repl['p'] = repl.p.clone()
            repl = repl.p
        # NOTE(review): the two checks below inspect repl.p.name while the
        # two above inspect repl.name — looks asymmetric; confirm against
        # upstream before changing.
        if repl.p.name == 'OrderBy':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.p.name == 'Extend':
            repl['p'] = repl.p.clone()
            repl = repl.p
        repl['p'] = Join(repl.p, ToMultiSet(Values([values])))
        # TODO: Vars?
    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception(
                "Non-conjunctive-graph doesn't know about " +
                "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        firstDefault = False
        for d in main.datasetClause:
            if d.default:
                if firstDefault:
                    # replace current default graph
                    dg = ctx.dataset.get_context(BNode())
                    ctx = ctx.pushGraph(dg)
                firstDefault = True
                ctx.load(d.default, default=True)
            elif d.named:
                g = d.named
                ctx.load(g, default=False)
    return evalPart(ctx, main)
def __init__(self, s=Variable('s'), p=Variable('p'), o=Variable('o')):
    """Hold a subject/predicate/object triple; defaults are the free
    variables ?s ?p ?o."""
    self.s, self.p, self.o = s, p, o
# NOTE(review): chunk starts mid-method (tail of a query() method) and is
# truncated at the end; tokens are kept as-is.
return Result.parse(SPARQLWrapper.query(self).response)

# Python 2 tuple-parameter syntax (invalid in Python 3).
def triples(self, (s, p, o), context=None):
    """ SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj } """
    if (isinstance(s, BNode) or
            isinstance(p, BNode) or
            isinstance(o, BNode)):
        raise Exception(
            "SPARQLStore does not support Bnodes! See http://www.w3.org/TR/sparql11-query/#BGPsparqlBNodes"
        )
    # unbound positions become variables to be projected
    # NOTE(review): the truthiness tests below also treat falsy terms such as
    # Literal('') or Literal(0) as unbound — confirm whether `is None` was
    # intended.
    vars = []
    if not s:
        s = Variable('s')
        vars.append(s)
    if not p:
        p = Variable('p')
        vars.append(p)
    if not o:
        o = Variable('o')
        vars.append(o)
    if vars:
        v = ' '.join([term.n3() for term in vars])
    else:
        v = '*'
    # NOTE(review): chunk truncated here — the format arguments continue
    # beyond this view.
    query = "SELECT %s WHERE { %s %s %s }" % \
def test_push_triple(self):
    """push_triple appends exactly one statement to the WHERE clause."""
    self.s.push_triple(Variable("var1"), RDFS.type, DEB.Source)
    stmts = self.s.query.whereclause.stmts
    self.assertEqual(1, len(stmts))
    self.assertEqual(self.triple1, stmts[0])
def test_add_triple_variables(self):
    """All three variables of an all-variable triple are registered."""
    triple = Triple(Variable("a"), Variable("b"), Variable("c"))
    self.s.add_triple_variables(triple)
    self.assertEqual(3, len(self.s.query.variables))
def test_add_variable(self):
    """Adding the same variable name twice keeps a single entry."""
    for _ in range(2):
        self.s.add_variable("varname")
    self.assertEqual([Variable("varname")], self.s.query.variables)
def test_add_triple_variables(self):
    """add_triple stores the statement and registers its variables.

    BUG FIX: the original compared ``varlist.sort()`` with
    ``self.s.query.variables.sort()`` — ``list.sort()`` sorts in place and
    returns ``None``, so the assertion compared ``None == None`` and could
    never fail.  ``sorted()`` returns the sorted lists so the contents are
    actually compared.
    """
    self.s.add_triple(self.triple3)
    self.assertEqual(1, len(self.s.query.whereclause.stmts))
    self.assertEqual(self.triple3, self.s.query.whereclause.stmts[0])
    varlist = [Variable("var3"), Variable("var4")]
    self.assertEqual(sorted(varlist), sorted(self.s.query.variables))
def test_add_filter_notbound(self):
    """add_filter_notbound pushes a single FILTER(!bound(?a)) statement."""
    var = Variable("a")
    self.s.add_filter_notbound(var)
    stmts = self.s.query.whereclause.stmts
    self.assertEqual(1, len(stmts))
    expected = Filter(UnaryExpression(FunCall("bound", var), "!"))
    self.assertEqual(expected, stmts[0])
def test_set_orderby(self):
    """set_orderby stores the variable on the query's ORDER BY clause."""
    var_name = "var"
    self.s.set_orderby(var_name)
    self.assertEqual(Variable(var_name), self.s.query.orderby.variable)
# Demo: magic-set transformation of the CWM family example, then render the
# adorned goal as a SPARQL query.
#
# BUG FIX: the final print call was missing its closing parenthesis, and the
# expected SPARQL output had been fused into the source (extraction
# garbling); the output is preserved below as a comment.
from FuXi.Rete.RuleStore import SetupRuleStore
from FuXi.Rete.Util import generateTokenSet
from FuXi.Horn.HornRules import HornFromN3
from FuXi.Rete.Magic import MagicSetTransformation, AdornLiteral
from FuXi.SPARQL import RDFTuplesToSPARQL

exNs = Namespace('http://dev.w3.org/2000/10/swap/test/cwm/fam.n3#')
rules = HornFromN3('http://dev.w3.org/2000/10/swap/test/cwm/fam-rules.n3')
factGraph = Graph().parse(
    'http://dev.w3.org/2000/10/swap/test/cwm/fam.n3', format='n3')
factGraph.bind(u'ex', exNs)
dPreds = [exNs.ancestor]
rule_store, rule_graph, network = SetupRuleStore(makeNetwork=True)
network.nsMap = {u'ex': exNs}
closureDeltaGraph = Graph()
network.inferredFacts = closureDeltaGraph
goals = [(exNs.david, exNs.ancestor, Variable('ANCESTOR'))]
for rule in MagicSetTransformation(factGraph, rules, goals, dPreds):
    network.buildNetworkFromClause(rule)
    # network.rules.add(rule)
    print("\t", rule)
goalLit = AdornLiteral(goals[0])
adornedGoalSeed = goalLit.makeMagicPred()
goal = adornedGoalSeed.toRDFTuple()
print(RDFTuplesToSPARQL([goalLit], factGraph, vars=[Variable('ANCESTOR')]))
# Expected output:
# SELECT ?ANCESTOR
# { <http://dev.w3.org/2000/10/swap/test/cwm/fam.n3#david>
#   <http://dev.w3.org/2000/10/swap/test/cwm/fam.n3#ancestor> ?ANCESTOR }
def set_orderby(self, varname):
    """Set the query's ORDER BY clause to sort on *varname*."""
    var = Variable(varname)
    self.query.orderby = Orderby(var)
# NOTE(review): this chunk starts mid-function — `operands` is defined
# outside the visible span; tokens are kept as-is.
# Rewrite owl:equivalentClass pairs as mutual rdfs:subClassOf statements.
for clsLHS, clsRHS in operands:
    if isinstance(clsLHS, URIRef) and isinstance(clsRHS, URIRef):
        # named ~ named: subsumption in both directions
        owlGraph.add((clsLHS, RDFS.subClassOf, clsRHS))
        owlGraph.add((clsRHS, RDFS.subClassOf, clsLHS))
        owlGraph.remove((clsLHS, OWL_NS.equivalentClass, clsRHS))
    elif isinstance(clsLHS, URIRef) and isinstance(clsRHS, BNode):
        # named ~ anonymous: only named => anonymous is kept
        owlGraph.add((clsLHS, RDFS.subClassOf, clsRHS))
        owlGraph.remove((clsLHS, OWL_NS.equivalentClass, clsRHS))
    elif isinstance(clsLHS, BNode) and isinstance(clsRHS, URIRef):
        # anonymous ~ named: only named => anonymous is kept
        owlGraph.add((clsRHS, RDFS.subClassOf, clsLHS))
        owlGraph.remove((clsLHS, OWL_NS.equivalentClass, clsRHS))


if __name__ == '__main__':
    goal = (EX_NS.KneeJoint, RDFS.subClassOf, Variable('Class'))
    ontGraph = createTestOntGraph()
    # ontGraph.add((EX_NS.KneeJoint,
    #               RDFS.subClassOf,
    #               EX_NS.KneeJoint))
    NormalizeSubsumption(ontGraph)
    for c in AllClasses(ontGraph):
        log.debug(c.__repr__(True))
    SetupMetaInterpreter(ontGraph, goal)
# test()
# import doctest
# doctest.testmod()
# from FuXi.DLP.CompletionReasoning import LIST_NS
# from FuXi.DLP.CompletionReasoning import KOR_NS
# from FuXi.DLP.CompletionReasoning import EX_NS
def result_to_dataset(result):
    """Turn a SELECT result with ?s ?p ?o ?g bindings into a Dataset of
    quads."""
    s_var, p_var, o_var, g_var = (Variable(n) for n in 'spog')
    ds = Dataset()
    for binding in result.bindings:
        quad = (binding[s_var], binding[p_var],
                binding[o_var], binding[g_var])
        ds.add(quad)
    return ds
def setUp(self):
    """Build AlphaNode fixtures and binding dicts for the join tests."""
    # alpha nodes describing hospitalization / event triple patterns
    self.aNode1 = AlphaNode(
        (Variable('HOSP'), TEST_NS.contains, Variable('HOSP_START_DATE')))
    self.aNode2 = AlphaNode(
        (Variable('HOSP'), RDF.type, TEST_NS.Hospitalization))
    self.aNode5 = AlphaNode(
        (Variable('HOSP_START_DATE'), TEST_NS.dateTimeMin,
         Variable('ENCOUNTER_START')))
    self.aNode6 = AlphaNode(
        (Variable('HOSP_STOP_DATE'), RDF.type, TEST_NS.EventStopDate))
    self.aNode7 = AlphaNode(
        (Variable('HOSP_STOP_DATE'), TEST_NS.dateTimeMax,
         Variable('ENCOUNTER_STOP')))
    self.aNode8 = AlphaNode(
        (Variable('EVT_DATE'), RDF.type, TEST_NS.EventStartDate))
    self.aNode9 = AlphaNode(
        (Variable('EVT_DATE'), TEST_NS.dateTimeMin,
         Variable('EVT_START_MIN')))
    self.aNode10 = AlphaNode(
        (Variable('EVT'), TEST_NS.contains, Variable('EVT_DATE')))
    self.aNode11 = AlphaNode(
        (Variable('EVT'), RDF.type, Variable('EVT_KIND')))
    # bindings already joined on the hospitalization side
    self.joinedBindings = {
        Variable('HOSP_START_DATE'): BNode(),
        Variable('HOSP_STOP_DATE'): BNode(),
        Variable('HOSP'): BNode()
    }
    # bindings not yet joined (event side)
    self.unJoinedBindings = {
        Variable('EVT'): BNode(),
        Variable('EVT_DATE'): BNode(),
        Variable('EVT_KIND'): TEST_NS.ICUStay
    }
    # every datetime variable gets the same fixed xsd:dateTime literal
    for dtVariable in [
            Variable('ENCOUNTER_START'),
            Variable('ENCOUNTER_STOP'),
            Variable('EVT_START_MIN')
    ]:
        self.unJoinedBindings[dtVariable] = Literal(
            "2007-02-14T10:00:00", datatype=_XSD_NS.dateTime)
def testVariableKey():
    """An initBindings key given as Variable('s') binds ?s in the query."""
    bindings = {Variable("s"): EX['s1']}
    results = list(
        g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings=bindings))
    assert len(results) == 1, results
def testFilter():
    """A bound variable may be referenced from inside a FILTER."""
    bindings = {Variable("?x"): EX['s1']}
    results = list(
        g2.query("SELECT ?o WHERE { ?s :p ?o FILTER (?s = ?x)}",
                 initBindings=bindings))
    assert len(results) == 1, results
def test_graph_pattern_canonicalization():
    """Regression tests for canonicalize().

    rdflib.compare.to_canonical_graph(g) sometimes collapses distinct
    bnodes (see https://github.com/RDFLib/rdflib/issues/494). The first
    pattern triggers that, so canonicalize() currently returns the pattern
    itself; we only check the length in case rdflib gets fixed.
    """
    triples = (
        (SOURCE_VAR, Variable('vcb0'), TARGET_VAR),
        (SOURCE_VAR, Variable('vrBYUk8'), TARGET_VAR),
        (TARGET_VAR, Variable('vrBYUk8'), SOURCE_VAR),
        (TARGET_VAR, Variable('vrvGapn'), SOURCE_VAR),
    )
    pattern = GraphPattern(triples)
    assert len(pattern) == len(canonicalize(pattern))

    # regression: canonicalization once failed to rewrite fixed vars
    triples = (
        (TARGET_VAR, Variable('v0'), SOURCE_VAR),
        (TARGET_VAR, Variable('v0'), Variable('v1')),
        (TARGET_VAR, Variable('v2'), Variable('v1')),
        (TARGET_VAR, Variable('v2'), Variable('v3')),
        (TARGET_VAR, Variable('v4'), Variable('v5')),
    )
    pattern = GraphPattern(triples)
    assert len(pattern) == len(canonicalize(pattern))
def evalUpdate(graph, update, initBindings=None, actionLog=False):
    """

    http://www.w3.org/TR/sparql11-update/#updateLanguage

    'A request is a sequence of operations [...] Implementations MUST
    ensure that operations of a single request are executed in a
    fashion that guarantees the same effects as executing them in
    lexical order.

    Operations all result either in success or failure.

    If multiple operations are present in a single request, then a
    result of failure from any operation MUST abort the sequence of
    operations, causing the subsequent operations to be ignored.'

    This will return None on success and raise Exceptions on error

    """
    res = []
    # each operation gets a fresh context over the same graph
    for u in update:
        ctx = QueryContext(graph)
        ctx.prologue = u.prologue
        if initBindings:
            for k, v in initBindings.items():
                if not isinstance(k, Variable):
                    k = Variable(k)
                ctx[k] = v
        try:
            # dispatch on the parsed operation name
            if u.name == 'Load':
                result = evalLoad(ctx, u)
                if result:
                    res.append(result)
            elif u.name == 'Clear':
                evalClear(ctx, u)
            elif u.name == 'Drop':
                evalDrop(ctx, u)
            elif u.name == 'Create':
                evalCreate(ctx, u)
            elif u.name == 'Add':
                evalAdd(ctx, u)
            elif u.name == 'Move':
                evalMove(ctx, u)
            elif u.name == 'Copy':
                evalCopy(ctx, u)
            elif u.name == 'InsertData':
                result = evalInsertData(ctx, u)
                if result:
                    res.append(result)
            elif u.name == 'DeleteData':
                result = evalDeleteData(ctx, u)
                if result:
                    res.append(result)
            elif u.name == 'DeleteWhere':
                result = evalDeleteWhere(ctx, u)
                if result:
                    res.append(result)
            elif u.name == 'Modify':
                result = evalModify(ctx, u)
                if result:
                    res.append(result)
            else:
                raise Exception('Unknown update operation: %s' % (u, ))
        except UnSupportedQuery as e:
            # unsupported query aborts the sequence, reporting the error
            return res, e
        except Exception:
            # SILENT operations swallow failures; otherwise abort
            if not u.silent:
                raise
    return res, None
def triples(self, spo, context=None):
    """
    - tuple **(s, o, p)**
      the triple used as filter for the SPARQL select.
      (None, None, None) means anything.
    - context **context** the graph effectively calling this method.

    Returns a tuple of triples executing essentially a SPARQL like
    SELECT ?subj ?pred ?obj WHERE { ?subj ?pred ?obj }

    **context** may include three parameter
    to refine the underlying query:
    * LIMIT: an integer to limit the number of results
    * OFFSET: an integer to enable paging of results
    * ORDERBY: an instance of Variable('s'), Variable('o') or Variable('p')
      or, by default, the first 'None' from the given triple

    .. warning::
       - Using LIMIT or OFFSET automatically include ORDERBY otherwise this
         is because the results are retrieved in a not deterministic way
         (depends on the walking path on the graph)
       - Using OFFSET without defining LIMIT will discard the first OFFSET
         - 1 results

    ``
    a_graph.LIMIT = limit
    a_graph.OFFSET = offset
    triple_generator = a_graph.triples(mytriple):
        #do something
    #Removes LIMIT and OFFSET if not required for the next triple() calls
    del a_graph.LIMIT
    del a_graph.OFFSET
    ``
    """
    s, p, o = spo
    vars = []
    # BUG FIX: use explicit `is None` tests. The contract above defines None
    # as the wildcard; the previous truthiness tests (`if not s:`) also
    # replaced falsy-but-valid terms such as Literal('') or Literal(0) with
    # fresh variables, silently broadening the query.
    if s is None:
        s = Variable("s")
        vars.append(s)
    if p is None:
        p = Variable("p")
        vars.append(p)
    if o is None:
        o = Variable("o")
        vars.append(o)
    if vars:
        v = " ".join([term.n3() for term in vars])
        verb = "SELECT %s " % v
    else:
        # fully bound pattern: a boolean ASK is enough
        verb = "ASK"
    nts = self.node_to_sparql
    query = "%s { %s %s %s }" % (verb, nts(s), nts(p), nts(o))

    # The ORDER BY is necessary
    if (hasattr(context, LIMIT) or hasattr(context, OFFSET)
            or hasattr(context, ORDERBY)):
        # order by the first variable position, or the explicit ORDERBY var
        var = None
        if isinstance(s, Variable):
            var = s
        elif isinstance(p, Variable):
            var = p
        elif isinstance(o, Variable):
            var = o
        elif hasattr(context, ORDERBY) and isinstance(
                getattr(context, ORDERBY), Variable):
            var = getattr(context, ORDERBY)
        query = query + " %s %s" % (ORDERBY, var.n3())
    try:
        query = query + " LIMIT %s" % int(getattr(context, LIMIT))
    except (ValueError, TypeError, AttributeError):
        pass
    try:
        query = query + " OFFSET %s" % int(getattr(context, OFFSET))
    except (ValueError, TypeError, AttributeError):
        pass

    result = self._query(
        query,
        default_graph=context.identifier
        if self._is_contextual(context) else None,
    )
    if vars:
        for row in result:
            yield (
                row.get(s, s),
                row.get(p, p),
                row.get(o, o),
            ), None  # why is the context here not the passed in graph 'context'?
    else:
        if result.askAnswer:
            yield (s, p, o), None
def T(owlGraph, complementExpansions=[], derivedPreds=[]):
    """
    #Subsumption (purely for TBOX classification)
    {?C rdfs:subClassOf ?SC. ?A rdfs:subClassOf ?C} => {?A rdfs:subClassOf ?SC}.
    {?C owl:equivalentClass ?A} => {?C rdfs:subClassOf ?A. ?A rdfs:subClassOf ?C}.
    {?C rdfs:subClassOf ?SC. ?SC rdfs:subClassOf ?C} => {?C owl:equivalentClass ?SC}.

    T(rdfs:subClassOf(C, D)) -> Th(D(y)) :- Tb(C(y))

    T(owl:equivalentClass(C, D)) -> { T(rdfs:subClassOf(C, D)
                                      T(rdfs:subClassOf(D, C) }

    A generator over the Logic Programming rules which correspond
    to the DL ( unary predicate logic ) subsumption axiom described via
    rdfs:subClassOf
    """
    # owl:complementOf between named classes
    for s, p, o in owlGraph.triples((None, OWL_NS.complementOf, None)):
        if isinstance(o, URIRef) and isinstance(s, URIRef):
            headLiteral = Uniterm(
                RDF.type,
                [Variable("X"), SkolemizeExistentialClasses(s)],
                newNss=owlGraph.namespaces())
            yield NormalizeClause(Clause(Tc(owlGraph, o), headLiteral))
    # rdfs:subClassOf: Th(D(y)) :- Tb(C(y))
    for c, p, d in owlGraph.triples((None, RDFS.subClassOf, None)):
        try:
            yield NormalizeClause(Clause(Tb(owlGraph, c), Th(owlGraph, d)))
        except UnsupportedNegation:
            import warnings
            warnings.warn(
                "Unable to handle negation in DL axiom (%s), skipping" % c,
                # e.msg,
                SyntaxWarning, 3)
        # assert isinstance(c, URIRef), "%s is a kind of %s"%(c, d)
    # owl:equivalentClass: subsumption in both directions
    for c, p, d in owlGraph.triples((None, OWL_NS.equivalentClass, None)):
        if c not in derivedPreds:
            yield NormalizeClause(Clause(Tb(owlGraph, c), Th(owlGraph, d)))
            yield NormalizeClause(Clause(Tb(owlGraph, d), Th(owlGraph, c)))
    # owl:intersectionOf
    for s, p, o in owlGraph.triples((None, OWL_NS.intersectionOf, None)):
        try:
            if s not in complementExpansions:
                if s in derivedPreds:
                    import warnings
                    warnings.warn(
                        "Derived predicate (%s) is defined via a conjunction (consider using a complex GCI) " % owlGraph.qname(s),
                        SyntaxWarning, 3)
                elif isinstance(s, BNode):
                    # and (None, None, s) not in owlGraph:
                    # and (s, RDFS.subClassOf, None) in owlGraph:
                    # complex GCI, pass over (handled) by Tb
                    continue
                conjunction = []
                handleConjunct(conjunction, owlGraph, o)
                body = And(conjunction)
                head = Uniterm(
                    RDF.type,
                    [Variable("X"), SkolemizeExistentialClasses(s)],
                    newNss=owlGraph.namespaces())
                # O1 ^ O2 ^ ... ^ On => S(?X)
                yield Clause(body, head)
                if isinstance(s, URIRef):
                    # S(?X) => O1 ^ O2 ^ ... ^ On
                    # special case, owl:intersectionOf is a neccessary and
                    # sufficient criteria and should thus work in *both*
                    # directions.
                    # This rule is not added for anonymous classes or
                    # derived predicates
                    if s not in derivedPreds:
                        yield Clause(head, body)
        except UnsupportedNegation:
            import warnings
            warnings.warn(
                "Unable to handle negation in DL axiom (%s), skipping" % s,
                # e.msg,
                SyntaxWarning, 3)
    # owl:unionOf over named classes
    for s, p, o in owlGraph.triples((None, OWL_NS.unionOf, None)):
        if isinstance(s, URIRef):
            # special case, owl:unionOf is a neccessary and sufficient
            # criteria and should thus work in *both* directions
            body = Or([Uniterm(RDF.type,
                               [Variable("X"),
                                NormalizeBooleanClassOperand(i, owlGraph)],
                               newNss=owlGraph.namespaces())
                       for i in Collection(owlGraph, o)])
            head = Uniterm(RDF.type, [Variable("X"), s],
                           newNss=owlGraph.namespaces())
            yield Clause(body, head)
    for s, p, o in owlGraph.triples((None, OWL_NS.inverseOf, None)):
        # T(owl:inverseOf(P, Q)) -> { Q(x, y) :- P(y, x)
        #                             P(y, x) :- Q(x, y) }
        newVar = Variable(BNode())
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        o = SkolemizeExistentialClasses(o) if isinstance(o, BNode) else o
        body1 = Uniterm(s, [newVar, Variable("X")],
                        newNss=owlGraph.namespaces())
        head1 = Uniterm(o, [Variable("X"), newVar],
                        newNss=owlGraph.namespaces())
        yield Clause(body1, head1)
        newVar = Variable(BNode())
        body2 = Uniterm(o, [Variable("X"), newVar],
                        newNss=owlGraph.namespaces())
        head2 = Uniterm(s, [newVar, Variable("X")],
                        newNss=owlGraph.namespaces())
        yield Clause(body2, head2)
    for s, p, o in owlGraph.triples(
            (None, RDF.type, OWL_NS.TransitiveProperty)):
        # T(owl:TransitiveProperty(P)) -> P(x, z) :- P(x, y) ^ P(y, z)
        y = Variable(BNode())
        z = Variable(BNode())
        x = Variable("X")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        body = And([Uniterm(s, [x, y], newNss=owlGraph.namespaces()),
                    Uniterm(s, [y, z], newNss=owlGraph.namespaces())])
        head = Uniterm(s, [x, z], newNss=owlGraph.namespaces())
        yield Clause(body, head)
    for s, p, o in owlGraph.triples((None, RDFS.subPropertyOf, None)):
        # T(rdfs:subPropertyOf(P, Q)) -> Q(x, y) :- P(x, y)
        x = Variable("X")
        y = Variable("Y")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        o = SkolemizeExistentialClasses(o) if isinstance(o, BNode) else o
        body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
        head = Uniterm(o, [x, y], newNss=owlGraph.namespaces())
        yield Clause(body, head)
    for s, p, o in owlGraph.triples((None, OWL_NS.equivalentProperty, None)):
        # T(owl:equivalentProperty(P, Q)) -> { Q(x, y) :- P(x, y)
        #                                      P(x, y) :- Q(x, y) }
        x = Variable("X")
        y = Variable("Y")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        o = SkolemizeExistentialClasses(o) if isinstance(o, BNode) else o
        body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
        head = Uniterm(o, [x, y], newNss=owlGraph.namespaces())
        yield Clause(body, head)
        yield Clause(head, body)
    # Contribution (Symmetric DL roles)
    for s, p, o in owlGraph.triples(
            (None, RDF.type, OWL_NS.SymmetricProperty)):
        # T(owl:SymmetricProperty(P)) -> P(y, x) :- P(x, y)
        y = Variable("Y")
        x = Variable("X")
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
        head = Uniterm(s, [y, x], newNss=owlGraph.namespaces())
        yield Clause(body, head)
    for s, p, o in owlGraph.triples_choices(
            (None, [RDFS.range, RDFS.domain], None)):
        s = SkolemizeExistentialClasses(s) if isinstance(s, BNode) else s
        if p == RDFS.range:
            # T(rdfs:range(P, D)) -> D(y) := P(x, y)
            x = Variable("X")
            y = Variable(BNode())
            body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
            head = Uniterm(RDF.type, [y, o], newNss=owlGraph.namespaces())
            yield Clause(body, head)
        else:
            # T(rdfs:domain(P, D)) -> D(x) := P(x, y)
            x = Variable("X")
            y = Variable(BNode())
            body = Uniterm(s, [x, y], newNss=owlGraph.namespaces())
            head = Uniterm(RDF.type, [x, o], newNss=owlGraph.namespaces())
            yield Clause(body, head)
import rdflib.compare
import rdflib.term
from rdflib import BNode
from rdflib import Graph
from rdflib import RDF
from rdflib import URIRef
from rdflib import Variable
import six

from utils import URIShortener

logger = logging.getLogger(__name__)

# length of the random suffix of generated variable names;
# so in total we have 62**5=916132832 different random vars
RANDOM_VAR_LEN = 5
RANDOM_VAR_PREFIX = 'vr'

# well-known variables used throughout pattern handling
SOURCE_VAR = Variable('source')
TARGET_VAR = Variable('target')
ASK_VAR = Variable('ask')
COUNT_VAR = Variable('count')


def gen_random_var():
    """Return a fresh random Variable ('vr' + RANDOM_VAR_LEN alnum chars)."""
    return Variable(RANDOM_VAR_PREFIX + ''.join(
        random.choice(string.ascii_letters + string.digits)
        for _ in range(RANDOM_VAR_LEN)))


def replace_vars_with_random_vars(triples, exclude=(SOURCE_VAR, TARGET_VAR)):
    # defaultdict lazily assigns one fresh random var per distinct original
    rv = defaultdict(gen_random_var)
    # NOTE(review): chunk truncated here — the comprehension body continues
    # beyond this view.
    return [
        tuple([
def testFormulaStore(store="default", configString=None):
    """Round-trip N3 formulae (quoted graphs) and variables through *store*."""
    try:
        g = ConjunctiveGraph(store=store)
    except ImportError:
        raise SkipTest("Dependencies for store '%s' not available!" % store)
    if configString:
        g.destroy(configString)
        g.open(configString)
    else:
        if store == "SQLite":
            _, path = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
            g.open(path, create=True)
        else:
            g.open(mkdtemp(), create=True)

    g.parse(data=testN3, format="n3")
    try:
        for s, p, o in g.triples((None, implies, None)):
            formulaA = s
            formulaB = o

            assert type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph
            # a = URIRef('http://test/a')
            b = URIRef("http://test/b")
            c = URIRef("http://test/c")
            d = URIRef("http://test/d")
            v = Variable("y")

            universe = ConjunctiveGraph(g.store)

            # test formula as terms
            assert len(list(universe.triples((formulaA, implies, formulaB)))) == 1

            # test variable as term and variable roundtrip
            assert len(list(formulaB.triples((None, None, v)))) == 1
            for s, p, o in formulaB.triples((None, d, None)):
                if o != c:
                    assert isinstance(o, Variable)
                    assert o == v
            s = list(universe.subjects(RDF.type, RDFS.Class))[0]
            assert isinstance(s, BNode)
            assert len(list(universe.triples((None, implies, None)))) == 1
            assert len(list(universe.triples((None, RDF.type, None)))) == 1
            assert len(list(formulaA.triples((None, RDF.type, None)))) == 1
            assert len(list(formulaA.triples((None, None, None)))) == 2
            assert len(list(formulaB.triples((None, None, None)))) == 2
            assert len(list(universe.triples((None, None, None)))) == 3
            assert len(
                list(formulaB.triples((None, URIRef("http://test/d"), None)))) == 2
            assert len(
                list(universe.triples((None, URIRef("http://test/d"), None)))) == 1

            # #context tests
            # #test contexts with triple argument
            # assert len(list(universe.contexts((a, d, c)))) == 1, \
            #     [ct for ct in universe.contexts((a, d, c))]
            # FAIL: test.test_graph_formula.testFormulaStores('SQLite',)
            # --------------------------------------------------------------------
            # Traceback (most recent call last):
            #   File ".../site-packages/nose/case.py", line 197, in runTest
            #     self.test(*self.arg)
            #   File ".../test_graph_formula.py", line 80, in testFormulaStore
            #     [ct for ct in universe.contexts((a, d, c))]
            # AssertionError: [
            #   <Graph identifier=N52fd4417ef7641089b2e4045ef19ad87
            #       (<class 'rdflib.graph.Graph'>)>,
            #   <Graph identifier=_:Formula16 (<class 'rdflib.graph.Graph'>)>
            # ]

            # Remove test cases
            universe.remove((None, implies, None))
            assert len(list(universe.triples((None, implies, None)))) == 0
            assert len(list(formulaA.triples((None, None, None)))) == 2
            assert len(list(formulaB.triples((None, None, None)))) == 2

            formulaA.remove((None, b, None))
            assert len(list(formulaA.triples((None, None, None)))) == 1
            formulaA.remove((None, RDF.type, None))
            assert len(list(formulaA.triples((None, None, None)))) == 0

            universe.remove((None, RDF.type, RDFS.Class))

            # remove_context tests
            universe.remove_context(formulaB)
            assert len(list(universe.triples((None, RDF.type, None)))) == 0
            assert len(universe) == 1
            assert len(formulaB) == 0

            universe.remove((None, None, None))
            assert len(universe) == 0

        g.close()

        if store == "SQLite":
            os.unlink(path)
        else:
            g.store.destroy(configString)
    except:
        # bare except is deliberate: clean up the store, then re-raise
        g.close()
        if store == "SQLite":
            os.unlink(path)
        else:
            g.store.destroy(configString)
        raise
def gen_random_var():
    """Return a fresh random Variable: prefix + RANDOM_VAR_LEN alnum chars."""
    alphabet = string.ascii_letters + string.digits
    suffix = ''.join(
        random.choice(alphabet) for _ in range(RANDOM_VAR_LEN))
    return Variable(RANDOM_VAR_PREFIX + suffix)
def add_variable(self, varname):
    """Register *varname* as a query variable unless it is already listed."""
    if varname not in self.query.variables:
        self.query.variables.append(Variable(varname))
def testVariableKeyWithQuestionMark():
    """Variable('?s') (leading question mark) also binds ?s."""
    bindings = {Variable("?s"): EX["s1"]}
    results = list(
        g2.query("SELECT ?o WHERE { ?s :p ?o }", initBindings=bindings))
    assert len(results) == 1, results
def SetupDDLAndAdornProgram(factGraph, rules, GOALS,
                            derivedPreds=None,
                            strictCheck=DDL_STRICTNESS_FALLBACK_DERIVED,
                            defaultPredicates=None,
                            ignoreUnboundDPreds=False,
                            hybridPreds2Replace=None):
    """Adorn *rules* w.r.t. *GOALS*, deriving the derived-predicate set if
    not given, and add bridge rules for hybrid predicates."""
    if not defaultPredicates:
        defaultPredicates = [], []
    # _dPredsProvided = bool(derivedPreds)
    if not derivedPreds:
        # compute the derived predicates from the facts and rules
        _derivedPreds = DerivedPredicateIterator(
            factGraph,
            rules,
            strict=strictCheck,
            defaultPredicates=defaultPredicates)
        if not isinstance(derivedPreds, (set, list)):
            derivedPreds = list(_derivedPreds)
        else:
            derivedPreds.extend(_derivedPreds)
    hybridPreds2Replace = hybridPreds2Replace or []

    adornedProgram = AdornProgram(
        factGraph,
        rules,
        GOALS,
        derivedPreds,
        ignoreUnboundDPreds,
        hybridPreds2Replace=hybridPreds2Replace)
    # collect all body literals of the adorned program
    if adornedProgram != set([]):
        rt = reduce(lambda l, r: l + r, [
            list(iterCondition(clause.formula.body))
            for clause in adornedProgram
        ])
    else:
        rt = set()
    # pair every hybrid predicate with the adornment it was queried under
    for hybridPred, adornment in [
            (t, a) for t, a in set(
                [(URIRef(GetOp(term).split('_derived')[0])
                  if GetOp(term).find('_derived') + 1 else GetOp(term),
                  ''.join(term.adornment))
                 for term in rt
                 if isinstance(term, AdornedUniTerm)])
            if t in hybridPreds2Replace]:
        # If there are hybrid predicates, add rules that derived their IDB
        # counterpart using information from the adorned queries to
        # determine appropriate arity and adornment
        hybridPred = URIRef(hybridPred)
        hPred = URIRef(hybridPred + u'_derived')
        if len(adornment) == 1:
            # p_derived^{a}(X) :- p(X)
            body = BuildUnitermFromTuple(
                (Variable('X'), RDF.type, hybridPred))
            head = BuildUnitermFromTuple(
                (Variable('X'), RDF.type, hPred))
        else:
            # p_derived^{a}(X, Y) :- p(X, Y)
            body = BuildUnitermFromTuple(
                (Variable('X'), hybridPred, Variable('Y')))
            head = BuildUnitermFromTuple(
                (Variable('X'), hPred, Variable('Y')))
        _head = AdornedUniTerm(head, list(adornment))
        rule = AdornedRule(Clause(And([body]), _head.clone()))
        rule.sip = Graph()
        adornedProgram.add(rule)

    if factGraph is not None:
        factGraph.adornedProgram = adornedProgram
    return adornedProgram