def NormalizeGoals(goals):
    if isinstance(goals, (list, set)):
        for goal in goals:
            yield goal, {}
    elif isinstance(goals, tuple):
        # single (s, p, o) goal; the original `yield sparqlQuery, {}` referenced
        # an undefined name, `goals` is presumably what was meant
        yield goals, {}
    else:
        # a raw SPARQL string: yield each algebra pattern with its prefix bindings
        query = RenderSPARQLAlgebra(parse(goals))
        for pattern in query.patterns:
            yield pattern[:3], query.prolog.prefixBindings
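A minimal usage sketch (not part of the original source): it assumes NormalizeGoals and its rdflib.sparql imports are already in scope, and the goal triple below is purely hypothetical.

from rdflib import URIRef, Variable

# Hypothetical goal list: each (s, p, o) goal is yielded back with an empty
# binding dict; a raw SPARQL string would instead be parsed and each algebra
# pattern yielded with the query's prefix bindings.
goals = [(Variable('s'), URIRef(u'http://example.org/knows'), Variable('o'))]
for goal, bindings in NormalizeGoals(goals):
    print goal, bindings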
def testUnicodeString(self):
    from rdflib.sparql.parser import parse
    from cStringIO import StringIO
    q = \
        u"""
        PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        SELECT ?pred
        WHERE { rdf:foobar rdf:predicate ?pred. }
        """
    p = parse(q)
def prepQuery(queryString, ontGraph):
    query = parse(queryString)
    if ontGraph:
        if not query.prolog:
            query.prolog = Prolog(None, [])
            query.prolog.prefixBindings.update(
                dict(ontGraph.namespace_manager.namespaces()))
        else:
            for prefix, nsInst in ontGraph.namespace_manager.namespaces():
                if prefix not in query.prolog.prefixBindings:
                    query.prolog.prefixBindings[prefix] = nsInst
        print "Bindings picked up ", query.prolog.prefixBindings
    return query
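A hedged usage sketch (an assumption, not from the original code): Graph is taken to be in scope as in the other examples, and the ex namespace and query string are hypothetical. prepQuery copies the ontology graph's prefix bindings into the parsed query's prolog so the query can use them.

from rdflib import Namespace

EX = Namespace(u'http://example.org/')   # hypothetical ontology namespace
ontGraph = Graph()                       # Graph assumed imported, as elsewhere in these examples
ontGraph.bind(u'ex', EX)
preparedQuery = prepQuery(u"SELECT ?s WHERE { ?s ex:related ?o }", ontGraph)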
def QueryStats(queryString, log, depthPrint=False):
    global depthPrintEnabled
    depthPrintEnabled = depthPrint
    log["queryString"] = queryString
    # use the SPARQL parser
    try:
        q = parse(queryString)
        log["ParseError"] = 0
    except Exception, e:
        print "PARSE ERROR: %s" % e
        q = None
        log["ParseError"] = 1
        return False
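A small sketch of how QueryStats might be driven (an assumption; only the parsing portion shown above is exercised): the caller supplies a plain dict that the function annotates with the query string and a ParseError flag.

log = {}
QueryStats("SELECT ?s WHERE { ?s ?p ?o }", log)
print log["ParseError"], log["queryString"]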
def isaBaseQuery(self, queryString, queryObj=None):
    """
    If the given SPARQL query involves purely base predicates
    it returns it (as a parsed string), otherwise it returns a SPARQL algebra
    instance for top-down evaluation using this store

    >>> graph = Graph()
    >>> topDownStore = TopDownSPARQLEntailingStore(graph.store, [RDFS.seeAlso], nsBindings={u'rdfs': RDFS.RDFSNS})
    >>> rt = topDownStore.isaBaseQuery("SELECT * { [] rdfs:seeAlso [] }")
    >>> isinstance(rt, (BasicGraphPattern, AlgebraExpression))
    True
    >>> rt = topDownStore.isaBaseQuery("SELECT * { [] a [] }")
    >>> isinstance(rt, (Query, basestring))
    True
    >>> rt = topDownStore.isaBaseQuery("SELECT * { [] a [] OPTIONAL { [] rdfs:seeAlso [] } }")
    >>> isinstance(rt, (BasicGraphPattern, AlgebraExpression))
    True
    """
    from rdflib.sparql.bison.Query import Prolog
    from rdflib.sparql.parser import parse
    from rdflib import sparql as sparqlModule
    if queryObj:
        query = queryObj
    else:
        query = parse(queryString)
    if not query.prolog:
        query.prolog = Prolog(None, [])
        query.prolog.prefixBindings.update(self.nsBindings)
    else:
        for prefix, nsInst in self.nsBindings.items():
            if prefix not in query.prolog.prefixBindings:
                query.prolog.prefixBindings[prefix] = nsInst
    sparqlModule.prolog = query.prolog
    algebra = RenderSPARQLAlgebra(query, nsMappings=self.nsBindings)
    return first(self.getDerivedPredicates(
        algebra, sparqlModule.prolog)) and algebra or query
def test(self, debug=debug):
    if debug:
        print testName, label, named_graphs
    query = urlopen(queryFile).read()
    try:
        parsedQuery = parse(query)
    except ParseException:
        return
    assertion = BNode()
    result_node = BNode()
    test_graph.add((result_node, RDF.type, EARL.TestResult))
    test_graph.add(
        (result_node, DC['date'], Literal(datetime.date.today())))
    test_graph.add((assertion, RDF.type, EARL.Assertion))
    test_graph.add((assertion, EARL.assertedBy, MY_FOAF.chime))
    test_graph.add((assertion,
                    EARL.subject,
                    URIRef('http://metacognition.info/software/fuxi')))
    test_graph.add((assertion, EARL.test, TEST[testName]))
    test_graph.add((assertion, EARL.result, result_node))
    if named_graphs:
        g = ConjunctiveGraph()
    else:
        g = Graph()
    if debug:
        print "Source graph ", rdfDoc
    g.parse(urlopen(rdfDoc), publicID=rdfDoc, format='n3')
    for sourceUri, graphIri in named_graphs:
        g.parse(urlopen(sourceUri), publicID=graphIri, format='n3')
    if named_graphs:
        factGraph = Graph(g.store, identifier=rdfDoc)
    else:
        factGraph = g
    if ENT.RIF in regime:
        rules = []
    else:
        from FuXi.DLP.CompletionReasoning import GetELHConsequenceProcedureRules
        rules = [i for i in self.rdfs_rules] if ENT.RDFS in regime else []
        rules.extend(
            self.network.setupDescriptionLogicProgramming(
                factGraph,
                addPDSemantics=True,
                constructNetwork=False))
        if query.find('subClassOf') + 1 and (ENT.RDFS not in regime
                                             or testName in COMPLETION_RULES):
            if debug:
                print "Added completion rules for EL TBox reasoning"
            rules.extend(GetELHConsequenceProcedureRules(factGraph))
            facts2add = []
            for owl_class in factGraph.subjects(RDF.type, OWLNS.Class):
                facts2add.append(
                    (owl_class, RDFS.subClassOf, owl_class, factGraph))
            factGraph.addN(facts2add)
    if debug:
        pprint(list(rules))
    if debug:
        print query
    topDownStore = TopDownSPARQLEntailingStore(
        factGraph.store,
        factGraph,
        idb=rules,
        DEBUG=debug,
        nsBindings=nsMap,
        #hybridPredicates = [RDFS.subClassOf],
        identifyHybridPredicates=True,
        templateMap={STRING.contains: "REGEX(%s,%s)"})
    targetGraph = Graph(topDownStore)
    for pref, nsUri in (setdict(nsMap) | setdict(
            parsedQuery.prolog.prefixBindings)).items():
        targetGraph.bind(pref, nsUri)
    rt = targetGraph.query('', parsedQuery=parsedQuery)
    if rt.askAnswer:
        actualSolns = rt.askAnswer[0]
        expectedSolns = parseResults(urlopen(result).read())
    else:
        actualSolns = [
            ImmutableDict([(k, v) for k, v in d.items()])
            for d in parseResults(rt.serialize(format='xml'))
        ]
        expectedSolns = [
            ImmutableDict([(k, v) for k, v in d.items()])
            for d in parseResults(urlopen(result).read())
        ]
        actualSolns.sort(key=lambda d: hash(d))
        expectedSolns.sort(key=lambda d: hash(d))
        actualSolns = set(actualSolns)
        expectedSolns = set(expectedSolns)
    if actualSolns == expectedSolns:
        test_graph.add((result_node, EARL.outcome, EARL['pass']))
    else:
        test_graph.add((result_node, EARL.outcome, EARL['fail']))
    self.failUnless(
        actualSolns == expectedSolns,
        "Answers don't match %s v.s. %s" % (actualSolns, expectedSolns))
    if debug:
        for network, goal in topDownStore.queryNetworks:
            pprint(goal)
            network.reportConflictSet(True)
}

MANIFEST_QUERY = \
"""
SELECT ?status ?premise ?conclusion ?feature ?descr
WHERE {
  [ a otest:PositiveEntailmentTest;
      otest:feature ?feature;
      rtest:description ?descr;
      rtest:status ?status;
      rtest:premiseDocument ?premise;
      rtest:conclusionDocument ?conclusion
  ]
}"""
PARSED_MANIFEST_QUERY = parse(MANIFEST_QUERY)

Features2Skip = [
    URIRef('http://www.w3.org/2002/07/owl#sameClassAs'),
]

NonNaiveSkip = [
    'OWL/oneOf/Manifest002.rdf',    # see Issue 25
    'OWL/unionOf/Manifest002.rdf',  # support for disjunctive horn logic
]

MagicTest2Skip = [
    'OWL/oneOf/Manifest002.rdf',        # requires second order predicate derivation
    'OWL/oneOf/Manifest003.rdf',        # requires second order predicate derivation
    'OWL/disjointWith/Manifest001.rdf'  # requires second order predicate derivation
]
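A hedged sketch of running the manifest query against a loaded manifest document. The otest/rtest prefixes are not bound in the snippet above; the W3C OWL test-ontology and RDF test-schema namespaces below are assumptions, as is the manifest path (taken from the skip lists), and Graph is assumed to be in scope as in the other examples.

from rdflib import Namespace

OTEST = Namespace(u'http://www.w3.org/2002/03owlt/testOntology#')              # assumed binding for otest:
RTEST = Namespace(u'http://www.w3.org/2000/10/rdf-tests/rdfcore/testSchema#')  # assumed binding for rtest:

manifestGraph = Graph()
manifestGraph.parse('OWL/oneOf/Manifest002.rdf')   # hypothetical local manifest document
for status, premise, conclusion, feature, descr in manifestGraph.query(
        MANIFEST_QUERY, initNs={u'otest': OTEST, u'rtest': RTEST}):
    print status, premise, conclusion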
def RunQuery(subQueryJoin,
             bindings,
             factGraph,
             vars=None,
             debug=False,
             symmAtomicInclusion=False,
             specialBNodeHandling=None,
             toldBNode=False):
    initialNs = hasattr(factGraph, 'nsMap') and factGraph.nsMap or \
        dict([(k, v) for k, v in factGraph.namespaces()])
    if not subQueryJoin:
        return False
    if not vars:
        vars = []
    if bool(bindings):
        # Apply a priori substitutions
        openVars, conjGroundLiterals, bindings = \
            normalizeBindingsAndQuery(set(vars),
                                      bindings,
                                      subQueryJoin)
        vars = list(openVars)
    else:
        conjGroundLiterals = subQueryJoin
    isGround = not vars
    subquery = RDFTuplesToSPARQL(conjGroundLiterals,
                                 factGraph,
                                 isGround,
                                 [v for v in vars],
                                 symmAtomicInclusion,
                                 specialBNodeHandling)
    if toldBNode:
        from rdflib.sparql.bison.Query import Prolog
        from rdflib.sparql.parser import parse
        parsedQuery = parse(subquery)
        if not parsedQuery.prolog:
            parsedQuery.prolog = Prolog(None, [])
        parsedQuery.prolog.toldBNodes = True
        subquery = ''
    else:
        parsedQuery = None
    rt = factGraph.query(subquery,
                         initNs=initialNs,
                         parsedQuery=parsedQuery)
    projectedBindings = vars and project(bindings, vars) or bindings
    if isGround:
        if debug:
            print >> sys.stderr, "%s%s-> %s" % (
                subquery,
                projectedBindings and
                " %s apriori binding(s)" % len(projectedBindings) or '',
                rt.askAnswer[0])
        return subquery, rt.askAnswer[0]
    else:
        rt = len(vars) > 1 and (
            dict([(vars[idx],
                   specialBNodeHandling[-1](i)
                   if specialBNodeHandling and isinstance(i, BNode)
                   else i)
                  for idx, i in enumerate(v)]) for v in rt) \
            or (
            dict([(vars[0],
                   specialBNodeHandling[-1](v)
                   if specialBNodeHandling and isinstance(v, BNode)
                   else v)])
            for v in rt)
        if debug:
            print >> sys.stderr, "%s%s-> %s" % (
                subquery,
                projectedBindings and
                " %s apriori binding(s)" % len(projectedBindings) or '',
                rt and '[]')  # .. %s answers .. ]' % len(rt) or '[]')
        return subquery, rt
from rdflib.sparql.parser import parse

# second query from here:
# http://www.w3.org/TR/rdf-sparql-query/#GroupPatterns
query = """
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name ?mbox
WHERE {
    { ?x foaf:name ?name . }
    { ?x foaf:mbox ?mbox . }
}
"""

correct = """{ [<SPARQLParser.GraphPattern: [[?x [foaf:name([u'?name'])], ?x [foaf:mbox([u'?mbox'])]]]>] }"""

if __name__ == "__main__":
    p = parse(query)
    tmp = p.query.whereClause.parsedGraphPattern
    if str(tmp) == correct:
        print "PASSED"