def test_comparisons_var():
    """Comparison operators evaluated against a bound numeric variable."""
    def check(expr, ctx, expected):
        parsed = p.Expression.parseString(expr)[0]
        eq(bool(_eval(_translate(parsed), ctx)), expected)

    # ?x bound to integer 2: all of these comparisons should hold...
    ctx = QueryContext()
    ctx[Variable('x')] = Literal(2)
    for expr in ('?x<3', '?x<3.0', '?x<3e0', '?x<2.1', '?x<21e-1',
                 '?x=2.0', '?x=2e0'):
        check(expr, ctx, True)
    # ...but an integer never equals a plain string literal.
    check('?x="cake"', ctx, False)

    # ?x bound to 4: the "< 3" comparisons now fail.
    ctx = QueryContext()
    ctx[Variable('x')] = Literal(4)
    for expr in ('?x<3', '?x<3.0', '?x<3e0'):
        check(expr, ctx, False)
def _evaluate(self, bindings):
    """Evaluate the FILTER expression against a set of solution mappings."""
    # Keys arrive '?'-prefixed; strip the prefix to build rdflib Variables.
    solution = Bindings(d={
        Variable(name[1:]): to_rdflib_term(value)
        for name, value in bindings.items()
    })
    ctx = QueryContext(bindings=solution)
    ctx.prologue = self._prologue
    return self._compiled_expression.eval(ctx)
def evalUpdate(graph, update, initBindings=None):
    """
    http://www.w3.org/TR/sparql11-update/#updateLanguage

    'A request is a sequence of operations [...] Implementations MUST
    ensure that operations of a single request are executed in a fashion
    that guarantees the same effects as executing them in lexical order.
    Operations all result either in success or failure. If multiple
    operations are present in a single request, then a result of failure
    from any operation MUST abort the sequence of operations, causing the
    subsequent operations to be ignored.'

    This will return None on success and raise Exceptions on error

    :param graph: the graph (or dataset) the update is executed against
    :param update: translated update request — a sequence of operations
    :param initBindings: optional mapping of variable -> term applied to
        every operation; plain-string keys are wrapped in Variable
    """
    # Dispatch table replaces the long if/elif chain; looked up per
    # operation so an unknown name still raises inside the try block,
    # subject to the same SILENT handling as any other failure.
    handlers = {
        'Load': evalLoad,
        'Clear': evalClear,
        'Drop': evalDrop,
        'Create': evalCreate,
        'Add': evalAdd,
        'Move': evalMove,
        'Copy': evalCopy,
        'InsertData': evalInsertData,
        'DeleteData': evalDeleteData,
        'DeleteWhere': evalDeleteWhere,
        'Modify': evalModify,
    }
    for u in update:
        ctx = QueryContext(graph)
        ctx.prologue = u.prologue
        if initBindings:
            for k, v in initBindings.items():
                if not isinstance(k, Variable):
                    k = Variable(k)
                ctx[k] = v
            # ctx.push()  # necessary?
        try:
            handler = handlers.get(u.name)
            if handler is None:
                raise Exception('Unknown update operation: %s' % (u,))
            handler(ctx, u)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed. SILENT operations ignore their own
            # failure; otherwise the whole request aborts per the spec.
            if not u.silent:
                raise
def evalUpdate(graph, update, initBindings=None):
    """
    http://www.w3.org/TR/sparql11-update/#updateLanguage

    'A request is a sequence of operations [...] Implementations MUST
    ensure that operations of a single request are executed in a fashion
    that guarantees the same effects as executing them in lexical order.
    Operations all result either in success or failure. If multiple
    operations are present in a single request, then a result of failure
    from any operation MUST abort the sequence of operations, causing the
    subsequent operations to be ignored.'

    This will return None on success and raise Exceptions on error

    :param graph: graph/dataset the update operates on
    :param update: translated update request (sequence of operations)
    :param initBindings: optional mapping of variable -> term; plain
        string keys are wrapped in Variable
    """
    for u in update:
        ctx = QueryContext(graph)
        ctx.prologue = u.prologue
        if initBindings:
            # Fixed: dict.iteritems() is Python-2-only; items() behaves
            # identically here and also works on Python 3.
            for k, v in initBindings.items():
                if not isinstance(k, Variable):
                    k = Variable(k)
                ctx[k] = v
            # ctx.push()  # necessary?
        try:
            if u.name == 'Load':
                evalLoad(ctx, u)
            elif u.name == 'Clear':
                evalClear(ctx, u)
            elif u.name == 'Drop':
                evalDrop(ctx, u)
            elif u.name == 'Create':
                evalCreate(ctx, u)
            elif u.name == 'Add':
                evalAdd(ctx, u)
            elif u.name == 'Move':
                evalMove(ctx, u)
            elif u.name == 'Copy':
                evalCopy(ctx, u)
            elif u.name == 'InsertData':
                evalInsertData(ctx, u)
            elif u.name == 'DeleteData':
                evalDeleteData(ctx, u)
            elif u.name == 'DeleteWhere':
                evalDeleteWhere(ctx, u)
            elif u.name == 'Modify':
                evalModify(ctx, u)
            else:
                raise Exception('Unknown update operation: %s' % (u, ))
        except Exception:
            # Narrowed from bare `except:`. SILENT operations swallow their
            # own failures; anything else aborts the whole request.
            if not u.silent:
                raise
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated query against ``graph``, seeding the context
    with ``initBindings`` (plain keys are wrapped in Variable)."""
    bound = dict((Variable(k), v) for k, v in iteritems(initBindings))
    ctx = QueryContext(graph, initBindings=bound)
    ctx.prologue = query.prologue

    main = query.algebra
    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception(
                "Non-conjunctive-graph doesn't know about "
                + "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        seen_default = False
        for clause in main.datasetClause:
            if clause.default:
                if seen_default:
                    # every FROM after the first replaces the default graph
                    ctx = ctx.pushGraph(ctx.dataset.get_context(BNode()))
                seen_default = True
                ctx.load(clause.default, default=True)
            elif clause.named:
                ctx.load(clause.named, default=False)
    return evalPart(ctx, main)
def evalUpdate(graph, update, initBindings=None):
    """
    http://www.w3.org/TR/sparql11-update/#updateLanguage

    'A request is a sequence of operations [...] Implementations MUST
    ensure that operations of a single request are executed in a fashion
    that guarantees the same effects as executing them in lexical order.
    Operations all result either in success or failure. If multiple
    operations are present in a single request, then a result of failure
    from any operation MUST abort the sequence of operations, causing the
    subsequent operations to be ignored.'

    This will return None on success and raise Exceptions on error

    :param initBindings: optional mapping of variable name -> term shared
        by every operation. Fixed from a mutable default argument
        (``initBindings={}``), which is shared across calls in Python.
    """
    # Convert the bindings once up front: the original re-built (and
    # re-wrapped keys of) this dict on every loop iteration.
    bindings = dict((Variable(k), v)
                    for k, v in (initBindings or {}).items())
    for u in update:
        ctx = QueryContext(graph, initBindings=bindings)
        ctx.prologue = u.prologue
        try:
            if u.name == "Load":
                evalLoad(ctx, u)
            elif u.name == "Clear":
                evalClear(ctx, u)
            elif u.name == "Drop":
                evalDrop(ctx, u)
            elif u.name == "Create":
                evalCreate(ctx, u)
            elif u.name == "Add":
                evalAdd(ctx, u)
            elif u.name == "Move":
                evalMove(ctx, u)
            elif u.name == "Copy":
                evalCopy(ctx, u)
            elif u.name == "InsertData":
                evalInsertData(ctx, u)
            elif u.name == "DeleteData":
                evalDeleteData(ctx, u)
            elif u.name == "DeleteWhere":
                evalDeleteWhere(ctx, u)
            elif u.name == "Modify":
                evalModify(ctx, u)
            else:
                raise Exception("Unknown update operation: %s" % (u, ))
        except Exception:
            # Narrowed from bare `except:`. SILENT operations ignore their
            # own failure; otherwise the request aborts, per the spec.
            if not u.silent:
                raise
def _evaluate(self, bindings: Dict[str, str]) -> bool:
    """Evaluate the FILTER expression with a set mappings.

    Argument: A set of solution mappings ('?'-prefixed variable names
    mapped to serialized RDF terms).

    Returns: The outcome of evaluating the SPARQL FILTER on the input
    set of solution mappings.
    """
    # Strip the leading '?' from each key to get the variable name.
    solution = {
        Variable(key[1:]): to_rdflib_term(value)
        for key, value in bindings.items()
    }
    context = QueryContext(bindings=Bindings(d=solution))
    context.prologue = self._prologue
    return self._compiled_expression.eval(context)
def _evaluate(self, bindings: Dict[str, str]) -> bool: """Evaluate the BIND expression with a set mappings. Argument: A set of solution mappings. Returns: The outcome of evaluating the SPARQL BIND on the input set of solution mappings. """ # print("bind_eval:"+str(bindings)) ## For experiments on summaries if self._expr.startswith("<esumm>"): #print("express summ") s = URIRef(bindings['?s']) p = URIRef(bindings['?p']) o = Literal(bindings['?o']) dummy = Dummy() dummy.expr = [s, p, o] self._result = summary(dummy, None) return self._result elif self._expr.startswith("<fsumm>"): self._result = esummary(bindings['?s'], bindings['?p'], bindings['?o']) return self._result elif self._expr.startswith("<ehib>"): self._result = ehibsumm(bindings['?s'], bindings['?p'], bindings['?o']) return self._result elif self._expr.startswith("<split>"): self._result = splitsumm(bindings['?s'], bindings['?p'], bindings['?o']) return self._result elif self._expr.startswith("<suf>"): self._result = sufsumm(bindings['?s'], bindings['?p'], bindings['?o']) return self._result elif self._expr.startswith("<void>"): self._result = voidsumm(bindings['?s'], bindings['?p'], bindings['?o']) return self._result context = None if bindings is None: context = QueryContext(Bindings()) else: d = { Variable(key[1:]): to_rdflib_term(value) for key, value in bindings.items() } b = Bindings(d=d) context = QueryContext(bindings=b) context.prologue = self._prologue self._result = self._compiled_expression.eval(context) return self._result
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated query over ``graph`` with initial bindings."""
    converted = dict((Variable(k), v) for k, v in iteritems(initBindings))
    ctx = QueryContext(graph, initBindings=converted)
    ctx.prologue = query.prologue
    main = query.algebra
    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception(
                "Non-conjunctive-graph doesn't know about "
                + "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        have_default = False
        for d in main.datasetClause:
            if d.default:
                if have_default:
                    # subsequent FROM clauses replace the current default
                    dg = ctx.dataset.get_context(BNode())
                    ctx = ctx.pushGraph(dg)
                have_default = True
                ctx.load(d.default, default=True)
            elif d.named:
                ctx.load(d.named, default=False)
    return evalPart(ctx, main)
def _eval(e, ctx=None):
    """Evaluate expression ``e``; any SPARQLError (raised or returned)
    yields False."""
    ctx = ctx or QueryContext()
    try:
        result = e.eval(ctx)
        if isinstance(result, SPARQLError):
            print(result)
            return False
        return result
    except SPARQLError:
        return False
def _apply_filter(v, resource, filters, agp_filters):
    """Return True when ``resource`` is ruled out by a filter bound to
    variable ``v`` (i.e. the candidate should be discarded)."""
    if v in filters:
        for var_filter in filters[v]:
            ctx = QueryContext()
            ctx[v] = resource
            expr = var_filter.expr
            if hasattr(expr, 'eval'):
                passing = expr.eval(ctx)
            else:
                # no evaluable expression: fall back to plain truthiness
                passing = bool(resource.toPython())
            if not passing:
                return True
    elif v in agp_filters:
        # exact-value filter from the AGP: anything different is filtered
        return resource != agp_filters.get(v)
    return False
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated query over ``graph``.

    ``initBindings`` pre-populate the query context; plain-string keys
    are wrapped in Variable.

    Fixed: ``dict.iteritems()`` is Python-2-only — ``items()`` behaves
    the same there and also works on Python 3.
    """
    ctx = QueryContext(graph)
    ctx.prologue = query.prologue
    if initBindings:
        for k, v in initBindings.items():
            if not isinstance(k, Variable):
                k = Variable(k)
            ctx[k] = v
        # ctx.push()  # necessary?
    main = query.algebra
    # import pdb; pdb.set_trace()
    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception("Non-conjunctive-graph doesn't know about " +
                            "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        firstDefault = False
        for d in main.datasetClause:
            if d.default:
                if firstDefault:
                    # replace current default graph
                    dg = ctx.dataset.get_context(BNode())
                    ctx = ctx.pushGraph(dg)
                firstDefault = True
                ctx.load(d.default, default=True)
            elif d.named:
                g = d.named
                ctx.load(g, default=False)
    return evalPart(ctx, main)
def __evalBGP__(ctx: QueryContext, bgp: BGP):
    """Evaluate a Basic Graph Pattern, pushing the join down to an HDTStore.

    Yields solution mappings (FrozenBindings).
    """
    # A SPARQL query executed over a non HDTStore is evaluated as usual.
    # Fixed: this function is a generator (it contains `yield`), so the
    # original `return rdflib_evalBGP(ctx, bgp)` only set the (ignored)
    # StopIteration value and callers iterating this generator received
    # NO solutions for non-HDT stores. `yield from` re-emits them.
    if not isinstance(ctx.graph.store, HDTStore):
        yield from rdflib_evalBGP(ctx, bgp)
        return
    if not bgp:
        # empty BGP: the current solution is the only answer
        yield ctx.solution()
        return
    # delegate the join evaluation to the HDT store
    store: HDTStore = ctx.graph.store
    for row in store.hdt_document.search_join(set(bgp)):
        # convert the ResultRow into a FrozenBindings object
        bindings = dict()
        for key in row.labels:
            bindings[Variable(key)] = row[key]
        yield FrozenBindings(ctx, bindings)
    return
def test_arithmetic_var():
    """Arithmetic expressions over a variable bound to Literal(2)."""
    ctx = QueryContext()
    ctx[Variable("x")] = Literal(2)
    cases = [
        ("2+?x", 4), ("?x+3", 5), ("3-?x", 1), ("?x*3", 6), ("4/?x", 2),
        ("?x+?x+?x", 6), ("?x-?x+?x", 2), ("(?x-?x)+?x", 2),
        ("?x-(?x+?x)", -2), ("?x*?x*?x", 8), ("4/?x*?x", 4),
        ("8/4*?x", 4), ("8/(4*?x)", 1), ("(?x/?x)*?x", 2),
        ("4/(?x*?x)", 1),
    ]
    for expr, expected in cases:
        parsed = p.Expression.parseString(expr)[0]
        eq(_eval(_translate(parsed), ctx).value, expected)
def test_arithmetic_var():
    """Arithmetic with precedence/grouping, ?x bound to Literal(2)."""
    ctx = QueryContext()
    ctx[Variable('x')] = Literal(2)

    def check(expr, expected):
        tree = _translate(p.Expression.parseString(expr)[0])
        eq(_eval(tree, ctx).value, expected)

    check('2+?x', 4)
    check('?x+3', 5)
    check('3-?x', 1)
    check('?x*3', 6)
    check('4/?x', 2)
    check('?x+?x+?x', 6)
    check('?x-?x+?x', 2)
    check('(?x-?x)+?x', 2)
    check('?x-(?x+?x)', -2)
    check('?x*?x*?x', 8)
    check('4/?x*?x', 4)
    check('8/4*?x', 4)
    check('8/(4*?x)', 1)
    check('(?x/?x)*?x', 2)
    check('4/(?x*?x)', 1)
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated SPARQL query over ``graph``, pre-binding any
    ``initBindings`` into the query context."""
    ctx = QueryContext(graph)
    ctx.prologue = query.prologue
    if initBindings:
        for key, value in initBindings.items():
            if not isinstance(key, Variable):
                key = Variable(key)
            ctx[key] = value
        # ctx.push()  # necessary?
    main = query.algebra
    # import pdb; pdb.set_trace()
    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception(
                "Non-conjunctive-graph doesn't know about "
                + "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        seen_default = False
        for clause in main.datasetClause:
            if clause.default:
                if seen_default:
                    # a later FROM replaces the current default graph
                    ctx = ctx.pushGraph(ctx.dataset.get_context(BNode()))
                seen_default = True
                ctx.load(clause.default, default=True)
            elif clause.named:
                ctx.load(clause.named, default=False)
    return evalPart(ctx, main)
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated query; FROM NAMED is currently rejected
    (see https://github.com/AKSW/QuitStore/issues/144)."""
    bound = dict((Variable(k), v) for k, v in iteritems(initBindings))
    ctx = QueryContext(graph, initBindings=bound)
    ctx.prologue = query.prologue
    main = query.algebra
    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception(
                "Non-conjunctive-graph doesn't know about "
                + "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        default_seen = False
        for clause in main.datasetClause:
            if clause.default:
                if default_seen:
                    # replace the current default graph with a fresh one
                    ctx = ctx.pushGraph(ctx.dataset.get_context(BNode()))
                default_seen = True
                ctx.load(clause.default, default=True)
            # TODO re-enable original behaviour (ctx.load(clause.named,
            # default=False)) once FROM NAMED works with named graphs:
            # https://github.com/AKSW/QuitStore/issues/144
            elif clause.named:
                raise FromNamedError
    return evalPart(ctx, main)
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated query over ``graph``.

    ``initBindings`` are injected into the algebra as a VALUES clause,
    joined just inside any Slice/Distinct/OrderBy/Extend wrappers; every
    node on that path is cloned first so prepared queries are not mutated.

    Fixed: ``dict.iteritems()`` is Python-2-only — ``items()`` behaves
    the same there and also works on Python 3.
    """
    ctx = QueryContext(graph)
    ctx.prologue = query.prologue

    main = query.algebra

    if initBindings:
        # add initBindings as a values clause
        values = {}
        for k, v in initBindings.items():
            if not isinstance(k, Variable):
                k = Variable(k)
            values[k] = v

        main = main.clone()  # clone to not change prepared q
        main['p'] = main.p.clone()
        # Find the right place to insert MultiSet join
        repl = main.p
        if repl.name == 'Slice':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.name == 'Distinct':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.p.name == 'OrderBy':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.p.name == 'Extend':
            repl['p'] = repl.p.clone()
            repl = repl.p

        repl['p'] = Join(repl.p, ToMultiSet(Values([values])))

        # TODO: Vars?

    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception("Non-conjunctive-graph doesn't know about " +
                            "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        firstDefault = False
        for d in main.datasetClause:
            if d.default:
                if firstDefault:
                    # replace current default graph
                    dg = ctx.dataset.get_context(BNode())
                    ctx = ctx.pushGraph(dg)
                firstDefault = True
                ctx.load(d.default, default=True)
            elif d.named:
                g = d.named
                ctx.load(g, default=False)
    return evalPart(ctx, main)
def __process_pattern(seed, space, tp, expected_types, check, graph):
    # Collect (triple-pattern, seed, object) candidate triples for one seed.
    # NOTE(review): this reads `self`, `var_filters`, `type_strict`,
    # `__check_stop`, `__process_pattern_link_seed`, `__dereference_uri`
    # and `log` from an enclosing scope — presumably a closure; confirm
    # against the surrounding function.
    candidates = set([])
    with self.tp_lock(seed, tp):
        self.__wrapper.filter_var(tp, tp.s)
        # Seed already filtered out for this pattern/space: nothing to do.
        if self.__wrapper.is_filtered(seed, space, tp.s):
            return
        # Concrete subject that doesn't match this seed: skip.
        if isinstance(tp.s, URIRef) and seed != tp.s:
            return
        else:
            # tp.s is a Variable
            if tp.s in var_filters:
                for var_f in var_filters[tp.s]:
                    context = QueryContext()
                    # NOTE(review): binds the *object* variable (tp.o) while
                    # filtering on tp.s — looks like it may have been meant
                    # to be context[tp.s]; confirm before changing.
                    context[tp.o] = seed
                    passing = var_f.expr.eval(context) if hasattr(
                        var_f.expr, 'eval') else bool(seed.toPython())
                    if not passing:
                        return
        if tp.p != RDF.type or isinstance(tp.o, Variable):
            try:
                # Objects linked from this seed via predicate tp.p.
                sobs = list(
                    __process_pattern_link_seed(seed, graph, tp.p))
                # TODO: This may not apply when considering OPTIONAL support
                if not isinstance(tp.o, Variable) and not sobs:
                    return
                if not sobs:
                    return
                obs_candidates = []
                for object in sobs:
                    # filtered=True until some check accepts the object.
                    filtered = True
                    __check_stop()
                    if not isinstance(tp.o, Variable):
                        # Concrete object: accept only an exact n3 match.
                        if object.n3() == tp.o.n3():
                            filtered = False
                    else:
                        if tp.o in var_filters:
                            # Object variable with filters: any passing
                            # filter accepts the object.
                            for var_f in var_filters[tp.o]:
                                context = QueryContext()
                                context[tp.o] = object
                                passing = var_f.expr.eval(
                                    context) if hasattr(
                                    var_f.expr, 'eval') else bool(
                                    object.toPython())
                                if passing:
                                    filtered = False
                        else:
                            # Unfiltered variable object: always accepted.
                            filtered = False
                    if not filtered:
                        candidate = (tp, seed, object)
                        obs_candidates.append(candidate)
                candidates.update(obs_candidates)
            except AttributeError as e:
                # NOTE(review): e.message is Python-2-only; on Python 3
                # this would itself raise AttributeError — confirm the
                # target interpreter.
                log.warning(
                    'Trying to find {} objects of {}: {}'.format(
                        tp.p, seed, e.message))
        else:
            # rdf:type pattern with a concrete type object.
            if type_strict and check:
                # Verify the seed really carries the expected rdf:type.
                __dereference_uri(graph, seed)
                types = set(
                    graph.objects(subject=seed, predicate=RDF.type))
                if tp.o not in types:
                    # print 'filtering ' + seed + ' for ' + str(tp.s)
                    return
            candidates.add((tp, seed, tp.o))
    return candidates
def evalQuery(graph, query, initBindings, base=None):
    """Evaluate a translated query over ``graph``, injecting
    ``initBindings`` into the algebra as a joined VALUES clause.

    The insertion point skips past Slice/Distinct/OrderBy/Extend
    wrappers; each node on the path is cloned so a prepared query is
    never mutated in place.

    Fixed: ``dict.iteritems()`` is Python-2-only — ``items()`` behaves
    the same there and also works on Python 3.
    """
    ctx = QueryContext(graph)
    ctx.prologue = query.prologue

    main = query.algebra

    if initBindings:
        # add initBindings as a values clause
        values = {}
        for k, v in initBindings.items():
            if not isinstance(k, Variable):
                k = Variable(k)
            values[k] = v

        main = main.clone()  # clone to not change prepared q
        main['p'] = main.p.clone()
        # Find the right place to insert MultiSet join
        repl = main.p
        if repl.name == 'Slice':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.name == 'Distinct':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.p.name == 'OrderBy':
            repl['p'] = repl.p.clone()
            repl = repl.p
        if repl.p.name == 'Extend':
            repl['p'] = repl.p.clone()
            repl = repl.p

        repl['p'] = Join(repl.p, ToMultiSet(Values([values])))

        # TODO: Vars?

    if main.datasetClause:
        if ctx.dataset is None:
            raise Exception(
                "Non-conjunctive-graph doesn't know about "
                + "graphs! Try a query without FROM (NAMED).")
        ctx = ctx.clone()  # or push/pop?
        firstDefault = False
        for d in main.datasetClause:
            if d.default:
                if firstDefault:
                    # replace current default graph
                    dg = ctx.dataset.get_context(BNode())
                    ctx = ctx.pushGraph(dg)
                firstDefault = True
                ctx.load(d.default, default=True)
            elif d.named:
                g = d.named
                ctx.load(g, default=False)
    return evalPart(ctx, main)