def test_serialize(self):
    """Two named graphs serialize to two nquad lines and round-trip intact."""
    graph = ConjunctiveGraph()
    ctx1 = URIRef("http://example.org/mygraph1")
    ctx2 = URIRef("http://example.org/mygraph2")
    bob = URIRef(u"urn:bob")
    likes = URIRef(u"urn:likes")
    pizza = URIRef(u"urn:pizza")
    for ctx in (ctx1, ctx2):
        graph.get_context(ctx).add((bob, likes, pizza))
    data = graph.serialize(format="nquads")
    # One statement line per context; skip blank lines in the output.
    nonblank = [line for line in data.split("\n".encode("latin-1")) if line.strip()]
    self.assertEqual(len(nonblank), 2)
    reparsed = ConjunctiveGraph()
    reparsed.parse(data=data, format="nquads")
    self.assertEqual(len(graph), len(reparsed))
    self.assertEqual(
        sorted(c.identifier for c in graph.contexts()),
        sorted(c.identifier for c in reparsed.contexts()),
    )
class NQSink(object):
    """Quad sink that routes each parsed quad into its context graph."""

    def __init__(self, graph):
        # NQuads is a context-aware format, so wrap the underlying store
        # in a ConjunctiveGraph that can address individual contexts.
        self.graph = ConjunctiveGraph(store=graph.store)

    def quad(self, s, p, o, c):
        """Add the triple (s, p, o) to the context graph identified by c."""
        self.graph.get_context(c).add((s, p, o))
def _graphFromQuads2(q):
    """Build a ConjunctiveGraph from an iterable of (s, p, o, c) quads."""
    result = ConjunctiveGraph()
    # result.addN(q)  # no effect on nquad output
    for subj, pred, obj, ctx in q:
        # kind of works with broken rdflib nquad serializer code
        result.get_context(ctx).add((subj, pred, obj))
        # result.store.add((subj, pred, obj), ctx)  # no effect on nquad output
    return result
def test_named_filter_graph_query():
    """GRAPH-scoped patterns must combine correctly with joins and with
    (NOT) EXISTS filters whose triples live in a different named graph."""
    g = ConjunctiveGraph()
    g.namespace_manager.bind('rdf', RDF)
    g.namespace_manager.bind('rdfs', RDFS)
    ex = Namespace('https://ex.com/')
    g.namespace_manager.bind('ex', ex)
    # g1 holds two labelled resources; g2 types only Boris as a Person.
    g.get_context(ex.g1).parse(format="turtle", data=f"""
    PREFIX ex: <{str(ex)}>
    PREFIX rdfs: <{str(RDFS)}>
    ex:Boris rdfs:label "Boris" .
    ex:Susan rdfs:label "Susan" .
    """)
    g.get_context(ex.g2).parse(format="turtle", data=f"""
    PREFIX ex: <{str(ex)}>
    ex:Boris a ex:Person .
    """)
    # Explicit graph name: join / EXISTS see Boris, NOT EXISTS sees Susan.
    assert list(g.query("SELECT ?l WHERE { GRAPH ex:g1 { ?a rdfs:label ?l } ?a a ?type }", initNs={'ex': ex})) == [(Literal('Boris'),)]
    assert list(g.query("SELECT ?l WHERE { GRAPH ex:g1 { ?a rdfs:label ?l } FILTER EXISTS { ?a a ?type }}", initNs={'ex': ex})) == [(Literal('Boris'),)]
    assert list(g.query("SELECT ?l WHERE { GRAPH ex:g1 { ?a rdfs:label ?l } FILTER NOT EXISTS { ?a a ?type }}", initNs={'ex': ex})) == [(Literal('Susan'),)]
    # Same three shapes with a variable graph term.
    assert list(g.query("SELECT ?l WHERE { GRAPH ?g { ?a rdfs:label ?l } ?a a ?type }", initNs={'ex': ex})) == [(Literal('Boris'),)]
    assert list(g.query("SELECT ?l WHERE { GRAPH ?g { ?a rdfs:label ?l } FILTER EXISTS { ?a a ?type }}", initNs={'ex': ex})) == [(Literal('Boris'),)]
    assert list(g.query("SELECT ?l WHERE { GRAPH ?g { ?a rdfs:label ?l } FILTER NOT EXISTS { ?a a ?type }}", initNs={'ex': ex})) == [(Literal('Susan'),)]
def test_dSet_parsed_as_context_returns_results(self):
    """With loadContexts=True a FROM <uri> clause resolves to the local
    context of that name and the query yields bindings."""
    querystr = """SELECT DISTINCT ?s FROM <http://test/> { ?s ?p ?o }"""
    graph = ConjunctiveGraph()
    graph.get_context(URIRef('http://test/')
                      ).parse("http://www.w3.org/People/Berners-Lee/card.rdf")
    r = graph.query(querystr, loadContexts=True)
    # FIX: `is not 0` compared object identity, not value (and is
    # implementation-dependent for ints); use a value comparison.
    self.assert_(len(r.bindings) != 0)
def test_dSet_parsed_as_URL_raises_Exception(self):
    """Without loadContexts, FROM <uri> is dereferenced as a URL and fails."""
    querystr = """SELECT DISTINCT ?s FROM <http://test/> { ?s ?p ?o }"""
    graph = ConjunctiveGraph()
    card = "http://www.w3.org/People/Berners-Lee/card.rdf"
    graph.get_context(URIRef("http://test/")).parse(card)
    self.assertRaises((URLError, UriException),
                      graph.query, (querystr), loadContexts=False)
def test_quad_contexts():
    """Triples added via get_context() and addN() both appear in the union,
    and every quad exposes a Graph instance as its context slot."""
    g = ConjunctiveGraph()
    a, b = URIRef('urn:a'), URIRef('urn:b')
    g.get_context(a).add((a, a, a))
    g.addN([(b, b, b, b)])
    assert set(g) == {(a, a, a), (b, b, b)}
    assert all(isinstance(quad[3], Graph) for quad in g.quads())
def test_quad_contexts():
    """get_context().add and addN agree; quads carry Graph contexts."""
    g = ConjunctiveGraph()
    a = URIRef("urn:a")
    b = URIRef("urn:b")
    g.get_context(a).add((a, a, a))
    g.addN([(b, b, b, b)])
    expected = {(a, a, a), (b, b, b)}
    assert set(g) == expected
    for quad in g.quads():
        # Context slot must be a Graph, not a bare identifier.
        assert isinstance(quad[3], Graph)
class NQuadsParser(NTriplesParser):
    """Line-oriented N-Quads parser: N-Triples plus an optional context
    term before the terminating dot."""

    def parse(self, inputsource, sink, **kwargs):
        """Parse f as an N-Triples file."""
        # A plain Graph would silently drop statements whose context does
        # not match its own identifier, hence the hard requirement below.
        assert sink.store.context_aware, ("NQuadsParser must be given"
                                          " a context aware store.")
        self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)
        source = inputsource.getByteStream()
        if not hasattr(source, "read"):
            raise ParseError("Item to parse must be a file-like object.")
        # Decode the byte stream as UTF-8 before line-wise parsing.
        source = getreader("utf-8")(source)
        self.file = source
        self.buffer = ""
        while True:
            self.line = __line = self.readline()
            if self.line is None:
                break
            try:
                self.parseline()
            except ParseError as msg:
                # Re-raise with the offending source line for diagnostics.
                raise ParseError("Invalid line (%s):\n%r" % (msg, __line))
        return self.sink

    def parseline(self):
        """Consume one line: subject, predicate, object, optional context."""
        self.eat(r_wspace)
        if (not self.line) or self.line.startswith(("#")):
            return  # The line is empty or a comment
        subject = self.subject()
        self.eat(r_wspace)
        predicate = self.predicate()
        self.eat(r_wspace)
        obj = self.object()
        self.eat(r_wspace)
        # Context is optional; fall back to the sink's own identifier
        # (i.e. the default graph of the target dataset).
        context = self.uriref() or self.nodeid() or self.sink.identifier
        self.eat(r_tail)
        if self.line:
            raise ParseError("Trailing garbage")
        # Must have a context aware store - add on a normal Graph
        # discards anything where the ctx != graph.identifier
        self.sink.get_context(context).add((subject, predicate, obj))
class NQuadsParser(NTriplesParser):
    """N-Quads parser built on the N-Triples line parser; each line may
    carry an extra context term before the closing dot."""

    def parse(self, inputsource, sink, **kwargs):
        """Parse f as an N-Triples file."""
        # Context-aware store required: a plain Graph drops statements
        # whose context differs from its identifier (see parseline).
        assert sink.store.context_aware, ("NQuadsParser must be given"
                                          " a context aware store.")
        self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)
        source = inputsource.getByteStream()
        if not hasattr(source, 'read'):
            raise ParseError("Item to parse must be a file-like object.")
        # Wrap the raw byte stream in a UTF-8 decoding reader.
        source = getreader('utf-8')(source)
        self.file = source
        self.buffer = ''
        while True:
            self.line = __line = self.readline()
            if self.line is None:
                break
            try:
                self.parseline()
            except ParseError as msg:
                # Attach the raw line to the error for easier debugging.
                raise ParseError("Invalid line (%s):\n%r" % (msg, __line))
        return self.sink

    def parseline(self):
        """Parse a single N-Quads statement from the current line."""
        self.eat(r_wspace)
        if (not self.line) or self.line.startswith(('#')):
            return  # The line is empty or a comment
        subject = self.subject()
        self.eat(r_wspace)
        predicate = self.predicate()
        self.eat(r_wspace)
        obj = self.object()
        self.eat(r_wspace)
        # Optional context term; default graph is the sink's identifier.
        context = self.uriref() or self.nodeid() or self.sink.identifier
        self.eat(r_tail)
        if self.line:
            raise ParseError("Trailing garbage")
        # Must have a context aware store - add on a normal Graph
        # discards anything where the ctx != graph.identifier
        self.sink.get_context(context).add((subject, predicate, obj))
def test_nquads_default_graph():
    """Statements without an explicit graph term must land in the graph
    named by publicID; explicitly-named statements keep their graph."""
    ds = ConjunctiveGraph()

    data = """
    <http://example.org/s1> <http://example.org/p1> <http://example.org/o1> .
    <http://example.org/s2> <http://example.org/p2> <http://example.org/o2> .
    <http://example.org/s3> <http://example.org/p3> <http://example.org/o3> <http://example.org/g3> .
    """

    publicID = URIRef("http://example.org/g0")
    ds.parse(data=data, format="nquads", publicID=publicID)

    # FIX: the failure message referenced undefined `g` (NameError on a
    # failing assert); report len(ds) instead.
    assert len(ds) == 3, len(ds)
    assert len(list(ds.contexts())) == 2, len(list(ds.contexts()))
    assert len(ds.get_context(publicID)) == 2, len(ds.get_context(publicID))
def __store_graph(cur_g, rdf_iri_string, d_dir):
    """Serialise cur_g as JSON-LD under d_dir, merging into any existing
    file for the same resource.

    Returns the destination file path on success, or None on failure
    (the error is recorded via the global `reperr` reporter).
    """
    # FIX: define dest_file before the try block; if find_paths raises,
    # the except clause below would otherwise hit a NameError and mask
    # the original error.
    dest_file = None
    try:
        res_dir, dest_file = \
            find_paths(rdf_iri_string, args.base + os.sep,
                       "https://w3id.org/oc/corpus/", 10000, 1000)

        dest_dir = res_dir.replace(args.base + os.sep, d_dir + os.sep)
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)

        cur_file = dest_file.replace(res_dir, dest_dir)
        if os.path.exists(cur_file):
            c_graph = __load_graph(cur_file)
        else:
            c_graph = ConjunctiveGraph()

        # Drop any stored copy of this named graph, then re-add its quads.
        c_graph.remove_context(c_graph.get_context(cur_g.identifier))
        c_graph.addN([item + (cur_g.identifier,) for item in list(cur_g)])

        # cur_file == dest_file.replace(res_dir, dest_dir); reuse it rather
        # than recomputing the same path.
        with open(cur_file, "w") as f:
            cur_json_ld = json.loads(
                c_graph.serialize(format="json-ld", context=context_json))
            cur_json_ld["@context"] = context_path
            json.dump(cur_json_ld, f, indent=4)
        # repok.add_sentence("File '%s' added." % cur_file)
        return dest_file
    except Exception as e:
        reperr.add_sentence(
            "[5] It was impossible to store the RDF statements in %s. %s"
            % (dest_file, str(e)))
def test_nquads_default_graph():
    """publicID names the default graph for context-less nquad lines."""
    ds = ConjunctiveGraph()

    data = """
    <http://example.org/s1> <http://example.org/p1> <http://example.org/o1> .
    <http://example.org/s2> <http://example.org/p2> <http://example.org/o2> .
    <http://example.org/s3> <http://example.org/p3> <http://example.org/o3> <http://example.org/g3> .
    """

    publicID = URIRef("http://example.org/g0")
    ds.parse(data=data, format="nquads", publicID=publicID)

    # FIX: assert message used undefined name `g`; report len(ds).
    assert len(ds) == 3, len(ds)
    assert len(list(ds.contexts())) == 2, len(list(ds.contexts()))
    assert len(ds.get_context(publicID)) == 2, len(ds.get_context(publicID))
def _construct(compiler, sources, query=None):
    """Load one or more source definitions into a dataset and, if a query
    path is given, run it and return the constructed result graph.

    Each source definition is a dict with a 'source' entry (inline JSON-LD
    data, a Graph, or something compiler.cached_rdf can resolve) and an
    optional 'dataset' entry naming the target context.
    NOTE(review): uses the Python-2 `unicode` builtin.
    """
    dataset = ConjunctiveGraph()
    if not isinstance(sources, list):
        sources = [sources]
    for sourcedfn in sources:
        source = sourcedfn['source']
        # Each source gets its own named context in the dataset.
        graph = dataset.get_context(URIRef(sourcedfn.get('dataset') or source))
        if isinstance(source, (dict, list)):
            # Inline JSON-LD: resolve its @context definition(s) first.
            context_data = sourcedfn['context']
            if not isinstance(context_data, list):
                context_data = compiler.load_json(context_data)['@context']
            context_data = [compiler.load_json(ctx)['@context']
                            if isinstance(ctx, unicode) else ctx
                            for ctx in context_data]
            to_rdf(source, graph, context_data=context_data)
        elif isinstance(source, Graph):
            graph += source
        else:
            graph += compiler.cached_rdf(source)
    if not query:
        return graph
    with compiler.path(query).open() as fp:
        result = dataset.query(fp.read())
    # Materialise the CONSTRUCT result into a plain Graph.
    g = Graph()
    for spo in result:
        g.add(spo)
    return g
def test_bnode_publicid():
    """A BNode publicID names a context distinct from a URIRef with the
    same label."""
    g = ConjunctiveGraph()
    b = BNode()
    data = "<d:d> <e:e> <f:f> ."
    print("Parsing %r into %r" % (data, b))
    g.parse(data=data, format="turtle", publicID=b)

    # The triple must be visible under the BNode context...
    if not list(g.get_context(b).triples((None, None, None))):
        raise Exception("No triples found in graph %r" % b)

    # ...but not under a URIRef spelled like the BNode.
    u = URIRef(b)
    found = list(g.get_context(u).triples((None, None, None)))
    if found:
        raise Exception("Bad: Found in graph %r: %r" % (u, found))
def test_bnode_publicid():
    """Parsing with a BNode publicID must not leak into a same-label URIRef."""
    g = ConjunctiveGraph()
    b = BNode()
    data = '<d:d> <e:e> <f:f> .'
    print("Parsing %r into %r" % (data, b))
    g.parse(data=data, format='turtle', publicID=b)
    in_bnode_ctx = list(g.get_context(b).triples((None, None, None)))
    if not in_bnode_ctx:
        raise Exception("No triples found in graph %r" % b)
    u = URIRef(b)
    in_uri_ctx = list(g.get_context(u).triples((None, None, None)))
    if in_uri_ctx:
        raise Exception("Bad: Found in graph %r: %r" % (u, in_uri_ctx))
def graph(self):
    """Materialise the triple patterns in self._tps as a ConjunctiveGraph,
    grouping connected patterns into shared contexts.

    Patterns whose subjects/objects are connected (per a NetworkX
    undirected graph) share one context, keyed by the component index.
    """
    g = ConjunctiveGraph()
    for prefix in self.__prefixes:
        g.bind(prefix, self.__prefixes[prefix])
    variables = {}

    def nodify(elm):
        # Variables map to stable BNodes; 'a' to rdf:type; quoted text to
        # Literals; otherwise try a number, falling back to a URIRef.
        # NOTE(review): numeric elements are returned as plain Python
        # floats, not rdflib Literals — confirm this is intended.
        if is_variable(elm):
            if not (elm in variables):
                elm_node = BNode(elm)
                variables[elm] = elm_node
            return variables[elm]
        else:
            if elm == 'a':
                return RDF.type
            elif elm.startswith('"'):
                return Literal(elm.lstrip('"').rstrip('"'))
            else:
                try:
                    return float(elm)
                except ValueError:
                    return URIRef(elm)

    # Build an undirected graph of subject/object co-occurrence to find
    # connected components.
    nxg = nx.Graph()
    for (s, p, o) in self._tps:
        nxg.add_nodes_from([s, o])
        nxg.add_edge(s, o)

    # component-index (as str) -> set of terms in that component
    contexts = dict([(str(index), c)
                     for (index, c) in enumerate(nx.connected_components(nxg))])

    for (s, p, o) in self._tps:
        s_node = nodify(s)
        o_node = nodify(o)
        p_node = nodify(p)
        # Find the component containing this pattern's subject; its index
        # becomes the context identifier.
        context = None
        for uid in contexts:
            if s in contexts[uid]:
                context = str(uid)
        g.get_context(context).add((s_node, p_node, o_node))

    return g
def test_serialize(self):
    """Two one-triple contexts serialize to exactly two nquad lines and
    round-trip through the nquads parser with contexts preserved."""
    g = ConjunctiveGraph()
    uri1 = URIRef("http://example.org/mygraph1")
    uri2 = URIRef("http://example.org/mygraph2")

    bob = URIRef(u'urn:bob')
    likes = URIRef(u'urn:likes')
    pizza = URIRef(u'urn:pizza')

    g.get_context(uri1).add((bob, likes, pizza))
    g.get_context(uri2).add((bob, likes, pizza))

    s = g.serialize(format='nquads')
    # b("\n") yields a bytes newline for splitting the serialized output;
    # count only non-blank lines.
    self.assertEqual(len([x for x in s.split(b("\n")) if x.strip()]), 2)

    g2 = ConjunctiveGraph()
    g2.parse(data=s, format='nquads')

    self.assertEqual(len(g), len(g2))
    self.assertEqual(sorted(x.identifier for x in g.contexts()),
                     sorted(x.identifier for x in g2.contexts()))
def generate_config_file(data_source, rdf_data=None, rdf_format=None,
                         sparql_url=None, sparql_graph=None):
    """Build the data graph for config generation, either from inline RDF
    text or from a remote SPARQL endpoint (optionally a named graph).

    Raises a wrapping Exception if the data source cannot be read.
    """
    current_task.update_state(
        state='PROGRESS',
        meta={'progress_percent': 15,
              'progress_msg': 'Reading provided RDF data...'})
    try:
        if data_source == 'rdf':
            data_graph = Graph()
            data_graph.parse(format=rdf_format, data=rdf_data)
        else:
            g = ConjunctiveGraph('SPARQLStore')
            g.open(sparql_url)
            data_graph = g.get_context(sparql_graph) if sparql_graph else g
    # FIX: "except Exception, e" is Python-2-only syntax (a SyntaxError on
    # Python 3); "as" works on Python 2.6+ and Python 3.
    except Exception as e:
        raise Exception("An error occurred while trying to read provided data source: %s" % str(e))
def get_ltw_data_graph(graph_id=None):
    """Return the LTW data graph stored in Virtuoso, or None when no
    graph id can be determined.

    When graph_id is not supplied it is looked up from the App row bound
    to the current session.
    """
    if not graph_id:
        ltwapp = App.query.filter_by(id=session['app']).first()
        graph_id = ltwapp.graph_id
    if graph_id:
        Virtuoso = plugin("Virtuoso", Store)
        store = Virtuoso(app.config['VIRTUOSO_ODBC'])
        ltw_data_graph = ConjunctiveGraph(store=store)
        g = ltw_data_graph.get_context(graph_id)
        # Initialization step needed to make Virtuoso library work:
        # add and immediately remove a dummy triple.
        g.add((URIRef('initializationstuff'), URIRef('initializationstuff'), URIRef('initializationstuff')))
        g.remove((URIRef('initializationstuff'), URIRef('initializationstuff'), URIRef('initializationstuff')))
        return g
    else:
        return None
class GraphCache(object):
    """File-backed cache of RDF vocabularies merged into a single
    ConjunctiveGraph, one context per source URL."""

    def __init__(self, cachedir):
        self.graph = ConjunctiveGraph()
        # url -> st_mtime of the local file last parsed for that url
        self.mtime_map = {}
        self.cachedir = cachedir
        if not os.path.isdir(cachedir):
            os.makedirs(cachedir)

    def load(self, url):
        """Return a graph for url: reparse a changed local file, else use
        the in-memory context, else a cached .ttl copy, else fetch."""
        src = VOCAB_SOURCE_MAP.get(str(url), url)
        if os.path.isfile(url):
            context_id = create_input_source(url).getPublicId()
            last_vocab_mtime = self.mtime_map.get(url)
            vocab_mtime = os.stat(url).st_mtime
            # Reparse only when the file changed since the last load;
            # otherwise fall through to the in-memory context below.
            if not last_vocab_mtime or last_vocab_mtime < vocab_mtime:
                logger.debug("Parse file: '%s'", url)
                self.mtime_map[url] = vocab_mtime
                # use CG as workaround for json-ld always loading as dataset
                graph = ConjunctiveGraph()
                graph.parse(src, format=guess_format(src))
                self.graph.remove_context(context_id)
                for s, p, o in graph:
                    self.graph.add((s, p, o, context_id))
                return graph
        else:
            context_id = url
        if any(self.graph.triples((None, None, None), context=context_id)):
            logger.debug("Using context <%s>" % context_id)
            return self.graph.get_context(context_id)
        cache_path = self.get_fs_path(url)
        if os.path.exists(cache_path):
            logger.debug("Load local copy of <%s> from '%s'", context_id, cache_path)
            return self.graph.parse(cache_path, format='turtle', publicID=context_id)
        else:
            logger.debug("Fetching <%s> to '%s'", context_id, cache_path)
            graph = self.graph.parse(src, format='rdfa' if url.endswith('html') else None)
            # Persist a turtle copy so future runs avoid the network.
            with open(cache_path, 'w') as f:
                graph.serialize(f, format='turtle')
            return graph

    def get_fs_path(self, url):
        """Map a URL to its cache file path (URL percent-encoded + .ttl)."""
        return os.path.join(self.cachedir, quote(url, safe="")) + '.ttl'
def get_ltw_data_graph(graph_id=None):
    """Fetch the LTW data context from the Virtuoso store.

    Falls back to the session's App for the graph id; returns None if no
    id is available.
    """
    if not graph_id:
        ltwapp = App.query.filter_by(id=session['app']).first()
        graph_id = ltwapp.graph_id
    if graph_id:
        Virtuoso = plugin("Virtuoso", Store)
        store = Virtuoso(app.config['VIRTUOSO_ODBC'])
        ltw_data_graph = ConjunctiveGraph(store=store)
        g = ltw_data_graph.get_context(graph_id)
        # Initialization step needed to make Virtuoso library work
        # (dummy triple added then removed).
        g.add((URIRef('initializationstuff'), URIRef('initializationstuff'), URIRef('initializationstuff')))
        g.remove((URIRef('initializationstuff'), URIRef('initializationstuff'), URIRef('initializationstuff')))
        return g
    else:
        return None
def discussion_as_quads_old(self, discussion_id):
    """Serialize a discussion's quadstore content as an nquads string.

    Graphs containing catalyst:expressesIdea triples are additionally
    re-emitted line by line with their graph term appended.
    """
    self.quadstore_lock.acquire()
    # FIX: release the lock in a finally block; previously any exception
    # from the query/serialization left the lock held forever, deadlocking
    # every subsequent caller.
    try:
        self.ensure_discussion_storage(discussion_id)
        d_storage_name = self.discussion_storage_name(discussion_id)
        v = get_virtuoso(self.session, d_storage_name)
        cg = ConjunctiveGraph(v, d_storage_name)
        quads = cg.serialize(format='nquads')
        for (g,) in v.query(
                'SELECT ?g WHERE {graph ?g {?s catalyst:expressesIdea ?o}}'):
            ectx = cg.get_context(g)
            for l in ectx.serialize(format='nt').split('\n'):
                l = l.strip()
                if not l:
                    continue
                # Turn each nt line into an nquad by appending the graph term.
                l = l.rstrip('.')
                l += ' ' + g.n3(self.nsm)
                quads += l + ' .\n'
    finally:
        self.quadstore_lock.release()
    return quads
def generate_config_file(data_source, rdf_data=None, rdf_format=None,
                         sparql_url=None, sparql_graph=None):
    """Read the input data graph either from inline RDF text or from a
    SPARQL endpoint (optionally restricted to one named graph).

    Raises a wrapping Exception when the source cannot be read.
    """
    current_task.update_state(state='PROGRESS', meta={
        'progress_percent': 15,
        'progress_msg': 'Reading provided RDF data...'
    })
    try:
        if data_source == 'rdf':
            data_graph = Graph()
            data_graph.parse(format=rdf_format, data=rdf_data)
        else:
            g = ConjunctiveGraph('SPARQLStore')
            g.open(sparql_url)
            data_graph = g.get_context(sparql_graph) if sparql_graph else g
    # FIX: replace Python-2-only "except Exception, e" (SyntaxError on
    # Python 3) with the "as" form, valid on Python 2.6+ and 3.
    except Exception as e:
        raise Exception(
            "An error occurred while trying to read provided data source: %s" % str(e))
# NOTE(review): SPARQL 1.1 update-store integration tests against a live
# endpoint (HOST + DB). Python-2-only source: uses ur"" literals and
# `unicode`, so it cannot run under Python 3 as-is — TODO confirm target
# interpreter before reformatting. Code left byte-identical; the
# extraction tool collapsed the original onto the lines below.
class TestSparql11(unittest.TestCase): def setUp(self): self.longMessage = True self.graph = ConjunctiveGraph('SPARQLUpdateStore') root = HOST + DB self.graph.open((root + "sparql", root + "update")) # clean out the store for c in self.graph.contexts(): c.remove((None, None, None)) assert len(c) == 0 def tearDown(self): self.graph.close() def testSimpleGraph(self): g = self.graph.get_context(graphuri) g.add((tarek, likes, pizza)) g.add((bob, likes, pizza)) g.add((bob, likes, cheese)) g2 = self.graph.get_context(othergraphuri) g2.add((michel, likes, pizza)) self.assertEquals(3, len(g), 'graph contains 3 triples') self.assertEquals(1, len(g2), 'other graph contains 1 triple') r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEquals(2, len(list(r)), "two people like pizza") r = g.triples((None, likes, pizza)) self.assertEquals(2, len(list(r)), "two people like pizza") # Test initBindings r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={'s': tarek}) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = g.triples((tarek, likes, pizza)) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = g.triples((tarek, likes, cheese)) self.assertEquals(0, len(list(r)), "tarek doesn't like cheese") g2.add((tarek, likes, pizza)) g.remove((tarek, likes, pizza)) r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . 
}") self.assertEquals(1, len(list(r)), "only bob likes pizza") def testConjunctiveDefault(self): g = self.graph.get_context(graphuri) g.add((tarek, likes, pizza)) g2 = self.graph.get_context(othergraphuri) g2.add((bob, likes, pizza)) g.add((tarek, hates, cheese)) self.assertEquals(2, len(g), 'graph contains 2 triples') # the following are actually bad tests as they depend on your endpoint, # as pointed out in the sparqlstore.py code: # ## For ConjunctiveGraphs, reading is done from the "default graph" Exactly ## what this means depends on your endpoint, because SPARQL does not offer a ## simple way to query the union of all graphs as it would be expected for a ## ConjuntiveGraph. ## ## Fuseki/TDB has a flag for specifying that the default graph ## is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config). self.assertEquals(3, len(self.graph), 'default union graph should contain three triples but contains:\n' '%s' % list(self.graph)) r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEquals(2, len(list(r)), "two people like pizza") r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={'s': tarek}) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = self.graph.triples((tarek, likes, pizza)) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = self.graph.triples((tarek, likes, cheese)) self.assertEquals(0, len(list(r)), "tarek doesn't like cheese") g2.remove((bob, likes, pizza)) r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEquals(1, len(list(r)), "only tarek likes pizza") def testUpdate(self): self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }") g = self.graph.get_context(graphuri) self.assertEquals(1, len(g), 'graph contains 1 triples') def testUpdateWithInitNs(self): self.graph.update( "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . 
} }", initNs={'ns': URIRef('urn:')} ) g = self.graph.get_context(graphuri) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) def testUpdateWithInitBindings(self): self.graph.update( "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }", initBindings={ 'a': URIRef('urn:michel'), 'b': URIRef('urn:likes'), 'c': URIRef('urn:pizza'), } ) g = self.graph.get_context(graphuri) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) def testMultipleUpdateWithInitBindings(self): self.graph.update( "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };" "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }", initBindings={ 'a': URIRef('urn:michel'), 'b': URIRef('urn:likes'), 'c': URIRef('urn:pizza'), 'd': URIRef('urn:bob'), } ) g = self.graph.get_context(graphuri) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza), (bob,likes,pizza)]), 'michel and bob like pizza' ) def testNamedGraphUpdate(self): g = self.graph.get_context(graphuri) r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }" g.update(r1) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \ "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}" g.update(r2) self.assertEquals( set(g.triples((None, None, None))), set([(bob, likes, pizza)]), 'only bob likes pizza' ) says = URIRef("urn:says") # Strings with unbalanced curly braces tricky_strs = ["With an unbalanced curly brace %s " % brace for brace in ["{", "}"]] for tricky_str in tricky_strs: r3 = """INSERT { ?b <urn:says> "%s" } WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str g.update(r3) values = set() for v in g.objects(bob, says): values.add(str(v)) self.assertEquals(values, set(tricky_strs)) # Complicated Strings r4strings = [] r4strings.append(ur'''"1: adfk { ' \\\" \" { "''') 
r4strings.append(ur'''"2: adfk } <foo> #éï \\"''') r4strings.append(ur"""'3: adfk { " \\\' \' { '""") r4strings.append(ur"""'4: adfk } <foo> #éï \\'""") r4strings.append(ur'''"""5: adfk { ' \\\" \" { """''') r4strings.append(ur'''"""6: adfk } <foo> #éï \\"""''') r4strings.append(u'"""7: ad adsfj \n { \n sadfj"""') r4strings.append(ur"""'''8: adfk { " \\\' \' { '''""") r4strings.append(ur"""'''9: adfk } <foo> #éï \\'''""") r4strings.append(u"'''10: ad adsfj \n { \n sadfj'''") r4 = "\n".join([ u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s for s in r4strings ]) g.update(r4) values = set() for v in g.objects(michel, says): values.add(unicode(v)) self.assertEquals(values, set([re.sub(ur"\\(.)", ur"\1", re.sub(ur"^'''|'''$|^'|'$|" + ur'^"""|"""$|^"|"$', ur"", s)) for s in r4strings]))
# NOTE(review): Python-3-compatible revision of the SPARQL 1.1
# update-store integration tests (assertEqual, text_type, raw r"" strings).
# Requires a live endpoint at HOST + DB. Code left byte-identical; the
# extraction tool collapsed the original onto the lines below.
class TestSparql11(unittest.TestCase): def setUp(self): self.longMessage = True self.graph = ConjunctiveGraph('SPARQLUpdateStore') root = HOST + DB self.graph.open((root + "sparql", root + "update")) # clean out the store for c in self.graph.contexts(): c.remove((None, None, None)) assert len(c) == 0 def tearDown(self): self.graph.close() def testSimpleGraph(self): g = self.graph.get_context(graphuri) g.add((tarek, likes, pizza)) g.add((bob, likes, pizza)) g.add((bob, likes, cheese)) g2 = self.graph.get_context(othergraphuri) g2.add((michel, likes, pizza)) self.assertEqual(3, len(g), 'graph contains 3 triples') self.assertEqual(1, len(g2), 'other graph contains 1 triple') r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEqual(2, len(list(r)), "two people like pizza") r = g.triples((None, likes, pizza)) self.assertEqual(2, len(list(r)), "two people like pizza") # Test initBindings r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={'s': tarek}) self.assertEqual(1, len(list(r)), "i was asking only about tarek") r = g.triples((tarek, likes, pizza)) self.assertEqual(1, len(list(r)), "i was asking only about tarek") r = g.triples((tarek, likes, cheese)) self.assertEqual(0, len(list(r)), "tarek doesn't like cheese") g2.add((tarek, likes, pizza)) g.remove((tarek, likes, pizza)) r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . 
}") self.assertEqual(1, len(list(r)), "only bob likes pizza") def testConjunctiveDefault(self): g = self.graph.get_context(graphuri) g.add((tarek, likes, pizza)) g2 = self.graph.get_context(othergraphuri) g2.add((bob, likes, pizza)) g.add((tarek, hates, cheese)) self.assertEqual(2, len(g), 'graph contains 2 triples') # the following are actually bad tests as they depend on your endpoint, # as pointed out in the sparqlstore.py code: # ## For ConjunctiveGraphs, reading is done from the "default graph" Exactly ## what this means depends on your endpoint, because SPARQL does not offer a ## simple way to query the union of all graphs as it would be expected for a ## ConjuntiveGraph. ## ## Fuseki/TDB has a flag for specifying that the default graph ## is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config). self.assertEqual(3, len(self.graph), 'default union graph should contain three triples but contains:\n' '%s' % list(self.graph)) r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEqual(2, len(list(r)), "two people like pizza") r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={'s': tarek}) self.assertEqual(1, len(list(r)), "i was asking only about tarek") r = self.graph.triples((tarek, likes, pizza)) self.assertEqual(1, len(list(r)), "i was asking only about tarek") r = self.graph.triples((tarek, likes, cheese)) self.assertEqual(0, len(list(r)), "tarek doesn't like cheese") g2.remove((bob, likes, pizza)) r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEqual(1, len(list(r)), "only tarek likes pizza") def testUpdate(self): self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }") g = self.graph.get_context(graphuri) self.assertEqual(1, len(g), 'graph contains 1 triples') def testUpdateWithInitNs(self): self.graph.update( "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . 
} }", initNs={'ns': URIRef('urn:')} ) g = self.graph.get_context(graphuri) self.assertEqual( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) def testUpdateWithInitBindings(self): self.graph.update( "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }", initBindings={ 'a': URIRef('urn:michel'), 'b': URIRef('urn:likes'), 'c': URIRef('urn:pizza'), } ) g = self.graph.get_context(graphuri) self.assertEqual( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) def testMultipleUpdateWithInitBindings(self): self.graph.update( "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };" "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }", initBindings={ 'a': URIRef('urn:michel'), 'b': URIRef('urn:likes'), 'c': URIRef('urn:pizza'), 'd': URIRef('urn:bob'), } ) g = self.graph.get_context(graphuri) self.assertEqual( set(g.triples((None,None,None))), set([(michel,likes,pizza), (bob,likes,pizza)]), 'michel and bob like pizza' ) def testNamedGraphUpdate(self): g = self.graph.get_context(graphuri) r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }" g.update(r1) self.assertEqual( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \ "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}" g.update(r2) self.assertEqual( set(g.triples((None, None, None))), set([(bob, likes, pizza)]), 'only bob likes pizza' ) says = URIRef("urn:says") # Strings with unbalanced curly braces tricky_strs = ["With an unbalanced curly brace %s " % brace for brace in ["{", "}"]] for tricky_str in tricky_strs: r3 = """INSERT { ?b <urn:says> "%s" } WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str g.update(r3) values = set() for v in g.objects(bob, says): values.add(str(v)) self.assertEqual(values, set(tricky_strs)) # Complicated Strings r4strings = [] r4strings.append(r'''"1: adfk { ' \\\" \" { "''') 
r4strings.append(r'''"2: adfk } <foo> #éï \\"''') r4strings.append(r"""'3: adfk { " \\\' \' { '""") r4strings.append(r"""'4: adfk } <foo> #éï \\'""") r4strings.append(r'''"""5: adfk { ' \\\" \" { """''') r4strings.append(r'''"""6: adfk } <foo> #éï \\"""''') r4strings.append('"""7: ad adsfj \n { \n sadfj"""') r4strings.append(r"""'''8: adfk { " \\\' \' { '''""") r4strings.append(r"""'''9: adfk } <foo> #éï \\'''""") r4strings.append("'''10: ad adsfj \n { \n sadfj'''") r4 = "\n".join([ u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s for s in r4strings ]) g.update(r4) values = set() for v in g.objects(michel, says): values.add(text_type(v)) self.assertEqual(values, set([re.sub(r"\\(.)", r"\1", re.sub(r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s)) for s in r4strings])) # IRI Containing ' or # # The fragment identifier must not be misinterpreted as a comment # (commenting out the end of the block). # The ' must not be interpreted as the start of a string, causing the } # in the literal to be identified as the end of the block. r5 = """INSERT DATA { <urn:michel> <urn:hates> <urn:foo'bar?baz;a=1&b=2#fragment>, "'}" }""" g.update(r5) values = set() for v in g.objects(michel, hates): values.add(text_type(v)) self.assertEqual(values, set([u"urn:foo'bar?baz;a=1&b=2#fragment", u"'}"])) # Comments r6 = u""" INSERT DATA { <urn:bob> <urn:hates> <urn:bob> . # No closing brace: } <urn:bob> <urn:hates> <urn:michel>. 
} #Final { } comment""" g.update(r6) values = set() for v in g.objects(bob, hates): values.add(v) self.assertEqual(values, set([bob, michel])) def testNamedGraphUpdateWithInitBindings(self): g = self.graph.get_context(graphuri) r = "INSERT { ?a ?b ?c } WHERE {}" g.update(r, initBindings={ 'a': michel, 'b': likes, 'c': pizza }) self.assertEqual( set(g.triples((None, None, None))), set([(michel, likes, pizza)]), 'only michel likes pizza' ) def testEmptyNamedGraph(self): empty_graph_iri = "urn:empty-graph-1" self.graph.update("CREATE GRAPH <%s>" % empty_graph_iri) named_graphs = [text_type(r[0]) for r in self.graph.query( "SELECT ?name WHERE { GRAPH ?name {} }")] # Some SPARQL endpoint backends (like TDB) are not able to find empty named graphs # (at least with this query) if empty_graph_iri in named_graphs: self.assertTrue(empty_graph_iri in [text_type(g.identifier) for g in self.graph.contexts()]) def testEmptyLiteral(self): # test for https://github.com/RDFLib/rdflib/issues/457 # also see test_issue457.py which is sparql store independent! g = self.graph.get_context(graphuri) g.add(( URIRef('http://example.com/s'), URIRef('http://example.com/p'), Literal(''))) o = tuple(g)[0][2] self.assertEqual(o, Literal(''), repr(o))
# NOTE(review): older variant of the SPARQL 1.1 update-store tests with a
# hard-coded Fuseki endpoint (http://localhost:3030/ukpp/) and deprecated
# assertEquals. Requires that live endpoint to run. Code left
# byte-identical; the extraction tool collapsed the original onto the
# lines below.
class TestSparql11(unittest.TestCase): def setUp(self): self.longMessage = True self.graph = ConjunctiveGraph('SPARQLUpdateStore') root = "http://localhost:3030/ukpp/" self.graph.open((root + "sparql", root + "update")) # clean out the store for c in self.graph.contexts(): c.remove((None, None, None)) assert len(c) == 0 def tearDown(self): self.graph.close() def testSimpleGraph(self): g = self.graph.get_context(graphuri) g.add((tarek, likes, pizza)) g.add((bob, likes, pizza)) g.add((bob, likes, cheese)) g2 = self.graph.get_context(othergraphuri) g2.add((michel, likes, pizza)) self.assertEquals(3, len(g), 'graph contains 3 triples') self.assertEquals(1, len(g2), 'other graph contains 1 triple') r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEquals(2, len(list(r)), "two people like pizza") r = g.triples((None, likes, pizza)) self.assertEquals(2, len(list(r)), "two people like pizza") # Test initBindings r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={'s': tarek}) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = g.triples((tarek, likes, pizza)) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = g.triples((tarek, likes, cheese)) self.assertEquals(0, len(list(r)), "tarek doesn't like cheese") g2.add((tarek, likes, pizza)) g.remove((tarek, likes, pizza)) r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEquals(1, len(list(r)), "only bob likes pizza") def testConjunctiveDefault(self): g = self.graph.get_context(graphuri) g.add((tarek, likes, pizza)) g2 = self.graph.get_context(othergraphuri) g2.add((bob, likes, pizza)) g.add((tarek, hates, cheese)) self.assertEquals(2, len(g), 'graph contains 2 triples') self.assertEquals(3, len(self.graph), 'default union graph contains three triples') r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . 
}") self.assertEquals(2, len(list(r)), "two people like pizza") r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }", initBindings={'s': tarek}) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = self.graph.triples((tarek, likes, pizza)) self.assertEquals(1, len(list(r)), "i was asking only about tarek") r = self.graph.triples((tarek, likes, cheese)) self.assertEquals(0, len(list(r)), "tarek doesn't like cheese") g2.remove((bob, likes, pizza)) r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }") self.assertEquals(1, len(list(r)), "only tarek likes pizza") def testUpdate(self): self.graph.update("INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }") g = self.graph.get_context(graphuri) self.assertEquals(1, len(g), 'graph contains 1 triples') def testUpdateWithInitNs(self): self.graph.update( "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }", initNs={'ns': URIRef('urn:')} ) g = self.graph.get_context(graphuri) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) def testUpdateWithInitBindings(self): self.graph.update( "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }", initBindings={ 'a': URIRef('urn:michel'), 'b': URIRef('urn:likes'), 'c': URIRef('urn:pizza'), } ) g = self.graph.get_context(graphuri) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza)]), 'only michel likes pizza' ) def testMultipleUpdateWithInitBindings(self): self.graph.update( "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };" "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }", initBindings={ 'a': URIRef('urn:michel'), 'b': URIRef('urn:likes'), 'c': URIRef('urn:pizza'), 'd': URIRef('urn:bob'), } ) g = self.graph.get_context(graphuri) self.assertEquals( set(g.triples((None,None,None))), set([(michel,likes,pizza), (bob,likes,pizza)]), 'michel and bob like pizza' )
class TestSparql11(unittest.TestCase):
    """Integration tests for the SPARQLUpdateStore against a live SPARQL 1.1
    endpoint at ``HOST + DB``.

    NOTE(review): ``testNamedGraphUpdate`` uses ``ur''`` string literals and
    ``unicode``, which are Python 2-only syntax — this class cannot run on
    Python 3 as written.
    """

    def setUp(self):
        # Open read (query) and write (update) endpoints, then empty the store.
        self.longMessage = True
        self.graph = ConjunctiveGraph('SPARQLUpdateStore')
        root = HOST + DB
        self.graph.open((root + "sparql", root + "update"))

        # clean out the store
        for c in self.graph.contexts():
            c.remove((None, None, None))
            assert len(c) == 0

    def tearDown(self):
        self.graph.close()

    def testSimpleGraph(self):
        """Add/query/remove triples within a single named graph context."""
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g.add((bob, likes, pizza))
        g.add((bob, likes, cheese))

        g2 = self.graph.get_context(othergraphuri)
        g2.add((michel, likes, pizza))

        self.assertEquals(3, len(g), 'graph contains 3 triples')
        self.assertEquals(1, len(g2), 'other graph contains 1 triple')

        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = g.triples((None, likes, pizza))
        self.assertEquals(2, len(list(r)), "two people like pizza")

        # Test initBindings
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                    initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.add((tarek, likes, pizza))
        g.remove((tarek, likes, pizza))
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only bob likes pizza")

    def testConjunctiveDefault(self):
        """Reads on the ConjunctiveGraph should see the union of all contexts
        (endpoint-dependent; see the inline note below)."""
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g2 = self.graph.get_context(othergraphuri)
        g2.add((bob, likes, pizza))
        g.add((tarek, hates, cheese))

        self.assertEquals(2, len(g), 'graph contains 2 triples')

        # the following are actually bad tests as they depend on your endpoint,
        # as pointed out in the sparqlstore.py code:
        #
        ## For ConjunctiveGraphs, reading is done from the "default graph" Exactly
        ## what this means depends on your endpoint, because SPARQL does not offer a
        ## simple way to query the union of all graphs as it would be expected for a
        ## ConjuntiveGraph.
        ##
        ## Fuseki/TDB has a flag for specifying that the default graph
        ## is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).
        self.assertEquals(
            3, len(self.graph),
            'default union graph should contain three triples but contains:\n'
            '%s' % list(self.graph))

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                             initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.remove((bob, likes, pizza))
        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only tarek likes pizza")

    def testUpdate(self):
        """SPARQL Update INSERT DATA lands in the named graph it targets."""
        self.graph.update(
            "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }"
        )
        g = self.graph.get_context(graphuri)
        self.assertEquals(1, len(g), 'graph contains 1 triples')

    def testUpdateWithInitNs(self):
        """initNs prefixes are prepended to the update request."""
        self.graph.update(
            "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
            initNs={'ns': URIRef('urn:')})
        g = self.graph.get_context(graphuri)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza)]),
                          'only michel likes pizza')

    def testUpdateWithInitBindings(self):
        """initBindings variables are substituted into the update request."""
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
            })
        g = self.graph.get_context(graphuri)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza)]),
                          'only michel likes pizza')

    def testMultipleUpdateWithInitBindings(self):
        """Bindings apply to every statement of a multi-statement update."""
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
            "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
                'd': URIRef('urn:bob'),
            })
        g = self.graph.get_context(graphuri)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza), (bob, likes, pizza)]),
                          'michel and bob like pizza')

    def testNamedGraphUpdate(self):
        """Updates routed through a named Graph, including literals that
        contain unbalanced braces and quote characters (stress-tests the
        store's update-string parsing)."""
        g = self.graph.get_context(graphuri)
        r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"
        g.update(r1)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza)]),
                          'only michel likes pizza')

        r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
             "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
        g.update(r2)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(bob, likes, pizza)]),
                          'only bob likes pizza')

        says = URIRef("urn:says")

        # Strings with unbalanced curly braces
        tricky_strs = [
            "With an unbalanced curly brace %s " % brace
            for brace in ["{", "}"]
        ]
        for tricky_str in tricky_strs:
            r3 = """INSERT { ?b <urn:says> "%s" } WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
            g.update(r3)

        values = set()
        for v in g.objects(bob, says):
            values.add(str(v))
        self.assertEquals(values, set(tricky_strs))

        # Complicated Strings (Python 2 ur'' literals: raw unicode strings
        # exercising every SPARQL quoting style)
        r4strings = []
        r4strings.append(ur'''"1: adfk { ' \\\" \" { "''')
        r4strings.append(ur'''"2: adfk } <foo> #éï \\"''')
        r4strings.append(ur"""'3: adfk { " \\\' \' { '""")
        r4strings.append(ur"""'4: adfk } <foo> #éï \\'""")
        r4strings.append(ur'''"""5: adfk { ' \\\" \" { """''')
        r4strings.append(ur'''"""6: adfk } <foo> #éï \\"""''')
        r4strings.append(u'"""7: ad adsfj \n { \n sadfj"""')
        r4strings.append(ur"""'''8: adfk { " \\\' \' { '''""")
        r4strings.append(ur"""'''9: adfk } <foo> #éï \\'''""")
        r4strings.append(u"'''10: ad adsfj \n { \n sadfj'''")

        r4 = "\n".join([
            u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
            for s in r4strings
        ])
        g.update(r4)
        values = set()
        for v in g.objects(michel, says):
            values.add(unicode(v))
        # Expected values: each literal with its surrounding quotes stripped
        # and backslash-escapes undone.
        self.assertEquals(
            values,
            set([
                re.sub(
                    ur"\\(.)", ur"\1",
                    re.sub(ur"^'''|'''$|^'|'$|" + ur'^"""|"""$|^"|"$', ur"", s))
                for s in r4strings
            ]))
class TestSparql11(unittest.TestCase):
    """Integration tests for the SPARQLUpdateStore against a live SPARQL 1.1
    endpoint (Fuseki at ``http://localhost:3030/ukpp/``).

    NOTE(review): ``testNamedGraphUpdate`` uses ``ur''`` literals and
    ``unicode`` — Python 2-only syntax.
    """

    def setUp(self):
        # Open read (query) and write (update) endpoints, then empty the store.
        self.longMessage = True
        self.graph = ConjunctiveGraph('SPARQLUpdateStore')
        root = "http://localhost:3030/ukpp/"
        self.graph.open((root + "sparql", root + "update"))

        # clean out the store
        for c in self.graph.contexts():
            c.remove((None, None, None))
            assert len(c) == 0

    def tearDown(self):
        self.graph.close()

    def testSimpleGraph(self):
        """Add/query/remove triples within a single named graph context."""
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g.add((bob, likes, pizza))
        g.add((bob, likes, cheese))

        g2 = self.graph.get_context(othergraphuri)
        g2.add((michel, likes, pizza))

        self.assertEquals(3, len(g), 'graph contains 3 triples')
        self.assertEquals(1, len(g2), 'other graph contains 1 triple')

        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = g.triples((None, likes, pizza))
        self.assertEquals(2, len(list(r)), "two people like pizza")

        # Test initBindings
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                    initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.add((tarek, likes, pizza))
        g.remove((tarek, likes, pizza))
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only bob likes pizza")

    def testConjunctiveDefault(self):
        """Queries on the ConjunctiveGraph itself see the union of all contexts."""
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g2 = self.graph.get_context(othergraphuri)
        g2.add((bob, likes, pizza))
        g.add((tarek, hates, cheese))

        self.assertEquals(2, len(g), 'graph contains 2 triples')
        self.assertEquals(3, len(self.graph),
                          'default union graph contains three triples')

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                             initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.remove((bob, likes, pizza))
        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only tarek likes pizza")

    def testUpdate(self):
        """SPARQL Update INSERT DATA lands in the named graph it targets."""
        self.graph.update(
            "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }")
        g = self.graph.get_context(graphuri)
        self.assertEquals(1, len(g), 'graph contains 1 triples')

    def testUpdateWithInitNs(self):
        """initNs prefixes are prepended to the update request."""
        self.graph.update(
            "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
            initNs={'ns': URIRef('urn:')}
        )
        g = self.graph.get_context(graphuri)
        self.assertEquals(
            set(g.triples((None, None, None))),
            set([(michel, likes, pizza)]),
            'only michel likes pizza'
        )

    def testUpdateWithInitBindings(self):
        """initBindings variables are substituted into the update request."""
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
            }
        )
        g = self.graph.get_context(graphuri)
        self.assertEquals(
            set(g.triples((None, None, None))),
            set([(michel, likes, pizza)]),
            'only michel likes pizza'
        )

    def testMultipleUpdateWithInitBindings(self):
        """Bindings apply to every statement of a multi-statement update."""
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
            "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
                'd': URIRef('urn:bob'),
            }
        )
        g = self.graph.get_context(graphuri)
        self.assertEquals(
            set(g.triples((None, None, None))),
            set([(michel, likes, pizza), (bob, likes, pizza)]),
            'michel and bob like pizza'
        )

    def testNamedGraphUpdate(self):
        """Updates routed through a named Graph, including literals with
        unbalanced braces and mixed quoting (stress-tests update parsing)."""
        g = self.graph.get_context(graphuri)
        r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"
        g.update(r1)
        self.assertEquals(
            set(g.triples((None, None, None))),
            set([(michel, likes, pizza)]),
            'only michel likes pizza'
        )

        r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
             "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
        g.update(r2)
        self.assertEquals(
            set(g.triples((None, None, None))),
            set([(bob, likes, pizza)]),
            'only bob likes pizza'
        )

        says = URIRef("urn:says")

        # Strings with unbalanced curly braces
        tricky_strs = ["With an unbalanced curly brace %s " % brace
                       for brace in ["{", "}"]]
        for tricky_str in tricky_strs:
            r3 = """INSERT { ?b <urn:says> "%s" } WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
            g.update(r3)

        values = set()
        for v in g.objects(bob, says):
            values.add(str(v))
        self.assertEquals(values, set(tricky_strs))

        # Complicated Strings (Python 2 ur'' literals exercising every
        # SPARQL quoting style)
        r4strings = []
        r4strings.append(ur'''"1: adfk { ' \\\" \" { "''')
        r4strings.append(ur'''"2: adfk } <foo> #éï \\"''')
        r4strings.append(ur"""'3: adfk { " \\\' \' { '""")
        r4strings.append(ur"""'4: adfk } <foo> #éï \\'""")
        r4strings.append(ur'''"""5: adfk { ' \\\" \" { """''')
        r4strings.append(ur'''"""6: adfk } <foo> #éï \\"""''')
        r4strings.append(u'"""7: ad adsfj \n { \n sadfj"""')
        r4strings.append(ur"""'''8: adfk { " \\\' \' { '''""")
        r4strings.append(ur"""'''9: adfk } <foo> #éï \\'''""")
        r4strings.append(u"'''10: ad adsfj \n { \n sadfj'''")

        r4 = "\n".join([
            u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
            for s in r4strings
        ])
        g.update(r4)
        values = set()
        for v in g.objects(michel, says):
            values.add(unicode(v))
        # Expected values: quotes stripped, backslash-escapes undone.
        self.assertEquals(values,
                          set([re.sub(ur"\\(.)", ur"\1",
                                      re.sub(ur"^'''|'''$|^'|'$|" + ur'^"""|"""$|^"|"$', ur"", s))
                               for s in r4strings]))
def graph_plan(plan, fountain, agp):
    """Build an RDF representation (a ConjunctiveGraph using the AGORA
    vocabulary) of a search ``plan``.

    :param plan: dict with keys 'prefixes' (prefix -> namespace) and 'plan'
        (a list of per-triple-pattern entries with 'paths', 'pattern',
        'hints' and 'cycles').
    :param fountain: object exposing ``get_type(qname)`` returning a dict
        with 'super'/'sub' type lists; also passed to ``_type_subtree``.
    :param agp: passed to ``_extract_roots`` to obtain the AGP root subjects.
    :return: the populated ``plan_graph`` (ConjunctiveGraph).

    NOTE(review): this is Python 2 code — ``m.update(s)`` with str seeds,
    ``.encode('base64')`` on bytes, and ``filter``/``map`` results used as
    lists; confirm before running on Python 3.
    """

    def extract_cycle_roots():
        # Map each described cycle id to the full subtree of types reachable
        # from its declared expected types.
        c_roots = {}
        for c_id, c_node in described_cycles.items():
            c_root_types = set({})
            for crt in plan_graph.objects(c_node, AGORA.expectedType):
                crt_qname = plan_graph.qname(crt)
                c_root_types.update(_type_subtree(fountain, crt_qname))
            c_roots[c_id] = c_root_types
        return c_roots

    def inc_tree_length(tree, l):
        # Accumulate the number of steps contributed to a search tree.
        if tree not in tree_lengths:
            tree_lengths[tree] = 0
        tree_lengths[tree] += l

    def add_variable(p_node, vid, subject=True):
        # Attach a ?var as the subject (or object) of a pattern node.
        sub_node = BNode(str(vid).replace('?', 'var_'))
        if subject:
            plan_graph.add((p_node, AGORA.subject, sub_node))
        else:
            plan_graph.add((p_node, AGORA.object, sub_node))
        plan_graph.set((sub_node, RDF.type, AGORA.Variable))
        plan_graph.set((sub_node, RDFS.label, Literal(str(vid), datatype=XSD.string)))

    def describe_cycle(cycle_id, cg):
        # Emit the chain of steps of one cycle into its own named context;
        # returns the cycle's root BNode.
        c_node = BNode('cycle{}'.format(cycle_id))
        cg = cg.get_context(c_node)  # rebinds cg to the cycle's own context
        cg.add((c_node, RDF.type, AGORA.Cycle))
        previous_node = c_node
        c_steps = cycles[cycle_id]
        cycle_type = c_steps[0].get('type')
        for et in _type_subtree(fountain, cycle_type):
            cg.add((c_node, AGORA.expectedType, __extend_uri(prefixes, et)))
        for j, step in enumerate(c_steps):
            prop = step.get('property')
            b_node = BNode(previous_node.n3() + '/' + prop)
            cg.add((b_node, AGORA.onProperty, __extend_uri(prefixes, prop)))
            c_expected_type = step.get('type')
            cg.add((b_node, AGORA.expectedType, __extend_uri(prefixes, c_expected_type)))
            cg.add((previous_node, AGORA.next, b_node))
            previous_node = b_node
        return c_node

    def is_extensible(node, node_patterns):
        # A node is extensible iff every subject bound by its (and its
        # siblings') patterns is an AGP root. Reads the enclosing loop
        # variable ``res`` — only valid while that loop runs.
        extensible = True
        near_patterns = node_patterns.copy()
        for prev in tree_graph.subjects(AGORA.next, node):
            for sib_node in tree_graph.objects(prev, AGORA.next):
                if sib_node != res.n:
                    near_patterns.update(set(tree_graph.objects(sib_node, AGORA.byPattern)))

        subjects = set()
        for p_node in near_patterns:
            p_subject = list(plan_graph.objects(p_node, AGORA.subject)).pop()
            if not isinstance(p_subject, URIRef):
                # Variable subject: use its label instead of the BNode id.
                subject_str = list(plan_graph.objects(p_subject, RDFS.label)).pop().toPython()
            else:
                subject_str = str(p_subject)
            subjects.add(subject_str)

        if subjects and set.difference(subjects, roots):
            extensible = False

        return extensible

    def enrich_type_patterns(node_patterns):
        # Propagate expected-type subtrees onto the current result node
        # (``res.n`` from the enclosing loop) for each of its patterns.
        for p_node in node_patterns:
            p_pred = list(plan_graph.objects(p_node, AGORA.predicate)).pop()
            if p_pred == RDF.type:
                p_type = list(plan_graph.objects(p_node, AGORA.object)).pop()
                if isinstance(p_type, URIRef):
                    for et in [et for et in expected_types if et == p_type]:
                        q_expected_types = _type_subtree(fountain, tree_graph.qname(et))
                        for et_q in q_expected_types:
                            tree_graph.add((res.n, AGORA.expectedType, __extend_uri(prefixes, et_q)))
            else:
                for et in expected_types:
                    q_expected_types = _type_subtree(fountain, tree_graph.qname(et))
                    for et_q in q_expected_types:
                        tree_graph.add((res.n, AGORA.expectedType, __extend_uri(prefixes, et_q)))

    def apply_cycle_extensions(c_roots, node_types):
        # Link nodes to cycles whose root types they share; drop cycle
        # contexts that extend nothing.
        for c_id, root_types in c_roots.items():
            found_extension = False
            for n, expected in node_types.items():
                if set.intersection(set(root_types), set(expected)):
                    tree_graph.add((n, AGORA.isCycleStartOf, described_cycles[c_id]))
                    found_extension = True

            if not found_extension:
                plan_graph.remove_context(plan_graph.get_context(described_cycles[c_id]))

    def include_path(elm, p_seeds, p_steps, cycles, check):
        # Materialize one path as a SearchTree chain rooted at a BNode
        # derived from the md5 of the seed set.
        m = hashlib.md5()
        for s in p_seeds:
            m.update(s)
        elm_uri = __extend_uri(prefixes, elm)
        b_tree = BNode(m.digest().encode('base64').strip())
        s_trees.add(b_tree)
        tree_graph.set((b_tree, RDF.type, AGORA.SearchTree))
        tree_graph.add((b_tree, AGORA.fromType, elm_uri))
        for seed in p_seeds:
            tree_graph.add((b_tree, AGORA.hasSeed, URIRef(seed)))
        # Describe any cycles not yet emitted and link the tree to them.
        for cycle_id in filter(lambda x: x not in described_cycles.keys(), cycles):
            c_node = describe_cycle(cycle_id, plan_graph)
            described_cycles[cycle_id] = c_node
            plan_graph.get_context(c_node).add((b_tree, AGORA.goesThroughCycle, c_node))

        previous_node = b_tree
        inc_tree_length(b_tree, len(p_steps))

        root_index = -1
        pp = []
        for j, step in enumerate(p_steps):
            prop = step.get('property')
            pp.append(prop)
            path_root = step.get('root', None)
            if path_root and root_index < 0:
                root_index = j
            # NOTE(review): indentation of the two lines below reconstructed
            # from collapsed source — assumed to run every iteration; confirm.
            base_id = path_root or b_tree
            base_id += '/'
            if j < len(p_steps) - 1 or (pattern[1] == RDF.type and isinstance(pattern[2], URIRef)):
                b_node = BNode(base_id + '/'.join(pp))
                tree_graph.add((b_node, AGORA.onProperty, __extend_uri(prefixes, prop)))
            else:
                b_node = BNode(base_id + '/'.join(pp))
                tree_graph.add((b_node, AGORA.expectedType, __extend_uri(prefixes, step.get('type'))))
            tree_graph.add((previous_node, AGORA.next, b_node))
            previous_node = b_node

        p_node = _get_pattern_node(pattern, patterns)
        if pattern[1] == RDF.type and isinstance(pattern[2], URIRef):
            # rdf:type patterns with a concrete class get a dedicated
            # expected-type node at the end of the chain.
            b_id = '{}_{}_{}'.format(pattern[0].n3(plan_graph.namespace_manager),
                                     pattern[1].n3(plan_graph.namespace_manager),
                                     pattern[2].n3(plan_graph.namespace_manager))
            b_node = BNode(b_id)
            tree_graph.add((b_node, AGORA.expectedType, pattern[2]))
            tree_graph.add((previous_node, AGORA.next, b_node))
            tree_graph.add((b_node, AGORA.byPattern, p_node))
            if check:
                tree_graph.add((b_node, AGORA.checkType, Literal(check)))
        else:
            tree_graph.add((previous_node, AGORA.byPattern, p_node))

    # --- main body -------------------------------------------------------
    plan_graph = ConjunctiveGraph()
    plan_graph.bind('agora', AGORA)
    prefixes = plan.get('prefixes')
    ef_plan = plan.get('plan')
    tree_lengths = {}
    s_trees = set([])
    patterns = {}
    described_cycles = {}
    for (prefix, u) in prefixes.items():
        plan_graph.bind(prefix, u)

    tree_graph = plan_graph.get_context('trees')

    for i, tp_plan in enumerate(ef_plan):
        paths = tp_plan.get('paths')
        pattern = tp_plan.get('pattern')
        hints = tp_plan.get('hints')
        cycles = {}
        # Index cycles by id; multi-step cycles also get a reversed variant.
        for c in tp_plan.get('cycles'):
            cid = str(c['cycle'])
            c_steps = c['steps']
            cycles[cid] = c_steps
            if len(c_steps) > 1:
                cycles[cid + 'r'] = list(reversed(c_steps))
        context = BNode('space_{}'.format(tp_plan.get('context')))
        for path in paths:
            steps = path.get('steps')
            seeds = path.get('seeds')
            check = path.get('check', None)
            ty = None
            # Seed-only path: type comes from the pattern object;
            # otherwise from the first step.
            if not len(steps) and len(seeds):
                ty = pattern[2]
            elif len(steps):
                ty = steps[0].get('type')
            if ty:
                include_path(ty, seeds, steps, cycles, check)

        for t in s_trees:
            tree_graph.set((t, AGORA.length, Literal(tree_lengths.get(t, 0), datatype=XSD.integer)))

        pattern_node = _get_pattern_node(pattern, patterns)
        plan_graph.add((context, AGORA.definedBy, pattern_node))
        plan_graph.set((context, RDF.type, AGORA.SearchSpace))
        plan_graph.add((pattern_node, RDF.type, AGORA.TriplePattern))
        plan_graph.add((pattern_node, RDFS.label, Literal(pattern_node.toPython())))
        (sub, pred, obj) = pattern

        if isinstance(sub, BNode):
            add_variable(pattern_node, str(sub))
        elif isinstance(sub, URIRef):
            plan_graph.add((pattern_node, AGORA.subject, sub))

        if isinstance(obj, BNode):
            add_variable(pattern_node, str(obj), subject=False)
        elif isinstance(obj, Literal):
            node = BNode(str(obj).replace(' ', '').replace(':', ''))
            plan_graph.add((pattern_node, AGORA.object, node))
            plan_graph.set((node, RDF.type, AGORA.Literal))
            plan_graph.set((node, AGORA.value, obj))
        else:
            plan_graph.add((pattern_node, AGORA.object, obj))

        plan_graph.add((pattern_node, AGORA.predicate, pred))
        if pred == RDF.type:
            if 'check' in hints:
                plan_graph.add((pattern_node, AGORA.checkType,
                                Literal(hints['check'], datatype=XSD.boolean)))

    # Post-process: compute expected types per node, mark extensible nodes.
    expected_res = tree_graph.query("""SELECT DISTINCT ?n WHERE { ?n agora:expectedType ?type }""")
    node_types = {}
    roots = set(_extract_roots(agp))
    for res in expected_res:
        expected_types = list(tree_graph.objects(res.n, AGORA.expectedType))
        q_expected_types = set(map(lambda x: tree_graph.qname(x), expected_types))
        # Keep only the most specific types (drop any whose supertype is
        # also expected).
        q_expected_types = filter(
            lambda x: not set.intersection(set(fountain.get_type(x)['super']), q_expected_types),
            q_expected_types)
        type_hierarchy = len(q_expected_types) == 1
        tree_graph.add((res.n, AGORA.typeHierarchy, Literal(type_hierarchy)))
        direct_patterns = set(tree_graph.objects(res.n, AGORA.byPattern))
        enrich_type_patterns(direct_patterns)
        if is_extensible(res.n, direct_patterns):
            node_types[res.n] = q_expected_types

    c_roots = extract_cycle_roots()
    apply_cycle_extensions(c_roots, node_types)

    for t in s_trees:
        tree_graph.set((t, AGORA.length, Literal(tree_lengths.get(t, 0), datatype=XSD.integer)))
        from_types = set([plan_graph.qname(x) for x in plan_graph.objects(t, AGORA.fromType)])
        # Keep only the most general fromTypes (drop any whose subtype is
        # also present).
        def_from_types = filter(lambda x: not set.intersection(set(fountain.get_type(x)['sub']), from_types), from_types)
        for dft in def_from_types:
            tree_graph.set((t, AGORA.fromType, __extend_uri(prefixes, dft)))

    # Promote URI pattern subjects to tree seeds.
    for res in plan_graph.query("""SELECT ?tree ?sub ?nxt WHERE { ?tree a agora:SearchTree ; agora:next ?nxt . ?nxt agora:byPattern [ agora:subject ?sub ] }"""):
        if isinstance(res.sub, URIRef):
            plan_graph.set((res.tree, AGORA.hasSeed, res.sub))
            # NOTE(review): assumed inside the isinstance check — confirm.
            plan_graph.remove((res.nxt, AGORA.isCycleStartOf, None))

    _inform_on_inverses(plan_graph, fountain, prefixes)

    return plan_graph
obj = None log.error(u'%s' % e) # build the rdf graph g = Graph() if model == 'tag': # This is a temporairy work around. thess.economie-solidaire.fr is not # yet responding. TODO we have to deal with contexts as tags come thesaurus try: # TEMP hard coded thessEndPoint = 'http://localhost:8080/openrdf-workbench/repositories/thessRepository/query' graph = ConjunctiveGraph('SPARQLStore') graph.open(thessEndPoint, False) ctx = 'http://%s' % str(Site.objects.get_current().domain) ctx = rdflib.term.URIRef(ctx) localg = graph.get_context(ctx) except Exception, e: log.error(u'%s' % e) gen = localg.triples((rdflib.term.URIRef(uri), None, None)) try: while True: g.add(gen.next()) except: pass gen = localg.triples((None, None, rdflib.term.URIRef(uri))) try: while True: g.add(gen.next()) except: pass else:
class ContextTestCase(unittest.TestCase):
    """Exercises context (named graph) behaviour of an rdflib store:
    adding/removing triples in specific contexts, union-length semantics,
    and triple/accessor queries with and without a context.

    Subclasses override ``store`` to run the same suite against other
    store implementations.
    """
    # Store plugin name under test; "default" is the in-memory store.
    store = "default"
    # Marker consumed by the test runner (kept as-is).
    slow = True
    # Filesystem location backing the store; set in setUp.
    tmppath = None

    def setUp(self):
        """Create and open a fresh store; skip if its plugin is unavailable."""
        try:
            self.graph = ConjunctiveGraph(store=self.store)
        except ImportError:
            raise SkipTest("Dependencies for store '%s' not available!" % self.store)
        if self.store == "SQLite":
            _, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
        else:
            self.tmppath = mkdtemp()
        self.graph.open(self.tmppath, create=True)
        self.michel = URIRef(u"michel")
        self.tarek = URIRef(u"tarek")
        self.bob = URIRef(u"bob")
        self.likes = URIRef(u"likes")
        self.hates = URIRef(u"hates")
        self.pizza = URIRef(u"pizza")
        self.cheese = URIRef(u"cheese")
        self.c1 = URIRef(u"context-1")
        self.c2 = URIRef(u"context-2")

        # delete the graph for each test!
        self.graph.remove((None, None, None))

    def tearDown(self):
        """Close the store and delete its backing file/directory."""
        self.graph.close()
        if os.path.isdir(self.tmppath):
            shutil.rmtree(self.tmppath)
        else:
            os.remove(self.tmppath)

    def addStuff(self):
        """Add the seven fixture triples to context c1."""
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        graph = Graph(self.graph.store, c1)

        graph.add((tarek, likes, pizza))
        graph.add((tarek, likes, cheese))
        graph.add((michel, likes, pizza))
        graph.add((michel, likes, cheese))
        graph.add((bob, likes, cheese))
        graph.add((bob, hates, pizza))
        graph.add((bob, hates, michel))  # gasp!

    def removeStuff(self):
        """Remove the seven fixture triples from context c1."""
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        graph = Graph(self.graph.store, c1)

        graph.remove((tarek, likes, pizza))
        graph.remove((tarek, likes, cheese))
        graph.remove((michel, likes, pizza))
        graph.remove((michel, likes, cheese))
        graph.remove((bob, likes, cheese))
        graph.remove((bob, hates, pizza))
        graph.remove((bob, hates, michel))  # gasp!

    def addStuffInMultipleContexts(self):
        """Add the same triple to the default context, c1 and c2."""
        c1 = self.c1
        c2 = self.c2
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        # add to default context
        self.graph.add(triple)
        # add to context 1
        graph = Graph(self.graph.store, c1)
        graph.add(triple)
        # add to context 2
        graph = Graph(self.graph.store, c2)
        graph.add(triple)

    def testConjunction(self):
        """Union length equals a single context's length when all triples
        are (de-duplicated) copies of each other plus one extra."""
        if self.store == "SQLite":
            raise SkipTest("Skipping known issue with __len__")
        self.addStuffInMultipleContexts()
        triple = (self.pizza, self.likes, self.pizza)
        # add to context 1
        graph = Graph(self.graph.store, self.c1)
        graph.add(triple)
        self.assertEqual(len(self.graph), len(graph))

    def testAdd(self):
        self.addStuff()

    def testRemove(self):
        self.addStuff()
        self.removeStuff()

    def testLenInOneContext(self):
        """len() of a context counts only that context's triples."""
        c1 = self.c1
        # make sure context is empty
        self.graph.remove_context(self.graph.get_context(c1))
        graph = Graph(self.graph.store, c1)
        oldLen = len(self.graph)

        for i in range(0, 10):
            graph.add((BNode(), self.hates, self.hates))
        self.assertEqual(len(graph), oldLen + 10)
        self.assertEqual(len(self.graph.get_context(c1)), oldLen + 10)
        self.graph.remove_context(self.graph.get_context(c1))
        self.assertEqual(len(self.graph), oldLen)
        self.assertEqual(len(graph), 0)

    def testLenInMultipleContexts(self):
        """The union graph de-duplicates the same triple across contexts."""
        if self.store == "SQLite":
            raise SkipTest("Skipping known issue with __len__")
        oldLen = len(self.graph)
        self.addStuffInMultipleContexts()

        # addStuffInMultipleContexts is adding the same triple to
        # three different contexts. So it's only + 1
        self.assertEqual(len(self.graph), oldLen + 1)

        graph = Graph(self.graph.store, self.c1)
        self.assertEqual(len(graph), oldLen + 1)

    def testRemoveInMultipleContexts(self):
        """Removing from individual contexts leaves the triple visible until
        it is removed without a context."""
        c1 = self.c1
        c2 = self.c2
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        self.addStuffInMultipleContexts()

        # triple should be still in store after removing it from c1 + c2
        self.assertTrue(triple in self.graph)
        graph = Graph(self.graph.store, c1)
        graph.remove(triple)
        self.assertTrue(triple in self.graph)
        graph = Graph(self.graph.store, c2)
        graph.remove(triple)
        self.assertTrue(triple in self.graph)
        self.graph.remove(triple)
        # now gone!
        self.assertTrue(triple not in self.graph)

        # add again and see if remove without context removes all triples!
        self.addStuffInMultipleContexts()
        self.graph.remove(triple)
        self.assertTrue(triple not in self.graph)

    def testContexts(self):
        """contexts() lists all contexts; contexts(triple) lists the
        contexts containing that triple."""
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        self.addStuffInMultipleContexts()

        def cid(c):
            # context -> its identifier URIRef
            return c.identifier

        self.assertTrue(self.c1 in map(cid, self.graph.contexts()))
        self.assertTrue(self.c2 in map(cid, self.graph.contexts()))

        contextList = list(map(cid, list(self.graph.contexts(triple))))
        self.assertTrue(self.c1 in contextList, (self.c1, contextList))
        self.assertTrue(self.c2 in contextList, (self.c2, contextList))

    def testRemoveContext(self):
        """remove_context drops the context and all its triples."""
        c1 = self.c1

        self.addStuffInMultipleContexts()
        self.assertEqual(len(Graph(self.graph.store, c1)), 1)
        self.assertEqual(len(self.graph.get_context(c1)), 1)

        self.graph.remove_context(self.graph.get_context(c1))
        self.assertTrue(self.c1 not in self.graph.contexts())

    def testRemoveAny(self):
        """Removing the wildcard triple empties the whole store."""
        Any = None
        self.addStuffInMultipleContexts()
        self.graph.remove((Any, Any, Any))
        self.assertEqual(len(self.graph), 0)

    def testTriples(self):
        """triples() with each combination of bound/unbound terms returns
        the same counts with and without a context (all fixture data lives
        in c1), and the Graph accessor helpers agree."""
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        asserte = self.assertEqual
        triples = self.graph.triples
        graph = self.graph
        c1graph = Graph(self.graph.store, c1)
        c1triples = c1graph.triples
        Any = None

        self.addStuff()

        # unbound subjects with context
        asserte(len(list(c1triples((Any, likes, pizza)))), 2)
        asserte(len(list(c1triples((Any, hates, pizza)))), 1)
        asserte(len(list(c1triples((Any, likes, cheese)))), 3)
        asserte(len(list(c1triples((Any, hates, cheese)))), 0)

        # unbound subjects without context, same results!
        asserte(len(list(triples((Any, likes, pizza)))), 2)
        asserte(len(list(triples((Any, hates, pizza)))), 1)
        asserte(len(list(triples((Any, likes, cheese)))), 3)
        asserte(len(list(triples((Any, hates, cheese)))), 0)

        # unbound objects with context
        asserte(len(list(c1triples((michel, likes, Any)))), 2)
        asserte(len(list(c1triples((tarek, likes, Any)))), 2)
        asserte(len(list(c1triples((bob, hates, Any)))), 2)
        asserte(len(list(c1triples((bob, likes, Any)))), 1)

        # unbound objects without context, same results!
        asserte(len(list(triples((michel, likes, Any)))), 2)
        asserte(len(list(triples((tarek, likes, Any)))), 2)
        asserte(len(list(triples((bob, hates, Any)))), 2)
        asserte(len(list(triples((bob, likes, Any)))), 1)

        # unbound predicates with context
        asserte(len(list(c1triples((michel, Any, cheese)))), 1)
        asserte(len(list(c1triples((tarek, Any, cheese)))), 1)
        asserte(len(list(c1triples((bob, Any, pizza)))), 1)
        asserte(len(list(c1triples((bob, Any, michel)))), 1)

        # unbound predicates without context, same results!
        asserte(len(list(triples((michel, Any, cheese)))), 1)
        asserte(len(list(triples((tarek, Any, cheese)))), 1)
        asserte(len(list(triples((bob, Any, pizza)))), 1)
        asserte(len(list(triples((bob, Any, michel)))), 1)

        # unbound subject, objects with context
        asserte(len(list(c1triples((Any, hates, Any)))), 2)
        asserte(len(list(c1triples((Any, likes, Any)))), 5)

        # unbound subject, objects without context, same results!
        asserte(len(list(triples((Any, hates, Any)))), 2)
        asserte(len(list(triples((Any, likes, Any)))), 5)

        # unbound predicates, objects with context
        asserte(len(list(c1triples((michel, Any, Any)))), 2)
        asserte(len(list(c1triples((bob, Any, Any)))), 3)
        asserte(len(list(c1triples((tarek, Any, Any)))), 2)

        # unbound predicates, objects without context, same results!
        asserte(len(list(triples((michel, Any, Any)))), 2)
        asserte(len(list(triples((bob, Any, Any)))), 3)
        asserte(len(list(triples((tarek, Any, Any)))), 2)

        # unbound subjects, predicates with context
        asserte(len(list(c1triples((Any, Any, pizza)))), 3)
        asserte(len(list(c1triples((Any, Any, cheese)))), 3)
        asserte(len(list(c1triples((Any, Any, michel)))), 1)

        # unbound subjects, predicates without context, same results!
        asserte(len(list(triples((Any, Any, pizza)))), 3)
        asserte(len(list(triples((Any, Any, cheese)))), 3)
        asserte(len(list(triples((Any, Any, michel)))), 1)

        # all unbound with context
        asserte(len(list(c1triples((Any, Any, Any)))), 7)
        # all unbound without context, same result!
        asserte(len(list(triples((Any, Any, Any)))), 7)

        # The accessor helpers must agree whether asked via the union graph
        # or the c1 context graph.
        for c in [graph, self.graph.get_context(c1)]:
            # unbound subjects
            asserte(set(c.subjects(likes, pizza)), set((michel, tarek)))
            asserte(set(c.subjects(hates, pizza)), set((bob, )))
            asserte(set(c.subjects(likes, cheese)), set([tarek, bob, michel]))
            asserte(set(c.subjects(hates, cheese)), set())

            # unbound objects
            asserte(set(c.objects(michel, likes)), set([cheese, pizza]))
            asserte(set(c.objects(tarek, likes)), set([cheese, pizza]))
            asserte(set(c.objects(bob, hates)), set([michel, pizza]))
            asserte(set(c.objects(bob, likes)), set([cheese]))

            # unbound predicates
            asserte(set(c.predicates(michel, cheese)), set([likes]))
            asserte(set(c.predicates(tarek, cheese)), set([likes]))
            asserte(set(c.predicates(bob, pizza)), set([hates]))
            asserte(set(c.predicates(bob, michel)), set([hates]))

            asserte(set(c.subject_objects(hates)),
                    set([(bob, pizza), (bob, michel)]))
            asserte(
                set(c.subject_objects(likes)),
                set([
                    (tarek, cheese),
                    (michel, cheese),
                    (michel, pizza),
                    (bob, cheese),
                    (tarek, pizza),
                ]),
            )

            asserte(set(c.predicate_objects(michel)),
                    set([(likes, cheese), (likes, pizza)]))
            asserte(
                set(c.predicate_objects(bob)),
                set([(likes, cheese), (hates, pizza), (hates, michel)]),
            )
            asserte(set(c.predicate_objects(tarek)),
                    set([(likes, cheese), (likes, pizza)]))

            asserte(
                set(c.subject_predicates(pizza)),
                set([(bob, hates), (tarek, likes), (michel, likes)]),
            )
            asserte(
                set(c.subject_predicates(cheese)),
                set([(bob, likes), (tarek, likes), (michel, likes)]),
            )
            asserte(set(c.subject_predicates(michel)), set([(bob, hates)]))

            asserte(
                set(c),
                set([
                    (bob, hates, michel),
                    (bob, likes, cheese),
                    (tarek, likes, pizza),
                    (michel, likes, pizza),
                    (michel, likes, cheese),
                    (bob, hates, pizza),
                    (tarek, likes, cheese),
                ]),
            )

        # remove stuff and make sure the graph is empty again
        self.removeStuff()
        asserte(len(list(c1triples((Any, Any, Any)))), 0)
        asserte(len(list(triples((Any, Any, Any)))), 0)
class NQuadsParser(W3CNTriplesParser):
    """N-Quads parser: N-Triples extended with an optional fourth term
    naming the context (graph) each statement belongs to."""

    def parse(self, inputsource, sink, bnode_context=None, **kwargs):
        """
        Parse inputsource as an N-Quads file.

        :type inputsource: `rdflib.parser.InputSource`
        :param inputsource: the source of N-Quads-formatted data
        :type sink: `rdflib.graph.Graph`
        :param sink: where to send parsed triples
        :type bnode_context: `dict`, optional
        :param bnode_context: a dict mapping blank node identifiers to
                              `~rdflib.term.BNode` instances.
                              See `.NTriplesParser.parse`
        """
        # Quads only make sense in a store that keeps track of contexts.
        assert sink.store.context_aware, ("NQuadsParser must be given"
                                          " a context aware store.")
        self.sink = ConjunctiveGraph(store=sink.store, identifier=sink.identifier)

        # Prefer a character stream; fall back to decoding the byte stream.
        stream = inputsource.getCharacterStream()
        if not stream:
            stream = inputsource.getByteStream()
            stream = getreader("utf-8")(stream)

        if not hasattr(stream, "read"):
            raise ParseError("Item to parse must be a file-like object.")

        self.file = stream
        self.buffer = ""
        while True:
            raw = self.readline()
            self.line = raw
            if raw is None:
                break
            try:
                self.parseline(bnode_context)
            except ParseError as msg:
                raise ParseError("Invalid line (%s):\n%r" % (msg, raw))

        return self.sink

    def parseline(self, bnode_context=None):
        """Consume one N-Quads statement from ``self.line`` and add it to
        the context it names (or the sink's default context)."""
        self.eat(r_wspace)
        if not self.line or self.line.startswith("#"):
            # Blank line or comment: nothing to do.
            return

        subject = self.subject(bnode_context)
        self.eat(r_wspace)

        predicate = self.predicate()
        self.eat(r_wspace)

        obj = self.object(bnode_context)
        self.eat(r_wspace)

        # Fourth term is optional; default to the sink's own identifier.
        context = (
            self.uriref()
            or self.nodeid(bnode_context)
            or self.sink.identifier
        )
        self.eat(r_tail)

        if self.line:
            raise ParseError("Trailing garbage")
        # Must have a context aware store - add on a normal Graph
        # discards anything where the ctx != graph.identifier
        self.sink.get_context(context).add((subject, predicate, obj))
class TestSparql11(unittest.TestCase):
    """Integration tests against a local SPARQL 1.1 endpoint via
    ``SPARQLUpdateStore`` (Fuseki at localhost:3030 by default).

    Fix: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    is replaced by ``assertEqual`` throughout; assertions are unchanged.
    """

    def setUp(self):
        self.longMessage = True
        self.graph = ConjunctiveGraph('SPARQLUpdateStore')

        root = "http://localhost:3030/ukpp/"
        self.graph.open((root + "sparql", root + "update"))

        # clean out the store so each test starts from an empty dataset
        for c in self.graph.contexts():
            c.remove((None, None, None))

    def tearDown(self):
        self.graph.close()

    def testSimpleGraph(self):
        """Adds/removes/queries within one named graph must not leak into
        another named graph."""
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g.add((bob, likes, pizza))
        g.add((bob, likes, cheese))

        g2 = self.graph.get_context(othergraphuri)
        g2.add((michel, likes, pizza))

        self.assertEqual(3, len(g), 'graph contains 3 triples')
        self.assertEqual(1, len(g2), 'other graph contains 1 triple')

        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(2, len(list(r)), "two people like pizza")

        r = g.triples((None, likes, pizza))
        self.assertEqual(2, len(list(r)), "two people like pizza")

        # Test initBindings
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                    initBindings={'s': tarek})
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, pizza))
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, cheese))
        self.assertEqual(0, len(list(r)), "tarek doesn't like cheese")

        g2.add((tarek, likes, pizza))
        g.remove((tarek, likes, pizza))
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(1, len(list(r)), "only bob likes pizza")

    def testConjunctiveDefault(self):
        """Queries on the ConjunctiveGraph see the union of all named
        graphs (requires union-default-graph on the endpoint)."""
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g2 = self.graph.get_context(othergraphuri)
        g2.add((bob, likes, pizza))
        g.add((tarek, hates, cheese))

        self.assertEqual(2, len(g), 'graph contains 2 triples')
        self.assertEqual(3, len(self.graph),
                         'default union graph contains three triples')

        r = self.graph.query(
            "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(2, len(list(r)), "two people like pizza")

        r = self.graph.query(
            "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
            initBindings={'s': tarek})
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, pizza))
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, cheese))
        self.assertEqual(0, len(list(r)), "tarek doesn't like cheese")

        g2.remove((bob, likes, pizza))

        r = self.graph.query(
            "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(1, len(list(r)), "only tarek likes pizza")
class MemoryStore:
    """A class that combines and synchronises n-quad files and an in-memory
    quad store.

    This class contains information about all graphs, their corresponding
    URIs and paths in the file system. For every Graph (context of
    Quad-Store) exists a FileReference object (n-quad) that enables
    versioning (with git) and persistence.
    """

    def __init__(self):
        """Initialize a new MemoryStore instance."""
        # Keep the logger on the instance so other methods (e.g. addfile)
        # can use it; previously it was a local variable of __init__ and
        # addfile raised NameError on the bare name `logger`.
        self.logger = logging.getLogger('quit.core.MemoryStore')
        self.logger.debug('Create an instance of MemoryStore')
        self.store = ConjunctiveGraph(identifier='default')

    def getgraphuris(self):
        """Method to get all available named graphs.

        Returns:
            A list containing all graph uris found in store.
        """
        graphs = []
        for graph in self.store.contexts():
            # Skip anonymous (BNode) contexts and the default graph.
            if isinstance(graph, BNode) or str(graph.identifier) == 'default':
                continue
            graphs.append(graph.identifier)
        return graphs

    def getgraphcontent(self, graphuri):
        """Get the serialized content of a named graph.

        Args:
            graphuri: The URI of a named graph.
        Returns:
            content: A list of strings where each string is a quad.
        """
        context = self.store.get_context(URIRef(graphuri))
        triplestring = context.serialize(format='nt').decode('UTF-8')

        # Since we have triples here, we transform them to quads by adding
        # the graphuri.
        # TODO This might cause problems if ' .\n' will be part of a literal.
        #   Maybe a regex would be a better solution
        triplestring = triplestring.replace(' .\n', ' <' + graphuri + '> .\n')

        # Drop empty lines instead of list.remove(''), which raised
        # ValueError whenever the serialization contained no blank line.
        return [line for line in triplestring.splitlines() if line]

    def getstoreobject(self):
        """Get the conjunctive graph object.

        Returns:
            graph: The in-memory ConjunctiveGraph instance.
        """
        # Fix: the method previously had no body and returned None despite
        # its documented contract.
        return self.store

    def graphexists(self, graphuri):
        """Ask if a named graph FileReference object for a named graph URI.

        Args:
            graphuri: A string containing the URI of a named graph

        Returns:
            True or False
        """
        # NOTE(review): rdflib's get_context() returns a Graph object even
        # for unknown identifiers, so this check may always be True -- kept
        # as-is to preserve the observable interface; verify upstream.
        if self.store.get_context(URIRef(graphuri)) is None:
            return False
        else:
            return True

    def addfile(self, filename, serialization):
        """Add a file to the store.

        Args:
            filename: A String for the path to the file.
            serialization: A String containg the RDF format
        Raises:
            ValueError if the given file can't be parsed as nquads.
        """
        try:
            self.store.parse(source=filename, format=serialization)
        except Exception as e:
            self.logger.debug(e)
            # Fix: the original concatenated a literal with an already-
            # formatted string ("a {}" + "b {}".format(x, y)), so the first
            # placeholder was printed literally and the second argument was
            # silently dropped. Format both values in one call.
            self.logger.debug(
                "Could not import file: {}. "
                "Make sure the file exists and contains data in {}".format(
                    filename, serialization))

    def addquads(self, quads):
        """Add quads to the MemoryStore.

        Args:
            quads: Rdflib.quads that should be added to the MemoryStore.
        """
        self.store.addN(quads)
        self.store.commit()

    def query(self, querystring):
        """Execute a SPARQL select query.

        Args:
            querystring: A string containing a SPARQL ask or select query.
        Returns:
            The SPARQL result set
        """
        return self.store.query(querystring)

    def update(self, querystring, versioning=True):
        """Execute a SPARQL update query and update the store.

        This method executes a SPARQL update query and updates and commits
        all affected files.

        Args:
            querystring: A string containing a SPARQL upate query.
            versioning: When True (default), also evaluate the update to
                collect the affected actions for versioning.
        Returns:
            The evaluated actions when versioning is enabled, else None.
        """
        # methods of rdflib ConjunciveGraph
        if versioning:
            actions = evalUpdate(self.store, querystring)
            self.store.update(querystring)
            return actions
        self.store.update(querystring)
        return None

    def removequads(self, quads):
        """Remove quads from the MemoryStore.

        Args:
            quads: Rdflib.quads that should be removed to the MemoryStore.
        """
        self.store.remove(quads)
        self.store.commit()

    def exit(self):
        """Execute actions on API shutdown."""
        return
# -*- coding: utf-8 -*- """ Test property inheritance, isinstance() and issubclass() """ from unittest import TestCase from rdflib import ConjunctiveGraph, URIRef from oldman import ClientResourceManager, parse_graph_safely, SPARQLDataStore default_graph = ConjunctiveGraph() schema_graph = default_graph.get_context(URIRef("http://localhost/schema")) data_graph = default_graph.get_context(URIRef("http://localhost/data")) EXAMPLE = "http://localhost/vocab#" schema_ttl = """ @prefix ex: <%s> . @prefix hydra: <http://www.w3.org/ns/hydra/core#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . ex:GrandParentClass a hydra:Class ; hydra:supportedProperty [ hydra:property ex:oldProperty ] . ex:ParentClass a hydra:Class ; rdfs:subClassOf ex:GrandParentClass ; hydra:supportedProperty [ hydra:property ex:mediumProperty ] .
class ContextTestCase(unittest.TestCase):
    """Exercises context (named graph) behaviour of a ConjunctiveGraph
    backed by FastStore: adding/removing triples per context, context
    enumeration/removal, and triple-pattern matching with and without a
    bound context."""

    def setUp(self):
        self.store = FastStore()
        self.graph = ConjunctiveGraph(self.store)
        # Fixture terms used throughout the tests.
        self.michel = URIRef(u'michel')
        self.tarek = URIRef(u'tarek')
        self.bob = URIRef(u'bob')
        self.likes = URIRef(u'likes')
        self.hates = URIRef(u'hates')
        self.pizza = URIRef(u'pizza')
        self.cheese = URIRef(u'cheese')
        # Two named-graph identifiers.
        self.c1 = URIRef(u'context-1')
        self.c2 = URIRef(u'context-2')

    def addStuff(self):
        # Populate context c1 with 7 triples (counts below depend on these).
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        graph = Graph(self.graph.store, c1)

        graph.add((tarek, likes, pizza))
        graph.add((tarek, likes, cheese))
        graph.add((michel, likes, pizza))
        graph.add((michel, likes, cheese))
        graph.add((bob, likes, cheese))
        graph.add((bob, hates, pizza))
        graph.add((bob, hates, michel))  # gasp!

    def removeStuff(self):
        # Exact inverse of addStuff: empties context c1 again.
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        graph = Graph(self.graph.store, c1)

        graph.remove((tarek, likes, pizza))
        graph.remove((tarek, likes, cheese))
        graph.remove((michel, likes, pizza))
        graph.remove((michel, likes, cheese))
        graph.remove((bob, likes, cheese))
        graph.remove((bob, hates, pizza))
        graph.remove((bob, hates, michel))  # gasp!

    def addStuffInMultipleContexts(self):
        # Adds the SAME triple to the default context, c1 and c2;
        # the union graph therefore only grows by one triple.
        c1 = self.c1
        c2 = self.c2
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        # add to default context
        self.graph.add(triple)
        # add to context 1
        graph = Graph(self.graph.store, c1)
        graph.add(triple)
        # add to context 2
        graph = Graph(self.graph.store, c2)
        graph.add(triple)

    def testConjunction(self):
        self.addStuffInMultipleContexts()
        triple = (self.pizza, self.likes, self.pizza)
        # add to context 1
        contextualizedGraph = Graph(self.graph.store, self.c1)
        contextualizedGraph.add(triple)
        print(self.store.statements())
        # Union graph and c1 each hold the same 2 distinct triples.
        self.assertEqual(len(self.graph), len(contextualizedGraph))

    def testAdd(self):
        self.addStuff()

    def testRemove(self):
        self.addStuff()
        self.removeStuff()

    def testLenInOneContext(self):
        c1 = self.c1
        # make sure context is empty
        self.graph.remove_context(self.graph.get_context(c1))
        contextualizedGraph = Graph(self.graph.store, c1)
        initialLen = len(self.graph)

        for i in range(0, 10):
            contextualizedGraph.add((BNode(), self.hates, self.hates))

        self.assertEqual(len(contextualizedGraph), initialLen + 10)
        self.assertEqual(len(self.graph.get_context(c1)), initialLen + 10)

        self.graph.remove_context(self.graph.get_context(c1))

        self.assertEqual(len(self.graph), initialLen)
        self.assertEqual(len(contextualizedGraph), 0)

    def testLenInMultipleContexts(self):
        oldLen = len(self.graph)
        self.addStuffInMultipleContexts()

        # addStuffInMultipleContexts is adding the same triple to
        # three different contexts. So it's only + 1
        self.assertEqual(len(self.graph), oldLen + 1)

        graph = Graph(self.graph.store, self.c1)
        self.assertEqual(len(graph), oldLen + 1)

    def testRemoveInMultipleContexts(self):
        c1 = self.c1
        c2 = self.c2
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        self.addStuffInMultipleContexts()

        # triple should be still in store after removing it from c1 + c2
        self.assertTrue(triple in self.graph)
        graph = Graph(self.graph.store, c1)
        graph.remove(triple)
        self.assertTrue(triple in self.graph)
        graph = Graph(self.graph.store, c2)
        graph.remove(triple)
        self.assertTrue(triple in self.graph)

        # now fully remove
        self.graph.remove(triple)
        self.assertTrue(triple not in self.graph)

        # add again and see if remove without context removes all triples!
        self.addStuffInMultipleContexts()
        self.graph.remove(triple)
        self.assertTrue(triple not in self.graph)

    def testContexts(self):
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        self.addStuffInMultipleContexts()

        def cid(c):
            # Map a context Graph to its identifier for membership tests.
            return c.identifier
        self.assertTrue(self.c1 in map(cid, self.graph.contexts()))
        self.assertTrue(self.c2 in map(cid, self.graph.contexts()))

        contextList = list(map(cid, list(self.graph.contexts(triple))))
        self.assertTrue(self.c1 in contextList, (self.c1, contextList))
        self.assertTrue(self.c2 in contextList, (self.c2, contextList))

    def testRemoveContext(self):
        c1 = self.c1

        self.addStuffInMultipleContexts()
        self.assertEqual(len(Graph(self.graph.store, c1)), 1)
        self.assertEqual(len(self.graph.get_context(c1)), 1)

        self.graph.remove_context(self.graph.get_context(c1))
        self.assertTrue(self.c1 not in self.graph.contexts())

    def testRemoveAny(self):
        Any = None
        self.addStuffInMultipleContexts()
        self.graph.remove((Any, Any, Any))
        self.assertEqual(len(self.graph), 0)

    def testTriples(self):
        # Exhaustive pattern matching over the 7-triple fixture from
        # addStuff(); every count below is derived from that fixture.
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        asserte = self.assertEqual
        triples = self.graph.triples
        graph = self.graph
        c1graph = Graph(self.graph.store, c1)
        c1triples = c1graph.triples
        Any = None

        self.addStuff()

        # unbound subjects with context
        asserte(len(list(c1triples((Any, likes, pizza)))), 2)
        asserte(len(list(c1triples((Any, hates, pizza)))), 1)
        asserte(len(list(c1triples((Any, likes, cheese)))), 3)
        asserte(len(list(c1triples((Any, hates, cheese)))), 0)

        # unbound subjects without context, same results!
        asserte(len(list(triples((Any, likes, pizza)))), 2)
        asserte(len(list(triples((Any, hates, pizza)))), 1)
        asserte(len(list(triples((Any, likes, cheese)))), 3)
        asserte(len(list(triples((Any, hates, cheese)))), 0)

        # unbound objects with context
        asserte(len(list(c1triples((michel, likes, Any)))), 2)
        asserte(len(list(c1triples((tarek, likes, Any)))), 2)
        asserte(len(list(c1triples((bob, hates, Any)))), 2)
        asserte(len(list(c1triples((bob, likes, Any)))), 1)

        # unbound objects without context, same results!
        asserte(len(list(triples((michel, likes, Any)))), 2)
        asserte(len(list(triples((tarek, likes, Any)))), 2)
        asserte(len(list(triples((bob, hates, Any)))), 2)
        asserte(len(list(triples((bob, likes, Any)))), 1)

        # unbound predicates with context
        asserte(len(list(c1triples((michel, Any, cheese)))), 1)
        asserte(len(list(c1triples((tarek, Any, cheese)))), 1)
        asserte(len(list(c1triples((bob, Any, pizza)))), 1)
        asserte(len(list(c1triples((bob, Any, michel)))), 1)

        # unbound predicates without context, same results!
        asserte(len(list(triples((michel, Any, cheese)))), 1)
        asserte(len(list(triples((tarek, Any, cheese)))), 1)
        asserte(len(list(triples((bob, Any, pizza)))), 1)
        asserte(len(list(triples((bob, Any, michel)))), 1)

        # unbound subject, objects with context
        asserte(len(list(c1triples((Any, hates, Any)))), 2)
        asserte(len(list(c1triples((Any, likes, Any)))), 5)

        # unbound subject, objects without context, same results!
        asserte(len(list(triples((Any, hates, Any)))), 2)
        asserte(len(list(triples((Any, likes, Any)))), 5)

        # unbound predicates, objects with context
        asserte(len(list(c1triples((michel, Any, Any)))), 2)
        asserte(len(list(c1triples((bob, Any, Any)))), 3)
        asserte(len(list(c1triples((tarek, Any, Any)))), 2)

        # unbound predicates, objects without context, same results!
        asserte(len(list(triples((michel, Any, Any)))), 2)
        asserte(len(list(triples((bob, Any, Any)))), 3)
        asserte(len(list(triples((tarek, Any, Any)))), 2)

        # unbound subjects, predicates with context
        asserte(len(list(c1triples((Any, Any, pizza)))), 3)
        asserte(len(list(c1triples((Any, Any, cheese)))), 3)
        asserte(len(list(c1triples((Any, Any, michel)))), 1)

        # unbound subjects, predicates without context, same results!
        asserte(len(list(triples((Any, Any, pizza)))), 3)
        asserte(len(list(triples((Any, Any, cheese)))), 3)
        asserte(len(list(triples((Any, Any, michel)))), 1)

        # all unbound with context
        asserte(len(list(c1triples((Any, Any, Any)))), 7)
        # all unbound without context, same result!
        asserte(len(list(triples((Any, Any, Any)))), 7)

        # Union graph and the c1 context must agree on every accessor.
        for c in [graph, self.graph.get_context(c1)]:
            # unbound subjects
            asserte(set(c.subjects(likes, pizza)), set((michel, tarek)))
            asserte(set(c.subjects(hates, pizza)), set((bob, )))
            asserte(set(c.subjects(likes, cheese)), set([tarek, bob, michel]))
            asserte(set(c.subjects(hates, cheese)), set())

            # unbound objects
            asserte(set(c.objects(michel, likes)), set([cheese, pizza]))
            asserte(set(c.objects(tarek, likes)), set([cheese, pizza]))
            asserte(set(c.objects(bob, hates)), set([michel, pizza]))
            asserte(set(c.objects(bob, likes)), set([cheese]))

            # unbound predicates
            asserte(set(c.predicates(michel, cheese)), set([likes]))
            asserte(set(c.predicates(tarek, cheese)), set([likes]))
            asserte(set(c.predicates(bob, pizza)), set([hates]))
            asserte(set(c.predicates(bob, michel)), set([hates]))

            asserte(set(c.subject_objects(hates)),
                    set([(bob, pizza), (bob, michel)]))
            asserte(
                set(c.subject_objects(likes)),
                set([(tarek, cheese), (michel, cheese), (michel, pizza),
                     (bob, cheese), (tarek, pizza)]))

            asserte(set(c.predicate_objects(michel)),
                    set([(likes, cheese), (likes, pizza)]))
            asserte(set(c.predicate_objects(bob)),
                    set([(likes, cheese), (hates, pizza), (hates, michel)]))
            asserte(set(c.predicate_objects(tarek)),
                    set([(likes, cheese), (likes, pizza)]))

            asserte(set(c.subject_predicates(pizza)),
                    set([(bob, hates), (tarek, likes), (michel, likes)]))
            asserte(set(c.subject_predicates(cheese)),
                    set([(bob, likes), (tarek, likes), (michel, likes)]))
            asserte(set(c.subject_predicates(michel)), set([(bob, hates)]))

            asserte(
                set(c),
                set([(bob, hates, michel), (bob, likes, cheese),
                     (tarek, likes, pizza), (michel, likes, pizza),
                     (michel, likes, cheese), (bob, hates, pizza),
                     (tarek, likes, cheese)]))

        # remove stuff and make sure the graph is empty again
        self.removeStuff()
        asserte(len(list(c1triples((Any, Any, Any)))), 0)
        asserte(len(list(triples((Any, Any, Any)))), 0)
class Store(object):
    # Tuple-space-like store (Python 2 code): every written Graph becomes a
    # named context inside a ConjunctiveGraph, keyed by a randomly generated
    # id. read_* return copies; take_* return copies AND remove the context.

    def __init__(self, store='default'):
        self.graphs = ConjunctiveGraph(store)
        self._lock = threading.RLock()

    @locked
    def _print(self):
        print "Graph %r" % self.graphs

    @locked
    def get_graph_uris(self):
        # Identifiers of all named contexts currently held.
        return [ n.identifier for n in self.graphs.contexts() ]

    def __write_graph(self, graph):
        # NOTE(review): acquires the lock here but relies on the caller
        # (write) to release it after returning -- confirm this pairing is
        # intentional; an exception between the two would leak the lock.
        self._lock.acquire()
        while True:
            #new_uri = URIRef('http://otsopack/%s' % random.randint(0, 1000))
            new_id = str( random.randint(0, 1000) )
            # NOTE(review): this membership test compares the string new_id
            # against context Graph *objects* (filtered to those whose
            # identifier differs from new_id), so it appears it can never be
            # true -- the id-collision check may be ineffective; verify.
            if new_id in filter(lambda n: n.identifier!=new_id, self.graphs.contexts()):
                continue
            gr = self.graphs.get_context(new_id) #Graph(self.graphs.store, new_uri)
            gr += graph  # copy all triples of the written graph into the new context
            return new_id

    @locked
    def write(self, triples):
        # Store a whole Graph as a fresh context; returns the new context id.
        if not isinstance(triples, Graph):
            raise Exception("'triples' must be a Graph.")
        new_uri = self.__write_graph(triples)
        # Releases the lock taken inside __write_graph (see NOTE above).
        self._lock.release()
        return new_uri

    @locked
    def read_uri(self, uri):
        # Non-destructive read of a context by id; deep copy so callers
        # can't mutate the stored graph.
        ret = Graph(self.graphs.store, uri)
        return deepcopy(ret) if ret else None

    @locked
    def read_wildcard(self, subject, predicate, obj):
        # Non-destructive read of the first context matching the pattern.
        gr = self._find_graph(subject, predicate, obj)
        return deepcopy(gr)

    @locked
    def read_sparql(self, query):
        # Non-destructive read of the first context matching the query.
        gr = self._find_graph_sparql(query)
        return deepcopy(gr)

    @locked
    def take_uri(self, uri):
        # Destructive read: copy the context, then remove it from the store.
        ret = None
        try:
            #context = URIRef(uri)
            #ret = Graph(self.graphs.store, uri)
            to_delete = self.graphs.get_context(uri)
            ret = deepcopy(to_delete)
            self.graphs.remove_context(to_delete)
        except KeyError:
            return None
        return ret if len(ret)>0 else None

    @locked
    def take_wildcard(self, subject, predicate, obj):
        # Destructive read of the first context matching the triple pattern.
        ret = None
        try:
            to_delete = self._find_graph(subject, predicate, obj)
            if to_delete is not None:
                ret = deepcopy(to_delete)
                self.graphs.remove_context(to_delete)
        except KeyError:
            return None
        return ret

    @locked
    def take_sparql(self, query):
        # Destructive read of the first context matching the SPARQL query.
        ret = None
        try:
            to_delete = self._find_graph_sparql(query)
            if to_delete is not None:
                ret = deepcopy(to_delete)
                self.graphs.remove_context(to_delete)
        except KeyError:
            return None
        return ret

    @locked
    def query_wildcard(self, subject, predicate, obj):
        # Collect matching triples from ALL contexts into one fresh Graph.
        ret = Graph()
        for t in self.graphs.triples((subject, predicate, obj)):
            ret.add(t)
        return ret if len(ret)>0 else None

    @locked
    def query_sparql(self, query):
        # Collect query results from the union graph into one fresh Graph.
        ret = Graph()
        for t in self.graphs.query( query ):
            ret.add(t)
        return ret if len(ret)>0 else None

    def _find_graph(self, subject, predicate, obj):
        for graph in self.graphs.contexts(): #(subject, predicate, obj)):
            for _ in graph.triples((subject, predicate, obj)):
                return graph # if it has at least a triple matching that triple, we return the graph
        return None

    def _find_graph_sparql(self, query):
        for graph in self.graphs.contexts():
            for _ in graph.query( query ):
                return graph # if the graph match the a triple matching that triple, we return the graph
        return None
class TestSparql11(unittest.TestCase):
    """Integration tests for SPARQLUpdateStore against a live endpoint
    (HOST/DB configured at module level). Covers per-graph CRUD, union
    default-graph semantics, SPARQL UPDATE (incl. initNs/initBindings,
    blank nodes, multi-statement requests) and tokenizer edge cases
    (braces and quotes inside literals, IRIs and comments)."""

    def setUp(self):
        self.longMessage = True
        self.graph = ConjunctiveGraph('SPARQLUpdateStore')

        root = HOST + DB
        self.graph.open((root + "sparql", root + "update"))

        # clean out the store so each test starts from an empty dataset
        for c in self.graph.contexts():
            c.remove((None, None, None))
            assert len(c) == 0

    def tearDown(self):
        self.graph.close()

    def testSimpleGraph(self):
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g.add((bob, likes, pizza))
        g.add((bob, likes, cheese))

        g2 = self.graph.get_context(othergraphuri)
        g2.add((michel, likes, pizza))

        self.assertEqual(3, len(g), 'graph contains 3 triples')
        self.assertEqual(1, len(g2), 'other graph contains 1 triple')

        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(2, len(list(r)), "two people like pizza")

        r = g.triples((None, likes, pizza))
        self.assertEqual(2, len(list(r)), "two people like pizza")

        # Test initBindings
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                    initBindings={'s': tarek})
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, pizza))
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, cheese))
        self.assertEqual(0, len(list(r)), "tarek doesn't like cheese")

        g2.add((tarek, likes, pizza))
        g.remove((tarek, likes, pizza))
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(1, len(list(r)), "only bob likes pizza")

    def testConjunctiveDefault(self):
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g2 = self.graph.get_context(othergraphuri)
        g2.add((bob, likes, pizza))
        g.add((tarek, hates, cheese))

        self.assertEqual(2, len(g), 'graph contains 2 triples')

        # the following are actually bad tests as they depend on your endpoint,
        # as pointed out in the sparqlstore.py code:
        #
        # For ConjunctiveGraphs, reading is done from the "default graph" Exactly
        # what this means depends on your endpoint, because SPARQL does not offer a
        # simple way to query the union of all graphs as it would be expected for a
        # ConjuntiveGraph.
        ##
        # Fuseki/TDB has a flag for specifying that the default graph
        # is the union of all graphs (tdb:unionDefaultGraph in the Fuseki config).
        self.assertEqual(
            3, len(self.graph),
            'default union graph should contain three triples but contains:\n'
            '%s' % list(self.graph))

        r = self.graph.query(
            "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(2, len(list(r)), "two people like pizza")

        r = self.graph.query(
            "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
            initBindings={'s': tarek})
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, pizza))
        self.assertEqual(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, cheese))
        self.assertEqual(0, len(list(r)), "tarek doesn't like cheese")

        g2.remove((bob, likes, pizza))

        r = self.graph.query(
            "SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEqual(1, len(list(r)), "only tarek likes pizza")

    def testUpdate(self):
        self.graph.update(
            "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }"
        )

        g = self.graph.get_context(graphuri)
        self.assertEqual(1, len(g), 'graph contains 1 triples')

    def testUpdateWithInitNs(self):
        # Prefix 'ns' is supplied via initNs rather than inline PREFIX.
        self.graph.update(
            "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
            initNs={'ns': URIRef('urn:')})

        g = self.graph.get_context(graphuri)
        self.assertEqual(set(g.triples((None, None, None))),
                         set([(michel, likes, pizza)]),
                         'only michel likes pizza')

    def testUpdateWithInitBindings(self):
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
            })

        g = self.graph.get_context(graphuri)
        self.assertEqual(set(g.triples((None, None, None))),
                         set([(michel, likes, pizza)]),
                         'only michel likes pizza')

    def testUpdateWithBlankNode(self):
        self.graph.update(
            "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
        )
        g = self.graph.get_context(graphuri)
        for t in g.triples((None, None, None)):
            # Subject must round-trip as a blank node, not a URI.
            self.assertTrue(isinstance(t[0], BNode))
            self.assertEqual(t[1].n3(), "<urn:type>")
            self.assertEqual(t[2].n3(), "<urn:Blank>")

    def testUpdateWithBlankNodeSerializeAndParse(self):
        self.graph.update(
            "INSERT DATA { GRAPH <urn:graph> { _:blankA <urn:type> <urn:Blank> } }"
        )
        g = self.graph.get_context(graphuri)
        string = g.serialize(format='ntriples').decode('utf-8')
        raised = False
        try:
            # Serialized blank-node output must be re-parseable.
            Graph().parse(data=string, format="ntriples")
        except Exception as e:
            raised = True
        self.assertFalse(raised, 'Exception raised when parsing: ' + string)

    def testMultipleUpdateWithInitBindings(self):
        # Two update operations in one request; bindings apply to both.
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
            "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
                'd': URIRef('urn:bob'),
            })

        g = self.graph.get_context(graphuri)
        self.assertEqual(set(g.triples((None, None, None))),
                         set([(michel, likes, pizza),
                              (bob, likes, pizza)]),
                         'michel and bob like pizza')

    def testNamedGraphUpdate(self):
        g = self.graph.get_context(graphuri)
        r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"
        g.update(r1)
        self.assertEqual(set(g.triples((None, None, None))),
                         set([(michel, likes, pizza)]),
                         'only michel likes pizza')

        r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
             "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
        g.update(r2)
        self.assertEqual(set(g.triples((None, None, None))),
                         set([(bob, likes, pizza)]),
                         'only bob likes pizza')

        says = URIRef("urn:says")

        # Strings with unbalanced curly braces
        # (must not confuse the store's request tokenizer)
        tricky_strs = [
            "With an unbalanced curly brace %s " % brace
            for brace in ["{", "}"]
        ]
        for tricky_str in tricky_strs:
            r3 = """INSERT { ?b <urn:says> "%s" }
            WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
            g.update(r3)

        values = set()
        for v in g.objects(bob, says):
            values.add(str(v))
        self.assertEqual(values, set(tricky_strs))

        # Complicated Strings
        # (every quoting style, with escapes, braces and comment-like text)
        r4strings = []
        r4strings.append(r'''"1: adfk { ' \\\" \" { "''')
        r4strings.append(r'''"2: adfk } <foo> #éï \\"''')

        r4strings.append(r"""'3: adfk { " \\\' \' { '""")
        r4strings.append(r"""'4: adfk } <foo> #éï \\'""")

        r4strings.append(r'''"""5: adfk { ' \\\" \" { """''')
        r4strings.append(r'''"""6: adfk } <foo> #éï \\"""''')
        r4strings.append('"""7: ad adsfj \n { \n sadfj"""')

        r4strings.append(r"""'''8: adfk { " \\\' \' { '''""")
        r4strings.append(r"""'''9: adfk } <foo> #éï \\'''""")
        r4strings.append("'''10: ad adsfj \n { \n sadfj'''")

        r4 = "\n".join([
            u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
            for s in r4strings
        ])
        g.update(r4)
        values = set()
        for v in g.objects(michel, says):
            values.add(text_type(v))
        self.assertEqual(
            values,
            set([
                # Strip the surrounding quote syntax and unescape to get
                # the literal value each r4string denotes.
                re.sub(
                    r"\\(.)", r"\1",
                    re.sub(r"^'''|'''$|^'|'$|" + r'^"""|"""$|^"|"$', r"", s))
                for s in r4strings
            ]))

        # IRI Containing ' or #
        # The fragment identifier must not be misinterpreted as a comment
        # (commenting out the end of the block).
        # The ' must not be interpreted as the start of a string, causing the }
        # in the literal to be identified as the end of the block.
        r5 = """INSERT DATA { <urn:michel> <urn:hates> <urn:foo'bar?baz;a=1&b=2#fragment>, "'}" }"""

        g.update(r5)
        values = set()
        for v in g.objects(michel, hates):
            values.add(text_type(v))
        self.assertEqual(values,
                         set([u"urn:foo'bar?baz;a=1&b=2#fragment", u"'}"]))

        # Comments
        r6 = u"""
        INSERT DATA {
            <urn:bob> <urn:hates> <urn:bob> . # No closing brace: }
            <urn:bob> <urn:hates> <urn:michel>.
        } #Final { } comment"""

        g.update(r6)
        values = set()
        for v in g.objects(bob, hates):
            values.add(v)
        self.assertEqual(values, set([bob, michel]))

    def testNamedGraphUpdateWithInitBindings(self):
        g = self.graph.get_context(graphuri)
        r = "INSERT { ?a ?b ?c } WHERE {}"
        g.update(r, initBindings={'a': michel, 'b': likes, 'c': pizza})
        self.assertEqual(set(g.triples((None, None, None))),
                         set([(michel, likes, pizza)]),
                         'only michel likes pizza')

    def testEmptyNamedGraph(self):
        empty_graph_iri = "urn:empty-graph-1"
        self.graph.update("CREATE GRAPH <%s>" % empty_graph_iri)
        named_graphs = [
            text_type(r[0]) for r in self.graph.query(
                "SELECT ?name WHERE { GRAPH ?name {} }")
        ]
        # Some SPARQL endpoint backends (like TDB) are not able to find empty named graphs
        # (at least with this query)
        if empty_graph_iri in named_graphs:
            self.assertTrue(
                empty_graph_iri in [text_type(g.identifier)
                                    for g in self.graph.contexts()])

    def testEmptyLiteral(self):
        # test for https://github.com/RDFLib/rdflib/issues/457
        # also see test_issue457.py which is sparql store independent!
        g = self.graph.get_context(graphuri)
        g.add((URIRef('http://example.com/s'),
               URIRef('http://example.com/p'),
               Literal('')))

        o = tuple(g)[0][2]
        self.assertEqual(o, Literal(''), repr(o))
<http://helheim.deusto.es/ltw/0.1#identifier> ?id . } """ ) current_task.update_state(state='PROGRESS', meta={'progress_percent': 20, 'progress_msg': 'Fetching your data...'}) ont_id_list = [ str(a[0]).lower() + 's' for a in config_q_res ] ont_class_list = [ str(a[1]) for a in config_q_res ] try: Virtuoso = plugin("Virtuoso", Store) store = Virtuoso(celery.conf.VIRTUOSO_ODBC) ltw_conj_data_graph = ConjunctiveGraph(store=store) graph_id = str(uuid.uuid4()) ltw_data_graph = ltw_conj_data_graph.get_context(graph_id) except Exception, e: raise Exception("Unable to connect to LTW data source: %s" % str(e)) progress_per_part = 70 / len(ont_class_list) counter = Value('f', float(20)) max_processes = len(ont_class_list) if len(ont_class_list) <= celery.conf.MAX_MULTIPROCESSING else celery.conf.MAX_MULTIPROCESSING pool = Pool(max_processes, p_q_initializer, [counter]) if len(ont_id_list) > 1: fetch_msg = 'Fetching %s' % ', '.join(ont_id_list[:-1]) fetch_msg += ' and %s...' % ont_id_list[-1] else: fetch_msg = 'Fetching %s...' % ont_id_list[0]
current_task.update_state(state='PROGRESS', meta={ 'progress_percent': 20, 'progress_msg': 'Fetching your data...' }) ont_id_list = [str(a[0]).lower() + 's' for a in config_q_res] ont_class_list = [str(a[1]) for a in config_q_res] try: Virtuoso = plugin("Virtuoso", Store) store = Virtuoso(celery.conf.VIRTUOSO_ODBC) ltw_conj_data_graph = ConjunctiveGraph(store=store) graph_id = str(uuid.uuid4()) ltw_data_graph = ltw_conj_data_graph.get_context(graph_id) except Exception, e: raise Exception("Unable to connect to LTW data source: %s" % str(e)) progress_per_part = 70 / len(ont_class_list) counter = Value('f', float(20)) max_processes = len(ont_class_list) if len( ont_class_list ) <= celery.conf.MAX_MULTIPROCESSING else celery.conf.MAX_MULTIPROCESSING pool = Pool(max_processes, p_q_initializer, [counter]) if len(ont_id_list) > 1: fetch_msg = 'Fetching %s' % ', '.join(ont_id_list[:-1]) fetch_msg += ' and %s...' % ont_id_list[-1] else:
from unittest import TestCase from rdflib import ConjunctiveGraph, URIRef, RDF, BNode, Graph from oldman import ClientResourceManager, SPARQLDataStore from oldman.iri import UUIDFragmentIriGenerator from oldman.exception import OMRequiredHashlessIRIError from oldman.rest.crud import HashLessCRUDer EXAMPLE = "http://localhost/vocab#" HYDRA = "http://www.w3.org/ns/hydra/core#" default_graph = ConjunctiveGraph() schema_graph = default_graph.get_context(URIRef("http://localhost/schema")) data_graph = default_graph.get_context(URIRef("http://localhost/data")) # Declaration (no attribute) schema_graph.add( (URIRef(EXAMPLE + "MyClass"), RDF.type, URIRef(HYDRA + "Class"))) context = { "@context": { "ex": EXAMPLE, "id": "@id", "type": "@type", "MyClass": "ex:MyClass", } } data_store = SPARQLDataStore(data_graph, schema_graph=schema_graph) data_store.create_model("MyClass",
def update_test(t):
    """Run one W3C SPARQL 1.1 Update test-suite entry.

    ``t`` is a manifest tuple ``(uri, name, comment, data, graphdata,
    query, res, syntax)``.  Syntax-only entries (no ``res``) are merely
    parsed (and, for negative tests, expected to fail to parse).
    Evaluation entries load the input dataset, apply the update request,
    and compare the resulting dataset with the expected one, context by
    context.  Failures and errors are tallied in the module-level
    ``fails``/``errors`` counters; with the DEBUG flags set, a verbose
    dump and a pdb post-mortem are produced before re-raising.
    """
    # the update-eval tests refer to graphs on http://example.org
    rdflib_sparql_module.SPARQL_LOAD_GRAPHS = False

    uri, name, comment, data, graphdata, query, res, syntax = t

    if uri in skiptests:
        raise SkipTest()

    try:
        g = ConjunctiveGraph()

        if not res:
            # Syntax-only test: no expected result dataset to compare.
            if syntax:
                translateUpdate(parseUpdate(open(query[7:])))
            else:
                try:
                    translateUpdate(parseUpdate(open(query[7:])))
                    raise AssertionError("Query shouldn't have parsed!")
                except:
                    pass  # negative syntax test
            return

        resdata, resgraphdata = res

        # read input graphs
        if data:
            g.default_context.load(data, format=_fmt(data))

        if graphdata:
            for x, l in graphdata:
                g.load(x, publicID=URIRef(l), format=_fmt(x))

        # query is a file:// IRI; [7:] strips the "file://" scheme prefix.
        req = translateUpdate(parseUpdate(open(query[7:])))
        evalUpdate(g, req)

        # read expected results
        resg = ConjunctiveGraph()
        if resdata:
            resg.default_context.load(resdata, format=_fmt(resdata))

        if resgraphdata:
            for x, l in resgraphdata:
                resg.load(x, publicID=URIRef(l), format=_fmt(x))

        # Same set of named-graph identifiers on both sides...
        eq(
            set(x.identifier for x in g.contexts() if x != g.default_context),
            set(x.identifier for x in resg.contexts()
                if x != resg.default_context))

        # ...and each graph (default + named) isomorphic to its counterpart.
        assert isomorphic(g.default_context, resg.default_context), \
            'Default graphs are not isomorphic'

        for x in g.contexts():
            if x == g.default_context:
                continue
            assert isomorphic(x, resg.get_context(x.identifier)), \
                "Graphs with ID %s are not isomorphic" % x.identifier

    except Exception, e:
        if isinstance(e, AssertionError):
            failed_tests.append(uri)
            fails[str(e)] += 1
        else:
            error_tests.append(uri)
            errors[str(e)] += 1

        if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
            # Verbose dump of the failing test's inputs and our result.
            print "======================================"
            print uri
            print name
            print comment

            if not res:
                if syntax:
                    print "Positive syntax test"
                else:
                    print "Negative syntax test"

            if data:
                print "----------------- DATA --------------------"
                print ">>>", data
                print open(data[7:]).read()
            if graphdata:
                print "----------------- GRAPHDATA --------------------"
                for x, l in graphdata:
                    print ">>>", x, l
                    print open(x[7:]).read()

            print "----------------- Request -------------------"
            print ">>>", query
            print open(query[7:]).read()

            if res:
                if resdata:
                    print "----------------- RES DATA --------------------"
                    print ">>>", resdata
                    print open(resdata[7:]).read()
                if resgraphdata:
                    print "----------------- RES GRAPHDATA -------------------"
                    for x, l in resgraphdata:
                        print ">>>", x, l
                        print open(x[7:]).read()

            print "------------- MY RESULT ----------"
            print g.serialize(format='trig')

            try:
                pq = translateUpdate(parseUpdate(open(query[7:]).read()))
                print "----------------- Parsed ------------------"
                pprintAlgebra(pq)
                # print pq
            except:
                print "(parser error)"

            print decodeStringEscape(unicode(e))

            import pdb
            pdb.post_mortem(sys.exc_info()[2])
        raise
goodness = rel[3].split('=')[1] ## 73287 ## this will add a context and triple for any relation whose confidence factor is less than 1, OR ALL might_be relations. ## AND it will add to the default graph a goodness triple: <context, goodness, decimalLiteral> ## For the current data, NO relation other than might_be will have a goodness of < 1 ## So NO relation other than 'might_be' will have such a context and triples. if Decimal(goodness) < 1 or p == chartex['might_be']: rgraph = Graph(cg.store, gid[rel[0]+rel[2]+rel[1]]) rgraph.add((s,p,o)) cg.add(( rgraph.identifier, chartex['goodness'], Literal(goodness, datatype=XSD.decimal) )) ## I don't appear to gain anything by creating a separate graph to hold these statements, so we add them to the default graph. AG will figure out where to put them on upload??? ## add the triple also to the appropriate document graph ## might_be is the only relation that can lie between entities in different documents ## if necessary, we can draw from a list of such relations if we want to add others g = cg.get_context( gid[entity_dict[rel[0]]['File']] ) if p != chartex['might_be']: g.add((s,p,o)) #cProfile.run('cg.serialize(format="trig")') #print rdflib.__version__ #print len([c for c in cg.contexts()]) #print cg.serialize(format='nquads') # there may be a bug in the serialization of context-aware stores. Serializing a graph of 78k statements takes just under an hour # in my (unmerged) version of rdflib repo, this is fixed ######################## UPLOAD TO THE ADS AllegroGraph TRIPLE-STORE ########################### ## Upload by the following means takes around 3 hours
class ElasticTestCase(unittest.TestCase):
    """Tests for the Elasticsearch-backed rdflib Store plugin.

    Requires a reachable Elasticsearch instance at ``dburi``; each test
    gets a fresh store via :meth:`setUp` and the index is destroyed in
    :meth:`tearDown`.  Fixture terms (``ctx_id``, ``michel``, ``likes``,
    ``pizza``) are module-level constants defined elsewhere in this file.
    """

    identifier = URIRef("rdflib_test")
    dburi = Literal("http://localhost:9200/collection")

    def setUp(self):
        # Instantiate the plugin store directly, then wrap it in a
        # context-aware graph opened against the live endpoint.
        self.store = plugin.get("Elasticsearch", Store)(identifier=self.identifier, configuration=self.dburi)
        self.graph = ConjunctiveGraph(self.store, identifier=self.identifier)
        self.graph.open(self.dburi, create=True)

    def tearDown(self):
        # Drop the backing index before closing so tests stay isolated.
        self.graph.destroy(self.dburi)
        self.graph.close()

    def test_registerplugins(self):
        # I doubt this is quite right for a fresh pip installation,
        # this test is mainly here to fill a coverage gap.
        registerplugins()
        self.assertIsNotNone(plugin.get("Elasticsearch", Store))
        p = plugin._plugins
        self.assertIn(("Elasticsearch", Store), p)
        # Removing the registration and re-registering must restore it.
        del p[("Elasticsearch", Store)]
        plugin._plugins = p
        registerplugins()
        self.assertIn(("Elasticsearch", Store), p)

    def test_namespaces(self):
        # A fresh graph still carries the default namespace bindings.
        self.assertNotEqual(list(self.graph.namespaces()), [])

    def test_contexts_without_triple(self):
        # No data added yet, so no contexts should be reported.
        self.assertEqual(list(self.graph.contexts()), [])

    def test_contexts_result(self):
        g = self.graph.get_context(ctx_id)
        g.add((michel, likes, pizza))
        actual = list(self.store.contexts())
        self.assertEqual(actual[0], ctx_id)

    def test_contexts_with_triple(self):
        # Asking for contexts of a triple that was never added yields nothing.
        statemnt = (michel, likes, pizza)
        self.assertEqual(list(self.graph.contexts(triple=statemnt)), [])

    def test__len(self):
        self.assertEqual(self.store.__len__(), 0)

    def test_triples_choices(self):
        # Set this so we're not including selects for both asserted and literal tables for
        # a choice
        self.store.STRONGLY_TYPED_TERMS = True
        # Set the grouping of terms
        self.store.max_terms_per_where = 2

        results = [((michel, likes, pizza), ctx_id)]

        # force execution of the generator
        for x in self.store.triples_choices(
                (None, likes, [michel, pizza, likes])):
            print("x=" + str(x))
            print("results=" + str(results))
            assert x in results
from rdflib.serializer import Serializer

# Open the persistent Sleepycat-backed dataset (must already exist:
# create=False) and bind the prefixes used below.
store = 'Sleepycat'
graph = ConjunctiveGraph(store=store, identifier='mygraph')
graph.open('foaf_flask/static/rdf/sleepycat', create=False)

n = Namespace("http://127.0.0.1:5000/ldp/")
for prefix, namespace in (('foaf', FOAF), ('local', n)):
    graph.bind(prefix, namespace)

# Statements about bob that live in the default graph.
for statement in (
    (n.bob, RDF.type, FOAF.Person),
    (n.bob, FOAF.age, Literal('42', datatype=XSD.integer)),
):
    graph.add(statement)

# Everything else goes into the named graph identified by n.linda.
g = graph.get_context(identifier=n.linda)
for statement in (
    (n.linda, RDF.type, FOAF.Person),
    (n.bob, FOAF.knows, n.donna),
    (n.donna, FOAF.name, Literal("Donna Fales")),
    (n.donna, FOAF.firstName, Literal("Donna")),
    (n.donna, FOAF.lastName, Literal("Fales")),
    (n.bob, FOAF.age, Literal('42', datatype=XSD.integer)),
    (n.bob, RDFS.label, Literal('Bob', lang='en')),
    (n.bob, RDFS.label, Literal('Robert', lang='fr')),
    (n.donna, FOAF.nick, Literal("Doudie", lang="en")),
    (n.donna, FOAF.nick, Literal("Dudu", lang="es")),
    (n.donna, FOAF.mbox, URIRef("mailto:[email protected]")),
    (n.bob, FOAF.name, Literal('Bob')),
    (n.linda, FOAF.name, Literal('Linda')),
    (n.bob, FOAF.knows, n.linda),
):
    g.add(statement)
def update_test(t):
    """Run one W3C SPARQL 1.1 Update test-suite entry.

    ``t`` is a manifest tuple ``(uri, name, comment, data, graphdata,
    query, res, syntax)``.  Syntax-only entries (no ``res``) are merely
    parsed (and, for negative tests, expected to fail to parse).
    Evaluation entries load the input dataset, apply the update request,
    and compare the resulting dataset with the expected one, context by
    context.  Failures and errors are tallied in the module-level
    ``fails``/``errors`` counters; with the DEBUG flags set, a verbose
    dump and a pdb post-mortem are produced before re-raising.
    """
    # the update-eval tests refer to graphs on http://example.org
    rdflib_sparql_module.SPARQL_LOAD_GRAPHS = False

    uri, name, comment, data, graphdata, query, res, syntax = t

    if uri in skiptests:
        raise SkipTest()

    try:
        g = ConjunctiveGraph()

        if not res:
            # Syntax-only test: no expected result dataset to compare.
            if syntax:
                translateUpdate(parseUpdate(open(query[7:])))
            else:
                try:
                    translateUpdate(parseUpdate(open(query[7:])))
                    raise AssertionError("Query shouldn't have parsed!")
                except:
                    pass  # negative syntax test
            return

        resdata, resgraphdata = res

        # read input graphs
        if data:
            g.default_context.load(data, format=_fmt(data))

        if graphdata:
            for x, l in graphdata:
                g.load(x, publicID=URIRef(l), format=_fmt(x))

        # query is a file:// IRI; [7:] strips the "file://" scheme prefix.
        req = translateUpdate(parseUpdate(open(query[7:])))
        evalUpdate(g, req)

        # read expected results
        resg = ConjunctiveGraph()
        if resdata:
            resg.default_context.load(resdata, format=_fmt(resdata))

        if resgraphdata:
            for x, l in resgraphdata:
                resg.load(x, publicID=URIRef(l), format=_fmt(x))

        # Same set of named-graph identifiers on both sides...
        eq(
            set(x.identifier for x in g.contexts() if x != g.default_context),
            set(x.identifier for x in resg.contexts()
                if x != resg.default_context),
        )

        # ...and each graph (default + named) isomorphic to its counterpart.
        assert isomorphic(g.default_context, resg.default_context), "Default graphs are not isomorphic"

        for x in g.contexts():
            if x == g.default_context:
                continue
            assert isomorphic(x, resg.get_context(x.identifier)), "Graphs with ID %s are not isomorphic" % x.identifier

    except Exception, e:
        if isinstance(e, AssertionError):
            failed_tests.append(uri)
            fails[str(e)] += 1
        else:
            error_tests.append(uri)
            errors[str(e)] += 1

        if DEBUG_ERROR and not isinstance(e, AssertionError) or DEBUG_FAIL:
            # Verbose dump of the failing test's inputs and our result.
            print "======================================"
            print uri
            print name
            print comment

            if not res:
                if syntax:
                    print "Positive syntax test"
                else:
                    print "Negative syntax test"

            if data:
                print "----------------- DATA --------------------"
                print ">>>", data
                print open(data[7:]).read()
            if graphdata:
                print "----------------- GRAPHDATA --------------------"
                for x, l in graphdata:
                    print ">>>", x, l
                    print open(x[7:]).read()

            print "----------------- Request -------------------"
            print ">>>", query
            print open(query[7:]).read()

            if res:
                if resdata:
                    print "----------------- RES DATA --------------------"
                    print ">>>", resdata
                    print open(resdata[7:]).read()
                if resgraphdata:
                    print "----------------- RES GRAPHDATA -------------------"
                    for x, l in resgraphdata:
                        print ">>>", x, l
                        print open(x[7:]).read()

            print "------------- MY RESULT ----------"
            print g.serialize(format="trig")

            try:
                pq = translateUpdate(parseUpdate(open(query[7:]).read()))
                print "----------------- Parsed ------------------"
                pprintAlgebra(pq)
                # print pq
            except:
                print "(parser error)"

            print decodeStringEscape(unicode(e))

            import pdb
            pdb.post_mortem(sys.exc_info()[2])
        raise
class TestSparql11(unittest.TestCase):
    """Integration tests for the SPARQLUpdateStore against a live endpoint.

    Requires a SPARQL 1.1 server (e.g. Fuseki) at localhost:3030 exposing
    the ``ukpp`` dataset.  Fixture terms (``graphuri``, ``othergraphuri``,
    ``tarek``, ``michel``, ``bob``, ``likes``, ``hates``, ``pizza``,
    ``cheese``) are module-level constants defined elsewhere in this file.
    """

    def setUp(self):
        self.longMessage = True
        self.graph = ConjunctiveGraph('SPARQLUpdateStore')

        root = "http://localhost:3030/ukpp/"
        self.graph.open((root + "sparql", root + "update"))

        # clean out the store
        for c in self.graph.contexts():
            c.remove((None, None, None))
            assert len(c) == 0

    def tearDown(self):
        self.graph.close()

    def testSimpleGraph(self):
        # Adds into two named graphs and checks query/triples/initBindings
        # behavior within a single named-graph context.
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g.add((bob, likes, pizza))
        g.add((bob, likes, cheese))

        g2 = self.graph.get_context(othergraphuri)
        g2.add((michel, likes, pizza))

        self.assertEquals(3, len(g), 'graph contains 3 triples')
        self.assertEquals(1, len(g2), 'other graph contains 1 triple')

        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = g.triples((None, likes, pizza))
        self.assertEquals(2, len(list(r)), "two people like pizza")

        # Test initBindings
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                    initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.add((tarek, likes, pizza))
        g.remove((tarek, likes, pizza))
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only bob likes pizza")

    def testConjunctiveDefault(self):
        # The default graph of the ConjunctiveGraph is the union of all
        # named graphs on the endpoint.
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g2 = self.graph.get_context(othergraphuri)
        g2.add((bob, likes, pizza))
        g.add((tarek, hates, cheese))

        self.assertEquals(2, len(g), 'graph contains 2 triples')
        self.assertEquals(3, len(self.graph),
                          'default union graph contains three triples')

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                             initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.remove((bob, likes, pizza))

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only tarek likes pizza")

    def testUpdate(self):
        # INSERT DATA through the conjunctive graph lands in the named graph.
        self.graph.update(
            "INSERT DATA { GRAPH <urn:graph> { <urn:michel> <urn:likes> <urn:pizza> . } }"
        )

        g = self.graph.get_context(graphuri)
        self.assertEquals(1, len(g), 'graph contains 1 triples')

    def testUpdateWithInitNs(self):
        # Namespace prefixes supplied via initNs are prepended to the request.
        self.graph.update(
            "INSERT DATA { GRAPH ns:graph { ns:michel ns:likes ns:pizza . } }",
            initNs={'ns': URIRef('urn:')})

        g = self.graph.get_context(graphuri)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza)]),
                          'only michel likes pizza')

    def testUpdateWithInitBindings(self):
        # Variables bound via initBindings are injected into the WHERE clause.
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WherE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
            })

        g = self.graph.get_context(graphuri)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza)]),
                          'only michel likes pizza')

    def testMultipleUpdateWithInitBindings(self):
        # Bindings apply to every request in a ';'-separated update sequence.
        self.graph.update(
            "INSERT { GRAPH <urn:graph> { ?a ?b ?c . } } WHERE { };"
            "INSERT { GRAPH <urn:graph> { ?d ?b ?c . } } WHERE { }",
            initBindings={
                'a': URIRef('urn:michel'),
                'b': URIRef('urn:likes'),
                'c': URIRef('urn:pizza'),
                'd': URIRef('urn:bob'),
            })

        g = self.graph.get_context(graphuri)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza), (bob, likes, pizza)]),
                          'michel and bob like pizza')

    def testNamedGraphUpdate(self):
        g = self.graph.get_context(graphuri)
        r1 = "INSERT DATA { <urn:michel> <urn:likes> <urn:pizza> }"
        g.update(r1)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(michel, likes, pizza)]),
                          'only michel likes pizza')

        r2 = "DELETE { <urn:michel> <urn:likes> <urn:pizza> } " + \
             "INSERT { <urn:bob> <urn:likes> <urn:pizza> } WHERE {}"
        g.update(r2)
        self.assertEquals(set(g.triples((None, None, None))),
                          set([(bob, likes, pizza)]),
                          'only bob likes pizza')

        says = URIRef("urn:says")

        # Strings with unbalanced curly braces
        # (exercise the store's brace-aware request splitting).
        tricky_strs = [
            "With an unbalanced curly brace %s " % brace
            for brace in ["{", "}"]
        ]
        for tricky_str in tricky_strs:
            r3 = """INSERT { ?b <urn:says> "%s" } WHERE { ?b <urn:likes> <urn:pizza>} """ % tricky_str
            g.update(r3)

        values = set()
        for v in g.objects(bob, says):
            values.add(str(v))
        self.assertEquals(values, set(tricky_strs))

        # Complicated Strings
        # (quote styles, escapes, comments and non-ASCII inside literals).
        r4strings = []
        r4strings.append(ur'''"1: adfk { ' \\\" \" { "''')
        r4strings.append(ur'''"2: adfk } <foo> #éï \\"''')
        r4strings.append(ur"""'3: adfk { " \\\' \' { '""")
        r4strings.append(ur"""'4: adfk } <foo> #éï \\'""")
        r4strings.append(ur'''"""5: adfk { ' \\\" \" { """''')
        r4strings.append(ur'''"""6: adfk } <foo> #éï \\"""''')
        r4strings.append(u'"""7: ad adsfj \n { \n sadfj"""')
        r4strings.append(ur"""'''8: adfk { " \\\' \' { '''""")
        r4strings.append(ur"""'''9: adfk } <foo> #éï \\'''""")
        r4strings.append(u"'''10: ad adsfj \n { \n sadfj'''")

        r4 = "\n".join([
            u'INSERT DATA { <urn:michel> <urn:says> %s } ;' % s
            for s in r4strings
        ])
        g.update(r4)
        values = set()
        for v in g.objects(michel, says):
            values.add(unicode(v))
        # Strip the surrounding quote characters and unescape to recover
        # the literal values for comparison.
        self.assertEquals(
            values,
            set([
                re.sub(
                    ur"\\(.)", ur"\1",
                    re.sub(ur"^'''|'''$|^'|'$|" + ur'^"""|"""$|^"|"$',
                           ur"", s)) for s in r4strings
            ]))
class ContextTestCase(unittest.TestCase):
    """Store-conformance tests for context (named graph) handling.

    ``store`` names the rdflib Store plugin under test; subclasses can
    override it.  Each test gets a fresh graph opened on a temporary
    path (a temp file for SQLite, a temp directory otherwise).
    """

    store = 'default'   # Store plugin name; overridden by subclasses.
    slow = True
    tmppath = None      # Filesystem location the store is opened on.

    def setUp(self):
        try:
            self.graph = ConjunctiveGraph(store=self.store)
        except ImportError:
            raise SkipTest(
                "Dependencies for store '%s' not available!" % self.store)
        if self.store == "SQLite":
            _, self.tmppath = mkstemp(
                prefix='test', dir='/tmp', suffix='.sqlite')
        else:
            self.tmppath = mkdtemp()
        self.graph.open(self.tmppath, create=True)
        self.michel = URIRef(u'michel')
        self.tarek = URIRef(u'tarek')
        self.bob = URIRef(u'bob')
        self.likes = URIRef(u'likes')
        self.hates = URIRef(u'hates')
        self.pizza = URIRef(u'pizza')
        self.cheese = URIRef(u'cheese')

        self.c1 = URIRef(u'context-1')
        self.c2 = URIRef(u'context-2')

        # delete the graph for each test!
        self.graph.remove((None, None, None))

    def tearDown(self):
        self.graph.close()
        if os.path.isdir(self.tmppath):
            shutil.rmtree(self.tmppath)
        else:
            os.remove(self.tmppath)

    def addStuff(self):
        # Populate context c1 with seven fixture triples.
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        graph = Graph(self.graph.store, c1)

        graph.add((tarek, likes, pizza))
        graph.add((tarek, likes, cheese))
        graph.add((michel, likes, pizza))
        graph.add((michel, likes, cheese))
        graph.add((bob, likes, cheese))
        graph.add((bob, hates, pizza))
        graph.add((bob, hates, michel))  # gasp!

    def removeStuff(self):
        # Inverse of addStuff: remove the same seven triples from c1.
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        graph = Graph(self.graph.store, c1)

        graph.remove((tarek, likes, pizza))
        graph.remove((tarek, likes, cheese))
        graph.remove((michel, likes, pizza))
        graph.remove((michel, likes, cheese))
        graph.remove((bob, likes, cheese))
        graph.remove((bob, hates, pizza))
        graph.remove((bob, hates, michel))  # gasp!

    def addStuffInMultipleContexts(self):
        # The same triple goes into the default context and two named ones.
        c1 = self.c1
        c2 = self.c2
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        # add to default context
        self.graph.add(triple)
        # add to context 1
        graph = Graph(self.graph.store, c1)
        graph.add(triple)
        # add to context 2
        graph = Graph(self.graph.store, c2)
        graph.add(triple)

    def testConjunction(self):
        if self.store == "SQLite":
            raise SkipTest("Skipping known issue with __len__")
        self.addStuffInMultipleContexts()
        triple = (self.pizza, self.likes, self.pizza)
        # add to context 1
        graph = Graph(self.graph.store, self.c1)
        graph.add(triple)
        # Union length equals c1's length: the shared triple counts once.
        self.assertEqual(len(self.graph), len(graph))

    def testAdd(self):
        self.addStuff()

    def testRemove(self):
        self.addStuff()
        self.removeStuff()

    def testLenInOneContext(self):
        c1 = self.c1
        # make sure context is empty

        self.graph.remove_context(self.graph.get_context(c1))
        graph = Graph(self.graph.store, c1)
        oldLen = len(self.graph)

        for i in range(0, 10):
            graph.add((BNode(), self.hates, self.hates))
        self.assertEqual(len(graph), oldLen + 10)
        self.assertEqual(len(self.graph.get_context(c1)), oldLen + 10)
        self.graph.remove_context(self.graph.get_context(c1))
        self.assertEqual(len(self.graph), oldLen)
        self.assertEqual(len(graph), 0)

    def testLenInMultipleContexts(self):
        if self.store == "SQLite":
            raise SkipTest("Skipping known issue with __len__")
        oldLen = len(self.graph)
        self.addStuffInMultipleContexts()

        # addStuffInMultipleContexts is adding the same triple to
        # three different contexts. So it's only + 1
        self.assertEqual(len(self.graph), oldLen + 1)

        graph = Graph(self.graph.store, self.c1)
        self.assertEqual(len(graph), oldLen + 1)

    def testRemoveInMultipleContexts(self):
        c1 = self.c1
        c2 = self.c2
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        self.addStuffInMultipleContexts()

        # triple should be still in store after removing it from c1 + c2
        self.assertTrue(triple in self.graph)
        graph = Graph(self.graph.store, c1)
        graph.remove(triple)
        self.assertTrue(triple in self.graph)
        graph = Graph(self.graph.store, c2)
        graph.remove(triple)
        self.assertTrue(triple in self.graph)
        self.graph.remove(triple)
        # now gone!
        self.assertTrue(triple not in self.graph)

        # add again and see if remove without context removes all triples!
        self.addStuffInMultipleContexts()
        self.graph.remove(triple)
        self.assertTrue(triple not in self.graph)

    def testContexts(self):
        triple = (self.pizza, self.hates, self.tarek)  # revenge!

        self.addStuffInMultipleContexts()

        def cid(c):
            # Map a context graph to its identifier for membership checks.
            return c.identifier

        self.assertTrue(self.c1 in map(cid, self.graph.contexts()))
        self.assertTrue(self.c2 in map(cid, self.graph.contexts()))

        contextList = list(map(cid, list(self.graph.contexts(triple))))
        self.assertTrue(self.c1 in contextList, (self.c1, contextList))
        self.assertTrue(self.c2 in contextList, (self.c2, contextList))

    def testRemoveContext(self):
        c1 = self.c1

        self.addStuffInMultipleContexts()
        self.assertEqual(len(Graph(self.graph.store, c1)), 1)
        self.assertEqual(len(self.graph.get_context(c1)), 1)

        self.graph.remove_context(self.graph.get_context(c1))
        self.assertTrue(self.c1 not in self.graph.contexts())

    def testRemoveAny(self):
        Any = None
        self.addStuffInMultipleContexts()
        self.graph.remove((Any, Any, Any))
        self.assertEqual(len(self.graph), 0)

    def testTriples(self):
        # Exhaustive triples()/accessor checks: every pattern is queried both
        # through context c1 and through the union graph, expecting the same
        # counts because all data lives in c1.
        tarek = self.tarek
        michel = self.michel
        bob = self.bob
        likes = self.likes
        hates = self.hates
        pizza = self.pizza
        cheese = self.cheese
        c1 = self.c1
        asserte = self.assertEqual
        triples = self.graph.triples
        graph = self.graph
        c1graph = Graph(self.graph.store, c1)
        c1triples = c1graph.triples
        Any = None

        self.addStuff()

        # unbound subjects with context
        asserte(len(list(c1triples((Any, likes, pizza)))), 2)
        asserte(len(list(c1triples((Any, hates, pizza)))), 1)
        asserte(len(list(c1triples((Any, likes, cheese)))), 3)
        asserte(len(list(c1triples((Any, hates, cheese)))), 0)

        # unbound subjects without context, same results!
        asserte(len(list(triples((Any, likes, pizza)))), 2)
        asserte(len(list(triples((Any, hates, pizza)))), 1)
        asserte(len(list(triples((Any, likes, cheese)))), 3)
        asserte(len(list(triples((Any, hates, cheese)))), 0)

        # unbound objects with context
        asserte(len(list(c1triples((michel, likes, Any)))), 2)
        asserte(len(list(c1triples((tarek, likes, Any)))), 2)
        asserte(len(list(c1triples((bob, hates, Any)))), 2)
        asserte(len(list(c1triples((bob, likes, Any)))), 1)

        # unbound objects without context, same results!
        asserte(len(list(triples((michel, likes, Any)))), 2)
        asserte(len(list(triples((tarek, likes, Any)))), 2)
        asserte(len(list(triples((bob, hates, Any)))), 2)
        asserte(len(list(triples((bob, likes, Any)))), 1)

        # unbound predicates with context
        asserte(len(list(c1triples((michel, Any, cheese)))), 1)
        asserte(len(list(c1triples((tarek, Any, cheese)))), 1)
        asserte(len(list(c1triples((bob, Any, pizza)))), 1)
        asserte(len(list(c1triples((bob, Any, michel)))), 1)

        # unbound predicates without context, same results!
        asserte(len(list(triples((michel, Any, cheese)))), 1)
        asserte(len(list(triples((tarek, Any, cheese)))), 1)
        asserte(len(list(triples((bob, Any, pizza)))), 1)
        asserte(len(list(triples((bob, Any, michel)))), 1)

        # unbound subject, objects with context
        asserte(len(list(c1triples((Any, hates, Any)))), 2)
        asserte(len(list(c1triples((Any, likes, Any)))), 5)

        # unbound subject, objects without context, same results!
        asserte(len(list(triples((Any, hates, Any)))), 2)
        asserte(len(list(triples((Any, likes, Any)))), 5)

        # unbound predicates, objects with context
        asserte(len(list(c1triples((michel, Any, Any)))), 2)
        asserte(len(list(c1triples((bob, Any, Any)))), 3)
        asserte(len(list(c1triples((tarek, Any, Any)))), 2)

        # unbound predicates, objects without context, same results!
        asserte(len(list(triples((michel, Any, Any)))), 2)
        asserte(len(list(triples((bob, Any, Any)))), 3)
        asserte(len(list(triples((tarek, Any, Any)))), 2)

        # unbound subjects, predicates with context
        asserte(len(list(c1triples((Any, Any, pizza)))), 3)
        asserte(len(list(c1triples((Any, Any, cheese)))), 3)
        asserte(len(list(c1triples((Any, Any, michel)))), 1)

        # unbound subjects, predicates without context, same results!
        asserte(len(list(triples((Any, Any, pizza)))), 3)
        asserte(len(list(triples((Any, Any, cheese)))), 3)
        asserte(len(list(triples((Any, Any, michel)))), 1)

        # all unbound with context
        asserte(len(list(c1triples((Any, Any, Any)))), 7)
        # all unbound without context, same result!
        asserte(len(list(triples((Any, Any, Any)))), 7)

        for c in [graph, self.graph.get_context(c1)]:
            # unbound subjects
            asserte(set(c.subjects(likes, pizza)), set((michel, tarek)))
            asserte(set(c.subjects(hates, pizza)), set((bob,)))
            asserte(set(c.subjects(likes, cheese)), set([tarek, bob, michel]))
            asserte(set(c.subjects(hates, cheese)), set())

            # unbound objects
            asserte(set(c.objects(michel, likes)), set([cheese, pizza]))
            asserte(set(c.objects(tarek, likes)), set([cheese, pizza]))
            asserte(set(c.objects(bob, hates)), set([michel, pizza]))
            asserte(set(c.objects(bob, likes)), set([cheese]))

            # unbound predicates
            asserte(set(c.predicates(michel, cheese)), set([likes]))
            asserte(set(c.predicates(tarek, cheese)), set([likes]))
            asserte(set(c.predicates(bob, pizza)), set([hates]))
            asserte(set(c.predicates(bob, michel)), set([hates]))

            asserte(set(
                c.subject_objects(hates)), set([(bob, pizza), (bob, michel)]))
            asserte(
                set(c.subject_objects(likes)),
                set(
                    [(tarek, cheese), (michel, cheese),
                     (michel, pizza), (bob, cheese), (tarek, pizza)]))

            asserte(set(c.predicate_objects(
                michel)), set([(likes, cheese), (likes, pizza)]))
            asserte(set(c.predicate_objects(bob)), set([(likes, cheese),
                                                        (hates, pizza), (hates, michel)]))
            asserte(set(c.predicate_objects(
                tarek)), set([(likes, cheese), (likes, pizza)]))

            asserte(set(c.subject_predicates(
                pizza)), set([(bob, hates), (tarek, likes), (michel, likes)]))
            asserte(set(c.subject_predicates(cheese)), set([(
                bob, likes), (tarek, likes), (michel, likes)]))
            asserte(set(c.subject_predicates(michel)), set([(bob, hates)]))

            asserte(set(c), set(
                [(bob, hates, michel), (bob, likes, cheese),
                 (tarek, likes, pizza), (michel, likes, pizza),
                 (michel, likes, cheese), (bob, hates, pizza),
                 (tarek, likes, cheese)]))

        # remove stuff and make sure the graph is empty again
        self.removeStuff()
        asserte(len(list(c1triples((Any, Any, Any)))), 0)
        asserte(len(list(triples((Any, Any, Any)))), 0)
elif x.get("type") == "person": g.add(( me, DCTERMS.contributor, URIRef(MT + x.get("data")), )) elif register.tag == "{http://www.iana.org/assignments}people": for person in register.getchildren(): me = URIRef(MT + person.get("id")) g.add((me, RDF.type, FOAF.Agent)) g.add((me, FOAF.name, Literal(person.name))) if hasattr(person, "uri"): if str(person.uri).startswith("mailto:") or "@" in str( person.uri): g.get_context(URIRef(MT + "person/")).add(( me, FOAF.mbox, URIRef( str(person.uri).replace("&", "@").replace(" at ", "@")), )) elif str(person.uri).startswith("http"): g.get_context(URIRef(MT + "person/")).add( (me, FOAF.homepage, URIRef(str(person.uri)))) else: # junk value pass with open("mediatypes.ttl", "w") as f: f.write(g.serialize(format="turtle").decode("utf-8"))
class TestSparql11(unittest.TestCase):
    """Integration tests for the SPARQLUpdateStore against a live endpoint.

    Requires a SPARQL 1.1 server at localhost:3030 exposing the ``ukpp``
    dataset.  Fixture terms (``graphuri``, ``othergraphuri``, ``tarek``,
    ``michel``, ``bob``, ``likes``, ``hates``, ``pizza``, ``cheese``) are
    module-level constants defined elsewhere in this file.

    NOTE(review): this chunk is truncated -- the class likely continues
    past this excerpt.
    """

    def setUp(self):
        self.longMessage = True
        self.graph = ConjunctiveGraph('SPARQLUpdateStore')

        root = "http://localhost:3030/ukpp/"
        self.graph.open((root + "sparql", root + "update"))

        # clean out the store
        for c in self.graph.contexts():
            c.remove((None, None, None))

    def tearDown(self):
        self.graph.close()

    def testSimpleGraph(self):
        # Adds into two named graphs and checks query/triples/initBindings
        # behavior within a single named-graph context.
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g.add((bob, likes, pizza))
        g.add((bob, likes, cheese))

        g2 = self.graph.get_context(othergraphuri)
        g2.add((michel, likes, pizza))

        self.assertEquals(3, len(g), 'graph contains 3 triples')
        self.assertEquals(1, len(g2), 'other graph contains 1 triple')

        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = g.triples((None, likes, pizza))
        self.assertEquals(2, len(list(r)), "two people like pizza")

        # Test initBindings
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                    initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = g.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.add((tarek, likes, pizza))
        g.remove((tarek, likes, pizza))
        r = g.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only bob likes pizza")

    def testConjunctiveDefault(self):
        # The default graph of the ConjunctiveGraph is the union of all
        # named graphs on the endpoint.
        g = self.graph.get_context(graphuri)
        g.add((tarek, likes, pizza))
        g2 = self.graph.get_context(othergraphuri)
        g2.add((bob, likes, pizza))
        g.add((tarek, hates, cheese))

        self.assertEquals(2, len(g), 'graph contains 2 triples')
        self.assertEquals(3, len(self.graph),
                          'default union graph contains three triples')

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(2, len(list(r)), "two people like pizza")

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }",
                             initBindings={'s': tarek})
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, pizza))
        self.assertEquals(1, len(list(r)), "i was asking only about tarek")

        r = self.graph.triples((tarek, likes, cheese))
        self.assertEquals(0, len(list(r)), "tarek doesn't like cheese")

        g2.remove((bob, likes, pizza))

        r = self.graph.query("SELECT * WHERE { ?s <urn:likes> <urn:pizza> . }")
        self.assertEquals(1, len(list(r)), "only tarek likes pizza")
class AGP(set):
    """An Agora Graph Pattern: a set of triple patterns (TPs).

    Elements may be ``TP`` instances or their string form; iteration
    always yields ``TP`` objects (strings are parsed on the fly using the
    pattern's prefix map).  The class can project itself as a networkx
    wire graph and as an rdflib ConjunctiveGraph whose contexts group the
    connected components of the pattern.
    """

    def __init__(self, s=(), prefixes=None):
        super(AGP, self).__init__(s)
        self.__prefixes = prefixes or {}
        self.__graph = ConjunctiveGraph()

    @property
    def prefixes(self):
        # type: () -> dict
        # Prefix -> namespace map used when parsing string TPs.
        return self.__prefixes

    @property
    def wire(self):
        # type: () -> nx.DiGraph
        """
        Creates a graph from the graph pattern
        :return: The graph (networkx)
        """
        g = nx.DiGraph()
        for s, p, o in self:
            edge_data = {'link': p}
            g.add_node(s)
            if isinstance(o, Variable):
                g.add_node(o)
            else:
                # Non-variable objects carry their value as a 'filter'
                # node attribute and a 'to' edge attribute.
                g.add_node(o, filter=o)
                edge_data['to'] = o
            g.add_edge(s, o, **edge_data)
        return g

    @property
    def roots(self):
        # type: () -> iter
        # Root nodes of the wire graph: those with minimal in-degree,
        # restricted to cycle members when the pattern is fully cyclic.
        def filter_root(x):
            r = x[1] == min_in
            if min_in > 0:
                r = r and x[0] in cycle_elms
            return r

        w = self.wire
        # NOTE(review): nx.simple_cycles is evaluated twice here (once for
        # the emptiness check, once for the union) -- confirm intentional.
        cycle_elms = list(nx.simple_cycles(w))
        if cycle_elms:
            cycle_elms = set.union(*map(lambda x: set(x), list(nx.simple_cycles(w))))
        in_deg = list(w.in_degree())
        min_in = min(map(lambda x: x[1], in_deg)) if in_deg else 0
        roots = map(lambda x: x[0], filter(filter_root, list(w.in_degree())))
        return roots

    def __nodify(self, elm, variables):
        # Map a Variable to a stable BNode (cached in `variables`);
        # pass any other term through unchanged.
        if isinstance(elm, Variable):
            if elm not in variables:
                elm_node = BNode('?' + str(elm))
                variables[elm] = elm_node
            return variables[elm]
        else:
            return elm

    def __iter__(self):
        # Always yield TP objects, parsing raw strings lazily.
        for x in super(AGP, self).__iter__():
            yield x if isinstance(x, TP) else TP.from_string(x, prefixes=self.__prefixes)

    @property
    def graph(self):
        # type: () -> ConjunctiveGraph
        # Lazily materialise the pattern as an RDF dataset: one context per
        # connected component of the (undirected) pattern graph.
        # NOTE(review): `not self.__graph` relies on an empty Graph being
        # falsy, so the build runs only while the graph is empty -- confirm
        # this caching behaviour is intended.
        if not self.__graph:
            for prefix in self.__prefixes:
                self.__graph.bind(prefix, self.__prefixes[prefix])
            variables = {}

            nxg = nx.Graph()
            for (s, p, o) in self:
                nxg.add_nodes_from([s, o])
                nxg.add_edge(s, o)

            # Contexts are keyed by the component's enumeration index.
            contexts = dict([(str(index), c)
                             for (index, c) in enumerate(nx.connected_components(nxg))])

            for (s, p, o) in self:
                s_node = self.__nodify(s, variables)
                o_node = self.__nodify(o, variables)
                p_node = self.__nodify(p, variables)

                context = None
                for uid in contexts:
                    if s in contexts[uid]:
                        context = str(uid)

                if context is not None:
                    self.__graph.get_context(context).add((s_node, p_node, o_node))

        return self.__graph

    @staticmethod
    def from_string(st, prefixes):
        # type: (str, dict) -> AGP
        # Parse "{ tp1 . tp2 . ... }" into an AGP; returns None when the
        # input is not brace-delimited.
        gp = None
        if st.startswith('{') and st.endswith('}'):
            st = st.replace('{', '').replace('}', '').strip()
            tps = re.split('\. ', st)
            tps = map(lambda x: x.strip().strip('.'), filter(lambda y: y != '', tps))
            gp = AGP(prefixes=prefixes)
            for tp in tps:
                gp.add(TP.from_string(tp, gp.prefixes))
        return gp

    def get_tp_context(self, (s, p, o)):
        # type: (tuple) -> str
        # Context id (as string) of the component that contains the triple.
        return str(list(self.graph.contexts((s, p, o))).pop().identifier)