def test_result_fragments(self):
    """The serialized RDF/XML must contain each expected fragment."""
    output = serialize(self.sourceGraph, self.serializer)
    assert b('<Test rdf:about="http://example.org/data/a">') in output
    assert b('<rdf:Description rdf:about="http://example.org/data/b">') in output
    assert b('<name xml:lang="en">Bee</name>') in output
    assert b('<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>') in output
    assert b('<BNode rdf:nodeID="') in output, \
        "expected one identified bnode in serialized graph"
def to_key(triple, context):
    "Takes a triple of strings plus a context string; returns the index key"
    # NOTE: `i` is a free variable from the enclosing index-factory scope;
    # it rotates the triple so each index (spo/pos/osp) leads with a
    # different term after the context field.
    return b("^").join(
        (context, triple[i % 3], triple[(i + 1) % 3], triple[(i + 2) % 3],
         b("")))  # "" to tac on the trailing ^
def testFinalNewline():
    """
    http://code.google.com/p/rdflib/issues/detail?id=5
    """
    import platform
    import sys
    if getattr(sys, 'pypy_version_info', None) or platform.system() == 'Java':
        from nose import SkipTest
        raise SkipTest(
            'Testing under pypy and Jython2.5 fails to detect that ' +
            'IOMemory is a context_aware store')

    graph = Graph()
    graph.add((URIRef("http://ex.org/a"),
               URIRef("http://ex.org/b"),
               URIRef("http://ex.org/c")))

    failed = set()
    for plugin in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
        if plugin.name in ('nquads', 'trix'):
            continue  # these formats are exempted from the newline check
        serialized = graph.serialize(format=plugin.name)
        pieces = serialized.split(b("\n"))
        if b("\n") not in serialized or pieces[-1] != b(''):
            failed.add(plugin.name)
    assert len(failed) == 0, "No final newline for formats: '%s'" % failed
def test_validating_unquote_raises(self):
    """Validating unquote() must raise ParseError for these inputs."""
    ntriples.validate = True
    bad = b("""<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> .""")
    self.assertRaises(ntriples.ParseError, ntriples.unquote, bad)
    bad = b("""<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> .""")
    self.assertRaises(ntriples.ParseError, ntriples.unquote, bad)
    # restore the module-level default
    ntriples.validate = False
def test_result_fragments_with_base(self):
    """With base/xml_base set, rdf:about values must be relativized."""
    output = serialize(
        self.sourceGraph, self.serializer,
        extra_args={'base': "http://example.org/",
                    'xml_base': "http://example.org/"})
    assert b('xml:base="http://example.org/"') in output
    assert b('<Test rdf:about="data/a">') in output
    assert b('<rdf:Description rdf:about="data/b">') in output
    assert b('<value rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">3</value>') in output
    assert b('<BNode rdf:nodeID="') in output, \
        "expected one identified bnode in serialized graph"
def remove(self, triple, context):
    """Remove matching triples (terms may be None as wildcards) from
    ``context``, or from all contexts when ``context`` is None/self."""
    (subject, predicate, object) = triple
    assert self.__open, "The Store must be open."
    Store.remove(self, (subject, predicate, object), context)
    _to_string = self._to_string
    # Treating the store itself as a context means "no specific context".
    if context is not None:
        if context == self:
            context = None
    if subject is not None \
            and predicate is not None \
            and object is not None \
            and context is not None:
        # Fully-bound case: remove a single (context, triple) entry directly.
        s = _to_string(subject)
        p = _to_string(predicate)
        o = _to_string(object)
        c = _to_string(context)
        value = self.__indices[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)))
        if value is not None:
            self.__remove((bb(s), bb(p), bb(o)), bb(c))
            self.__needs_sync = True
    else:
        # Wildcard case: scan the best-matching index by prefix.
        cspo, cpos, cosp = self.__indices
        index, prefix, from_key, results_from_key = self.__lookup(
            (subject, predicate, object), context)
        needs_sync = False
        for key in index.match_prefix(prefix):
            c, s, p, o = from_key(key)
            if context is None:
                contexts_value = index.get(key) or b("")
                # remove triple from all non quoted contexts
                contexts = set(contexts_value.split(b("^")))
                contexts.add(b(""))  # and from the conjunctive index
                for c in contexts:
                    for i, _to_key, _ in self.__indices_info:
                        i.remove(_to_key((s, p, o), c))
            else:
                self.__remove((s, p, o), c)
            needs_sync = True
        if context is not None:
            if subject is None and predicate is None and object is None:
                # Dropping a whole context also removes its registry entry.
                # TODO: also if context becomes empty and not just on
                # remove((None, None, None), c)
                try:
                    self.__contexts.remove(bb(_to_string(context)))
                # except db.DBNotFoundError, e:
                #     pass
                except Exception as e:  # pragma: NO COVER
                    print("%s, Failed to delete %s" % (
                        e, context))  # pragma: NO COVER
                    pass  # pragma: NO COVER
        self.__needs_sync = needs_sync
def test_validating_unquote_raises(self):
    """unquote() in validating mode must raise ParseError for these lines."""
    ntriples.validate = True
    uniquot = b(
        """<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> .""")
    self.assertRaises(ntriples.ParseError, ntriples.unquote, uniquot)
    # Same line with a doubled backslash before the escape must also fail.
    uniquot = b(
        """<http://www.w3.org/People/Berners-Lee/card#cm> <http://xmlns.com/foaf/0.1/name> "R\\\\u00E4ksm\\u00F6rg\\u00E5s" <http://www.w3.org/People/Berners-Lee/card> .""")
    self.assertRaises(ntriples.ParseError, ntriples.unquote, uniquot)
    # revert to default
    ntriples.validate = False
def __len__(self, context=None):
    """Return the number of triples, optionally restricted to ``context``."""
    assert self.__open, "The Store must be open."
    if context is not None:
        if context == self:
            context = None
    if context is None:
        # Conjunctive-index rows start with an empty context field: "^s^p^o^".
        prefix = b("^")
    else:
        prefix = bb("%s^" % self._to_string(context))
    index = self.__indicies[0]
    cursor = index.cursor()
    current = cursor.set_range(prefix)
    count = 0
    # Walk forward from the first key >= prefix until keys stop matching.
    while current:
        key, value = current
        if key.startswith(prefix):
            count += 1
            # Hack to stop 2to3 converting this to next(cursor)
            current = getattr(cursor, 'next')()
        else:
            break
    cursor.close()
    return count
def contexts(self, triple=None):
    """Generate the contexts in the store; when ``triple`` is given, only
    the contexts containing that triple."""
    _from_string = self._from_string
    _to_string = self._to_string

    if triple:
        s, p, o = triple
        s = _to_string(s)
        p = _to_string(p)
        o = _to_string(o)
        # The conjunctive row keyed "^s^p^o^" stores the ^-joined context
        # strings for this triple.
        contexts = self.__indicies[0].get(
            bb("%s^%s^%s^%s^" % ("", s, p, o)))

        if contexts:
            for c in contexts.split(b("^")):
                if c:
                    yield _from_string(c)

    else:
        index = self.__contexts
        cursor = index.cursor()
        current = cursor.first()
        cursor.close()
        # A fresh cursor is opened per step so that work done between
        # yields cannot invalidate it.
        while current:
            key, value = current
            context = _from_string(key)
            yield context
            cursor = index.cursor()
            try:
                cursor.set_range(key)
                # Hack to stop 2to3 converting this to next(cursor)
                current = getattr(cursor, 'next')()
            except db.DBNotFoundError:
                current = None
            cursor.close()
def contexts(self, triple=None):
    """Yield each context; restricted to those holding ``triple`` if given."""
    _from_string = self._from_string
    _to_string = self._to_string

    if triple:
        s, p, o = triple
        s = _to_string(s)
        p = _to_string(p)
        o = _to_string(o)
        # Empty leading field selects the conjunctive (all-contexts) row,
        # whose value is a ^-separated list of context strings.
        contexts = self.__indicies[0].get(bb(
            "%s^%s^%s^%s^" % ("", s, p, o)))

        if contexts:
            for c in contexts.split(b("^")):
                if c:
                    yield _from_string(c)

    else:
        index = self.__contexts
        cursor = index.cursor()
        current = cursor.first()
        cursor.close()
        # Reposition with set_range(key) each iteration instead of keeping
        # one long-lived cursor open across yields.
        while current:
            key, value = current
            context = _from_string(key)
            yield context
            cursor = index.cursor()
            try:
                cursor.set_range(key)
                # Hack to stop 2to3 converting this to next(cursor)
                current = getattr(cursor, 'next')()
            except db.DBNotFoundError:
                current = None
            cursor.close()
def test_nonvalidating_unquote(self):
    """Non-validating unquote() must accept a well-formed line and return
    a text (unicode) string."""
    safe = b(
        """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> .""")
    ntriples.validate = False
    res = ntriples.unquote(safe)
    # assert_ is a deprecated unittest alias; assertTrue is the supported name.
    self.assertTrue(isinstance(res, unicode))
def uriref(self):
    """Consume a URI reference at the cursor; return a URI, or False if
    the next token does not start with '<'."""
    if not self.peek(b('<')):
        return False
    raw = self.eat(r_uriref).group(1)
    return URI(uriquote(unquote(raw)))
def unquote(s):
    """Unquote an N-Triples string.

    In non-validating mode the whole string is decoded in one shot; in
    validating mode each escape is checked individually and illegal
    escapes/characters raise ParseError.
    """
    if not validate:
        return s.decode('unicode-escape')
    else:
        result = []
        while s:
            # Run of characters that need no unescaping.
            m = r_safe.match(s)
            if m:
                s = s[m.end():]
                result.append(m.group(1).decode('ascii'))
                continue

            # Two-character escapes like \t, \n, \" mapped via `quot`.
            m = r_quot.match(s)
            if m:
                s = s[2:]
                result.append(quot[m.group(1)])
                continue

            # \uXXXX / \UXXXXXXXX escapes; reject out-of-range codepoints.
            m = r_uniquot.match(s)
            if m:
                s = s[m.end():]
                u, U = m.groups()
                codepoint = int(u or U, 16)
                if codepoint > 0x10FFFF:
                    raise ParseError("Disallowed codepoint: %08X" % codepoint)
                result.append(unichr(codepoint))
            elif s.startswith(b('\\')):
                raise ParseError("Illegal escape at: %s..." % s[:10])
            else:
                raise ParseError("Illegal literal character: %r" % s[0])
        return u''.join(result)
def test_issue_130():
    """Parsing remote content must preserve the expected rdf:about URI
    (rdflib issue 130); skipped when the remote data is unavailable."""
    g = rdflib.Graph()
    try:
        g.parse(location="http://linked-data.ru/example")
    # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt/SystemExit;
    # only real errors should be converted into a skip.
    except Exception:
        raise SkipTest('Test data URL unparseable')
    assert b('rdf:about="http://semanticfuture.net/linked-data/example/#company"') in g.serialize(), g.serialize()
def testIssue78(self):
    """NT serialization of a non-ASCII literal must be bytes containing
    \\u escapes."""
    g = Graph()
    g.add((URIRef("foo"), URIRef("foo"),
           Literal(u"R\u00E4ksm\u00F6rg\u00E5s")))
    s = g.serialize(format='nt')
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(type(s), bytestype)
    self.assertTrue(b(r"R\u00E4ksm\u00F6rg\u00E5s") in s)
def test_turtle_namespace_prefixes(self):
    """A prefix starting with a digit (_9:) must not survive turtle
    serialization, and the output must round-trip."""
    g = ConjunctiveGraph()
    n3 = \
        """
    @prefix _9: <http://data.linkedmdb.org/resource/movie/> .
    @prefix p_9: <urn:test:> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .

    p_9:a p_9:b p_9:c .

    <http://data.linkedmdb.org/resource/director/1>
        a <http://data.linkedmdb.org/resource/movie/director>;
        rdfs:label "Cecil B. DeMille (Director)";
        _9:director_name "Cecil B. DeMille" ."""
    g.parse(data=n3, format='n3')
    turtle = g.serialize(format="turtle")

    # Check round-tripping, just for kicks.
    g = ConjunctiveGraph()
    g.parse(data=turtle, format='turtle')

    # Shouldn't have got to here
    s = g.serialize(format="turtle")
    # assert_ is a deprecated unittest alias; use assertTrue.
    self.assertTrue(b('@prefix _9') not in s)
def test_validating_unquote(self):
    """A well-formed line must pass unquote() in validating mode."""
    line = b("""<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> .""")
    ntriples.validate = True
    res = ntriples.unquote(line)
    # revert to default
    ntriples.validate = False
    log.debug("restype %s" % type(res))
def test_turtle_namespace_prefixes(self):
    """Turtle serialization must drop the digit-leading _9: prefix and the
    result must round-trip back through the turtle parser."""
    g = ConjunctiveGraph()
    n3 = \
        """
    @prefix _9: <http://data.linkedmdb.org/resource/movie/> .
    @prefix p_9: <urn:test:> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .

    p_9:a p_9:b p_9:c .

    <http://data.linkedmdb.org/resource/director/1>
        a <http://data.linkedmdb.org/resource/movie/director>;
        rdfs:label "Cecil B. DeMille (Director)";
        _9:director_name "Cecil B. DeMille" ."""
    g.parse(data=n3, format='n3')
    turtle = g.serialize(format="turtle")

    # Check round-tripping, just for kicks.
    g = ConjunctiveGraph()
    g.parse(data=turtle, format='turtle')

    # Shouldn't have got to here
    s = g.serialize(format="turtle")
    self.assertTrue(b('@prefix _9') not in s)
def from_key(key):
    "Takes a key; returns the (context, subject, predicate, object) strings"
    parts = key.split(b("^"))
    # `i` is a free variable from the enclosing index-factory scope; the
    # arithmetic undoes the rotation applied by the matching to_key.
    return \
        parts[0], \
        parts[(3 - i + 0) % 3 + 1], \
        parts[(3 - i + 1) % 3 + 1], \
        parts[(3 - i + 2) % 3 + 1]
def testBaseSerialize(self):
    """base= must relativize URIs; parsing back with the matching
    publicID must reproduce the original graph."""
    graph = Graph()
    graph.add((URIRef('http://example.com/people/Bob'),
               URIRef('urn:knows'),
               URIRef('http://example.com/people/Linda')))
    serialized = graph.serialize(base='http://example.com/', format='n3')
    self.assertTrue(b('<people/Bob>') in serialized)
    roundtrip = ConjunctiveGraph()
    roundtrip.parse(data=serialized, publicID='http://example.com/',
                    format='n3')
    self.assertEqual(list(graph), list(roundtrip))
def test_n32(self):
    # this test not generating prefixes for subjects/objects
    graph = Graph()
    graph.add((URIRef("http://example1.com/foo"),
               URIRef("http://example2.com/bar"),
               URIRef("http://example3.com/baz")))
    serialized = graph.serialize(format="n3")
    self.assertTrue(
        b("<http://example1.com/foo> ns1:bar <http://example3.com/baz> .")
        in serialized)
def setUp(self):
    """Populate self.graph with two bnodes from an inline n3 document."""
    NS = u"http://example.org/"
    self.graph = Graph(store)
    source = StringInputSource(b("""
       @prefix : <http://example.org/> .
       @prefix rdf: <%s> .
       @prefix rdfs: <%s> .
       [ :prop :val ].
       [ a rdfs:Class ].""" % (RDF, RDFS)))
    self.graph.parse(source, format="n3")
def serialize(self, stream, base=None, encoding=None, **args):
    """Write the store to ``stream`` as N-Triples, one row per triple.

    ``base`` and ``encoding`` are accepted only for interface
    compatibility; the serializer's own encoding is always used.
    """
    if base is not None:
        warnings.warn("NTSerializer does not support base.")
    if encoding is not None:
        warnings.warn("NTSerializer does not use custom encoding.")
    encoding = self.encoding
    write = stream.write
    for triple in self.store:
        write(_nt_row(triple).encode(encoding, "replace"))
    write(b("\n"))
def test_validating_unquote(self):
    """unquote() in validating mode must accept a well-formed line."""
    quot = b(
        """<http://example.org/alice/foaf.rdf#me> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://xmlns.com/foaf/0.1/Person> <http://example.org/alice/foaf1.rdf> .""")
    ntriples.validate = True
    res = ntriples.unquote(quot)
    # revert to default
    ntriples.validate = False
    log.debug("restype %s" % type(res))
def from_key(key, subject, predicate, object, contexts_value):
    "Takes a key and subject, predicate, object; returns tuple for yield"
    # Terms already bound by the caller are passed through; only the
    # wildcard (None) positions are decoded from the key. `i` and
    # `from_string` are free variables from the enclosing factory scope.
    parts = key.split(b("^"))
    if subject is None:
        # TODO: i & 1: # dis assemble and/or measure to see which is faster
        # subject is None or i & 1
        s = from_string(parts[(3 - i + 0) % 3 + 1])
    else:
        s = subject
    if predicate is None:  # i & 2:
        p = from_string(parts[(3 - i + 1) % 3 + 1])
    else:
        p = predicate
    if object is None:  # i & 4:
        o = from_string(parts[(3 - i + 2) % 3 + 1])
    else:
        o = object
    # Second element is a lazy generator over the triple's contexts.
    return (s, p, o), (
        from_string(c)
        for c in contexts_value.split(b("^")) if c)
def serialize(self, stream, base=None, encoding=None, **args):
    """Write the store to ``stream``, one serialized row per triple.

    ``base`` and ``encoding`` are ignored (with a warning); the
    serializer's configured encoding is always used.
    """
    if base is not None:
        warnings.warn("YTSerializer does not support base.")
    if encoding is not None:
        warnings.warn("YTSerializer does not use custom encoding.")
    encoding = self.encoding
    for row in (_yt_row(triple) for triple in self.store):
        stream.write(row.encode(encoding, "replace"))
    stream.write(b("\n"))
def testSPARQLNotEquals():
    """A FILTER (?val != 1) query must return only :bar."""
    NS = u"http://example.org/"
    graph = ConjunctiveGraph()
    # NOTE(review): the %-formatting is applied to the bytes returned by
    # b(...); bytes %-formatting only exists on py2 and py3.5+ — verify
    # against the supported interpreter range.
    graph.parse(StringInputSource(b("""
       @prefix    : <http://example.org/> .
       @prefix rdf: <%s> .
       :foo rdf:value 1.
       :bar rdf:value 2.""") % RDF.uri), format="n3")
    rt = graph.query(b("""SELECT ?node
                WHERE {
                        ?node rdf:value ?val.
                        FILTER (?val != 1)
                       }"""),
                     initNs={b('rdf'): RDF.uri},
                     DEBUG=False)
    for row in rt:
        item = row[0]
        assert item == URIRef("http://example.org/bar"), \
            "unexpected item of '%s'" % repr(item)
def testBaseSerialize(self):
    """Serializing with base= must relativize URIs; parsing the output
    back with the matching publicID must reproduce the graph."""
    g = Graph()
    g.add((URIRef('http://example.com/people/Bob'), URIRef(
        'urn:knows'), URIRef('http://example.com/people/Linda')))
    s = g.serialize(base='http://example.com/', format='n3')
    self.assertTrue(b('<people/Bob>') in s)
    g2 = ConjunctiveGraph()
    g2.parse(data=s, publicID='http://example.com/', format='n3')
    self.assertEqual(list(g), list(g2))
def add(self, triple, context, quoted=False, txn=None):
    """\
    Add a triple to the store of triples.

    Writes the (context, triple) row into all three permutation indices
    and, for non-quoted triples, updates the conjunctive rows whose value
    is the ^-joined set of contexts containing the triple.
    """
    (subject, predicate, object) = triple
    assert self.__open, "The Store must be open."
    assert context != self, "Can not add triple directly to store"
    Store.add(self, (subject, predicate, object), context, quoted)

    _to_string = self._to_string

    # Encode each term and the context to its interned string form.
    s = _to_string(subject, txn=txn)
    p = _to_string(predicate, txn=txn)
    o = _to_string(object, txn=txn)
    c = _to_string(context, txn=txn)

    cspo, cpos, cosp = self.__indicies

    # Skip all writes if this (context, triple) pair already exists.
    value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
    if value is None:
        self.__contexts.put(bb(c), "", txn=txn)

        # Conjunctive row (empty leading context field) accumulates the
        # set of contexts this triple appears in.
        contexts_value = cspo.get(bb("%s^%s^%s^%s^" % ("", s, p, o)),
                                  txn=txn) or b("")
        contexts = set(contexts_value.split(b("^")))
        contexts.add(bb(c))
        contexts_value = b("^").join(contexts)
        assert contexts_value is not None

        cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
        cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
        cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
        if not quoted:
            # Quoted (formula) triples stay out of the conjunctive index.
            cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)),
                     contexts_value, txn=txn)
            cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)),
                     contexts_value, txn=txn)
            cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)),
                     contexts_value, txn=txn)

        self.__needs_sync = True
def test_n3(self):
    graph = Graph()
    graph.add((URIRef("http://example.com/foo"),
               URIRef("http://example.com/bar"),
               URIRef("http://example.com/baz")))
    serialized = graph.serialize(format="n3")
    # Gunnar disagrees that this is right:
    # self.assertTrue("<http://example.com/foo> ns1:bar <http://example.com/baz> ." in n3)
    # as this is much prettier, and ns1 is already defined:
    self.assertTrue(b("ns1:foo ns1:bar ns1:baz .") in serialized)
def testSameSubject(self):
    """Triples sharing a subject across two named graphs must each be
    serialized exactly once, with no empty graph blocks."""
    g = rdflib.ConjunctiveGraph()
    g.get_context('urn:a').add((
        rdflib.URIRef('urn:1'),
        rdflib.URIRef('urn:p1'),
        rdflib.URIRef('urn:o1')))
    g.get_context('urn:b').add((
        rdflib.URIRef('urn:1'),
        rdflib.URIRef('urn:p2'),
        rdflib.URIRef('urn:o2')))
    self.assertEqual(len(g.get_context('urn:a')), 1)
    self.assertEqual(len(g.get_context('urn:b')), 1)
    s = g.serialize(format='trig')
    self.assertEqual(len(re.findall(b("p1"), s)), 1)
    self.assertEqual(len(re.findall(b("p2"), s)), 1)
    # assert_ is a deprecated unittest alias; use assertTrue.
    self.assertTrue(b('{}') not in s)  # no empty graphs!
def serialize(self, stream, base=None, encoding=None, **args):
    """Write every context of the store to ``stream`` as N-Quads rows."""
    if base is not None:
        warnings.warn("NQuadsSerializer does not support base.")
    if encoding is not None:
        warnings.warn("NQuadsSerializer does not use custom encoding.")
    encoding = self.encoding
    write = stream.write
    for context in self.store.contexts():
        identifier = context.identifier
        for triple in context:
            write(_nq_row(triple, identifier).encode(encoding, "replace"))
    write(b("\n"))
def testSameSubject(self):
    """Each predicate must appear exactly once in the trig output."""
    graph = rdflib.ConjunctiveGraph()
    triple_a = (rdflib.URIRef('urn:1'),
                rdflib.URIRef('urn:p1'),
                rdflib.URIRef('urn:o1'))
    triple_b = (rdflib.URIRef('urn:1'),
                rdflib.URIRef('urn:p2'),
                rdflib.URIRef('urn:o2'))
    graph.get_context('urn:a').add(triple_a)
    graph.get_context('urn:b').add(triple_b)
    self.assertEqual(len(graph.get_context('urn:a')), 1)
    self.assertEqual(len(graph.get_context('urn:b')), 1)
    serialized = graph.serialize(format='trig')
    self.assertEqual(len(re.findall(b("p1"), serialized)), 1)
    self.assertEqual(len(re.findall(b("p2"), serialized)), 1)
    self.assertTrue(b('{}') not in serialized)  # no empty graphs!
def _query_result_contains(self, query, fragments):
    """Run ``query`` and assert each expected fragment appears in the
    normalized XML result serialization.

    Fragments starting with '<sparql:result>' are skipped as known
    false negatives.
    """
    results = self.graph.query(query)
    result_xml = results.serialize(format='xml')
    result_xml = normalize(result_xml)  # TODO: poor mans c14n..
    for frag in fragments:
        if frag.startswith(b('<sparql:result>')):
            raise SkipTest("False negative.")
        # failUnless is a deprecated unittest alias; use assertTrue.
        self.assertTrue(frag in result_xml)
def test_issue_130():
    """Regression test for rdflib issue 130; currently always skipped."""
    raise SkipTest("Remote content change - skip for now")
    # Unreachable until the skip above is removed.
    graph = rdflib.Graph()
    try:
        graph.parse(location="http://linked-data.ru/example")
    except:
        raise SkipTest('Test data URL unparseable')
    if len(graph) == 0:
        raise SkipTest('Test data URL empty of content')
    expected = b('rdf:about="http://semanticfuture.net/linked-data/example/#company"')
    assert expected in graph.serialize(), graph.serialize()
def serialize(self, stream, base=None, encoding=None, **args):
    """Serialize the store as RDF/XML to ``stream``.

    Keyword args: ``max_depth`` (default 3) bounds recursive inlining of
    resources; ``xml_base`` adds an xml:base attribute to the root.
    """
    self.__serialized = {}
    store = self.store
    self.base = base
    self.max_depth = args.get("max_depth", 3)
    assert self.max_depth > 0, "max_depth must be greater than 0"

    self.nm = nm = store.namespace_manager
    self.writer = writer = XMLWriter(stream, nm, encoding)

    # Pre-compute qname namespaces for every predicate and rdf:type object.
    namespaces = {}
    possible = set(store.predicates()).union(
        store.objects(None, RDF.type))
    for predicate in possible:
        prefix, namespace, local = nm.compute_qname(predicate)
        namespaces[prefix] = namespace
    namespaces["rdf"] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    writer.push(RDF.RDF)

    if "xml_base" in args:
        writer.attribute(XMLBASE, args["xml_base"])

    writer.namespaces(namespaces.iteritems())

    # Write out subjects that can not be inline
    for subject in store.subjects():
        if (None, None, subject) in store:
            if (subject, None, subject) in store:
                self.subject(subject, 1)
        else:
            self.subject(subject, 1)

    # write out anything that has not yet been reached
    # write out BNodes last (to ensure they can be inlined where possible)
    bnodes = set()
    for subject in store.subjects():
        if isinstance(subject, BNode):
            bnodes.add(subject)
            continue
        self.subject(subject, 1)

    # now serialize only those BNodes that have not been serialized yet
    for bnode in bnodes:
        if bnode not in self.__serialized:
            # BUG FIX: was self.subject(subject, 1), which re-serialized the
            # stale loop variable from above instead of the pending bnode.
            self.subject(bnode, 1)

    writer.pop(RDF.RDF)
    stream.write(b("\n"))

    # Set to None so that the memory can get garbage collected.
    self.__serialized = None
def serialize(self, stream, base=None, encoding=None, **args):
    """Serialize the store as RDF/XML to ``stream``.

    Keyword args: ``max_depth`` (default 3) bounds recursive inlining of
    resources; ``xml_base`` adds an xml:base attribute to the root.
    """
    self.__serialized = {}
    store = self.store
    self.base = base
    self.max_depth = args.get("max_depth", 3)
    assert self.max_depth > 0, "max_depth must be greater than 0"

    self.nm = nm = store.namespace_manager
    self.writer = writer = XMLWriter(stream, nm, encoding)

    # Pre-compute qname namespaces for every predicate and rdf:type object.
    namespaces = {}
    possible = set(store.predicates()).union(
        store.objects(None, RDF.type))
    for predicate in possible:
        prefix, namespace, local = nm.compute_qname(predicate)
        namespaces[prefix] = namespace
    namespaces["rdf"] = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
    writer.push(RDF.RDF)

    if "xml_base" in args:
        writer.attribute(XMLBASE, args["xml_base"])

    writer.namespaces(iter(namespaces.items()))

    # Write out subjects that can not be inline
    for subject in store.subjects():
        if (None, None, subject) in store:
            if (subject, None, subject) in store:
                self.subject(subject, 1)
        else:
            self.subject(subject, 1)

    # write out anything that has not yet been reached
    # write out BNodes last (to ensure they can be inlined where possible)
    bnodes = set()
    for subject in store.subjects():
        if isinstance(subject, BNode):
            bnodes.add(subject)
            continue
        self.subject(subject, 1)

    # now serialize only those BNodes that have not been serialized yet
    for bnode in bnodes:
        if bnode not in self.__serialized:
            # BUG FIX: was self.subject(subject, 1), which re-serialized the
            # stale loop variable from above instead of the pending bnode.
            self.subject(bnode, 1)

    writer.pop(RDF.RDF)
    stream.write(b("\n"))

    # Set to None so that the memory can get garbage collected.
    self.__serialized = None
def testDefaultGraphSerializesWithoutName(self):
    """The default graph must not be labelled 'None' in trig output."""
    data = """
<http://example.com/thing#thing_a> <http://example.com/knows> <http://example.com/thing#thing_b> .

{ <http://example.com/thing#thing_c> <http://example.com/knows> <http://example.com/thing#thing_d> . }
"""
    graph = rdflib.ConjunctiveGraph()
    graph.parse(data=data, format='trig')
    serialized = graph.serialize(format='trig')
    self.assertTrue(b('None') not in serialized)
def test_issue_130():
    """Remote-content regression test for rdflib issue 130; skipped when
    the remote test data is unavailable or empty."""
    g = rdflib.Graph()
    try:
        g.parse(location="http://linked-data.ru/example")
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit
        # — consider narrowing to Exception.
        raise SkipTest('Test data URL unparseable')
    if len(g) == 0:
        raise SkipTest('Test data URL empty of content')
    assert b(
        'rdf:about="http://semanticfuture.net/linked-data/example/#company"'
    ) in g.serialize(), g.serialize()
def setUp(self):
    """Build the test graph from an inline n3 document containing two
    bnodes (one with a property, one typed rdfs:Class)."""
    NS = u"http://example.org/"  # NOTE(review): unused local — kept as-is
    # `store` is a module-level fixture — presumably selects the store
    # plugin under test; verify against the enclosing module.
    self.graph = Graph(store)
    self.graph.parse(StringInputSource(
        b("""
       @prefix : <http://example.org/> .
       @prefix rdf: <%s> .
       @prefix rdfs: <%s> .
       [ :prop :val ].
       [ a rdfs:Class ].""" % (RDF, RDFS))), format="n3")
def test_n32(self):
    # this test not generating prefixes for subjects/objects
    g = Graph()
    g.add((URIRef("http://example1.com/foo"),
           URIRef("http://example2.com/bar"),
           URIRef("http://example3.com/baz")))
    n3 = g.serialize(format="n3")
    # Only the predicate is abbreviated (ns1:bar); subject and object URIs
    # must stay fully written out.
    self.assertTrue(
        b("<http://example1.com/foo> ns1:bar <http://example3.com/baz> .")
        in n3)
def serialize(self, stream, base=None, encoding=None, **args):
    """Serialize every context of the store to ``stream`` as N-Quads.

    ``base`` and ``encoding`` are accepted for interface compatibility but
    not supported; the serializer's own encoding is always used.
    """
    if base is not None:
        warnings.warn("NQuadsSerializer does not support base.")
    if encoding is not None:
        warnings.warn("NQuadsSerializer does not use custom encoding.")
    encoding = self.encoding
    for context in self.store.contexts():
        for triple in context:
            stream.write(_nq_row(
                triple, context.identifier).encode(encoding, "replace"))
    # single trailing newline after all rows
    stream.write(b("\n"))
def testTurtleFinalDot():
    """
    https://github.com/RDFLib/rdflib/issues/282
    """
    graph = Graph()
    uri = URIRef("http://ex.org/bob.")
    graph.bind("ns", "http://ex.org/")
    graph.add((uri, uri, uri))
    serialized = graph.serialize(format='turtle')
    # the URI must not be abbreviated to a qname ending in "."
    assert b("ns:bob.") not in serialized
def testFinalNewline(self):
    """
    http://code.google.com/p/rdflib/issues/detail?id=5
    """
    import sys
    import platform
    if getattr(sys, 'pypy_version_info', None) or platform.system() == 'Java':
        from nose import SkipTest
        raise SkipTest(
            'Testing under pypy and Jython2.5 fails to detect that ' +
            'IOMemory is a context_aware store')

    failed = set()
    for p in rdflib.plugin.plugins(None, rdflib.plugin.Serializer):
        # BUG FIX: was "p.name is not 'nquads'", which compares string
        # identity rather than equality; use != for a reliable comparison.
        if p.name != 'nquads':
            v = self.graph.serialize(format=p.name)
            lines = v.split(b("\n"))
            if b("\n") not in v or (lines[-1] != b('')):
                failed.add(p.name)
    self.assertEqual(
        len(failed), 0, "No final newline for formats: '%s'" % failed)
def testTurtleFinalDot():
    """
    https://github.com/RDFLib/rdflib/issues/282

    A URI ending in "." must not be abbreviated to a qname ("ns:bob.")
    in turtle output.
    """
    g = rdflib.Graph()
    u = rdflib.URIRef("http://ex.org/bob.")
    g.bind("ns", "http://ex.org/")
    g.add((u, u, u))
    s = g.serialize(format='turtle')
    assert b("ns:bob.") not in s
def add(self, triple, context, quoted=False, txn=None):
    """\
    Add a triple to the store of triples.

    The (context, triple) row is written into all three permutation
    indices; non-quoted triples also update the conjunctive rows, whose
    value is the ^-joined set of contexts containing the triple.
    """
    (subject, predicate, object) = triple
    assert self.__open, "The Store must be open."
    assert context != self, "Can not add triple directly to store"
    Store.add(self, (subject, predicate, object), context, quoted)

    _to_string = self._to_string

    # Encode each term and the context to its interned string form.
    s = _to_string(subject, txn=txn)
    p = _to_string(predicate, txn=txn)
    o = _to_string(object, txn=txn)
    c = _to_string(context, txn=txn)

    cspo, cpos, cosp = self.__indicies

    # Skip the writes entirely if this (context, triple) already exists.
    value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
    if value is None:
        self.__contexts.put(bb(c), "", txn=txn)

        # Conjunctive row (empty leading context field) accumulates the
        # set of contexts this triple appears in.
        contexts_value = cspo.get(
            bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or b("")
        contexts = set(contexts_value.split(b("^")))
        contexts.add(bb(c))
        contexts_value = b("^").join(contexts)
        assert contexts_value is not None

        cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
        cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
        cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
        if not quoted:
            # Quoted (formula) triples stay out of the conjunctive index.
            cspo.put(bb(
                "%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
            cpos.put(bb(
                "%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
            cosp.put(bb(
                "%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)

        self.__needs_sync = True
def testPrefixes(self):
    """Graph-naming URIs must get their own prefix (ns2:) and be used as a
    bare qname, never wrapped in angle brackets."""
    data = """
    @prefix ns1: <http://ex.org/schema#> .
    <http://ex.org/docs/document1> = { ns1:Person_A a ns1:Person ; ns1:TextSpan "Simon" . }
    <http://ex.org/docs/document2> = { ns1:Person_C a ns1:Person ; ns1:TextSpan "Agnes" . }
    """
    cg = rdflib.ConjunctiveGraph()
    cg.parse(data=data, format='trig')
    data = cg.serialize(format='trig')
    # assert_ is a deprecated unittest alias; use assertTrue.
    self.assertTrue(b('ns2: <http://ex.org/docs/') in data, data)
    self.assertTrue(b('<ns2:document1>') not in data, data)
    self.assertTrue(b('ns2:document1') in data, data)
def __len__(self, context=None):
    """Return the number of triples, optionally restricted to ``context``."""
    assert self.__open, "The Store must be open."
    if context is not None:
        if context == self:
            context = None
    if context is None:
        # Conjunctive-index rows start with an empty context field: "^s^p^o^".
        prefix = b("^")
    else:
        prefix = bb("%s^" % self._to_string(context))
    # Count with a generator instead of materializing a throwaway list of
    # every matching key.
    return sum(1 for key in self.__indices[0] if key.startswith(prefix))