def test_graph_ids():
    """Yield nose-style checks that every context graph has an Identifier id."""

    def check(parse_kwargs):
        # Parse into a fresh dataset and inspect each resulting context.
        dataset = ConjunctiveGraph()
        dataset.parse(**parse_kwargs)
        for context in dataset.contexts():
            assert isinstance(context.identifier, Identifier)

    # Case 1: inline data with an explicit publicID keyword.
    yield check, dict(data=DATA, publicID=PUBLIC_ID, format="turtle")

    # Case 2: an InputSource carrying the same public id on encoded bytes.
    source = StringInputSource(DATA.encode("utf8"))
    source.setPublicId(PUBLIC_ID)
    yield check, dict(source=source, format="turtle")
def test_graph_ids():
    """Yield nose-style checks that every context graph has an Identifier id."""

    def check(parse_kwargs):
        # Parse into a fresh dataset and inspect each resulting context.
        dataset = ConjunctiveGraph()
        dataset.parse(**parse_kwargs)
        for context in dataset.contexts():
            assert isinstance(context.identifier, Identifier)

    # Case 1: inline data with an explicit publicID keyword.
    yield check, dict(data=DATA, publicID=PUBLIC_ID, format="turtle")

    # Case 2: an InputSource carrying the same public id.
    source = StringInputSource(DATA)
    source.setPublicId(PUBLIC_ID)
    yield check, dict(source=source, format="turtle")
def _loadAndEscape(ruleStore, n3, outputPatterns):
    """Parse `n3` rules into `ruleStore`, escaping output statements first.

    The escaping happens on a scratch graph: we can't escapeOutputStatements
    in the ruleStore since it doesn't support removals, and we can't copy a
    plain graph into the rule graph since something went wrong with
    traversing the triples inside quoted graphs (losing all the rule
    bodies).  This serialize/parse round-trip is very slow (400ms), but it
    only runs when the file changes.
    """
    scratch = Graph()
    scratch.parse(StringInputSource(n3.encode('utf8')), format='n3')  # for inference
    escapeOutputStatements(scratch, outputPatterns=outputPatterns)
    expanded = scratch.serialize(format='n3')
    ruleGraph = Graph(ruleStore)
    ruleGraph.parse(StringInputSource(expanded), format='n3')
def _onGraphBodyStatements(self, body, headers):
    """Parse an n-triples request body and forward its statements.

    Open question: maybe quads only, so we can track who made the input
    and from what interface?  Or does your input of triples get wrapped
    in a new quad in here?
    """
    parsed = Graph()
    parsed.parse(StringInputSource(body), format='nt')
    if not parsed:
        raise ValueError("expected graph body")
    statements = list(parsed.triples((None, None, None)))
    self._onStatements(statements)
def getTestGraph(n3):
    """Parse an N3 string into a Graph with the module's initNs prefixes bound."""
    from rdflib import Graph
    from rdflib.parser import StringInputSource
    graph = Graph()
    graph.parse(StringInputSource(n3), format="n3")
    for prefix, uri in initNs.items():
        graph.bind(prefix, uri)
    return graph
def post(self):
    """Parse the request body (content-type selects the parser) and start playback anims."""
    # KeyError here (intentionally) rejects unsupported content types.
    fmt = {
        'text/n3': 'n3',
    }[self.request.headers['content-type']]
    graph = Graph()
    graph.parse(StringInputSource(self.request.body), format=fmt)
    for anim in graph.subjects(ROOM['playback'], ROOM['start']):
        startAnim(anim)
def parse(self, doc):
    """Parse doc.data into a Graph and hand it to process_graph."""
    source = StringInputSource(doc.data)
    graph = Graph()
    # Pass format only when the doc declares one; otherwise let the
    # parser pick a default.
    kwargs = {'format': doc.format} if doc.format else {}
    graph.parse(source, **kwargs)
    return self.process_graph(graph)
def triples(self, sub, pre, obj):
    """Match (sub, pre, obj) against the repository's default graph."""
    pattern = self.parse_triple(sub, pre, obj)
    # Pull the whole default graph as n-triples and parse it locally.
    dump = self.conn.statements_default_graph(
        self.repository, 'text/plain'
    )
    graph = rdflib.ConjunctiveGraph()
    graph.parse(StringInputSource(dump), format="nt")
    return graph.triples(pattern)
def setUp(self):
    """Build self.graph on the module-level store, pre-loaded with two bnodes."""
    NS = u"http://example.org/"  # NOTE(review): appears unused here — confirm
    self.graph = Graph(store)
    # One bnode with a plain property, one typed as rdfs:Class; prefix
    # IRIs are interpolated from the RDF/RDFS namespace constants.
    self.graph.parse(StringInputSource("""
       @prefix : <http://example.org/> .
       @prefix rdf: <%s> .
       @prefix rdfs: <%s> .
       [ :prop :val ].
       [ a rdfs:Class ].""" % (RDF.RDFNS, RDFS.RDFSNS)), format="n3")
def reread(self):
    """Rebuild self.configGraph from every n3 file under the etcd prefix."""
    self.cancelRead()
    log.info('read config')
    self.configGraph = ConjunctiveGraph()
    # Each etcd value under the prefix is an n3 document; merge them all.
    for v, md in etcd.get_prefix(self.etcPrefix):
        log.info(' read file %r', md.key)
        self.configGraph.parse(StringInputSource(v), format='n3')
    self.configGraph.bind('', ROOM)  # not working
    self.configGraph.bind('rdf', RDF)
    # config graph is too noisy; maybe make it a separate resource
    #masterGraph.patch(Patch(addGraph=self.configGraph))
    self.setupBoards()
def _onFullGraph(self, message):
    """Parse a full-graph JSON-LD message and send it on as a full patch."""
    try:
        graph = ConjunctiveGraph()
        graph.parse(StringInputSource(message), format='json-ld')
        patch = Patch(addGraph=graph)
        self._sendPatch(patch, fullGraph=True)
    except Exception:
        # Log the traceback before propagating so the failure is visible
        # even if the caller swallows it.
        log.error(traceback.format_exc())
        raise
    self._fullGraphReceived = True
    self._fullGraphTime = time.time()
    self._patchesReceived += 1
def _graph(self):
    """Lazy loading of the _graph attribute

    This property getter will be called only when the instance attribute
    self._graph has been deleted. In that case, it will load the graph
    from self.identifier. This is used by the `from_iri`:meth: class
    method, to ensure that graphs are only loaded when required...
    """
    # Already loaded (or injected) — return the cached graph.
    if '_graph' in self.__dict__:
        return self.__dict__['_graph']
    # One-shot download parameters stashed by from_iri; pop so they are
    # only usable once.
    headers = self.__dict__.pop('_headers')
    http = self.__dict__.pop('_http')
    # Fetch the document IRI, dropping any fragment.
    base_iri = self._identifier.split('#', 1)[0]
    effective_headers = dict(DEFAULT_REQUEST_HEADERS)
    if headers:
        effective_headers.update(headers)
    http = http or DEFAULT_HTTP_CLIENT
    LOG.info('downloading <%s>', base_iri)
    response, content = http.request(base_iri, "GET",
                                     headers=effective_headers)
    LOG.debug('got %s %s %s',
              response.status, response['content-type'], response.fromcache)
    # Anything other than a 2xx is an error.
    if response.status // 100 != 2:
        raise HttpLib2ErrorWithResponse(response.reason, response, content)
    source = StringInputSource(content)
    # Strip parameters (e.g. charset) from the content-type before using
    # it as the parse format.
    ctype = response['content-type'].split(';', 1)[0]
    g = ConjunctiveGraph(identifier=base_iri)
    g.addN(BACKGROUND_KNOWLEDGE.quads())
    g.parse(source, base_iri, ctype)
    _fix_default_graph(g)
    # if available, load API Documentation in a separate graph
    links = response.get('link')
    if links:
        # A single Link header comes back as a string; normalize to list.
        if type(links) != list:
            links = [links]
        for link in links:
            match = APIDOC_RE.match(link)
            if match:
                self._api_doc = apidoc_iri = URIRef(match.groups()[0])
                # Avoid recursing into ourselves if the apidoc IRI is
                # this very resource.
                if apidoc_iri != self.identifier:
                    apidoc = ApiDocumentation.from_iri(
                        apidoc_iri, headers, http)
                    g.addN(apidoc.graph.quads())
                break
    self.__dict__['_graph'] = g
    return g
def rdf(self):
    """ RDF

    Lazily parse self.context into a ConjunctiveGraph, caching the
    result in self._rdf.  On a parse failure the raw bytes are dumped to
    a temp file for post-mortem, the error is recorded in the context's
    error_log, and an empty graph is cached instead.
    """
    if self._rdf:
        return self._rdf
    data = self.context
    # If the context is not already file-like, wrap its .data payload
    # (which may itself be a callable producing the payload).
    if not hasattr(data, 'read'):
        if callable(data.data):
            data = StringInputSource(data.data())
        else:
            data = StringInputSource(data.data)
    try:
        self._rdf = ConjunctiveGraph().parse(data)
    except Exception:
        s = data.getByteStream()  # this is a StringIO instance
        s.seek(0)
        # Keep the unparseable payload around for debugging.
        with tempfile.NamedTemporaryFile(prefix="rdflib_staff_log",
                                         delete=False) as f:
            f.write(s.read())
        self.context.error_log.raising(sys.exc_info())
        self.validDatas = False
        self._rdf = ConjunctiveGraph()
    return self._rdf
def save(self, filename, file_format):
    """Serialize the repository's default graph to *filename*.

    The default graph is fetched as n-triples, re-parsed locally, then
    serialized in *file_format*.  A falsy/empty filename is a no-op
    (only the timing line is printed).
    """
    # BUG FIX: time.clock() was deprecated since 3.3 and removed in
    # Python 3.8; perf_counter() is the documented replacement.
    start = time.perf_counter()
    if filename != '':
        graph = rdflib.ConjunctiveGraph()
        data = StringInputSource(self.conn.statements_default_graph(
            self.repository, 'text/plain'
        ))
        graph.parse(data, format="nt")
        # BUG FIX: use a context manager so the file handle is closed
        # even if parsing/serialization raises.
        with open(filename, "wb") as of:
            of.write(graph.serialize(format=file_format))
    elapsed = time.perf_counter() - start
    print("Elapsed file write time: %ss" % elapsed)
def testStoreLiteralsXmlQuote(self):
    """Round-trip an RDF/XML doc containing an XML-quoted apostrophe literal."""
    bob = self.bob
    says = URIRef(u"http://www.rdflib.net/terms/says")
    imtheone = Literal(u"I'm the one", lang="en")
    # On Python 3 the XML test document must be bytes.
    testdoc = (PY3 and bytes(xmltestdocXmlQuote, "UTF-8")) or xmltestdocXmlQuote
    # BUG FIX: the keyword was misspelled "formal"; Graph.parse() takes
    # "format", so the misspelling never selected the xml parser.
    self.graph.parse(StringInputSource(testdoc), format="xml")
    objs = list(self.graph)
    self.assertEquals(len(objs), 1)
    o = objs[0]
    self.assertEquals(o, (bob, says, imtheone))
def render_graph(result, cfg, **kwargs):
    """
    Render for output a result that can be parsed as an RDF graph
    """
    # Mapping from MIME types to formats accepted by RDFlib.
    # BUG FIX: 'text/turtle' and 'application/rdf+xml' each appeared
    # twice; duplicate dict keys silently overwrite the earlier entry,
    # so the repeats were dead weight and a maintenance trap.
    rdflib_formats = {
        'text/rdf+n3': 'n3',
        'text/turtle': 'turtle',
        'application/x-turtle': 'turtle',
        'application/rdf+xml': 'xml',
        'text/rdf': 'xml',
    }
    try:
        got = kwargs.get('format', 'text/rdf+n3')
        fmt = rdflib_formats[got]
    except KeyError:
        raise KrnlException('Unsupported format for graph processing: {!s}', got)

    g = ConjunctiveGraph()
    g.load(StringInputSource(result), format=fmt)

    display = cfg.dis[0] if is_collection(cfg.dis) else cfg.dis
    if display in ('png', 'svg'):
        try:
            # A second element starting with "withlit" asks for literals
            # to be drawn too.
            literal = len(cfg.dis) > 1 and cfg.dis[1].startswith('withlit')
            opt = {'lang': cfg.lan, 'literal': literal, 'graphviz': []}
            data, metadata = draw_graph(g, fmt=display, options=opt)
            return {'data': data, 'metadata': metadata}
        except Exception as e:
            raise KrnlException('Exception while drawing graph: {!r}', e)
    elif display == 'table':
        it = rdf_iterator(g, add_vtype=cfg.typ, lang=cfg.lan)
        n, data = html_table(it, limit=cfg.lmt, withtype=cfg.typ)
        data += div('Shown: {}, Total rows: {}',
                    n if cfg.lmt else 'all', len(g), css="tinfo")
        data = {'text/html': div(data)}
    elif len(g) == 0:
        data = {'text/html': div(div('empty graph', css='krn-warn'))}
    else:
        data = {'text/plain': g.serialize(format='nt').decode('utf-8')}

    return {'data': data, 'metadata': {}}
def addTrig(graph, url, timeout=2): t1 = time.time() # workaround for some reason my ipv6 names don't resolve for name, addr in ipv6Addresses.items(): url = url.replace('/' + name + ':', '/[' + addr + ']:') log.debug(' fetching %r', url) response = yield treq.get(url, headers={'accept': ['application/trig']}, timeout=timeout) if response.code != 200: raise ValueError("status %s from %s" % (response.code, url)) g = ConjunctiveGraph() g.parse(StringInputSource((yield response.content())), format='trig') fetchTime = time.time() - t1 log.debug(' %r done in %.04f sec', url, fetchTime) graph.addN(g.quads()) returnValue(fetchTime)
def testSPARQLNotEquals():
    """FILTER (?val != 1) should select only :bar from a two-triple graph."""
    NS = u"http://example.org/"  # NOTE(review): appears unused here — confirm
    graph = ConjunctiveGraph()
    graph.parse(StringInputSource("""
       @prefix : <http://example.org/> .
       @prefix rdf: <%s> .
       :foo rdf:value 1.
       :bar rdf:value 2.""" % RDF.uri), format="n3")
    # DEBUG= is a legacy rdflib query keyword.
    rt = graph.query("""SELECT ?node WHERE { ?node rdf:value ?val. FILTER (?val != 1) }""",
                     initNs={'rdf': RDF.uri},
                     DEBUG=False)
    for row in rt:
        #item = row[0]
        item = row
        assert item == URIRef("http://example.org/bar"), \
            "unexpected item of '%s'" % repr(item)
def testStoreLiteralsXml(self):
    """Round-trip an RDF/XML doc with en/ja/fr language-tagged literals."""
    bob = self.bob
    says = URIRef(u'http://www.rdflib.net/terms/says')
    objects = [
        Literal(u'I\'m the one', lang='en'),
        Literal(u'こんにちは', lang='ja'),
        Literal(u'les garçons à Noël reçoivent des œufs', lang='fr')
    ]
    # On Python 3 the XML test document must be bytes.
    testdoc = (PY3 and bytes(xmltestdocXml, "UTF-8")) or xmltestdocXml
    # BUG FIX: the keyword was misspelled "formal"; Graph.parse() takes
    # "format", so the misspelling never selected the xml parser.
    self.graph.parse(StringInputSource(testdoc), format='xml')
    objs = list(self.graph)
    self.assertEquals(len(objs), 3)
    for o in objs:
        self.assertEquals(o[0], bob)
        self.assertEquals(o[1], says)
        self.assertTrue(o[2] in objects)
def testStoreLiteralsXml(self):
    """Round-trip an RDF/XML doc with en/ja/fr language-tagged literals."""
    bob = self.bob
    says = URIRef(u"http://www.rdflib.net/terms/says")
    objects = [
        Literal(u"I'm the one", lang="en"),
        Literal(u"こんにちは", lang="ja"),
        Literal(u"les garçons à Noël reçoivent des œufs", lang="fr")
    ]
    # On Python 3 the XML test document must be bytes.
    testdoc = (PY3 and bytes(xmltestdocXml, "UTF-8")) or xmltestdocXml
    # BUG FIX: the keyword was misspelled "formal"; Graph.parse() takes
    # "format", so the misspelling never selected the xml parser.
    self.graph.parse(StringInputSource(testdoc), format="xml")
    objs = list(self.graph)
    self.assertEquals(len(objs), 3)
    for o in objs:
        self.assertEquals(o[0], bob)
        self.assertEquals(o[1], says)
        self.assertTrue(o[2] in objects)
def setUp(self):
    """Build an ImageSet over four dated foaf:Image fixtures (p:a .. p:d)."""
    graph = ConjunctiveGraph()
    # One image per day, 2014-01-01 through 2014-01-04.
    graph.parse(StringInputSource('''
       @prefix p: <http://example.com/pic/> .
       @prefix : <http://photo.bigasterisk.com/0.1/> .
       @prefix foaf: <http://xmlns.com/foaf/0.1/> .
       @prefix xs: <http://www.w3.org/2001/XMLSchema#> .
       @prefix exif: <http://www.kanzaki.com/ns/exif#> .
       p:a a foaf:Image; exif:dateTime "2014-01-01T00:00:00Z"^^xs:dateTime .
       p:b a foaf:Image; exif:dateTime "2014-01-02T00:00:00Z"^^xs:dateTime .
       p:c a foaf:Image; exif:dateTime "2014-01-03T00:00:00Z"^^xs:dateTime .
       p:d a foaf:Image; exif:dateTime "2014-01-04T00:00:00Z"^^xs:dateTime .
    '''), format='n3')
    bindAll(graph)
    index = imageset.ImageIndex(graph)
    # Make indexing synchronous so tests see a complete index.
    index.finishBackgroundIndexing()
    self.imageSet = imageset.ImageSet(graph, index)
    self.request = self.imageSet.request
def remove(self, sub, pre, obj):
    """Remove one triple from the Sesame repository.

    The repository has no single-triple removal here, so the whole
    default graph is pulled locally, the triple removed, the repository
    cleared, and the remainder re-uploaded.
    """
    # BUG FIX: time.clock() was deprecated since 3.3 and removed in
    # Python 3.8; perf_counter() is the documented replacement.
    start = time.perf_counter()
    t = self.parse_triple(sub, pre, obj)
    # create a new auxiliary graph
    graph = rdflib.ConjunctiveGraph()
    data = StringInputSource(self.conn.statements_default_graph(
        self.repository, 'text/plain'
    ))
    graph.parse(data, format="nt")
    # remove all triples from the Sesame repository
    self.conn.remove_all_statements(self.repository)
    # remove desired triple from auxiliary graph
    graph.remove(t)
    # send all remaining triples to the Sesame repository
    self.conn.add_data_no_context(self.repository,
                                  graph.serialize(format="nt"))
    elapsed = time.perf_counter() - start
    print("Elapsed removal time: %ss" % elapsed)
""" # --- End of primer code # To make this go easier to spit back out... # technically, we already created a namespace # with the object init (and it added some namespaces as well) # By default, your main namespace is the URI of your # current working directory, so lets make that simpler: myNS = Namespace(URIRef('http://www.w3.org/2000/10/swap/Primer#')) primer.bind('', myNS) primer.bind('owl', 'http://www.w3.org/2002/07/owl#') primer.bind('dc', 'http://purl.org/dc/elements/1.1/') primer.bind('swap', 'http://www.w3.org/2000/10/swap/') sourceCode = StringInputSource(mySource, myNS) # Lets load it up! primer.parse(sourceCode, format='n3') # Now you can query, either directly straight into a list: [(x, y, z) for x, y, z in primer] # or spit it back out (mostly) the way we created it: print primer.serialize(format='n3') # for more insight into things already done, lets see the namespaces
def test2():
    """Parse meta interpolated with test_string2 into a fresh graph.

    BUG FIX: the original did `meta.encode("utf-8") % test_string2.encode("utf-8")`
    and then concatenated the resulting bytes with the str values
    `prefix` and "<http://example.org/>", which raises TypeError on
    Python 3 (str + bytes).  Interpolate in str space and let
    StringInputSource handle the text.
    """
    # assumes meta, test_string2 and prefix are str — TODO confirm
    meta2 = meta % test_string2
    graph = ConjunctiveGraph()
    graph.parse(StringInputSource(prefix + "<http://example.org/>" + meta2),
                format="n3")
def __init__(self, n3Content):
    """Build the wrapped graph by parsing the given N3 text."""
    graph = Graph()
    graph.parse(StringInputSource(n3Content), format='n3')
    self._graph = graph
def testRegex():
    """Exercise REGEXTerm matching and the MySQL backend's optimized interfaces.

    Destroys and recreates the store described by configString; on any
    failure the store is destroyed before the exception propagates.
    """
    g = Graph(store='MySQL')
    g.destroy(configString)
    g.open(configString)
    g.parse(StringInputSource(testN3), format="n3")
    try:
        # Capture the implication's antecedent/consequent quoted graphs.
        for s, p, o in g.triples((None, implies, None)):
            formulaA = s
            formulaB = o
        assert type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph
        a = URIRef('http://test/a')
        b = URIRef('http://test/b')
        c = URIRef('http://test/c')
        d = URIRef('http://test/d')
        universe = ConjunctiveGraph(g.backend)
        #REGEX triple matching
        assert len(
            list(
                universe.triples(
                    (None, REGEXTerm('.*22-rdf-syntax-ns.*'), None)))) == 1
        assert len(list(universe.triples((None, REGEXTerm('.*'), None)))) == 3
        assert len(
            list(universe.triples(
                (REGEXTerm('.*formula.*$'), None, None)))) == 1
        assert len(
            list(universe.triples(
                (None, None, REGEXTerm('.*formula.*$'))))) == 1
        assert len(
            list(universe.triples((None, REGEXTerm('.*implies$'), None)))) == 1
        for s, p, o in universe.triples((None, REGEXTerm('.*test.*'), None)):
            assert s == a
            assert o == c
        for s, p, o in formulaA.triples((None, REGEXTerm('.*type.*'), None)):
            assert o != c or isinstance(o, BNode)
        #REGEX context matching
        assert len(
            list(universe.contexts(
                (None, None, REGEXTerm('.*schema.*'))))) == 1
        assert len(list(universe.contexts((None, REGEXTerm('.*'), None)))) == 3
        #test optimized interfaces
        assert len(list(g.backend.subjects(RDF.type, [RDFS.Class, c]))) == 1
        for subj in g.backend.subjects(RDF.type, [RDFS.Class, c]):
            assert isinstance(subj, BNode)
        assert len(list(g.backend.subjects(implies, [REGEXTerm('.*')]))) == 1
        for subj in g.backend.subjects(implies, [formulaB, RDFS.Class]):
            assert subj.identifier == formulaA.identifier
        assert len(list(g.backend.subjects(REGEXTerm('.*'), [formulaB, c]))) == 2
        assert len(list(g.backend.subjects(None, [formulaB, c]))) == 2
        assert len(list(g.backend.subjects(None, [formulaB, c]))) == 2
        assert len(
            list(g.backend.subjects([REGEXTerm('.*rdf-syntax.*'), d], None))) == 2
        assert len(list(g.backend.objects(None, RDF.type))) == 1
        assert len(list(g.backend.objects(a, [d, RDF.type]))) == 1
        assert len(list(g.backend.objects(a, [d]))) == 1
        assert len(list(g.backend.objects(a, None))) == 1
        assert len(list(g.backend.objects(a, [REGEXTerm('.*')]))) == 1
        assert len(list(g.backend.objects([a, c], None))) == 1
    except:
        # Clean up the store, then re-raise the original failure.
        g.backend.destroy(configString)
        raise
""" query = query_prefix + query_body print(f'{msg}') for row in g.query(query): for l in row.__dict__['labels']: print(f'{l}: {row[l]}') # test = '<https://genecoop.waag.org/schema/v1#exp_000> <http://www.w3.org/1999/02/22-rdf-syntax-ns#label> "Array SNP request + Analysis and interpretation" .' test = do_normalize(doc) g = rdflib.Graph() source = StringInputSource(test.encode("utf8")) g.load(source, format="nt") query_body = """ SELECT ?consent ?type ?name ?dna_donor ?researcher ?issuedate ?issuer ?issuername WHERE { ?consent a ?type . FILTER (STRSTARTS(STR(?type), "https://genecoop.waag.org/credentials/v1#")) ?consent rdf:label ?name; cred:credentialSubject ?dna_donor ; gc_cred:given_to ?researcher ; cred:issuanceDate ?issuedate ; cred:issuer ?issuer . ?issuer rdf:label ?issuername }
def parseRdf(text: str, contentType: str):
    """Parse *text* into a Graph; the MIME type selects the parser format."""
    # KeyError here (intentionally) rejects unsupported content types.
    formats = {
        'text/n3': 'n3',
    }
    graph = Graph()
    graph.parse(StringInputSource(text), format=formats[contentType])
    return graph
def test2():
    """Parse meta interpolated with test_string2 into a fresh graph.

    BUG FIX: the original did `meta.encode('utf-8') % test_string2.encode('utf-8')`
    and then concatenated the resulting bytes with the str values
    `prefix` and '<http://example.org/>', which raises TypeError on
    Python 3 (str + bytes).  Interpolate in str space and let
    StringInputSource handle the text.
    """
    # assumes meta, test_string2 and prefix are str — TODO confirm
    meta2 = meta % test_string2
    graph = ConjunctiveGraph()
    graph.parse(StringInputSource(prefix + '<http://example.org/>' + meta2),
                format='n3')
def rdfGraphBody(body, headers):
    """Parse an n-triples request body into a Graph (headers currently unused)."""
    graph = Graph()
    graph.parse(StringInputSource(body), format='nt')
    return graph
def _onGraphBodyStatements(self, body, headers):
    """Parse an n-triples request body and forward its statements."""
    parsed = Graph()
    parsed.parse(StringInputSource(body), format='nt')
    if not parsed:
        raise ValueError("expected graph body")
    statements = list(parsed.triples((None, None, None)))
    self._onStatements(statements)