class Graph:
    """Wrapper around a Redland RDF model persisted in a local SQLite store.

    Provides helpers for adding DOME home-automation entities (devices,
    properties, automations, triggers, conditions, actions and web
    properties) to the graph as RDF statements.
    """

    def __init__(self, path=None):
        """Open the graph at *path* (default: ./domegraph.sqlite3)."""
        # Per-instance attributes instead of the original class-level
        # mutable defaults, which would be shared between instances.
        self.path = path if path else os.path.join('.', 'domegraph.sqlite3')
        self.model = None
        self._open()

    def _open(self):
        """Open (creating if necessary) the backing store; return the model."""
        if self.model is not None:
            return self.model
        # TODO: wrap storage creation in try/except and return None on failure
        # (as the original author's note suggested).
        # BUG FIX: test self.path instead of the hard-coded default path so a
        # custom path passed to __init__ is honoured.
        if os.path.isfile(self.path):
            print('Reusing existing store')  # BUG FIX: message said "new"
            storage = Storage(storage_name=STORAGE_NAME, name=GRAPH_NAME,
                              options_string="new='false'")
        else:
            print('Creating new store')
            storage = Storage(storage_name=STORAGE_NAME, name=GRAPH_NAME,
                              options_string="new='true'")
        self.model = Model(storage)
        return self.model

    def _add(self, statements):
        """Append *statements* to the model and sync them to storage."""
        for statement in statements:
            self.model.append(statement)
        self.model.sync()

    def addDevice(self, label, actuates, prop_src, ha_name, ha_type):
        """Add a Device node linked to *prop_src* via actuates/observes."""
        subject = DOME_DATA[cleanUri(ha_name)]
        statements = [
            Statement(subject, rdf.type, DOME.Device),
            Statement(subject, rdfs.label, label),
            Statement(subject, DOME.ha_name, ha_name),
            Statement(subject, DOME.ha_type, ha_type),
        ]
        assert isinstance(prop_src, Node)
        if actuates:
            statements.append(Statement(subject, DOME.actuates, prop_src))
        else:
            statements.append(Statement(subject, DOME.observes, prop_src))
        self._add(statements)
        return subject

    def addProperty(self, label, value, updated, changed):
        """Add a Property node with its value and update/change timestamps."""
        subject = DOME_DATA['property/' + label + str(uuid.uuid4())]
        statements = [
            Statement(subject, rdf.type, DOME.Property),
            Statement(subject, rdfs.label, label),
            Statement(subject, DOME.value, value),
            Statement(subject, DOME.last_updated, updated),
            Statement(subject, DOME.last_changed, changed),
        ]
        self._add(statements)
        return subject

    def addAutomation(self, label, trigger, actions, enabled=True):
        """Add an Automation triggered by *trigger* that performs *actions*."""
        subject = DOME_DATA['automation/' + label + str(uuid.uuid4())]
        statements = [
            Statement(subject, rdf.type, DOME.Automation),
            Statement(subject, rdfs.label, label),
            # BUG FIX: link the supplied trigger node; the original linked
            # every automation to the DOME.Trigger class itself and ignored
            # the `trigger` argument.
            Statement(subject, DOME.triggeredby, trigger),
            Statement(subject, DOME.isenabled, enabled),
        ]
        for action in actions:
            # BUG FIX: the original called list.append with three arguments
            # (a TypeError) and referenced the whole *actions* list instead
            # of the loop variable.
            statements.append(Statement(subject, DOME.performs, action))
        self._add(statements)
        return subject

    def addTrigger(self, label, triggers, conditions, operator):
        """Add a Trigger composed of sub-triggers and conditions."""
        subject = DOME_DATA['trigger/' + label + str(uuid.uuid4())]
        statements = [
            Statement(subject, rdf.type, DOME.Trigger),
            Statement(subject, rdfs.label, label),
            Statement(subject, DOME.operatortype, operator),
        ]
        for trigger in triggers:
            # BUG FIX: wrap in Statement — list.append takes one argument.
            statements.append(Statement(subject, DOME.hassubtrigger, trigger))
        for condition in conditions:
            statements.append(Statement(subject, DOME.hascondition, condition))
        self._add(statements)
        return subject

    def addCondition(self, label, prop_src, value, operator):
        """Add a Condition comparing *prop_src* against a target *value*."""
        # BUG FIX: uuid.uiid4 -> uuid.uuid4 (was an AttributeError).
        subject = DOME_DATA['condition/' + label + str(uuid.uuid4())]
        statements = [
            Statement(subject, rdf.type, DOME.Condition),
            Statement(subject, rdfs.label, label),
            Statement(subject, DOME.observes, prop_src),
            Statement(subject, DOME.target, value),
            Statement(subject, DOME.operatortype, operator),
        ]
        self._add(statements)
        return subject

    def addAction(self, label, prop_src, service):
        """Add an Action that actuates *prop_src* by calling *service*."""
        subject = DOME_DATA['action/' + str(uuid.uuid4())]
        statements = [
            Statement(subject, rdf.type, DOME.Action),
            Statement(subject, rdfs.label, label),
            Statement(subject, DOME.actuates, prop_src),
            # BUG FIX: the original referenced an undefined name `command`;
            # the `service` parameter is the intended object.
            Statement(subject, DOME.callservice, service),
        ]
        self._add(statements)
        return subject

    def addWebProperty(self, label, host, ext_res_src, ext_prop_src, poll,
                       graph=None):
        """Add a WebProperty polled from an external host/resource."""
        subject = DOME_DATA['webproperty/' + str(uuid.uuid4())]
        statements = [
            Statement(subject, rdf.type, DOME.WebProperty),
            Statement(subject, rdfs.label, label),
            Statement(subject, DOME.hostedby, host),
            Statement(subject, DOME.resource, ext_res_src),
            Statement(subject, DOME.property, ext_prop_src),
            Statement(subject, DOME.poll, str(poll)),
        ]
        if graph:
            statements.append(Statement(subject, DOME.graphname, graph))
        self._add(statements)
        return subject

    def updateStatement(self, src, pred, obj):
        """Replace every (src, pred, *) statement with (src, pred, obj).

        Removes all existing statements first!!!
        """
        for stmt in self.model.find_statements(Statement(src, pred, None)):
            del self.model[stmt]
        self.model.append(Statement(src, pred, obj))

    # More adds to be added

    def getModel(self):
        """Return the underlying Redland model, opening the store if needed."""
        if not self.model:
            self._open()
        return self.model

    def __del__(self):
        # Drop the model reference so Redland can flush/close the storage.
        del self.model
class Graph(object):
    """An RDF graph held in an in-memory Redland hash store.

    The graph can be loaded from a URI, a single RDF file, or a directory
    (directories are exposed as stat-metadata triples), updated via SPARQL
    Update, queried with SPARQL, and rendered as an HTTP Response with
    content negotiation.
    """

    # Maps file extensions / MIME subtypes to Redland parser/serializer names.
    TYPEMAP = {
        'turtle': 'turtle',
        'ttl': 'turtle',
        'json': 'json',
        'rdf+xml': 'rdfxml-abbrev',
        'rdf': 'rdfxml-abbrev',
        'nt': 'ntriples',
        'ntriples': 'ntriples',
        'n3': 'turtle',
    }

    def _h(self, code, level, facility, message, line, column, byte, file, uri):
        # Redland error handler: escalate parse/serialize problems.
        raise Exception(message)

    def __init__(self, p, base_uri=None):
        self._base = base_uri or request.base_url
        self._f = self._f0 = 'turtle'
        self._p = os.path.abspath(p)
        # os.path.abspath drops a trailing slash; restore it so directory
        # paths stay distinguishable from files.
        if p and p[-1] == '/' and (not self._p or self._p[-1] != '/'):
            self._p += '/'
        self._p0 = self._p
        if not self.exists():
            # Fall back to the path with each known RDF extension appended.
            for k in self.TYPEMAP:
                if self.exists(p + '.' + k):
                    self._p0 += '.' + k
                    self._f0 = self.TYPEMAP[k]
                    break
        self._s = Storage(storage_name='hashes', name='',
                          options_string="hash-type='memory'")
        self._g = Model(self._s)
        self._editable = False

    def headers(self, more=None):
        """Build response headers describing the store; merge in *more*."""
        d = {
            'Base': self._base,
            'Storage-Format': self._f0,
            'Storage-Path': self._p0,
            'Triples': len(self._g),
        }
        if self._editable:
            d['MS-Author-Via'] = 'DAV, SPARQL'
            d['DAV'] = '1'
        if more:
            d.update(more)
        return d

    def exists(self, p=None):
        """Return True if *p* (default: the resolved storage path) exists."""
        return os.path.exists(p or self._p0)

    def load(self, uri='', name=''):
        """Load triples from *uri*, a regular file, or a directory listing."""
        if uri:
            return self._g.load(uri=uri, name=name, handler=self._h)
        st = os.stat(self._p0)
        if not stat.S_ISDIR(st.st_mode):
            # Regular file: parse it and allow editing/saving back.
            self._editable = True
            p = Parser(self._f0)
            assert p.parse_into_model(self._g, 'file:' + self._p0,
                                      base_uri=self._base, handler=self._h)
        else:
            # Directory: synthesize stat-metadata triples for each entry.
            self._g.append(
                Statement(Node(uri_string=self._base), ns.rdf['type'],
                          ns.stat['Directory']))
            for k in os.listdir(self._p0):
                st = os.stat(self._p0 + '/' + k)
                if stat.S_ISDIR(st.st_mode):
                    kn = Node(uri_string=str(k) + '/')
                    self._g.append(
                        Statement(kn, ns.rdf['type'], ns.stat['Directory']))
                else:
                    kn = Node(uri_string=str(k))
                # NOTE(review): original indentation was lost; membership and
                # timestamp triples are emitted for every entry — confirm the
                # original did not restrict them to regular files.
                self._g.append(
                    Statement(Node(uri_string=self._base),
                              ns.rdfs['member'], kn))
                self._g.append(
                    Statement(kn, ns.stat['atime'],
                              Node(literal=str(int(st.st_atime)))))
                self._g.append(
                    Statement(kn, ns.stat['ctime'],
                              Node(literal=str(int(st.st_ctime)))))
                self._g.append(
                    Statement(kn, ns.stat['mtime'],
                              Node(literal=str(int(st.st_mtime)))))
                self._g.append(
                    Statement(kn, ns.stat['size'],
                              Node(literal=str(int(st.st_size)))))

    def append(self, s, name=None, mime_type=None):
        """Parse string *s* and add its triples to the graph."""
        p = Parser(name=name, mime_type=mime_type)
        assert p.parse_string_into_model(self._g, s, self._base,
                                         handler=self._h)

    def update(self, s, name=None, mime_type=None):
        """Apply a SPARQL Update (INSERT/DELETE DATA); else fall back to append."""
        if mime_type == 'application/sparql-update':
            for op in sparqlUpdate.translateUpdate(
                    sparqlUpdate.parseUpdate(s)):
                # BUG FIX: compare strings with ==, not identity (`is`),
                # which is interning-dependent and unreliable.
                if op.name == 'InsertData':
                    g = Graph('sparql:')
                    for elt in op.triples:
                        g.append(' '.join([x.n3() for x in elt]),
                                 name='turtle')
                    for x in g._g:
                        self._g.append(x)
                elif op.name == 'DeleteData':
                    g = Graph('sparql:')
                    for elt in op.triples:
                        g.append(' '.join([x.n3() for x in elt]),
                                 name='turtle')
                    for x in g._g:
                        if x in self._g:
                            del self._g[x]
        else:
            return self.append(s, name, mime_type)

    def toString(self, f):
        """Serialize the graph as format *f*; 'html' returns a Tabulator page."""
        if f == 'html':
            return '''<!DOCTYPE html><html><head>
<link type="text/css" rel="stylesheet" href="https://w3.scripts.mit.edu/tabulator/tabbedtab.css" />
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"></script>
<script type="text/javascript" src="https://w3.scripts.mit.edu/tabulator/js/mashup/mashlib.js"></script>
<script type="text/javascript">
jQuery.ajaxPrefilter(function(options) {
  if (options.crossDomain) {
    options.url = "https://w3.scripts.mit.edu/proxy?uri=" + encodeURIComponent(options.url);
  }
});
jQuery(document).ready(function() {
  tabulator.outline.GotoSubject(tabulator.kb.sym(window.location.href), true, undefined, true, undefined);
});
</script></head><body>
<div class="TabulatorOutline" id="DummyUUID"><table id="outline"></table></div></body></html>'''
        s = Serializer(name=f)
        # Keep base URIs out of the serialized output.
        s.set_feature(Uri('http://feature.librdf.org/raptor-writeBaseURI'),
                      Node(literal='0')._node)
        for k, v in list(ns.__dict__.items()):
            if type(v) is NS:
                s.set_namespace(k, Uri(v._prefix))
        return s.serialize_model_to_string(self._g, base_uri=self._base)

    def save(self):
        """Write the serialized graph back to its backing file."""
        s = self.toString(self._f0)
        # BUG FIX: the Python 2 `file()` builtin does not exist in Python 3
        # and the handle was never closed; use open() in a with-block.
        with open(self._p0, 'w') as fh:
            fh.write(s)

    def size(self):
        """Return the number of triples in the graph."""
        return self._g.size()

    def unlink(self):
        """Delete the backing file, but only if the graph is non-empty."""
        if self.size() > 0:
            return os.unlink(self._p0)

    def query(self, query):
        """Run a SPARQL query; pick a result format from the Accept header."""
        q = SPARQLQuery(query, base_uri=self._base)
        r = q.execute(self._g)
        # Renamed loop variable: the original rebound `q` over the query
        # object while iterating.
        for mtype, _quality in request.accept_mimetypes:
            for k in ('json', 'xml', 'tsv', 'csv'):
                if k in mtype:
                    return Response(
                        r.to_string(
                            'http://www.w3.org/ns/formats/SPARQL_Results_'
                            + k.upper(),
                            base_uri=self._base))
        return Response(
            r.to_string('http://www.w3.org/ns/formats/SPARQL_Results_JSON',
                        base_uri=self._base))

    def __call__(self, status=None, headers=None, body=None):
        """Render the graph as an HTTP Response, content-negotiated."""
        f = 'html'
        m = 'text/html'
        for mtype, _quality in request.accept_mimetypes:
            elt = mtype.split('/', 1) + ['']
            if elt[1] in self.TYPEMAP:
                f = self.TYPEMAP[elt[1]]
                m = mtype
                break
        # Only serialize a body for success responses when none was supplied.
        if body is None and (status is None or status < 400):
            body = self.toString(f)
        return Response(body, status=status, headers=self.headers(headers),
                        mimetype=m)
class Graph(object):
    """An RDF graph in an in-memory Redland hash store.

    Loadable from a URI, a regular RDF file, or a directory (whose entries
    become stat-metadata triples); supports SPARQL query/update and renders
    itself as an HTTP Response.
    """

    # Maps file extensions / MIME subtypes to Redland parser/serializer names.
    TYPEMAP = {
        'turtle': 'turtle',
        'ttl': 'turtle',
        'json': 'json',
        'rdf+xml': 'rdfxml-abbrev',
        'rdf': 'rdfxml-abbrev',
        'nt': 'ntriples',
        'ntriples': 'ntriples',
        'n3': 'turtle',
    }

    def _h(self, code, level, facility, message, line, column, byte, file, uri):
        # Redland error handler: escalate parse/serialize problems.
        raise Exception(message)

    def __init__(self, p, base_uri=None):
        self._base = base_uri or request.base_url
        self._f = self._f0 = 'turtle'
        self._p = os.path.abspath(p)
        # os.path.abspath drops a trailing slash; restore it so directory
        # paths stay distinguishable from files.
        if p and p[-1] == '/' and (not self._p or self._p[-1] != '/'):
            self._p += '/'
        self._p0 = self._p
        if not self.exists():
            # Fall back to the path with each known RDF extension appended.
            for k in self.TYPEMAP:
                if self.exists(p + '.' + k):
                    self._p0 += '.' + k
                    self._f0 = self.TYPEMAP[k]
                    break
        self._s = Storage(storage_name='hashes', name='',
                          options_string="hash-type='memory'")
        self._g = Model(self._s)

    def info(self):
        """Describe the store: base URI, format, path, and triple count."""
        return {
            'base-uri': self._base,
            'storage-format': self._f0,
            'storage-path': self._p0,
            'triples': len(self._g),
        }

    def exists(self, p=None):
        """Return True if *p* (default: the resolved storage path) exists."""
        return os.path.exists(p or self._p0)

    def load(self, uri='', name=''):
        """Load triples from *uri*, a regular file, or a directory listing."""
        if uri:
            return self._g.load(uri=uri, name=name, handler=self._h)
        st = os.stat(self._p0)
        if not stat.S_ISDIR(st.st_mode):
            p = Parser(self._f0)
            assert p.parse_into_model(self._g, 'file:' + self._p0,
                                      base_uri=self._base, handler=self._h)
        else:
            # Directory: type each entry and attach its stat metadata.
            for k in os.listdir(self._p0):
                kn = Node(uri_string=str(k))
                st = os.stat(self._p0 + '/' + k)
                if stat.S_ISDIR(st.st_mode):
                    self._g.append(
                        Statement(kn, ns.rdf['type'], ns.rdfs['Container']))
                elif stat.S_ISREG(st.st_mode):
                    self._g.append(
                        Statement(kn, ns.rdf['type'], ns.rdfs['Resource']))
                # NOTE(review): original indentation was lost; timestamp and
                # size triples are emitted for every entry — confirm against
                # the original layout.
                self._g.append(
                    Statement(kn, ns.stat['atime'],
                              Node(literal=str(st.st_atime))))
                self._g.append(
                    Statement(kn, ns.stat['ctime'],
                              Node(literal=str(st.st_ctime))))
                self._g.append(
                    Statement(kn, ns.stat['mtime'],
                              Node(literal=str(st.st_mtime))))
                self._g.append(
                    Statement(kn, ns.stat['size'],
                              Node(literal=str(st.st_size))))

    def append(self, s, name=None, mime_type=None):
        """Parse string *s* and add its triples to the graph."""
        p = Parser(name=name, mime_type=mime_type)
        assert p.parse_string_into_model(self._g, s, self._base,
                                         handler=self._h)

    def update(self, s, name=None, mime_type=None):
        """Apply a SPARQL Update (INSERT/DELETE DATA); else fall back to append."""
        if mime_type == 'application/sparql-update':
            for op in sparqlUpdate.translateUpdate(
                    sparqlUpdate.parseUpdate(s)):
                # BUG FIX: compare strings with ==, not identity (`is`),
                # which is interning-dependent and unreliable.
                if op.name == 'InsertData':
                    g = Graph('sparql:')
                    for elt in op.triples:
                        g.append(' '.join([x.n3() for x in elt]),
                                 name='turtle')
                    for x in g._g:
                        self._g.append(x)
                elif op.name == 'DeleteData':
                    g = Graph('sparql:')
                    for elt in op.triples:
                        g.append(' '.join([x.n3() for x in elt]),
                                 name='turtle')
                    for x in g._g:
                        if x in self._g:
                            del self._g[x]
        else:
            return self.append(s, name, mime_type)

    def toString(self, f):
        """Serialize the graph to a string using serializer format *f*."""
        s = Serializer(name=f)
        # Keep base URIs out of the serialized output.
        s.set_feature(Uri('http://feature.librdf.org/raptor-writeBaseURI'),
                      Node(literal='0')._node)
        for k, v in list(ns.__dict__.items()):
            if type(v) is NS:
                s.set_namespace(k, Uri(v._prefix))
        return s.serialize_model_to_string(self._g, base_uri=self._base)

    def save(self):
        """Write the serialized graph back to its backing file."""
        s = self.toString(self._f0)
        # BUG FIX: the Python 2 `file()` builtin does not exist in Python 3
        # and the handle was never closed; use open() in a with-block.
        with open(self._p0, 'w') as fh:
            fh.write(s)

    def size(self):
        """Return the number of triples in the graph."""
        return self._g.size()

    def unlink(self):
        """Delete the backing file, but only if the graph is non-empty."""
        if self.size() > 0:
            return os.unlink(self._p0)

    def query(self, query):
        """Run a SPARQL query; pick a result format from the Accept header."""
        q = SPARQLQuery(query, base_uri=self._base)
        r = q.execute(self._g)
        # Renamed loop variable: the original rebound `q` over the query
        # object while iterating.
        for mtype, _quality in request.accept_mimetypes:
            for k in ('json', 'xml', 'tsv', 'csv'):
                if k in mtype:
                    return Response(
                        r.to_string(
                            'http://www.w3.org/ns/formats/SPARQL_Results_'
                            + k.upper(),
                            base_uri=self._base))
        return Response(
            r.to_string('http://www.w3.org/ns/formats/SPARQL_Results_JSON',
                        base_uri=self._base))

    def __call__(self, status=None):
        """Render the graph as an HTTP Response, content-negotiated."""
        f = 'turtle'
        m = 'text/turtle'
        for mtype, _quality in request.accept_mimetypes:
            elt = mtype.split('/', 1) + ['']
            if elt[1] in self.TYPEMAP:
                f = self.TYPEMAP[elt[1]]
                m = mtype
                break
        return Response(self.toString(f), status=status,
                        headers=self.info(), mimetype=m)