def from_model(graph, query):
    """Convert the graph describing `query` into the model dict used for
    rendering: a 'head' entry for the queried resource and a 'tail' list
    for any further subjects found in the graph."""
    s = query
    rv = []
    while s:
        elem = URIRef(s)
        # Use the first rdf:type value (if any) as the class of this resource
        class_of = None
        for class_of_value in graph.objects(elem, RDF.type):
            class_of = from_node(graph, class_of_value, [], False, query)
            break
        triples = list(triple_frags(elem, graph, [], class_of, query))
        graph.remove((elem, None, None))
        model = {
            'display': DISPLAYER.apply(elem),
            'literal_encode': quote_plus(str(elem)),
            'uri': query,
            'uri_encode': quote_plus(query),
            'triples': triples,
            'has_triples': len(triples) > 0,
            'classOf': class_of,
            'context': CONTEXT,
            'inverses': list(inverse_triple_frags(elem, graph, query))
        }
        graph.remove((None, None, elem))
        rv.append(model)
        s = next_subject(graph, class_of)
    return {
        'head': rv[0],
        'tail': rv[1:]
    }
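# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving from_model with a small in-memory rdflib
# graph. It assumes the module-level DISPLAYER and CONTEXT objects and the
# triple_frags/inverse_triple_frags/next_subject helpers (defined elsewhere
# in this file) are configured; the example URI is hypothetical.
def _example_from_model():
    from rdflib import Graph, Literal, URIRef
    from rdflib.namespace import RDF, RDFS

    g = Graph()
    subj = URIRef("http://example.org/resource/1")
    g.add((subj, RDF.type, URIRef("http://example.org/ontology#Thing")))
    g.add((subj, RDFS.label, Literal("Example resource", lang="en")))

    model = from_model(g, "http://example.org/resource/1")
    # model['head'] holds the rendering values for the queried resource;
    # model['tail'] holds any remaining subjects left in the graph.
    return model['head']['display'], model['head']['has_triples']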
def from_node(graph, node, stack, recurse=True, query=None):
    """Convert a single RDF node (URIRef, BNode or Literal) into the dict
    used for rendering."""
    if type(node) == URIRef:
        fragment = None
        if '#' in str(node):
            fragment = str(node)[str(node).index('#') + 1:]
        if recurse and str(node).startswith(str(query)):
            triples = list(triple_frags(node, graph, stack, None, query))
            graph.remove((node, None, None))
            return {
                'display': DISPLAYER.apply(node),
                'uri': str(node),
                'uri_encode': quote_plus(str(node)),
                'triples': triples,
                'has_triples': len(triples) > 0,
                'context': CONTEXT,
                'fragment': fragment
            }
        else:
            return {
                'display': DISPLAYER.apply(node),
                'uri': str(node),
                'uri_encode': quote_plus(str(node)),
                'triples': [],
                'has_triples': False,
                'context': CONTEXT,
                'fragment': fragment
            }
    elif type(node) == BNode:
        triples = list(triple_frags(node, graph, stack, None, query))
        graph.remove((node, None, None))
        return {
            'display': DISPLAYER.apply(node),
            'bnode': True,
            'triples': triples,
            'has_triples': len(triples) > 0,
            'context': CONTEXT
        }
    elif type(node) == Literal:
        return {
            'display': str(node),
            'literal': True,
            'literal_encode': quote_plus(str(node)),
            'lang': node.language,
            'datatype': from_dt(node.datatype),
            'has_triples': False,
            'context': CONTEXT
        }
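# --- Illustrative usage sketch (not part of the original module) ---
# Shows the shape of the dict produced for a plain language-tagged literal.
# Assumes the module-level CONTEXT and quote_plus are available as in the
# rest of this file.
def _example_from_node_literal():
    from rdflib import Graph, Literal

    node = from_node(Graph(), Literal("Example resource", lang="en"), [])
    # node['literal'] is True, node['lang'] is 'en' and node['datatype'] is
    # None for an untyped language-tagged literal.
    return node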
def from_dt(dt):
    if dt:
        return {
            'display': DISPLAYER.apply(dt),
            'uri': str(dt)
        }
    else:
        return None
def sparql_results_to_dict(result):
    """Convert a SPARQL XML results document (an ElementTree element) into
    the dict used for rendering."""
    ns = "{http://www.w3.org/2005/sparql-results#}"
    xml_ns = "{http://www.w3.org/XML/1998/namespace}"
    # ASK queries return a single boolean element
    if result.findall(ns + "boolean"):
        r = result.findall(ns + "boolean")[0].text == "true"
        return {"boolean": r}
    # SELECT queries: collect the projected variables first
    variables = []
    head = result.findall(ns + "head")[0]
    r = {"variables": [], "results": [], "context": CONTEXT}
    for variable in head:
        variables.append(variable.get("name"))
        r["variables"].append({"name": variable.get("name")})
    body = result.findall(ns + "results")[0]
    results = body.findall(ns + "result")
    n = 0
    for row in results:
        # One (possibly empty) cell per projected variable
        r["results"].append({"result": []})
        for v in variables:
            r["results"][n]["result"].append(dict())
        bindings = row.findall(ns + "binding")
        for binding in bindings:
            name = binding.get("name")
            target = r["results"][n]["result"][variables.index(name)] = {}
            if binding[0].tag == ns + "uri":
                target['uri'] = binding[0].text
                target['display'] = DISPLAYER.apply(binding[0].text)
            if binding[0].tag == ns + "bnode":
                target['bnode'] = binding[0].text
            if binding[0].tag == ns + "literal":
                target['value'] = binding[0].text
                if binding[0].get(xml_ns + "lang"):
                    target['lang'] = binding[0].get(xml_ns + "lang")
                elif binding[0].get("datatype"):
                    target['datatype'] = binding[0].get("datatype")
        n += 1
    return r
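# --- Illustrative usage sketch (not part of the original module) ---
# sparql_results_to_dict expects the root element of a SPARQL XML results
# document, e.g. as parsed by xml.etree.ElementTree. The result document
# below is hypothetical, and the call assumes DISPLAYER and CONTEXT are set
# up as elsewhere in this file.
def _example_sparql_results_to_dict():
    from xml.etree import ElementTree

    xml = (
        '<sparql xmlns="http://www.w3.org/2005/sparql-results#">'
        '<head><variable name="s"/></head>'
        '<results><result>'
        '<binding name="s"><uri>http://example.org/resource/1</uri></binding>'
        '</result></results>'
        '</sparql>'
    )
    r = sparql_results_to_dict(ElementTree.fromstring(xml))
    # r['variables'] -> [{'name': 's'}]
    # r['results'][0]['result'][0]['uri'] -> 'http://example.org/resource/1'
    return r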
def application(self, environ, start_response):
    """The entry point for all queries (see WSGI docs for more details)"""
    uri = environ['PATH_INFO'].encode('latin-1').decode()
    is_test = request_uri(environ) == BASE_NAME + uri
    # Guess the file type required
    if re.match(r".*\.html", uri):
        mime = "html"
    elif re.match(r".*\.rdf", uri):
        mime = "pretty-xml"
    elif re.match(r".*\.ttl", uri):
        mime = "turtle"
    elif re.match(r".*\.nt", uri):
        mime = "nt"
    elif re.match(r".*\.json", uri):
        mime = "json-ld"
    elif 'HTTP_ACCEPT' in environ:
        if (SPARQL_PATH and
                (uri == SPARQL_PATH or uri == (SPARQL_PATH + "/"))):
            mime = self.best_mime_type(environ['HTTP_ACCEPT'],
                                       "sparql-json")
        else:
            mime = self.best_mime_type(environ['HTTP_ACCEPT'], "html")
    else:
        mime = "html"
    # The welcome page
    if uri == "/" or uri == "/index.html":
        start_response('200 OK',
                       [('Content-type', 'text/html; charset=utf-8')])
        if not exists(DB_FILE):
            return [self.render_html(
                DISPLAY_NAME,
                pystache.render(
                    open(resolve("html/onboarding.mustache")).read(),
                    {'context': CONTEXT}),
                is_test).encode('utf-8')]
        else:
            return [self.render_html(
                DISPLAY_NAME,
                pystache.render(
                    open(resolve("html/index.html")).read(),
                    {'property_facets': FACETS, 'context': CONTEXT}),
                is_test).encode('utf-8')]
    # The search page
    elif (SEARCH_PATH and
          (uri == SEARCH_PATH or uri == (SEARCH_PATH + "/"))):
        if 'QUERY_STRING' in environ:
            qs_parsed = parse_qs(environ['QUERY_STRING'])
            if 'query' in qs_parsed:
                query = qs_parsed['query'][0]
                if 'property' in qs_parsed:
                    prop = qs_parsed['property'][0]
                else:
                    prop = None
                if 'offset' in qs_parsed:
                    offset = int(qs_parsed['offset'][0])
                else:
                    offset = 0
                return self.search(start_response, query, prop, offset)
            else:
                return self.send400(start_response, YZ_NO_RESULTS)
        else:
            return self.send400(start_response, YZ_NO_QUERY)
    # The dump file
    elif uri == DUMP_URI:
        start_response(
            '200 OK',
            [('Content-type', 'application/x-gzip'),
             ('Content-length', str(os.stat(DUMP_FILE).st_size))])
        return [open(resolve(DUMP_FILE), "rb").read()]
    # The favicon (i.e., the logo users see in the
    # browser next to the title)
    elif (uri.startswith("/favicon.ico") and
          exists(resolve("assets/favicon.ico"))):
        start_response(
            '200 OK',
            [('Content-type', 'image/png'),
             ('Content-length',
              str(os.stat(resolve("assets/favicon.ico")).st_size))])
        return [open(resolve("assets/favicon.ico"), "rb").read()]
    # Any assets requests
    elif uri.startswith(ASSETS_PATH) and exists(resolve(uri[1:])):
        start_response(
            '200 OK',
            [('Content-type', mimetypes.guess_type(uri)[0]),
             ('Content-length', str(os.stat(resolve(uri[1:])).st_size))])
        return [open(resolve(uri[1:]), "rb").read()]
    # SPARQL requests
    elif SPARQL_PATH and (uri == SPARQL_PATH or uri == (SPARQL_PATH + "/")):
        if 'QUERY_STRING' in environ:
            qs = parse_qs(environ['QUERY_STRING'])
            if 'query' in qs:
                return self.sparql_query(
                    qs['query'][0], mime,
                    qs.get('default-graph-uri', [None])[0],
                    start_response)
            else:
                start_response(
                    '200 OK',
                    [('Content-type', 'text/html; charset=utf-8')])
                s = open(resolve("html/sparql.html")).read()
                return [self.render_html(
                    DISPLAY_NAME, s, is_test).encode('utf-8')]
        else:
            start_response('200 OK',
                           [('Content-type', 'text/html; charset=utf-8')])
            s = open(resolve("html/sparql.html")).read()
            return [self.render_html(
                DISPLAY_NAME, s, is_test).encode('utf-8')]
    # The list of all resources
    elif LIST_PATH and (uri == LIST_PATH or uri == (LIST_PATH + "/")):
        offset = 0
        prop = None
        obj = None
        obj_offset = 0
        if 'QUERY_STRING' in environ:
            qs = parse_qs(environ['QUERY_STRING'])
            if 'offset' in qs:
                try:
                    offset = int(qs['offset'][0])
                except ValueError:
                    return self.send400(start_response)
            if 'prop' in qs:
                prop = "<%s>" % qs['prop'][0]
            if 'obj' in qs:
                obj = qs['obj'][0]
            if 'obj_offset' in qs and re.match(r"\d+", qs['obj_offset'][0]):
                obj_offset = int(qs['obj_offset'][0])
        return self.list_resources(start_response, offset, prop, obj,
                                   obj_offset)
    # The dataset metadata (DataID) description
    elif METADATA_PATH and (
            uri == METADATA_PATH or
            uri == ("/" + METADATA_PATH) or
            uri == ("/" + METADATA_PATH + ".rdf") or
            uri == (METADATA_PATH + ".rdf") or
            uri == ("/" + METADATA_PATH + ".ttl") or
            uri == (METADATA_PATH + ".ttl") or
            uri == ("/" + METADATA_PATH + ".nt") or
            uri == (METADATA_PATH + ".nt") or
            uri == ("/" + METADATA_PATH + ".json") or
            uri == (METADATA_PATH + ".json")):
        graph = dataid()
        if mime == "html":
            content = self.rdfxml_to_html(
                graph, BASE_NAME + METADATA_PATH, YZ_METADATA, is_test)
        else:
            try:
                self.add_namespaces(graph)
                if mime == "json-ld":
                    content = yuzu.jsonld.write(
                        graph, BASE_NAME + METADATA_PATH)
                else:
                    content = graph.serialize(
                        format=mime).decode('utf-8')
            except Exception as e:
                print(e)
                return self.send501(start_response)
        start_response(
            '200 OK',
            [('Content-type', self.mime_types[mime] + "; charset=utf-8"),
             ('Vary', 'Accept'),
             ('Content-length', str(len(content)))])
        return [content.encode('utf-8')]
    # Any other static HTML page in the html/ folder
    elif exists(resolve("html/%s.html" % re.sub("/$", "", uri))):
        start_response('200 OK',
                       [('Content-type', 'text/html; charset=utf-8')])
        s = pystache.render(
            open(resolve("html/%s.html" % re.sub("/$", "", uri))).read(),
            {'context': CONTEXT, 'dump_uri': DUMP_URI})
        return [self.render_html(DISPLAY_NAME, s, is_test).encode('utf-8')]
    # Anything else is sent to the backend
    elif re.match(r"^/(.*?)(|\.nt|\.html|\.rdf|\.ttl|\.json)$", uri):
        id, _ = re.findall(
            r"^/(.*?)(|\.nt|\.html|\.rdf|\.ttl|\.json)$", uri)[0]
        graph = self.backend.lookup(id)
        if graph is None:
            return self.send404(start_response)
        labels = sorted([str(o) for s, p, o in graph.triples(
            (URIRef(BASE_NAME + id), RDFS.label, None))])
        if labels:
            title = ', '.join(labels)
        else:
            title = DISPLAYER.uri_to_str(BASE_NAME + id)
        if mime == "html":
            content = self.rdfxml_to_html(graph, BASE_NAME + id,
                                          title, is_test)
        else:
            try:
                self.add_namespaces(graph)
                if mime == "json-ld":
                    content = yuzu.jsonld.write(graph, BASE_NAME + id)
                else:
                    content = graph.serialize(
                        format=mime).decode('utf-8')
            except Exception as e:
                print(e)
                return self.send501(start_response)
        start_response(
            '200 OK',
            [('Content-type', self.mime_types[mime] + "; charset=utf-8"),
             ('Vary', 'Accept'),
             ('Content-length', str(len(content)))])
        return [content.encode('utf-8')]
    else:
        return self.send404(start_response)
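# --- Illustrative usage sketch (not part of the original module) ---
# application() is a standard WSGI entry point, so an instance of the class
# it belongs to (not shown in this excerpt) can be mounted directly on
# wsgiref's development server. The host and port below are arbitrary.
def _example_serve(server, host="localhost", port=8080):
    from wsgiref.simple_server import make_server

    # `server` is any object exposing application(environ, start_response);
    # content negotiation then happens per request, driven by the path
    # suffix (.html, .rdf, .ttl, .nt, .json) or the Accept header.
    httpd = make_server(host, port, server.application)
    httpd.serve_forever()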