def command(self):
    uri = self.get_uri()
    g = Graph(identifier=uri)
    g.parse(uri, format="n3")
    if self.options.format == "thimbl":
        print serialize_thimbl(g)
    else:
        print g.serialize(format=self.options.format)
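
# A minimal standalone sketch of the fetch-and-serialize flow above using
# plain rdflib; the example URI is a placeholder, and serialize_thimbl is
# assumed to come from the surrounding module rather than rdflib itself.
from rdflib import Graph

def fetch_and_serialize(uri, out_format="pretty-xml"):
    g = Graph(identifier=uri)
    g.parse(uri, format="n3")  # dereference the URI and parse the body as N3
    return g.serialize(format=out_format)

# e.g. print fetch_and_serialize("http://example.org/profile")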
def index_aggregate(a):
    doc = xapian.Document()
    doc.add_value(VAL_URI, a.identifier)
    docid = u"URI" + a.identifier
    doc.add_term(docid)
    log.debug("Aggregate: %s" % a.identifier)

    def add_value(g, val_id, subject, predicate):
        val = []
        for s, p, o in g.triples((subject, predicate, None)):
            if not o.language or o.language == "en":  ### TODO: fix this
                val.append(o)
        if val:
            val = u", ".join(val)
            doc.add_value(val_id, val)
        return val

    ## create an abbreviated graph to store in the xapian database
    extract = Graph()
    add_value(a, VAL_LABEL, a.identifier, RDFS.label)
    for g in a.contexts():
        log.debug("Indexing: %s" % g.identifier)
        for pred in (RDF.type, RDFS.label, RDFS.comment,
                     DC.title, DC.description, FOAF.name):
            for statement in a.triples((g.identifier, pred, None)):
                extract.add(statement)
        title = add_value(g, VAL_TITLE, g.identifier, DC.title)
        if title:
            doc.add_term(u"ZT" + title[:160])
        name = add_value(g, VAL_NAME, g.identifier, FOAF.name)
        if name:
            doc.add_term(u"NA" + name[:160])
    doc.set_data(extract.serialize(format="n3"))

    ## take any fields that contain text, stem them according to their
    ## language (or english if unsupported or unspecified) and put them
    ## in the index
    termgen = xapian.TermGenerator()
    termgen.set_document(doc)
    for pred in (RDFS.label, RDFS.comment, DC.title, DC.description,
                 FOAF.name, FOAF.first_name, FOAF.last_name, FOAF.surname):
        for s, p, o in a.triples((None, pred, None)):
            termgen.increase_termpos()
            if o.language:
                try:
                    stemmer = xapian.Stem(o.language)
                except xapian.InvalidArgumentError:
                    stemmer = xapian.Stem("en")
            else:
                stemmer = xapian.Stem("en")
            termgen.set_stemmer(stemmer)
            termgen.index_text(o)

    return docid, doc
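
# Hedged usage sketch for index_aggregate: writing the returned document
# into a Xapian index. The database path and the source of aggregates are
# illustrative assumptions, not part of the original indexing pipeline.
import xapian

def index_aggregates(aggregates, dbpath="search.db"):
    db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN)
    try:
        for a in aggregates:
            docid, doc = index_aggregate(a)
            # replace rather than add, keyed on the unique "URI..." term
            # built above, so re-indexing does not create duplicates
            db.replace_document(docid, doc)
    finally:
        db.close()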
def getContent(self):
    registry = getUtility(IRegistry)
    settings = registry.forInterface(IRDFSettings, check=False)
    graph_uri = settings.fresnel_graph_uri
    graph = Graph(identifier=graph_uri)
    graph.parse(StringIO(master), format='n3')
    graph.parse(StringIO(custom), format='n3')
    return dict(
        lens=graph.serialize(format='n3')
    )
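
# Hypothetical sketch of the IRDFSettings schema consulted above; the real
# interface is defined elsewhere in the package, but a minimal version
# only needs the fresnel_graph_uri field that getContent reads.
from zope import schema
from zope.interface import Interface

class IRDFSettingsSketch(Interface):
    # assumed field backing settings.fresnel_graph_uri above
    fresnel_graph_uri = schema.TextLine(title=u"URI of the Fresnel lens graph")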
def test_05_put(self):
    response = self.app.get(
        url("/graph", uri=test_graph, format="application/rdf+xml"))
    data = StringIO(response.body)
    g = Graph()
    g.parse(data, format="xml")
    ## now put it back
    body = g.serialize(format="pretty-xml")
    response = self.app.put(
        url("/graph", uri=test_graph), params=body,
        headers={"Content-type": "application/rdf+xml"})
    assert response.body.find("urn:uuid:") == -1
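
# A companion check one might add, assuming rdflib 3+: rdflib.compare
# offers structural graph isomorphism, so the PUT round-trip can be
# verified by re-fetching and comparing graphs instead of only scanning
# the response body for skolemised urn:uuid: identifiers.
def test_05a_put_roundtrip(self):
    from rdflib.compare import isomorphic
    response = self.app.get(
        url("/graph", uri=test_graph, format="application/rdf+xml"))
    g1 = Graph()
    g1.parse(StringIO(response.body), format="xml")
    self.app.put(
        url("/graph", uri=test_graph),
        params=g1.serialize(format="pretty-xml"),
        headers={"Content-type": "application/rdf+xml"})
    response = self.app.get(
        url("/graph", uri=test_graph, format="application/rdf+xml"))
    g2 = Graph()
    g2.parse(StringIO(response.body), format="xml")
    assert isomorphic(g1, g2)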
def _get_graph(self):
    uri = self._uri()
    content_type, format = self._accept(uri)
    if uri.endswith("bibtex"):
        content_type = "text/x-bibtex"
        format = "bibtex"
        uri_str, _ = uri.rsplit(".", 1)
        uri = URIRef(uri_str)
    graph = handler.get(uri)
    if len(graph) == 0:
        graph.rollback()
        cursor = handler.rdflib.store.cursor()
        cursor.execute("SET result_timeout = 10000")
        q = construct_graph % {"agent": uri.n3()}
        graph = handler.rdflib.store.sparql_query(q, cursor=cursor)
        graph = Graph(graph.store, identifier=graph.identifier)  # ordf extensions
        cursor.close()
    if len(graph) == 0:
        abort(404, "No such graph: %s" % uri)
    if format == "html":
        c.graph = graph
        c.model = model.Entry.get_by_uri(uri)
        response.content_type = str(content_type)
        # should really iterate through the potential views
        if URIRef("http://purl.org/ontology/bibo/Book") in list(c.model.type):
            data = render("view_bibo_book.html")
        else:
            data = self._render_graph()
    elif format == "bibtex":
        b = Bibtex()
        b.load_from_graph(graph)
        data = b.to_bibtex()
        response.content_type = str(content_type)
        response.headers['Content-Location'] = "%s.bibtex" % b.uniquekey
        response.headers['Location'] = "%s.bibtex" % b.uniquekey
    else:
        data = graph.serialize(format=format)
        response.content_type = str(content_type)
    graph.rollback()
    # log.warn("XXX cursor: %s" % handler.rdflib.store._cursor)
    return data
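
# Hypothetical shape of the construct_graph template interpolated above;
# the real query is defined elsewhere in the module and is likely more
# selective, but it must CONSTRUCT a graph around the %(agent)s URI:
construct_graph_sketch = """
CONSTRUCT { %(agent)s ?p ?o }
WHERE { %(agent)s ?p ?o }
"""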
def _get_graph(self):
    uri = self._uri()
    content_type, format = self._accept(uri)
    if uri.endswith("bibtex"):
        content_type = "text/x-bibtex"
        format = "bibtex"
        uri_str, _ = uri.rsplit(".", 1)
        uri = URIRef(uri_str)
    graph = handler.get(uri)
    if len(graph) == 0:
        graph.rollback()
        cursor = handler.rdflib.store.cursor()
        cursor.execute("SET result_timeout = 10000")
        q = construct_graph % {"agent": uri.n3()}
        graph = handler.rdflib.store.sparql_query(q, cursor=cursor)
        graph = Graph(graph.store, identifier=graph.identifier)  # ordf extensions
        cursor.close()
    if len(graph) == 0:
        abort(404, "No such graph: %s" % uri)
    if format == "html":
        c.graph = graph
        data = self._render_graph()
    elif format == "bibtex":
        b = Bibtex()
        b.load_from_graph(graph)
        data = b.to_bibtex()
        response.content_type = str(content_type)
        response.headers['Content-Location'] = "%s.bibtex" % b.uniquekey
        response.headers['Location'] = "%s.bibtex" % b.uniquekey
    else:
        data = graph.serialize(format=format)
        response.content_type = str(content_type)
    graph.rollback()
    # log.warn("XXX cursor: %s" % handler.rdflib.store._cursor)
    return data