def test_auto_load(self):
    """ Test that session.auto_load works. """

    store = surf.Store(reader="rdflib", writer="rdflib")
    session = surf.Session(store, auto_load=True)
    Person = session.get_class(surf.ns.FOAF.Person)
    person = Person()
    person.foaf_name = "John"
    person.save()

    same_person = Person(person.subject)
    # Check that rdf_direct is filled
    self.assertTrue(surf.ns.FOAF.name in same_person.rdf_direct)
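# Note (added for clarity, not from the source): with auto_load=True the
# session is expected to load a resource's direct attributes eagerly when the
# resource is instantiated, which is why `same_person.rdf_direct` is already
# populated above without an explicit `same_person.load()` call.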
def __init__(self):
    self.store = surf.Store(reader='allegro_franz',
                            writer='allegro_franz',
                            server=Config.query_server,
                            port=Config.query_port,
                            username=Config.query_username,
                            password=Config.query_password,
                            catalog=Config.query_catalog,
                            repository=Config.query_repository,
                            reasoning=True)
    self.main_session = surf.Session(self.store, {})

    self.metadatastore = surf.Store(reader='allegro_franz',
                                    writer='allegro_franz',
                                    server=Config.metadata_server,
                                    port=Config.metadata_port,
                                    username=Config.metadata_username,
                                    password=Config.metadata_password,
                                    catalog=Config.metadata_catalog,
                                    repository=Config.metadata_repository,
                                    reasoning=True)
    self.meta_session = surf.Session(self.metadatastore, {})
def _add_content(self, instance, **kwargs):
    session = surf.Session(self.store)
    obj2surf = queryMultiAdapter((instance, session),
                                 interface=IObject2Surf)
    if obj2surf is None:
        return
    self.store.reader.graph.bind(obj2surf.prefix, obj2surf.namespace,
                                 override=False)
    endLevel = kwargs.get('endLevel', 1)
    obj2surf.write(endLevel=endLevel)
    return obj2surf
def marshall(self, instance, **kwargs):
    """ Marshall the rdf data to xml representation """
    session = surf.Session(self.store)
    content_type = 'text/xml; charset=UTF-8'
    obj2surf = queryMultiAdapter((instance, session),
                                 interface=IObject2Surf)
    self.store.reader.graph.bind(obj2surf.prefix, obj2surf.namespace,
                                 override=False)
    endLevel = kwargs.get('endLevel', 1)
    obj2surf.write(endLevel=endLevel)
    data = self.store.reader.graph.serialize(format='pretty-xml')
    # length was previously initialised to 0 and never updated;
    # compute it from the serialized payload instead.
    length = len(data)
    return (content_type, length, data)
def test_rdflib_store():
    """ Create a SuRF rdflib based store """
    kwargs = {"reader": "rdflib", "writer": "rdflib"}
    if False:  # use_default_context:
        kwargs["default_context"] = "http://surf_test_graph/dummy2"

    try:
        store = surf.Store(**kwargs)
        session = surf.Session(store)
        # clean store
        store.clear()
    except Exception as e:
        pytest.fail(error_message(e), pytrace=True)
def load_objects_from_rdf(self):
    store = surf.Store(
        reader='rdflib',
        writer='rdflib',
        rdflib_store='IOMemory',
    )
    # Get a new surf session
    session = surf.Session(store)
    # Load the rdf data from the uris
    for uri in self.uris:
        store.load_triples(source=uri)
    # Define the License class as a skos:Concept object
    Object = session.get_class(self.object_class)
    # Get the license objects
    objects = Object.all().full()
    return objects
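# A minimal usage sketch for the method above (not from the original source):
# the owner object is assumed to expose `uris` (RDF documents to load) and
# `object_class` (the class URI to query), e.g. SKOS concepts, assuming the
# SKOS namespace is registered in surf.ns.
#
#     loader.uris = ["file:///tmp/licenses.rdf"]   # hypothetical source
#     loader.object_class = surf.ns.SKOS["Concept"]
#     for obj in loader.load_objects_from_rdf():
#         print(obj.subject)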
def test_dict_access(self):
    """ Test that resources support dictionary-style attribute access. """

    session = surf.Session(surf.Store(reader="rdflib"))
    Person = session.get_class(surf.ns.FOAF.Person)
    person = Person()
    person.foaf_name = "John"

    # Reading
    self.assertEqual(person["foaf_name"].first, "John")
    self.assertEqual(person[surf.ns.FOAF.name].first, "John")

    # Writing
    person["foaf_name"] = "Dave"
    self.assertEqual(person.foaf_name.first, "Dave")

    # Deleting
    del person["foaf_name"]
    self.assertEqual(person.foaf_name.first, None)
def _get_store_session(self, use_default_context=True):
    """ Return initialized SuRF store and session objects. """

    # FIXME: take endpoint from configuration file,
    # maybe we can mock SPARQL endpoint.
    kwargs = {"reader": "rdflib", "writer": "rdflib"}
    if False:  # use_default_context:
        kwargs["default_context"] = "http://surf_test_graph/dummy2"

    store = surf.Store(**kwargs)
    session = surf.Session(store)

    # Fresh start!
    # store.clear("http://surf_test_graph/dummy2")
    # store.clear(URIRef("http://my_context_1"))
    # store.clear(URIRef("http://other_context_1"))
    store.clear()

    return store, session
def __setstate__(self, dict):
    self.__dict__.update(dict)
    self._store = surf.Store(**self._store_params)
    self._session = surf.Session(self._store, {})

    # don't forget to reload the triples if it's an in-memory store!
    if 'rdflib_store' in self._store_params:
        basedir = pkg_resources.resource_filename('knowledge_base', 'data/kb/')
        sources = ["%s%s" % (basedir, file)
                   for file in self._store_params["knowledge_base_sources"].split(",")]
        source_format = self._store_params["sources_format"]
        for source_path in sources:
            self._store.writer._graph.parse(source=source_path,
                                            format=source_format)
        logger.info("The KnowledgeBase contains %i triples" % self._store.size())

    self._register_namespaces()
    self._register_mappings()
def test_dict_access():
    """ Test that resources support dictionary-style attribute access. """

    session = surf.Session(surf.Store(reader="rdflib"))
    Person = session.get_class(surf.ns.FOAF.Person)
    person = Person()
    person.foaf_name = "John"

    # Reading
    assert person["foaf_name"].first == Literal(u"John")
    assert person[surf.ns.FOAF.name].first == Literal(u"John")

    # Writing
    person["foaf_name"] = "Dave"
    assert person.foaf_name.first == Literal(u"Dave")

    # Deleting
    del person["foaf_name"]
    assert person.foaf_name.first is None
def initialize_rdflib(engine=None, mysql_connstr=None, virtuoso_connstr=None,
                      clear=False, logging=True):
    # rdflib.plugin.register('MySQL', rdflib.store.Store,
    #                        'rdfstorage.MySQL', 'MySQL')
    rdflib.plugin.register('sparql', rdflib.query.Processor,
                           'rdfextras.sparql.processor', 'Processor')
    rdflib.plugin.register('sparql', rdflib.query.Result,
                           'rdfextras.sparql.query', 'SPARQLQueryResult')

    # if engine:
    #     store = store_from_engine(engine=engine)
    # elif mysql_connstr:
    #     store = store_from_connstr(connstr=mysql_connstr)
    # elif virtuoso_connstr:
    #     store = store_from_virtuoso_connstr(virtuoso_connstr)

    rdf_store = surf.Store(reader='virtuoso_protocol',
                           writer='virtuoso_protocol',
                           endpoint='http://localhost:8890/sparql',
                           default_context='http://default')
    if clear:
        rdf_store.clear()
    # print('SIZE of STORE : ', rdf_store.size())

    # the surf session
    rdf_session = surf.Session(rdf_store, {})
    rdf_session.enable_logging = logging

    # register the namespace
    # ns.register(myblog=config['myblog.namespace'])

    init_model(rdf_session)
def get_store_session():
    """ Return initialized SuRF store and session objects. """

    # maybe we can mock SPARQL endpoint.
    kwargs = {"reader": "sparql_protocol",
              "writer": "sparql_protocol",
              "endpoint": "http://localhost:9980/sparql",
              "use_subqueries": True,
              "combine_queries": True}
    if True:  # use_default_context:
        kwargs["default_context"] = "http://surf_test_graph/dummy2"

    store = surf.Store(**kwargs)
    session = surf.Session(store)

    # Fresh start!
    store.clear("http://surf_test_graph/dummy2")
    store.clear(URIRef("http://my_context_1"))
    store.clear(URIRef("http://other_context_1"))

    return store, session
def _get_store_session(self, use_default_context=True):
    """ Return initialized SuRF store and session objects. """

    # FIXME: take endpoint from configuration file,
    kwargs = {"reader": "sesame2",
              "writer": "sesame2",
              "server": "localhost",
              "port": 8080,
              "root_path": "/openrdf-sesame",
              "repository": "test"}
    if use_default_context:
        kwargs["default_context"] = "http://surf_test_graph/dummy2"

    store = surf.Store(**kwargs)
    session = surf.Session(store)

    # Fresh start!
    store.clear(URIRef("http://surf_test_graph/dummy2"))
    store.clear(URIRef("http://my_context_1"))
    store.clear(URIRef("http://other_context_1"))
    store.clear()

    return store, session
def _get_store_session(self, cleanup=True):
    """ Return initialized SuRF store and session objects. """

    store = surf.Store(reader="rdflib", writer="rdflib")
    session = surf.Session(store)
    return store, session
def toRDF(self, rdf_format='turtle'):
    """ TODO: The model is not stable yet. This method may not work. """
    base_url = 'http://cli-mate.lumc.nl/data/definitions/default/%s#' % self.name

    store = surf.Store(reader='rdflib', writer='rdflib',
                       rdflib_store='IOMemory')
    session = surf.Session(store)

    # set up RDF classes.
    CLPCommandLineProgram = session.get_class(surf.ns.CLP.CommandLineProgram)
    CLPExecutionRequirements = session.get_class(surf.ns.CLP.ExecutionRequirements)
    CLPSoftware = session.get_class(surf.ns.CLP.Software)
    CLPArgument = session.get_class(surf.ns.CLP.Argument)

    # set up main node.
    command_line_program = CLPCommandLineProgram(base_url + self.name)
    command_line_program.dcterms_label = self.name
    command_line_program.dcterms_title = self.binary
    command_line_program.dcterms_description = self.description
    command_line_program.clp_hasVersion = self.version
    command_line_program.dcterms_comment = self.help_text
    command_line_program.save()

    # set up execution requirements
    execution_requirements = CLPExecutionRequirements(
        base_url + 'execution_requirements')
    command_line_program.clp_hasExecutionRequirements = execution_requirements
    execution_requirements.clp_requiresOperationSystem = surf.ns.CLP.Linux  # TODO

    if self.interpreter != '(binary)':
        execution_requirements.clp_interpreter = surf.ns.CLP[self.interpreter]
    if self.grid_access_type != '-':
        execution_requirements.clp_gridAccessType = self.grid_access_type
        execution_requirements.clp_gridID = self.grid_access_location

    for req in self.requirements:
        software = CLPSoftware(base_url + req.name)
        software.dcterms_title = req.name
        software.clp_gridID = req.location
        software.clp_softwareType = req.type
        execution_requirements.clp_requiresSoftware = software
        execution_requirements.save()

    # add arguments
    argument_list = []
    for arg in self.arguments:
        argument = Argument(**arg)
        argument_node = CLPArgument(base_url + arg['name'])
        argument.toRDF(argument_node)
        argument_list.append(argument_node)
    command_line_program.clp_hasArgument = argument_list

    # add document metadata
    Agent = session.get_class(surf.ns.FOAF['Agent'])
    agent = Agent('http://climate.lumc.nl')
    agent.dcterms_title = app.config['TITLE']
    agent.dcterms_creator = app.config['AUTHOR']
    agent.dcterms_hasVersion = app.config['VERSION']
    agent.save()

    Document = session.get_class(surf.ns.DCTERMS['Document'])
    document = Document('')
    document.dcterms_creator = agent
    document.dcterms_created = datetime.utcnow()
    document.rdfs_label = 'RDF definition of %s' % self.name

    session.commit()

    # prepare for serialization
    graph = session.default_store.reader.graph
    for prefix, url in ns_dict.items():
        graph.bind(prefix.lower(), url)
    graph.bind('', base_url)
    return graph.serialize(format=rdf_format)
def store_session():
    """ Return initialized SuRF store and session objects. """
    store = surf.Store(reader="rdflib", writer="rdflib")
    session = surf.Session(store)
    return store, session
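# Hedged example (not from the source) of how a test might consume the helper
# above; the test name and assertions are illustrative only, reusing the
# FOAF-based pattern from the other snippets in this listing.
def test_person_roundtrip():
    store, session = store_session()
    Person = session.get_class(surf.ns.FOAF.Person)
    person = Person()
    person.foaf_name = "John"
    person.save()
    assert person.foaf_name.first == "John"
    store.clear()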
import surf

store = surf.Store(reader='sparql_protocol',
                   endpoint='http://dbpedia.org/sparql',
                   default_graph='http://dbpedia.org')

print('Create the session')
session = surf.Session(store, {})
# session.log_level = False

PhilCollinsAlbums = session.get_class(surf.ns.YAGO['PhilCollinsAlbums'])
all_albums = PhilCollinsAlbums.all()
print('Phil Collins has %d albums on dbpedia' % len(all_albums))

first_album = all_albums.first()
# This loads all direct attributes for first_album, so
# each subsequent attribute access doesn't require an HTTP call.
first_album.load()

print('All covers')
for a in all_albums:
    if a.dbpedia_name and a.dbpedia_cover:
        # Resource attributes are list-like, with convenience
        # properties "first" and "one".
        print('\tCover %s for "%s"' % (a.dbpedia_cover.first,
                                       a.dbpedia_name.first))
import surf

dbpedia = surf.Store(reader='sparql_protocol',
                     endpoint='http://dbpedia.org/sparql',
                     default_graph='http://dbpedia.org')

local = surf.Store(reader='allegro_franz',
                   writer='allegro_franz',
                   server='localhost',
                   port=6789,
                   catalog='repositories',
                   repository='surf_test')

print('Create the session')
session = surf.Session()
session.enable_logging = True
session['dbpedia'] = dbpedia
session['local'] = local

print('------------------------------------------------------------------------')
print('DBPEDIA')
PhilCollinsAlbums = session.get_class(surf.ns.YAGO['PhilCollinsAlbums'],
                                      store='dbpedia')
all_albums = PhilCollinsAlbums.all()
print('Phil Collins has %d albums on dbpedia' % len(all_albums))

first_album = all_albums.first()
first_album.load()

print('All covers')
    'eea.soer.vocab.diversity_questions': (
        (u'10', 'Distinguishing factors'),
        (u'11', 'Societal developments'),
        (u'12', 'Drivers and impacts'),
        (u'13', 'Future developments'),
    ),
}

atvocabs['eea.soer.vocab.all_questions'] = \
    atvocabs['eea.soer.vocab.diversity_questions'] + \
    atvocabs['eea.soer.vocab.questions']

geostore = surf.Store(reader='rdflib', writer='rdflib',
                      rdflib_store='IOMemory')
geosession = surf.Session(geostore)
surf.ns.register(ROD="http://rod.eionet.europa.eu/schema.rdf#")
# geostore.load_triples(source="http://rod.eionet.europa.eu/countries")

atvocabs['eea.soer.vocab.geo_coverage'] = []
Locality = geosession.get_class(surf.ns.ROD['Locality'])
surf.ns.register(NUTS="http://rdfdata.eionet.europa.eu/ramon/ontology/")
surf.ns.register(
    EVALUATION="http://www.eea.europa.eu/soer/rdfs/evaluation/1.0#")

for loc in Locality.all().order():
    atvocabs['eea.soer.vocab.geo_coverage'].append(
        (loc.rod_loccode.first.strip(), loc.rdfs_label.first.strip()))
# geostore.load_triples(source="http://rdfdata.eionet.europa.eu/ramon/send_all")
# use local file to speed up for now
def marshall_inner(self, instance, **kwargs):
    """Marshall the rdf data to xml representation."""
    session = surf.Session(self.store)
    assert session
def session(self):
    """A new session for surf"""
    session = surf.Session(self.store)
    return session
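# A sketch (assumed context, not from the source) of consuming the accessor
# above; the owner class and its `store` attribute are hypothetical, and the
# accessor may be a plain method or a @property in the original code.
#
#     tool.store = surf.Store(reader="rdflib", writer="rdflib")
#     session = tool.session()          # or `tool.session` if a property
#     Person = session.get_class(surf.ns.FOAF.Person)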