def __mod__(self, *args, **kwargs):
    """Apply %-style formatting to this URI's text and re-wrap the result as a URIRef."""
    formatted = str(self).__mod__(*args, **kwargs)
    return URIRef(formatted)
def setUp(self):
    """Create the shared xsd:dateTime literal fixture used by the tests."""
    xsd_datetime = URIRef('http://www.w3.org/2001/XMLSchema#dateTime')
    self.x = Literal("2008-12-01T18:02:00Z", datatype=xsd_datetime)
def test_util_from_n3_expectliteralanddtype(self):
    """from_n3 should turn a typed N3 literal into a Literal carrying that datatype."""
    n3_text = '"true"^^xsd:boolean'
    parsed = util.from_n3(n3_text, default=None, backend=None)
    expected = Literal('true', datatype=URIRef('xsd:boolean'))
    self.assertEqual(parsed, expected)
def relativize(self, uri):
    """Strip this object's base IRI prefix (first occurrence) from *uri*, if it applies."""
    base = self.base
    if base is None or not uri.startswith(base):
        # No base configured, or uri lives outside it: return unchanged.
        return uri
    return URIRef(uri.replace(base, "", 1))
def namespace(self, prefix):
    """Return the namespace bound to *prefix* as a URIRef, or None when unbound."""
    key = prefix.encode("utf-8")  # the underlying mapping is keyed on bytes
    stored = self.__namespace.get(key, None)
    if stored is None:
        return None
    return URIRef(stored.decode('utf-8'))
def contains(cls, value):
    """Coerce *value* into a URIRef (no membership check is actually performed)."""
    ref = URIRef(value)
    return ref
""" Methods for parsing the CIDOC CRM RDF specification into something useful. """ import rdflib from rdflib.term import URIRef from itertools import chain TITLE = URIRef('http://purl.org/dc/terms/title') PROPERTY = URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#Property') TYPE = URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type') CLASS = URIRef('http://www.w3.org/2000/01/rdf-schema#Class') OWL_CLASS = URIRef('http://www.w3.org/2002/07/owl#Class') DESCRIPTION = URIRef('http://purl.org/dc/terms/description') COMMENT = URIRef('http://www.w3.org/2000/01/rdf-schema#comment') LABEL = URIRef('http://www.w3.org/2000/01/rdf-schema#label') RANGE = URIRef('http://www.w3.org/2000/01/rdf-schema#range') DOMAIN = URIRef('http://www.w3.org/2000/01/rdf-schema#domain') SUBPROPERTYOF = URIRef('http://www.w3.org/2000/01/rdf-schema#subPropertyOf') SUBCLASSOF = URIRef('http://www.w3.org/2000/01/rdf-schema#subClassOf') def _get_object(g, s, p): """ Retrieve the (first) object of a relation. This is mainly to be used where we expect only one relation of the specified type. Parameters ---------- g : rdflib.Graph
def namespaces(self):
    """Yield (prefix, namespace) pairs from the backing store, namespaces wrapped as URIRefs."""
    for prefix, ns in self.store.namespaces():
        yield prefix, URIRef(ns)
def title(self):
    """Return the 'title' term resolved inside this namespace."""
    suffix = 'title'
    return URIRef(self + suffix)
def __init__(self, uri, terms):
    """Initialise a closed namespace: only names listed in *terms* resolve under *uri*."""
    self.uri = uri
    # Precompute the full URIRef for every allowed term.
    self.__uris = {term: URIRef(self.uri + term) for term in terms}
def term(self, name):
    """Resolve *name* in the RDF namespace.

    Container-membership properties (rdf:_1, rdf:_2, ...) are purely
    numeric, so an all-digit name maps to "<uri>_<n>"; any other name is
    delegated to the normal closed-namespace lookup.

    Fixes: the original used the Python-2-only `except ValueError, e`
    syntax and bound an exception variable it never used; the try body is
    also narrowed to the single call that can raise.
    """
    try:
        index = int(name)
    except ValueError:
        return super(_RDFNamespace, self).term(name)
    return URIRef("%s_%s" % (self.uri, index))
# RDF Resources "nil" ]) def term(self, name): try: i = int(name) return URIRef("%s_%s" % (self.uri, i)) except ValueError, e: return super(_RDFNamespace, self).term(name) RDF = _RDFNamespace() RDFS = ClosedNamespace(uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"), terms=[ "Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label", "domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container", "ContainerMembershipProperty", "member", "Datatype" ]) OWL = Namespace('http://www.w3.org/2002/07/owl#') XSD = Namespace(_XSD_PFX) SKOS = Namespace('http://www.w3.org/2004/02/skos/core#') class NamespaceManager(object):
from rdflib.namespace import FOAF, XSD from rdflib import Graph import urllib import os import re map = dict() map['actor'] = 'actors' map['actress'] = 'actresses' map['director'] = 'directors' for key in map: for year in range(2018, 1850, -1): g = Graph() g.bind("foaf", FOAF) predicate = URIRef(f"http://xmlns.com/foaf/0.1/{key}_of") with open(f'{map[key]}.list', encoding='latin-1') as f: person = None person_added = False person_name = None person_string = None for line in f: try: #print(line) info = re.match( '^(([^\t\n]+\t+)|\t+)"?([^"\n]+)"? \(([0-9?]{4})[^\)]*\)( {([^}]+)})?.*$', line) if info: if info.group(2): person_string = info.group(2).strip() person_name = Literal(person_string,
def format(self, *args, **kwargs):
    """Run str.format over this URI's text and re-wrap the result as a URIRef."""
    text = str.format(self, *args, **kwargs)
    return URIRef(text)
def test_rdflib_mysql_test(self):
    """
    Formula-aware store test, taken from rdflib/test/test_mysql.py.

    Parses an N3 document in which two quoted formulae are joined by
    log:implies, then exercises triple lookup inside formulae, variable
    round-tripping, context enumeration, and removal semantics
    (remove, remove_context) against the store.
    """
    implies = URIRef("http://www.w3.org/2000/10/swap/log#implies")
    testN3 = """
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix : <http://test/> .
{:a :b :c;a :foo} => {:a :d :c,?y}.
_:foo a rdfs:Class.
:a :d :c."""
    # Thorough test suite for formula-aware store
    g = self.rdflib_graph
    g.parse(data=testN3, format="n3")
    # print g.store
    # The implication's subject and object are the two quoted graphs.
    for s, p, o in g.triples((None, implies, None)):
        formulaA = s
        formulaB = o
    self.assertTrue(type(formulaA) == QuotedGraph and type(formulaB) == QuotedGraph)
    a = URIRef('http://test/a')
    b = URIRef('http://test/b')
    c = URIRef('http://test/c')
    d = URIRef('http://test/d')
    v = Variable('y')
    universe = ConjunctiveGraph(g.store)
    # test formula as terms
    self.assertTrue(len(list(universe.triples((formulaA, implies, formulaB)))) == 1)
    # test variable as term and variable roundtrip
    self.assertTrue(len(list(formulaB.triples((None, None, v)))) == 1)
    for s, p, o in formulaB.triples((None, d, None)):
        if o != c:
            self.assertTrue(isinstance(o, Variable))
            self.assertTrue(o == v)
    s = list(universe.subjects(RDF.type, RDFS.Class))[0]
    self.assertTrue(isinstance(s, BNode))
    # Counts over the union graph vs. the individual formulae.
    self.assertTrue(
        len(list(universe.triples((None, implies, None)))) == 1)
    self.assertTrue(
        len(list(universe.triples((None, RDF.type, None)))) == 1)
    self.assertTrue(
        len(list(formulaA.triples((None, RDF.type, None)))) == 1)
    self.assertTrue(
        len(list(formulaA.triples((None, None, None)))) == 2)
    self.assertTrue(
        len(list(formulaB.triples((None, None, None)))) == 2)
    self.assertTrue(
        len(list(universe.triples((None, None, None)))) == 3)
    self.assertTrue(
        len(list(formulaB.triples((None, URIRef('http://test/d'), None)))) == 2)
    self.assertTrue(
        len(list(universe.triples((None, URIRef('http://test/d'), None)))) == 1)
    # context tests
    # test contexts with triple argument
    self.assertTrue(
        len(list(universe.contexts((a, d, c)))) == 1)
    # Remove test cases
    universe.remove((None, implies, None))
    self.assertTrue(
        len(list(universe.triples((None, implies, None)))) == 0)
    self.assertTrue(
        len(list(formulaA.triples((None, None, None)))) == 2)
    self.assertTrue(
        len(list(formulaB.triples((None, None, None)))) == 2)
    formulaA.remove((None, b, None))
    self.assertTrue(
        len(list(formulaA.triples((None, None, None)))) == 1)
    formulaA.remove((None, RDF.type, None))
    self.assertTrue(
        len(list(formulaA.triples((None, None, None)))) == 0)
    universe.remove((None, RDF.type, RDFS.Class))
    # remove_context tests
    universe.remove_context(formulaB)
    self.assertTrue(
        len(list(universe.triples((None, RDF.type, None)))) == 0)
    self.assertTrue(
        len(universe) == 1)
    self.assertTrue(
        len(list(formulaB.triples((None, None, None)))) == 0)
    universe.remove((None, None, None))
    self.assertTrue(
        len(universe) == 0)
def term(self, name):
    """Concatenate *name* onto this namespace and return the result as a URIRef."""
    qualified = self + name
    return URIRef(qualified)
from rdflib.term import BNode, Literal, URIRef
from rdflib.namespace import FOAF, XSD
from rdflib import Graph
import urllib
import re
import os

# Build one FOAF graph per year from the IMDb "ratings.list" dump,
# walking the years newest-first.
for year in range(2018, 1850, -1):
    g = Graph()
    g.bind("foaf", FOAF)
    predicate1 = URIRef(f"http://xmlns.com/foaf/0.1/rating")
    predicate2 = URIRef(f"http://xmlns.com/foaf/0.1/votes")
    predicate3 = URIRef(f"http://xmlns.com/foaf/0.1/episode")
    with open(f'ratings.list', encoding='latin-1') as f:
        for line_number, line in enumerate(f):
            # Groups: 1 = vote count, 2 = rating, 3 = title,
            # 4 = year, 6 = optional episode name in braces.
            info = re.match(
                '^ +[0-9.]+ +([0-9]+) +([0-9.]+) +"?([^"\n]+)"? \(([0-9]+)[^\)]*\)( {(.+)})?',
                line)
            if info:
                # print(info.group(1),info.group(2),info.group(3))
                year_string = info.group(4).strip()
                if year_string == str(year):
                    movie_string = info.group(3).strip()
                    movie_name = Literal(movie_string, datatype=XSD.string)
                    # URL-escape the title so it is safe inside an IRI.
                    movie = URIRef(
                        f"http://imdb.org/movie/{urllib.parse.quote(movie_string)}"
                    )
                    g.add((movie, FOAF.name, movie_name))
                    rating_string = info.group(2).strip()
                    # NOTE(review): group(2) is the rating (e.g. "7.4") yet is
                    # typed xsd:integer — XSD.decimal looks intended; confirm.
                    rating_name = Literal(rating_string, datatype=XSD.integer)
                    g.add((movie, predicate1, rating_name))
def map(raw_data):
    """
    Map one Calisphere JSON record (file-like *raw_data*) onto a DPLA
    MAP / EDM / ORE graph and return the graph serialized as Turtle.

    NOTE(review): the function name shadows the builtin `map`.
    """
    MAP = Namespace('http://dp.la/about/map/')
    EDM = Namespace('http://www.europeana.eu/schemas/edm/')
    ORE = Namespace('http://www.openarchives.org/ore/terms/')
    g = rdflib.Graph()
    g.bind('dc', DC)
    g.bind('rdf', RDF)
    g.bind('skos', SKOS)
    g.bind('map', MAP)
    g.bind('edm', EDM)
    g.bind('ore', ORE)
    g.bind('dcterms', DCTERMS)
    data = json.load(raw_data)
    # The item URL is the WebResource the aggregation is shown at.
    item = URIRef(data['url_item'])
    g.add((item, RDF.type, EDM['WebResource']))
    cdl = URIRef('http://dp.la/api/contributor/cdl')
    g.add((cdl, RDF.type, EDM['Agent']))
    g.add((cdl, SKOS['prefLabel'], Literal('California Digital Library')))
    if 'reference_image_md5' in data:
        thumb = URIRef('https://thumbnails.calisphere.org/clip/150x150/' +
                       data['reference_image_md5'])
        g.add((thumb, RDF.type, EDM['WebResource']))
    # Blank-node skeleton: ORE aggregation -> original record + source resource.
    root = BNode()
    g.add((root, RDF.type, ORE['Aggregation']))
    originalRecord = BNode()
    g.add((root, MAP['originalRecord'], originalRecord))
    g.add((originalRecord, RDF.type, EDM['WebResource']))
    aggregatedCHO = BNode()
    g.add((root, EDM.aggregatedCHO, aggregatedCHO))
    g.add((aggregatedCHO, RDF.type, MAP.SourceResource))
    if 'title_ss' in data:
        for title in data['title_ss']:
            g.add((aggregatedCHO, DCTERMS.title, Literal(title)))
    if 'date_ss' in data:
        for date in data['date_ss']:
            date_bnode = BNode()
            g.add((aggregatedCHO, DC.date, date_bnode))
            g.add((date_bnode, RDF.type, EDM.TimeSpan))
            g.add((date_bnode, MAP.providedLabel, Literal(date)))
    if 'identifier_ss' in data:
        for identifier in data['identifier_ss']:
            g.add((aggregatedCHO, DC.identifier, Literal(identifier)))
    # The item URL always doubles as an identifier.
    g.add((aggregatedCHO, DC.identifier, Literal(data['url_item'])))
    if 'rights_ss' in data:
        for rights in data['rights_ss']:
            g.add((aggregatedCHO, DC.rights, Literal(rights)))
    if 'contributor_ss' in data:
        for contributor in data['contributor_ss']:
            contributor_bnode = BNode()
            g.add((aggregatedCHO, DCTERMS.contributor, contributor_bnode))
            g.add((contributor_bnode, RDF.type, EDM.Agent))
            g.add((contributor_bnode, MAP.providedLabel, Literal(contributor)))
    if 'creator_ss' in data:
        for creator in data['creator_ss']:
            creator_bnode = BNode()
            g.add((aggregatedCHO, DCTERMS.creator, creator_bnode))
            g.add((creator_bnode, RDF.type, EDM.Agent))
            g.add((creator_bnode, MAP.providedLabel, Literal(creator)))
    if 'collection_name' in data:
        for collection in data['collection_name']:
            collection_bnode = BNode()
            g.add((aggregatedCHO, DCTERMS.isPartOf, collection_bnode))
            g.add((collection_bnode, RDF.type, DCTERMS.Collection))
            g.add((collection_bnode, DCTERMS.title, Literal(collection)))
    if 'publisher_ss' in data:
        for publisher in data['publisher_ss']:
            g.add((aggregatedCHO, DCTERMS.publisher, Literal(publisher)))
    if 'type' in data:
        # `type` shadows the builtin within this loop.
        for type in data['type']:
            g.add((aggregatedCHO, DCTERMS.type, Literal(type)))
    # Provider label: "campus, repository" when both present, else repository.
    provider = None
    if 'campus_name' in data and 'repository_name' in data:
        provider = data['campus_name'][0] + ', ' + data['repository_name'][
            0]
    elif 'repository_name' in data:
        provider = data['repository_name'][0]
    if provider is not None:
        provider_bnode = BNode()
        g.add((root, EDM.dataProvider, provider_bnode))
        g.add((provider_bnode, RDF.type, EDM.Agent))
        g.add((provider_bnode, MAP.providedLabel, Literal(provider)))
    g.add((root, EDM.isShownAt, item))
    if 'reference_image_md5' in data:
        md5 = data['reference_image_md5']
        image_url = "https://thumbnails.calisphere.org/clip/150x150/" + md5
        g.add((root, EDM.preview, URIRef(image_url)))
    return g.serialize(format='turtle')
def test_uid_to_uri(self):
    """uid_to_uri should prefix a UID with the web root; an empty UID yields the root itself."""
    cases = {
        '/1234': g.webroot + '/1234',
        '/1/2/34': g.webroot + '/1/2/34',
        '': g.webroot,
    }
    for uid, expected in cases.items():
        assert g.tbox.uid_to_uri(uid) == URIRef(expected)
def setUp(self):
    """Create one fixture term of each flavour (URIRef, BNode, Literal, plain strings)."""
    self.uriref = URIRef("http://example.org/")
    self.bnode = BNode()
    self.literal = Literal("http://example.org/")
    # Plain Python strings, for comparison against the RDF terms above.
    self.python_literal = u"http://example.org/"
    self.python_literal_2 = u"foo"
def rdf_description(name, notation='xml'):
    """
    Serialize the node titled *name* as RDF in the given *notation*.

    Opens (or creates) an on-disk IOMemory-backed store, adds one triple
    per non-excluded attribute of the node, prints the serialization and
    closes the graph.

    Parameters
    ----------
    name : str
        Title of the node to describe.
    notation : str
        Serialization format; expected to be one of "xml", "n3",
        "ntriples", "trix".

    Fixes: the Python-2-only `print` statement is replaced by the
    function form (valid on both 2 and 3), and the 18-branch if/elif
    chain is replaced by two dispatch tables with identical behavior.
    """
    valid_formats = ["xml", "n3", "ntriples", "trix"]
    default_graph_uri = "http://gstudio.gnowledge.org/rdfstore"
    configString = "/var/tmp/rdfstore"
    # Get the IOMemory plugin.
    store = plugin.get('IOMemory', Store)('rdfstore')
    # Open previously created store, or create it if it doesn't exist yet
    graph = Graph(store="IOMemory", identifier=URIRef(default_graph_uri))
    path = mkdtemp()
    rt = graph.open(path, create=False)
    if rt == NO_STORE:
        graph.open(path, create=True)
    else:
        assert rt == VALID_STORE, "The underlying store is corrupt"
    graph.bind("gstudio", "http://gnowledge.org/")
    exclusion_fields = [
        "id", "rght", "node_ptr_id", "image", "lft", "_state",
        "_altnames_cache", "_tags_cache", "nid_ptr_id", "_mptt_cached_fields",
    ]
    # reftype -> model for node types whose namespace comes from link(node).
    linkable = {
        'Gbobject': Gbobject,
        'None': Gbobject,
        'Processes': Gbobject,
        'System': Gbobject,
        'Objecttype': Objecttype,
        'Attributetype': Attributetype,
        'Complement': Complement,
        'Union': Union,
        'Intersection': Intersection,
        'Expression': Expression,
        'Processtype': Processtype,
        'Systemtype': Systemtype,
        'AttributeSpecification': AttributeSpecification,
        'RelationSpecification': RelationSpecification,
    }
    # reftype -> model for node types that use the fixed gstudio namespace.
    namespaced = {
        'Attribute': Attribute,
        'Relationtype': Relationtype,
        'Metatype': Metatype,
    }
    # Determine the concrete node type from the NID registry first.
    node = NID.objects.get(title=name)
    node_type = node.reftype
    if node_type in linkable:
        node = linkable[node_type].objects.get(title=name)
        rdflib = link(node)
    else:
        if node_type in namespaced:
            node = namespaced[node_type].objects.get(title=name)
        # Unknown types keep the NID node and the default namespace,
        # exactly as the original else-branch did.
        rdflib = Namespace('http://sbox.gnowledge.org/gstudio/')
    node_dict = node.__dict__
    subject = str(node_dict['id'])
    for key in node_dict:
        if key not in exclusion_fields:
            predicate = str(key)
            pobject = str(node_dict[predicate])
            graph.add((rdflib[subject], rdflib[predicate], Literal(pobject)))
    rdf_code = graph.serialize(format=notation)
    graph.commit()
    print(rdf_code)
    graph.close()
def testH(self):
    """rdf:RDF must be recognised as a core syntax term."""
    rdf_uri = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF")
    self.assertEqual(rdf_uri in CORE_SYNTAX_TERMS, True)
def contexts(self, triple=None):
    """Enumerate contexts (optionally only those containing *triple*).

    Not implemented for this store. The original used `assert False`,
    which silently disappears under `python -O`; raising
    NotImplementedError makes the failure explicit and unconditional.
    """
    raise NotImplementedError("contexts() is not implemented for this store")
    # Unreachable: keeps this method a generator, like the original's
    # dead `for i in range(0): yield ...` loop.
    yield
from dataclasses import dataclass from typing import Callable, Iterable, List, Optional, Union from iolanta.facet import Facet from iolanta.facets.base import FacetSearchAttempt from iolanta.facets.by_environment import FindFacetByEnvironment from iolanta.facets.by_instance import FindFacetByInstance from iolanta.facets.by_literal_datatype import FindFacetByLiteralDatatype from iolanta.facets.by_type import FindFacetByType from ldflex import LDFlex from octadocs.iolanta.errors import FacetError, FacetNotCallable, FacetNotFound from octadocs.octiron import Octiron from rdflib import RDFS from rdflib.term import Literal, Node, URIRef HTML = URIRef('https://html.spec.whatwg.org/') def resolve_facet(iri: URIRef) -> Callable[[Octiron, Node], str]: """Resolve a path to a Python object to that object.""" url = str(iri) if not url.startswith('python://'): raise Exception( 'Octadocs only supports facets which are importable Python ' 'callables. The URLs of such facets must start with `python://`, ' 'which {url} does not comply to.'.format(url=url, )) # It is impossible to use `urlpath` for this operation because it (or, # rather, one of upper classes from `urllib` that `urlpath` depends upon) # will lowercase the URL when parsing it - which means, irreversibly. We
def open(self, path, create=True):
    """
    Open (or create) the Berkeley-DB environment at *path* and every
    database this store uses: the three triple indices, the contexts DB,
    the namespace/prefix maps and the key<->id maps. Also starts the
    background sync thread.

    Returns NO_STORE when bsddb is unavailable or the environment cannot
    be initialised; VALID_STORE on success.
    """
    if not has_bsddb:
        return NO_STORE
    homeDir = path
    if self.__identifier is None:
        # Derive a stable identifier from the store directory path.
        self.__identifier = URIRef(pathname2url(abspath(homeDir)))
    db_env = self._init_db_environment(homeDir, create)
    if db_env == NO_STORE:
        return NO_STORE
    self.db_env = db_env
    self.__open = True
    dbname = None
    dbtype = db.DB_BTREE
    # auto-commit ensures that the open-call commits when transactions
    # are enabled
    dbopenflags = DBOPENFLAGS
    if self.transaction_aware is True:
        dbopenflags |= db.DB_AUTO_COMMIT
    if create:
        dbopenflags |= db.DB_CREATE
    dbmode = 0o660
    dbsetflags = 0
    # create and open the DBs: one B-tree index per rotation of (s, p, o).
    self.__indicies = [
        None,
    ] * 3
    self.__indicies_info = [
        None,
    ] * 3
    for i in range(0, 3):
        index_name = to_key_func(i)((b("s"), b("p"), b("o")), b("c")).decode()
        index = db.DB(db_env)
        index.set_flags(dbsetflags)
        index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
        self.__indicies[i] = index
        self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
    # Precompute, for each of the 8 bound/unbound triple patterns (bitmask
    # i), which index serves it best: the one with the longest contiguous
    # run of bound positions, ties broken by earlier start.
    lookup = {}
    for i in range(0, 8):
        results = []
        for start in range(0, 3):
            score = 1
            len = 0  # NOTE(review): shadows the builtin len() in this scope
            for j in range(start, start + 3):
                if i & (1 << (j % 3)):
                    score = score << 1
                    len += 1
                else:
                    break
            tie_break = 2 - start
            results.append(((score, tie_break), start, len))
        results.sort()
        score, start, len = results[-1]

        def get_prefix_func(start, end):
            # Factory binds start/end now so each pattern gets its own
            # key-prefix generator (avoids the late-binding closure trap).
            def get_prefix(triple, context):
                if context is None:
                    yield ""
                else:
                    yield context
                i = start
                while i < end:
                    yield triple[i % 3]
                    i += 1
                yield ""
            return get_prefix

        lookup[i] = (self.__indicies[start],
                     get_prefix_func(start, start + len),
                     from_key_func(start),
                     results_from_key_func(start, self._from_string))
    self.__lookup_dict = lookup
    # Auxiliary databases: contexts, namespace<->prefix, key<->id maps.
    self.__contexts = db.DB(db_env)
    self.__contexts.set_flags(dbsetflags)
    self.__contexts.open("contexts", dbname, dbtype, dbopenflags, dbmode)
    self.__namespace = db.DB(db_env)
    self.__namespace.set_flags(dbsetflags)
    self.__namespace.open("namespace", dbname, dbtype, dbopenflags, dbmode)
    self.__prefix = db.DB(db_env)
    self.__prefix.set_flags(dbsetflags)
    self.__prefix.open("prefix", dbname, dbtype, dbopenflags, dbmode)
    self.__k2i = db.DB(db_env)
    self.__k2i.set_flags(dbsetflags)
    self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags, dbmode)
    self.__i2k = db.DB(db_env)
    self.__i2k.set_flags(dbsetflags)
    self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags, dbmode)
    self.__needs_sync = False
    # Daemon thread periodically flushes pending changes to disk.
    t = Thread(target=self.__sync_run)
    t.setDaemon(True)
    t.start()
    self.__sync_thread = t
    return VALID_STORE
def extract_props(self):
    """
    Collect every property declared in self.G and record, per property,
    its domain/range (including owl:unionOf collections), enumerated
    range values (owl:oneOf), equivalent properties, rdfs:comment
    definition, label and shape name into self.PROPS.

    Fixes over the original: `== None` replaced with `is None`,
    `type(x) != BNode` replaced with isinstance checks, the unused
    `count` accumulator and large blocks of commented-out code removed.
    """
    properties = []
    self.PROPS = {}
    # Gather subjects typed as any recognised property class.
    property_types = [
        OWL.DatatypeProperty, OWL.ObjectProperty, OWL.AnnotationProperty,
        OWL.TransitiveProperty, OWL.FunctionalProperty, RDF.Property,
        OWL.InverseFunctionalProperty, OWL.SymmetricProperty
    ]
    for prop_type in property_types:
        for s, p, o in self.G.triples((None, RDF.type, prop_type)):
            properties.append(s)
    for p in sorted(properties):
        self.PROPS[p] = {}
    # Gather property values.
    for prop in self.PROPS.keys():
        s = URIRef(prop)
        self.PROPS[prop]['domain'] = None
        self.PROPS[prop]['domain_union'] = None
        self.PROPS[prop]['range'] = None
        self.PROPS[prop]['range_union'] = None
        self.PROPS[prop]['range_value'] = None
        self.PROPS[prop]['e_prop'] = []
        self.PROPS[prop]['label'] = self.sh_label_gen(prop)
        self.PROPS[prop]['shape_name'] = None
        self.PROPS[prop]['definition'] = None
        self.PROPS[prop]['type'] = []
        for obje in self.G.objects(subject=prop, predicate=RDF.type):
            self.PROPS[prop]['type'].append(obje)
        # Domain: a direct URI, or a blank node holding an owl:unionOf list.
        for sub, pred, ob in self.G.triples((s, RDFS.domain, None)):
            if not isinstance(ob, BNode):
                self.PROPS[prop]['domain'] = ob
            else:
                for sub1, pred1, ob1 in self.G.triples((ob, None, None)):
                    if pred1 == OWL.unionOf:
                        self.PROPS[prop]['domain_union'] = Collection(self.G, ob1)
        # Range: a direct URI, an owl:oneOf value list, or an owl:unionOf.
        for sub, pred, ob in self.G.triples((s, RDFS.range, None)):
            if not isinstance(ob, BNode):
                self.PROPS[prop]['range'] = ob
            else:
                for sub1, pred1, ob1 in self.G.triples((ob, None, None)):
                    if pred1 == OWL.oneOf:
                        self.PROPS[prop]['range_value'] = Collection(self.G, ob1)
                    if pred1 == OWL.unionOf:
                        self.PROPS[prop]['range_union'] = Collection(self.G, ob1)
        for equal in self.G.objects(subject=s, predicate=OWL.equivalentProperty):
            self.PROPS[prop]['e_prop'].append(equal)
        for defin in self.G.objects(subject=s, predicate=RDFS.comment):
            self.PROPS[prop]['definition'] = defin
        for name in self.G.objects(subject=s, predicate=RDFS.label):
            self.PROPS[prop]['shape_name'] = name
        # Fall back to a generated label when no rdfs:label was found.
        if self.PROPS[prop]['shape_name'] is None:
            self.PROPS[prop]['shape_name'] = self.sh_label_gen(prop)
def test_util_from_n3_expectliteralandlangdtype(self):
    """from_n3 on a literal carrying both a language tag and a datatype.

    Fix: `self.assert_` is a long-deprecated unittest alias (removed in
    Python 3.12); `assertTrue` is the supported spelling.
    """
    s = '"michel"@fr^^xsd:fr'
    res = util.from_n3(s, default=None, backend=None)
    self.assertTrue(isinstance(res, Literal))
    self.assertEqual(res, Literal('michel', datatype=URIRef('xsd:fr')))
def convertTerm(term, queryProlog):
    """
    Utility function for converting parsed Triple components into Unbound

    Dispatches on the parsed term's type (Variable, BNodeRef, BNode,
    QName, QNamePrefix, ParsedString, ParsedDatatypedLiteral, IRIRef),
    resolving prefixes and base IRIs from *queryProlog* where needed.
    Terms of any other type are returned unchanged.
    """
    #from rdfextras.sparql.sql.RdfSqlBuilder import BNodeRef
    if isinstance(term, Variable):
        if hasattr(queryProlog, 'variableBindings') and term in queryProlog.variableBindings:
            #Resolve pre-bound variables at SQL generation time for SPARQL-to-SQL invokations
            rt = queryProlog.variableBindings.get(term, term)
            # `and/or` idiom: wrap BNode bindings as BNodeRef, else pass through.
            return isinstance(rt, BNode) and BNodeRef(rt) or rt
        else:
            return term
    elif isinstance(term, BNodeRef):
        return term
    elif isinstance(term, BNode):
        #from rdfextras.sparql.sql.RdfSqlBuilder import RdfSqlBuilder
        #if isinstance(queryProlog,RdfSqlBuilder):
        #    return BNode(term + '_bnode') # ensure namespace doesn't overlap with variables
        return term
    elif isinstance(term, QName):
        #QNames and QName prefixes are the same in the grammar
        if not term.prefix:
            if queryProlog is None:
                return URIRef(term.localname)
            else:
                # Default ('') prefix: resolve the binding against the base
                # declaration when both exist; otherwise fall back to
                # whichever of the two is available.
                if queryProlog.baseDeclaration and u'' in queryProlog.prefixBindings and queryProlog.prefixBindings[u'']:
                    base = URIRef(Resolver().normalize(
                        queryProlog.prefixBindings[u''],
                        queryProlog.baseDeclaration))
                elif queryProlog.baseDeclaration:
                    base = queryProlog.baseDeclaration
                else:
                    base = queryProlog.prefixBindings[u'']
                return URIRef(Resolver().normalize(term.localname, base))
        elif term.prefix == '_':
            #Told BNode See: http://www.w3.org/2001/sw/DataAccess/issues#bnodeRef
            #from rdfextras.sparql.sql.RdfSqlBuilder import RdfSqlBuilder, EVAL_OPTION_ALLOW_BNODE_REF, BNodeRef
            #if isinstance(queryProlog,RdfSqlBuilder):
            #    if queryProlog.UseEvalOption(EVAL_OPTION_ALLOW_BNODE_REF):
            #        # this is a 'told' BNode referencing a BNode in the data set (i.e. previously returned by a query)
            #        return BNodeRef(term.localname)
            #    else:
            #        # follow the spec and treat it as a variable
            #        return BNode(term.localname + '_bnode') # ensure namespace doesn't overlap with variables
            import warnings
            warnings.warn(
                "The verbatim interpretation of explicit bnode identifiers is contrary to (current) DAWG stance",
                SyntaxWarning)
            return SessionBNode(term.localname)
        else:
            # Ordinary prefixed name: resolve against its prefix binding.
            return URIRef(Resolver().normalize(
                term.localname, queryProlog.prefixBindings[term.prefix]))
    elif isinstance(term, QNamePrefix):
        if queryProlog is None:
            return URIRef(term)
        else:
            if queryProlog.baseDeclaration is None:
                return URIRef(term)
            return URIRef(Resolver().normalize(term, queryProlog.baseDeclaration))
    elif isinstance(term, ParsedString):
        return Literal(term)
    elif isinstance(term, ParsedDatatypedLiteral):
        # The datatype may itself be a QName that needs resolving first.
        dT = term.dataType
        if isinstance(dT, QName):
            dT = convertTerm(dT, queryProlog)
        return Literal(term.value, datatype=dT)
    elif isinstance(term, IRIRef) and queryProlog.baseDeclaration:
        return URIRef(Resolver().normalize(term, queryProlog.baseDeclaration))
    else:
        return term
def setUp(self):
    """Build the fixture quad parts: context, subject, predicate and object."""
    base = "http://example.com"
    self.c = URIRef(base)
    self.s = BNode(base)
    self.p = URIRef(base + "/predicates/isa")
    self.o = Literal("Objectification")
def term(self, name):
    """Append *name* to this namespace URI as a URIRef.

    Non-string names (e.g. slices reaching us via a __getitem__
    override) are treated as empty, yielding the bare namespace URI.
    """
    if isinstance(name, str):
        suffix = name
    else:
        suffix = ""
    return URIRef(self + suffix)