def jsondict2graph(json_dict):
    """Build an rdflib Graph from a SPARQL JSON results dictionary.

    Expects the standard SPARQL JSON results layout:
    ``json_dict['results']['bindings']`` is a list of rows, each binding
    's', 'p' and 'o' to a dict with 'type'/'value' keys (literal objects
    may also carry a 'datatype' key).

    Returns the populated Graph, with the prefixes from ns_store bound.
    """
    g = Graph()
    # Bind the shared prefix table; a plain loop replaces the original
    # side-effecting list comprehension (which built a throwaway list).
    for binding in ns_store.items():
        g.bind(*binding)
    for triple in json_dict['results']['bindings']:
        ts = triple['s'].get('type', None)
        vs = triple['s']['value']
        if ts == 'uri':
            s = URIRef(vs)
        elif ts == 'literal':
            s = Literal(vs)
        elif ts == 'bnode':
            s = BNode(vs)
        else:
            # Previously an unknown type fell through and raised
            # UnboundLocalError at g.add(); fail with a clear message.
            raise ValueError("unsupported subject type: %r" % ts)
        p = URIRef(triple['p']['value'])
        to = triple['o'].get('type', None)
        vo = triple['o']['value']
        dto = triple['o'].get('datatype', None)
        if to == 'uri':
            o = URIRef(vo)
        elif to == 'literal':
            # Literal is immutable: the datatype must be passed to the
            # constructor. The original assigned o.datatype after
            # construction, which has no effect on the stored literal.
            o = Literal(vo, datatype=URIRef(dto)) if dto else Literal(vo)
        elif to == 'bnode':
            # Bug fix: the original tested ts (the *subject* type) here,
            # so bnode objects were never constructed.
            o = BNode(vo)
        else:
            raise ValueError("unsupported object type: %r" % to)
        g.add((s, p, o))
    logging.debug(g.serialize(format='turtle'))
    return g
def create_input_source(source=None, publicID=None, location=None, file=None, data=None, format=None): """ Return an appropriate InputSource instance for the given parameters. """ # TODO: test that exactly one of source, location, file, and data # is not None. input_source = None if source is not None: if isinstance(source, InputSource): input_source = source else: if isinstance(source, basestring): location = source elif hasattr(source, "read") and not isinstance(source, Namespace): f = source input_source = InputSource() input_source.setByteStream(f) if hasattr(f, "name"): input_source.setSystemId(f.name) else: raise Exception("Unexpected type '%s' for source '%s'" % (type(source), source)) absolute_location = None # Further to fix for issue 130 if location is not None: # Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145 if os.path.exists(location): location = pathname2url(location) base = urljoin("file:", "%s/" % pathname2url(os.getcwd())) absolute_location = URIRef(location, base=base).defrag() if absolute_location.startswith("file:///"): filename = url2pathname(absolute_location.replace("file:///", "/")) file = open(filename, "rb") else: input_source = URLInputSource(absolute_location, format) # publicID = publicID or absolute_location # More to fix for issue 130 if file is not None: input_source = FileInputSource(file) if data is not None: if isinstance(data, unicode): data = data.encode('utf-8') input_source = StringInputSource(data) if input_source is None: raise Exception("could not create InputSource") else: if publicID is not None: # Further to fix for issue 130 input_source.setPublicId(publicID) # Further to fix for issue 130 elif input_source.getPublicId() is None: input_source.setPublicId(absolute_location or "") return input_source
def create_input_source(source=None, publicID=None, location=None, file=None, data=None, format=None): """ Return an appropriate InputSource instance for the given parameters. """ # TODO: test that exactly one of source, location, file, and data # is not None. input_source = None if source is not None: if isinstance(source, InputSource): input_source = source else: if isinstance(source, _StringTypes): location = source elif hasattr(source, "read") and not isinstance(source, Namespace): f = source input_source = InputSource() input_source.setByteStream(f) if hasattr(f, "name"): input_source.setSystemId(f.name) else: raise Exception("Unexpected type '%s' for source '%s'" % (type(source), source)) if location is not None: base = urljoin("file:", "%s/" % pathname2url(os.getcwd())) absolute_location = URIRef(location, base=base).defrag() if absolute_location.startswith("file:///"): filename = url2pathname(absolute_location.replace("file:///", "/")) file = __builtin__.file(filename, "rb") else: input_source = URLInputSource(absolute_location, format) publicID = publicID or absolute_location if file is not None: input_source = FileInputSource(file) if data is not None: if isinstance(data, unicode): data = data.encode('utf-8') input_source = StringInputSource(data) if input_source is None: raise Exception("could not create InputSource") else: if publicID: input_source.setPublicId(publicID) # TODO: what motivated this bit? id = input_source.getPublicId() if id is None: input_source.setPublicId("") return input_source
def __init__(self, store, identifier=None, graph=None):
    # Wraps a graph as a value: when a graph is given, the identifier is
    # derived deterministically from the graph's content (md5 of the
    # pickled, sorted triples) and the triples are copied into the store
    # under this node as context.
    if graph is not None:
        # Caller supplies either a graph or an identifier, never both.
        assert identifier is None
        np = store.node_pickler
        identifier = md5()
        # Sort the triples so the digest is independent of insertion order.
        s = list(graph.triples((None, None, None)))
        s.sort()
        for t in s:
            # NOTE(review): md5.update requires bytes on Python 3; this
            # joins pickler output with a str — looks Python-2 only; confirm.
            identifier.update("^".join((np.dumps(i) for i in t)))
        identifier = URIRef("data:%s" % identifier.hexdigest())
        super(GraphValue, self).__init__(store, identifier)
        # Copy the graph's triples into the store, contextualised by self.
        for t in graph:
            store.add(t, context=self)
    else:
        super(GraphValue, self).__init__(store, identifier)
def convert(self, name, qname, attrs):
    """Resolve a SAX (namespace, localname) pair and its attributes to URIRefs.

    Returns the element name as a URIRef plus a dict of attribute
    URIRefs to values, skipping xmlns declarations and xml:* attributes.
    """
    def to_uri(pair):
        # A missing namespace part means the local name already is the URI.
        if pair[0] is None:
            return URIRef(pair[1])
        return URIRef("".join(pair))

    name = to_uri(name)
    atts = {}
    for key, value in attrs.items():
        att = to_uri(key)
        if att.startswith(XMLNS) or att[0:3].lower() == "xml":
            # Namespace declarations / xml:* attributes carry no RDF content.
            continue
        if att in UNQUALIFIED:
            atts[RDFNS[att]] = value
        else:
            atts[URIRef(att)] = value
    return name, atts
def get_children_uri(self):
    # Collects this resource's ordered children: the first child is
    # reached via purl:hasPart, and each subsequent child via a chain of
    # drc:next links, ending when a query returns no results.
    parts = []
    query = """ SELECT ?part WHERE { %s %s ?part } """ % (
        self.n3(),
        ns_dict["purl"]["hasPart"].n3(),
    )
    results = self.sparql(query)
    while results:
        current = URIRef(results[0]["part"])
        parts.append(current)
        # Follow the linked list: ask for the node after `current`.
        query = """ SELECT ?part WHERE{ %s %s ?part } """ % (
            current.n3(),
            ns_dict["drc"]["next"].n3(),
        )
        results = self.sparql(query)
    return parts[:-1]  # last one will be null, so delete it
# Base vocabulary URI; all constants below are defined relative to it.
VOCABURI = SdoTermSource.vocabUri()

# XML attribute declarations emitted on the root element of the generated
# OWL/RDF document.
NAMESPACES = {
    "xml:base": VOCABURI,
    "xmlns": VOCABURI,
    "xmlns:schema": VOCABURI,
    "xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
    "xmlns:rdfs": "http://www.w3.org/2000/01/rdf-schema#",
    "xmlns:owl": "http://www.w3.org/2002/07/owl#",
    "xmlns:dcterms": "http://purl.org/dc/terms/",
    "xmlns:xsd": "http://www.w3.org/2001/XMLSchema#"
}

from rdflib.term import URIRef

# Vocabulary predicates tested while converting terms to OWL.
DOMAININC = URIRef(VOCABURI + "domainIncludes")
RANGEINC = URIRef(VOCABURI + "rangeIncludes")
INVERSEOF = URIRef(VOCABURI + "inverseOf")
SUPERSEDEDBY = URIRef(VOCABURI + "supersededBy")

# Ranges appended to every non-datatype property, and the types that mark
# a property as datatype-only.
DEFAULTRANGES = [VOCABURI + "Text", VOCABURI + "URL", VOCABURI + "Role"]
DATATYPES = [VOCABURI + "Boolean", VOCABURI + "Date", VOCABURI + "DateTime",
             VOCABURI + "Number", VOCABURI + "Float", VOCABURI + "Integer",
             VOCABURI + "Time"]


class OwlBuild():
    # Builds an OWL rendering of the vocabulary; the counters track how
    # many types, properties and named individuals have been emitted.
    def __init__(self):
        self.typesCount = self.propsCount = self.namedCount = 0
def uid_to_uri(self, uid):
    '''Convert a UID to a URI by prefixing it with the web root.

    :rtype: rdflib.URIRef
    '''
    uri_string = g.webroot + uid
    return URIRef(uri_string)
def _load_uri_ref(uri: str) -> URIRef:
    """Deserialise a raw URI string back into an rdflib URIRef."""
    ref = URIRef(uri)
    return ref
def namespaces(self):
    """Yield every (prefix, namespace) pair from the store, coercing each
    namespace to a URIRef."""
    for prefix, ns in self.store.namespaces():
        yield prefix, URIRef(ns)
def aURI(c):
    """Build an example.org fragment URI for *c*."""
    return URIRef('http://example.org/uri#' + c)
def term(self, name):
    """Concatenate *name* onto this namespace, yielding a URIRef.

    Non-string names (e.g. slices) collapse to the bare namespace URI.
    """
    # need to handle slices explicitly because of __getitem__ override
    suffix = name if isinstance(name, basestring) else ''
    return URIRef(self + suffix)
def format(self, *args, **kwargs):
    """str.format semantics, with the result re-wrapped as a URIRef."""
    formatted = unicode.format(self, *args, **kwargs)
    return URIRef(formatted)
def __mod__(self, *args, **kwargs):
    """%-interpolation on the text value, re-wrapped as a URIRef."""
    interpolated = text_type(self).__mod__(*args, **kwargs)
    return URIRef(interpolated)
def format(self, *args, **kwargs):
    """str.format semantics, with the result re-wrapped as a URIRef."""
    formatted = text_type.format(self, *args, **kwargs)
    return URIRef(formatted)
def open(self, path, create=True):
    # Opens (optionally creating) the Berkeley DB environment at `path`
    # and wires up all databases the store needs: three triple indices,
    # the contexts DB, the prefix<->namespace maps and the key<->int
    # interning maps. Returns VALID_STORE on success, NO_STORE otherwise.
    if not has_bsddb:
        return NO_STORE
    homeDir = path
    if self.__identifier is None:
        # Default the store identifier to a file: URL for the home dir.
        self.__identifier = URIRef(pathname2url(abspath(homeDir)))
    db_env = self._init_db_environment(homeDir, create)
    if db_env == NO_STORE:
        return NO_STORE
    self.db_env = db_env
    self.__open = True
    dbname = None
    dbtype = db.DB_BTREE
    # auto-commit ensures that the open-call commits when transactions
    # are enabled
    dbopenflags = DBOPENFLAGS
    if self.transaction_aware is True:
        dbopenflags |= db.DB_AUTO_COMMIT
    if create:
        dbopenflags |= db.DB_CREATE
    # NOTE(review): 0660 is a Python-2 octal literal (0o660 in Python 3).
    dbmode = 0660
    dbsetflags = 0
    # create and open the DBs
    self.__indicies = [
        None,
    ] * 3
    self.__indicies_info = [
        None,
    ] * 3
    # One index per rotation of (s, p, o); to_key_func(i) also names it.
    for i in xrange(0, 3):
        index_name = to_key_func(i)((b("s"), b("p"), b("o")), b("c")).decode()
        index = db.DB(db_env)
        index.set_flags(dbsetflags)
        index.open(index_name, dbname, dbtype, dbopenflags, dbmode)
        self.__indicies[i] = index
        self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
    # For each of the 8 possible bound/unbound triple patterns, pick the
    # index whose contiguous run of bound terms is longest (score), using
    # position as the tie-breaker, and precompute the lookup helpers.
    lookup = {}
    for i in xrange(0, 8):
        results = []
        for start in xrange(0, 3):
            score = 1
            len = 0  # NOTE(review): shadows the builtin `len` inside this loop
            for j in xrange(start, start + 3):
                if i & (1 << (j % 3)):
                    score = score << 1
                    len += 1
                else:
                    break
            tie_break = 2 - start
            results.append(((score, tie_break), start, len))
        results.sort()
        score, start, len = results[-1]

        def get_prefix_func(start, end):
            # Factory closure: yields the key prefix (context, then the
            # bound terms in index order) used to scan the chosen index.
            def get_prefix(triple, context):
                if context is None:
                    yield ""
                else:
                    yield context
                i = start
                while i < end:
                    yield triple[i % 3]
                    i += 1
                yield ""
            return get_prefix

        lookup[i] = (self.__indicies[start],
                     get_prefix_func(start, start + len),
                     from_key_func(start),
                     results_from_key_func(start, self._from_string))
    self.__lookup_dict = lookup
    self.__contexts = db.DB(db_env)
    self.__contexts.set_flags(dbsetflags)
    self.__contexts.open("contexts", dbname, dbtype, dbopenflags, dbmode)
    self.__namespace = db.DB(db_env)
    self.__namespace.set_flags(dbsetflags)
    self.__namespace.open("namespace", dbname, dbtype, dbopenflags, dbmode)
    self.__prefix = db.DB(db_env)
    self.__prefix.set_flags(dbsetflags)
    self.__prefix.open("prefix", dbname, dbtype, dbopenflags, dbmode)
    # Interning maps: k2i hashes keys to ints, i2k recovers keys by recno.
    self.__k2i = db.DB(db_env)
    self.__k2i.set_flags(dbsetflags)
    self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags, dbmode)
    self.__i2k = db.DB(db_env)
    self.__i2k.set_flags(dbsetflags)
    self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags, dbmode)
    self.__needs_sync = False
    # Background daemon thread flushes pending writes (see __sync_run).
    t = Thread(target=self.__sync_run)
    t.setDaemon(True)
    t.start()
    self.__sync_thread = t
    return VALID_STORE
def title(self):
    """Return the URI formed by appending 'title' to this namespace."""
    suffix = 'title'
    return URIRef(self + suffix)
def namespace(self, prefix):
    """Look up the namespace bound to *prefix*; None when unbound."""
    key = prefix.encode("utf-8")
    ns = self.__namespace.get(key, None)
    if ns is None:
        return None
    return URIRef(ns.decode('utf-8'))
def write_to_model(self, model, evidences, reuse_existing=False):
    # Handles reusing found subgraph IRIs as well as inserting all new individuals
    #
    # Two paths: if an existing exact match for this subgraph is found in
    # the model, only evidence is attached to the matched axioms;
    # otherwise new individuals and axioms are created for every edge.
    axiom_ids = []
    exact_match = None
    response = []
    if reuse_existing:
        response = self.find_matches_in_model(model)
    if len(response) > 0:
        for res in response:
            # TODO: First check that these are "exact" subgraph matches,
            # meaning match result isn't subgraph of other annotation.
            exact_match = res
            break
    if exact_match:
        # Update rdflib.Graph - just add evidence to all edges/axioms
        # Collect axiom_ids to add evidence to
        for u, v, relation in self.edges(data="relation"):
            subject_instance_iri = exact_match[self.node_sparql_variable(u)]
            object_instance_iri = exact_match[self.node_sparql_variable(v)]
            # Cache the matched IRIs on the nodes for later lookups.
            self.nodes[u]["instance_iri"] = subject_instance_iri
            self.nodes[v]["instance_iri"] = object_instance_iri
            relation_uri = expand_uri_wrapper(relation)
            axiom_ids.append(
                model.find_bnode(
                    (self.node_instance_iri(u), URIRef(relation_uri),
                     self.node_instance_iri(v))))
    else:
        # Insert into rdflib.Graph - everything is new
        # NOTE(review): `pattern` is computed but never used below.
        pattern = self.generate_sparql_representation()
        # No. Need to invent IRIs for individuals.
        # Can we reuse model.declare_individual, add_axiom stuff? Probably.
        for u, v, relation in self.edges(data="relation"):
            subject_instance_iri = self.node_instance_iri(u)
            if subject_instance_iri is None:
                # First time we see this node: declare an individual for it.
                subject_instance_iri = model.declare_individual(
                    self.node_class(u))
                self.nodes[u]["instance_iri"] = subject_instance_iri
            object_instance_iri = self.node_instance_iri(v)
            if object_instance_iri is None:
                object_instance_iri = model.declare_individual(
                    self.node_class(v))
                self.nodes[v]["instance_iri"] = object_instance_iri
            try:
                relation_uri = expand_uri_wrapper(relation)
            except AttributeError as ex:
                exception_message = "Unparseable relation: {relation} from triple {u} {relation} {v} in model {modeltitle}".format(
                    relation=relation, u=u, v=v, modeltitle=model.modeltitle)
                logger.info(exception_message)
                raise ModelRdfWriteException(exception_message)
            axiom_ids.append(
                model.add_axiom(
                    model.writer.emit(subject_instance_iri,
                                      URIRef(relation_uri),
                                      object_instance_iri)))
    # Add the evidences to whatever axioms we got
    for axiom_id in axiom_ids:
        for evidence in evidences:
            model.add_evidence(axiom_id, evidence)
def outputProp(self, uri, graph):
    # Emits one owl:ObjectProperty / owl:DatatypeProperty XML element for
    # the property `uri`, translating its triples in `graph` into OWL
    # constructs (label/comment, subPropertyOf, inverseOf, supersededBy,
    # and domain/range unions).
    self.propsCount += 1
    children = []       # child XML elements accumulated for the property
    domains = {}        # used as an ordered set of domain classes
    ranges = []
    datatypeonly = True # stays True while every declared range is a datatype
    ext = None          # extension layer (isPartOf), if any
    for (p, o) in graph.predicate_objects(uri):
        if p == RDFS.label:
            l = Element("rdfs:label")
            l.set("xml:lang", "en")
            l.text = o
            children.append(l)
        elif p == RDFS.comment:
            c = Element("rdfs:comment")
            c.set("xml:lang", "en")
            c.text = Markdown.parse(o)
            children.append(c)
        elif p == RDFS.subPropertyOf:
            sub = Element("rdfs:subPropertyOf")
            subval = str(o)
            if subval == "rdf:type":  # Fixes a special case with schema:additionalType
                subval = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
            sub.set("rdf:resource", subval)
            children.append(sub)
        elif p == INVERSEOF:
            sub = Element("owl:inverseOf")
            sub.set("rdf:resource", o)
            children.append(sub)
        elif p == SUPERSEDEDBY:
            sub = Element("schema:supersededBy")
            sub.set("rdf:resource", o)
            children.append(sub)
        elif p == DOMAININC:
            domains[o] = True
        elif p == RANGEINC:
            ranges.append(str(o))
            if str(o) not in DATATYPES:
                # At least one non-datatype range: emit an ObjectProperty.
                datatypeonly = False
        elif p == URIRef(VOCABURI + "isPartOf"):
            ext = str(o)
    children.append(self.addDefined(uri, ext))
    if not datatypeonly:
        # Object properties implicitly accept the default ranges too.
        for r in DEFAULTRANGES:
            if r not in ranges:
                ranges.append(r)
    if len(domains):
        # rdfs:domain as an owl:unionOf all declared domain classes.
        d = Element("rdfs:domain")
        children.append(d)
        cl = SubElement(d, "owl:Class")
        u = SubElement(cl, "owl:unionOf")
        u.set("rdf:parseType", "Collection")
        for target in domains.keys():
            targ = SubElement(u, "owl:Class")
            targ.set("rdf:about", target)
    if len(ranges):
        # rdfs:range as an owl:unionOf all collected range classes.
        r = Element("rdfs:range")
        children.append(r)
        cl = SubElement(r, "owl:Class")
        u = SubElement(cl, "owl:unionOf")
        u.set("rdf:parseType", "Collection")
        for target in ranges:
            targ = SubElement(u, "owl:Class")
            targ.set("rdf:about", target)
    if datatypeonly:
        prop = SubElement(self.dom, "owl:DatatypeProperty")
    else:
        prop = SubElement(self.dom, "owl:ObjectProperty")
    prop.set("rdf:about", uri)
    for sub in children:
        prop.append(sub)
def setUp(self):
    """Fixture: a dateTime-typed literal for the tests to exercise."""
    dt_type = URIRef('http://www.w3.org/2001/XMLSchema#dateTime')
    self.x = Literal("2008-12-01T18:02:00Z", datatype=dt_type)
def __init__(self, uri, terms):
    """Precompute a URIRef for every allowed term of this namespace."""
    self.uri = uri
    self.__uris = {t: URIRef(self.uri + t) for t in terms}
"XMLLiteral", "HTML", "langString" ]) def term(self, name): try: i = int(name) return URIRef("%s_%s" % (self, i)) except ValueError: return super(_RDFNamespace, self).term(name) RDF = _RDFNamespace() RDFS = ClosedNamespace(uri=URIRef("http://www.w3.org/2000/01/rdf-schema#"), terms=[ "Resource", "Class", "subClassOf", "subPropertyOf", "comment", "label", "domain", "range", "seeAlso", "isDefinedBy", "Literal", "Container", "ContainerMembershipProperty", "member", "Datatype" ]) OWL = Namespace('http://www.w3.org/2002/07/owl#') XSD = Namespace(_XSD_PFX) SKOS = Namespace('http://www.w3.org/2004/02/skos/core#') DOAP = Namespace('http://usefulinc.com/ns/doap#') FOAF = Namespace('http://xmlns.com/foaf/0.1/') DC = Namespace('http://purl.org/dc/elements/1.1/')
import rdflib testContent = """ @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . <http://example.org/doc/1> rdfs:label "Document 1"@en, "Dokument 1"@sv . <http://example.org/doc/2> rdfs:label "Document 2"@en, "Dokument 2"@sv . <http://example.org/doc/3> rdfs:label "Document 3"@en, "Dokument 3"@sv . """ graph = ConjunctiveGraph() graph.load(StringIO(testContent), format='n3') doc1 = URIRef("http://example.org/doc/1") PROLOGUE = """ PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> """ def test_filter_by_lang(): testdata = [ ("en", u'"Document 1"@en'), ("sv", u'"Dokument 1"@sv') ] query = PROLOGUE+''' SELECT ?label WHERE { '''+doc1.n3()+''' rdfs:label ?label .
def __mod__(self, *args, **kwargs):
    """%-interpolation on the unicode value, re-wrapped as a URIRef."""
    interpolated = unicode(self).__mod__(*args, **kwargs)
    return URIRef(interpolated)
def setUp(self):
    """Fixture: one node of each kind plus plain-string counterparts."""
    base = "http://example.org/"
    self.uriref = URIRef(base)
    self.bnode = BNode()
    self.literal = Literal(base)
    self.python_literal = u"http://example.org/"
    self.python_literal_2 = u"foo"
def term(self, name):
    """Return rdf:_n for integer names; defer to the parent otherwise."""
    try:
        index = int(name)
    except ValueError:
        return super(_RDFNamespace, self).term(name)
    return URIRef("%s_%s" % (self, index))
def identifier_augment(self):
    """URIRef built from the identifier of this object's first defined url."""
    first_url = self.url.defined_values[0]
    return URIRef(first_url.identifier)
def title(self):
    # overrides unicode.title to allow DCTERMS.title for example
    suffix = 'title'
    return URIRef(self + suffix)
def _store_uri_ref(ref: URIRef) -> str:
    """Serialise a URIRef to its plain string form for storage."""
    plain = ref.toPython()
    return plain
def create_input_source(source=None, publicID=None, location=None, file=None, data=None, format=None):
    """
    Return an appropriate InputSource instance for the given parameters.

    Exactly one of *source*, *location*, *file* or *data* must be given
    (enforced below); *publicID*, when given, becomes the public id of the
    returned InputSource.
    """
    # test that exactly one of source, location, file, and data is not None.
    if sum((
        source is not None,
        location is not None,
        file is not None,
        data is not None,
    )) != 1:
        raise ValueError(
            'exactly one of source, location, file or data must be given'
        )
    input_source = None
    if source is not None:
        if isinstance(source, InputSource):
            # Already an InputSource: pass it through unchanged.
            input_source = source
        else:
            if isinstance(source, string_types):
                # A plain string is treated as a location (URL or path).
                location = source
            elif hasattr(source, "read") and not isinstance(source, Namespace):
                # File-like object: wrap its byte stream directly.
                f = source
                input_source = InputSource()
                input_source.setByteStream(f)
                if f is sys.stdin:
                    # stdin has no usable .name; give it a stable system id.
                    input_source.setSystemId("file:///dev/stdin")
                elif hasattr(f, "name"):
                    input_source.setSystemId(f.name)
            else:
                raise Exception("Unexpected type '%s' for source '%s'" % (type(source), source))
    absolute_location = None  # Further to fix for issue 130
    auto_close = False  # make sure we close all file handles we open
    if location is not None:
        # Fix for Windows problem https://github.com/RDFLib/rdflib/issues/145
        if os.path.exists(location):
            location = pathname2url(location)
        # Resolve relative locations against the current working directory.
        base = urljoin("file:", "%s/" % pathname2url(os.getcwd()))
        absolute_location = URIRef(location, base=base).defrag()
        if absolute_location.startswith("file:///"):
            # Local file: open it and fall through to the `file` branch.
            filename = url2pathname(absolute_location.replace("file:///", "/"))
            file = open(filename, "rb")
        else:
            input_source = URLInputSource(absolute_location, format)
            auto_close = True
        # publicID = publicID or absolute_location  # Further to fix
        # for issue 130
    if file is not None:
        input_source = FileInputSource(file)
    if data is not None:
        if isinstance(data, text_type):
            data = data.encode('utf-8')
        input_source = StringInputSource(data)
        auto_close = True
    if input_source is None:
        raise Exception("could not create InputSource")
    else:
        input_source.auto_close |= auto_close
        if publicID is not None:  # Further to fix for issue 130
            input_source.setPublicId(publicID)
        # Further to fix for issue 130
        elif input_source.getPublicId() is None:
            # Fall back to the resolved location (or "") as the public id.
            input_source.setPublicId(absolute_location or "")
        return input_source
''' import rdflib import rdfextras from rdflib.graph import Graph, ConjunctiveGraph from rdflib.namespace import Namespace from rdflib.term import Literal from rdflib.term import URIRef from rdflib import plugin from rdflib.store import Store, NO_STORE, VALID_STORE # Define Namespace hs = Namespace("http://homesensor.com#") # SNode1 = URIRef("http://homesensor.com/Node1/") SNode2 = URIRef("http://homesensor.com/Node2/") SNode3 = URIRef("http://homesensor.com/Node3/") SNode4 = URIRef("http://homesensor.com/Node4/") mystore_graph_uri = "http://homesensor.com/mystore" configString = "/var/tmp/mystore" # Get the Sleepycat plugin. mystore = plugin.get('Sleepycat', Store)('mystore') mystore.open("ay_folder", create=False) # g = ConjunctiveGraph(store=mystore) g.bind("homesensor", hs)
def __init__(
        self,
        graph: Graph,
        concept_type_uri: Union[str, URIRef],
        sub_thesaurus_type_uri: Union[str, URIRef] = "",
        thesaurus_relation_type_uri: Union[str, URIRef] = "",
        thesaurus_relation_is_specialisation: bool = False,
        remove_deprecated: bool = True,
        langs: FrozenSet[str] = frozenset(),
        handle_title_case: bool = True,
        extract_upper_case_from_braces: bool = True,
        extract_any_case_from_braces: bool = False,
        expand_ampersand_with_spaces: bool = True,
        expand_abbreviation_with_punctuation: bool = True,
        simple_english_plural_rules: bool = False,
):
    """Creates the predictor.

    Args:
        graph: The SKOS onthology used to extract the labels.
        concept_type_uri: The uri of the concept type. It is assumed that
            for every concept c, there is a triple
            (c, RDF.type, concept_type_uri) in the graph.
        sub_thesaurus_type_uri: The uri of the concept type. It is assumed
            that for every sub thesaurus t, there is a triple
            (t, RDF.type, sub_thesaurus_type_uri) in the graph.
        thesaurus_relation_type_uri: Uri of the relation that links
            concepts to thesauri.
        thesaurus_relation_is_specialisation: Indicates whether the
            thesaurus_relation links thesauri to concepts or the other way
            round. E.g., for the relation skos:broader it should be false.
            Conversely it should be true for skos:narrower.
        remove_deprecated: When True will discard deprecated subjects.
            Deprecation of a subject has to be indicated by a triple
            (s, OWL.deprecated, Literal(True)) in the graph.
        langs: For each language present in the set, labels will be
            extracted from the graph. An empy set or None will extract
            labels regardless of language.
        handle_title_case: When True, will also match labels in title
            case. I.e., in a text the first letter of every word can be
            upper or lower case and will still be matched. When False only
            the case of the first word's first letter will be adapted.
            Example:
                * Given a label "garbage can" and the title
                  "Oscar Lives in a Garbage Can"
                * When handle_title_case == True the label will match the
                  text.
                * When handle_title_case == False the label will not match
                  the text. It would however still match
                  "Garbage can is home to grouchy neighbor.".
        extract_upper_case_from_braces: Controls extraction of upper-case
            label variants found in braces.
        extract_any_case_from_braces: Controls extraction of label
            variants in braces regardless of case.
        expand_ampersand_with_spaces: When True, "&" in labels also
            matches with surrounding spaces.
        expand_abbreviation_with_punctuation: When True, abbreviations
            also match punctuated variants.
        simple_english_plural_rules: When True, labels also match simple
            English plural forms.
    """
    self.graph = graph
    # Coerce all URI arguments to URIRef so later graph lookups compare
    # against proper rdflib terms rather than plain strings.
    if isinstance(concept_type_uri, str):
        concept_type_uri = URIRef(concept_type_uri)
    self.concept_type_uri = concept_type_uri
    if isinstance(sub_thesaurus_type_uri, str):
        sub_thesaurus_type_uri = URIRef(sub_thesaurus_type_uri)
    self.sub_thesaurus_type_uri = sub_thesaurus_type_uri
    if isinstance(thesaurus_relation_type_uri, str):
        thesaurus_relation_type_uri = URIRef(thesaurus_relation_type_uri)
    self.thesaurus_relation_type_uri = thesaurus_relation_type_uri
    self.thesaurus_relation_is_specialisation = (
        thesaurus_relation_is_specialisation)
    self.remove_deprecated = remove_deprecated
    self.langs = langs
    self.handle_title_case = handle_title_case
    self.extract_upper_case_from_braces = extract_upper_case_from_braces
    self.extract_any_case_from_braces = extract_any_case_from_braces
    self.expand_ampersand_with_spaces = expand_ampersand_with_spaces
    self.expand_abbreviation_with_punctuation = \
        expand_abbreviation_with_punctuation
    self.simple_english_plural_rules = simple_english_plural_rules
def setUp(self):
    """Fixture: one context, subject, predicate and object node."""
    example = "http://example.com"
    self.c = URIRef(example)
    self.s = BNode(example)
    self.p = URIRef("http://example.com/predicates/isa")
    self.o = Literal("Objectification")
def testH(self):
    """rdf:RDF must be one of the core syntax terms."""
    # assertIn replaces the deprecated assertEquals(... in ..., True):
    # same check, but failures now report the container's contents
    # instead of just "False != True".
    self.assertIn(
        URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#RDF"),
        CORE_SYNTAX_TERMS)
# create the weather station resource template
# emulate the .well-known/core interface
baseObject.create({'resourceName': '.well-known', 'resourceClass': 'SmartObject'},
                  ).create({'resourceName': 'core', 'resourceClass': 'LinkFormatProxy'})
# sensors resource under the baseObject for all sensors
# top level object container for sensors, default class is SmartObject
sensors = baseObject.create({'resourceName': 'sensors', 'resourceClass': 'SmartObject'})
# weather resource under sensors for the weather sensor
# create a default class SmartObject for the weather sensor cluster
weather = sensors.create({'resourceName': 'rhvWeather-01', 'resourceClass': 'SmartObject'})
# example description in simple link-format like concepts
# -- describe the sensor cluster itself --
baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDFS.Class, Literal('SmartObject')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDF.type, Literal('SensorSystem')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01'), RDFS.Resource, Literal('Weather')))
# -- describe each individual sensor (rdf:type + rdfs:Resource) --
# baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_temperature'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_temperature'), RDFS.Resource, Literal('temperature')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_humidity'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/outdoor_humidity'), RDFS.Resource, Literal('humidity')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/sealevel_pressure'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/sealevel_pressure'), RDFS.Resource, Literal('pressure')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_temperature'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_temperature'), RDFS.Resource, Literal('temperature')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_humidity'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/indoor_humidity'), RDFS.Resource, Literal('humidity')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_gust'), RDF.type, Literal('sensor')))
baseObject.Description.set((URIRef('sensors/rhvWeather-01/wind_gust'), RDFS.Resource, Literal('speed')))
def relativize(self, uri):
    """Strip this graph's base prefix from *uri* when it applies."""
    base = self.base
    if base is None or not uri.startswith(base):
        return uri
    return URIRef(uri.replace(base, "", 1))
def from_n3(s, default=None, backend=None, nsm=None):
    r'''
    Creates the Identifier corresponding to the given n3 string.

    Dispatches on the leading characters of *s*: angle brackets yield a
    URIRef, quotes a Literal (with optional language tag or datatype),
    braces/brackets a QuotedGraph/Graph, "_:" a BNode, a prefixed name is
    resolved through *nsm*, and anything else falls back to a BNode.

        >>> from_n3('<http://ex.com/foo>') == URIRef('http://ex.com/foo')
        True
        >>> from_n3('"foo"@de') == Literal('foo', lang='de')
        True
        >>> from_n3('"""multi\nline\nstring"""@en') == Literal(
        ...     'multi\nline\nstring', lang='en')
        True
        >>> from_n3('42') == Literal(42)
        True
        >>> from_n3(Literal(42).n3()) == Literal(42)
        True
        >>> from_n3('"42"^^xsd:integer') == Literal(42)
        True
        >>> from rdflib import RDFS
        >>> from_n3('rdfs:label') == RDFS['label']
        True
        >>> nsm = NamespaceManager(Graph())
        >>> nsm.bind('dbpedia', 'http://dbpedia.org/resource/')
        >>> berlin = URIRef('http://dbpedia.org/resource/Berlin')
        >>> from_n3('dbpedia:Berlin', nsm=nsm) == berlin
        True
    '''
    if not s:
        return default
    if s.startswith('<'):
        return URIRef(s[1:-1])
    elif s.startswith('"'):
        # Literal: pick the quote style, then split off the trailing
        # lang-tag / datatype suffix.
        if s.startswith('"""'):
            quotes = '"""'
        else:
            quotes = '"'
        value, rest = s.rsplit(quotes, 1)
        value = value[len(quotes):]  # strip leading quotes
        datatype = None
        language = None
        # as a given datatype overrules lang-tag check for it first
        dtoffset = rest.rfind('^^')
        if dtoffset >= 0:
            # found a datatype
            # datatype has to come after lang-tag so ignore everything before
            # see: http://www.w3.org/TR/2011/WD-turtle-20110809/
            # #prod-turtle2-RDFLiteral
            datatype = from_n3(rest[dtoffset + 2:], default, backend, nsm)
        else:
            if rest.startswith("@"):
                language = rest[1:]  # strip leading at sign
        value = value.replace(r'\"', '"')
        # Hack: this should correctly handle strings with either native unicode
        # characters, or \u1234 unicode escapes.
        value = value.encode("raw-unicode-escape").decode("unicode-escape")
        return Literal(value, language, datatype)
    elif s == 'true' or s == 'false':
        return Literal(s == 'true')
    elif s.isdigit():
        return Literal(int(s))
    elif s.startswith('{'):
        # Formula / quoted graph.
        identifier = from_n3(s[1:-1])
        return QuotedGraph(backend, identifier)
    elif s.startswith('['):
        identifier = from_n3(s[1:-1])
        return Graph(backend, identifier)
    elif s.startswith("_:"):
        return BNode(s[2:])
    elif ':' in s:
        # Prefixed name: resolve the prefix through the namespace manager.
        if nsm is None:
            # instantiate default NamespaceManager and rely on its defaults
            nsm = NamespaceManager(Graph())
        prefix, last_part = s.split(':', 1)
        ns = dict(nsm.namespaces())[prefix]
        return Namespace(ns)[last_part]
    else:
        return BNode(s)