def get_src_queries(self):
    q1 = prepareQuery(
        QueryStrings.SPARQL_HEIGHT_DIAMETER_CONTENT_X_Y_VELOCITY_MASSFLOW_TEMP_HEATCAPA_DENSITY_MOLEWEIGHT)
    q2 = prepareQuery(QueryStrings.SPARQL_CONTENT)
    q3 = prepareQuery(QueryStrings.SPARQL_ERATE)
    return q1, q2, q3
def get_src_data(self):
    """Gets all source data.

    Returns:
        list of source data objects for the ADMS_SOURCE_DETAILS
        section of the APL.
    """
    sources = []
    q1 = prepareQuery(
        QueryStrings.SPARQL_DIAMETER_TEMP_HEIGHT_MASSFLOW_HEATCAPA_DENSITY_MOLEWEIGHT)
    q2 = prepareQuery(QueryStrings.SPARQL_CONTENT)
    q3 = prepareQuery(QueryStrings.SPARQL_ERATE)
    for src in self.topnode:
        iri = self.connectChimneyDB(src)
        qdata = self.query(q1)
        qdataC = self.query(q2)
        qdataERate = self.query(q3)
        aresult, sorteder, pollutantnames = self.get_new_src_data(
            iri, qdata, qdataC, qdataERate)
        new_src = admsSrc(
            SrcName=str(src[Constants.KEY_MMSI]),
            SrcHeight=aresult[Constants.KEY_HEIGHT].toPython(),
            SrcDiameter=float(aresult[Constants.KEY_DIAMETER].toPython()),
            SrcPolEmissionRate=sorteder,
            SrcPollutants=pollutantnames,
            SrcTemperature=aresult[Constants.KEY_TEMP].toPython(),
            SrcMolWeight=aresult[Constants.KEY_MOLE_WEIGHT].toPython(),
            SrcDensity=float(aresult[Constants.KEY_DENSITY].toPython()),
            SrcSpecHeatCap=aresult[Constants.KEY_HEAT_CAP].toPython(),
            SrcNumPollutants=len(pollutantnames),
            SrcMassFlux=aresult[Constants.KEY_MASS_FLOW].toPython())
        sources.append(new_src)
    return sources
def rewriteQueryToCapabilityMap(q):
    # Note: for debugging, use algebra.pprintAlgebra(<node>)
    # Navigate to the BGP node in the tree
    prologue_ = []
    for prefix, uri in q.prologue.namespace_manager.namespaces():
        prologue_.append("PREFIX %s: <%s>" % (prefix, uri))
    capabilityServiceMappings = [
        ('http://froogle.com/',
         algebra.Join(
             algebra.BGP([(Variable('a'), Variable('a'), Variable('a'))]),
             algebra.BGP([(Variable('b'), Variable('b'), Variable('b'))]))),
    ]
    addServiceCapabilities(q.algebra, capabilityServiceMappings)
    query_ = evalPart(q.prologue, q.algebra)
    # print("\n".join(prologue_))
    print(query_)
    # Example of the resulting algebra:
    # ServiceGraphPattern(
    #     term = http://fish.db.endpoint
    #     graph = GroupGraphPatternSub(
    #         part = [TriplesBlock_{'_vars': set([?dbp]), 'triples': [[?dbp, rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), rdflib.term.Literal(u'fish')]]}]
    #         _vars = set([?dbp])
    #     )
    #     _vars = set([?dbp])
    # )
    try:
        prepareQuery("\n".join(prologue_) + query_, initNs=initNS)
    except Exception:
        print('## Did not validate ##')
def build_possible_queries():
    queries = []
    query = prepareQuery("""SELECT ?name WHERE {
            ?r owl:Class ro:Recipe .
            ?r ro:name ?name .
        } ORDER BY ?name""",
        initNs={'ro': RO, 'owl': OWL})
    queries.append(Query('List all imported recipes', query))
    query = prepareQuery("""SELECT ?name WHERE {
            ?i owl:Class ro:Food .
            ?i ro:food_name ?name .
        } ORDER BY ?name""",
        initNs={'ro': RO, 'owl': OWL})
    queries.append(Query('List all imported ingredients', query))
    query = prepareQuery("""SELECT ?name WHERE {
            ?recipe ro:name ?name .
            ?recipe ro:ingredient ?ingredient .
            ?ingredient ro:food ?food .
            ?food ro:food_name ?param .
        } ORDER BY ?name""",
        initNs={'ro': RO, 'owl': OWL})
    queries.append(
        Query('List all recipes containing desired ingredient', query,
              'Ingredient name: '))
    return queries
def parse(foaf_url):
    global gexf_graph
    global parsedFOAFS
    global queuedFOAFS
    g = Graph()
    try:
        g.load(foaf_url)
    except Exception:
        print("Can't fetch " + foaf_url)
        return
    SIOC = Namespace("http://rdfs.org/sioc/ns#")
    acctID = URIRef(g.value(URIRef(foaf_url), FOAF.maker) + "#acct")
    root_accountName = str(g.value(acctID, FOAF.accountName))
    root_webfinger = root_accountName + "@" + urlparse(foaf_url).hostname
    subscriptions = prepareQuery(
        """SELECT ?accountName ?accountProfilePage WHERE {
            ?person sioc:follows ?b .
            ?b foaf:accountName ?accountName .
            ?b foaf:accountProfilePage ?accountProfilePage .
        }""",
        initNs={"foaf": FOAF, "sioc": SIOC})
    subscribers = prepareQuery(
        """SELECT ?accountName ?accountProfilePage WHERE {
            ?b sioc:follows ?person .
            ?b foaf:accountName ?accountName .
            ?b foaf:accountProfilePage ?accountProfilePage .
        }""",
        initNs={"foaf": FOAF, "sioc": SIOC})
    gexf_graph.addNode(root_webfinger, root_webfinger)
    for subscription in g.query(subscriptions, initBindings={'person': acctID}):
        accountProfilePage = str(subscription.accountProfilePage) + "/foaf"
        accountName = str(subscription.accountName)
        if blacklisted(accountProfilePage) is False:
            hostname = urlparse(accountProfilePage).hostname
            webfinger = accountName + "@" + hostname
            gexf_graph.addNode(webfinger, webfinger)
            gexf_graph.addEdge(root_webfinger + webfinger,
                               root_webfinger, webfinger)
            if accountProfilePage not in parsedFOAFS:
                queuedFOAFS.put(accountProfilePage)
    for subscriber in g.query(subscribers, initBindings={'person': acctID}):
        accountProfilePage = str(subscriber.accountProfilePage) + "/foaf"
        accountName = str(subscriber.accountName)
        if blacklisted(accountProfilePage) is False:
            hostname = urlparse(accountProfilePage).hostname
            webfinger = accountName + "@" + hostname
            gexf_graph.addNode(webfinger, webfinger)
            gexf_graph.addEdge(webfinger + root_webfinger,
                               root_webfinger, webfinger)
            if accountProfilePage not in parsedFOAFS:
                queuedFOAFS.put(accountProfilePage)
def validateSPARQL(self):
    if (self.prefixes is not None and self.comboBox is not None
            and self.comboBox.currentIndex() is not None
            and self.prefixes[self.comboBox.currentIndex()] is not None
            and self.inp_sparql2.toPlainText() is not None
            and self.inp_sparql2.toPlainText() != ""):
        try:
            if self.prefixes[self.comboBox.currentIndex()] != "":
                prepareQuery(
                    "".join(self.prefixes[self.comboBox.currentIndex()]) +
                    "\n" + self.inp_sparql2.toPlainText())
            self.errorLabel.setText("Valid Query")
            self.errorline = -1
            self.sparqlhighlight.errorhighlightline = self.errorline
            self.sparqlhighlight.currentline = 0
            self.inp_sparql2.errorline = None
        except Exception as e:
            match = re.search(r'line:([0-9]+),', str(e))
            start = int(match.group(1)) - len(
                self.triplestoreconf[self.comboBox.currentIndex()]["prefixes"]) - 1
            self.errorLabel.setText(
                re.sub("line:([0-9]+),", "line: " + str(start) + ",", str(e)))
            self.inp_sparql2.errorline = start - 1
            if "line" in str(e):
                ex = str(e)
                start = ex.find('line:') + 5
                end = ex.find(',', start)
                start2 = ex.find('col:') + 4
                end2 = ex.find(')', start2)
                self.errorline = ex[start:end]
                self.sparqlhighlight.errorhighlightcol = ex[start2:end2]
                self.sparqlhighlight.errorhighlightline = self.errorline
                self.sparqlhighlight.currentline = 0
def test_info(testname):
    q = prepareQuery(('SELECT ?atest ?analname ?fastinfo WHERE {'
                      '?analyte foaf:name ?analname .'
                      '?atest lab:tests ?analyte .'
                      '?atest lab:fasting ?fastinfo .}'),
                     initNs={'foaf': FOAF, 'lab': labn})
    test = labn[testname.upper()]
    data = []
    for row in graph.query(q, initBindings={'atest': test}):
        data.append({
            'test': row.atest,
            'test_code': testname.upper(),
            'analyte': row.analname,
            'preparation': {'fasting': row.fastinfo}
        })
    q = prepareQuery(
        'SELECT ?specimens ?label WHERE { ?atest lab:testWith ?specimens . '
        '?specimens rdfs:label ?label}',
        initNs={'lab': labn})
    specimens = []
    # Bind ?atest, the variable actually used in the query above.
    for row in graph.query(q, initBindings={'atest': data[0]['test']}):
        specimens.append({'specimens': row.specimens, 'label': row.label})
    data[0]['specimens'] = specimens
    return jsonify(data)
def get_pol(self):
    """
    Prepares data for the ADMS_POLLUTANT_DETAILS section of the APL.
    Separates particles into PM10 and PM2.5 categories. Stores emission
    rates in the em_rates class member variable for every examined
    source for later use.

    @see: get_src_data()
    @return: list of Pol objects.
    """
    # No more than 10 pollutants are allowed per ADMS source.
    limit = 10 - len(self.pollutants)
    pols = []
    pol_data = []
    diam_dens = set()
    pol_names = {}
    q1 = prepareQuery(QueryStrings.SPARQL_DIAMETER_DENSITY_MASSFRACTION)
    q2 = prepareQuery(QueryStrings.SPARQL_MASSRATE)
    i = 0
    k = 0
    for src in self.entity:
        iri = self.connect_chimney_db(src)
        self.em_rates[iri] = {}
        qb = self.query(q1)
        massrate = next(iter(self.query(q2)))[
            Constants.KEY_MASS_RATE].toPython()
        for row in qb:
            dd = (row[Constants.KEY_DIAMETER].toPython(),
                  row[Constants.KEY_DENSITY].toPython())
            mf = float(row[Constants.KEY_MASS_FRACTION]) * massrate
            pol_data.append({
                Constants.KEY_DIAMETER + Constants.KEY_DENSITY: dd,
                Constants.KEY_MASS_FLOW: mf,
                Constants.KEY_SRC: iri
            })
            diam_dens.add(dd)
            self.em_rates[iri][dd] = mf
    dd_srt = sorted(diam_dens, key=lambda tup: tup[0], reverse=True)[0:limit]
    for diam, dens in dd_srt:
        name = None
        if diam <= 0.00001:
            name = Constants.POL_PM10 + '-' + str(i)
            i = i + 1
        if diam <= 0.0000025:
            name = Constants.POL_PM25 + '-' + str(k)
            k = k + 1
        if name is not None:
            pol_names[(diam, dens)] = name
            pols.append(AdmsPol(name, 1, [diam], [dens], [1.0e+0]))
            self.pollutants.append(name)
    self.update_em_rates(pol_names, limit)
    return pols
def load_rules(self):
    # Create the var to store the rules
    rules = {}

    # Load the rules file
    g = rdflib.Graph()
    g.load(bz2.BZ2File(self.rulesFile), format="turtle")
    if len(g) == 0:
        return
    print("Loaded %d triple rules from %s" % (len(g), self.rulesFile))

    # Load the AddDimensionValue rules from the graph g
    q = prepareQuery("""
        select ?target ?dim ?value where {
            ?rule a harmonizer:AddDimensionValue.
            ?rule harmonizer:dimension ?dim.
            ?rule harmonizer:targetDimension ?target.
            ?rule harmonizer:value ?value.
        }
        """, initNs={"harmonizer": self.namespaces['harmonizer']})
    qres = g.query(q)
    for row in qres:
        (target, dim, value) = row
        rule = {'type': 'AddDimensionValue', 'dimval': (dim, value)}
        rules.setdefault(target, []).append(rule)

    # Load the SetDimension rules from the graph g
    q = prepareQuery("""
        select ?target ?dim where {
            ?rule a harmonizer:SetDimension.
            ?rule harmonizer:dimension ?dim.
            ?rule harmonizer:targetDimension ?target.
        }
        """, initNs={"harmonizer": self.namespaces['harmonizer']})
    qres = g.query(q)
    for row in qres:
        (target, dim) = row
        rule = {'type': 'SetDimension', 'dimension': dim}
        rules.setdefault(target, []).append(rule)

    # Load the IgnoreObservation rules from the graph g
    q = prepareQuery("""
        select ?target where {
            ?rule a harmonizer:IgnoreObservation.
            ?rule harmonizer:targetDimension ?target.
        }
        """, initNs={"harmonizer": self.namespaces['harmonizer']})
    qres = g.query(q)
    for row in qres:
        target = row[Variable('target')]
        rule = {'type': 'IgnoreObservation'}
        rules.setdefault(target, []).append(rule)

    return rules
def get_pol(self):
    """
    Prepares data for the ADMS_POLLUTANT_DETAILS section of the APL.
    Separates particles into PM10 and PM2.5 categories. Stores emission
    rates in the em_rates class member variable for every examined
    source for later use.

    @see: get_src_data()
    @return: list of Pol objects.
    """
    pols = []
    pol_data = []
    diam_dens = set()
    pol_names = {}
    q1 = prepareQuery(QueryStrings.SPARQL_DIAMETER_DENSITY_MASSFRACTION)
    q2 = prepareQuery(QueryStrings.SPARQL_MASSRATE)
    i = 0
    k = 0
    for src in self.topnode:
        iri = self.connectChimneyDB(src)
        qb = self.query(q1)
        massrate = next(iter(self.query(q2)))[
            Constants.KEY_MASS_RATE].toPython()
        for row in qb:
            dd = (row[Constants.KEY_DIAMETER].toPython(),
                  row[Constants.KEY_DENSITY].toPython())
            pol_data.append({
                Constants.KEY_DIAMETER + Constants.KEY_DENSITY: dd,
                Constants.KEY_MASS_FLOW:
                    float(row[Constants.KEY_MASS_FRACTION]) * massrate,
                Constants.KEY_SRC: iri
            })
            diam_dens.add(dd)
        self.em_rates[iri] = {}
    for diam, dens in diam_dens:
        name = None
        if diam <= 0.00001:
            name = Constants.POL_PM10 + '-' + str(i)
            i = i + 1
        if diam <= 0.0000025:
            name = Constants.POL_PM25 + '-' + str(k)
            k = k + 1
        if name is not None:
            pol_names[(diam, dens)] = name
            pols.append(admsPol(name, 1, [diam], [dens], [1.0e+0]))
            self.pollutants.append(name)
    for pd in pol_data:
        pol_key = pd[Constants.KEY_DIAMETER + Constants.KEY_DENSITY]
        if pol_key in pol_names:
            self.em_rates[pd[Constants.KEY_SRC]][pol_names[pol_key]] = \
                pd[Constants.KEY_MASS_FLOW]
    return pols
def localPropertyTypeExtractor(self):
    query_str = "SELECT DISTINCT ?p WHERE { ?s ?p ?o . } "
    query_object = prepareQuery(query_str)
    predicates = self.localrdfgraph.query(query_object)
    print("Using %s local predicates" % (len(predicates)))
    for p in predicates:
        prop = str(p[0])
        if not prop.startswith("http://dbpedia.org/"):
            # We ignore other kinds of properties and focus on dbpedia ones.
            # Others will be treated as annotations (rdfs:label, foaf:name)
            # or handled specially (rdf:type).
            continue
        # Sample up to 100 values to decide whether this is an object
        # property (URI-valued) or a data property.
        query_str = ("SELECT ?value WHERE { ?s <" + prop +
                     "> ?value . } limit 100")
        query_object = prepareQuery(query_str)
        values = self.localrdfgraph.query(query_object)
        n_values = len(values)
        n_uris = 0
        for v in values:
            if str(v[0]).startswith("http"):
                n_uris += 1
        if n_values == 1:
            isObjectProperty = (n_uris == n_values)
        else:
            isObjectProperty = (n_uris > (n_values / 2))
        self.propertyType[prop] = isObjectProperty
def find_filenames_for_tags(self, tags, flag_only_existing=False):
    dinst_set = self.find_datainstances_for_tags(tags)
    print(dinst_set)
    g = Graph()
    for dinst in dinst_set:
        g.parse(self.client.target('observation/data-about?urn=' + dinst))
    filepaths = {}
    # Prepare both queries once, outside the loop over data instances.
    queryDataInstance = prepareQuery("""
        SELECT DISTINCT ?filepath ?dataobject
        WHERE {
            ?dataInstance fff:dataObject ?dataobject .
            ?dataInstance fff:dataLocation ?dataLocation .
            ?dataLocation fff:filePath ?filepath .
        }""", initNs=ns)
    queryDataObject = prepareQuery("""
        SELECT ?tag
        WHERE {
            ?anno oa:hasTarget ?dataobject .
            ?anno oa:hasBody ?body .
            ?body cnt:chars ?tag .
        }""", initNs=ns)
    for dinst in dinst_set:
        dinst_uri = URIRef(dinst)
        filepath = ""
        found_tags = set()
        for row in g.query(queryDataInstance,
                           initBindings=dict(dataInstance=dinst_uri)):
            try:
                filepath = row['filepath'].value
                print(filepath)
            except KeyError:
                pass
            try:
                dataobject = "" + row['dataobject']
                g.parse(self.client.target(
                    "observation/data-about?urn=" + dataobject))
                for row2 in g.query(queryDataObject,
                                    initBindings=dict(dataobject=dataobject)):
                    try:
                        found_tags.add(row2['tag'].value)
                    except KeyError:
                        pass
            except KeyError:
                pass
        # if flag_only_existing and os.path.exists(filepath):
        #     filepaths[filepath] = found_tags
        # else:
        filepaths[filepath] = found_tags
    return filepaths
def add_prepared_query(self, name, query, initNs=None):
    self.log.debug("adding prepared query with name %s", name)
    if initNs is None:
        prepared_query = prepareQuery(query)
    else:
        prepared_query = prepareQuery(query, initNs=initNs)
    self.prepared_queries[name] = (query, prepared_query)
    self.prepared_query_to_str[prepared_query] = query
    return self.prepared_queries[name][-1]
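A hypothetical call site for the caching helper above; `store` stands in for whatever object carries the `prepared_queries` dict, and the query text is illustrative only.

# Hypothetical usage of add_prepared_query: prepare once, look up by name later.
from rdflib import Graph, Namespace

FOAF = Namespace("http://xmlns.com/foaf/0.1/")
g = Graph()

# `store` is an assumed instance of the class defining add_prepared_query.
q = store.add_prepared_query(
    "names",
    "SELECT ?name WHERE { ?s foaf:name ?name }",
    initNs={"foaf": FOAF})
for row in g.query(q):  # the cached prepared form can be re-executed cheaply
    print(row.name)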
def getQ10():
    q10 = prepareQuery('''
        SELECT ?name ?link (SUM(?amount) as ?totalAmount)
               (COUNT(?order) as ?orderCount)
        WHERE {
            ?company s:name ?name.
            ?order s:seller ?company.
            OPTIONAL { ?company o:sameAs ?link. }
            ?order ex:hasOrderAmount ?amount.
        }
        GROUP BY ?company
        ORDER BY DESC(?orderCount)
        ''', initNs={"s": s, "ex": ex, "o": o})
    output = g.query(q10)
    df = pd.DataFrame(
        output,
        columns=["Company", "Link", "Total Amount", "Number of contracts"])
    print()

    # Contracts not yet finished (pending product quantity)
    q10_1 = prepareQuery('''
        SELECT ?name (COUNT(?order) as ?orderCount)
        WHERE {
            ?company s:name ?name.
            ?order s:seller ?company.
            ?order ex:hasProductQuantityPending ?quantity.
            FILTER(?quantity > 0).
        }
        GROUP BY ?company
        ORDER BY DESC(?orderCount)
        ''', initNs={"s": s, "ex": ex, "o": o})
    output2 = g.query(q10_1)
    df2 = pd.DataFrame(output2,
                       columns=["Company", "Number of contracts pending"])
    df_final = pd.merge(left=df, right=df2, on="Company")
    return df_final
def getStreamIDs(self, type):
    if type == 'temperature':
        stream_q = '''
            SELECT DISTINCT ?sensor_uuid
            WHERE {
                ?room rdf:type/brick:subClassOf* brick:Room .
                ?sensor rdf:type/brick:subClassOf* brick:Temperature_Sensor .
                ?sensor brick:pointOf ?room .
                ?sensor brick:label ?sensor_uuid .
            }
            '''
        prep_q = prepareQuery(stream_q, initNs={"rdf": RDF, "brick": BRICK})
        return self.pprint(self.query(prep_q))
    elif type == 'co2':
        stream_q = '''
            SELECT DISTINCT ?sensor_uuid
            WHERE {
                ?room rdf:type/brick:subClassOf* brick:Room .
                ?sensor rdf:type/brick:subClassOf* brick:CO2_Sensor .
                ?sensor brick:pointOf ?room .
                ?sensor brick:label ?sensor_uuid .
            }
            '''
        prep_q = prepareQuery(stream_q, initNs={"rdf": RDF, "brick": BRICK})
        return self.pprint(self.query(prep_q))
    elif type == 'humidity':
        stream_q = '''
            SELECT DISTINCT ?sensor_uuid
            WHERE {
                ?room rdf:type/brick:subClassOf* brick:Room .
                ?sensor rdf:type/brick:subClassOf* brick:Humidity .
                ?sensor brick:pointOf ?room .
                ?sensor brick:label ?sensor_uuid .
            }
            '''
        prep_q = prepareQuery(stream_q, initNs={"rdf": RDF, "brick": BRICK})
        return self.pprint(self.query(prep_q))
    else:
        return "No such type"
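The three branches above differ only in the Brick sensor class, so a table-driven variant is a natural alternative; a sketch under that assumption, keeping the original method and helper names:

def getStreamIDs(self, type):
    # Map each stream type to its Brick sensor class; one template serves all.
    sensor_classes = {
        'temperature': 'brick:Temperature_Sensor',
        'co2': 'brick:CO2_Sensor',
        'humidity': 'brick:Humidity',
    }
    if type not in sensor_classes:
        return "No such type"
    stream_q = '''
        SELECT DISTINCT ?sensor_uuid
        WHERE {
            ?room rdf:type/brick:subClassOf* brick:Room .
            ?sensor rdf:type/brick:subClassOf* %s .
            ?sensor brick:pointOf ?room .
            ?sensor brick:label ?sensor_uuid .
        }''' % sensor_classes[type]
    prep_q = prepareQuery(stream_q, initNs={"rdf": RDF, "brick": BRICK})
    return self.pprint(self.query(prep_q))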
def __init__(self, callback, sparql_query):
    """Creates a SPARQL filter for RDF triples."""
    if sparql_query.strip()[:3].lower() != 'ask':
        raise ZtreamyException('Only ASK queries are allowed '
                               'in SPARQLFilter')
    super(SPARQLFilter, self).__init__(callback)
    self.query = prepareQuery(sparql_query)
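A hypothetical construction of the filter above; the callback and the ASK query are illustrative only.

# Only events whose triples satisfy the ASK query reach the callback.
def on_match(events):
    print('matched:', events)

f = SPARQLFilter(
    on_match,
    'ASK { ?s a <http://xmlns.com/foaf/0.1/Person> }')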
def get(self, concept, predicate, obj):
    sub = domain + concept
    pre = domain + predicate
    predUri = URIRef(pre)
    subUri = URIRef(sub)
    relations = []
    query = prepareQuery('SELECT ?a ?b ?c WHERE { ?a ?b ?c }')
    if predicate == "all":
        results = g.query(query, initBindings={'a': subUri})
    else:
        results = g.query(query, initBindings={'a': subUri, 'b': predUri})
    if obj == 'no':
        for row in results:
            relation = OrderedDict()
            relation['predicate'] = row.b
            relations.append(relation)
        return {'relations': marshal(relations, relation_fields)}
    else:
        for row in results:
            relation = OrderedDict()
            relation['predicate'] = row.b
            relation['object'] = row.c
            relations.append(relation)
        return {'relations': marshal(relations, relation_object_fields)}
def getTitle(self, expression):
    # Prepare query
    query = sparql.prepareQuery("""
        SELECT ?title WHERE {
            {
                ?annotation ao:type "hasTitle" .
                ?annotation oa:hasBody ?body .
                ?body rdf:subject ?expression .
                ?body rdf:object ?title .
            } UNION {
                ?annotation ao:type "hasShortTitle" .
                ?annotation oa:hasBody ?body .
                ?body rdf:subject ?expression .
                ?body rdf:object ?title .
            }
        }""", initNs={'oa': OA, 'ao': AO, 'rdf': RDF})

    # Fire query
    results = self.store.query(
        query, initBindings={'expression': URIRef(expression)})
    if results:
        # Result objects are iterable but not indexable; take the first row.
        return next(iter(results)).title
    else:
        item = self.getItem(expression)
        if item:
            h1 = self.getHeader()
            if h1:
                return h1
        return expression
def getAuthorInfo(self, author):
    # Prepare query. The foaf/schema/ao prefixes used in the query must be
    # bound here; FOAF and SCHEMA are assumed to be namespace constants
    # defined alongside AO and OA.
    query = sparql.prepareQuery("""
        SELECT ?name ?email WHERE {
            ?id foaf:name ?name .
            OPTIONAL { ?id schema:email ?email } .
            ?annotation ao:type "hasAuthor" .
            ?annotation oa:hasBody ?body .
            ?body rdf:object ?id .
        }""", initNs={'aop': AOP, 'oa': OA, 'ao': AO, 'rdf': RDF,
                      'dcterms': DCTERMS, 'foaf': FOAF, 'schema': SCHEMA})

    # Fire the query
    results = self.store.query(query,
                               initBindings={'author': URIRef(author)})

    # Build the JSON from the variables the query actually selects
    theJSON = []
    for row in results:
        entry = {'name': row.name}
        if row.email is not None:
            entry['email'] = row.email
        theJSON.append(entry)
    return json.dumps(theJSON, indent=4)
def getQ07_01(date):
    # COVID incidence by autonomous community (CCAA) on a given date
    q07_1 = prepareQuery('''
        SELECT ?iso ?link ?na ?np ?nh ?nu
        WHERE {
            ?CCAA ex:hasCovidStatus ?CS.
            ?CCAA ex:hasISOCode ?iso.
            ?CCAA o:sameAs ?link.
            ?CS ex:inDate ?date.
            ?CS ex:numPositiveAC ?na.
            ?CS ex:numPositivePCR ?np.
            ?CS ex:numberHospitalizations ?nh.
            ?CS ex:numberUCI ?nu.
        }
        ''', initNs={"s": s, "ex": ex, "o": o})
    r = g.query(q07_1,
                initBindings={'?date': Literal(date, datatype=s + "Date")})
    df = pd.DataFrame(
        r, columns=['CCAA', 'Link', 'AC', 'PCR', 'Hospitalizations', 'UCI'])
    return df
def getQ05():
    q05 = prepareQuery('''
        SELECT ?date ?service (COUNT(?service) as ?ns)
        WHERE {
            ?order s:orderDate ?date.
            ?order s:orderedItem ?sv.
            ?sv rdf:type s:Service.
            ?sv s:name ?service.
        }
        GROUP BY ?date ?sv
        ORDER BY ?date DESC(?ns) ?service
        LIMIT 100
        ''', initNs={"s": s, "ex": ex})
    output = g.query(q05)
    df = pd.DataFrame(output, columns=["Date", "Service", "Requested times"])
    df["Date"] = df["Date"].astype(str)
    df['Date'] = pd.to_datetime(df['Date'])
    return df
def getQ06():
    q06 = prepareQuery('''
        SELECT ?date ?cases ?product (SUM(?quantity) as ?nq)
        WHERE {
            ?order s:orderDate ?date.
            ?order ex:hasProductQuantityPending ?quantity.
            FILTER (?quantity > 0).
            ?order s:orderedItem ?pr.
            ?pr rdf:type s:Product.
            ?pr s:name ?product.
            ?order s:customer ?ccaa.
            ?ccaa ex:hasCovidStatus ?covid.
            ?covid ex:inDate ?date.
            ?covid ex:numberHospitalizations ?cases.
        }
        GROUP BY ?date ?pr
        ORDER BY ?date DESC(?nq)
        ''', initNs={"s": s, "ex": ex})
    output = g.query(q06)
    df = pd.DataFrame(output,
                      columns=["Date", "Number Hospitalizations", "Product",
                               "Quantity pending"])
    df["Date"] = df["Date"].astype(str)
    df['Date'] = pd.to_datetime(df['Date'])
    return df
def getQ07():
    q07 = prepareQuery('''
        SELECT ?iso ?link ?fecha ?na ?np ?nh ?nu
        WHERE {
            ?CCAA ex:hasCovidStatus ?CS.
            ?CCAA ex:hasISOCode ?iso.
            ?CCAA o:sameAs ?link.
            ?CS ex:numPositiveAC ?na.
            ?CS ex:numPositivePCR ?np.
            ?CS ex:numberHospitalizations ?nh.
            ?CS ex:numberUCI ?nu.
            ?CS ex:inDate ?fecha .
        }
        ORDER BY ASC(?fecha)
        ''', initNs={"s": s, "ex": ex, "o": o})
    df = pd.DataFrame(g.query(q07),
                      columns=['CCAA', 'Link', 'Date', 'AC', 'PCR',
                               'Hospitalizations', 'UCI'])
    df["Date"] = df["Date"].astype(str)
    df['Date'] = pd.to_datetime(df['Date'])
    return df
def test_graph_prefix():
    """
    This is issue https://github.com/RDFLib/rdflib/issues/313
    """
    g1 = Graph()
    g1.parse(data="""
        @prefix : <urn:ns1:> .
        :foo <p> 42 .
        """, format="n3")
    g2 = Graph()
    g2.parse(data="""
        @prefix : <urn:somethingelse:> .
        <urn:ns1:foo> <p> 42 .
        """, format="n3")
    assert isomorphic(g1, g2)

    q_str = """
        PREFIX : <urn:ns1:>
        SELECT ?val
        WHERE { :foo ?p ?val }
        """
    q_prepared = prepareQuery(q_str)

    expected = [(Literal(42),)]
    eq_(list(g1.query(q_prepared)), expected)
    eq_(list(g2.query(q_prepared)), expected)
    eq_(list(g1.query(q_str)), expected)
    eq_(list(g2.query(q_str)), expected)
def getQ06_1():
    q06_1 = prepareQuery('''
        SELECT ?date ?product (SUM(?quantity) as ?nq)
        WHERE {
            ?order s:orderDate ?date.
            ?order ex:hasProductQuantityPending ?quantity.
            FILTER (?quantity > 0).
            ?order s:orderedItem ?pr.
            ?pr rdf:type s:Product.
            ?pr s:name ?product.
        }
        GROUP BY ?date ?pr
        ORDER BY ?date DESC(?nq)
        ''', initNs={"s": s, "ex": ex})
    output = g.query(q06_1)
    df = pd.DataFrame(output, columns=["Date", "Product", "Quantity Pending"])
    df["Date"] = df["Date"].astype(str)
    df['Date'] = pd.to_datetime(df['Date'])
    return df
def index():
    zipcode_query = Queries.ALL_ZIPCODES.value
    query = Queries.ALL_CARS.value
    data = []
    all_zipcodes = []
    for row in g.query(zipcode_query):
        all_zipcodes.append(row.zipcode.value)
    if request.method == 'POST':
        if request.form['search']:
            code = Literal(request.form['search'])
            query = check_filters()
            q = prepareQuery(query)
            for item in g.query(q, initBindings={'zipcode': code}):
                data.append(parse_data(item))
        else:
            query = check_filters()
            for item in g.query(query):
                data.append(parse_data(item))
    else:
        for item in g.query(query):
            data.append(parse_data(item))
    return render_template('index.html', all_zipcodes=all_zipcodes, data=data)
async def a_sparqlQuery(self, app, loop, query: str, graphIri: str, bindings):
    reply = []
    try:
        q = prepareQuery(query)
        graph = self.rdfService.graphByUri(graphIri)
        assert graph is not None

        if bindings:
            results = graph.query(q, initBindings=bindings)
        else:
            results = graph.query(q)
    except Exception as err:
        log.debug(f'SparQL query error for {query}: {err}')
        return QVariant([])
    else:
        # Build the QVariant from the results
        for row in results:
            r = {}
            for var in results.vars:
                r[str(var)] = str(row[var])
            reply.append(r)
        return QVariant(reply)
def test_dataset_description_linksets(self):
    res = self.client.get('/.well-known/void')
    self.assertEqual(res.status_code, http.client.OK)
    self.assertEqual(res.headers['Content-Type'], 'text/turtle')
    g = Graph()
    g.parse(format='turtle', data=res.get_data(as_text=True))

    q = sparql.prepareQuery('''
        SELECT ?triples
        WHERE {
            ?linkset a void:Linkset .
            ?linkset void:subset <http://n2t.net/ark:/99152/p0d> .
            ?linkset void:subjectsTarget <http://n2t.net/ark:/99152/p0d> .
            ?linkset void:linkPredicate ?predicate .
            ?linkset void:objectsTarget ?dataset .
            ?linkset void:triples ?triples .
        }
        ''', initNs={'void': VOID})

    # http://dbpedia.org/void/Dataset
    dbpedia = URIRef('http://dbpedia.org/void/Dataset')
    triples = next(iter(g.query(
        q, initBindings={'dataset': dbpedia,
                         'predicate': DCTERMS.spatial})))['triples'].value
    self.assertEqual(triples, 3)

    worldcat = URIRef('http://purl.oclc.org/dataset/WorldCat')
    triples = next(iter(g.query(
        q, initBindings={'dataset': worldcat,
                         'predicate': DCTERMS.isPartOf})))['triples'].value
    self.assertEqual(triples, 1)
def loadFragmentDefinitions(self, folder, fileName):
    datasets = {}
    self.fragments = {}
    with open(fileName, 'r') as f:
        for line in f:
            ws = line.strip().split()
            # Each definition line maps a fragment name to a dataset.
            if len(ws) >= 2:
                fragment = ws[0]
                ds = ws[1]
                datasets[fragment] = ds
    content = os.listdir(folder)
    self.viewsDefinition = {}
    for g in content:
        path = folder + '/' + g
        with open(path) as f:
            viewStr = f.read()
        view = prepareQuery(viewStr)
        t = getTriple(view)
        i = path.rfind('/') + 1
        j = path.rfind('.')
        j = len(path) if j < 0 else j
        name = path[i:j]
        ds = datasets[name]
        self.fragments[name] = TriplePatternFragment(t, ds)
def test_construct_init_bindings(self):
    """
    This is issue https://github.com/RDFLib/rdflib/issues/1001
    """
    g1 = Graph()

    q_str = """
        PREFIX : <urn:ns1:>
        CONSTRUCT {
            ?uri :prop1 ?val1 ;
                 :prop2 ?c .
        }
        WHERE {
            bind(uri(concat("urn:ns1:", ?a)) as ?uri)
            bind(?b as ?val1)
        }
        """
    q_prepared = prepareQuery(q_str)

    expected = [
        (URIRef('urn:ns1:A'), URIRef('urn:ns1:prop1'), Literal('B')),
        (URIRef('urn:ns1:A'), URIRef('urn:ns1:prop2'), Literal('C')),
    ]
    results = g1.query(q_prepared, initBindings={
        'a': Literal('A'),
        'b': Literal('B'),
        'c': Literal('C'),
    })
    eq_(sorted(results, key=lambda x: str(x[1])), expected)
def getQ04_1(date):
    q04_1 = prepareQuery('''
        SELECT ?product (SUM(?quantity) as ?nq)
        WHERE {
            ?order s:orderDate ?date.
            ?order ex:hasProductQuantity ?quantity.
            ?order s:orderedItem ?pr.
            ?pr rdf:type s:Product.
            ?pr s:name ?product
        }
        GROUP BY ?pr
        ORDER BY DESC(?nq)
        LIMIT 10
        ''', initNs={"s": s, "ex": ex})
    output = g.query(q04_1,
                     initBindings={'?date': Literal(date, datatype=s + "Date")})
    df = pd.DataFrame(output, columns=["Product", "Quantity"])
    return df
def getAllOfTypeDescendingTemporal(self, c_type: str) -> Query:
    """Function to get all instances of a given type in descending
    temporal order (based on the `hasDate` property).

    Arguments:
        c_type {str} -- Target type (e.g. 'Degree', 'WorkExperience').

    Returns:
        Query -- Prepared query.
    """
    logging.debug('Preparing query to extract instances of type {0} in '
                  'descending temporal order'.format(c_type))
    return prepareQuery("""
        SELECT DISTINCT ?s
        WHERE {{
            {{ ?s rdf:type precis:{c_type} . }}
            OPTIONAL {{ ?s precis:hasDate ?date . }}
        }}
        ORDER BY DESC(?date)
        """.format(c_type=c_type), initNs=self.initN)
def getAwards(self, target_iri: str) -> Query:
    """SPARQL query to get the names and issuing organizations of awards
    affiliated with the entities that the given `target_iri` is
    'relatedTo'.

    Arguments:
        target_iri {str} -- Target instance IRI.

    Returns:
        Query -- Prepared query.
    """
    logging.debug('Preparing awards query for individual {0}'.format(
        target_iri))
    return prepareQuery("""
        SELECT ?award_name ?org_name
        WHERE {{
            <{target_iri}> precis:relatedTo ?org .
            ?org precis:hasName ?org_name .
            ?award precis:affiliatedWith ?org .
            ?award rdf:type precis:Award .
            ?award precis:hasName ?award_name .
        }}
        """.format(target_iri=target_iri), initNs=self.initN)
def getOrderedDescriptionText(
        self, target_iri: str, max_priority: int = int(1e10)) -> Query:
    """SPARQL query to get description text for a given instance IRI,
    ordered by the 'hasPriority' attribute (ascending, so priority
    0 > 1 > 2 > ...).

    Arguments:
        target_iri {str} -- Target instance IRI.

    Keyword Arguments:
        max_priority {int} -- Maximum description priority
            (default: {int(1e10)}).

    Returns:
        Query -- Prepared query.
    """
    logging.debug('Preparing description query for individual {0}'.format(
        target_iri))
    return prepareQuery("""
        SELECT DISTINCT ?text
        WHERE {{
            <{target_iri}> precis:hasDescription ?descr .
            ?descr precis:hasPriority ?priority .
            FILTER (?priority < {max_priority}) .
            ?descr precis:hasText ?text .
        }}
        ORDER BY ?priority
        """.format(target_iri=target_iri, max_priority=max_priority),
        initNs=self.initN)
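A sketch of how the query builders above might be driven; `qb` and `graph` are assumed to be an instance of the enclosing class and an rdflib graph holding precis data.

# Hypothetical driver for the prepared-query builders above.
degrees_q = qb.getAllOfTypeDescendingTemporal('Degree')
for row in graph.query(degrees_q):
    # For each instance, fetch its related awards via a second prepared query.
    awards_q = qb.getAwards(str(row.s))
    for award in graph.query(awards_q):
        print(award.award_name, award.org_name)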
def sparqlCommonObj(usr1, usr2, tbl1, tbl2,
                    database_path=DEFAULT_DATABASE_PATH):
    loadstar = Namespace('https://loadstar.com/ontology#')
    myg = open_context(usr1, tbl1, database_path)
    for item in myg:
        if item is not None:
            subj1 = item[0]
            break
    myg2 = open_context(usr2, tbl2, database_path)
    for item in myg2:
        if item is not None:
            subj2 = item[0]
            break
    mydum = myg + myg2
    myquer = prepareQuery(
        """SELECT ?object ?a ?b
           WHERE { ?subject1 ?a ?object .
                   ?subject2 ?b ?object . }""",
        initNs={"load": loadstar})
    for row in mydum.query(myquer,
                           initBindings={"subject1": subj1,
                                         "subject2": subj2}):
        print(row)
def synsets(self, word):
    """Return a flat list of (word, synset, lexicographer file) values
    for a Portuguese word form."""
    synsets = []
    stringSparqlQuery = """
        select distinct ?word ?sspt ?lexico where {
            ?word <https://w3id.org/own-pt/wn30/schema/lexicalForm> \"""" + word + """\"@pt .
            ?sspt <https://w3id.org/own-pt/wn30/schema/containsWordSense>/<https://w3id.org/own-pt/wn30/schema/word> ?word .
            ?ssen <http://www.w3.org/2002/07/owl#sameAs> ?sspt .
            ?ssen <https://w3id.org/own-pt/wn30/schema/lexicographerFile> ?lexico .
        }
    """
    queryString = sparql.prepareQuery(stringSparqlQuery)
    syns = graph.query(queryString)
    for s in syns:
        synsets.append(str(s[0]))
        synsets.append(str(s[1]))
        synsets.append(str(s[2]))
    return synsets
def validateSparql(self, text):
    '''Validates a SPARQL query and returns the prepared form.'''
    text = str(text)
    try:
        return sparql.prepareQuery(text, initNs=self.namespaces)
    except Exception as error:
        raise MalformedSparqlQueryError(
            [str(type(error)), str(error), text])
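One way the validator above might be called, with the custom error handled; `validator` is an assumed instance of the enclosing class.

# Hypothetical call site for validateSparql.
try:
    prepared = validator.validateSparql('SELECT ?s WHERE { ?s ?p ?o }')
except MalformedSparqlQueryError as err:
    print('rejected query:', err)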
def testQuery():
    g = makeGraph()
    q = sparql.prepareQuery('SELECT ?o WHERE { ?s schema:name ?o } LIMIT 20',
                            initNs={"schema": "http://schema.org/"})
    myquery = g.query(q)
    return myquery
def test_dataset_description(self):
    res1 = self.client.get(
        '/', headers={'Accept': 'text/html'}, buffered=True)
    self.assertEqual(res1.status_code, http.client.SEE_OTHER)
    self.assertEqual(urlparse(res1.headers['Location']).path,
                     '/index.json.html')

    res2 = self.client.get('/', headers={'Accept': 'text/turtle'})
    self.assertEqual(res2.status_code, http.client.OK)
    self.assertEqual(res2.headers['Content-Type'], 'text/turtle')

    res3 = self.client.get('/.well-known/void')
    self.assertEqual(res3.status_code, http.client.OK)
    self.assertEqual(res3.headers['Content-Type'], 'text/turtle')
    self.assertEqual(res3.get_data(as_text=True),
                     res2.get_data(as_text=True))

    res4 = self.client.get('/.wellknown/void')
    self.assertEqual(res4.status_code, http.client.OK)
    self.assertEqual(res4.headers['Content-Type'], 'text/turtle')
    self.assertEqual(res4.get_data(as_text=True),
                     res3.get_data(as_text=True))

    res5 = self.client.get('/.well-known/void.ttl')
    self.assertEqual(res5.status_code, http.client.OK)
    self.assertEqual(res5.headers['Content-Type'], 'text/turtle')
    self.assertEqual(res5.get_data(as_text=True),
                     res4.get_data(as_text=True))

    res6 = self.client.get('/.well-known/void.ttl.html')
    self.assertEqual(res6.status_code, http.client.OK)
    self.assertEqual(res6.headers['Content-Type'], 'text/html')

    g = Graph()
    g.parse(format='turtle', data=res2.get_data(as_text=True))
    self.assertIn(
        (PERIODO['p0d'], DCTERMS.provenance, HOST['h#changes']), g)
    desc = g.value(predicate=RDF.type, object=VOID.DatasetDescription)
    self.assertEqual(desc.n3(), '<http://n2t.net/ark:/99152/p0>')
    title = g.value(subject=desc, predicate=DCTERMS.title)
    self.assertEqual(title.n3(),
                     '"Description of the PeriodO Period Gazetteer"@en')

    q = sparql.prepareQuery('''
        SELECT ?count
        WHERE {
            ?d void:classPartition ?p .
            ?p void:class ?class .
            ?p void:entities ?count .
        }
        ''', initNs={'void': VOID, 'skos': SKOS})

    concept_count = next(iter(g.query(
        q, initBindings={'class': SKOS.Concept})))['count'].value
    self.assertEqual(concept_count, 3)

    scheme_count = next(iter(g.query(
        q, initBindings={'class': SKOS.ConceptScheme})))['count'].value
    self.assertEqual(scheme_count, 1)
def is_inside(textplace, graph):
    """Returns True if the place given as text is disambiguated in graph,
    False otherwise."""
    askquery = prepareQuery(
        """ASK { ?iri schema:jobLocation ?place .
                 ?iri edsa:Location ?placeiri }""",
        initNs={"schema": ns.schema, "edsa": ns.edsa})
    return bool(graph.query(askquery,
                            initBindings={"place": Literal(textplace)}))
def timequery(action):  # time query function
    q = prepareQuery(
        'SELECT ?time WHERE { ?action alfred:takes ?time . }',
        initNs={"alfred": "http://www.semanticweb.org/suv/ontologies/2015/3/alfredowl/"})
    this = parse + action
    for row in g.query(q, initBindings={'action': this}):
        return int(row.time)
def moodquery(action):  # mood query function
    q = prepareQuery(
        'SELECT ?name WHERE { ?action alfred:results ?mood . '
        '?mood alfred:named ?name . }',
        initNs={"alfred": "http://www.semanticweb.org/suv/ontologies/2015/3/alfredowl/"})
    this = parse + action
    for row in g.query(q, initBindings={'action': this}):
        return str(row.name)
def taskquery(action, tasks=[]):  # tasks query function
    q = prepareQuery(
        'SELECT ?name WHERE { ?action alfred:involves ?task . '
        '?task alfred:named ?name . }',
        initNs={"alfred": "http://www.semanticweb.org/suv/ontologies/2015/3/alfredowl/"})
    this = parse + action
    for row in g.query(q, initBindings={'action': this}):
        print(row.name)
def actionquery(mood, actions=None):  # actions query function
    # Avoid a shared mutable default argument across calls.
    if actions is None:
        actions = []
    q = prepareQuery(
        'SELECT ?name WHERE { ?mood alfred:possible ?action . '
        '?action alfred:named ?name . }',
        initNs={"alfred": "http://www.semanticweb.org/suv/ontologies/2015/3/alfredowl/"})
    this = parse + mood
    for row in g.query(q, initBindings={'mood': this}):
        actions.append(row.name)
    return actions
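A hedged walk through the four alfred helpers above, assuming the module-level graph `g` and IRI prefix `parse` they all rely on; the action name is invented.

# Hypothetical session using the alfred query helpers.
minutes = timequery('MakeCoffee')   # how long the action takes
mood = moodquery('MakeCoffee')      # the mood the action results in
taskquery('MakeCoffee')             # prints the tasks it involves
for name in actionquery(mood):      # other actions possible in that mood
    print(name, minutes)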
def test_sparql_bnodelist():
    """Syntax tests for a few corner cases not touched by the
    official tests."""
    prepareQuery('select * where { ?s ?p ( [] ) . }')
    prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] ) . }')
    prepareQuery('select * where { ?s ?p ( [ ?p2 ?o2 ] [] ) . }')
    prepareQuery('select * where { ?s ?p ( [] [ ?p2 ?o2 ] [] ) . }')
def delete(self):
    # Note: DELETE is a SPARQL Update form; prepareQuery only parses read
    # queries, so an update would normally go through prepareUpdate and
    # Graph.update instead.
    query = prepareQuery(
        'delete { ?s ?p ?o } WHERE { ?person foaf:knows ?s .}',
        initNs={"foaf": FOAF})
    tim = rdflib.URIRef("http://www.w3.org/People/Berners-Lee/card#i")
    for row in self._graph.query(query, initBindings={'person': tim}):
        print(row)
    return query
def get_iri(textplace, graph):
    """Returns the IRI of an existing place in the jobs base."""
    getquery = prepareQuery("""
        SELECT DISTINCT ?placeiri
        WHERE { ?iri schema:jobLocation ?place .
                ?iri edsa:Location ?placeiri }
        """, initNs={"schema": ns.schema, "edsa": ns.edsa})
    res = graph.query(getquery, initBindings={"place": Literal(textplace)})
    for row in res:
        return row["placeiri"]
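The two jobs-base helpers, is_inside earlier and get_iri here, pair naturally: check with the ASK form before fetching the IRI. A sketch, assuming a populated rdflib graph `jobs_graph`:

# Hypothetical combined use of is_inside and get_iri.
place = 'Southampton, UK'
if is_inside(place, jobs_graph):
    print('already disambiguated as', get_iri(place, jobs_graph))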
def queryQualityDataIds(self):
    query_str = """SELECT DISTINCT ?uri ?comment
        WHERE {
            ?uri """ + CMR_QA.NAMESPACE_PREFIX + """:""" + CMR_QA.hasQualityComment_Name + """ ?comment .
            ?uri rdf:type """ + CMR_QA.NAMESPACE_PREFIX + """:""" + CMR_QA.Cine_MRI_Quality_Data_Name + """ .
        }"""
    query_object = prepareQuery(
        query_str, initNs={CMR_QA.NAMESPACE_PREFIX: CMR_QA.BASE_URI})
    self.qualityDataURIComments = self.rdfgraph.query(query_object)
def testPrepare():
    q = prepareQuery('SELECT ?target WHERE { }')

    r = list(g.query(q))
    e = []
    assert r == e, 'prepare: %r != %r' % (r, e)

    r = list(g.query(q, initBindings={'target': Literal('example')}))
    e = [(Literal('example'),)]
    assert r == e, 'prepare: %r != %r' % (r, e)

    r = list(g.query(q))
    e = []
    assert r == e, 'prepare: %r != %r' % (r, e)
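What these testPrepare variants exercise is the parse-once/run-many contract of prepareQuery; a minimal self-contained illustration with invented data:

# Minimal sketch: prepare a query once, execute it with different bindings.
from rdflib import Graph, Literal, Namespace
from rdflib.plugins.sparql import prepareQuery

EX = Namespace('urn:ex:')
g = Graph()
g.add((EX.alice, EX.age, Literal(42)))
g.add((EX.bob, EX.age, Literal(7)))

q = prepareQuery('SELECT ?age WHERE { ?who ex:age ?age }',
                 initNs={'ex': EX})
for who in (EX.alice, EX.bob):
    for row in g.query(q, initBindings={'who': who}):
        print(who, row.age)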
def test_issue():
    query = sparql.prepareQuery(
        """
        SELECT DISTINCT ?property ?parent WHERE {
            ?property a owl:DeprecatedProperty .
            ?property dc:relation ?relation .
            ?property rdfs:subPropertyOf ?parent .
            ?property rdfs:label | skos:altLabel ?label .
        }
        """,
        initNs={"rdfs": RDFS, "owl": OWL, "dc": DC, "skos": SKOS})
def testPrepare():
    q = prepareQuery('SELECT ?target WHERE { }')

    r = list(g.query(q))
    e = [(None,)]  # TODO: https://github.com/RDFLib/rdflib/issues/554
    assert r == e, 'prepare: %r != %r' % (r, e)

    r = list(g.query(q, initBindings={'target': Literal('example')}))
    e = [(Literal('example'),)]
    assert r == e, 'prepare: %r != %r' % (r, e)

    r = list(g.query(q))
    e = [(None,)]  # TODO: https://github.com/RDFLib/rdflib/issues/554
    assert r == e, 'prepare: %r != %r' % (r, e)
def test_find_location_simple(self):
    location = "Southampton, UK"
    tup = gn.find_location(location)
    self.assertTrue(isinstance(tup[0], rdflib.URIRef))
    self.assertTrue(isinstance(tup[1], rdflib.Graph))
    # At least we brought back the correct name
    askplace = prepareQuery(
        """ASK { ?iri gn:name ?name }""",
        initNs={"gn": ns.geonames},
    )
    self.assertTrue(tup[1].query(
        askplace, initBindings={"name": rdflib.Literal("Southampton")}))
def writeOutBGP(f, query):
    '''Writes the query's BGP statistics (number of triples and of vars)
    to the given file.'''
    try:
        sq = sparql.prepareQuery(query, initNs=ns.ns)
    except Exception as error:
        log.warn(str([str(type(error)), str(error), query]) + '\n')
        return
    # we only look into select queries with a projection
    if sq.algebra.name == "SelectQuery":
        if sq.algebra.p.name == 'Project':
            if sq.algebra.p.p.name == 'BGP':
                f.write(str(len(sq.algebra.p.p.triples)) + ', ' +
                        str(len(sq.algebra.p.p._vars)) + '\n')
    # will hopefully reduce memory usage
    del sq
def home(request):
    query = prepareQuery('''SELECT ?object
        WHERE { ?language dbpedia:influenced ?object. }''',
        initNs={"dbpedia": 'http://dbpedia.org/ontology/'})
    results = Graph()
    results.parse(
        "http://live.dbpedia.org/data/Programming_languages.ntriples",
        format="nt")
    python = rdflib.URIRef(
        "http://dbpedia.org/resource/Python_(programming_language)")
    results = results.query(query, initBindings={'language': python})
    return render(request, "semantic/index.html", {'results': results})
def get_query(self, q_str=None, q_parser=None):
    """
    :param q_parser: one of the possible query parsers.
    :param q_str: e.g. doc(author:D. N. Adams) section(name:Stimulus)
                  prop(name:Contrast, value:20, unit:%)
    :return: rdflib prepared query.
    """
    # TODO find out if validation of q_str is important.
    # We could warn about unused parts and print the parsed dictionary.
    if not self.q_dict:
        if not q_str:
            raise AttributeError("Please fulfill q_str param (query string)")
        elif not q_parser:
            raise AttributeError("Please fulfill q_parser param (query parser)")
        self.q_dict = q_parser.parse_query_string(q_str)
    self._prepare_query()
    return prepareQuery(
        self.query,
        initNs={"odml": Namespace("https://g-node.org/projects/odml-rdf#"),
                "rdf": RDF})
def getQualityComments(self):
    query_str = """SELECT DISTINCT ?uri ?comment
        WHERE {
            ?uri """ + CMR_QA.NAMESPACE_PREFIX + """:""" + CMR_QA.hasQualityComment_Name + """ ?comment .
            ?uri rdf:type """ + CMR_QA.NAMESPACE_PREFIX + """:""" + CMR_QA.Cine_MRI_Quality_Data_Name + """ .
        }"""
    query_object = prepareQuery(
        query_str, initNs={CMR_QA.NAMESPACE_PREFIX: CMR_QA.BASE_URI})
    return self.rdfgraph.query(query_object)
def test_find_location_utf8(self):
    location = "Αθήνα, GR"
    tup = gn.find_location(location)
    self.assertTrue(isinstance(tup[0], rdflib.URIRef))
    self.assertTrue(isinstance(tup[1], rdflib.Graph))
    # At least we brought back the correct name and country
    askplace = prepareQuery(
        """ASK { ?iri gn:name ?name }""",
        initNs={"gn": ns.geonames},
    )
    self.assertTrue(tup[1].query(
        askplace, initBindings={"name": rdflib.Literal("Athens")}))

def test_find_location_unknown(self):
    location = "Unknownlandidfgdfg"
    with self.assertRaises(gn.NotFoundException):
        gn.find_location(location)