def filtraGT(key, value, greaterthan, equal):
    if greaterthan:
        operand = ">"
    else:
        operand = "<"
    if equal:
        operand += "="
    xml = etree.parse(farmacie)
    if key == "lat" or key == "long":
        rss = xml.xpath("/locations/location[@" + key + operand + "'" + value + "']")
        metad = xml.xpath("/locations/metadata")
        output = etree.Element("locations")
        for met in metad:
            output.append(met)
        for loc in rss:
            output.append(loc)
        print("Content-type: application/xml; charset=UTF-8\n")
        print(etree.tostring(
            output,
            pretty_print=True,
            xml_declaration=True,
            doctype='<!DOCTYPE locations SYSTEM "http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DTDs/locations.dtd">',
            encoding=uencoding,
        ))
    else:
        error.errhttp("406")
    return
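# NOTE ON CONTEXT: every script in this section relies on a module preamble
# that is not shown here. The sketch below is an assumption reconstructed from
# the names the functions reference; the data-source paths are hypothetical
# placeholders, and error is the project's shared helper module.
import cgi
import copy
import json
import os
import urllib
import urllib2

import rdflib
from lxml import etree

import error  # shared helper providing errhttp() and testenviron()

uencoding = "utf-8"
farmacie = "farmacieBO2011.xml"         # hypothetical path, XML data source
supermarket = "supermarketBO2011.json"  # hypothetical path, JSON data source
poste = "posteBO2011.ttl"               # hypothetical path, Turtle data source
# translate() tables for case-insensitive XPath matching (ASCII letters only;
# accented characters would have to be appended to both strings)
maiusstr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
minusstr = "abcdefghijklmnopqrstuvwxyz"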
def filtraCONTAINS(key, value, ncontains):
    # ops["id"] keeps the boolean test as-is, ops["not"] negates it: the loop
    # below deletes the entries that must NOT survive, so "contains" deletes
    # non-matching locations and "ncontains" deletes matching ones
    if ncontains:
        op = "id"
    else:
        op = "not"
    if key not in ("id", "name", "category", "address", "opening", "closing"):
        error.errhttp("406")
        return
    if ncontains and key == "address":
        error.errhttp("406")
        return
    data = open(supermarket, "r").read()
    orig = json.loads(data)
    jdata = copy.deepcopy(orig)
    print("Content-type: application/json; charset=UTF-8\n")
    for item, subdict in orig.iteritems():
        if item == "metadata":
            continue
        for subkey, val in subdict.iteritems():
            if key == "category":
                # a location matches if any of its categories contains the
                # value; testing (and deleting) once per category would try to
                # delete the same key twice when two categories match
                matched = any(value in c.lower() for c in val.get("category", []))
                if ops[op](matched):
                    del jdata["locations"][subkey]
            elif key == "id" and ops[op](value in subkey.lower()):
                del jdata["locations"][subkey]
            elif key in val and ops[op](value in val[key].lower()):
                del jdata["locations"][subkey]
    print json.dumps(jdata, ensure_ascii=False, encoding=uencoding,
                     sort_keys=True, indent=4).encode(uencoding)
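# The ops dispatch table indexed by the JSON filters is defined elsewhere; a
# minimal sketch consistent with both call sites, assuming "id"/"not" wrap a
# single boolean (filtraCONTAINS) and the comparison keys take two floats
# (filtraGT further down):
import operator

ops = {
    "id":  lambda b: b,      # keep the test result as-is
    "not": lambda b: not b,  # negate the test result
    ">":   operator.gt,
    "<":   operator.lt,
    ">=":  operator.ge,
    "<=":  operator.le,
}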
def filtraGT(key, value, greaterthan, equal):
    turtle = rdflib.Graph()
    src = turtle.parse(poste, format='n3')
    result = rdflib.Graph()
    # bind the default prefix without overriding an existing binding
    result.bind('', rdflib.URIRef('http://www.essepuntato.it/resource/'), False)
    result.bind('vcard', rdflib.URIRef('http://www.w3.org/2006/vcard/ns#'))
    result.bind('cs', rdflib.URIRef('http://cs.unibo.it/ontology/'))
    result.bind('dcterms', rdflib.URIRef('http://purl.org/dc/terms/'))
    result.bind('xsd', rdflib.URIRef('http://www.w3.org/2001/XMLSchema#'))
    result.bind('this', rdflib.URIRef('http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#'))
    if greaterthan:
        op = ">"
    else:
        op = "<"
    if equal:
        op += "="
    # copy the metadata triples (identified by their dcterms:creator) into the
    # result graph
    oquery = """
    PREFIX vcard: <http://www.w3.org/2006/vcard/ns#>
    PREFIX cs: <http://cs.unibo.it/ontology/>
    PREFIX : <http://www.essepuntato.it/resource/>
    PREFIX dcterms: <http://purl.org/dc/terms/>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX this: <http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#>
    CONSTRUCT {?s ?p ?o}
    WHERE {
        ?s ?p ?o.
        ?s dcterms:creator "Working Group LTW 2011/2012"
    }
    """
    query_meta = src.query(oquery)
    for element in query_meta:
        result.add(element)
    if key == "lat" or key == "long":
        squery = """
        PREFIX vcard: <http://www.w3.org/2006/vcard/ns#>
        PREFIX cs: <http://cs.unibo.it/ontology/>
        PREFIX : <http://www.essepuntato.it/resource/>
        PREFIX dcterms: <http://purl.org/dc/terms/>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX this: <http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#>
        CONSTRUCT {?s ?p ?o}
        WHERE {
            ?s ?p ?o;
               """ + campi[key] + """ ?name.
            FILTER (?name """ + op + """ """ + value + """)
        }
        """
    else:
        error.errhttp("406")
        return
    print("Content-type: text/turtle; charset=UTF-8\n")
    query_result = src.query(squery)
    for element in query_result:
        result.add(element)
    print result.serialize(format="n3")
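# campi maps query-string keys to the RDF predicates spliced into the SPARQL
# queries above; its definition is not shown in this section. A plausible
# sketch, assuming vcard terms for the standard fields and cs: ontology terms
# for the rest (every predicate name here is a guess, only the prefixes are
# the ones bound above):
campi = {
    "lat": "vcard:latitude",
    "long": "vcard:longitude",
    "name": "vcard:fn",
    "category": "cs:category",
    "address": "vcard:street-address",
    "opening": "cs:opening",
    "closing": "cs:closing",
}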
def filtraEQ(key, value, nequal):
    if nequal:
        operand = "!="
    else:
        operand = "="
    xml = etree.parse(farmacie)
    if key == "id" or key == "lat" or key == "long":
        # attribute match, lower-cased via translate()
        rss = xml.xpath(
            "/locations/location[translate(@" + key + ",'" + maiusstr + "','"
            + minusstr + "')" + operand + "'" + value + "']"
        )
    elif (key == "name") or (key == "category"):
        # child-element match, lower-cased via translate()
        rss = xml.xpath(
            "/locations/location[translate(" + key + ',"' + maiusstr + '", "'
            + minusstr + '")' + operand + '"' + value + '"]'
        )
    else:
        error.errhttp("406")
        return
    print("Content-type: application/xml; charset=UTF-8\n")
    metad = xml.xpath("/locations/metadata")
    output = etree.Element("locations")
    for met in metad:
        output.append(met)
    for loc in rss:
        output.append(loc)
    print(etree.tostring(
        output,
        pretty_print=True,
        xml_declaration=True,
        doctype='<!DOCTYPE locations SYSTEM "http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DTDs/locations.dtd">',
        encoding=uencoding,
    ))
    return
def formatresult(mimetype, ellist, meta):
    # dispatch on the client's Accept header; XML doubles as the */* default
    if "application/xml" in mimetype or "*/*" in mimetype:
        locationtoxml(ellist, meta)
    elif "application/json" in mimetype:
        locationtojson(ellist, meta)
    elif "text/csv" in mimetype:
        locationtocsv(ellist, meta)
    elif "text/turtle" in mimetype:
        locationtoturtle(ellist, meta)
    else:
        error.errhttp("406")
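# Example: because the branches are tested in order and "*/*" is caught by the
# first one, an Accept header of "text/csv, */*" yields XML, while a bare
# "text/csv" yields CSV:
#
#   formatresult("text/csv", loclist, meta)        # -> locationtocsv
#   formatresult("text/csv, */*", loclist, meta)   # -> locationtoxml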
def main():
    fs = cgi.FieldStorage()
    chiave = fs.getvalue("key")
    confronto = fs.getvalue("comp")
    valore = fs.getvalue("value")
    if (not chiave) and (not confronto) and (not valore) \
            and not error.testenviron(os.environ, mimexml):
        xml = os.path.splitext(supermarket)[0] + ".xml"
        f = open(xml, "r")
        print("Content-type: application/xml; charset=UTF-8\n")
        content = f.read()
        print content
        return
    # This part checks that, in this case, JSON is accepted by the descriptor.
    # However, if we enable it now it will not work, because the browser does
    # not accept JSON... I am leaving it commented out until we start working
    # on the descriptors.
    if error.testenviron(os.environ, mimejson):
        error.errhttp("406")
        return
    if (not chiave) and (not confronto) and (not valore):
        xml = open(supermarket, "r")
        print("Content-type: application/json; charset=UTF-8\n")
        content = xml.read()
        print content
    else:
        if (not chiave) or (not confronto) or (not valore):
            error.errhttp("406")
        else:
            chiave = chiave.lower().decode(uencoding)
            confronto = confronto.lower().decode(uencoding)
            valore = valore.lower().decode(uencoding)
            if confronto == "eq":
                filtraEQ(chiave, valore, False)
            elif confronto == "neq":
                filtraEQ(chiave, valore, True)
            elif confronto == "contains":
                filtraCONTAINS(chiave, valore, False)
            elif confronto == "ncontains":
                filtraCONTAINS(chiave, valore, True)
            elif confronto == "gt":
                filtraGT(chiave, valore, True, False)
            elif confronto == "lt":
                filtraGT(chiave, valore, False, False)
            elif confronto == "ge":
                filtraGT(chiave, valore, True, True)
            elif confronto == "le":
                filtraGT(chiave, valore, False, True)
            else:
                error.errcode("406")
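# Example request handled by this dispatcher (hypothetical host and path):
#
#   GET /supermarket.py?key=name&comp=contains&value=coop
#
# parses to chiave=u"name", confronto=u"contains", valore=u"coop" after the
# lower()/decode() normalization, and routes to
# filtraCONTAINS(u"name", u"coop", False).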
def main():
    fs = cgi.FieldStorage()
    aggr = fs.getvalue("aggr").lower()
    name = fs.getvalue("name").lower()
    if not name or not aggr:
        error.errhttp("406")
    else:
        urlaggr = "http://ltw1219.web.cs.unibo.it/descrizione/" + aggr + "/params/" + name
        req = urllib2.Request(url=urlaggr)
        response = urllib2.urlopen(req)
        restype = response.info().gettype()
        resource = response.read()
        response.close()
        print("Content-type: text/html; charset=UTF-8\n")
        print resource
def filtraCONTAINS(key, value, ncontains):
    if ncontains:
        operator = "not (contains"
    else:
        operator = "(contains"
    xml = etree.parse(farmacie)
    if (key == "id") and (not ncontains):
        rss = xml.xpath(
            "/locations/location[contains(translate(@id,'" + maiusstr + "','"
            + minusstr + "'), '" + value + "')]"
        )
    elif (key == "name") or (key == "category") or (key == "opening") or (key == "closing"):
        rss = xml.xpath(
            "/locations/location[" + operator + "(translate(" + key + ",'"
            + maiusstr + "','" + minusstr + "'),'" + value + "'))]"
        )
    elif (key == "address") and (not ncontains):
        rss = xml.xpath(
            "/locations/location[contains(translate(address,'" + maiusstr
            + "','" + minusstr + "'),'" + value + "')]"
        )
    else:
        error.errhttp("406")
        return
    metad = xml.xpath("/locations/metadata")
    output = etree.Element("locations")
    print("Content-type: application/xml; charset=UTF-8\n")
    for met in metad:
        output.append(met)
    for loc in rss:
        output.append(loc)
    print(etree.tostring(
        output,
        pretty_print=True,
        xml_declaration=True,
        doctype='<!DOCTYPE locations SYSTEM "http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DTDs/locations.dtd">',
        encoding=uencoding,
    ))
    return
def main():
    fs = cgi.FieldStorage()
    aggr = fs.getvalue("aggr").lower()
    name = fs.getvalue("name").lower()
    if not name or not aggr:
        error.errhttp("406")
    else:
        urldescr = "http://ltw1219.web.cs.unibo.it/trova-per-nome/" + aggr + "/params/" + name
        urldescr = urllib.quote(urldescr, safe="%/:=&?~#+!$,;'@()*[]")
        req = urllib2.Request(url=urldescr)
        req.add_header('Accept', 'application/xml')
        response = urllib2.urlopen(req)
        resource = response.read()
        response.close()
        print("Content-type: application/xml; charset=UTF-8\n")
        print resource
def main():
    fs = cgi.FieldStorage()
    multiint = fs.getvalue("multi")
    intsemplice = fs.getvalue("simple")
    if not multiint or not intsemplice:
        error.errhttp("406")
    else:
        urldescr = "http://ltw1219.web.cs.unibo.it/aperto/params/" + multiint + "/" + intsemplice
        urldescr = urllib.quote(urldescr, safe="%/:=&?~#+!$,;'@()*[]")
        req = urllib2.Request(url=urldescr)
        req.add_header('Accept', '*/*, application/xml, text/turtle, text/csv, application/json')
        response = urllib2.urlopen(req)
        restype = response.info().gettype()
        resource = response.read()
        response.close()
        print("Content-type: text/plain; charset=UTF-8\n")
        print resource
def filtraGT(key, value, greaterthan, equal):
    if greaterthan:
        op = ">"
    else:
        op = "<"
    # the operator is deliberately complemented: the loop below deletes the
    # entries that must NOT survive. For "gt" (greaterthan, not equal) op
    # becomes ">=", deleting every location where value >= val[key], so that
    # exactly the locations with val[key] > value survive.
    if not equal:
        op += "="
    if key != "lat" and key != "long":
        error.errhttp("406")
        return
    data = open(supermarket, "r").read()
    orig = json.loads(data)
    jdata = copy.deepcopy(orig)
    print("Content-type: application/json; charset=UTF-8\n")
    for item, subdict in orig.iteritems():
        # only location entries are filtered; the metadata block passes
        # through unchanged
        if item == "metadata":
            continue
        for subkey, val in subdict.iteritems():
            if (key in val) and ops[op](float(value), float(val[key])):
                del jdata["locations"][subkey]
    print json.dumps(jdata, ensure_ascii=False, encoding=uencoding,
                     sort_keys=True, indent=4).encode(uencoding)
def main():
    fs = cgi.FieldStorage()
    chiave = fs.getvalue("key")
    confronto = fs.getvalue("comp")
    valore = fs.getvalue("value")
    if error.testenviron(os.environ, mimexml):
        return
    if (not chiave) and (not confronto) and (not valore):
        xml = open(farmacie, "r")
        print("Content-type: application/xml; charset=UTF-8\n")
        content = xml.read()
        print content
    else:
        if (not chiave) or (not confronto) or (not valore):
            error.errhttp("406")
        else:
            chiave = chiave.lower().decode(uencoding)
            confronto = confronto.lower().decode(uencoding)
            valore = valore.lower().decode(uencoding)
            if confronto == "eq":
                filtraEQ(chiave, valore, False)
            elif confronto == "neq":
                filtraEQ(chiave, valore, True)
            elif confronto == "contains":
                filtraCONTAINS(chiave, valore, False)
            elif confronto == "ncontains":
                filtraCONTAINS(chiave, valore, True)
            elif confronto == "gt":
                filtraGT(chiave, valore, True, False)
            elif confronto == "lt":
                filtraGT(chiave, valore, False, False)
            elif confronto == "ge":
                filtraGT(chiave, valore, True, True)
            elif confronto == "le":
                filtraGT(chiave, valore, False, True)
            else:
                error.errhttp("406")
def main():
    fs = cgi.FieldStorage()
    aggr = fs.getvalue("aggr")
    lat = fs.getvalue("lat")
    longi = fs.getvalue("long")
    if "maxel" in fs:
        maxel = int(fs.getvalue("maxel"))
    else:
        maxel = None
    if (not aggr) or (not lat) or (not longi):
        error.errhttp("406")
        return
    urlaggr = getaggrurl(aggr)
    # getaggrurl signals failure with an int HTTP status code
    if isinstance(urlaggr, (int, long)):
        error.errhttp(str(urlaggr))
        return
    req = urllib2.Request(url=urlaggr)
    req.add_header('Accept', 'application/xml, text/turtle, text/csv, application/json')
    response = urllib2.urlopen(req)
    restype = response.info().gettype()
    resource = response.read()
    response.close()
    if restype == "application/xml":
        meta = trasforma.locationfromxml(resource, loclist)
    elif restype == "text/turtle":
        meta = trasforma.locationfromturtle(resource, loclist)
    elif restype == "text/csv":
        meta = trasforma.locationfromcsv(resource, loclist)
    elif restype == "application/json":
        meta = trasforma.locationfromjson(resource, loclist)
    else:
        error.errhttp("406")
        return
    computedistances(loclist, lat, longi)
    loclist.sort(key=lambda location: location.distance)
    if maxel:
        trasforma.formatresult(os.environ["HTTP_ACCEPT"], loclist[:maxel], meta)
    else:
        trasforma.formatresult(os.environ["HTTP_ACCEPT"], loclist, meta)
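# computedistances is defined elsewhere in this script; a minimal sketch,
# assuming each object in loclist exposes string lat/long attributes and gains
# a numeric distance attribute (great-circle distance via the haversine
# formula):
import math

def computedistances(loclist, lat, longi):
    lat1, lon1 = math.radians(float(lat)), math.radians(float(longi))
    for loc in loclist:
        lat2, lon2 = math.radians(float(loc.lat)), math.radians(float(loc.long))
        a = (math.sin((lat2 - lat1) / 2) ** 2 +
             math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)
        loc.distance = 2 * 6371.0 * math.asin(math.sqrt(a))  # kilometres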
def main():
    fs = cgi.FieldStorage()
    aggr = fs.getvalue("aggr")
    operator = fs.getvalue("operator")
    dates = fs.getvalue("dates")
    if (not aggr) or (not operator) or (not dates):
        error.errhttp("406")
        return
    urlaggr = getaggrurl(aggr)
    # failure check aligned with the sibling script above: getaggrurl returns
    # an int status code (e.g. 404) when the aggregator is unknown, so a
    # string comparison against "404" would never match
    if isinstance(urlaggr, (int, long)):
        error.errhttp(str(urlaggr))
        return
    aggr = aggr.lower()
    operator = operator.lower()
    dates = dates.lower()
    req = urllib2.Request(url=urlaggr)
    req.add_header('Accept', 'application/xml, text/turtle, text/csv, application/json')
    response = urllib2.urlopen(req)
    restype = response.info().gettype()
    resource = response.read()
    response.close()
    if restype == "application/xml":
        meta = trasforma.locationfromxml(resource, loclist)
    elif restype == "text/turtle":
        meta = trasforma.locationfromturtle(resource, loclist)
    elif restype == "text/csv":
        meta = trasforma.locationfromcsv(resource, loclist)
    elif restype == "application/json":
        meta = trasforma.locationfromjson(resource, loclist)
    else:
        error.errhttp("406")
        return
    finallist = getopened(dates, operator, loclist)
    if isinstance(finallist, (int, long)):
        error.errcode(str(finallist))
        return
    trasforma.formatresult(os.environ["HTTP_ACCEPT"], finallist, meta)
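# getaggrurl is shared by the aggregator front-ends and is not shown in this
# section; a plausible sketch, assuming a fixed table of aggregator names (the
# URLs below are hypothetical) and an int HTTP status code as the failure
# value, matching the isinstance(urlaggr, (int, long)) checks above:
AGGREGATORS = {
    "farmacie": "http://ltw1219.web.cs.unibo.it/farmacie.py",
    "supermarket": "http://ltw1219.web.cs.unibo.it/supermarket.py",
    "poste": "http://ltw1219.web.cs.unibo.it/poste.py",
}

def getaggrurl(aggr):
    return AGGREGATORS.get(aggr.lower(), 404)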
def filtraCONTAINS(key, value, ncontains):
    turtle = rdflib.Graph()
    src = turtle.parse(poste, format='n3')
    result = rdflib.Graph()
    # bind the default prefix without overriding an existing binding
    result.bind('', rdflib.URIRef('http://www.essepuntato.it/resource/'), False)
    result.bind('vcard', rdflib.URIRef('http://www.w3.org/2006/vcard/ns#'))
    result.bind('cs', rdflib.URIRef('http://cs.unibo.it/ontology/'))
    result.bind('dcterms', rdflib.URIRef('http://purl.org/dc/terms/'))
    result.bind('xsd', rdflib.URIRef('http://www.w3.org/2001/XMLSchema#'))
    result.bind('this', rdflib.URIRef('http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#'))
    if ncontains:
        nop = "!"
    else:
        nop = ""
    # copy the metadata triples into the result graph; this must happen for
    # both the positive and the negated filter
    oquery = """
    PREFIX vcard: <http://www.w3.org/2006/vcard/ns#>
    PREFIX cs: <http://cs.unibo.it/ontology/>
    PREFIX : <http://www.essepuntato.it/resource/>
    PREFIX dcterms: <http://purl.org/dc/terms/>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    PREFIX this: <http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#>
    CONSTRUCT {?s ?p ?o}
    WHERE {
        ?s ?p ?o.
        ?s dcterms:creator []
    }
    """
    query_meta = src.query(oquery)
    for element in query_meta:
        result.add(element)
    if key == "id" and not ncontains:
        # match on the subject URI itself; str() turns the IRI into a plain
        # string so that regex() can be applied to it, and the closing quote
        # after the interpolated value keeps the regex literal well-formed
        squery = """
        PREFIX vcard: <http://www.w3.org/2006/vcard/ns#>
        PREFIX cs: <http://cs.unibo.it/ontology/>
        PREFIX : <http://www.essepuntato.it/resource/>
        PREFIX dcterms: <http://purl.org/dc/terms/>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX this: <http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#>
        CONSTRUCT {?s ?p ?o}
        WHERE {
            ?s ?p ?o.
            FILTER (""" + nop + """regex (str(?s) ,"http://www.essepuntato.it/resource/""" + value + """\", "i"))
        }
        """
    elif key in ("category", "name", "opening", "closing") or (key == "address" and not ncontains):
        squery = """
        PREFIX vcard: <http://www.w3.org/2006/vcard/ns#>
        PREFIX cs: <http://cs.unibo.it/ontology/>
        PREFIX : <http://www.essepuntato.it/resource/>
        PREFIX dcterms: <http://purl.org/dc/terms/>
        PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
        PREFIX this: <http://vitali.web.cs.unibo.it/twiki/pub/TechWeb12/DataSource2/posteBO2011.ttl#>
        CONSTRUCT {?s ?p ?o}
        WHERE {
            ?s ?p ?o;
               """ + campi[key] + """ ?name.
            FILTER (""" + nop + """regex (?name ,\"""" + value + """\", "i"))
        }
        """
    else:
        error.errhttp("406")
        return
    print("Content-type: text/turtle; charset=UTF-8\n")
    query_result = src.query(squery)
    for element in query_result:
        result.add(element)
    print result.serialize(format="n3")
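# For example, filtraCONTAINS("name", "poste", False) leaves nop empty and
# (assuming campi["name"] == "vcard:fn", see the campi sketch above)
# interpolates the WHERE clause of squery to:
#
#   ?s ?p ?o;
#      vcard:fn ?name.
#   FILTER (regex (?name ,"poste", "i"))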