def __init__(self, config, logger=None):
    """Conflate French level crossings (data.gouv.fr:RFF) with OSM railway crossing nodes."""
    self.missing_official = {
        "item": "8060",
        "class": 1,
        "level": 3,
        "tag": ["merge", "railway"],
        "desc": T_(u"Crossing level not integrated"),
    }
    Analyser_Merge.__init__(
        self, config, logger,
        "https://www.data.gouv.fr/fr/datasets/passages-a-niveau-30383135/",
        u"Passages à niveau",
        CSV(
            Source(
                attribution=u"data.gouv.fr:RFF",
                millesime="01/2014",
                fileUrl="http://static.data.gouv.fr/c5/caae14a4ab1f6530f4c24b3e3c25b4a4f753556a8eda7cbf989501626ff400.csv",
                encoding="ISO-8859-15"),
            separator=";"),
        Load(
            "LONGITUDE (WGS84)", "LATITUDE (WGS84)",
            xFunction=self.float_comma,
            yFunction=self.float_comma,
            # Skip 'PN de classe 00' rows entirely.
            where=lambda res: res["TYPE"] != 'PN de classe 00'),
        Mapping(
            select=Select(
                types=["nodes"],
                tags={"railway": ["level_crossing", "crossing"]}),
            conflationDistance=150,
            generate=Generate(
                static2={"source": self.source},
                # Map the official crossing TYPE to the OSM railway=* value.
                mapping1={"railway": lambda res: self.type[res["TYPE"]]})))
def __init__(self, config, logger = None):
    """Conflate CAPP bicycle-parking open data with OSM amenity=bicycle_parking nodes (legacy attribute-style analyser)."""
    self.missing_official = {"item":"8150", "class": 11, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"CAPP bicycle parking not integrated") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://opendata.agglo-pau.fr/index.php/fiche?idQ=20"
    self.officialName = "Supports vélos sur la CAPP"
    self.csv_file = "merge_data/bicycle_parking_FR_capp.csv"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
    # Rewrite decimal commas ("1,5" -> "1.5") before the CSV import.
    decimal_comma = re.compile("([0-9]),([0-9])")
    self.csv_filter = lambda t: decimal_comma.sub("\\1.\\2", t)
    self.osmTags = {
        "amenity": "bicycle_parking",
    }
    self.osmTypes = ["nodes"]
    self.sourceTable = "capp_bicycle_parking"
    self.sourceX = "x"
    self.sourceY = "y"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "source": "Communauté d'Agglomération Pau-Pyrénées - 01/2013",
        "amenity": "bicycle_parking",
    }
    self.defaultTagMapping = {
        # One support holds two bikes, hence capacity = nombre * 2.
        "capacity": lambda res: str(int(res["nombre"])*2),
    }
    self.conflationDistance = 50
def __init__(self, config, logger = None):
    """Conflate CG71 bus/coach stops with OSM bus stops (legacy attribute-style analyser)."""
    self.missing_official = {"item":"8040", "class": 61, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop not integrated") }
    self.possible_merge = {"item":"8041", "class": 63, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop, integration suggestion") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://www.opendata71.fr/thematiques/transport/localisation-des-points-d-arret-de-bus"
    self.officialName = "Localisation des arrêts de bus et car - CG71"
    self.csv_file = "merge_data/public_transport_FR_cg71.csv"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER QUOTE '~'"
    self.osmTags = { "highway": "bus_stop" }
    self.osmRef = "ref:FR:CG71"
    self.osmTypes = ["nodes", "ways"]
    self.sourceTable = "bus_cg71"
    # NOTE(review): X is fed from the "latitude" column and Y from "longitude" —
    # presumably the upstream column labels are swapped; confirm against the data.
    self.sourceX = "latitude"
    self.sourceY = "longitude"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "source": "Conseil général de la Saône-et-Loire - Direction des Transports et de l'intermodalité - 03/2013",
        "highway": "bus_stop",
        "public_transport": "stop_position",
        "bus": "yes",
    }
    self.defaultTagMapping = {
        "ref:FR:CG71": "cod_arret",
        # Names look like "Commune - Stop"; keep only the stop part when the separator is present.
        "name": lambda res: res['nom'].split(' - ')[1].strip() if ' - ' in res['nom'] else res['nom'].strip(),
    }
    self.conflationDistance = 100
    self.text = lambda tags, fields: {"en": u"CG71 stop of %s" % fields["nom"].strip(), "fr": u"Arrêt CG71 de %s" % fields["nom"].strip()}
def __init__(self, config, logger = None):
    """Conflate the Mérimée list of protected historical monuments with OSM heritage features (legacy attribute-style analyser)."""
    self.missing_official = {"item":"8010", "class": 1, "level": 3, "tag": ["merge", "building"], "desc": T_(u"Historical monument not integrated") }
    self.missing_osm = {"item":"7080", "class": 2, "level": 3, "tag": ["merge"], "desc": T_(u"Historical monument without ref:mhs or invalid") }
    self.possible_merge = {"item":"8011", "class": 3, "level": 3, "tag": ["merge"], "desc": T_(u"Historical monument, integration suggestion") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://www.data.gouv.fr/donnees/view/Liste-des-Immeubles-prot%C3%A9g%C3%A9s-au-titre-des-Monuments-Historiques-30382152"
    self.officialName = "Liste des Immeubles protégés au titre des Monuments Historiques"
    self.csv_file = "merge_data/merimee.csv"
    self.osmTags = {
        "heritage": ["1", "2", "3"],
        "heritage:operator": None,
    }
    self.osmRef = "ref:mhs"
    self.osmTypes = ["nodes", "ways", "relations"]
    self.sourceTable = "merimee"
    self.sourceX = "lon"
    self.sourceY = "lat"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "heritage:operator": "mhs",
        "source": "data.gouv.fr:Ministère de la Culture - 08/2011"
    }
    self.defaultTagMapping = {
        "ref:mhs": "ref",
        "name": "tico",
        # The last four characters of "ppro" hold the protection year.
        "mhs:inscription_date": lambda res: u"%s" % res["ppro"][-4:],
        # heritage=2 for "classement", heritage=3 for "inscription", else unset.
        "heritage": lambda res: 2 if "classement par arrêté" in res["ppro"] else 3 if "inscription par arrêté" in res["ppro"] else None,
        "wikipedia": self.wikipedia,
    }
    self.conflationDistance = 1000
    self.text = lambda tags, fields: {"en": u"Historical monument: %s" % ", ".join(filter(lambda x: x!= None and x != "", [fields["ppro"], fields["adrs"], fields["loca"]]))}
    # Patterns used to spot and extract [[wiki link]] markup in source fields.
    self.WikipediaSearch = re.compile("\[\[.*\]\]")
    self.WikipediaSub = re.compile("[^[]*\[\[([^|]*).*\]\][^]]*")
def __init__(self, config, logger = None):
    """Conflate the CG71 restaurant open data with OSM amenity=restaurant POIs."""
    self.missing_official = {"item":"8240", "class": 11, "level": 3, "tag": ["merge", "amenity"], "desc": T_(u"Restaurant not integrated") }
    # Strip a leading "restaurant"/"hôtel-restaurant" prefix and any trailing "/..." part from names.
    start_restaurant = re.compile("^(hôtel-)?restaurant ", flags=re.IGNORECASE)
    final_name = re.compile("/.*$")
    Analyser_Merge.__init__(
        self, config, logger,
        "http://opendata71interactive.cloudapp.net/DataBrowser/data/CG71Restaurants",
        u"Les restaurants en Saône-et-Loire - CG71",
        CSV(
            Source(
                attribution = u"Conseil général de la Saône-et-Loire - Agence de Développement Touristique",
                millesime = "03/2013",
                fileUrl = "http://opendata71interactive.cloudapp.net/DataBrowser/DownloadCsv?container=data&entitySet=CG71Restaurants&filter=NOFILTER",
                encoding = "ISO-8859-15"),
            separator = ";"),
        Load("LONGITUDE", "LATITUDE",
            xFunction = self.float_comma,
            yFunction = self.float_comma),
        Mapping(
            select = Select(
                types = ["nodes", "ways"],
                tags = {"amenity": "restaurant"}),
            conflationDistance = 100,
            generate = Generate(
                static1 = {"amenity": "restaurant"},
                static2 = {"source": self.source},
                mapping1 = {
                    # Fall back to plain "restaurant" when the category is unknown.
                    "amenity": lambda fields: self.amenity_type.get(fields["CATEGORIE"]) or "restaurant",
                    "name": lambda fields: final_name.sub('', start_restaurant.sub('', fields["NOM"])),
                    "tourism": lambda fields: "hotel" if fields["TYPE_RESTAURATION"] == u"Hotel-restaurant" else None,
                    "cuisine": lambda fields: self.cuisine(fields),
                    "website": "SITE_WEB",
                    # The Michelin field is a string of stars; its length is the rating.
                    "stars": lambda fields: len(fields["note_Guide_Rouge_Michelin"]) if fields["note_Guide_Rouge_Michelin"] else None},
                text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["NOM"], fields["TYPE_RESTAURATION"], fields["CATEGORIE"], fields["ADRESSE1"], fields["ADRESSE2"], fields["ADRESSE3"], fields["VILLE"]]))} )))
def __init__(self, config, logger = None):
    """Conflate CAPP car-park open data with OSM amenity=parking features."""
    self.missing_official = {"item":"8130", "class": 1, "level": 3, "tag": ["merge", "parking"], "desc": T_(u"CAPP parking not integrated") }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://opendata.agglo-pau.fr/index.php/fiche?idQ=18",
        u"Parkings sur la CAPP",
        CSV(
            Source(
                attribution = u"Communauté d'Agglomération Pau-Pyrénées",
                millesime = "01/2013",
                fileUrl = "http://opendata.agglo-pau.fr/sc/call.php?f=1&idf=18",
                zip = "Parking_WGS84.csv",
                encoding = "ISO-8859-15")),
        Load("X", "Y",
            xFunction = self.float_comma,
            yFunction = self.float_comma),
        Mapping(
            select = Select(
                types = ["nodes", "ways"],
                tags = {"amenity": "parking"}),
            conflationDistance = 200,
            generate = Generate(
                static1 = { "amenity": "parking"},
                static2 = { "source": self.source},
                mapping1 = {
                    "name": "NOM",
                    "fee": lambda res: "yes" if res["Pay_grat"] == "Payant" else "no",
                    # "0" means the capacity is unknown, not an empty car park.
                    "capacity": lambda res: res["Places"] if res["Places"] != "0" else None,
                    "parking": lambda res: "surface" if res["Ouvrage"] == "Plein air" else "underground" if res["Ouvrage"] == "Souterrain" else None},
                text = lambda tags, fields: {"en": u"Parking %s" % tags["name"]} )))
def __init__(self, config, logger, clas, conflationDistance, select, osmTags, defaultTag):
    """Conflate the STIF Île-de-France stop reference with OSM public-transport stops.

    Parameterised per stop kind: `clas` offsets the issue class, `select` filters
    the source rows, `osmTags`/`defaultTag` drive matching and tagging.
    """
    self.missing_official = {"item":"8040", "class": 1+10*clas, "level": 3, "tag": ["merge", "railway", "public transport"], "desc": T_(u"STIF public transport stop not integrated") }
    self.possible_merge = {"item":"8041", "class": 3+10*clas, "level": 3, "tag": ["merge", "railway", "public transport"], "desc": T_(u"STIF public transport stop, integration suggestion") }
    Analyser_Merge.__init__(
        self, config, logger,
        "https://opendata.stif.info/explore/dataset/referentiel-arret-tc-idf/information/",
        u"Référentiel des arrêts de transport en commun en Ile-de-France",
        CSV(
            Source(
                attribution = u"STIF",
                millesime = "12/2016",
                fileUrl = u"https://opendata.stif.info/explore/dataset/referentiel-arret-tc-idf/download/?format=csv&timezone=Europe/Berlin&use_labels_for_header=true"),
            separator = ";"),
        # ZDEr_X_Y packs "x,y" in one column (Lambert-93); split it for both axes.
        Load("ZDEr_X_Y", "ZDEr_X_Y",
            srid = 2154,
            select = {"ZDEr_LIBELLE_TYPE_ARRET": select},
            xFunction = lambda x: x.split(",")[0],
            yFunction = lambda y: y.split(",")[1]),
        Mapping(
            select = Select(
                types = ["nodes", "ways"],
                tags = osmTags),
            osmRef = "ref:FR:STIF",
            conflationDistance = conflationDistance,
            generate = Generate(
                static1 = defaultTag,
                static2 = {"source": self.source},
                mapping1 = {"ref:FR:STIF": "ZDEr_ID_REF_A"},
                mapping2 = {"name": "ZDEr_NOM"},
                text = lambda tags, fields: {"en": u"STIF public transport stop of %s" % tags["name"], "fr": u"Arrêt de transport d'Île-de-France de %s" % tags["name"]} )))
def __init__(self, config, classs, desc, wikiTypes, wikiCountry, wikiLang, starts, osmTags, osmTypes, conflationDistance, logger = None):
    """Suggest wikipedia=* tags on named OSM features from the Wikipedia-World coordinate dump."""
    self.possible_merge = {"item":"8101", "class": classs, "level": 3, "tag": ["merge", "wikipedia"], "desc":desc }
    Analyser_Merge.__init__(
        self, config, logger,
        Source(
            url = "http://toolserver.org/~kolossos/wp-world/pg-dumps/wp-world/",
            name = "Wikipedia-World",
            file = "wikipedia_point_fr.csv.bz2",
            csv = CSV(csv = False, separator = None, null = None)),
        Load(("ST_X(the_geom)",), ("ST_Y(the_geom)",),
            table = "wikipedia_point_fr",
            create = self.create_table,
            select = {"lang": wikiLang, "Country": wikiCountry},
            # Without a prefix filter, drop "Liste ..." articles (list pages, not places).
            where = (lambda res: not res["titel"].startswith("Liste ")) if starts == None else (lambda res: res["titel"].startswith(starts)) ),
        Mapping(
            select = Select(
                types = osmTypes,
                tags = {"name": None}),
            osmRef = "wikipedia",
            conflationDistance = conflationDistance,
            generate = Generate(
                mapping = {"wikipedia": lambda fields: fields["lang"]+":"+fields["titel"]},
                text = lambda tags, fields: {fields["lang"]: fields["titel"]} )))
    if wikiTypes != None:
        self.load.select["types"] = wikiTypes  # http://en.wikipedia.org/wiki/Wikipedia:GEO#type:T
    if isinstance(osmTags, dict):
        self.mapping.select.tags.update(osmTags)
    else:
        # NOTE(review): this branch reads self.osmTags, which this __init__ never
        # assigns — presumably set by the base class; confirm before relying on it.
        for t in osmTags:
            t.update(self.osmTags)
        self.mapping.select.tags = osmTags
def __init__(self, config, logger = None):
    """Conflate IGN geodesic markers with OSM man_made=survey_point nodes."""
    self.missing_official = {"item":"8070", "class": 1, "level": 3, "tag": ["merge"], "desc": T_(u"Missing survey point") }
    self.moved_official = {"item":"8070", "class": 3, "level": 3, "tag": ["merge"], "desc": T_(u"Moved survey point")}
    Analyser_Merge.__init__(
        self, config, logger,
        "http://geodesie.ign.fr",
        u"Fiches géodésiques",
        CSV(
            Source(
                attribution = u"©IGN %s dans le cadre de la cartographie réglementaire",
                millesime = "2010",
                file = "geodesie.csv.bz2"),
            header = False),
        # Headerless CSV: declare the table schema explicitly.
        Load("lon", "lat",
            create = """
id VARCHAR(254) PRIMARY KEY,
lat VARCHAR(254),
lon VARCHAR(254),
description VARCHAR(4096),
ele VARCHAR(254),
ref VARCHAR(254)"""),
        Mapping(
            select = Select(
                types = ["nodes"],
                tags = {"man_made": "survey_point"}),
            osmRef = "ref",
            extraJoin = "description",
            generate = Generate(
                static1 = { "man_made": "survey_point"},
                static2 = {
                    "note": u"Ne pas déplacer ce point, cf. - Do not move this node, see - http://wiki.openstreetmap.org/wiki/WikiProject_France/Repères_Géodésiques#Permanence_des_rep.C3.A8res",
                    "source": self.source},
                mapping1 = {
                    "ref": "ref",
                    "ele": "ele"},
                mapping2 = { "description": "description"},
                text = lambda tags, fields: {"en": u"Survey point %s" % tags["ref"], "fr": u"Repères géodésiques %s" % tags["ref"], "es": u"Señales geodésicas %s" % tags["ref"]} )))
def __init__(self, config, logger = None):
    """Conflate the La Poste street letter-box list with OSM amenity=post_box nodes."""
    self.missing_official = {"item":"8022", "class": 1, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post box not integrated") }
    self.missing_osm = {"item":"7051", "class": 2, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post box without ref:FR:LaPoste") }
    self.possible_merge = {"item":"8023", "class": 3, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post box, integration suggestion") }
    Analyser_Merge.__init__(
        self, config, logger,
        Source(
            url = "https://www.data.gouv.fr/fr/datasets/liste-des-boites-aux-lettres-de-rue-france-metropolitaine-et-dom-1/",
            name = u"Liste des boîtes aux lettres de rue France métropolitaine et DOM",
            file = "post_box_FR.csv.bz2",
            encoding = "ISO-8859-15",
            csv = CSV(separator = ";")),
        Load("Longitude", "Latitude", table = "post_box_fr"),
        Mapping(
            select = Select(
                types = ["nodes"],
                tags = {"amenity": "post_box"}),
            osmRef = "ref:FR:LaPoste",
            conflationDistance = 100,
            generate = Generate(
                static = {
                    "amenity": "post_box",
                    "operator": "La Poste",
                    "source": "data.gouv.fr:LaPoste - 06/2015"},
                mapping = {
                    "ref:FR:LaPoste": "CO_MUP",
                    "addr:postcode": "CO_POSTAL"},
                text = lambda tags, fields: {"en": ", ".join(filter(lambda x: x and x != 'None' and x != '', [fields[u"VA_NO_VOIE"], fields[u"LB_EXTENSION"].strip(), fields[u"LB_VOIE_EXT"], fields["CO_POSTAL"], fields[u"LB_COM"]]))} )))
def __init__(self, config, classs, officialName, srid, logger = None):
    """Conflate the national school register (ref:UAI) with OSM schools.

    Parameterised per region: `classs` offsets the issue classes, `officialName`
    labels the dataset, `srid` is the projection of the source coordinates.
    """
    self.missing_official = {"item":"8030", "class": classs+1, "level": 3, "tag": ["merge"], "desc": T_(u"School not integrated") }
    self.missing_osm = {"item":"7070", "class": classs+2, "level": 3, "tag": ["merge"], "desc": T_(u"School without ref:UAI or invalid") }
    self.possible_merge = {"item":"8031", "class": classs+3, "level": 3, "tag": ["merge"], "desc": T_(u"School, integration suggestion") }
    Analyser_Merge.__init__(
        self, config, logger,
        Source(
            url = "http://www.data.gouv.fr/donnees/view/G%C3%A9olocalisation-des-%C3%A9tablissements-d%27enseignement-du-premier-degr%C3%A9-et-du-second-degr%C3%A9-du-minist%C3%A8re-d-30378093",
            name = u"établissements d'enseignement du premier degré et du second degré - " + officialName,
            file = "school_FR.csv.bz2",
            encoding = "ISO-8859-15",
            csv = CSV(separator = ";", null = "null")),
        Load("X", "Y",
            srid = srid,
            table = "School_FR",
            # Normalise empty cells to null and fix recurring spelling variants.
            filter = lambda t: t.replace("; ", ";null").replace(";.", ";null").replace("Ecole", u"École").replace("Saint ", "Saint-").replace("Sainte ", "Sainte-").replace(u"élementaire", u"élémentaire"),
            # Keep only rows with coordinates inside the analysed area.
            where = lambda res: res["_x"] and res["_y"] and self.is_in(float(res["_x"]), float(res["_y"]))),
        Mapping(
            select = Select(
                types = ["nodes", "ways", "relations"],
                tags = {"amenity": ["school", "kindergarten"]}),
            osmRef = "ref:UAI",
            conflationDistance = 50,
            generate = Generate(
                static = {
                    "amenity": "school",
                    "source": u"data.gouv.fr:Ministère de l'Éducation nationale, de la Jeunesse et de la Vie associative - 05/2012"},
                mapping = {
                    "ref:UAI": "numero_uai",
                    "school:FR": self.School_FR,
                    "name": "appellation_officielle_uai",
                    "operator:type": lambda res: "private" if res["denomination_principale_uai"] and "PRIVE" in res["denomination_principale_uai"] else None},
                text = lambda tags, fields: {"en":fields["appellation_officielle_uai"] if fields["appellation_officielle_uai"] else ""} )))
def __init__(self, config, logger = None):
    """Conflate Paris Autolib' car-rental stations with OSM amenity=car_rental features."""
    self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Paris Autolib' car rental not integrated") }
    self.missing_osm = {"item":"7140", "class": 2, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Paris Autolib' car rental without ref:FR:Paris:DSP") }
    self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Paris Autolib' car rental integration suggestion") }
    Analyser_Merge.__init__(
        self, config, logger,
        Source(
            url = "http://opendata.paris.fr/explore/dataset/stations_et_espaces_autolib_de_la_metropole_parisienne",
            name = u"Stations et espaces AutoLib de la métropole parisienne",
            file = "car_rental_FR_paris.csv.bz2",
            csv = CSV(separator = ";")),
        # field13 packs "lat,lon" in one column; split it for both axes.
        Load("field13", "field13",
            table = "car_rental_FR_paris",
            xFunction = lambda x: x.split(',')[1],
            yFunction = lambda y: y.split(',')[0]),
        Mapping(
            select = Select(
                types = ["ways", "nodes"],
                tags = {"amenity": "car_rental", "network": "Autolib'"}),
            osmRef = "ref:FR:Paris:DSP",
            conflationDistance = 200,
            generate = Generate(
                static = {
                    "source": u"Mairie de Paris - 05/2013",
                    "amenity": "car_rental",
                    "network": "Autolib'",
                    "operator": "Autolib'",
                },
                mapping = {
                    "name": "nom_de_la_station",
                    "ref:FR:Paris:DSP": "identifiant_dsp",
                    "capacity": "places_autolib"} )))
def __init__(self, config, logger=None):
    """Conflate Bordeaux Métropole disabled parking spaces with OSM parking features."""
    self.missing_official = {
        "item": "8130",
        "class": 21,
        "level": 3,
        "tag": ["merge", "parking"],
        "desc": T_(u"BM parking disabled not integrated"),
    }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://data.bordeaux-metropole.fr/data.php?themes=8",
        u"Place de stationnement PMR",
        SHP(
            Source(
                attribution=u"Bordeaux Métropole",
                millesime="08/2016",
                fileUrl="http://data.bordeaux-metropole.fr/files.php?gid=73&format=2",
                zip="GRS_GIGC_P.shp",
                encoding="ISO-8859-15")),
        Load(("ST_X(geom)", ), ("ST_Y(geom)", ), srid=2154),
        Mapping(
            select=Select(
                types=["nodes", "ways"],
                tags={
                    "amenity": "parking",
                    "capacity:disabled": None
                }),
            conflationDistance=100,
            generate=Generate(
                static1={
                    "amenity": "parking",
                    "capacity:disabled": "yes"
                },
                static2={"source": self.source})))
def __init__(self, config, logger = None):
    """Conflate the datanova La Poste letter-box list with OSM amenity=post_box nodes."""
    self.missing_official = {"item":"8025", "class": 1, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post box not integrated") }
    self.missing_osm = {"item":"7051", "class": 2, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post box without ref") }
    self.possible_merge = {"item":"8026", "class": 3, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post box, integration suggestion") }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://datanova.legroupe.laposte.fr/explore/dataset/laposte_boiterue",
        u"Liste des boîtes aux lettres de rue France métropolitaine et DOM",
        CSV(
            Source(
                attribution = u"data.gouv.fr:LaPoste",
                millesime = "05/2016",
                fileUrl = "http://datanova.legroupe.laposte.fr/explore/dataset/laposte_boiterue/download/?format=csv&use_labels_for_header=true"),
            separator = ";"),
        # Latlong packs "lat,lon" in one column; split it (guarding empty cells).
        Load("Latlong", "Latlong",
            xFunction = lambda x: x and x.split(',')[1],
            yFunction = lambda y: y and y.split(',')[0]),
        Mapping(
            select = Select(
                types = ["nodes"],
                tags = {"amenity": "post_box"}),
            osmRef = "ref",
            conflationDistance = 50,
            generate = Generate(
                missing_official_fix = False,
                static1 = {
                    "amenity": "post_box",
                    "operator": "La Poste"},
                static2 = {"source": self.source},
                mapping1 = {"ref": "CO_MUP"},
                text = lambda tags, fields: {"en": ", ".join(filter(lambda x: x and x != 'None' and x != '', [fields[u"VA_NO_VOIE"], fields[u"LB_EXTENSION"].strip(), fields[u"LB_VOIE_EXT"], fields["CO_POSTAL"], fields[u"LB_COM"]]))} )))  #LB_TYPE_GEO
def __init__(self, config, logger=None):
    """Conflate CAPP bicycle-parking open data with OSM amenity=bicycle_parking nodes."""
    self.missing_official = {
        "item": "8150",
        "class": 11,
        "level": 3,
        "tag": ["merge", "public equipment", "cycle"],
        "desc": T_(u"CAPP bicycle parking not integrated"),
    }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://opendata.agglo-pau.fr/index.php/fiche?idQ=20",
        u"Supports vélos sur la CAPP",
        CSV(
            Source(
                attribution=u"Communauté d'Agglomération Pau-Pyrénées",
                millesime="01/2013",
                fileUrl="http://opendata.agglo-pau.fr/sc/call.php?f=1&idf=20",
                zip="Sta_Velo_Agglo_WGS84.csv")),
        Load("X", "Y",
            xFunction=self.float_comma,
            yFunction=self.float_comma),
        Mapping(
            select=Select(
                types=["nodes"],
                tags={"amenity": "bicycle_parking"}),
            conflationDistance=50,
            generate=Generate(
                static1={"amenity": "bicycle_parking"},
                static2={"source": self.source},
                mapping1={
                    # One support holds two bikes, hence capacity = NOMBRE * 2.
                    "capacity": lambda res: str(int(res["NOMBRE"]) * 2)
                })))
def __init__(self, config, logger = None):
    """Conflate VCUB bicycle-rental stations with OSM amenity=bicycle_rental nodes (legacy attribute-style analyser)."""
    self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"CUB bicycle rental not integrated") }
    self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"CUB bicycle rental integration suggestion") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://data.lacub.fr/data.php?themes=10"
    self.officialName = "Station VCUB"
    self.csv_file = "merge_data/bicycle_rental_FR_cub.csv"
    self.csv_encoding = "ISO-8859-15"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
    self.osmTags = {
        "amenity": "bicycle_rental",
    }
    self.osmRef = "ref"
    self.osmTypes = ["nodes"]
    self.sourceTable = "cub_bicycle_rental"
    self.sourceX = "x"
    self.sourceY = "y"
    self.sourceSRID = "2154"
    self.defaultTag = {
        "source": "Communauté Urbaine de Bordeaux - 03/2014",
        "amenity": "bicycle_rental",
        "network": "VCUB",
    }
    self.defaultTagMapping = {
        "name": "nom",
        "ref": "numstat",
        "capacity": "nbsuppor",
        # Stations with a payment terminal get vending_machine=yes.
        "vending_machine": lambda res: "yes" if res["termbanc"] == "OUI" else None,
        # "VLS PLUS" pricing marks the premium VCUB+ stations.
        "description": lambda res: "VCUB+" if res["tarif"] == "VLS PLUS" else None,
    }
    self.conflationDistance = 100
def __init__(self, config, logger, clas, select, osmTags, defaultTag):
    """Conflate RATP station positions with OSM stops.

    Parameterised per network (`select` filters the "reseau" column, `clas`
    offsets the issue classes, `osmTags`/`defaultTag` drive matching and tagging).
    """
    self.missing_official = {"item":"8040", "class": 1+10*clas, "level": 3, "tag": ["merge", "railway", "public transport"], "desc": T_(u"RATP station not integrated") }
    self.possible_merge = {"item":"8041", "class": 3+10*clas, "level": 3, "tag": ["merge", "railway", "public transport"], "desc": T_(u"RATP station, integration suggestion") }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://data.ratp.fr/fr/les-donnees/fiche-de-jeu-de-donnees/dataset/positions-geographiques-des-stations-du-reseau-ratp.html",
        u"Positions géographiques des stations du réseau RATP",
        CSV(
            Source(
                attribution = u"RATP",
                millesime = "07/2012",
                file = "ratp_arret_graphique.csv.bz2"),
            separator = "#"),
        # Headerless dump: declare the table schema explicitly.
        Load("lon", "lat",
            create = """
id VARCHAR(254),
lon VARCHAR(254),
lat VARCHAR(254),
nom_station VARCHAR(254),
ville_cp VARCHAR(254),
reseau VARCHAR(254)""",
            select = {"reseau": select}),
        Mapping(
            select = Select(
                types = ["nodes", "ways"],
                tags = osmTags),
            osmRef = "ref:FR:RATP",
            conflationDistance = 100,
            generate = Generate(
                static1 = defaultTag,
                static2 = {"source": self.source},
                mapping1 = {"ref:FR:RATP": "id"},
                mapping2 = {"name": "nom_station"},
                text = lambda tags, fields: {"en": u"RATP station of %s" % tags["name"], "fr": u"Station RATP de %s" % tags["name"]} )))
def __init__(self, config, logger = None):
    """Conflate the RTE register of production facilities with OSM power=generator ways."""
    self.missing_official = {"item":"8270", "class": 1, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power generator not integrated") }
    self.missing_osm = {"item":"7180", "class": 2, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power generator without ref:FR:RTE") }
    self.possible_merge = {"item":"8271", "class": 3, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power generator, integration suggestion") }
    self.update_official = {"item":"8272", "class": 4, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power generator update") }
    Analyser_Merge.__init__(
        self, config, logger,
        "https://opendata.rte-france.com/explore/dataset/registre_parc_prod_rpt",
        u"Registre 2015 des installations de production raccordées au Réseau de Transport d'Electricité",
        CSV(
            Source(
                attribution = u"data.gouv.fr:RTE",
                millesime = "2015",
                fileUrl = "https://opendata.rte-france.com/explore/dataset/registre_parc_prod_rpt/download/?format=csv&timezone=Europe/Berlin&use_labels_for_header=true"),
            separator = ";"),
        # "Geo-point IRIS" packs "lat,lon" in one column; split it (guarding empty cells).
        Load("Geo-point IRIS", "Geo-point IRIS",
            xFunction = lambda x: x and x.split(',')[1],
            yFunction = lambda y: y and y.split(',')[0]),
        Mapping(
            select = Select(
                types = ["ways"],
                tags = {"power": "generator"}),
            conflationDistance = 5000,
            generate = Generate(
                static1 = { "power": "generator"},
                static2 = {"source": self.source},
                mapping1 = {
                    "ref:FR:RTE": "Identifiant",
                    # kV -> V; "<45" is a below-threshold placeholder, not a value.
                    "voltage": lambda fields: (int(float(fields["Tension (kV)"]) * 1000)) if fields.get("Tension (kV)") and fields["Tension (kV)"] != "<45" else None,
                    "generator:source": lambda fields: self.filiere[fields["Filière"]][fields["Combustible"]],
                    # MW -> W.
                    "generator:output:electricity": lambda fields: (int(float(fields["Puissance maximale (MW)"]) * 1000000)) if fields.get("Puissance maximale (MW)") else None},
                mapping2 = {
                    # Keep only the year when the commissioning date is a year-boundary placeholder.
                    "start": lambda fields: fields[u"Date de mise en service"][0:4] if fields[u"Date de mise en service"].endswith('-01-01') or fields[u"Date de mise en service"].endswith('-12-31') else fields[u"Date de mise en service"],
                    "operator": "Exploitant"},
                tag_keep_multiple_values = ["voltage"],
                text = lambda tags, fields: T_(u"Power substation of %s", fields["Site de production"]))))
def __init__(self, config, logger = None):
    """Conflate CUB glass recycling points with OSM amenity=recycling features (legacy attribute-style analyser)."""
    self.missing_official = {"item":"8120", "class": 1, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CUB glass recycling not integrated") }
    # NOTE(review): possible_merge reuses class 1 — sibling analysers use a distinct
    # class (usually 3) for the suggestion issue; confirm this is intentional.
    self.possible_merge = {"item":"8121", "class": 1, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CUB glass recycling, integration suggestion") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://data.lacub.fr/data.php?themes=5"
    self.officialName = "Emplacements d'apport volontaire"
    self.csv_file = "merge_data/recycling_FR_cub.csv"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
    self.csv_encoding = "ISO-8859-15"
    # Keep only rows that carry an identifier.
    self.csv_select = { "ident": "%" }
    self.osmTags = {
        "amenity": "recycling",
    }
    self.osmRef = "ref:FR:CUB"
    self.osmTypes = ["nodes", "ways"]
    self.sourceTable = "cub_recycling_glass"
    self.sourceX = "ident_x"
    self.sourceY = "ident_y"
    self.sourceSRID = "3945"
    self.defaultTag = {
        "source": "Communauté Urbaine de Bordeaux - 03/2014",
        "amenity": "recycling",
        "recycling:glass": "yes",
        "recycling:glass_bottles": "yes",
        "recycling_type": "container",
    }
    self.defaultTagMapping = {
        "ref:FR:CUB": "ident",
    }
    self.conflationDistance = 100
def __init__(self, config, logger = None):
    """Conflate CAPP glass recycling containers with OSM amenity=recycling features (legacy attribute-style analyser)."""
    self.missing_official = {"item":"8120", "class": 11, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CAPP glass recycling not integrated") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://opendata.agglo-pau.fr/index.php/fiche?idQ=8"
    self.officialName = "Point d'apport volontaire du verre : Bornes à verres sur la CAPP"
    self.csv_file = "merge_data/recycling_FR_capp_glass.csv"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
    self.csv_encoding = "ISO-8859-15"
    # Rewrite decimal commas ("1,5" -> "1.5") before the CSV import.
    decimal_comma = re.compile("([0-9]),([0-9])")
    self.csv_filter = lambda t: decimal_comma.sub("\\1.\\2", t)
    # Only containers currently in service.
    self.csv_select = { "usage_": "En service" }
    self.osmTags = {
        "amenity": "recycling",
    }
    self.osmTypes = ["nodes", "ways"]
    self.sourceTable = "capp_recycling_glass"
    self.sourceX = "x"
    self.sourceY = "y"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "source": "Communauté d'Agglomération Pau-Pyrénées - 01/2013",
        "amenity": "recycling",
        "recycling:glass": "yes",
        "recycling:glass_bottles": "yes",
        "recycling_type": "container",
    }
    self.conflationDistance = 100
def __init__(self, config, logger=None):
    """Conflate gendarmerie reception points with OSM amenity=police features."""
    self.missing_official = {
        "item": "8190",
        "class": 1,
        "level": 3,
        "tag": ["merge"],
        "desc": T_(u"Police not integrated"),
    }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://www.data.gouv.fr/fr/dataset/liste-des-points-d-accueil-de-la-gendarmerie-nationale-avec-geolocalisation",
        u"Liste des points d'accueil de la gendarmerie nationale avec géolocalisation",
        CSV(
            Source(
                attribution=u"data.gouv.fr:Ministère de l'Intérieur",
                millesime="02/2016",
                fileUrl="https://www.data.gouv.fr/s/resources/liste-des-points-d-accueil-de-la-gendarmerie-nationale-avec-geolocalisation/20160211-105304/ETALABexport_gn.csv"),
            separator=";"),
        Load("geocodage_x_GPS", "geocodage_y_GPS"),
        Mapping(
            select=Select(
                types=["nodes", "ways"],
                tags={"amenity": "police"}),
            conflationDistance=1000,
            generate=Generate(
                static1={
                    "amenity": "police",
                    "operator": "Gendarmerie Nationale"
                },
                static2={"source": self.source},
                mapping2={"phone": "telephone"},
                text=lambda tags, fields: {
                    "en": u"%s, %s" % (fields["service"], fields["adresse_geographique"])
                })))
def __init__(self, config, logger = None):
    """Conflate the SIRTAQUI Aquitaine camp-site list with OSM tourism=camp_site features."""
    self.missing_official = {"item":"8140", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde camp site not integrated") }
    Analyser_Merge.__init__(
        self, config, logger,
        "http://catalogue.datalocale.fr/dataset/liste-campings-aquitaine",
        u"Liste des campings en Aquitaine",
        JSON(
            Source(
                attribution = u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com",
                millesime = "06/2016",
                fileUrl = "http://wcf.tourinsoft.com/Syndication/aquitaine/13d7f8ab-bd69-4815-b02c-d8134663b849/Objects?$format=json"),
            # The OData payload wraps the record list under the "d" key.
            extractor = lambda json: json['d']),
        Load("LON", "LAT",
            xFunction = self.degree,
            yFunction = self.degree),
        Mapping(
            select = Select(
                types = ["nodes", "ways"],
                tags = {"tourism": "camp_site"}),
            conflationDistance = 300,
            generate = Generate(
                static1 = {"tourism": "camp_site"},
                static2 = {"source": self.source},
                mapping1 = {
                    "name": "NOMOFFRE",
                    # First character of the classification is the star count, when numeric.
                    "stars": lambda fields: fields["RECHERCHECLAS"][0] if fields["RECHERCHECLAS"] and fields["RECHERCHECLAS"][0].isdigit() else None,
                    "ref:FR:CRTA": "SyndicObjectID",
                    # Normalise the URL: keep as-is if it has a scheme, else prefix http://.
                    "website": lambda fields: None if not fields["URL"] else fields["URL"] if fields["URL"].startswith('http') else 'http://' + fields["URL"]},
                text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]))} )))
def __init__(self, config, logger = None):
    """Conflate the French fuel-price open data with OSM amenity=fuel POIs.

    Bug fix: the tag mapping listed "fuel:lpg" twice (once keyed on the "GPLc"
    column, once on "GPL"); in a dict literal the second entry silently
    overwrote the first, so the "GPLc" column was never consulted. The two
    checks are now merged into a single lambda.
    """
    self.missing_official = {"item":"8200", "class": 1, "level": 3, "tag": ["merge", "highway"], "desc": T_(u"Gas station not integrated") }
    Analyser_Merge.__init__(
        self, config, logger,
        Source(
            url = "http://www.prix-carburants.economie.gouv.fr/rubrique/opendata/",
            name = u"Prix des carburants en France",
            file = "fuel_FR.csv.bz2"),
        Load("lon", "lat", table = "fuel_FR"),
        Mapping(
            select = Select(
                types = ["nodes", "ways"],
                tags = {"amenity": "fuel"}),
            conflationDistance = 300,
            generate = Generate(
                static = {
                    "amenity": "fuel",
                    "source": "Ministère de l'Economie, de l'Industrie et du Numérique - 15/09/2014"},
                mapping = {
                    # Each fuel column holds "x" when the product is sold.
                    "fuel:e85": lambda res: "yes" if res["E85"] == "x" else None,
                    # Both GPLc and GPL columns indicate LPG availability.
                    "fuel:lpg": lambda res: "yes" if res["GPLc"] == "x" or res["GPL"] == "x" else None,
                    "fuel:e10": lambda res: "yes" if res["E10"] == "x" else None,
                    "fuel:octane_95": lambda res: "yes" if res["SP95"] == "x" else None,
                    "fuel:octane_98": lambda res: "yes" if res["SP98"] == "x" else None,
                    "fuel:diesel": lambda res: "yes" if res["Gazole"] == "x" else None,
                    "vending_machine": lambda res: "fuel" if res["Automate CB"] == "x" else None,
                    # Open 24/7 when opening equals closing time and no closed day is listed.
                    "opening_hours": lambda res: "24/7" if res["debut"] != "" and res["debut"] == res["fin"] and res["saufjour"] == "" else None,
                    "toilets": lambda res: "yes" if res["Toilettes publiques"] == "x" else None,
                    "compressed_air": lambda res: "yes" if res["Station de gonflage"] == "x" else None,
                    "shop": lambda res: "convenience" if res["Boutique alimentaire"] == "x" else None,
                    "hgv:lanes": lambda res: "yes" if res["Piste poids lourds"] == "x" else None,
                    # NOTE(review): both "vending_machine" and "vending" are derived from
                    # the same "Automate CB" column — confirm the duplication is wanted.
                    "vending": lambda res: "fuel" if res["Automate CB"] == "x" else None},
                text = lambda tags, fields: {"en": u"%s, %s" % (fields["adresse"], fields["ville"])} )))
def __init__(self, config, logger = None):
    """Conflate the higher-education establishments dataset with OSM colleges/universities (legacy attribute-style analyser)."""
    # NOTE(review): the "railway" tag on a college issue looks like a copy-paste
    # leftover from another analyser — confirm before changing the issue metadata.
    self.missing_official = {"item":"8030", "class": 100, "level": 3, "tag": ["merge", "railway"], "desc": T_(u"College not integrated") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://www.data.gouv.fr/DataSet/30382046"
    self.officialName = "Etablissements d'enseignement supérieur"
    self.csv_file = "merge_data/Etablissements d'enseignement supérieur.csv"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
    # Rewrite decimal commas ("1,5" -> "1.5") before the CSV import.
    decimal_comma = re.compile("([0-9]),([0-9])")
    self.csv_filter = lambda t: decimal_comma.sub("\\1.\\2", t)
    self.osmTags = {
        "amenity": ["college", "university"],
    }
    self.osmTypes = ["nodes", "ways", "relations"]
    self.sourceTable = "college_fr"
    self.sourceX = "lon"
    self.sourceY = "lat"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "amenity": "college",
        "source": u"data.gouv.fr:Office national d'information sur les enseignements et les professions - 11/2011"
    }
    self.defaultTagMapping = {
        "name": "nom",
        "short_name": "sigle",
        # Any of the private statuses maps to operator:type=private.
        "operator:type": lambda res: "private" if res["statut"] in [u"CFA privé", u"Privé hors contrat", u"Privé reconnu", u"Privé sous contrat"] else None,
    }
    self.conflationDistance = 50
    self.text = lambda tags, fields: {"en": " - ".join(filter(lambda i: i != "None", [fields["sigle"], fields["nom"]]))}
# TransGironde bus stops (old-style Analyser_Merge config): conflates the
# datalocale.fr CSV of regular-line stops with OSM highway=bus_stop, matched on
# ref:FR:TransGironde; name takes the part after " - " in the source "nom".
def __init__(self, config, logger = None): self.missing_official = {"item":"8040", "class": 41, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop not integrated") } self.possible_merge = {"item":"8041", "class": 43, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop, integration suggestion") } Analyser_Merge.__init__(self, config, logger) self.officialURL = "http://www.datalocale.fr/drupal7/dataset/ig_transgironde_pa" self.officialName = "Localisation des points d'arrêts des lignes régulières du réseau TransGironde" self.csv_file = "merge_data/public_transport_FR_transgironde.csv" self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER" self.osmTags = {"highway": "bus_stop"} self.osmRef = "ref:FR:TransGironde" self.osmTypes = ["nodes", "ways"] self.sourceTable = "transgironde" self.sourceX = "lon" self.sourceY = "lat" self.sourceSRID = "4326" self.defaultTag = { "source": "Conseil général de la Gironde - 03/2013", "highway": "bus_stop", "public_transport": "stop_position", "bus": "yes", "network": "TransGironde" } self.defaultTagMapping = { "ref:FR:TransGironde": "numero_peg", "name": lambda res: self.replace(res['nom'].split(' - ')[1]), } self.conflationDistance = 100 self.text = lambda tags, fields: {"en": u"TransGironde stop of %s" % fields["nom"], "fr": u"Arrêt TransGironde de %s" % fields["nom"]}
# CG71 bus/coach stops (new-style config): ISO-8859-15 CSV with ';' separator
# and decimal-comma coordinates (hence float_comma); matched on ref:FR:CG71.
# The name keeps the part after " - " when present, stripped of whitespace.
def __init__(self, config, logger = None): self.missing_official = {"item":"8040", "class": 61, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop not integrated") } self.possible_merge = {"item":"8041", "class": 63, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop, integration suggestion") } Analyser_Merge.__init__(self, config, logger, Source( url = "http://www.opendata71.fr/thematiques/transport/localisation-des-points-d-arret-de-bus", name = u"Localisation des arrêts de bus et car - CG71", file = "public_transport_FR_cg71.csv.bz2", encoding = "ISO-8859-15", csv = CSV(separator = ";")), Load("latitude", "longitude", table = "bus_cg71", xFunction = self.float_comma, yFunction = self.float_comma), Mapping( select = Select( types = ["nodes", "ways"], tags = {"highway": "bus_stop"}), osmRef = "ref:FR:CG71", conflationDistance = 100, generate = Generate( static = { "source": u"Conseil général de la Saône-et-Loire - Direction des Transports et de l'intermodalité - 03/2013", "highway": "bus_stop", "public_transport": "stop_position", "bus": "yes"}, mapping = { "ref:FR:CG71": "cod_arret", "name": lambda res: res['nom'].split(' - ')[1].strip() if ' - ' in res['nom'] else res['nom'].strip()}, text = lambda tags, fields: {"en": u"CG71 stop of %s" % fields["nom"].strip(), "fr": u"Arrêt CG71 de %s" % fields["nom"].strip()} )))
# Updates the wikipedia=* tag on French commune boundary relations
# (admin_level 8), keyed by ref:INSEE, from a prebuilt insee->title CSV dump.
def __init__(self, config, logger=None): self.update_official = { "item": "8101", "class": 100, "level": 3, "tag": ["merge", "wikipedia"], "desc": T_(u"Update Wikipedia tag") } Analyser_Merge.__init__( self, config, logger, "http://wikipedia.fr", "wikipedia insee", CSV(Source(file="wikipedia_insee_FR.csv.bz2")), Load(create=""" insee VARCHAR(254) PRIMARY KEY, title VARCHAR(254)"""), Mapping(select=Select(types=["relations"], tags={ "type": "boundary", "boundary": "administrative", "admin_level": "8" }), osmRef="ref:INSEE", generate=Generate( mapping1={ "ref:INSEE": "insee", "wikipedia": lambda res: "fr:" + res["title"] })))
# Alert-C traffic location points; one analyser instance per (class, tcd,
# stcd) combination, with the error class derived as tcd*100+stcd. The source
# stores coordinates as integer 1e-5 degrees, hence the /100000 conversion.
def __init__(self, config, logger, level, desc, osmTags, osmTypes, c, tcd, stcd, threshold): self.missing_official = { "item": "7110", "class": tcd * 100 + stcd, "level": level, "tag": ["merge", "highway"], "desc": desc } Analyser_Merge.__init__( self, config, logger, "http://diffusion-numerique.info-routiere.gouv.fr/tables-alert-c-a4.html", "Alert-C-point", CSV(Source( fileUrl= "http://diffusion-numerique.info-routiere.gouv.fr/IMG/zip/Cederom_Alert-C_v11-0.zip", zip="Data/Mff/POINTS.DAT"), separator=";"), Load("XCOORD", "YCOORD", xFunction=lambda x: float(x) / 100000, yFunction=lambda y: float(y) / 100000, where=lambda res: res["CLASS"] == c and res["TCD"] == str(tcd) and res["STCD"] == str(stcd)), Mapping(select=Select(types=osmTypes, tags=osmTags), conflationDistance=threshold))
def __init__(self, config, logger = None):
    """Conflate Bordeaux public-toilets open data with OSM amenity=toilets."""
    # FIX: desc previously read "Bordeaux bicycle parking not integrated" — a
    # copy/paste error; this analyser handles public toilets (see officialName
    # below). Same item/class as the newer toilets analyser, which already uses
    # the correct wording.
    self.missing_official = {"item":"8180", "class": 1, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Bordeaux toilets not integrated") }
    Analyser_Merge.__init__(self, config, logger)
    self.officialURL = "http://opendata.bordeaux.fr/content/toilettes-publiques"
    self.officialName = "Toilettes publiques"
    self.csv_file = "merge_data/public_equipment_FR_bordeaux_toilets.csv"
    self.csv_format = "WITH DELIMITER AS ';' NULL AS '' CSV HEADER"
    # Normalize decimal commas to dots so coordinates parse as floats.
    decsep = re.compile("([0-9]),([0-9])")
    self.csv_filter = lambda t: decsep.sub("\\1.\\2", t)
    self.osmTags = { "amenity": "toilets", }
    self.osmTypes = ["nodes", "ways"]
    self.sourceTable = "bordeaux_toilets"
    self.sourceX = "x_long"
    self.sourceY = "y_lat"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "source": "Ville de Bordeaux - 01/2014",
        "amenity": "toilets",
        "fee": "no",
        "access": "public",
    }
    self.defaultTagMapping = {
        "toilets:wheelchair": lambda res: "yes" if res["options"] == u"Handicapé" else None,
        "toilets:position": lambda res: "urinal" if res["typologie"] == u"Urinoir" else None,
    }
    self.conflationDistance = 100
# CG71 restaurants: a regex pre-filter rewrites decimal-comma lat/lon fields
# ("…,46xxxx,3yyyy,…" -> "…,46.xxxx,3.yyyy,…") before load; matched on
# ref:FR:CG71. Name strips a leading "restaurant "/"hôtel-restaurant " prefix
# and anything after a "/".
def __init__(self, config, logger = None): self.missing_official = {"item":"8240", "class": 11, "level": 3, "tag": ["merge", "amenity"], "desc": T_(u"Restaurant not integrated") } latlon = re.compile(",(4[0-9])([0-9]+),([0-9])([0-9]+),") start_restaurant = re.compile("^(hôtel-)?restaurant ", flags=re.IGNORECASE) final_name = re.compile("/.*$") Analyser_Merge.__init__(self, config, logger, Source( url = "http://opendata71interactive.cloudapp.net/DataBrowser/data/CG71Restaurants", name = u"Les restaurants en Saône-et-Loire - CG71", file = "restaurant_FR_cg71.csv.bz2", csv = CSV(quote = "$")), Load("longitude", "latitude", table = "restaurant_cg71", filter = lambda text: latlon.sub(",\\1.\\2,\\3.\\4,", text)), Mapping( select = Select( types = ["nodes", "ways"], tags = {"amenity": "restaurant"}), osmRef = "ref:FR:CG71", conflationDistance = 100, generate = Generate( static = { "source": u"Conseil général de la Saône-et-Loire - Agence de Développement Touristique - 03/2013", "amenity": "restaurant"}, mapping = { "amenity": lambda fields: self.amenity_type.get(fields["categorie"]) or "restaurant", "name": lambda fields: final_name.sub('', start_restaurant.sub('', fields["nom"])), "tourism": lambda fields: "hotel" if fields["type_restauration"] == u"Hotel-restaurant" else None, "cuisine": lambda fields: self.cuisine(fields), "website": "site_web", "stars": lambda fields: len(fields["note_guide_rouge_michelin"]) if fields["note_guide_rouge_michelin"] else None, }, text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["nom"], fields["type_restauration"], fields["categorie"], fields["adresse1"], fields["adresse2"], fields["adresse3"], fields["ville"]]))} )))
# VCUB bicycle-rental stations (Bordeaux Métropole shapefile, Lambert-93 /
# EPSG:2154, reprojected via srid=2154), matched on ref = NUMSTAT.
def __init__(self, config, logger = None): self.missing_official = {"item":"8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") } self.possible_merge = {"item":"8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") } self.update_official = {"item":"8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") } Analyser_Merge.__init__(self, config, logger, "http://data.bordeaux-metropole.fr/data.php?themes=10", u"Station VCUB", SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016", fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip = "TB_STVEL_P.shp", encoding = "ISO-8859-15")), Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154), Mapping( select = Select( types = ["nodes"], tags = {"amenity": "bicycle_rental"}), osmRef = "ref", conflationDistance = 100, generate = Generate( static1 = { "amenity": "bicycle_rental", "network": "VCUB"}, static2 = {"source": self.source}, mapping1 = { "name": "NOM", "ref": "NUMSTAT", "capacity": "NBSUPPOR", "vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None, "description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None} )))
# Nantes Métropole glass recycling containers (shapefile, EPSG:2154), loaded
# rows filtered to TYPE_DECHE = "verre"; matched on ref:FR:NM = ID_COLONNE.
def __init__(self, config, logger = None): self.missing_official = {"item":"8120", "class": 21, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"NM glass recycling not integrated") } self.possible_merge = {"item":"8121", "class": 23, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"NM glass recycling, integration suggestion") } self.update_official = {"item":"8122", "class": 24, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"NM glass recycling update") } Analyser_Merge.__init__(self, config, logger, "http://data.nantes.fr/donnees/detail/localisation-des-colonnes-aeriennes-de-nantes-metropole/", u"Localisation des colonnes aériennes de Nantes Métropole", SHP(Source(attribution = u"Nantes Métropole", millesime = "07/2016", fileUrl = "http://data.nantes.fr/fileadmin/data/datastore/nm/environnement/24440040400129_NM_NM_00119/COLONNES_AERIENNES_NM_shp_l93.zip", zip = "COLONNES_AERIENNES_NM.shp", encoding = "ISO-8859-15")), Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154, select = {"TYPE_DECHE": "verre"}), Mapping( select = Select( types = ["nodes", "ways"], tags = {"amenity": "recycling"}), osmRef = "ref:FR:NM", conflationDistance = 100, generate = Generate( static1 = { "amenity": "recycling", "recycling:glass_bottles": "yes", "recycling_type": "container"}, static2 = {"source": self.source}, mapping1 = {"ref:FR:NM": "ID_COLONNE"}, text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["TYPE_DECHE"], fields["VOIE"], fields["OBS"]]))} )))
# Gironde museums from the SIRTAQUI JSON feed (OData 'd' envelope extracted);
# self.degree converts the feed's coordinate format. Proximity-only
# conflation at 300 m (no osmRef); website gains an http:// prefix if missing.
def __init__(self, config, logger = None): self.missing_official = {"item":"8010", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum not integrated") } self.possible_merge = {"item":"8011", "class": 13, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum, integration suggestion") } Analyser_Merge.__init__(self, config, logger, "http://catalogue.datalocale.fr/dataset/liste-musees-aquitaine", u"Liste des musées et centres d'interprétation de Gironde", JSON(Source(attribution = u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com", millesime = "06/2016", fileUrl = "http://wcf.tourinsoft.com/Syndication/aquitaine/094df128-7ac5-43e5-a7e9-a5d752317674/Objects?$format=json"), extractor = lambda json: json['d']), Load("LON", "LAT", xFunction = self.degree, yFunction = self.degree), Mapping( select = Select( types = ["nodes", "ways"], tags = {"tourism": "museum"}), conflationDistance = 300, generate = Generate( static1 = {"tourism": "museum"}, static2 = {"source": self.source}, mapping1 = { "name": "NOMOFFRE", "ref:FR:CRTA": "SyndicObjectID", "website": lambda fields: None if not fields["URL"] else fields["URL"] if fields["URL"].startswith('http') else 'http://' + fields["URL"]}, text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]))} )))
# RTE power supports (towers, poles, terminals, portals, insulators), matched
# by proximity only: osmRef is deliberately commented out (see inline note)
# until the count of missing towers drops enough to start integrating refs.
def __init__(self, config, logger = None): self.missing_official = {"item":"8290", "class": 1, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power support not integrated") } self.missing_osm = {"item":"7200", "class": 2, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power support without ref") } self.possible_merge = {"item":"8291", "class": 3, "level": 3, "tag": ["merge", "power"], "desc": T_(u"Power support, integration suggestion") } Analyser_Merge.__init__(self, config, logger, "https://opendata.rte-france.com/explore/dataset/pylones/", u"Pylones RTE", CSV(Source(attribution = u"data.gouv.fr:RTE", millesime = "04/2017", fileUrl = "https://opendata.rte-france.com/explore/dataset/pylones/download/?format=csv&timezone=Europe/Berlin&use_labels_for_header=true"), separator = ";"), Load("Longitude du pylône (DD)", "Latitude du pylône (DD)"), Mapping( select = Select( types = ["nodes"], tags = [{"power": "tower", "operator": "RTE"}, {"power": "pole", "operator": "RTE"}, {"power": "terminal", "operator": "RTE"}, {"power": "portal", "operator": "RTE"}, {"power": "insulator", "operator": "RTE"}, {"power": "tower", "operator": False}, {"power": "pole", "operator": False}, {"power": "terminal", "operator": False}, {"power": "portal", "operator": False}, {"power": "insulator", "operator": False}]), # osmRef = "ref:FR:RTE", # Commented initial. Only issues missing tower. Then when the missing tower number lower, uncomment to integrate ref into OSM. conflationDistance = 10, generate = Generate( static1 = { "power": "tower", "operator": "RTE"}, static2 = {"source": self.source}, mapping1 = { "ref": "Numéro de pylône"}, mapping2 = { "height": "Hauteur du pylône (m)"})))
def __init__(self, config, logger = None):
    """Conflate Gironde museums (datalocale CSV, old-style config) with OSM tourism=museum."""
    # Issue classes reported by this analyser.
    self.missing_official = {"item":"8010", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum not integrated") }
    self.possible_merge = {"item":"8011", "class": 13, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum, integration suggestion") }
    Analyser_Merge.__init__(self, config, logger)
    # Upstream open-data source.
    self.officialURL = "http://www.datalocale.fr/drupal7/dataset/liste-musees-cdt33"
    self.officialName = "Liste des musées et centres d'interprétation de Gironde"
    self.csv_file = "merge_data/tourism_FR_gironde_museum.csv"
    self.csv_format = "WITH DELIMITER AS ',' NULL AS '' CSV HEADER"
    # Only museum rows from the dataset are loaded.
    self.csv_select = { "type": u"Musée" }
    # OSM side of the match.
    self.osmTags = { "tourism": "museum" }
    self.osmTypes = ["nodes", "ways"]
    # Source geometry: WGS84 lon/lat columns.
    self.sourceTable = "gironde_museum"
    self.sourceX = "longitude"
    self.sourceY = "latitude"
    self.sourceSRID = "4326"
    self.defaultTag = {
        "source": "Observatoire du comité départemental du Tourisme de la Gironde - 09/2013",
        "tourism": "museum"
    }
    self.defaultTagMapping = {
        "name": "raison_sociale",
    }
    # Proximity-only conflation (no osmRef).
    self.conflationDistance = 300
    self.text = lambda tags, fields: {
        "en": u"%s, %s %s %s" % (fields["raison_sociale"], fields["adresse"], fields["adresse_suite"], fields["commune"]),
    }
# RATP stations, parameterized per network: 'select' filters the "reseau"
# column, 'clas' offsets the issue classes, and caller-supplied osmTags /
# defaultTag specialize the match. Matched on ref:FR:RATP.
def __init__(self, config, logger, clas, select, osmTags, defaultTag): self.missing_official = {"item":"8040", "class": 1+10*clas, "level": 3, "tag": ["merge", "railway", "public transport"], "desc": T_(u"RATP station not integrated") } self.possible_merge = {"item":"8041", "class": 3+10*clas, "level": 3, "tag": ["merge", "railway", "public transport"], "desc": T_(u"RATP station, integration suggestion") } Analyser_Merge.__init__(self, config, logger) self.officialURL = "http://data.ratp.fr/fr/les-donnees/fiche-de-jeu-de-donnees/dataset/positions-geographiques-des-stations-du-reseau-ratp.html" self.officialName = "Positions géographiques des stations du réseau RATP" self.csv_file = "merge_data/ratp_arret_graphique.csv" self.csv_format = "WITH DELIMITER AS '#' NULL AS '' CSV" self.csv_select = { "reseau": select } self.osmTags = osmTags self.osmRef = "ref:FR:RATP" self.osmTypes = ["nodes", "ways"] self.sourceTable = "ratp" self.sourceX = "lon" self.sourceY = "lat" self.sourceSRID = "4326" self.defaultTag = { "source": "RATP - 07/2012", } self.defaultTag.update(defaultTag) self.defaultTagMapping = { "ref:FR:RATP": "id", "name": "nom_station", } self.conflationDistance = 100 self.text = lambda tags, fields: {"en": u"RATP station of %s" % tags["name"], "fr": u"Station RATP de %s" % tags["name"]}
# SNCF TER stations from the GTFS stops.txt, restricted to "StopArea:%" ids;
# uic_ref is derived by slicing the stop_id (drop prefix, country code and
# checksum digit). Matched on uic_ref with a wide 500 m radius.
def __init__(self, config, logger = None): self.missing_official = {"item":"8050", "class": 1, "level": 3, "tag": ["merge", "railway"], "desc": T_(u"Railway station not integrated") } self.missing_osm = {"item":"7100", "class": 2, "level": 3, "tag": ["merge", "railway"], "desc": T_(u"Railway station without uic_ref or invalid") } self.possible_merge = {"item":"8051", "class": 3, "level": 3, "tag": ["merge", "railway"], "desc": T_(u"Railway station, integration suggestion") } Analyser_Merge.__init__(self, config, logger, "https://ressources.data.sncf.com/explore/dataset/sncf-ter-gtfs/", u"Horaires prévus des trains TER", CSV(Source(attribution = u"SNCF", millesime = "03/2017", fileUrl = "http://medias.sncf.com/sncfcom/open-data/gtfs/export-TER-GTFS-LAST.zip", zip = "stops.txt")), Load("stop_lon", "stop_lat", select = {"stop_id": "StopArea:%"}), Mapping( select = Select( types = ["nodes", "ways"], tags = {"railway": ["station", "halt"]}), osmRef = "uic_ref", conflationDistance = 500, generate = Generate( static1 = { "railway": "station", "operator": "SNCF"}, static2 = {"source": self.source}, mapping1 = {"uic_ref": lambda res: res["stop_id"].split(":")[1][3:].split("-")[-1][:-1]}, mapping2 = {"name": lambda res: res["stop_name"].replace("gare de ", "")}, text = lambda tags, fields: {"en": fields["stop_name"][0].upper() + fields["stop_name"][1:]} )))
# Bordeaux public toilets (new-style config): ';'-separated CSV with
# decimal-comma X/Y columns (hence float_comma); proximity conflation at 100 m.
def __init__(self, config, logger = None): self.missing_official = {"item":"8180", "class": 1, "level": 3, "tag": ["merge", "public equipment"], "desc": T_(u"Bordeaux toilets not integrated") } Analyser_Merge.__init__(self, config, logger, Source( url = "http://opendata.bordeaux.fr/content/toilettes-publiques", name = u"Toilettes publiques", file = "public_equipment_FR_bordeaux_toilets.csv.bz2", csv = CSV(separator = ";")), Load("X_LONG", "Y_LAT", table = "bordeaux_toilets", xFunction = self.float_comma, yFunction = self.float_comma), Mapping( select = Select( types = ["nodes", "ways"], tags = {"amenity": "toilets"}), conflationDistance = 100, generate = Generate( static = { "source": u"Ville de Bordeaux - 01/2014", "amenity": "toilets", "fee": "no", "access": "public"}, mapping = { "toilets:wheelchair": lambda res: "yes" if res["OPTIONS"] == u"Handicapé" else None, "toilets:position": lambda res: "urinal" if res["TYPOLOGIE"] == u"Urinoir" else None} )))
# French schools (ministry geolocation dataset); issue classes are offset by
# the caller-supplied 'classs'. csv_filter replaces empty-ish cells with
# "null" and normalizes common spelling variants in school names. Matched on
# ref:UAI. No sourceSRID is set here — presumably the projection comes from
# elsewhere (caller or base-class default); verify before relying on it.
def __init__(self, config, classs, logger = None): self.missing_official = {"item":"8030", "class": classs+1, "level": 3, "tag": ["merge"], "desc": T_(u"School not integrated") } self.missing_osm = {"item":"7070", "class": classs+2, "level": 3, "tag": ["merge"], "desc": T_(u"School without ref:UAI or invalid") } self.possible_merge = {"item":"8031", "class": classs+3, "level": 3, "tag": ["merge"], "desc": T_(u"School, integration suggestion") } Analyser_Merge.__init__(self, config, logger) self.officialURL = "http://www.data.gouv.fr/donnees/view/G%C3%A9olocalisation-des-%C3%A9tablissements-d%27enseignement-du-premier-degr%C3%A9-et-du-second-degr%C3%A9-du-minist%C3%A8re-d-30378093" self.officialName = "établissements d'enseignement du premier degré et du second degré" self.csv_file = "merge_data/MENJVA_etab_geoloc.csv" self.csv_format = "WITH DELIMITER AS ';' NULL AS 'null' CSV HEADER" self.csv_encoding = "ISO-8859-15" self.csv_filter = lambda t: t.replace("; ", ";null").replace(";.", ";null").replace("Ecole", u"École").replace("Saint ", "Saint-").replace("Sainte ", "Sainte-").replace(u"élementaire", u"élémentaire") self.osmTags = { "amenity": ["school", "kindergarten"], } self.osmRef = "ref:UAI" self.osmTypes = ["nodes", "ways", "relations"] self.sourceTable = "School_FR" self.sourceX = "X" self.sourceY = "Y" self.defaultTag = { "amenity": "school", "source": "data.gouv.fr:Ministère de l'Éducation nationale, de la Jeunesse et de la Vie associative - 05/2012" } self.defaultTagMapping = { "ref:UAI": "numero_uai", "school:FR": self.School_FR, "name": "appellation_officielle_uai", "operator:type": lambda res: "private" if "PRIVE" in res["denomination_principale_uai"] else None, } self.conflationDistance = 50 self.text = lambda tags, fields: {"en":fields["appellation_officielle_uai"] if fields["appellation_officielle_uai"] else ""}
# CAPP glass recycling containers: CSV inside a zip, decimal-comma X/Y
# (already WGS84 per the inline note); only rows with USAGE_ = "En service".
# Proximity-only conflation at 100 m.
def __init__(self, config, logger=None): self.missing_official = { "item": "8120", "class": 11, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"CAPP glass recycling not integrated") } Analyser_Merge.__init__( self, config, logger, "http://opendata.agglo-pau.fr/index.php/fiche?idQ=8", u"Point d'apport volontaire du verre : Bornes à verres sur la CAPP", # Dod_Bav_CC43.csv is in WGS84 CSV( Source(attribution=u"Communauté d'Agglomération Pau-Pyrénées", millesime="01/2013", fileUrl= "http://opendata.agglo-pau.fr/sc/call.php?f=1&idf=8", zip="Dod_Bav_CC43.csv")), Load("X", "Y", xFunction=self.float_comma, yFunction=self.float_comma, select={"USAGE_": "En service"}), Mapping(select=Select(types=["nodes", "ways"], tags={"amenity": "recycling"}), conflationDistance=100, generate=Generate(static1={ "amenity": "recycling", "recycling:glass_bottles": "yes", "recycling_type": "container" }, static2={"source": self.source})))
# Bordeaux Métropole voluntary-drop recycling points (shapefile, EPSG:2154);
# the select {"IDENT": "%"} keeps only rows with a non-empty identifier.
# Matched on ref:FR:CUB = IDENT.
def __init__(self, config, logger = None): self.missing_official = {"item":"8120", "class": 1, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"BM glass recycling not integrated") } self.possible_merge = {"item":"8121", "class": 3, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"BM glass recycling, integration suggestion") } self.update_official = {"item":"8122", "class": 4, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"BM glass recycling update") } Analyser_Merge.__init__(self, config, logger, "http://data.bordeaux-metropole.fr/data.php?themes=5", u"Emplacements d'apport volontaire", SHP(Source(attribution = u"Bordeaux Métropole", millesime = "08/2016", fileUrl = "http://data.bordeaux-metropole.fr/files.php?gid=69&format=2", zip = "EN_EMPAC_P.shp", encoding = "ISO-8859-15")), Load(("ST_X(geom)",), ("ST_Y(geom)",), srid = 2154, select = {"IDENT": "%"}), Mapping( select = Select( types = ["nodes", "ways"], tags = {"amenity": "recycling"}), osmRef = "ref:FR:CUB", conflationDistance = 100, generate = Generate( static1 = { "amenity": "recycling", "recycling:glass": "yes", "recycling:glass_bottles": "yes", "recycling_type": "container"}, static2 = {"source": self.source}, mapping1 = {"ref:FR:CUB": "IDENT"} )))
# Updates the wikipedia=* tag on French commune boundary relations
# (admin_level 8), keyed by ref:INSEE, from a prebuilt insee->title table
# loaded out of wikipedia_insee_FR.csv.bz2.
def __init__(self, config, logger=None): self.update_official = { "item": "8101", "class": 100, "level": 3, "tag": ["merge", "wikipedia"], "desc": T_(u"Update Wikipedia tag"), } Analyser_Merge.__init__( self, config, logger, Source(url="http://wikipedia.fr", name="wikipedia insee", file="wikipedia_insee_FR.csv.bz2"), Load( table="wikipedia_insee_FR", create=""" insee VARCHAR(254) PRIMARY KEY, title VARCHAR(254)""", ), Mapping( select=Select( types=["relations"], tags={"type": "boundary", "boundary": "administrative", "admin_level": "8"} ), osmRef="ref:INSEE", generate=Generate(mapping={"ref:INSEE": "insee", "wikipedia": lambda res: "fr:" + res["title"]}), ), )
# Aquitaine restaurants from the SIRTAQUI JSON feed; rows pre-filtered to
# known TYPRES values and CATRES categories, amenity derived from
# self.amenity_type, and diet/organic tags inferred from SPECIALITES keywords.
# Proximity-only conflation at 200 m.
def __init__(self, config, logger = None): self.missing_official = {"item":"8240", "class": 1, "level": 3, "tag": ["merge", "amenity"], "desc": T_(u"Restaurant not integrated") } Analyser_Merge.__init__(self, config, logger, "http://catalogue.datalocale.fr/dataset/liste-restaurants-aquitaine", u"Liste des restaurants en Aquitaine", JSON(Source(attribution = u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com", millesime = "06/2016", fileUrl = "http://wcf.tourinsoft.com/Syndication/aquitaine/e150e425-fbb6-4e32-916b-5bfc47171c3c/Objects?$format=json"), extractor = lambda json: json['d']), Load("LON", "LAT", select = { 'TYPRES': [u"Restaurant", u"Hôtel restaurant", u"Ferme auberge"], 'CATRES': self.amenity_type.keys()}, xFunction = self.degree, yFunction = self.degree), Mapping( select = Select( types = ["nodes", "ways"], tags = {"amenity": ["restaurant", "fast_food", "bar", "pub", "cafe"]}), conflationDistance = 200, generate = Generate( static2 = {"source": self.source}, mapping1 = { "amenity": lambda fields: self.amenity_type[fields["CATRES"]], "name": "NOMOFFRE", "ref:FR:CRTA": "SyndicObjectID", "tourism": lambda fields: "hotel" if fields["TYPRES"] == u"Hôtel restaurant" else None, "cuisine": lambda fields: self.cuisine(fields), "diet:kosher": lambda fields: "yes" if fields["SPECIALITES"] and u"Cuisine casher" in fields["SPECIALITES"] else None, "diet:vegetarian ": lambda fields: "yes" if fields["SPECIALITES"] and u"Cuisine végétarienne" in fields["SPECIALITES"] else None, "organic": lambda fields: "only" if fields["SPECIALITES"] and u"Cuisine bio" in fields["SPECIALITES"] else None, "website": lambda fields: None if not fields["URL"] else fields["URL"] if fields["URL"].startswith('http') else 'http://' + fields["URL"]}, text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["TYPRES"], fields["CATRES"], fields["SPECIALITES"], fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], 
fields["AD3"], fields["CP"], fields["COMMUNE"]]))} )))
# CAPP IDEcycle bicycle-rental stations: the Source filter strips NUL bytes
# from the CSV before parsing; decimal-comma coordinates via float_comma.
# Proximity-only conflation at 100 m.
def __init__(self, config, logger = None): self.missing_official = {"item":"8160", "class": 11, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"CAPP bicycle rental not integrated") } Analyser_Merge.__init__(self, config, logger, "http://opendata.agglo-pau.fr/index.php/fiche?idQ=14", u"Stations Idécycle du réseau Idelis sur la CAPP", CSV(Source(attribution = u"Communauté d'Agglomération Pau-Pyrénées", millesime = "01/2013", fileUrl = "http://opendata.agglo-pau.fr/sc/call.php?f=1&idf=14", zip = "Idecycl_WGS84.csv", filter = lambda t: t.replace("\0", ""))), Load("X", "Y", xFunction = self.float_comma, yFunction = self.float_comma), Mapping( select = Select( types = ["nodes"], tags = {"amenity": "bicycle_rental"}), conflationDistance = 100, generate = Generate( static1 = { "amenity": "bicycle_rental", "operator": "IDEcycle"}, static2 = {"source": self.source}, mapping1 = { "name": "NOM", "capacity": "Nb_velo", "vending": lambda res: "subscription" if res["Borne_pai"] == "Oui" else None } )))
# Aquitaine tourist-information offices from the SIRTAQUI JSON feed; wide
# 1 km proximity conflation (offices are sparse). website gains an http://
# prefix when missing; phone is a lower-priority (mapping2) suggestion.
def __init__(self, config, logger = None): self.missing_official = {"item":"8010", "class": 21, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde tourism information not integrated") } self.possible_merge = {"item":"8011", "class": 23, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde tourism information, integration suggestion") } Analyser_Merge.__init__(self, config, logger, "http://catalogue.datalocale.fr/dataset/liste-points-infos-tourisme-aquitaine", u"Liste des points infos tourisme en Aquitaine ", JSON(Source(attribution = u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com", millesime = "06/2016", fileUrl = "http://wcf.tourinsoft.com/Syndication/aquitaine/0c7230f7-94ec-473b-9dce-e4cf38fedb44/Objects?$format=json"), extractor = lambda json: json['d']), Load("LON", "LAT", xFunction = self.degree, yFunction = self.degree), Mapping( select = Select( types = ["nodes", "ways"], tags = {"tourism": "information"}), conflationDistance = 1000, generate = Generate( static1 = { "tourism": "information", "information": "office"}, static2 = {"source": self.source}, mapping1 = { "name": "NOMOFFRE", "ref:FR:CRTA": "SyndicObjectID", "website": lambda fields: None if not fields["URL"] else fields["URL"] if fields["URL"].startswith('http') else 'http://' + fields["URL"]}, mapping2 = {"phone": "TEL"}, text = lambda tags, fields: {"en": ', '.join(filter(lambda x: x != "None", [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]))} )))
# VCUB bicycle-rental stations (Bordeaux Métropole shapefile, EPSG:2154),
# matched on ref = NUMSTAT; subscription terminals and VCUB+ pricing are
# mapped to vending/description tags.
def __init__(self, config, logger=None): self.missing_official = { "item": "8160", "class": 1, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental not integrated") } self.possible_merge = { "item": "8161", "class": 3, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle rental integration suggestion") } self.update_official = { "item": "8162", "class": 4, "level": 3, "tag": ["merge", "public equipment", "cycle"], "desc": T_(u"BM bicycle update") } Analyser_Merge.__init__( self, config, logger, "http://data.bordeaux-metropole.fr/data.php?themes=10", u"Station VCUB", SHP( Source( attribution=u"Bordeaux Métropole", millesime="08/2016", fileUrl= "http://data.bordeaux-metropole.fr/files.php?gid=43&format=2", zip="TB_STVEL_P.shp", encoding="ISO-8859-15")), Load(("ST_X(geom)", ), ("ST_Y(geom)", ), srid=2154), Mapping(select=Select(types=["nodes"], tags={"amenity": "bicycle_rental"}), osmRef="ref", conflationDistance=100, generate=Generate( static1={ "amenity": "bicycle_rental", "network": "VCUB" }, static2={"source": self.source}, mapping1={ "name": "NOM", "ref": "NUMSTAT", "capacity": "NBSUPPOR", "vending": lambda res: "subscription" if res["TERMBANC"] == "OUI" else None, "description": lambda res: "VCUB+" if res["TARIF"] == "VLS PLUS" else None })))
# TransGironde stops from GTFS stops.txt (zip inside the datalocale export);
# ref:FR:TransGironde is the part after ':' in stop_id, and name keeps the
# part after " - " when present. Matched on ref:FR:TransGironde.
def __init__(self, config, logger=None): self.missing_official = { "item": "8040", "class": 41, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop not integrated") } self.possible_merge = { "item": "8041", "class": 43, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop, integration suggestion") } self.update_official = { "item": "8042", "class": 44, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TransGironde stop update") } Analyser_Merge.__init__( self, config, logger, "http://catalogue.datalocale.fr/dataset/liste-lignereguliere-transgironde", u"Horaires des lignes régulières du réseau transgironde", CSV( Source( attribution=u"Conseil général de la Gironde", millesime="12/2015", fileUrl= "http://catalogue.datalocale.fr/storage/f/2015-12-07T101339/ExportGTFS_30-11-15.zip", zip="Export GTFS au 30-11-15/stops.txt")), Load("stop_lon", "stop_lat"), Mapping( select=Select(types=["nodes", "ways"], tags={"highway": "bus_stop"}), osmRef="ref:FR:TransGironde", conflationDistance=100, generate=Generate( static1={ "highway": "bus_stop", "public_transport": "stop_position", "bus": "yes", "network": "TransGironde" }, static2={"source": self.source}, mapping1={ "ref:FR:TransGironde": lambda res: res["stop_id"].split(':')[1], "name": lambda res: res['stop_name'].split(' - ')[1] if len(res['stop_name'].split(' - ')) > 1 else None }, text=lambda tags, fields: { "en": u"TransGironde stop of %s" % fields["stop_name"], "fr": u"Arrêt TransGironde de %s" % fields["stop_name"] })))
# Wikipedia-World article conflation, parameterized per article type/country/
# language; after the base __init__, the load/mapping objects are patched in
# place (type filter and OSM tag selection).
# NOTE(review): the final branch does `t.update(self.osmTags)` for list-typed
# osmTags, but this new-style __init__ never assigns self.osmTags — it looks
# like it should merge self.mapping.select.tags' base entry instead; verify
# (as written it would presumably raise AttributeError on that path).
# NOTE(review): `wikiTypes != None` / `starts == None` should idiomatically be
# `is not None` / `is None`.
def __init__(self, config, classs, desc, wikiTypes, wikiCountry, wikiLang, starts, osmTags, osmTypes, conflationDistance, logger=None): self.possible_merge = { "item": "8101", "class": classs, "level": 3, "tag": ["merge", "wikipedia"], "desc": desc } Analyser_Merge.__init__( self, config, logger, "http://toolserver.org/~kolossos/wp-world/pg-dumps/wp-world/", "Wikipedia-World", CSV(Source(file="wikipedia_point_fr.csv.bz2"), csv=False, separator=None, null=None), Load(("ST_X(the_geom)", ), ("ST_Y(the_geom)", ), create=self.create_table, select={ "lang": wikiLang, "Country": wikiCountry }, where=(lambda res: not res["titel"].startswith("Liste ")) if starts == None else (lambda res: res["titel"].startswith(starts))), Mapping(select=Select(types=osmTypes, tags={"name": None}), osmRef="wikipedia", conflationDistance=conflationDistance, generate=Generate(mapping1={ "wikipedia": lambda fields: fields["lang"] + ":" + fields["titel"] }, text=lambda tags, fields: {fields["lang"]: fields["titel"]}))) if wikiTypes != None: self.load.select[ "types"] = wikiTypes # http://en.wikipedia.org/wiki/Wikipedia:GEO#type:T if isinstance(osmTags, dict): self.mapping.select.tags[0].update(osmTags) else: for t in osmTags: t.update(self.osmTags) self.mapping.select.tags = osmTags
# TBM physical stops (Bordeaux Métropole shapefile, EPSG:2154), restricted to
# rows whose RESEAU is empty or "BUS"; shelter=yes/no is inferred from the
# street-furniture description (abribus vs poteau). Proximity-only at 100 m.
def __init__(self, config, logger=None): self.missing_official = { "item": "8040", "class": 51, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TBM stop not integrated") } self.possible_merge = { "item": "8041", "class": 53, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"TBM stop, integration suggestion") } Analyser_Merge.__init__( self, config, logger, "http://data.bordeaux-metropole.fr/data.php?themes=10", u"Arrêt physique sur le réseau", SHP( Source( attribution=u"Bordeaux Métropole", millesime="07/2016", fileUrl= "http://data.bordeaux-metropole.fr/files.php?gid=39&format=2", zip="TB_ARRET_P.shp", encoding="ISO-8859-15")), Load(("ST_X(geom)", ), ("ST_Y(geom)", ), srid=2154, select={"RESEAU": [None, "BUS"]}), Mapping(select=Select(types=["nodes", "ways"], tags={"highway": "bus_stop"}), conflationDistance=100, generate=Generate( static1={ "highway": "bus_stop", "public_transport": "stop_position", "bus": "yes", "network": "TBM" }, static2={"source": self.source}, mapping2={ "name": lambda res: res['NOMARRET'], "shelter": lambda res: "yes" if res["MOBILIE1"] and "abribus" in res["MOBILIE1"].lower() else "no" if res["MOBILIE1"] and "poteau" in res[ "MOBILIE1"].lower() else None }, text=lambda tags, fields: { "en": u"TBM stop %s" % fields["NOMARRET"], "fr": u"Arrêt TBM %s" % fields["NOMARRET"] })))
def __init__(self, config, logger=None):
    """Merge the SIRTAQUI list of Gironde museums with OSM tourism=museum objects."""
    self.missing_official = {"item": "8010", "class": 11, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum not integrated")}
    self.possible_merge = {"item": "8011", "class": 13, "level": 3, "tag": ["merge", "tourism"], "desc": T_(u"Gironde museum, integration suggestion")}

    def website(fields):
        # Normalize the URL field: empty -> no tag; scheme-less -> http://.
        url = fields["URL"]
        if not url:
            return None
        if url.startswith('http'):
            return url
        return 'http://' + url

    def describe(tags, fields):
        # Human-readable locator from the address columns; the feed encodes
        # missing values as the string "None", which are filtered out.
        parts = [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]
        return {"en": ', '.join(p for p in parts if p != "None")}

    Analyser_Merge.__init__(self, config, logger,
        "http://catalogue.datalocale.fr/dataset/liste-musees-aquitaine",
        u"Liste des musées et centres d'interprétation de Gironde",
        JSON(Source(
                attribution=u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com",
                millesime="06/2016",
                fileUrl="http://wcf.tourinsoft.com/Syndication/aquitaine/094df128-7ac5-43e5-a7e9-a5d752317674/Objects?$format=json"),
            extractor=lambda json: json['d']),
        Load("LON", "LAT", xFunction=self.degree, yFunction=self.degree),
        Mapping(
            select=Select(types=["nodes", "ways"], tags={"tourism": "museum"}),
            conflationDistance=300,
            generate=Generate(
                static1={"tourism": "museum"},
                static2={"source": self.source},
                mapping1={
                    "name": "NOMOFFRE",
                    "ref:FR:CRTA": "SyndicObjectID",
                    "website": website,
                },
                text=describe)))
def __init__(self, config, logger=None):
    """Merge Grand Nancy STAN network GTFS stops with OSM bus stops.

    Fix: the issue descriptions previously read "CG71 stop ...", copy-pasted
    from the Saône-et-Loire analyser; this analyser covers the STAN network
    (see the osmRef "ref:FR:STAN" and the text lambda below).
    """
    self.missing_official = {"item": "8040", "class": 91, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"STAN stop not integrated")}
    self.possible_merge = {"item": "8041", "class": 93, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"STAN stop, integration suggestion")}
    Analyser_Merge.__init__(self, config, logger,
        "http://opendata.grandnancy.eu/jeux-de-donnees/detail-dune-fiche-de-donnees/?tx_icsoddatastore_pi1%5Buid%5D=108&tx_icsoddatastore_pi1%5BreturnID%5D=447",
        u"Réseau Stan: horaires et lignes",
        GTFS(Source(
            attribution=u"Métropole du Grand Nancy",
            millesime="06/2017",
            fileUrl="http://opendata.grandnancy.eu/?eID=ics_od_datastoredownload&file=333")),
        Load("stop_lon", "stop_lat"),
        Mapping(
            select=Select(types=["nodes", "ways"], tags=[{"highway": "bus_stop"}, {"public_transport": "stop_position"}]),
            # Very tight distance (2 m) -- matching is driven by the ref.
            conflationDistance=2,
            osmRef="ref:FR:STAN",
            generate=Generate(
                static1={"highway": "bus_stop", "public_transport": "stop_position", "bus": "yes"},
                static2={"source": self.source},
                mapping1={
                    "ref:FR:STAN": "stop_code",
                    # GTFS wheelchair_boarding value translated via lookup table.
                    "wheelchair": lambda fields: self.wheelchair_boarding[fields.get("wheelchair_boarding")],
                },
                mapping2={"name": "stop_name"},
                text=lambda tags, fields: {"en": u"STAN stop of %s" % fields["stop_name"], "fr": u"Arrêt STAN de %s" % fields["stop_name"]})))
def __init__(self, config, error_file, logger, url, name, source=Source(), load=Load(), mapping=Mapping()):
    # Record the externally supplied error output file, then delegate the
    # rest of the setup to the regular Analyser_Merge constructor.
    # NOTE(review): the Source()/Load()/Mapping() defaults are evaluated once
    # at definition time and shared across all calls using the defaults --
    # confirm callees never mutate them, or switch to None sentinels.
    self.error_file = error_file
    Analyser_Merge.__init__(self, config, logger, url, name, source, load, mapping)
def __init__(self, config, logger=None):
    """Merge SITCOM (Côte Sud Landes) drop-off recycling containers with OSM."""
    self.missing_official = {"item": "2042", "class": 31, "level": 3, "tag": ["missing_official", "recycling"], "desc": T_(u"SITCOM recycling not integrated")}
    self.possible_merge = {"item": "2044", "class": 33, "level": 3, "tag": ["possible_merge", "recycling"], "desc": T_(u"SITCOM recycling, integration suggestion")}
    self.update_official = {"item": "2045", "class": 34, "level": 3, "tag": ["update_official", "recycling"], "desc": T_(u"SITCOM recycling update")}
    data = CSV(Source(
        attribution=u"Sitcom Côte Sud Landes",
        millesime="07/2017",
        file="recycling_FR_sitcom.csv.bz2"))
    Analyser_Merge.__init__(self, config, logger,
        "http://www.sitcom40.fr/",
        u"Emplacements d'apport volontaire",
        data,
        # The source columns are swapped (lat/lon inverted): "Y" feeds the X
        # coordinate and "X" feeds Y; decimals use commas.
        Load("Y", "X",
            xFunction=self.float_comma,
            yFunction=self.float_comma),
        Mapping(
            select=Select(types=["nodes", "ways"], tags={"amenity": "recycling", "recycling_type": "container"}),
            osmRef="ref:FR:SITCOM",
            conflationDistance=200,
            generate=Generate(
                static1={"amenity": "recycling", "recycling_type": "container"},
                static2={"source": self.source},
                mapping1={"ref:FR:SITCOM": "Cle"},
                text=lambda tags, fields: {"en": fields["Nom du point"]})))
def __init__(self, config, logger=None):
    """Merge CG71 (Saône-et-Loire) bus and coach stops with OSM bus stops."""
    self.missing_official = {"item": "8040", "class": 61, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop not integrated")}
    self.possible_merge = {"item": "8041", "class": 63, "level": 3, "tag": ["merge", "public transport"], "desc": T_(u"CG71 stop, integration suggestion")}

    def short_name(res):
        # Keep the part after " - " when present (presumably a line or area
        # prefix in the upstream labels), trimmed of surrounding whitespace.
        label = res['nom']
        if ' - ' in label:
            return label.split(' - ')[1].strip()
        return label.strip()

    Analyser_Merge.__init__(self, config, logger,
        "http://www.opendata71.fr/thematiques/transport/localisation-des-points-d-arret-de-bus",
        u"Localisation des arrêts de bus et car - CG71",
        CSV(Source(
            attribution=u"Conseil général de la Saône-et-Loire - Direction des Transports et de l'intermodalité",
            millesime="02/2015",
            fileUrl="http://opendata71interactive.cloudapp.net/DataBrowser/DownloadCsv?container=dataviz&entitySet=CG71DTIPointsArret&filter=NOFILTER")),
        # NOTE(review): the "latitude" column feeds X and "longitude" feeds Y,
        # same as the older version of this analyser -- confirm the upstream
        # columns really are swapped.
        Load("latitude", "longitude",
            xFunction=self.float_comma,
            yFunction=self.float_comma),
        Mapping(
            select=Select(types=["nodes", "ways"], tags={"highway": "bus_stop"}),
            osmRef="ref:FR:CG71",
            conflationDistance=100,
            generate=Generate(
                static1={"highway": "bus_stop", "public_transport": "stop_position", "bus": "yes"},
                static2={"source": self.source},
                mapping1={"ref:FR:CG71": "cod_arret"},
                mapping2={"name": short_name},
                text=lambda tags, fields: {"en": u"CG71 stop of %s" % fields["nom"].strip(), "fr": u"Arrêt CG71 de %s" % fields["nom"].strip()})))
def __init__(self, config, logger=None):
    """Conflate the national La Poste contact-point open data with OSM post offices.

    Matches on ref:FR:LaPoste within 1 km; emits items 8020 (missing),
    7050 (OSM office without ref), 8021 (suggestion) and 8022 (update).

    Fix: the text lambda read fields["Code postal"] (space) while every other
    access and mapping1 use the underscore column name "Code_postal"; the
    space variant would raise KeyError.
    """
    self.missing_official = {"item": "8020", "class": 1, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post office not integrated")}
    self.missing_osm = {"item": "7050", "class": 2, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post office without ref:FR:LaPoste")}
    self.possible_merge = {"item": "8021", "class": 3, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post office, integration suggestion")}
    self.update_official = {"item": "8022", "class": 4, "level": 3, "tag": ["merge", "post"], "desc": T_(u"Post office update")}
    # Name normalization patterns: trailing " A" -> " Annexe",
    # " PAL" -> " Principal", trailing " AP"/" BP" dropped.
    self.Annexe = re.compile(' A$')
    self.Principal = re.compile(' PAL$')
    self.APBP = re.compile(' (AP|BP)$')
    Analyser_Merge.__init__(self, config, logger,
        "https://www.data.gouv.fr/fr/datasets/liste-des-points-de-contact-du-reseau-postal-francais-et-horaires",
        u"Liste des points de contact du réseau postal français et horaires",
        CSV(Source(attribution=u"data.gouv.fr:LaPoste", millesime="06/2015", file="poste_FR.csv.bz2", encoding="ISO-8859-15"),
            separator=";"),
        Load("Longitude", "Latitude"),
        Mapping(
            select=Select(types=["nodes", "ways"], tags={"amenity": "post_office"}),
            osmRef="ref:FR:LaPoste",
            conflationDistance=1000,
            generate=Generate(
                static1={
                    "amenity": "post_office",
                    "operator": "La Poste"},
                static2={"source": self.source},
                mapping1={
                    "ref:FR:LaPoste": "#Identifiant",
                    # NOTE(review): "commnunale" looks misspelled, but the key
                    # must match the literal value in the source data --
                    # confirm against the CSV before correcting it.
                    "post_office:type": lambda res: {
                        None: None,
                        u"Bureau de poste": None,
                        u"Agence postale commnunale": "post_annex",
                        u"Relais poste commerçant": "post_partner"
                    }[res["Caractéristique_du_site"]],
                    "addr:postcode": "Code_postal",
                    # localite
                    # pays
                    "copy_facility": lambda res: self.bool[res["Photocopie"]],
                    "atm": lambda res: self.bool[res["Distributeur_de_billets"]],
                    "stamping_machine": lambda res: self.bool[res["Affranchissement_Libre_Service"]],
                    # "yes" needs both accessibility criteria, "limited" at
                    # least one, otherwise "no".
                    "wheelchair": lambda res:
                        "yes" if self.bool[res["Accessibilité_Absence_de_ressaut_de_plus_de_2_cm_de_haut"]] and self.bool[res["Accessibilité_Entrée_autonome_en_fauteuil_roulant_possible"]] else
                        "limited" if self.bool[res["Accessibilité_Absence_de_ressaut_de_plus_de_2_cm_de_haut"]] or self.bool[res["Accessibilité_Entrée_autonome_en_fauteuil_roulant_possible"]] else
                        "no"},
                mapping2={
                    "name": lambda res: re.sub(self.Principal, " Principal", re.sub(self.Annexe, " Annexe", re.sub(self.APBP, "", res["Libellé_du_site"]))),
                    "change_machine": lambda res: self.bool[res["Changeur_de_monnaie"]],
                    "phone": "Numéro_de_téléphone"},
                text=lambda tags, fields: {"en": u"Post office %s" % ", ".join(filter(lambda x: x and x != 'None', [fields[u"Précision_du_géocodage"].lower(), fields[u"Adresse"], fields[u"Complément_d_adresse"], fields[u"Lieu_dit"], fields[u"Code_postal"], fields[u"Localité"]]))})))
def __init__(self, config, logger=None):
    """Merge the SIRTAQUI list of Aquitaine libraries with OSM amenity=library."""
    self.missing_official = {"item": "8230", "class": 1, "level": 3, "tag": ["merge", "amenity"], "desc": T_(u"Library not integrated")}

    def website(fields):
        # Normalize the URL field: empty -> no tag; scheme-less -> http://.
        url = fields["URL"]
        if not url:
            return None
        return url if url.startswith('http') else 'http://' + url

    def describe(tags, fields):
        # Human-readable locator from the address columns; the feed encodes
        # missing values as the string "None", which are filtered out.
        parts = [fields["NOMOFFRE"], fields["AD1"], fields["AD1SUITE"], fields["AD2"], fields["AD3"], fields["CP"], fields["COMMUNE"]]
        return {"en": ', '.join(p for p in parts if p != "None")}

    Analyser_Merge.__init__(self, config, logger,
        "http://catalogue.datalocale.fr/dataset/liste-bibliotheques-mediatheques-aquitaine",
        u"Liste des bibliothèques et médiathèques en Aquitaine",
        JSON(Source(
                attribution=u"Réseau SIRTAQUI - Comité Régional de Tourisme d'Aquitaine - www.sirtaqui-aquitaine.com",
                millesime="06/2016",
                fileUrl="http://wcf.tourinsoft.com/Syndication/aquitaine/057734af-e3fa-448f-8180-0df67d1ad141/Objects?$format=json"),
            extractor=lambda json: json['d']),
        # The feed mixes facility kinds: keep only entries whose name marks
        # them as a library or media library.
        Load("LON", "LAT",
            where=lambda row: u"Bibliothèque" in row["NOMOFFRE"] or u"Médiathèque" in row["NOMOFFRE"],
            xFunction=self.degree,
            yFunction=self.degree),
        Mapping(
            select=Select(types=["nodes", "ways"], tags={"amenity": "library"}),
            conflationDistance=200,
            generate=Generate(
                static1={"amenity": "library"},
                static2={"source": self.source},
                mapping1={
                    "ref:FR:CRTA": "SyndicObjectID",
                    "website": website,
                },
                text=describe)))
def __init__(self, config, logger=None):
    """Merge Bordeaux Métropole glass drop-off containers with OSM recycling points."""
    self.missing_official = {"item": "8120", "class": 1, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"BM glass recycling not integrated")}
    self.possible_merge = {"item": "8121", "class": 3, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"BM glass recycling, integration suggestion")}
    self.update_official = {"item": "8122", "class": 4, "level": 3, "tag": ["merge", "recycling"], "desc": T_(u"BM glass recycling update")}
    containers = SHP(Source(
        attribution=u"Bordeaux Métropole",
        millesime="08/2016",
        fileUrl="http://data.bordeaux-metropole.fr/files.php?gid=69&format=2",
        zip="EN_EMPAC_P.shp",
        encoding="ISO-8859-15"))
    Analyser_Merge.__init__(self, config, logger,
        "http://data.bordeaux-metropole.fr/data.php?themes=5",
        u"Emplacements d'apport volontaire",
        containers,
        # srid 2154 (Lambert 93); the "%" LIKE pattern presumably keeps only
        # rows with a non-null IDENT.
        Load(("ST_X(geom)", ), ("ST_Y(geom)", ), srid=2154, select={"IDENT": "%"}),
        Mapping(
            select=Select(types=["nodes", "ways"], tags={"amenity": "recycling"}),
            osmRef="ref:FR:CUB",
            conflationDistance=100,
            generate=Generate(
                static1={
                    "amenity": "recycling",
                    "recycling:glass": "yes",
                    "recycling:glass_bottles": "yes",
                    "recycling_type": "container"},
                static2={"source": self.source},
                mapping1={"ref:FR:CUB": "IDENT"})))