def get_way_center(way_id):
    logger.info("Looking for center of way %s", way_id)
    resp = requests.get(
        'https://api.openstreetmap.org/api/0.6/way/{}/full'.format(way_id),
        stream=True,
    )
    resp.raw.decode_content = True

    if resp.status_code == 410:
        logger.info(
            "Way %s was deleted, looking for most recent not-deleted version",
            way_id)
        resp = requests.get(
            'https://api.openstreetmap.org/api/0.6/way/{}/history'.format(
                way_id),
            stream=True,
        )
        resp.raw.decode_content = True

        # filter() returns an iterator in Python 3, so materialize it
        # before indexing the newest visible version.
        visible_versions = list(
            filter(lambda o: o.visible,
                   [obj for obj in iter_osm_file(resp.raw)]))
        visible_version = visible_versions[-1]
        logger.info("Using version %s of way %s",
                    visible_version.version, visible_version.id)

        # Get the way version so we know the node IDs
        resp = requests.get(
            'https://api.openstreetmap.org/api/0.6/way/{}/{}'.format(
                way_id, visible_version.version),
            stream=True,
        )
        resp.raw.decode_content = True
        visible_way = [obj for obj in iter_osm_file(resp.raw)][0]

        # Get the nodes for the way
        resp = requests.get(
            'https://api.openstreetmap.org/api/0.6/nodes',
            params={'nodes': ','.join([str(n) for n in visible_way.nds])},
            stream=True,
        )
        resp.raw.decode_content = True

    # Yes I know this is terrible.
    lat_sum = 0
    lon_sum = 0
    n = 0
    for obj in iter_osm_file(resp.raw):
        if isinstance(obj, pyosm.model.Node) and obj.visible:
            lat_sum += obj.lat
            lon_sum += obj.lon
            n += 1

    return {'type': 'Point', 'coordinates': [lon_sum / n, lat_sum / n]}
def get_shapes(filelike):
    shapes = []
    node_cache = {}
    way_cache = {}
    for thing in iter_osm_file(filelike):
        if type(thing) == Node:
            pt = (thing.lon, thing.lat)
            shape = Point(pt)
            node_cache[thing.id] = pt
            if thing.tags:
                shapes.append((thing, shape))
        elif type(thing) == Way:
            points = []
            for nd in thing.nds:
                node_loc = node_cache.get(nd)
                if node_loc:
                    points.append(node_loc)
                else:
                    raise Exception(
                        "Way %s references node %s which is not parsed yet."
                        % (thing.id, nd))
            if way_is_polygon(thing):
                shape = Polygon(points)
            else:
                shape = LineString(points)
            way_cache[thing.id] = points
            if any(thing.tags):
                # Only include tagged things at this point. Otherwise,
                # the shapes that are part of multipolygon relations
                # will be included twice.
                shapes.append((thing, shape))
        elif type(thing) == Relation:
            if any([t.key == 'type' and t.value == 'multipolygon'
                    for t in thing.tags]):
                parts = []
                for member in thing.members:
                    if member.type == 'way':
                        shape = way_cache.get(member.ref)
                        if not shape:
                            raise Exception(
                                "Relation %s references way %s which is not parsed yet."
                                % (thing.id, member.ref))
                        parts.append(shape)
                # Polygonize will return all the polygons created, so the
                # inner parts of the multipolygons will be returned twice;
                # we only want the first one.
                shapes.append((thing, next(polygonize(parts))))
    return shapes
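A minimal usage sketch for the get_shapes() helper above; the file name is hypothetical, and it assumes the same iter_osm_file and shapely imports the function itself relies on.

# Usage sketch (assumed file name; get_shapes() expects a file-like object).
with open('extract.osm', 'rb') as f:
    for osm_obj, geom in get_shapes(f):
        # geom is a shapely Point, LineString, or Polygon
        print(osm_obj.id, geom.geom_type, geom.is_valid)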
def get_changeset(changeset_id):
    resp = requests.get(
        'https://api.openstreetmap.org/api/0.6/changeset/{}'.format(
            changeset_id),
        stream=True,
    )
    resp.raw.decode_content = True
    cs = next(iter(iter_osm_file(resp.raw)))
    return cs
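A quick usage sketch for get_changeset(); the changeset ID is made up, and only the id and tags attributes (which the other snippets here also rely on) are assumed on the returned object.

changeset = get_changeset(12345678)   # hypothetical changeset ID
print(changeset.id)
for tag in changeset.tags:            # Tag objects expose .key and .value,
    print(tag.key, tag.value)         # as seen in the other snippets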
    kind_csv.writerow(headers[kind])


def write_and_clear_buffer(kind):
    global csvs, buffers, counter
    kind_buffer = getattr(buffers, kind)
    getattr(csvs, kind).writerows(kind_buffer)
    setattr(buffers, kind, [])


cut_new_file('changesets')
cut_new_file('nodes')
cut_new_file('ways')
cut_new_file('relations')

sys.stdout.write(
    '%8d changesets, %10d nodes, %10d ways, %10d relations' %
    (counter.changesets, counter.nodes, counter.ways, counter.relations))

for p in iter_osm_file(open(sys.argv[1], 'r'), parse_timestamps=False):
    if type(p) == pyosm.model.Node:
        buffers.nodes.append([
            p.id,
            p.version,
            p.changeset,
            p.user,
            p.uid,
            p.visible,
            p.timestamp,
            ','.join(['"%s"=>"%s"' % (re.escape(tag.key), re.escape(tag.value))
                      for tag in p.tags]),
            '%0.7f, %0.7f' % (p.lon, p.lat) if p.lat else None
        ])
        counter.nodes += 1

        if counter.nodes % size_of_buffer == 0:
            write_and_clear_buffer('nodes')
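The snippet above relies on module-level csvs, buffers, counter, headers, and size_of_buffer objects that are not shown; the sketch below is one guess at that setup (every name and value here is an assumption, not the original author's code).

import csv
from types import SimpleNamespace

size_of_buffer = 10000  # assumed flush threshold

# Assumed header rows; only the 'nodes' columns are grounded in the
# buffers.nodes.append(...) call above, the rest are guesses.
headers = {
    'changesets': ['id', 'user', 'uid', 'created_at', 'closed_at', 'tags'],
    'nodes': ['id', 'version', 'changeset', 'user', 'uid', 'visible',
              'timestamp', 'tags', 'location'],
    'ways': ['id', 'version', 'changeset', 'user', 'uid', 'visible',
             'timestamp', 'tags', 'nds'],
    'relations': ['id', 'version', 'changeset', 'user', 'uid', 'visible',
                  'timestamp', 'tags', 'members'],
}

counter = SimpleNamespace(changesets=0, nodes=0, ways=0, relations=0)
buffers = SimpleNamespace(changesets=[], nodes=[], ways=[], relations=[])
csvs = SimpleNamespace()


def cut_new_file(kind):
    # Open a fresh CSV writer for this object kind and emit its header row.
    kind_csv = csv.writer(open('{}.csv'.format(kind), 'w', newline=''))
    setattr(csvs, kind, kind_csv)
    kind_csv.writerow(headers[kind])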
def _get_as_osm(self, path, params={}):
    return [t for t in iter_osm_file(self._get(path, params))]
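A sketch of how a helper like _get_as_osm() might be wired into a small API client; the class name, base URL, _get() body, and pyosm import path are illustrative assumptions, not part of the original code.

import requests
from pyosm.parsing import iter_osm_file  # assumed import path


class OsmClient(object):  # hypothetical wrapper, for illustration only
    BASE = 'https://api.openstreetmap.org/api/0.6'

    def _get(self, path, params=None):
        # Return a decoded, file-like response body for iter_osm_file().
        resp = requests.get(self.BASE + path, params=params, stream=True)
        resp.raw.decode_content = True
        return resp.raw

    def _get_as_osm(self, path, params={}):
        return [t for t in iter_osm_file(self._get(path, params))]

    def way(self, way_id):
        # e.g. fetch a single way as parsed pyosm objects
        return self._get_as_osm('/way/{}'.format(way_id))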
args = parser.parse_args()

input_file = args.INPUT
output_file = args.OUTPUT or '{0}.shp'.format(*splitext(input_file))

start = timeit.default_timer()
print('Read {0}'.format(input_file))

nodes = dict()
outer = list()
inner = list()
items = 0
ways = set()

for item in iter_osm_file(input_file):
    tags = {i.key: i.value for i in item.tags}
    if 'boundary' in tags and item.id == int(args.BOUNDARY_ID):  # 1015139
        if isinstance(item, Relation):
            for member in item.members:
                if member.role == 'outer':
                    ways.add(member.ref)
            items += 1
        else:
            print('Object is not a relation!')

for item in iter_osm_file(input_file):
    if item.id in ways:
        if isinstance(item, Way):
            tmp = list()
            for node in item.nds:
                nodes[node] = None
def get_way_center(way_id):
    logger.info("Looking for center of way %s", way_id)
    resp = requests.get(
        'https://api.openstreetmap.org/api/0.6/way/{}/full'.format(way_id),
        stream=True,
    )
    resp.raw.decode_content = True

    if resp.status_code == 410:
        logger.info(
            "Way %s was deleted, looking for most recent not-deleted version",
            way_id)
        resp = requests.get(
            'https://api.openstreetmap.org/api/0.6/way/{}/history'.format(
                way_id),
            stream=True,
        )
        resp.raw.decode_content = True

        # Materialize the visible versions so the newest one can be indexed
        # (a bare filter() object can't be indexed in Python 3).
        visible_versions = [
            obj for obj in iter_osm_file(resp.raw) if obj.visible
        ]
        visible_version = visible_versions[-1]
        logger.info("Using version %s of way %s",
                    visible_version.version, visible_version.id)

        # Get the way version so we know the node IDs.
        # /way/<>/history doesn't give us nd elements
        resp = requests.get(
            'https://api.openstreetmap.org/api/0.6/way/{}/{}'.format(
                way_id, visible_version.version),
            stream=True,
        )
        resp.raw.decode_content = True
        visible_way = [obj for obj in iter_osm_file(resp.raw)][0]

        # Get the nodes for the way
        nodes_to_average = []
        for nd in visible_way.nds:
            logger.info("Looking for location of node %s", nd)
            resp = requests.get(
                'https://api.openstreetmap.org/api/0.6/node/{}/history'.format(
                    nd),
                stream=True,
            )
            resp.raw.decode_content = True

            # Pick the node version that was visible immediately
            # before the timestamp of the way
            keep_node = None
            for nd_hist in iter_osm_file(resp.raw):
                if nd_hist.visible and \
                        nd_hist.timestamp <= visible_way.timestamp:
                    keep_node = nd_hist

            logger.info("Using node %s/%s", keep_node.id, keep_node.version)
            nodes_to_average.append(keep_node)
    else:
        nodes_to_average = [
            obj for obj in iter_osm_file(resp.raw)
            if isinstance(obj, pyosm.model.Node)
        ]

    # Yes I know this is terrible.
    lat_sum = 0
    lon_sum = 0
    n = 0
    for obj in nodes_to_average:
        lat_sum += obj.lat
        lon_sum += obj.lon
        n += 1

    lon = lon_sum / n
    lat = lat_sum / n

    return {'type': 'Point', 'coordinates': [lon, lat]}
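A short usage sketch for get_way_center(); the way ID is made up, and the logging setup is shown only so the module-level logger the function uses has somewhere to write.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

center = get_way_center(4305492)   # hypothetical way ID
print(center['coordinates'])       # [lon, lat] averaged over the way's nodes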