import logging
from os import makedirs, path

import numpy as np
import pyproj
import tqdm
from shapefile import Reader as ShpReader, Writer as ShpWriter
from shapefile import Reader as ShapefileReader
from shapely import geometry
from shapely.geometry import Point, Polygon

# Base directory for the input and output data. The value here is an
# assumption; adjust it to your environment.
DATA_PATH_BASE = "."


def clip_value(in_file, ot_dir, min_height, max_height):
    """
    Online training 4: Filtering vector data
    Filters flood / landslide vector data using an attribute value of the GIS data.

    Function : clip_value
    Arg 1    : flood / landslide vector data (*.shp)
    Arg 2    : output directory name
    Arg 3    : minimum attribute value to output
    Arg 4    : maximum attribute value to output
    """
    # Get actual file paths
    in_file = path.join(DATA_PATH_BASE, in_file)
    ot_dir = path.join(DATA_PATH_BASE, ot_dir)
    makedirs(ot_dir, exist_ok=True)
    # The writer produces a shapefile, so the output extension is .shp.
    ot_file = path.join(
        ot_dir, "{0}v.shp".format(path.splitext(path.basename(in_file))[0]))

    reader = ShpReader(in_file, encoding='cp932')
    writer = ShpWriter(ot_file, encoding='cp932')

    # Create the DBF schema and locate the "height" column. The enumeration
    # skips the DeletionFlag field, so the index matches the record layout.
    height_col_id = None
    for i, col in enumerate(
            col for col in reader.fields if col[0] != "DeletionFlag"):
        writer.field(col[0], col[1], col[2], col[3])
        if col[0] == "height":
            height_col_id = i
    if height_col_id is None:
        print("height column not found in polygon shapefile")
        return

    # Filtering
    n_mesh = reader.numRecords
    cnt_mesh = 0
    for data in reader.iterShapeRecords():
        height = data.record[height_col_id]
        if (height is not None) and (min_height <= height <= max_height):
            # This polygon is an output target.
            writer.shape(data.shape)
            writer.record(*data.record)
        # Count every processed record so the progress display reaches n_mesh.
        cnt_mesh += 1
        if cnt_mesh % 100000 == 0:
            print("{0}K / {1}K".format(cnt_mesh / 1000, n_mesh / 1000))
    writer.close()
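
# A minimal usage sketch for clip_value. The file and directory names are
# hypothetical and are resolved relative to DATA_PATH_BASE; this call would
# keep only the polygons whose "height" attribute lies in [2.0, 10.0]:
#
#     clip_value("flood_mesh.shp", "flood_by_value", 2.0, 10.0)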


def clip_shape(in_mesh, in_mask, ot_dir, flg_mask):
    """
    Online training 4: Filtering vector data
    Filters flood / landslide vector data using the shapes in the GIS data.

    Function : clip_shape
    Arg 1    : flood / landslide vector data (*.shp)
    Arg 2    : GIS data (*.shp)
    Arg 3    : output directory name
    Arg 4    : output flag (True: output the parts that overlap the GIS data,
               False: output the parts that do not overlap the GIS data)
    """
    # Get actual file paths
    in_mesh = path.join(DATA_PATH_BASE, in_mesh)
    in_mask = path.join(DATA_PATH_BASE, in_mask)
    ot_dir = path.join(DATA_PATH_BASE, ot_dir)
    makedirs(ot_dir, exist_ok=True)
    # The writer produces a shapefile, so the output extension is .shp.
    ot_file = path.join(
        ot_dir, "{0}s.shp".format(path.splitext(path.basename(in_mesh))[0]))

    reader_mesh = ShpReader(in_mesh, encoding='cp932')
    reader_mask = ShpReader(in_mask, encoding='cp932')
    writer = ShpWriter(ot_file, encoding='cp932')

    # Create DBF schema
    for col in reader_mesh.fields:
        if col[0] != "DeletionFlag":
            writer.field(col[0], col[1], col[2], col[3])

    # Build the set of mask polygons, memorizing each polygon's parts and
    # bounding box.
    maskdata = []
    for data in reader_mask.iterShapes():
        points = data.points
        points_split = list(data.parts) + [len(points)]
        poly_list = []
        for i in range(len(points_split) - 1):
            points_part = points[points_split[i]:points_split[i + 1]]
            poly_list.append(Polygon(points_part))
        x_range = (min(points, key=lambda p: p[0])[0],
                   max(points, key=lambda p: p[0])[0])
        y_range = (min(points, key=lambda p: p[1])[1],
                   max(points, key=lambda p: p[1])[1])
        maskdata.append((x_range, y_range, poly_list))

    # Filtering
    n_mesh = reader_mesh.numRecords
    cnt_mesh = 0
    for data in reader_mesh.iterShapeRecords():
        center = Polygon(data.shape.points).centroid
        x = center.x
        y = center.y
        masked = False
        for x_range, y_range, mask_polys in maskdata:
            # Primary screening by the mask polygon's bounding box.
            if x < x_range[0] or x > x_range[1] or y < y_range[0] or y > y_range[1]:
                continue
            # Even-odd rule: the centroid lies inside the mask polygon when it
            # falls inside an odd number of rings (outer rings minus holes).
            mask_count = sum(poly.contains(center) for poly in mask_polys)
            if mask_count % 2 == 1:
                masked = True
                break
        if masked == flg_mask:
            # This polygon is an output target.
            writer.shape(data.shape)
            writer.record(*data.record)
        # Count every processed record so the progress display reaches n_mesh.
        cnt_mesh += 1
        if cnt_mesh % 100000 == 0:
            print("{0}K / {1}K".format(cnt_mesh / 1000, n_mesh / 1000))
    writer.close()
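
# A minimal usage sketch for clip_shape. The file names are hypothetical and
# resolved relative to DATA_PATH_BASE; with flg_mask=True only the mesh
# polygons whose centroid falls inside a landuse polygon are written, and
# flg_mask=False would keep the complement instead:
#
#     clip_shape("flood_mesh.shp", "landuse.shp", "flood_by_shape", True)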


def add_height_vector(in_polys, in_hpoint, dst_fn):
    """
    Online training 3: Damage-area extraction and raster-to-vector conversion
    Attaches an elevation value to each mesh polygon.

    Function : add_height_vector
    Arg 1    : input mesh data names (list of *.shp)
    Arg 2    : digital elevation model name (*.shp)
    Arg 3    : output file name (*.shp)
    """
    # Read DEM data. GridData is a grid-point lookup table assumed to be
    # defined elsewhere in this module.
    print("loading DEM data ...")
    dem = GridData()
    dem_reader = ShpReader(in_hpoint, encoding='cp932')
    n_p = dem_reader.numRecords
    cnt_p = 0
    for data in dem_reader.iterShapeRecords():
        point = Point(data.shape.points[0])
        p_val = data.record
        dem.add_data(point.x, point.y, p_val)
        cnt_p += 1
        if cnt_p % 100000 == 0:
            print("{0}K / {1}K".format(cnt_p / 1000, n_p / 1000))
    print("loaded DEM data .")
    print()

    # Process each polygon shapefile
    for in_poly in in_polys:
        print("processing {0} ...".format(in_poly))
        poly_reader = ShpReader(in_poly)
        poly_writer = ShpWriter(target=dst_fn)

        # Create the DBF schema: copy the input columns and add "height".
        for col in poly_reader.fields:
            if col[0] != "DeletionFlag":
                poly_writer.field(col[0], col[1], col[2], col[3])
        poly_writer.field("height", "N", 18, 9)

        # Attach the elevation value of the DEM grid point nearest to the
        # centroid of each polygon.
        n_poly = poly_reader.numRecords
        cnt_poly = 0
        for data in poly_reader.iterShapeRecords():
            center = Polygon(data.shape.points).centroid
            key_x = dem.search_nearest_x(center.coords[0][0])
            key_y = dem.search_nearest_y(center.coords[0][1])
            dem_record = dem.get_data(key_x, key_y)
            if dem_record:
                # The nearest grid point has an elevation value.
                record = data.record + dem_record
            else:
                # The nearest grid point has no elevation value.
                record = data.record + [None]
            poly_writer.shape(data.shape)
            poly_writer.record(*record)
            cnt_poly += 1
            if cnt_poly % 100000 == 0:
                print("{0}K / {1}K".format(cnt_poly / 1000, n_poly / 1000))
        poly_writer.close()
        print("processed {0} .".format(in_poly))
        print()
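
# A minimal usage sketch for add_height_vector. The file names are
# hypothetical; note that the first argument is a list of mesh shapefiles and
# that each one is written to the same output target in turn:
#
#     add_height_vector(["flood_mesh.shp"], "dem_points.shp", "flood_height.shp")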


def _load_boundaries(filename, index, projections=None, encoding='utf-8'):
    """
    Load boundaries from a shapefile and perform a projection if desired.

    Parameters
    ----------
    filename : str
        filename to load data from
    index : int
        index to extract the LSOA identifier from the metadata for each shape
    projections : tuple
        tuple of projections (from, to)
    encoding : str
        character encoding of the shapefile

    Returns
    -------
    boundaries : dict
        dictionary of (areas, polygons) tuples keyed by LSOA code
    """
    boundaries = {}

    # Build a coordinate transformer if projections are given.
    # pyproj.Proj(init=...) is deprecated in pyproj >= 2; newer code would use
    # Transformer.from_crs, but the original call is kept here.
    if projections is not None:
        original_projection, target_projection = projections
        original_projection = pyproj.Proj(init=original_projection)
        target_projection = pyproj.Proj(init=target_projection)
        transformer = pyproj.Transformer.from_proj(original_projection, target_projection)
    else:
        transformer = None

    # Iterate over all records
    shapefile = ShapefileReader(filename, encoding=encoding)
    logging.info("opened shapefile '%s'", filename)
    iterator = shapefile.iterShapeRecords()
    with tqdm.tqdm(total=shapefile.numRecords) as progress:
        while True:
            try:
                sr = next(iterator)
            except IOError:
                # The shapefile module has a bug and raises an error when
                # reading, but the data are fine, so we ignore the error.
                break
            except StopIteration:
                break

            # Get the identifier
            code = sr.record[index]

            # Transform the points if projections are given
            points = sr.shape.points
            if transformer is not None:
                points = np.transpose(transformer.transform(*np.transpose(points)))

            # Build a list of (area, polygon) tuples, one per shape part
            polygons = []
            for part in np.array_split(points, sr.shape.parts[1:]):
                polygon = geometry.Polygon(part)
                polygons.append((polygon.area, polygon))

            # Transpose to get a tuple (areas, polygons)
            boundaries[code] = list(zip(*polygons))
            assert len(boundaries[code]) == 2, "expected 2 items for %s but got %d" % \
                (code, len(boundaries[code]))
            progress.update()

    # Check we loaded the correct number of boundaries
    assert len(boundaries) == shapefile.numRecords, "did not load the correct number of records"
    logging.info('loaded %d boundaries from %s', len(boundaries), filename)
    return boundaries
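
# A minimal usage sketch for _load_boundaries. The shapefile name, record
# index, and EPSG codes are assumptions: this would load LSOA boundaries and
# reproject them from WGS84 (epsg:4326) to the British National Grid
# (epsg:27700):
#
#     boundaries = _load_boundaries(
#         "lsoa_boundaries.shp", index=0,
#         projections=("epsg:4326", "epsg:27700"))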