def clean_shapefile(shp_in):
    """
    Break multipolygons in a shapefile into individual polygons.

    shp_in: path to the input shapefile.

    Returns the path to a '-cleaned' copy written alongside shp_in when any
    multipolygon features were found, otherwise returns shp_in unchanged.
    """
    from stompy.spatial import wkb2shp
    # BUG FIX: the original read the global agg_grid_shp instead of the
    # shp_in argument, so the parameter was silently ignored.
    geoms = wkb2shp.shp2geom(shp_in)
    multi_count = 0
    new_geoms = []
    for fi, feat in enumerate(geoms):
        if feat['geom'].type == 'Polygon':
            new_geoms.append(feat['geom'])
        else:
            # explode multipolygon into its component polygons
            multi_count += 1
            for g in feat['geom'].geoms:
                new_geoms.append(g)
    if multi_count:
        cleaned = shp_in.replace('.shp', '-cleaned.shp')
        # guard against an input path without a '.shp' suffix, which would
        # make replace() a no-op and overwrite the input
        assert cleaned != shp_in
        wkb2shp.wkb2shp(cleaned, new_geoms, overwrite=True)
        return cleaned
    else:
        return shp_in
def write_node_shp(self, shpname, extra_fields=None):
    """
    Write a shapefile with one point feature per non-deleted node.

    shpname: path of the shapefile to write (existing file is overwritten).
    extra_fields: placeholder for parity with write_cells_shp and
      write_edges_shp; not yet supported, must be None or empty.

    Fields mirror self.nodes.dtype, minus 'x' and 'deleted'.
    """
    # BUG FIX: avoid a mutable default argument ([]); None is treated the
    # same as "no extra fields".
    extra_fields = extra_fields or []
    assert len(extra_fields) == 0  # not yet supported!

    # (removed unused base_dtype=[('node_id',np.int32)]; zero-based node ids
    # were never actually written -- why does write_edge_shp create 1-based?)
    node_geoms = [geometry.Point(self.nodes['x'][i])
                  for i in self.valid_node_iter()]
    node_data = self.nodes[~self.nodes['deleted']].copy()
    # don't need to write all of the original fields out:
    node_data = utils.recarray_del_fields(node_data, ['x', 'deleted'])
    wkb2shp.wkb2shp(shpname, input_wkbs=node_geoms, fields=node_data,
                    overwrite=True)
def test_write_gpkg():
    """
    Write a two-point layer to a GeoPackage, removing any stale output first.
    """
    from shapely import geometry
    geoms = [geometry.Point(-120.0, 37.0),
             geometry.Point(-121.0, 37.5)]
    if os.path.exists('test.gpkg'):
        # BUG FIX: a .gpkg is a single sqlite file, not a directory --
        # shutil.rmtree would raise NotADirectoryError here.
        os.unlink('test.gpkg')
    wkb2shp.wkb2shp("test.gpkg", geoms, driver='GPKG', srs_text='WGS84',
                    layer_name='points')
def dump_to_shp(df, shp_fn, **kwargs):
    """
    Write the tracks in df out as a line shapefile.

    Each row's 'track' entry (with 'x' and 'y' series) becomes one
    LineString feature; every other column is written as an attribute field
    (names truncated to 10 characters), plus the dataframe index as 'index'.
    Extra keyword arguments are passed through to wkb2shp.
    """
    geoms = []
    for rec in df['track'].values:
        xy = np.c_[rec['x'].values, rec['y'].values]
        geoms.append(geometry.LineString(xy))

    fields = {'index': df.index.values}
    for col in df.columns.values:
        if col == 'track':
            continue
        # shapefile attribute names are limited to 10 characters
        fields[col[:10]] = df[col].values

    wkb2shp.wkb2shp(shp_fn, geoms, fields=fields, **kwargs)
def gen_aggregation_shp(model):
    """
    Cluster the cells of model.grid into 20 groups via k-means on cell
    centroids and write the merged group polygons to a shapefile.

    Returns the path of the shapefile written ("dwaq_aggregation.shp").
    """
    pnts = model.grid.cells_centroid()
    # make this deterministic
    np.random.seed(37)
    centroids, labels = vq.kmeans2(pnts, k=20, iter=5, minit='points')
    # (removed unused local `permute`; it was never referenced here)

    # Make a shapefile out of that
    polys = []
    for k, grp in utils.enumerate_groups(labels):
        # NOTE(review): ops.cascaded_union is deprecated/removed in
        # shapely 2.x in favor of unary_union -- confirm shapely version.
        grp_poly = ops.cascaded_union(
            [model.grid.cell_polygon(i) for i in grp])
        assert grp_poly.type == 'Polygon', "Hmm - add code to deal with multipolygons"
        polys.append(grp_poly)

    agg_shp_fn = "dwaq_aggregation.shp"
    wkb2shp.wkb2shp(agg_shp_fn, polys, overwrite=True)
    return agg_shp_fn
# Legal delta boundary: delta_bdry = wkb2shp.shp2geom("../../../gis/Legal_Delta_Boundary.shp", target_srs='EPSG:26910')['geom'][0] ## ca_shp = wkb2shp.shp2geom("../../../gis/CA_State/CA_State_TIGER2016.shp", target_srs='EPSG:26910') ## from stompy.spatial import wkb2shp if 0: # Is the cascade grid of any use here? cgrid = unstructured_grid.UnstructuredGrid.read_dfm( "../../../../cascade/rmk_validation_setups/wy2011/r18b_net.nc") cgrid_poly = cgrid.boundary_polygon() wkb2shp.wkb2shp('r18b_net_outline.shp', [cgrid_poly], overwrite=True) if 0: sfbo_grid = unstructured_grid.UnstructuredGrid.read_ugrid( "../../../../sfb_ocean/suntans/grid-merge-suisun/splice-merge-05-filled-edit70.nc" ) sfbo_poly = sfbo_grid.boundary_polygon() wkb2shp.wkb2shp('sfb_ocean_outline.shp', [sfbo_poly], overwrite=True) if 0: csc_grid = unstructured_grid.UnstructuredGrid.read_ugrid( "../../../../csc/grid/CacheSloughComplex_v111-edit21.nc") csc_poly = csc_grid.boundary_polygon() wkb2shp.wkb2shp('csc_outline.shp', [csc_poly], overwrite=True) if 0: sfe_poly = wkb2shp.shp2geom( "../../../gis/dem_to_shoreline/shoreline_from_dem.shp") # Extend further into ocean
# ---------SF FRESH if 0: # BAHM data # SF Bay Freshwater and POTW, copied from sfb_dfm_v2: # features which have manually set locations for this grid # Borrow files from sfb_dfm_v2 -- should switch to submodules if 1: # Transcribe to shapefile for debugging/vis from shapely import geometry from stompy.spatial import wkb2shp adj_pli_feats = dio.read_pli(adjusted_pli_fn) names = [feat[0] for feat in adj_pli_feats] geoms = [ geometry.Point(feat[1].mean(axis=0)) for feat in adj_pli_feats ] wkb2shp.wkb2shp('derived/input_locations.shp', geoms, fields={'name': names}, overwrite=True) # kludge - wind the clock back a bit: print("TOTAL KLUDGE ON FRESHWATER") from sfb_dfm_utils import sfbay_freshwater # This will pull freshwater data from 2012, where we already # have a separate run which kind of makes sense time_offset = np.datetime64('2012-01-01') - np.datetime64('2017-01-01') sfbay_freshwater.add_sfbay_freshwater( run_base_dir, run_start, run_stop, ref_date,
def run(self, argv):
    """
    Parse command-line options, then build (or resume) the paving and
    generate the grid, optionally writing shapefile/raster outputs.

    argv: full argument vector (argv[0] is the program name).
    """
    try:
        # BUG FIX: the original optstring listed 'i:' twice; the duplicate
        # is removed here (behavior unchanged -- getopt ignored it).
        opts, rest = getopt.getopt(argv[1:],
                                   'hb:s:a:t:i:c:r:dv:np:om:f:g:C:',
                                   ['slide-interior', 'rigid-interior'])
    except getopt.GetoptError as e:
        print(e)
        print("-" * 80)
        self.usage()
        exit(1)

    for opt, val in opts:
        if opt == '-h':
            self.usage()
            exit(1)
        elif opt == '-s':
            self.scale_shps.append(val)
        elif opt == '-a':
            self.tele_scale_shps.append(val)
        elif opt == '-t':
            self.effective_tele_rate = float(val)
        elif opt == '-f':
            self.scale_factor = float(val)
        elif opt == '-b':
            self.boundary_shp = val
        elif opt == '-p':
            self.plot_interval = int(val)
        elif opt == '-c':
            self.checkpoint_interval = int(val)
        elif opt == '-C':
            self.scale_ratio_for_cutoff = float(val)
        elif opt == '-r':
            self.resume_checkpoint_fn = val
        elif opt == '-d':
            self.smooth = 0
        elif opt == '-v':
            self.verbosity = int(val)
        elif opt == '-n':
            self.dry_run = 1
        elif opt == '-o':
            self.optimize = 1
        elif opt == '-m':
            self.density_map = val
        elif opt == '-i':
            if not self.interior_shps:
                self.interior_shps = []
            self.interior_shps.append(val)
        elif opt == '-g':
            self.output_shp = val
        elif opt == '--slide-interior':
            self.slide_interior = 1
        elif opt == '--rigid-interior':
            self.slide_interior = 0

    self.check_parameters()

    # record the invocation for later reference; `with` guarantees the file
    # is closed even if a write fails
    with open('tom.log', 'wt') as log_fp:
        log_fp.write("TOM log:\n")
        log_fp.write(" ".join(argv))

    if not self.resume_checkpoint_fn:
        bound_args = self.prepare_boundary()
        density_args = self.prepare_density()
        args = {}
        args.update(bound_args)
        args.update(density_args)
        args['slide_internal_guides'] = self.slide_interior
        # Wait until after smoothing to add degenerate interior lines
        # args.update(self.prepare_interiors())
        self.p = paver.Paving(**args)
        self.p.verbose = self.verbosity
        self.p.scale_ratio_for_cutoff = self.scale_ratio_for_cutoff
        if self.smooth:
            self.p.smooth()
            # and write out the smoothed shoreline
            wkb2shp.wkb2shp(self.smoothed_poly_shp, [self.p.poly],
                            overwrite=True)
        int_args = self.prepare_interiors()
        # BUG FIX: dict.has_key() was removed in python 3; use `in`.
        if 'degenerates' in int_args:
            for degen in int_args['degenerates']:
                self.p.clip_and_add_degenerate_ring(degen)
    else:
        self.p = paver.Paving.load_complete(self.resume_checkpoint_fn)
        self.p.verbose = self.verbosity

    if self.dry_run:
        print("dry run...")
    elif self.density_map:
        # rasterize the density/scale field instead of paving
        f = self.p.density
        x1, y1, x2, y2, dx, dy = map(float, self.density_map.split(','))
        bounds = np.array([[x1, y1], [x2, y2]])
        rasterized = f.to_grid(dx=dx, dy=dy, bounds=bounds)
        rasterized.write_gdal("scale-raster.tif")
    else:
        starting_step = self.p.step
        self.create_grid()
        # only rewrite outputs if paving actually advanced (or none exist)
        if (not os.path.exists('final.pav')) or self.p.step > starting_step:
            self.p.write_complete('final.pav')
        if (not os.path.exists('final.pdf')) or self.p.step > starting_step:
            self.plot_intermediate(fn='final.pdf', color_by_step=False)

        # write grid as shapefile
        if self.output_shp:
            # (typo fix: "edgse" -> "edges")
            print("Writing shapefile with %d features (edges)" %
                  (self.p.Nedges()))
            self.p.write_shp(self.output_shp, only_boundaries=0, overwrite=1)
            # by reading the suntans grid output back in, we should get
            # boundary edges marked as 1 - self.p probably doesn't have
            # these markers
            g = trigrid.TriGrid(suntans_path='.')
            g.write_shp('trigrid_write.shp', only_boundaries=0, overwrite=1)
        if self.optimize:
            self.run_optimization()
            self.p.write_complete('post-optimize.pav')
            self.plot_intermediate(fn='post-optimize.pdf')
import stompy.model.delft.io as dio
from stompy.spatial import wkb2shp
from shapely import geometry

##

# Convert the fixed-weir polylines to LineStrings for a shapefile.
weirs = dio.read_pli('fixed_weirs-v02.pli')

geoms = []
for w in weirs:
    # each pli feature is (name, coords, ...); keep only x,y columns
    geoms.append(geometry.LineString(w[1][:, :2]))

##

wkb2shp.wkb2shp('fixed_weirs-v02.shp', geoms)
# NOTE(review): this chunk appears to continue a loop from earlier --
# `g`, `na`, and `b` are defined outside this view.
nb=g.add_or_find_node(x=b)
try:
    j=g.add_edge(nodes=[na,nb])
except g.GridException:
    # edge already exists (or is invalid); best-effort insertion
    pass

##

# close the bound segments into cycles, then polygonize them
cycles=g.find_cycles(max_cycle_len=g.Nnodes())

##

polys=[geometry.Polygon( g.nodes['x'][cycle] )
       for cycle in cycles ]

wkb2shp.wkb2shp('regions_from_bounds.shp',
                polys,overwrite=True)

##

from shapely import ops
# merge all region polygons to get the overall bounding box
one_poly=ops.cascaded_union(polys)
bounds_xyxy=one_poly.bounds

##

# note geo_bounds ordering: [xmin, xmax, ymin, ymax]
dem=field.GdalGrid( ('/media/idrive/BASELAYERS/Elevation_DerivedProducts/'
                     'LiDAR 2005-2012 entire Bay Area from AECOM/USGS_TopoBathy/'
                     'San_Francisco_TopoBathy_Elevation_2m.tif'),
                    geo_bounds=[bounds_xyxy[0],bounds_xyxy[2],
                                bounds_xyxy[1],bounds_xyxy[3]])

##
pnts = model.grid.cells_centroid() # make this deterministic np.random.seed(37) centroids, labels = vq.kmeans2(pnts, k=20, iter=5, minit='points') permute = np.argsort(np.random.random(labels.max() + 1)) # Make a shapefile out of that polys = [] for k, grp in utils.enumerate_groups(labels): grp_poly = ops.cascaded_union([model.grid.cell_polygon(i) for i in grp]) assert grp_poly.type == 'Polygon', "Hmm - add code to deal with multipolygons" polys.append(grp_poly) agg_shp_fn = "dwaq_aggregation.shp" wkb2shp.wkb2shp(agg_shp_fn, polys, overwrite=True) ## import matplotlib.pyplot as plt plt.figure(1).clf() ax = plt.gca() model.grid.plot_cells(values=permute[labels], ax=ax) ax.axis('equal') for poly in polys: plot_wkb.plot_wkb(poly, ax=ax, fc='none', lw=3) ## hyd_path = os.path.join(model.run_dir, "DFM_DELWAQ_%s" % model.mdu.name, "%s.hyd" % model.mdu.name)
# Write a shapefile with one feature per tag: geoms = [] tags = [] for df in dfs: if len(df) < 2: continue geo = geometry.LineString(np.c_[df[' X (East)'], df[' Y (North)']]) geoms.append(geo) tag = df['Tag ID'].values[0] tags.append(tag) ## from stompy.spatial import wkb2shp wkb2shp.wkb2shp('tag-lines-2019-utm.shp', geoms, fields=dict(tag=tags), overwrite=True) ## utms = glob.glob('2019/SpeedFilteredUTM/tag*.txt') dfs = [pd.read_csv(fn) for fn in utms] # Write a shapefile with one feature per tag: geoms = [] tags = [] for df in dfs: if len(df) < 2: continue geo = geometry.LineString(np.c_[df[' X (East)'], df[' Y (North)']])
# NOTE(review): this chunk continues a loop from earlier -- `fn`, `dss`,
# and `read_pts` are defined outside this view. `print fn` is python 2
# syntax; this file (or chunk) predates a py3 port.
print fn
dss.append(read_pts(fn))

##

# collect (lon, lat) and station name from each dataset
lonlats = [(ds.longitude.values, ds.latitude.values) for ds in dss]
names = [ds.name.values[0] for ds in dss]

##

from shapely import geometry
from stompy.spatial import wkb2shp

points = [geometry.Point(ll[0], ll[1]) for ll in lonlats]
wkb2shp.wkb2shp('region5-point_sources.shp', points,
                fields=dict(name=names), overwrite=True)

##

# The useful ones are Davis, Sac Regional, UC Davis. There is a Stockton East WD,
# but that appears to be a supplier of water.
ds_sac = dss[names.index('SACRAMENTO REGIONAL WWTP')]
ds_davis = dss[names.index('Davis Wastewater Treatment Plant')]
ds_ucdavis = dss[names.index('UC DAVIS MAIN STP')]
ds_woodland = dss[names.index('WOODLAND WWTP')]

##

plt.figure(1).clf()
""" rivr_fns = glob.glob('%s/*.rivr' % transect) + glob.glob( '%s/*.riv' % transect) tran_dss = [ tweak_sontek(read_sontek.surveyor_to_xr(fn, proj='EPSG:26910')) for fn in rivr_fns ] return tran_dss ## from shapely import geometry from collections import defaultdict geoms = [] fields = defaultdict(list) for t in transects: dss = read_transect(t) for ds in dss: geom = geometry.LineString(np.c_[ds.x_sample, ds.y_sample]) geoms.append(geom) fields['dir'].append(t) ## from stompy.spatial import wkb2shp wkb2shp.wkb2shp('transects_2018.shp', geoms, overwrite=True, fields=fields)
# Save some details ax.axis((647365.2402763385, 647536.919447323, 4185540.790668355, 4185717.8615734414)) fig.savefig('dem-vs-adcp-v01-zoom_upstream.png', dpi=125) ax.axis((647120.3645595856, 647334.6308117444, 4185737.7525997157, 4185958.7480704053)) fig.savefig('dem-vs-adcp-v01-zoom_junction.png', dpi=125) ax.axis((647174.8171692798, 647325.0441959006, 4185850.7106188717, 4186005.6556562902)) fig.savefig('dem-vs-adcp-v01-zoom_scour.png', dpi=125) ax.axis((647092.4543586917, 647242.6813853125, 4185760.37721371, 4185915.3222511285)) fig.savefig('dem-vs-adcp-v01-zoom_barrier.png', dpi=125) ## from stompy.spatial import proj_utils, wkb2shp from shapely import geometry ll = proj_utils.mapper('EPSG:26910', 'WGS84')(xyz_samples[:, :2]) # Write the points so far to a shapefile, which can then be translated to kml geoms = [geometry.Point(*pnt) for pnt in ll] wkb2shp.wkb2shp('samples-depth.shp', geoms, srs_text='WGS84', fields={'depth': xyz_samples[:, 2]}, overwrite=True)
# trim out high land that's not useful for the model dem_final.F[dem_final.F > 25.0] = np.nan img = dem_final.plot(cmap=sst, vmin=0, vmax=3.5) dem_final.plot_hillshade(ax=ax, z_factor=3) plt.colorbar(img) dem_final.write_gdal(f'compiled-dem-{version}-{date_str}-1m.tif', overwrite=True) ## # Write a shapefile of the composite regions wkb2shp.wkb2shp(f"composite-input-{version}-{date_str}.shp", shp_data['geom'], fields=shp_data, overwrite=True) ## # Final render at 0.5m six.moves.reload_module(field) comp_field = field.CompositeField(shp_data=shp_data, factory=factory) # use a post processing step to separately save rgb versions of the tiles rgb_tiles = [] def post(tile, **kw): tile_rgb = tile.to_rgba(cmap=sst, vmin=0, vmax=5.0) shade_rgba = tile.hillshade_shader(z_factor=3)
# NOTE(review): this chunk continues loops from earlier -- `rec`,
# `daily_recs` and `all_wind_recs` originate outside this view.
winds = rec['winds']
winds['time'] = rec['timestamp']
daily_recs.append(winds)

daily_recs = pd.concat(daily_recs)
all_wind_recs.append(daily_recs)

all_winds = pd.concat(all_wind_recs)

# one row per station, taking the first record for location
locs = all_winds.groupby('name').first()
summary = locs[['x', 'y']]

##

# Make a summary shapefile of that:
from shapely import geometry
from stompy.spatial import wkb2shp

# assumes x/y are stored in km, scaled here to m -- TODO confirm
pnts = [geometry.Point(rec.x * 1000, rec.y * 1000)
        for idx, rec in summary.iterrows()]

fields = np.zeros(len(pnts), [('name', 'O'), ('x', 'f8'), ('y', 'f8')])
for i, (idx, rec) in enumerate(summary.iterrows()):
    fields['name'][i] = idx
    fields['x'][i] = rec.x
    fields['y'][i] = rec.y

wkb2shp.wkb2shp('wind_locations.shp', pnts, overwrite=True, fields=fields)
fig.savefig('regions_and_datums.png', dpi=200) ## recs = [] for poly_i in range(len(polys)): rec = {} for datum in ds.datum: datum_name = datum.item() rec['A_%s' % datum_name] = float( ds.isel(poly=poly_i).sel(datum=datum).wet_area.values) rec['V_%s' % datum_name] = float( ds.isel(poly=poly_i).sel(datum=datum).volume.values) rec['id'] = poly_i recs.append(rec) wkb2shp.wkb2shp('results_v00.shp', polys, fields=recs, overwrite=True) ## # Profiles: redwood = [[570627, 4153666], [570994, 4153282]] steinberger = [[568364, 4155573], [568644, 4155351]] def pnts_to_xA(pnts, datum='MSL'): samples = linestring_utils.upsample_linearring(pnts, 5, closed_ring=False) z = dem(samples) dists = utils.dist_along(samples) z0 = ds.z.sel(datum=datum).item() depths = (z0 - z).clip(0, np.inf)
import glob ## shp_dest = 'gis/model-features.shp' names = [] geoms = [] for fn in glob.glob('*.pli'): feats = dio.read_pli(fn) for feat in feats: # generally just one per file names.append(feat[0]) geoms.append(geometry.LineString(feat[1])) wkb2shp.wkb2shp("gis/model-features.shp", geoms, fields=dict(names=names)) # AmericanRiver.pli # Barker_Pumping_Plant.pli # DXC.pli # FlowFMcrs.pli # Georgiana.pli # SacramentoRiver.pli # SRV.pli ## # Same but for point features import pandas as pd df = pd.read_csv("ND_stations.xyn", header=None,