def set_grid_and_features(self):
    # For now the only difference is the DEM. If they diverge, might go
    # with separate grid directories instead (maybe with some common features).
    self.grid_dir = grid_dir = os.path.join(local_config.model_dir, "../grids/pesca_butano_v04")
    self.set_grid(os.path.join(grid_dir, f"pesca_butano_{self.terrain}_deep_bathy.nc"))

    self.add_gazetteer(os.path.join(grid_dir, "line_features.shp"))
    self.add_gazetteer(os.path.join(grid_dir, "point_features.shp"))
    self.add_gazetteer(os.path.join(grid_dir, "polygon_features.shp"))

    # Check for and install fixed weirs.
    # Updated to now force this, to avoid hidden discrepancies.
    fixed_weir_fn = os.path.join(grid_dir, f"fixed_weirs-{self.terrain}.pliz")
    self.fixed_weirs = dio.read_pli(fixed_weir_fn)

    if self.slr != 0.0 and self.slr_raise_inlet:
        self.raise_inlet(self.slr)
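# Usage sketch (hypothetical driver, not part of the original source): the method above
# reads self.terrain, self.slr and self.slr_raise_inlet, so those need to be set on the
# model object first. The class name and the 'existing' terrain value are assumptions
# for illustration only.
#
#   model = PescaButanoModel()
#   model.terrain = 'existing'      # -> pesca_butano_existing_deep_bathy.nc, fixed_weirs-existing.pliz
#   model.slr = 0.0                 # sea level rise [m]; nonzero with slr_raise_inlet also raises the inlet
#   model.slr_raise_inlet = True
#   model.set_grid_and_features()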
def add_ocean(run_base_dir,
              rel_bc_dir,
              run_start,
              run_stop,
              ref_date,
              static_dir,
              grid,
              old_bc_fn,
              all_flows_unit=False,
              lag_seconds=0.0,
              factor=1.0):
    """
    Ocean forcing.

    Silvia used water level data from station 46214 (apparently from Yi Chao's
    ROMS?), with no spatial variation.  Maybe salinity from Yi Chao ROMS?
    That's what the thesis says, but the actual inputs look like constant 33.

    Here I'm using data from NOAA Point Reyes: water level and water
    temperature.  When temperature is not available, use a constant 15 degrees.

    factor: a scaling factor applied to the tide data to adjust amplitude
      around MSL.
    lag_seconds: shift the ocean boundary condition in time; a positive value
      applies it later in time.
    """
    # Get a few extra days of data to allow for transients in the low-pass filter.
    pad_time = np.timedelta64(5, 'D')

    if 1:
        if 0:
            # This was temporary, while NOAA had an issue with their website.
            log.warning("TEMPORARILY USING FORT POINT TIDES")
            tide_gage = "9414290"  # Fort Point
        else:
            tide_gage = "9415020"  # Pt Reyes

        if common.cache_dir is None:
            tides_raw_fn = os.path.join(run_base_dir, rel_bc_dir,
                                        'tides-%s-raw.nc' % tide_gage)
            if not os.path.exists(tides_raw_fn):
                tides = noaa_coops.coops_dataset(
                    tide_gage,
                    run_start - pad_time,
                    run_stop + pad_time,
                    ["water_level", "water_temperature"],
                    days_per_request=30)
                tides.to_netcdf(tides_raw_fn, engine='scipy')
            else:
                tides = xr.open_dataset(tides_raw_fn)
        else:
            # Rely on caching within noaa_coops.
            tides = noaa_coops.coops_dataset(
                tide_gage,
                run_start - pad_time,
                run_stop + pad_time,
                ["water_level", "water_temperature"],
                days_per_request='M',
                cache_dir=common.cache_dir)

        # Those retain station as a dimension of length 1 - drop that dimension here:
        tides = tides.isel(station=0)

        # Fort Point mean tide range is 1.248m, vs. 1.193m at Point Reyes.
        # Apply a rough correction to the amplitude.
        # S2 phase is 316.2 deg at Pt Reyes, 336.2 deg at Ft. Point:
        # a 20 deg difference for a 12h tide, or 30 deg/hr, so that's a lag of
        # 40 minutes.
        # First go I got this backwards, and wound up with lags at Presidio and
        # Alameda of 4600 and 4400s.  That was with lag_seconds -= 40*60.
        # Also got amplitudes 13% high at Presidio, so further correction...
        if tide_gage == "9414290":
            # factor *= 1.193 / 1.248 * 1.0 / 1.13
            lag_seconds += 35 * 60.

    if 1:  # Clean that up, fabricate salinity.
        water_level = utils.fill_tidal_data(tides.water_level)

        # IIR Butterworth.  Nicer than FIR, with minor artifacts at the ends.
        # 3 hours, defaults to 4th order.
        water_level[:] = filters.lowpass(water_level[:].values,
                                         utils.to_dnum(water_level.time),
                                         cutoff=3. / 24)

        if 1:  # Apply factor:
            msl = 2.152 - 1.214  # MSL(m) - NAVD88(m) for Point Reyes
            if factor != 1.0:
                log.info("Scaling tidal forcing amplitude by %.3f" % factor)
                water_level[:] = msl + factor * (water_level[:].values - msl)

        if 1:  # Apply lag:
            if lag_seconds != 0.0:
                # Sign convention: if lag_seconds is positive, then the result
                # for time.values[0] comes from the original data at
                # time.values[0] - lag_seconds.
                if 0:
                    # Why interpolate here?  Just alter the time base.
                    water_level[:] = np.interp(
                        utils.to_dnum(tides.time.values),
                        utils.to_dnum(tides.time.values) - lag_seconds / 86400.,
                        tides.water_level.values)
                else:
                    # Adjust the time base directly.
                    # timedelta64 requires an integer count of seconds.
                    water_level.time.values[:] = (water_level.time.values
                                                  + np.timedelta64(int(lag_seconds), 's'))

        if 'water_temperature' not in tides:
            log.warning("Water temperature was not found in NOAA data.  Will use constant 15")
            water_temp = 15 + 0 * tides.water_level
            water_temp.name = 'water_temperature'
        else:
            fill_data(tides.water_temperature)
            water_temp = tides.water_temperature

        if all_flows_unit:
            print("-=-=-=- USING 35 PPT WHILE TESTING! -=-=-=-")
            salinity = 35 + 0 * water_level
        else:
            salinity = 33 + 0 * water_level
        salinity.name = 'salinity'

    if 1:  # Write it all out.
        # Add a stanza to FlowFMold_bnd.ext:
        src_name = 'Sea'
        src_feat = dio.read_pli(os.path.join(static_dir, '%s.pli' % src_name))[0]

        forcing_data = [('waterlevelbnd', water_level, '_ssh'),
                        ('salinitybnd', salinity, '_salt'),
                        ('temperaturebnd', water_temp, '_temp')]

        for quant, da, suffix in forcing_data:
            with open(old_bc_fn, 'at') as fp:
                lines = ["QUANTITY=%s" % quant,
                         "FILENAME=%s/%s%s.pli" % (rel_bc_dir, src_name, suffix),
                         "FILETYPE=9",
                         "METHOD=3",
                         "OPERAND=O",
                         ""]
                fp.write("\n".join(lines))

            feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
            dio.write_pli(os.path.join(run_base_dir, rel_bc_dir,
                                       '%s%s.pli' % (src_name, suffix)),
                          [feat_suffix])

            # Write the data:
            columns = ['elapsed_minutes', da.name]

            df = da.to_dataframe().reset_index()
            df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(60, 's')

            if len(feat_suffix) == 3:
                node_names = feat_suffix[2]
            else:
                node_names = [""] * len(feat_suffix[1])

            for node_idx, node_name in enumerate(node_names):
                # If no node names are known, create the default name of <feature name>_0001
                if not node_name:
                    node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)

                tim_fn = os.path.join(run_base_dir, rel_bc_dir, node_name + ".tim")
                df.to_csv(tim_fn, sep=' ', index=False, header=False, columns=columns)
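# Worked sketch (illustrative only) of the Fort Point fallback correction discussed in
# the comments above: the mean-range ratio rescales the tide about MSL, and the S2
# phase difference sets the time lag. The numbers come straight from those comments.
pt_reyes_range = 1.193        # mean tide range [m] at Point Reyes (9415020)
fort_point_range = 1.248      # mean tide range [m] at Fort Point (9414290)
range_ratio = pt_reyes_range / fort_point_range        # ~0.956; the extra 1/1.13 was an empirical tweak

s2_phase_diff_deg = 336.2 - 316.2   # Ft. Point S2 phase minus Pt. Reyes S2 phase, ~20 degrees
s2_speed_deg_per_hr = 30.0          # semidiurnal constituent: ~360 degrees per ~12 hours
lag_minutes = 60.0 * s2_phase_diff_deg / s2_speed_deg_per_hr   # ~40 minutes (35 min used above)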
def add_delta_inflow(mdu,
                     rel_bc_dir,
                     static_dir,
                     grid,
                     dredge_depth,
                     all_flows_unit=False,
                     temp_jersey=True,
                     temp_rio=True):
    """
    Fetch USGS river flows and add them to FlowFM_bnd.ext.

    Per Silvia's thesis:
      Jersey: discharge boundary affected by tides; discharge and temperature
        taken from USGS 11337190 SAN JOAQUIN R A JERSEY POINT, 0 salinity.
        (Note that Dutch Slough should probably be added in here.)
      Rio Vista: 11455420 SACRAMENTO A RIO VISTA, temperature from DWR station
        RIV, 0 salinity.

    mdu: MDUFile for the run; supplies the run directory, reference date,
      run period, and the old-style boundary forcing file (ExtForceFile).
    rel_bc_dir: subdirectory, relative to the run directory, for boundary
      forcing files.
    static_dir: path to static assets, specifically Jersey.pli and RioVista.pli.
    grid: UnstructuredGrid instance, to be modified at inflow locations.
    dredge_depth: depth to which the inflow boundary cells are dredged.
    all_flows_unit: if True, override all flows to be 1 m3 s-1 for model
      diagnostics.
    temp_jersey, temp_rio: whether to include temperature forcing for each
      source.
    """
    # Get run directory, time range and forcing-file info from the MDU.
    run_base_dir = mdu.base_path
    ref_date, run_start, run_stop = mdu.time_range()
    old_bc_fn = mdu.filepath(["external forcing", "ExtForceFile"])

    pad = np.timedelta64(3, 'D')

    if 1:  # Cache the original data from USGS, then clean it and write to DFM format.
        jersey_raw_fn = os.path.join(run_base_dir, rel_bc_dir, 'jersey-raw.nc')
        if not os.path.exists(jersey_raw_fn):
            if temp_jersey:
                jersey_raw = usgs_nwis.nwis_dataset(
                    station="11337190",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60,   # "Discharge, cubic feet per second"
                              10],  # "Temperature, water, degrees Celsius"
                    days_per_request=30)
            else:
                jersey_raw = usgs_nwis.nwis_dataset(
                    station="11337190",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60],  # "Discharge, cubic feet per second"
                    days_per_request=30)
            jersey_raw.to_netcdf(jersey_raw_fn, engine='scipy')

        rio_vista_raw_fn = os.path.join(run_base_dir, rel_bc_dir, 'rio_vista-raw.nc')
        if not os.path.exists(rio_vista_raw_fn):
            if temp_rio:
                rio_vista_raw = usgs_nwis.nwis_dataset(
                    station="11455420",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60,   # "Discharge, cubic feet per second"
                              10],  # "Temperature, water, degrees Celsius"
                    days_per_request=30)
            else:
                rio_vista_raw = usgs_nwis.nwis_dataset(
                    station="11455420",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60],  # "Discharge, cubic feet per second"
                    days_per_request=30)
            rio_vista_raw.to_netcdf(rio_vista_raw_fn, engine='scipy')

    if 1:  # Clean and write it all out.
        jersey_raw = xr.open_dataset(jersey_raw_fn)
        rio_vista_raw = xr.open_dataset(rio_vista_raw_fn)

        temp_logical = [temp_jersey, temp_rio]
        i = 0
        for src_name, source in [('Jersey', jersey_raw),
                                 ('RioVista', rio_vista_raw)]:
            src_feat = dio.read_pli(os.path.join(static_dir, '%s.pli' % src_name))[0]
            dredge_grid.dredge_boundary(grid, src_feat[1], dredge_depth)

            if temp_logical[i]:
                # Add stanzas to FlowFMold_bnd.ext:
                for quant, suffix in [('dischargebnd', '_flow'),
                                      ('salinitybnd', '_salt'),
                                      ('temperaturebnd', '_temp')]:
                    with open(old_bc_fn, 'at') as fp:
                        lines = ["QUANTITY=%s" % quant,
                                 "FILENAME=%s/%s%s.pli" % (rel_bc_dir, src_name, suffix),
                                 "FILETYPE=9",
                                 "METHOD=3",
                                 "OPERAND=O",
                                 ""]
                        fp.write("\n".join(lines))

                    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
                    dio.write_pli(os.path.join(run_base_dir, rel_bc_dir,
                                               '%s%s.pli' % (src_name, suffix)),
                                  [feat_suffix])

                    # Write the data:
                    if quant == 'dischargebnd':
                        da = source.stream_flow_mean_daily
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 1.0
                        else:
                            # Convert ft3/s to m3/s.
                            da2.values[:] *= 0.028316847
                    elif quant == 'salinitybnd':
                        da2 = source.stream_flow_mean_daily.copy(deep=True)
                        da2.values[:] = 0.0
                    elif quant == 'temperaturebnd':
                        da = source.temperature_water
                        da2 = utils.fill_tidal_data(da)  # maybe safer to just interpolate?
                        if all_flows_unit:
                            da2.values[:] = 20.0

                    df = da2.to_dataframe().reset_index()
                    df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(60, 's')
                    columns = ['elapsed_minutes', da2.name]

                    if len(feat_suffix) == 3:
                        node_names = feat_suffix[2]
                    else:
                        node_names = [""] * len(feat_suffix[1])

                    for node_idx, node_name in enumerate(node_names):
                        # If no node names are known, create the default name of <feature name>_0001
                        if not node_name:
                            node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)
                        tim_fn = os.path.join(run_base_dir, rel_bc_dir, node_name + ".tim")
                        df.to_csv(tim_fn, sep=' ', index=False, header=False, columns=columns)
            else:
                # Same as above, but without a temperature boundary.
                # Add stanzas to FlowFMold_bnd.ext:
                for quant, suffix in [('dischargebnd', '_flow'),
                                      ('salinitybnd', '_salt')]:
                    with open(old_bc_fn, 'at') as fp:
                        lines = ["QUANTITY=%s" % quant,
                                 "FILENAME=%s/%s%s.pli" % (rel_bc_dir, src_name, suffix),
                                 "FILETYPE=9",
                                 "METHOD=3",
                                 "OPERAND=O",
                                 ""]
                        fp.write("\n".join(lines))

                    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
                    dio.write_pli(os.path.join(run_base_dir, rel_bc_dir,
                                               '%s%s.pli' % (src_name, suffix)),
                                  [feat_suffix])

                    # Write the data:
                    if quant == 'dischargebnd':
                        da = source.stream_flow_mean_daily
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 1.0
                        else:
                            # Convert ft3/s to m3/s.
                            da2.values[:] *= 0.028316847
                    elif quant == 'salinitybnd':
                        da2 = source.stream_flow_mean_daily.copy(deep=True)
                        da2.values[:] = 0.0

                    df = da2.to_dataframe().reset_index()
                    df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(60, 's')
                    columns = ['elapsed_minutes', da2.name]

                    if len(feat_suffix) == 3:
                        node_names = feat_suffix[2]
                    else:
                        node_names = [""] * len(feat_suffix[1])

                    for node_idx, node_name in enumerate(node_names):
                        # If no node names are known, create the default name of <feature name>_0001
                        if not node_name:
                            node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)
                        tim_fn = os.path.join(run_base_dir, rel_bc_dir, node_name + ".tim")
                        df.to_csv(tim_fn, sep=' ', index=False, header=False, columns=columns)

            i += 1
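# Unit-conversion note, as a standalone sketch: the USGS discharge product (parameter
# 60) is reported in ft^3/s, and add_delta_inflow converts it in place with the factor
# 0.028316847. The helper name below is not part of the module; it is illustration only.
CFS_TO_CMS = 0.028316847  # 1 ft^3 = 0.028316847 m^3


def cfs_to_cms(q_cfs):
    """Convert discharge from ft^3/s (USGS) to m^3/s (DFM)."""
    return q_cfs * CFS_TO_CMS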
dredge_depth = -1

# sfb_dfm_v2_base_dir="../../sfb_dfm_v2"
adjusted_pli_fn = 'nudged_features.pli'

if include_fresh:
    # --------- SF FRESH
    if 0:  # BAHM data
        # SF Bay Freshwater and POTW, copied from sfb_dfm_v2:
        # features which have manually set locations for this grid.
        # Borrow files from sfb_dfm_v2 -- should switch to submodules.
        if 1:  # Transcribe to shapefile for debugging/vis
            from shapely import geometry
            from stompy.spatial import wkb2shp

            adj_pli_feats = dio.read_pli(adjusted_pli_fn)
            names = [feat[0] for feat in adj_pli_feats]
            geoms = [geometry.Point(feat[1].mean(axis=0))
                     for feat in adj_pli_feats]
            wkb2shp.wkb2shp('derived/input_locations.shp',
                            geoms,
                            fields={'name': names},
                            overwrite=True)

    # kludge - wind the clock back a bit:
    print("TOTAL KLUDGE ON FRESHWATER")
    from sfb_dfm_utils import sfbay_freshwater
    # This will pull freshwater data from 2012, where we already
    # have a separate run which kind of makes sense.
def add_sfbay_potw(mdu,
                   rel_src_dir,  # added rel_src_dir alliek dec 2020
                   potw_dir,
                   adjusted_pli_fn,
                   grid, dredge_depth,
                   all_flows_unit=False,
                   time_offset=None,
                   write_salt=True, write_temp=True):
    """
    time_offset: shift all dates by the given offset.  To run 2016 with data
      from 2015, specify np.timedelta64(-365,'D').
    write_salt: leave as True for older DFM; for newer DFM only set to True
      when the simulation includes salinity.
    write_temp: same, but for temperature.
    """
    run_base_dir = mdu.base_path
    ref_date, run_start, run_stop = mdu.time_range()
    old_bc_fn = mdu.filepath(["external forcing", "ExtForceFile"])

    if time_offset is not None:
        run_start = run_start + time_offset
        run_stop = run_stop + time_offset
        ref_date = ref_date + time_offset

    potws = xr.open_dataset(os.path.join(potw_dir, 'outputs', 'sfbay_delta_potw.nc'))
    adjusted_features = dio.read_pli(adjusted_pli_fn)

    # Select a time subset of the flow data, starting just before the
    # simulation period, and running beyond the end:
    time_pnts = np.searchsorted(potws.time, [run_start - DAY, run_stop + DAY])
    time_pnts = time_pnts.clip(0, len(potws.time) - 1)
    time_idxs = range(time_pnts[0], time_pnts[1])  # enumerate them for loops below

    with open(old_bc_fn, 'at') as fp:
        for site in potws.site.values:
            # NB: site is bytes at this point
            potw = potws.sel(site=site)
            try:
                site_s = site.decode()
            except AttributeError:
                site_s = site  # py2

            if site_s in ['false_sac', 'false_sj']:
                six.print_("(skip %s) " % site_s, end="")
                continue
            if potw.utm_x.values.mean() > 610200:
                # Delta POTWs are in this file, too, but not in this grid.
                # Luckily they are easy to identify based on x coordinate.
                six.print_("(skip %s -- too far east) " % site_s, end="")
                continue

            six.print_("%s " % site_s, end="")

            fp.write(("QUANTITY=discharge_salinity_temperature_sorsin\n"
                      "FILENAME=%s/%s.pli\n"
                      "FILETYPE=9\n"
                      "METHOD=1\n"
                      "OPERAND=O\n"
                      "AREA=0 # no momentum\n"
                      "\n") % (rel_src_dir, site_s))  # added rel_src_dir alliek dec 2020

            # Write the location - writing a single point appears to work,
            # based on how it shows up in the GUI.  Otherwise we'd have to
            # manufacture a point outside the domain.
            # added rel_src_dir alliek dec 2020
            with open(os.path.join(run_base_dir, rel_src_dir, '%s.pli' % site_s), 'wt') as pli_fp:
                # This is handled slightly differently with POTWs - put the
                # depth at -50; it should be at the bed.
                feat = [site_s,
                        np.array([[potw.utm_x.values, potw.utm_y.values, -50.0]]),
                        ['']]
                # Scan adjusted features for a match to use instead.
                for adj_feat in adjusted_features:
                    if adj_feat[0] == site_s:
                        # Merge them if the adjusted feature is more than 10 m away
                        # (to allow for some rounding in the ascii round-trip).
                        offset = utils.dist(adj_feat[1][-1][:2] - feat[1][-1][:2])
                        if offset > 10.0:
                            # Just add on the extra point - but may have to promote one
                            # or the other to 3D.
                            old_geo = feat[1]
                            new_geo = adj_feat[1][-1:]
                            if old_geo.shape[1] != new_geo.shape[1]:
                                if old_geo.shape[1] < 3:
                                    old_geo = np.concatenate((old_geo, 0 * old_geo[:, :1]),
                                                             axis=1)
                                else:
                                    # copy depth from old_geo
                                    new_geo = np.concatenate((new_geo,
                                                              old_geo[-1, -1] + 0 * new_geo[:, :1]),
                                                             axis=1)
                            # If the original feature was outside the grid, then all is well,
                            # and it will show up in the GUI as a line from the original location
                            # outside the domain to the new location in the domain.
                            if grid.select_cells_nearest(old_geo[-1, :2], inside=True) is None:
                                feat[1] = np.concatenate((old_geo, new_geo), axis=0)
                                if len(feat) == 3:
                                    # includes labels, but they don't matter here, right?
                                    feat[2].append('')
                            else:
                                # But if the original location is inside the grid, this will be
                                # interpreted as a sink-source pair, so we instead just put the
                                # single, adjusted location in.  This is done after potentially
                                # copying z-coordinate data from the original location.
                                feat[1] = new_geo
                        break

                dio.write_pli(pli_fp, [feat])

            dredge_grid.dredge_discharge(grid, feat[1], dredge_depth)

            # added rel_src_dir alliek dec 2020
            with open(os.path.join(run_base_dir, rel_src_dir, '%s.tim' % site_s), 'wt') as tim_fp:
                for tidx in time_idxs:
                    tstamp_minutes = (potw.time[tidx] - ref_date) / np.timedelta64(1, 'm')

                    if all_flows_unit:
                        flow_cms = 1.0
                    else:
                        flow_cms = potw.flow[tidx]

                    items = [tstamp_minutes, flow_cms]
                    if write_salt:
                        items.append(0.0)
                    if write_temp:
                        items.append(20.0)
                    tim_fp.write(" ".join(["%g" % v for v in items]) + "\n")

    six.print_("Done with POTWs")
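# Sketch of the .tim record written per time step above: elapsed minutes since the
# (possibly shifted) reference date, flow in m^3/s, then optional salinity (0 ppt) and
# temperature (20 degC) columns controlled by write_salt/write_temp. The values here
# are made up for illustration.
example_items = [525600.0, 0.35, 0.0, 20.0]  # minutes, flow_cms, salinity, temperature
example_line = " ".join(["%g" % v for v in example_items])
# -> "525600 0.35 0 20"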
import os
import glob

from shapely import geometry

from stompy.spatial import wkb2shp
import stompy.model.delft.io as dio

##

shp_dest = 'gis/model-features.shp'

names = []
geoms = []

for fn in glob.glob('*.pli'):
    feats = dio.read_pli(fn)
    for feat in feats:  # generally just one per file
        names.append(feat[0])
        geoms.append(geometry.LineString(feat[1]))

wkb2shp.wkb2shp(shp_dest, geoms, fields=dict(names=names))

# AmericanRiver.pli
# Barker_Pumping_Plant.pli
# DXC.pli
# FlowFMcrs.pli
# Georgiana.pli
# SacramentoRiver.pli
# SRV.pli

##
def add_sfbay_freshwater(mdu,
                         adjusted_pli_fn,
                         freshwater_dir,
                         grid,
                         dredge_depth,
                         all_flows_unit=False,
                         time_offset=None):
    """
    Add freshwater flows from the sfbay_freshwater git submodule.

    mdu: MDUFile for the run; supplies the run directory, reference date,
      run period, and the old-style forcing input file (ExtForceFile).
    adjusted_pli_fn: path to pli file to override source locations.
    freshwater_dir: path to sfbay_freshwater git submodule.
    grid: UnstructuredGrid instance to be modified at input locations.
    dredge_depth: depth to which the inflow boundaries are dredged.
    all_flows_unit: if True, override all flows to be 1 m3 s-1 for model
      diagnostics.
    time_offset: pull freshwater flows from this timedelta off from the
      specified period.  I.e. if your run is really 2016, but you want 2015
      flows, specify np.timedelta64(-365,'D').  Slightly safer to use days
      than years here.
    """
    run_base_dir = mdu.base_path
    ref_date, run_start, run_stop = mdu.time_range()
    old_bc_fn = mdu.filepath(["external forcing", "ExtForceFile"])

    if time_offset is not None:
        run_start = run_start + time_offset
        run_stop = run_stop + time_offset
        ref_date = ref_date + time_offset

    def write_flow_data(stn_ds, src_name, flow_scale=1.0):
        df = stn_ds.to_dataframe().reset_index()
        df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(60, 's')
        df['salinity'] = 0 * df.flow_cms
        df['temperature'] = 20 + 0 * df.flow_cms

        if all_flows_unit:
            df['flow_cms'] = 1.0 + 0 * df.flow_cms
        else:
            df['flow_cms'] = flow_scale * df.flow_cms

        for quantity, suffix in [('dischargebnd', '_flow'),
                                 ('salinitybnd', '_salt'),
                                 ('temperaturebnd', '_temp')]:
            lines = ['QUANTITY=%s' % quantity,
                     'FILENAME=%s%s.pli' % (src_name, suffix),
                     'FILETYPE=9',
                     'METHOD=3',
                     'OPERAND=O',
                     ""]
            with open(old_bc_fn, 'at') as fp:
                fp.write("\n".join(lines))

            # Read the pli back to know how to name the per-node timeseries.
            feats = dio.read_pli(os.path.join(run_base_dir, "%s%s.pli" % (src_name, suffix)))
            feat = feats[0]  # just one polyline in the file

            if len(feat) == 3:
                node_names = feat[2]
            else:
                node_names = [""] * len(feat[1])

            for node_idx, node_name in enumerate(node_names):
                # If no node names are known, create the default name of <feature name>_0001
                if not node_name:
                    node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)

                tim_fn = os.path.join(run_base_dir, node_name + ".tim")

                columns = ['elapsed_minutes']
                if quantity == 'dischargebnd':
                    columns.append('flow_cms')
                elif quantity == 'salinitybnd':
                    columns.append('salinity')
                elif quantity == 'temperaturebnd':
                    columns.append('temperature')

                df.to_csv(tim_fn, sep=' ', index=False, header=False, columns=columns)

    adjusted_features = dio.read_pli(adjusted_pli_fn)

    # Add the freshwater flows - could come from erddap, but use the github
    # submodule for better control on version.
    # Create a pair of bc and pli files, each including all the sources.
    # Exact placement will be done by hand in the GUI.
    full_flows_ds = xr.open_dataset(os.path.join(freshwater_dir, 'outputs', 'sfbay_freshwater.nc'))

    # Period of the full dataset which will be included for this run:
    sel = ((full_flows_ds.time > run_start - 5 * DAY) &
           (full_flows_ds.time < run_stop + 5 * DAY))
    flows_ds = full_flows_ds.isel(time=sel)

    nudge_by_gage(flows_ds, '11169025', station='SCLARAVCc', decorr_days=20)
    nudge_by_gage(flows_ds, '11180700', station='UALAMEDA', decorr_days=20)

    if 1:  # Special handling for Mowry Slough
        mowry_feat = None
        src_name = "MOWRY"
        for adj_feat in adjusted_features:
            if adj_feat[0] == src_name:
                mowry_feat = adj_feat

                # Write copies for flow, salinity and temperature:
                for suffix in ['_flow', '_salt', '_temp']:
                    # function to add suffix
                    feat_suffix = dio.add_suffix_to_feature(mowry_feat, suffix)
                    pli_fn = os.path.join(run_base_dir, "%s%s.pli" % (src_name, suffix))
                    dio.write_pli(pli_fn, [feat_suffix])

                dredge_grid.dredge_boundary(grid, mowry_feat[1], dredge_depth)

    for stni in range(len(flows_ds.station)):
        stn_ds = flows_ds.isel(station=stni)

        src_name = stn_ds.station.item()  # kind of a pain to get scalar values back out...

        # At least through the GUI, pli files must have more than one node.
        # Don't get too big for our britches, just stick a second node 50m east
        # if the incoming data is a point, but check for manually set locations
        # in adjusted_features.
        if 1:  # -- Write a PLI file
            feat = (src_name,
                    np.array([[stn_ds.utm_x, stn_ds.utm_y],
                              [stn_ds.utm_x + 50.0, stn_ds.utm_y]]))
            # Scan adjusted features for a match to use instead:
            for adj_feat in adjusted_features:
                if adj_feat[0] == src_name:
                    feat = adj_feat
                    break

            # Write copies for flow, salinity and temperature:
            for suffix in ['_flow', '_salt', '_temp']:
                # function to add suffix
                feat_suffix = dio.add_suffix_to_feature(feat, suffix)
                pli_fn = os.path.join(run_base_dir, "%s%s.pli" % (src_name, suffix))
                dio.write_pli(pli_fn, [feat_suffix])

            dredge_grid.dredge_boundary(grid, feat[1], dredge_depth)

        if 1:  # -- Write the time series and stanza in FlowFM_bnd.ext
            if src_name == "EBAYS" and mowry_feat is not None:
                write_flow_data(stn_ds, src_name)
                # EBAYS watershed is something like 13000 acres.
                # Don't worry about scaling back EBAYS, but add in some extra
                # here for MOWRY.
                write_flow_data(stn_ds, "MOWRY", flow_scale=12.8 / 13000)
            else:
                write_flow_data(stn_ds, src_name)

    full_flows_ds.close()
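# Minimal invocation sketch (not from the original source): mdu and grid would come
# from the surrounding run setup, and the MDU filename, submodule path and dredge depth
# below are placeholders. MDUFile(filename=...) and read_dfm follow the usage elsewhere
# in this collection.
#
#   mdu = dio.MDUFile(filename='runs/example/flowfm.mdu')
#   grid = unstructured_grid.UnstructuredGrid.read_dfm('runs/example/flowfm_net.nc')
#   add_sfbay_freshwater(mdu,
#                        adjusted_pli_fn='nudged_features.pli',
#                        freshwater_dir='sfbay_freshwater',
#                        grid=grid,
#                        dredge_depth=-1,
#                        time_offset=np.timedelta64(-365, 'D'))  # e.g. run 2016 with 2015 flows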
def write_QST_data(mdu, stn_ds, src_name, time_offset=None):
    """
    Write flow, salinity and temperature time series from an xarray Dataset.

    mdu: MDUFile for the run.  Expects ref_date, ExtForceFile, base_path.
    src_name: sanitized name for filenames and ExtForceFile.
    time_offset: offset to apply to stn_ds.time.  Note that the sign here is
      opposite of add_sfbay_freshwater, since it is being added to stn_ds
      instead of start_date.
    """
    old_bc_fn = mdu.filepath(('external forcing', 'ExtForceFile'))
    ref_date, run_start, run_stop = mdu.time_range()

    df = stn_ds.to_dataframe().reset_index()
    df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(60, 's')

    # Default values:
    df['salinity'] = 0 * df.flow_cms
    df['temperature'] = 20 + 0 * df.flow_cms

    assert np.all(np.isfinite(df.flow_cms))

    quant_suffix = [('dischargebnd', '_flow')]
    if mdu['physics', 'temperature']:
        quant_suffix.append(('temperaturebnd', '_temp'))
    if mdu['physics', 'salinity']:
        quant_suffix.append(('salinitybnd', '_salt'))

    for quantity, suffix in quant_suffix:
        lines = ['QUANTITY=%s' % quantity,
                 'FILENAME=%s%s.pli' % (src_name, suffix),
                 'FILETYPE=9',
                 'METHOD=3',
                 'OPERAND=O',
                 ""]
        with open(old_bc_fn, 'at') as fp:
            fp.write("\n".join(lines))

        # Read the pli back to know how to name the per-node timeseries.
        feats = dio.read_pli(os.path.join(mdu.base_path, "%s%s.pli" % (src_name, suffix)))
        feat = feats[0]  # just one polyline in the file

        if len(feat) == 3:
            node_names = feat[2]
        else:
            node_names = [""] * len(feat[1])

        for node_idx, node_name in enumerate(node_names):
            # If no node names are known, create the default name of <feature name>_0001
            if not node_name:
                node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)

            tim_fn = os.path.join(mdu.base_path, node_name + ".tim")

            columns = ['elapsed_minutes']
            if quantity == 'dischargebnd':
                columns.append('flow_cms')
            elif quantity == 'salinitybnd':
                columns.append('salinity')
            elif quantity == 'temperaturebnd':
                columns.append('temperature')

            df.to_csv(tim_fn, sep=' ', index=False, header=False, columns=columns)

            # To avoid hitting the limit of open files, only write the first
            # node.  It's not actually necessary here to write more than one.
            break
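# Minimal sketch of an input Dataset for write_QST_data: the function only needs a
# 'time' coordinate and a 'flow_cms' variable (salinity/temperature defaults are filled
# in above), and it expects the <src_name>_flow.pli etc. files to exist already. The
# station name and flow value here are placeholders.
import numpy as np
import xarray as xr

times = np.arange(np.datetime64('2016-06-01'), np.datetime64('2016-06-11'))
stn_ds = xr.Dataset({'flow_cms': ('time', np.full(len(times), 2.5))},
                    coords={'time': times})
# write_QST_data(mdu, stn_ds, 'ExampleCreek')  # mdu from the surrounding run setup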
##

de = np.zeros(g.Nedges(), np.float64)

c1 = e2c[:, 0].copy()
c2 = e2c[:, 1].copy()
# Boundary edges have a negative cell index on one side; fall back to the
# valid neighbor so the maximum below is well defined.
c1[c1 < 0] = c2[c1 < 0]
c2[c2 < 0] = c1[c2 < 0]

de = np.maximum(g.cells['depth'][c1], g.cells['depth'][c2])

g.add_edge_field('edge_depth', de, on_exists='overwrite')

##

# load levee data:
levee_fn = 'grid-sfbay/SBlevees_tdk.pli'
levees = dio.read_pli(levee_fn)

##

from shapely import geometry


def pli_to_grid_edges(g, levees):
    """
    g: UnstructuredGrid
    levees: polylines in the format returned by stompy.model.delft.io.read_pli,
      i.e. a list of features [ [ 'name', [ [x,y,z,...],...], ['node0',...]
def plot_MDU(mdu_filename, gridpath):
    # ------------- script now takes over -------------------------------------------
    mdu_filename = Path(mdu_filename)
    # The assumption is that we'll find all our bc's in the same folder as the mdu.
    base_dir = mdu_filename.parent
    folder_dir = base_dir / 'bc_figures'
    folder_dir.exists() or folder_dir.mkdir()

    # Load in the grid (assumption that it is the same grid).
    from stompy.grid import unstructured_grid
    grid = str(gridpath)  # path to the DFM grid netCDF
    grid = unstructured_grid.UnstructuredGrid.read_dfm(grid, cleanup=True)

    # Open MDU, strip time information using stompy functionality.
    MDU = dio.MDUFile(filename=str(mdu_filename))
    t_ref, t_start, t_stop = MDU.time_range()

    # Define shared plotting functions.
    def format_xaxis(axis):
        months = mdates.MonthLocator(interval=2)  # every other month
        fmt = mdates.DateFormatter('%b/%Y')
        axis.xaxis.set_major_locator(months)
        axis.xaxis.set_major_formatter(fmt)
        axis.set_xlim(t_ref, t_stop)

    def save_image(fig, name):
        fullname = folder_dir / (name + '.png')
        fig.savefig(str(fullname), dpi=300, bbox_inches='tight')
        print('Saved %s' % fullname)
        plt.close()

    # Section one:
    # First read through the source_files (which seem to be the POTWs).
    sourcefolder = base_dir / 'source_files'
    PLIs = list(sourcefolder.glob('*.pli'))  # all the pli files in the directory

    # Iterate through each one.  Note each pli file has a corresponding
    # timeseries of data (*.tim).
    for bc in PLIs:
        print('Reading %s' % bc.stem)
        pli = dio.read_pli(str(bc))  # read in the *.pli file
        tim_filename = sourcefolder / (bc.stem + '.tim')  # corresponding timeseries
        tim = dio.read_dfm_tim(str(tim_filename), t_ref,
                               time_unit='M',
                               columns=['flow', 'sal', 'temp'])

        # Plot the data.
        fig = plt.figure(figsize=(11, 3))
        ax1 = fig.add_axes([0.05, 0.05, 0.68, 0.8])
        map_axis = fig.add_axes([0.55, 0.18, 0.6, 0.6])

        name = pli[0][0]
        ax1.set_title(name.capitalize() + ' (POTW Source)')
        ax1.plot(tim.time, tim.flow, '-', linewidth=5, alpha=0.5, color='skyblue')
        ax1.grid(b=True, alpha=0.25)
        ax1.set_ylabel("Flow (m$^3$/s)")
        format_xaxis(ax1)

        # Plot SFB map + location.
        grid.plot_edges(ax=map_axis, alpha=0.8)
        map_axis.axis('off')
        coords = pli[0][1]
        for coord in coords:
            x, y = coord[0], coord[1]  # there is a z coordinate we are ignoring here
            map_axis.plot(x, y, 'o', markersize=11, color='orangered')

        # Quick check that temp/salinity are fixed:
        temp = set(tim.temp.values)
        sal = set(tim.sal.values)
        if len(temp) > 1 or len(sal) > 1:
            print('sal or temp is NOT FIXED at %s' % bc.stem)
        else:
            label = ('Temperature is fixed at %d C\n'
                     'Salinity is fixed at %d ppt' % (temp.pop(), sal.pop()))
            ax1.text(1.08, .05, label,
                     horizontalalignment='left',
                     verticalalignment='center',
                     transform=ax1.transAxes,
                     fontsize=12)

        save_image(fig, name)

    '''
    NEXT: ON TO THE BOUNDARY CONDITIONS FOR THE INFLOWS / STREAMS / CREEKS ETC.

    The only tricky difference here is that these bc's are sometimes divided
    across multiple cells (e.g. a big river might be split across 2 cell
    segments).  In that case we look at the pli file (geometry) to see how
    many cells the BC is split across and then multiply discharge by the
    number of cells.  We don't need to touch temperature (scalar) or salinity
    (concentration).  DFM should always divide evenly across cells (1/3 for
    3 cells, 1/2 for 2 cells), so unless someone's really decided to get
    creative with custom settings this assumption should hold.
    '''
    bcfolder = base_dir / 'bc_files'
    PLIs = list(bcfolder.glob('*.pli'))

    for bc in PLIs:
        print('Reading %s' % bc.stem)
        # The bc is divided evenly between multiple cells, so we just take one
        # node's timeseries and multiply by the number of points.
        pli = dio.read_pli(str(bc))
        filenames = pli[0][2]
        ncells = len(filenames)

        tim_filename = bcfolder / (filenames[0] + '.tim')  # corresponding timeseries
        tim = dio.read_dfm_tim(str(tim_filename), t_ref,
                               time_unit='M',
                               columns=['data'])  # , columns=['flow','sal','temp'])

        # Plot the data.
        fig = plt.figure(figsize=(11, 3))
        ax1 = fig.add_axes([0.05, 0.05, 0.68, 0.8])
        map_axis = fig.add_axes([0.55, 0.18, 0.6, 0.6])

        name = pli[0][0]  # name of the boundary condition
        ax1.set_title(name.capitalize() + ' (non-POTW source)')

        if 'flow' in name:
            ax1.set_ylabel("Flow (m$^3$/s)")
            tim.data.values = tim.data.values * ncells  # multiply by # of segments inflow is divided across
        elif 'salt' in name:
            ax1.set_ylabel("Salinity (PPT)")
        elif 'temp' in name:
            ax1.set_ylabel("Temperature (deg C)")
        elif 'ssh' in name:
            ax1.set_ylabel('Sea Surface Height Forcing (m)')

        format_xaxis(ax1)
        ax1.plot(tim.time, tim.data, '-', linewidth=5, alpha=0.5, color='skyblue')
        ax1.grid(b=True, alpha=0.25)

        # Plot SFB map + location.
        grid.plot_edges(ax=map_axis, alpha=0.8)
        map_axis.axis('off')
        coords = pli[0][1]
        for coord in coords:
            x, y = coord[0], coord[1]  # there is a z coordinate we are ignoring here
            map_axis.plot(x, y, 'o', markersize=11, color='orangered')

        save_image(fig, name)

    print('Done plotting boundary conditions.')
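# Example invocation (both paths are placeholders for an actual run): point plot_MDU at
# an MDU file and the matching DFM grid netCDF; figures are written to
# <mdu directory>/bc_figures.
#
#   plot_MDU('runs/example/flowfm.mdu', 'runs/example/flowfm_net.nc')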
import stompy.model.delft.io as dio
from stompy.spatial import wkb2shp
from shapely import geometry

##

weirs = dio.read_pli('fixed_weirs-v02.pli')
geoms = [geometry.LineString(w[1][:, :2]) for w in weirs]

##

wkb2shp.wkb2shp('fixed_weirs-v02.shp', geoms)
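# Variant sketch (not part of the original script): carry the weir names through as a
# shapefile attribute, mirroring the pli-to-shapefile script earlier in this collection.
# The output filename is an assumption.
names = [w[0] for w in weirs]
wkb2shp.wkb2shp('fixed_weirs-v02-named.shp', geoms,
                fields={'name': names}, overwrite=True)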