Example #1
    def write(self, mdu, feature, grid):
        print("Writing feature: %s" % (feature['name']))

        name = feature['name']
        old_bc_fn = mdu.filepath(['external forcing', 'ExtForceFile'])

        for var_name in self.var_names:
            if feature['geom'].type == 'LineString':
                pli_data = [(name, np.array(feature['geom'].coords))]
                base_fn = os.path.join(mdu.base_path,
                                       "%s_%s" % (name, var_name))
                pli_fn = base_fn + '.pli'
                dio.write_pli(pli_fn, pli_data)

                if var_name == 'ssh':
                    quant = 'waterlevelbnd'
                else:
                    assert False

                with open(old_bc_fn, 'at') as fp:
                    lines = [
                        "QUANTITY=%s" % quant,
                        "FILENAME=%s_%s.pli" % (name, var_name), "FILETYPE=9",
                        "METHOD=3", "OPERAND=O", ""
                    ]
                    fp.write("\n".join(lines))

                self.write_data(mdu, feature, var_name, base_fn)
            else:
                assert False
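
The stanza appended to the old-style external forcing file is the same five key=value lines in every example on this page; a minimal sketch of that pattern as a reusable helper (the function name and signature are mine, not stompy's):

def append_ext_stanza(ext_path, quantity, filename, filetype=9, method=3, operand='O'):
    # Append one QUANTITY block to an old-style ExtForceFile (illustrative helper).
    lines = ["QUANTITY=%s" % quantity,
             "FILENAME=%s" % filename,
             "FILETYPE=%s" % filetype,
             "METHOD=%s" % method,
             "OPERAND=%s" % operand,
             ""]  # the trailing "" yields the final newline, matching the loop above
    with open(ext_path, 'at') as fp:
        fp.write("\n".join(lines))

# e.g. append_ext_stanza(old_bc_fn, 'waterlevelbnd', '%s_%s.pli' % (name, var_name))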
Example #2
    def write(self, mdu, feature, grid):
        # obvious copy and paste from above.
        # not quite ready to abstract, though
        print("Writing feature: %s" % (feature['name']))

        name = feature['name']
        old_bc_fn = mdu.filepath(['external forcing', 'ExtForceFile'])

        for var_name in self.var_names:
            if feature['geom'].type == 'LineString':
                pli_data = [(name, np.array(feature['geom'].coords))]
                base_fn = os.path.join(mdu.base_path,
                                       "%s_%s" % (name, var_name))
                pli_fn = base_fn + '.pli'
                dio.write_pli(pli_fn, pli_data)

                if var_name == 'q':
                    quant = 'dischargebnd'
                else:
                    assert False

                with open(old_bc_fn, 'at') as fp:
                    lines = [
                        "QUANTITY=%s" % quant,
                        "FILENAME=%s_%s.pli" % (name, var_name), "FILETYPE=9",
                        "METHOD=3", "OPERAND=O", ""
                    ]
                    fp.write("\n".join(lines))

                self.write_data(mdu, feature, var_name, base_fn)

                dfm_grid.dredge_boundary(grid, pli_data[0][1],
                                         self.dredge_depth)
            else:
                assert False
Example #3
    def write(self, mdu, feature, grid):
        # obvious copy and paste from above.
        # not quite ready to abstract, though
        print("Writing feature: %s" % (feature['name']))

        name = feature['name']
        old_bc_fn = mdu.filepath(['external forcing', 'ExtForceFile'])

        assert feature['geom'].type == 'LineString'

        pli_data = [(name, np.array(feature['geom'].coords))]
        base_fn = os.path.join(mdu.base_path, "%s" % (name))
        pli_fn = base_fn + '.pli'
        dio.write_pli(pli_fn, pli_data)

        with open(old_bc_fn, 'at') as fp:
            lines = [
                "QUANTITY=discharge_salinity_temperature_sorsin",
                "FILENAME=%s" % os.path.basename(pli_fn), "FILETYPE=9",
                "METHOD=1", "OPERAND=O", "AREA=0 # no momentum", ""
            ]
            fp.write("\n".join(lines))

        self.write_data(mdu, feature, base_fn)

        # Really just need to dredge the first and last nodes
        dfm_grid.dredge_discharge(grid, pli_data[0][1], self.dredge_depth)
Example #4
def write_pli(g, run_base_dir, src_name, j, suffix):
    seg = g.nodes['x'][g.edges['nodes'][j]]
    src_feat = (src_name, seg, [src_name + "_0001", src_name + "_0002"])
    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
    dio.write_pli(os.path.join(run_base_dir, '%s%s.pli' % (src_name, suffix)),
                  [feat_suffix])
    return feat_suffix
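
For reference, dio.write_pli emits the plain-text polyline layout used by D-Flow FM; a hand-rolled sketch of that layout, assuming the usual name / "npoints ncols" header followed by coordinate rows with optional node labels (use dio.write_pli in practice):

def write_pli_sketch(fn, features):
    # Rough sketch of the .pli text layout; format details are my assumption.
    with open(fn, 'wt') as fp:
        for feat in features:
            name, coords = feat[0], feat[1]
            labels = feat[2] if len(feat) > 2 else None
            fp.write("%s\n" % name)
            fp.write("    %d %d\n" % (coords.shape[0], coords.shape[1]))
            for i, row in enumerate(coords):
                line = "    " + " ".join("%.6g" % v for v in row)
                if labels:
                    line += " " + labels[i]
                fp.write(line + "\n")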
Example #5
    def write(self):
        super().write()
        # My understanding is that the first 'z' coordinate is actually
        # the number of layers,
        # and the second 'z' coordinate is the layertype, and all z
        # values beyond  that are ignored.
        if 1:
            # When extended more than dx/2 beyond the grid,
            # this will run.
            pli_data = [('domain',
                         np.array([[-15, -15, 10], [615, -15, 2],
                                   [615, 115, 2], [-15, 115, 10]]))]
        if 1:
            # What about two polygons that together encompass the grid?
            # fails with nan somewhere
            # what if the number of layers matches? nope.
            # What if they are sigma? matching count and both sigma will run.
            #  non-matching, both sigma does not run.
            pli_data = [('left',
                         np.array([[-15, -15, 10], [300, -15, 1],
                                   [300, 115, 1], [-15, 115, 10]])),
                        ('right',
                         np.array([[290, -15, 1], [615, -15, 1], [615, 115, 1],
                                   [290, 115, 1]]))]

        fn = self.mdu['geometry', 'VertplizFile']
        dio.write_pli(os.path.join(self.run_dir, fn), pli_data)
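
Per the comments above, only the first two z values of the vertical polygon appear to be used (layer count, then layer type); a hedged sketch annotating that reading of the convention:

# Sketch only: my reading of the convention from the experiments above, not a documented API.
n_layers = 10      # first z value: number of layers
layer_type = 2     # second z value: layer type (1 is what the sigma experiments above use)
pli_data = [('domain',
             np.array([[-15, -15, n_layers],
                       [615, -15, layer_type],
                       [615, 115, 0],   # remaining z values are ignored, per the note above
                       [-15, 115, 0]]))]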
Example #6
        print("Generating fixed weirs")
        fixed_weir_fn = "fixed_weirs-%s.pliz" % name
        fixed_weirs = []  # suitable for write_pli
        dx = 5.0  # m. discretize lines at this resolution
        for i in range(len(lines)):
            feat = lines[i]
            if feat['type'] != 'fixed_weir': continue

            print(f"Processing levee feature {feat['name']}")
            xy = np.array(feat['geom'])
            xy = linestring_utils.upsample_linearring(xy,
                                                      dx,
                                                      closed_ring=False)
            z = dem(xy)
            fixed_weirs.append((feat['name'], np.c_[xy, z, 0 * z, 0 * z]))
        dio.write_pli(fixed_weir_fn, fixed_weirs)

    if gen_grids:
        print("Setting bathymetry for %s" % name)

        if 0:  # Simplest option:
            #   Put bathy on nodes, just direct sampling.
            z_node = dem(g.nodes['x'])
        if 1:  # Bias deep
            name += "_deep"
            # Maybe a good match with bedlevtype=5.
            # BLT=5: edges get shallower node, cells get deepest edge.
            # So extract edge depths (min,max,mean), and nodes get deepest
            # edge.

            alpha = np.linspace(0, 1, 5)
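
The truncated "bias deep" block above describes sampling the DEM along each edge and letting nodes take the deepest incident edge value; a rough sketch of one way that could look, reusing the g.nodes['x'] / g.edges['nodes'] access pattern from Example #4 (this is my guess at the elided code, not the original):

# Hedged sketch of the "bias deep" idea; not the original implementation.
alpha = np.linspace(0, 1, 5)                  # sample fractions along each edge
edge_nodes = g.edges['nodes']                 # (n_edges, 2) node indices
edge_min = np.zeros(len(edge_nodes))
for j, (a, b) in enumerate(edge_nodes):
    pa, pb = g.nodes['x'][a], g.nodes['x'][b]
    samples = (1 - alpha)[:, None] * pa + alpha[:, None] * pb
    edge_min[j] = dem(samples).min()          # deepest DEM value along this edge

z_node = dem(g.nodes['x'])                    # start from direct sampling
for j, (a, b) in enumerate(edge_nodes):
    # nodes take the deepest (lowest) incident edge, roughly matching BLT=5 behavior
    z_node[a] = min(z_node[a], edge_min[j])
    z_node[b] = min(z_node[b], edge_min[j])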
Example #7
def add_ocean(run_base_dir,
              rel_bc_dir,
              run_start,
              run_stop,
              ref_date,
              static_dir,
              grid,
              old_bc_fn,
              all_flows_unit=False,
              lag_seconds=0.0,
              factor=1.0):
    """
    Ocean:
    Silvia used:
        Water level data from station 46214 (apparently from Yi Chao's ROMS?)
          no spatial variation
        Maybe salinity from Yi Chao ROMS?  That's what the thesis says, but the
        actual inputs look like constant 33
    Here I'm using data from NOAA Point Reyes.
        waterlevel, water temperature from Point Reyes.
    When temperature is not available, use constant 15 degrees

    factor: a scaling factor applied to tide data to adjust amplitude around MSL.
    lag_seconds: shift the ocean boundary condition in time; a positive value
    applies the forcing later in time.
    """
    # get a few extra days of data to allow for transients in the low pass filter.
    pad_time = np.timedelta64(5, 'D')

    if 1:
        if 0:  # This was temporary, while NOAA had an issue with their website.
            log.warning("TEMPORARILY USING FORT POINT TIDES")
            tide_gage = "9414290"  # Fort Point
        else:
            tide_gage = "9415020"  # Pt Reyes

        if common.cache_dir is None:
            tides_raw_fn = os.path.join(run_base_dir, rel_bc_dir,
                                        'tides-%s-raw.nc' % tide_gage)
            if not os.path.exists(tides_raw_fn):
                tides = noaa_coops.coops_dataset(
                    tide_gage,
                    run_start - pad_time,
                    run_stop + pad_time, ["water_level", "water_temperature"],
                    days_per_request=30)

                tides.to_netcdf(tides_raw_fn, engine='scipy')
            else:
                tides = xr.open_dataset(tides_raw_fn)
        else:
            # rely on caching within noaa_coops
            tides = noaa_coops.coops_dataset(
                tide_gage,
                run_start - pad_time,
                run_stop + pad_time, ["water_level", "water_temperature"],
                days_per_request='M',
                cache_dir=common.cache_dir)
    # Those retain station as a dimension of length 1 - drop that dimension
    # here:
    tides = tides.isel(station=0)

    # Fort Point mean tide range is 1.248m, vs. 1.193 at Point Reyes.
    # apply rough correction to amplitude.
    # S2 phase 316.2 at Pt Reyes, 336.2 for Ft. Point.
    # 20 deg difference for a 12h tide, or 30 deg/hr, so
    # that's a lag of 40 minutes.
    # First go I got this backwards, and wound up with lags
    # at Presidio and Alameda of 4600 and 4400s.  That was
    # with lag_seconds -= 40*60.
    # Also got amplitudes 13% high at Presidio, so further correction...
    if tide_gage == "9414290":
        #
        factor *= 1.193 / 1.248 * 1.0 / 1.13
        lag_seconds += 35 * 60.

    if 1:
        # Clean that up, fabricate salinity
        water_level = utils.fill_tidal_data(tides.water_level)

        # IIR butterworth.  Nicer than FIR, with minor artifacts at ends
        # 3 hours, defaults to 4th order.
        water_level[:] = filters.lowpass(water_level[:].values,
                                         utils.to_dnum(water_level.time),
                                         cutoff=3. / 24)

        if 1:  # apply factor:
            msl = 2.152 - 1.214  # MSL(m) - NAVD88(m) for Point Reyes
            if factor != 1.0:
                log.info("Scaling tidal forcing amplitude by %.3f" % factor)
            water_level[:] = msl + factor * (water_level[:].values - msl)

        if 1:  # apply lag
            if lag_seconds != 0.0:
                # sign:  if lag_seconds is positive, then I want the result
                # for time.values[0] to come from original data at time.values[0]-lag_seconds
                if 0:  # Why interpolate here? Just alter the timebase.
                    water_level[:] = np.interp(
                        utils.to_dnum(tides.time.values),
                        utils.to_dnum(tides.time.values) -
                        lag_seconds / 86400., tides.water_level.values)
                else:
                    # Adjust time base directly.
                    # np.timedelta64 needs an integer count, so round to whole seconds.
                    water_level.time.values[:] = (water_level.time.values +
                                                  np.timedelta64(int(round(lag_seconds)), 's'))

        if 'water_temperature' not in tides:
            log.warning(
                "Water temperature was not found in NOAA data.  Will use constant 15"
            )
            water_temp = 15 + 0 * tides.water_level
            water_temp.name = 'water_temperature'
        else:
            fill_data(tides.water_temperature)
            water_temp = tides.water_temperature

        if all_flows_unit:
            print("-=-=-=- USING 35 PPT WHILE TESTING! -=-=-=-")
            salinity = 35 + 0 * water_level
        else:
            salinity = 33 + 0 * water_level
        salinity.name = 'salinity'

    if 1:  # Write it all out
        # Add a stanza to FlowFMold_bnd.ext:
        src_name = 'Sea'

        src_feat = dio.read_pli(os.path.join(static_dir,
                                             '%s.pli' % src_name))[0]

        forcing_data = [('waterlevelbnd', water_level, '_ssh'),
                        ('salinitybnd', salinity, '_salt'),
                        ('temperaturebnd', water_temp, '_temp')]

        for quant, da, suffix in forcing_data:
            with open(old_bc_fn, 'at') as fp:
                lines = [
                    "QUANTITY=%s" % quant,
                    "FILENAME=%s/%s%s.pli" % (rel_bc_dir, src_name, suffix),
                    "FILETYPE=9", "METHOD=3", "OPERAND=O", ""
                ]
                fp.write("\n".join(lines))

            feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
            dio.write_pli(
                os.path.join(run_base_dir, rel_bc_dir,
                             '%s%s.pli' % (src_name, suffix)), [feat_suffix])

            # Write the data:
            columns = ['elapsed_minutes', da.name]

            df = da.to_dataframe().reset_index()
            df['elapsed_minutes'] = (df.time.values -
                                     ref_date) / np.timedelta64(60, 's')

            if len(feat_suffix) == 3:
                node_names = feat_suffix[2]
            else:
                node_names = [""] * len(feat_suffix[1])

            for node_idx, node_name in enumerate(node_names):
                # if no node names are known, create the default name of <feature name>_0001
                if not node_name:
                    node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)

                tim_fn = os.path.join(run_base_dir, rel_bc_dir,
                                      node_name + ".tim")
                df.to_csv(tim_fn,
                          sep=' ',
                          index=False,
                          header=False,
                          columns=columns)
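
Each per-node .tim file written above is just two whitespace-separated columns, minutes since ref_date and the forced value, with no header; a minimal sketch of that step as a standalone helper (name and signature are mine, not stompy's):

def write_tim(da, ref_date, tim_fn):
    # Write a DataArray with a 'time' coordinate as a .tim file:
    # column 1 = minutes since ref_date, column 2 = value, no header (sketch).
    df = da.to_dataframe().reset_index()
    df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(60, 's')
    df.to_csv(tim_fn, sep=' ', index=False, header=False,
              columns=['elapsed_minutes', da.name])

# e.g. write_tim(water_level, ref_date,
#                os.path.join(run_base_dir, rel_bc_dir, 'Sea_ssh_0001.tim'))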
Example #8
def add_delta_inflow(mdu,
                     rel_bc_dir,
                     static_dir,
                     grid,
                     dredge_depth,
                     all_flows_unit=False,
                     temp_jersey=True,
                     temp_rio=True):
    """
    Fetch USGS river flows and add them to FlowFM_bnd.ext:
    Per Silvia's Thesis:
    Jersey: Discharge boundary affected by tides, discharge and temperature taken
    from USGS 11337190 SAN JOAQUIN R A JERSEY POINT, 0 salinity
    (Note that Dutch Slough should probably be added in here)
    Rio Vista: 11455420 SACRAMENTO A RIO VISTA, temperature from DWR station RIV.
    0 salinity.

    run_base_dir: location of the DFM inputs
    run_start,run_stop: target period for the run
    static_dir: path to static assets, specifically Jersey.pli and RioVista.pli
    grid: UnstructuredGrid instance, to be modified at inflow locations
    old_bc_fn: path to old-style boundary forcing file
    all_flows_unit: if True, override all flows to be 1 m3 s-1 for model diagnostics
    """

    # get run directory and time and forcing file info
    run_base_dir = mdu.base_path
    ref_date, run_start, run_stop = mdu.time_range()
    old_bc_fn = mdu.filepath(["external forcing", "ExtForceFile"])

    pad = np.timedelta64(3, 'D')

    if 1:
        # Cache the original data from USGS, then clean it and write to DFM format
        jersey_raw_fn = os.path.join(run_base_dir, rel_bc_dir, 'jersey-raw.nc')
        if not os.path.exists(jersey_raw_fn):
            if temp_jersey == True:
                jersey_raw = usgs_nwis.nwis_dataset(
                    station="11337190",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[
                        60,  # "Discharge, cubic feet per second"
                        10
                    ],  # "Temperature, water, degrees Celsius"
                    days_per_request=30)
                jersey_raw.to_netcdf(jersey_raw_fn, engine='scipy')
            if temp_jersey == False:
                jersey_raw = usgs_nwis.nwis_dataset(
                    station="11337190",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60],  # "Discharge, cubic feet per second" 
                    days_per_request=30)
                jersey_raw.to_netcdf(jersey_raw_fn, engine='scipy')

        rio_vista_raw_fn = os.path.join(run_base_dir, rel_bc_dir,
                                        'rio_vista-raw.nc')
        if not os.path.exists(rio_vista_raw_fn):
            if temp_rio == True:
                rio_vista_raw = usgs_nwis.nwis_dataset(
                    station="11455420",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[
                        60,  # "Discharge, cubic feet per second"
                        10
                    ],  # "Temperature, water, degrees Celsius"
                    days_per_request=30)
                rio_vista_raw.to_netcdf(rio_vista_raw_fn, engine='scipy')
            if temp_rio == False:
                rio_vista_raw = usgs_nwis.nwis_dataset(
                    station="11455420",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60],  # "Discharge, cubic feet per second"
                    days_per_request=30)
                rio_vista_raw.to_netcdf(rio_vista_raw_fn, engine='scipy')

    if 1:  # Clean and write it all out
        jersey_raw = xr.open_dataset(jersey_raw_fn)
        rio_vista_raw = xr.open_dataset(rio_vista_raw_fn)
        temp_logical = [temp_jersey, temp_rio]
        i = 0
        for src_name, source in [('Jersey', jersey_raw),
                                 ('RioVista', rio_vista_raw)]:
            src_feat = dio.read_pli(
                os.path.join(static_dir, '%s.pli' % src_name))[0]
            dredge_grid.dredge_boundary(grid, src_feat[1], dredge_depth)

            if temp_logical[i] == True:
                # Add stanzas to FlowFMold_bnd.ext:
                for quant, suffix in [('dischargebnd', '_flow'),
                                      ('salinitybnd', '_salt'),
                                      ('temperaturebnd', '_temp')]:
                    with open(old_bc_fn, 'at') as fp:
                        lines = [
                            "QUANTITY=%s" % quant,
                            "FILENAME=%s/%s%s.pli" %
                            (rel_bc_dir, src_name, suffix), "FILETYPE=9",
                            "METHOD=3", "OPERAND=O", ""
                        ]
                        fp.write("\n".join(lines))

                    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
                    dio.write_pli(
                        os.path.join(run_base_dir, rel_bc_dir,
                                     '%s%s.pli' % (src_name, suffix)),
                        [feat_suffix])

                    # Write the data:
                    if quant == 'dischargebnd':
                        da = source.stream_flow_mean_daily
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 1.0
                        else:
                            # convert ft3/s to m3/s
                            da2.values[:] *= 0.028316847
                    elif quant == 'salinitybnd':
                        da2 = source.stream_flow_mean_daily.copy(deep=True)
                        da2.values[:] = 0.0
                    elif quant == 'temperaturebnd':
                        da = source.temperature_water
                        da2 = utils.fill_tidal_data(
                            da)  # maybe safer to just interpolate?
                        if all_flows_unit:
                            da2.values[:] = 20.0

                    df = da2.to_dataframe().reset_index()
                    df['elapsed_minutes'] = (
                        df.time.values - ref_date) / np.timedelta64(60, 's')
                    columns = ['elapsed_minutes', da2.name]

                    if len(feat_suffix) == 3:
                        node_names = feat_suffix[2]
                    else:
                        node_names = [""] * len(feat_suffix[1])

                    for node_idx, node_name in enumerate(node_names):
                        # if no node names are known, create the default name of <feature name>_0001
                        if not node_name:
                            node_name = "%s%s_%04d" % (src_name, suffix,
                                                       1 + node_idx)

                        tim_fn = os.path.join(run_base_dir, rel_bc_dir,
                                              node_name + ".tim")
                        df.to_csv(tim_fn,
                                  sep=' ',
                                  index=False,
                                  header=False,
                                  columns=columns)

            if temp_logical[i] == False:
                # Add stanzas to FlowFMold_bnd.ext:
                for quant, suffix in [('dischargebnd', '_flow'),
                                      ('salinitybnd', '_salt')]:
                    with open(old_bc_fn, 'at') as fp:
                        lines = [
                            "QUANTITY=%s" % quant,
                            "FILENAME=%s/%s%s.pli" %
                            (rel_bc_dir, src_name, suffix), "FILETYPE=9",
                            "METHOD=3", "OPERAND=O", ""
                        ]
                        fp.write("\n".join(lines))

                    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
                    dio.write_pli(
                        os.path.join(run_base_dir, rel_bc_dir,
                                     '%s%s.pli' % (src_name, suffix)),
                        [feat_suffix])

                    # Write the data:
                    if quant == 'dischargebnd':
                        da = source.stream_flow_mean_daily
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 1.0
                        else:
                            # convert ft3/s to m3/s
                            da2.values[:] *= 0.028316847
                    elif quant == 'salinitybnd':
                        da2 = source.stream_flow_mean_daily.copy(deep=True)
                        da2.values[:] = 0.0

                    df = da2.to_dataframe().reset_index()
                    df['elapsed_minutes'] = (
                        df.time.values - ref_date) / np.timedelta64(60, 's')
                    columns = ['elapsed_minutes', da2.name]

                    if len(feat_suffix) == 3:
                        node_names = feat_suffix[2]
                    else:
                        node_names = [""] * len(feat_suffix[1])

                    for node_idx, node_name in enumerate(node_names):
                        # if no node names are known, create the default name of <feature name>_0001
                        if not node_name:
                            node_name = "%s%s_%04d" % (src_name, suffix,
                                                       1 + node_idx)

                        tim_fn = os.path.join(run_base_dir, rel_bc_dir,
                                              node_name + ".tim")
                        df.to_csv(tim_fn,
                                  sep=' ',
                                  index=False,
                                  header=False,
                                  columns=columns)
            i += 1
Example #9
for stni in range(len(ds.station)):
    stn_ds=ds.isel(station=stni)

    # kind of a pain to get scalar values back out...
    src_name=stn_ds.station.item()

    # At least through the GUI, pli files must have more than one node.
    # Don't get too big for our britches, just stick a second node 50m east
    # if the incoming data is a point
    if 1: #-- Write a PLI file
        pnts=np.array( [[stn_ds.utm_x,stn_ds.utm_y],
                        [stn_ds.utm_x + 50.0,stn_ds.utm_y]] )
        pli_data=[ (src_name,pnts) ]
        pli_fn=opj(out_dir,"%s.pli"%src_name)

        dio.write_pli(pli_fn,pli_data)
        pli_files.append(pli_fn)

    if 1: #-- Write a BC file

        df=stn_ds.to_dataframe().reset_index()

        df['unix_time']=utils.to_unix(df.time.values)
        
        bc_fn=opj(out_dir,"%s.bc"%src_name)
        with open(bc_fn,'wt') as fp:
            # I think the 0001 has to be there, as it is used to
            # specify different values at different nodes of the pli
            # seems better to assume that incoming data is a daily average,
            # and keep it constant across the day
            # block-from means that the timestamp of a datum marks the beginning
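
The comment near the top of this example notes that .pli files apparently need more than one node, so point sources get a second node 50 m to the east; a small sketch of that padding as a helper (hypothetical name):

def point_to_pli_feature(src_name, x, y, offset=50.0):
    # Pad a point source with a second node `offset` m to the east so the
    # resulting .pli has two nodes (sketch of the pattern used above).
    pnts = np.array([[x, y],
                     [x + offset, y]])
    return (src_name, pnts)

# e.g. dio.write_pli(opj(out_dir, "%s.pli" % src_name),
#                    [point_to_pli_feature(src_name, stn_ds.utm_x, stn_ds.utm_y)])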
Example #10
def add_sfbay_potw(mdu,
                   rel_src_dir, # added rel_src_dir alliek dec 2020
                   potw_dir,
                   adjusted_pli_fn,
                   grid,dredge_depth,
                   all_flows_unit=False,
                   time_offset=None,
                   write_salt=True,write_temp=True):
    """
    time_offset: shift all dates by the given offset.  To run 2016 
    with data from 2015, specify np.timedelta64(-365,'D')

    write_salt: leave as True for older DFM; for newer DFM, set to True only
    when the simulation includes salinity.

    write_temp: same, but for temperature
    """
    run_base_dir=mdu.base_path
    ref_date,run_start,run_stop = mdu.time_range()
    old_bc_fn=mdu.filepath(["external forcing","ExtForceFile"])

    if time_offset is not None:
        run_start = run_start + time_offset
        run_stop = run_stop + time_offset
        ref_date = ref_date + time_offset
        
    potws=xr.open_dataset(os.path.join(potw_dir,'outputs','sfbay_delta_potw.nc'))
    adjusted_features=dio.read_pli(adjusted_pli_fn)

    # select a time subset of the flow data, starting just before the
    # simulation period, and running beyond the end:
    time_pnts = np.searchsorted(potws.time, [run_start-DAY,run_stop+DAY])
    time_pnts = time_pnts.clip(0,len(potws.time)-1)
    time_idxs=range(time_pnts[0],time_pnts[1]) # enumerate them for loops below

    with open(old_bc_fn,'at') as fp:
        for site in potws.site.values:
            # NB: site is bytes at this point
            potw=potws.sel(site=site)
            try:
                site_s=site.decode()
            except AttributeError:
                site_s=site # py2

            if site_s in ['false_sac','false_sj']:
                six.print_("(skip %s) "%site_s,end="")
                continue

            if potw.utm_x.values.mean() > 610200:
                # Delta POTWs are in this file, too, but not in this
                # grid.  Luckily they are easy to identify based on
                # x coordinate.
                six.print_("(skip %s -- too far east) "%site_s,end="")
                continue
            
            six.print_("%s "%site_s,end="")

            fp.write( ("QUANTITY=discharge_salinity_temperature_sorsin\n"
                       "FILENAME=%s/%s.pli\n"
                       "FILETYPE=9\n"
                       "METHOD=1\n"
                       "OPERAND=O\n"
                       "AREA=0 # no momentum\n"
                       "\n")%(rel_src_dir,site_s) ) # added rel_src_dir alliek dec 2020

            # Write the location - writing a single point appears to work,
            # based on how it shows up in the GUI.  Otherwise we'd have to
            # manufacture a point outside the domain.
            with open(os.path.join(run_base_dir,rel_src_dir,'%s.pli'%site_s),'wt') as pli_fp: # added rel_src_dir alliek dec 2020
                # Scan adjusted features for a match to use instead
                # This is handled slightly differently with POTWs - use the

                # put the depth at -50, should be at the bed
                feat=[site_s,
                      np.array([[potw.utm_x.values,potw.utm_y.values,-50.0]]),
                      ['']]

                for adj_feat in adjusted_features:
                    if adj_feat[0] == site_s:
                        # Merge them if the adjusted feature is more than 10 m away
                        # (to allow for some rounding in the ascii round-trip.)
                        offset=utils.dist( adj_feat[1][-1][:2] - feat[1][-1][:2] )
                        if offset > 10.0:
                            # Just add on the extra point - but may have to promote one 
                            # or the other to 3D.
                            old_geo=feat[1]
                            new_geo=adj_feat[1][-1:]
                            if old_geo.shape[1] != new_geo.shape[1]:
                                if old_geo.shape[1]<3:
                                    old_geo=np.concatenate( (old_geo,0*old_geo[:,:1]), axis=1)
                                else:
                                    # copy depth from old_geo
                                    new_geo=np.concatenate( (new_geo,
                                                             old_geo[-1,-1]+0*new_geo[:,:1]),
                                                            axis=1)

                            # if the original feature was outside the grid, then all is well,
                            # and it will show up in the GUI as a line from the original location
                            # outside the domain to the new location in the domain.
                            if grid.select_cells_nearest(old_geo[-1,:2],inside=True) is None:
                                feat[1]=np.concatenate( (old_geo,new_geo),axis=0 )
                                if len(feat)==3: # includes labels, but they don't matter here, right?
                                    feat[2].append('')
                            else:
                                # but if the original location is inside the grid, this will be interpreted
                                # as a sink-source pair, so we instead just put the single, adjusted
                                # location in.  This is done after potentially copying z-coordinate
                                # data from the original location.
                                feat[1]=new_geo
                        break

                dio.write_pli(pli_fp,[feat])

                dredge_grid.dredge_discharge(grid,feat[1],dredge_depth)

            with open(os.path.join(run_base_dir,rel_src_dir,'%s.tim'%site_s),'wt') as tim_fp: # added rel_src_dir alliek dec 2020
                for tidx in time_idxs:
                    tstamp_minutes = (potw.time[tidx]-ref_date) / np.timedelta64(1,'m')

                    if all_flows_unit:
                        flow_cms=1.0
                    else:
                        flow_cms=potw.flow[tidx]

                    items=[tstamp_minutes,flow_cms]
                    if write_salt:
                        items.append(0.0)
                    if write_temp:
                        items.append(20.0)

                    tim_fp.write(" ".join(["%g"%v for v in items])+"\n")

    six.print_("Done with POTWs")
Example #11
##

# to replicate the 5 fields, where last two are just 10.0
# not entirely sure what these /should/ be, but this is what
# I've seen in previous input files.
g.add_node_field('sill_left', 10 * np.ones_like(g.nodes['elev_m']))
g.add_node_field('sill_right', 10 * np.ones_like(g.nodes['elev_m']))

##

pli_data = io.grid_to_pli_data(
    g,
    node_fields=['elev_m', 'sill_left', 'sill_right'],
    labeler=lambda i: "L%04d" % (total_count + i))
io.write_pli('fixed_weirs-v02.pli', pli_data)

##


def write_node_shp(self, shpname, extra_fields=[]):
    """ Write a shapefile with each node.  Fields will attempt to mirror
    self.nodes.dtype

    extra_fields: goal is similar to write_cells_shp and write_edges_shp, 
    but not yet supported.
    """
    assert len(extra_fields) == 0  # not yet supported!

    # zero-based index of node (why does write_edge_shp create 1-based ids?)
    base_dtype = [('node_id', np.int32)]
Example #12
from __future__ import print_function

import sys
import os
import numpy as np

import stompy.model.delft.io as dio
from stompy.spatial import wkb2shp

shp_fn, run_dir = sys.argv[1:]

##

forcing = wkb2shp.shp2geom(shp_fn)

print("shp_fn: %s" % shp_fn)
print("run_dir: %s" % run_dir)

for f in forcing:
    print(f['name'])
    pli_fn = os.path.join(run_dir, "%s.pli" % f['name'])

    with open(pli_fn, 'wt') as fp:
        dio.write_pli(fp, [[f['name'], np.array(f['geom'].coords)]])
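
A hypothetical invocation of this script (the script name, shapefile, and feature names below are made up; the positional arguments are the forcing shapefile and the run directory, per sys.argv above):

# $ python forcing_shp_to_pli.py forcing_regions.shp runs/run01
# shp_fn: forcing_regions.shp
# run_dir: runs/run01
# ocean_bc
# delta_bc
# -> writes runs/run01/ocean_bc.pli, runs/run01/delta_bc.pli, ...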
Example #13
##

# to replicate the 5 fields, where last two are just 10.0
# not entirely sure what these /should/ be, but this is what
# I've seen in previous input files.
g.add_node_field('sill_left', 10 * np.ones_like(g.nodes['elev_m']))
g.add_node_field('sill_right', 10 * np.ones_like(g.nodes['elev_m']))

##

pli_data = dio.grid_to_pli_data(
    g,
    node_fields=['elev_m', 'sill_left', 'sill_right'],
    labeler=lambda i: "L%04d" % (total_count + i))
dio.write_pli(os.path.join(out_dir, 'fixed_weirs-v00.pli'), pli_data)

##


def write_node_shp(self, shpname, extra_fields=[]):
    """ Write a shapefile with each node.  Fields will attempt to mirror
    self.nodes.dtype

    extra_fields: goal is similar to write_cells_shp and write_edges_shp, 
    but not yet supported.
    """
    assert len(extra_fields) == 0  # not yet supported!

    # zero-based index of node (why does write_edge_shp create 1-based ids?)
    base_dtype = [('node_id', np.int32)]
Example #14
def add_sfbay_freshwater(mdu,
                         adjusted_pli_fn,
                         freshwater_dir,
                         grid,
                         dredge_depth,
                         all_flows_unit=False,
                         time_offset=None):
    """
    Add freshwater flows from sfbay_freshwater git submodule.
    run_base_dir: location of DFM input files
    run_start,run_stop: target period for run, as np.datetime64
    ref_date: DFM reference date, as np.datetime64[D]
    adjusted_pli_fn: path to pli file to override source locations
    freshwater_dir: path to sfbay_freshwater git submodule
    grid: UnstructuredGrid instance to be modified at input locations
    old_bc_fn: path to old-style forcing input file

    time_offset: pull freshwater flows offset by this timedelta from the
    specified run period.  I.e. if your run is really 2016, but you want 2015 flows,
    specify np.timedelta64(-365,'D').
    Slightly safer to use days than years here.
    """
    run_base_dir = mdu.base_path
    ref_date, run_start, run_stop = mdu.time_range()
    old_bc_fn = mdu.filepath(["external forcing", "ExtForceFile"])

    if time_offset is not None:
        run_start = run_start + time_offset
        run_stop = run_stop + time_offset
        ref_date = ref_date + time_offset

    def write_flow_data(stn_ds, src_name, flow_scale=1.0):
        df = stn_ds.to_dataframe().reset_index()
        df['elapsed_minutes'] = (df.time.values - ref_date) / np.timedelta64(
            60, 's')
        df['salinity'] = 0 * df.flow_cms
        df['temperature'] = 20 + 0 * df.flow_cms

        if all_flows_unit:
            df['flow_cms'] = 1.0 + 0 * df.flow_cms
        else:
            df['flow_cms'] = flow_scale * df.flow_cms

        for quantity, suffix in [('dischargebnd', '_flow'),
                                 ('salinitybnd', '_salt'),
                                 ('temperaturebnd', '_temp')]:
            lines = [
                'QUANTITY=%s' % quantity,
                'FILENAME=%s%s.pli' % (src_name, suffix), 'FILETYPE=9',
                'METHOD=3', 'OPERAND=O', ""
            ]
            with open(old_bc_fn, 'at') as fp:
                fp.write("\n".join(lines))

            # read the pli back to know how to name the per-node timeseries
            feats = dio.read_pli(
                os.path.join(run_base_dir, "%s%s.pli" % (src_name, suffix)))
            feat = feats[0]  # just one polyline in the file

            if len(feat) == 3:
                node_names = feat[2]
            else:
                node_names = [""] * len(feat[1])

            for node_idx, node_name in enumerate(node_names):
                # if no node names are known, create the default name of <feature name>_0001
                if not node_name:
                    node_name = "%s%s_%04d" % (src_name, suffix, 1 + node_idx)

                tim_fn = os.path.join(run_base_dir, node_name + ".tim")

                columns = ['elapsed_minutes']
                if quantity == 'dischargebnd':
                    columns.append('flow_cms')
                elif quantity == 'salinitybnd':
                    columns.append('salinity')
                elif quantity == 'temperaturebnd':
                    columns.append('temperature')

                df.to_csv(tim_fn,
                          sep=' ',
                          index=False,
                          header=False,
                          columns=columns)

    adjusted_features = dio.read_pli(adjusted_pli_fn)
    # Add the freshwater flows - could come from erddap, but use github submodule
    # for better control on version

    # create a pair of bc and pli files, each including all the sources.
    # exact placement will
    # be done by hand in the GUI

    full_flows_ds = xr.open_dataset(
        os.path.join(freshwater_dir, 'outputs', 'sfbay_freshwater.nc'))
    # period of the full dataset which will be included in this run
    sel = (full_flows_ds.time > run_start - 5 * DAY) & (full_flows_ds.time <
                                                        run_stop + 5 * DAY)
    flows_ds = full_flows_ds.isel(time=sel)

    nudge_by_gage(flows_ds, '11169025', station='SCLARAVCc', decorr_days=20)
    nudge_by_gage(flows_ds, '11180700', station='UALAMEDA', decorr_days=20)

    if 1:  # Special handling for Mowry Slough
        mowry_feat = None
        src_name = "MOWRY"
        for adj_feat in adjusted_features:
            if adj_feat[0] == src_name:
                mowry_feat = adj_feat

                # Write copies for flow, salinity and temperatures
                for suffix in ['_flow', '_salt', '_temp']:
                    # function to add suffix
                    feat_suffix = dio.add_suffix_to_feature(mowry_feat, suffix)
                    pli_fn = os.path.join(run_base_dir,
                                          "%s%s.pli" % (src_name, suffix))
                    dio.write_pli(pli_fn, [feat_suffix])

                dredge_grid.dredge_boundary(grid, mowry_feat[1], dredge_depth)

    for stni in range(len(flows_ds.station)):
        stn_ds = flows_ds.isel(station=stni)

        src_name = stn_ds.station.item(
        )  # kind of a pain to get scalar values back out...

        # At least through the GUI, pli files must have more than one node.
        # Don't get too big for our britches, just stick a second node 50m east
        # if the incoming data is a point, but check for manually set locations
        # in adjusted_features
        if 1:  #-- Write a PLI file
            feat = (src_name,
                    np.array([[stn_ds.utm_x, stn_ds.utm_y],
                              [stn_ds.utm_x + 50.0, stn_ds.utm_y]]))
            # Scan adjusted features for a match to use instead
            for adj_feat in adjusted_features:
                if adj_feat[0] == src_name:
                    feat = adj_feat
                    break
            # Write copies for flow, salinity and temperatures
            for suffix in ['_flow', '_salt', '_temp']:
                # function to add suffix
                feat_suffix = dio.add_suffix_to_feature(feat, suffix)
                pli_fn = os.path.join(run_base_dir,
                                      "%s%s.pli" % (src_name, suffix))
                dio.write_pli(pli_fn, [feat_suffix])

            dredge_grid.dredge_boundary(grid, feat[1], dredge_depth)

        if 1:  #-- Write the time series and stanza in FlowFM_bnd.ext
            if src_name == "EBAYS" and mowry_feat is not None:
                write_flow_data(stn_ds, src_name)
                # EBAYS watershed is something like 13000 acres.
                # don't worry about scaling back EBAYS, but add in some extra
                # here for MOWRY
                write_flow_data(stn_ds, "MOWRY", flow_scale=12.8 / 13000)
            else:
                write_flow_data(stn_ds, src_name)

    full_flows_ds.close()
Example #15
def add_sfbay_freshwater(mdu,
                         flow_locations_shp,
                         grid,
                         dredge_depth,
                         time_offset=None):
    """
    Add freshwater flows from a combination of gaged and ungaged 
    watersheds, with simple scaling between them.
    This is the approach that was used for SUNTANS runs, was replaced
    by BAHM for sfbay_dfm_v2, but is useful for periods outside 
    existing BAHM runs.

    flow_locations_shp: A shapefile with a linestring giving each input
    location.
    fields:
      gages: A '|'-separated list of USGS gage numbers from which flow data
        will be pulled.
      area_sq_mi: watershed area for this flow.  This area is compared to the
        area in USGS inventory, in order to establish a scaling factor.
      amplify: an additional adjustment to scaling factor.
    
    grid: UnstructuredGrid to add the flows to.  Depths in this grid may be
     "dredged" down to dredge_depth in order to keep inflow locations wet.

    time_offset: pull freshwater flows offset by this timedelta from the
    specified run period.  I.e. if your run is really 2016, but you want 2015 flows,
    specify np.timedelta64(-365,'D').

    Flows are given 0 salinity and 20degC temperature.
    """
    ref_date, run_start, run_stop = mdu.time_range()

    if time_offset is not None:
        run_start = run_start + time_offset
        run_stop = run_stop + time_offset
        ref_date = ref_date + time_offset
    else:
        time_offset = np.timedelta64(0)

    flow_features = wkb2shp.shp2geom(flow_locations_shp)

    # create a pair of bc and pli files, each including all the sources.

    # First need the observations --
    # get a list of all the gages that are referenced:
    all_gages = np.unique(
        np.concatenate([gages.split('|') for gages in flow_features['gages']]))

    usgs_gage_cache = os.path.join(local_config.cache_path, 'usgs',
                                   'streamflow')
    flows_ds = usgs_nwis.nwis_dataset_collection(
        all_gages,
        start_date=run_start - 5 * DAY,
        end_date=run_stop + 5 * DAY,
        products=[60],  # streamflow
        days_per_request='M',  # monthly chunks
        frequency='daily',  # time resolution of the data
        cache_dir=usgs_gage_cache)

    usgs_inventory = wkb2shp.shp2geom(usgs_inventory_shp_fn)
    station_to_area = dict([("%d" % site, area) for site, area in zip(
        usgs_inventory['site_no'], usgs_inventory['drain_area'])])

    unique_names = {}

    for feat_i, feat in enumerate(flow_features):
        gages = feat['gages'].split('|')
        sub_flows = flows_ds.sel(site=gages)

        featA = feat['area_sq_mi']
        gage_areas = np.array(
            [float(station_to_area[g] or 'nan') for g in gages])

        # assume the variable name here, and that dims are [site,time],
        # and units start as cfs.

        # Weighted average of reference gages based on watershed area, and
        # data availability
        # total flow from all reference gages
        site_axis = 0
        ref_cms = np.nansum(sub_flows['stream_flow_mean_daily'].values,
                            axis=site_axis) * FT3_to_M3
        # area represented by reference gages at each time step
        ref_area = np.sum(
            np.isfinite(sub_flows['stream_flow_mean_daily'].values) *
            gage_areas[:, None],
            axis=site_axis)

        # avoid division by zero for steps missing all flows
        feat_cms = featA * ref_cms
        feat_cms[ref_area > 0] /= ref_area[ref_area > 0]
        feat_cms[ref_area == 0.0] = np.nan

        stn_ds = xr.Dataset()
        stn_ds['time'] = flows_ds.time
        missing = np.isnan(feat_cms)
        if np.all(missing):
            raise Exception(
                "Composite from gages %s has no data for period %s - %s" %
                (gages, stn_ds.time.values[0], stn_ds.time.values[-1]))
        if np.any(missing):
            logging.warning(
                "Composite from gages %s has missing data in period %s - %s" %
                (gages, stn_ds.time.values[0], stn_ds.time.values[-1]))
            # Best guess is period average
            feat_cms[missing] = np.mean(feat_cms[~missing])
        stn_ds['flow_cms'] = ('time', ), feat_cms

        # sanitize and trim the feature name
        src_name = feat['name'].replace(' ', '_').replace(',', '_')[:13]
        if src_name in unique_names:
            serial = 1
            while True:
                test_name = "%s_%dser" % (src_name, serial)
                if test_name not in unique_names:
                    break
                serial += 1
            logging.warning("Source name %s duplicate - will use %s" %
                            (src_name, test_name))
            src_name = test_name

        unique_names[src_name] = src_name

        if 1:  #-- Write a PLI file
            pli_feat = (src_name, np.array(feat['geom']))

            # Write copies for flow, salinity and temperatures

            for suffix in ['_flow', '_salt', '_temp']:
                if suffix == '_temp' and not mdu[
                        'physics', 'Temperature']:  # present and not 0.
                    continue
                if suffix == '_salt' and not mdu['physics', 'Salinity']:
                    continue

                # function to add suffix
                pli_feat_with_suffix = dio.add_suffix_to_feature(
                    pli_feat, suffix)
                pli_fn = os.path.join(mdu.base_path,
                                      "%s%s.pli" % (src_name, suffix))
                dio.write_pli(pli_fn, [pli_feat_with_suffix])

            dredge_grid.dredge_boundary(grid, pli_feat[1], dredge_depth)

        if 1:  #-- Write the time series and stanza in FlowFM_bnd.ext
            write_QST_data(mdu, stn_ds, src_name, time_offset=-time_offset)
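
The composite above scales the summed reference-gage flow by the ratio of the feature's watershed area to the gage area actually reporting at each time step; a small worked sketch with hypothetical numbers:

import numpy as np

FT3_to_M3 = 0.028316847                  # cfs -> m3/s, as in the discharge conversions above

# Hypothetical: two reference gages, three time steps (NaN = missing record)
gage_flows_cfs = np.array([[100.0, 120.0, np.nan],
                           [ 40.0, np.nan,  50.0]])
gage_areas = np.array([200.0, 80.0])     # drainage area per gage, sq mi
featA = 30.0                             # watershed area of the ungaged feature, sq mi

ref_cms = np.nansum(gage_flows_cfs, axis=0) * FT3_to_M3      # total reference flow
ref_area = np.sum(np.isfinite(gage_flows_cfs) * gage_areas[:, None], axis=0)

feat_cms = featA * ref_cms
feat_cms[ref_area > 0] /= ref_area[ref_area > 0]             # scale by featA / reporting area
feat_cms[ref_area == 0.0] = np.nan                           # no gages reporting => missing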
Example #16
##

# to replicate the 5 fields, where last two are just 10.0
# not entirely sure what these /should/ be, but this is what
# I've seen in previous input files.
g.add_node_field('sill_left', 10 * np.ones_like(g.nodes['elev_m']))
g.add_node_field('sill_right', 10 * np.ones_like(g.nodes['elev_m']))

##

pli_data = dio.grid_to_pli_data(
    g,
    node_fields=['elev_m', 'sill_left', 'sill_right'],
    labeler=lambda i: "L%04d" % (total_count + i))
dio.write_pli(os.path.join(out_dir, 'fixed_weirs-v02.pli'), pli_data)

##


def write_node_shp(self, shpname, extra_fields=[]):
    """ Write a shapefile with each node.  Fields will attempt to mirror
    self.nodes.dtype

    extra_fields: goal is similar to write_cells_shp and write_edges_shp, 
    but not yet supported.
    """
    assert len(extra_fields) == 0  # not yet supported!

    # zero-based index of node (why does write_edge_shp create 1-based ids?)
    base_dtype = [('node_id', np.int32)]