Example #1
def test_provisional():
    usgs_a8_site = "372512121585801"

    ds = usgs_nwis.nwis_dataset(usgs_a8_site,
                                np.datetime64("2015-12-10"),
                                np.datetime64("2015-12-20"),
                                products=[72178])
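    # Sketch of a follow-up check (not in the original): nwis_dataset()
    # returns an xarray.Dataset with one variable per requested product,
    # so at minimum the time coverage can be verified.
    print(ds.data_vars)
    print(ds.time.values[0], ds.time.values[-1])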
Example #2
def nudge_by_gage(ds,
                  usgs_station,
                  station,
                  decorr_days,
                  period_start=None,
                  period_end=None):
    # This slicing may stop one sample shy; that shouldn't be a problem.
    if period_start is None:
        period_start = ds.time.values[0]
    if period_end is None:
        period_end = ds.time.values[-1]

    usgs_gage = usgs_nwis.nwis_dataset(usgs_station,
                                       products=[60],
                                       start_date=period_start,
                                       end_date=period_end,
                                       days_per_request='M',
                                       cache_dir=common.cache_dir)

    # Downsample to daily
    df = usgs_gage['stream_flow_mean_daily'].to_dataframe()
    df_daily = df.resample('D').mean()

    # Get the subset of BAHM data which overlaps this gage data
    time_slc = slice(np.searchsorted(ds.time, df_daily.index.values[0]),
                     1 + np.searchsorted(ds.time, df_daily.index.values[-1]))

    bahm_subset = ds.sel(station=station).isel(time=time_slc)

    assert len(bahm_subset.time) == len(
        df_daily), "Maybe BAHM data doesn't cover entire period"

    errors = bahm_subset.flow_cfs - df_daily.stream_flow_mean_daily

    # Easiest: interpolate errors over nans, apply to the BAHM data array.
    # The decorrelation time is tricky, though.

    # Specify a decorrelation time scale, then relax from the error to zero
    # over that period.
    valid = np.isfinite(errors.values)
    errors_interp = np.interp(utils.to_dnum(ds.time),
                              utils.to_dnum(df_daily.index[valid]),
                              errors[valid])
    all_valid = np.zeros(len(ds.time), 'f8')
    all_valid[time_slc] = 1 * valid

    weights = (2 * filters.lowpass_fir(all_valid, decorr_days)).clip(0, 1)
    weighted_errors = weights * errors_interp

    # Does this work?
    subset = dict(station=station)

    cfs_vals = ds.flow_cfs.loc[subset] - weighted_errors
    ds.flow_cfs.loc[subset] = cfs_vals.clip(0, np.inf)
    ds.flow_cms.loc[subset] = 0.028316847 * ds.flow_cfs.loc[subset]

    # user feedback
    cfs_shifts = weighted_errors[time_slc]
    print("Nudge: %s => %s, shift in CFS: %.2f +- %.2f" %
          (usgs_station, station, np.mean(cfs_shifts), np.std(cfs_shifts)))
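# Illustration (not part of the original function): the weighting above
# low-passes a 0/1 validity mask, doubles it, and clips to [0, 1], so the
# correction relaxes to zero over roughly decorr_days away from observed
# data. The same idea with a plain moving average standing in for
# stompy's filters.lowpass_fir (all demo_* names are hypothetical):
demo_valid = np.zeros(100)
demo_valid[30:70] = 1.0
demo_kernel = np.ones(10) / 10.0  # decorrelation scale, in samples
demo_weights = (2 * np.convolve(demo_valid, demo_kernel, mode='same')).clip(0, 1)
# demo_weights is 1 well inside the observed window, ramping to 0 at the edges.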
Example #3
def test_basic():
    # This requires internet access!
    ds = usgs_nwis.nwis_dataset(station="11337190",
                                start_date=np.datetime64('2012-08-01'),
                                end_date=np.datetime64('2012-10-01'),
                                products=[60,   # "Discharge, cubic feet per second"
                                          10],  # "Temperature, water, degrees Celsius"
                                days_per_request=30)
Example #4
def test_missing():
    station = "11162765"
    t_start = np.datetime64('2016-10-01')
    t_stop = np.datetime64('2016-12-01')
    # This period has some missing data identified by '***' which
    # caused problems in older versions of rdb.py
    ds = usgs_nwis.nwis_dataset(station,
                                t_start,
                                t_stop,
                                products=[95, 90860],
                                days_per_request=20)
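    # Sketch of a check (not in the original): the '***' entries should come
    # through as NaN rather than tripping the RDB parser.
    for name, da in ds.data_vars.items():
        if np.issubdtype(da.dtype, np.floating):
            print(name, "NaN count:", int(np.isnan(da.values).sum()))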
Example #5
def usgs_salinity_time_series(station):
    # A little tricky: there are two elevations, with the same parameter code
    # of 95 for specific conductance but ts_ids of 14739 and 14741.
    # Requesting the parameter once returns an RDB with both series included.

    time_labels = [
        utils.to_datetime(t).strftime('%Y%m%d') for t in [t_start, t_stop]
    ]
    cache_fn = "usgs%s-%s_%s-salinity.nc" % (station, time_labels[0],
                                             time_labels[1])

    if not os.path.exists(cache_fn):

        # 95: specific conductance
        # 90860: salinity
        ds = usgs_nwis.nwis_dataset(station,
                                    t_start,
                                    t_stop,
                                    products=[95, 90860],
                                    days_per_request=20)
        usgs_nwis.add_salinity(ds)
        ds.to_netcdf(cache_fn)
        ds.close()

    ##

    ds = xr.open_dataset(cache_fn)

    ds.attrs['lon'] = station_locs[station]['lon']
    ds.attrs['lat'] = station_locs[station]['lat']
    ds.salinity.attrs['elev_mab'] = station_locs[station]['elev_mab'][0]
    if 'salinity_01' in ds:
        ds.salinity_01.attrs['elev_mab'] = station_locs[station]['elev_mab'][1]

    xy = ll2utm([ds.attrs['lon'], ds.attrs['lat']])
    ds.attrs['x'] = xy[0]
    ds.attrs['y'] = xy[1]
    return ds
Example #6
def test_caching():
    station = "11162765"
    t_start = np.datetime64('2016-10-01')
    t_stop = np.datetime64('2016-12-01')

    cache_dir = 'tmp_cache'
    if os.path.exists(cache_dir):
        # Start clean
        shutil.rmtree(cache_dir)

    os.mkdir(cache_dir)

    timings = []
    for trial in [0, 1]:
        t0 = time.time()
        ds = usgs_nwis.nwis_dataset(station,
                                    t_start,
                                    t_stop,
                                    products=[95, 90860],
                                    days_per_request='10D',
                                    cache_dir=cache_dir)
        timings.append(time.time() - t0)

    assert timings[0] > 5 * timings[1]
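    # Sketch (not in the original): with the cache populated, cache_only=True
    # (used in a later example) skips the network and returns None for any
    # period that is not already cached.
    ds_cached = usgs_nwis.nwis_dataset(station, t_start, t_stop,
                                       products=[95, 90860],
                                       days_per_request='10D',
                                       cache_dir=cache_dir,
                                       cache_only=True)
    assert ds_cached is not None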
Example #7
usgs_station_codes = dict(
    srv=11455420,  # will have to get nudged a bit
    # ryi=11455350, # no data in this particular period
    hwb=11455165, # miner slough at hwy 84
    dws=11455335, # deep water ship channel, near RV
    ame=11446980, # American River, may have to get nudged
    ver=11425500,
    cli=11455315, # Cache Slough at S. Liberty Island
    cah=11455280, # Cache Slough near Hastings Tract
    ula=11455268, # Ulatis near Elmira
)

stations = []
for stn_name in usgs_station_codes:
    stn_code = usgs_station_codes[stn_name]

    ds = usgs_nwis.nwis_dataset(stn_code, run_start, run_stop, products=[10],
                                cache_dir=cache_dir, cache_only=True)
    if ds is None:
        print("No data for %s -- skip." % stn_code)
        continue
    meta = usgs_nwis.station_metadata(stn_code, cache_dir=cache_dir)
    ds['latitude'] = (), meta['lat']
    ds['longitude'] = (), meta['lon']
    ll = np.array([ds.longitude.values, ds.latitude.values])
    ds['ll'] = ('xy',), ll
    xy = ll2utm(ll)
    ds['xy'] = ('xy',), xy
    ds.attrs['name'] = stn_name
    stations.append(ds)

# Based on the locations, the American River station may be too far away,
# and the model doesn't have American River flows.
Example #8
def add_delta_inflow(mdu,
                     rel_bc_dir,
                     static_dir,
                     grid,
                     dredge_depth,
                     all_flows_unit=False,
                     temp_jersey=True,
                     temp_rio=True):
    """
    Fetch river USGS river flows, add to FlowFM_bnd.ext:
    Per Silvia's Thesis:
    Jersey: Discharge boundary affected by tides, discharge and temperature taken
    from USGS 11337190 SAN JOAQUIN R A JERSEY POINT, 0 salinity
    (Note that Dutch Slough should probably be added in here)
    Rio Vista: 11455420 SACRAMENTO A RIO VISTA, temperature from DWR station RIV.
    0 salinity.

    run_base_dir: location of the DFM inputs
    run_start,run_stop: target period for therun
    statiC_dir: path to static assets, specifically Jersey.pli and RioVista.pli
    grid: UnstructuredGrid instance, to be modified at inflow locations
    old_bc_fn: path to old-style boundary forcing file
    all_flows_unit: if True, override all flows to be 1 m3 s-1 for model diagnostics
    """

    # get run directory and time and forcing file info
    run_base_dir = mdu.base_path
    ref_date, run_start, run_stop = mdu.time_range()
    old_bc_fn = mdu.filepath(["external forcing", "ExtForceFile"])

    pad = np.timedelta64(3, 'D')

    if 1:
        # Cache the original data from USGS, then clean it and write to DFM format
        jersey_raw_fn = os.path.join(run_base_dir, rel_bc_dir, 'jersey-raw.nc')
        if not os.path.exists(jersey_raw_fn):
            if temp_jersey:
                jersey_raw = usgs_nwis.nwis_dataset(
                    station="11337190",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60,   # "Discharge, cubic feet per second"
                              10],  # "Temperature, water, degrees Celsius"
                    days_per_request=30)
            else:
                jersey_raw = usgs_nwis.nwis_dataset(
                    station="11337190",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60],  # "Discharge, cubic feet per second"
                    days_per_request=30)
            jersey_raw.to_netcdf(jersey_raw_fn, engine='scipy')

        rio_vista_raw_fn = os.path.join(run_base_dir, rel_bc_dir,
                                        'rio_vista-raw.nc')
        if not os.path.exists(rio_vista_raw_fn):
            if temp_rio:
                rio_vista_raw = usgs_nwis.nwis_dataset(
                    station="11455420",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60,   # "Discharge, cubic feet per second"
                              10],  # "Temperature, water, degrees Celsius"
                    days_per_request=30)
            else:
                rio_vista_raw = usgs_nwis.nwis_dataset(
                    station="11455420",
                    start_date=run_start - pad,
                    end_date=run_stop + pad,
                    products=[60],  # "Discharge, cubic feet per second"
                    days_per_request=30)
            rio_vista_raw.to_netcdf(rio_vista_raw_fn, engine='scipy')

    if 1:  # Clean and write it all out
        jersey_raw = xr.open_dataset(jersey_raw_fn)
        rio_vista_raw = xr.open_dataset(rio_vista_raw_fn)
        for (src_name, source), has_temp in zip(
                [('Jersey', jersey_raw), ('RioVista', rio_vista_raw)],
                [temp_jersey, temp_rio]):
            src_feat = dio.read_pli(
                os.path.join(static_dir, '%s.pli' % src_name))[0]
            dredge_grid.dredge_boundary(grid, src_feat[1], dredge_depth)

            if has_temp:
                # Add stanzas to FlowFMold_bnd.ext:
                for quant, suffix in [('dischargebnd', '_flow'),
                                      ('salinitybnd', '_salt'),
                                      ('temperaturebnd', '_temp')]:
                    with open(old_bc_fn, 'at') as fp:
                        lines = [
                            "QUANTITY=%s" % quant,
                            "FILENAME=%s/%s%s.pli" %
                            (rel_bc_dir, src_name, suffix), "FILETYPE=9",
                            "METHOD=3", "OPERAND=O", ""
                        ]
                        fp.write("\n".join(lines))

                    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
                    dio.write_pli(
                        os.path.join(run_base_dir, rel_bc_dir,
                                     '%s%s.pli' % (src_name, suffix)),
                        [feat_suffix])

                    # Write the data:
                    if quant == 'dischargebnd':
                        da = source.stream_flow_mean_daily
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 1.0
                        else:
                            # convert ft3/s to m3/s
                            da2.values[:] *= 0.028316847
                    elif quant == 'salinitybnd':
                        da2 = source.stream_flow_mean_daily.copy(deep=True)
                        da2.values[:] = 0.0
                    elif quant == 'temperaturebnd':
                        da = source.temperature_water
                        # maybe safer to just interpolate?
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 20.0

                    df = da2.to_dataframe().reset_index()
                    df['elapsed_minutes'] = (
                        df.time.values - ref_date) / np.timedelta64(60, 's')
                    columns = ['elapsed_minutes', da2.name]

                    if len(feat_suffix) == 3:
                        node_names = feat_suffix[2]
                    else:
                        node_names = [""] * len(feat_suffix[1])

                    for node_idx, node_name in enumerate(node_names):
                        # if no node names are known, create the default name of <feature name>_0001
                        if not node_name:
                            node_name = "%s%s_%04d" % (src_name, suffix,
                                                       1 + node_idx)

                        tim_fn = os.path.join(run_base_dir, rel_bc_dir,
                                              node_name + ".tim")
                        df.to_csv(tim_fn,
                                  sep=' ',
                                  index=False,
                                  header=False,
                                  columns=columns)

            else:
                # Add stanzas to FlowFMold_bnd.ext:
                for quant, suffix in [('dischargebnd', '_flow'),
                                      ('salinitybnd', '_salt')]:
                    with open(old_bc_fn, 'at') as fp:
                        lines = [
                            "QUANTITY=%s" % quant,
                            "FILENAME=%s/%s%s.pli" %
                            (rel_bc_dir, src_name, suffix), "FILETYPE=9",
                            "METHOD=3", "OPERAND=O", ""
                        ]
                        fp.write("\n".join(lines))

                    feat_suffix = dio.add_suffix_to_feature(src_feat, suffix)
                    dio.write_pli(
                        os.path.join(run_base_dir, rel_bc_dir,
                                     '%s%s.pli' % (src_name, suffix)),
                        [feat_suffix])

                    # Write the data:
                    if quant == 'dischargebnd':
                        da = source.stream_flow_mean_daily
                        da2 = utils.fill_tidal_data(da)
                        if all_flows_unit:
                            da2.values[:] = 1.0
                        else:
                            # convert ft3/s to m3/s
                            da2.values[:] *= 0.028316847
                    elif quant == 'salinitybnd':
                        da2 = source.stream_flow_mean_daily.copy(deep=True)
                        da2.values[:] = 0.0

                    df = da2.to_dataframe().reset_index()
                    df['elapsed_minutes'] = (
                        df.time.values - ref_date) / np.timedelta64(60, 's')
                    columns = ['elapsed_minutes', da2.name]

                    if len(feat_suffix) == 3:
                        node_names = feat_suffix[2]
                    else:
                        node_names = [""] * len(feat_suffix[1])

                    for node_idx, node_name in enumerate(node_names):
                        # if no node names are known, create the default name of <feature name>_0001
                        if not node_name:
                            node_name = "%s%s_%04d" % (src_name, suffix,
                                                       1 + node_idx)

                        tim_fn = os.path.join(run_base_dir, rel_bc_dir,
                                              node_name + ".tim")
                        df.to_csv(tim_fn,
                                  sep=' ',
                                  index=False,
                                  header=False,
                                  columns=columns)
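# Note (illustration, not from the original): each node gets a two-column
# .tim file, minutes elapsed since the MDU reference date and the boundary
# value, space-separated with no header. A hypothetical Jersey_flow_0001.tim:
#
#   0.0 210.5
#   1440.0 198.3
#   2880.0 205.1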
Example #9
from stompy.io.local import usgs_nwis
from stompy import utils

import os
import numpy as np
import matplotlib.pyplot as plt
##

cache_dir = "cache"

if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)

ds = usgs_nwis.nwis_dataset(11455420,
                            np.datetime64("2019-08-01"),
                            np.datetime64("2019-09-01"),
                            products=[65],
                            cache_dir=cache_dir)

ds_long = usgs_nwis.nwis_dataset(11455420,
                                 np.datetime64("2019-07-01"),
                                 np.datetime64("2019-10-01"),
                                 products=[65],
                                 cache_dir=cache_dir)

##

# Try basic tidal filling:
height_filled = utils.fill_tidal_data(ds.height_gage)

height_filled_long = utils.fill_tidal_data(ds_long.height_gage)
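# Quick visual check (not in the original): overlay the filled series on the
# raw gage heights; gaps in the raw record should be bridged smoothly.
fig, ax = plt.subplots()
ax.plot(ds.time, ds.height_gage, label='raw')
ax.plot(height_filled.time, height_filled, label='filled')
ax.legend()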
Example #10
ges_april = pd.read_csv(('/home/rusty/mirrors/Ed/Sync/UCD/Projects/CDFW_Arc/dflowfm/'
                         '27-comp_bedlevtyp_2_3_4/stations/wsel/'
                         'GES_STAGE_april2014.csv'),
                        parse_dates=['Time'])

sel = ((ges_mwtract.Time >= ges_april.Time.iloc[0]) &
       (ges_mwtract.Time <= ges_april.Time.iloc[-1]))
##

from stompy.io.local import usgs_nwis

cache_dir = 'cache'
os.path.exists(cache_dir) or os.mkdir(cache_dir)

ges_ds = usgs_nwis.nwis_dataset(station="11447905",
                                start_date=np.datetime64("2014-04-01"),
                                end_date=np.datetime64("2014-05-01"),
                                cache_dir=cache_dir,
                                products=[60, 65],  # discharge and stage
                                days_per_request='M')

# That always returns UTC, so add a PST time:
ges_ds['time_pst'] = ('time',), ges_ds.time + np.timedelta64(-8, 'h')

##
import matplotlib.pyplot as plt
plt.ion()

plt.figure(12).clf()

# These are all in agreement now.
plt.plot(ges_april.Time, ges_april.Stage, label='april')
plt.plot(ges_mwtract.Time[sel], ges_mwtract.Stage[sel], label='MWT')
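# Hypothetical continuation (the original snippet is truncated here): add the
# USGS record on its PST time axis for comparison, then draw the legend.
plt.plot(ges_ds.time_pst, ges_ds.height_gage, label='USGS 11447905')
plt.legend()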
Example #11
# Get a sense of how to fabricate Decker Island tides when only Rio Vista
# data is available.
# choosing not to do this -- better to truncate the domain at Rio Vista.

import numpy as np
from stompy.io.local import usgs_nwis
from stompy import utils
import matplotlib.pyplot as plt

##
period = [np.datetime64("2016-01-01"), np.datetime64("2019-01-01")]

decker = usgs_nwis.nwis_dataset(station=11455478,
                                start_date=period[0],
                                end_date=period[1],
                                products=[60, 65],
                                cache_dir='cache')

riovista = usgs_nwis.nwis_dataset(station=11455420,
                                  start_date=period[0],
                                  end_date=period[1],
                                  products=[60, 65],
                                  cache_dir='cache')

##
from stompy import filters

# separate into tidal, subtidal
for ds in [decker, riovista]:
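    # (Truncated in the original; a plausible body, reusing the Godin filter
    # seen in a later example to split stage into subtidal and tidal parts.)
    ds['height_lp'] = ('time',), filters.lowpass_godin(ds.height_gage.values,
                                                       utils.to_dnum(ds.time))
    ds['height_tidal'] = ('time',), ds.height_gage.values - ds.height_lp.values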
Example #12
axs[0].plot(adcp.time, adcp.u_davg, label='U davg')
axs[1].plot(adcp.time, adcp.v_davg, label='V davg')

axs[2].plot(ds.time, ds.stream_flow_mean_daily, label='Rio new Raw')

#axs[2].plot(rio.time + np.timedelta64(8,'h'),
#            -rio.stream_flow_mean_daily,
#            label='Rio Flip/shift')
axs[0].legend()
axs[1].legend()
axs[2].legend()

axs[2].axis((735104.96660250274, 735107.16195219487, -126058.97073039225,
             127174.6378024802))

##

# Okay -
run_start = adcp.time[0].astype('M8[D]')

run_stop = run_start + np.timedelta64(10, 'D')
from stompy.io.local import usgs_nwis

ds = usgs_nwis.nwis_dataset(station="11455420",
                            start_date=run_start,
                            end_date=run_stop,
                            products=[60],
                            days_per_request=30)
Example #13
     "381934121403201"),  # Liberty Island, Wildlands, Up Marsh
        # no physical data until 2015-07:
    ("LIB", "11455315"),  # Cache Slough A S Liberty Island Nr Rio Vista CA
    ("TSL", "11337080"),  # Threemile slough near Rio Vista
    ("SDI", "11455478"),  # Sac River at Decker Island
]:
    stage_fn = '%s-stage.csv' % usgs_name
    flow_fn = '%s-flow.csv' % usgs_name
    if os.path.exists(stage_fn) and os.path.exists(flow_fn):
        continue  # or force.
    print("Downloading %s" % usgs_name)

    ds = usgs_nwis.nwis_dataset(
        usgs_station,
        download_period[0],
        download_period[1],
        [60, 65],  # Discharge and Stage
        days_per_request='M',
        cache_dir=cache_dir)
    # nwis_dataset() returns UTC data.  Convert to PST:
    ds['time'] = ds.time - np.timedelta64(8, 'h')

    # Match the names up with existing csv files:
    ds = ds.rename({'time': 'Time'})
    if 'stream_flow_mean_daily' in ds:
        ds = ds.rename({'stream_flow_mean_daily': 'Flow'})
    if 'height_gage' in ds:
        ds = ds.rename({'height_gage': 'Stage'})
    df = ds.to_dataframe()

    if 'Stage' in df:
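        # (Truncated in the original; a plausible completion: write out the
        # csv files that the loop checks for above.)
        df[['Stage']].to_csv(stage_fn)
    if 'Flow' in df:
        df[['Flow']].to_csv(flow_fn)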
Example #14
    pd.read_csv('forcing-data/Barker_Pumping_Plant.csv', parse_dates=['time']))
barker = barker.set_coords('time')
model.add_FlowBC(name='Barker_Pumping_Plant', Q=barker['Q'])

rio_vista = model.read_bc('forcing-data/WaterLevel.bc')['SRV_0001']
model.add_StageBC(name='SRV', z=rio_vista['waterlevelbnd'])

Qshared = model.read_bc('forcing-data/Discharge.bc')
model.add_FlowBC(name='Georgiana', Q=Qshared['Georgiana_0001']['dischargebnd'])
# DXC was closed during this period.
# model.add_FlowBC(name='DXC',Q=Qshared['DXC_0001']['dischargebnd'])

if 1:  # freeport flows, lowpass
    ds_fpx = usgs_nwis.nwis_dataset(
        station=11447650,
        start_date=model.run_start - np.timedelta64(5, 'D'),
        end_date=model.run_stop + np.timedelta64(5, 'D'),
        products=[60, 65],
        cache_dir="cache")
    ds_fpx.time.values -= np.timedelta64(8, 'h')  # convert UTC to PST.
    # The original data had flows shifted 14.5 h; try just 2 h. It's possible
    # they really should be lagged rather than shifted back in time, since the
    # signal is mostly tidal and the question is propagation of the tides.
    ds_fpx.time.values -= np.timedelta64(int(2. * 3600), 's')
    ds_fpx.stream_flow_mean_daily.values *= 0.028316847  # cfs => m3/s
    # That's 15-minute data.
    flow = ds_fpx.stream_flow_mean_daily
    flow.values[:] = filters.lowpass_godin(flow.values,
                                           utils.to_dnum(flow.time))
    model.add_FlowBC(name="SacramentoRiver", Q=flow)

# Try file pass-through for forcing data: