Example #1
# Imports assumed by this snippet (Storm, factory, dflowfm, and the logger
# are project-local helpers not shown here; stompy paths follow the other
# examples on this page):
import os
import shutil
import logging
import numpy as np

from stompy.spatial import wkb2shp
from stompy.model.delft import dfm_grid
import stompy.model.delft.io as dio

log = logging.getLogger(__name__)


def run_all(run_base_dir,
            storm_start_h,
            storm_duration_h,
            storm_flow,
            sources=None,
            force=False):
    mdu = dio.MDUFile('template.mdu')

    mdu['geometry', 'NetFile'] = 'stein_03_net.nc'

    grid = dfm_grid.DFMGrid(mdu['geometry', 'NetFile'])

    if os.path.exists(run_base_dir):
        if force:
            shutil.rmtree(run_base_dir)  # force: remove any existing run directory
        else:
            log.warning("Will not run %s -- already exists" % run_base_dir)
            return False

    mdu.set_time_range(start=np.datetime64('2010-01-01'),
                       stop=np.datetime64('2010-01-05'))

    os.path.exists(run_base_dir) or os.makedirs(run_base_dir)
    mdu.set_filename(os.path.join(run_base_dir, 'flowfm.mdu'))

    ext_fn = mdu.filepath(['external forcing', 'ExtForceFile'])

    # Clear any pre-existing BC file:
    os.path.exists(ext_fn) and os.unlink(ext_fn)

    # Monkey-patch the storm parameters onto the Storm class:
    Storm.storm_flow = storm_flow
    Storm.storm_duration_h = storm_duration_h
    Storm.storm_start_h = storm_start_h

    bc_shp = 'forcing_with_q.shp'
    bc_shp_data = wkb2shp.shp2geom(bc_shp)

    for bc in bc_shp_data:
        if sources is not None and bc['name'] not in sources:
            print("Skipping %s" % bc['name'])
            continue
        for data_src in factory(bc):
            data_src.write(mdu=mdu, feature=bc, grid=grid)

    fixed_weir_out = "../derived"
    if 1:  # fixed weir file is just referenced as static input
        shutil.copyfile(os.path.join(fixed_weir_out, 'fixed_weirs-v00.pli'),
                        os.path.join(run_base_dir, 'fixed_weirs-v00.pli'))
        mdu['geometry', 'FixedWeirFile'] = 'fixed_weirs-v00.pli'

    mdu.write()

    dfm_grid.write_dfm(grid,
                       mdu.filepath(['geometry', 'NetFile']),
                       overwrite=True)

    dflowfm(mdu.filename, ['-t', '1', '--autostartstop'])
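
A minimal driver sketch for run_all (the directory names, storm timing, and
flow value below are hypothetical):

for start_h in [6, 12, 24]:
    run_all(run_base_dir="runs/storm_start_%02dh" % start_h,
            storm_start_h=start_h,
            storm_duration_h=6,
            storm_flow=10.0,
            force=True)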
Example #2
# Imports assumed by this snippet. xy (the station location, in the grid's
# projected coordinates) is expected to be defined at module scope:
import os
import xarray as xr

from stompy.model.delft import dfm_grid
import stompy.model.delft.io as dio


def pr_dfm_s1(run_name):
    run_base_dir = "runs/%s" % run_name
    mdu_fn = os.path.join(run_base_dir, "%s.mdu" % run_name)
    mdu = dio.MDUFile(mdu_fn)
    dfm_map = xr.open_dataset('runs/%s/DFM_OUTPUT_%s/%s_map.nc' %
                              (run_name, run_name, run_name))
    dfm_map_g = dfm_grid.DFMGrid(dfm_map)
    dfm_cc = dfm_map_g.cells_centroid()
    pr_cell_idx = dfm_map_g.select_cells_nearest(xy)
    return dfm_map.s1.isel(nFlowElem=pr_cell_idx)
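
A usage sketch, with a hypothetical UTM zone 10 point for xy:

xy = (552000., 4186000.)  # hypothetical station location (EPSG:26910)
s1 = pr_dfm_s1("short_test_18")
s1.plot()  # free-surface elevation time series at the cell nearest xy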
Example #3
import os
import datetime

from sfb_dfm_utils import ca_roms

from stompy import utils
from stompy.spatial import wkb2shp, proj_utils
from stompy.io.local import noaa_coops
import stompy.model.delft.io as dio

############ COMPARISONS ##################

run_name = "short_test_18"
run_base_dir = "runs/%s" % run_name
mdu_fn = os.path.join(run_base_dir, "%s.mdu" % run_name)

mdu = dio.MDUFile(mdu_fn)

##

utm2ll = proj_utils.mapper('EPSG:26910', "WGS84")
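# e.g. utm2ll([570000., 4150000.]) maps a (hypothetical) UTM zone 10 point
# to the corresponding [lon, lat].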

##
# Comparisons across ROMS, NOAA tides, NOAA ADCPs, and this model

obs_pnts = wkb2shp.shp2geom(
    "/opt/data/delft/sfb_dfm_v2/inputs-static/observation-points.shp")


##
def load_subroms():
    # run_start and run_stop are defined elsewhere in the original script;
    # the snippet is truncated here.
    ca_roms_files = ca_roms.fetch_ca_roms(run_start, run_stop)
Example #4
import os

import numpy as np

from scipy.ndimage import percentile_filter  # scipy.ndimage.filters is deprecated
from stompy.spatial import proj_utils
from stompy import filters
import stompy.model.delft.io as dio

##
ll2utm = proj_utils.mapper('WGS84', 'EPSG:26910')

##

run_name = "wy2014"
begindate = "20130801"
path = "/hpcvol1/emma/sfb_dfm/runs/%s/DFM_OUTPUT_%s/" % (run_name, run_name)
hisfile = os.path.join(path,
                       "%s_0000_%s_000000_his.nc" % (run_name, begindate))
mdufile = os.path.join(path, "..", "%s.mdu" % run_name)
mdu = dio.MDUFile(mdufile)
t_ref, t_start, t_stop = mdu.time_range()

#t_spunup=np.datetime64("2012-10-01") # clip to "real" period
t_spunup = t_start  # show entire simulation

savepath = os.path.join(path, "validation_plots/salinity_time_series")
metricpath = os.path.join(path, "validation_metrics")
metric_fn = os.path.join(metricpath, "salinity_time_series.tex")
os.path.exists(savepath) or os.makedirs(savepath)
os.path.exists(metricpath) or os.makedirs(metricpath)

##
station_locs = {
    # elev_mab: upper sensor first on
    # San Mateo Bridge
Example #5
from stompy.spatial import wkb2shp, proj_utils

import stompy.model.delft.io as dio
from stompy.grid import unstructured_grid

import sfb_dfm_utils

##
# 52184 is possibly better for velocity BCs
dfm_bin_dir = "/home/rusty/src/dfm/r53925-opt/bin"

utm2ll = proj_utils.mapper('EPSG:26910', 'WGS84')
ll2utm = proj_utils.mapper('WGS84', 'EPSG:26910')

##
mdu = dio.MDUFile('template.mdu')

# short_test_01: straight up waterlevel, 100% from OTPS
# short_test_02: fix bathy, tried riemann, dirichlet
# short_test_03: add DC offset, start comparing Point Reyes
#                clip bathy to -4, not -10
#                add salinity to BC and mdu.  initial=34
#                bump up to 6 days
# short_test_04: Adding COAMPS wind
# short_test_05: Convert to more complete DFM script
# short_test_06: Try 3D, 10 layers
# short_test_07: ragged boundary
# short_test_08: Adding SF Bay
# medium_09: Longer run, with temperature, and ill-behaved velocity
# medium_10: add sponge layer diffusion for scalars, too
# short_test_11: return to ROMS-only domain, and shorter duration
Example #6
dredge_depth = -0.5  # m NAVD88, depth to enforce at inflows and discharges

##

# Make sure run directory exists:
os.path.exists(run_base_dir) or os.makedirs(run_base_dir)

# clear any stale bc files:
for fn in [old_bc_fn]:
    os.path.exists(fn) and os.unlink(fn)

## --------------------------------------------------------------------------------
# Edits to the template mdu:
#

mdu = dio.MDUFile('template.mdu')

if 1:  # set dates
    # RefDate can only be specified to day precision
    mdu['time', 'RefDate'] = utils.to_datetime(ref_date).strftime('%Y%m%d')
    mdu['time', 'Tunit'] = 'M'  # minutes; a bit odd, but stick with what was used already
    mdu['time', 'TStart'] = 0
    mdu['time', 'TStop'] = int((run_stop - run_start) / np.timedelta64(1, 'm'))
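    # e.g. (hypothetical dates) run_start=2014-08-01, run_stop=2014-08-11:
    # TStop = 10 days * 1440 min/day = 14400 minutes after RefDate.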

mdu['geometry', 'LandBoundaryFile'] = os.path.join(rel_static_dir,
                                                   "deltabay.ldb")

mdu['geometry', 'Kmx'] = 10  # 10 layers

# update location of the boundary conditions
Example #7
# Imports assumed by this snippet:
from pathlib import Path

import matplotlib.pyplot as plt
import matplotlib.dates as mdates

from stompy.grid import unstructured_grid
import stompy.model.delft.io as dio


def plot_MDU(mdu_filename, gridpath):
    #------------- script now takes over -------------------------------------------
    mdu_filename = Path(mdu_filename)
    base_dir     = mdu_filename.parent  # The assumption is that we'll find all our bc's in the same folder as the mdu.
    folder_dir   = base_dir / 'bc_figures'
    folder_dir.exists() or folder_dir.mkdir() 
    
    # Load the grid (assumed to be the same grid that the run used):
    grid = unstructured_grid.UnstructuredGrid.read_dfm(str(gridpath),
                                                       cleanup=True)
    
    # Open MDU, strip time information using stompy functionality
    MDU = dio.MDUFile(filename=str(mdu_filename))
    t_ref, t_start, t_stop =  MDU.time_range() 
    
    
    # define shared plotting functions 
    def format_xaxis (axis):
        months = mdates.MonthLocator(interval = 2)  # every other month
        fmt = mdates.DateFormatter('%b/%Y')
        axis.xaxis.set_major_locator(months)
        axis.xaxis.set_major_formatter(fmt)
        axis.set_xlim(t_ref, t_stop)
        
    def save_image(fig, name):
        fullname = folder_dir / (name + '.png')
        fig.savefig(str(fullname), dpi = 300, bbox_inches='tight')
        print('Saved %s' % fullname)
        plt.close()
    
    # Section one
    #  Let's first read through the source_files (which seem to be the POTWs)
    
    sourcefolder = base_dir / 'source_files'
    PLIs = list(sourcefolder.glob('*.pli'))  # get a list of all the pli files in the directory
    
    # Iterate through each one. Note each pli file has a corresponding timeseries of data (*.tim)
    for bc in PLIs:
        print('Reading %s' % bc.stem)
        pli = dio.read_pli(str(bc))                      # read in the *.pli file  
        tim_filename = sourcefolder / (bc.stem + '.tim') # filename of corresponding timeseries
        tim = dio.read_dfm_tim(str(tim_filename), t_ref, time_unit='M', columns = ['flow','sal','temp']) 
    
        # Plot the data 
        fig = plt.figure(figsize=(11, 3))
        ax1 = fig.add_axes([0.05, 0.05, 0.68, 0.8])
        map_axis = fig.add_axes([0.55, 0.18, 0.6, 0.6])
        name = pli[0][0]
        ax1.set_title( name.capitalize() + ' (POTW Source)')
        ax1.plot(tim.time, tim.flow,'-', linewidth = 5, alpha = 0.5, color = 'skyblue')    
        ax1.grid(b = True, alpha = 0.25)
        ax1.set_ylabel("Flow (m$^3$/s)")
        format_xaxis(ax1)
        
        # Plot SFB map + location  
        grid.plot_edges(ax = map_axis, alpha = 0.8) 
        map_axis.axis('off')
        coords = pli[0][1] 
        for coord in coords:
            x, y = coord[0], coord[1] # There is a z coordinate we are ignoring here 
            map_axis.plot(x , y,'o', markersize= 11, color= 'orangered')
    
        # Quick check that temp/salinity are fixed:
        temp = set(tim.temp.values)
        sal  = set(tim.sal.values)
        if len(temp)>1 or len(sal)>1:
            print('sal or temp is NOT FIXED at %s' % bc.stem)
        else:
            label = 'Temperature is fixed at %d C\n Salinity is fixed at %d ppt' %  (temp.pop(), sal.pop())
            ax1.text(1.08, .05, label,  horizontalalignment='left',  verticalalignment='center', transform=ax1.transAxes, fontsize = 12)
        save_image(fig, name)
        
        
    '''
    NEXT: THE BOUNDARY CONDITIONS FOR THE INFLOWS / STREAMS / CREEKS ETC.

    The only tricky difference here is that these BCs are sometimes divided
    across multiple cells (e.g. a big river might be split across 2 cell
    segments). In that case we look at the pli file (geometry) to see how
    many cells the BC is split across, and then multiply the discharge by
    the number of cells. Temperature (scalar) and salinity (concentration)
    don't need this correction.

    DFM divides the flow evenly across cells (1/3 each for 3 cells, 1/2 each
    for 2 cells), so unless someone has really decided to get creative with
    custom settings this assumption should hold.
    '''
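    # Worked example (hypothetical numbers): a river forced at 30 m^3/s whose
    # pli has 3 points is written by DFM as 10 m^3/s per point, so the total
    # plotted below is tim * ncells = 10 * 3 = 30 m^3/s.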
    bcfolder = base_dir / 'bc_files'
    PLIs = list(bcfolder.glob('*.pli'))           
    for bc in PLIs:
    
        print('Reading %s' % bc.stem)
        # The BC is divided evenly between multiple cells, so we take one
        # timeseries and multiply by the number of points.
        pli = dio.read_pli(str(bc))
        filenames  = pli[0][2]
        ncells = len(filenames)
        tim_filename = bcfolder / (filenames[0] + '.tim') # filename of corresponding timeseries
        tim = dio.read_dfm_tim(str(tim_filename), t_ref, time_unit='M', columns  = ['data']) #, columns = ['flow','sal','temp']) 
    
        # Plot the data 
        fig = plt.figure(figsize=(11, 3))
        ax1 = fig.add_axes([0.05, 0.05, 0.68, 0.8])
        map_axis = fig.add_axes([0.55, 0.18, 0.6, 0.6])
        name = pli[0][0]            # Name of the boundary condition
        ax1.set_title( name.capitalize() + ' (non-POTW source)')
        
        if 'flow' in name:
            ax1.set_ylabel("Flow (m$^3$/s)")
            tim.data.values = tim.data.values * ncells  # multiply by the number of segments the inflow is divided across
        elif 'salt' in name:
            ax1.set_ylabel("Salinity (PPT)")
        elif 'temp' in name:
            ax1.set_ylabel("Temperature (deg C)")
        elif 'ssh' in name:
            ax1.set_ylabel('Sea Surface Height Forcing (m)')
            
        format_xaxis(ax1)
        ax1.plot(tim.time, tim.data,'-', linewidth = 5, alpha = 0.5, color = 'skyblue')    
        ax1.grid(b = True, alpha = 0.25)
        # Plot SFB map + location  
        grid.plot_edges(ax = map_axis, alpha = 0.8) 
        map_axis.axis('off')
        coords = pli[0][1] 
        for coord in coords:
            x, y = coord[0], coord[1] # There is a z coordinate we are ignoring here
            map_axis.plot(x, y, 'o', markersize= 11, color= 'orangered')
        save_image(fig, name)
            
    print('Done plotting boundary conditions.')
Example #8
import os

import numpy as np
import xarray as xr

from stompy.plot import plot_utils
import stompy.plot.cmap as scmap
from stompy.io.local import usgs_sfbay
from stompy.spatial import proj_utils
import stompy.model.delft.io as dio

ll_to_utm = proj_utils.mapper('WGS84', 'EPSG:26910')
utm_to_ll = proj_utils.mapper('EPSG:26910', 'WGS84')

##

# Set the model location
run_name = "wy2003"
path = "/hpcvol1/emma/sfb_dfm/runs/%s/DFM_OUTPUT_%s/" % (run_name, run_name)
hisfile = "%s_0000_20020801_000000_his.nc" % run_name
mdu = dio.MDUFile(os.path.join(path, '../%s.mdu' % run_name))
cache_dir = os.path.join(path, "validation_metrics/cache")
os.path.exists(cache_dir) or os.makedirs(cache_dir)

### Load in USGS cruise data, define variables, and convert datetime64 times to timestamps
start_date = np.datetime64("2002-08-01")
end_date = np.datetime64('2003-04-01')

usgs_cache_fn = os.path.join(cache_dir, 'usgs_cruises.nc')
if not os.path.exists(usgs_cache_fn):
    ds = usgs_sfbay.cruise_dataset(start_date, end_date)
    ds.to_netcdf(usgs_cache_fn)
    ds.close()
# clean read:
ds = xr.open_dataset(usgs_cache_fn)

##
import logging
import sys
import time


# PescaBmiSeepageMixin is a project-local class providing the list of
# seepages and their handlers; it is not shown in this snippet.
def task_main(args):
    if args.mpi is None:
        print("args.mpi is None")
        rank = 0
    elif args.mpi in ['mpiexec', 'mpich', 'intel', 'slurm']:
        rank = int(os.environ['PMI_RANK'])
    else:
        raise Exception("Don't know how to find out rank")

    log_fn = os.path.join(os.path.dirname(args.mdu), f'log-{rank}')
    logging.basicConfig(filename=log_fn, level=logging.DEBUG)
    logging.debug("Top of task_main")
    logging.debug('This message should go to the log file')

    import local_config
    import bmi.wrapper
    from numpy.ctypeslib import ndpointer  # nd arrays
    from ctypes import (
        # Types
        c_double,
        c_int,
        c_char_p,
        c_bool,
        c_char,
        c_float,
        c_void_p,
        # Complex types
        # ARRAY, Structure,
        # Making strings
        # Pointering
        POINTER,
        byref,  # CFUNCTYPE,
        # Loading
        # cdll
    )

    for k in os.environ:
        if ('SLURM' in k) or ('MPI' in k):
            logging.debug(f"{k} => {os.environ[k]}")

    logging.info(f"[rank {rank}] about to open engine")

    sim = bmi.wrapper.BMIWrapper(
        engine=os.path.join(local_config.dfm_root, "lib/libdflowfm.so"))
    logging.info(f"[rank {rank}] done with open engine")

    # Just need to keep ahead of the model a little bit.
    dt = 900.0  # update interval of the history file.

    # really just to get the list of seepages and the functions to handle them.
    model = PescaBmiSeepageMixin()
    model.run_dir = os.path.dirname(args.mdu)

    # Need to check the MDU to know if temp/salinity included
    import stompy.model.delft.io as dio
    mdu = dio.MDUFile(args.mdu)
    salt_temp = ""
    if int(mdu['physics', 'salinity']) > 0:
        salt_temp += " 0.0"
    if int(mdu['physics', 'temperature']) > 0:
        salt_temp += " 0.0"
    # runs don't always start at the reference time
    tstart_min = float(mdu['time', 'tstart']) / 60

    dt_min = mdu['numerics', 'MinTimestepBreak']
    if dt_min:
        dt_min = float(dt_min)

    if rank == 0:
        seepages = [dict(name=s) for s in model.seepages]

        for rec in seepages:
            t_pad = dt / 60.  # In minutes
            tim_fn = os.path.join(model.run_dir, f'{rec["name"]}.tim')
            rec['fp'] = open(tim_fn, 'wt')
            for t in [0.0,
                      t_pad]:  # HERE: may have to adjust for reference time
                rec['fp'].write(f"{t+tstart_min:.4f} 0.05{salt_temp}\n")
            rec['fp'].flush()

    # dfm will figure out the per-rank file
    # initialize changes working directory to where mdu is.
    logging.info(f"[{rank}] about to initialize")
    sim.initialize(args.mdu)

    if rank == 0:
        if args.mpi is None:
            hist_fn = "DFM_OUTPUT_flowfm/flowfm_his.nc"
        else:
            hist_fn = "DFM_OUTPUT_flowfm/flowfm_0000_his.nc"
        # hoping I can figure out where to pull stage here, instead of
        # in the time loop
        for waiting in range(10):
            if os.path.exists(hist_fn):
                break
            logging("Will sleep to wait for hist_fn")
            sys.stdout.flush()
            time.sleep(2.0)
        else:
            raise Exception(f"history file {hist_fn} never showed up?!")
        ds = xr.open_dataset(hist_fn)

        stations = [s.decode().strip() for s in ds.station_name.values]
        ds.close()

        for rec in seepages:
            # index of this source_sink in the history output
            # ss_idx=[s.decode().strip()
            #         for s in ds['source_sink_name'].values].index(rec['name'])
            # x=ds.source_sink_x_coordinate.isel(source_sink=ss_idx).values
            # y=ds.source_sink_y_coordinate.isel(source_sink=ss_idx).values
            # rec['xy']=np.c_[x,y] # endpoints of the source_sink
            rec['src_mon_idx'] = stations.index(rec['name'] + 'A')
            rec['snk_mon_idx'] = stations.index(rec['name'] + 'B')

    # TASK TIME LOOP
    t_calc = 0.0
    t_bmi = 0.0
    t_last = time.time()
    while sim.get_current_time() < sim.get_end_time():
        t_now = sim.get_current_time()

        if rank == 0:
            try:
                # try to streamline this, since we'll be doing it a lot and
                # CF decoding could get slow when the history file is large.
                ds = xr.open_dataset(hist_fn,
                                     decode_cf=False,
                                     decode_times=False,
                                     decode_coords=False)
            except Exception as exc:
                logging.warning(
                    f"rank {rank}  model time {t_now}  Failed to open history")
                logging.warning(str(exc))
                ds = None

            for rec in seepages:
                if ds is not None:
                    t_hist = ds.time.values[-1]
                    # Excellent -- history output can be from this same time, so
                    # there isn't even a lag here.
                    # What all do I get in history? everything I need!
                    h_src = ds.waterlevel.isel(
                        time=-1, stations=rec['src_mon_idx']).values
                    h_dst = ds.waterlevel.isel(
                        time=-1, stations=rec['snk_mon_idx']).values

                    ds.close()  # avoid xarray caching

                    L = 100  # [m] closed state, across shore distance
                    W = 100  # [m] along shore length of seepage outlet
                    k = 0.012  # [m/s] hydraulic conductivity
                    z_bedrock = 0.00  # [m]
                    if (h_src > z_bedrock):
                        Q = k * (h_src - z_bedrock) * L / W * (h_src - h_dst)
                    else:
                        Q = k * (1 * 0.001) / W * (h_src - h_dst)
                    #Q*=1.65 # extra factor to get matching with QCM.
                    # 1.65 came from scatter plots, but the time series coming
                    # out of the runs showed this calculated flux to be almost
                    # 3x too large; 0.61 comes from scaling the previous output
                    # to best match the QCM fluxes (as output by the v04 model).
                    # max(0, ...) also enforces the one-directional flow that
                    # Dane suggested.
                    Q = max(0, Q * 0.61)
                    logging.info(
                        f"[rank {rank}] t_model={t_now} h_src={h_src:.4f} h_dst={h_dst:.4f} Q={Q:.4f}"
                    )
                    # That is the last line I see in the log
                else:
                    Q = 0.0

                t_new = (dt + t_now) / 60.0
                rec['fp'].write(f"{t_new+t_pad:.4f} {Q:.4f} {salt_temp}\n")
                rec['fp'].flush()

        t_bmi += time.time() - t_last
        t_last = time.time()
        logging.info(f'taking a step dt={dt}')
        sim.update(dt)
        logging.info('Back from step')
        t_calc += time.time() - t_last
        t_last = time.time()

        # Running via BMI will not fail out when the time step gets too short, but it will
        # return back to here without going as far as we requested.
        t_post = sim.get_current_time()
        if t_post < t_now + 0.75 * dt:
            logging.error("Looks like run has stalled out.")
            logging.error(
                f"Expected a step from {t_now} to {t_now+dt} but only got to {t_post}"
            )
            logging.error("Will break out")
            break

        if rank == 0:
            logging.info(f"t_bmi: {t_bmi}   t_calc: {t_calc}")

    sim.finalize()
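
The seepage flux in the loop above is a Darcy-style conductance law. A
standalone sketch of the same arithmetic, with hypothetical stage values:

k = 0.012                # [m/s] hydraulic conductivity
L = W = 100.0            # [m] across-shore distance and along-shore width
z_bedrock = 0.0          # [m]
h_src, h_dst = 1.2, 0.4  # [m] hypothetical source / sink water levels

if h_src > z_bedrock:
    Q = k * (h_src - z_bedrock) * L / W * (h_src - h_dst)
else:
    Q = k * (1 * 0.001) / W * (h_src - h_dst)
Q = max(0, Q * 0.61)  # empirical rescaling toward QCM fluxes; one-directional
print("Q = %.4f m^3/s" % Q)  # 0.012 * 1.2 * 1.0 * 0.8 * 0.61 -> 0.0070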
Example #10
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import stompy.model.delft.io as dio
##

ds = xr.open_dataset('runs/short_test_12_test/matched_grid_v01_net.nc')

mdu = dio.MDUFile('runs/short_test_12_test/short_test_12-tmp.mdu')
##

# This agrees with both NetNode_z.min(), and FlowElem_zcc.max()
# and FlowElem_bl.min().  All -4155 ish.

##

zslay = np.array([
    -4155.000, -3315.789, -2644.420, -2107.325, -1677.649, -1333.908,
    -1058.916, -838.922, -662.926, -522.130, -409.493, -319.383, -247.296,
    -189.626, -143.490, -106.581, -77.054, -53.432, -34.535, -19.417, -7.322
])

plt.figure(1).clf()

k = np.arange(len(zslay))  # layer index for plotting
plt.plot(k, zslay, 'b-o')

##

zslay = z_layers(mdu)  # z_layers: project-local helper, not shown in this snippet
##
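
The spacings in the zslay table above grow by a factor of almost exactly 1.25
with depth. A minimal sketch of that construction (a hypothetical stand-in for
the z_layers helper, which is not shown in this snippet):

def stretched_z_interfaces(z_bed=-4155.0, z_top=-7.322, n=21, r=1.25):
    # n interface elevations from z_bed up to z_top, each spacing r times
    # thicker than the one above it, i.e. thinnest layers near the surface.
    dz_top = (z_top - z_bed) * (r - 1.0) / (r**(n - 1) - 1.0)
    dz = dz_top * r**np.arange(n - 2, -1, -1)  # deepest, thickest spacing first
    return np.concatenate(([z_bed], z_bed + np.cumsum(dz)))

print(np.diff(zslay)[1:] / np.diff(zslay)[:-1])        # ratios ~0.8 = 1/1.25
print(np.abs(stretched_z_interfaces() - zslay).max())  # agrees to ~0.1 m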