# Example #1
# build the path to the HSPF message file (hspfmsg.wdm) that ships with
# PyHSPF; the HSPF main routine needs this path along with the UCI file

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the WDM files used by the test runs must exist before running the
# examples, so build them here with the WDMUtil class

wdm = WDMUtil(verbose=True)

# the test UCIs reference a WDM file named test.wdm -- open it writable

wdm.open('test.wdm', 'w')

# the early test runs write into datasets that they assume already
# exist, so create them now (see the test UCI files for details)

attributes = {'TCODE ': 4,
              'TSSTEP': 1,
              'TSTYPE': 'WTMP',
              'TSFORM': 3,
              }

# attribute meanings:
#
# the time series type for the first dataset is "WTMP" (water temperature)
# Example #2
# convert the exp file into wdm format

wdm.import_exp(huntday, f)

# copy the data to the hspfmodel using WDMUtil. climate data would
# generally come from somewhere other than a wdm file (e.g., an NCDC
# file), and the preprocessing modules can automate that. for the
# hspexp example only one precip and one evap timeseries are provided,
# and the file also contains the observed flow at the outlet. the dsns,
# time steps, etc. are discovered here, although they could be supplied
# directly if known.

# open the wdm read-only

wdm.open(f, 'r')

# list every dataset number in the file

dsns = wdm.get_datasets(f)

# list the time series type of each dataset
# (this is how they are identified in the exp file)

tstypes = [wdm.get_attribute(f, dsn, 'TSTYPE') for dsn in dsns]

# locate the precip and evap series (the exp files show this too, but
# this illustrates some of the flexibility of PyHSPF)

precip_dsn = dsns[tstypes.index('HPCP')]
evap_dsn = dsns[tstypes.index('EVAP')]
# Example #3
# locate the message file (hspfmsg.wdm) inside the installed PyHSPF
# package; the HSPF main routine needs this location and the UCI file

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the WDM files used by the test runs have to exist before the examples
# run, so create them with the WDMUtil class

wdm = WDMUtil(verbose=True)

# the test UCIs use a WDM file called test.wdm; open it with write access

wdm.open('test.wdm', 'w')

# the first few test runs write to datasets they assume already exist,
# so create them now (the test UCI files have the details)

attributes = {
    'TCODE ': 4,
    'TSSTEP': 1,
    'TSTYPE': 'WTMP',
    'TSFORM': 3,
}

# what the attributes mean:
#
# the time series type of the first dataset is "WTMP" (water temperature)
# the time code is 4 (the time step units for the dataset are days)
# Example #4
# external targets

targets = ['reach_outvolume', 'evaporation', 'reach_volume', 'runoff']

# build the UCI input file and launch the simulation

hspfmodel.build_uci(targets, start, end, hydrology=True, verbose=False)
hspfmodel.run(verbose=True)

# pull the simulation results back out with WDMUtil

wdm = WDMUtil()

# open the output file read-only

wdm.open(wdmoutfile, 'r')

# gather the dataset numbers and their metadata so the outlet flow can
# be located (it is plotted later with precipitation and ET)

dsns = wdm.get_datasets(wdmoutfile)
idconss = [wdm.get_attribute(wdmoutfile, d, 'IDCONS') for d in dsns]
staids = [wdm.get_attribute(wdmoutfile, d, 'STAID ') for d in dsns]

# pick out the dataset number for reach outflow volume at station 101

matches = [dsn for dsn, idcons, staid in zip(dsns, idconss, staids)
           if staid == '101' and idcons == 'ROVOL']
n = matches[0]
# make sure the PyHSPF data have been generated

for d in (climatedata, pdata, edata, cdata):

    if not os.path.isdir(d):

        # removed the redundant extra parentheses (a 2to3 artifact)
        print('error: required data in {} do not exist!\n'.format(d))
        # bare raise outside an except block halts the script with
        # "RuntimeError: No active exception to re-raise" -- this
        # matches the other examples in this file
        raise

# use WDMUtil to read the BASINS data

wdm = WDMUtil(verbose=verbose)

# open the precipitation file and the other climate data file

wdm.open(f1, 'r')
wdm.open(f2, 'r')

# make a list of the dataset numbers and their time series types

dsns = wdm.get_datasets(f2)
tstypes = [wdm.get_attribute(f2, n, 'TSTYPE') for n in dsns]

# start date for the BASINS data (after the warmup period)

bstart = start + datetime.timedelta(days=warmup)

# get the precipitation data

i = tstypes.index('PREC')
# Example #6
if not os.path.isfile('test.wdm'):
    print('warning, this simulation assumes the test.wdm file from test01')
    print('and test02 exists; please run these examples first')
    raise

# path to the HSPF message file bundled with PyHSPF (hspfmsg.wdm)

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the 3rd simulation appends more data to "test.wdm," so the target
# datasets have to be created first, as before

wdm = WDMUtil(verbose=True)

wdm.open('test.wdm', 'rw')

# NOTE(review): the original comments called TCODE 4 "hourly units",
# but other examples in this file describe TCODE 4 as daily -- confirm
attributes = {'TCODE ': 4,
              'TSSTEP': 1,   # one time unit per step
              'TSFORM': 1,   # cloud cover is an average across the step
              }

attributes['TSTYPE'] = 'CLND'
wdm.create_dataset('test.wdm', 140, attributes)

attributes['TSTYPE'] = 'CLDC'
wdm.create_dataset('test.wdm', 135, attributes)
    os.path.abspath(os.path.curdir) + '\\siletz_HSPF_pet.csv')

# read the observed flow data from the working directory
flwData = pd.read_csv(
    os.path.abspath(os.path.curdir) + '\\siletz_HSPF_flw.csv')

# write the precipitation, PET, and flow series into the WDM file
ts_to_wdmFile(wdmFile=wdmFile,
              pcpData=pcpData,
              petData=petData,
              flwData=flwData)

# check that the data can be read back from the WDM file
wdm = WDMUtil(verbose=True, messagepath=mssgpath)

# ADD BASIN TIMESERIES FROM THE WDM TO HSPFMODEL
# open the wdm read-only
wdm.open(wdmFile, 'r')

start, end = wdm.get_dates(wdmFile, 101)

x = 1

# Add specific basin met data
for basin in range(0, len(basinRecords)):

    # The DSNs are known from the exp file so just use those this time
    prcp = wdm.get_data(wdmFile, 100 + x)

    evap = wdm.get_data(wdmFile, 200 + x)

    # Add and assign timeseries data
    hspfmodel.add_timeseries('precipitation', ('prcp_' + str(x)),
# Example #8
if not os.path.isfile('test.wdm'):
    print('warning, this simulation assumes the test.wdm file from test01')
    print('and test02 exists; please run these examples first')
    raise

# path to the message file shipped with PyHSPF (hspfmsg.wdm)

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the 3rd simulation adds more data to "test.wdm," so the datasets are
# created first, as before

wdm = WDMUtil(verbose=True)

wdm.open('test.wdm', 'rw')

attributes = {
    'TCODE ': 4,  # step time units
    'TSSTEP': 1,  # one unit per time step
    'TSFORM': 1,  # cloud cover is an average across the step
}

attributes['TSTYPE'] = 'CLND'
wdm.create_dataset('test.wdm', 140, attributes)

attributes['TSTYPE'] = 'CLDC'
wdm.create_dataset('test.wdm', 135, attributes)

# release the file handle
wdm.close('test.wdm')
# Example #9
    print('please update the path and re-run\n')
    raise

# the path of the wdm file to be created

f = 'hunting.wdm'

# convert the exp file into the wdm file

wdm.import_exp(hunthour, f)

# the data are copied over to the hspfmodel with WDMUtil

# open the wdm read-only

wdm.open(f, 'r')

# the exp file tells us the dataset numbers, so use them directly

precip = wdm.get_data(f, 106)
evap = wdm.get_data(f, 111)
oflow = wdm.get_data(f, 281)

start, end = wdm.get_dates(f, 106)

# close the wdm file (forgetting this WILL cause trouble)

wdm.close('hunting.wdm')

# the evaporation data is daily, so it gets disaggregated to hourly for
# an hourly simulation (see how easy this is with Python)
# Example #10
    # abort when the UCI input file is missing (the bare raise halts
    # with "RuntimeError: No active exception to re-raise")
    if not os.path.isfile(ucifile):
        print('file does not exist')
        raise

    # find the path to the HSPF message file
    # (hspfmsg.wdm ships inside the installed pyhspf package)

    pyhspfdirectory = os.path.dirname(hspf.__file__)
    messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

    # run the simulation

    hspf.hsppy(ucifile, messagepath)

    # read the simulation output back with WDMUtil

    wdm = WDMUtil()

    wdm.open(wdmfile, 'r')

    dsns = wdm.get_datasets(wdmfile)

    # see the datasets in the WDM file

    #for n in dsns: print(n, wdm.get_attribute(wdmfile, n, 'TSTYPE'))

    # look at the UCI file to get more info on the datasets
    # (the dataset numbers below presumably correspond to entries in
    # the UCI file -- confirm against the UCI)

    precip = wdm.get_data(wdmfile, 106)
    evap = wdm.get_data(wdmfile, 426)
    pet = wdm.get_data(wdmfile, 425)
    rovol = wdm.get_data(wdmfile, 420)  # acre-ft
    oflow = wdm.get_data(wdmfile, 281)  # cfs
# Example #11
        print('reading year', y)

        # path to the pickled HSPFModel for this period; assumes a
        # "<directory>/<y>_<y+2>/<gage>" layout -- TODO confirm
        p = '{}/{}_{}/{}'.format(directory, y, y + 2, NWISgage)

        # NOTE(review): pickle.load is only safe on trusted files
        with open(p, 'rb') as f: hspfmodel = pickle.load(f)

        # calculate the runoff components in each land segment and store
        # the results in a structure as [subbasin][landuse][runoff/area]

        results = {}

        # use WDMUtil to read the data

        # the output WDM file name is derived from the model's filename
        output = hspfmodel.filename + '_out.wdm'
        wdmutil = WDMUtil()
        wdmutil.open(output, 'r')

        # read the metadata for each timeseries in the WDM file
        # (note the trailing space in the 'STAID ' attribute key)

        dsns = wdmutil.get_datasets(output)
        idconss = [wdmutil.get_attribute(output, n, 'IDCONS') for n in dsns]
        descrps = [wdmutil.get_attribute(output, n, 'DESCRP') for n in dsns]
        staids  = [wdmutil.get_attribute(output, n, 'STAID ') for n in dsns]

        # go through the impervious land segments to get the surface runoff

        for o in hspfmodel.implnds:

            c = o.subbasin

            # make a data dictionary for each subbasin
# Example #12
    # abort when the UCI input file is missing (the bare raise halts
    # with "RuntimeError: No active exception to re-raise")
    if not os.path.isfile(ucifile):
        print('file does not exist')
        raise

    # find the path to the HSPF message file
    # (hspfmsg.wdm ships inside the installed pyhspf package)

    pyhspfdirectory = os.path.dirname(hspf.__file__)
    messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

    # run the simulation

    hspf.hsppy(ucifile, messagepath)

    # read the simulation output back with WDMUtil

    wdm = WDMUtil()

    wdm.open(wdmfile, 'r')

    dsns = wdm.get_datasets(wdmfile)

    # see the datasets in the WDM file

    #for n in dsns: print(n, wdm.get_attribute(wdmfile, n, 'TSTYPE'))

    # look at the UCI file to get more info on the datasets
    # (the dataset numbers below presumably correspond to entries in
    # the UCI file -- confirm against the UCI)

    precip = wdm.get_data(wdmfile, 106)
    evap   = wdm.get_data(wdmfile, 426)
    pet    = wdm.get_data(wdmfile, 425)
    rovol  = wdm.get_data(wdmfile, 420) # acre-ft
    oflow  = wdm.get_data(wdmfile, 281) # cfs
# Example #13
# make sure the PyHSPF data have been generated

for d in (climatedata, pdata, edata, cdata):
    if not os.path.isdir(d):
        print('error: required data in {} do not exist!\n'.format(d))
        raise

# read the BASINS data with WDMUtil

wdm = WDMUtil(verbose=verbose)

# open both the precipitation file and the other climate data file

wdm.open(f1, 'r')
wdm.open(f2, 'r')

# list the dataset numbers and the corresponding time series types

dsns = wdm.get_datasets(f2)
tstypes = [wdm.get_attribute(f2, dsn, 'TSTYPE') for dsn in dsns]

# the BASINS data start after the warmup period

bstart = start + datetime.timedelta(days=warmup)

# locate the precipitation dataset

i = tstypes.index('PREC')
# Example #14
    print('please update the path and re-run\n')
    raise

# the wdm file that will be created

f = 'hunting.wdm'

# import the exp data into the wdm file

wdm.import_exp(hunthour, f)

# the data get copied into the hspfmodel via WDMUtil

# open the wdm with read access

wdm.open(f, 'r')

# the dataset numbers are known from the exp file, so use them directly

precip = wdm.get_data(f, 106)
evap = wdm.get_data(f, 111)
oflow = wdm.get_data(f, 281)

start, end = wdm.get_dates(f, 106)

# close up the wdm file (forgetting this WILL cause trouble)

wdm.close('hunting.wdm')

# the evaporation data is daily and must be disaggregated to hourly
# for an hourly simulation (see how easy this is with Python)
# Example #15
str_wdm_new = 'siletz.wdm'

# WDMUtil needs the location of the HSPF message file
messagepath = os.path.abspath(os.path.curdir) + '\\hspfmsg.wdm'

# load the precipitation input
df_prec = pd.read_csv(str_precip)
df_prec.head()

# load the PET input
df_pet = pd.read_csv(str_pet)
df_pet.head()

# instantiate the WDMUtil class
wdm = WDMUtil(verbose=True, messagepath='hspfmsg.wdm')

# create a new wdm file
wdm.open(str_wdm_new, 'w')

# adapted from the pyHSPF test01.py example:
#
# the first few test runs write data to the WDM files but assume the
# datasets already exist, so they are created here (see the test UCI
# files if you are curious)

attributes = {'TCODE ': 4,
              'TSSTEP': 1,
              'TSTYPE': 'WTMP',
              'TSFORM': 3,
              }

# what these attributes mean: