print(('error: required file {} does not exist!\n'.format(f)))
        raise

# make sure the PyHSPF data directories have been generated

for d in (climatedata, pdata, edata, cdata):

    if not os.path.isdir(d):

        # redundant 2to3-style double parentheses removed -- print takes
        # the formatted string directly (matches the duplicate of this
        # snippet elsewhere in the collection)
        print('error: required data in {} do not exist!\n'.format(d))
        # bare "raise" with no active exception raises RuntimeError,
        # which aborts the script (the style used throughout these examples)
        raise

# use WDMUtil to read the BASINS data

wdm = WDMUtil(verbose=verbose)

# open the precipitation file (f1) and the other climate data file (f2)

wdm.open(f1, 'r')
wdm.open(f2, 'r')

# make a list of the dataset numbers and each dataset's time-series type

dsns = wdm.get_datasets(f2)
tstypes = [wdm.get_attribute(f2, n, 'TSTYPE') for n in dsns]

# start date for the BASINS data (after the warmup period)

bstart = start + datetime.timedelta(days=warmup)
Beispiel #2
0
    raise

# abort early when the test.wdm file produced by the test01 example is absent
if not os.path.isfile('test.wdm'):
    print('warning, this simulation assumes the test.wdm file from test01')
    print('and exists; please run this example first')
    # bare "raise" with no active exception raises RuntimeError, stopping
    # the script here
    raise

# this is the path to the message file in PyHSPF (hspfmsg.wdm)

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the 2nd simulation adds more data to "test.wdm," so we need to create the
# datasets as before.

wdm = WDMUtil(verbose=True)

# open for read/write since the simulation appends to the existing file
wdm.open('test.wdm', 'rw')

# shared WDM attributes for the three precipitation datasets
attributes = {
    'TCODE ': 3,  # hourly units
    'TSSTEP': 1,  # one unit (hourly) time step
    'TSTYPE': 'PREC',  # precipitation type
    'TSFORM': 2  # precip is a total amount across the step
}

# NOTE(review): DSNs 39, 131, and 132 presumably match the numbers
# referenced in the example's UCI file -- confirm against that file
wdm.create_dataset('test.wdm', 39, attributes)
wdm.create_dataset('test.wdm', 131, attributes)
wdm.create_dataset('test.wdm', 132, attributes)

# read the precipitation, PET, and flow timeseries from CSV files in the
# current working directory. (The previous "solar radiation" comment was a
# stale copy-paste leftover; this section loads the Siletz input data.)

# os.path.join replaces the hard-coded '\\' separator so the paths also
# resolve on non-Windows platforms
here = os.path.abspath(os.path.curdir)

pcpData = pd.read_csv(os.path.join(here, 'siletz_HSPF_precip.csv'))

petData = pd.read_csv(os.path.join(here, 'siletz_HSPF_pet.csv'))

flwData = pd.read_csv(os.path.join(here, 'siletz_HSPF_flw.csv'))

# write the three timeseries into the WDM file
# (ts_to_wdmFile is defined elsewhere in this project)
ts_to_wdmFile(wdmFile=wdmFile,
              pcpData=pcpData,
              petData=petData,
              flwData=flwData)

# See if you can read the data back from the WDM file just written
wdm = WDMUtil(verbose=True, messagepath=mssgpath)

# ADD BASIN TIMESERIES FROM THE WDM TO HSPFMODEL
# open the wdm for read access
wdm.open(wdmFile, 'r')

# NOTE(review): assumes DSN 101 exists in the file -- confirm against the
# exp/WDM layout
start, end = wdm.get_dates(wdmFile, 101)

# running DSN offset; presumably incremented inside the loop below
# (the loop body is truncated in this fragment)
x = 1

# Add specific basin met data
for basin in range(0, len(basinRecords)):

    # The DSNs are known from the exp file so just use those this time
    prcp = wdm.get_data(wdmFile, 100 + x)
Beispiel #4
0
    raise

if not os.path.isfile('test.wdm'):
    print('warning, this simulation assumes the test.wdm file from test01')
    print('and test02 exists; please run these examples first')
    raise

# locate the HSPF message file (hspfmsg.wdm) that ships inside PyHSPF

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the 3rd simulation appends more data to "test.wdm," so the datasets have
# to be created up front just as before

wdm = WDMUtil(verbose=True)

wdm.open('test.wdm', 'rw')

# attributes shared by both cloud-related datasets
# (TCODE 4; note TCODE 3 is hourly elsewhere in these examples, so the
# original "hourly units" comment here looked like a copy-paste leftover)
attributes = {
    'TCODE ': 4,
    'TSSTEP': 1,  # one unit time step
    'TSFORM': 1,  # cloud cover is an average across the step
}

# create the two datasets, which differ only in their time-series type
for tstype, dsn in (('CLND', 140), ('CLDC', 135)):
    attributes['TSTYPE'] = tstype
    wdm.create_dataset('test.wdm', dsn, attributes)
Beispiel #5
0
else:
    print('you appear to be missing the data files in the data/tests')
    print('directory that are needed for this simulation')
    raise

# this is the path to the message file in PyHSPF (hspfmsg.wdm)
# the HSPF main routine needs the location of this file and the UCI file

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# before running the examples, we have to create the WDM files used by the
# test runs, which we will do with the WDMUtil class

wdm = WDMUtil(verbose=True)

# the name of the WDM file used in the test UCIs is test.wdm
# open it up for write access ('w' creates a fresh file)

wdm.open('test.wdm', 'w')

# the first few test runs write data to the WDM files, but they assume the
# datasets already exist so we need to create them. have a look at the test
# UCI files if you are curious

attributes = {
    'TCODE ': 4,
    'TSSTEP': 1,
    'TSTYPE': 'WTMP',
    'TSFORM': 3,
Beispiel #6
0
# input/output file names for the Siletz example
str_precip = 'siletz_HSPF_precip.csv'
str_pet = 'siletz_HSPF_PET.csv'
str_wdm = 'bigelk_in.wdm'
str_wdm_new = 'siletz.wdm'

# path to the HSPF message file; WDMUtil needs this file ("WDMItil" typo
# fixed). os.path.join replaces the '\\' concatenation so the path is
# portable across operating systems.
messagepath = os.path.join(os.path.abspath(os.path.curdir), 'hspfmsg.wdm')

df_prec = pd.read_csv(str_precip)
df_prec.head()  # no-op in a script; displays a preview in a notebook

df_pet = pd.read_csv(str_pet)
df_pet.head()  # no-op in a script; displays a preview in a notebook

# create an instance of the WDMUtil class. Pass the messagepath computed
# above instead of a hard-coded relative 'hspfmsg.wdm': the variable was
# previously assigned but never used, which was clearly unintended.
wdm = WDMUtil(verbose=True, messagepath=messagepath)

# create a new wdm file
wdm.open(str_wdm_new, 'w')

# taken from the PyHSPF test01.py example
#
# the first few test runs write data to the WDM files, but they assume the
# datasets already exist so we need to create them. have a look at the test
# UCI files if you are curious

attributes = {
    'TCODE ': 4,
    'TSSTEP': 1,
    'TSTYPE': 'WTMP',
    'TSFORM': 3,
Beispiel #7
0
        print('reading year', y)

        # path layout: <directory>/<y>_<y+2>/<gage>; presumably one pickled
        # HSPFModel per 2-year window -- confirm against the writer
        p = '{}/{}_{}/{}'.format(directory, y, y + 2, NWISgage)
    
        # NOTE(review): pickle.load executes arbitrary code on malicious
        # input -- only load model files produced by this project
        with open(p, 'rb') as f: hspfmodel = pickle.load(f)

        # calculate the runoff components in each land segment and store
        # the results in a structure as [subbasin][landuse][runoff/area]

        results = {}

        # use WDMUtil to read the data

        output = hspfmodel.filename + '_out.wdm'
        wdmutil = WDMUtil()
        wdmutil.open(output, 'r')

        # read the metadata for each timeseries in the WDM file
        # (attribute names are fixed-width six characters, hence 'STAID ')

        dsns = wdmutil.get_datasets(output)    
        idconss = [wdmutil.get_attribute(output, n, 'IDCONS') for n in dsns]
        descrps = [wdmutil.get_attribute(output, n, 'DESCRP') for n in dsns]
        staids  = [wdmutil.get_attribute(output, n, 'STAID ') for n in dsns]

        # go through the impervious land segments to get the surface runoff

        for o in hspfmodel.implnds:

            c = o.subbasin
        
Beispiel #8
0
else:
    print('you appear to be missing the data files in the "data"')
    print('directory that are needed for this simulation')
    raise

# this is the path to the message file in PyHSPF (hspfmsg.wdm)
# the HSPF main routine needs the location of this file and the UCI file

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# before running the examples, we have to create the WDM files used by the
# test runs, which we will do with the WDMUtil class

wdm = WDMUtil(verbose = True)

# the name of the WDM file used in the test UCIs is test.wdm
# open it up for write access ('w' creates a fresh file)

wdm.open('test.wdm', 'w')

# the first few test runs write data to the WDM files, but they assume the
# datasets already exist so we need to create them. have a look at the test
# UCI files if you are curious

# attribute keys are fixed-width six characters (hence 'TCODE ')
attributes = {'TCODE ': 4, 
              'TSSTEP': 1, 
              'TSTYPE': 'WTMP', 
              'TSFORM': 3,
              }
Beispiel #9
0
# mark the outlet subbasin of the watershed
watershed.add_outlet(sname)

# make the HSPFModel instance (the data for this example use the non-default
# option of English instead of metric units)

from pyhspf import HSPFModel

hspfmodel = HSPFModel(units='English')

# since the climate data are provided with hspexp in an export file called
# "huntobs.exp."  WDMUtil has a method to automatically import the data to a
# WDM file.

from pyhspf import WDMUtil

wdm = WDMUtil()

# path to hspexp2.4 data files (make sure the path is correct)
# the data from the export file (*.exp) provided with hspexp need to be
# imported into a wdm file; the WDMUtil class has a method for this

huntday = 'huntday/huntobs.exp'

# destination WDM file created by the import
f = 'hunting.wdm'

# import from exp to wdm

wdm.import_exp(huntday, f)

# copy the data to the hspfmodel using WDMUtil. in general climate
# data would need to come from some other place  (not a wdm file);
Beispiel #10
0
    # check that the UCI file exists before handing it to HSPF

    if not os.path.isfile(ucifile):
        print('file does not exist')
        # bare "raise" with no active exception raises RuntimeError
        raise

    # find the path to the HSPF message file bundled with PyHSPF

    pyhspfdirectory = os.path.dirname(hspf.__file__)
    messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

    # run the simulation

    hspf.hsppy(ucifile, messagepath)

    wdm = WDMUtil()

    wdm.open(wdmfile, 'r')

    dsns = wdm.get_datasets(wdmfile)

    # see the datasets in the WDM file

    #for n in dsns: print(n, wdm.get_attribute(wdmfile, n, 'TSTYPE'))

    # look at the UCI file to get more info on the datasets
    # NOTE(review): DSNs 106/426/425/420 presumably match this example's
    # UCI -- confirm there

    precip = wdm.get_data(wdmfile, 106)
    evap   = wdm.get_data(wdmfile, 426)
    pet    = wdm.get_data(wdmfile, 425)
    rovol  = wdm.get_data(wdmfile, 420) # acre-ft
Beispiel #11
0
        print('error: required file {} does not exist!\n'.format(f))
        raise

# verify that the PyHSPF data directories have been generated

for d in (climatedata, pdata, edata, cdata):
    if not os.path.isdir(d):
        print('error: required data in {} do not exist!\n'.format(d))
        raise

# read the BASINS data with WDMUtil

wdm = WDMUtil(verbose=verbose)

# open the precipitation file and the other climate data file for reading

wdm.open(f1, 'r')
wdm.open(f2, 'r')

# collect the dataset numbers and the time-series type of each dataset

dsns = wdm.get_datasets(f2)
tstypes = [wdm.get_attribute(f2, n, 'TSTYPE') for n in dsns]

# the BASINS data start once the warmup period has elapsed

bstart = start + datetime.timedelta(days=warmup)
Beispiel #12
0
# register subbasin '32' in the collection
subbasins['32'] = subbasin

# create an instance of the watershed class from the subbasin information

watershed = Watershed(description, subbasins)

# add the network and the outlet subbasin

watershed.add_mass_linkage(updown)
watershed.add_outlet('30')

# since the climate data are provided with hspexp in an export file called
# "huntobs.exp."  WDMUtil has a method to automatically import the data to a
# WDM file.

wdm = WDMUtil()

# the data from the export file (*.exp) provided with hspexp need to be
# imported into a wdm file. WDMUtil has a method for this.

hunthour = 'hunthour/huntobs.exp'

# this is just a check to see the file is there

if not os.path.isfile(hunthour):

    print('error: file {} seems to be missing'.format(hunthour))
    print('please update the path and re-run\n')
    # bare "raise" with no active exception raises RuntimeError
    raise

# the path to the wdm file to create
Beispiel #13
0
    raise

# abort early when the test.wdm file produced by the test01 example is absent
if not os.path.isfile('test.wdm'):
    print('warning, this simulation assumes the test.wdm file from test01')
    print('and exists; please run this example first')
    raise

# this is the path to the message file in PyHSPF (hspfmsg.wdm)

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the 2nd simulation adds more data to "test.wdm," so we need to create the
# datasets as before.

wdm = WDMUtil(verbose = True)

# read/write access: the simulation appends to the existing file
wdm.open('test.wdm', 'rw')

attributes = {'TCODE ': 3,      # hourly units
              'TSSTEP': 1,      # one unit (hourly) time step
              'TSTYPE': 'PREC', # precipitation type
              'TSFORM': 2       # precip is a total amount across the step
              }

wdm.create_dataset('test.wdm',  39, attributes)
wdm.create_dataset('test.wdm', 131, attributes)
wdm.create_dataset('test.wdm', 132, attributes)

# solar radiation -- the file aggregates the hourly to bi-hourly so TSSTEP = 2
# NOTE(review): the line above describes code past this fragment's cut point
Beispiel #14
0
# always there if you want to see it, but changing the parameters is much 
# easier in Python, and can even be scripted. The "with" statement just closes 
# the file where the HSPFModel is stored.

import pickle

# persist the model so later examples can reload it without rebuilding
with open('hspfmodel', 'wb') as f: pickle.dump(hspfmodel, f)

# assuming that went ok (look at the HSPF-generated .ech and .out files), 
# the results can be retrieved using WDMUtil

from pyhspf import WDMUtil

# create an instance of WDMUtil

wdm = WDMUtil()

# open the file for read access

wdm.open(wdmoutfile, 'r')

# pull up the flow at the outlet and plot it along with the precipitation
# and evapotranspiration. the attributes that identify the data are "IDCONS"
# (constituent ID) and "STAID " (station ID). these were assigned by the
# build_wdminfile and build_uci routines automatically; they can be modified
# as needed. the attributes always have six characters so make sure to add 
# trailing spaces.

dsns    =  wdm.get_datasets(wdmoutfile)
idconss = [wdm.get_attribute(wdmoutfile, n, 'IDCONS') for n in dsns]
staids  = [wdm.get_attribute(wdmoutfile, n, 'STAID ') for n in dsns]
Beispiel #15
0
# register subbasin '32' with the collection of subbasins
subbasins['32'] = subbasin

# build the Watershed object from the subbasin information

watershed = Watershed(description, subbasins)

# wire up the flow network and mark subbasin '30' as the outlet

watershed.add_mass_linkage(updown)
watershed.add_outlet('30')

# the climate data ship with hspexp as an export file ("huntobs.exp");
# WDMUtil can import that data into a WDM file automatically

wdm = WDMUtil()

hunthour = 'hunthour/huntobs.exp'

# verify the export file is present before attempting the import
if not os.path.isfile(hunthour):
    print('error: file {} seems to be missing'.format(hunthour))
    print('please update the path and re-run\n')
    raise

# the path to the wdm file to create
Beispiel #16
0
    raise

# abort early when the test.wdm file from the earlier examples is absent
if not os.path.isfile('test.wdm'):
    print('warning, this simulation assumes the test.wdm file from test01')
    print('and test02 exists; please run these examples first')
    raise

# this is the path to the message file in PyHSPF (hspfmsg.wdm)

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# the 3rd simulation adds more data to "test.wdm," so we need to create the
# datasets as before.

wdm = WDMUtil(verbose=True)

# read/write access: the simulation appends to the existing file
wdm.open('test.wdm', 'rw')

# attributes shared by the cloud datasets; TSTYPE is filled in per dataset
attributes = {
    'TCODE ': 4,  # NOTE(review): TCODE 3 is hourly elsewhere, so the
    'TSSTEP': 1,  # original "hourly" note for TCODE 4 looked mistaken
    'TSFORM': 1  # cloud cover is an average across the step
}

attributes['TSTYPE'] = 'CLND'

wdm.create_dataset('test.wdm', 140, attributes)

# the matching create_dataset call for 'CLDC' follows past this fragment's cut
attributes['TSTYPE'] = 'CLDC'
Beispiel #17
0
# build the wdm input file using the timeseries

hspfmodel.build_wdminfile()

# external targets (output timeseries HSPF should write to the WDM file)

targets = ['reach_outvolume', 'evaporation', 'reach_volume', 'runoff']

# build the input files and run

hspfmodel.build_uci(targets, start, end, hydrology = True, verbose = False)
hspfmodel.run(verbose = True)

# retrieve results using WDMUtil

wdm = WDMUtil()

# open the file for read access

wdm.open(wdmoutfile, 'r')

# pull up the flow at the outlet and plot it along with the precipitation
# and evapotranspiration
# (attribute names are fixed-width six characters, hence 'STAID ')

dsns    =  wdm.get_datasets(wdmoutfile)
idconss = [wdm.get_attribute(wdmoutfile, n, 'IDCONS') for n in dsns]
staids  = [wdm.get_attribute(wdmoutfile, n, 'STAID ') for n in dsns]

# find the dsn

n = [dsn for dsn, idcons, staid in zip(dsns, idconss, staids)
Beispiel #18
0
# build the wdm input file using the timeseries

hspfmodel.build_wdminfile()

# external targets (output timeseries HSPF should write to the WDM file)

targets = ['reach_outvolume', 'evaporation', 'reach_volume', 'runoff']

# build the input files and run

hspfmodel.build_uci(targets, start, end, hydrology=True, verbose=False)
hspfmodel.run(verbose=True)

# retrieve results using WDMUtil

wdm = WDMUtil()

# open the file for read access

wdm.open(wdmoutfile, 'r')

# pull up the flow at the outlet and plot it along with the precipitation
# and evapotranspiration
# (attribute names are fixed-width six characters, hence 'STAID ')

dsns = wdm.get_datasets(wdmoutfile)
idconss = [wdm.get_attribute(wdmoutfile, n, 'IDCONS') for n in dsns]
staids = [wdm.get_attribute(wdmoutfile, n, 'STAID ') for n in dsns]

# find the dsn

n = [
Beispiel #19
0
    # check that the UCI file exists before handing it to HSPF

    if not os.path.isfile(ucifile):
        print('file does not exist')
        raise

    # find the path to the HSPF message file bundled with PyHSPF

    pyhspfdirectory = os.path.dirname(hspf.__file__)
    messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

    # run the simulation

    hspf.hsppy(ucifile, messagepath)

    wdm = WDMUtil()

    wdm.open(wdmfile, 'r')

    dsns = wdm.get_datasets(wdmfile)

    # see the datasets in the WDM file

    #for n in dsns: print(n, wdm.get_attribute(wdmfile, n, 'TSTYPE'))

    # look at the UCI file to get more info on the datasets
    # NOTE(review): DSNs 106/426/425/420 presumably correspond to entries
    # in this example's UCI -- confirm there

    precip = wdm.get_data(wdmfile, 106)
    evap = wdm.get_data(wdmfile, 426)
    pet = wdm.get_data(wdmfile, 425)
    rovol = wdm.get_data(wdmfile, 420)  # acre-ft
Beispiel #20
0
# mark the outlet subbasin of the watershed
watershed.add_outlet(sname)

# make the HSPFModel instance (the data for this example use the non-default
# option of English instead of metric units)

from pyhspf import HSPFModel

hspfmodel = HSPFModel(units = 'English')

# since the climate data are provided with hspexp in an export file called
# "huntobs.exp."  WDMUtil has a method to automatically import the data to a 
# WDM file.

from pyhspf import WDMUtil

wdm = WDMUtil()

# path to hspexp2.4 data files (make sure the path is correct) 
# the data from the export file (*.exp) provided with hspexp need to be 
# imported into a wdm file; the WDMUtil class has a method for this

huntday = 'huntday/huntobs.exp'

# destination WDM file created by the import
f = 'hunting.wdm'

# import from exp to wdm

wdm.import_exp(huntday, f)

# copy the data to the hspfmodel using WDMUtil. in general climate 
# data would need to come from some other place  (not a wdm file); 
Beispiel #21
0
import pandas as pd
import csv, os, datetime, numpy
from matplotlib import pyplot as plt
from pyhspf import HSPFModel, WDMUtil, Postprocessor

# HSPF message file, expected in the working directory
messagepath = 'hspfmsg.wdm'

wdm = WDMUtil(verbose=True, messagepath=messagepath)

# EXTRACT MODELED FLOWS FROM WDM OUTPUT FILE AND COMPARE TO
wdmFile = 'siletz_river_out.wdm'

wdm.open(wdmFile, 'r')

# dataset numbers plus their constituent and station IDs
# (attribute names are fixed-width six characters, hence 'STAID ')
dsns = wdm.get_datasets(wdmFile)
idcons = [wdm.get_attribute(wdmFile, n, 'IDCONS') for n in dsns]
staids = [wdm.get_attribute(wdmFile, n, 'STAID ') for n in dsns]

pars = [dsns, idcons, staids]

# FIX: the original zipped over "idconss", which is never defined here
# (the list built above is "idcons") -- that was a guaranteed NameError.
# Loop variables renamed so they no longer shadow dsns/idcons.
dsnBas1 = [
    dsn for dsn, idcon, staid in zip(dsns, idcons, staids) if staid == '1'
]  # These are the dsns for all Basin 1 outputs

indBas1 = [dsn - 1
           for dsn in dsnBas1]  # These are element indices for Basin 1 dsns

start = datetime.datetime(2012, 1, 1)
end = datetime.datetime(2013, 1, 1)

dttm = [