Example #1
wdm.create_dataset('test.wdm', 119, attributes)

attributes['TSTYPE'] = 'DEWP'
wdm.create_dataset('test.wdm', 124, attributes)
wdm.create_dataset('test.wdm', 125, attributes)
wdm.create_dataset('test.wdm', 126, attributes)

attributes['TSTYPE'] = 'SEDM'
wdm.create_dataset('test.wdm', 127, attributes)

attributes['TSTYPE'] = 'FLOW'
wdm.create_dataset('test.wdm', 136, attributes)

# then we have to manually close the WDM file

wdm.close('test.wdm')

# Run test01.uci--this simulation just inputs data from TEST01DT.91 into the
# test.wdm file we just made

hspf.hsppy('test01.uci', messagepath)

# let's go back into test.wdm, pull out the data, and graph it (you may
# get a warning about the file being open--I've worked around it, but it needs
# a better fix)

wdm.open('test.wdm', 'r')

# get the datasets

wtemps = wdm.get_data('test.wdm', 134)
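The snippet stops right after pulling the water temperature dataset; a minimal sketch of the graphing step the comment above refers to might look like the following (the hourly spacing of dsn 134 and the plotting details are assumptions, not part of the original example).

start, end = wdm.get_dates('test.wdm', 134)
wdm.close('test.wdm')

import datetime
from matplotlib import pyplot

# assume the dataset is hourly when building the time axis
times = [start + i * datetime.timedelta(hours = 1) for i in range(len(wtemps))]

fig, ax = pyplot.subplots()
ax.plot(times, wtemps)
ax.set_ylabel('Water temperature')
pyplot.show()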
Example #2
# get the time series and start and end dates

precip = wdm.get_data(f, precip_dsn)

start, end = wdm.get_dates(f, precip_dsn)

evap = wdm.get_data(f, evap_dsn, start = start, end = end)

# the observed flow is dsn 281

oflow = wdm.get_data(f, 281, start = start, end = end)

# close up the wdm file (forgetting this WILL cause trouble)

wdm.close('hunting.wdm')

# make a list of the times in the daily time series using datetime "timedelta"

delta = datetime.timedelta(days = 1)
times = [start + i * delta for i in range(len(precip))]

# build the model (the files will all be called example02)

hspfmodel.build_from_watershed(watershed, 'example02', ifraction = ifraction,
                               tstep = tstep)

# now add the time series to the model

hspfmodel.add_timeseries('precipitation', 'hunting_prec', start, precip, 
                         tstep = tstep)
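The remaining series would be added and assigned the same way; a minimal sketch using calls that appear in the later examples (the dataset names and the subbasin number are assumptions, not from the original script).

hspfmodel.add_timeseries('evaporation', 'hunting_evap', start, evap,
                         tstep = tstep)
hspfmodel.add_timeseries('flowgage', 'hunting_flow', start, oflow,
                         tstep = tstep)

# assign the series to a subbasin (the subbasin number '101' is an assumption)
hspfmodel.assign_subbasin_timeseries('precipitation', '101', 'hunting_prec')
hspfmodel.assign_subbasin_timeseries('evaporation', '101', 'hunting_evap')
hspfmodel.assign_subbasin_timeseries('flowgage', '101', 'hunting_flow')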
Example #4
dsns = wdm.get_datasets(wdmoutfile)
idconss = [wdm.get_attribute(wdmoutfile, n, 'IDCONS') for n in dsns]
staids = [wdm.get_attribute(wdmoutfile, n, 'STAID ') for n in dsns]
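
# IDCONS is the constituent id and STAID is the station id attribute of each
# dataset, so together they identify each output time series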

# find the dsn

n = [
    dsn for dsn, idcons, staid in zip(dsns, idconss, staids)
    if idcons == 'ROVOL' and staid == '101'
][0]

rovol = wdm.get_data(wdmoutfile, n)

# close up the Fortran files.

wdm.close(wdmoutfile)

flows = [r * 10**6 / 3600 / 4 for r in rovol]
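# rovol is the total reach outflow volume in Mm3 per 4-hour time step, so the
# conversion to m3/s multiplies by 10**6 and divides by the 4 * 3600 s step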

# plot it

from matplotlib import pyplot

# need a list of the dates/times for the plot

times = [
    start + i * datetime.timedelta(hours=4)
    for i in range(int((end - start).total_seconds() / 3600 / 4))
]

fig = pyplot.figure(figsize=(8, 10))
i = tstypes.index('DEWP')
dewt = wdm.get_data(f1, dsns[i], start=bstart, end=end)

# get the PyHSPF solar radiation data

i = tstypes.index('SOLR')
solar = wdm.get_data(f1, dsns[i], start=bstart, end=end)

# get the PyHSPF air temperature data

i = tstypes.index('ATEM')
temp = wdm.get_data(f1, dsns[i], start=bstart, end=end)

# close the WDM files

wdm.close(f1)
wdm.close(f2)

# number of years

years = (end - bstart).days / 365.25

# compare BASINS with PyHSPF-generated time series for 07080106

pfiles = [f for f in os.listdir(pdata)]

pseries = []

for pfile in pfiles:

    with open('{}/{}'.format(pdata, pfile), 'rb') as f:
Example #6
# one HSPF parameter we saved is ROVOL (PyHSPF has a Postprocessor that can 
# be used to simplify this, but WDMUtil can also be used more directly). 
# The following statement finds the dataset number for the ROVOL timeseries
# for the reach in subbasin 101.

n = [dsn for dsn, idcons, staid in zip(dsns, idconss, staids)
     if idcons == 'ROVOL' and staid == '101'][0]

# get the data for the reach volume flux dataset

rovol = wdm.get_data(wdmoutfile, n)

# need to close up the files opened by Fortran

wdm.close(wdmoutfile)

# rovol is the total volume (in Mm3) at each time step, so we need to convert
# it to m3/s. we could have had HSPF do this, but it's nice to keep track of
# all the fluxes for looking at mass balance checks.

flows = [r * 10**6 / 3600 / 4 for r in rovol]
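# for example, 1 Mm3 in a 4-hour step is 10**6 m3 / (4 * 3600 s), or about
# 69.4 m3/s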

# plot it quickly with matplotlib using the plot_date method.

from matplotlib import pyplot

# need a list of the dates/times for the plot

times = [start + i * datetime.timedelta(hours = 4)
         for i in range(int((end - start).total_seconds() / 3600 / 4))]
    hspfmodel.add_timeseries('evaporation', ('evap_' + str(x)),
                             start,
                             evap,
                             tstep=tstep)

    # Assign to a specific basin
    hspfmodel.assign_subbasin_timeseries('precipitation', str(basin + 1),
                                         ('prcp_' + str(x)))

    hspfmodel.assign_subbasin_timeseries('evaporation', str(basin + 1),
                                         ('evap_' + str(x)))

    x += 1

# Add flow data to the gaged basin
flow = wdm.get_data(wdmFile, 301)

hspfmodel.add_timeseries('flowgage', 'flow', start, flow, tstep=tstep)

hspfmodel.assign_subbasin_timeseries('flowgage', '11', 'flow')

wdm.close(wdmFile)

# COMPLETE AND EXECUTE MODEL
hspfmodel.add_hydrology()

with open('siletz_river', 'wb') as f:
    pickle.dump(hspfmodel, f)

print('\nsuccessfully created new model "siletz_river."\n')
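A minimal sketch of how the pickled model might then be built and run (the standard PyHSPF build_wdminfile/build_uci/run workflow is assumed here, along with the output target name and the simulation start and end dates, which are not part of the original script).

with open('siletz_river', 'rb') as f:
    hspfmodel = pickle.load(f)

# build the input WDM file and the UCI, then run the simulation
hspfmodel.build_wdminfile()
hspfmodel.build_uci(['reach_outvolume'], start, end, hydrology = True)
hspfmodel.run(verbose = True)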
Example #8
# open the wdm for read access

wdm.open(f, 'r')

# the dsns are known from the exp file so just use those this time

precip = wdm.get_data(f, 106)
evap = wdm.get_data(f, 111)
oflow = wdm.get_data(f, 281)

start, end = wdm.get_dates(f, 106)

# close up the wdm file (forgetting this WILL cause trouble)

wdm.close('hunting.wdm')

# the evaporation data is daily, so it needs to be disaggregated to hourly for
# an hourly simulation (see how easy this is with Python).
# the time series in the WDM file starts at 1 am, so we had to add one extra
# value to the beginning of each time series for consistency

evap = [0] + [e / 24 for e in evap for i in range(24)]
precip = [0] + [p for p in precip]
oflow = [0] + [o for o in oflow]
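
# e.g. a daily evaporation value of 2.4 becomes 24 hourly values of 0.1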

# list of times

times = [start + (end - start) / len(precip) * i for i in range(len(precip))]

# make the HSPFModel instance
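A minimal sketch of the step the comment above refers to, assuming the watershed object and the time step have already been defined as in the earlier examples (the constructor arguments and the output file name are assumptions).

from pyhspf import HSPFModel

hspfmodel = HSPFModel(units = 'Metric')
hspfmodel.build_from_watershed(watershed, 'hunting', tstep = tstep)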
Example #9
                            # third: constituent id
                
                            if idcons == v:

                                # get the time series data

                                data = wdmutil.get_data(output, n,
                                                        start=start, end=end)

                                # add the total to the database

                                results[c][o.landtype][v] = data.sum()

        # close up the WDM file

        wdmutil.close(output)
        wdmutil = None

        # free up memory
        
        gc.collect()
    
        # find the part of the watershed contributing to the gage
    
        d = {v:k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
        comid = d[NWISgage]
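
        # d maps each gage name to its subbasin comid, so this gives the comid
        # of the subbasin containing the NWIS gage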

        # find the comids of all the subbasins upstream of the gage

        comids = [comid]
        n = 0
i = tstypes.index('DEWP')
dewt = wdm.get_data(f1, dsns[i], start = bstart, end = end)

# get the PyHSPF solar radiation data

i = tstypes.index('SOLR')
solar = wdm.get_data(f1, dsns[i], start = bstart, end = end)

# get the PyHSPF air temperature data

i = tstypes.index('ATEM')
temp = wdm.get_data(f1, dsns[i], start = bstart, end = end)

# close the WDM files

wdm.close(f1)
wdm.close(f2)

# number of years

years = (end - bstart).days / 365.25

# compare BASINS with PyHSPF-generated time series for 07080106

pfiles = [f for f in os.listdir(pdata)]

pseries = []

for pfile in pfiles:

    with open('{}/{}'.format(pdata, pfile), 'rb') as f:
Example #11
# 10 m3 per hour. Given that context, HSPF groups all variables into one of
# three categories (the examples reference heat transfer concepts):

# TSFORM = 1 -- The mean value of a state variable (such as temperature)
# TSFORM = 2 -- The total flux across a time step (such as heat flux energy)
# TSFORM = 3 -- The value at the end of the time step (such as temperature)

# for precip and PET, the TSFORM value would be 2 because it would be the
# total precip that occurred over the time step
attributes['TSFORM'] = 3
attributes['TSTYPE'] = 'PREC'
wdm.create_dataset(str_wdm_new, 11, attributes)
attributes['TSTYPE'] = 'PET'
wdm.create_dataset(str_wdm_new, 12, attributes)

# add precip data for sub-basin 1
start_date = df_prec['DATE'][0]
date_start = datetime.datetime(int(start_date[5:9]), int(start_date[0:2]),
                               int(start_date[3:4]))
prec_add = [float(x) for x in list(df_prec.iloc[:, 1])]
wdm.add_data(str_wdm_new, 11, prec_add, date_start)

# add pet data for sub-basin 1
start_date = df_prec['DATE'][0]
date_start = datetime.datetime(int(start_date[5:9]), int(start_date[0:2]),
                               int(start_date[3:4]))
pet_add = [float(x) for x in list(df_pet.iloc[:, 1])]
wdm.add_data(str_wdm_new, 12, pet_add, date_start)
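
# hypothetical alternative to the manual slicing above, assuming the DATE
# strings follow a month/day/year pattern such as '10/1/1995' (the format is
# an assumption)
date_start = datetime.datetime.strptime(df_prec['DATE'][0], '%m/%d/%Y')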

# close wdm file
wdm.close(str_wdm_new)