Esempio n. 1
0
def get_NSE(values):
    """Run an HSPF simulation using the supplied LZSN and INFILT values and
    return the daily Nash-Sutcliffe efficiency (NSE) of the simulated vs.
    observed flows at the gage.

    values -- dict with keys 'LZSN' and 'INFILT' (PERLND parameter values)

    Relies on module-level names defined elsewhere in the file: model (path
    to the pickled baseline HSPFModel), working (working directory), start,
    end, gagecomid, pickle, and Postprocessor.
    """

    its = values['LZSN'], values['INFILT']
    # single set of parentheses -- the doubled parens were a 2to3 artifact
    print('running simulation LZSN: {:.1f}, INFILT: {:.4f}\n'.format(*its))

    # open the baseline HSPF model

    with open(model, 'rb') as f:
        hspfmodel = pickle.load(f)

    # change the filename to prevent two simultaneous runs with the same name

    its = working, values['LZSN'], values['INFILT']
    hspfmodel.filename = '{}/LZSN{:.1f}INFILT{:.4f}'.format(*its)

    # change the pan coefficient

    hspfmodel.evap_multiplier = 0.76

    # change the parameter values in all the land segments
    # (loop variable renamed from "p", which was later shadowed by the
    # Postprocessor instance below)

    for perlnd in hspfmodel.perlnds:
        perlnd.LZSN = values['LZSN']
        perlnd.INFILT = values['INFILT']

    # build the input WDM file

    hspfmodel.build_wdminfile()

    # build the UCI file (only need to save the reach outflow volume, ROVOL)

    hspfmodel.build_uci(['reach_outvolume'], start, end, hydrology=True)

    # run the simulation

    hspfmodel.run(verbose=True)

    # make an instance of the postprocessor to get the efficiency

    postprocessor = Postprocessor(hspfmodel, (start, end), comid=gagecomid)

    # the regressions between the simulated and observed flows can be
    # computed using the "regression" function that returns:
    #
    # daily r2, log daily r2, daily NSE, log daily NSE
    # monthly r2, log monthly r2, monthly NSE, log monthly NSE

    dr2, dlr2, dNS, dlNS, mr2, mlr2, mNS, mlNS = \
        postprocessor.get_regression(gagecomid)

    # close the Postprocessor (releases its open WDM files)

    postprocessor.close()

    return dNS
Esempio n. 2
0
    def get_postprocessor(self, hspfmodel, dates, snowdata=None, verbose=True):
        """Create and return a Postprocessor for the simulation results.

        hspfmodel -- the (already run) HSPFModel instance to postprocess
        dates     -- processing period; NOTE(review): currently ignored in
                     favor of self.process_dates -- confirm this is intended
        snowdata  -- optional snow observations; currently unused
        verbose   -- print a status message when True
        """

        if verbose: print('postprocessing simulation results\n')

        # an unused local ("gagedata", the NWIS gage data path) was removed:
        # it was computed but never read

        postprocessor = Postprocessor(hspfmodel,
                                      self.process_dates,
                                      comid=self.gagecomid,
                                      upcomids=self.upcomids)

        return postprocessor
Esempio n. 3
0
                ]

    # build the UCI and output WDM files

    hspfmodel.build_uci(targets, start, end, atemp = True, snow = True,
                        hydrology = True)

    # run it

    hspfmodel.run(verbose = True)

    # use the Postprocessor to analyze and save the results

    # processing period skips the warmup days at the start of the run
    dates = start + datetime.timedelta(days = warmup), end

    postprocessor = Postprocessor(hspfmodel, dates, comid = comid)

    # estimate the HSPEXP calibration parameters from the simulation
    postprocessor.get_hspexp_parameters(verbose = False)

    # save the hydrograph, calibration statistics, runoff, snow, and
    # day-of-year plots plus the calibration report to the "calibration"
    # directory without displaying them
    postprocessor.plot_hydrograph(tstep = 'monthly', show = False,
                                  output = '{}/hydrography'.format(calibration))
    postprocessor.plot_calibration(output = '{}/statistics'.format(calibration),
                                   show = False)
    postprocessor.plot_runoff(tstep = 'daily', show = False,
                              output = '{}/runoff'.format(calibration))
    output = '{}/calibration_report.csv'.format(calibration)
    postprocessor.calibration_report(output = output)
    postprocessor.plot_snow(output = '{}/snow'.format(calibration), 
                            show = False)
    postprocessor.plot_dayofyear(output = '{}/dayofyear'.format(calibration),
                                 show = False)
    postprocessor.plot_storms(season = 'all', show = False, 
Esempio n. 4
0
# the Postprocessor class can be used to analyze results. the following lines
# show how to use it to do some analysis and make some cool graphs.

from pyhspf import Postprocessor

# the dates of the processing period can be changed, and the postprocessor
# can be used to analyze part of the watershed rather than the whole model --
# for example, if a gage is located at a subbasin other than the last outlet.
# these are optional; the last outlet is assumed to be the gage otherwise, and
# the run dates are used as the processing dates by default.

process_dates = run_dates  # postprocessing dates (run_dates assumed defined earlier)
gagecomid = '30'  # the subbasin identifier for the gage

p = Postprocessor(hspfmodel, process_dates, comid=gagecomid)

# here are some examples of things that can be done with the postprocessor.
# many of these require certain external targets to be specified when building
# the model (e.g. runoff, groundwater, snow)

# make a plot of daily or monthly flows, precipitation, and evapotranspiration

p.plot_hydrograph(tstep='daily')

# plot the runoff components, flows, and precipitation on linear and log scales

p.plot_runoff(tstep='daily')

# make a similar plot looking at the largest storm events for each year both
# in summer and outside summer
Esempio n. 5
0
def get_NSE(values):
    """Run an HSPF simulation using the supplied LZSN and INFILT values and
    return the daily Nash-Sutcliffe efficiency (NSE) of the simulated vs.
    observed flows at the NWIS gage.

    values -- dict with keys 'LZSN' and 'INFILT' (PERLND parameter values)

    Relies on module-level names defined elsewhere in the file: filename
    (path to the pickled baseline HSPFModel), start, end, warmup, gageid,
    atemp, snow, hydrology, pickle, datetime, and Postprocessor.
    """

    its = values['LZSN'], values['INFILT']
    print('running simulation LZSN: {:.0f}, INFILT: {:.2f}\n'.format(*its))

    # open the baseline HSPF model

    with open(filename, 'rb') as f:
        hspfmodel = pickle.load(f)

    # change the filename to prevent two simultaneous runs with the same
    # name; str.rfind locates the last '/' directly (replaces the manual
    # backwards character scan, which raised IndexError on a path with no
    # separator)

    path = hspfmodel.filename[:hspfmodel.filename.rfind('/')]

    its = path, values['LZSN'], values['INFILT']
    hspfmodel.filename = '{}/LZSN{:.0f}INFILT{:.2f}'.format(*its)

    # change the parameter values in all the land segments
    # (loop variable renamed from "p", which was later shadowed by the
    # Postprocessor instance below)

    for perlnd in hspfmodel.perlnds:

        perlnd.LZSN = values['LZSN']
        perlnd.INFILT = values['INFILT']

    # build the input WDM file

    hspfmodel.build_wdminfile()

    # build the UCI file (only need to save the reach outflow volume, ROVOL)

    hspfmodel.build_uci(['reach_outvolume'],
                        start,
                        end,
                        atemp=atemp,
                        snow=snow,
                        hydrology=hydrology)

    # run the simulation

    hspfmodel.run(verbose=True)

    # calibration period: skip the warmup days at the start of the run

    dates = start + datetime.timedelta(days=warmup), end

    # find the common identifier for the NWIS gage by inverting the
    # subbasin -> gage mapping

    d = {v: k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
    comid = d[gageid]

    # make an instance of the postprocessor to get the efficiency

    postprocessor = Postprocessor(hspfmodel, dates, comid=comid)

    # the regressions between the simulated and observed flows can be
    # computed using the "regression" function that returns:
    #
    # daily r2, log daily r2, daily NSE, log daily NSE
    # monthly r2, log monthly r2, monthly NSE, log monthly NSE

    dr2, dlr2, dNS, dlNS, mr2, mlr2, mNS, mlNS = \
        postprocessor.get_regression(comid)

    # close the Postprocessor (releases its open WDM files)

    postprocessor.close()

    return dNS
Esempio n. 6
0
    # build the UCI and output WDM files

    hspfmodel.build_uci(targets,
                        start,
                        end,
                        atemp=True,
                        snow=True,
                        hydrology=True)

    # run it

    hspfmodel.run(verbose=True)

    # use the Postprocessor to analyze and save the results for the
    # calibrated subbasin over the whole run period

    postprocessor = Postprocessor(hspfmodel, (start, end),
                                  comid=calibrator.comid)

    # estimate HSPEXP-style calibration parameters from the results
    postprocessor.get_hspexp_parameters()

    # save the monthly hydrograph, calibration statistics, and daily runoff
    # plots to the calibration directory
    postprocessor.plot_hydrograph(tstep='monthly',
                                  output='{}/hydrography'.format(calibration))
    postprocessor.plot_calibration(output='{}/statistics'.format(calibration))
    postprocessor.plot_runoff(tstep='daily',
                              output='{}/runoff'.format(calibration))

# Using the preprocessor in other watersheds/gages *should* be as simple as
# supplying the parameters above (start and end date, state, 8-digit HUC,
# NWIS gage ID, land use year, maximum drainage area); if you try it and
# encounter an error, please report it!
Esempio n. 7
0
# the reach outflow volume and the groundwater flow are the targets needed

targets = ['groundwater', 'reach_outvolume']

# create the input WDM file, then the UCI file for the hydrology run

hspfmodel.build_wdminfile()
hspfmodel.build_uci(targets, start, end, hydrology=True)

# run the simulation

hspfmodel.run(verbose=True)

# open a Postprocessor to pull the calibration information for the gage

postprocessor = Postprocessor(hspfmodel, (start, end), comid=gagecomid)

# compute and display the errors in the calibration parameters. one possible
# optimization criterion for a calibration is the product of the daily flow
# and daily log-flow Nash-Sutcliffe model efficiencies: the log flow captures
# relative errors (low-flow conditions) while the flow captures absolute
# errors (high-flow conditions).

postprocessor.calculate_errors()

# close the files the postprocessor opened

postprocessor.close()

# now let's change the value of some parameters, re-run the model, and see 
# the effect on the calibration statistics. we will change the default
Esempio n. 8
0
simplified.build_uci(targets,
                     start,
                     end,
                     atemp=True,
                     snow=True,
                     hydrology=True)

# run it

simplified.run(verbose=True)

# use the Postprocessor to analyze and save the results to the folder

# processing period skips the warmup days at the start of the simulation
dates = start + datetime.timedelta(days=warmup), end

postprocessor = Postprocessor(simplified, dates, comid=comid)

# estimate HSPEXP-style calibration parameters without printing them
postprocessor.get_hspexp_parameters(verbose=False)

# save the hydrograph, calibration statistics, runoff, and day-of-year plots
# plus the calibration report to the output folder without displaying them
postprocessor.plot_hydrograph(tstep='monthly',
                              show=False,
                              output='{}/hydrography'.format(output))
postprocessor.plot_calibration(output='{}/statistics'.format(output),
                               show=False)
postprocessor.plot_runoff(tstep='daily',
                          show=False,
                          output='{}/runoff'.format(output))
report = '{}/output_report.csv'.format(output)
postprocessor.calibration_report(output=report)
postprocessor.plot_dayofyear(output='{}/dayofyear'.format(output), show=False)
postprocessor.plot_storms(season='all',
                          show=False,
Esempio n. 9
0
# load the pickled HSPFModel from path "p"

with open(p, 'rb') as f:
    hspfmodel = pickle.load(f)

# find the part of the watershed contributing to the gage by inverting the
# subbasin -> gage mapping (the redundant list() around .items() was removed)

d = {v: k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
comid = d[NWISgage]

# make an instance of the postprocessor for the thirty-year period

start = datetime.datetime(1981, 1, 1)
end = datetime.datetime(2011, 1, 1)

ds = start, end

postprocessor = Postprocessor(hspfmodel, ds)

# get the simulated flows from the 30-year model

ttimes, tflow = postprocessor.get_sim_flow(comid, dates=ds)

# close the processor (releases its open WDM files)

postprocessor.close()

# iterate through each two-year and make the plot

for y in range(1981, 2010, 2):

    print(('reading year', y, 'data'))
Esempio n. 10
0
# extract the modeled outflow volume timeseries (DSN 11) from the WDM file

volMod = wdm.get_data(wdmFile, 11)

wdm.close(wdmFile)

# pd.DataFrame.from_items was removed in pandas 1.0; build the frame from an
# ordered dict instead (dict preserves insertion order on Python 3.7+, so
# the column order is unchanged)

datDFOut = pd.DataFrame(dict(zip(datNms, datOut)))

datDFOut.to_csv('basin_1_output.csv', index=False)

# convert the hourly volumes to flows in cfs (35.314666721 cf per m3);
# NOTE(review): the 10**4 factor implies the volumes are in units of
# 10^4 m3 -- confirm against the model's output units

qMod = [q * 10**4 * 35.314666721 / (60 * 60) for q in volMod]

# read the gaged flow data; os.path.join keeps the separator portable
# (the original hard-coded a Windows-only '\\' separator)

flwData = pd.read_csv(os.path.join(os.path.abspath(os.path.curdir),
                                   'siletz_HSPF_flw.csv'))

qGge = flwData['Q_slz'].values.tolist()

# post-process

modelVolume = pd.DataFrame({'date': dttm, 'volm': volMod}, index=index)
modelVolume.to_csv('test.csv', index=False)
procDts = start, end
ggID = 11  # unused in this excerpt -- TODO confirm intended use
p = Postprocessor(hspfmodel, procDts)
p.plot_hydrograph(tstep='daily')

# overlay the modeled and gaged flows on a log-scale hydrograph

plt.plot(dttm, qMod, label='Model')
plt.plot(dttm, qGge, label='Gaged')
plt.xlabel("Date-Time")
plt.ylabel("Flow (cfs)")
plt.yscale('log')
plt.legend()
plt.show()