Example #1
def get_NSE(values):

    its = values['LZSN'], values['INFILT']
    print('running simulation LZSN: {:.1f}, INFILT: {:.4f}\n'.format(*its))

    # open the baseline HSPF model

    with open(model, 'rb') as f:
        hspfmodel = pickle.load(f)

    # change the filename to prevent two simultaneous runs with the same name

    its = working, values['LZSN'], values['INFILT']
    hspfmodel.filename = '{}/LZSN{:.1f}INFILT{:.4f}'.format(*its)

    # change the pan coefficient

    hspfmodel.evap_multiplier = 0.76

    # change the parameter values in all the land segments

    for p in hspfmodel.perlnds:
        p.LZSN = values['LZSN']
        p.INFILT = values['INFILT']

    # build the input WDM file

    hspfmodel.build_wdminfile()

    # build the UCI file (only need to save the reach_outvolume, ROVOL)

    hspfmodel.build_uci(['reach_outvolume'], start, end, hydrology=True)

    # run the simulation

    hspfmodel.run(verbose=True)

    # make an instance of the postprocessor to get the efficiency

    p = Postprocessor(hspfmodel, (start, end), comid=gagecomid)

    # the regressions between the simulated and observed flows can be
    # computed using the "get_regression" function that returns:
    #
    # daily r2, log daily r2, daily NSE, log daily NSE
    # monthly r2, log monthly r2, monthly NSE, log monthly NSE

    dr2, dlr2, dNS, dlNS, mr2, mlr2, mNS, mlNS = p.get_regression(gagecomid)

    # close the Postprocessor

    p.close()

    return dNS
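
# a minimal usage sketch (assumed, not part of the original example): get_NSE
# could be evaluated over a small grid of LZSN and INFILT values to find the
# best-scoring pair; the parameter ranges below are hypothetical

best_score, best_values = None, None

for LZSN in (64., 128., 256.):          # hypothetical LZSN values
    for INFILT in (0.01, 0.04, 0.16):   # hypothetical INFILT values

        values = {'LZSN': LZSN, 'INFILT': INFILT}
        NSE = get_NSE(values)

        if best_score is None or NSE > best_score:
            best_score, best_values = NSE, values

print('best daily NSE: {:.3f} with {}'.format(best_score, best_values))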
Example #2
    def get_postprocessor(self, hspfmodel, dates, snowdata=None, verbose=True):
        """Postprocesses the data."""

        if verbose: print('postprocessing simulation results\n')

        gagedata = self.directory + '/%s/NWIS/%s' % (self.HUC8, self.gageid)

        postprocessor = Postprocessor(hspfmodel,
                                      self.process_dates,
                                      comid=self.gagecomid,
                                      upcomids=self.upcomids)

        return postprocessor
Example #3
# the Postprocessor class can be used to analyze results. the following lines
# show how to use it to do some analysis and make some cool graphs.

from pyhspf import Postprocessor

# the dates of the processing period can be changed, and the postprocessor
# can be used to analyze part of the watershed rather than the whole model
# (for example, when a gage is located at a subbasin other than the last
# outlet). these arguments are optional; otherwise the last outlet is assumed
# to be the gage and the run dates are used as the processing dates by default.

process_dates = run_dates   # postprocessing dates
gagecomid     = '30'        # the subbasin identifier for the gage

p = Postprocessor(hspfmodel, process_dates, comid = gagecomid)

# here are some examples of things that can be done with the postprocessor.
# many of these require certain external targets to be specified when building
# the model (e.g. runoff, groundwater, snow)

# make a plot of daily or monthly flows, precipitation, and evapotranspiration

p.plot_hydrograph(tstep = 'daily')

# plot the runoff components, flows, and precipitation on linear and log scales

p.plot_runoff(tstep = 'daily')

# make a similar plot looking at the largest storm events for each year both  
# in summer and outside summer
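
# the call that makes that plot is cut off in this snippet; based on the
# plot_storms usage shown in the later examples, it would look something like

p.plot_storms(season = 'all')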
Example #4
# targets needed are the reach outflow volume and groundwater flow

targets = ['groundwater', 'reach_outvolume']

# build the input files

hspfmodel.build_wdminfile()
hspfmodel.build_uci(targets, start, end, hydrology = True)

# and run it

hspfmodel.run(verbose = True)

# open the postprocessor to get the calibration info

p = Postprocessor(hspfmodel, (start, end), comid = gagecomid) 

# calculate and show the errors in the calibration parameters. the product
# of the daily log-flow and daily flow Nash-Sutcliffe efficiencies is one
# possible optimization parameter for a calibration. the log-flow captures
# relative errors (low-flow conditions) while the flow captures absolute
# error (high-flow conditions).

p.calculate_errors()

# close the open files

p.close()

# now let's change the value of some parameters, re-run the model, and see 
# the effect on the calibration statistics. we will change the default
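
# a sketch of what that might look like (the original snippet is truncated, so
# the parameter choice here is assumed): scale up LZSN on the pervious land
# segments, rebuild the input files, re-run, and recompute the errors

for perlnd in hspfmodel.perlnds:
    perlnd.LZSN = 2 * perlnd.LZSN

hspfmodel.build_wdminfile()
hspfmodel.build_uci(targets, start, end, hydrology = True)
hspfmodel.run(verbose = True)

p = Postprocessor(hspfmodel, (start, end), comid = gagecomid)
p.calculate_errors()
p.close()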
Example #5
               'evaporation', 
               'runoff', 
               'groundwater',
                ]

    # build the UCI and output WDM files

    hspfmodel.build_uci(targets, start, end, atemp = True, snow = True,
                        hydrology = True)

    # run it

    hspfmodel.run(verbose = True)

    # use the Postprocessor to analyze and save the results

    postprocessor = Postprocessor(hspfmodel, (start, end), 
                                  comid = calibrator.comid)

    postprocessor.get_hspexp_parameters()
    postprocessor.plot_hydrograph(tstep = 'monthly', 
                                  output = '{}/hydrography'.format(calibration))
    postprocessor.plot_calibration(output = '{}/statistics'.format(calibration))
    postprocessor.plot_runoff(tstep = 'daily', 
                              output = '{}/runoff'.format(calibration))

# Using the preprocessor in other watersheds/gages *should* be as simple as
# supplying the parameters above (start and end date, state, 8-digit HUC,
# NWIS gage ID, land use year, maximum drainage area); if you try it and
# get an error, please report it!
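
# for reference, a hypothetical set of those parameters might look like the
# following (the names and values are placeholders, not from the original
# script)

import datetime

start       = datetime.datetime(2001, 1, 1)  # simulation start date
end         = datetime.datetime(2010, 1, 1)  # simulation end date
state       = 'Minnesota'                    # state name
huc8        = '07080106'                     # 8-digit hydrologic unit code
gageid      = '05455500'                     # NWIS gage identifier
landuseyear = 2001                           # land use dataset year
drainmax    = 400                            # maximum drainage area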
Example #6
        "runoff",
        "groundwater",
        "snow_state",
        "snowpack",
        "snowfall",
    ]

    # build the UCI and output WDM files

    hspfmodel.build_uci(targets, start, end, atemp=True, snow=True, hydrology=True)

    # run it

    hspfmodel.run(verbose=True)

    # use the Postprocessor to analyze and save the results

    dates = start + datetime.timedelta(days=warmup), end

    postprocessor = Postprocessor(hspfmodel, dates, comid=comid)

    postprocessor.get_hspexp_parameters(verbose=False)
    postprocessor.plot_hydrograph(tstep="monthly", show=False, output="{}/hydrography".format(calibration))
    postprocessor.plot_calibration(output="{}/statistics".format(calibration), show=False)
    postprocessor.plot_runoff(tstep="daily", show=False, output="{}/runoff".format(calibration))
    output = "{}/calibration_report.csv".format(calibration)
    postprocessor.calibration_report(output=output)
    postprocessor.plot_snow(output="{}/snow".format(calibration), show=False)
    postprocessor.plot_dayofyear(output="{}/dayofyear".format(calibration), show=False)
    postprocessor.plot_storms(season="all", show=False, output="{}/storms".format(calibration))
Example #7
# the Postprocessor class can be used to analyze results. the following lines
# show how to use it to do some analysis and make some cool graphs.

from pyhspf import Postprocessor

# the dates of the processing period can be changed, and the postprocessor
# can be used to analyze part of the watershed rather than the whole model
# (for example, when a gage is located at a subbasin other than the last
# outlet). these arguments are optional; otherwise the last outlet is assumed
# to be the gage and the run dates are used as the processing dates by default.

process_dates = run_dates  # postprocessing dates
gagecomid = '30'  # the subbasin identifier for the gage

p = Postprocessor(hspfmodel, process_dates, comid=gagecomid)

# here are some examples of things that can be done with the postprocessor.
# many of these require certain external targets to be specified when building
# the model (e.g. runoff, groundwater, snow)

# make a plot of daily or monthly flows, precipitation, and evapotranspiration

p.plot_hydrograph(tstep='daily')

# plot the runoff components, flows, and precipitation on linear and log scales

p.plot_runoff(tstep='daily')

# make a similar plot looking at the largest storm events for each year both
# in summer and outside summer
Example #8
with open(p, 'rb') as f:
    hspfmodel = pickle.load(f)

# find the part of the watershed contributing to the gage

d = {v: k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
comid = d[NWISgage]

# make an instance of the postprocessor for the thirty-year period

start = datetime.datetime(1981, 1, 1)
end = datetime.datetime(2011, 1, 1)

ds = start, end

postprocessor = Postprocessor(hspfmodel, ds)

# get the flows from the 30-year model

ttimes, tflow = postprocessor.get_sim_flow(comid, dates=ds)

# close the processor

postprocessor.close()

# iterate through each two-year period and make the plot

for y in range(1981, 2010, 2):

    print('reading year', y, 'data')
Example #9
                    if l == 'Impervious':
                        evap += results[c][l]['IMPEV'] * results[c][l]['area']
                    else:
                        ifwo += results[c][l]['IFWO'] * results[c][l]['area']
                        agwo += results[c][l]['AGWO'] * results[c][l]['area']
                        evap += results[c][l]['TAET'] * results[c][l]['area']
        
        # convert the accumulated volumes (depth x area) to average annual
        # depths over the contributing area

        runoff             = suro / area / (end - start).days * 365.25
        interflow          = ifwo / area / (end - start).days * 365.25
        baseflow           = agwo / area / (end - start).days * 365.25
        evapotranspiration = evap / area / (end - start).days * 365.25

        # use the postprocessor to get the simulation stats

        dates = start,end
        postprocessor = Postprocessor(hspfmodel, dates, comid = comid)

        # get the NSE during the calibration period

        d = datetime.datetime(y, 1, 1), datetime.datetime(y + 2 , 1, 1)
        
        stimes, sflow = postprocessor.get_sim_flow(comid, tstep = 'daily',
                                                   dates = d)
        otimes, oflow = postprocessor.get_obs_flow(tstep = 'daily',
                                                   dates = d)

        NSEcalibration = (1 - sum((numpy.array(sflow)-numpy.array(oflow))**2) /
                          sum((numpy.array(oflow) - numpy.mean(oflow))**2))

        # get the total precipitation during the calibration period
Example #10
    hspfmodel.build_uci(targets,
                        start,
                        end,
                        atemp=True,
                        snow=True,
                        hydrology=True)

    # run it

    hspfmodel.run(verbose=True)

    # use the Postprocessor to analyze and save the results

    dates = start + datetime.timedelta(days=warmup), end

    postprocessor = Postprocessor(hspfmodel, dates, comid=comid)

    postprocessor.get_hspexp_parameters(verbose=False)
    postprocessor.plot_hydrograph(tstep='monthly',
                                  show=False,
                                  output='{}/hydrography'.format(preliminary))
    postprocessor.plot_calibration(output='{}/statistics'.format(preliminary),
                                   show=False)
    #postprocessor.plot_runoff(tstep = 'daily', show = False,
    #                          output = '{}/runoff'.format(preliminary))
    output = '{}/calibration_report.csv'.format(preliminary)
    postprocessor.calibration_report(output=output)
    postprocessor.plot_snow(output='{}/snow'.format(preliminary), show=False)
    postprocessor.plot_dayofyear(output='{}/dayofyear'.format(preliminary),
                                 show=False)
Example #11
volMod = wdm.get_data(wdmFile, 11)

wdm.close(wdmFile)

# DataFrame.from_items was removed from newer versions of pandas; building the
# DataFrame from a dict of the name/data pairs gives the same result
datDFOut = pd.DataFrame(dict(zip(datNms, datOut)))

datDFOut.to_csv('basin_1_output.csv', index=False)

# convert the hourly outflow volumes (apparently stored in 10^4 m^3) to
# average flows in cfs (35.314666721 ft^3 per m^3, 3600 seconds per hour)
qMod = [q * 10**4 * 35.314666721 / (60 * 60) for q in volMod]

# Read flow data
flwData = pd.read_csv(
    os.path.abspath(os.path.curdir) + '\\siletz_HSPF_flw.csv')

qGge = flwData['Q_slz'].values.tolist()

# post-process

modelVolume = pd.DataFrame({'date': dttm, 'volm': volMod}, index=index)
modelVolume.to_csv('test.csv', index=False)
procDts = start, end
ggID = 11
p = Postprocessor(hspfmodel, procDts)
p.plot_hydrograph(tstep='daily')
plt.plot(dttm, qMod, label='Model')
plt.plot(dttm, qGge, label='Gaged')
plt.xlabel("Date-Time")
plt.ylabel("Flow (cfs)")
plt.yscale('log')
plt.legend()
plt.show()
Example #12
def get_NSE(values):

    its = values['LZSN'], values['INFILT']
    print('running simulation LZSN: {:.0f}, INFILT: {:.2f}\n'.format(*its))
    
    # open the baseline HSPF model

    with open(filename, 'rb') as f: hspfmodel = pickle.load(f)

    # change the filename to prevent two simultaneous runs with the same name

    i = len(hspfmodel.filename) - 1
    while hspfmodel.filename[i] != '/': i -= 1

    path = hspfmodel.filename[:i]
    
    its = path, values['LZSN'], values['INFILT']
    hspfmodel.filename = '{}/LZSN{:.0f}INFILT{:.2f}'.format(*its)
                              
    # change the parameter values in all the land segments

    for p in hspfmodel.perlnds:

        p.LZSN   = values['LZSN']
        p.INFILT = values['INFILT']

    # build the input WDM file
        
    hspfmodel.build_wdminfile()

    # build the UCI file (only need to save the reach_outvolume, ROVOL)
        
    hspfmodel.build_uci(['reach_outvolume'], start, end, atemp = atemp,
                        snow = snow, hydrology = hydrology)

    # run the simulation

    hspfmodel.run(verbose = True)

    # calibration period

    dates = start + datetime.timedelta(days = warmup), end

    # find the common identifier for the NWIS gage

    d = {v:k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
    comid = d[gageid]

    # make an instance of the postprocessor to get the efficiency
        
    p = Postprocessor(hspfmodel, dates, comid = comid)

    # the regressions between the simulated and observed flows can be
    # computed using the "get_regression" function that returns:
    #
    # daily r2, log daily r2, daily NSE, log daily NSE
    # monthly r2, log monthly r2, monthly NSE, log monthly NSE
        
    dr2, dlr2, dNS, dlNS, mr2, mlr2, mNS, mlNS = p.get_regression(comid)

    # close the Postprocessor

    p.close()

    return dNS
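
# an alternative usage sketch (assumed, not part of the original example):
# since get_NSE maps a parameter set to a single score, it could be handed to
# an optimizer; scipy and the starting guesses are used here purely for
# illustration

from scipy import optimize

def objective(x):
    # minimize the negative daily NSE over [LZSN, INFILT]
    return -get_NSE({'LZSN': x[0], 'INFILT': x[1]})

result = optimize.minimize(objective, x0 = [150., 0.05], method = 'Nelder-Mead')

print('optimized LZSN and INFILT:', result.x)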
Example #13
with open(p, 'rb') as f: hspfmodel = pickle.load(f)

# find the part of the watershed contributing to the gage
    
d = {v:k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
comid = d[NWISgage]

# make an instance of the postprocessor for the thirty-year period
    
start = datetime.datetime(1981, 1, 1)
end   = datetime.datetime(2011, 1, 1)

ds = start, end

postprocessor = Postprocessor(hspfmodel, ds)

# get the flows from the 30-year model

ttimes, tflow = postprocessor.get_sim_flow(comid, dates = ds)

# close the processor

postprocessor.close()

# iterate through each two-year period and make the plot

for y in range(1981, 2010, 2):

    print('reading year', y, 'data')
Example #14
    hspfmodel.build_uci(targets,
                        run_dates[0],
                        run_dates[1],
                        temp=True,
                        snow=True,
                        hydrology=True)

    hspfmodel.messagepath = None

    # run it

    hspfmodel.run(verbose=True)

# add the simulation parameters to the calibrator

postprocessor = Postprocessor(hspfmodel, process_dates, comid=comid)

# make the figure canvas

fig = pyplot.figure(figsize=(15, 9))

# font sizes

titlesize = 14
axissize = 12
ticksize = 11

# get the monthly flows

otimes, m_oflow = postprocessor.get_obs_flow(tstep='monthly')
stimes, m_sflow = postprocessor.get_sim_flow(comid, tstep='monthly')
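
# a minimal sketch of how the monthly flows might be drawn on the canvas
# created above (assumed; the original snippet ends here)

ax = fig.add_subplot(111)

ax.plot(otimes, m_oflow, 's', markersize=4, label='observed')
ax.plot(stimes, m_sflow, '-', label='simulated')

ax.set_title('Monthly Flows', fontsize=titlesize)
ax.set_xlabel('Date', fontsize=axissize)
ax.set_ylabel('Flow', fontsize=axissize)
ax.tick_params(labelsize=ticksize)
ax.legend(fontsize=axissize)

pyplot.show()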
Example #15
    # build the UCI and output WDM files

    hspfmodel.build_uci(targets,
                        start,
                        end,
                        atemp=True,
                        snow=True,
                        hydrology=True)

    # run it

    hspfmodel.run(verbose=True)

    # use the Postprocessor to analyze and save the results

    postprocessor = Postprocessor(hspfmodel, (start, end),
                                  comid=calibrator.comid)

    postprocessor.get_hspexp_parameters()
    postprocessor.plot_hydrograph(tstep='monthly',
                                  output='{}/hydrography'.format(calibration))
    postprocessor.plot_calibration(output='{}/statistics'.format(calibration))
    postprocessor.plot_runoff(tstep='daily',
                              output='{}/runoff'.format(calibration))

# Using the preprocessor in other watersheds/gages *should* be as simple as
# supplying the parameters above (start and end date, state, 8-digit HUC,
# NWIS gage ID, land use year, maximum drainage area); if you try it and
# get an error, please report it!
Example #16
def get_NSE(values):

    its = values['LZSN'], values['INFILT']
    print('running simulation LZSN: {:.0f}, INFILT: {:.2f}\n'.format(*its))

    # open the baseline HSPF model

    with open(filename, 'rb') as f:
        hspfmodel = pickle.load(f)

    # change the filename to prevent two simultaneous runs with the same name

    i = len(hspfmodel.filename) - 1
    while hspfmodel.filename[i] != '/':
        i -= 1

    path = hspfmodel.filename[:i]

    its = path, values['LZSN'], values['INFILT']
    hspfmodel.filename = '{}/LZSN{:.0f}INFILT{:.2f}'.format(*its)

    # change the parameter values in all the land segments

    for p in hspfmodel.perlnds:

        p.LZSN = values['LZSN']
        p.INFILT = values['INFILT']

    # build the input WDM file

    hspfmodel.build_wdminfile()

    # build the UCI file (only need to save the reach_outvolume, ROVOL)

    hspfmodel.build_uci(['reach_outvolume'],
                        start,
                        end,
                        atemp=atemp,
                        snow=snow,
                        hydrology=hydrology)

    # run the simulation

    hspfmodel.run(verbose=True)

    # calibration period

    dates = start + datetime.timedelta(days=warmup), end

    # find the common identifier for the NWIS gage

    d = {v: k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}
    comid = d[gageid]

    # make an instance of the postprocessor to get the efficiency

    p = Postprocessor(hspfmodel, dates, comid=comid)

    # the regressions between the simulated and observed flows can be
    # computed using the "get_regression" function that returns:
    #
    # daily r2, log daily r2, daily NSE, log daily NSE
    # monthly r2, log monthly r2, monthly NSE, log monthly NSE

    dr2, dlr2, dNS, dlNS, mr2, mlr2, mNS, mlNS = p.get_regression(comid)

    # close the Postprocessor

    p.close()

    return dNS
Example #17
    hspfmodel.build_wdminfile()

    # create the UCI file and the output WDM file

    hspfmodel.build_uci(targets, run_dates[0], run_dates[1], temp = True,
                        snow = True, hydrology = True)

    hspfmodel.messagepath = None

    # run it

    hspfmodel.run(verbose = True)

# add the simulation parameters to the calibrator

postprocessor = Postprocessor(hspfmodel, process_dates, comid = comid)

# make the figure canvas

fig = pyplot.figure(figsize = (15,9))

# font sizes

titlesize = 14
axissize = 12
ticksize = 11

# get the monthly flows

otimes, m_oflow = postprocessor.get_obs_flow(tstep = 'monthly')
stimes, m_sflow = postprocessor.get_sim_flow(comid, tstep = 'monthly')
Example #18
                ]

    # build the UCI and output WDM files

    hspfmodel.build_uci(targets, start, end, atemp = True, snow = True,
                        hydrology = True)

    # run it

    hspfmodel.run(verbose = True)

    # use the Postprocessor to analyze and save the results

    dates = start + datetime.timedelta(days = warmup), end

    postprocessor = Postprocessor(hspfmodel, dates, comid = comid)

    postprocessor.get_hspexp_parameters(verbose = False)
    postprocessor.plot_hydrograph(tstep = 'monthly', show = False,
                                  output = '{}/hydrography'.format(calibration))
    postprocessor.plot_calibration(output = '{}/statistics'.format(calibration),
                                   show = False)
    postprocessor.plot_runoff(tstep = 'daily', show = False,
                              output = '{}/runoff'.format(calibration))
    output = '{}/calibration_report.csv'.format(calibration)
    postprocessor.calibration_report(output = output)
    postprocessor.plot_snow(output = '{}/snow'.format(calibration), 
                            show = False)
    postprocessor.plot_dayofyear(output = '{}/dayofyear'.format(calibration),
                                 show = False)
    postprocessor.plot_storms(season = 'all', show = False,
                              output = '{}/storms'.format(calibration))
Example #19
                ]

    # build the UCI and output WDM files

    hspfmodel.build_uci(targets, start, end, atemp = True, snow = True,
                        hydrology = True)

    # run it

    hspfmodel.run(verbose = True)

    # use the Postprocessor to analyze and save the results

    dates = start + datetime.timedelta(days = warmup), end

    postprocessor = Postprocessor(hspfmodel, dates, comid = comid)

    postprocessor.get_hspexp_parameters(verbose = False)
    postprocessor.plot_hydrograph(tstep = 'monthly', show = False,
                                  output = '{}/hydrography'.format(preliminary))
    postprocessor.plot_calibration(output = '{}/statistics'.format(preliminary),
                                   show = False)
    #postprocessor.plot_runoff(tstep = 'daily', show = False,
    #                          output = '{}/runoff'.format(preliminary))
    output = '{}/calibration_report.csv'.format(preliminary)
    postprocessor.calibration_report(output = output)
    postprocessor.plot_snow(output = '{}/snow'.format(preliminary), 
                            show = False)
    postprocessor.plot_dayofyear(output = '{}/dayofyear'.format(preliminary),
                                 show = False)