updown = {'100': '101'}

# add the info to the watershed and outlet

watershed.add_mass_linkage(updown)
watershed.add_outlet('101')

# names of the files used in the simulation (the HSPF input and output files
# are generated automatically); can also specify a directory to use elsewhere

filename = 'example06'

wdmoutfile = filename + '_out.wdm'

# create an instance of the HSPFModel class

hspfmodel = HSPFModel()

# and build the model from the watershed

hspfmodel.build_from_watershed(watershed, filename, tstep = tstep)

# add a special action, thawed ground on the agricultural land
# in the first subbasin on April 1 at 12 noon.

thawdate = datetime.datetime(2001, 4, 1, 12)

hspfmodel.add_special_action('thaw', '100', 'Agriculture', thawdate)

# add another special action, frozen ground on the agricultural land
# in the first subbasin on December 1 at midnight.
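# a minimal sketch of the step announced above: the special action keyword
# 'frozen' is an assumption here (it mirrors the 'thaw' call), so verify the
# supported names against the PyHSPF add_special_action documentation

freezedate = datetime.datetime(2001, 12, 1, 0)   # December 1 at midnight

hspfmodel.add_special_action('frozen', '100', 'Agriculture', freezedate)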
# create an instance of the watershed class to store the data to build the model

watershed = Watershed(description, subbasins)

# add the network and the outlet subbasin

watershed.add_mass_linkage(updown)
watershed.add_outlet(sname)

# make the HSPFModel instance (the data for this example use the non-default
# option of English instead of metric units)

from pyhspf import HSPFModel

hspfmodel = HSPFModel(units = 'English')

# the climate data are provided with hspexp in an export file called
# "huntobs.exp," and WDMUtil has a method to automatically import the data to
# a WDM file.

from pyhspf import WDMUtil

wdm = WDMUtil()

# path to the hspexp2.4 data files (make sure the path is correct)
# the data from the export file (*.exp) provided with hspexp need to be
# imported into a WDM file; the WDMUtil class has a method for this

huntday = 'huntday/huntobs.exp'
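# a hedged sketch of the import step described above; the method name
# "import_exp" and the input WDM filename are assumptions based on the PyHSPF
# example scripts, so confirm them against the WDMUtil documentation

wdmfile = 'hunting_in.wdm'            # hypothetical name for the input WDM file

wdm.import_exp(huntday, wdmfile)      # read the *.exp records into the WDM file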
# the UCI file generated by PyHSPF is named 'example01.uci' -- look at that
# file to see how the information in this script is translated to HSPF.

# the input and output WDM filenames are generated automatically, and are the
# model filename + '_in.wdm' for the input WDM file and '_out.wdm' for the
# output file (we'll need this later to retrieve results from the files)

wdmoutfile = filename + '_out.wdm'

# let's also generate an optional output file created by HSPF directly

outfile = filename + '.out'

# create an instance of the HSPFModel class

hspfmodel = HSPFModel()

# and build the model from the watershed

hspfmodel.build_from_watershed(watershed, filename, print_file = outfile,
                               tstep = tstep)

# to run a simulation it is necessary to assign precipitation, potential
# evapotranspiration, and any other time series to the subbasins.

# there are many different ways to estimate the potential evapotranspiration,
# including correlation to observed pan evaporation, Penman-Monteith, etc.

# here the potential evapotranspiration is assumed to start at zero on 1/01,
# increase to 12 mm/day by 7/01, then decrease back to zero by 1/01; thus the
# maximum 4-hour potential evapotranspiration is 2 mm. the following statement
# will generate a time series with these assumptions.
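# a minimal sketch of one way to build that series: a triangular annual cycle
# sampled every 4 hours, peaking at 2 mm per 4-hour step (12 mm/day) on July 1
# and falling to roughly zero on January 1. the start and end dates below are
# placeholders for the simulation period defined earlier in the script.

import datetime

pet_start = datetime.datetime(2001, 1, 1)          # hypothetical start date
pet_end   = datetime.datetime(2002, 1, 1)          # hypothetical end date

def triangular_pet(t):
    """Return the 4-hour PET (mm) at time t under the assumptions above."""
    day  = t.timetuple().tm_yday                               # day of year
    peak = datetime.datetime(t.year, 7, 1).timetuple().tm_yday # July 1
    return 2 * max(0, 1 - abs(day - peak) / (365 / 2))         # 2 mm maximum

times = [pet_start + datetime.timedelta(hours = 4) * i
         for i in range((pet_end - pet_start).days * 6)]

evaporation = [triangular_pet(t) for t in times]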
# CREATE HSPF MODEL

watershedSiletz = Watershed("Siletz River", subbasins)

watershedSiletz.add_mass_linkage(flow_network)

for basin in range(len(basinRecords)):

    if basinRecords[basin][6] == 0:

        watershedSiletz.add_outlet(str(basin + 1)) # assumes basin numbering starts at 1

# Build the model

hspfmodel = HSPFModel(units = 'Metric')

filename   = 'siletz_river'
outfile    = filename + '.out'
wdmoutfile = filename + '_out.wdm'

hspfmodel.build_from_watershed(watershedSiletz, filename, ifraction = ifraction,
                               tstep = tstep, print_file = outfile)

watershedSiletz.plot_mass_flow(output = 'siletz_basin_network')
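# for illustration, a hedged sketch of the inputs the code above assumes:
# flow_network is an upstream -> downstream dictionary (like 'updown' in the
# other examples) and basinRecords is a per-basin record whose seventh field
# (index 6) flags the outlet with a 0. the names and values here are made up.

flow_network = {'1': '2', '2': '3'}      # hypothetical reach network

basinRecords = [                         # hypothetical basin records
    # ... other attributes ...  outlet flag (index 6)
    ('basin 1', 0, 0, 0, 0, 0,  1),
    ('basin 2', 0, 0, 0, 0, 0,  1),
    ('basin 3', 0, 0, 0, 0, 0,  0),      # this basin is the outlet
]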
print('')

# use climate data from the PyHSPF base model for the warm up period since it
# is incomplete (first find the cutoff index for the data)

cutoff = (bstart - start).days * 24

if not os.path.isfile(newmodel):

    with open(basemodel, 'rb') as f:
        hspfmodel = pickle.load(f)

    # create a new model for the simplified climate data

    print('building a new model with the simplified time series\n')

    simplified = HSPFModel()

    # build new model parameters from the base model; the build_from_existing
    # method can be used to copy the perlnds, implnds, rchreses, special
    # actions, and reach network from the old file but contains no time series
    # or time series assignments

    simplified.build_from_existing(hspfmodel, newmodel)

    # find the comid of the gage and add the flow data to the new model

    d = {v: k for k, v in hspfmodel.subbasin_timeseries['flowgage'].items()}

    comid = d[gageid]

    s, tstep, data = hspfmodel.flowgages[gageid]
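    # a hedged sketch of the step the comment above promises: attaching the
    # gage record to the simplified model. add_timeseries follows the call
    # pattern used elsewhere in these examples; assign_subbasin_timeseries is
    # an assumption, so confirm it against the HSPFModel documentation.

    simplified.add_timeseries('flowgage', gageid, s, data, tstep = tstep)

    simplified.assign_subbasin_timeseries('flowgage', comid, gageid)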
# the evaporation data are daily, so they need to be disaggregated to hourly
# for an hourly simulation (see how easy this is with Python)

# the time series in the WDM file starts at 1 am, so one extra value is added
# to the beginning of each time series for consistency

evap   = [0] + [e / 24 for e in evap for i in range(24)]
precip = [0] + [p for p in precip]
oflow  = [0] + [o for o in oflow]

# list of times

times = [start + (end - start) / len(precip) * i for i in range(len(precip))]

# make the HSPFModel instance

hspfmodel = HSPFModel(units = 'English')

# build the model (the files will all be called example03)

hspfmodel.build_from_watershed(watershed, 'example03', ifraction = ifraction,
                               tstep = tstep)

# now add the time series to the model

hspfmodel.add_timeseries('precipitation', 'hunting_prec', start, precip,
                         tstep = 60)
hspfmodel.add_timeseries('evaporation', 'hunting_evap', start, evap,
                         tstep = 60)
hspfmodel.add_timeseries('flowgage', 'hunting_flow', start, oflow,
                         tstep = 60)
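# a hedged sketch of a likely next step: linking each record to the watershed
# so HSPF knows which series drives which operation. the method names below
# (assign_watershed_timeseries, assign_subbasin_timeseries) are assumptions
# based on the PyHSPF examples; confirm them against the HSPFModel docs.

hspfmodel.assign_watershed_timeseries('precipitation', 'hunting_prec')
hspfmodel.assign_watershed_timeseries('evaporation',   'hunting_evap')

# the observed flow would be tied to the gaged subbasin (assuming sname from
# the earlier snippet identifies that subbasin)

hspfmodel.assign_subbasin_timeseries('flowgage', sname, 'hunting_flow')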