# NOTE(review): this fragment was whitespace-mangled; line breaks and
# indentation below are reconstructed.  The leading `else:` belongs to a
# conditional whose `if` branch lies outside this chunk.
else:
    print('you appear to be missing the data files in the data/tests')
    print('directory that are needed for this simulation')
    # NOTE(review): bare `raise` with no active exception produces a
    # RuntimeError at runtime -- confirm an exception is in flight here,
    # otherwise `raise SystemExit` or a specific exception is intended
    raise

# this is the path to the message file in PyHSPF (hspfmsg.wdm)
# the HSPF main routine needs the location of this file and the UCI file

pyhspfdirectory = os.path.dirname(hspf.__file__)
messagepath = '{}/pyhspf/core/hspfmsg.wdm'.format(pyhspfdirectory)

# before running the examples, we have to create the WDM files used by the
# test runs, which we will do with the WDMUtil class

wdm = WDMUtil(verbose=True)

# the name of the WDM file used in the test UCIs is test.wdm
# open it up for write access

wdm.open('test.wdm', 'w')

# the first few test runs write data to the WDM files, but they assume the
# datasets already exist so we need to create them. have a look at the test
# UCI files if you are curious

# WDM dataset attributes required before any data can be written; the
# key meanings come from the WDM format (time code, step, type, form).
# NOTE(review): fragment is truncated mid-dictionary -- the literal
# continues in the next chunk of the original file.
attributes = {
    'TCODE ': 4,
    'TSSTEP': 1,
    'TSTYPE': 'WTMP',
    'TSFORM': 3,
# build the wdm input file using the timeseries hspfmodel.build_wdminfile() # external targets targets = ['reach_outvolume', 'evaporation', 'reach_volume', 'runoff'] # build the input files and run hspfmodel.build_uci(targets, start, end, hydrology=True, verbose=False) hspfmodel.run(verbose=True) # retrieve results using WDMUtil wdm = WDMUtil() # open the file for read access wdm.open(wdmoutfile, 'r') # pull up the flow at the outlet and plot it along with the precipitation # and evapotranspiration dsns = wdm.get_datasets(wdmoutfile) idconss = [wdm.get_attribute(wdmoutfile, n, 'IDCONS') for n in dsns] staids = [wdm.get_attribute(wdmoutfile, n, 'STAID ') for n in dsns] # find the dsn n = [
# NOTE(review): whitespace-mangled fragment; line breaks reconstructed.
# The first print/raise pair sits inside a file-existence check whose
# `if`/`for` header is outside this chunk.
print(('error: required file {} does not exist!\n'.format(f)))
# NOTE(review): bare `raise` with no active exception raises
# RuntimeError -- an explicit exception (or SystemExit) may be intended
raise

# make sure the PyHSPF data have been generated
for d in (climatedata, pdata, edata, cdata):
    if not os.path.isdir(d):
        print(('error: required data in {} do not exist!\n'.format(d)))
        # NOTE(review): same bare-raise concern as above
        raise

# use WDMUtil to read the BASINS data
wdm = WDMUtil(verbose=verbose)

# open the precipitation file and the other climate data file
wdm.open(f1, 'r')
wdm.open(f2, 'r')

# make a list of the datasets and numbers in the climate-data file
dsns = wdm.get_datasets(f2)
tstypes = [wdm.get_attribute(f2, n, 'TSTYPE') for n in dsns]

# start date for the BASINS data (after the warmup period)
bstart = start + datetime.timedelta(days=warmup)
# NOTE(review): whitespace-mangled fragment; line breaks reconstructed.
# Reads Siletz precipitation / PET / flow CSVs from the current working
# directory, writes them into a WDM file via ts_to_wdmFile (defined
# elsewhere), then re-opens the WDM to pull basin timeseries back out.
# NOTE(review): the '\\' separators make these paths Windows-only --
# os.path.join (or pathlib) would be portable; left as-is here.
pcpData = pd.read_csv(
    os.path.abspath(os.path.curdir) + '\\siletz_HSPF_precip.csv')
petData = pd.read_csv(
    os.path.abspath(os.path.curdir) + '\\siletz_HSPF_pet.csv')
flwData = pd.read_csv(
    os.path.abspath(os.path.curdir) + '\\siletz_HSPF_flw.csv')

ts_to_wdmFile(wdmFile=wdmFile, pcpData=pcpData, petData=petData,
              flwData=flwData)

# See if you can read the data from the WDM file
wdm = WDMUtil(verbose=True, messagepath=mssgpath)

# ADD BASIN TIMESERIES FROM THE WDM TO HSPFMODEL

# open the wdm for read access; dataset 101 supplies the date range
wdm.open(wdmFile, 'r')
start, end = wdm.get_dates(wdmFile, 101)

# x offsets the DSN within each basin's group of datasets
x = 1

# Add specific basin met data
for basin in range(0, len(basinRecords)):
    # The DSNs are known from the exp file so just use those this time
    prcp = wdm.get_data(wdmFile, 100 + x)
# NOTE(review): whitespace-mangled fragment; line breaks reconstructed
# and the original nesting is lost -- `y` suggests this whole fragment
# sits inside a per-year loop in the original file; confirm before
# relying on the flat layout shown here.
print('reading year', y)

# pickled HSPFModel path: <directory>/<y>_<y+2>/<NWISgage>
p = '{}/{}_{}/{}'.format(directory, y, y + 2, NWISgage)

# NOTE(review): pickle.load executes arbitrary code from the file --
# only safe because these pickles are produced by this project itself
with open(p, 'rb') as f:
    hspfmodel = pickle.load(f)

# calculate the runoff components in each land segment and store
# the results in a structure as [subbasin][landuse][runoff/area]

results = {}

# use WDMUtil to read the data from the model's output WDM file
output = hspfmodel.filename + '_out.wdm'
wdmutil = WDMUtil()
wdmutil.open(output, 'r')

# read the metadata for each timeseries in the WDM file; note the
# trailing space in 'STAID ' -- WDM attribute names are fixed-width
dsns = wdmutil.get_datasets(output)
idconss = [wdmutil.get_attribute(output, n, 'IDCONS') for n in dsns]
descrps = [wdmutil.get_attribute(output, n, 'DESCRP') for n in dsns]
staids = [wdmutil.get_attribute(output, n, 'STAID ') for n in dsns]

# go through the impervious land segments to get the surface runoff
# (loop body continues past the end of this fragment)
for o in hspfmodel.implnds:
    c = o.subbasin
# NOTE(review): whitespace-mangled fragment; line breaks reconstructed.
# Input CSVs, the template WDM, and the new WDM file to create.
str_precip = 'siletz_HSPF_precip.csv'
str_pet = 'siletz_HSPF_PET.csv'
str_wdm = 'bigelk_in.wdm'
str_wdm_new = 'siletz.wdm'

# need to set variable for HSPF message file. WDMUtil uses this file
# NOTE(review): `messagepath` is computed here but the WDMUtil call
# below passes the bare literal 'hspfmsg.wdm' instead -- confirm which
# is intended (the computed absolute path looks like the safer choice)
messagepath = os.path.abspath(os.path.curdir) + '\\hspfmsg.wdm'

df_prec = pd.read_csv(str_precip)
# .head() calls have no lasting effect -- likely leftover from an
# interactive/notebook session
df_prec.head()
df_pet = pd.read_csv(str_pet)
df_pet.head()

# create an instance of WDMUtil class
wdm = WDMUtil(verbose=True, messagepath='hspfmsg.wdm')

# create a new wdm file
wdm.open(str_wdm_new, 'w')

# take from pyHSPF test01.py example
#
# the first few test runs write data to the WDM files, but they assume the
# datasets already exist so we need to create them. have a look at the test
# UCI files if you are curious

# WDM dataset attributes required before any data can be written
# NOTE(review): fragment is truncated mid-dictionary -- the literal
# continues in the next chunk of the original file.
attributes = {
    'TCODE ': 4,
    'TSSTEP': 1,
    'TSTYPE': 'WTMP',
    'TSFORM': 3,