import datetime

from pyhspf.preprocessing import Preprocessor

# locations of the source data files and the output directory
# (fix: these were referenced below but never defined, raising a NameError)
source      = 'Z:'
destination = 'C:/HSPF_data'

# 8-digit hydrologic unit code of interest; the lists here of states, years,
# and RPUs are just used to point to location of the data files below
HUC8  = '02040101'
state = 'Delaware'

# start and end dates for the simulation period
start = datetime.datetime(1980, 1, 1)
end   = datetime.datetime(2011, 1, 1)

# maximum drainage area for subbasin delineation
# NOTE(review): units presumably km2 -- confirm against PyHSPF docs
drainmax = 400

# comma separated value files linking land use codes from the cropland data
# layer to aggregated HSPF land segment categories
aggregation = 'cdlaggregation.csv'
landuse     = 'lucs.csv'

# because parallel processing may be used, the preprocessing must be run
# inside the __main__ guard
if __name__ == '__main__':

    processor = Preprocessor()

    # set up the directory locations
    processor.set_network(source)
    processor.set_output(destination)

    # set the simulation-specific parameters
    processor.set_parameters(HUC8 = HUC8,
                             start = start,
                             end = end,
                             state = state,
                             cdlaggregate = aggregation,
                             landuse = landuse)

    # preprocess the HUC8
    processor.preprocess(drainmax = drainmax, parallel = False)

# this took about 40 minutes to run on my 3 year old laptop not counting the
# time to download the raw data from the NHDPlus and CDL
# point the preprocessor at the input data and the output location
processor.set_network(network)
processor.set_output(destination)

# supply the simulation-specific parameters for this HUC8
processor.set_parameters(HUC8=HUC8,
                         start=start,
                         end=end,
                         cdlaggregate=aggregation,
                         landuse=landuse)

# run the preprocessing for the watershed
processor.preprocess(drainmax=drainmax)

# build the HSPFModel with the air temperature (ATEMP for PERLNDs and
# IMPLNDs), snow (SNOW for PERLNDs and IMPLNDs), and hydrology (PWATER for
# PERLNDs, IWATER for IMPLNDs, HYDR for RCHRESs) modules activated; every
# module flag defaults to "False", so nothing is simulated unless enabled here
processor.build_hspfmodel(landuseyear=landuseyear,
                          atemp=True,
                          snow=True,
                          hydrology=True)

# the previous call builds and pickles an HSPFModel into the
# destination/hspf directory as "<landuseyear>baseline" (2001 in this case);
# the model is also stored in the preprocessor's "hspfmodel" attribute
import datetime

from pyhspf.preprocessing import Preprocessor

# locations of the source data files and the output directory
source      = 'Z:'
destination = 'C:/HSPF_data'

# 8-digit hydrologic unit code of interest; the lists here of states, years,
# and RPUs are just used to point to location of the data files below
HUC8 = '02040101'

# start and end dates for the simulation period
# (fix: datetime was used here but never imported, raising a NameError)
start = datetime.datetime(1980, 1, 1)
end   = datetime.datetime(2011, 1, 1)

# maximum drainage area for subbasin delineation
# NOTE(review): units presumably km2 -- confirm against PyHSPF docs
drainmax = 400

# comma separated value files linking land use codes from the cropland data
# layer to aggregated HSPF land segment categories
aggregation = 'cdlaggregation.csv'
landuse     = 'lucs.csv'

# because parallel processing may be used, the preprocessing must be run
# inside the __main__ guard
if __name__ == '__main__':

    processor = Preprocessor()

    # set up the directory locations
    processor.set_network(source)
    processor.set_output(destination)

    # set the simulation-specific parameters
    processor.set_parameters(HUC8=HUC8,
                             start=start,
                             end=end,
                             cdlaggregate=aggregation,
                             landuse=landuse)

    # preprocess the HUC8
    processor.preprocess(drainmax=drainmax, parallel=False)

# this took about 40 minutes to run on my 3 year old laptop not counting the
# time to download the raw data from the NHDPlus and CDL
# point the preprocessor at the input data and the output location
processor.set_network(network)
processor.set_output(destination)

# supply the simulation-specific parameters for this HUC8
processor.set_parameters(HUC8 = HUC8,
                         start = start,
                         end = end,
                         state = state,
                         cdlaggregate = aggregation,
                         landuse = landuse)

# run the preprocessing for the watershed
processor.preprocess(drainmax = drainmax)

# If the script runs successfully, the following file structure will be created:
#
# <network>
#   /NHDPlus
#     /NHDPlusMS
#       /NHDPlus07
#         /EROMExtension
#           HUC07 Erosion Runoff Model (dbf)
#         /NEDSnapshot
#           HUC07 Elevation Rasters (tif)
#         /NHDPlusAttributes
#           HUC07 Flowline attributes (dbf)
#         /NHDPlusCatchment
#           HUC07 Catchment shapefile
# if the watershed is in more than one state, this will probably not work
# (this is a feature that should be added in the future).

# start and end dates (2001 to 2010)
start = datetime.datetime(2001, 1, 1)
end   = datetime.datetime(2011, 1, 1)

# comma separated value file linking land use codes from the cropland data
# layer to RGB colors and HSPF land segments
landcodes = 'aggregate.csv'

# because parallel processing is (optionally) used, the process method has
# to be called at runtime as shown below
if __name__ == '__main__':

    # make an instance of the preprocessor
    processor = Preprocessor(network, destination, landcodes = landcodes)

    # preprocess the HUC8
    processor.preprocess(HUC8, state, start, end)

# so using the preprocessor in other watersheds *should* be as simple as
# supplying the state and 8-digit HUC; if you try and get errors please
# report them!