# Example script: preprocess the raw data for one HUC8 watershed with PyHSPF.
#
# Fixes vs. original: added the missing `import datetime` (datetime.datetime
# is used below) and defined `source`/`destination`, which were referenced but
# never assigned (values taken from the sibling variant of this script).

import datetime

from pyhspf.preprocessing import Preprocessor

# directory locations: raw NHDPlus/CDL data archive and the output workspace
source      = 'Z:'
destination = 'C:/HSPF_data'

# 8-digit hydrologic unit code of interest; the lists here of states, years,
# and RPUs are just used to point to location of the data files below
HUC8  = '02040101'
state = 'Delaware'

# simulation period
start = datetime.datetime(1980, 1, 1)
end   = datetime.datetime(2011, 1, 1)

# maximum drainage area for subbasin delineation
# (units per the PyHSPF Preprocessor docs -- TODO confirm)
drainmax = 400

# CSV files mapping Cropland Data Layer categories to aggregated land use types
aggregation = 'cdlaggregation.csv'
landuse     = 'lucs.csv'

# Because parallel processing is (optionally) used, the preprocess method has
# to be called at runtime as shown below
if __name__ == '__main__':

    # make an instance of the Preprocessor
    processor = Preprocessor()

    # set up the directory locations
    processor.set_network(source)
    processor.set_output(destination)

    # set the simulation-specific parameters
    processor.set_parameters(HUC8 = HUC8,
                             start = start,
                             end = end,
                             state = state,
                             cdlaggregate = aggregation,
                             landuse = landuse)

    # preprocess the HUC8
    processor.preprocess(drainmax = drainmax, parallel = False)

# this took about 40 minutes to run on my 3 year old laptop not counting the
# time to download the raw data from the NHDPlus and CDL
# land use category file, including RGB values for plots and
# evapotranspiration crop coefficients
landuse = 'lucs.csv'

# Preprocessing (optionally) spawns worker processes, so the driver code must
# live under a runtime guard
if __name__ == '__main__':

    # build the preprocessor
    processor = Preprocessor()

    # where the raw data live and where the results go
    processor.set_network(network)
    processor.set_output(destination)

    # supply the watershed- and simulation-specific parameters
    processor.set_parameters(HUC8=HUC8,
                             start=start,
                             end=end,
                             state=state,
                             cdlaggregate=aggregation,
                             landuse=landuse)

    # run the full preprocessing workflow for the HUC8
    processor.preprocess(drainmax=drainmax)
# location where the calibrated model will be saved
calibrated = '{0}/{1}'.format(calibration, gageid)

# Parallel processing may (optionally) be used, so the driver code must live
# under a runtime guard
if __name__ == '__main__':

    # build the preprocessor and point it at the data directories
    processor = Preprocessor()
    processor.set_network(network)
    processor.set_output(destination)

    # supply the watershed-specific parameters
    processor.set_parameters(HUC8=HUC8,
                             start=start,
                             end=end,
                             cdlaggregate=aggregation,
                             landuse=landuse)

    # run the preprocessing for the HUC8
    processor.preprocess(drainmax=drainmax)

# build the HSPFModel and turn on the flags for the air temperature
# Example script: preprocess the raw data for one HUC8 watershed with PyHSPF.
#
# Fix vs. original: added the missing `import datetime` -- the script assigns
# `start`/`end` from datetime.datetime below and would raise a NameError.

import datetime

from pyhspf.preprocessing import Preprocessor

# directory locations: raw NHDPlus/CDL data archive and the output workspace
source      = 'Z:'
destination = 'C:/HSPF_data'

# 8-digit hydrologic unit code of interest; the lists here of states, years,
# and RPUs are just used to point to location of the data files below
HUC8 = '02040101'

# simulation period
start = datetime.datetime(1980, 1, 1)
end   = datetime.datetime(2011, 1, 1)

# maximum drainage area for subbasin delineation
# (units per the PyHSPF Preprocessor docs -- TODO confirm)
drainmax = 400

# CSV files mapping Cropland Data Layer categories to aggregated land use types
aggregation = 'cdlaggregation.csv'
landuse     = 'lucs.csv'

# Because parallel processing is (optionally) used, the preprocess method has
# to be called at runtime as shown below
if __name__ == '__main__':

    # make an instance of the Preprocessor
    processor = Preprocessor()

    # set up the directory locations
    processor.set_network(source)
    processor.set_output(destination)

    # set the simulation-specific parameters
    processor.set_parameters(HUC8=HUC8,
                             start=start,
                             end=end,
                             cdlaggregate=aggregation,
                             landuse=landuse)

    # preprocess the HUC8
    processor.preprocess(drainmax=drainmax, parallel=False)

# this took about 40 minutes to run on my 3 year old laptop not counting the
# time to download the raw data from the NHDPlus and CDL