import datetime

from pyhspf.preprocessing import Preprocessor

# paths to the source network files and the working directory for the output

source      = 'Z:'
destination = 'C:/HSPF_data'

# 8-digit hydrologic unit code of interest; the state, dates, and file names
# below are just used to point to the locations of the data files

HUC8        = '02040101'
state       = 'Delaware'
start       = datetime.datetime(1980, 1, 1)
end         = datetime.datetime(2011, 1, 1)
drainmax    = 400                    # maximum drainage area for subbasin delineation
aggregation = 'cdlaggregation.csv'   # links CDL land use codes to the HSPF land segments
landuse     = 'lucs.csv'             # parameters for the HSPF land use categories

if __name__ == '__main__': 
    
    processor = Preprocessor()

    processor.set_network(source)
    processor.set_output(destination)
    processor.set_parameters(HUC8 = HUC8,
                             start = start,
                             end = end,
                             state = state,
                             cdlaggregate = aggregation,
                             landuse = landuse)
    processor.preprocess(drainmax = drainmax, parallel = False)

# this took about 40 minutes to run on my 3-year-old laptop, not counting the
# time to download the raw data from the NHDPlus and CDL
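
# a minimal sketch (plain os calls, not part of pyhspf) of checking the paths
# above before committing to a long preprocessing run

import os

if __name__ == '__main__':

    # the source data directory must be reachable and the working directory
    # must exist (or be creatable) before starting

    if not os.path.isdir(source):
        raise SystemExit('source data directory {} is missing'.format(source))

    if not os.path.isdir(destination): os.mkdir(destination)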
Example #2
# file path to place the calibrated model and results

calibration = '{}/{}/calibrations'.format(destination, HUC8)

# path where the calibrated model will be saved; gageid is the NWIS gage
# identifier, defined earlier in the full script

calibrated = '{}/{}'.format(calibration, gageid)
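
# a minimal sketch (plain os calls, not part of pyhspf) of creating the
# calibration directories above before any results are written to them

import os

if __name__ == '__main__':

    if not os.path.isdir(calibrated): os.makedirs(calibrated)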

# Because parallel processing is (optionally) used, the preprocess method has
# to be called inside an "if __name__ == '__main__'" guard as shown below (a
# short sketch of why the guard is needed follows this example)

if __name__ == '__main__':

    # make an instance of the Preprocessor

    processor = Preprocessor()

    # set up the directory locations

    processor.set_network(network)
    processor.set_output(destination)

    # set the simulation-specific parameters

    processor.set_parameters(HUC8=HUC8,
                             start=start,
                             end=end,
                             cdlaggregate=aggregation,
                             landuse=landuse)

    # preprocess the HUC8 (drainmax, like the other names above, comes from
    # earlier in the full script)

    processor.preprocess(drainmax=drainmax, parallel=False)
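
# a short sketch (plain standard-library multiprocessing, nothing pyhspf-
# specific) of why the guard above matters: on Windows, each worker process
# re-imports the script, so unguarded top-level code would run again in every
# worker

import multiprocessing

def work(i): return i * i

if __name__ == '__main__':

    # the Pool has to be created inside the guard, otherwise Windows raises a
    # RuntimeError during the workers' bootstrapping phase

    with multiprocessing.Pool(2) as pool:
        print(pool.map(work, range(4)))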
Example #3
source = 'Z:'
destination = 'C:/HSPF_data'

import datetime

from pyhspf.preprocessing import Preprocessor

# 8-digit hydrologic unit code of interest; the dates and file names below
# are just used to point to the locations of the data files

HUC8 = '02040101'
start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)
drainmax = 400
aggregation = 'cdlaggregation.csv'
landuse = 'lucs.csv'

if __name__ == '__main__':

    processor = Preprocessor()

    processor.set_network(source)
    processor.set_output(destination)
    processor.set_parameters(HUC8=HUC8,
                             start=start,
                             end=end,
                             cdlaggregate=aggregation,
                             landuse=landuse)
    processor.preprocess(drainmax=drainmax, parallel=False)

# this took about 40 minutes to run on my 3-year-old laptop, not counting the
# time to download the raw data from the NHDPlus and CDL
Example #4
# file path to place the calibrated model and results

calibration = '{}/{}/calibrations'.format(destination, HUC8)

# path where the calibrated model will be saved; gageid is the NWIS gage
# identifier, defined earlier in the full script

calibrated = '{}/{}'.format(calibration, gageid)

# Because parallel processing is (optionally) used, the preprocess method has
# to be called inside an "if __name__ == '__main__'" guard as shown below

if __name__ == '__main__': 

    # make an instance of the Preprocessor

    processor = Preprocessor()

    # set up the directory locations

    processor.set_network(network)
    processor.set_output(destination)

    # set the simulation-specific parameters

    processor.set_parameters(HUC8 = HUC8,
                             start = start,
                             end = end,
                             state = state,
                             cdlaggregate = aggregation,
                             landuse = landuse)

    # preprocess the HUC8 (drainmax is defined earlier in the full script)

    processor.preprocess(drainmax = drainmax, parallel = False)
Example #5
import datetime

from pyhspf.preprocessing import Preprocessor

# if the watershed is in more than one state, this will probably not work
# (this is a feature that should be added in the future).

# start and end dates (2001 to 2010)

start = datetime.datetime(2001, 1, 1)
end   = datetime.datetime(2011, 1, 1)

# comma separated value file linking land use codes from the cropland data
# layer to RGB colors and HSPF land segments

landcodes = 'aggregate.csv'

# because parallel processing is (optionally) used, the preprocess method has
# to be called inside an "if __name__ == '__main__'" guard as shown below

if __name__ == '__main__': 

    # make an instance of the preprocessor

    processor = Preprocessor(network, destination, landcodes = landcodes)

    # preprocess the HUC8

    processor.preprocess(HUC8, state, start, end)

    # so using the preprocessor in other watersheds *should* be as simple as
    # supplying the state and 8-digit HUC; a sketch of that idea follows this
    # example. if you try it and get errors, please report them!
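
# a minimal sketch of that idea, looping over several watersheds with the same
# constructor and preprocess call as above (the second HUC8/state pair is made
# up here purely for illustration)

if __name__ == '__main__':

    watersheds = {'02040101': 'Delaware',
                  '02060006': 'Maryland',
                  }

    for huc8, st in watersheds.items():

        processor = Preprocessor(network, destination, landcodes = landcodes)
        processor.preprocess(huc8, st, start, end)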
Example #6
from pyhspf.preprocessing import Preprocessor

# comma separated value file linking land use codes from the cropland data
# layer to the HSPF land use categories

aggregation = 'cdlaggregation.csv'

# Comma separated value file of parameters for the HSPF land use categories
# including RGB values for plots and evapotranspiration crop coefficients

landuse = 'lucs.csv'

# Because parallel processing is (optionally) used, the preprocess method has
# to be called inside an "if __name__ == '__main__'" guard as shown below

if __name__ == '__main__': 

    # make an instance of the preprocessor
    
    processor = Preprocessor()

    # set the paths to the source network files and the working directory for
    # the preprocessing, etc.

    processor.set_network(network)
    processor.set_output(destination)

    # set the parameters for the processing

    processor.set_parameters(HUC8 = HUC8,
                             start = start,
                             end = end,
                             cdlaggregate = aggregation,
                             landuse = landuse)

    # preprocess the HUC8 (drainmax, like HUC8 and the dates above, is defined
    # earlier in the full script)

    processor.preprocess(drainmax = drainmax, parallel = False)