Code example #1
0
File: nhdplusexample01.py  Project: djibi2/PyHSPF
# working directory for the HSPF data files

output = 'HSPF_data'
if not os.path.isdir(output):
    os.mkdir(output)

# HUC8 NHDPlus info

VPU  = '02'        # NHDPlus Vector Processing Unit
HUC8 = '02060006'  # 8-digit HUC

# destination paths for the extracted files and the summary plot

HUC8output = f'{output}/{HUC8}'
plotfile = f'{HUC8output}/watershed'

# create an instance of the NHDPlus extractor

nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

# download and decompress the source data; pointing the extractor at
# existing source files skips the (lengthy) download

nhdplusextractor.download_data()

# extract the HUC8 data for the Patuxent watershed; likewise, if any of
# the extracted files already exist this step is skipped

nhdplusextractor.extract_HUC8(HUC8, HUC8output, plotfile=plotfile)
Code example #2
0
File: nhdplusexample01.py  Project: waternk/PyHSPF
# create a working directory for the HSPF data files

output = 'HSPF_data'
if not os.path.isdir(output):
    os.mkdir(output)

# HUC8 NHDPlus info

VPU = '02'  # NHDPlus Vector Processing Unit
HUC8 = '02060006'  # 8-digit HUC

# paths for the extracted files and for a plot of the output

HUC8output = '%s/%s' % (output, HUC8)
plotfile = '%s/watershed' % HUC8output

# create an instance of the NHDPlus extractor

nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

# download and decompress the source data; the (lengthy) download is
# skipped when the extractor is pointed at existing source files

nhdplusextractor.download_data()

# extract the HUC8 data for the Patuxent watershed; extraction is likewise
# skipped for any files that already exist

nhdplusextractor.extract_HUC8(HUC8, HUC8output, plotfile=plotfile)
Code example #3
0
File: hunting.py  Project: MachineAi/PyHSPF
def main():
    """Build and autocalibrate an HSPF model for Hunting Creek.

    Downloads/extracts the NWIS and NHDPlus source data, delineates the
    gage watershed, imports observed time series from the HSPExp export
    file, assembles an HSPFModel, and runs the AutoCalibrator.

    NOTE(review): relies on module-level configuration (NWIS, HUC8, output,
    gagefile, drainid, VPU, NHDPlus, VAAfile, flowfile, catchfile, elevfile,
    gageid, gagepath, landuse, watershed, masslink, hspf, model, start, end,
    calibrated, variables, optimization, perturbations, parallel) defined
    elsewhere in this file.
    """

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # tell the extractor to use the metadata file above to find gage data

    nwisextractor.set_metadata(gagefile)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(drainid, VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile, flowfile, catchfile, elevfile,
                                   gagefile = gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gageid, output = gagepath)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gageid, watershed, masslinkplot = masslink)

    # make the working directory for HSPF simulation files

    if not os.path.isdir(hspf): os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + '/data'

    # the data from the export file (*.exp) provided with hspexp need to be
    # imported into a wdm file. WDMUtil has a method for this.

    hunthour = '{}/hunthour/huntobs.exp'.format(directory)

    f = 'temp.wdm'

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, 'r')

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap   = wdm.get_data(f, 111)
    flow   = wdm.get_data(f, 281)

    # start and end dates of dataset 106 (presumably shared by all three
    # series -- TODO confirm)

    s, e = wdm.get_dates(f, 106)

    # add the time series to deal with HSPF looking backward stepping;
    # the 25.4 factor presumably converts inches to mm -- verify units.
    # the evap comprehension repeats each value 24 times, disaggregating
    # daily values into hourly ones. NOTE(review): the comprehension
    # variable "e" does not clobber the end date above in Python 3
    # (comprehensions have their own scope).

    precip = [0] + [p * 25.4 for p in precip]
    evap   = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, 'rb') as f: w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, ifraction = 1., verbose = True)

    # turn on the hydrology modules to the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries('precipitation', 'BWI', s, precip)

    # add evap timeseries with label Beltsville and provided start date

    hunting.add_timeseries('evaporation', 'Beltsville', s, evap)

    # add flow timeseries with label Hunting, start date, tstep (days)

    hunting.add_timeseries('flowgage', 'Hunting', s, flow, tstep = 60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries('precipitation', 'BWI')
    hunting.assign_watershed_timeseries('evaporation', 'Beltsville')

    # find the subbasin identifier for the watershed outlet (the one whose
    # downstream entry in the updown mapping is 0)

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries('flowgage', subbasin, 'Hunting')

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    # run the autocalibration over the simulation period

    calibrator = AutoCalibrator(hunting, start, end, hspf)

    calibrator.autocalibrate(calibrated,
                             variables = variables, 
                             optimization = optimization,
                             perturbations = perturbations,
                             parallel = parallel
                             )

    # report the calibrated parameter values

    for variable, value in zip(calibrator.variables, calibrator.values):

        print('{:6s} {:5.3f}'.format(variable, value))

    # NOTE(review): nothing is saved after this message in the visible
    # code -- the save step may be truncated from this excerpt

    print('\nsaving the calibration results\n')
Code example #4
0
File: huc8example.py  Project: Python3pkg/PyHSPF
def main():
    """Delineate the subbasins for the configured HUC8 watershed.

    Downloads/extracts the NHDPlus, NWIS, and NID source data (steps are
    skipped when the data already exist), then runs PyHSPF's HUC8Delineator.

    NOTE(review): relies on module-level configuration (HUC8, source,
    output, drainmax, parallel) defined elsewhere in this file.
    """

    # make sure the metadata are set before starting the download

    print('')
    print('preparing to delineate the subbasins for HUC {}\n'.format(HUC8))
    print('if you have already downloaded the NHDPlus, NWIS, and NID source ' +
          'data make sure you set the directory paths, or all the data will ' +
          'be downloaded again.\n')
    print('press "y" to continue or "n" to abort...\n')

    # bug fix: the original used eval(input()), which executes the user's
    # reply as Python code -- a bare "y" raises NameError, and arbitrary
    # input is a code-injection risk; read the raw string instead

    s = 'n'
    while s != 'y':
        s = input()
        if s == 'n': exit()

    # source data directory structure (ideally a data drive or server)

    NHDPlus = '{}/NHDPlus'.format(source)
    NWIS = '{}/NWIS'.format(source)
    NID = '{}/NID'.format(source)

    # download and extract the data using the PyHSPF data extractors (if needed)
    # these steps can/will be skipped if they are not needed

    nhdplusextractor = NHDPlusExtractor(HUC8[:2], NHDPlus)
    nwisextractor = NWISExtractor(NWIS)
    nidextractor = NIDExtractor(NID)

    # extract or set the path to the source NHDPlus data

    nhdplusextractor.download_data()

    # extract the hydrography data for the HUC8 to the output directory

    nhdplusextractor.extract_HUC8(HUC8, output)

    # paths to the NHDPlus data files created above by the nhdplusextractor

    bfile = '{}/boundary'.format(output)     # watershed boundary
    cfile = '{}/catchments'.format(output)   # individual catchments
    ffile = '{}/flowlines'.format(output)    # individual flowlines
    VAAs = '{}/flowlineVAAs'.format(output)  # value-added attributes
    efile = '{}/elevations'.format(output)   # elevation geotiff

    # extract the NWIS gages to a shapefile in the HUC8 directory

    nwisextractor.extract_HUC8(HUC8, output)

    # path to the gage shapefile created above (the stray, unused HUC8
    # argument to format has been dropped)

    gfile = '{}/gagestations'.format(output)

    # extract the NID dams to a shapefile in the new HUC8 directory

    dfile = '{}/dams'.format(output)

    nidextractor.extract_shapefile(bfile, dfile)

    # use the locations of the gages and dams, the NHDPlus data files, and
    # PyHSPF's HUC8Delineator to delineate subbasins subject to the criteria
    # into a new file in the HUC8 output directory

    delineator = HUC8Delineator(HUC8, VAAs, ffile, cfile, efile, gfile, dfile)

    # delineate the watershed using the NHDPlus data and delineator

    delineator.delineate(output, drainmax=drainmax, parallel=parallel)
Code example #5
0
File: nhdplusexample01.py  Project: timcera/PyHSPF
# working directory for the HSPF data files

output = 'HSPF_data'
if not os.path.isdir(output):
    os.mkdir(output)

# HUC8 NHDPlus info

VPU  = '02'        # NHDPlus Vector Processing Unit
HUC8 = '02060006'  # 8-digit HUC

# output paths for the extracted files and the watershed plot

HUC8output = '/'.join((output, HUC8))
plotfile = '/'.join((HUC8output, 'watershed'))

# create an instance of the NHDPlus extractor

nhdplusextractor = NHDPlusExtractor(NHDPlus)

# download and decompress the source data for the vector processing unit;
# the (lengthy) download is skipped when the source files already exist

nhdplusextractor.download_decompress(VPU, decompress=True)

# extract the HUC8 data for the Patuxent watershed; the extraction step is
# likewise skipped for any files that already exist

nhdplusextractor.extract_HUC8(VPU, HUC8, HUC8output, plotfile=plotfile)
Code example #6
0
File: hunting.py  Project: djibi2/PyHSPF
def preprocess():
    """Build an HSPF model for Hunting Creek and pickle it.

    Downloads/extracts the NWIS and NHDPlus source data, delineates the
    gage watershed, imports the observed time series from the HSPExp
    export file, assembles an HSPFModel, and dumps it to the "calibrated"
    pickle file.

    NOTE(review): relies on module-level configuration (NWIS, HUC8, output,
    VPU, NHDPlus, VAAfile, flowfile, catchfile, elevfile, gagefile, gageid,
    gagepath, landuse, watershed, masslink, hspf, model, calibrated)
    defined elsewhere in this file.
    """

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile, flowfile, catchfile, elevfile, gagefile=gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gageid, output=gagepath)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gageid, watershed, masslinkplot=masslink)

    # make the working directory for HSPF simulation files

    if not os.path.isdir(hspf):
        os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + "/data"

    # the data from the export file (*.exp) provided with hspexp need to be
    # imported into a wdm file. WDMUtil has a method for this.

    # bug fix: the original string had no "{}" placeholder, so the
    # .format(directory) call was a no-op and the directory argument was
    # silently discarded, making the path resolve relative to the current
    # working directory; anchor it to the data directory as the sibling
    # hunting.py example does -- TODO confirm the intended subdirectory

    hunthour = "{}/calibrated/huntobs.exp".format(directory)

    f = "temp.wdm"

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, "r")

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap = wdm.get_data(f, 111)
    flow = wdm.get_data(f, 281)

    # start and end dates of dataset 106

    s, e = wdm.get_dates(f, 106)

    # add the time series to deal with HSPF looking backward stepping;
    # the evap comprehension repeats each value 24 times, disaggregating
    # daily values into hourly ones (25.4 is presumably the in-to-mm
    # conversion factor -- verify units)

    precip = [0] + [p * 25.4 for p in precip]
    evap = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, "rb") as f:
        w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, ifraction=1.0, verbose=True)

    # turn on the hydrology modules to the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries("precipitation", "BWI", s, precip)

    # add evap timeseries with label Beltsville and provided start date

    hunting.add_timeseries("evaporation", "Beltsville", s, evap)

    # add flow timeseries with label Hunting, start date, tstep (days)

    hunting.add_timeseries("flowgage", "Hunting", s, flow, tstep=60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries("precipitation", "BWI")
    hunting.assign_watershed_timeseries("evaporation", "Beltsville")

    # find the subbasin identifier for the watershed outlet (the one whose
    # downstream entry in the updown mapping is 0)

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries("flowgage", subbasin, "Hunting")

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    # save the assembled model for later calibration runs

    with open(calibrated, "wb") as f:
        pickle.dump(hunting, f)
Code example #7
0
File: nhdplusexample01.py  Project: eotp/PyHSPF
# location of the NHDPlus source data

NHDPlus = 'NHDPlus'

# make a working directory for HSPF data files

output = 'HSPF_data'
if not os.path.isdir(output):
    os.mkdir(output)

# HUC8 NHDPlus info

VPU  = '02'        # NHDPlus Vector Processing Unit
HUC8 = '02060006'  # 8-digit HUC

# path for the output files

HUC8output = f'{output}/{HUC8}'

# create an instance of the NHDPlus extractor

nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

# download and decompress the source data (the lengthy download is skipped
# when the extractor is pointed at existing source files)

nhdplusextractor.download_data()

# extract the HUC8 data for the Patuxent watershed (skipped for any
# extracted files that already exist)

nhdplusextractor.extract_HUC8(HUC8, HUC8output)
Code example #8
0
File: hunting_climate.py  Project: MachineAi/PyHSPF
def extract():
    """Create an extract function to call from at runtime and to turn off
    the extraction steps when they are done.

    Downloads/extracts the NWIS and NHDPlus source data, delineates the
    gage watershed, downloads the daily gage data, and builds the watershed
    data structure.

    NOTE(review): relies on module-level configuration (NWIS, HUC8, output,
    gagefile, drainid, VPU, NHDPlus, VAAfile, flowfile, catchfile, elevfile,
    gage, gagepath, estart, eend, gagedata, landuse, watershed, masslink)
    defined elsewhere in this file.
    """

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # tell the extractor to use the metadata file above to find gage data

    nwisextractor.set_metadata(gagefile)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(drainid, VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile,
                                   flowfile,
                                   catchfile,
                                   elevfile,
                                   gagefile=gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gage, output=gagepath)

    # download the daily flow and water quality data for the gage
    # between estart and eend

    nwisextractor.download_gagedata(gage, estart, eend, output=gagedata)

    # open the NWIS flow data for the Hunting Creek gage station
    # (the pickled station object written by download_gagedata above)

    with open(gagedata, 'rb') as f:
        station = pickle.load(f)

    # get the time series of daily flow values for the gage

    flow = station.make_timeseries(estart, eend)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gage, watershed, masslinkplot=masslink)
Code example #9
0
File: example07.py  Project: MachineAi/PyHSPF
# HUC8 NHDPlus info

VPU  = '02'        # NHDPlus Vector Processing Unit
HUC8 = '02060006'  # 8-digit HUC

# paths for the extracted files for the HUC8 (this represents the "output")

flowfile  = f'{output}/flowlines'       # HUC8 flowline shapefile
cfile     = f'{output}/catchments'      # HUC8 catchment shapefile
bfile     = f'{output}/boundary'        # HUC8 boundary shapefile
VAAfile   = f'{output}/flowlineVAAs'    # NHDPlus value added attributes
elevfile  = f'{output}/elevations.tif'  # NED raster file
waterplot = f'{output}/watershed.png'   # plot of the data

# title for the plot

title = (f'Cataloging Unit {HUC8}\n'
         'NHDPlus Catchments and Flowlines on 30 meter NED DEM')

# create an instance of the NHDPlus extractor

nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

# download and decompress the source data; pointing the extractor at
# existing source files skips the (lengthy) download

nhdplusextractor.download_data()

# extract the HUC8 data for the Patuxent watershed; extraction is likewise
# skipped for any files that already exist

nhdplusextractor.extract_HUC8(HUC8, output)