# these are provided with the distribution now, though they can be generated
# from the previous example

output = 'data/patuxent'

# paths to the different source files for the model data

flowfile  = '{}/flowlines'.format(output)      # HUC8 flowline shapefile
cfile     = '{}/catchments'.format(output)     # HUC8 catchment shapefile
VAAfile   = '{}/flowlineVAAs'.format(output)   # NHDPlus value added attributes
elevfile  = '{}/elevations.tif'.format(output) # NED raster file
watershed = '{}/delineated'.format(output)     # directory for delineated files

# create an instance of the delineator and supply the path to the source files

delineator = NHDPlusDelineator(VAAfile, flowfile, cfile, elevfile)

# longitude, latitude of the point to delineate (the delineator looks for the
# closest flowline to this point)

longitude = -76.6056
latitude  =  38.5839

# extract the catchments and flowlines for the gage's watershed and merge
# the shapes together to make a boundary file

gagewatershed = '{}/01594670'.format(output)

delineator.delineate_watershed(longitude, latitude, output = gagewatershed)

# make a plot of the watershed
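# a minimal sketch of the plotting step referenced by the comment above
# (assumption: the plot_delineated_watershed method and the 'plot' file name
# below are illustrative, not part of the original snippet)

plot = '{}/watershed'.format(gagewatershed)

delineator.plot_delineated_watershed(output = plot)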
def main():

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # tell the extractor to use the metadata file above to find gage data

    nwisextractor.set_metadata(gagefile)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(drainid, VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile, flowfile, catchfile, elevfile,
                                   gagefile = gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gageid, output = gagepath)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gageid, watershed, masslinkplot = masslink)

    # make the working directory for HSPF simulation files

    if not os.path.isdir(hspf): os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + '/data'

    # the data from the export file (*.exp) provided with hspexp need to be
    # imported into a wdm file. WDMUtil has a method for this.

    hunthour = '{}/hunthour/huntobs.exp'.format(directory)

    f = 'temp.wdm'

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, 'r')

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap   = wdm.get_data(f, 111)
    flow   = wdm.get_data(f, 281)

    s, e = wdm.get_dates(f, 106)

    # adjust the time series to account for HSPF's backward-looking time steps
    # (and convert the units to metric)

    precip = [0] + [p * 25.4 for p in precip]
    evap   = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, 'rb') as f: w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, ifraction = 1., verbose = True)

    # turn on the hydrology modules for the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries('precipitation', 'BWI', s, precip)

    # add evap timeseries with label Beltsville and provided start date

    hunting.add_timeseries('evaporation', 'Beltsville', s, evap)

    # add flow timeseries with label Hunting, start date, tstep (days)

    hunting.add_timeseries('flowgage', 'Hunting', s, flow, tstep = 60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries('precipitation', 'BWI')
    hunting.assign_watershed_timeseries('evaporation', 'Beltsville')

    # find the subbasin identifier for the watershed outlet

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries('flowgage', subbasin, 'Hunting')

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    calibrator = AutoCalibrator(hunting, start, end, hspf)

    calibrator.autocalibrate(calibrated,
                             variables = variables,
                             optimization = optimization,
                             perturbations = perturbations,
                             parallel = parallel,
                             )

    for variable, value in zip(calibrator.variables, calibrator.values):

        print('{:6s} {:5.3f}'.format(variable, value))

    print('\nsaving the calibration results\n')
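# assumption: the script is executed directly, so main() runs behind the
# standard entry-point guard; this keeps the parallel calibration, which may
# spawn worker processes, from re-importing and re-running the module

if __name__ == '__main__': main()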
if not os.path.isfile(cfile + '.shp'):

    print('\nerror: file {} does not exist!'.format(cfile))
    print('double-check that the path is correct\n')
    raise

if not os.path.isfile(elevfile):

    print('\nerror: file {} does not exist!'.format(elevfile))
    print('double-check that the path is correct\n')
    raise

# create an instance of the delineator and supply the path to the source files

delineator = NHDPlusDelineator(VAAfile, flowfile, cfile, elevfile)

# longitude, latitude of the point to delineate (the delineator looks for the
# closest flowline to this point)

longitude = -76.6056
latitude  =  38.5839

# location to place the output (put it inside the existing HUC8 directory)

gageoutput = '{}/01594670'.format(output)

# file name for the plot of the output

plot = '{}/hunting_watershed'.format(gageoutput)
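# a minimal sketch of how these inputs could be used, following the
# delineate_watershed call shown earlier (the plot_delineated_watershed call
# is an assumption for illustration only)

delineator.delineate_watershed(longitude, latitude, output = gageoutput)

delineator.plot_delineated_watershed(output = plot)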
def preprocess():

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(
        VAAfile, flowfile, catchfile, elevfile, gagefile=gagefile
    )

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gageid, output=gagepath)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gageid, watershed, masslinkplot=masslink)

    # make the working directory for HSPF simulation files

    if not os.path.isdir(hspf):
        os.mkdir(hspf)

    # import old data for Hunting Creek

    wdm = WDMUtil()

    # path to hspexp2.4 data files (modify as needed)

    directory = os.path.abspath(os.path.dirname(__file__)) + "/data"

    # the data from the export file (*.exp) provided with hspexp need to be
    # imported into a wdm file. WDMUtil has a method for this.

    hunthour = "{}/calibrated/huntobs.exp".format(directory)

    f = "temp.wdm"

    # import from exp to wdm

    wdm.import_exp(hunthour, f)

    # close the file and re-open the wdm for read access

    wdm.close(f)
    wdm.open(f, "r")

    # the dsns are known from the exp file so just use those this time

    precip = wdm.get_data(f, 106)
    evap = wdm.get_data(f, 111)
    flow = wdm.get_data(f, 281)

    s, e = wdm.get_dates(f, 106)

    # adjust the time series to account for HSPF's backward-looking time steps
    # (and convert the units to metric)

    precip = [0] + [p * 25.4 for p in precip]
    evap = [e * 25.4 / 24 for e in evap for i in range(24)]

    wdm.close(f)

    # create an HSPF model instance

    hunting = HSPFModel()

    # open the watershed built above

    with open(watershed, "rb") as f:
        w = pickle.load(f)

    # use the data to build an HSPFModel

    hunting.build_from_watershed(w, model, ifraction=1.0, verbose=True)

    # turn on the hydrology modules for the HSPF model

    hunting.add_hydrology()

    # add precip timeseries with label BWI and provided start date to the model

    hunting.add_timeseries("precipitation", "BWI", s, precip)

    # add evap timeseries with label Beltsville and provided start date

    hunting.add_timeseries("evaporation", "Beltsville", s, evap)

    # add flow timeseries with label Hunting, start date, tstep (days)

    hunting.add_timeseries("flowgage", "Hunting", s, flow, tstep=60)

    # assign the evaporation and precipitation timeseries to the whole watershed

    hunting.assign_watershed_timeseries("precipitation", "BWI")
    hunting.assign_watershed_timeseries("evaporation", "Beltsville")

    # find the subbasin identifier for the watershed outlet

    subbasin = [up for up, down in w.updown.items() if down == 0][0]

    # assign the flowgage to the outlet subbasin

    hunting.assign_subbasin_timeseries("flowgage", subbasin, "Hunting")

    # using pan evaporation data, so need a pan coefficient < 1

    hunting.evap_multiplier = 0.75

    with open(calibrated, "wb") as f:
        pickle.dump(hunting, f)
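# a minimal usage sketch (assumption, not part of the original script): a later
# step, such as the calibration shown above, can reload the pickled model
# instead of repeating the preprocessing

with open(calibrated, "rb") as f:
    hunting = pickle.load(f)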
def extract():
    """Create an extract function to call at runtime and to turn off the
    extraction steps when they are done."""

    # create an instance of the NWIS extractor

    nwisextractor = NWISExtractor(NWIS)

    # download and decompress the source metadata files

    nwisextractor.download_metadata()

    # extract all the gage stations and metadata into a shapefile for the HUC8

    nwisextractor.extract_HUC8(HUC8, output)

    # tell the extractor to use the metadata file above to find gage data

    nwisextractor.set_metadata(gagefile)

    # create an instance of the NHDPlus extractor

    nhdplusextractor = NHDPlusExtractor(drainid, VPU, NHDPlus)

    # download and decompress the source data for the Mid Atlantic Region

    nhdplusextractor.download_data()

    # extract the HUC8 data for the Patuxent watershed

    nhdplusextractor.extract_HUC8(HUC8, output)

    # create an instance of the NHDPlusDelineator to use to build the Watershed

    delineator = NHDPlusDelineator(VAAfile, flowfile, catchfile, elevfile,
                                   gagefile=gagefile)

    # delineate the watershed (extract the flowlines, catchments and other data)

    delineator.delineate_gage_watershed(gage, output=gagepath)

    # download the daily flow and water quality data for the gage

    nwisextractor.download_gagedata(gage, estart, eend, output=gagedata)

    # open the NWIS flow data for the Hunting Creek gage station

    with open(gagedata, 'rb') as f:
        station = pickle.load(f)

    # get the time series of daily flow values for the gage

    flow = station.make_timeseries(estart, eend)

    # add land use data from 1988 to the delineator

    delineator.add_basin_landuse(1988, landuse)

    # build the watershed

    delineator.build_gage_watershed(gage, watershed, masslinkplot=masslink)
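# a minimal sketch (assumption): the pickled Watershed written by
# build_gage_watershed can be reopened to inspect the subbasin network before
# building an HSPF model, as the calibration script above does

with open(watershed, 'rb') as f:
    w = pickle.load(f)

# w.updown maps each subbasin identifier to its downstream subbasin
# (a value of 0 marks the watershed outlet)

print(w.updown)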