# use the "subbasin_catchments" shapefile to define the data processing area filename = 'subbasin_catchments' if not os.path.isfile(filename + '.shp'): print('error: file {} does not exist!'.format(filename)) raise # make an instance of the ClimateProcessor to fetch the climate data processor = ClimateProcessor() # the Penman-Monteith Equation requires temperature, humidity of dewpoint, # wind speed, and solar radiation, which can be obtained from the processor processor.download_shapefile(filename, start, end, output, space=0.) # let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD tmax = processor.aggregate('GSOD', 'tmax', start, end) tmin = processor.aggregate('GSOD', 'tmin', start, end) dewt = processor.aggregate('GSOD', 'dewpoint', start, end) wind = processor.aggregate('GSOD', 'wind', start, end) # let's use the hourly METSTAT data from the NSRDB for solar radiation solar = processor.aggregate('NSRDB', 'metstat', start, end) # since the solar data are hourly and this example uses daily ET, the time # series has to be aggregated to an average daily value (use a different # variable for daily and hourly)
# start and end dates (aggregate the whole 31 years)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)

# the space argument increases the bounding box area for the data download and
# processing; let's use a larger area for this example to grab data from a
# few more stations

space = 0.5

# download/set the location of the data using the "download_shapefile" method

processor.download_shapefile(filename, start, end, output,
                             datasets = ['precip3240'], space = space)

# open up and package the time series and locations for processing later

names, lons, lats = [], [], []

# make an empty numpy array for the data

precipitations = numpy.empty((len(processor.metadata.precip3240stations),
                              (end - start).days * 24))

i = 0
for k, v in processor.metadata.precip3240stations.items():
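    # a hedged sketch of one way to finish the loop body: the downloaded
    # station files are assumed to be pickled station objects (so the pickle
    # module must be imported), and the attribute and method names used below
    # ("name", "longitude", "latitude", "make_timeseries") are assumptions to
    # verify against the PyHSPF version in use

    with open(k, 'rb') as f: station = pickle.load(f)

    names.append(station.name)
    lons.append(station.longitude)
    lats.append(station.latitude)

    # hourly precipitation time series for this station across the period

    precipitations[i,:] = station.make_timeseries(start, end)

    i += 1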
if not os.path.isdir(directory): os.mkdir(directory)

# start and end dates (aggregate the whole 31 years)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)

# the space argument increases the bounding box area for the data download and
# processing; let's use a larger area for this example to grab data from a
# few more stations

space = 0.5

# download/set the location of the data using the "download_shapefile" method

processor.download_shapefile(filename, start, end, output,
                             datasets = ['precip3240'], space = space)

# the ClimateProcessor's aggregate method can also perform an inverse-distance
# weighted average (IDWA) to interpolate between the stations at a given point
# using the "method," "latitude," and "longitude" keyword arguments; the
# result is the same as the previous example. as before, the
# subbasin_catchments shapefile, which contains the centroid of each
# aggregation area, is used

sf = Reader(filename)

# indices of the comid, longitude, and latitude records

comid_index = [f[0] for f in sf.fields].index('ComID') - 1
lon_index = [f[0] for f in sf.fields].index('CenX') - 1
lat_index = [f[0] for f in sf.fields].index('CenY') - 1
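# an illustrative sketch of the IDWA aggregation at each catchment centroid;
# the 'precip3240'/'precip' dataset and variable names and the 'IDWA' method
# string passed to "aggregate" are assumptions to check, and "precipitation"
# is just an example name for the results dictionary

precipitation = {}

for record in sf.records():

    comid = record[comid_index]
    lon = record[lon_index]
    lat = record[lat_index]

    # inverse-distance weighted average of the hourly precipitation at the
    # centroid of this catchment

    precipitation[comid] = processor.aggregate('precip3240', 'precip',
                                               start, end, method = 'IDWA',
                                               longitude = lon, latitude = lat)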
# use the "subbasin_catchments" shapefile to define the data processing area filename = 'subbasin_catchments' if not os.path.isfile(filename + '.shp'): print('error: file {} does not exist!'.format(filename)) raise # make an instance of the ClimateProcessor to fetch the climate data processor = ClimateProcessor() # the Penman-Monteith Equation requires temperature, humidity of dewpoint, # wind speed, and solar radiation, which can be obtained from the processor processor.download_shapefile(filename, start, end, output, space = 0.) # let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD tmax = processor.aggregate('GSOD', 'tmax', start, end) tmin = processor.aggregate('GSOD', 'tmin', start, end) dewt = processor.aggregate('GSOD', 'dewpoint', start, end) wind = processor.aggregate('GSOD', 'wind', start, end) # let's use the hourly METSTAT data from the NSRDB for solar radiation solar = processor.aggregate('NSRDB', 'metstat', start, end) # since the solar data are hourly and this example uses daily ET, the time # series has to be aggregated to an average daily value