Example #1
# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; this step will be skipped if this has already been done
# (from the last example) but is necessary to set the metadata

processor.download(bbox, start, end, output, datasets=['GSOD'])

# let's use the processor to aggregate the GSOD data together, including
# tmin, tmax, dewpoint, and wind speed. The GSOD database contains dew point
# and seems to be more complete than GHCND. It doesn't have snow or pan
# evaporation though, which is why both databases are used.

tmax = processor.aggregate('GSOD', 'tmax', start, end)
tmin = processor.aggregate('GSOD', 'tmin', start, end)
dewpoint = processor.aggregate('GSOD', 'dewpoint', start, end)
wind = processor.aggregate('GSOD', 'wind', start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

for n, dataset in zip(('tmax', 'tmin', 'dewpoint', 'wind'),
                      (tmax, tmin, dewpoint, wind)):

    name = '{}/GSOD_aggregated_{}'.format(output, n)
    ts = start, 1440, dataset

    # dump it in a pickled file to use later
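    with open(name, 'wb') as f: pickle.dump(ts, f)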
Example #2
if not os.path.isfile(filename + '.shp'):
    print('error: file {} does not exist!'.format(filename))
    raise IOError('file {}.shp does not exist'.format(filename))

# make an instance of the ClimateProcessor to fetch the climate data

processor = ClimateProcessor()

# the Penman-Monteith Equation requires temperature, humidity or dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor

processor.download_shapefile(filename, start, end, output, space=0.)

# let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD

tmax = processor.aggregate('GSOD', 'tmax', start, end)
tmin = processor.aggregate('GSOD', 'tmin', start, end)
dewt = processor.aggregate('GSOD', 'dewpoint', start, end)
wind = processor.aggregate('GSOD', 'wind', start, end)

# let's use the hourly METSTAT data from the NSRDB for solar radiation

solar = processor.aggregate('NSRDB', 'metstat', start, end)

# since the solar data are hourly and this example uses daily ET, the time
# series has to be aggregated to an average daily value (use a different
# variable for daily and hourly)

dsolar = [
    sum(solar[i:i + 24]) / 24 for i in range(0, 24 * (end - start).days, 24)
]
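
# a minimal sketch of a possible next step: feeding these daily series into
# PyHSPF's ETCalculator (assumed to be imported from pyhspf, as in the later
# examples) to estimate daily ET with the Penman-Monteith Equation. the
# location values and the exact calls below (add_location, add_timeseries,
# penman_daily) are assumptions and may need adjusting

calculator = ETCalculator()

# add a (hypothetical) centroid longitude, latitude, and elevation in meters

calculator.add_location(-77.0, 38.9, 100)

# supply the daily time series required by the Penman-Monteith Equation

for tstype, data in (('tmin',     tmin),
                     ('tmax',     tmax),
                     ('dewpoint', dewt),
                     ('wind',     wind),
                     ('solar',    dsolar),
                     ):

    calculator.add_timeseries(tstype, 'daily', start, data)

# estimate the daily potential evapotranspiration across the period

calculator.penman_daily(start, end)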
Example #3
# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; the download is skipped if it has already been done
# (from the last example), but the call is still needed to set the metadata

processor.download(bbox, start, end, output, datasets = ['precip3240'])

# aggregate the data -- it's important to keep in mind the missing data at
# many of these stations and the high degree of spatial variability associated
# with precipitation. In this example, all the data are aggregated into one
# series; however, it may make sense to aggregate the stations differently to
# capture this variability to some degree (there are lots of papers on this
# subject).

precip = processor.aggregate('precip3240', 'precip', start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

name = '{}/precip3240_aggregated_precip'.format(output)
ts = start, 60, precip

# dump it in a pickled file to use later

with open(name, 'wb') as f: pickle.dump(ts, f)

# change the start and end dates for plotting up some hourly results
# (just look at a few months to highlight the concept)
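
# for example, a few-month window could be selected like this (these dates
# are hypothetical and should fall within the downloaded period)

start = datetime.datetime(1990, 6, 1)
end   = datetime.datetime(1990, 10, 1)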
Example #4
# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid = record[comid_index]
    lon = record[lon_index]
    lat = record[lat_index]

    its = comid, lon, lat
    print('aggregating timeseries for comid {} at {}, {}\n'.format(*its))

    precipitation = processor.aggregate('precip3240',
                                        'precip',
                                        start,
                                        end,
                                        method='IDWA',
                                        longitude=lon,
                                        latitude=lat)

    mean = sum(precipitation) / (end - start).days * 365.25

    print('aggregated annual average precipitation: {:.1f} in\n'.format(mean))

    # dump the result in PyHSPF timeseries format into a pickle file for later

    ts = start, 60, precipitation

    # use the unique common identifiers for the files

    p = '{}/{}'.format(directory, comid)
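    with open(p, 'wb') as f: pickle.dump(ts, f)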
Example #5
lat_index   = [f[0] for f in sf.fields].index('CenY')  - 1

# iterate through the shapefile records and aggregate the timeseries

for i in range(len(sf.records())):

    record = sf.record(i)
    comid  = record[comid_index]
    lon    = record[lon_index]
    lat    = record[lat_index]

    its = comid, lon, lat
    print('aggregating timeseries for comid {} at {}, {}\n'.format(*its))

    precipitation = processor.aggregate('precip3240', 'precip', start, end,
                                        method = 'IDWA', longitude = lon,
                                        latitude = lat)

    mean = sum(precipitation) / (end - start).days * 365.25

    print('aggregated annual average precipitation: {:.1f} in\n'.format(mean))

    # dump the result in PyHSPF timeseries format into a pickle file for later

    ts = start, 60, precipitation

    # use the unique common identifiers for the files

    p = '{}/{}'.format(directory, comid)
    with open(p, 'wb') as f: pickle.dump(ts, f)
Example #6
# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; this step will be skipped if this has already been done
# (from the last example) but is necessary to set the metadata

processor.download(bbox, start, end, output, datasets=['GHCND'])

# let's use the processor to aggregate the GHCND data together, including
# tmin, tmax, snowdepth, and snowfall. The GHCND extraction tool also has
# wind and pan evaporation, but the wind data are sparse and pan evaporation
# isn't needed as an input to HSPF so there is not much reason to aggregate
# those data together.

tmax = processor.aggregate('GHCND', 'tmax', start, end)
tmin = processor.aggregate('GHCND', 'tmin', start, end)
snowdepth = processor.aggregate('GHCND', 'snowdepth', start, end)
snowfall = processor.aggregate('GHCND', 'snowfall', start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

for n, dataset in zip(('tmax', 'tmin', 'snowdepth', 'snowfall'),
                      (tmax, tmin, snowdepth, snowfall)):

    name = '{}/GHCND_aggregated_{}'.format(output, n)
    ts = start, 1440, dataset

    # dump it in a pickled file to use later
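    with open(name, 'wb') as f: pickle.dump(ts, f)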
Example #7
end = datetime.datetime(2011, 1, 1)

# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; the download is skipped if it has already been done
# (from the last example), but the call is still needed to set the metadata

processor.download(bbox, start, end, output, datasets=['NSRDB'])

# aggregate the data -- worth noting that the data prior to 1991 come from a
# separate database that corresponds more closely to METSTAT than to SUNY;
# requesting data prior to 1991 will give the same values either way.

metstat = processor.aggregate('NSRDB', 'metstat', start, end)
suny = processor.aggregate('NSRDB', 'suny', start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

for n, dataset in zip(('metstat', 'suny'), (metstat, suny)):

    name = '{}/NSRDB_aggregated_{}'.format(output, n)
    ts = start, 60, dataset

    # dump it in a pickled file to use later

    with open(name, 'wb') as f:
        pickle.dump(ts, f)
Example #8
if not os.path.isfile(filename + '.shp'):
    print('error: file {} does not exist!'.format(filename))
    raise IOError('file {}.shp does not exist'.format(filename))

# make an instance of the ClimateProcessor to fetch the climate data

processor = ClimateProcessor()

# the Penman-Monteith Equation requires temperature, humidity or dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor

processor.download_shapefile(filename, start, end, output, space = 0.)

# let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD

tmax  = processor.aggregate('GSOD', 'tmax', start, end)
tmin  = processor.aggregate('GSOD', 'tmin', start, end)
dewt  = processor.aggregate('GSOD', 'dewpoint', start, end)
wind  = processor.aggregate('GSOD', 'wind', start, end)

# let's use the hourly METSTAT data from the NSRDB for solar radiation

solar = processor.aggregate('NSRDB', 'metstat', start, end)

# since the solar data are hourly and this example uses daily ET, the time
# series has to be aggregated to an average daily value

solar = [sum(solar[i:i+24]) / 24 for i in range(0, 24 * (end-start).days, 24)]

# let's parse the GHCND data and see if there are any pan evaporation 
# observations (only available from GHCND) to compare with estimated PET 
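
# one possible way to look for them (a sketch; the GHCND parameter name
# 'evaporation' and the use of None for missing days are assumptions)

evap = processor.aggregate('GHCND', 'evaporation', start, end)

# pair each value with its date and keep only the days with observations

times = [start + datetime.timedelta(days=i) for i in range((end - start).days)]
panevap = [(t, e) for t, e in zip(times, evap) if e is not None]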
Example #9
if not os.path.isfile(filename + ".shp"):
    print("error: file {} does not exist!".format(filename))
    raise IOError("file {}.shp does not exist".format(filename))

# make an instance of the ClimateProcessor to fetch the climate data

processor = ClimateProcessor()

# the Penman-Monteith Equation requires temperature, humidity or dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor

processor.download_shapefile(filename, start, end, output, space=0.0)

# let's get the daily tmin, tmax, dewpoint, wind speed and solar

tmax = processor.aggregate("GSOD", "tmax", start, end)
tmin = processor.aggregate("GSOD", "tmin", start, end)
dewt = processor.aggregate("GSOD", "dewpoint", start, end)
wind = processor.aggregate("GSOD", "wind", start, end)
solar = processor.aggregate("NSRDB", "metstat", start, end)

# use the ETCalculator to estimate the evapotranspiration time series

calculator = ETCalculator()

# some of the parameters in the Penman-Monteith Equation depend on the
# geographic location so get the average longitude, latitude, and elevation

sf = Reader(filename)

# make a list of the fields for each shape
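
# the first entry in sf.fields is the deletion flag, so the record indices
# below are offset by one; 'CenY' matches the other examples, while 'CenX' is
# an assumption for the centroid longitude field name

fields = [f[0] for f in sf.fields]

lon_index = fields.index('CenX') - 1
lat_index = fields.index('CenY') - 1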
Example #10
# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; this step will be skipped if this has already been done
# (from the last example) but is necessary to set the metadata

processor.download(bbox, start, end, output, datasets = ['GSOD'])

# let's use the processor to aggregate the GSOD data together, including 
# tmin, tmax, dewpoint, and wind speed. The GSOD database contains dew point
# and seems to be more complete than GHCND. It doesn't have snow or pan
# evaporation though, which is why both databases are used.

tmax     = processor.aggregate('GSOD', 'tmax',     start, end)
tmin     = processor.aggregate('GSOD', 'tmin',     start, end)
dewpoint = processor.aggregate('GSOD', 'dewpoint', start, end)
wind     = processor.aggregate('GSOD', 'wind',     start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

for n, dataset in zip(('tmax', 'tmin', 'dewpoint', 'wind'), 
                      (tmax, tmin, dewpoint, wind)):

    name = '{}/GSOD_aggregated_{}'.format(output, n)
    ts = start, 1440, dataset

    # dump it in a pickled file to use later
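    with open(name, 'wb') as f: pickle.dump(ts, f)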
Example #11
# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; this step will be skipped if this has already been done
# (from the last example) but is necessary to set the metadata

processor.download(bbox, start, end, output, datasets = ['GHCND'])

# let's use the processor to aggregate the GHCND data together, including 
# tmin, tmax, snowdepth, and snowfall. The GHCND extraction tool also has
# wind and pan evaporation, but the wind data are sparse and pan evaporation
# isn't needed as an input to HSPF so there is not much reason to aggregate 
# those data together.

tmax      = processor.aggregate('GHCND', 'tmax',      start, end)
tmin      = processor.aggregate('GHCND', 'tmin',      start, end)
snowdepth = processor.aggregate('GHCND', 'snowdepth', start, end)
snowfall  = processor.aggregate('GHCND', 'snowfall',  start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

for n, dataset in zip(('tmax', 'tmin', 'snowdepth', 'snowfall'), 
                      (tmax, tmin, snowdepth, snowfall)):

    name = '{}/GHCND_aggregated_{}'.format(output, n)
    ts = start, 1440, dataset

    # dump it in a pickled file to use later
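    with open(name, 'wb') as f: pickle.dump(ts, f)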
Example #12
end   = datetime.datetime(2011, 1, 1)

# path to 7zip if using windows

processor.path_to_7z = r'C:/Program Files/7-Zip/7z.exe'

# download the data; the download is skipped if it has already been done
# (from the last example), but the call is still needed to set the metadata

processor.download(bbox, start, end, output, datasets = ['NSRDB'])

# aggregate the data -- worth noting that the data prior to 1991 come from a
# separate database that corresponds more closely to METSTAT than to SUNY;
# requesting data prior to 1991 will give the same values either way.

metstat = processor.aggregate('NSRDB', 'metstat', start, end)
suny    = processor.aggregate('NSRDB', 'suny',    start, end)

# now these time series can be saved for later, consistent with the structure
# used by PyHSPF's HSPFModel class (start date, time step in minutes, data);
# this way the data are easy to access later

for n, dataset in zip(('metstat','suny'), (metstat, suny)):

    name = '{}/NSRDB_aggregated_{}'.format(output, n)
    ts = start, 60, dataset

    # dump it in a pickled file to use later

    with open(name, 'wb') as f: pickle.dump(ts, f)