Example #1
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series from the GSOD database (very similar to the last example)

import os
import datetime
import pickle

# bring in the ClimateProcessor class from the preprocessing toolkit

from pyhspf.preprocessing import ClimateProcessor

# instantiate the processor that handles downloading and aggregation

processor = ClimateProcessor()

# all downloaded data files are kept in this working directory

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of the study area from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (just a decade to highlight the concept)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(1990, 1, 2)
Example #2
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series from the hourly precipitation database (similar to the last example)

import os
import datetime
import pickle

# the ClimateProcessor class does the downloading and aggregation work

from pyhspf.preprocessing import ClimateProcessor

# make a processor instance for this example

processor = ClimateProcessor()

# directory where every data file for this example is stored

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of the same region as before

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (aggregate the whole 31 years)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)
Example #3
0
# last updated: 02/15/2015
#
# illustrates how to use the ClimateProcessor class to download data from
# various online databases and then aggregate the time series using some
# numpy features. The first part is essentially the same as the previous
# example, so there is minimal discussion.

import os
import datetime
import pickle

# the ClimateProcessor class performs the downloads and the aggregation

from pyhspf.preprocessing import ClimateProcessor

# create a processor for this session

processor = ClimateProcessor()

# keep all the data files together in one working directory

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of the region from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (a few months are enough to highlight the concept)

start = datetime.datetime(1980, 4, 1)
end = datetime.datetime(1980, 10, 2)
Example #4
0
# start and end dates for data download

start = datetime.datetime(1988, 6, 1)
end = datetime.datetime(1988, 7, 1)

# use the "subbasin_catchments" shapefile to define the data processing area

filename = 'subbasin_catchments'

if not os.path.isfile(filename + '.shp'):
    print('error: file {} does not exist!'.format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # "RuntimeError: No active exception to re-raise"; raise a descriptive
    # exception instead so the failure is clear
    raise FileNotFoundError('{}.shp'.format(filename))

# make an instance of the ClimateProcessor to fetch the climate data

processor = ClimateProcessor()

# the Penman-Monteith Equation requires temperature, humidity of dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor

processor.download_shapefile(filename, start, end, output, space=0.)

# let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD

tmax = processor.aggregate('GSOD', 'tmax', start, end)
tmin = processor.aggregate('GSOD', 'tmin', start, end)
dewt = processor.aggregate('GSOD', 'dewpoint', start, end)
wind = processor.aggregate('GSOD', 'wind', start, end)

# let's use the hourly METSTAT data from the NSRDB for solar radiation
Example #5
0
# last updated: 02/15/2015
#
# illustrates how to use the ClimateProcessor class to download data from
# various online databases and then aggregate the time series using some
# numpy features. The first part is essentially the same as the previous
# example, so there is minimal discussion.

import os
import datetime
import pickle

# the ClimateProcessor handles the database downloads and aggregation

from pyhspf.preprocessing import ClimateProcessor

# build a processor instance to work with

processor = ClimateProcessor()

# all files for this example live in one working directory

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of interest from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (a few months suffice to highlight the concept)

start = datetime.datetime(1980, 4, 1)
end = datetime.datetime(1980, 10, 2)
Example #6
0
# import the ClimateProcessor and PyShp reader

from pyhspf.preprocessing import ClimateProcessor
from shapefile import Reader

# path to existing shapefile defining the data region and processing information

filename = 'subbasin_catchments'

if not os.path.isfile(filename + '.shp'):
    # (removed redundant double parentheses left over from a 2to3 conversion)
    print('error, {} does not exist!'.format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # RuntimeError; raise a descriptive exception instead
    raise FileNotFoundError('{}.shp'.format(filename))

# create an instance of the ClimateProcessor class

processor = ClimateProcessor()

# working directory location for all the data files

output = 'HSPF_data'

if not os.path.isdir(output): os.mkdir(output)

# working directory for aggregated precipitation files

directory = '{}/subbasinprecipitation'.format(output)

if not os.path.isdir(directory): os.mkdir(directory)

# start and end dates (aggregate the whole 31 years)
Example #7
0
# import the ClimateProcessor and PyShp reader

from pyhspf.preprocessing import ClimateProcessor
from shapefile import Reader

# path to existing shapefile defining the data region and processing information

filename = 'subbasin_catchments'

if not os.path.isfile(filename + '.shp'):
    print('error, {} does not exist!'.format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # RuntimeError; raise a descriptive exception instead
    raise FileNotFoundError('{}.shp'.format(filename))

# create an instance of the ClimateProcessor class

processor = ClimateProcessor()

# working directory location for all the data files

output = 'HSPF_data'

if not os.path.isdir(output): os.mkdir(output)

# working directory for aggregated precipitation files

directory = '{}/subbasinprecipitation'.format(output)

if not os.path.isdir(directory): os.mkdir(directory)

# start and end dates (aggregate the whole 31 years)
Example #8
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series from the NSRDB database (very similar to the last example)

import os
import datetime
import pickle

# the ClimateProcessor class is the workhorse for this example

from pyhspf.preprocessing import ClimateProcessor

# instantiate a processor to fetch and aggregate the data

processor = ClimateProcessor()

# store every data file under one working directory

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of interest from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (aggregate the whole 31 years)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)
Example #9
0
# import the ClimateProcessor and PyShp reader

from pyhspf.preprocessing import ClimateProcessor
from shapefile import Reader

# path to existing shapefile defining the data region and processing information

filename = 'subbasin_catchments'

if not os.path.isfile(filename + '.shp'):
    print('error, {} does not exist!'.format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # RuntimeError; raise a descriptive exception instead
    raise FileNotFoundError('{}.shp'.format(filename))

# create an instance of the ClimateProcessor class

processor = ClimateProcessor()

# working directory location for all the data files

output = 'HSPF_data'

if not os.path.isdir(output): os.mkdir(output)

# working directory for aggregated precipitation files

directory = '{}/subbasinprecipitation'.format(output)

if not os.path.isdir(directory): os.mkdir(directory)

# start and end dates (aggregate the whole 31 years)
Example #10
0
# start and end dates

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2010, 1, 1)

# use the "subbasin_catchments" shapefile to define the data processing area

filename = 'subbasin_catchments'

if not os.path.isfile(filename + '.shp'):
    print('error: file {} does not exist!'.format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # RuntimeError; raise a descriptive exception instead
    raise FileNotFoundError('{}.shp'.format(filename))

# make an instance of the ClimateProcessor to fetch the climate data

processor = ClimateProcessor()

# the Penman-Monteith Equation requires temperature, humidity of dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor

processor.download_shapefile(filename, start, end, output, space=0.)

# let's get the daily tmin, tmax, dewpoint, and wind speed from GSOD

tmax = processor.aggregate('GSOD', 'tmax', start, end)
tmin = processor.aggregate('GSOD', 'tmin', start, end)
dewt = processor.aggregate('GSOD', 'dewpoint', start, end)
wind = processor.aggregate('GSOD', 'wind', start, end)

# let's use the hourly METSTAT data from the NSRDB for solar radiation
Example #11
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series (using the same data as the previous example to minimize discussion).

import os
import datetime
import pickle

# the ClimateProcessor class collects and aggregates the climate data

from pyhspf.preprocessing import ClimateProcessor

# set up a processor instance

processor = ClimateProcessor()

# all the data files for this run go in one working directory

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of the region from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (a decade is enough to highlight the concept)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(1990, 1, 2)
Example #12
0
# start and end dates for data download

start = datetime.datetime(2000, 1, 1)
end = datetime.datetime(2005, 1, 1)

# use the "subbasin_catchments" shapefile to define the data processing area

filename = "subbasin_catchments"

if not os.path.isfile(filename + ".shp"):
    print("error: file {} does not exist!".format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # RuntimeError; raise a descriptive exception instead
    raise FileNotFoundError("{}.shp".format(filename))

# make an instance of the ClimateProcessor to fetch the climate data

processor = ClimateProcessor()

# the Penman-Monteith Equation requires temperature, humidity of dewpoint,
# wind speed, and solar radiation, which can be obtained from the processor

processor.download_shapefile(filename, start, end, output, space=0.0)

# let's get the daily tmin, tmax, dewpoint, wind speed and solar

tmax = processor.aggregate("GSOD", "tmax", start, end)
tmin = processor.aggregate("GSOD", "tmin", start, end)
dewt = processor.aggregate("GSOD", "dewpoint", start, end)
wind = processor.aggregate("GSOD", "wind", start, end)
solar = processor.aggregate("NSRDB", "metstat", start, end)

# use the ETCalculator to estimate the evapotranspiration time series
Example #13
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series from the GSOD database (very similar to the last example)

import os
import datetime
import pickle

# pull in the ClimateProcessor class

from pyhspf.preprocessing import ClimateProcessor

# get a processor ready for downloading and aggregating

processor = ClimateProcessor()

# everything downloaded for this example is stored here

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of interest from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (a decade is enough to highlight the concept)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(1990, 1, 2)
Example #14
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series (using the same data as the previous example to minimize discussion).

import os
import datetime
import pickle

# the ClimateProcessor class drives the downloads and the aggregation

from pyhspf.preprocessing import ClimateProcessor

# construct the processor for this example

processor = ClimateProcessor()

# single working directory holding all the data files

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of the same region as before

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (just a decade to highlight the concept)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(1990, 1, 2)
Example #15
0
# David J. Lampert ([email protected])
#
# last updated: 02/21/2015
#
# illustrates how to use the ClimateProcessor class to aggregate climate time
# series from the NSRDB database (very similar to the last example)

import os
import datetime
import pickle

# the ClimateProcessor class fetches and aggregates the climate series

from pyhspf.preprocessing import ClimateProcessor

# build the processor used throughout this example

processor = ClimateProcessor()

# data files are collected in this working directory

output = 'HSPF_data'

if not os.path.isdir(output):
    os.mkdir(output)

# bounding box (xmin, ymin, xmax, ymax) of interest from the last example

bbox = (-77.2056, 38.2666, -76.4008, 39.3539)

# period of interest (aggregate the whole 31 years)

start = datetime.datetime(1980, 1, 1)
end = datetime.datetime(2011, 1, 1)
Example #16
0
# import the ClimateProcessor and PyShp reader

from pyhspf.preprocessing import ClimateProcessor
from shapefile import Reader

# path to existing shapefile defining the data region and processing information

filename = 'subbasin_catchments'

if not os.path.isfile(filename + '.shp'):
    print('error, {} does not exist!'.format(filename))
    # a bare "raise" with no active exception produces an uninformative
    # RuntimeError; raise a descriptive exception instead
    raise FileNotFoundError('{}.shp'.format(filename))

# create an instance of the ClimateProcessor class

processor = ClimateProcessor()

# working directory location for all the data files

output = 'HSPF_data'

if not os.path.isdir(output): os.mkdir(output)

# working directory for aggregated precipitation files

directory = '{}/subbasinprecipitation'.format(output)

if not os.path.isdir(directory): os.mkdir(directory)

# start and end dates (aggregate the whole 31 years)