Esempio n. 1
0
 def test_ensemble_name(self):
     """The ensemble of two datasets is named "Dataset Ensemble"."""
     self.ensemble_dataset_name = "Dataset Ensemble"
     self.datasets = []
     self.datasets.append(build_ten_cube_dataset(1))
     self.datasets.append(build_ten_cube_dataset(2))
     self.ensemble = dp.ensemble(self.datasets)
     # assertEqual: assertEquals is a deprecated alias (removed in Py 3.12).
     self.assertEqual(self.ensemble.name, self.ensemble_dataset_name)
Esempio n. 2
0
 def test_ensemble_name(self):
     """The ensemble of two datasets is named "Dataset Ensemble"."""
     self.ensemble_dataset_name = "Dataset Ensemble"
     self.datasets = []
     self.datasets.append(build_ten_cube_dataset(1))
     self.datasets.append(build_ten_cube_dataset(2))
     self.ensemble = dp.ensemble(self.datasets)
     # assertEqual: assertEquals is a deprecated alias (removed in Py 3.12).
     self.assertEqual(self.ensemble.name, self.ensemble_dataset_name)
Esempio n. 3
0
def check_some_dsp_functions(dataset):
    '''
    Run a subset of dataset processor functions and check for
    any kind of exception.

    The dataset's source file (dataset.origin['path']) is removed
    afterwards whether or not the processors succeed.
    '''
    try:
        dsp.temporal_rebin(dataset, 'annual')
        dsp.ensemble([dataset])
    except Exception as e:
        fail("\nDataset processor functions")
        # Parenthesized single-argument prints are valid under both
        # Python 2 (statement) and Python 3 (function call).
        print("Following error occurred:")
        print(str(e))
        end()
    finally:
        # Always clean up the temporary source file, pass or fail.
        os.remove(dataset.origin['path'])
    success("\nDataset processor functions")
Esempio n. 4
0
def check_some_dsp_functions(dataset):
    '''
    Run a subset of dataset processor functions and check for
    any kind of exception.

    The dataset's source file (dataset.origin['path']) is removed
    afterwards whether or not the processors succeed.
    '''
    try:
        dsp.temporal_rebin(dataset, 'annual')
        dsp.ensemble([dataset])
    except Exception as e:
        fail("\nDataset processor functions")
        # Typo fix: "occured" -> "occurred".
        print("Following error occurred:")
        print(str(e))
        end()
    finally:
        # Always clean up the temporary source file, pass or fail.
        os.remove(dataset.origin['path'])
    success("\nDataset processor functions")
Esempio n. 5
0
 def test_ensemble_logic(self):
     """With member datasets valued 1..5, the ensemble equals dataset 3."""
     self.datasets = [build_ten_cube_dataset(1), build_ten_cube_dataset(2)]
     self.three = build_ten_cube_dataset(3)
     self.datasets.append(self.three)
     self.datasets.extend(
         [build_ten_cube_dataset(4), build_ten_cube_dataset(5)])
     self.ensemble = dp.ensemble(self.datasets)
     self.ensemble_flat = self.ensemble.values.flatten()
     self.three_flat = self.three.values.flatten()
     # The mean of 1..5 is 3, so the ensemble must match the middle member.
     np.testing.assert_array_equal(self.ensemble_flat, self.three_flat)
Esempio n. 6
0
 def test_ensemble_logic(self):
     """The ensemble of datasets valued 1..5 matches the middle one (3)."""
     members = []
     for value in (1, 2):
         members.append(build_ten_cube_dataset(value))
     self.three = build_ten_cube_dataset(3)
     members.append(self.three)
     for value in (4, 5):
         members.append(build_ten_cube_dataset(value))
     self.datasets = members
     self.ensemble = dp.ensemble(self.datasets)
     self.ensemble_flat = self.ensemble.values.flatten()
     self.three_flat = self.three.values.flatten()
     # Mean of 1..5 is 3, so the flattened values must agree exactly.
     np.testing.assert_array_equal(self.ensemble_flat, self.three_flat)
# Subset our model datasets so they are the same size
knmi_dataset = dsp.subset(knmi_dataset, new_bounds)
wrf311_dataset = dsp.subset(wrf311_dataset, new_bounds)

""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """
# Using the bounds we will create a new set of lats and lons on 1/2 degree step
new_lons = np.arange(min_lon, max_lon, 0.5)
new_lats = np.arange(min_lat, max_lat, 0.5)
 
# Spatially regrid datasets using the new_lats, new_lons numpy arrays
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf311_dataset = dsp.spatial_regrid(wrf311_dataset, new_lats, new_lons)
cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)

# Generate an ensemble dataset from knmi and wrf models
# (cru31 is the observational reference and is deliberately left out).
ensemble_dataset = dsp.ensemble([knmi_dataset, wrf311_dataset])

""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()

""" Step 5: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
# cru31 is the reference; both models plus their ensemble are the targets.
bias_evaluation = evaluation.Evaluation(cru31_dataset, 
                      [knmi_dataset, wrf311_dataset, ensemble_dataset],
                      [bias])
print("Executing the Evaluation using the object's run() method")
Esempio n. 8
0
                                           new_lat,
                                           new_lon,
                                           boundary_check=boundary_check)
    print model_names[i] + ' has been regridded'
print 'Propagating missing data information'
obs_dataset = dsp.mask_missing_data([obs_dataset] + model_datasets)[0]
model_datasets = dsp.mask_missing_data([obs_dataset] + model_datasets)[1:]
""" Step 5: Checking and converting variable units """
print 'Checking and converting variable units'
obs_dataset = dsp.variable_unit_conversion(obs_dataset)
for idata, dataset in enumerate(model_datasets):
    model_datasets[idata] = dsp.variable_unit_conversion(dataset)

print 'Generating multi-model ensemble'
if len(model_datasets) >= 2.:
    model_datasets.append(dsp.ensemble(model_datasets))
    model_names.append('ENS')
""" Step 6: Generate subregion average and standard deviation """
if config['use_subregions']:
    # sort the subregion by region names and make a list
    subregions = sorted(config['subregions'].items(),
                        key=operator.itemgetter(0))

    # number of subregions
    nsubregion = len(subregions)

    print('Calculating spatial averages and standard deviations of ',
          str(nsubregion), ' subregions')

    obs_subregion_mean, obs_subregion_std, subregion_array = (
        utils.calc_subregion_area_mean_and_std([obs_dataset], subregions))
Esempio n. 9
0
 def test_unequal_dataset_shapes(self):
     """Mixing monthly and daily datasets in an ensemble raises ValueError."""
     self.ten_year_dataset = ten_year_monthly_dataset()
     self.two_year_dataset = two_year_daily_dataset()
     with self.assertRaises(ValueError):
         self.ensemble_dataset = dp.ensemble([self.ten_year_dataset,
                                              self.two_year_dataset])
Esempio n. 10
0
    model_datasets[idata] = dsp.spatial_regrid(dataset, new_lat, new_lon, boundary_check = boundary_check_model)
    print model_names[idata]+' has been regridded'
print 'Propagating missing data information'
ref_dataset = dsp.mask_missing_data([ref_dataset]+model_datasets)[0]
model_datasets = dsp.mask_missing_data([ref_dataset]+model_datasets)[1:]

""" Step 5: Checking and converting variable units """
print 'Checking and converting variable units'
ref_dataset = dsp.variable_unit_conversion(ref_dataset)
for idata,dataset in enumerate(model_datasets):
    model_datasets[idata] = dsp.variable_unit_conversion(dataset)
    

print 'Generating multi-model ensemble'
if len(model_datasets) >= 2.:
    model_datasets.append(dsp.ensemble(model_datasets))
    model_names.append('ENS')

""" Step 6: Generate subregion average and standard deviation """
if config['use_subregions']:
    # sort the subregion by region names and make a list
    subregions= sorted(config['subregions'].items(),key=operator.itemgetter(0))

    # number of subregions
    nsubregion = len(subregions)

    print 'Calculating spatial averages and standard deviations of ',str(nsubregion),' subregions'

    ref_subregion_mean, ref_subregion_std, subregion_array = utils.calc_subregion_area_mean_and_std([ref_dataset], subregions) 
    model_subregion_mean, model_subregion_std, subregion_array = utils.calc_subregion_area_mean_and_std(model_datasets, subregions) 
Esempio n. 11
0
# Regrid the observational reference onto the common grid.
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)

for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member] = dsp.spatial_regrid(target_datasets[member],
                                                 new_lats, new_lons)

# find the total annual mean. Note the function exists in util.py as def
# calc_climatology_year(dataset):
# (only the second return value, the mean field, is kept)
_, CRU31.values = utils.calc_climatology_year(CRU31)

for member, each_target_dataset in enumerate(target_datasets):
    _, target_datasets[member].values = utils.calc_climatology_year(
        target_datasets[member])

# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

# append to the target_datasets for final analysis
# NOTE(review): after this append the ensemble is treated as one more model.
target_datasets.append(target_datasets_ensemble)

# Collect every dataset's display name (presumably for plot legends —
# confirm against the code that consumes allNames).
for target in target_datasets:
    allNames.append(target.name)

list_of_regions = [
    Bounds(-10.0, 0.0, 29.0, 36.5),
    Bounds(0.0, 10.0, 29.0, 37.5),
    Bounds(10.0, 20.0, 25.0, 32.5),
    Bounds(20.0, 33.0, 25.0, 32.5),
    Bounds(-19.3, -10.2, 12.0, 20.0),
    Bounds(15.0, 30.0, 15.0, 25.0),
Esempio n. 12
0
# Regrid the observational reference onto the common grid.
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)


for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member] = dsp.spatial_regrid(
        target_datasets[member], new_lats, new_lons)

# find climatology monthly for obs and models
# (replaces each dataset's values/times with its 12-month climatology)
CRU31.values, CRU31.times = utils.calc_climatology_monthly(CRU31)

for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member].values, target_datasets[
        member].times = utils.calc_climatology_monthly(target_datasets[member])

# make the model ensemble
target_datasets_ensemble = dsp.ensemble(target_datasets)
target_datasets_ensemble.name = "ENS"

# append to the target_datasets for final analysis
# NOTE(review): after this append the ensemble is treated as one more model.
target_datasets.append(target_datasets_ensemble)

""" Step 4: Subregion stuff """
list_of_regions = [
    Bounds(-10.0, 0.0, 29.0, 36.5),
    Bounds(0.0, 10.0,  29.0, 37.5),
    Bounds(10.0, 20.0, 25.0, 32.5),
    Bounds(20.0, 33.0, 25.0, 32.5),
    Bounds(-19.3, -10.2, 12.0, 20.0),
    Bounds(15.0, 30.0, 15.0, 25.0),
    Bounds(-10.0, 10.0, 7.3, 15.0),
    Bounds(-10.9, 10.0, 5.0, 7.3),
Esempio n. 13
0
 def test_unequal_dataset_shapes(self):
     """dp.ensemble must reject members whose arrays differ in shape."""
     self.ten_year_dataset = ten_year_monthly_dataset()
     self.two_year_dataset = two_year_daily_dataset()
     mismatched = [self.ten_year_dataset, self.two_year_dataset]
     with self.assertRaises(ValueError):
         self.ensemble_dataset = dp.ensemble(mismatched)
Esempio n. 14
0
                                           boundary_check=boundary_check)
    print('{} has been regridded'.format(target_names[i]))
print('Propagating missing data information')
# Mask the reference and all targets together so missing data propagates,
# then split the masked list back apart.
datasets = dsp.mask_missing_data([reference_dataset]+target_datasets)
reference_dataset = datasets[0]
target_datasets = datasets[1:]

""" Step 4: Checking and converting variable units """
print('Checking and converting variable units')
reference_dataset = dsp.variable_unit_conversion(reference_dataset)
for i, dataset in enumerate(target_datasets):
    target_datasets[i] = dsp.variable_unit_conversion(dataset)

print('Generating multi-model ensemble')
# An ensemble needs at least two members; compare against the integer 2,
# not the float literal 2. the original used.
if len(target_datasets) >= 2:
    target_datasets.append(dsp.ensemble(target_datasets))
    target_names.append('ENS')

""" Step 5: Generate subregion average and standard deviation """
if config['use_subregions']:
    # sort the subregion by region names and make a list
    subregions = sorted(config['subregions'].items(),
                        key=operator.itemgetter(0))

    # number of subregions
    nsubregion = len(subregions)

    print('Calculating spatial averages and standard deviations of {} subregions'
          .format(nsubregion))

    reference_subregion_mean, reference_subregion_std, subregion_array = (
        utils.calc_subregion_area_mean_and_std([reference_dataset], subregions))
Esempio n. 15
0
                                            new_lon,
                                            boundary_check=boundary_check)
    print('{} has been regridded'.format(target_names[i]))
print('Propagating missing data information')
datasets = dsp.mask_missing_data([reference_dataset] + target_datasets)
reference_dataset = datasets[0]
target_datasets = datasets[1:]
""" Step 4: Checking and converting variable units """
print('Checking and converting variable units')
reference_dataset = dsp.variable_unit_conversion(reference_dataset)
for i, dataset in enumerate(target_datasets):
    target_datasets[i] = dsp.variable_unit_conversion(dataset)

print('Generating multi-model ensemble')
if len(target_datasets) >= 2.:
    target_datasets.append(dsp.ensemble(target_datasets))
    target_names.append('ENS')
""" Step 5: Generate subregion average and standard deviation """
if config['use_subregions']:
    # sort the subregion by region names and make a list
    subregions = sorted(config['subregions'].items(),
                        key=operator.itemgetter(0))

    # number of subregions
    nsubregion = len(subregions)

    print(
        'Calculating spatial averages and standard deviations of {} subregions'
        .format(nsubregion))

    reference_subregion_mean, reference_subregion_std, subregion_array = (
Esempio n. 16
0
# Subset our model datasets so they are the same size
knmi_dataset = dsp.subset(knmi_dataset, new_bounds)
wrf311_dataset = dsp.subset(wrf311_dataset, new_bounds)

# Spatially Regrid the Dataset Objects to a 1/2 degree grid.
# Using the bounds we will create a new set of lats and lons on 1/2 degree step
new_lons = np.arange(min_lon, max_lon, 0.5)
new_lats = np.arange(min_lat, max_lat, 0.5)

# Spatially regrid datasets using the new_lats, new_lons numpy arrays
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf311_dataset = dsp.spatial_regrid(wrf311_dataset, new_lats, new_lons)
cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)

# Generate an ensemble dataset from knmi and wrf models
# (cru31 is the observational reference and is deliberately left out).
ensemble_dataset = dsp.ensemble([knmi_dataset, wrf311_dataset])

# Step 4:  Build a Metric to use for Evaluation - Bias for this example.
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()

# Step 5: Create an Evaluation Object using Datasets and our Metric.
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
# Parenthesized call instead of a backslash line continuation (PEP 8),
# which also keeps every line within a readable width.
bias_evaluation = evaluation.Evaluation(
    cru31_dataset,
    [knmi_dataset, wrf311_dataset, ensemble_dataset],
    [bias])

print("Executing the Evaluation using the object's run() method")