Code Example #1
File: functions.py Project: BrianWilson1/climate
def computeMetrics(datasets, metricNames=['Bias'], subregions=None):
    '''Compute one or more metrics comparing multiple target datasets to a reference dataset.
    This routine assumes that the datasets have already been regridded so that their
    grid dimensions are identical.
    '''
    metrics = lookupMetrics(metricNames)
    if len(metrics) != len(metricNames):
        print('computeMetrics: Error: illegal or misspelled metric name.',
              file=sys.stderr)
    metric_eval = evaluation.Evaluation(datasets[0], datasets[1:], metrics)
    print('computeMetrics: Evaluating metrics %s . . .' % str(metricNames),
          file=sys.stderr)
    metric_eval.run()
    return metric_eval.results
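
A minimal usage sketch for this helper, assuming obs_dataset, model_a, and model_b are OCW Dataset objects already regridded onto a common grid (these names are hypothetical):

# Hypothetical call: obs_dataset is the reference; model_a and model_b are targets.
results = computeMetrics([obs_dataset, model_a, model_b], metricNames=['Bias'])
# results follows evaluation.Evaluation's nested-list layout, e.g.
# results[0][0] is the first metric applied to the first target dataset.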
Code Example #2
new_lats = numpy.arange(min_lat, max_lat, 1)

# Spatially regrid datasets using the new_lats, new_lons numpy arrays
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)

# Load the metrics that we want to use for the evaluation.
##########################################################################
taylor_diagram = metrics.SpatialPatternTaylorDiagram()

# Create our new evaluation object. The knmi dataset is the evaluation's
# reference dataset. We then provide a list of one or more target datasets
# to use for the evaluation. In this case, we only want to use the wrf dataset.
# Then we pass a list of all the metrics that we want to use in the evaluation.
##########################################################################
test_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset],
                                        [taylor_diagram])
test_evaluation.run()

# Pull out the evaluation results and prepare them for drawing a Taylor diagram.
##########################################################################
taylor_data = test_evaluation.results[0]

# Draw our Taylor diagram!
##########################################################################
plotter.draw_taylor_diagram(taylor_data, [wrf_dataset.name],
                            knmi_dataset.name,
                            fname='taylor_plot',
                            fmt='png',
                            frameon=False)
Code Example #3
# Accessing the latitudes and longitudes of the netCDF file
lats = knmi_dataset.lats
lons = knmi_dataset.lons
""" Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print "Setting up a Temporal STD metric to use for evaluation"
std = metrics.TemporalStdDev()
""" Step 3: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print "Making the Evaluation definition"
# Temporal STD Metric gets one target dataset then reference dataset should be None
std_evaluation = evaluation.Evaluation(None, [knmi_dataset], [std])
print "Executing the Evaluation using the object's run() method"
std_evaluation.run()
""" Step 4: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print "Accessing the Results of the Evaluation run"
results = std_evaluation.unary_results[0][0]
print "The results are of type: %s" % type(results)

# From the temporal std output I want to make a Contour Map of the region
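
The listing stops before the plotting call; a sketch of that step, assuming the lats and lons arrays from Step 1 above and following the plotter.draw_contour_map usage shown in Code Examples #5 and #13 (the output file name is an assumption):

print("Generating a contour map using ocw.plotter.draw_contour_map()")
# results holds the gridded temporal standard deviation computed above
plotter.draw_contour_map(results, lats, lons,
                         fname='knmi_temporal_std',  # hypothetical output name
                         ptitle='Temporal StdDev')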
Code Example #4
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
print("Spatially Regridding the CRU31_Dataset...")
cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)
print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, ))
print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, ))
""" Step 4:  Build a Metric to use for Evaluation - Bias for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()
""" Step 5: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [cru31_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()
""" Step 6: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0, :]

# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")
Code Example #5
File: user_lund_example.py Project: darth-pr/climate
ref_dataset = dsp.spatial_regrid(ref_dataset, new_lats, new_lons)

# Load the metrics for the evaluation.
mean_bias = metrics.MeanBias()
# These versions of the metrics require seasonal bounds prior to running
# the metrics. You should set these values above in the evaluation
# configuration section.
spatial_std_dev_ratio = metrics.SeasonalSpatialStdDevRatio(
    month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)
pattern_correlation = metrics.SeasonalPatternCorrelation(
    month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)

# Create our example evaluation.
example_eval = evaluation.Evaluation(
    ref_dataset,  # Reference dataset for the evaluation
    # 1 or more target datasets for the evaluation
    [target_dataset],
    # 1 or more metrics to use in the evaluation
    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

plotter.draw_contour_map(example_eval.results[0][0],
                         new_lats,
                         new_lons,
                         'lund_example_time_averaged_bias',
                         gridshape=(1, 1),
                         ptitle='Time Averaged Bias')

spatial_stddev_ratio = example_eval.results[0][1]
# Pattern correlation results are a tuple, so we need to index and grab
# the component we care about.
spatial_correlation = example_eval.results[0][2][0]
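
To feed these two values into a Taylor diagram, Code Example #12 assembles them into an array and hands it to the plotter; the same pattern would apply here (a sketch, with numpy assumed to be imported and the output file name made up):

taylor_data = numpy.array([[spatial_stddev_ratio],
                           [spatial_correlation]]).transpose()
plotter.draw_taylor_diagram(taylor_data, [target_dataset.name],
                            ref_dataset.name,
                            fname='lund_example_taylor',  # hypothetical name
                            fmt='png')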
Code Example #6
# Accessing the latitudes and longitudes of the netCDF file
lats = ccmp_dataset.lats
lons = ccmp_dataset.lons
""" Step 2:  Build a Metric to use for Evaluation - Temporal STD for this example """
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Temporal STD metric to use for evaluation")
std = metrics.TemporalStdDev()
""" Step 3: Create an Evaluation Object using Datasets and our Metric """
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
# The Temporal STD metric takes a single target dataset, so the
# reference dataset should be None
std_evaluation = evaluation.Evaluation(None, [ccmp_dataset], [std])
print("Executing the Evaluation using the object's run() method")
std_evaluation.run()
""" Step 4: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = std_evaluation.unary_results[0][0]
print("The results are of type: %s" % type(results))
print("Generating a contour map using ocw.plotter.draw_contour_map()")
Code Example #7
    Bounds(30.0, 40.0, -15.0, 0.0),
    Bounds(33.0, 40.0, 25.0, 35.00)
]

region_list = ["R" + str(i + 1) for i in range(13)]

# metrics
pattern_correlation = metrics.PatternCorrelation()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,  # Reference dataset for the evaluation
    # 1 or more target datasets for
    # the evaluation
    target_datasets,
    # 1 or more metrics to use in
    # the evaluation
    [pattern_correlation],
    # list of subregion Bounds
    # Objects
    list_of_regions)
RCMs_to_CRU_evaluation.run()

new_patcor = np.squeeze(np.array(RCMs_to_CRU_evaluation.results), axis=1)

plotter.draw_portrait_diagram(np.transpose(new_patcor),
                              allNames,
                              region_list,
                              fname=OUTPUT_PLOT,
                              fmt='png',
                              cmap='coolwarm_r')
Code Example #8
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)
print("Final shape of the WRF_Dataset: \n"
      "%s\n" % (wrf_dataset.values.shape, ))

# Step 4:  Build a Metric to use for Evaluation - Bias for this example.
# You can build your own metrics, but OCW also ships with some common metrics
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()

# Step 5: Create an Evaluation Object using Datasets and our Metric.
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()

# Step 6: Make a Plot from the Evaluation.results.
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0]
print("The results are of type: %s" % type(results))
Code Example #9
for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member].values = utils.calc_temporal_mean(
        target_datasets[member])

allNames = []

for target in target_datasets:
    allNames.append(target.name)

# calculate the metrics
taylor_diagram = metrics.SpatialPatternTaylorDiagram()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,  # Reference dataset for the evaluation
    # 1 or more target datasets for the evaluation
    target_datasets,
    # 1 or more metrics to use in the evaluation
    [taylor_diagram])
RCMs_to_CRU_evaluation.run()

taylor_data = RCMs_to_CRU_evaluation.results[0]

plotter.draw_taylor_diagram(taylor_data,
                            allNames,
                            "CRU31",
                            fname=OUTPUT_PLOT,
                            fmt='png',
                            frameon=False)
Code Example #10
    target_datasets[member].values =\
        utils.calc_temporal_mean(target_datasets[member])

allNames = []

for target in target_datasets:
    allNames.append(target.name)

# calculate the metrics
taylor_diagram = metrics.SpatialPatternTaylorDiagram()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,  # Reference dataset for the evaluation
    # 1 or more target datasets for
    # the evaluation
    target_datasets,
    # 1 or more metrics to use in
    # the evaluation
    [taylor_diagram])
RCMs_to_CRU_evaluation.run()

taylor_data = RCMs_to_CRU_evaluation.results[0]

plotter.draw_taylor_diagram(taylor_data,
                            allNames,
                            "CRU31",
                            fname=OUTPUT_PLOT,
                            fmt='png',
                            frameon=False)
Code Example #11
# Generate an ensemble dataset from knmi and wrf models
ensemble_dataset = dsp.ensemble([knmi_dataset, wrf311_dataset])

# Step 4:  Build a Metric to use for Evaluation - Bias for this example.
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()

# Step 5: Create an Evaluation Object using Datasets and our Metric.
# The Evaluation Class Signature is:
# Evaluation(reference, targets, metrics, subregions=None)
# Evaluation can take in multiple targets and metrics, so we need to convert
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation =\
    evaluation.Evaluation(cru31_dataset, [knmi_dataset, wrf311_dataset, ensemble_dataset], [bias])

print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()

# Step 6: Make a Plot from the Evaluation.results.
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_target_datasets, num_metrics) if no subregion
# Accessing the actual results when we have used 3 datasets and 1 metric is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0]
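
A quick way to sanity-check what came back, given the result shape stated above (one entry per target dataset, each holding one entry per metric); the pairing below is a sketch that assumes the results preserve the target order passed to Evaluation:

for target, target_results in zip(
        [knmi_dataset, wrf311_dataset, ensemble_dataset],
        bias_evaluation.results):
    # target_results[0] is the Bias output for this target dataset
    print("%s bias field shape: %s" % (target.name, target_results[0].shape))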
Code Example #12
# Spatially regrid datasets using the new_lats, new_lons numpy arrays
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)

# Load the metrics that we want to use for the evaluation.
################################################################################
sstdr = metrics.SpatialStdDevRatio()
pc = metrics.PatternCorrelation()

# Create our new evaluation object. The knmi dataset is the evaluation's
# reference dataset. We then provide a list of one or more target datasets
# to use for the evaluation. In this case, we only want to use the wrf dataset.
# Then we pass a list of all the metrics that we want to use in the evaluation.
################################################################################
test_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset],
                                        [sstdr, pc])
test_evaluation.run()

# Pull out the evaluation results and prepare them for drawing a Taylor diagram.
################################################################################
spatial_stddev_ratio = test_evaluation.results[0][0]
# Pattern correlation results are a tuple, so we need to index and grab
# the component we care about.
spatial_correlation = test_evaluation.results[0][1][0]

taylor_data = numpy.array([[spatial_stddev_ratio],
                           [spatial_correlation]]).transpose()

# Draw our Taylor diagram!
################################################################################
plotter.draw_taylor_diagram(taylor_data, [wrf_dataset.name],
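
The call is truncated in this listing; Code Example #2 shows the full argument list for the same plot, so it plausibly continues along these lines (the keyword values below are assumptions, not the original file's):

plotter.draw_taylor_diagram(taylor_data, [wrf_dataset.name],
                            knmi_dataset.name,
                            fname='taylor_plot',  # assumed, mirroring Example #2
                            fmt='png',
                            frameon=False)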
Code Example #13
_, CRU31.values = utils.calc_climatology_year(CRU31)

for member, each_target_dataset in enumerate(target_datasets):
    _, target_datasets[member].values = utils.calc_climatology_year(
        target_datasets[member])

for target in target_datasets:
    allNames.append(target.name)

# determine the metrics
mean_bias = metrics.Bias()

# create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,  # Reference dataset for the evaluation
    # list of target datasets for the evaluation
    target_datasets,
    # 1 or more metrics to use in the evaluation
    [mean_bias])
RCMs_to_CRU_evaluation.run()

# extract the relevant data from RCMs_to_CRU_evaluation.results
# the results are a nested list of shape (num_target_datasets, num_metrics);
# see the docs for further details
# remove the metric dimension
rcm_bias = RCMs_to_CRU_evaluation.results[0]

plotter.draw_contour_map(rcm_bias,
                         new_lats,
                         new_lons,
                         gridshape=(2, 3),
                         fname=OUTPUT_PLOT,
                         subtitles=allNames,
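
This final call is also cut off; based on the other draw_contour_map calls in the listing, it plausibly closes with a colormap keyword along these lines (the cmap value is an assumption borrowed from Code Example #7):

plotter.draw_contour_map(rcm_bias,
                         new_lats,
                         new_lons,
                         gridshape=(2, 3),
                         fname=OUTPUT_PLOT,
                         subtitles=allNames,
                         cmap='coolwarm_r')  # assumed final keyword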