Example #1
def _draw_contour_plot(evaluation, plot_config):
    """"""
    lats = plot_config['lats']
    if type(lats) != type(list):
        lats = range(lats['range_min'], lats['range_max'], lats['range_step'])

    lons = plot_config['lons']
    if not isinstance(lons, list):
        lons = range(lons['range_min'], lons['range_max'], lons['range_step'])


    for i, (row, col) in enumerate(plot_config['results_indices']):
        plot_name = plot_config['output_name'] + '_{}'.format(i)
        plots.draw_contour_map(evaluation.results[row][col],
                               np.array(lats),
                               np.array(lons),
                               plot_name,
                               **plot_config.get('optional_args', {}))
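For context, plot_config above is a plain dictionary. A minimal sketch of what it might contain (the key names are taken from the function; all values are invented for illustration):

# Hypothetical plot_config for _draw_contour_plot; only the key names come
# from the code above, the values are made up.
plot_config = {
    # 'lats'/'lons' may be an explicit list of values ...
    'lats': [30, 35, 40, 45, 50],
    # ... or a dict describing a range to expand with range()/np.arange()
    'lons': {'range_min': -120, 'range_max': -80, 'range_step': 10},
    # (row, col) pairs indexing into evaluation.results
    'results_indices': [(0, 0), (0, 1)],
    'output_name': 'evaluation_contour',
    # extra keyword arguments forwarded to plots.draw_contour_map()
    'optional_args': {'ptitle': 'Example plot'},
}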
Example #2
def _draw_contour_plot(evaluation, plot_config):
    """"""
    lats = plot_config['lats']
    if type(lats) != type(list):
        lats = np.arange(lats['range_min'], lats['range_max'],
                         lats['range_step'])

    lons = plot_config['lons']
    if not isinstance(lons, list):
        lons = np.arange(lons['range_min'], lons['range_max'],
                         lons['range_step'])

    for i, index in enumerate(plot_config['results_indices']):
        if len(index) == 2:
            target, metric = index
            vals = evaluation.results[target][metric]
        elif len(index) == 3:
            target, metric, subregion = index
            vals = evaluation.results[target][metric][subregion]

        plot_name = plot_config['output_name'] + '_{}'.format(i)
        plots.draw_contour_map(vals, np.array(lats), np.array(lons), plot_name,
                               **plot_config.get('optional_args', {}))
Example #3
def _draw_contour_plot(evaluation, plot_config):
    """"""
    lats = plot_config['lats']
    if type(lats) != type(list):
        lats = np.arange(lats['range_min'], lats['range_max'], lats['range_step'])

    lons = plot_config['lons']
    if not isinstance(lons, list):
        lons = np.arange(lons['range_min'], lons['range_max'], lons['range_step'])

    for i, index in enumerate(plot_config['results_indices']):
        if len(index) == 2:
            target, metric = index
            vals = evaluation.results[target][metric]
        elif len(index) == 3:
            target, metric, subregion = index
            vals = evaluation.results[target][metric][subregion]

        plot_name = plot_config['output_name'] + '_{}'.format(i)
        plots.draw_contour_map(vals,
                               np.array(lats),
                               np.array(lons),
                               plot_name,
                               **plot_config.get('optional_args', {}))
Example #4
dataset = local.load_file('AOD_monthly_2000-MAR_2016-FEB_from_MISR_L3_JOINT.nc',
                          'nonabsorbing_ave')
''' Subset the data for East Asia'''
Bounds = ds.Bounds(lat_min=20, lat_max=57.7, lon_min=90, lon_max=150)
dataset = dsp.subset(dataset, Bounds)
'''The original dataset includes nonabsorbing AOD values between March 2000 and February 2016.
dsp.temporal_subset will extract data in September-October-November.'''
dataset_SON = dsp.temporal_subset(dataset,
                                  month_start=9,
                                  month_end=11,
                                  average_each_year=True)

ny, nx = dataset_SON.values.shape[1:]

# multi-year mean aod
clim_aod = ma.zeros([3, ny, nx])

clim_aod[0, :] = ma.mean(dataset_SON.values, axis=0)  # 16-year mean
clim_aod[1, :] = ma.mean(dataset_SON.values[-5:, :],
                         axis=0)  # the last 5-year mean
clim_aod[2, :] = dataset_SON.values[-1, :]  # the last year's value

# plot clim_aod (3 subplots)
plotter.draw_contour_map(
    clim_aod,
    dataset_SON.lats,
    dataset_SON.lons,
    fname='nonabsorbing_AOD_clim_East_Asia_Sep-Nov',
    gridshape=[1, 3],
    subtitles=['2000-2015: 16 years', '2011-2015: 5 years', '2015: 1 year'],
    clevs=np.arange(21) * 0.02)
Example #5
min_lon = -125.75
max_lon = -66.75

start_time = datetime(1998, 1, 1)
end_time = datetime(1998, 12, 31)

TRMM_dataset = rcmed.parameter_dataset(3, 36, min_lat, max_lat, min_lon,
                                       max_lon, start_time, end_time)

Cuba_and_Bahamas_bounds = Bounds(boundary_type='countries',
                                 countries=['Cuba', 'Bahamas'])
# extract=False masks out the data over Cuba and the Bahamas
TRMM_dataset2 = dsp.subset(TRMM_dataset,
                           Cuba_and_Bahamas_bounds,
                           extract=False)

plotter.draw_contour_map(ma.mean(TRMM_dataset2.values, axis=0),
                         TRMM_dataset2.lats,
                         TRMM_dataset2.lons,
                         fname='TRMM_without_Cuba_and_Bahamas')

NCA_SW_bounds = Bounds(boundary_type='us_states',
                       us_states=['CA', 'NV', 'UT', 'AZ', 'NM', 'CO'])
# extract=True keeps only the data over the NCA Southwest states
TRMM_dataset3 = dsp.subset(TRMM_dataset2, NCA_SW_bounds, extract=True)

plotter.draw_contour_map(ma.mean(TRMM_dataset3.values, axis=0),
                         TRMM_dataset3.lats,
                         TRMM_dataset3.lons,
                         fname='TRMM_NCA_SW')
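Note that ma.mean(..., axis=0) above collapses the time axis of the masked (time, lat, lon) array into the single 2-D field that draw_contour_map expects. A standalone sketch with invented shapes:

import numpy as np
import numpy.ma as ma

# Fake (time, lat, lon) masked array standing in for TRMM_dataset2.values
fake_values = ma.masked_invalid(np.random.rand(12, 4, 5))
time_mean = ma.mean(fake_values, axis=0)  # -> 2-D (lat, lon) field
print(time_mean.shape)  # (4, 5)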
Example #6
    # 1 or more target datasets for the evaluation
    #[target_dataset, target_dataset2, target_dataset3, target_dataset4],
    target_datasets,
    # 1 or more metrics to use in the evaluation
    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

spatial_stddev_ratio = []
spatial_correlation = []
taylor_dataset_names = []  # List of datasets names for the Taylor diagram
for i, target in enumerate(example_eval.target_datasets):
    # For each target dataset in the evaluation, draw a contour map
    plotter.draw_contour_map(example_eval.results[i][0],
                             new_lats,
                             new_lons,
                             'lund_{}_{}_time_averaged_bias'.format(
                                 ref_dataset.name, target.name),
                             gridshape=(1, 1),
                             ptitle='Time Averaged Bias')

    taylor_dataset_names.append(target.name)
    # Grab Data for a Taylor Diagram
    spatial_stddev_ratio.append(example_eval.results[i][1])
    # Pattern correlation results are a tuple, so we need to index and grab
    # the component we care about.
    spatial_correlation.append(example_eval.results[i][2][0])

taylor_data = np.array([spatial_stddev_ratio, spatial_correlation]).transpose()
plotter.draw_taylor_diagram(taylor_data,
                            taylor_dataset_names,
                            ref_dataset.name,
Example #7
std_evaluation = evaluation.Evaluation(None, [knmi_dataset], [std])
print "Executing the Evaluation using the object's run() method"
std_evaluation.run()
""" Step 4: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print "Accessing the Results of the Evaluation run"
results = std_evaluation.unary_results[0][0]
print "The results are of type: %s" % type(results)

# From the temporal std output I want to make a Contour Map of the region
print "Generating a contour map using ocw.plotter.draw_contour_map()"

fname = OUTPUT_PLOT
gridshape = (4, 5)  # 20 years' worth of plots arranged in a 4 x 5 grid
plot_title = "TASMAX Temporal Standard Deviation (1989 - 2008)"
sub_titles = range(1989, 2009, 1)

plotter.draw_contour_map(results,
                         lats,
                         lons,
                         fname,
                         gridshape=gridshape,
                         ptitle=plot_title,
                         subtitles=sub_titles)
Example #8
# our examples into Python lists.  Evaluation will iterate over the lists
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset, [cru31_dataset], [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()
 
""" Step 6: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_metrics, num_target_datasets) if no subregion
# Accessing the actual results when we have used 1 metric and 1 dataset is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0][0,:]
 
# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")
 
lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT
gridshape = (1, 1)  # Using a 1 x 1 since we have a single Bias for the full time range
plot_title = "TASMAX Bias of KNMI Compared to CRU 3.1 (%s - %s)" % (start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m"))
sub_titles = ["Full Temporal Range"]
 
plotter.draw_contour_map(results, lats, lons, fname,
                         gridshape=gridshape, ptitle=plot_title, 
                         subtitles=sub_titles)
Example #9
    month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)
pattern_correlation = metrics.SeasonalPatternCorrelation(
    month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)

# Create our example evaluation.
example_eval = evaluation.Evaluation(
    ref_dataset,  # Reference dataset for the evaluation
    # 1 or more target datasets for the evaluation
    [target_dataset],
    # 1 or more metrics to use in the evaluation
    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

plotter.draw_contour_map(example_eval.results[0][0],
                         new_lats,
                         new_lons,
                         'lund_example_time_averaged_bias',
                         gridshape=(1, 1),
                         ptitle='Time Averaged Bias')

spatial_stddev_ratio = example_eval.results[0][1]
# Pattern correlation results are a tuple, so we need to index and grab
# the component we care about.
spatial_correlation = example_eval.results[0][2][0]
taylor_data = np.array([[spatial_stddev_ratio],
                        [spatial_correlation]]).transpose()

plotter.draw_taylor_diagram(taylor_data, [target_dataset.name],
                            ref_dataset.name,
                            fname='lund_example_taylor_diagram',
                            frameon=False)
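The array handed to draw_taylor_diagram above ends up with one (stddev ratio, correlation) row per dataset after the transpose. A quick standalone check with invented numbers:

import numpy as np

spatial_stddev_ratio = 1.1   # invented value for illustration
spatial_correlation = 0.85   # invented value for illustration
taylor_data = np.array([[spatial_stddev_ratio],
                        [spatial_correlation]]).transpose()
print(taylor_data.shape)  # (1, 2): one dataset, one (stddev ratio, correlation) pair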
Example #10
    _, target_datasets[member].values = utils.calc_climatology_year(
        target_datasets[member])

for target in target_datasets:
    allNames.append(target.name)

#determine the metrics
mean_bias = metrics.Bias()

#create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,  # Reference dataset for the evaluation
    # list of target datasets for the evaluation
    target_datasets,
    # 1 or more metrics to use in the evaluation
    [mean_bias])
RCMs_to_CRU_evaluation.run()

#extract the relevant data from RCMs_to_CRU_evaluation.results
#the results returns a list (num_target_datasets, num_metrics). See docs for further details
#remove the metric dimension
rcm_bias = RCMs_to_CRU_evaluation.results[0]

plotter.draw_contour_map(rcm_bias,
                         new_lats,
                         new_lons,
                         gridshape=(2, 3),
                         fname=OUTPUT_PLOT,
                         subtitles=allNames,
                         cmap='coolwarm_r')

Example #11
''' data source: https://dx.doi.org/10.6084/m9.figshare.3753321.v1
    AOD_monthly_2000-Mar_2016-FEB_from_MISR_L3_JOINT.nc is publicly available.'''
dataset = local.load_file('AOD_monthly_2000-MAR_2016-FEB_from_MISR_L3_JOINT.nc',
                          'nonabsorbing_ave')
''' Subset the data for East Asia'''
Bounds = ds.Bounds(lat_min=20, lat_max=57.7, lon_min=90, lon_max=150)
dataset = dsp.subset(dataset, Bounds)

'''The original dataset includes nonabsorbing AOD values between March 2000 and February 2016.
dsp.temporal_subset will extract data in September-October-November.'''
dataset_SON = dsp.temporal_subset(
    dataset, month_start=9, month_end=11, average_each_year=True)

ny, nx = dataset_SON.values.shape[1:]

# multi-year mean aod
clim_aod = ma.zeros([3, ny, nx])

clim_aod[0, :] = ma.mean(dataset_SON.values, axis=0)  # 16-year mean
clim_aod[1, :] = ma.mean(dataset_SON.values[-5:, :],
                         axis=0)  # the last 5-year mean
clim_aod[2, :] = dataset_SON.values[-1, :]  # the last year's value

# plot clim_aod (3 subplots)
plotter.draw_contour_map(clim_aod, dataset_SON.lats, dataset_SON.lons,
                         fname='nonabsorbing_AOD_clim_East_Asia_Sep-Nov',
                         gridshape=[1, 3], subtitles=['2000-2015: 16 years', '2011-2015: 5 years', '2015: 1 year'],
                         clevs=np.arange(21) * 0.02)
Example #12
# the metrics. You should set these values above in the evaluation
# configuration section.
spatial_std_dev_ratio = metrics.SeasonalSpatialStdDevRatio(month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)
pattern_correlation = metrics.SeasonalPatternCorrelation(month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)

# Create our example evaluation.
example_eval = evaluation.Evaluation(ref_dataset, # Reference dataset for the evaluation
                                    # 1 or more target datasets for the evaluation
                                    [target_dataset],
                                    # 1 or more metrics to use in the evaluation
                                    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

plotter.draw_contour_map(example_eval.results[0][0],
                         new_lats,
                         new_lons,
                         'lund_example_time_averaged_bias',
                         gridshape=(1, 1),
                         ptitle='Time Averaged Bias')

spatial_stddev_ratio = example_eval.results[0][1]
# Pattern correlation results are a tuple, so we need to index and grab
# the component we care about.
spatial_correlation = example_eval.results[0][2][0]
taylor_data = np.array([[spatial_stddev_ratio], [spatial_correlation]]).transpose()

plotter.draw_taylor_diagram(taylor_data,
                            [target_dataset.name],
                            ref_dataset.name,
                            fname='lund_example_taylor_diagram',
                            frameon=False)
Example #13
example_eval = evaluation.Evaluation(ref_dataset, # Reference dataset for the evaluation
                                    # 1 or more target datasets for the evaluation
                                    #[target_dataset, target_dataset2, target_dataset3, target_dataset4],
                                    target_datasets,
                                    # 1 or more metrics to use in the evaluation
                                    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

spatial_stddev_ratio = []
spatial_correlation = []
taylor_dataset_names = [] # List of datasets names for the Taylor diagram
for i, target in enumerate(example_eval.target_datasets):
    # For each target dataset in the evaluation, draw a contour map
    plotter.draw_contour_map(example_eval.results[i][0],
                             new_lats,
                             new_lons,
                             'lund_{}_{}_time_averaged_bias'.format(ref_dataset.name, target.name),
                             gridshape=(1, 1),
                             ptitle='Time Averaged Bias')

    taylor_dataset_names.append(target.name)
    # Grab Data for a Taylor Diagram
    spatial_stddev_ratio.append(example_eval.results[i][1])
    # Pattern correlation results are a tuple, so we need to index and grab
    # the component we care about.
    spatial_correlation.append(example_eval.results[i][2][0])

taylor_data = np.array([spatial_stddev_ratio, spatial_correlation]).transpose()
plotter.draw_taylor_diagram(taylor_data,
                            taylor_dataset_names,
                            ref_dataset.name,
                            fname='lund_taylor_diagram',
Example #14
def plotBias(metric, lats, lons, outputName, **config):
    '''Plot the bias of the reference datasets compared to multiple targets.'''
    plotFile = outputName + '.png'
    print(('plotBias: Writing %s' % plotFile))
    plotter.draw_contour_map(metric, lats, lons, outputName, **config)
    return plotFile
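A hedged usage sketch for plotBias: the grid and bias values below are invented, and it assumes ocw.plotter is already imported as plotter (as in the helper above); gridshape and ptitle are simply forwarded to draw_contour_map through **config.

import numpy as np

# Hypothetical inputs: a 2-D bias field on a small lat/lon grid.
lat_grid = np.arange(30.0, 40.0, 1.0)
lon_grid = np.arange(-110.0, -100.0, 1.0)
bias_field = np.random.randn(lat_grid.size, lon_grid.size)

# Writes 'tasmax_bias_example.png' and returns that filename.
saved = plotBias(bias_field, lat_grid, lon_grid, 'tasmax_bias_example',
                 gridshape=(1, 1), ptitle='Time Averaged Bias')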
Example #15
def calculate_metrics_and_make_plots(varName, workdir, lons, lats, obsData, mdlData, obsRgn, mdlRgn, obsList, mdlList, subRegions, \
                                     subRgnLon0, subRgnLon1, subRgnLat0, subRgnLat1):
    '''
    Purpose:: 
        Calculate all the metrics used in Kim et al. [2013] paper and plot them 

    Input::
        varName - evaluating variable
        workdir -
        lons -
        lats -
        obsData -
        mdlData -
        obsRgn -
        mdlRgn -
        obsList -
        mdlList -
        subRegions - 
        subRgnLon0, subRgnLat0 - southwest boundary of sub-regions [numSubRgn]
        subRgnLon1, subRgnLat1 - northeast boundary of sub-regions [numSubRgn]
    Output:: 
        png files
        
     '''
   
   
    nobs, nt, ny, nx = obsData.shape
    nmodel = mdlData.shape[0]
    ### TODO: unit conversion (K to C)
    if varName == 'temp':
        obsData[0, :, :, :] = obsData[0, :, :, :] - 273.15
        if subRegions:
            obsRgn[0, :, :] = obsRgn[0, :, :] - 273.15
    if varName == 'prec' and obsData.max() > mdlData.max()*1000.:
        mdlData[:, :, :, :] = mdlData[:, :, :, :]*86400.
        if subRegions:
            mdlRgn[:, :, :] = mdlRgn[:, :, :]*86400.
        
    oTser, oClim = calcClimYear( obsData[0, :, :, :])
    bias_of_overall_average = ma.zeros([nmodel, ny, nx])
    spatial_stdev_ratio = np.zeros([nmodel])
    spatial_corr = np.zeros([nmodel])
    mdlList.append('ENS')
    
    for imodel in np.arange(nmodel):
        mTser, mClim = calcClimYear( mdlData[imodel,:,:,:])
        bias_of_overall_average[imodel,:,:] = calcBias(mClim, oClim)
        spatial_corr[imodel], sigLev = calcPatternCorrelation(oClim, mClim)
        spatial_stdev_ratio[imodel] = calcSpatialStdevRatio(mClim, oClim)   
    fig_return = plotter.draw_contour_map(oClim, lats, lons, workdir+'/observed_climatology_'+varName, fmt='png', gridshape=(1, 1),
                   clabel='', ptitle='', subtitles=obsList, cmap=None, 
                   clevs=None, nlevs=10, parallels=None, meridians=None,
                   extend='neither')    
    # TODO:
    # Be sure to update "gridshape" argument to be the number of sub plots (rows,columns). This should be improved so that the 
    # gridshape is optimally determined for a given number of models. For example:
    # For 3 models, a gridshape of (2,2) would be sensible:
    # X X 
    # X
    #
    fig_return = plotter.draw_contour_map(bias_of_overall_average, lats, lons, workdir+'/bias_of_climatology_'+varName, fmt='png', gridshape=(6, 2),
                   clabel='', ptitle='', subtitles=mdlList, cmap=None, 
                   clevs=None, nlevs=10, parallels=None, meridians=None,
                   extend='neither')
    Taylor_data = np.array([spatial_stdev_ratio, spatial_corr]).transpose()
    
    fig_return = plotter.draw_taylor_diagram(Taylor_data, mdlList, refname='CRU', fname = workdir+'/Taylor_'+varName, fmt='png',frameon=False)

    if subRegions:
        nseason = 2      # (0: summer and 1: winter)
        nregion = len(subRgnLon0)
        season_name = ['summer','winter']
        rowlabels = ['PNw','PNe','CAn','CAs','SWw','SWe','COL','GPn','GPc','GC','GL','NE','SE','FL']
        collabels = ['M1','M2','M3','M4','M5','M6','ENS']
        collabels[nmodel-1] = 'ENS'
        
        for iseason in [0,1]:
            portrait_subregion = np.zeros([4, nregion, nmodel])
            portrait_titles = ['(a) Normalized Bias', '(b) Normalized STDV', '(c) Normalized RMSE', '(d) Correlation']
            if iseason == 0:
                monthBegin = 6
                monthEnd = 8
            if iseason == 1:
                monthBegin = 12
                monthEnd = 2

            obsTser, obsClim = calcClimSeasonSubRegion(monthBegin, monthEnd, obsRgn[0, :, :])
            for imodel in np.arange(nmodel):
                mTser, mClim = calcClimSeasonSubRegion(monthBegin, monthEnd, mdlRgn[imodel, :, :])
                for iregion in np.arange(nregion):
                      portrait_subregion[0,iregion,imodel] = calcBias(mClim[iregion],obsClim[iregion])/calcTemporalStdev(obsTser[iregion,:])   
                      portrait_subregion[1,iregion,imodel] = calcTemporalStdev(mTser[iregion,:])/ calcTemporalStdev(obsTser[iregion,:]) 
                      portrait_subregion[2,iregion,imodel] = calcRootMeanSquaredDifferenceAveragedOverTime(mTser[iregion,:], obsTser[iregion,:])/calcTemporalStdev(obsTser[iregion,:])
                      portrait_subregion[3,iregion, imodel] = calcTemporalCorrelationSubRegion(mTser[iregion,:],obsTser[iregion,:])
            portrait_return = plotter.draw_portrait_diagram(portrait_subregion, rowlabels, collabels[0:nmodel], workdir+'/portrait_diagram_'+season_name[iseason]+'_'+varName, fmt='png', 
                             gridshape=(2, 2), xlabel='', ylabel='', clabel='', 
                             ptitle='', subtitles=portrait_titles, cmap=None, clevs=None, 
                             nlevs=10, extend='neither')  
            # annual cycle
            nmonth = 12
            times = np.arange(nmonth)
            data_names = [obsList[0]] + list(mdlList)
            annual_cycle = np.zeros([nregion, nmonth, nmodel+1])
            obsTser, annual_cycle[:, :, 0] = calcAnnualCycleMeansSubRegion(obsRgn[0,:,:])
            obsStd = calcAnnualCycleStdevSubRegion(obsRgn[0,:,:])
            for imodel in np.arange(nmodel):
                mdlTser, annual_cycle[:, :, imodel+1] = calcAnnualCycleMeansSubRegion(mdlRgn[imodel, :, :])
            # Make annual_cycle shape compatible with draw_time_series
            annual_cycle = annual_cycle.swapaxes(1, 2)
            tseries_return = plotter.draw_time_series(annual_cycle, times, data_names, workdir+'/time_series_'+varName, gridshape=(7, 2), 
                  subtitles=rowlabels, label_month=True)
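To make the array shapes expected by calculate_metrics_and_make_plots concrete, here is a minimal sketch of dummy inputs matching how the function indexes its arguments; the sizes are invented, and the commented-out call assumes the helper functions used above (calcClimYear and friends) are in scope.

import numpy as np
import numpy.ma as ma

nobs, nmodel, nt, ny, nx, nrgn = 1, 2, 24, 10, 12, 3

obsData = ma.zeros([nobs, nt, ny, nx])    # observations: [nobs, time, lat, lon]
mdlData = ma.zeros([nmodel, nt, ny, nx])  # models: [nmodel, time, lat, lon]
obsRgn = ma.zeros([nobs, nrgn, nt])       # sub-region obs time series
mdlRgn = ma.zeros([nmodel, nrgn, nt])     # sub-region model time series
lats = np.linspace(30., 40., ny)
lons = np.linspace(-110., -100., nx)

# With subRegions=False the sub-region boundary arguments are unused, so
# placeholders are passed (hypothetical call, not from the original file):
# calculate_metrics_and_make_plots('prec', '.', lons, lats, obsData, mdlData,
#                                  obsRgn, mdlRgn, ['OBS'], ['M1', 'M2'],
#                                  False, None, None, None, None)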
Example #16
    ssl._create_default_https_context = ssl._create_unverified_context

# rectangular boundary
min_lat = 15.75
max_lat = 55.75
min_lon = -125.75
max_lon = -66.75

start_time = datetime(1998, 1, 1)
end_time = datetime(1998, 12, 31)

TRMM_dataset = rcmed.parameter_dataset(3, 36, min_lat, max_lat, min_lon, max_lon,
                                       start_time, end_time)

Cuba_and_Bahamas_bounds = Bounds(
    boundary_type='countries', countries=['Cuba', 'Bahamas'])
# extract=False masks out the data over Cuba and the Bahamas
TRMM_dataset2 = dsp.subset(
    TRMM_dataset, Cuba_and_Bahamas_bounds, extract=False)

plotter.draw_contour_map(ma.mean(TRMM_dataset2.values, axis=0), TRMM_dataset2.lats,
                         TRMM_dataset2.lons, fname='TRMM_without_Cuba_and_Bahamas')

NCA_SW_bounds = Bounds(boundary_type='us_states', us_states=[
                       'CA', 'NV', 'UT', 'AZ', 'NM', 'CO'])
# extract=True keeps only the data over the NCA Southwest states
TRMM_dataset3 = dsp.subset(TRMM_dataset2, NCA_SW_bounds, extract=True)

plotter.draw_contour_map(ma.mean(TRMM_dataset3.values, axis=0),
                         TRMM_dataset3.lats, TRMM_dataset3.lons, fname='TRMM_NCA_SW')
Example #17
# Step 6: Make a Plot from the Evaluation.results.
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_target_datasets, num_metrics) if no subregion
# Accessing the actual results when we have used 3 datasets and 1 metric is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0]

# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")

lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT

# Using a 3 x N grid since we have N year(s) of data for 3 models
gridshape = (3, end_time.year - start_time.year + 1)

plotnames = ["KNMI", "WRF311", "ENSEMBLE"]
for i in np.arange(3):
    plot_title = "TASMAX Bias of CRU 3.1 vs. %s (%s - %s)" % (
        plotnames[i], start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m"))
    output_file = "%s_%s" % (fname, plotnames[i].lower())
    print("creating %s" % (output_file,))
    plotter.draw_contour_map(results[i, :], lats, lons, output_file,
                             gridshape=gridshape, ptitle=plot_title)
Example #18
def _generate_evaluation_plots(evaluation, lat_bins, lon_bins, eval_time_stamp):
    ''' Generate the Evaluation's plots

    .. note: This doesn't support graphing evaluations with subregion data.

    :param evaluation: A run Evaluation for which to generate plots.
    :type evaluation: ocw.evaluation.Evaluation
    :param lat_bins: The latitude bin values used in the evaluation.
    :type lat_bins: List
    :param lon_bins: The longitude bin values used in the evaluation.
    :type lon_bins: List
    :param eval_time_stamp: The time stamp for the directory where
        evaluation results should be saved.
    :type eval_time_stamp: Time stamp of the form '%Y-%m-%d_%H-%M-%S'

    :raises ValueError: If there aren't any results to graph.
    '''
    # Create time stamp version-ed WORK_DIR for plotting
    eval_path = os.path.join(WORK_DIR, eval_time_stamp)
    os.makedirs(eval_path)

    # TODO: Should be able to check for None here...
    if evaluation.results == [] and evaluation.unary_results == []:
        cur_frame = sys._getframe().f_code
        err = "{}.{}: No results to graph".format(cur_frame.co_filename,
												  cur_frame.co_name)
        raise ValueError(err)

    if evaluation.ref_dataset:
        grid_shape_dataset = evaluation.ref_dataset
    else:
        grid_shape_dataset = evaluation.target_datasets[0]

    grid_shape = _calculate_grid_shape(grid_shape_dataset)

    if evaluation.results != []:
        for dataset_index, dataset in enumerate(evaluation.target_datasets):
            for metric_index, metric in enumerate(evaluation.metrics):
                results = evaluation.results[dataset_index][metric_index]
                file_name = _generate_binary_eval_plot_file_path(evaluation,
                                                                 dataset_index,
                                                                 metric_index,
                                                                 eval_time_stamp)
                plot_title = _generate_binary_eval_plot_title(evaluation,
                                                              dataset_index,
                                                              metric_index)
                plotter.draw_contour_map(results,
                                         lat_bins,
                                         lon_bins,
                                         fname=file_name,
                                         ptitle=plot_title,
                                         gridshape=grid_shape)

    if evaluation.unary_results != []:
        for metric_index, metric in enumerate(evaluation.unary_metrics):
            cur_unary_results = evaluation.unary_results[metric_index]
            for result_index, result in enumerate(cur_unary_results):
                file_name = _generate_unary_eval_plot_file_path(evaluation,
                                                                result_index,
                                                                metric_index,
                                                                eval_time_stamp)
                plot_title = _generate_unary_eval_plot_title(evaluation,
                                                             result_index,
                                                             metric_index)

                plotter.draw_contour_map(result,
                                         lat_bins,
                                         lon_bins,
                                         fname=file_name,
                                         ptitle=plot_title,
                                         gridshape=grid_shape)
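The eval_time_stamp argument is just a formatted time string; a small sketch of how a caller might build it in the documented '%Y-%m-%d_%H-%M-%S' form (the evaluation object and bins in the commented call are placeholders):

from datetime import datetime

# Directory-friendly time stamp, e.g. '2015-07-30_14-05-09'
eval_time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
# _generate_evaluation_plots(my_evaluation, lat_bins, lon_bins, eval_time_stamp)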
Example #20
                      [knmi_dataset, wrf311_dataset, ensemble_dataset],
                      [bias])
print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()
 
""" Step 6: Make a Plot from the Evaluation.results """
# The Evaluation.results are a set of nested lists to support many different
# possible Evaluation scenarios.
#
# The Evaluation results docs say:
# The shape of results is (num_target_datasets, num_metrics) if no subregion
# Accessing the actual results when we have used 3 datasets and 1 metric is
# done this way:
print("Accessing the Results of the Evaluation run")
results = bias_evaluation.results[0]
 
# From the bias output I want to make a Contour Map of the region
print("Generating a contour map using ocw.plotter.draw_contour_map()")
 
lats = new_lats
lons = new_lons
fname = OUTPUT_PLOT
gridshape = (3, 1)  # Using a 3 x 1 since we have a 1 year of data for 3 models
plotnames = ["KNMI", "WRF311", "ENSEMBLE"]
for i in np.arange(3):
  plot_title = "TASMAX Bias of CRU 3.1 vs. %s (%s - %s)" % (plotnames[i], start_time.strftime("%Y/%d/%m"), end_time.strftime("%Y/%d/%m"))
  output_file = "%s_%s" % (fname, plotnames[i].lower())
  print "creating %s" % (output_file,)
  plotter.draw_contour_map(results[i,:], lats, lons, output_file,
                         gridshape=gridshape, ptitle=plot_title)
Example #21
#way to get the mean. Note the function exists in util.py 
_, CRU31.values = utils.calc_climatology_year(CRU31)
CRU31.values = np.expand_dims(CRU31.values, axis=0)

for member, each_target_dataset in enumerate(target_datasets):
  _,target_datasets[member].values = utils.calc_climatology_year(target_datasets[member])
  target_datasets[member].values = np.expand_dims(target_datasets[member].values, axis=0)


for target in target_datasets:
  allNames.append(target.name)

#determine the metrics
mean_bias = metrics.Bias()

#create the Evaluation object
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, # Reference dataset for the evaluation
                                    # list of target datasets for the evaluation
                                    target_datasets,
                                    # 1 or more metrics to use in the evaluation
                                    [mean_bias])   
RCMs_to_CRU_evaluation.run()

#extract the relevant data from RCMs_to_CRU_evaluation.results 
#the results returns a list (num_target_datasets, num_metrics). See docs for further details
rcm_bias = RCMs_to_CRU_evaluation.results[:][0] 
#remove the metric dimension
new_rcm_bias = np.squeeze(np.array(RCMs_to_CRU_evaluation.results))

plotter.draw_contour_map(new_rcm_bias, new_lats, new_lons, gridshape=(2, 5),fname=OUTPUT_PLOT, subtitles=allNames, cmap='coolwarm_r')
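The np.squeeze(np.array(...)) step above simply drops the singleton metric dimension so the per-model bias maps stack into one plottable array. A standalone illustration with dummy results (shapes invented):

import numpy as np

# Fake results: 10 target datasets x 1 metric, each a 2-D (lat, lon) field.
fake_results = [[np.zeros((45, 90))] for _ in range(10)]
stacked = np.array(fake_results)  # shape (10, 1, 45, 90)
squeezed = np.squeeze(stacked)    # shape (10, 45, 90)
print(stacked.shape, squeezed.shape)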
Example #23
def run_screen(model_datasets, models_info, observations_info,
               overlap_start_time, overlap_end_time, overlap_min_lat,
               overlap_max_lat, overlap_min_lon, overlap_max_lon,
               temp_grid_setting, spatial_grid_setting, working_directory, plot_title):
     '''Generates a screen to show the running evaluation process.

     :param model_datasets: list of model dataset objects
     :type model_datasets: list
     :param models_info: list of dictionaries that contain information for each model
     :type models_info: list
     :param observations_info: list of dictionaries that contain information for each observation
     :type observations_info: list
     :param overlap_start_time: overlap start time between model and obs start time
     :type overlap_start_time: datetime
     :param overlap_end_time: overlap end time between model and obs end time
     :type overlap_end_time: datetime
     :param overlap_min_lat: overlap minimum lat between model and obs minimum lat
     :type overlap_min_lat: float
     :param overlap_max_lat: overlap maximum lat between model and obs maximum lat
     :type overlap_max_lat: float
     :param overlap_min_lon: overlap minimum lon between model and obs minimum lon
     :type overlap_min_lon: float
     :param overlap_max_lon: overlap maximum lon between model and obs maximum lon
     :type overlap_max_lon: float
     :param temp_grid_setting: temporal grid option such as hourly, daily, monthly and annually
     :type temp_grid_setting: string
     :param spatial_grid_setting: spatial grid spacing (degrees) used to build the regridding lat/lon arrays
     :type spatial_grid_setting: string
     :param working_directory: path to a directory for storing outputs
     :type working_directory: string
     :param plot_title: Title for plot
     :type plot_title: string
     '''

     option = None
     if option != "0":
          ready_screen("manage_obs_screen")
          y = screen.getmaxyx()[0]
          screen.addstr(2, 2, "Evaluation started....")
          screen.refresh()

          OUTPUT_PLOT = "plot"

          dataset_id = int(observations_info[0]['dataset_id'])       #just accepts one dataset at this time
          parameter_id = int(observations_info[0]['parameter_id'])  #just accepts one dataset at this time

          new_bounds = Bounds(overlap_min_lat, overlap_max_lat, overlap_min_lon, overlap_max_lon, overlap_start_time, overlap_end_time)
          model_dataset = dsp.subset(new_bounds, model_datasets[0])   #just accepts one model at this time

          #Getting bound info of the subsetted model file to retrieve obs data with the same bounds as the subsetted model
          new_model_spatial_bounds = model_dataset.spatial_boundaries()
          new_model_temp_bounds = model_dataset.time_range()
          new_min_lat = new_model_spatial_bounds[0]
          new_max_lat = new_model_spatial_bounds[1]
          new_min_lon = new_model_spatial_bounds[2]
          new_max_lon = new_model_spatial_bounds[3]
          new_start_time = new_model_temp_bounds[0]
          new_end_time = new_model_temp_bounds[1]

          screen.addstr(4, 4, "Retrieving data...")
          screen.refresh()

          #Retrieve obs data
          obs_dataset = rcmed.parameter_dataset(
                                        dataset_id,
                                        parameter_id,
                                        new_min_lat,
                                        new_max_lat,
                                        new_min_lon,
                                        new_max_lon,
                                        new_start_time,
                                        new_end_time)
          screen.addstr(4, 4, "--> Data retrieved.")
          screen.refresh()

          screen.addstr(5, 4, "Temporally regridding...")
          screen.refresh()
          if temp_grid_setting.lower() == 'hourly':
               days = 0.5
          elif temp_grid_setting.lower() == 'daily':
               days = 1
          elif temp_grid_setting.lower() == 'monthly':
               days = 31
          else:
               days = 365
          model_dataset = dsp.temporal_rebin(model_dataset, timedelta(days))
          obs_dataset = dsp.temporal_rebin(obs_dataset, timedelta(days))
          screen.addstr(5, 4, "--> Temporally regridded.")
          screen.refresh()

          new_lats = np.arange(new_min_lat, new_max_lat, spatial_grid_setting)
          new_lons = np.arange(new_min_lon, new_max_lon, spatial_grid_setting)

          screen.addstr(6, 4, "Spatially regridding...")
          screen.refresh()
          spatial_gridded_model = dsp.spatial_regrid(model_dataset, new_lats, new_lons)
          spatial_gridded_obs = dsp.spatial_regrid(obs_dataset, new_lats, new_lons)
          screen.addstr(6, 4, "--> Spatially regridded.")
          screen.refresh()

          screen.addstr(7, 4, "Setting up metrics...")
          screen.refresh()
          bias = metrics.Bias()
          bias_evaluation = evaluation.Evaluation(spatial_gridded_model, [spatial_gridded_obs], [bias])
          screen.addstr(7, 4, "--> Metrics setting done.")
          screen.refresh()

          screen.addstr(8, 4, "Running evaluation.....")
          screen.refresh()
          bias_evaluation.run()
          results = bias_evaluation.results[0][0]
          screen.addstr(8, 4, "--> Evaluation Finished.")
          screen.refresh()

          screen.addstr(9, 4, "Generating plots....")
          screen.refresh()
          lats = new_lats
          lons = new_lons

          gridshape = (1, 1)
          sub_titles = [""]   #No subtitle set for now

          if not os.path.exists(working_directory):
               os.makedirs(working_directory)

          for i in range(len(results)):
               fname = working_directory + OUTPUT_PLOT + str(i)
               plotter.draw_contour_map(results[i], lats, lons, fname,
                               gridshape=gridshape, ptitle=plot_title,
                               subtitles=sub_titles)
          screen.addstr(9, 4, "--> Plots generated.")
          screen.refresh()
          screen.addstr(y-2, 1, "Press 'enter' to Exit: ")
          option = screen.getstr()