def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):
    # Calculate climatological mean fields.
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = []
    for dataset in model_datasets:
        model_clim_datasets.append(
            ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                       utils.calc_temporal_mean(dataset)))

    # Metric: spatial standard deviation and pattern correlation.
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # Create the Evaluation object.
    taylor_evaluation = Evaluation(
        obs_clim_dataset,     # Climatological mean of the reference dataset
        model_clim_datasets,  # Climatological means of the model datasets
        [taylor_diagram])

    # Run the evaluation.
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]
    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name,
                                pos='upper right', frameon=False)
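# A minimal usage sketch for the helper above, with the OCW imports it relies
# on. The file paths and the 'tas' variable name are hypothetical, and the
# datasets are assumed to already share a common spatial grid.
import ocw.dataset as ds
import ocw.data_source.local as local
import ocw.metrics as metrics
import ocw.plotter as plotter
import ocw.utils as utils
from ocw.evaluation import Evaluation

obs_dataset = local.load_file('/path/to/obs.nc', 'tas')      # hypothetical path
model_dataset = local.load_file('/path/to/model.nc', 'tas')  # hypothetical path
Taylor_diagram_spatial_pattern_of_multiyear_climatology(
    obs_dataset, 'OBS', [model_dataset], ['MODEL'], 'taylor_climatology')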
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):
    # Calculate climatological mean fields (the datasets are modified in place).
    obs_dataset.values = utils.calc_temporal_mean(obs_dataset)
    for dataset in model_datasets:
        dataset.values = utils.calc_temporal_mean(dataset)

    # Metric: spatial standard deviation and pattern correlation.
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # Create the Evaluation object.
    taylor_evaluation = Evaluation(
        obs_dataset,     # Reference dataset for the evaluation
        model_datasets,  # List of target datasets for the evaluation
        [taylor_diagram])

    # Run the evaluation.
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]
    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name, file_name,
                                pos='upper right', frameon=False)
def _draw_taylor_diagram(evaluation, plot_config):
    """Draw a Taylor diagram from an evaluation's results using plot_config."""
    plot_name = plot_config['output_name']
    ref_dataset_name = evaluation.ref_dataset.name
    target_dataset_names = [t.name for t in evaluation.target_datasets]

    if len(plot_config['stddev_results_indices'][0]) == 2:
        stddev_results = [
            evaluation.results[tar][met]
            for (tar, met) in plot_config['stddev_results_indices']
        ]
        pattern_corr_results = [
            evaluation.results[tar][met]
            for (tar, met) in plot_config['pattern_corr_results_indices']
        ]
    elif len(plot_config['stddev_results_indices'][0]) == 3:
        stddev_results = [
            evaluation.results[tar][met][sub]
            for (tar, met, sub) in plot_config['stddev_results_indices']
        ]
        pattern_corr_results = [
            evaluation.results[tar][met][sub]
            for (tar, met, sub) in plot_config['pattern_corr_results_indices']
        ]

    plot_data = np.array([stddev_results, pattern_corr_results]).transpose()

    plots.draw_taylor_diagram(plot_data, target_dataset_names,
                              ref_dataset_name, fname=plot_name,
                              **plot_config.get('optional_args', {}))
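# A sketch of the plot_config mapping consumed by _draw_taylor_diagram above.
# The index tuples are (target, metric) positions -- or (target, metric,
# subregion) triples -- into evaluation.results; every value below is
# illustrative only, not a required default.
example_plot_config = {
    'output_name': 'taylor_diagram',
    # One entry per point to plot on the diagram.
    'stddev_results_indices': [(0, 0)],
    'pattern_corr_results_indices': [(0, 1)],
    # Forwarded as keyword arguments to plots.draw_taylor_diagram.
    'optional_args': {'fmt': 'png', 'frameon': False},
}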
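# A sketch of where the grid bounds used in the regridding snippet below can
# come from, following the standard OCW examples: one dataset's spatial extent
# via the Dataset.spatial_boundaries() helper.
min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()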
new_lats = numpy.arange(min_lat, max_lat, 1)
new_lons = numpy.arange(min_lon, max_lon, 1)

# Spatially regrid the datasets using the new_lats, new_lons numpy arrays.
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)

# Load the metrics that we want to use for the evaluation.
################################################################################
taylor_diagram = metrics.SpatialPatternTaylorDiagram()

# Create our new evaluation object. The KNMI dataset is the evaluation's
# reference dataset. We then provide a list of one or more target datasets to
# use for the evaluation; in this case, only the WRF dataset. Finally, we pass
# a list of all the metrics that we want to use in the evaluation.
################################################################################
test_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset],
                                        [taylor_diagram])
test_evaluation.run()

# Pull out the evaluation results and prepare them for the Taylor diagram.
################################################################################
taylor_data = test_evaluation.results[0]

# Draw our Taylor diagram!
################################################################################
plotter.draw_taylor_diagram(taylor_data, [wrf_dataset.name], knmi_dataset.name,
                            fname='taylor_plot', fmt='png', frameon=False)
for member, each_target_dataset in enumerate(target_datasets):
    target_datasets[member].values = utils.calc_temporal_mean(
        target_datasets[member])

allNames = []
for target in target_datasets:
    allNames.append(target.name)

# Calculate the metrics.
taylor_diagram = metrics.SpatialPatternTaylorDiagram()

# Create the Evaluation object.
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,             # Reference dataset for the evaluation
    target_datasets,   # 1 or more target datasets for the evaluation
    [taylor_diagram])  # 1 or more metrics to use in the evaluation

RCMs_to_CRU_evaluation.run()

taylor_data = RCMs_to_CRU_evaluation.results[0]
plotter.draw_taylor_diagram(taylor_data, allNames, "CRU31", fname=OUTPUT_PLOT,
                            fmt='png', frameon=False)
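# The evaluation above assumes CRU31 and all target datasets already share one
# grid. A minimal alignment sketch in the style of the regridding snippet
# earlier; dsp is assumed to be ocw.dataset_processor and the 1-degree spacing
# is illustrative.
min_lat, max_lat, min_lon, max_lon = CRU31.spatial_boundaries()
new_lats = np.arange(min_lat, max_lat, 1.0)
new_lons = np.arange(min_lon, max_lon, 1.0)
CRU31 = dsp.spatial_regrid(CRU31, new_lats, new_lons)
target_datasets = [dsp.spatial_regrid(target, new_lats, new_lons)
                   for target in target_datasets]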
# Calculate the metrics.
pattern_correlation = metrics.PatternCorrelation()
spatial_std_dev = metrics.StdDevRatio()

# Create the Evaluation object.
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,            # Reference dataset for the evaluation
    target_datasets,  # 1 or more target datasets for the evaluation
    # 1 or more metrics to use in the evaluation
    [spatial_std_dev, pattern_correlation])
RCMs_to_CRU_evaluation.run()

rcm_std_dev = [results[0] for results in RCMs_to_CRU_evaluation.results]
rcm_pat_cor = [results[1] for results in RCMs_to_CRU_evaluation.results]

taylor_data = np.array([rcm_std_dev, rcm_pat_cor]).transpose()
new_taylor_data = np.squeeze(np.array(taylor_data))

plotter.draw_taylor_diagram(new_taylor_data, allNames, "CRU31",
                            fname=OUTPUT_PLOT, fmt='png', frameon=False)
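# Alternative: metrics.SpatialPatternTaylorDiagram(), used by the other
# snippets in this section, computes the stddev ratio and the pattern
# correlation as one metric, so the manual array assembly above is
# unnecessary. A sketch with the same inputs:
taylor_diagram = metrics.SpatialPatternTaylorDiagram()
RCMs_to_CRU_evaluation = evaluation.Evaluation(CRU31, target_datasets,
                                               [taylor_diagram])
RCMs_to_CRU_evaluation.run()
taylor_data = RCMs_to_CRU_evaluation.results[0]
plotter.draw_taylor_diagram(taylor_data, allNames, "CRU31",
                            fname=OUTPUT_PLOT, fmt='png', frameon=False)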
def calculate_metrics_and_make_plots(varName, workdir, lons, lats, obsData,
                                     mdlData, obsRgn, mdlRgn, obsList, mdlList,
                                     subRegions, subRgnLon0, subRgnLon1,
                                     subRgnLat0, subRgnLat1):
    '''
    Purpose:: Calculate all the metrics used in the Kim et al. [2013] paper
              and plot them.

    Input::
        varName - name of the variable being evaluated
        workdir - directory where the output figures are written
        lons - longitudes of the analysis grid
        lats - latitudes of the analysis grid
        obsData - observation data array [nobs, nt, ny, nx]
        mdlData - model data array [nmodel, nt, ny, nx]
        obsRgn - sub-region averaged observation time series
        mdlRgn - sub-region averaged model time series
        obsList - names of the observation datasets
        mdlList - names of the model datasets
        subRegions - whether to perform the sub-region analysis
        subRgnLon0, subRgnLat0 - southwest boundary of sub-regions [numSubRgn]
        subRgnLon1, subRgnLat1 - northeast boundary of sub-regions [numSubRgn]

    Output:: png files
    '''
    nobs, nt, ny, nx = obsData.shape
    nmodel = mdlData.shape[0]

    # TODO: unit conversion (K to C)
    if varName == 'temp':
        obsData[0, :, :, :] = obsData[0, :, :, :] - 273.15
        if subRegions:
            obsRgn[0, :, :] = obsRgn[0, :, :] - 273.15
    if varName == 'prec' and obsData.max() > mdlData.max() * 1000.:
        mdlData[:, :, :, :] = mdlData[:, :, :, :] * 86400.
        if subRegions:
            mdlRgn[:, :, :] = mdlRgn[:, :, :] * 86400.

    oTser, oClim = calcClimYear(obsData[0, :, :, :])
    bias_of_overall_average = ma.zeros([nmodel, ny, nx])
    spatial_stdev_ratio = np.zeros([nmodel])
    spatial_corr = np.zeros([nmodel])
    mdlList.append('ENS')

    for imodel in np.arange(nmodel):
        mTser, mClim = calcClimYear(mdlData[imodel, :, :, :])
        bias_of_overall_average[imodel, :, :] = calcBias(mClim, oClim)
        spatial_corr[imodel], sigLev = calcPatternCorrelation(oClim, mClim)
        spatial_stdev_ratio[imodel] = calcSpatialStdevRatio(mClim, oClim)

    fig_return = plotter.draw_contour_map(
        oClim, lats, lons, workdir + '/observed_climatology_' + varName,
        fmt='png', gridshape=(1, 1), clabel='', ptitle='', subtitles=obsList,
        cmap=None, clevs=None, nlevs=10, parallels=None, meridians=None,
        extend='neither')

    # TODO: Update the "gridshape" argument to match the number of subplots
    # (rows, columns). This should be improved so that the gridshape is
    # determined optimally for a given number of models. For example, for
    # 3 models a gridshape of (2, 2) would be sensible:
    #     X X
    #     X
    fig_return = plotter.draw_contour_map(
        bias_of_overall_average, lats, lons,
        workdir + '/bias_of_climatology_' + varName, fmt='png',
        gridshape=(6, 2), clabel='', ptitle='', subtitles=mdlList, cmap=None,
        clevs=None, nlevs=10, parallels=None, meridians=None, extend='neither')

    Taylor_data = np.array([spatial_stdev_ratio, spatial_corr]).transpose()
    fig_return = plotter.draw_taylor_diagram(
        Taylor_data, mdlList, refname='CRU',
        fname=workdir + '/Taylor_' + varName, fmt='png', frameon=False)

    if subRegions:
        nseason = 2  # (0: summer and 1: winter)
        nregion = len(subRgnLon0)
        season_name = ['summer', 'winter']
        rowlabels = ['PNw', 'PNe', 'CAn', 'CAs', 'SWw', 'SWe', 'COL',
                     'GPn', 'GPc', 'GC', 'GL', 'NE', 'SE', 'FL']
        collabels = ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'ENS']
        collabels[nmodel - 1] = 'ENS'

        for iseason in [0, 1]:
            portrait_subregion = np.zeros([4, nregion, nmodel])
            portrait_titles = ['(a) Normalized Bias', '(b) Normalized STDV',
                               '(c) Normalized RMSE', '(d) Correlation']
            if iseason == 0:
                monthBegin = 6
                monthEnd = 8
            if iseason == 1:
                monthBegin = 12
                monthEnd = 2
            obsTser, obsClim = calcClimSeasonSubRegion(monthBegin, monthEnd,
                                                       obsRgn[0, :, :])
            for imodel in np.arange(nmodel):
                mTser, mClim = calcClimSeasonSubRegion(monthBegin, monthEnd,
                                                       mdlRgn[imodel, :, :])
                for iregion in np.arange(nregion):
                    portrait_subregion[0, iregion, imodel] = (
                        calcBias(mClim[iregion], obsClim[iregion]) /
                        calcTemporalStdev(obsTser[iregion, :]))
                    portrait_subregion[1, iregion, imodel] = (
                        calcTemporalStdev(mTser[iregion, :]) /
                        calcTemporalStdev(obsTser[iregion, :]))
                    portrait_subregion[2, iregion, imodel] = (
                        calcRootMeanSquaredDifferenceAveragedOverTime(
                            mTser[iregion, :], obsTser[iregion, :]) /
                        calcTemporalStdev(obsTser[iregion, :]))
                    portrait_subregion[3, iregion, imodel] = (
                        calcTemporalCorrelationSubRegion(
                            mTser[iregion, :], obsTser[iregion, :]))
            portrait_return = plotter.draw_portrait_diagram(
                portrait_subregion, rowlabels, collabels[0:nmodel],
                workdir + '/portrait_diagram_' + season_name[iseason] +
                '_' + varName, fmt='png', gridshape=(2, 2), xlabel='',
                ylabel='', clabel='', ptitle='', subtitles=portrait_titles,
                cmap=None, clevs=None, nlevs=10, extend='neither')

        # Annual cycle.
        nmonth = 12
        times = np.arange(nmonth)
        data_names = [obsList[0]] + list(mdlList)
        annual_cycle = np.zeros([nregion, nmonth, nmodel + 1])
        obsTser, annual_cycle[:, :, 0] = calcAnnualCycleMeansSubRegion(
            obsRgn[0, :, :])
        obsStd = calcAnnualCycleStdevSubRegion(obsRgn[0, :, :])
        for imodel in np.arange(nmodel):
            mdlTser, annual_cycle[:, :, imodel + 1] = (
                calcAnnualCycleMeansSubRegion(mdlRgn[imodel, :, :]))
        # Make annual_cycle shape compatible with draw_time_series.
        annual_cycle = annual_cycle.swapaxes(1, 2)
        tseries_return = plotter.draw_time_series(
            annual_cycle, times, data_names,
            workdir + '/time_series_' + varName, gridshape=(7, 2),
            subtitles=rowlabels, label_month=True)
# The seasonal metrics below are built with the SEASON_MONTH_START and
# SEASON_MONTH_END values set in the configuration section.
spatial_std_dev_ratio = metrics.SeasonalSpatialStdDevRatio(
    month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)
pattern_correlation = metrics.SeasonalPatternCorrelation(
    month_start=SEASON_MONTH_START, month_end=SEASON_MONTH_END)

# Create our example evaluation.
example_eval = evaluation.Evaluation(
    ref_dataset,       # Reference dataset for the evaluation
    [target_dataset],  # 1 or more target datasets for the evaluation
    # 1 or more metrics to use in the evaluation
    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

plotter.draw_contour_map(example_eval.results[0][0], new_lats, new_lons,
                         'lund_example_time_averaged_bias',
                         gridshape=(1, 1), ptitle='Time Averaged Bias')

spatial_stddev_ratio = example_eval.results[0][1]
# Pattern correlation results are a tuple, so we need to index and grab
# the component we care about.
spatial_correlation = example_eval.results[0][2][0]

taylor_data = np.array([[spatial_stddev_ratio],
                        [spatial_correlation]]).transpose()

plotter.draw_taylor_diagram(taylor_data, [target_dataset.name],
                            ref_dataset.name,
                            fname='lund_example_taylor_diagram', frameon=False)
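# The indexing above assumes the results are laid out per target and then per
# metric, in the order the metrics were passed to the Evaluation:
#   results[0][0] -> mean_bias field (plotted as the contour map)
#   results[0][1] -> spatial standard deviation ratio (a scalar)
#   results[0][2] -> pattern correlation result; a tuple whose first element
#                    is the correlation itself (hence the trailing [0])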
example_eval = evaluation.Evaluation(
    ref_dataset,      # Reference dataset for the evaluation
    target_datasets,  # 1 or more target datasets for the evaluation
    # 1 or more metrics to use in the evaluation
    [mean_bias, spatial_std_dev_ratio, pattern_correlation])
example_eval.run()

spatial_stddev_ratio = []
spatial_correlation = []
taylor_dataset_names = []  # List of dataset names for the Taylor diagram
for i, target in enumerate(example_eval.target_datasets):
    # For each target dataset in the evaluation, draw a contour map.
    plotter.draw_contour_map(example_eval.results[i][0], new_lats, new_lons,
                             'lund_{}_{}_time_averaged_bias'.format(
                                 ref_dataset.name, target.name),
                             gridshape=(1, 1), ptitle='Time Averaged Bias')
    taylor_dataset_names.append(target.name)
    # Grab data for the Taylor diagram.
    spatial_stddev_ratio.append(example_eval.results[i][1])
    # Pattern correlation results are a tuple, so we need to index and grab
    # the component we care about.
    spatial_correlation.append(example_eval.results[i][2][0])

taylor_data = np.array([spatial_stddev_ratio,
                        spatial_correlation]).transpose()

plotter.draw_taylor_diagram(taylor_data, taylor_dataset_names,
                            ref_dataset.name,
                            fname='lund_taylor_diagram', frameon=False)
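# Common to all of the snippets above: plotter.draw_taylor_diagram consumes an
# array of shape (num_datasets, 2) whose columns are (stddev ratio, pattern
# correlation). A tiny synthetic sketch (the numbers are made up):
demo_data = np.array([[1.10, 0.92],   # model A: stddev ratio, correlation
                      [0.85, 0.88]])  # model B
plotter.draw_taylor_diagram(demo_data, ['Model A', 'Model B'], 'Reference',
                            fname='demo_taylor_diagram', frameon=False)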