def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):
    """Draw a Taylor diagram of model climatologies against an observation.

    :param obs_dataset: reference (observation) dataset for the evaluation
    :param obs_name: display name of the reference dataset
    :param model_datasets: list of target model datasets to evaluate
    :param model_names: display names matching ``model_datasets``
    :param file_name: output file name for the plotted diagram
    """
    # Build climatological-mean copies rather than assigning the
    # time-collapsed mean back onto ``.values`` of the inputs: overwriting
    # ``values`` in place mutates the caller's datasets and leaves ``times``
    # (12 entries) inconsistent with the now time-less ``values`` array.
    obs_clim_dataset = ds.Dataset(obs_dataset.lats, obs_dataset.lons,
                                  obs_dataset.times,
                                  utils.calc_temporal_mean(obs_dataset))
    model_clim_datasets = [
        ds.Dataset(dataset.lats, dataset.lons, dataset.times,
                   utils.calc_temporal_mean(dataset))
        for dataset in model_datasets]

    # Metric: spatial standard deviation and pattern correlation.
    taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    # Create the Evaluation object and run it.
    taylor_evaluation = Evaluation(
        obs_clim_dataset,     # climatological mean of the reference dataset
        model_clim_datasets,  # climatological means of the model datasets
        [taylor_diagram])
    taylor_evaluation.run()

    taylor_data = taylor_evaluation.results[0]
    plotter.draw_taylor_diagram(taylor_data, model_names, obs_name,
                                file_name, pos='upper right', frameon=False)
def Taylor_diagram_spatial_pattern_of_multiyear_climatology(
        obs_dataset, obs_name, model_datasets, model_names, file_name):
    """Draw a Taylor diagram of model climatologies against an observation.

    :param obs_dataset: reference (observation) dataset for the evaluation
    :param obs_name: display name of the reference dataset
    :param model_datasets: list of target model datasets to evaluate
    :param model_names: display names matching ``model_datasets``
    :param file_name: output file name for the plotted diagram
    """
    # Wrap the multi-year (temporal) means in new Dataset objects so the
    # caller's inputs are left untouched.
    reference_climatology = ds.Dataset(
        obs_dataset.lats, obs_dataset.lons, obs_dataset.times,
        utils.calc_temporal_mean(obs_dataset))
    target_climatologies = [
        ds.Dataset(model.lats, model.lons, model.times,
                   utils.calc_temporal_mean(model))
        for model in model_datasets]

    # Metric: spatial standard deviation and pattern correlation.
    diagram_metric = metrics.SpatialPatternTaylorDiagram()

    # Evaluate every climatological model field against the reference.
    evaluation_run = Evaluation(reference_climatology,
                                target_climatologies,
                                [diagram_metric])
    evaluation_run.run()

    plotter.draw_taylor_diagram(evaluation_run.results[0], model_names,
                                obs_name, file_name, pos='upper right',
                                frameon=False)
def setUp(self):
    """Create the Taylor-diagram metric plus reference/target test datasets.

    Both datasets share identical 5-point unit lat/lon axes and 12 monthly
    timestamps for the year 2000; only their value arrays differ.
    """
    self.taylor_diagram = metrics.SpatialPatternTaylorDiagram()

    def unit_axis():
        # Fresh array of five 1.0 coordinates per dataset axis.
        return np.ones(5)

    def monthly_times():
        # One timestamp for the first of each month in 2000.
        return np.array([dt.datetime(2000, m, 1) for m in range(1, 13)])

    # Reference: 300 values stepping by 5, reshaped to (time=12, lat=5, lon=5).
    self.ref_dataset = Dataset(unit_axis(), unit_axis(), monthly_times(),
                               np.arange(0, 1500, 5).reshape(12, 5, 5),
                               'ds1')
    # Target: 300 values stepping by 2, same shape.
    self.tar_dataset = Dataset(unit_axis(), unit_axis(), monthly_times(),
                               np.arange(0, 600, 2).reshape(12, 5, 5),
                               'ds2')
# Spatially regrid both datasets onto a common 1-degree grid.
##########################################################################
# Derive the target grid from the reference (KNMI) dataset's spatial
# bounds, stepping one degree along each axis.
min_lat, max_lat, min_lon, max_lon = knmi_dataset.spatial_boundaries()
new_lats = numpy.arange(min_lat, max_lat, 1)
new_lons = numpy.arange(min_lon, max_lon, 1)

# Regrid both datasets onto the shared lat/lon axes.
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)
wrf_dataset = dsp.spatial_regrid(wrf_dataset, new_lats, new_lons)

# Metric used for the evaluation: Taylor-diagram statistics (spatial
# standard deviation and pattern correlation).
##########################################################################
taylor_diagram = metrics.SpatialPatternTaylorDiagram()

# Build and run the evaluation: KNMI is the reference dataset, and WRF is
# the single target dataset, scored with the metric above.
##########################################################################
test_evaluation = evaluation.Evaluation(knmi_dataset, [wrf_dataset],
                                        [taylor_diagram])
test_evaluation.run()

# Pull out the evaluation results in preparation for drawing the diagram.
##########################################################################
taylor_data = test_evaluation.results[0]
# Draw our taylor diagram!