def setUp(self):
    """Build the two Evaluation fixtures used by the tests.

    ``full_evaluation`` pairs a reference dataset with two targets and
    three binary metrics; ``unary_evaluation`` has no reference dataset
    and a single unary metric.
    """
    reference = _create_fake_dataset('Ref')
    targets = [_create_fake_dataset('T1'), _create_fake_dataset('T2')]

    self.full_evaluation = Evaluation(
        reference,
        targets,
        [metrics.TemporalStdDev(), metrics.Bias(), metrics.Bias()]
    )

    # Unary metrics need no reference dataset, hence the ``None``.
    self.unary_evaluation = Evaluation(
        None,
        [_create_fake_dataset('T1'), _create_fake_dataset('T2')],
        [metrics.TemporalStdDev()]
    )
def setUp(self):
    """Create a Bias metric plus a reference and a target Dataset.

    Both datasets cover 12 monthly time steps on a 5x5 grid; the
    reference holds values 0..299 and the target values 300..599, so
    every metric result is fully deterministic.
    """
    self.bias = metrics.Bias()

    # --- Reference dataset -------------------------------------------
    self.reference_lat = np.array([10, 12, 14, 16, 18])
    self.reference_lon = np.array([100, 102, 104, 106, 108])
    self.reference_time = np.array(
        [dt.datetime(2000, month, 1) for month in range(1, 13)])
    raw = np.array(range(300))
    self.reference_value = raw.reshape(12, 5, 5)
    self.reference_variable = 'prec'
    self.reference_dataset = Dataset(self.reference_lat,
                                     self.reference_lon,
                                     self.reference_time,
                                     self.reference_value,
                                     self.reference_variable)

    # --- Target dataset ----------------------------------------------
    self.target_lat = np.array([1, 2, 4, 6, 8])
    self.target_lon = np.array([10, 12, 14, 16, 18])
    self.target_time = np.array(
        [dt.datetime(2001, month, 1) for month in range(1, 13)])
    raw = np.array(range(300, 600))
    self.target_value = raw.reshape(12, 5, 5)
    self.target_variable = 'tasmax'
    self.target_dataset = Dataset(self.target_lat,
                                  self.target_lon,
                                  self.target_time,
                                  self.target_value,
                                  self.target_variable)
""" Spatially Regrid the Dataset Objects to a 1/2 degree grid """
# Build the 0.5-degree target grid from the previously computed bounds.
new_lons = np.arange(min_lon, max_lon, 0.5)
new_lats = np.arange(min_lat, max_lat, 0.5)

# Regrid both datasets onto the shared grid so they can be compared
# cell-for-cell by the metric below.
print("Spatially Regridding the KNMI_Dataset...")
knmi_dataset = dsp.spatial_regrid(knmi_dataset, new_lats, new_lons)

print("Spatially Regridding the CRU31_Dataset...")
cru31_dataset = dsp.spatial_regrid(cru31_dataset, new_lats, new_lons)

print("Final shape of the KNMI_Dataset:%s" % (knmi_dataset.values.shape, ))
print("Final shape of the CRU31_Dataset:%s" % (cru31_dataset.values.shape, ))

""" Step 4: Build a Metric to use for Evaluation - Bias for this example """
# OCW ships with common metrics; Bias is one of them.
print("Setting up a Bias metric to use for evaluation")
bias = metrics.Bias()

""" Step 5: Create an Evaluation Object using Datasets and our Metric """
# Evaluation(reference, targets, metrics, subregions=None) iterates over
# the target and metric lists, so both are passed as Python lists even
# when there is only one element.
print("Making the Evaluation definition")
bias_evaluation = evaluation.Evaluation(knmi_dataset,
                                        [cru31_dataset],
                                        [bias])

print("Executing the Evaluation using the object's run() method")
bias_evaluation.run()

""" Step 6: Make a Plot from the Evaluation.results """
# Evaluation.results is a set of nested lists so many scenarios can be
# represented; with no subregions its shape is
# (num_metrics, num_target_datasets).
# Include the ensemble alongside the individual members for the analysis.
target_datasets.append(target_datasets_ensemble)

# Collapse every dataset to its annual-mean climatology; the helper in
# util.py returns (years, means) and only the means are kept.
_, CRU31.values = utils.calc_climatology_year(CRU31)

for member, each_target_dataset in enumerate(target_datasets):
    _, target_datasets[member].values = utils.calc_climatology_year(
        target_datasets[member])

# Record the dataset names for labelling the results later.
for target in target_datasets:
    allNames.append(target.name)

# The single metric used in this evaluation.
mean_bias = metrics.Bias()

# Evaluation(reference, targets, metrics): CRU31 is the reference and
# every model (plus the ensemble) is a target.
RCMs_to_CRU_evaluation = evaluation.Evaluation(
    CRU31,  # Reference dataset for the evaluation
    # list of target datasets for the evaluation
    target_datasets,
    # 1 or more metrics to use in the evaluation
    [mean_bias])
RCMs_to_CRU_evaluation.run()

# results is shaped (num_target_datasets, num_metrics); with a single
# metric, dropping that dimension leaves the per-dataset bias values.
rcm_bias = RCMs_to_CRU_evaluation.results[0]
@classmethod
def setUpClass(cls):
    """Build the shared Dataset/Evaluation fixtures once per test class.

    Fix: ``setUpClass`` is invoked by unittest on the class itself, so it
    must be a ``@classmethod`` taking ``cls``; as a plain instance method
    it would raise ``TypeError`` (no instance exists when it is called).
    Attributes are therefore set on the class, which instances still see
    through normal attribute lookup.
    """
    cls.lats = np.array([10, 12, 14, 16, 18])
    cls.lons = np.array([100, 102, 104, 106, 108])
    cls.times = np.array([dt.datetime(2000, x, 1) for x in range(1, 13)])
    flat_array = np.array(range(300))
    cls.values = flat_array.reshape(12, 5, 5)
    cls.variable = 'var'
    cls.units = 'units'
    cls.name = 'name'

    # One origin dict per supported data source type.
    cls.local_origin = {
        'source': 'local',
        'path': '/a/fake/path.nc',
        'lat_name': 'a lat name',
        'lon_name': 'a lon name',
        'time_name': 'a time name',
        'elevation_index': 2
    }
    cls.rcmed_origin = {
        'source': 'rcmed',
        'dataset_id': 4,
        'parameter_id': 14
    }
    cls.esgf_origin = {
        'source': 'esgf',
        'dataset_id': 'esgf dataset id',
        'variable': 'var'
    }
    cls.dap_origin = {
        'source': 'dap',
        'url': 'a fake url',
    }

    cls.local_ds = Dataset(cls.lats, cls.lons, cls.times,
                           cls.values, variable=cls.variable,
                           units=cls.units, name=cls.name,
                           origin=cls.local_origin)
    cls.rcmed_ds = Dataset(cls.lats, cls.lons, cls.times,
                           cls.values, variable=cls.variable,
                           units=cls.units, name=cls.name,
                           origin=cls.rcmed_origin)
    cls.esgf_ds = Dataset(cls.lats, cls.lons, cls.times,
                          cls.values, variable=cls.variable,
                          units=cls.units, name=cls.name,
                          origin=cls.esgf_origin)
    cls.dap_ds = Dataset(cls.lats, cls.lons, cls.times,
                         cls.values, variable=cls.variable,
                         units=cls.units, name=cls.name,
                         origin=cls.dap_origin)

    cls.subregions = [
        Bounds(lat_min=-10, lat_max=10, lon_min=-20, lon_max=20),
        Bounds(lat_min=-5, lat_max=5, lon_min=-15, lon_max=15)
    ]

    cls.evaluation = Evaluation(
        cls.local_ds,
        [cls.rcmed_ds, cls.esgf_ds, cls.dap_ds],
        [metrics.Bias(), metrics.TemporalStdDev()],
        subregions=cls.subregions)
@classmethod
def setUpClass(cls):
    """Create an Evaluation with two metrics and no datasets, once per class.

    Fix: ``setUpClass`` is invoked by unittest on the class itself, so it
    must be a ``@classmethod`` taking ``cls``; as a plain instance method
    it would raise ``TypeError`` when unittest calls it with no instance.
    """
    cls.bias = metrics.Bias()
    cls.tmp_std_dev = metrics.TemporalStdDev()
    loaded_metrics = [cls.bias, cls.tmp_std_dev]

    # No reference dataset and no targets: only the metric list is set.
    cls.evaluation = Evaluation(None, [], loaded_metrics)