def test_full_init(self):
    """Check that Evaluation stores datasets/metrics and reports them in str().

    The original version first built a throwaway Evaluation (with three
    fresh metric instances) and immediately overwrote it; that dead
    construction has been removed.
    """
    ref_dataset = self.test_dataset
    target_datasets = [self.test_dataset, self.another_test_dataset]
    metrics = [Bias(), Bias()]
    unary_metrics = [TemporalStdDev()]

    self.eval = Evaluation(ref_dataset,
                           target_datasets,
                           metrics + unary_metrics)

    self.assertEqual(self.eval.ref_dataset.variable, self.variable)

    # Make sure the two target datasets were added properly
    self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
    self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

    # Make sure the three metrics were added properly
    # The two Bias metrics are "binary" metrics
    self.assertEqual(len(self.eval.metrics), 2)
    # TemporalStdDev is a "unary" metric and should be stored as such
    self.assertEqual(len(self.eval.unary_metrics), 1)

    self.eval.run()

    # __str__ must reflect exactly the datasets/metrics used above.
    out_str = (
        "<Evaluation - ref_dataset: {}, "
        "target_dataset(s): {}, "
        "binary_metric(s): {}, "
        "unary_metric(s): {}, "
        "subregion(s): {}>"
    ).format(
        str(self.test_dataset),
        [str(ds) for ds in target_datasets],
        [str(m) for m in metrics],
        [str(u) for u in unary_metrics],
        None
    )
    self.assertEqual(str(self.eval), out_str)
def setUp(self):
    """Build a reference and a target dataset that differ in grid and time."""
    self.bias = Bias()

    # Reference dataset: Jan-Dec 2000, values 0..299 over a 5x5 grid.
    self.reference_lat = np.array([10, 12, 14, 16, 18])
    self.reference_lon = np.array([100, 102, 104, 106, 108])
    self.reference_time = np.array(
        [dt.datetime(2000, month, 1) for month in range(1, 13)])
    self.reference_value = np.array(range(300)).reshape(12, 5, 5)
    self.reference_variable = 'prec'
    self.reference_dataset = Dataset(
        self.reference_lat, self.reference_lon, self.reference_time,
        self.reference_value, self.reference_variable)

    # Target dataset: Jan-Dec 2001, values 300..599 over a shifted grid.
    self.target_lat = np.array([1, 2, 4, 6, 8])
    self.target_lon = np.array([10, 12, 14, 16, 18])
    self.target_time = np.array(
        [dt.datetime(2001, month, 1) for month in range(1, 13)])
    self.target_value = np.array(range(300, 600)).reshape(12, 5, 5)
    self.target_variable = 'tasmax'
    self.target_dataset = Dataset(
        self.target_lat, self.target_lon, self.target_time,
        self.target_value, self.target_variable)
def test_result_shape(self):
    """Two binary metrics over three targets give two results of length 3."""
    targets = [self.another_test_dataset] * 3
    evaluation = Evaluation(self.test_dataset, targets, [Bias(), Bias()])
    evaluation.run()

    # One entry per metric; each entry's first axis spans the targets.
    self.assertTrue(len(evaluation.results) == 2)
    self.assertTrue(evaluation.results[0].shape[0] == 3)
def test_bias_output_shape(self):
    """A Bias result keeps the shape of the evaluated dataset's values."""
    evaluation = Evaluation(self.test_dataset,
                            [self.another_test_dataset],
                            [Bias()])
    evaluation.run()

    expected_shape = tuple(self.test_dataset.values.shape)
    result_shape = tuple(evaluation.results[0][0].shape)
    self.assertEqual(expected_shape, result_shape)
class TestBias(unittest.TestCase):
    '''Test the metrics.Bias metric.'''

    def setUp(self):
        """Create a reference and a target dataset for the bias computation."""
        self.bias = Bias()
        # Initialize reference dataset: Jan-Dec 2000, values 0..299.
        self.reference_lat = np.array([10, 12, 14, 16, 18])
        self.reference_lon = np.array([100, 102, 104, 106, 108])
        self.reference_time = np.array([dt.datetime(2000, x, 1)
                                        for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.reference_value = flat_array.reshape(12, 5, 5)
        self.reference_variable = 'prec'
        self.reference_dataset = Dataset(self.reference_lat,
                                         self.reference_lon,
                                         self.reference_time,
                                         self.reference_value,
                                         self.reference_variable)
        # Initialize target dataset: Jan-Dec 2001, values 300..599.
        self.target_lat = np.array([1, 2, 4, 6, 8])
        self.target_lon = np.array([10, 12, 14, 16, 18])
        self.target_time = np.array([dt.datetime(2001, x, 1)
                                     for x in range(1, 13)])
        flat_array = np.array(range(300, 600))
        self.target_value = flat_array.reshape(12, 5, 5)
        self.target_variable = 'tasmax'
        self.target_dataset = Dataset(self.target_lat,
                                      self.target_lon,
                                      self.target_time,
                                      self.target_value,
                                      self.target_variable)

    def test_function_run(self):
        '''Test bias function between reference dataset and target dataset.'''
        # Every reference cell is exactly 300 less than the matching target
        # cell, so the bias is a constant -300.  NOTE: `np.int` was a
        # deprecated alias for the builtin `int` and was removed in
        # NumPy 1.24; use `int` directly.
        expected_result = np.full((12, 5, 5), -300, dtype=int)
        np.testing.assert_array_equal(self.bias.run(self.reference_dataset,
                                                    self.target_dataset),
                                      expected_result)
def test_full_init(self):
    """Constructor arguments must be sorted into the right attributes."""
    targets = [self.test_dataset, self.another_test_dataset]
    all_metrics = [Bias(), Bias(), TemporalStdDev()]
    self.eval = Evaluation(self.test_dataset, targets, all_metrics)

    self.assertEqual(self.eval.ref_dataset.variable, self.variable)

    # Both target datasets should have been added, in order.
    self.assertEqual(self.eval.target_datasets[0].variable, self.variable)
    self.assertEqual(self.eval.target_datasets[1].variable, self.other_var)

    # The two Bias metrics are "binary" metrics ...
    self.assertEqual(len(self.eval.metrics), 2)
    # ... while TemporalStdDev is "unary" and stored separately.
    self.assertEqual(len(self.eval.unary_metrics), 1)
def test_add_valid_metric(self):
    """add_metric routes binary and unary metrics to their own lists."""
    # A binary metric lands in `metrics`.
    self.assertEqual(len(self.eval.metrics), 0)
    self.eval.add_metric(Bias())
    self.assertEqual(len(self.eval.metrics), 1)

    # A unary metric lands in `unary_metrics`.
    self.assertEqual(len(self.eval.unary_metrics), 0)
    self.eval.add_metric(TemporalStdDev())
    self.assertEqual(len(self.eval.unary_metrics), 1)
def test_subregion_result_shape(self):
    """Passing subregions adds one extra nesting level to the results."""
    bound = Bounds(10, 18, 100, 108,
                   dt.datetime(2000, 1, 1), dt.datetime(2000, 3, 1))

    bias_eval = Evaluation(
        self.test_dataset,
        [self.another_test_dataset, self.another_test_dataset],
        [Bias()],
        [bound])
    bias_eval.run()

    # Expected result shape is
    # [
    #     [  # Subregions cause this extra layer
    #         [number of targets, bias.run(reference, target1).shape]
    #     ]
    # ]
    self.assertTrue(len(bias_eval.results) == 1)
    self.assertTrue(len(bias_eval.results[0]) == 1)
    self.assertTrue(bias_eval.results[0][0].shape[0] == 2)
    # `type([])` was a roundabout spelling of `list`.
    self.assertTrue(isinstance(bias_eval.results, list))
def setUp(self):
    """Prepare one reference and one target dataset offset by 300."""
    self.bias = Bias()

    # Reference: monthly steps through 2000, values counting 0..299.
    self.reference_lat = np.array([10, 12, 14, 16, 18])
    self.reference_lon = np.array([100, 102, 104, 106, 108])
    self.reference_time = np.array(
        [dt.datetime(2000, month, 1) for month in range(1, 13)])
    self.reference_value = np.array(range(300)).reshape(12, 5, 5)
    self.reference_variable = 'prec'
    self.reference_dataset = Dataset(
        self.reference_lat, self.reference_lon, self.reference_time,
        self.reference_value, self.reference_variable)

    # Target: monthly steps through 2001, values counting 300..599.
    self.target_lat = np.array([1, 2, 4, 6, 8])
    self.target_lon = np.array([10, 12, 14, 16, 18])
    self.target_time = np.array(
        [dt.datetime(2001, month, 1) for month in range(1, 13)])
    self.target_value = np.array(range(300, 600)).reshape(12, 5, 5)
    self.target_variable = 'tasmax'
    self.target_dataset = Dataset(
        self.target_lat, self.target_lon, self.target_time,
        self.target_value, self.target_variable)
class TestBias(unittest.TestCase):
    '''Test the metrics.Bias metric.'''

    def setUp(self):
        """Create a reference and a target dataset for the bias computation."""
        self.bias = Bias()
        # Initialize reference dataset: Jan-Dec 2000, values 0..299.
        self.reference_lat = np.array([10, 12, 14, 16, 18])
        self.reference_lon = np.array([100, 102, 104, 106, 108])
        self.reference_time = np.array(
            [dt.datetime(2000, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300))
        self.reference_value = flat_array.reshape(12, 5, 5)
        self.reference_variable = 'prec'
        self.reference_dataset = Dataset(self.reference_lat,
                                         self.reference_lon,
                                         self.reference_time,
                                         self.reference_value,
                                         self.reference_variable)
        # Initialize target dataset: Jan-Dec 2001, values 300..599.
        self.target_lat = np.array([1, 2, 4, 6, 8])
        self.target_lon = np.array([10, 12, 14, 16, 18])
        self.target_time = np.array(
            [dt.datetime(2001, x, 1) for x in range(1, 13)])
        flat_array = np.array(range(300, 600))
        self.target_value = flat_array.reshape(12, 5, 5)
        self.target_variable = 'tasmax'
        self.target_dataset = Dataset(self.target_lat,
                                      self.target_lon,
                                      self.target_time,
                                      self.target_value,
                                      self.target_variable)

    def test_function_run(self):
        '''Test bias function between reference dataset and target dataset.'''
        # Each reference value is 300 below its target counterpart, so the
        # bias is uniformly -300.  NOTE: `np.int` was a deprecated alias for
        # the builtin `int` and was removed in NumPy 1.24; use `int`.
        expected_result = np.full((12, 5, 5), -300, dtype=int)
        np.testing.assert_array_equal(
            self.bias.run(self.reference_dataset, self.target_dataset),
            expected_result)
def test_add_metrics(self):
    """add_metrics appends every metric from the supplied list."""
    self.assertEqual(len(self.eval.metrics), 0)
    binary_metrics = [Bias(), Bias()]
    self.eval.add_metrics(binary_metrics)
    self.assertEqual(len(self.eval.metrics), 2)