def test_kwarg_pass_through_no_kwargs(self):
    call_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    aggregator = Aggregator('', call_func)
    aggregator.aggregate(data, axis)
    call_func.assert_called_once_with(data, axis=axis)
def test_no_units_change(self):
    # If the Aggregator has no units_func then the units should be
    # left unchanged.
    aggregator = Aggregator("", None)
    cube = mock.Mock(units=mock.sentinel.units)
    aggregator.update_metadata(cube, [])
    self.assertIs(cube.units, mock.sentinel.units)
def test_no_units_change(self):
    # If the Aggregator has no units_func then the units should be
    # left unchanged.
    aggregator = Aggregator('', None)
    cube = Mock(units=sentinel.units)
    aggregator.update_metadata(cube, [])
    self.assertIs(cube.units, sentinel.units)
def test_kwarg_pass_through_no_kwargs(self):
    lazy_func = Mock()
    data = sentinel.data
    axis = sentinel.axis
    aggregator = Aggregator('', None, lazy_func=lazy_func)
    aggregator.lazy_aggregate(data, axis)
    lazy_func.assert_called_once_with(data, axis)
def test_kwarg_pass_through_no_kwargs(self):
    lazy_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    aggregator = Aggregator("", None, lazy_func=lazy_func)
    aggregator.lazy_aggregate(data, axis)
    lazy_func.assert_called_once_with(data, axis=axis)
def test_kwarg_pass_through_init_kwargs(self):
    call_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    kwargs = dict(wibble='wobble', foo='bar')
    aggregator = Aggregator('', call_func, **kwargs)
    aggregator.aggregate(data, axis)
    call_func.assert_called_once_with(data, axis=axis, **kwargs)
def test_kwarg_pass_through_call_kwargs(self):
    lazy_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    kwargs = dict(wibble='wobble', foo='bar')
    aggregator = Aggregator('', None, lazy_func=lazy_func)
    aggregator.lazy_aggregate(data, axis, **kwargs)
    lazy_func.assert_called_once_with(data, axis=axis, **kwargs)
def test_kwarg_pass_through_init_kwargs(self):
    lazy_func = Mock()
    data = sentinel.data
    axis = sentinel.axis
    kwargs = dict(wibble='wobble', foo='bar')
    aggregator = Aggregator('', None, lazy_func=lazy_func, **kwargs)
    aggregator.lazy_aggregate(data, axis)
    lazy_func.assert_called_once_with(data, axis, **kwargs)
def test_kwarg_pass_through_init_kwargs(self):
    lazy_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    kwargs = dict(wibble="wobble", foo="bar")
    aggregator = Aggregator("", None, lazy_func=lazy_func, **kwargs)
    aggregator.lazy_aggregate(data, axis)
    lazy_func.assert_called_once_with(data, axis=axis, **kwargs)
def test_units_change(self):
    # If the Aggregator has a units_func then the new units should
    # be defined by its return value.
    units_func = mock.Mock(return_value=mock.sentinel.new_units)
    aggregator = Aggregator("", None, units_func)
    cube = mock.Mock(units=mock.sentinel.units)
    aggregator.update_metadata(cube, [])
    units_func.assert_called_once_with(mock.sentinel.units)
    self.assertEqual(cube.units, mock.sentinel.new_units)
def test_units_change(self):
    # If the Aggregator has a units_func then the new units should
    # be defined by its return value.
    units_func = Mock(return_value=sentinel.new_units)
    aggregator = Aggregator('', None, units_func)
    cube = Mock(units=sentinel.units)
    aggregator.update_metadata(cube, [])
    units_func.assert_called_once_with(sentinel.units)
    self.assertEqual(cube.units, sentinel.new_units)
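# The two tests above pin down the units_func behaviour. For context, a
# hedged sketch of when a units_func is actually needed: an aggregator
# whose result has different units from its input, such as a variance,
# can square the incoming units (iris's own VARIANCE aggregator uses
# this same units_func pattern).
import numpy as np
from iris.analysis import Aggregator

VARIANCE_LIKE = Aggregator('variance', np.var,
                           units_func=lambda units: units * units)

# Collapsing a cube with units 'K' using VARIANCE_LIKE yields 'K^2'.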
def test_kwarg_pass_through_combined_kwargs(self):
    call_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    init_kwargs = dict(wibble='wobble', var=1.0)
    call_kwargs = dict(foo='foo', var=0.5)
    aggregator = Aggregator('', call_func, **init_kwargs)
    aggregator.aggregate(data, axis, **call_kwargs)
    expected_kwargs = init_kwargs.copy()
    expected_kwargs.update(call_kwargs)
    call_func.assert_called_once_with(data, axis=axis, **expected_kwargs)
def test_kwarg_pass_through_combined_kwargs(self):
    lazy_func = Mock()
    data = sentinel.data
    axis = sentinel.axis
    init_kwargs = dict(wibble='wobble', var=1.0)
    call_kwargs = dict(foo='foo', var=0.5)
    aggregator = Aggregator('', None, lazy_func=lazy_func, **init_kwargs)
    aggregator.lazy_aggregate(data, axis, **call_kwargs)
    expected_kwargs = init_kwargs.copy()
    expected_kwargs.update(call_kwargs)
    lazy_func.assert_called_once_with(data, axis, **expected_kwargs)
def test_kwarg_pass_through_combined_kwargs(self):
    lazy_func = mock.Mock()
    data = mock.sentinel.data
    axis = mock.sentinel.axis
    init_kwargs = dict(wibble="wobble", var=1.0)
    call_kwargs = dict(foo="foo", var=0.5)
    aggregator = Aggregator("", None, lazy_func=lazy_func, **init_kwargs)
    aggregator.lazy_aggregate(data, axis, **call_kwargs)
    expected_kwargs = init_kwargs.copy()
    expected_kwargs.update(call_kwargs)
    lazy_func.assert_called_once_with(data, axis=axis, **expected_kwargs)
def percentile_weighted_mean(self, cube, weights):
    """
    Blend percentile data using the weights provided.

    Args:
        cube (iris.cube.Cube):
            The cube which is being blended over self.blend_coord.
            Assumes self.blend_coord and percentile are leading
            coordinates (enforced in process).
        weights (iris.cube.Cube):
            Cube of blending weights.

    Returns:
        iris.cube.Cube:
            The cube with percentile values blended over
            self.blend_coord, with suitable weightings applied.
    """
    non_perc_slice = next(cube.slices_over(PERC_COORD))
    weights_array = self.get_weights_array(non_perc_slice, weights)
    weights_array = self._normalise_weights(weights_array)

    # Set up aggregator
    PERCENTILE_BLEND = Aggregator(
        "mean",  # Use CF-compliant cell method.
        PercentileBlendingAggregator.aggregate,
    )

    cube_new = collapsed(
        cube,
        self.blend_coord,
        PERCENTILE_BLEND,
        percentiles=cube.coord(PERC_COORD).points,
        arr_weights=weights_array,
    )
    return cube_new
def weighted_maximum(self, cube, weights):
    """
    Blend data using a weighted maximum with the weights provided.
    This entails scaling the data by the weights before taking
    a maximum across the blending coordinate self.coord.

    Args:
        cube (iris.cube.Cube):
            The cube which is being blended over self.coord.
        weights (iris.cube.Cube):
            Cube of blending weights.

    Returns:
        cube_new (iris.cube.Cube):
            The cube with values blended over self.coord, with
            suitable weightings applied.
    """
    weights_array = self.non_percentile_weights(
        cube, weights, custom_aggregator=True)

    # Set up aggregator
    MAX_PROBABILITY = Aggregator(
        'maximum',  # Use CF-compliant cell method.
        MaxProbabilityAggregator.aggregate)

    cube_new = cube.collapsed(self.coord, MAX_PROBABILITY,
                              arr_weights=weights_array)
    cube_new.data = cube_new.data.astype(np.float32)
    return cube_new
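# MaxProbabilityAggregator is not defined in these snippets. Based only
# on the docstring above (scale the data by the weights, then take the
# maximum across the blending coordinate), a hypothetical sketch of its
# aggregate method might look like this:
import numpy as np

class MaxProbabilityAggregator:
    """Hypothetical sketch; the real implementation may differ."""

    @staticmethod
    def aggregate(data, axis, arr_weights):
        # Assumes arr_weights has already been broadcast to the shape
        # of data, as the caller above prepares it.
        if axis < 0:
            # Normalise a possibly negative axis index.
            axis += data.ndim
        # Scale by the weights, then collapse with a maximum.
        return np.max(data * arr_weights, axis=axis)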
def main():
    # Load the whole time-sequence as a single cube.
    file_path = iris.sample_data_path("E1_north_america.nc")
    cube = iris.load_cube(file_path)

    # Make an aggregator from the user function.
    SPELL_COUNT = Aggregator(
        "spell_count", count_spells, units_func=lambda units: 1
    )

    # Define the parameters of the test.
    threshold_temperature = 280.0
    spell_years = 5

    # Calculate the statistic.
    warm_periods = cube.collapsed(
        "time",
        SPELL_COUNT,
        threshold=threshold_temperature,
        spell_length=spell_years,
    )
    warm_periods.rename("Number of 5-year warm spells in 240 years")

    # Plot the results.
    qplt.contourf(warm_periods, cmap="RdYlBu_r")
    plt.gca().coastlines()
    iplt.show()
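# None of the snippets here define count_spells, the user function they
# wrap in an Aggregator. A sketch closely following the custom
# aggregation example in the Iris gallery (from which main() above is
# taken): it counts runs of at least spell_length consecutive points
# exceeding threshold along the collapsed axis.
import numpy as np
from iris.util import rolling_window

def count_spells(data, threshold, axis, spell_length):
    if axis < 0:
        # Handle negative axis indices, as iris may pass axis=-1.
        axis += data.ndim
    # Points exceeding the threshold.
    data_hits = data > threshold
    # View the hits as overlapping windows of spell_length points
    # along the collapse axis.
    hit_windows = rolling_window(data_hits, window=spell_length, axis=axis)
    # A window counts as a spell only if every point in it is a hit.
    full_windows = np.all(hit_windows, axis=axis + 1)
    # Count the spell windows along the collapsed axis.
    return np.sum(full_windows, axis=axis, dtype=int)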
def window_counts(mycube, value_threshold, window_size, pctile):
    """
    Return a flat array containing the number of data points within a
    time window `window_size` per grid point that satisfy the condition
    value > value_threshold.

    It also returns statistical measures for the flat array:
    window_counts[0] = array
    window_counts[1] = mean(array)
    window_counts[2] = std(array)
    window_counts[3] = percentile(array, pctile)
    """
    # Make an aggregator from the user function.
    SPELL_COUNT = Aggregator('spell_count', count_spells,
                             units_func=lambda units: 1)

    # Calculate the statistic.
    counts_windowed_cube = mycube.collapsed('time', SPELL_COUNT,
                                            threshold=value_threshold,
                                            spell_length=window_size)

    # If one wants to print the whole array:
    # np.set_printoptions(threshold=np.nan)
    r = counts_windowed_cube.data.flatten()
    meanr = np.mean(r)
    stdr = np.std(r)
    prcr = np.percentile(r, pctile)

    return r, meanr, stdr, prcr
def __init__(self):
    """Create an aggregator instance for reuse"""
    self.aggregator_instance = Aggregator("mode", self.mode_aggregator)

    # Create the expected cell method for use with single cube inputs
    # that do not pass through the aggregator.
    self.mode_cell_method = iris.coords.CellMethod("mode", coords="time")
def test_fail_no_lazy(self):
    dummy_agg = Aggregator('custom_op', lambda x: 1)
    with self.assertRaises(LazyAggregatorError) as err:
        cube_collapsed = self.cube.collapsed('x', dummy_agg, lazy=True)
    msg = str(err.exception)
    self.assertIn('custom_op', msg)
    self.assertIn('lazy', msg)
    self.assertIn('not support', msg)
def percentile_weighted_mean(self, cube, weights, perc_coord):
    """
    Blend percentile data using the weights provided.

    Args:
        cube (iris.cube.Cube):
            The cube which is being blended over self.blend_coord.
        weights (iris.cube.Cube):
            Cube of blending weights.
        perc_coord (iris.coords.DimCoord):
            The percentile coordinate for this cube.

    Returns:
        iris.cube.Cube:
            The cube with percentile values blended over
            self.blend_coord, with suitable weightings applied.
    """
    percentiles = np.array(perc_coord.points, dtype=np.float32)
    (perc_dim,) = cube.coord_dims(perc_coord.name())

    # The iris.analysis.Aggregator moves the coordinate being
    # collapsed to index=-1 in initialisation, before the
    # aggregation method is called. This reduces by 1 the index
    # of all coordinates with an initial index higher than the
    # collapsing coordinate. As we need to know the index of
    # the percentile coordinate at a later step, if it will be
    # changed by this process, we adjust our record (perc_dim)
    # here.
    if cube.coord_dims(self.blend_coord)[0] < perc_dim:
        perc_dim -= 1

    weights_array = self.percentile_weights(cube, weights, perc_coord)

    # Set up aggregator
    PERCENTILE_BLEND = Aggregator(
        "mean",  # Use CF-compliant cell method.
        PercentileBlendingAggregator.aggregate,
    )

    cube_new = collapsed(
        cube,
        self.blend_coord,
        PERCENTILE_BLEND,
        arr_percent=percentiles,
        arr_weights=weights_array,
        perc_dim=perc_dim,
    )
    cube_new.data = cube_new.data.astype(np.float32)

    # Ensure collapsed coordinates do not promote themselves
    # to float64.
    for coord in cube_new.coords():
        if coord.points.dtype == np.float64:
            coord.points = coord.points.astype(np.float32)

    return cube_new
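# A small numpy illustration of the index bookkeeping described in the
# comment inside percentile_weighted_mean above: moving the collapsed
# dimension to the end shifts every later dimension down by one.
import numpy as np

data = np.zeros((3, 5, 4, 4))       # dims: (blend, percentile, y, x)
moved = np.moveaxis(data, 0, -1)    # collapse dim moved to index -1
assert moved.shape == (5, 4, 4, 3)  # percentile dim: index 1 -> index 0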
def mask_cube_counts(mycube, value_threshold, counts_threshold, window_size):
    # Make an aggregator from the user function.
    SPELL_COUNT = Aggregator('spell_count', count_spells,
                             units_func=lambda units: 1)

    # Calculate the statistic.
    counts_windowed_cube = mycube.collapsed('time', SPELL_COUNT,
                                            threshold=value_threshold,
                                            spell_length=window_size)

    # Convert the boolean condition into an integer mask.
    mask = counts_windowed_cube.data > counts_threshold
    mask = mask.astype(int)

    # Preserving the original cube metadata.
    masked_cube = mycube.copy()
    masked_cube.data = mycube.data * mask

    return counts_windowed_cube, mask, masked_cube
def _get_drought_data(cfg, cube):
    """Prepare data and calculate characteristics."""
    # Make a new cube to increase the size of the data array.
    # Make an aggregator from the user function.
    spell_no = Aggregator('spell_count', count_spells,
                          units_func=lambda units: 1)
    new_cube = _make_new_cube(cube)

    # Calculate the number of drought events and their average duration.
    drought_show = new_cube.collapsed('time', spell_no,
                                      threshold=cfg['threshold'])
    drought_show.rename('Drought characteristics')

    # Length of the time series in years.
    time_length = len(new_cube.coord('time').points) / 12.0
    # Convert the number of drought events to a frequency (per year).
    drought_show.data[:, :, 0] = drought_show.data[:, :, 0] / time_length
    return drought_show
def _get_fillvalues_mask(cube, threshold_fraction, min_value, time_window):
    """
    Compute the per-model missing values mask.

    Construct the mask that fills a certain time window with missing
    values if the number of values in that specific window is less than
    a given fractional threshold. This uses a custom iris Aggregator
    function that aggregates the cube data by a given time window and
    counts the number of valid (unmasked) data points within that
    window; a simple value thresholding is also applied if needed.
    """
    # Basic checks.
    if threshold_fraction < 0 or threshold_fraction > 1.0:
        raise ValueError(
            "Fraction of missing values {} should be between 0 and 1.0"
            .format(threshold_fraction))
    nr_time_points = len(cube.coord('time').points)
    if time_window > nr_time_points:
        msg = "Time window (in time units) larger than total time span. Stop."
        raise ValueError(msg)

    max_counts_per_time_window = nr_time_points / time_window
    # Round down to the lower integer.
    counts_threshold = int(max_counts_per_time_window * threshold_fraction)

    # Make an aggregator.
    spell_count = Aggregator('spell_count', count_spells,
                             units_func=lambda units: 1)

    # Calculate the statistic.
    counts_windowed_cube = cube.collapsed('time', spell_count,
                                          threshold=min_value,
                                          spell_length=time_window)

    # Create the mask.
    mask = counts_windowed_cube.data < counts_threshold
    if np.ma.isMaskedArray(mask):
        mask = mask.data | mask.mask

    return mask
def main():
    file_path = iris.sample_data_path(
        '/nfs/a266/data/CMIP5_AFRICA/BC_0.5x0.5/IPSL-CM5A-LR/historical/pr_WFDEI_1979-2013_0.5x0.5_day_IPSL-CM5A-LR_africa_historical_r1i1p1_full.nc'
    )
    cube = iris.load_cube(file_path)

    cube_wafr = cube.intersection(latitude=(-10.0, 10.0),
                                  longitude=(4.0, 25.0))
    iris.coord_categorisation.add_year(cube_wafr, 'time', name='year')
    iris.coord_categorisation.add_month_number(cube_wafr, 'time',
                                               name='month_number')
    iris.coord_categorisation.add_season(cube_wafr, 'time', name='season')

    SPELL_COUNT = Aggregator('spell_count', count_spells,
                             units_func=lambda units: 1)

    threshold_rainfall = 0.1
    spell_days = 10

    dry_periods = cube.collapsed('time', SPELL_COUNT,
                                 threshold=threshold_rainfall,
                                 spell_length=spell_days)
    dry_periods.rename('Number of 10-days dry spells in 35 years')

    qplt.contourf(dry_periods, cmap='RdYlBu_r')
    plt.gca().coastlines()
    iplt.show()
def __init__(self, model_id_attr: Optional[str] = None,
             record_run_attr: Optional[str] = None):
    """
    Set up plugin and create an aggregator instance for reuse

    Args:
        model_id_attr:
            Name of attribute recording source models that should be
            inherited by the output cube. The source models are
            expected as a space-separated string.
        record_run_attr:
            Name of attribute used to record models and cycles used in
            constructing the weather symbols.
    """
    self.aggregator_instance = Aggregator("mode", self.mode_aggregator)
    self.model_id_attr = model_id_attr
    self.record_run_attr = record_run_attr

    # Create the expected cell method for use with single cube inputs
    # that do not pass through the aggregator.
    self.mode_cell_method = iris.coords.CellMethod("mode", coords="time")
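# The mode_aggregator method referenced above is not included in these
# snippets. As a hypothetical sketch of the contract such a call_func
# must satisfy - accept (data, axis) and return an array reduced along
# that axis - here picking the most frequent value:
import numpy as np

def mode_aggregator(data, axis):
    def _mode_1d(values):
        # Most frequent value in a 1-D slice.
        vals, counts = np.unique(values, return_counts=True)
        return vals[np.argmax(counts)]

    # Reduce the collapse axis slice-by-slice.
    return np.apply_along_axis(_mode_1d, axis, data)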
class Test_aggregate(tests.IrisTest):
    # These unit tests don't call a data aggregation function, they call a
    # mocked one i.e. the return values of the mocked data aggregation
    # function don't matter, only how these are dealt with by the aggregate
    # method.
    def setUp(self):
        self.TEST = Aggregator('test', None)
        self.array = ma.array([[1, 2, 3],
                               [4, 5, 6]],
                              mask=[[False, True, False],
                                    [True, False, False]],
                              dtype=np.float64)
        self.expected_result_axis0 = ma.array([1, 2, 3], mask=None)
        self.expected_result_axis1 = ma.array([4, 5], mask=None)

    def test_masked_notol(self):
        # Providing masked array with no tolerance keyword (mdtol) provided.
        axis = 0
        mock_return = self.expected_result_axis0.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis)
        self.assertMaskedArrayEqual(result, self.expected_result_axis0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_above_tol(self):
        # Providing masked array with a high tolerance (mdtol) provided.
        axis = 0
        mock_return = self.expected_result_axis0.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.55)
        self.assertMaskedArrayEqual(result, self.expected_result_axis0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.55)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_below_tol(self):
        # Providing masked array with a tolerance on missing values, low
        # enough to modify the resulting mask for axis 0.
        axis = 0
        result_axis_0 = self.expected_result_axis0.copy()
        result_axis_0.mask = np.array([True, True, False])
        mock_return = ma.array([1, 2, 3], mask=None)
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayAlmostEqual(result, result_axis_0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_below_tol_alt(self):
        # Providing masked array with a tolerance on missing values, low
        # enough to modify the resulting mask for axis 1.
        axis = 1
        result_axis_1 = self.expected_result_axis1.copy()
        result_axis_1.mask = np.array([True, True])
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.1)
        self.assertMaskedArrayAlmostEqual(result, result_axis_1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_unmasked_with_mdtol(self):
        # Providing aggregator with an unmasked array and tolerance specified
        # for missing data - ensure that result is unaffected.
        data = self.array.data

        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0.5)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0.5)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

    def test_unmasked(self):
        # Providing aggregator with an unmasked array and no additional
        # keyword arguments - ensure that result is unaffected.
        data = self.array.data

        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_array_len_one_mdtol(self):
        # Test the case when the data aggregation function returns a scalar
        # and the aggregate method turns it into a masked array.
        axis = -1
        data = self.array.flatten()
        mock_return = 2
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=1)
        self.assertMaskedArrayEqual(result, ma.array([2], mask=[False]))
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_array_len_one_mdtol_alt(self):
        # Test the case when the data aggregation function returns a scalar
        # with no tolerance for missing data values and the aggregate method
        # turns it into a masked array of length one.
        axis = -1
        data = self.array.flatten()
        mock_return = 2
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0)
        self.assertMaskedArrayEqual(result, ma.array([2], mask=[True]))
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_non_masked_array_from_masked_array(self):
        # Providing a masked array, call_func returning a non-masked array,
        # resulting in a masked array output.
        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        result_axis_0 = ma.array(mock_return, mask=[True, True, False])
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayAlmostEqual(result, result_axis_0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)
class Test_aggregate(tests.IrisTest):
    # These unit tests don't call a data aggregation function, they call a
    # mocked one i.e. the return values of the mocked data aggregation
    # function don't matter, only how these are dealt with by the aggregate
    # method.
    def setUp(self):
        self.TEST = Aggregator("test", None)
        self.array = ma.array(
            [[1, 2, 3], [4, 5, 6]],
            mask=[[False, True, False], [True, False, False]],
            dtype=np.float64,
        )
        self.expected_result_axis0 = ma.array([1, 2, 3], mask=None)
        self.expected_result_axis1 = ma.array([4, 5], mask=None)

    def test_masked_notol(self):
        # Providing masked array with no tolerance keyword (mdtol) provided.
        axis = 0
        mock_return = self.expected_result_axis0.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis)
        self.assertMaskedArrayEqual(result, self.expected_result_axis0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_above_tol(self):
        # Providing masked array with a high tolerance (mdtol) provided.
        axis = 0
        mock_return = self.expected_result_axis0.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.55)
        self.assertMaskedArrayEqual(result, self.expected_result_axis0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.55)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_below_tol(self):
        # Providing masked array with a tolerance on missing values, low
        # enough to modify the resulting mask for axis 0.
        axis = 0
        result_axis_0 = self.expected_result_axis0.copy()
        result_axis_0.mask = np.array([True, True, False])
        mock_return = ma.array([1, 2, 3], mask=None)
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.45)
        self.assertMaskedArrayAlmostEqual(result, result_axis_0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.45)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_below_tol_alt(self):
        # Providing masked array with a tolerance on missing values, low
        # enough to modify the resulting mask for axis 1.
        axis = 1
        result_axis_1 = self.expected_result_axis1.copy()
        result_axis_1.mask = np.array([True, True])
        mock_return = self.expected_result_axis1.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.1)
        self.assertMaskedArrayAlmostEqual(result, result_axis_1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_unmasked_with_mdtol(self):
        # Providing aggregator with an unmasked array and tolerance specified
        # for missing data - ensure that result is unaffected.
        data = self.array.data

        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0.5)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0.5)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

    def test_unmasked(self):
        # Providing aggregator with an unmasked array and no additional
        # keyword arguments - ensure that result is unaffected.
        data = self.array.data

        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(data, axis)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(data, axis)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_scalar_mdtol(self):
        # Test the case when the data aggregation function returns a scalar
        # and the aggregate method turns it into a masked array.
        axis = -1
        data = self.array.flatten()
        mock_return = 2
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=1)
        self.assertMaskedArrayEqual(result, ma.array(2, mask=False))
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_scalar_mdtol_alt(self):
        # Test the case when the data aggregation function returns a scalar
        # with no tolerance for missing data values and the aggregate method
        # turns it into a masked array.
        axis = -1
        data = self.array.flatten()
        mock_return = 2
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0)
        self.assertMaskedArrayEqual(result, ma.array(2, mask=True))
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_non_masked_array_from_masked_array(self):
        # Providing a masked array, call_func returning a non-masked array,
        # resulting in a masked array output.
        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        result_axis_0 = ma.array(mock_return, mask=[True, True, False])
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.45)
        self.assertMaskedArrayAlmostEqual(result, result_axis_0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with mock.patch.object(
            self.TEST, "call_func", return_value=mock_return
        ) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=0.45)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_kwarg_pass_through_no_kwargs(self):
        call_func = mock.Mock()
        data = mock.sentinel.data
        axis = mock.sentinel.axis
        aggregator = Aggregator("", call_func)
        aggregator.aggregate(data, axis)
        call_func.assert_called_once_with(data, axis=axis)

    def test_kwarg_pass_through_call_kwargs(self):
        call_func = mock.Mock()
        data = mock.sentinel.data
        axis = mock.sentinel.axis
        kwargs = dict(wibble="wobble", foo="bar")
        aggregator = Aggregator("", call_func)
        aggregator.aggregate(data, axis, **kwargs)
        call_func.assert_called_once_with(data, axis=axis, **kwargs)

    def test_kwarg_pass_through_init_kwargs(self):
        call_func = mock.Mock()
        data = mock.sentinel.data
        axis = mock.sentinel.axis
        kwargs = dict(wibble="wobble", foo="bar")
        aggregator = Aggregator("", call_func, **kwargs)
        aggregator.aggregate(data, axis)
        call_func.assert_called_once_with(data, axis=axis, **kwargs)

    def test_kwarg_pass_through_combined_kwargs(self):
        call_func = mock.Mock()
        data = mock.sentinel.data
        axis = mock.sentinel.axis
        init_kwargs = dict(wibble="wobble", var=1.0)
        call_kwargs = dict(foo="foo", var=0.5)
        aggregator = Aggregator("", call_func, **init_kwargs)
        aggregator.aggregate(data, axis, **call_kwargs)
        expected_kwargs = init_kwargs.copy()
        expected_kwargs.update(call_kwargs)
        call_func.assert_called_once_with(data, axis=axis, **expected_kwargs)

    def test_mdtol_intercept(self):
        call_func = mock.Mock()
        data = mock.sentinel.data
        axis = mock.sentinel.axis
        aggregator = Aggregator("", call_func)
        aggregator.aggregate(data, axis, wibble="wobble", mdtol=0.8)
        call_func.assert_called_once_with(data, axis=axis, wibble="wobble")

    def test_no_lazy_func(self):
        dummy_agg = Aggregator("custom_op", lambda x: 1)
        expected = "custom_op aggregator does not support lazy operation"
        with self.assertRaisesRegex(LazyAggregatorError, expected):
            dummy_agg.lazy_aggregate(np.arange(10), axis=0)
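# As test_no_lazy_func shows, lazy_aggregate raises LazyAggregatorError
# unless the Aggregator was built with a lazy_func. A minimal sketch of
# a lazy-capable aggregator, assuming dask is installed (dask.array.mean
# mirrors numpy.mean's (data, axis) signature):
import dask.array as da
import numpy as np
from iris.analysis import Aggregator

LAZY_MEAN = Aggregator('mean', np.mean, lazy_func=da.mean)

# LAZY_MEAN.lazy_aggregate(some_dask_array, axis=0) now returns a lazy
# dask array instead of raising.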
def process(self, cube, weights=None):
    """Calculate weighted blend across the chosen coord, for either
    probabilistic or percentile data. If there is a percentile
    coordinate on the cube, it will blend using the
    PercentileBlendingAggregator, but the percentile coordinate must
    have at least two points.

    Args:
        cube (iris.cube.Cube):
            Cube to blend across the coord.
        weights (list, np.array or None):
            Blending weights, or None (equivalent to equal weights).

    Returns:
        result (iris.cube.Cube):
            Cube containing the weighted blend across the chosen coord.

    Raises:
        TypeError : If the first argument is not a cube.
        ValueError : If there is a percentile coord and it is not a
            dimension coord in the cube.
        ValueError : If there is a percentile dimension with only one
            point; we need at least two points in order to do the
            blending.
        ValueError : If there is more than one percentile coord in the
            cube.
        ValueError : If there is a percentile dimension on the cube and
            the mode for blending is 'weighted_maximum'.
        ValueError : If the weights shape does not match the dimension
            of the coord we are blending over.

    Warns:
        Warning : If trying to blend across a scalar coordinate with
            only one value. Returns the original cube in this case.
    """
    if not isinstance(cube, iris.cube.Cube):
        msg = ('The first argument must be an instance of iris.cube.Cube '
               'but is {}.'.format(type(cube)))
        raise TypeError(msg)

    # Check to see if the data is percentile data.
    try:
        perc_coord = find_percentile_coordinate(cube)
        perc_dim = cube.coord_dims(perc_coord.name())
        if not perc_dim:
            msg = ('The percentile coord must be a dimension '
                   'of the cube.')
            raise ValueError(msg)
        # Check the percentile coordinate has more than one point,
        # otherwise raise an error as we won't be able to blend.
        if len(perc_coord.points) < 2:
            msg = ('Percentile coordinate does not have enough points'
                   ' in order to blend. Must have at least 2 percentiles.')
            raise ValueError(msg)
    except CoordinateNotFoundError:
        perc_coord = None
        perc_dim = None

    # If we have a percentile dimension and the mode is
    # 'weighted_maximum' raise an exception.
    if perc_coord and self.mode == 'weighted_maximum':
        msg = ('The "weighted_maximum" mode cannot be used with'
               ' percentile data.')
        raise ValueError(msg)

    # Check that the weights array matches the coordinate shape, if
    # weights is not None.
    if weights is not None:
        if np.array(weights).shape != cube.coord(self.coord).points.shape:
            msg = ('The weights array must match the shape '
                   'of the coordinate in the input cube; '
                   'weight shape is {}, cube shape is {}'.format(
                       np.array(weights).shape,
                       cube.coord(self.coord).points.shape))
            raise ValueError(msg)

    # If the coord to blend over is a scalar coord, warn and return the
    # original cube.
    coord_dim = cube.coord_dims(self.coord)
    if not coord_dim:
        msg = ('Trying to blend across a scalar coordinate with only one'
               ' value. Returning original cube')
        warnings.warn(msg)
        result = cube
    else:
        try:
            cube.coord('threshold')
        except iris.exceptions.CoordinateNotFoundError:
            slices_over_threshold = [cube]
        else:
            if self.coord != 'threshold':
                slices_over_threshold = cube.slices_over('threshold')
            else:
                slices_over_threshold = [cube]

        cubelist = iris.cube.CubeList([])
        for cube_thres in slices_over_threshold:
            # Blend the cube across the coordinate.
            # Use the percentile aggregator if required.
            if perc_coord and self.mode == "weighted_mean":
                percentiles = np.array(perc_coord.points, dtype=float)
                perc_dim, = cube_thres.coord_dims(perc_coord.name())
                # Set equal weights if none are provided.
                if weights is None:
                    num = len(cube_thres.coord(self.coord).points)
                    weights = np.ones(num) / float(num)
                # Set up aggregator.
                PERCENTILE_BLEND = Aggregator(
                    'weighted_mean',
                    PercentileBlendingAggregator.aggregate)
                cube_new = cube_thres.collapsed(self.coord,
                                                PERCENTILE_BLEND,
                                                arr_percent=percentiles,
                                                arr_weights=weights,
                                                perc_dim=perc_dim)

            # Else do a simple weighted average.
            elif self.mode == "weighted_mean":
                # Equal weights are used as default.
                weights_array = None
                # Else broadcast the weights to be used by the aggregator.
                coord_dim_thres = cube_thres.coord_dims(self.coord)
                if weights is not None:
                    weights_array = iris.util.broadcast_to_shape(
                        np.array(weights), cube_thres.shape,
                        coord_dim_thres)
                orig_cell_methods = cube_thres.cell_methods
                # Calculate the weighted average.
                cube_new = cube_thres.collapsed(self.coord,
                                                iris.analysis.MEAN,
                                                weights=weights_array)
                # Update the name of the cell_method created by Iris to
                # 'weighted_mean' to be consistent.
                new_cell_methods = cube_new.cell_methods
                extra_cm = (set(new_cell_methods) -
                            set(orig_cell_methods)).pop()
                add_renamed_cell_method(cube_new, extra_cm,
                                        'weighted_mean')

            # Else use the maximum probability aggregator.
            elif self.mode == "weighted_maximum":
                # Set equal weights if none are provided.
                if weights is None:
                    num = len(cube_thres.coord(self.coord).points)
                    weights = np.ones(num) / float(num)
                # Set up aggregator.
                MAX_PROBABILITY = Aggregator(
                    'weighted_maximum',
                    MaxProbabilityAggregator.aggregate)
                cube_new = cube_thres.collapsed(self.coord,
                                                MAX_PROBABILITY,
                                                arr_weights=weights)

            cubelist.append(cube_new)

        result = cubelist.merge_cube()
        if isinstance(cubelist[0].data, np.ma.core.MaskedArray):
            result.data = np.ma.array(result.data)

    # If set, adjust the values of collapsed coordinates.
    if self.coord_adjust is not None:
        for crd in result.coords():
            if cube.coord_dims(crd.name()) == coord_dim:
                pnts = cube.coord(crd.name()).points
                crd.points = np.array(self.coord_adjust(pnts),
                                      dtype=crd.points.dtype)
    return result
class Test_aggregate(tests.IrisTest):
    # These unit tests don't call a data aggregation function, they call a
    # mocked one i.e. the return values of the mocked data aggregation
    # function don't matter, only how these are dealt with by the aggregate
    # method.
    def setUp(self):
        self.TEST = Aggregator('test', None)
        self.array = ma.array([[1, 2, 3],
                               [4, 5, 6]],
                              mask=[[False, True, False],
                                    [True, False, False]],
                              dtype=np.float64)
        self.expected_result_axis0 = ma.array([1, 2, 3], mask=None)
        self.expected_result_axis1 = ma.array([4, 5], mask=None)

    def test_masked_notol(self):
        # Providing masked array with no tolerance keyword (mdtol) provided.
        axis = 0
        mock_return = self.expected_result_axis0.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis)
        self.assertMaskedArrayEqual(result, self.expected_result_axis0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_above_tol(self):
        # Providing masked array with a high tolerance (mdtol) provided.
        axis = 0
        mock_return = self.expected_result_axis0.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.55)
        self.assertMaskedArrayEqual(result, self.expected_result_axis0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.55)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_below_tol(self):
        # Providing masked array with a tolerance on missing values, low
        # enough to modify the resulting mask for axis 0.
        axis = 0
        result_axis_0 = self.expected_result_axis0.copy()
        result_axis_0.mask = np.array([True, True, False])
        mock_return = ma.array([1, 2, 3], mask=None)
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayAlmostEqual(result, result_axis_0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_masked_below_tol_alt(self):
        # Providing masked array with a tolerance on missing values, low
        # enough to modify the resulting mask for axis 1.
        axis = 1
        result_axis_1 = self.expected_result_axis1.copy()
        result_axis_1.mask = np.array([True, True])
        mock_return = self.expected_result_axis1.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.1)
        self.assertMaskedArrayAlmostEqual(result, result_axis_1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_unmasked_with_mdtol(self):
        # Providing aggregator with an unmasked array and tolerance specified
        # for missing data - ensure that result is unaffected.
        data = self.array.data

        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0.5)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0.5)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

    def test_unmasked(self):
        # Providing aggregator with an unmasked array and no additional
        # keyword arguments - ensure that result is unaffected.
        data = self.array.data

        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis)
        self.assertArrayAlmostEqual(result, mock_return.copy())
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_scalar_mdtol(self):
        # Test the case when the data aggregation function returns a scalar
        # and the aggregate method turns it into a masked array.
        axis = -1
        data = self.array.flatten()
        mock_return = 2
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=1)
        self.assertMaskedArrayEqual(result, ma.array(2, mask=False))
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_scalar_mdtol_alt(self):
        # Test the case when the data aggregation function returns a scalar
        # with no tolerance for missing data values and the aggregate method
        # turns it into a masked array.
        axis = -1
        data = self.array.flatten()
        mock_return = 2
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(data, axis, mdtol=0)
        self.assertMaskedArrayEqual(result, ma.array(2, mask=True))
        mock_method.assert_called_once_with(data, axis=axis)

    def test_returning_non_masked_array_from_masked_array(self):
        # Providing a masked array, call_func returning a non-masked array,
        # resulting in a masked array output.
        axis = 0
        mock_return = self.expected_result_axis0.data.copy()
        result_axis_0 = ma.array(mock_return, mask=[True, True, False])
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayAlmostEqual(result, result_axis_0)
        mock_method.assert_called_once_with(self.array, axis=axis)

        axis = 1
        mock_return = self.expected_result_axis1.data.copy()
        with patch.object(self.TEST, 'call_func',
                          return_value=mock_return) as mock_method:
            result = self.TEST.aggregate(self.array, axis, mdtol=.45)
        self.assertMaskedArrayEqual(result, self.expected_result_axis1)
        mock_method.assert_called_once_with(self.array, axis=axis)

    def test_kwarg_pass_through_no_kwargs(self):
        call_func = Mock()
        data = sentinel.data
        axis = sentinel.axis
        aggregator = Aggregator('', call_func)
        aggregator.aggregate(data, axis)
        call_func.assert_called_once_with(data, axis=axis)

    def test_kwarg_pass_through_call_kwargs(self):
        call_func = Mock()
        data = sentinel.data
        axis = sentinel.axis
        kwargs = dict(wibble='wobble', foo='bar')
        aggregator = Aggregator('', call_func)
        aggregator.aggregate(data, axis, **kwargs)
        call_func.assert_called_once_with(data, axis=axis, **kwargs)

    def test_kwarg_pass_through_init_kwargs(self):
        call_func = Mock()
        data = sentinel.data
        axis = sentinel.axis
        kwargs = dict(wibble='wobble', foo='bar')
        aggregator = Aggregator('', call_func, **kwargs)
        aggregator.aggregate(data, axis)
        call_func.assert_called_once_with(data, axis=axis, **kwargs)

    def test_kwarg_pass_through_combined_kwargs(self):
        call_func = Mock()
        data = sentinel.data
        axis = sentinel.axis
        init_kwargs = dict(wibble='wobble', var=1.0)
        call_kwargs = dict(foo='foo', var=0.5)
        aggregator = Aggregator('', call_func, **init_kwargs)
        aggregator.aggregate(data, axis, **call_kwargs)
        expected_kwargs = init_kwargs.copy()
        expected_kwargs.update(call_kwargs)
        call_func.assert_called_once_with(data, axis=axis, **expected_kwargs)

    def test_mdtol_intercept(self):
        call_func = Mock()
        data = sentinel.data
        axis = sentinel.axis
        aggregator = Aggregator('', call_func)
        aggregator.aggregate(data, axis, wibble='wobble', mdtol=0.8)
        call_func.assert_called_once_with(data, axis=axis, wibble='wobble')

    def test_no_lazy_func(self):
        dummy_agg = Aggregator('custom_op', lambda x: 1)
        expected = 'custom_op aggregator does not support lazy operation'
        with self.assertRaisesRegexp(LazyAggregatorError, expected):
            dummy_agg.lazy_aggregate(np.arange(10), axis=0)
def consecutive_dry_days(cube, period='year', length=6, threshold=1.):
    """
    Calculate consecutive dry days within an iris.cube.Cube.

    Args:

    * cube (iris.cube.Cube):
        An iris.cube.Cube holding precipitation amount in mm/day

    * period (string):
        Period over which the CDD will be calculated. Can be 'year',
        'season' or 'month'. If period is 'season' or 'month' the CDD
        will be averaged over the years

    Kwargs:

    * length (int):
        The number of days without rainfall that define a dry period

    * threshold (float):
        The upper limit of daily rainfall in mm that indicates
        'no precipitation'

    Returns:
        An iris.cube.CubeList that holds two iris.cube.Cubes with the
        longest period of dry days in the given period and the mean of
        the number of dry periods with respect to the given length
    """
    def _cdd_index(array, axis, threshold):
        """
        Calculate the consecutive dry days index.

        This function is used as an iris.analysis.Aggregator.

        Args:

        * array (numpy.array or numpy.ma.array):
            array that holds the precipitation data

        * axis (int):
            the number of the time-axis

        * threshold (float):
            the threshold that indicates a precipitation-less day

        Returns:
            the aggregation result, collapsing the 'axis' dimension of
            the 'array' argument
        """
        from pycat.analysis.utils import (_get_max_true_block_length,
                                          _get_true_block_lengths)

        up_down = _get_true_block_lengths(array < threshold, axis)
        return _get_max_true_block_length(up_down)

    def _cdd_periods(array, axis, threshold, length):
        """
        Calculate the number of consecutive dry days periods.

        This function is used as an iris.analysis.Aggregator.

        Args:

        * array (numpy.array or numpy.ma.array):
            array that holds the precipitation data

        * axis (int):
            the number of the time-axis

        * threshold (float):
            the threshold that indicates a precipitation-less day

        * length (int):
            number of days that a dry period must last

        Returns:
            the aggregation result, collapsing the 'axis' dimension of
            the 'array' argument
        """
        from pycat.analysis.utils import (_get_len_true_block_length,
                                          _get_true_block_lengths)

        up_down = _get_true_block_lengths(array < threshold, axis)
        return _get_len_true_block_length(up_down, length)

    # build the iris.analysis.Aggregators
    cdd_index = Aggregator('cdd_index', _cdd_index)
    cdd_periods = Aggregator('cdd_periods', _cdd_periods)

    # check if the cube already has the needed auxiliary coordinates
    if period == 'season':
        # add the season_year auxiliary coordinate
        try:
            years = np.unique(cube.coord('season_year').points)
        except CoordinateNotFoundError:
            ccat.add_season_year(cube, 'time')
            years = np.unique(cube.coord('season_year').points)
        constraint_year_key = 'season_year'
    else:
        # add calendar years
        try:
            years = np.unique(cube.coord('year').points)
        except CoordinateNotFoundError:
            ccat.add_year(cube, 'time')
            years = np.unique(cube.coord('year').points)
        constraint_year_key = 'year'

    if period in ['season', 'month']:
        try:
            index_period = np.unique(cube.coord('%s_number' % period).points)
        except CoordinateNotFoundError:
            cat = getattr(ccat, 'add_%s_number' % period)
            cat(cube, 'time')
            index_period = np.unique(cube.coord('%s_number' % period).points)

    # create time-axis of resulting cubes
    time_dimension = _make_time_dimension(
        cube.coord('time').units.num2date(cube.coord('time').points[0]),
        cube.coord('time').units.num2date(cube.coord('time').points[-1]),
        period=period)

    # create the empty resulting cubes
    dim_coords_and_dims = []
    slices = []
    for coord in cube.dim_coords:
        if coord.units.is_time_reference():
            dim_coords_and_dims.append(
                (time_dimension, cube.coord_dims(coord)))
            slices.append(0)
            time_axis = cube.coord_dims(coord)[0]
        else:
            dim_coords_and_dims.append((coord, cube.coord_dims(coord)))
            slices.append(slice(None, None, None))

    cdd_index_cube = _create_cube(
        long_name='Consecutive dry days is the greatest number of '
                  'consecutive days per time period with daily '
                  'precipitation amount below %s mm.' % threshold,
        var_name='consecutive_dry_days_index_per_time_period',
        units=iris.unit.Unit('1'),
        dim_coords_and_dims=dim_coords_and_dims)
    cdd_periods_cube = _create_cube(
        long_name='Number of cdd periods in given time period '
                  'with more than %d days.' % length,
        var_name='number_of_cdd_periods_with_more_than_'
                 '%ddays_per_time_period' % length,
        units=iris.unit.Unit('1'),
        dim_coords_and_dims=dim_coords_and_dims)

    # differentiate between the considered period
    if period == 'year':
        # just run the aggregation over all given years resulting in
        # the maximum cdd length and the number of cdd periods for
        # each year
        for year in years:
            tmp_cube = cube.extract(iris.Constraint(year=year))
            slices[time_axis] = year - years[0]
            cdd_index_data = tmp_cube.collapsed(
                'time', cdd_index, threshold=threshold).data
            cdd_periods_data = tmp_cube.collapsed(
                'time', cdd_periods, threshold=threshold,
                length=length).data
            cdd_index_cube.data[tuple(slices)] = cdd_index_data
            cdd_periods_cube.data[tuple(slices)] = cdd_periods_data

        return iris.cube.CubeList((cdd_index_cube, cdd_periods_cube))

    else:
        # run the aggregation over all seasons/months of all years;
        # afterwards aggregate the seasons/months by the MAX Aggregator
        # for the cdd_index and the MEAN Aggregator for cdd_periods
        for year in years:
            for p in index_period:
                constraint_dict = {'%s_number' % period: p,
                                   constraint_year_key: year}
                tmp_cube = cube.extract(iris.Constraint(**constraint_dict))
                if tmp_cube:
                    # the extraction can lead to empty cubes for seasons
                    # in the last year
                    time_index = (year - years[0]) * len(index_period) + p
                    # month numbers start at 1
                    if period == 'month':
                        time_index -= 1
                    slices[time_axis] = time_index
                    cdd_index_data = tmp_cube.collapsed(
                        'time', cdd_index, threshold=threshold).data
                    cdd_periods_data = tmp_cube.collapsed(
                        'time', cdd_periods, threshold=threshold,
                        length=length).data
                    cdd_index_cube.data[tuple(slices)] = cdd_index_data
                    cdd_periods_cube.data[tuple(slices)] = cdd_periods_data

        # aggregate over seasons/months
        cat = getattr(ccat, 'add_%s' % period)
        cat(cdd_index_cube, 'time')
        cat(cdd_periods_cube, 'time')
        cdd_index_mean = cdd_index_cube.aggregated_by(
            period, iris.analysis.MEAN)
        cdd_periods_mean = cdd_periods_cube.aggregated_by(
            period, iris.analysis.MEAN)
        cdd_index_mean.remove_coord('time')
        cdd_periods_mean.remove_coord('time')

        return iris.cube.CubeList((cdd_index_mean, cdd_periods_mean))
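# A minimal usage sketch for consecutive_dry_days, assuming a daily
# precipitation cube in mm/day (the file name is hypothetical):
import iris

pr_cube = iris.load_cube('precipitation_daily.nc')

# Longest dry spell per season, plus the mean number of dry periods of
# at least 5 days, counting days below 0.5 mm as dry.
cdd_index_cube, cdd_periods_cube = consecutive_dry_days(
    pr_cube, period='season', length=5, threshold=0.5)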