def test_fuzziness_with_one_dimensional_weights(self):
    """Test a simple case where we have some fuzziness in the spatial
    weights and with adjustment from the one_dimensional weights."""
    plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=2)
    expected_result = np.array(
        [
            [[0.1, 0.0, 0.1], [0.14142136, 0.1, 0.14142136]],
            [[0.0, 0.0, 0.25], [0.0, 0.25, 0.35355338]],
            [[0.3, 0.3, 0.3], [0.3, 0.3, 0.3]],
        ],
        dtype=np.float32,
    )
    result = plugin.process(
        self.cube_to_collapse,
        self.one_dimensional_weights_cube,
        "forecast_reference_time",
    )
    self.assertArrayAlmostEqual(result.data, expected_result)
    self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
def _update_spatial_weights(self, cube, weights, fuzzy_length):
    """
    Update weights using spatial information

    Args:
        cube (iris.cube.Cube):
            Cube of input data to be blended
        weights (iris.cube.Cube):
            Initial 1D cube of weights scaled by self.weighting_coord
        fuzzy_length (float):
            Distance (in metres) over which to smooth weights at domain
            boundaries

    Returns:
        weights (iris.cube.Cube):
            Updated 3D cube of spatially-varying weights
    """
    check_if_grid_is_equal_area(cube)
    grid_cells_x, _ = convert_distance_into_number_of_grid_cells(
        cube, fuzzy_length, int_grid_cells=False)
    SpatialWeightsPlugin = SpatiallyVaryingWeightsFromMask(grid_cells_x)
    weights = SpatialWeightsPlugin.process(cube, weights, self.blend_coord)
    return weights
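# Illustrative sketch only, not part of the plugin: the fuzzy length is given
# in metres and converted to a (possibly fractional) number of grid cells by
# convert_distance_into_number_of_grid_cells before being passed to
# SpatiallyVaryingWeightsFromMask. The arithmetic below assumes a hypothetical
# equal-area grid with 2000 m spacing in both x and y; the real function reads
# the spacing from the cube and raises an error if x and y spacings differ.
fuzzy_length_metres = 20000.0          # CLI default for --fuzzy_length
assumed_grid_spacing_metres = 2000.0   # example spacing, assumption only
grid_cells_x = fuzzy_length_metres / assumed_grid_spacing_metres
# grid_cells_x == 10.0; with int_grid_cells=False the result need not be an
# integer, e.g. a 5000 m fuzzy length on this grid would give 2.5 cells.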
class Test_process(IrisTest):
    """Test process method"""

    def setUp(self):
        """
        Set up a basic cube and linear weights cube for the process
        method. Input cube has 2 thresholds and 3 forecast_reference_times
        """
        thresholds = [10, 20]
        data = np.ones((2, 2, 3), dtype=np.float32)
        cycle1 = set_up_probability_cube(
            data,
            thresholds,
            spatial_grid="equalarea",
            time=datetime(2017, 11, 10, 4, 0),
            frt=datetime(2017, 11, 10, 0, 0),
        )
        cycle2 = set_up_probability_cube(
            data,
            thresholds,
            spatial_grid="equalarea",
            time=datetime(2017, 11, 10, 4, 0),
            frt=datetime(2017, 11, 10, 1, 0),
        )
        cycle3 = set_up_probability_cube(
            data,
            thresholds,
            spatial_grid="equalarea",
            time=datetime(2017, 11, 10, 4, 0),
            frt=datetime(2017, 11, 10, 2, 0),
        )
        self.cube_to_collapse = CubeList([cycle1, cycle2, cycle3]).merge_cube()
        self.cube_to_collapse = squeeze(self.cube_to_collapse)
        self.cube_to_collapse.rename("weights")
        # This input array has 3 forecast reference times and 2 thresholds.
        # The two thresholds have the same weights.
        self.cube_to_collapse.data = np.array(
            [
                [[[1, 0, 1], [1, 1, 1]], [[1, 0, 1], [1, 1, 1]]],
                [[[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]]],
                [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
            ],
            dtype=np.float32,
        )
        self.cube_to_collapse.data = np.ma.masked_equal(
            self.cube_to_collapse.data, 0)
        # Create a one_dimensional weights cube by slicing the larger
        # weights cube.
        # The resulting cube only has a forecast_reference_time coordinate.
        self.one_dimensional_weights_cube = self.cube_to_collapse[:, 0, 0, 0]
        self.one_dimensional_weights_cube.remove_coord(
            "projection_x_coordinate")
        self.one_dimensional_weights_cube.remove_coord(
            "projection_y_coordinate")
        self.one_dimensional_weights_cube.remove_coord(
            find_threshold_coordinate(self.one_dimensional_weights_cube))
        self.one_dimensional_weights_cube.data = np.array(
            [0.2, 0.5, 0.3], dtype=np.float32)
        self.plugin = SpatiallyVaryingWeightsFromMask(
            "forecast_reference_time", fuzzy_length=2)
        self.plugin_no_fuzzy = SpatiallyVaryingWeightsFromMask(
            "forecast_reference_time", fuzzy_length=1)

    @ManageWarnings(record=True)
    def test_none_masked(self, warning_list=None):
        """Test when we have no masked data in the input cube."""
        self.cube_to_collapse.data = np.ones(self.cube_to_collapse.data.shape)
        self.cube_to_collapse.data = np.ma.masked_equal(
            self.cube_to_collapse.data, 0)
        expected_data = np.array(
            [
                [[0.2, 0.2, 0.2], [0.2, 0.2, 0.2]],
                [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
                [[0.3, 0.3, 0.3], [0.3, 0.3, 0.3]],
            ],
            dtype=np.float32,
        )
        message = "Expected masked input"
        result = self.plugin.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertTrue(any(message in str(item) for item in warning_list))
        self.assertArrayEqual(result.data, expected_data)
        self.assertEqual(result.dtype, np.float32)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_all_masked(self):
        """Test when we have all masked data in the input cube."""
        self.cube_to_collapse.data = np.ones(self.cube_to_collapse.data.shape)
        self.cube_to_collapse.data = np.ma.masked_equal(
            self.cube_to_collapse.data, 1)
        result = self.plugin.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        expected_data = np.zeros((3, 2, 3))
        self.assertArrayAlmostEqual(expected_data, result.data)
        self.assertTrue(result.metadata, self.cube_to_collapse.data)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_no_fuzziness_no_one_dimensional_weights(self):
        """Test a simple case where we have no fuzziness in the spatial
        weights and no adjustment from the one_dimensional weights."""
        self.one_dimensional_weights_cube.data = np.ones((3))
        expected_result = np.array(
            [
                [[0.5, 0.0, 0.333333], [0.5, 0.333333, 0.333333]],
                [[0.0, 0.0, 0.333333], [0.0, 0.333333, 0.333333]],
                [[0.5, 1.0, 0.333333], [0.5, 0.333333, 0.333333]],
            ],
            dtype=np.float32,
        )
        result = self.plugin_no_fuzzy.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_no_fuzziness_no_one_dimensional_weights_transpose(self):
        """Test a simple case where we have no fuzziness in the spatial
        weights and no adjustment from the one_dimensional weights and
        transpose the input cube."""
        self.one_dimensional_weights_cube.data = np.ones((3))
        expected_result = np.array(
            [
                [[0.5, 0.0, 0.333333], [0.5, 0.333333, 0.333333]],
                [[0.0, 0.0, 0.333333], [0.0, 0.333333, 0.333333]],
                [[0.5, 1.0, 0.333333], [0.5, 0.333333, 0.333333]],
            ],
            dtype=np.float32,
        )
        self.cube_to_collapse.transpose([2, 0, 1, 3])
        result = self.plugin_no_fuzzy.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_no_fuzziness_with_one_dimensional_weights(self):
        """Test a simple case where we have no fuzziness in the spatial
        weights and an adjustment from the one_dimensional weights."""
        expected_result = np.array(
            [
                [[0.4, 0.0, 0.2], [0.4, 0.2, 0.2]],
                [[0.0, 0.0, 0.5], [0.0, 0.5, 0.5]],
                [[0.6, 1.0, 0.3], [0.6, 0.3, 0.3]],
            ],
            dtype=np.float32,
        )
        result = self.plugin_no_fuzzy.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_fuzziness_no_one_dimensional_weights(self):
        """Test a simple case where we have some fuzziness in the spatial
        weights and no adjustment from the one_dimensional weights."""
        self.one_dimensional_weights_cube.data = np.ones((3))
        expected_result = np.array(
            [
                [[0.25, 0.0, 0.166667], [0.353553, 0.166667, 0.235702]],
                [[0.00, 0.0, 0.166667], [0.000000, 0.166667, 0.235702]],
                [[0.75, 1.0, 0.666667], [0.646447, 0.666667, 0.528595]],
            ],
            dtype=np.float32,
        )
        result = self.plugin.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_fuzziness_with_one_dimensional_weights(self):
        """Test a simple case where we have some fuzziness in the spatial
        weights and with adjustment from the one_dimensional weights."""
        expected_result = np.array(
            [
                [[0.2, 0.0, 0.10], [0.282843, 0.10, 0.141421]],
                [[0.0, 0.0, 0.25], [0.000000, 0.25, 0.353553]],
                [[0.8, 1.0, 0.65], [0.717157, 0.65, 0.505025]],
            ],
            dtype=np.float32,
        )
        result = self.plugin.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    def test_fuzziness_with_unequal_weightings(self):
        """Simulate the case of two models and a nowcast at short lead
        times: two unmasked slices with low weights, and one masked slice
        with high weights."""
        self.cube_to_collapse.data[0].mask = np.full_like(
            self.cube_to_collapse.data[0], False)
        self.one_dimensional_weights_cube.data = np.array(
            [0.025, 1.0, 0.075], dtype=np.float32)
        expected_data = np.array(
            [
                [[0.25, 0.25, 0.136364], [0.25, 0.136364, 0.0892939]],
                [[0.0, 0.0, 0.45454544], [0.0, 0.454545, 0.642824]],
                [[0.75, 0.75, 0.409091], [0.75, 0.409091, 0.267882]],
            ],
            dtype=np.float32,
        )
        result = self.plugin.process(
            self.cube_to_collapse, self.one_dimensional_weights_cube)
        self.assertArrayAlmostEqual(result.data, expected_data)
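# Illustrative sketch, not part of the test suite: the arithmetic that the
# no-fuzziness tests above encode. At each grid point the 1D weights of the
# masked slices are zeroed and the remaining weights are renormalised to sum
# to one. Names below are for illustration only.
import numpy as np

one_dimensional_weights = np.array([0.2, 0.5, 0.3], dtype=np.float32)
# Mask state at the (0, 0) grid point, where the second forecast reference
# time is masked in the setUp data:
point_mask = np.array([False, True, False])
point_weights = np.where(point_mask, 0.0, one_dimensional_weights)
point_weights = point_weights / point_weights.sum()
# point_weights -> [0.4, 0.0, 0.6], matching that grid point in
# test_no_fuzziness_with_one_dimensional_weights above.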
def main(argv=None):
    """Load in arguments and ensure they are set correctly. Then load in
    the data to blend and calculate default weights using the method
    chosen before carrying out the blending."""
    parser = ArgParser(
        description='Calculate the default weights to apply in weighted '
        'blending plugins using the ChooseDefaultWeightsLinear or '
        'ChooseDefaultWeightsNonLinear plugins. Then apply these weights to '
        'the dataset using the BasicWeightedAverage plugin. Required for '
        'ChooseDefaultWeightsLinear: y0val and ynval. Required for '
        'ChooseDefaultWeightsNonLinear: cval. Required for '
        'ChooseWeightsLinear with dict: wts_dict.')

    parser.add_argument('--wts_calc_method',
                        metavar='WEIGHTS_CALCULATION_METHOD',
                        choices=['linear', 'nonlinear', 'dict'],
                        default='linear',
                        help='Method to use to calculate weights used in '
                             'blending. "linear" (default): calculate '
                             'linearly varying blending weights. '
                             '"nonlinear": calculate blending weights that '
                             'decrease exponentially with increasing '
                             'blending coordinate. "dict": calculate weights '
                             'using a dictionary passed in as a command line '
                             'argument.')
    parser.add_argument('coordinate', type=str,
                        metavar='COORDINATE_TO_AVERAGE_OVER',
                        help='The coordinate over which the blending '
                             'will be applied.')
    parser.add_argument('--coordinate_unit', metavar='UNIT_STRING',
                        default='hours since 1970-01-01 00:00:00',
                        help='Units for blending coordinate. Default= '
                             'hours since 1970-01-01 00:00:00')
    parser.add_argument('--calendar', metavar='CALENDAR',
                        help='Calendar for time coordinate. '
                             'Default=gregorian')
    parser.add_argument('--cycletime', metavar='CYCLETIME', type=str,
                        help='The forecast reference time to be used after '
                             'blending has been applied, in the format '
                             'YYYYMMDDTHHMMZ. If not provided, the blended '
                             'file will take the latest available forecast '
                             'reference time from the input cubes supplied.')
    parser.add_argument('--model_id_attr', metavar='MODEL_ID_ATTR', type=str,
                        default="mosg__model_configuration",
                        help='The name of the netCDF file attribute to be '
                             'used to identify the source model for '
                             'multi-model blends. Default assumes Met Office '
                             'model metadata. Must be present on all input '
                             'files if blending over models.')
    parser.add_argument('--spatial_weights_from_mask',
                        action='store_true', default=False,
                        help='If set, this option will result in the '
                             'generation of spatially varying weights based '
                             'on the masks of the data we are blending. The '
                             'one-dimensional weights are first calculated '
                             'using the chosen weights calculation method, '
                             'but the weights will then be adjusted '
                             'spatially based on where there is masked data '
                             'in the data we are blending. The spatial '
                             'weights are calculated using the '
                             'SpatiallyVaryingWeightsFromMask plugin.')
    parser.add_argument('weighting_mode', metavar='WEIGHTED_BLEND_MODE',
                        choices=['weighted_mean', 'weighted_maximum'],
                        help='The method used in the weighted blend. '
                             '"weighted_mean": calculate a normal weighted '
                             'mean across the coordinate. '
                             '"weighted_maximum": multiplies the values in '
                             'the coordinate by the weights, and then takes '
                             'the maximum.')
    parser.add_argument('input_filepaths', metavar='INPUT_FILES', nargs="+",
                        help='Paths to input files to be blended.')
    parser.add_argument('output_filepath', metavar='OUTPUT_FILE',
                        help='The output path for the processed NetCDF.')

    spatial = parser.add_argument_group(
        'Spatial weights from mask options',
        'Options for calculating the spatial weights using the '
        'SpatiallyVaryingWeightsFromMask plugin.')
    spatial.add_argument('--fuzzy_length', metavar='FUZZY_LENGTH', type=float,
                         default=20000,
                         help='When calculating spatially varying weights we '
                              'can smooth the weights so that areas close to '
                              'areas that are masked have lower weights than '
                              'those further away. This fuzzy length '
                              'controls the scale over which the weights are '
                              'smoothed. The fuzzy length is in terms of m, '
                              'the default is 20km. This distance is then '
                              'converted into a number of grid squares, '
                              'which does not have to be an integer. Assumes '
                              'the grid spacing is the same in the x and y '
                              'directions, and raises an error if this is '
                              'not true. See SpatiallyVaryingWeightsFromMask '
                              'for more detail.')

    linear = parser.add_argument_group(
        'linear weights options',
        'Options for the linear weights calculation in '
        'ChooseDefaultWeightsLinear')
    linear.add_argument('--y0val', metavar='LINEAR_STARTING_POINT',
                        type=float,
                        help='The relative value of the weighting start '
                             'point (lowest value of blend coord) for '
                             'choosing default linear weights. This must be '
                             'a positive float or 0.')
    linear.add_argument('--ynval', metavar='LINEAR_END_POINT', type=float,
                        help='The relative value of the weighting end point '
                             '(highest value of blend coord) for choosing '
                             'default linear weights. This must be a '
                             'positive float or 0. Note that if blending '
                             'over forecast reference time, ynval >= y0val '
                             'would normally be expected (to give greater '
                             'weight to the more recent forecast).')

    nonlinear = parser.add_argument_group(
        'nonlinear weights options',
        'Options for the non-linear weights calculation in '
        'ChooseDefaultWeightsNonLinear')
    nonlinear.add_argument('--cval', metavar='NON_LINEAR_FACTOR', type=float,
                           help='Factor used to determine how skewed the '
                                'non-linear weights will be. A value of 1 '
                                'implies equal weighting. If not set, a '
                                'default value of cval=0.85 is set.')

    wts_dict = parser.add_argument_group(
        'dict weights options',
        'Options for linear weights to be calculated based on parameters '
        'read from a json file dict')
    wts_dict.add_argument('--wts_dict', metavar='WEIGHTS_DICTIONARY',
                          help='Path to json file containing dictionary '
                               'from which to calculate blending weights. '
                               'Dictionary format is as specified in the '
                               'improver.blending.weights.'
                               'ChooseWeightsLinear plugin.')
    wts_dict.add_argument('--weighting_coord', metavar='WEIGHTING_COORD',
                          default='forecast_period',
                          help='Name of coordinate over which linear weights '
                               'should be scaled. This coordinate must be '
                               'available in the weights dictionary.')

    args = parser.parse_args(args=argv)

    # if the linear weights method is called with non-linear args or vice
    # versa, exit with error
    if (args.wts_calc_method == "linear") and args.cval:
        parser.wrong_args_error('cval', 'linear')
    if ((args.wts_calc_method == "nonlinear")
            and np.any([args.y0val, args.ynval])):
        parser.wrong_args_error('y0val, ynval', 'non-linear')
    if (args.wts_calc_method == "dict") and not args.wts_dict:
        parser.error('Dictionary is required if --wts_calc_method="dict"')

    # set blending coordinate units
    if "time" in args.coordinate:
        coord_unit = Unit(args.coordinate_unit, args.calendar)
    elif args.coordinate_unit != 'hours since 1970-01-01 00:00:00.':
        coord_unit = args.coordinate_unit
    else:
        coord_unit = 'no_unit'

    # For blending across models, only blending across "model_id" is directly
    # supported. This is because the blending coordinate must be sortable, in
    # order to ensure that the data cube and the weights cube have coordinates
    # in the same order for blending. Whilst the model_configuration is
    # sortable itself, as it is associated with model_id, which is the
    # dimension coordinate, sorting the model_configuration coordinate can
    # result in the model_id coordinate becoming non-monotonic. As dimension
    # coordinates must be monotonic, this leads to the model_id coordinate
    # being demoted to an auxiliary coordinate. Therefore, for simplicity
    # model_id is used as the blending coordinate, instead of
    # model_configuration.
    # TODO: Support model_configuration as a blending coordinate directly.
    if args.coordinate == "model_configuration":
        blend_coord = "model_id"
        dict_coord = "model_configuration"
    else:
        blend_coord = args.coordinate
        dict_coord = args.coordinate

    # load cubes to be blended
    cubelist = load_cubelist(args.input_filepaths)

    # determine whether or not to equalise forecast periods for model
    # blending weights calculation
    weighting_coord = (args.weighting_coord if args.weighting_coord
                       else "forecast_period")

    # prepare cubes for weighted blending
    merger = MergeCubesForWeightedBlending(
        blend_coord, weighting_coord=weighting_coord,
        model_id_attr=args.model_id_attr)
    cube = merger.process(cubelist, cycletime=args.cycletime)

    # if the coord for blending does not exist or has only one value,
    # update metadata only
    coord_names = [coord.name() for coord in cube.coords()]
    if (blend_coord not in coord_names) or (
            len(cube.coord(blend_coord).points) == 1):
        result = cube.copy()
        conform_metadata(result, cube, blend_coord, cycletime=args.cycletime)
        # raise a warning if this happened because the blend coordinate
        # doesn't exist
        if blend_coord not in coord_names:
            warnings.warn('Blend coordinate {} is not present on input '
                          'data'.format(blend_coord))

    # otherwise, calculate weights and blend across specified dimension
    else:
        weights = calculate_blending_weights(
            cube, blend_coord, args.wts_calc_method,
            wts_dict=args.wts_dict, weighting_coord=args.weighting_coord,
            coord_unit=coord_unit, y0val=args.y0val, ynval=args.ynval,
            cval=args.cval, dict_coord=dict_coord)

        if args.spatial_weights_from_mask:
            check_if_grid_is_equal_area(cube)
            grid_cells_x, _ = convert_distance_into_number_of_grid_cells(
                cube, args.fuzzy_length, int_grid_cells=False)
            SpatialWeightsPlugin = SpatiallyVaryingWeightsFromMask(
                grid_cells_x)
            weights = SpatialWeightsPlugin.process(cube, weights, blend_coord)

        # blend across specified dimension
        BlendingPlugin = WeightedBlendAcrossWholeDimension(
            blend_coord, args.weighting_mode, cycletime=args.cycletime)
        result = BlendingPlugin.process(cube, weights=weights)

    save_netcdf(result, args.output_filepath)
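# Hedged usage sketch, not the repository's actual entry point. The function
# name and file paths below are hypothetical; the positional arguments are
# COORDINATE_TO_AVERAGE_OVER, WEIGHTED_BLEND_MODE, INPUT_FILES..., OUTPUT_FILE,
# and y0val/ynval are required for the default linear weights method.
def _example_blend():
    """Blend three hypothetical cycles over forecast_reference_time using
    linear weights and mask-based spatially varying weights."""
    main(argv=['forecast_reference_time', 'weighted_mean',
               'cycle1.nc', 'cycle2.nc', 'cycle3.nc', 'blended.nc',
               '--y0val', '1.0', '--ynval', '4.0',
               '--spatial_weights_from_mask', '--fuzzy_length', '20000'])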
class Test_process(IrisTest):
    """Test process method"""

    def setUp(self):
        """
        Set up a basic cube and linear weights cube for the process
        method. Input cube has 2 thresholds and 3 forecast_reference_times
        """
        thresholds = [10, 20]
        data = np.ones((2, 2, 3), dtype=np.float32)
        cycle1 = set_up_probability_cube(
            data, thresholds, spatial_grid="equalarea",
            time=datetime(2017, 11, 10, 4, 0),
            frt=datetime(2017, 11, 10, 0, 0))
        cycle2 = set_up_probability_cube(
            data, thresholds, spatial_grid="equalarea",
            time=datetime(2017, 11, 10, 4, 0),
            frt=datetime(2017, 11, 10, 1, 0))
        cycle3 = set_up_probability_cube(
            data, thresholds, spatial_grid="equalarea",
            time=datetime(2017, 11, 10, 4, 0),
            frt=datetime(2017, 11, 10, 2, 0))
        self.cube_to_collapse = CubeList([cycle1, cycle2, cycle3]).merge_cube()
        self.cube_to_collapse = squeeze(self.cube_to_collapse)
        self.cube_to_collapse.rename("weights")
        # This input array has 3 forecast reference times and 2 thresholds.
        # The two thresholds have the same weights.
        self.cube_to_collapse.data = np.array(
            [[[[1, 0, 1], [1, 1, 1]],
              [[1, 0, 1], [1, 1, 1]]],
             [[[0, 0, 1], [0, 1, 1]],
              [[0, 0, 1], [0, 1, 1]]],
             [[[1, 1, 1], [1, 1, 1]],
              [[1, 1, 1], [1, 1, 1]]]],
            dtype=np.float32)
        self.cube_to_collapse.data = np.ma.masked_equal(
            self.cube_to_collapse.data, 0)
        # Create a one_dimensional weights cube by slicing the larger
        # weights cube.
        # The resulting cube only has a forecast_reference_time coordinate.
        self.one_dimensional_weights_cube = self.cube_to_collapse[:, 0, 0, 0]
        self.one_dimensional_weights_cube.remove_coord(
            "projection_x_coordinate")
        self.one_dimensional_weights_cube.remove_coord(
            "projection_y_coordinate")
        self.one_dimensional_weights_cube.remove_coord(
            find_threshold_coordinate(self.one_dimensional_weights_cube))
        self.one_dimensional_weights_cube.data = np.array(
            [0.2, 0.5, 0.3], dtype=np.float32)
        self.plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=4)

    @ManageWarnings(record=True)
    def test_none_masked(self, warning_list=None):
        """Test when we have no masked data in the input cube."""
        self.cube_to_collapse.data = np.ones(self.cube_to_collapse.data.shape)
        self.cube_to_collapse.data = np.ma.masked_equal(
            self.cube_to_collapse.data, 0)
        expected_data = np.array(
            [[[0.2, 0.2, 0.2], [0.2, 0.2, 0.2]],
             [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
             [[0.3, 0.3, 0.3], [0.3, 0.3, 0.3]]],
            dtype=np.float32)
        message = ("Input cube to SpatiallyVaryingWeightsFromMask "
                   "must be masked")
        result = self.plugin.process(self.cube_to_collapse,
                                     self.one_dimensional_weights_cube,
                                     "forecast_reference_time")
        self.assertTrue(any(message in str(item) for item in warning_list))
        self.assertArrayEqual(result.data, expected_data)
        self.assertEqual(result.dtype, np.float32)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_all_masked(self):
        """Test when we have all masked data in the input cube."""
        self.cube_to_collapse.data = np.ones(self.cube_to_collapse.data.shape)
        self.cube_to_collapse.data = np.ma.masked_equal(
            self.cube_to_collapse.data, 1)
        result = self.plugin.process(self.cube_to_collapse,
                                     self.one_dimensional_weights_cube,
                                     "forecast_reference_time")
        expected_data = np.zeros((3, 2, 3))
        self.assertArrayAlmostEqual(expected_data, result.data)
        self.assertTrue(result.metadata, self.cube_to_collapse.data)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_no_fuzziness_no_one_dimensional_weights(self):
        """Test a simple case where we have no fuzziness in the spatial
        weights and no adjustment from the one_dimensional weights."""
        plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=1)
        self.one_dimensional_weights_cube.data = np.ones((3))
        expected_result = np.array(
            [[[0.5, 0., 0.33333333], [0.5, 0.33333333, 0.33333333]],
             [[0., 0., 0.33333333], [0., 0.33333333, 0.33333333]],
             [[0.5, 1., 0.33333333], [0.5, 0.33333333, 0.33333333]]],
            dtype=np.float32)
        result = plugin.process(self.cube_to_collapse,
                                self.one_dimensional_weights_cube,
                                "forecast_reference_time")
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_no_fuzziness_no_one_dimensional_weights_transpose(self):
        """Test a simple case where we have no fuzziness in the spatial
        weights and no adjustment from the one_dimensional weights and
        transpose the input cube."""
        plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=1)
        self.one_dimensional_weights_cube.data = np.ones((3))
        expected_result = np.array(
            [[[0.5, 0., 0.33333333], [0.5, 0.33333333, 0.33333333]],
             [[0., 0., 0.33333333], [0., 0.33333333, 0.33333333]],
             [[0.5, 1., 0.33333333], [0.5, 0.33333333, 0.33333333]]],
            dtype=np.float32)
        self.cube_to_collapse.transpose([2, 0, 1, 3])
        result = plugin.process(self.cube_to_collapse,
                                self.one_dimensional_weights_cube,
                                "forecast_reference_time")
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_no_fuzziness_with_one_dimensional_weights(self):
        """Test a simple case where we have no fuzziness in the spatial
        weights and an adjustment from the one_dimensional weights."""
        plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=1)
        expected_result = np.array(
            [[[0.4, 0., 0.2], [0.4, 0.2, 0.2]],
             [[0., 0., 0.5], [0., 0.5, 0.5]],
             [[0.6, 1., 0.3], [0.6, 0.3, 0.3]]],
            dtype=np.float32)
        result = plugin.process(self.cube_to_collapse,
                                self.one_dimensional_weights_cube,
                                "forecast_reference_time")
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_fuzziness_no_one_dimensional_weights(self):
        """Test a simple case where we have some fuzziness in the spatial
        weights and no adjustment from the one_dimensional weights."""
        plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=2)
        self.one_dimensional_weights_cube.data = np.ones((3))
        expected_result = np.array(
            [[[0.33333334, 0., 0.25], [0.41421354, 0.25, 0.2928932]],
             [[0., 0., 0.25], [0., 0.25, 0.2928932]],
             [[0.6666667, 1., 0.5], [0.5857864, 0.5, 0.41421354]]],
            dtype=np.float32)
        result = plugin.process(self.cube_to_collapse,
                                self.one_dimensional_weights_cube,
                                "forecast_reference_time")
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)

    @ManageWarnings(
        ignored_messages=["Collapsing a non-contiguous coordinate."])
    def test_fuzziness_with_one_dimensional_weights(self):
        """Test a simple case where we have some fuzziness in the spatial
        weights and with adjustment from the one_dimensional weights."""
        plugin = SpatiallyVaryingWeightsFromMask(fuzzy_length=2)
        expected_result = np.array(
            [[[0.25, 0., 0.15384616], [0.32037723, 0.15384616, 0.17789416]],
             [[0., 0., 0.3846154], [0., 0.3846154, 0.44473538]],
             [[0.75, 1., 0.4615385], [0.6796227, 0.4615385, 0.3773705]]],
            dtype=np.float32)
        result = plugin.process(self.cube_to_collapse,
                                self.one_dimensional_weights_cube,
                                "forecast_reference_time")
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)