def test_fails_input_not_a_cube(self):
    """Test it raises a ValueError if not supplied with a cube."""
    plugin = LinearWeights()
    notacube = 0.0
    msg = ('The first argument must be an instance of '
           'iris.cube.Cube')
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(ValueError, msg):
        plugin.process(notacube, self.coord_name)
def test_works_scalar_coord(self):
    """Check a scalar coordinate yields a single weight of 1.0."""
    self.cube.add_aux_coord(
        AuxCoord(1, long_name="scalar_coord", units="no_unit"))
    scalar_coord = self.cube.coord("scalar_coord")
    plugin = LinearWeights(y0val=20.0, ynval=2.0)
    weights = plugin.process(self.cube, scalar_coord)
    self.assertArrayAlmostEqual(weights.data, np.array([1.0]))
def test_fails_ynval_and_slope_set(self):
    """Test it raises a ValueError if both slope and ynval are set."""
    plugin = LinearWeights(y0val=10.0, slope=-5.0, ynval=5.0)
    msg = ('Relative end point weight or slope must be set'
           ' but not both.')
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(ValueError, msg):
        plugin.process(self.cube, self.coord_name, self.coord_vals)
def test_fails_input_not_a_cube(self):
    """Check a TypeError is raised when the input is not a cube."""
    not_a_cube = 0.0
    expected_msg = ("The first argument must be an instance of "
                    "iris.cube.Cube")
    plugin = LinearWeights(y0val=20.0, ynval=2.0)
    with self.assertRaisesRegex(TypeError, expected_msg):
        plugin.process(not_a_cube, self.coord_name)
def test_fails_weights_negative(self):
    """Test it raises a ValueError if the weights become negative."""
    plugin = LinearWeights(y0val=10.0, slope=-5.0)
    cubenew = add_realizations(self.cube, 6)
    coord = cubenew.coord('realization')
    msg = 'Weights must be positive'
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(ValueError, msg):
        plugin.process(cubenew, coord)
def test_fails_coord_not_in_cube(self):
    """Test it raises a ValueError if the coord is not in the cube."""
    coord = AuxCoord([], long_name="notset")
    plugin = LinearWeights()
    msg = ('The coord for this plugin must be '
           'an existing coordinate in the input cube')
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(ValueError, msg):
        plugin.process(self.cube, coord)
def test_works_with_larger_num(self):
    """Check the weights produced for six realizations."""
    cubenew = add_realizations(self.cube, 6)
    realization_coord = cubenew.coord('realization')
    plugin = LinearWeights(y0val=10.0, ynval=5.0)
    weights = plugin.process(cubenew, realization_coord)
    expected = np.array([0.22222222, 0.2, 0.17777778,
                         0.15555556, 0.13333333, 0.11111111])
    self.assertArrayAlmostEqual(weights, expected)
def test_works_with_larger_num(self):
    """Check the weights produced for six realizations."""
    plugin = LinearWeights(y0val=10.0, ynval=5.0)
    cubenew = add_coordinate(
        self.cube, np.arange(6), "realization", dtype=np.int32)
    weights = plugin.process(cubenew, cubenew.coord("realization"))
    expected = np.array([0.22222222, 0.2, 0.17777778,
                         0.15555556, 0.13333333, 0.11111111])
    self.assertArrayAlmostEqual(weights.data, expected)
def test_works_with_missing_coord(self):
    """Check the weights when coordinate values are supplied as a string."""
    cubenew = add_realizations(self.cube, 6)
    coord_vals = '0, 1, 2, 3, 4, 5, 6'
    coord_name = 'realization'
    plugin = LinearWeights(y0val=10.0, ynval=5.0)
    weights = plugin.process(cubenew, coord_name, coord_vals)
    expected = np.array([0.206349, 0.190476, 0.174603,
                         0.15873, 0.142857, 0.126984])
    self.assertArrayAlmostEqual(weights, expected)
def _calculate_blending_weights(self, cube):
    """
    Wrapper for plugins to calculate blending weights by the
    appropriate method.

    Args:
        cube (iris.cube.Cube):
            Cube of input data to be blended

    Returns:
        iris.cube.Cube:
            Cube containing 1D array of weights for blending
    """
    method = self.wts_calc_method
    if method == "dict":
        # Blending over a model coordinate is keyed on the
        # model_configuration coordinate in the weights dictionary.
        if "model" in self.blend_coord:
            config_coord = "model_configuration"
        else:
            config_coord = self.blend_coord
        plugin = ChooseWeightsLinear(
            self.weighting_coord, self.wts_dict,
            config_coord_name=config_coord)
        weights = plugin(cube)
    elif method == "linear":
        plugin = ChooseDefaultWeightsLinear(
            y0val=self.y0val, ynval=self.ynval)
        weights = plugin(cube, self.blend_coord)
    elif method == "nonlinear":
        plugin = ChooseDefaultWeightsNonLinear(self.cval)
        weights = plugin(
            cube, self.blend_coord,
            inverse_ordering=self.inverse_ordering)
    return weights
def test_works_y0val_and_ynval_set(self):
    """Check the weights when y0val and ynval are both supplied."""
    plugin = LinearWeights(y0val=10.0, ynval=5.0)
    weights = plugin.process(self.cube, self.coord_name, self.coord_vals)
    self.assertArrayAlmostEqual(
        weights, np.array([0.66666667, 0.33333333]))
def test_works_defaults_used(self):
    """Check the weights produced by the default plugin settings."""
    weights = LinearWeights().process(
        self.cube, self.coord_name, self.coord_vals)
    expected = np.array([0.90909091, 0.09090909])
    self.assertArrayAlmostEqual(weights, expected)
def test_works_scalar_coord(self):
    """Check a scalar coordinate yields a single weight of 1.0."""
    scalar_coord = self.cube.coord("scalar_coord")
    weights = LinearWeights().process(self.cube, scalar_coord)
    self.assertArrayAlmostEqual(weights, np.array([1.0]))
def test_basic(self):
    """Check the plugin returns an iris cube of weights."""
    weights = LinearWeights(y0val=20.0, ynval=2.0).process(
        self.cube, self.coord_name)
    self.assertIsInstance(weights, iris.cube.Cube)
def test_array_sum_equals_one(self):
    """Test that the resulting weights add up to one."""
    plugin = LinearWeights()
    result = plugin.process(self.cube, self.coord_name, self.coord_vals)
    # assertAlmostEquals is a deprecated alias (removed in Python 3.12);
    # use assertAlmostEqual instead.
    self.assertAlmostEqual(result.sum(), 1.0)
def test_basic(self):
    """Check the plugin returns a numpy array of weights."""
    weights = LinearWeights().process(
        self.cube, self.coord_name, self.coord_vals)
    self.assertIsInstance(weights, np.ndarray)
def calculate_blending_weights(cube, blend_coord, method, wts_dict=None,
                               weighting_coord=None, coord_unit=None,
                               y0val=None, ynval=None, cval=None,
                               dict_coord=None):
    """
    Wrapper for plugins to calculate blending weights using the command line
    options specified.

    Args:
        cube (iris.cube.Cube):
            Cube of input data to be blended
        blend_coord (str):
            Coordinate over which blending will be performed (eg "model" for
            grid blending)
        method (str):
            Weights calculation method ("linear", "nonlinear", "dict" or
            "mask")
        wts_dict (str):
            File path to json file with parameters for linear weights
            calculation
        weighting_coord (str):
            Coordinate over which linear weights should be calculated from
            dict
        coord_unit (str or cf_units.Unit):
            Unit of blending coordinate (for default weights plugins)
        y0val (float):
            Intercept parameter for default linear weights plugin
        ynval (float):
            Gradient parameter for default linear weights plugin
        cval (float):
            Parameter for default non-linear weights plugin
        dict_coord (str):
            The coordinate that will be used when accessing the weights from
            the weights dictionary.

    Returns:
        weights (np.ndarray):
            1D array of weights corresponding to slices in ascending order
            of blending coordinate.  (Note: ChooseLinearWeights has the
            option to create a 3D array of spatially-varying weights with
            the "mask" option, however this is not currently supported by
            the blending plugin.)
    """
    # sort input cube by blending coordinate
    cube = sort_coord_in_cube(cube, blend_coord, order="ascending")

    # calculate blending weights
    if method == "dict":
        # calculate linear weights from a dictionary
        with open(wts_dict, 'r') as wts:
            weights_dict = json.load(wts)
        weights_cube = ChooseWeightsLinear(
            weighting_coord, weights_dict,
            config_coord_name=dict_coord).process(cube)

        # sort weights cube by blending coordinate
        weights = sort_coord_in_cube(
            weights_cube, blend_coord, order="ascending")

    elif method == "linear":
        weights = ChooseDefaultWeightsLinear(
            y0val=y0val, ynval=ynval).process(
                cube, blend_coord, coord_unit=coord_unit)

    elif method == "nonlinear":
        # this is set here rather than in the CLI arguments in order to check
        # for invalid argument combinations.
        # Use an explicit None check: the previous truthiness test
        # ("cval if cval") silently replaced a caller-supplied cval of 0.0
        # with the default.
        cvalue = cval if cval is not None else 0.85
        weights = ChooseDefaultWeightsNonLinear(cvalue).process(
            cube, blend_coord, coord_unit=coord_unit)

    return weights
def test_fails_y0val_lessthan_zero(self):
    """Test it raises a ValueError if y0val is less than zero."""
    plugin = LinearWeights(y0val=-10.0)
    msg = 'y0val must be a float >= 0.0'
    # assertRaisesRegexp is a deprecated alias (removed in Python 3.12);
    # use assertRaisesRegex instead.
    with self.assertRaisesRegex(ValueError, msg):
        plugin.process(self.cube, self.coord_name, self.coord_vals)
def main(argv=None):
    """Load in arguments and get going."""
    parser = ArgParser(
        description="Calculate the threshold truth value of input data "
        "relative to the provided threshold value. By default data are "
        "tested to be above the thresholds, though the --below_threshold "
        "flag enables testing below thresholds. A fuzzy factor or fuzzy "
        "bounds may be provided to capture data that is close to the "
        "threshold.")
    parser.add_argument("input_filepath", metavar="INPUT_FILE",
                        help="A path to an input NetCDF file to be processed")
    parser.add_argument("output_filepath", metavar="OUTPUT_FILE",
                        help="The output path for the processed NetCDF")
    parser.add_argument("threshold_values", metavar="THRESHOLD_VALUES",
                        nargs="*", type=float,
                        help="Threshold value or values about which to "
                        "calculate the truth values; e.g. 270 300. "
                        "Must be omitted if --threshold_config is used.")
    parser.add_argument("--threshold_config", metavar="THRESHOLD_CONFIG",
                        type=str,
                        help="Threshold configuration JSON file containing "
                        "thresholds and (optionally) fuzzy bounds. Best used "
                        "in combination with --threshold_units. "
                        "It should contain a dictionary of strings that can "
                        "be interpreted as floats with the structure: "
                        " \"THRESHOLD_VALUE\": [LOWER_BOUND, UPPER_BOUND] "
                        "e.g: {\"280.0\": [278.0, 282.0], "
                        "\"290.0\": [288.0, 292.0]}, or with structure "
                        " \"THRESHOLD_VALUE\": \"None\" (no fuzzy bounds). "
                        "Repeated thresholds with different bounds are not "
                        "handled well. Only the last duplicate will be used.")
    parser.add_argument("--threshold_units", metavar="THRESHOLD_UNITS",
                        default=None, type=str,
                        help="Units of the threshold values. If not provided "
                        "the units are assumed to be the same as those of the "
                        "input dataset. Specifying the units here will allow "
                        "a suitable conversion to match the input units if "
                        "possible.")
    parser.add_argument("--below_threshold", default=False,
                        action='store_true',
                        help="By default truth values of 1 are returned for "
                        "data ABOVE the threshold value(s). Using this flag "
                        "changes this behaviour to return 1 for data below "
                        "the threshold values.")
    parser.add_argument("--fuzzy_factor", metavar="FUZZY_FACTOR",
                        default=None, type=float,
                        help="A decimal fraction defining the factor about "
                        "the threshold value(s) which should be treated as "
                        "fuzzy. Data which fail a test against the hard "
                        "threshold value may return a fractional truth value "
                        "if they fall within this fuzzy factor region. Fuzzy "
                        "factor must be in the range 0-1, with higher values "
                        "indicating a narrower fuzzy factor region / sharper "
                        "threshold. NB A fuzzy factor cannot be used with a "
                        "zero threshold or a threshold_config file.")
    parser.add_argument("--collapse-coord", type=str,
                        metavar="COLLAPSE-COORD", default="None",
                        help="An optional ability to set which coordinate "
                        "we want to collapse over. The default is set "
                        "to None.")
    parser.add_argument("--vicinity", type=float, default=None,
                        help="If set,"
                        " distance in metres used to define the vicinity "
                        "within which to search for an occurrence.")
    args = parser.parse_args(args=argv)

    # Deal with mutual-exclusions that ArgumentParser can't handle:
    # NOTE: parser.error() raises SystemExit itself and never returns, so
    # the previous "raise parser.error(...)" wrapping was incorrect.
    if args.threshold_values and args.threshold_config:
        parser.error("--threshold_config option is not compatible "
                     "with THRESHOLD_VALUES list.")
    if args.fuzzy_factor and args.threshold_config:
        parser.error("--threshold_config option is not compatible "
                     "with --fuzzy_factor option.")

    cube = load_cube(args.input_filepath)

    if args.threshold_config:
        try:
            # Read in threshold configuration from JSON file.
            with open(args.threshold_config, 'r') as input_file:
                thresholds_from_file = json.load(input_file)
            thresholds = []
            fuzzy_bounds = []
            is_fuzzy = True
            for key in thresholds_from_file.keys():
                thresholds.append(float(key))
                if is_fuzzy:
                    # If the first threshold has no bounds, fuzzy_bounds is
                    # set to None and subsequent bounds checks are skipped
                    if thresholds_from_file[key] == "None":
                        is_fuzzy = False
                        fuzzy_bounds = None
                    else:
                        fuzzy_bounds.append(tuple(thresholds_from_file[key]))
        except ValueError as err:
            # Extend error message with hint for common JSON error.
            # str(err) is required: exception instances cannot be
            # concatenated with a str (the previous "err + ..." raised
            # TypeError instead of the intended message).
            raise type(err)(
                str(err) + " in JSON file {}. \nHINT: Try "
                "adding a zero after the decimal point.".format(
                    args.threshold_config)) from err
        except Exception as err:
            # Extend any errors with message about WHERE this occurred.
            raise type(err)(
                str(err) + " in JSON file {}".format(
                    args.threshold_config)) from err
    else:
        thresholds = args.threshold_values
        fuzzy_bounds = None

    result_no_collapse_coord = BasicThreshold(
        thresholds, fuzzy_factor=args.fuzzy_factor,
        fuzzy_bounds=fuzzy_bounds, threshold_units=args.threshold_units,
        below_thresh_ok=args.below_threshold).process(cube)

    if args.vicinity is not None:
        # smooth thresholded occurrences over local vicinity
        result_no_collapse_coord = OccurrenceWithinVicinity(
            args.vicinity).process(result_no_collapse_coord)
        new_cube_name = in_vicinity_name_format(
            result_no_collapse_coord.name())
        result_no_collapse_coord.rename(new_cube_name)

    if args.collapse_coord == "None":
        save_netcdf(result_no_collapse_coord, args.output_filepath)
    else:
        # Raise warning if result_no_collapse_coord is masked array
        if np.ma.isMaskedArray(result_no_collapse_coord.data):
            warnings.warn("Collapse-coord option not fully tested with "
                          "masked data.")
        # This is where we fix values for y0val, slope and weighting_mode.
        # In this case they are fixed to the values required for realization
        # collapse. This can be changed if other functionality needs to be
        # implemented.
        weights = ChooseDefaultWeightsLinear(y0val=1.0, slope=0.0).process(
            result_no_collapse_coord, args.collapse_coord)
        BlendingPlugin = WeightedBlendAcrossWholeDimension(
            args.collapse_coord, weighting_mode='weighted_mean')
        result_collapse_coord = BlendingPlugin.process(
            result_no_collapse_coord, weights)
        save_netcdf(result_collapse_coord, args.output_filepath)