def process(
    orography: cli.inputcube,
    land_sea_mask: cli.inputcube = None,
    *,
    bands_config: cli.inputjson = None,
):
    """Generate a series of topographic band masks from an orography field.

    Each mask in the returned series excludes data at or below the band's
    lower threshold and above its upper threshold.

    Args:
        orography (iris.cube.Cube):
            The orography on a standard grid.
        land_sea_mask (iris.cube.Cube):
            The land mask on a standard grid, land points set to one and
            sea points to zero. When provided, sea points are zeroed in
            every band; otherwise sea points fall into the appropriate
            topographic band.
        bands_config (dict):
            Definition of the orography bands required, e.g.
            {'bounds': [[0, 50], [50, 200]], 'units': 'm'}. When omitted
            the default THRESHOLDS_DICT band definition is used.

    Returns:
        iris.cube.Cube:
            Concatenated cube of orographic band masks.
    """
    from improver.generate_ancillaries.generate_ancillary import (
        GenerateOrographyBandAncils,
        THRESHOLDS_DICT,
    )

    thresholds = THRESHOLDS_DICT if bands_config is None else bands_config

    def _first_horizontal_slice(cube):
        # Reduce the cube to its first (y, x) slice.
        return next(cube.slices([cube.coord(axis="y"), cube.coord(axis="x")]))

    if land_sea_mask:
        land_sea_mask = _first_horizontal_slice(land_sea_mask)
    orography = _first_horizontal_slice(orography)

    masks = GenerateOrographyBandAncils()(
        orography, thresholds, landmask=land_sea_mask
    )
    return masks.concatenate_cube()
def process(
    orography: cli.inputcube,
    land_sea_mask: cli.inputcube = None,
    *,
    bands_config: cli.inputjson = None,
):
    """Generate topographic zone weights from an orography field.

    Each orography point receives weights describing where it sits within
    the configured topographic bands: a point at the centre of a band gives
    that band a weight of 1.0; a point at a band edge splits 0.5/0.5 between
    the adjacent bands; in between, the weight varies linearly.

    Args:
        orography (iris.cube.Cube):
            The orography on a standard grid.
        land_sea_mask (iris.cube.Cube):
            Land mask on a standard grid. If provided, sea points are masked
            and set to the default fill value; otherwise weights are
            generated for sea points in the appropriate band as well.
        bands_config (dict):
            Definition of the orography bands required, e.g.
            {'bounds': [[0, 50], [50, 200]], 'units': 'm'}. When omitted
            the default THRESHOLDS_DICT band definition is used.

    Returns:
        iris.cube.Cube:
            Weights locating each orography point within the topographic
            zones.
    """
    from improver.generate_ancillaries.generate_ancillary import THRESHOLDS_DICT
    from improver.generate_ancillaries.generate_topographic_zone_weights import (
        GenerateTopographicZoneWeights,
    )

    thresholds = THRESHOLDS_DICT if bands_config is None else bands_config

    def _first_horizontal_slice(cube):
        # Reduce the cube to its first (y, x) slice.
        return next(cube.slices([cube.coord(axis='y'), cube.coord(axis='x')]))

    if land_sea_mask:
        land_sea_mask = _first_horizontal_slice(land_sea_mask)
    orography = _first_horizontal_slice(orography)

    return GenerateTopographicZoneWeights().process(
        orography, thresholds, landmask=land_sea_mask
    )
def process(
    start_cube: cli.inputcube,
    end_cube: cli.inputcube,
    *,
    interval_in_mins: int = None,
    times: cli.comma_separated_list = None,
    interpolation_method="linear",
):
    """Interpolate data between validity times.

    Produces data at intermediate times between the validity times of two
    cubes, e.g. to fill gaps in radar fields or provide data at required
    intervals where model output is unavailable.

    Args:
        start_cube (iris.cube.Cube):
            Cube containing the data at the beginning.
        end_cube (iris.cube.Cube):
            Cube containing the data at the end.
        interval_in_mins (int):
            Interval in minutes at which to interpolate between the two
            inputs. An interval that does not divide the period equally
            raises an exception. Mutually exclusive with times.
        times (str):
            Times in the format {YYYYMMDD}T{HHMM}Z at which to interpolate,
            e.g. 20180116T0100Z; multiple times are comma separated.
            Mutually exclusive with interval_in_mins.
        interpolation_method (str):
            One of "linear", "solar", "daynight". "solar" interpolates
            using the solar elevation; "daynight" is linear interpolation
            with night-time points set to 0.0; "linear" is plain linear
            interpolation.

    Returns:
        iris.cube.CubeList:
            Cubes interpolated to the requested times, always in
            chronological order regardless of input order.
    """
    from improver.utilities.cube_manipulation import MergeCubes
    from improver.utilities.temporal import (
        cycletime_to_datetime,
        iris_time_to_datetime,
    )
    from improver.utilities.temporal_interpolation import TemporalInterpolation

    (first_time,) = iris_time_to_datetime(start_cube.coord("time"))
    (last_time,) = iris_time_to_datetime(end_cube.coord("time"))
    if last_time < first_time:
        # Guarantee chronological ordering of the two inputs.
        start_cube, end_cube = end_cube, start_cube

    target_times = times
    if target_times is not None:
        target_times = [cycletime_to_datetime(item) for item in target_times]

    plugin = TemporalInterpolation(
        interval_in_minutes=interval_in_mins,
        times=target_times,
        interpolation_method=interpolation_method,
    )
    interpolated = plugin(start_cube, end_cube)
    return MergeCubes()(interpolated)
def process(
    neighbour_cube: cli.inputcube,
    cube: cli.inputcube,
    lapse_rate: cli.inputcube = None,
    *,
    apply_lapse_rate_correction=False,
    land_constraint=False,
    similar_altitude=False,
    extract_percentiles: cli.comma_separated_list = None,
    ignore_ecc_bounds=False,
    new_title: str = None,
    suppress_warnings=False,
):
    """Module to run spot data extraction.

    Extract diagnostic data from gridded fields for spot data sites. It is
    possible to apply a temperature lapse rate adjustment to temperature data
    that helps to account for differences between the spot site's real
    altitude and that of the grid point from which the temperature data is
    extracted.

    Args:
        neighbour_cube (iris.cube.Cube):
            Cube of spot-data neighbours and the spot site information.
        cube (iris.cube.Cube):
            Cube containing the diagnostic data to be extracted.
        lapse_rate (iris.cube.Cube):
            Optional cube containing temperature lapse rates. If this cube is
            provided and a screen temperature cube is being processed, the
            lapse rates will be used to adjust the temperature to better
            represent each spot's site-altitude.
        apply_lapse_rate_correction (bool):
            Use to apply a lapse-rate correction to screen temperature data so
            that the data are a better match the altitude of the spot site for
            which they have been extracted.
        land_constraint (bool):
            Use to select the nearest-with-land-constraint neighbour-selection
            method from the neighbour_cube. This means that the grid points
            should be land points except for sites where none were found
            within the search radius when the neighbour cube was created. May
            be used with similar_altitude.
        similar_altitude (bool):
            Use to select the nearest-with-height-constraint
            neighbour-selection method from the neighbour_cube. These are grid
            points that were found to be the closest in altitude to the spot
            site within the search radius defined when the neighbour cube was
            created. May be used with land_constraint.
        extract_percentiles (list or int):
            If set to a percentile value or a list of percentile values, data
            corresponding to those percentiles will be returned. For example
            "25, 50, 75" will result in the 25th, 50th and 75th percentiles
            being returned from a cube of probabilities, percentiles or
            realizations. Deterministic input data will raise a warning
            message. Note that for percentiles inputs, the desired
            percentile(s) must exist in the input cube.
        ignore_ecc_bounds (bool):
            Demotes exceptions where calculated percentiles are outside the
            ECC bounds range to warnings.
        new_title (str):
            New title for the spot-extracted data. If None, this attribute is
            removed from the output cube since it has no prescribed standard
            and may therefore contain grid information that is no longer
            correct after spot-extraction.
        suppress_warnings (bool):
            Suppress warning output. This option should only be used if it is
            known that warnings will be generated but they are not required.

    Returns:
        iris.cube.Cube:
            Cube of spot data.

    Raises:
        ValueError:
            If the percentile diagnostic cube does not contain the requested
            percentile value.
        ValueError:
            If the lapse rate cube was provided but the diagnostic being
            processed is not air temperature.
        ValueError:
            If the lapse rate cube provided does not have the name
            "air_temperature_lapse_rate"
        ValueError:
            If the lapse rate cube does not contain a single valued height
            coordinate.

    Warns:
        warning:
            If diagnostic cube is not a known probabilistic type.
        warning:
            If a lapse rate cube was provided, but the height of the
            temperature does not match that of the data used.
        warning:
            If a lapse rate cube was not provided, but the option to apply
            the lapse rate correction was enabled.
    """
    import warnings

    import iris
    import numpy as np
    from iris.exceptions import CoordinateNotFoundError

    from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
        ConvertProbabilitiesToPercentiles,
    )
    from improver.metadata.probabilistic import find_percentile_coordinate
    from improver.percentile import PercentileConverter
    from improver.spotdata.apply_lapse_rate import SpotLapseRateAdjust
    from improver.spotdata.neighbour_finding import NeighbourSelection
    from improver.spotdata.spot_extraction import SpotExtraction
    from improver.utilities.cube_extraction import extract_subcube

    # Translate the land/altitude constraint flags into the neighbour-method
    # name recorded in the neighbour cube.
    neighbour_selection_method = NeighbourSelection(
        land_constraint=land_constraint,
        minimum_dz=similar_altitude).neighbour_finding_method_name()

    result = SpotExtraction(
        neighbour_selection_method=neighbour_selection_method)(
            neighbour_cube, cube, new_title=new_title)

    # If a probability or percentile diagnostic cube is provided, extract
    # the given percentile if available. This is done after the spot-extraction
    # to minimise processing time; usually there are far fewer spot sites than
    # grid points.
    if extract_percentiles:
        extract_percentiles = [np.float32(x) for x in extract_percentiles]
        try:
            perc_coordinate = find_percentile_coordinate(result)
        except CoordinateNotFoundError:
            # No percentile coordinate: derive percentiles from probability
            # or realization data, or warn for deterministic inputs.
            if "probability_of_" in result.name():
                result = ConvertProbabilitiesToPercentiles(
                    ecc_bounds_warning=ignore_ecc_bounds)(
                        result, percentiles=extract_percentiles)
                result = iris.util.squeeze(result)
            elif result.coords("realization", dim_coords=True):
                # The fast percentile method cannot handle masked data.
                fast_percentile_method = not np.ma.isMaskedArray(result.data)
                result = PercentileConverter(
                    "realization",
                    percentiles=extract_percentiles,
                    fast_percentile_method=fast_percentile_method,
                )(result)
            else:
                msg = ("Diagnostic cube is not a known probabilistic type. "
                       "The {} percentile could not be extracted. Extracting "
                       "data from the cube including any leading "
                       "dimensions.".format(extract_percentiles))
                if not suppress_warnings:
                    warnings.warn(msg)
        else:
            # A percentile coordinate exists: pull out the requested values.
            constraint = [
                "{}={}".format(perc_coordinate.name(), extract_percentiles)
            ]
            perc_result = extract_subcube(result, constraint)
            if perc_result is not None:
                result = perc_result
            else:
                msg = ("The percentile diagnostic cube does not contain the "
                       "requested percentile value. Requested {}, available "
                       "{}".format(extract_percentiles,
                                   perc_coordinate.points))
                raise ValueError(msg)
    # Check whether a lapse rate cube has been provided and we are dealing with
    # temperature data and the lapse-rate option is enabled.
    if apply_lapse_rate_correction and lapse_rate:
        if not result.name() == "air_temperature":
            msg = ("A lapse rate cube was provided, but the diagnostic being "
                   "processed is not air temperature and cannot be adjusted.")
            raise ValueError(msg)
        if not lapse_rate.name() == "air_temperature_lapse_rate":
            msg = ("A cube has been provided as a lapse rate cube but does "
                   "not have the expected name air_temperature_lapse_rate: "
                   "{}".format(lapse_rate.name()))
            raise ValueError(msg)
        try:
            lapse_rate_height_coord = lapse_rate.coord("height")
        except (ValueError, CoordinateNotFoundError):
            msg = ("Lapse rate cube does not contain a single valued height "
                   "coordinate. This is required to ensure it is applied to "
                   "equivalent temperature data.")
            raise ValueError(msg)
        # Check the height of the temperature data matches that used to
        # calculate the lapse rates. If so, adjust temperatures using the lapse
        # rate values.
        if cube.coord("height") == lapse_rate_height_coord:
            plugin = SpotLapseRateAdjust(
                neighbour_selection_method=neighbour_selection_method)
            result = plugin(result, neighbour_cube, lapse_rate)
        elif not suppress_warnings:
            warnings.warn(
                "A lapse rate cube was provided, but the height of the "
                "temperature data does not match that of the data used "
                "to calculate the lapse rates. As such the temperatures "
                "were not adjusted with the lapse rates.")
    elif apply_lapse_rate_correction and not lapse_rate:
        if not suppress_warnings:
            warnings.warn(
                "A lapse rate cube was not provided, but the option to "
                "apply the lapse rate correction was enabled. No lapse rate "
                "correction could be applied.")
    # Remove the internal model_grid_hash attribute if present.
    result.attributes.pop("model_grid_hash", None)
    return result
def process(
    wind_speed: cli.inputcube,
    sigma: cli.inputcube,
    target_orography: cli.inputcube,
    standard_orography: cli.inputcube,
    silhouette_roughness: cli.inputcube,
    vegetative_roughness: cli.inputcube = None,
    *,
    model_resolution: float,
    output_height_level: float = None,
    output_height_level_units="m",
):
    """Wind downscaling.

    Applies roughness correction and height correction to wind fields as
    described in Howard and Clark (2007). All inputs must be on the same
    standard grid.

    Args:
        wind_speed (iris.cube.Cube):
            Cube of wind speed on standard grid. Any units can be supplied.
        sigma (iris.cube.Cube):
            Cube of standard deviation of model orography height, in m.
        target_orography (iris.cube.Cube):
            Cube of orography to downscale fields to, in m.
        standard_orography (iris.cube.Cube):
            Cube of orography on standard grid (interpolated model
            orography), in m.
        silhouette_roughness (iris.cube.Cube):
            Cube of model silhouette roughness, dimensionless.
        vegetative_roughness (iris.cube.Cube):
            Cube of vegetative roughness length, in m.
        model_resolution (float):
            Original resolution of model orography (before interpolation to
            standard grid), in m.
        output_height_level (float):
            Selects a single height level for the output. If no units are
            given via 'output_height_level_units', metres are assumed.
        output_height_level_units (str):
            Units of the value supplied via 'output_height_level', e.g. hPa.

    Returns:
        iris.cube.Cube:
            The processed Cube.

    Raises:
        ValueError: If the requested height value is not found.
    """
    import warnings

    import iris
    from iris.exceptions import CoordinateNotFoundError

    from improver.utilities.cube_extraction import apply_extraction
    from improver.wind_calculations import wind_downscaling

    if output_height_level_units and output_height_level is None:
        warnings.warn(
            "output_height_level_units has been set but no "
            "associated height level has been provided. These units "
            "will have no effect."
        )

    try:
        member_iterator = wind_speed.slices_over("realization")
    except CoordinateNotFoundError:
        # No realization coordinate: treat the input as a single member.
        member_iterator = [wind_speed]

    downscaled = iris.cube.CubeList()
    for member in member_iterator:
        corrected = wind_downscaling.RoughnessCorrection(
            silhouette_roughness,
            sigma,
            target_orography,
            standard_orography,
            model_resolution,
            z0_cube=vegetative_roughness,
            height_levels_cube=None,
        )(member)
        downscaled.append(corrected)
    wind_speed = downscaled.merge_cube()

    # Promote realization back to a dimension if the merge left it auxiliary.
    auxiliary_names = [c.name() for c in wind_speed.coords(dim_coords=False)]
    if "realization" in auxiliary_names:
        wind_speed = iris.util.new_axis(wind_speed, "realization")

    if output_height_level is None:
        return wind_speed

    single_level = apply_extraction(
        wind_speed,
        iris.Constraint(height=output_height_level),
        {"height": output_height_level_units},
    )
    if not single_level:
        raise ValueError(
            "Requested height level not found, no cube "
            "returned. Available height levels are:\n"
            "{0:}\nin units of {1:}".format(
                wind_speed.coord("height").points,
                wind_speed.coord("height").units,
            )
        )
    return single_level
def process(
    cube: cli.inputcube,
    raw_cube: cli.inputcube = None,
    *,
    realizations_count: int = None,
    random_seed: int = None,
    ignore_ecc_bounds=False,
):
    """Convert an incoming cube into one containing realizations.

    Args:
        cube (iris.cube.Cube):
            A cube to be processed.
        raw_cube (iris.cube.Cube):
            Cube of raw (not post processed) weather data. When given,
            ensemble realizations are created from percentiles by
            reshuffling them to match the rank order of the raw ensemble;
            otherwise percentiles are rebadged as realizations.
        realizations_count (int):
            The number of ensemble realizations in the output.
        random_seed (int):
            Optional random seed for percentile reordering. For testing
            purposes only, to ensure reproduceable outputs; do not use in
            real time operations as it may bias the reordered forecasts.
        ignore_ecc_bounds (bool):
            If True, percentiles (calculated as an intermediate step)
            exceeding the ECC bounds range raise a warning rather than an
            exception.

    Returns:
        iris.cube.Cube:
            The processed cube.
    """
    from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
        ConvertProbabilitiesToPercentiles,
        EnsembleReordering,
        RebadgePercentilesAsRealizations,
        ResamplePercentiles,
    )
    from improver.metadata.probabilistic import is_probability

    # Nothing to do if the cube is already in realization space.
    if cube.coords("realization"):
        return cube

    if not cube.coords("percentile") and not is_probability(cube):
        raise ValueError("Unable to convert to realizations:\n" + str(cube))

    if realizations_count is None:
        try:
            realizations_count = len(raw_cube.coord("realization").points)
        except AttributeError:
            # raised if raw_cube is None, hence has no attribute "coord"
            msg = "Either realizations_count or raw_cube must be provided"
            raise ValueError(msg)

    # Percentile inputs are resampled; probability inputs are converted.
    converter_class = (
        ResamplePercentiles
        if cube.coords("percentile")
        else ConvertProbabilitiesToPercentiles
    )
    percentiles = converter_class(ecc_bounds_warning=ignore_ecc_bounds)(
        cube, no_of_percentiles=realizations_count
    )

    if raw_cube:
        return EnsembleReordering()(
            percentiles, raw_cube, random_seed=random_seed
        )
    return RebadgePercentilesAsRealizations()(percentiles)
def process(
    cube: cli.inputcube,
    raw_cube: cli.inputcube = None,
    *,
    realizations_count: int = None,
    random_seed: int = None,
    ignore_ecc_bounds=False,
):
    """Convert probabilities to ensemble realizations via Ensemble Copula
    Coupling.

    Probabilities are first converted to percentiles, which are then either
    rebadged as realizations or, when raw_cube is given, reordered to match
    the rank order of the raw ensemble.

    Args:
        cube (iris.cube.Cube):
            Cube to be processed.
        raw_cube (iris.cube.Cube):
            Cube of raw (not post processed) weather data. When given,
            ensemble realizations are created from percentiles by
            reshuffling them to match the rank order of the raw ensemble;
            otherwise percentiles are rebadged as realizations.
        realizations_count (int):
            Optional number of ensemble realizations to generate via an
            intermediate percentile representation; the percentiles are
            distributed regularly to divide into blocks of equal
            probability. When omitted and raw_cube is given, the number of
            realizations in raw_cube is used.
        random_seed (int):
            Optional random seed for testing purposes; otherwise the
            default random seed behaviour is used. The seed controls the
            random numbers used to split tied values within the raw
            ensemble so the input percentiles can be ordered to match it.
        ignore_ecc_bounds (bool):
            If True, percentiles (calculated as an intermediate step)
            exceeding the ECC bounds range raise a warning rather than an
            exception.

    Returns:
        iris.cube.Cube:
            Processed result Cube.
    """
    from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
        ConvertProbabilitiesToPercentiles,
        EnsembleReordering,
        RebadgePercentilesAsRealizations,
    )

    if realizations_count is None and raw_cube:
        # Fall back to the raw ensemble's member count.
        realizations_count = len(raw_cube.coord("realization").points)

    percentiles = ConvertProbabilitiesToPercentiles(
        ecc_bounds_warning=ignore_ecc_bounds
    )(cube, no_of_percentiles=realizations_count)

    if raw_cube:
        return EnsembleReordering()(
            percentiles,
            raw_cube,
            random_ordering=False,
            random_seed=random_seed,
        )
    return RebadgePercentilesAsRealizations()(percentiles)