def test_clip(self):
    """Test clip function."""
    cube = Cube(np.array([-10, 0, 10]))
    cube.add_dim_coord(
        iris.coords.DimCoord(
            np.arange(3),
            standard_name='time',
            units=Unit('days since 1950-01-01 00:00:00',
                       calendar='gregorian'),
        ),
        0,
    )
    # Cube needs to be copied, since it is modified in-place and test cube
    # should not change.
    assert_array_equal(
        clip(cube.copy(), 0, None).data, np.array([0, 0, 10]))
    assert_array_equal(
        clip(cube.copy(), None, 0).data, np.array([-10, 0, 0]))
    assert_array_equal(clip(cube.copy(), -1, 2).data, np.array([-1, 0, 2]))

    # Masked cube TODO

    # No parameters specified
    with self.assertRaises(ValueError):
        clip(cube, None, None)

    # Maximum lower than minimum
    with self.assertRaises(ValueError):
        clip(cube, 10, 8)
class Test_merge_cube(tests.IrisTest):
    def setUp(self):
        self.cube1 = Cube([1, 2, 3], "air_temperature", units="K")
        self.cube1.add_aux_coord(AuxCoord([0], "height", units="m"))

    def test_pass(self):
        cube2 = self.cube1.copy()
        cube2.coord("height").points = [1]
        result = CubeList([self.cube1, cube2]).merge_cube()
        self.assertIsInstance(result, Cube)

    def test_fail(self):
        cube2 = self.cube1.copy()
        cube2.rename("not air temperature")
        with self.assertRaises(iris.exceptions.MergeError):
            CubeList([self.cube1, cube2]).merge_cube()

    def test_empty(self):
        with self.assertRaises(ValueError):
            CubeList([]).merge_cube()

    def test_single_cube(self):
        result = CubeList([self.cube1]).merge_cube()
        self.assertEqual(result, self.cube1)
        self.assertIsNot(result, self.cube1)

    def test_repeated_cube(self):
        with self.assertRaises(iris.exceptions.MergeError):
            CubeList([self.cube1, self.cube1]).merge_cube()
def resolve_wind_components(
    speed: Cube, angle: Cube, adj: ndarray
) -> Tuple[Cube, Cube]:
    """
    Perform trigonometric reprojection onto the x and y axes.

    Args:
        speed:
            Cube containing wind speed data.
        angle:
            Cube containing wind directions as angles from true North.
        adj:
            2D array of wind direction angle adjustments in radians, to
            convert the zero reference from true North to grid North.
            Broadcast automatically if the speed and angle cubes have
            extra dimensions.

    Returns:
        - Cube containing the wind vector component in the positive
          x-direction, u_speed.
        - Cube containing the wind vector component in the positive
          y-direction, v_speed.
    """
    angle.convert_units("radians")
    angle.data += adj

    # Output vectors should be pointing "to" not "from".
    if "wind_from_direction" in angle.name():
        angle.data += np.pi
    sin_angle = np.sin(angle.data)
    cos_angle = np.cos(angle.data)
    uspeed = np.multiply(speed.data, sin_angle)
    vspeed = np.multiply(speed.data, cos_angle)
    return speed.copy(data=uspeed), speed.copy(data=vspeed)
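# A minimal usage sketch for resolve_wind_components, assuming the function
# above is importable alongside iris and numpy; the cube names and values
# here are illustrative, not taken from any real dataset.
import numpy as np
from iris.cube import Cube

speed = Cube(np.full((2, 2), 10.0, dtype=np.float32),
             long_name="wind_speed", units="m s-1")
angle = Cube(np.full((2, 2), 90.0, dtype=np.float32),
             long_name="wind_to_direction", units="degrees")
adj = np.zeros((2, 2))  # no true-North-to-grid-North correction

u_speed, v_speed = resolve_wind_components(speed, angle, adj)
# A wind blowing towards 90 degrees (due east) is all u-component:
# u_speed.data ~= 10.0, v_speed.data ~= 0.0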
def setUp(self):
    """Create a cube with data varying along the time dimension."""
    data = np.zeros((2, 2, 2))
    data[0, :, :] = 1.0
    data[1, :, :] = 2.0
    cube = Cube(data, standard_name="precipitation_amount",
                units="kg m^-2 s^-1")
    cube.add_dim_coord(
        DimCoord(np.linspace(-45.0, 45.0, 2), 'latitude', units='degrees'),
        1)
    cube.add_dim_coord(
        DimCoord(np.linspace(120, 180, 2), 'longitude', units='degrees'),
        2)
    time_origin = "hours since 1970-01-01 00:00:00"
    calendar = "gregorian"
    tunit = Unit(time_origin, calendar)
    cube.add_dim_coord(DimCoord([402192.5, 402193.5], "time", units=tunit),
                       0)
    self.cube = cube

    new_scalar_coord = iris.coords.AuxCoord(
        1, long_name='dummy_scalar_coord', units='no_unit')
    cube_with_scalar = cube.copy()
    cube_with_scalar.add_aux_coord(new_scalar_coord)
    self.cube_with_scalar = cube_with_scalar
def process(self, precip_cubes: Union[Cube, List[Cube]],
            orographic_enhancement_cube: Cube) -> CubeList:
    """Apply orographic enhancement by modifying the input fields.
    This can mean either adding or subtracting the orographic enhancement
    component from the input precipitation fields.

    Args:
        precip_cubes:
            Cube or iterable (list, CubeList or tuple) of cubes containing
            the input precipitation fields.
        orographic_enhancement_cube:
            Cube containing the orographic enhancement fields.

    Returns:
        CubeList of precipitation rate cubes that have been updated using
        orographic enhancement.
    """
    if isinstance(precip_cubes, iris.cube.Cube):
        precip_cubes = iris.cube.CubeList([precip_cubes])

    updated_cubes = iris.cube.CubeList([])
    for precip_cube in precip_cubes:
        oe_cube = self._select_orographic_enhancement_cube(
            precip_cube, orographic_enhancement_cube.copy())
        cube = self._apply_orographic_enhancement(precip_cube, oe_cube)
        cube = self._apply_minimum_precip_rate(precip_cube, cube)
        updated_cubes.append(cube)
    return updated_cubes
def make_percentile_cube(self, cube: Cube) -> Cube:
    """Returns a cube with the same metadata as the sample cube, but with
    an added percentile dimension.

    Args:
        cube:
            Cube to copy metadata from.

    Returns:
        Cube like the input, but with an added percentile coordinate.
        Each slice along this coordinate is identical.
    """
    pctcubelist = iris.cube.CubeList()
    pct_coord_name = "percentile"
    for pct in self.percentiles:
        pctcube = cube.copy()
        pctcube.add_aux_coord(
            iris.coords.DimCoord(np.float32(pct),
                                 long_name=pct_coord_name,
                                 units="%"))
        pctcubelist.append(pctcube)
    result = pctcubelist.merge_cube()
    # If the percentile coord is not already a dimension, promote it.
    # This is required when self.percentiles has length 1.
    if result.coord_dims(pct_coord_name) == ():
        result = iris.util.new_axis(result, scalar_coord=pct_coord_name)
    return result
def _update_metadata(cube: Cube) -> Cube:
    """
    Modify the metadata of the input cube to resemble a nowcast of
    lightning probability:

    1. Rename to "probability_of_rate_of_lightning_above_threshold"
    2. Remove "threshold" coord
       (or causes iris.exceptions.CoordinateNotFoundError)
    3. Discard all cell_methods

    Args:
        cube:
            An input cube.

    Returns:
        Output cube - a copy of the input cube with metadata relating to
        a nowcast of lightning probability. The data array will be a copy
        of the input cube.data.
    """
    new_cube = cube.copy()
    new_cube.rename("probability_of_rate_of_lightning_above_threshold")
    threshold_coord = find_threshold_coordinate(new_cube)
    new_cube.remove_coord(threshold_coord)
    new_cube.cell_methods = None
    return new_cube
def setUp(self):
    """Set up the test inputs."""
    self.time_0 = datetime.datetime(2017, 11, 1, 3)
    self.time_extra = datetime.datetime(2017, 11, 1, 6)
    self.time_1 = datetime.datetime(2017, 11, 1, 9)
    self.npoints = 10
    data_time_0 = np.ones((self.npoints, self.npoints))
    data_time_1 = np.ones((self.npoints, self.npoints)) * 7
    cube_template = Cube(data_time_0, standard_name='air_temperature',
                         units='K')
    cube_template.add_dim_coord(
        DimCoord(np.linspace(-45.0, 45.0, self.npoints),
                 'latitude', units='degrees'), 0)
    cube_template.add_dim_coord(
        DimCoord(np.linspace(120, 180, self.npoints),
                 'longitude', units='degrees'), 1)
    time_origin = "seconds since 1970-01-01 00:00:00"
    calendar = "gregorian"
    tunit = Unit(time_origin, calendar)
    self.cube_time_0 = cube_template
    self.cube_time_1 = cube_template.copy(data=data_time_1)
    self.cube_time_0.add_aux_coord(
        DimCoord(self.time_0.timestamp(), "time", units=tunit))
    self.cube_time_0.add_aux_coord(
        DimCoord(0, "forecast_period", units="hours"))
    self.cube_time_1.add_aux_coord(
        DimCoord(self.time_1.timestamp(), "time", units=tunit))
    self.cube_time_1.add_aux_coord(
        DimCoord(6, "forecast_period", units="hours"))
def setUp(self):
    """Create cubes with data varying along the time dimension, plus a
    thresholded probability cube."""
    data = np.zeros((2, 2, 2))
    data[0, :, :] = 1.0
    data[1, :, :] = 2.0
    cube = Cube(data, standard_name="precipitation_amount",
                units="kg m^-2 s^-1")
    cube.add_dim_coord(
        DimCoord(np.linspace(-45.0, 45.0, 2), 'latitude', units='degrees'),
        1)
    cube.add_dim_coord(
        DimCoord(np.linspace(120, 180, 2), 'longitude', units='degrees'),
        2)
    time_origin = "hours since 1970-01-01 00:00:00"
    calendar = "gregorian"
    tunit = Unit(time_origin, calendar)
    cube.add_dim_coord(DimCoord([402192.5, 402193.5], "time", units=tunit),
                       0)
    cube.add_aux_coord(AuxCoord([402190.0, 402191.0],
                                "forecast_reference_time", units=tunit),
                       data_dims=0)
    cube.add_aux_coord(AuxCoord([3.0, 4.0], "forecast_period",
                                units="hours"),
                       data_dims=0)
    self.cube = cube

    new_scalar_coord = iris.coords.AuxCoord(
        1, long_name='dummy_scalar_coord', units='no_unit')
    cube_with_scalar = cube.copy()
    cube_with_scalar.add_aux_coord(new_scalar_coord)
    self.cube_with_scalar = cube_with_scalar

    data_threshold = np.zeros((2, 2, 2, 2))
    data_threshold[:, 0, :, :] = 0.5
    data_threshold[:, 1, :, :] = 0.8
    cube_threshold = Cube(data_threshold,
                          long_name="probability_of_precipitation_amount")
    cube_threshold.add_dim_coord(
        DimCoord([0.4, 1.0], long_name="threshold", units="kg m^-2 s^-1"),
        0)
    cube_threshold.add_dim_coord(
        DimCoord([402192.5, 402193.5], "time", units=tunit), 1)
    cube_threshold.add_dim_coord(
        DimCoord(np.linspace(-45.0, 45.0, 2), 'latitude', units='degrees'),
        2)
    cube_threshold.add_dim_coord(
        DimCoord(np.linspace(120, 180, 2), 'longitude', units='degrees'),
        3)
    cube_threshold.add_aux_coord(AuxCoord([402190.0, 402191.0],
                                          "forecast_reference_time",
                                          units=tunit),
                                 data_dims=1)
    cube_threshold.add_aux_coord(AuxCoord([3.0, 4.0], "forecast_period",
                                          units="hours"),
                                 data_dims=1)
    cube_threshold.attributes.update({'relative_to_threshold': 'below'})
    self.cube_threshold = cube_threshold
def check_input_cube_dims(self, input_cube: Cube,
                          timezone_cube: Cube) -> None:
    """Ensures the input cube has at least three dimensions: time, y, x.
    Promotes time to be the inner-most dimension (dim=-1). Does the same
    for the timezone_cube UTC_offset dimension.

    Raises:
        ValueError:
            If the input cube does not have the expected time, y and x
            coords, or if the spatial coords on input_cube and
            timezone_cube do not match.
    """
    expected_coords = ["time"] + [input_cube.coord(axis=n).name()
                                  for n in "yx"]
    cube_coords = [coord.name()
                   for coord in input_cube.coords(dim_coords=True)]
    if not all(expected_coord in cube_coords
               for expected_coord in expected_coords):
        raise ValueError(
            f"Expected coords on input_cube: time, y, x "
            f"({expected_coords}). Found {cube_coords}"
        )
    enforce_coordinate_ordering(input_cube, ["time"], anchor_start=False)
    self.timezone_cube = timezone_cube.copy()
    enforce_coordinate_ordering(
        self.timezone_cube, ["UTC_offset"], anchor_start=False
    )
    if not spatial_coords_match([input_cube, self.timezone_cube]):
        raise ValueError(
            "Spatial coordinates on input_cube and timezone_cube do not "
            "match."
        )
def process(self, cube: Cube) -> Cube:
    """
    Calculate probabilities between thresholds for the input cube.

    Args:
        cube:
            Probability cube containing thresholded data (above or below).

    Returns:
        Cube containing the probability of occurrence between thresholds.
    """
    # If the cube has no threshold-type coordinate, raise an error.
    try:
        self.thresh_coord = find_threshold_coordinate(cube)
    except CoordinateNotFoundError:
        raise ValueError("Input is not a probability cube "
                         "(has no threshold-type coordinate)")
    self.cube = cube.copy()

    # Check the input cube units and convert if needed.
    original_units = self.thresh_coord.units
    if original_units != self.threshold_units:
        self.cube.coord(self.thresh_coord).convert_units(
            self.threshold_units)

    # Extract suitable cube slices.
    self.cube_slices = self._slice_cube()

    # Generate "between thresholds" probabilities.
    output_cube = self._calculate_probabilities()
    self._update_metadata(output_cube, original_units)
    return output_cube
def sum_differences_between_adjacent_grid_squares(
        cube: Cube, thresholded_cubes: CubeList) -> Cube:
    """
    Put the differences back onto the original grid by summing together
    the arrays with offsets. This covers the fact that the difference
    cubes will result in output on a staggered grid compared with the
    input cube.

    Args:
        cube:
            The cube with the original grid.
        thresholded_cubes:
            Cubelist containing the differences between adjacent grid
            squares along x and the differences between adjacent grid
            squares along y, which have been thresholded.

    Returns:
        Cube on the original grid with the values from the thresholded
        adjacent grid square difference cubes inserted. The resulting
        values have been restricted to be between 0 and 1.
    """
    threshold_cube_x, threshold_cube_y = thresholded_cubes
    cube_on_orig_grid = cube.copy()
    cube_on_orig_grid.data = np.zeros(cube_on_orig_grid.shape)
    cube_on_orig_grid.data[..., :-1, :] += threshold_cube_y.data
    cube_on_orig_grid.data[..., 1:, :] += threshold_cube_y.data
    cube_on_orig_grid.data[..., :, :-1] += threshold_cube_x.data
    cube_on_orig_grid.data[..., :, 1:] += threshold_cube_x.data
    return cube_on_orig_grid
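# A small numpy sketch of the staggered-grid summation above, assuming
# binary (0/1) difference fields: each difference value is added to both of
# the grid points it sits between, so an occurrence on either side of a
# cell boundary marks both neighbouring cells. The arrays are illustrative.
import numpy as np

diff_y = np.array([[0, 1, 0]])          # differences along y: shape (1, 3)
diff_x = np.array([[1, 0], [0, 0]])     # differences along x: shape (2, 2)

orig = np.zeros((2, 3))
orig[:-1, :] += diff_y
orig[1:, :] += diff_y
orig[:, :-1] += diff_x
orig[:, 1:] += diff_x
print(orig)
# [[1. 2. 0.]
#  [0. 1. 0.]]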
def maximum_within_vicinity(self, cube: Cube) -> Cube:
    """
    Find grid points where a phenomenon occurs within a defined radius.
    The occurrences within this vicinity are maximised, such that all
    grid points within the vicinity are recorded as having an occurrence.
    For non-binary fields, if the vicinities of two occurrences overlap,
    the maximum value within the vicinity is chosen.
    If a land-mask cube has been supplied, process land and sea points
    separately.

    Args:
        cube:
            Thresholded cube.

    Returns:
        Cube where the occurrences have been spatially spread, so that
        they are equally likely to have occurred anywhere within the
        vicinity defined using the specified radius.
    """
    if self.radius:
        grid_point_radius = distance_to_number_of_grid_cells(
            cube, self.radius)
    elif self.grid_point_radius is not None:
        grid_point_radius = self.grid_point_radius
    else:
        grid_point_radius = 0

    # Convert the grid_point_radius into a number of points along an edge
    # length, including the central point, e.g. grid_point_radius = 1,
    # points along the edge = 3.
    grid_points = (2 * grid_point_radius) + 1

    cube_dtype = cube.data.dtype
    cube_fill_value = netCDF4.default_fillvals.get(cube_dtype.str[1:],
                                                   np.inf)
    max_cube = cube.copy()
    unmasked_cube_data = cube.data.copy()
    if np.ma.is_masked(cube.data):
        unmasked_cube_data = cube.data.data.copy()
        unmasked_cube_data[cube.data.mask] = -cube_fill_value
    if self.land_mask_cube:
        max_data = np.empty_like(cube.data)
        for match in (True, False):
            matched_data = unmasked_cube_data.copy()
            matched_data[self.land_mask != match] = -cube_fill_value
            matched_max_data = maximum_filter(matched_data,
                                              size=grid_points)
            max_data = np.where(self.land_mask == match, matched_max_data,
                                max_data)
    else:
        # The following command finds the maximum value for each grid
        # point from within a square of side length "size".
        max_data = maximum_filter(unmasked_cube_data, size=grid_points)
    if np.ma.is_masked(cube.data):
        # Update only the unmasked values.
        max_cube.data.data[~cube.data.mask] = max_data[~cube.data.mask]
    else:
        max_cube.data = max_data
    return max_cube
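# A quick scipy sketch of the neighbourhood maximisation above, assuming a
# binary occurrence field and grid_point_radius = 1 (so size = 3): every
# point within one grid cell of an occurrence is flagged.
import numpy as np
from scipy.ndimage import maximum_filter

field = np.zeros((5, 5))
field[2, 2] = 1.0
print(maximum_filter(field, size=3))
# The central occurrence is spread to the surrounding 3x3 block:
# rows/cols 1-3 become 1.0, while the outer ring stays 0.0.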
def setUp(self):
    """Create a cube containing a regular lat-lon grid and other
    necessary ingredients for unit tests."""
    data = np.zeros((20, 20))
    latitudes = np.linspace(-90, 90, 20)
    longitudes = np.linspace(-180, 180, 20)
    latitude = DimCoord(latitudes, standard_name='latitude',
                        units='degrees')
    longitude = DimCoord(longitudes, standard_name='longitude',
                         units='degrees')
    cube = Cube(data, long_name="template",
                dim_coords_and_dims=[(latitude, 0), (longitude, 1)],
                units="1")

    orography = cube.copy()
    orography.data[0:10, 0:10] = 2.
    orography.rename('surface_altitude')

    land = cube.copy()
    land.rename('land_binary_mask')
    land.data = land.data + 1

    self.orography = orography
    self.land = land

    self.directory = mkdtemp()
    self.orography_path = os.path.join(self.directory, 'highres_orog.nc')
    self.land_path = os.path.join(self.directory, 'land_mask.nc')
    save_netcdf(orography, self.orography_path)
    save_netcdf(land, self.land_path)

    self.diagnostics = {
        "wind_speed": {
            "diagnostic_name": "wind_speed",
            "extrema": False,
            "filepath": "horizontal_wind_speed_and_direction_at_10m",
            "interpolation_method": "use_nearest",
            "neighbour_finding": {
                "land_constraint": True,
                "method": "fast_nearest_neighbour",
                "vertical_bias": None
            }
        }
    }
def wind_dir_decider(self, where_low_r: ndarray, wdir_cube: Cube) -> None:
    """If the wind direction is so widely scattered that the r value is
    nearly zero, this indicates that the average wind direction is
    essentially meaningless. We therefore substitute this meaningless
    average wind direction value with the wind direction calculated from
    a larger sample, by smoothing across a neighbourhood of points before
    rerunning the main technique. This is invoked rarely (1 : 100 000).

    Args:
        where_low_r:
            Array of boolean values. True where the original wind
            direction estimate has low confidence. These points are
            replaced according to self.backup_method.
        wdir_cube:
            Contains an array of wind direction data
            (realization, y, x).

    Uses:
        self.wdir_slice_mean:
            Containing the average wind direction angle (in degrees).
        self.wdir_complex:
            3D array - wind direction angles from the ensembles
            (in complex form).
        self.r_vals_slice.data:
            2D array - radius taken from the average complex wind
            direction angle.
        self.r_thresh:
            Any r value below this threshold is regarded as meaningless.
        self.realization_axis:
            Axis to collapse over.
        self.n_realizations:
            Number of realizations available in the plugin. Used to set
            the neighbourhood radius, as this is used to adjust the
            radius again in the neighbourhooding plugin.

    Defines:
        self.wdir_slice_mean.data:
            2D array - wind direction degrees where ambiguous values have
            been replaced with data from the first ensemble realization.
    """
    if self.backup_method == "neighbourhood":
        # Performs smoothing over a 6km square neighbourhood, then
        # calculates the mean wind direction.
        child_class = WindDirection(backup_method="first_realization")
        child_class.wdir_complex = self.nbhood(
            wdir_cube.copy(data=self.wdir_complex)).data
        child_class.realization_axis = self.realization_axis
        child_class.wdir_slice_mean = self.wdir_slice_mean.copy()
        child_class.calc_wind_dir_mean()
        improved_values = child_class.wdir_slice_mean.data
    else:
        # Takes realization zero (the control member).
        improved_values = wdir_cube.extract(
            iris.Constraint(realization=0)).data

    # If the r-value is low, substitute the average wind direction value
    # with the wind direction taken from the first ensemble realization.
    self.wdir_slice_mean.data = np.where(where_low_r, improved_values,
                                         self.wdir_slice_mean.data)
def setUp(self):
    """Create a cube containing a regular lat-lon grid."""
    data = np.zeros((20, 20))
    latitudes = np.linspace(-90, 90, 20)
    longitudes = np.linspace(-180, 180, 20)
    latitude = DimCoord(latitudes, standard_name='latitude',
                        units='degrees')
    longitude = DimCoord(longitudes, standard_name='longitude',
                         units='degrees')
    cube = Cube(data, long_name="test_data",
                dim_coords_and_dims=[(latitude, 0), (longitude, 1)],
                units="1")

    orography = cube.copy()
    orography.rename('surface_altitude')
    land = cube.copy()
    land.rename('land_binary_mask')
    land.data = land.data + 1

    ancillary_data = {}
    ancillary_data.update({'orography': orography})
    ancillary_data.update({'land_mask': land})

    sites = OrderedDict()
    sites.update({
        '100': {
            'latitude': 50,
            'longitude': 0,
            'altitude': 10,
            'gmtoffset': 0
        }
    })

    neighbour_list = np.empty(1, dtype=[('i', 'i8'),
                                        ('j', 'i8'),
                                        ('dz', 'f8'),
                                        ('edgepoint', 'bool_')])

    self.cube = cube
    self.ancillary_data = ancillary_data
    self.sites = sites
    self.neighbour_list = neighbour_list
def process(self, spot_data_cube: Cube, neighbour_cube: Cube,
            gridded_lapse_rate_cube: Cube) -> Cube:
    """
    Extract lapse rates from the appropriate grid points and apply them
    to the spot extracted temperatures.

    The calculation is::

        lapse_rate_adjusted_temperatures = temperatures +
            lapse_rate * vertical_displacement

    Args:
        spot_data_cube:
            A spot data cube of temperatures for the spot data sites,
            extracted from the gridded temperature field. These
            temperatures will have been extracted using the same
            neighbour_cube and neighbour_selection_method that are being
            used here.
        neighbour_cube:
            The neighbour_cube that contains the grid coordinates at
            which lapse rates should be extracted and the vertical
            displacement between those grid points on the model orography
            and the spot data sites' actual altitudes. This cube is only
            updated when a new site is added.
        gridded_lapse_rate_cube:
            A cube of temperature lapse rates on the same grid as that
            from which the spot data temperatures were extracted.

    Returns:
        A copy of the input spot_data_cube with the data modified by the
        lapse rates to give a better representation of the sites'
        temperatures.
    """
    # Check that the cubes are compatible.
    check_grid_match(
        [neighbour_cube, spot_data_cube, gridded_lapse_rate_cube])

    # Extract the lapse rates that correspond to the spot sites.
    spot_lapse_rate = SpotExtraction(
        neighbour_selection_method=self.neighbour_selection_method)(
            neighbour_cube, gridded_lapse_rate_cube)

    # Extract the vertical displacements between the model orography and
    # the sites.
    method_constraint = iris.Constraint(
        neighbour_selection_method_name=self.neighbour_selection_method)
    data_constraint = iris.Constraint(
        grid_attributes_key="vertical_displacement")
    vertical_displacement = neighbour_cube.extract(method_constraint &
                                                   data_constraint)

    # Apply the lapse rate adjustment to the temperature at each site.
    new_temperatures = (
        spot_data_cube.data +
        (spot_lapse_rate.data * vertical_displacement.data)).astype(
            np.float32)
    new_spot_cube = spot_data_cube.copy(data=new_temperatures)
    return new_spot_cube
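# A worked numpy example of the lapse-rate adjustment formula above; the
# values are illustrative. A site 50 m above the model orography with a
# lapse rate of -0.0065 K/m is cooled relative to the grid-point
# temperature, while a site below it is warmed.
import numpy as np

temperatures = np.array([285.0, 280.0], dtype=np.float32)           # K
lapse_rates = np.array([-0.0065, -0.0065], dtype=np.float32)        # K/m
vertical_displacements = np.array([50.0, -20.0], dtype=np.float32)  # m

adjusted = (temperatures +
            lapse_rates * vertical_displacements).astype(np.float32)
print(adjusted)  # approximately [284.675 280.13]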
def setUp(self):
    # Basic test values.
    src_x_y_value = np.array([
        [20.12, 11.73, 0.01],
        [120.23, -20.73, 1.12],
        [290.34, 33.88, 2.23],
        [-310.45, 57.8, 3.34],
    ])
    tgt_grid_x = np.array([-173.2, -100.3, -32.5, 1.4, 46.6, 150.7])
    tgt_grid_y = np.array([-80.1, -30.2, 0.3, 47.4, 75.5])

    # Make sample 1-D source cube.
    src = Cube(src_x_y_value[:, 2])
    src.add_aux_coord(
        AuxCoord(src_x_y_value[:, 0], standard_name="longitude",
                 units="degrees"),
        0,
    )
    src.add_aux_coord(
        AuxCoord(src_x_y_value[:, 1], standard_name="latitude",
                 units="degrees"),
        0,
    )
    self.src_cube = src

    # Make sample grid cube.
    grid = Cube(np.zeros(tgt_grid_y.shape + tgt_grid_x.shape))
    grid.add_dim_coord(
        DimCoord(tgt_grid_y, standard_name="latitude", units="degrees"),
        0)
    grid.add_dim_coord(
        DimCoord(tgt_grid_x, standard_name="longitude", units="degrees"),
        1)
    self.grid_cube = grid

    # Make expected-result, from the expected source-index at each point.
    expected_result_indices = np.array([
        [1, 1, 1, 1, 1, 1],
        [1, 2, 0, 0, 0, 1],
        [1, 2, 2, 0, 0, 1],
        [3, 2, 2, 3, 3, 3],
        [3, 2, 3, 3, 3, 3],
    ])
    self.expected_data = self.src_cube.data[expected_result_indices]

    # Make a 3D source cube, based on the existing 2d test data.
    z_cubes = [src.copy() for _ in range(3)]
    for i_z, z_cube in enumerate(z_cubes):
        z_cube.add_aux_coord(DimCoord([i_z], long_name="z"))
        z_cube.data = z_cube.data + 100.0 * i_z
    self.src_z_cube = CubeList(z_cubes).merge_cube()

    # Make a corresponding 3d expected result.
    self.expected_data_zxy = self.src_z_cube.data[:,
                                                  expected_result_indices]
class Test_delete_attributes(IrisTest):
    """Test the delete_attributes method."""

    def setUp(self):
        """Create a cube with attributes to be deleted."""
        data = np.zeros((2, 2))
        long_name = "probability_of_rainfall_rate"
        units = "m s^-1"
        attributes = {
            'title': 'This is a cube',
            'tithe': '10 percent',
            'mosg_model': 'gl_det',
            'mosg_grid_version': 1.0,
            'mosg_grid_name': 'global'
        }
        self.cube = Cube(data, long_name=long_name, units=units)
        self.cube.attributes = attributes

    def test_basic(self):
        """Test that an empty call leaves the cube unchanged."""
        cube = self.cube.copy()
        delete_attributes(cube, [])
        self.assertDictEqual(self.cube.attributes, cube.attributes)

    def test_accepts_string(self):
        """Test that a single string passed as an argument works."""
        attributes_to_delete = 'title'
        attributes = copy(self.cube.attributes)
        attributes.pop(attributes_to_delete)
        delete_attributes(self.cube, attributes_to_delete)
        self.assertDictEqual(attributes, self.cube.attributes)

    def test_accepts_list_of_complete_matches(self):
        """Test that a list of complete attribute names removes the
        expected attributes."""
        attributes_to_delete = ['title', 'tithe', 'mosg_model']
        attributes = copy(self.cube.attributes)
        for item in attributes_to_delete:
            attributes.pop(item)
        delete_attributes(self.cube, attributes_to_delete)
        self.assertDictEqual(attributes, self.cube.attributes)

    def test_accepts_list_of_partial_matches(self):
        """Test that a list of partial patterns removes the expected
        attributes."""
        attributes_to_delete = ['tit', 'mosg_grid']
        expected = {'mosg_model': 'gl_det'}
        delete_attributes(self.cube, attributes_to_delete)
        self.assertDictEqual(expected, self.cube.attributes)
def process(
    self,
    cube: Cube,
    coord_name: str,
    inverse_ordering: bool = False,
) -> Cube:
    """
    Calculate nonlinear weights for a given cube and coord.

    Args:
        cube:
            Cube to be blended across the coord.
        coord_name:
            Name of the coordinate in the cube to be blended.
        inverse_ordering:
            The input cube blend coordinate will be in ascending order,
            so that calculated blend weights decrease with increasing
            value. For e.g. cycle blending by forecast reference time, we
            wish to weight more recent cubes more highly. This flag gives
            the option to reverse the blend coordinate order so as to
            have higher weights for the higher values.

    Returns:
        1D cube of normalised (sum = 1.0) weights matching the input
        dimension to be blended.

    Raises:
        TypeError: input is not a cube
    """
    if not isinstance(cube, iris.cube.Cube):
        msg = ("The first argument must be an instance of "
               "iris.cube.Cube but is"
               " {0:s}".format(str(type(cube))))
        raise TypeError(msg)

    if inverse_ordering:
        # Make a copy of the input cube from which to calculate weights.
        inverted_cube = cube.copy()
        inverted_cube = sort_coord_in_cube(inverted_cube, coord_name,
                                           descending=True)
        cube = inverted_cube

    weights = self.nonlinear_weights(len(cube.coord(coord_name).points))
    weights_cube = WeightsUtilities.build_weights_cube(
        cube, weights, coord_name)

    if inverse_ordering:
        # Re-sort the weights cube so that it is in ascending order of
        # the blend coordinate (and hence matches the input cube).
        weights_cube = sort_coord_in_cube(weights_cube, coord_name)

    return weights_cube
def create_cube_with_percentiles(
    percentiles: Union[List[float], ndarray],
    template_cube: Cube,
    cube_data: ndarray,
    cube_unit: Optional[Union[Unit, str]] = None,
) -> Cube:
    """
    Create a cube with a percentile coordinate based on a template cube.
    The resulting cube will have an extra percentile coordinate compared
    with the template cube. The shape of the cube_data should be the
    shape of the desired output cube.

    Args:
        percentiles:
            Ensemble percentiles. There should be the same number of
            percentiles as the first dimension of cube_data.
        template_cube:
            Cube to copy metadata from.
        cube_data:
            Data to insert into the template cube. The shape of the
            cube_data, excluding the dimension associated with the
            percentile coordinate, should be the same as the shape of
            template_cube. For example, if the template_cube shape is
            (3, 3, 3), the cube_data shape is (10, 3, 3, 3), where there
            are 10 percentiles.
        cube_unit:
            The units of the data within the cube, if different from
            those of the template_cube.

    Returns:
        Cube containing a percentile coordinate as the leading dimension
        (or a scalar percentile coordinate if single-valued).
    """
    # Create a cube with a new percentile dimension.
    cubes = iris.cube.CubeList([])
    for point in percentiles:
        cube = template_cube.copy()
        cube.add_aux_coord(
            iris.coords.AuxCoord(
                np.float32(point), long_name="percentile",
                units=unit.Unit("%")
            )
        )
        cubes.append(cube)
    result = cubes.merge_cube()

    # Replace the data and units.
    result.data = cube_data
    if cube_unit is not None:
        result.units = cube_unit
    return result
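# A minimal usage sketch for create_cube_with_percentiles, assuming the
# function above is importable; the template cube and data values are
# illustrative.
import numpy as np
from iris.cube import Cube

template = Cube(np.zeros((2, 2), dtype=np.float32),
                long_name="air_temperature", units="K")
percentiles = [25.0, 50.0, 75.0]
data = np.stack([np.full((2, 2), value, dtype=np.float32)
                 for value in (280.0, 283.0, 286.0)])

result = create_cube_with_percentiles(percentiles, template, data)
print(result.shape)                       # (3, 2, 2)
print(result.coord("percentile").points)  # [25. 50. 75.]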
def maximum_within_vicinity(self, cube: Cube) -> Cube:
    """
    Find grid points where a phenomenon occurs within a defined distance.
    The occurrences within this vicinity are maximised, such that all
    grid points within the vicinity are recorded as having an occurrence.
    For non-binary fields, if the vicinities of two occurrences overlap,
    the maximum value within the vicinity is chosen.
    If a land-mask cube has been supplied, process land and sea points
    separately.

    Args:
        cube:
            Thresholded cube.

    Returns:
        Cube where the occurrences have been spatially spread, so that
        they are equally likely to have occurred anywhere within the
        vicinity defined using the specified distance.
    """
    grid_spacing = distance_to_number_of_grid_cells(cube, self.distance)
    # Convert the number of grid points (i.e. grid_spacing) represented
    # by self.distance, e.g. where grid_spacing=1 is an increment to
    # a central point, into grid_cells, which is the total number of
    # points within the defined vicinity along the y axis,
    # e.g. grid_cells=3.
    grid_cells = (2 * grid_spacing) + 1

    max_cube = cube.copy()
    unmasked_cube_data = cube.data.copy()
    if np.ma.is_masked(cube.data):
        unmasked_cube_data = cube.data.data.copy()
        unmasked_cube_data[cube.data.mask] = -np.inf
    if self.land_mask_cube:
        max_data = np.empty_like(cube.data)
        for match in (True, False):
            matched_data = unmasked_cube_data.copy()
            matched_data[self.land_mask != match] = -np.inf
            matched_max_data = maximum_filter(matched_data,
                                              size=grid_cells)
            max_data = np.where(self.land_mask == match, matched_max_data,
                                max_data)
    else:
        # The following command finds the maximum value for each grid
        # point from within a square of side length "size".
        max_data = maximum_filter(unmasked_cube_data, size=grid_cells)
    if np.ma.is_masked(cube.data):
        # Update only the unmasked values.
        max_cube.data.data[~cube.data.mask] = max_data[~cube.data.mask]
    else:
        max_cube.data = max_data
    return max_cube
def setUp(self):
    # Basic test values.
    src_x_y_value = np.array([
        [20.12, 11.73, 0.01],
        [120.23, -20.73, 1.12],
        [290.34, 33.88, 2.23],
        [-310.45, 57.8, 3.34]])
    tgt_grid_x = np.array([-173.2, -100.3, -32.5, 1.4, 46.6, 150.7])
    tgt_grid_y = np.array([-80.1, -30.2, 0.3, 47.4, 75.5])

    # Make sample 1-D source cube.
    src = Cube(src_x_y_value[:, 2])
    src.add_aux_coord(AuxCoord(src_x_y_value[:, 0],
                               standard_name='longitude',
                               units='degrees'), 0)
    src.add_aux_coord(AuxCoord(src_x_y_value[:, 1],
                               standard_name='latitude',
                               units='degrees'), 0)
    self.src_cube = src

    # Make sample grid cube.
    grid = Cube(np.zeros(tgt_grid_y.shape + tgt_grid_x.shape))
    grid.add_dim_coord(DimCoord(tgt_grid_y, standard_name='latitude',
                                units='degrees'), 0)
    grid.add_dim_coord(DimCoord(tgt_grid_x, standard_name='longitude',
                                units='degrees'), 1)
    self.grid_cube = grid

    # Make expected-result, from the expected source-index at each point.
    expected_result_indices = np.array([
        [1, 1, 1, 1, 1, 1],
        [1, 2, 0, 0, 0, 1],
        [1, 2, 2, 0, 0, 1],
        [3, 2, 2, 3, 3, 3],
        [3, 2, 3, 3, 3, 3]])
    self.expected_data = self.src_cube.data[expected_result_indices]

    # Make a 3D source cube, based on the existing 2d test data.
    z_cubes = [src.copy() for _ in range(3)]
    for i_z, z_cube in enumerate(z_cubes):
        z_cube.add_aux_coord(DimCoord([i_z], long_name='z'))
        z_cube.data = z_cube.data + 100.0 * i_z
    self.src_z_cube = CubeList(z_cubes).merge_cube()

    # Make a corresponding 3d expected result.
    self.expected_data_zxy = \
        self.src_z_cube.data[:, expected_result_indices]
def make_shower_condition_cube(cube: Cube, in_place: bool = False) -> Cube:
    """
    Modify the input cube's metadata and coordinates to produce a shower
    condition proxy. The input cube is expected to possess a single
    valued threshold coordinate.

    Args:
        cube:
            A thresholded diagnostic to be used as a proxy for showery
            conditions. The threshold coordinate should contain only one
            value, which denotes the key threshold above which conditions
            are showery, and below which precipitation is more likely
            dynamic.
        in_place:
            If set true, the cube is modified in place. By default a
            modified copy is returned.

    Returns:
        A shower condition probability cube that is an appropriately
        renamed version of the input with an updated threshold coordinate
        representing the probability of shower conditions occurring.

    Raises:
        CoordinateNotFoundError: Input has no threshold coordinate.
        ValueError: Input cube's threshold coordinate is multi-valued.
    """
    if not in_place:
        cube = cube.copy()

    shower_condition_name = "shower_condition"
    cube.rename(f"probability_of_{shower_condition_name}_above_threshold")
    try:
        shower_threshold = find_threshold_coordinate(cube)
    except CoordinateNotFoundError as err:
        msg = "Input has no threshold coordinate and cannot be used"
        raise CoordinateNotFoundError(msg) from err

    try:
        (_,) = shower_threshold.points
    except ValueError as err:
        msg = ("Expected a single valued threshold coordinate, but "
               f"threshold contains multiple points: "
               f"{shower_threshold.points}")
        raise ValueError(msg) from err

    cube.coord(shower_threshold).rename(shower_condition_name)
    cube.coord(shower_condition_name).var_name = "threshold"
    cube.coord(shower_condition_name).points = FLOAT_DTYPE(1.0)
    cube.coord(shower_condition_name).units = 1

    return cube
def test_multidim_cubes():
    """
    Test for
    :func:`esmf_regrid.experimental.unstructured_scheme.MeshToGridESMFRegridder`.

    Tests with multidimensional cubes. The source cube contains
    coordinates on the dimensions before and after the mesh dimension.
    """
    mesh = _full_mesh()
    mesh_length = mesh.connectivity(contains_face=True).shape[0]

    h = 2
    t = 3
    height = DimCoord(np.arange(h), standard_name="height")
    time = DimCoord(np.arange(t), standard_name="time")

    src_data = np.empty([t, mesh_length, h])
    src_data[:] = np.arange(t * h).reshape([t, h])[:, np.newaxis, :]
    mesh_cube = Cube(src_data)
    mesh_coord_x, mesh_coord_y = mesh.to_MeshCoords("face")
    mesh_cube.add_aux_coord(mesh_coord_x, 1)
    mesh_cube.add_aux_coord(mesh_coord_y, 1)
    mesh_cube.add_dim_coord(time, 0)
    mesh_cube.add_dim_coord(height, 2)

    n_lons = 6
    n_lats = 5
    lon_bounds = (-180, 180)
    lat_bounds = (-90, 90)
    tgt = _grid_cube(n_lons, n_lats, lon_bounds, lat_bounds, circular=True)

    src_cube = mesh_cube.copy()
    src_cube.transpose([1, 0, 2])
    regridder = MeshToGridESMFRegridder(src_cube, tgt)
    result = regridder(mesh_cube)

    # Lenient check for data.
    expected_data = np.empty([t, n_lats, n_lons, h])
    expected_data[:] = np.arange(t * h).reshape(t, h)[
        :, np.newaxis, np.newaxis, :
    ]
    assert np.allclose(expected_data, result.data)

    expected_cube = Cube(expected_data)
    expected_cube.add_dim_coord(time, 0)
    expected_cube.add_dim_coord(tgt.coord("latitude"), 1)
    expected_cube.add_dim_coord(tgt.coord("longitude"), 2)
    expected_cube.add_dim_coord(height, 3)

    # Check metadata and scalar coords.
    result.data = expected_data
    assert expected_cube == result
def setUp(self):
    empty = Cube([])
    self.cube_no_attrs = empty.copy()

    self.cube_a1 = empty.copy()
    self.cube_a1.attributes.update({"a": 1})
    self.cube_a2 = empty.copy()
    self.cube_a2.attributes.update({"a": 2})
    self.cube_a1b5 = empty.copy()
    self.cube_a1b5.attributes.update({"a": 1, "b": 5})
    self.cube_a1b6 = empty.copy()
    self.cube_a1b6.attributes.update({"a": 1, "b": 6})
    self.cube_a2b6 = empty.copy()
    self.cube_a2b6.attributes.update({"a": 2, "b": 6})
    self.cube_b5 = empty.copy()
    self.cube_b5.attributes.update({"b": 5})

    # Array attribute values
    v1 = np.array([11, 12, 13])
    v2 = np.array([11, 9999, 13])
    self.v1 = v1
    self.v2 = v2
    self.cube_a1b5v1 = empty.copy()
    self.cube_a1b5v1.attributes.update({"a": 1, "b": 5, "v": v1})
    self.cube_a1b6v1 = empty.copy()
    self.cube_a1b6v1.attributes.update({"a": 1, "b": 6, "v": v1})
    self.cube_a1b6v2 = empty.copy()
    self.cube_a1b6v2.attributes.update({"a": 1, "b": 6, "v": v2})
def setUp(self):
    empty = Cube([])
    self.cube_no_attrs = empty.copy()

    self.cube_a1 = empty.copy()
    self.cube_a1.attributes.update({"a": 1})
    self.cube_a2 = empty.copy()
    self.cube_a2.attributes.update({"a": 2})
    self.cube_a1b5 = empty.copy()
    self.cube_a1b5.attributes.update({"a": 1, "b": 5})
    self.cube_a1b6 = empty.copy()
    self.cube_a1b6.attributes.update({"a": 1, "b": 6})
    self.cube_a2b6 = empty.copy()
    self.cube_a2b6.attributes.update({"a": 2, "b": 6})
    self.cube_b5 = empty.copy()
    self.cube_b5.attributes.update({"b": 5})

    # Array attribute values
    v1 = np.array([11, 12, 13])
    v2 = np.array([11, 9999, 13])
    self.v1 = v1
    self.cube_a1b5v1 = empty.copy()
    self.cube_a1b5v1.attributes.update({"a": 1, "b": 5, "v": v1})
    self.cube_a1b6v1 = empty.copy()
    self.cube_a1b6v1.attributes.update({"a": 1, "b": 6, "v": v1})
    self.cube_a1b6v2 = empty.copy()
    self.cube_a1b6v2.attributes.update({"a": 1, "b": 6, "v": v2})
def setUp(self):
    empty = Cube([])
    self.cube_no_attrs = empty.copy()

    self.cube_a1 = empty.copy()
    self.cube_a1.attributes.update({'a': 1})
    self.cube_a2 = empty.copy()
    self.cube_a2.attributes.update({'a': 2})
    self.cube_a1b5 = empty.copy()
    self.cube_a1b5.attributes.update({'a': 1, 'b': 5})
    self.cube_a1b6 = empty.copy()
    self.cube_a1b6.attributes.update({'a': 1, 'b': 6})
    self.cube_a2b6 = empty.copy()
    self.cube_a2b6.attributes.update({'a': 2, 'b': 6})
    self.cube_b5 = empty.copy()
    self.cube_b5.attributes.update({'b': 5})

    # Array attribute values
    v1 = np.array([11, 12, 13])
    v2 = np.array([11, 9999, 13])
    self.v1 = v1
    self.cube_a1b5v1 = empty.copy()
    self.cube_a1b5v1.attributes.update({'a': 1, 'b': 5, 'v': v1})
    self.cube_a1b6v1 = empty.copy()
    self.cube_a1b6v1.attributes.update({'a': 1, 'b': 6, 'v': v1})
    self.cube_a1b6v2 = empty.copy()
    self.cube_a1b6v2.attributes.update({'a': 1, 'b': 6, 'v': v2})
def _nonlatlon_uv_cubes(x, y, u, v):
    # Create u and v test cubes from x, y, u, v arrays.
    coord_cls = DimCoord if x.ndim == 1 else AuxCoord
    x_coord = coord_cls(x, long_name='x')
    y_coord = coord_cls(y, long_name='y')
    u_cube = Cube(u, long_name='u', units='ms-1')
    if x.ndim == 1:
        u_cube.add_dim_coord(y_coord, 0)
        u_cube.add_dim_coord(x_coord, 1)
    else:
        u_cube.add_aux_coord(y_coord, (0, 1))
        u_cube.add_aux_coord(x_coord, (0, 1))
    v_cube = u_cube.copy()
    v_cube.rename('v')
    v_cube.data = v
    return u_cube, v_cube
def _nonlatlon_uv_cubes(x, y, u, v):
    # Create u and v test cubes from x, y, u, v arrays.
    coord_cls = DimCoord if x.ndim == 1 else AuxCoord
    x_coord = coord_cls(x, long_name="x")
    y_coord = coord_cls(y, long_name="y")
    u_cube = Cube(u, long_name="u", units="ms-1")
    if x.ndim == 1:
        u_cube.add_dim_coord(y_coord, 0)
        u_cube.add_dim_coord(x_coord, 1)
    else:
        u_cube.add_aux_coord(y_coord, (0, 1))
        u_cube.add_aux_coord(x_coord, (0, 1))
    v_cube = u_cube.copy()
    v_cube.rename("v")
    v_cube.data = v
    return u_cube, v_cube
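# A minimal usage sketch for _nonlatlon_uv_cubes, assuming iris and numpy
# are available; the 1-D x/y arrays exercise the DimCoord branch, while 2-D
# arrays would exercise the AuxCoord branch.
import numpy as np
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube

x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0])
u = np.ones((2, 3))
v = np.zeros((2, 3))

u_cube, v_cube = _nonlatlon_uv_cubes(x, y, u, v)
print(u_cube.name(), u_cube.shape)  # u (2, 3)
print(v_cube.name())                # v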
def transform_grid_to_lat_lon(cube: Cube) -> Tuple[ndarray, ndarray]:
    """
    Calculate the latitude and longitude of each point in the cube.

    Args:
        cube:
            Cube with points to transform.

    Returns:
        lats:
            Array of cube.data.shape of latitude values.
        lons:
            Array of cube.data.shape of longitude values.
    """
    trg_latlon = ccrs.PlateCarree()
    trg_crs = cube.coord_system().as_cartopy_crs()
    cube = cube.copy()
    # TODO use the proj units that are accessible with later versions of
    # proj to determine the default units to convert to for a given
    # projection. Assuming proj units of metre for all projections not in
    # degrees.
    for axis in ["x", "y"]:
        try:
            cube.coord(axis=axis).convert_units("m")
        except ValueError as err:
            msg = (
                "Cube passed to transform_grid_to_lat_lon does not have "
                f"an {axis} coordinate with units that can be converted "
                "to metres. "
            )
            raise ValueError(msg + str(err))
    x_points = cube.coord(axis="x").points
    y_points = cube.coord(axis="y").points
    x_zeros = np.zeros_like(x_points)
    y_zeros = np.zeros_like(y_points)

    # Broadcast the x points and y points onto the grid.
    all_x_points = y_zeros.reshape(len(y_zeros), 1) + x_points
    all_y_points = y_points.reshape(len(y_points), 1) + x_zeros

    # Transform the points.
    points = trg_latlon.transform_points(trg_crs, all_x_points,
                                         all_y_points)
    lons = points[..., 0]
    lats = points[..., 1]

    return lats, lons
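# A small numpy sketch of the broadcasting trick above: adding a zeros
# column vector to a 1-D row of x points (and vice versa) expands both 1-D
# coordinate arrays into full 2-D grids of shape (ny, nx). The values are
# illustrative.
import numpy as np

x_points = np.array([0.0, 1000.0, 2000.0])   # nx = 3
y_points = np.array([0.0, 500.0])            # ny = 2

all_x = np.zeros_like(y_points).reshape(-1, 1) + x_points
all_y = y_points.reshape(-1, 1) + np.zeros_like(x_points)
print(all_x)  # each row repeats the x points
print(all_y)  # each column repeats the y points
# np.meshgrid(x_points, y_points) would produce the same pair of arrays.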
def _apply_orographic_enhancement(self, precip_cube: Cube,
                                  oe_cube: Cube) -> Cube:
    """Combine the precipitation rate cube and the orographic enhancement
    cube.

    Args:
        precip_cube:
            Cube containing the input precipitation field.
        oe_cube:
            Cube containing the orographic enhancement field matching
            the validity time of the precipitation cube.

    Returns:
        Cube containing the precipitation rate field modified by the
        orographic enhancement cube.
    """
    # Convert the orographic enhancement into the units of the
    # precipitation rate cube.
    oe_cube.convert_units(precip_cube.units)

    # Set the orographic enhancement to be zero for points with a
    # precipitation rate of < 1/32 mm/hr.
    original_units = Unit("mm/hr")
    threshold_in_cube_units = original_units.convert(
        self.min_precip_rate_mmh, precip_cube.units)

    # Ignore invalid warnings generated if e.g. a NaN is encountered
    # within the less than (<) comparison.
    with np.errstate(invalid="ignore"):
        oe_cube.data[precip_cube.data < threshold_in_cube_units] = 0.0

    # Add / subtract the orographic enhancement where the data is not
    # masked.
    cube = precip_cube.copy()
    if self.operation == "add":
        cube.data = cube.data + oe_cube.data
    elif self.operation == "subtract":
        cube.data = cube.data - oe_cube.data
    else:
        msg = ("Operation '{}' not supported for combining "
               "precipitation rate and "
               "orographic enhancement.".format(self.operation))
        raise ValueError(msg)
    return cube
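# A short cf_units sketch of the threshold conversion above: 1/32 mm/hr
# expressed in SI precipitation rate units (m s-1), as would happen when
# the precipitation cube carries SI units.
from cf_units import Unit

min_precip_rate_mmh = 1.0 / 32.0
threshold = Unit("mm/hr").convert(min_precip_rate_mmh, "m s-1")
print(threshold)  # ~8.68e-09 m s-1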
class Test_is_compatible(tests.IrisTest):
    def setUp(self):
        self.test_cube = Cube([1.])
        self.other_cube = self.test_cube.copy()

    def test_noncommon_array_attrs_compatible(self):
        # Non-common array attributes should be ok.
        self.test_cube.attributes['array_test'] = np.array([1.0, 2, 3])
        self.assertTrue(self.test_cube.is_compatible(self.other_cube))

    def test_matching_array_attrs_compatible(self):
        # Matching array attributes should be ok.
        self.test_cube.attributes['array_test'] = np.array([1.0, 2, 3])
        self.other_cube.attributes['array_test'] = np.array([1.0, 2, 3])
        self.assertTrue(self.test_cube.is_compatible(self.other_cube))

    def test_different_array_attrs_incompatible(self):
        # Differing array attributes should make the cubes incompatible.
        self.test_cube.attributes['array_test'] = np.array([1.0, 2, 3])
        self.other_cube.attributes['array_test'] = np.array([1.0, 2, 777.7])
        self.assertFalse(self.test_cube.is_compatible(self.other_cube))
def test__lazy(self):
    cube = Cube(biggus.NumpyArrayAdapter(np.array([1, 0])))
    self._check_copy(cube, cube.copy())
def test__masked_emptymask(self):
    cube = Cube(np.ma.array([0, 1]))
    self._check_copy(cube, cube.copy())
def test__masked_arraymask(self):
    cube = Cube(np.ma.array([0, 1], mask=[True, False]))
    self._check_copy(cube, cube.copy())
def test__scalar(self):
    cube = Cube(0)
    self._check_copy(cube, cube.copy())
def test__masked_scalar_emptymask(self):
    cube = Cube(np.ma.array(0))
    self._check_copy(cube, cube.copy())
def test__masked_scalar_arraymask(self):
    cube = Cube(np.ma.array(0, mask=False))
    self._check_copy(cube, cube.copy())