Example #1
    def resolve_wind_components(
        speed: Cube, angle: Cube, adj: ndarray
    ) -> Tuple[Cube, Cube]:
        """
        Perform trigonometric reprojection onto x and y axes

        Args:
            speed:
                Cube containing wind speed data
            angle:
                Cube containing wind directions as angles from true North
            adj:
                2D array of wind direction angle adjustments in radians, to
                convert zero reference from true North to grid North.
                Broadcast automatically if speed and angle cubes have extra
                dimensions.

        Returns:
            - Cube containing wind vector component in the positive
              x-direction u_speed
            - Cube containing wind vector component in the positive
              y-direction v_speed
        """
        angle.convert_units("radians")
        angle.data += adj

        # output vectors should be pointing "to" not "from"
        if "wind_from_direction" in angle.name():
            angle.data += np.pi
        sin_angle = np.sin(angle.data)
        cos_angle = np.cos(angle.data)
        uspeed = np.multiply(speed.data, sin_angle)
        vspeed = np.multiply(speed.data, cos_angle)
        return speed.copy(data=uspeed), speed.copy(data=vspeed)
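
The reprojection itself is plain trigonometry. A minimal numpy-only sketch (not taken from the source) illustrates the sign convention for a 10 m/s wind blowing from due east with no grid-north adjustment:

import numpy as np

speed = 10.0                        # wind speed, m s-1
angle = np.deg2rad(90.0)            # "from" direction: due east, true North reference
angle += np.pi                      # flip to the "to" direction
u_speed = speed * np.sin(angle)     # ~ -10.0, i.e. blowing towards the west
v_speed = speed * np.cos(angle)     # ~ 0.0
print(u_speed, v_speed)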
Example #2
    def process(
        self,
        cube: Cube,
        new_name: Optional[str] = None,
        new_units: Optional[str] = None,
        coords_to_remove: Optional[List[str]] = None,
        attributes_dict: Optional[Dict[str, Any]] = None,
    ) -> Cube:
        """
        Perform compulsory and user-configurable metadata adjustments.  The
        compulsory adjustments are:

        - to collapse any scalar dimensions apart from realization (which is expected
          always to be a dimension);
        - to cast the cube data and coordinates into suitable datatypes;
        - to convert time-related metadata into the required units;
        - to remove cell method ("point": "time").

        Args:
            cube:
                Input cube to be standardised
            new_name:
                Optional rename for output cube
            new_units:
                Optional unit conversion for output cube
            coords_to_remove:
                Optional list of scalar coordinates to remove from output cube
            attributes_dict:
                Optional dictionary of required attribute updates. Keys are
                attribute names, and values are the required value or "remove".

        Returns:
            The processed cube
        """
        cube = self._collapse_scalar_dimensions(cube)

        if new_name:
            cube.rename(new_name)
        if new_units:
            cube.convert_units(new_units)
        if coords_to_remove:
            self._remove_scalar_coords(cube, coords_to_remove)
        if attributes_dict:
            amend_attributes(cube, attributes_dict)
        self._discard_redundant_cell_methods(cube)

        # this must be done after unit conversion: if the input is an integer
        # field, unit conversion outputs the new data as float64
        self._standardise_dtypes_and_units(cube)

        return cube
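
A hypothetical call sketch follows; `plugin` (an instance of whichever plugin class owns this method) and `cube` are assumptions and are not defined in the source above:

# Hypothetical usage sketch; `plugin` and `cube` are assumed to exist already.
standardised = plugin.process(
    cube,
    new_name="air_temperature",             # rename the diagnostic
    new_units="K",                          # e.g. convert Celsius data to Kelvin
    coords_to_remove=["height"],            # drop an unwanted scalar coordinate
    attributes_dict={"history": "remove"},  # delete the history attribute
)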
Example #3
    def process(self, temperature: Cube, lapse_rate: Cube, source_orog: Cube,
                dest_orog: Cube) -> Cube:
        """Applies lapse rate correction to temperature forecast.  All cubes'
        units are modified in place.

        Args:
            temperature:
                Input temperature field to be adjusted
            lapse_rate:
                Cube of pre-calculated lapse rates
            source_orog:
                2D cube of source orography heights
            dest_orog:
                2D cube of destination orography heights

        Returns:
            Lapse-rate adjusted temperature field, in Kelvin
        """
        lapse_rate.convert_units("K m-1")
        self.xy_coords = [
            lapse_rate.coord(axis="y"),
            lapse_rate.coord(axis="x")
        ]

        self._check_dim_coords(temperature, lapse_rate)

        if not spatial_coords_match([temperature, source_orog]):
            raise ValueError(
                "Source orography spatial coordinates do not match "
                "temperature grid")

        if not spatial_coords_match([temperature, dest_orog]):
            raise ValueError(
                "Destination orography spatial coordinates do not match "
                "temperature grid")

        orog_diff = self._calc_orog_diff(source_orog, dest_orog)

        adjusted_temperature = []
        for lr_slice, t_slice in zip(lapse_rate.slices(self.xy_coords),
                                     temperature.slices(self.xy_coords)):
            newcube = t_slice.copy()
            newcube.convert_units("K")
            newcube.data += np.multiply(orog_diff.data, lr_slice.data)
            adjusted_temperature.append(newcube)

        return iris.cube.CubeList(adjusted_temperature).merge_cube()
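
A numpy-only worked example (not from the source) of the per-point adjustment applied inside the slice loop, T_adjusted = T + lapse_rate * (dest_orog - source_orog):

import numpy as np

temperature = np.float32(283.15)   # K
lapse_rate = np.float32(-0.0065)   # K m-1, standard-atmosphere value
source_orog = np.float32(100.0)    # m
dest_orog = np.float32(500.0)      # m

adjusted = temperature + lapse_rate * (dest_orog - source_orog)
print(adjusted)  # 280.55 K: the destination is 400 m higher, so 2.6 K colder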
Example #4
    def _calc_orog_diff(self, source_orog: Cube, dest_orog: Cube) -> Cube:
        """Get difference in orography heights, in metres

        Args:
            source_orog:
                2D cube of source orography heights (units modified in place)
            dest_orog:
                2D cube of destination orography heights (units modified in
                place)

        Returns:
            The difference cube
        """
        source_orog.convert_units("m")
        dest_orog.convert_units("m")
        orog_diff = next(dest_orog.slices(self.xy_coords)) - next(
            source_orog.slices(self.xy_coords))
        return orog_diff
Example #5
 def _check_inputs(cube: Cube, reference_cube: Cube,
                   limit: Optional[Cube]) -> None:
     """
     Check that the input cubes are compatible and the data is complete or
     masked as expected.
     """
     if np.isnan(reference_cube.data).any():
         raise ValueError(
             "The reference cube contains np.nan data indicating that it "
             "is not complete across the domain.")
     try:
         reference_cube.convert_units(cube.units)
         if limit is not None:
             limit.convert_units(cube.units)
     except ValueError as err:
         raise type(err)(
             "Reference cube and/or limit do not have units compatible with"
             " cube. " + str(err))
Example #6
    def _apply_orographic_enhancement(self, precip_cube: Cube,
                                      oe_cube: Cube) -> Cube:
        """Combine the precipitation rate cube and the orographic enhancement
        cube.

        Args:
            precip_cube:
                Cube containing the input precipitation field.
            oe_cube:
                Cube containing the orographic enhancement field matching
                the validity time of the precipitation cube.

        Returns:
            Cube containing the precipitation rate field modified by the
            orographic enhancement cube.
        """
        # Convert orographic enhancement into the units of the precipitation
        # rate cube.
        oe_cube.convert_units(precip_cube.units)

        # Set orographic enhancement to be zero for points with a
        # precipitation rate of < 1/32 mm/hr.
        original_units = Unit("mm/hr")
        threshold_in_cube_units = original_units.convert(
            self.min_precip_rate_mmh, precip_cube.units)

        # Ignore invalid warnings generated if e.g. a NaN is encountered
        # within the less than (<) comparison.
        with np.errstate(invalid="ignore"):
            oe_cube.data[precip_cube.data < threshold_in_cube_units] = 0.0

        # Add / subtract orographic enhancement where data is not masked
        cube = precip_cube.copy()
        if self.operation == "add":
            cube.data = cube.data + oe_cube.data
        elif self.operation == "subtract":
            cube.data = cube.data - oe_cube.data
        else:
            msg = ("Operation '{}' not supported for combining "
                   "precipitation rate and "
                   "orographic enhancement.".format(self.operation))
            raise ValueError(msg)

        return cube
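
The threshold conversion step can be checked in isolation with cf_units; a minimal sketch (not from the source):

from cf_units import Unit

min_precip_rate_mmh = 1.0 / 32.0
threshold_si = Unit("mm/hr").convert(min_precip_rate_mmh, "m s-1")
print(threshold_si)  # ~8.7e-9, i.e. 1/32 mm/hr expressed in m s-1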
Example #7
    def __init__(self,
                 vel_x: Cube,
                 vel_y: Cube,
                 attributes_dict: Optional[Dict] = None) -> None:
        """
        Initialises the plugin.  Velocities are expected to be on a regular
        grid (such that grid spacing in metres is the same at all points in
        the domain).

        Args:
            vel_x:
                Cube containing a 2D array of velocities along the x
                coordinate axis
            vel_y:
                Cube containing a 2D array of velocities along the y
                coordinate axis
            attributes_dict:
                Dictionary containing information for amending the attributes
                of the output cube.
        """

        # check each input velocity cube has precisely two non-scalar
        # dimension coordinates (spatial x/y)
        check_input_coords(vel_x)
        check_input_coords(vel_y)

        # check input velocity cubes have the same spatial coordinates
        if vel_x.coord(axis="x") != vel_y.coord(axis="x") or vel_x.coord(
                axis="y") != vel_y.coord(axis="y"):
            raise InvalidCubeError("Velocity cubes on unmatched grids")

        vel_x.convert_units("m s-1")
        vel_y.convert_units("m s-1")

        self.vel_x = vel_x
        self.vel_y = vel_y

        self.x_coord = vel_x.coord(axis="x")
        self.y_coord = vel_x.coord(axis="y")

        # Initialise metadata dictionary.
        if attributes_dict is None:
            attributes_dict = {}
        self.attributes_dict = attributes_dict
Example #8
    def create_wet_bulb_temperature_cube(self, temperature: Cube,
                                         relative_humidity: Cube,
                                         pressure: Cube) -> Cube:
        """
        Creates a cube of wet bulb temperature values

        Args:
            temperature:
                Cube of air temperatures.
            relative_humidity:
                Cube of relative humidities.
            pressure:
                Cube of air pressures.

        Returns:
            Cube of wet bulb temperature (K).
        """
        temperature.convert_units("K")
        relative_humidity.convert_units(1)
        pressure.convert_units("Pa")
        wbt_data = self._calculate_wet_bulb_temperature(
            pressure.data, relative_humidity.data, temperature.data)

        attributes = generate_mandatory_attributes(
            [temperature, relative_humidity, pressure])
        wbt = create_new_diagnostic_cube("wet_bulb_temperature",
                                         "K",
                                         temperature,
                                         attributes,
                                         data=wbt_data)
        return wbt
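
A hypothetical call sketch; `plugin` and the three input cubes are assumptions and are not defined in the source above:

# Hypothetical usage sketch; `plugin`, `temperature`, `relative_humidity` and
# `pressure` are assumed to exist already.
wet_bulb = plugin.create_wet_bulb_temperature_cube(
    temperature,        # any units convertible to K, e.g. Celsius
    relative_humidity,  # e.g. "%" or "1"; converted to a fraction
    pressure,           # any units convertible to Pa, e.g. hPa
)
print(wet_bulb.name(), wet_bulb.units)  # wet_bulb_temperature K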
Example #9
    def process(
        self,
        temperature: Cube,
        orography: Cube,
        land_sea_mask: Cube,
        model_id_attr: Optional[str] = None,
    ) -> Cube:
        """Calculates the lapse rate from the temperature and orography cubes.

        Args:
            temperature:
                Cube of air temperatures (K).
            orography:
                Cube containing orography data (metres)
            land_sea_mask:
                Cube containing a binary land-sea mask: True for land points
                and False for sea points.
            model_id_attr:
                Name of the attribute used to identify the source model for
                blending. This is inherited from the input temperature cube.

        Returns:
            Cube containing lapse rate (K m-1)

        Raises:
            TypeError: If input cubes are not cubes.
            ValueError: If input cubes have the wrong units.

        """
        if not isinstance(temperature, iris.cube.Cube):
            msg = "Temperature input is not a cube, but {}"
            raise TypeError(msg.format(type(temperature)))

        if not isinstance(orography, iris.cube.Cube):
            msg = "Orography input is not a cube, but {}"
            raise TypeError(msg.format(type(orography)))

        if not isinstance(land_sea_mask, iris.cube.Cube):
            msg = "Land/Sea mask input is not a cube, but {}"
            raise TypeError(msg.format(type(land_sea_mask)))

        # Converts cube units.
        temperature_cube = temperature.copy()
        temperature_cube.convert_units("K")
        orography.convert_units("metres")

        # Extract x/y co-ordinates.
        x_coord = temperature_cube.coord(axis="x").name()
        y_coord = temperature_cube.coord(axis="y").name()

        # Extract orography and land/sea mask data.
        orography_data = next(orography.slices([y_coord, x_coord])).data
        land_sea_mask_data = next(land_sea_mask.slices([y_coord,
                                                        x_coord])).data
        # Fill sea points with NaN values.
        orography_data = np.where(land_sea_mask_data, orography_data, np.nan)

        # Create list of arrays over "realization" coordinate
        has_realization_dimension = False
        original_dimension_order = None
        if temperature_cube.coords("realization", dim_coords=True):
            original_dimension_order = get_dim_coord_names(temperature_cube)
            enforce_coordinate_ordering(temperature_cube, "realization")
            temp_data_slices = temperature_cube.data
            has_realization_dimension = True
        else:
            temp_data_slices = [temperature_cube.data]

        # Calculate lapse rate for each realization
        lapse_rate_data = []
        for temperature_data in temp_data_slices:
            lapse_rate_array = self._generate_lapse_rate_array(
                temperature_data, orography_data, land_sea_mask_data)
            lapse_rate_data.append(lapse_rate_array)
        lapse_rate_data = np.array(lapse_rate_data)
        if not has_realization_dimension:
            lapse_rate_data = np.squeeze(lapse_rate_data)

        attributes = generate_mandatory_attributes([temperature],
                                                   model_id_attr=model_id_attr)
        lapse_rate_cube = create_new_diagnostic_cube(
            "air_temperature_lapse_rate",
            "K m-1",
            temperature_cube,
            attributes,
            data=lapse_rate_data,
        )

        if original_dimension_order:
            enforce_coordinate_ordering(lapse_rate_cube,
                                        original_dimension_order)

        return lapse_rate_cube
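
A hypothetical call sketch; `plugin` is assumed to be an instance of the lapse-rate plugin that owns this method, and the three input cubes (not defined here) are assumed to share the same spatial grid:

# Hypothetical usage sketch; `plugin` and the input cubes are assumed to exist.
lapse_rate = plugin.process(
    temperature,    # air temperature cube, any units convertible to K
    orography,      # orography heights; converted to metres in place
    land_sea_mask,  # binary land-sea mask; sea points become NaN internally
    model_id_attr=None,
)
print(lapse_rate.name(), lapse_rate.units)  # air_temperature_lapse_rate K m-1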
Example #10
def calculate_uv_index(
    uv_downward: Cube,
    scale_factor: float = 3.6,
    model_id_attr: Optional[str] = None,
) -> Cube:
    """
    A plugin to calculate the UV index using the radiation flux in UV downward
    at the surface and a scaling factor.
    The scaling factor is configurable by the user.

    Args:
        uv_downward:
            A cube of the radiation flux in UV downward at the surface
            (W m-2). This is a UM diagnostic produced by the UM radiation
            scheme; see the paper referenced below for more details.
        scale_factor:
            The UV scale factor. Default is 3.6 (m2 W-1). This factor has
            been empirically derived and should not be changed unless there
            are scientific reasons to do so. For more information see
            section 2.1.1 of the paper referenced below.
        model_id_attr:
            Name of the attribute used to identify the source model for
            blending.

    Returns:
        A cube of the calculated UV index.

    Raises:
        ValueError: If uv_downward is not named correctly.
        ValueError: If uv_downward contains values that are negative or
            not a number.

    References:
        Turner, E. C., Manners, J., Morcrette, C. J., O'Hagan, J. B.,
        & Smedley, A. R. D. (2017): Toward a New UV Index Diagnostic
        in the Met Office's Forecast Model. Journal of Advances in
        Modeling Earth Systems, 9, 2654-2671.

    """

    if uv_downward.name() != "surface_downwelling_ultraviolet_flux_in_air":
        msg = ("The radiation flux in UV downward has the wrong name, "
               "it should be "
               "surface_downwelling_ultraviolet_flux_in_air "
               "but is {}".format(uv_downward.name()))
        raise ValueError(msg)

    if np.any(uv_downward.data < 0) or np.isnan(uv_downward.data).any():
        msg = ("The radiation flux in UV downward contains data "
               "that is negative or NaN. Data should be >= 0.")
        raise ValueError(msg)

    uv_downward.convert_units("W m-2")
    uv_data = uv_downward.data * scale_factor
    attributes = generate_mandatory_attributes([uv_downward],
                                               model_id_attr=model_id_attr)
    uv_index = create_new_diagnostic_cube("ultraviolet_index",
                                          "1",
                                          uv_downward,
                                          attributes,
                                          data=uv_data)

    return uv_index
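
A call sketch (not from the source), assuming calculate_uv_index and the helper functions it imports are available; the input cube is given a long_name so that uv_downward.name() passes the check above:

import numpy as np
from iris.cube import Cube

uv_flux = Cube(
    np.full((2, 2), 2.0, dtype=np.float32),
    long_name="surface_downwelling_ultraviolet_flux_in_air",
    units="W m-2",
)
uv_index = calculate_uv_index(uv_flux)
print(uv_index.data[0, 0])  # 2.0 W m-2 * 3.6 m2 W-1 = UV index of 7.2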
Example #11
    def process(self, cube_ens_wdir: Cube) -> Tuple[Cube, ndarray, ndarray]:
        """Create a cube containing the wind direction averaged over the
        ensemble realizations.

        Args:
            cube_ens_wdir:
                Cube containing wind direction from multiple ensemble
                realizations.

        Returns:
            - Cube containing the wind direction averaged from the
              ensemble realizations.
            - 3D array - Radius taken from average complex wind direction
              angle.
            - 3D array - The average distance from mean normalised - used
              as a confidence value.

        Raises:
            TypeError: If cube_ens_wdir is not a cube.
            ValueError: If the units of cube_ens_wdir cannot be converted
                to degrees.
        """

        if not isinstance(cube_ens_wdir, iris.cube.Cube):
            msg = "Wind direction input is not a cube, but {}"
            raise TypeError(msg.format(type(cube_ens_wdir)))

        try:
            cube_ens_wdir.convert_units("degrees")
        except ValueError as err:
            msg = "Input cube cannot be converted to degrees: {}".format(err)
            raise ValueError(msg) from err

        self.n_realizations = len(cube_ens_wdir.coord("realization").points)
        y_coord_name = cube_ens_wdir.coord(axis="y").name()
        x_coord_name = cube_ens_wdir.coord(axis="x").name()
        for wdir_slice in cube_ens_wdir.slices(
            ["realization", y_coord_name, x_coord_name]):
            self._reset()
            # Extract wind direction data.
            self.wdir_complex = self.deg_to_complex(wdir_slice.data)
            (self.realization_axis, ) = wdir_slice.coord_dims("realization")

            # Copies input cube and remove realization dimension to create
            # cubes for storing results.
            self.wdir_slice_mean = next(wdir_slice.slices_over("realization"))
            self.wdir_slice_mean.remove_coord("realization")

            # Derive average wind direction.
            self.calc_wind_dir_mean()

            # Find radius values for wind direction average.
            self.find_r_values()

            # Calculate the confidence measure based on the difference
            # between the complex average and the individual ensemble
            # realizations.
            self.calc_confidence_measure()

            # Find any meaningless averages and substitute them with the
            # wind direction taken from the first ensemble realization.
            # The mask is True where r values are below the threshold.
            where_low_r = np.where(self.r_vals_slice.data < self.r_thresh,
                                   True, False)
            # If any point in the array has a poor r-value, trigger the
            # decider function.
            if where_low_r.any():
                self.wind_dir_decider(where_low_r, wdir_slice)

            # Append to cubelists.
            self.wdir_cube_list.append(self.wdir_slice_mean)
            self.r_vals_cube_list.append(self.r_vals_slice)
            self.confidence_measure_cube_list.append(self.confidence_slice)

        # Combine cubelists into cube.
        cube_mean_wdir = self.wdir_cube_list.merge_cube()
        cube_r_vals = self.r_vals_cube_list.merge_cube()
        cube_confidence_measure = self.confidence_measure_cube_list.merge_cube()

        # Check that the dimensionality of coordinates of the output cube
        # matches the input cube.
        first_slice = next(cube_ens_wdir.slices_over(["realization"]))
        cube_mean_wdir = check_cube_coordinates(first_slice, cube_mean_wdir)

        # Change cube identifiers.
        cube_mean_wdir.add_cell_method(CellMethod("mean",
                                                  coords="realization"))
        cube_r_vals.long_name = "radius_of_complex_average_wind_from_direction"
        cube_r_vals.units = None
        cube_confidence_measure.long_name = "confidence_measure_of_wind_from_direction"
        cube_confidence_measure.units = None

        return cube_mean_wdir, cube_r_vals, cube_confidence_measure
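
A numpy-only sketch (not from the source) of why the averaging is done in complex space: realizations of 350 and 10 degrees should average to 0/360 degrees, not to the naive arithmetic mean of 180. The plugin's own deg_to_complex helper may use a different angle convention, but the principle is the same:

import numpy as np

directions_deg = np.array([350.0, 10.0])
complex_dirs = np.exp(1j * np.deg2rad(directions_deg))
mean_complex = complex_dirs.mean()
mean_direction = np.rad2deg(np.angle(mean_complex)) % 360.0  # ~0.0 degrees
r_value = np.abs(mean_complex)                               # ~0.98, high confidence
print(mean_direction, r_value)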
Example #12
class VerticalUpdraught(BasePlugin):
    """
    Methods to calculate the maximum vertical updraught from CAPE and precipitation rate as
    defined in Hand (2002) and Golding (1998) with the precipitation rate modifier found in
    the UKPP CDP code.

    Hand, W. 2002. "The Met Office Convection Diagnosis Scheme." Meteorological Applications
        9(1): 69-83. doi:10.1017/S1350482702001081.
    Golding, B.W. 1998. "Nimrod: A system for generating automated very short range forecasts."
        Meteorol. Appl. 5: 1-16. doi:https://doi.org/10.1017/S1350482798000577.
    """
    def __init__(self, model_id_attr: Optional[str] = None):
        """
        Set up class

        Args:
            model_id_attr:
                Name of model ID attribute to be copied from source cubes to output cube
        """
        self.model_id_attr = model_id_attr
        self.cape = Cube(None)
        self.precip = Cube(None)
        self.cube_names = [
            "atmosphere_convective_available_potential_energy",
            "lwe_precipitation_rate_max",
        ]
        self._minimum_cape = 10.0  # J kg-1. Minimum value to diagnose updraught from
        self._minimum_precip = 5.0  # mm h-1. Minimum value to diagnose updraught from

    def _parse_inputs(self, inputs: List[Cube]) -> None:
        """
        Separates the input CubeList into CAPE and precipitation rate objects with
        standard units, raising exceptions if it cannot or if it finds excess data.

        Args:
            inputs:
                List of cubes containing exactly one CAPE cube and one
                precipitation rate cube.
        Raises:
            ValueError:
                If the expected cubes are not found, if additional cubes are
                found, or if the cubes' grids, times or model attributes do
                not match.
        """
        cubes = CubeList(inputs)
        try:
            (self.cape, self.precip) = cubes.extract(self.cube_names)
        except ValueError as e:
            raise ValueError(
                f"Expected to find cubes of {self.cube_names}, not {[c.name() for c in cubes]}"
            ) from e
        if len(cubes) > 2:
            extras = [
                c.name() for c in cubes if c.name() not in self.cube_names
            ]
            raise ValueError(f"Unexpected Cube(s) found in inputs: {extras}")
        if not spatial_coords_match(inputs):
            raise ValueError(
                f"Spatial coords of input Cubes do not match: {cubes}")
        time_error_msg = self._input_times_error()
        if time_error_msg:
            raise ValueError(time_error_msg)
        self.cape.convert_units("J kg-1")
        self.precip.convert_units("mm h-1")
        if self.model_id_attr:
            if (self.cape.attributes[self.model_id_attr] !=
                    self.precip.attributes[self.model_id_attr]):
                raise ValueError(
                    f"Attribute {self.model_id_attr} does not match on input cubes. "
                    f"{self.cape.attributes[self.model_id_attr]} != "
                    f"{self.precip.attributes[self.model_id_attr]}")

    def _input_times_error(self) -> str:
        """
        Returns an appropriate error message string if:

        - the CAPE cube time has bounds
        - the precip cube time has no bounds
        - the CAPE time point does not match the lower time bound of the
          precip cube
        - the CAPE and precip cubes have different forecast reference times

        Returns an empty string if all checks pass.
        """
        cape_time = self.cape.coord("time")
        if cape_time.has_bounds():
            return "CAPE cube must not have time bounds"
        if self.cape.coord("forecast_reference_time") != self.precip.coord(
                "forecast_reference_time"):
            return "Forecast reference times do not match"
        if not self.precip.coord("time").has_bounds():
            return "Precip cube must have time bounds"
        if cape_time.cell(0).point != self.precip.coord("time").cell(
                0).bound[0]:
            return "CAPE time must match precip cube's lower time bound"
        return ""

    def _updraught_from_cape(self) -> np.ndarray:
        """
        Calculate the updraught from CAPE data

        Calculation is 0.25 * sqrt(2 * cape)

        Returns zero where CAPE < 10 J kg-1
        """
        updraught = 0.25 * (2 * self.cape.data)**0.5
        updraught[self.cape.data < self._minimum_cape] = 0.0
        return updraught.astype(np.float32)

    def _updraught_increment_from_precip(self) -> np.ndarray:
        """
        Calculate the updraught increment from the precipitation rate.

        Calculation is 7.33 * (precip / 28.7)^0.22
        Where precipitation rate < 5 mm h-1, increment is zero.
        """
        increment = 7.33 * (self.precip.data / 28.7)**0.22
        increment[self.precip.data < self._minimum_precip] = 0.0
        return increment.astype(np.float32)

    def _make_updraught_cube(self, data: np.ndarray) -> Cube:
        """Puts the data array into a CF-compliant cube"""
        attributes = {}
        if self.model_id_attr:
            attributes[self.model_id_attr] = self.precip.attributes[
                self.model_id_attr]
        cube = create_new_diagnostic_cube(
            "maximum_vertical_updraught",
            "m s-1",
            self.precip,
            mandatory_attributes=generate_mandatory_attributes(
                [self.precip, self.cape]),
            optional_attributes=attributes,
            data=data,
        )
        return cube

    def process(self, inputs: List[Cube]) -> Cube:
        """Executes methods to calculate updraught from CAPE and precipitation rate
        and packages this as a Cube with appropriate metadata.

        Args:
            inputs:
                List of CAPE and precipitation rate cubes (any order)

        Returns:
            Cube:
                Containing maximum vertical updraught
        """
        self._parse_inputs(inputs)
        return self._make_updraught_cube(
            self._updraught_from_cape() +
            self._updraught_increment_from_precip())
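
A numpy-only worked example (not from the source) of the two documented contributions, 0.25 * sqrt(2 * CAPE) and 7.33 * (precip / 28.7) ** 0.22, with the minima of 10 J kg-1 and 5 mm h-1 applied:

import numpy as np

cape = np.array([5.0, 500.0], dtype=np.float32)    # J kg-1
precip = np.array([2.0, 20.0], dtype=np.float32)   # mm h-1

from_cape = 0.25 * np.sqrt(2 * cape)
from_cape[cape < 10.0] = 0.0                       # below the CAPE minimum

from_precip = 7.33 * (precip / 28.7) ** 0.22
from_precip[precip < 5.0] = 0.0                    # below the precip minimum

print(from_cape + from_precip)  # [0.0, ~14.7] m s-1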
Example #13
    def process(self,
                cube1: Cube,
                cube2: Cube,
                boxsize: int = 30) -> Tuple[Cube, Cube]:
        """
        Extracts data from input cubes, performs dimensionless advection
        displacement calculation, and creates new cubes with advection
        velocities in metres per second.  Each input cube should have precisely
        two non-scalar dimension coordinates (spatial x/y), and is expected to
        be in a projection such that grid spacing is the same (or very close)
        at all points within the spatial domain.  Each input cube must also
        have a scalar "time" coordinate.

        Args:
            cube1:
                2D cube that advection will be FROM / advection start point.
                This may be an earlier observation or an extrapolation forecast
                for the current time.
            cube2:
                2D cube that advection will be TO / advection end point.
                This will be the most recent observation.
            boxsize:
                The side length of the square box over which to solve the
                optical flow constraint.  This should be greater than the
                data smoothing radius.

        Returns:
            - 2D cube of advection velocities in the x-direction
            - 2D cube of advection velocities in the y-direction
        """
        # clear existing parameters
        self.data_smoothing_radius = None
        self.boxsize = None

        # check input cubes have appropriate and matching contents and dimensions
        self._check_input_cubes(cube1, cube2)

        # get time over which advection displacement has occurred
        time_diff_seconds = self._get_advection_time(cube1, cube2)

        # if the time difference is greater than 15 minutes, increase the data smoothing
        # radius so that larger advection displacements can be resolved
        grid_length_km = calculate_grid_spacing(cube1, "km")
        data_smoothing_radius = self._get_smoothing_radius(
            time_diff_seconds, grid_length_km)

        # fail if self.boxsize is less than data smoothing radius
        self.boxsize = boxsize
        if self.boxsize < data_smoothing_radius:
            msg = ("Box size {} too small (should not be less than data "
                   "smoothing radius {})")
            raise ValueError(msg.format(self.boxsize, data_smoothing_radius))

        # convert units to mm/hr as this avoids the need to manipulate tiny
        # decimals
        cube1 = cube1.copy()
        cube2 = cube2.copy()

        try:
            cube1.convert_units("mm/hr")
            cube2.convert_units("mm/hr")
        except ValueError as err:
            msg = ("Input data are in units that cannot be converted to mm/hr "
                   "which are the required units for use with optical flow.")
            raise ValueError(msg) from err

        # extract 2-dimensional data arrays
        data1 = next(
            cube1.slices([cube1.coord(axis="y"),
                          cube1.coord(axis="x")])).data
        data2 = next(
            cube2.slices([cube2.coord(axis="y"),
                          cube2.coord(axis="x")])).data

        # fill any mask with 0 values so fill_values are not spread into the
        # domain when smoothing the fields.
        if np.ma.is_masked(data1):
            data1 = data1.filled(0)
        if np.ma.is_masked(data2):
            data2 = data2.filled(0)

        # if input arrays have no non-zero values, set velocities to zero here
        # and raise a warning
        if np.allclose(data1, np.zeros(data1.shape)) or np.allclose(
                data2, np.zeros(data2.shape)):
            msg = ("No non-zero data in input fields: setting optical flow "
                   "velocities to zero")
            warnings.warn(msg)
            ucomp = np.zeros(data1.shape, dtype=np.float32)
            vcomp = np.zeros(data2.shape, dtype=np.float32)
        else:
            # calculate dimensionless displacement between the two input fields
            ucomp, vcomp = self.process_dimensionless(data1, data2, 1, 0,
                                                      data_smoothing_radius)
            # convert displacements to velocities in metres per second
            for vel in [ucomp, vcomp]:
                vel *= np.float32(1000.0 * grid_length_km)
                vel /= time_diff_seconds

        # create velocity output cubes based on metadata from later input cube
        ucube = iris.cube.Cube(
            ucomp,
            long_name="precipitation_advection_x_velocity",
            units="m s-1",
            dim_coords_and_dims=[
                (cube2.coord(axis="y"), 0),
                (cube2.coord(axis="x"), 1),
            ],
            aux_coords_and_dims=[(cube2.coord("time"), None)],
        )
        vcube = ucube.copy(vcomp)
        vcube.rename("precipitation_advection_y_velocity")

        return ucube, vcube
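
A hypothetical call sketch; `plugin` is assumed to be an instance of the optical-flow plugin that owns this method, and `rain_t0`, `rain_t15` are assumed to be 2D precipitation-rate cubes 15 minutes apart on the same grid (none are defined in the source above):

# Hypothetical usage sketch; `plugin`, `rain_t0` and `rain_t15` are assumed to exist.
u_vel, v_vel = plugin.process(rain_t0, rain_t15, boxsize=30)
print(u_vel.name(), u_vel.units)  # precipitation_advection_x_velocity m s-1
print(v_vel.name(), v_vel.units)  # precipitation_advection_y_velocity m s-1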