Example 1
def latitude_to_threshold(
    latitude: np.ndarray, midlatitude: float, tropics: float,
) -> np.ndarray:
    """
    Rescale a latitude range into a range of threshold values suitable for
    thresholding a different diagnostic. This is based on the value provided
    for that diagnostic at midlatitude (more than 50 degrees from the equator)
    and in the tropics (within 10 degrees of the equator), varying linearly
    in between.

    Args:
        latitude:
            An array of latitude points (e.g. cube.coord("latitude").points)
        midlatitude:
            The threshold value to return above 50N or below 50S.
        tropics:
            The threshold value to return between 10S and 10N.

    Returns:
        An array of thresholds, one for each latitude point
    """
    return np.where(
        latitude > 0,
        rescale(latitude, (50.0, 10), (midlatitude, tropics), clip=True),
        rescale(latitude, (-50.0, -10), (midlatitude, tropics), clip=True),
    )
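A brief usage sketch of latitude_to_threshold (the rescale import path and the numerical threshold values below are assumptions for illustration):

import numpy as np
from improver.utilities.rescale import rescale  # assumed import path, needed by latitude_to_threshold

latitudes = np.array([-60.0, -30.0, -10.0, 0.0, 10.0, 30.0, 60.0])
# Hypothetical thresholds: 0.8 poleward of 50 degrees, 0.2 within 10 degrees
# of the equator; values in between vary linearly with latitude.
print(latitude_to_threshold(latitudes, midlatitude=0.8, tropics=0.2))
# -> [0.8 0.5 0.2 0.2 0.2 0.5 0.8]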
Example 2
    def apply_ice(self, prob_lightning_cube, ice_cube):
        """
        Modify nowcast of lightning probability with ice data from a radar
        composite (VII; Vertically Integrated Ice).

        Args:
            prob_lightning_cube (iris.cube.Cube):
                First-guess lightning probability.
                The forecast_period coord is modified in-place to "minutes".
            ice_cube (iris.cube.Cube):
                Analysis of vertically integrated ice (VII) from radar
                thresholded at self.ice_thresholds.
                Units of threshold coord modified in-place to kg m^-2

        Returns:
            iris.cube.Cube:
                Output cube containing updated nowcast lightning probability.
                This cube will have the same dimensions and meta-data as
                prob_lightning_cube.
                The influence of the data in ice_cube reduces linearly to zero
                as forecast_period increases to 2H30M.

        Raises:
            iris.exceptions.ConstraintMismatchError:
                If ice_cube does not contain the expected thresholds.
        """
        prob_lightning_cube.coord("forecast_period").convert_units("minutes")
        # check prob-ice threshold units are as expected
        ice_threshold_coord = find_threshold_coordinate(ice_cube)
        ice_threshold_coord.convert_units("kg m^-2")
        new_cube_list = iris.cube.CubeList([])
        err_string = "No matching prob(Ice) cube for threshold {}"
        for cube_slice in prob_lightning_cube.slices_over("time"):
            fcmins = cube_slice.coord("forecast_period").points[0]
            for threshold, prob_max in zip(self.ice_thresholds,
                                           self.ice_scaling):
                ice_slice = ice_cube.extract(
                    iris.Constraint(coord_values={
                        ice_threshold_coord:
                        lambda t: isclose(t.point, threshold)
                    }))
                if not isinstance(ice_slice, iris.cube.Cube):
                    raise ConstraintMismatchError(err_string.format(threshold))
                # Linearly reduce impact of ice as fcmins increases to 2H30M.
                ice_scaling = [0.0, (prob_max * (1.0 - (fcmins / 150.0)))]
                if ice_scaling[1] > 0:
                    cube_slice.data = np.maximum(
                        rescale(
                            ice_slice.data,
                            data_range=(0.0, 1.0),
                            scale_range=ice_scaling,
                            clip=True,
                        ),
                        cube_slice.data,
                    )
            new_cube_list.append(cube_slice)

        new_cube = new_cube_list.merge_cube()
        new_cube = check_cube_coordinates(prob_lightning_cube, new_cube)
        return new_cube
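The influence of the ice data above decays linearly with lead time and vanishes at 150 minutes (2H30M). A minimal sketch of that decay, assuming a hypothetical maximum scaling of 0.9 for a single VII threshold:

import numpy as np

prob_max = 0.9  # hypothetical maximum scaling (one self.ice_scaling entry)
forecast_minutes = np.array([0, 30, 60, 90, 120, 150, 180])

# Upper end of the rescale output range, as computed in apply_ice above;
# values at or below zero mean the ice data no longer contributes.
upper_scale = prob_max * (1.0 - forecast_minutes / 150.0)
print(upper_scale)  # -> 0.9, 0.72, 0.54, 0.36, 0.18, 0.0, -0.18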
Example 3
    def test_clip(self):
        """Test that the method clips values when out of range"""
        expected = self.cube.data.copy()
        expected[...] = 108.
        expected[0, 7, 7] = 100.
        result = rescale(self.cube.data, data_range=(0.2, 1.2),
                         scale_range=(100., 110.), clip=True)
        self.assertArrayAlmostEqual(result, expected)
Example 4
    def test_rescaling_outrange(self):
        """Test that the method gives the expected values when out of range"""
        expected = self.cube.data.copy()
        expected[...] = 108.
        expected[0, 7, 7] = 98.
        result = rescale(self.cube.data, data_range=(0.2, 1.2),
                         scale_range=(100., 110.))
        self.assertArrayAlmostEqual(result, expected)
Example 5
    def test_rescaling_inrange(self):
        """Test that the method returns the expected values when in range"""
        expected = self.cube.data.copy()
        expected[...] = 110.
        expected[0, 7, 7] = 100.
        result = rescale(self.cube.data, data_range=(0., 1.),
                         scale_range=(100., 110.))
        self.assertArrayAlmostEqual(result, expected)
Example 6
    def test_inverted_clip(self):
        """Test that the method clips values when the range minimum is not first"""
        expected = self.cube.data.copy()
        expected[...] = 108.0
        expected[0, 7, 7] = 100.0
        result = rescale(self.cube.data,
                         data_range=(1.2, 0.2),
                         scale_range=(110.0, 100.0),
                         clip=True)
        self.assertArrayAlmostEqual(result, expected)
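The tests above pin down the linear mapping that rescale is expected to perform. A minimal, self-contained sketch of that behaviour, written from the expected values in these tests rather than from the IMPROVER implementation itself:

import numpy as np

def rescale_sketch(data, data_range=None, scale_range=(0.0, 1.0), clip=False):
    """Linearly map values from data_range onto scale_range.

    Inverted ranges are allowed; clip=True limits the output to the
    scale_range end points, matching the clip tests above.
    """
    if data_range is None:
        data_range = (np.min(data), np.max(data))
    d_min, d_max = (float(x) for x in data_range)
    s_min, s_max = (float(x) for x in scale_range)
    if s_min == s_max:
        raise ValueError("Cannot rescale a zero output range")
    scale = (s_max - s_min) / (d_max - d_min)
    result = s_min + (np.asarray(data) - d_min) * scale
    return np.clip(result, min(s_min, s_max), max(s_min, s_max)) if clip else result

# A data value of 1.0 mapped from data_range (0.2, 1.2) onto (100, 110):
# 100 + (1.0 - 0.2) * 10 / 1.0 = 108, matching the expected value in test_clip.
print(rescale_sketch(1.0, data_range=(0.2, 1.2),
                     scale_range=(100.0, 110.0), clip=True))  # -> 108.0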
Example 7
    def _rescale_masked_weights(self, weights):
        """Apply fuzzy smoothing to weights at the edge of masked areas

        Args:
            weights (iris.cube.Cube):
                Pre-normalised weights where the weights of masked data points
                have been set to 0

        Returns:
            (tuple): tuple containing:
                **weights** (iris.cube.Cube):
                    Weights where MASKED slices have been rescaled, but UNMASKED
                    slices have not
                **rescaled** (iris.cube.Cube):
                    Binary (0/1) map showing which weights have been rescaled
        """
        is_rescaled = iris.cube.CubeList()
        rescaled_weights = iris.cube.CubeList()
        for weights_slice in weights.slices_over(self.blend_coord):
            weights_nonzero = np.where(weights_slice.data > 0, True, False)
            if np.all(weights_nonzero):
                # if there are no masked points in this slice, keep current weights
                # and mark as unchanged (not rescaled)
                rescaled_weights.append(weights_slice)
                is_rescaled.append(weights_slice.copy(data=~weights_nonzero))
            else:
                weights_orig = weights_slice.data.copy()

                # calculate the distance to the nearest invalid point, in grid squares,
                # for each point on the grid
                distance = distance_transform_edt(weights_nonzero)

                # calculate a 0-1 scaling factor based on the distance from the
                # nearest invalid data point: 1 at or beyond the fuzzy length,
                # decreasing towards 0 for points closest to the edge of the mask
                fuzzy_factor = rescale(distance,
                                       data_range=[0.0, self.fuzzy_length],
                                       clip=True)

                # multiply existing weights by fuzzy scaling factor
                rescaled_weights_data = np.multiply(
                    weights_slice.data, fuzzy_factor).astype(FLOAT_DTYPE)
                rescaled_weights.append(
                    weights_slice.copy(data=rescaled_weights_data))

                # identify spatial points where weights have been rescaled
                is_rescaled_data = np.where(
                    rescaled_weights_data != weights_orig, True, False)
                is_rescaled.append(weights_slice.copy(data=is_rescaled_data))

        weights = rescaled_weights.merge_cube()
        rescaled = is_rescaled.merge_cube()

        return weights, rescaled
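A toy sketch of the fuzzy-edge idea used above, with np.clip standing in for rescale(distance, data_range=[0, fuzzy_length], clip=True); the array values are hypothetical:

import numpy as np
from scipy.ndimage import distance_transform_edt

# Toy 1-D weights: a zero marks a masked point, ones are valid data.
weights = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
fuzzy_length = 3.0  # hypothetical fuzzy length, in grid squares

# Distance, in grid squares, from each point to the nearest zero-weight point.
distance = distance_transform_edt(weights > 0)
# -> [0. 1. 2. 3. 4. 5. 6.]

# Linear ramp from 0 at the mask edge to 1 at fuzzy_length and beyond.
fuzzy_factor = np.clip(distance / fuzzy_length, 0.0, 1.0)

# Weights taper smoothly to zero approaching the masked point.
print(weights * fuzzy_factor)  # -> 0.0, 0.333, 0.667, 1.0, 1.0, 1.0, 1.0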
Example 8
    def smooth_initial_weights(self, weights_from_mask):
        """
        Create fuzzy weights around points in the weights_from_mask with
        zero weight.

        This works by applying a Euclidean distance transform, which returns
        an array containing the distance of each grid point from the nearest
        masked point. The result is then rescaled so that any points at least
        fuzzy_length away from a masked point are set back to a weight of one,
        while any points closer than fuzzy_length to a masked point are scaled
        to lie between 0 and 1.

        Args:
            weights_from_mask (iris.cube.Cube):
                A cube containing an initial set of weights based on the mask
                on the input cube.
        Returns:
            result (iris.cube.Cube):
                A cube containing the fuzzy weights calculated based on the
                weights_from_mask. The dimension order may have changed from
                the input cube as it has been sliced over x and y coordinates.
        """
        result = iris.cube.CubeList()
        x_coord = weights_from_mask.coord(axis='x').name()
        y_coord = weights_from_mask.coord(axis='y').name()
        # The distance_transform_edt works on N-D cubes, so we want to make
        # sure we only apply it to x-y slices.
        for weights in weights_from_mask.slices([y_coord, x_coord]):
            if np.all(weights.data == 1.0):
                # distance_transform_edt doesn't produce what we want if there
                # are no zeros present.
                result.append(weights.copy())
            else:
                fuzzy_data = distance_transform_edt(weights.data == 1., 1)
                fuzzy_data = fuzzy_data.astype(np.float32)
                rescaled_fuzzy_data = rescale(
                    fuzzy_data, data_range=[0., self.fuzzy_length],
                    clip=True)
                result.append(weights.copy(data=rescaled_fuzzy_data))
        result = result.merge_cube()
        return result
Example 9
    def process(self, input_cube):
        """Convert each point to a truth value based on provided threshold
        values. The truth value may or may not be fuzzy, depending on whether
        fuzzy_bounds are supplied.  If the plugin has a "threshold_units"
        member, this is used to convert both thresholds and fuzzy bounds into
        the units of the input cube.

        Args:
            input_cube (iris.cube.Cube):
                Cube to threshold. The code is dimension-agnostic.

        Returns:
            iris.cube.Cube:
                Cube after a threshold has been applied. The data within this
                cube will contain values between 0 and 1 to indicate whether
                a given threshold has been exceeded or not.

                The cube meta-data will contain:
                * Input_cube name prepended with
                probability_of_X_above(or below)_threshold (where X is
                the diagnostic under consideration)
                * Threshold dimension coordinate with same units as input_cube
                * Threshold attribute ("greater_than",
                "greater_than_or_equal_to", "less_than", or
                "less_than_or_equal_to" depending on the operator)
                * Cube units set to (1).

        Raises:
            ValueError: if a np.nan value is detected within the input cube.

        """
        if np.isnan(input_cube.data).any():
            raise ValueError("Error: NaN detected in input cube data")

        if self.threshold_units is not None:
            self.thresholds = [
                self.threshold_units.convert(threshold, input_cube.units)
                for threshold in self.thresholds
            ]
            self.fuzzy_bounds = [
                tuple([
                    self.threshold_units.convert(threshold, input_cube.units)
                    for threshold in bounds
                ]) for bounds in self.fuzzy_bounds
            ]

        self.threshold_coord_name = input_cube.name()

        thresholded_cubes = iris.cube.CubeList()
        for threshold, bounds in zip(self.thresholds, self.fuzzy_bounds):
            cube = input_cube.copy()
            # if upper and lower bounds are equal, set a deterministic 0/1
            # probability based on exceedance of the threshold
            if bounds[0] == bounds[1]:
                truth_value = self.comparison_operator["function"](cube.data,
                                                                   threshold)
            # otherwise, scale exceedance probabilities linearly between 0/1
            # at the min/max fuzzy bounds and 0.5 at the threshold value
            else:
                truth_value = np.where(
                    cube.data < threshold,
                    rescale(
                        cube.data,
                        data_range=(bounds[0], threshold),
                        scale_range=(0.0, 0.5),
                        clip=True,
                    ),
                    rescale(
                        cube.data,
                        data_range=(threshold, bounds[1]),
                        scale_range=(0.5, 1.0),
                        clip=True,
                    ),
                )
                # if requirement is for probabilities less_than or
                # less_than_or_equal_to the threshold (rather than
                # greater_than or greater_than_or_equal_to), invert
                # the exceedance probability
                if "less_than" in self.comparison_operator["spp_string"]:
                    truth_value = 1.0 - truth_value

            truth_value = truth_value.astype(FLOAT_DTYPE)

            if np.ma.is_masked(cube.data):
                # update unmasked points only
                cube.data[~input_cube.data.mask] = (
                    truth_value[~input_cube.data.mask])
            else:
                cube.data = truth_value

            self._add_threshold_coord(cube, threshold)

            for func in self.each_threshold_func:
                cube = func(cube)

            thresholded_cubes.append(cube)

        (cube, ) = thresholded_cubes.merge()

        self._update_metadata(cube)
        enforce_coordinate_ordering(cube, ["realization", "percentile"])

        return cube
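A small numerical sketch of the fuzzy thresholding above, assuming a hypothetical threshold of 10 with fuzzy bounds (8, 12): values below the threshold map linearly onto (0, 0.5) and values above onto (0.5, 1), so the truth value passes through 0.5 exactly at the threshold.

import numpy as np

def _rescale(data, data_range, scale_range, clip=True):
    # Minimal linear rescale stand-in for illustration (not IMPROVER's rescale).
    (d0, d1), (s0, s1) = data_range, scale_range
    out = s0 + (data - d0) * (s1 - s0) / (d1 - d0)
    return np.clip(out, min(s0, s1), max(s0, s1)) if clip else out

threshold, bounds = 10.0, (8.0, 12.0)  # hypothetical threshold and fuzzy bounds
data = np.array([7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0])

truth_value = np.where(
    data < threshold,
    _rescale(data, (bounds[0], threshold), (0.0, 0.5)),
    _rescale(data, (threshold, bounds[1]), (0.5, 1.0)),
)
print(truth_value)  # -> 0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0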
Example 10
    def process(self, input_cube):
        """Convert each point to a truth value based on provided threshold
        values. The truth value may or may not be fuzzy, depending on whether
        fuzzy_bounds are supplied.

        Args:
            input_cube (iris.cube.Cube):
                Cube to threshold. The code is dimension-agnostic.

        Returns:
            cube (iris.cube.Cube):
                Cube after a threshold has been applied. The data within this
                cube will contain values between 0 and 1 to indicate whether
                a given threshold has been exceeded or not.

                The cube meta-data will contain:
                 * input_cube name prepended with `probability_of_`
                 * threshold dimension coordinate with same units as input_cube
                 * threshold attribute (above or below threshold)
                 * cube units set to (1).

        Raises:
            ValueError: if a np.nan value is detected within the input cube.

        """
        thresholded_cubes = iris.cube.CubeList()
        if np.isnan(input_cube.data).any():
            raise ValueError("Error: NaN detected in input cube data")

        for threshold, bounds in zip(self.thresholds, self.fuzzy_bounds):
            cube = input_cube.copy()
            if bounds[0] == bounds[1]:
                truth_value = cube.data > threshold
            else:
                truth_value = np.where(
                    cube.data < threshold,
                    rescale(cube.data,
                            data_range=(bounds[0], threshold),
                            scale_range=(0., 0.5),
                            clip=True),
                    rescale(cube.data,
                            data_range=(threshold, bounds[1]),
                            scale_range=(0.5, 1.),
                            clip=True),
                )
            truth_value = truth_value.astype(np.float64)
            if self.below_thresh_ok:
                truth_value = 1. - truth_value
            cube.data = truth_value

            coord = iris.coords.DimCoord(threshold,
                                         long_name="threshold",
                                         units=cube.units)
            cube.add_aux_coord(coord)
            cube = iris.util.new_axis(cube, 'threshold')
            thresholded_cubes.append(cube)

        cube, = thresholded_cubes.concatenate()

        # TODO: Correct when formal cf-standards exists
        # Force the metadata to temporary conventions
        if self.below_thresh_ok:
            cube.attributes.update({'relative_to_threshold': 'below'})
        else:
            cube.attributes.update({'relative_to_threshold': 'above'})
        cube.rename("probability_of_{}".format(cube.name()))
        cube.units = Unit(1)

        cube = ExtractData.make_stat_coordinate_first(cube)

        return cube
Example 11
    def test_zero_range_output(self):
        """Test that the method returns the expected error"""
        msg = "Cannot rescale a zero output range"
        with self.assertRaisesRegex(ValueError, msg):
            rescale(self.cube.data, scale_range=[4, 4])
Example 12
    def test_basic(self):
        """Test that the method returns the expected array type"""
        result = rescale(self.cube.data)
        self.assertIsInstance(result, np.ndarray)
Example 13
    def process(self, input_cube):
        """Convert each point to a truth value based on provided threshold
        values. The truth value may or may not be fuzzy, depending on whether
        fuzzy_bounds are supplied.  If the plugin has a "threshold_units"
        member, this is used to convert both thresholds and fuzzy bounds into
        the units of the input cube.

        Args:
            input_cube (iris.cube.Cube):
                Cube to threshold. The code is dimension-agnostic.

        Returns:
            cube (iris.cube.Cube):
                Cube after a threshold has been applied. The data within this
                cube will contain values between 0 and 1 to indicate whether
                a given threshold has been exceeded or not.

                The cube meta-data will contain:
                * Input_cube name prepended with
                probability_of_X_above(or below)_threshold (where X is
                the diagnostic under consideration)
                * Threshold dimension coordinate with same units as input_cube
                * Threshold attribute (above or below threshold)
                * Cube units set to (1).

        Raises:
            ValueError: if a np.nan value is detected within the input cube.

        """
        # Record input cube data type to ensure consistent output, though
        # integer data must become float to enable fuzzy thresholding.
        input_cube_dtype = input_cube.dtype
        if input_cube.dtype.kind == 'i':
            input_cube_dtype = np.float32

        thresholded_cubes = iris.cube.CubeList()
        if np.isnan(input_cube.data).any():
            raise ValueError("Error: NaN detected in input cube data")

        # if necessary, convert thresholds and fuzzy bounds into cube units
        if self.threshold_units is not None:
            self.thresholds = [
                self.threshold_units.convert(threshold, input_cube.units)
                for threshold in self.thresholds
            ]
            self.fuzzy_bounds = [
                tuple([
                    self.threshold_units.convert(threshold, input_cube.units)
                    for threshold in bounds
                ]) for bounds in self.fuzzy_bounds
            ]

        # set name of threshold coordinate to match input diagnostic
        self.threshold_coord_name = input_cube.name()

        # apply fuzzy thresholding
        for threshold, bounds in zip(self.thresholds, self.fuzzy_bounds):
            cube = input_cube.copy()
            # if upper and lower bounds are equal, set a deterministic 0/1
            # probability based on exceedance of the threshold
            if bounds[0] == bounds[1]:
                truth_value = cube.data > threshold
            # otherwise, scale exceedance probabilities linearly between 0/1
            # at the min/max fuzzy bounds and 0.5 at the threshold value
            else:
                truth_value = np.where(
                    cube.data < threshold,
                    rescale(cube.data,
                            data_range=(bounds[0], threshold),
                            scale_range=(0., 0.5),
                            clip=True),
                    rescale(cube.data,
                            data_range=(threshold, bounds[1]),
                            scale_range=(0.5, 1.),
                            clip=True),
                )
            truth_value = truth_value.astype(input_cube_dtype)
            # if requirement is for probabilities below threshold (rather than
            # above), invert the exceedance probability
            if self.below_thresh_ok:
                truth_value = 1. - truth_value

            cube.data = truth_value
            # Overwrite masked values that have been thresholded
            # with the un-thresholded values from the input cube.
            if np.ma.is_masked(cube.data):
                cube.data[input_cube.data.mask] = (
                    input_cube.data[input_cube.data.mask])
            cube = self._add_threshold_coord(cube, threshold)
            thresholded_cubes.append(cube)

        cube, = thresholded_cubes.concatenate()
        # TODO: Correct when formal cf-standards exists
        # Force the metadata to temporary conventions
        if self.below_thresh_ok:
            cube.attributes.update({'relative_to_threshold': 'below'})
            cube.rename("probability_of_{}_below_threshold".format(
                cube.name()))
        else:
            cube.attributes.update({'relative_to_threshold': 'above'})
            cube.rename("probability_of_{}_above_threshold".format(
                cube.name()))
        cube.units = Unit(1)

        cube = enforce_coordinate_ordering(cube,
                                           ["realization", "percentile_over"])

        return cube
Example 14
    def process(self, input_cube):
        """Convert each point to a truth value based on provided threshold
        values. The truth value may or may not be fuzzy, depending on whether
        fuzzy_bounds are supplied.  If the plugin has a "threshold_units"
        member, this is used to convert both thresholds and fuzzy bounds into
        the units of the input cube.

        Args:
            input_cube (iris.cube.Cube):
                Cube to threshold. The code is dimension-agnostic.

        Returns:
            iris.cube.Cube:
                Cube after a threshold has been applied. The data within this
                cube will contain values between 0 and 1 to indicate whether
                a given threshold has been exceeded or not.

                The cube meta-data will contain:
                * Input_cube name prepended with
                probability_of_X_above(or below)_threshold (where X is
                the diagnostic under consideration)
                * Threshold dimension coordinate with same units as input_cube
                * Threshold attribute ("greater_than",
                "greater_than_or_equal_to", "less_than", or
                "less_than_or_equal_to" depending on the operator)
                * Cube units set to (1).

        Raises:
            ValueError: if a np.nan value is detected within the input cube.

        """
        # Record input cube data type to ensure consistent output, though
        # integer data must become float to enable fuzzy thresholding.
        input_cube_dtype = input_cube.dtype
        if input_cube.dtype.kind == "i":
            input_cube_dtype = FLOAT_DTYPE

        thresholded_cubes = iris.cube.CubeList()
        if np.isnan(input_cube.data).any():
            raise ValueError("Error: NaN detected in input cube data")

        # if necessary, convert thresholds and fuzzy bounds into cube units
        if self.threshold_units is not None:
            self.thresholds = [
                self.threshold_units.convert(threshold, input_cube.units)
                for threshold in self.thresholds
            ]
            self.fuzzy_bounds = [
                tuple([
                    self.threshold_units.convert(threshold, input_cube.units)
                    for threshold in bounds
                ]) for bounds in self.fuzzy_bounds
            ]

        # set name of threshold coordinate to match input diagnostic
        self.threshold_coord_name = input_cube.name()

        # apply fuzzy thresholding
        for threshold, bounds in zip(self.thresholds, self.fuzzy_bounds):
            cube = input_cube.copy()
            # if upper and lower bounds are equal, set a deterministic 0/1
            # probability based on exceedance of the threshold
            if bounds[0] == bounds[1]:
                truth_value = self.comparison_operator["function"](cube.data,
                                                                   threshold)
            # otherwise, scale exceedance probabilities linearly between 0/1
            # at the min/max fuzzy bounds and 0.5 at the threshold value
            else:
                truth_value = np.where(
                    cube.data < threshold,
                    rescale(
                        cube.data,
                        data_range=(bounds[0], threshold),
                        scale_range=(0.0, 0.5),
                        clip=True,
                    ),
                    rescale(
                        cube.data,
                        data_range=(threshold, bounds[1]),
                        scale_range=(0.5, 1.0),
                        clip=True,
                    ),
                )
                # if requirement is for probabilities less_than or
                # less_than_or_equal_to the threshold (rather than
                # greater_than or greater_than_or_equal_to), invert
                # the exceedance probability
                if "less_than" in self.comparison_operator["spp_string"]:
                    truth_value = 1.0 - truth_value
            truth_value = np.ma.masked_where(np.ma.getmask(cube.data),
                                             truth_value)
            truth_value = truth_value.astype(input_cube_dtype)

            cube.data = truth_value
            # Overwrite masked values that have been thresholded
            # with the un-thresholded values from the input cube.
            if np.ma.is_masked(cube.data):
                cube.data[input_cube.data.mask] = input_cube.data[
                    input_cube.data.mask]
            cube = self._add_threshold_coord(cube, threshold)

            for func in self.each_threshold_func:
                cube = func(cube)

            thresholded_cubes.append(cube)

        (cube, ) = thresholded_cubes.concatenate()
        if len(self.thresholds) == 1:
            # if only one threshold has been provided, this should be scalar
            cube = next(cube.slices_over(cube.coord(var_name="threshold")))

        cube.rename("probability_of_{}_{}_threshold".format(
            cube.name(), probability_is_above_or_below(cube)))
        cube.units = Unit(1)

        enforce_coordinate_ordering(cube, ["realization", "percentile"])

        return cube
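The masked-data handling above keeps the input mask (and the original values at masked points) intact in the output. A minimal sketch of that pattern, assuming a hypothetical threshold of 10:

import numpy as np

# Toy masked input: the second value is masked and must stay masked after
# thresholding.
data = np.ma.masked_array([5.0, 20.0, 15.0], mask=[False, True, False])
threshold = 10.0  # hypothetical threshold

truth_value = (data > threshold).astype(np.float32)
# Re-apply the input mask so masked points remain masked in the output.
truth_value = np.ma.masked_where(np.ma.getmask(data), truth_value)
print(truth_value)  # -> [0.0 -- 1.0]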