Example #1
    def test_input(self):
        """Test that the method does not modify the input cubes."""
        cube_a = self.cube_a.copy()
        cube_b = self.cube_b.copy()
        apply_double_scaling(self.cube_a, self.cube_b, self.thr_a, self.thr_b)
        self.assertArrayAlmostEqual(cube_a.data, self.cube_a.data)
        self.assertArrayAlmostEqual(cube_b.data, self.cube_b.data)
Example #2
    def test_basic(self):
        """Test that the method returns the expected data type"""
        result = apply_double_scaling(self.cube_a,
                                      self.cube_b,
                                      self.thr_a,
                                      self.thr_b)
        self.assertIsInstance(result, np.ndarray)
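These examples never show the implementation itself, but they pin down the interface: two iris cubes, two three-point threshold tuples, an optional combine_function, and a plain np.ndarray (not a cube) as the return value. Below is a minimal numpy-only sketch of the double-scaling idea, assuming a piecewise-linear limiter built from the two threshold tuples (the function name is illustrative; the real apply_double_scaling operates on iris cubes and its internals are not shown here):

    import numpy as np

    def double_scaling_sketch(data, scaled, data_vals, scaling_vals,
                              combine_function=np.minimum):
        # Map `data` through the three (data_vals -> scaling_vals) points;
        # np.interp is piecewise-linear and clips to the end values outside
        # the given range.
        limit = np.interp(data, data_vals, scaling_vals)
        # np.minimum caps `scaled` at the limiter; np.maximum raises it to
        # the limiter.
        return combine_function(scaled, limit)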
Example #3
    def test_values_default(self):
        """Test that the method returns the expected data values with the
        default minimum function"""
        # Create an array of correct shape and fill with expected value
        expected = np.full_like(self.cube_a.data, 0.9)
        # Row zero should be changed to all-zeroes
        expected[0, :] = [0.0, 0.0, 0.0, 0.0]
        # Row one should be like cube_b, but with values capped at 0.5
        expected[1, :] = [0.0, 0.4, 0.5, 0.5]
        # Row two should be like cube_b, but with late values capped at 0.9
        expected[2, :] = [0.0, 0.4, 0.8, 0.9]
        self.cube_a.data[0, :] = [0.0, 0.0, 0.0, 0.0]
        self.cube_a.data[1, :] = [0.5, 0.5, 0.5, 0.5]
        self.cube_a.data[2, :] = [1.0, 1.0, 1.0, 1.0]
        self.cube_b.data[0, :] = np.arange(0.0, 1.6, 0.4)
        self.cube_b.data[1, :] = np.arange(0.0, 1.6, 0.4)
        self.cube_b.data[2, :] = np.arange(0.0, 1.6, 0.4)
        result = apply_double_scaling(self.cube_a, self.cube_b, self.thr_a,
                                      self.thr_b)
        self.assertArrayAlmostEqual(result, expected)
Example #4
    def test_values_max(self):
        """Test that the method returns the expected data values with the
        maximum function"""
        expected = self.cube_a.data.copy()
        # Row zero should be unchanged from cube_b
        expected[0, :] = np.arange(0.0, 1.6, 0.4)
        # Row one should be like cube_b, but with early values raised to 0.5
        expected[1, :] = [0.5, 0.5, 0.8, 1.2]
        # Row two should be like cube_b, but with most values raised to 0.9
        expected[2, :] = [0.9, 0.9, 0.9, 1.2]
        self.cube_a.data[0, :] = [0.0, 0.0, 0.0, 0.0]
        self.cube_a.data[1, :] = [0.5, 0.5, 0.5, 0.5]
        self.cube_a.data[2, :] = [1.0, 1.0, 1.0, 1.0]
        self.cube_b.data[0, :] = np.arange(0.0, 1.6, 0.4)
        self.cube_b.data[1, :] = np.arange(0.0, 1.6, 0.4)
        self.cube_b.data[2, :] = np.arange(0.0, 1.6, 0.4)
        result = apply_double_scaling(self.cube_a,
                                      self.cube_b,
                                      self.thr_a,
                                      self.thr_b,
                                      combine_function=np.maximum)
        self.assertArrayAlmostEqual(result, expected)
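A worked check of the sketch above against the rows these two tests assert. The tuples thr_a=(0.0, 0.5, 1.0) and thr_b=(0.0, 0.5, 0.9) are hypothetical (the real values are set in the tests' setUp, which is not shown), but they reproduce every expected row:

    import numpy as np

    data = np.array([0.5, 0.5, 0.5, 0.5])  # cube_a, row one
    scaled = np.arange(0.0, 1.6, 0.4)      # cube_b rows: [0., 0.4, 0.8, 1.2]
    thr_a, thr_b = (0.0, 0.5, 1.0), (0.0, 0.5, 0.9)  # hypothetical values

    double_scaling_sketch(data, scaled, thr_a, thr_b)
    # -> [0., 0.4, 0.5, 0.5], row one of test_values_default

    double_scaling_sketch(data, scaled, thr_a, thr_b,
                          combine_function=np.maximum)
    # -> [0.5, 0.5, 0.8, 1.2], row one of test_values_max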
Example #5
    def apply_precip(self, prob_lightning_cube: Cube,
                     prob_precip_cube: Cube) -> Cube:
        """
        Modify the nowcast lightning probability using precipitation-rate
        probabilities at thresholds of 0.5, 7 and 35 mm/h.

        Args:
            prob_lightning_cube:
                First-guess lightning probability.

            prob_precip_cube:
                Nowcast precipitation probabilities
                (threshold > 0.5, 7. and 35. mm hr-1).
                The units of the threshold coord are modified in-place
                to mm hr-1.

        Returns:
            Output cube containing updated nowcast lightning probability.
            This cube will have the same dimensions and metadata as
            prob_lightning_cube.

        Raises:
            iris.exceptions.ConstraintMismatchError:
                If prob_precip_cube does not contain the expected thresholds.
        """
        new_cube_list = iris.cube.CubeList([])
        # check prob-precip threshold units are as expected
        precip_threshold_coord = find_threshold_coordinate(prob_precip_cube)
        precip_threshold_coord.convert_units("mm hr-1")
        # extract precipitation probabilities at required thresholds
        for cube_slice in prob_lightning_cube.slices_over("time"):
            this_time = iris_time_to_datetime(
                cube_slice.coord("time").copy())[0]
            this_precip = prob_precip_cube.extract(
                iris.Constraint(time=this_time)
                & iris.Constraint(coord_values={
                    precip_threshold_coord:
                    lambda t: isclose(t.point, 0.5)
                }))
            high_precip = prob_precip_cube.extract(
                iris.Constraint(time=this_time)
                & iris.Constraint(coord_values={
                    precip_threshold_coord:
                    lambda t: isclose(t.point, 7.0)
                }))
            torr_precip = prob_precip_cube.extract(
                iris.Constraint(time=this_time)
                & iris.Constraint(coord_values={
                    precip_threshold_coord:
                    lambda t: isclose(t.point, 35.0)
                }))
            err_string = "No matching {} cube for {}"
            if not isinstance(this_precip, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("any precip", this_time))
            if not isinstance(high_precip, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("high precip", this_time))
            if not isinstance(torr_precip, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("intense precip", this_time))
            # Increase prob(lightning) to Risk 2 (pl_dict[2]) when
            #   prob(precip > 7 mm/hr) >= phighthresh
            cube_slice.data = np.where(
                (high_precip.data >= self.phighthresh)
                & (cube_slice.data < self.pl_dict[2]),
                self.pl_dict[2],
                cube_slice.data,
            )
            # Increase prob(lightning) to Risk 1 (pl_dict[1]) when
            #   prob(precip > 35 mm/hr) >= ptorrthresh
            cube_slice.data = np.where(
                (torr_precip.data >= self.ptorrthresh)
                & (cube_slice.data < self.pl_dict[1]),
                self.pl_dict[1],
                cube_slice.data,
            )

            # Decrease prob(lightning) where prob(precip > 0.5 mm hr-1) is low.
            cube_slice.data = apply_double_scaling(this_precip, cube_slice,
                                                   self.precipthr,
                                                   self.ltngthr)

            new_cube_list.append(cube_slice)

        new_cube = new_cube_list.merge_cube()
        new_cube = check_cube_coordinates(prob_lightning_cube, new_cube)
        return new_cube
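The two np.where calls carry the escalation logic: the first-guess lightning probability is raised to a floor wherever the matching precipitation probability passes its threshold. A self-contained numpy illustration follows, with hypothetical stand-ins for self.pl_dict, self.phighthresh and self.ptorrthresh, which the plugin configures elsewhere (isclose in the extracts above is presumably math.isclose):

    import numpy as np

    # Hypothetical stand-ins for the plugin's configured values
    pl_risk2, pl_risk1 = 0.25, 1.0  # self.pl_dict[2], self.pl_dict[1]
    phighthresh, ptorrthresh = 0.4, 0.2

    prob_lightning = np.array([0.0, 0.1, 0.3, 0.9])
    prob_gt_7mm = np.array([0.5, 0.3, 0.5, 0.5])   # P(precip > 7 mm/hr)
    prob_gt_35mm = np.array([0.0, 0.0, 0.3, 0.3])  # P(precip > 35 mm/hr)

    # Step 1: raise to Risk 2 where heavy precipitation is likely enough
    out = np.where((prob_gt_7mm >= phighthresh) & (prob_lightning < pl_risk2),
                   pl_risk2, prob_lightning)
    # Step 2: raise to Risk 1 where torrential precipitation is likely enough
    out = np.where((prob_gt_35mm >= ptorrthresh) & (out < pl_risk1),
                   pl_risk1, out)
    # out -> [0.25, 0.1, 1.0, 1.0]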