Example 1
    def test_mismatching_forecast_periods(self):
        """Test case in which the forecast periods differ."""
        forecast2 = set_up_variable_cube(
            np.ones((3, 3), dtype=np.float32),
            frt=datetime.datetime(2017, 11, 11, 1, 0),
            time=datetime.datetime(2017, 11, 11, 5, 0),
        )
        forecasts = iris.cube.CubeList([self.forecast1,
                                        forecast2]).merge_cube()

        msg = ("Forecasts have been provided with differing forecast periods "
               "\[10800 14400\]")

        with self.assertRaisesRegex(ValueError, msg):
            check_forecast_consistency(forecasts)
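
The asserted message suggests that the check inspects the forecast_period coordinate of the merged cube. A minimal sketch of that kind of check follows; the helper name is hypothetical and the real check_forecast_consistency implementation is not shown on this page.

import numpy as np


def _check_forecast_periods(forecasts):
    """Illustrative only: raise if the merged forecast cube spans more than
    one forecast period, mirroring the message asserted in the test above."""
    periods = np.unique(forecasts.coord("forecast_period").points)
    if len(periods) > 1:
        raise ValueError(
            "Forecasts have been provided with differing forecast periods "
            f"{periods}"
        )
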
Example 2
    def test_mismatching_frt_hours(self):
        """Test case in which forecast reference time hours differ."""
        forecast2 = set_up_variable_cube(
            np.ones((3, 3), dtype=np.float32),
            frt=datetime.datetime(2017, 11, 11, 2, 0),
            time=datetime.datetime(2017, 11, 11, 4, 0),
        )
        forecasts = iris.cube.CubeList([self.forecast1,
                                        forecast2]).merge_cube()

        msg = ("Forecasts have been provided with differing hours for the "
               "forecast reference time {1, 2}")

        with self.assertRaisesRegex(ValueError, msg):
            check_forecast_consistency(forecasts)
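
Likewise, this test implies a check on the hour component of each forecast_reference_time cell. A hypothetical sketch, again with an illustrative helper name rather than the library's own code:

def _check_frt_hours(forecasts):
    """Illustrative only: raise if the cycle hour differs across the
    forecast_reference_time coordinate, as asserted in the test above."""
    frt_hours = {
        cell.point.hour
        for cell in forecasts.coord("forecast_reference_time").cells()
    }
    if len(frt_hours) > 1:
        raise ValueError(
            "Forecasts have been provided with differing hours for the "
            f"forecast reference time {frt_hours}"
        )
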
Example 3
    def test_matching_forecasts(self):
        """Test case in which forecasts share frt hour and forecast period
        values. No result is expected in this case, hence there is no value
        comparison; the test is the absence of an exception."""

        check_forecast_consistency(self.forecasts)
    def process(self, historic_forecasts, truths):
        """
        Slice data over threshold and time coordinates to construct reliability
        tables. These are summed over time to give a single table for each
        threshold, constructed from all the provided historic forecasts and
        truths.

        .. See the documentation for an example of the resulting reliability
           table cube.
        .. include:: extended_documentation/calibration/
           reliability_calibration/reliability_calibration_examples.rst

        Note that the forecast and truth data used are probabilistic, i.e. they
        have already been thresholded relative to the thresholds of interest
        using the required comparison operator. As such, this plugin is
        agnostic as to whether the data are thresholded below or above a given
        diagnostic threshold.

        Args:
            historic_forecasts (iris.cube.Cube):
                A cube containing the historical forecasts used in calibration.
                These are all expected to share a consistent cycle hour, that
                is, the hour of the forecast reference time.
            truths (iris.cube.Cube):
                A cube containing the thresholded gridded truths used in
                calibration.
        Returns:
            iris.cube.CubeList:
                A cubelist of reliability table cubes, one for each threshold
                in the historic forecast cubes.
        Raises:
            ValueError: If the forecast and truth cubes have differing
                        threshold coordinates.
        """
        historic_forecasts, truths = filter_non_matching_cubes(
            historic_forecasts, truths)

        threshold_coord = find_threshold_coordinate(historic_forecasts)
        truth_threshold_coord = find_threshold_coordinate(truths)
        if threshold_coord != truth_threshold_coord:
            msg = "Threshold coordinates differ between forecasts and truths."
            raise ValueError(msg)

        time_coord = historic_forecasts.coord("time")

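        # Forecasts must share a cycle hour and a forecast period.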
        check_forecast_consistency(historic_forecasts)
        reliability_cube = self._create_reliability_table_cube(
            historic_forecasts, threshold_coord)

        reliability_tables = iris.cube.CubeList()
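        # Pair forecast and truth slices for each threshold value.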
        threshold_slices = zip(
            historic_forecasts.slices_over(threshold_coord),
            truths.slices_over(threshold_coord),
        )
        for forecast_slice, truth_slice in threshold_slices:

            threshold_reliability = []
            time_slices = zip(
                forecast_slice.slices_over(time_coord),
                truth_slice.slices_over(time_coord),
            )
            for forecast, truth in time_slices:

                reliability_table = self._populate_reliability_bins(
                    forecast.data, truth.data)

                threshold_reliability.append(reliability_table)

            # Stack and sum reliability tables for all times
            table_values = np.stack(threshold_reliability)
            table_values = np.sum(table_values, axis=0, dtype=np.float32)

            reliability_entry = reliability_cube.copy(data=table_values)
            reliability_entry.replace_coord(
                forecast_slice.coord(threshold_coord))
            reliability_tables.append(reliability_entry)

        return MergeCubes()(reliability_tables)
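
As a rough usage sketch, the process method above could be driven as follows. The plugin class name ConstructReliabilityCalibrationTables, both import paths, and the set_up_probability_cube helper are assumptions inferred from the improver code shown on this page rather than confirmed by it; adjust them to wherever the plugin and test utilities actually live.

# Hypothetical usage sketch. The class name, import paths and set-up helper
# are assumptions; only process() itself is documented in the code above.
import datetime

import iris
import numpy as np

from improver.calibration.reliability_calibration import (
    ConstructReliabilityCalibrationTables,  # assumed owner of process()
)
from improver.synthetic_data.set_up_test_cubes import (
    set_up_probability_cube,  # assumed helper, analogous to set_up_variable_cube
)


def make_cube(data, frt, time):
    """Build a small thresholded cube for a single cycle (illustrative)."""
    return set_up_probability_cube(
        data.astype(np.float32),
        thresholds=[275.0, 280.0],
        frt=frt,
        time=time,
    )


# Two cycles sharing the same cycle hour (01Z) and forecast period (3 hours),
# so check_forecast_consistency should pass.
frts = [datetime.datetime(2017, 11, 10, 1, 0), datetime.datetime(2017, 11, 11, 1, 0)]
times = [datetime.datetime(2017, 11, 10, 4, 0), datetime.datetime(2017, 11, 11, 4, 0)]

forecasts = iris.cube.CubeList(
    [make_cube(np.full((2, 3, 3), 0.6), frt, time) for frt, time in zip(frts, times)]
).merge_cube()
truths = iris.cube.CubeList(
    [make_cube(np.ones((2, 3, 3)), frt, time) for frt, time in zip(frts, times)]
).merge_cube()

# One reliability table cube per threshold, summed over the two cycles.
reliability_tables = ConstructReliabilityCalibrationTables().process(forecasts, truths)
print(reliability_tables)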