Example #1
    def test_mismatched_time_bounds_ranges(self):
        """Test for mismatched bounds ranges error."""
        frt = dt(2017, 11, 9, 21, 0)
        times = [
            dt(2017, 11, 10, 3, 0),
            dt(2017, 11, 10, 4, 0),
            dt(2017, 11, 10, 5, 0)
        ]
        time_bounds = np.array(
            [[dt(2017, 11, 10, 2, 0),
              dt(2017, 11, 10, 3, 0)],
             [dt(2017, 11, 10, 3, 0),
              dt(2017, 11, 10, 4, 0)],
             [dt(2017, 11, 10, 2, 0),
              dt(2017, 11, 10, 5, 0)]])

        cubelist = iris.cube.CubeList([])
        for tpoint, tbounds in zip(times, time_bounds):
            cube = set_up_probability_cube(
                0.6 * np.ones((2, 3, 3), dtype=np.float32),
                np.array([278., 280.], dtype=np.float32),
                time=tpoint, frt=frt, time_bounds=tbounds)
            cubelist.append(cube)

        msg = "Cube with mismatching time bounds ranges"
        with self.assertRaisesRegex(ValueError, msg):
            merge_cubes(cubelist, blend_coord="time")
Example #2
 def test_multi_model(self):
     """Test Multi models merge OK"""
     cubes = iris.cube.CubeList([self.cube, self.cube_ukv])
     result = merge_cubes(cubes)
     self.assertIsInstance(result, Cube)
     self.assertArrayAlmostEqual(
         result.coord("model_realization").points, [0., 1., 2., 1000.])
Example #3
    def test_specific_cycletime(self):
        """Test that the plugin setup with a specific cycletime returns a cube
        in which the forecast reference time has been changed to match the
        given cycletime. The forecast period should also have been adjusted to
        be given relative to this time.

        For this we need a single time in our cube and so to blend over
        something else. In this case we create a "model_id" coordinate as if we
        are model blending."""

        coord_name = "model_id"

        cube1 = self.cube[0].copy()
        model_crd1 = iris.coords.DimCoord([0], long_name=coord_name, units=1)
        cube1.add_aux_coord(model_crd1)

        cube2 = self.cube[0].copy()
        model_crd2 = iris.coords.DimCoord([1], long_name=coord_name, units=1)
        cube2.add_aux_coord(model_crd2)

        cubes = iris.cube.CubeList([cube1, cube2])
        cube = merge_cubes(cubes)

        plugin = WeightedBlendAcrossWholeDimension(coord_name)
        expected_frt = 1447837200
        expected_forecast_period = 61200
        result = plugin.process(cube, cycletime='20151118T0900Z')

        self.assertEqual(
            result.coord('forecast_reference_time').points, expected_frt)
        self.assertEqual(
            result.coord('forecast_period').points, expected_forecast_period)
        self.assertEqual(
            result.coord('time').points,
            cube.coord('time').points)
Example #4
def load_cube(filepath, constraints=None, no_lazy_load=False):
    """Load the filepath provided using Iris into a cube.

    Args:
        filepath (str or list):
            Filepath that will be loaded or list of filepaths that can be
            merged into a single cube upon loading.
        constraints (iris.Constraint, str or None):
            Constraint to be applied when loading from the input filepath.
            This can be in the form of an iris.Constraint or could be a string
            that is intended to match the name of the cube.
            The default is None.
        no_lazy_load (bool):
            If True, bypass cube deferred (lazy) loading and load the whole
            cube into memory. This can increase performance at the cost of
            memory. If False (default) then lazy load.

    Returns:
        cube (iris.cube.Cube):
            Cube that has been loaded from the input filepath given the
            constraints provided.
    """
    # Remove metadata prefix cube if present
    constraints = iris.Constraint(
        cube_func=lambda cube: cube.long_name != 'prefixes') & constraints

    # Load each file individually to avoid partial merging (iris.load_raw()
    # is not used due to issues with time representation)
    if isinstance(filepath, str):
        cubes = iris.load(filepath, constraints=constraints)
    else:
        cubes = iris.cube.CubeList([])
        for item in filepath:
            cubes.extend(iris.load(item, constraints=constraints))

    # Merge loaded cubes
    if not cubes:
        message = "No cubes found using contraints {}".format(constraints)
        raise ValueError(message)
    elif len(cubes) == 1:
        cube = cubes[0]
    else:
        cube = merge_cubes(cubes)

    # Remove metadata prefix cube attributes
    if 'bald__isPrefixedBy' in cube.attributes.keys():
        cube.attributes.pop('bald__isPrefixedBy')

    # Ensure the probabilistic coordinates are the first coordinates within a
    # cube and are in the specified order.
    cube = enforce_coordinate_ordering(
        cube, ["realization", "percentile_over", "threshold"])
    # Ensure the y and x dimensions are the last dimensions within the cube.
    y_name = cube.coord(axis="y").name()
    x_name = cube.coord(axis="x").name()
    cube = enforce_coordinate_ordering(cube, [y_name, x_name], anchor="end")
    if no_lazy_load:
        # Force the cube's data into memory by touching the .data attribute.
        cube.data
    return cube
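
A minimal usage sketch for the loader above; the file name is invented and the import path is an assumption, not stated in the example:

# Hypothetical usage of load_cube (assumed import path).
from improver.utilities.load import load_cube

# Load a single cube by name and force its data into memory up front.
cube = load_cube('forecast.nc', constraints='air_temperature',
                 no_lazy_load=True)
print(cube.name(), cube.shape)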
Example #5
 def test_basic(self, warning_list=None):
     """Test that the utility returns an iris.cube.Cube."""
     result = merge_cubes(self.cube)
     self.assertTrue(
         any(item.category == UserWarning for item in warning_list))
     warning_msg = "Only a single cube "
     self.assertTrue(any(warning_msg in str(item) for item in warning_list))
     self.assertIsInstance(result, Cube)
Example #6
 def test_lagged_ukv(self):
     """Test Lagged ukv merge OK"""
     cubes = iris.cube.CubeList(
         [self.cube_ukv, self.cube_ukv_t1, self.cube_ukv_t2])
     result = merge_cubes(cubes)
     self.assertIsInstance(result, Cube)
     self.assertArrayAlmostEqual(
         result.coord("forecast_period").points, [6.0, 5.0, 4.0])
Example #7
 def test_lagged_ukv(self):
     """Test lagged UKV merge OK (forecast periods in seconds)"""
     expected_fp_points = 3600 * np.array([6, 5, 4], dtype=np.int32)
     cubes = iris.cube.CubeList(
         [self.cube_ukv, self.cube_ukv_t1, self.cube_ukv_t2])
     result = merge_cubes(cubes)
     self.assertIsInstance(result, Cube)
     self.assertArrayAlmostEqual(
         result.coord("forecast_period").points, expected_fp_points)
Example #8
    def test_model_id_attr_mismatch(self):
        """Test that when a model ID attribute string is specified that does
        not match the model ID attribute key name on both cubes to be merged,
        an error is thrown"""
        cubes = iris.cube.CubeList(
            [self.cube_non_mo_ens, self.cube_non_mo_det])

        # The test cubes contain the 'non_mo_model_config' attribute key.
        # We'll specify 'non_matching_model_config' as our model ID
        # attribute key argument. merge_cubes should then raise an error
        # as our specified model ID does not match that on the cubes and it
        # will not be able to build a model ID coordinate.
        msg = ('Cannot create model ID coordinate for grid blending '
               'as the model ID attribute specified is not found '
               'within the cube attributes')

        with self.assertRaisesRegex(ValueError, msg):
            merge_cubes(cubes, model_id_attr='non_matching_model_config')
Example #9
 def test_non_mo_model_id(self):
     """Test that a model ID attribute string can be specified when
     merging multi model cubes"""
     cubes = iris.cube.CubeList(
         [self.cube_non_mo_ens, self.cube_non_mo_det])
     result = merge_cubes(cubes, model_id_attr='non_mo_model_config')
     self.assertIsInstance(result, Cube)
     self.assertArrayAlmostEqual(
         result.coord("model_realization").points, [0., 3., 4., 1000.])
Example #10
 def test_one_threshold_data(self):
     """Test threshold data where one cube has single threshold as dim"""
     ukv_prob = self.prob_ukv[0]
     ukv_prob = iris.util.new_axis(ukv_prob, 'threshold')
     enuk_prob = self.prob_enuk[0]
     cubes = iris.cube.CubeList([ukv_prob, enuk_prob])
     result = merge_cubes(cubes, model_id_attr='mosg__model_configuration')
     self.assertArrayAlmostEqual(
         result.coord("model_id").points, [0., 1000.])
     self.assertEqual(ukv_prob.data.shape, (1, 1, 3, 3))
     self.assertEqual(enuk_prob.data.shape, (1, 3, 3))
     self.assertEqual(result.data.shape, (2, 3, 3))
Example #11
    def test_model_id_attr_mismatch_one_cube(self):
        """Test that when a model ID attribute string is specified that only
        matches the model ID attribute key name on one of the cubes to be
        merged, an error is thrown"""

        # Change the model ID attribute key on one of the test cubes so that
        # it matches the model ID argument. merge_cubes should still raise
        # an error as the model ID attribute key has to match on all cubes
        # to be blended.
        self.cube_non_mo_det.attributes.pop('non_mo_model_config')
        self.cube_non_mo_det.attributes[
            'non_matching_model_config'] = 'non_uk_det'

        cubes = iris.cube.CubeList(
            [self.cube_non_mo_ens, self.cube_non_mo_det])

        msg = ('Cannot create model ID coordinate for grid blending '
               'as the model ID attribute specified is not found '
               'within the cube attributes')

        with self.assertRaisesRegex(ValueError, msg):
            merge_cubes(cubes, model_id_attr='non_matching_model_config')
Example #12
    def process(self, cubes):
        """
        Separate the input cubes into the historic_forecasts and truth based
        on the metadata information supplied within the input dictionaries.

        Args:
             cubes (iris.cube.CubeList):
                CubeList of input cubes that are expected to contain a mixture
                of historic forecasts and truth.

        Returns:
            (tuple): tuple containing:
                iris.cube.Cube:
                    A cube containing the historic forecasts.
                iris.cube.Cube:
                    A cube containing the truth datasets.
        """
        historic_forecasts = self._find_required_cubes_using_metadata(
            cubes, self.historic_forecast_dict)
        truths = self._find_required_cubes_using_metadata(
            cubes, self.truth_dict)
        # Use improver merge_cubes to equalise attributes
        return merge_cubes(historic_forecasts), merge_cubes(truths)
Example #13
    def process(self, cube_gust, cube_ws):
        """
        Create a cube containing the wind_gust diagnostic.

        Args:
            cube_gust (iris.cube.Cube):
                Cube containing one or more percentiles of wind_gust data.
            cube_ws (iris.cube.Cube):
                Cube containing one or more percentiles of wind_speed data.

        Returns:
            result (iris.cube.Cube):
                Cube containing the wind-gust diagnostic data.

        """

        # Extract wind-gust data
        req_cube_gust, perc_coord_gust = self.extract_percentile_data(
            cube_gust, self.percentile_gust, "wind_speed_of_gust")
        # Extract wind-speed data
        req_cube_ws, perc_coord_ws = self.extract_percentile_data(
            cube_ws, self.percentile_windspeed, "wind_speed")
        if perc_coord_gust.name() != perc_coord_ws.name():
            msg = ('Percentile coord of wind-gust data '
                   'does not match coord of wind-speed data'
                   ' {0:s} {1:s}.'.format(perc_coord_gust.name(),
                                          perc_coord_ws.name()))
            raise ValueError(msg)
        # Add metadata to both cubes
        req_cube_gust = self.add_metadata(req_cube_gust)
        req_cube_ws = self.add_metadata(req_cube_ws)
        # Merge cubes
        merged_cube = merge_cubes(iris.cube.CubeList([req_cube_gust,
                                                      req_cube_ws]))
        # Calculate wind-gust diagnostic
        cube_max = merged_cube.collapsed(perc_coord_gust.name(),
                                         iris.analysis.MAX)

        # Update metadata
        result = self.update_metadata_after_max(cube_max,
                                                perc_coord_gust.name())

        return result
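
The heart of this plugin is the final collapse: once both diagnostics share one percentile coordinate, iris.analysis.MAX keeps the larger value at each point. A standalone sketch of that step on a synthetic cube (all numbers and names below are illustrative):

import numpy as np
import iris
from iris.coords import DimCoord
from iris.cube import Cube

# One slice per diagnostic: gust at its percentile, wind speed at its.
perc = DimCoord([50.0, 95.0], long_name='percentile', units='%')
data = np.array([[3.0, 5.0], [4.0, 2.0]], dtype=np.float32)
cube = Cube(data, long_name='wind_speed', units='m s-1',
            dim_coords_and_dims=[(perc, 0)])

# Collapsing over the percentile coordinate takes the elementwise
# maximum, which is the wind-gust diagnostic computed above.
result = cube.collapsed('percentile', iris.analysis.MAX)
print(result.data)  # [4. 5.]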
Example #14
 def test_unify_frt(self):
     """Test function equalises forecast reference times if weighting a
     model blend by forecast_period"""
     expected_frt, = self.enuk_cube.coord("forecast_reference_time").points
     expected_fp = 3 * 3600
     rationalise_blend_time_coords(
         self.cubelist, "model", weighting_coord="forecast_period")
     merged_cube = merge_cubes(
         self.cubelist, model_id_attr="mosg__model_configuration")
     for coord in ["forecast_reference_time", "forecast_period"]:
         self.assertEqual(len(merged_cube.coord(coord).points), 1)
     self.assertEqual(
         merged_cube.coord("forecast_reference_time").points[0],
         expected_frt)
     self.assertEqual(
         merged_cube.coord("forecast_period").points[0], expected_fp)
Example #15
 def test_cycletime(self):
     """Test function sets different cycle time if passed in as argument"""
     expected_frt, = (
         self.enuk_cube.coord("forecast_reference_time").points -
         (3 * 3600)
     )
     expected_fp = 6 * 3600
     rationalise_blend_time_coords(
         self.cubelist, "model", weighting_coord="forecast_period",
         cycletime='20170109T2100Z')
     merged_cube = merge_cubes(
         self.cubelist, model_id_attr="mosg__model_configuration")
     for coord in ["forecast_reference_time", "forecast_period"]:
         self.assertEqual(len(merged_cube.coord(coord).points), 1)
     self.assertEqual(
         merged_cube.coord("forecast_reference_time").points[0],
         expected_frt)
     self.assertEqual(
         merged_cube.coord("forecast_period").points[0], expected_fp)
Example #16
    def process(self, cubelist):
        """
        Takes an input cubelist containing forecasts from different cycles
        and merges them into a single cube.

        The steps taken are:
            1. If no cycletime is given, find the latest cycle time from
               the input cubes.
            2. Update the forecast periods in each input cube to be relative
               to the new cycletime.
            3. Check for duplicate realization numbers. If a duplicate is
               found, renumber all of the realizations to remove any
               duplicates.
            4. Merge the cubes into one cube, removing any metadata that
               doesn't match.
        """
        if self.cycletime is None:
            cycletime = find_latest_cycletime(cubelist)
        else:
            cycletime = cycletime_to_datetime(self.cycletime)
        cubelist = unify_forecast_reference_time(cubelist, cycletime)

        # Take all the realizations from all the input cubes and
        # put them in one array
        all_realizations = [
            cube.coord("realization").points for cube in cubelist
        ]
        all_realizations = np.concatenate(all_realizations)
        # Find unique realizations
        unique_realizations = np.unique(all_realizations)

        # If we have fewer unique realizations than total realizations we have
        # duplicate realizations so we rebadge all realizations in the cubelist
        if len(unique_realizations) < len(all_realizations):
            first_realization = 0
            for cube in cubelist:
                n_realization = len(cube.coord("realization").points)
                cube.coord("realization").points = np.arange(
                    first_realization, first_realization + n_realization)
                first_realization = first_realization + n_realization

        lagged_ensemble = merge_cubes(cubelist)
        return lagged_ensemble
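
The duplicate-realization handling above can be isolated with plain numpy; a small sketch with made-up realization numbers from three overlapping cycles:

import numpy as np

# Realization numbers carried by three hypothetical lagged cubes.
per_cube = [np.array([0, 1, 2]), np.array([0, 1, 2]), np.array([0, 1])]
all_realizations = np.concatenate(per_cube)

# Fewer unique values than total values means duplicates exist, so each
# cube is rebadged with a fresh contiguous range, as in the loop above.
if len(np.unique(all_realizations)) < len(all_realizations):
    first = 0
    for i, points in enumerate(per_cube):
        per_cube[i] = np.arange(first, first + len(points))
        first += len(points)

print(per_cube)  # [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]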
Example #17
def process(cube: cli.inputcube,
            advection_velocity: inputadvection,
            orographic_enhancement: cli.inputcube = None,
            *,
            attributes_config: cli.inputjson = None,
            max_lead_time: int = 360, lead_time_interval: int = 15):
    """Module  to extrapolate input cubes given advection velocity fields.

    Args:
        cube (iris.cube.Cube):
            The data to be advected.
        advection_velocity (iris.cube.CubeList):
            Advection cubes of U and V. These must be named
            precipitation_advection_x_velocity and
            precipitation_advection_y_velocity.
        orographic_enhancement (iris.cube.Cube):
            Cube containing orographic enhancement forecasts for the lead times
            at which an extrapolation nowcast is required.
        attributes_config (dict):
            Dictionary containing the required changes to the attributes.
        max_lead_time (int):
            Maximum lead time required (mins).
        lead_time_interval (int):
            Interval between required lead times (mins).

    Returns:
        iris.cube.CubeList:
            New cubes with updated time and extrapolated data.
    """
    from improver.nowcasting.forecasting import CreateExtrapolationForecast
    from improver.utilities.cube_manipulation import merge_cubes

    u_cube, v_cube = advection_velocity

    # extrapolate input data to required lead times
    forecast_plugin = CreateExtrapolationForecast(
        cube, u_cube, v_cube, orographic_enhancement,
        attributes_dict=attributes_config)
    forecast_cubes = forecast_plugin.process(lead_time_interval, max_lead_time)

    return merge_cubes(forecast_cubes)
Example #18
    def setUp(self):
        """Set up a list of cubes from different models with some probability
        data in them."""

        # make a cube with a forecast reference time and period labelled as
        # coming from the UKV
        data = np.full((3, 3), 0.6, dtype=np.float32)
        self.ukv_cube = set_up_variable_cube(
            data, name='probability_of_air_temperature', units='1',
            time=datetime(2017, 1, 10, 3, 0), frt=datetime(2017, 1, 9, 23, 0),
            standard_grid_metadata='uk_det')

        # make a cube labelled as coming from MOGREPS-UK, with a different
        # forecast reference time from the UKV cube
        self.enuk_cube = set_up_variable_cube(
            data, name='probability_of_air_temperature', units='1',
            time=datetime(2017, 1, 10, 3, 0), frt=datetime(2017, 1, 10, 0, 0),
            standard_grid_metadata='uk_ens')

        # make a cube list and merged cube containing the two model cubes, for
        # use in defining reference coordinates for tests below
        self.cubelist = iris.cube.CubeList([self.ukv_cube, self.enuk_cube])
        self.cube = merge_cubes(
            self.cubelist, model_id_attr='mosg__model_configuration')
Example #19
 def test_basic(self):
     """Test that the utility returns an iris.cube.Cube."""
     result = merge_cubes([self.cube_ukv, self.cube_ukv_t1])
     self.assertIsInstance(result, Cube)
Example #20
 def test_remove_fp(self):
     """Test function removes forecast_period coord if blending over
     forecast_reference_time"""
     rationalise_blend_time_coords(self.cubelist, "forecast_reference_time")
     merged_cube = merge_cubes(self.cubelist)
     self.assertTrue("forecast_period" not in merged_cube.coords())
Example #21
def process(orography, landmask, site_list,
            all_methods=False, land_constraint=None, minimum_dz=None,
            search_radius=None, node_limit=None, site_coordinate_system=None,
            site_coordinate_options=None, site_x_coordinate=None,
            site_y_coordinate=None):
    """Module to create neighbour cubes for extracting spot data.

    Determine grid point coordinates within the provided cubes that neighbour
    spot data sites defined within the provided JSON/Dictionary.
    If no options are set the returned cube will contain the nearest neighbour
    found for each site. Other constrained neighbour finding methods can be
    set with options below.
    1. Nearest neighbour.
    2. Nearest land point neighbour.
    3. Nearest neighbour with minimum height difference.
    4. Nearest land point neighbour with minimum height difference.

    Args:
        orography (iris.cube.Cube):
            Cube of model orography for the model grid on which neighbours are
            being found.
        landmask (iris.cube.Cube):
            Cube of model land mask for the model grid on which neighbours are
            being found.
        site_list (dict):
            Dictionary that contains the spot sites for which neighbouring grid
            points are to be found.
        all_methods (bool):
            If True, this will return a cube containing the nearest grid point
            neighbours to spot sites as defined by each possible combination
            of constraints.
            Default is False.
        land_constraint (bool):
            If True, this will return a cube containing the nearest grid point
            neighbours to spot sites that are also land points. May be used
            with the minimum_dz option.
            Default is None.
        minimum_dz (bool):
            If True, this will return a cube containing the nearest grid point
            neighbour to each spot site that is found, within a given search
            radius, to minimise the height difference between the two. May be
            used with the land_constraint option.
            Default is None.
        search_radius (float):
            The radius in metres about a spot site within which to search for
            a grid point neighbour that is land or which has a smaller height
            difference than the nearest.
            Default is None.
        node_limit (int):
            When searching within the defined search_radius for suitable
            neighbours, a KDTree is constructed. This node_limit prevents the
            tree from becoming too large for large search radii. A default of
            36 will be set, which is to say the nearest 36 grid points will be
            considered. If the search radius is likely to contain more than
            36 points, this value should be increased to ensure all points
            are considered.
            Default is None.
        site_coordinate_system (cartopy coordinate system):
            The coordinate system in which the site coordinates are provided
            within the site list. This must be provided as the name of a
            cartopy coordinate system. If unset, PlateCarree is used.
            Default is None.
        site_coordinate_options (str):
            JSON formatted string of options passed to the cartopy coordinate
            system given in site_coordinate_system. "globe" is handled as a
            special case to construct a cartopy Globe object.
            Default is None.
        site_x_coordinate (str):
            The key that identifies site x coordinates in the provided site
            dictionary. Defaults to longitude.
            Default is None.
        site_y_coordinate (str):
            The key that identifies site y coordinates in the provided site
            dictionary. Defaults to latitude.
            Default is None.

    Returns:
        iris.cube.Cube:
            The processed Cube.

    Raises:
        ValueError:
            If all_methods is used with land_constraint or minimum_dz.

    """
    # Check valid options have been selected.
    if all_methods is True and (land_constraint or minimum_dz):
        raise ValueError(
            'Cannot use all_methods option with other constraints.')

    # Filter kwargs for those expected by plugin and which are set.
    # This preserves the plugin defaults for unset options.
    args = {
        'land_constraint': land_constraint,
        'minimum_dz': minimum_dz,
        'search_radius': search_radius,
        'site_coordinate_system': site_coordinate_system,
        'site_coordinate_options': site_coordinate_options,
        'site_x_coordinate': site_x_coordinate,
        'node_limit': node_limit,
        'site_y_coordinate': site_y_coordinate
    }
    fargs = (site_list, orography, landmask)
    kwargs = {k: v for (k, v) in args.items() if v is not None}

    # Deal with coordinate systems for sites other than PlateCarree.
    if 'site_coordinate_system' in kwargs.keys():
        scrs = kwargs['site_coordinate_system']
        if scrs not in PROJECTION_LIST:
            raise ValueError('invalid projection {}'.format(scrs))
        site_crs = getattr(ccrs, scrs)
        scrs_opts = json.loads(kwargs.pop('site_coordinate_options', '{}'))
        if 'globe' in scrs_opts:
            crs_globe = ccrs.Globe(**scrs_opts['globe'])
            del scrs_opts['globe']
        else:
            crs_globe = ccrs.Globe()
        kwargs['site_coordinate_system'] = site_crs(
            globe=crs_globe, **scrs_opts)
    # Call plugin to generate neighbour cubes
    if all_methods:
        methods = [
            {**kwargs, 'land_constraint': False, 'minimum_dz': False},
            {**kwargs, 'land_constraint': True, 'minimum_dz': False},
            {**kwargs, 'land_constraint': False, 'minimum_dz': True},
            {**kwargs, 'land_constraint': True, 'minimum_dz': True}
        ]

        all_methods = iris.cube.CubeList([])
        for method in methods:
            all_methods.append(NeighbourSelection(**method).process(*fargs))

        squeezed_cubes = iris.cube.CubeList([])
        for index, cube in enumerate(all_methods):
            cube.coord('neighbour_selection_method').points = np.int32(index)
            squeezed_cubes.append(iris.util.squeeze(cube))

        result = merge_cubes(squeezed_cubes)
    else:
        result = NeighbourSelection(**kwargs).process(*fargs)

    result = enforce_coordinate_ordering(
        result,
        ['spot_index', 'neighbour_selection_method', 'grid_attributes'])

    return result
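
The None-filtering of keyword arguments in this example is a reusable pattern: unset options are dropped so the plugin's own defaults still apply. A minimal sketch with a stand-in plugin (not part of the example above):

def dummy_plugin(land_constraint=False, node_limit=36):
    # Stand-in with its own defaults, in place of NeighbourSelection.
    return land_constraint, node_limit

args = {'land_constraint': True, 'node_limit': None}
# Only explicitly set options are forwarded; node_limit keeps its default.
kwargs = {k: v for k, v in args.items() if v is not None}
print(dummy_plugin(**kwargs))  # (True, 36)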
Example #22
 def test_no_model_id_attr_multi_model(self):
     """Test multiple model blending fails and results in a merge error if
     no model_id_attr is specified."""
     cubes = iris.cube.CubeList([self.cube, self.cube_ukv])
     with self.assertRaises(MergeError):
         merge_cubes(cubes)
Example #23
 def test_identical_cubes(self):
     """Test that merging identical cubes fails."""
     cubes = iris.cube.CubeList([self.cube, self.cube])
     msg = "failed to merge into a single cube"
     with self.assertRaisesRegex(DuplicateDataError, msg):
         merge_cubes(cubes)
Example #24
 def test_threshold_data(self):
     """Test threshold data merges OK"""
     cubes = iris.cube.CubeList([self.prob_ukv, self.prob_enuk])
     result = merge_cubes(cubes, model_id_attr="mosg__model_configuration")
     self.assertArrayAlmostEqual(
         result.coord("model_id").points, [0., 1000.])
Example #25
    def process(self, cube_t0, cube_t1):
        """
        Interpolate data to intermediate times between validity times of
        cube_t0 and cube_t1.

        Args:
            cube_t0 (iris.cube.Cube):
                A diagnostic cube valid at the beginning of the period within
                which interpolation is to be permitted.
            cube_t1 (iris.cube.Cube):
                A diagnostic cube valid at the end of the period within which
                interpolation is to be permitted.

        Returns:
            interpolated_cubes (iris.cube.CubeList):
                A list of cubes interpolated to the desired times.

        Raises:
            TypeError: If cube_t0 and cube_t1 are not of type iris.cube.Cube.
            CoordinateNotFoundError: The input cubes contain no time
                                     coordinate.
            ValueError: Cubes contain multiple validity times.
            ValueError: The input cubes are ordered such that the initial time
                        cube has a later validity time than the final cube.
        """
        if (not isinstance(cube_t0, iris.cube.Cube)
                or not isinstance(cube_t1, iris.cube.Cube)):
            msg = ('Inputs to TemporalInterpolation are not of type '
                   'iris.cube.Cube, first input is type '
                   '{}, second input is type {}'.format(
                       type(cube_t0), type(cube_t1)))
            raise TypeError(msg)

        try:
            initial_time, = iris_time_to_datetime(cube_t0.coord('time'))
            final_time, = iris_time_to_datetime(cube_t1.coord('time'))
        except CoordinateNotFoundError:
            msg = ('Cube provided to TemporalInterpolation contains no time '
                   'coordinate.')
            raise CoordinateNotFoundError(msg)
        except ValueError:
            msg = ('Cube provided to TemporalInterpolation contains multiple '
                   'validity times, only one expected.')
            raise ValueError(msg)

        if initial_time > final_time:
            raise ValueError('TemporalInterpolation input cubes '
                             'ordered incorrectly'
                             ', with the final time being before the initial '
                             'time.')

        time_list = self.construct_time_list(initial_time, final_time)
        cubes = iris.cube.CubeList([cube_t0, cube_t1])
        cube = merge_cubes(cubes)

        interpolated_cube = cube.interpolate(time_list, iris.analysis.Linear())
        self.enforce_time_coords_dtype(interpolated_cube)
        interpolated_cubes = iris.cube.CubeList()
        if self.interpolation_method == 'solar':
            interpolated_cubes = self.solar_interpolate(
                cube, interpolated_cube)
        elif self.interpolation_method == 'daynight':
            interpolated_cubes = (self.daynight_interpolate(interpolated_cube))
        else:
            for single_time in interpolated_cube.slices_over('time'):
                interpolated_cubes.append(single_time)

        return interpolated_cubes
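
The interpolation step itself reduces to iris's Linear scheme over the merged time coordinate; a self-contained sketch on a synthetic cube (times and values are invented for illustration):

import numpy as np
import iris
from iris.coords import DimCoord
from iris.cube import Cube

# Two validity times one hour apart, as seconds since the epoch.
tcoord = DimCoord(np.array([0, 3600], dtype=np.int64), standard_name='time',
                  units='seconds since 1970-01-01 00:00:00')
cube = Cube(np.array([[0.0, 0.0], [2.0, 4.0]]), long_name='rainfall_rate',
            units='mm h-1', dim_coords_and_dims=[(tcoord, 0)])

# Interpolate to the half-hour point between the two input times.
result = cube.interpolate([('time', [1800])], iris.analysis.Linear())
print(result.data)  # [[1. 2.]]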
Example #26
    def process(self, cube_t0, cube_t1):
        """
        Interpolate data to intermediate times between validity times of
        cube_t0 and cube_t1.

        Args:
            cube_t0 (iris.cube.Cube):
                A diagnostic cube valid at the beginning of the period within
                which interpolation is to be permitted.
            cube_t1 (iris.cube.Cube):
                A diagnostic cube valid at the end of the period within which
                interpolation is to be permitted.

        Returns:
            interpolated_cubes (iris.cube.CubeList):
                A list of cubes interpolated to the desired times.

        Raises:
            TypeError: If cube_t0 and cube_t1 are not of type iris.cube.Cube.
            CoordinateNotFoundError: The input cubes contain no time
                                     coordinate.
            ValueError: Cubes contain multiple validity times.
            ValueError: The input cubes are ordered such that the initial time
                        cube has a later validity time than the final cube.
        """
        if (not isinstance(cube_t0, iris.cube.Cube)
                or not isinstance(cube_t1, iris.cube.Cube)):
            raise TypeError('Inputs to TemporalInterpolation are not of type '
                            'iris.cube.Cube')

        try:
            initial_time, = iris_time_to_datetime(cube_t0.coord('time'))
            final_time, = iris_time_to_datetime(cube_t1.coord('time'))
        except CoordinateNotFoundError:
            msg = ('Cube provided to time_interpolate contains no time '
                   'coordinate.')
            raise CoordinateNotFoundError(msg)
        except ValueError:
            msg = ('Cube provided to time_interpolate contains multiple '
                   'validity times, only one expected.')
            raise ValueError(msg)

        if initial_time > final_time:
            raise ValueError('time_interpolate input cubes ordered incorrectly'
                             ', with the final time being before the initial '
                             'time.')

        time_list = self.construct_time_list(initial_time, final_time)
        cubes = iris.cube.CubeList([cube_t0, cube_t1])
        cube = merge_cubes(cubes)

        interpolated_cube = cube.interpolate(time_list, iris.analysis.Linear())

        # iris.analysis.Linear() modifies the dtype of time and forecast_period
        # coords so need to revert back
        dtype_time = cube_t0.coord('time').points.dtype
        dtype_fp = cube_t0.coord('forecast_period').points.dtype

        interpolated_cubes = iris.cube.CubeList()
        for single_time in interpolated_cube.slices_over('time'):
            coord_time = single_time.coord('time')
            coord_time.points = np.around(coord_time.points).astype(dtype_time)
            coord_fp = single_time.coord('forecast_period')
            coord_fp.points = np.around(coord_fp.points).astype(dtype_fp)
            interpolated_cubes.append(single_time)

        return interpolated_cubes
Example #27
 def test_threshold_data(self):
     """Test threshold data merges OK"""
     cubes = iris.cube.CubeList([self.prob_ukv, self.prob_enuk])
     result = merge_cubes(cubes)
     self.assertArrayAlmostEqual(
         result.coord("model_id").points, [0, 1000])
Example #28
def process(cube: cli.inputcube,
            advection_velocity: inputadvection,
            orographic_enhancement: cli.inputcube,
            *,
            attributes_config: cli.inputjson = None,
            max_lead_time=360,
            lead_time_interval=15,
            accumulation_period=15,
            accumulation_units='m'):
    """Module to extrapolate and accumulate the weather with 1 min fidelity.

    Args:
        cube (iris.cube.Cube):
            The input Cube to be processed.
        advection_velocity (iris.cube.CubeList):
            Advection cubes of U and V.
        orographic_enhancement (iris.cube.Cube):
            Cube containing the orographic enhancement fields. May have data
            for multiple times in the cube.
        attributes_config (dict):
            Dictionary containing the required changes to the attributes.
        max_lead_time (int):
            Maximum lead time required (mins).
        lead_time_interval (int):
            Interval between required lead times (mins).
        accumulation_period (int):
            The period over which the accumulation is calculated (mins).
            Only full accumulation periods will be computed. At lead times
            that are shorter than the accumulation period, no accumulation
            output will be produced.
        accumulation_units (str):
            Desired units in which the accumulations should be expressed.
            e.g. 'mm'

    Returns:
        iris.cube.CubeList:
            New cubes with accumulated data.

    Raises:
        ValueError:
            If advection_velocity doesn't contain x and y velocity.
    """
    from iris import Constraint

    import numpy as np

    from improver.nowcasting.accumulation import Accumulation
    from improver.nowcasting.forecasting import CreateExtrapolationForecast
    from improver.utilities.cube_manipulation import merge_cubes

    u_cube, v_cube = advection_velocity

    if not (u_cube and v_cube):
        raise ValueError("Neither u_cube or v_cube can be None")

    # extrapolate input data to the maximum required lead time
    forecast_cubes = CreateExtrapolationForecast(
        cube,
        u_cube,
        v_cube,
        orographic_enhancement,
        attributes_dict=attributes_config).process(ACCUMULATION_FIDELITY,
                                                   max_lead_time)

    lead_times = (np.arange(lead_time_interval, max_lead_time + 1,
                            lead_time_interval))

    # Accumulate high frequency rate into desired accumulation intervals.
    result = Accumulation(accumulation_units=accumulation_units,
                          accumulation_period=accumulation_period * 60,
                          forecast_periods=lead_times *
                          60).process(forecast_cubes)

    return merge_cubes(result)
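
The lead-time arithmetic above is compact but easy to misread; a sketch of the forecast periods it requests (numbers invented):

import numpy as np

max_lead_time = 60       # minutes
lead_time_interval = 15  # minutes
lead_times = np.arange(lead_time_interval, max_lead_time + 1,
                       lead_time_interval)
# One accumulation window ends at each of these lead times (in minutes);
# the plugin itself receives them converted to seconds.
print(lead_times)  # [15 30 45 60]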
Example #29
def process(start_cube: cli.inputcube,
            end_cube: cli.inputcube,
            *,
            interval_in_mins: int = None,
            times: cli.comma_separated_list = None,
            interpolation_method='linear'):
    """Interpolate data between validity times.

    Interpolate data to intermediate times between the validity times of two
    cubes. This can be used to fill in missing data (e.g. for radar fields)
    or to ensure data is available at the required intervals when model data
    is not available at these times.

    Args:
        start_cube (iris.cube.Cube):
            Cube containing the data at the beginning.
        end_cube (iris.cube.Cube):
            Cube containing the data at the end.
        interval_in_mins (int):
            Specifies the interval in minutes at which to interpolate between
            the two input cubes.
            A number of minutes that does not divide the interval evenly
            will raise an exception.
            If interval_in_mins is set then times cannot be used.
        times (str):
            Specifies the times in the format {YYYYMMDD}T{HHMM}Z
            at which to interpolate between the two input cubes.
            Where {YYYYMMDD} is year, month, day and {HHMM} is hour and
            minutes, e.g. 20180116T0100Z. More than one time can be
            provided, separated by a comma.
            If times are set, interval_in_mins cannot be used.
        interpolation_method (str):
            ["linear", "solar", "daynight"]
            Specifies the interpolation method: solar interpolates using the
            solar elevation; daynight uses linear interpolation but sets
            night-time points to 0.0; linear is simple linear interpolation.

    Returns:
        iris.cube.CubeList:
            A list of cubes interpolated to the desired times. The
            interpolated cubes will always be in chronological order of
            earliest to latest regardless of the order of the input.
    """
    from improver.utilities.cube_manipulation import merge_cubes
    from improver.utilities.temporal import (
        cycletime_to_datetime, iris_time_to_datetime)
    from improver.utilities.temporal_interpolation import TemporalInterpolation

    time_start, = iris_time_to_datetime(start_cube.coord('time'))
    time_end, = iris_time_to_datetime(end_cube.coord('time'))
    if time_end < time_start:
        # swap cubes
        start_cube, end_cube = end_cube, start_cube

    if times is not None:
        times = [cycletime_to_datetime(timestr) for timestr in times]

    result = TemporalInterpolation(
        interval_in_minutes=interval_in_mins, times=times,
        interpolation_method=interpolation_method
    ).process(start_cube, end_cube)
    return merge_cubes(result)
Example #30
def main(argv=None):
    """Load in arguments and get going."""
    description = (
        "Determine grid point coordinates within the provided cubes that "
        "neighbour spot data sites defined within the provided JSON "
        "file. If no options are set the returned netCDF file will contain the"
        " nearest neighbour found for each site. Other constrained neighbour "
        "finding methods can be set with options below.")
    options = ("\n\nThese methods are:\n\n 1. nearest neighbour\n"
               " 2. nearest land point neighbour\n"
               " 3. nearest neighbour with minimum height difference\n"
               " 4. nearest land point neighbour with minimum height "
               "difference")

    parser = ArgParser(description=('\n'.join(wrap(description, width=79)) +
                                    options),
                       formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("site_list_filepath",
                        metavar="SITE_LIST_FILEPATH",
                        help="Path to a JSON file that contains the spot sites"
                        " for which neighbouring grid points are to be found.")
    parser.add_argument("orography_filepath",
                        metavar="OROGRAPHY_FILEPATH",
                        help="Path to a NetCDF file of model orography for the"
                        " model grid on which neighbours are being found.")
    parser.add_argument("landmask_filepath",
                        metavar="LANDMASK_FILEPATH",
                        help="Path to a NetCDF file of model land mask for the"
                        " model grid on which neighbours are being found.")
    parser.add_argument("output_filepath",
                        metavar="OUTPUT_FILEPATH",
                        help="The output path for the resulting NetCDF")

    parser.add_argument(
        "--all_methods",
        default=False,
        action='store_true',
        help="If set this will return a cube containing the nearest grid point"
        " neighbours to spot sites as defined by each possible combination of"
        " constraints.")

    group = parser.add_argument_group('Apply constraints to neighbour choice')
    group.add_argument(
        "--land_constraint",
        default=False,
        action='store_true',
        help="If set this will return a cube containing the nearest grid point"
        " neighbours to spot sites that are also land points. May be used with"
        " the minimum_dz option.")
    group.add_argument(
        "--minimum_dz",
        default=False,
        action='store_true',
        help="If set this will return a cube containing the nearest grid point"
        " neighbour to each spot site that is found, within a given search"
        " radius, to minimise the height difference between the two. May be"
        " used with the land_constraint option.")
    group.add_argument(
        "--search_radius",
        metavar="SEARCH_RADIUS",
        type=float,
        help="The radius in metres about a spot site within which to search"
        " for a grid point neighbour that is land or which has a smaller "
        " height difference than the nearest. The default value is 10000m "
        "(10km).")
    group.add_argument(
        "--node_limit",
        metavar="NODE_LIMIT",
        type=int,
        help="When searching within the defined search_radius for suitable "
        "neighbours, a KDTree is constructed. This node_limit prevents the "
        "tree from becoming too large for large search radii. A default of 36"
        " is set, which is to say the nearest 36 grid points will be "
        "considered. If the search_radius is likely to contain more than 36 "
        "points, this value should be increased to ensure all points are "
        "considered.")

    s_group = parser.add_argument_group('Site list options')
    s_group.add_argument(
        "--site_coordinate_system",
        metavar="SITE_COORDINATE_SYSTEM",
        help="The coordinate system in which the site coordinates are provided"
        " within the site list. This must be provided as the name of a cartopy"
        " coordinate system. The default is a PlateCarree system, with site"
        " coordinates given by latitude/longitude pairs. This can be a"
        " complete definition, including parameters required to modify a"
        " default system, e.g. Miller(central_longitude=90). If a globe is"
        " required this can be specified as e.g."
        " Globe(semimajor_axis=100, semiminor_axis=100).")
    s_group.add_argument(
        "--site_x_coordinate",
        metavar="SITE_X_COORDINATE",
        help="The x coordinate key within the JSON file. The plugin default is"
        " 'longitude', but can be changed using this option if required.")
    s_group.add_argument(
        "--site_y_coordinate",
        metavar="SITE_Y_COORDINATE",
        help="The y coordinate key within the JSON file. The plugin default is"
        " 'latitude', but can be changed using this option if required.")

    meta_group = parser.add_argument_group("Metadata")
    meta_group.add_argument(
        "--metadata_json",
        metavar="METADATA_JSON",
        default=None,
        help="If provided, this JSON file can be used to modify the metadata "
        "of the returned netCDF file. Defaults to None.")

    args = parser.parse_args(args=argv)

    # Open input files
    with open(args.site_list_filepath, 'r') as site_file:
        sitelist = json.load(site_file)
    orography = load_cube(args.orography_filepath)
    landmask = load_cube(args.landmask_filepath)
    fargs = (sitelist, orography, landmask)

    # Filter kwargs for those expected by plugin and which are set.
    # This preserves the plugin defaults for unset options.
    kwarg_list = [
        'land_constraint', 'minimum_dz', 'search_radius',
        'site_coordinate_system', 'site_x_coordinate', 'node_limit',
        'site_y_coordinate'
    ]
    kwargs = {
        k: v
        for (k, v) in vars(args).items() if k in kwarg_list and v is not None
    }

    # Deal with coordinate systems for sites other than PlateCarree.
    if 'site_coordinate_system' in kwargs.keys():
        scrs = kwargs['site_coordinate_system']
        kwargs['site_coordinate_system'] = safe_eval(scrs, ccrs,
                                                     PROJECTION_LIST)

    # Check valid options have been selected.
    if args.all_methods is True and (kwargs['land_constraint'] is True
                                     or kwargs['minimum_dz'] is True):
        raise ValueError(
            'Cannot use all_methods option with other constraints.')

    # Call plugin to generate neighbour cubes
    if args.all_methods:
        methods = [
            {**kwargs, 'land_constraint': False, 'minimum_dz': False},
            {**kwargs, 'land_constraint': True, 'minimum_dz': False},
            {**kwargs, 'land_constraint': False, 'minimum_dz': True},
            {**kwargs, 'land_constraint': True, 'minimum_dz': True}
        ]

        all_methods = iris.cube.CubeList([])
        for method in methods:
            all_methods.append(NeighbourSelection(**method).process(*fargs))

        squeezed_cubes = iris.cube.CubeList([])
        for index, cube in enumerate(all_methods):
            cube.coord('neighbour_selection_method').points = index
            squeezed_cubes.append(iris.util.squeeze(cube))
        result = merge_cubes(squeezed_cubes)
    else:
        result = NeighbourSelection(**kwargs).process(*fargs)

    result = enforce_coordinate_ordering(
        result,
        ['spot_index', 'neighbour_selection_method', 'grid_attributes'])

    # Modify final metadata as described by provided JSON file.
    if args.metadata_json:
        with open(args.metadata_json, 'r') as input_file:
            metadata_dict = json.load(input_file)
        result = amend_metadata(result, **metadata_dict)

    # Save the neighbour cube
    save_netcdf(result, args.output_filepath)