Code example #1
    def process(self, cubes):
        """
        Call the calculate_wet_bulb_temperature function to calculate wet bulb
        temperatures. This process function splits input cubes over vertical
        levels to mitigate memory issues when trying to operate on multi-level
        data.

        Args:
            cubes (iris.cube.CubeList or list or iris.cube.Cube):
                containing:
                    temperature (iris.cube.Cube):
                        Cube of air temperatures.
                    relative_humidity (iris.cube.Cube):
                        Cube of relative humidities.
                    pressure (iris.cube.Cube):
                        Cube of air pressures.

        Returns:
            iris.cube.Cube:
                Cube of wet bulb temperature (K).
        """
        names_to_extract = [
            "air_temperature", "relative_humidity", "air_pressure"
        ]
        if len(cubes) != len(names_to_extract):
            raise ValueError(
                f"Expected {len(names_to_extract)} cubes, found {len(cubes)}")

        temperature, relative_humidity, pressure = tuple(
            CubeList(cubes).extract_strict(n) for n in names_to_extract)

        slices = self._slice_inputs(temperature, relative_humidity, pressure)

        cubelist = iris.cube.CubeList([])
        for t_slice, rh_slice, p_slice in slices:
            cubelist.append(
                self.create_wet_bulb_temperature_cube(t_slice.copy(),
                                                      rh_slice.copy(),
                                                      p_slice.copy()))
        wet_bulb_temperature = cubelist.merge_cube()

        # re-promote any scalar coordinates lost in slice / merge
        wet_bulb_temperature = check_cube_coordinates(temperature,
                                                      wet_bulb_temperature)

        return wet_bulb_temperature
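The pattern above (slice, process each slice, merge, then check_cube_coordinates) recurs throughout these examples. Below is a minimal sketch of the re-promotion step, assuming improver and iris are installed and that check_cube_coordinates lives at the import path used by the project's tests; the cube contents are illustrative.

import numpy as np
from iris.cube import Cube, CubeList
from iris.coords import DimCoord
from improver.utilities.cube_checker import check_cube_coordinates

realization = DimCoord([0], standard_name="realization", units="1")
y_coord = DimCoord(np.arange(3.0), standard_name="projection_y_coordinate", units="m")
x_coord = DimCoord(np.arange(3.0), standard_name="projection_x_coordinate", units="m")
cube = Cube(
    np.zeros((1, 3, 3), dtype=np.float32),
    standard_name="air_temperature",
    units="K",
    dim_coords_and_dims=[(realization, 0), (y_coord, 1), (x_coord, 2)],
)

# Slicing over a length-one dimension demotes it to a scalar coordinate,
# and merging the single resulting slice cannot restore it.
merged = CubeList(s.copy() for s in cube.slices_over("realization")).merge_cube()
assert merged.ndim == 2

# check_cube_coordinates re-promotes the lost dimension to match the original.
restored = check_cube_coordinates(cube, merged)
assert restored.ndim == 3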
Code example #2
    def process(self, threshold_cube):
        """
        Slice the percentiles cube over any non-spatial coordinates
        (realization, time, etc.) if present, and call the percentile
        interpolation method for each resulting cube.

        Args:
            threshold_cube (iris.cube.Cube):
                A cube of values that effectively behave as thresholds, for
                which it is desired to obtain probability values from a
                percentiled reference cube.
        Returns:
            iris.cube.Cube:
                A cube of probabilities obtained by interpolating between
                percentile values at the "threshold" level.
        """
        cube_slices = self.percentiles_cube.slices(
            [self.percentile_coordinate, self.percentiles_cube.coord(axis='y'),
             self.percentiles_cube.coord(axis='x')])

        if threshold_cube.ndim > 2:
            msg = ('threshold cube has too many ({} > 2) dimensions - slicing '
                   'to x-y grid'.format(threshold_cube.ndim))
            warnings.warn(msg)
            threshold_cube = next(threshold_cube.slices([
                threshold_cube.coord(axis='y'),
                threshold_cube.coord(axis='x')]))

        if threshold_cube.units != self.percentiles_cube.units:
            threshold_cube.convert_units(self.percentiles_cube.units)

        output_cubes = iris.cube.CubeList()
        for cube_slice in cube_slices:
            output_cube = self.percentile_interpolation(threshold_cube,
                                                        cube_slice)
            output_cubes.append(output_cube)

        probability_cube = output_cubes.merge_cube()

        reference_cube = next(self.percentiles_cube.slices_over(
            self.percentile_coordinate))

        probability_cube = check_cube_coordinates(reference_cube,
                                                  probability_cube)
        return probability_cube
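The percentile_interpolation method itself is not shown here; a hedged sketch of the underlying idea, with made-up numbers: at one point, the probability is read off by interpolating the percentile levels against the reference field values at the "threshold" value.

import numpy as np

percentile_levels = np.array([25.0, 50.0, 75.0])   # percentile coordinate
field_values = np.array([280.0, 285.0, 290.0])     # reference values (K)
threshold = 287.0                                  # value from the threshold cube

# Probability that the field lies below the threshold, as a fraction.
probability = np.interp(threshold, field_values, percentile_levels) / 100.0
print(probability)  # 0.6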
Code example #3
    def _apply_orographic_enhancement(self, precip_cube, oe_cube):
        """Combine the precipitation rate cube and the orographic enhancement
        cube.

        Args:
            precip_cube (iris.cube.Cube):
                Cube containing the input precipitation field.
            oe_cube (iris.cube.Cube):
                Cube containing the orographic enhancement field matching
                the validity time of the precipitation cube.

        Returns:
            cube (iris.cube.Cube):
                Cube containing the precipitation rate field modified by the
                orographic enhancement cube.

        """
        # Ensure the orographic enhancement cube matches the
        # dimensions of the precip_cube.
        oe_cube = check_cube_coordinates(precip_cube, oe_cube.copy())

        # Ensure that orographic enhancement is in the units of the
        # precipitation rate cube.
        oe_cube.convert_units(precip_cube.units)

        # Set orographic enhancement to be zero for points with a
        # precipitation rate of < 1/32 mm/hr.
        original_units = Unit("mm/hr")
        threshold_in_cube_units = (original_units.convert(
            self.min_precip_rate_mmh, precip_cube.units))

        # Ignore invalid warnings generated if e.g. a NaN is encountered
        # within the less than (<) comparison.
        with np.errstate(invalid='ignore'):
            oe_cube.data[precip_cube.data < threshold_in_cube_units] = 0.

        # Use CubeCombiner to combine the cubes.
        temp_cubelist = iris.cube.CubeList([precip_cube, oe_cube])
        cube = CubeCombiner(self.operation).process(temp_cubelist,
                                                    precip_cube.name())
        return cube
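A small sketch of the thresholding step above, assuming cf_units is available and with the input values made up: the threshold is converted from mm/hr into the rate array's units, and np.errstate suppresses the warnings that NaN comparisons would otherwise raise.

import numpy as np
from cf_units import Unit

min_precip_rate_mmh = 1.0 / 32.0
threshold = Unit("mm/hr").convert(min_precip_rate_mmh, "m s-1")  # ~8.7e-9

precip = np.array([np.nan, 1e-9, 1e-5])   # rate in m s-1, with a missing point
enhancement = np.ones_like(precip)
with np.errstate(invalid="ignore"):       # NaN < x would warn otherwise
    enhancement[precip < threshold] = 0.0
print(enhancement)  # [1. 0. 1.], NaN points are left untouched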
Code example #4
    def process(self, cube: Cube) -> Cube:
        """
        Ensure that the cube passed to the maximum_within_vicinity method is
        2d; the resulting slices are subsequently merged back together.

        Args:
            cube:
                Thresholded cube.

        Returns:
            Cube containing the occurrences within a vicinity for each
            xy 2d slice, which have been merged back together.
        """

        max_cubes = CubeList([])
        for cube_slice in cube.slices(
            [cube.coord(axis="y"), cube.coord(axis="x")]):
            max_cubes.append(self.maximum_within_vicinity(cube_slice))
        result_cube = max_cubes.merge_cube()

        # Put dimensions back if they were there before.
        result_cube = check_cube_coordinates(cube, result_cube)
        return result_cube
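For intuition, a "maximum within vicinity" on each 2D slice behaves like a moving-window maximum. A rough analogue sketched with scipy rather than the plugin's own method; the vicinity size here is arbitrary.

import numpy as np
from scipy.ndimage import maximum_filter

occurrences = np.zeros((5, 5))
occurrences[2, 2] = 1.0                    # one thresholded "hit"
vicinity = maximum_filter(occurrences, size=3, mode="nearest")
print(vicinity[1:4, 1:4])                  # the hit now covers its 3x3 vicinity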
Code example #5
    def process(self, cube, alphas_x=None, alphas_y=None, mask_cube=None):
        """
        Set up the alpha parameters and run the recursive filter.

        The steps undertaken are:

        1. Split the input cube into slices determined by the co-ordinates in
           the x and y directions.
        2. Construct an array of filter parameters (alphas_x and alphas_y) for
           each cube slice that are used to weight the recursive filter in
           the x- and y-directions.
        3. Pad each cube slice with a square-neighbourhood halo and apply
           the recursive filter for the required number of iterations.
        4. Remove the halo from the cube slice and append the recursed cube
           slice to a 'recursed cube'.
        5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.
        6. Modify the 'new cube' so that its scalar dimension co-ordinates are
           consistent with those in the original input cube.
        7. Return the 'new cube' which now contains the recursively filtered
           values for the original input cube.

        Args:
            cube (iris.cube.Cube):
                Cube containing the input data to which the recursive filter
                will be applied.

        Keyword Args:
            alphas_x (iris.cube.Cube or None):
                Cube containing array of alpha values that will be used when
                applying the recursive filter along the x-axis.
            alphas_y (iris.cube.Cube or None):
                Cube containing array of alpha values that will be used when
                applying the recursive filter along the y-axis.
            mask_cube (iris.cube.Cube or None):
                Cube containing an external mask to apply to the cube before
                applying the recursive filter.

        Returns:
            new_cube (iris.cube.Cube):
                Cube containing the smoothed field after the recursive filter
                method has been applied.
        """
        cube_format = next(cube.slices([cube.coord(axis='y'),
                                        cube.coord(axis='x')]))
        alphas_x = self.set_alphas(cube_format, self.alpha_x, alphas_x)
        alphas_y = self.set_alphas(cube_format, self.alpha_y, alphas_y)

        recursed_cube = iris.cube.CubeList()
        for output in cube.slices([cube.coord(axis='y'),
                                   cube.coord(axis='x')]):

            # Setup cube and mask for processing.
            # This should set up a mask full of 1.0 if None is provided
            # and set the data 0.0 where mask is 0.0 or the data is NaN
            output, mask, nan_array = (
                SquareNeighbourhood().set_up_cubes_to_be_neighbourhooded(
                    output, mask_cube))
            mask = mask.data.squeeze()

            padded_cube = SquareNeighbourhood().pad_cube_with_halo(
                output, self.edge_width, self.edge_width)

            new_cube = self.run_recursion(padded_cube, alphas_x, alphas_y,
                                          self.iterations)
            new_cube = SquareNeighbourhood().remove_halo_from_cube(
                new_cube, self.edge_width, self.edge_width)
            if self.re_mask:
                new_cube.data[nan_array.astype(bool)] = np.nan
                new_cube.data = np.ma.masked_array(new_cube.data,
                                                   mask=np.logical_not(mask))

            recursed_cube.append(new_cube)

        new_cube = recursed_cube.merge_cube()
        new_cube = check_cube_coordinates(cube, new_cube)

        return new_cube
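The halo handling in steps 3-4 of the docstring can be pictured with plain numpy; improver uses its SquareNeighbourhood helpers for this, so the following is only the underlying idea.

import numpy as np

edge_width = 2
field = np.arange(9.0).reshape(3, 3)

padded = np.pad(field, edge_width, mode="symmetric")  # add the halo
# ... the recursive filter would run on `padded` here ...
trimmed = padded[edge_width:-edge_width, edge_width:-edge_width]  # remove it
assert np.array_equal(trimmed, field)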
Code example #6
File: use_nbhood.py  Project: tjtg/improver
    def process(self, cube: Cube, mask_cube: Cube) -> Cube:
        """
        Apply neighbourhood processing with a mask to the input cube,
        collapsing the coord_for_masking if collapse_weights have been provided.

        Args:
            cube:
                Cube containing the array to which the square neighbourhood
                will be applied.
            mask_cube:
                Cube containing the array to be used as a mask. The data in
                this array is not an instance of numpy.ma.MaskedArray. Any sea
                points that should be ignored are set to zeros in every layer
                of the mask_cube.

        Returns:
            Cube containing the smoothed field after the square
            neighbourhood method has been applied when applying masking
            for each point along the coord_for_masking coordinate.
            The resulting cube is concatenated so that the dimension
            coordinates match the input cube.
        """
        plugin = NeighbourhoodProcessing(
            self.neighbourhood_method,
            self.radii,
            lead_times=self.lead_times,
            weighted_mode=self.weighted_mode,
            sum_only=self.sum_only,
            re_mask=self.re_mask,
        )
        yname = cube.coord(axis="y").name()
        xname = cube.coord(axis="x").name()
        result_slices = iris.cube.CubeList([])
        # Take 2D slices of the input cube to limit memory usage.
        prev_x_y_slice = None
        for x_y_slice in cube.slices([yname, xname]):
            if prev_x_y_slice is not None and np.array_equal(
                    prev_x_y_slice.data, x_y_slice.data):
                # Use same result as last time!
                prev_result = result_slices[-1].copy()
                for coord in x_y_slice.coords(dim_coords=False):
                    prev_result.coord(coord).points = coord.points.copy()
                result_slices.append(prev_result)
                continue
            prev_x_y_slice = x_y_slice

            cube_slices = iris.cube.CubeList([])
            # Apply each mask in mask_cube to the 2D input slice.
            for mask_slice in mask_cube.slices_over(self.coord_for_masking):
                output_cube = plugin(x_y_slice, mask_cube=mask_slice)
                coord_object = mask_slice.coord(self.coord_for_masking).copy()
                output_cube.add_aux_coord(coord_object)
                output_cube = iris.util.new_axis(output_cube,
                                                 self.coord_for_masking)
                cube_slices.append(output_cube)
            concatenated_cube = cube_slices.concatenate_cube()
            if self.collapse_weights is not None:
                concatenated_cube = self.collapse_mask_coord(concatenated_cube)
            result_slices.append(concatenated_cube)
        result = result_slices.merge_cube()
        # Promote any single value dimension coordinates if they were
        # dimension on the input cube.
        exception_coordinates = find_dimension_coordinate_mismatch(
            cube, result, two_way_mismatch=False)
        result = check_cube_coordinates(
            cube, result, exception_coordinates=exception_coordinates)
        return result
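The "use same result as last time" shortcut above is worth noting: identical consecutive 2D slices skip the expensive neighbourhood call entirely. A stripped-down sketch with a stand-in computation:

import numpy as np

slices = [np.zeros((2, 2)), np.zeros((2, 2)), np.ones((2, 2))]
results, prev = [], None
for data in slices:
    if prev is not None and np.array_equal(prev, data):
        results.append(results[-1].copy())  # reuse; only coords need updating
        continue
    prev = data
    results.append(data * 2)                # stand-in for the plugin call
print(len(results))  # 3 results, but only two real computations were made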
Code example #7
    def process(self, cube, mask_cube):
        """
        1. Iterate over the chosen coordinate within the mask_cube and apply
           the mask at each iteration to the cube that is to be neighbourhood
           processed.
        2. Concatenate the cubes from each iteration together to create a
           single cube.

        Args:
            cube (iris.cube.Cube):
                Cube containing the array to which the square neighbourhood
                will be applied.
            mask_cube (iris.cube.Cube):
                Cube containing the array to be used as a mask.

        Returns:
            concatenated_cube (iris.cube.Cube):
                Cube containing the smoothed field after the square
                neighbourhood method has been applied when applying masking
                for each point along the coord_for_masking coordinate.
                The resulting cube is concatenated so that the dimension
                coordinates match the input cube.

        """
        yname = cube.coord(axis='y').name()
        xname = cube.coord(axis='x').name()
        result_slices = iris.cube.CubeList([])
        # Take 2D slices of the input cube to limit memory usage.
        prev_x_y_slice = None
        for x_y_slice in cube.slices([yname, xname]):
            if (prev_x_y_slice is not None
                    and np.array_equal(prev_x_y_slice.data, x_y_slice.data)):
                # Use same result as last time!
                prev_result = result_slices[-1].copy()
                for coord in x_y_slice.coords(dim_coords=False):
                    prev_result.coord(coord).points = coord.points.copy()
                result_slices.append(prev_result)
                continue
            prev_x_y_slice = x_y_slice

            cube_slices = iris.cube.CubeList([])
            # Apply each mask in mask_cube to the 2D input slice.
            for cube_slice in mask_cube.slices_over(self.coord_for_masking):
                output_cube = NeighbourhoodProcessing(
                    self.neighbourhood_method,
                    self.radii,
                    lead_times=self.lead_times,
                    weighted_mode=self.weighted_mode,
                    sum_or_fraction=self.sum_or_fraction,
                    re_mask=self.re_mask).process(x_y_slice,
                                                  mask_cube=cube_slice)
                coord_object = cube_slice.coord(self.coord_for_masking).copy()
                output_cube.add_aux_coord(coord_object)
                output_cube = iris.util.new_axis(output_cube,
                                                 self.coord_for_masking)
                cube_slices.append(output_cube)
            concatenated_cube = cube_slices.concatenate_cube()
            exception_coordinates = (find_dimension_coordinate_mismatch(
                x_y_slice, concatenated_cube, two_way_mismatch=False))
            concatenated_cube = check_cube_coordinates(
                x_y_slice,
                concatenated_cube,
                exception_coordinates=exception_coordinates)
            result_slices.append(concatenated_cube)
        result = result_slices.merge_cube()
        exception_coordinates = (find_dimension_coordinate_mismatch(
            cube, result, two_way_mismatch=False))
        result = check_cube_coordinates(
            cube, result, exception_coordinates=exception_coordinates)

        return result
Code example #8
    def process(self, cube, alphas_x=None, alphas_y=None):
        """
        Set up the alpha parameters and run the recursive filter.

        The steps undertaken are:

        1. Split the input cube into slices determined by the co-ordinates in
           the x and y directions.
        2. Construct an array of filter parameters (alphas_x and alphas_y) for
           each cube slice that are used to weight the recursive filter in
           the x- and y-directions.
        3. Pad each cube slice with a square-neighbourhood halo and apply
           the recursive filter for the required number of iterations.
        4. Remove the halo from the cube slice and append the recursed cube
           slice to a 'recursed cube'.
        5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.
        6. Modify the 'new cube' so that its scalar dimension co-ordinates are
           consistent with those in the original input cube.
        7. Return the 'new cube' which now contains the recursively filtered
           values for the original input cube.

        Args:
            cube (iris.cube.Cube):
                Cube containing the input data to which the recursive filter
                will be applied.

        Keyword Args:
            alphas_x (iris.cube.Cube or None):
                Cube containing array of alpha values that will be used when
                applying the recursive filter along the x-axis.
            alphas_y (iris.cube.Cube or None):
                Cube containing array of alpha values that will be used when
                applying the recursive filter along the y-axis.

        Returns:
            new_cube (iris.cube.Cube):
                Cube containing the smoothed field after the recursive filter
                method has been applied.
        """

        cube_format = next(
            cube.slices([cube.coord(axis='y'),
                         cube.coord(axis='x')]))
        alphas_x = self.set_alphas(cube_format, self.alpha_x, alphas_x)
        alphas_y = self.set_alphas(cube_format, self.alpha_y, alphas_y)

        recursed_cube = iris.cube.CubeList()
        for output in cube.slices([cube.coord(axis='y'),
                                   cube.coord(axis='x')]):

            padded_cube = SquareNeighbourhood().pad_cube_with_halo(
                output, self.edge_width, self.edge_width)
            new_cube = self.run_recursion(padded_cube, alphas_x, alphas_y,
                                          self.iterations)
            new_cube = SquareNeighbourhood().remove_halo_from_cube(
                new_cube, self.edge_width, self.edge_width)
            recursed_cube.append(new_cube)

        new_cube = recursed_cube.merge_cube()
        new_cube = check_cube_coordinates(cube, new_cube)
        return new_cube
Code example #9
File: recursive_filter.py  Project: nivnac/improver
    def process(self, cube: Cube, smoothing_coefficients: CubeList) -> Cube:
        """
        Set up the smoothing_coefficient parameters and run the recursive
        filter. Smoothing coefficients can be generated using
        :class:`~.OrographicSmoothingCoefficients`
        and :func:`~improver.cli.generate_orographic_smoothing_coefficients`.
        The steps undertaken are:

        1. Split the input cube into slices determined by the co-ordinates in
           the x and y directions.
        2. Construct an array of filter parameters (smoothing_coefficients_x
           and smoothing_coefficients_y) for each cube slice that are used to
           weight the recursive filter in the x- and y-directions.
        3. Pad each cube slice with a square-neighbourhood halo and apply
           the recursive filter for the required number of iterations.
        4. Remove the halo from the cube slice and append the recursed cube
           slice to a 'recursed cube'.
        5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.
        6. Modify the 'new cube' so that its scalar dimension co-ordinates are
           consistent with those in the original input cube.
        7. Return the 'new cube' which now contains the recursively filtered
           values for the original input cube.

        The smoothing_coefficient determines how much "value" of a cell
        undergoing filtering is comprised of the current value at that cell and
        how much comes from the adjacent cell preceding it in the direction in
        which filtering is being applied. A larger smoothing_coefficient
        results in a more significant proportion of a cell's new value coming
        from its neighbouring cell.

        Args:
            cube:
                Cube containing the input data to which the recursive filter
                will be applied.
            smoothing_coefficients:
                A cubelist containing two cubes of smoothing_coefficient values,
                one corresponding to smoothing in the x-direction, and the other
                to smoothing in the y-direction.

        Returns:
            Cube containing the smoothed field after the recursive filter
            method has been applied.

        Raises:
            ValueError:
                If the cube contains masked data from multiple cycles or times
        """
        cube_format = next(
            cube.slices([cube.coord(axis="y"),
                         cube.coord(axis="x")]))
        coeffs_x, coeffs_y = self._validate_coefficients(
            cube_format, smoothing_coefficients)

        mask_cube = None
        if np.ma.is_masked(cube.data):
            # Assumes mask is the same for each x-y slice.  This may not be
            # true if there are several time slices in the cube - so throw
            # an error if this is so.
            for coord in TIME_COORDS:
                if cube.coords(coord) and len(cube.coord(coord).points) > 1:
                    raise ValueError(
                        "Dealing with masks from multiple time points is unsupported"
                    )

            mask_cube = cube_format.copy(data=cube_format.data.mask)
            coeffs_x, coeffs_y = self._update_coefficients_from_mask(
                coeffs_x,
                coeffs_y,
                mask_cube,
            )

        padded_coefficients_x, padded_coefficients_y = self._pad_coefficients(
            coeffs_x, coeffs_y)

        recursed_cube = iris.cube.CubeList()
        for output in cube.slices([cube.coord(axis="y"),
                                   cube.coord(axis="x")]):

            padded_cube = pad_cube_with_halo(output,
                                             2 * self.edge_width,
                                             2 * self.edge_width,
                                             pad_method="symmetric")

            new_cube = self._run_recursion(
                padded_cube,
                padded_coefficients_x,
                padded_coefficients_y,
                self.iterations,
            )
            new_cube = remove_halo_from_cube(new_cube, 2 * self.edge_width,
                                             2 * self.edge_width)

            if mask_cube is not None:
                new_cube.data = np.ma.MaskedArray(new_cube.data,
                                                  mask=mask_cube.data)

            recursed_cube.append(new_cube)

        new_cube = recursed_cube.merge_cube()
        new_cube = check_cube_coordinates(cube, new_cube)

        return new_cube
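The smoothing_coefficient semantics described in the docstring can be demonstrated with a one-dimensional forward sweep. This is a sketch of the principle only, not improver's _run_recursion, which also sweeps backwards and along both axes.

import numpy as np

def forward_sweep(data, alpha):
    """out[i] = (1 - alpha) * data[i] + alpha * out[i - 1]."""
    out = np.array(data, dtype=float)
    for i in range(1, out.size):
        out[i] = (1.0 - alpha) * out[i] + alpha * out[i - 1]
    return out

spike = np.array([0.0, 0.0, 10.0, 0.0, 0.0])
print(forward_sweep(spike, alpha=0.2))  # mild smoothing
print(forward_sweep(spike, alpha=0.5))  # larger alpha borrows more from neighbours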
Code example #10
File: test_cube_checker.py  Project: zfan001/improver
 def test_basic(self):
     """Test returns iris.cube.Cube."""
     result = check_cube_coordinates(self.cube, self.cube)
     self.assertIsInstance(result, Cube)
Code example #11
File: square_kernel.py  Project: nivnac/improver
    def run(self,
            cube: Cube,
            radius: float,
            mask_cube: Optional[Cube] = None) -> Cube:
        """
        Call the methods required to apply a square neighbourhood
        method to a cube.

        The steps undertaken are:

        1. Set up cubes by determining if the arrays are masked.
        2. Pad the input array with a halo and then calculate the neighbourhood
           of the haloed array.
        3. Remove the halo from the neighbourhooded array and deal with a mask,
           if required.

        Args:
            cube:
                Cube containing the array to which the square neighbourhood
                will be applied.
            radius:
                Radius in metres for use in specifying the number of
                grid cells used to create a square neighbourhood.
            mask_cube:
                Cube containing the array to be used as a mask.

        Returns:
            Cube containing the smoothed field after the square
            neighbourhood method has been applied.
        """
        # If the data is masked, the mask will be processed as well as the
        # original_data * mask array.
        check_radius_against_distance(cube, radius)
        original_attributes = cube.attributes
        original_methods = cube.cell_methods
        grid_cells = distance_to_number_of_grid_cells(cube, radius)
        nb_size = 2 * grid_cells + 1
        try:
            mask_cube_data = mask_cube.data
        except AttributeError:
            mask_cube_data = None

        result_slices = iris.cube.CubeList()
        for cube_slice in cube.slices(
            [cube.coord(axis="y"), cube.coord(axis="x")]):
            cube_slice.data = self._calculate_neighbourhood(
                cube_slice.data,
                mask_cube_data,
                nb_size,
                self.sum_or_fraction == "sum",
                self.re_mask,
            )
            result_slices.append(cube_slice)
        neighbourhood_averaged_cube = result_slices.merge_cube()

        neighbourhood_averaged_cube.cell_methods = original_methods
        neighbourhood_averaged_cube.attributes = original_attributes

        neighbourhood_averaged_cube = check_cube_coordinates(
            cube, neighbourhood_averaged_cube)
        return neighbourhood_averaged_cube
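For intuition, the square-neighbourhood average that _calculate_neighbourhood performs on each slice behaves like a moving-window mean. A sketch using scipy rather than the internal helper, with nb_size computed as above:

import numpy as np
from scipy.ndimage import uniform_filter

grid_cells = 1
nb_size = 2 * grid_cells + 1           # 3x3 neighbourhood
field = np.zeros((5, 5))
field[2, 2] = 9.0
print(uniform_filter(field, size=nb_size, mode="nearest"))
# the central 3x3 block averages to 1.0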
Code example #12
    def process(self,
                cube,
                weights=None,
                cycletime=None,
                attributes_dict=None):
        """Calculate weighted blend across the chosen coord, for either
           probabilistic or percentile data. If there is a percentile
           coordinate on the cube, it will blend using the
           PercentileBlendingAggregator but the percentile coordinate must
           have at least two points.

        Args:
            cube (iris.cube.Cube):
                Cube to blend across the coord.
            weights (iris.cube.Cube):
                Cube of blending weights. If None, the diagnostic cube is
                blended with equal weights across the blending dimension.
            cycletime (str):
                The cycletime in a YYYYMMDDTHHMMZ format e.g. 20171122T0100Z.
                This can be used to manually set the forecast reference time
                on the output blended cube. If not set, the most recent
                forecast reference time from the contributing cubes is used.
            attributes_dict (dict or None):
                Changes to cube attributes to be applied after blending. See
                :func:`~improver.metadata.amend.amend_attributes` for required
                format. If mandatory attributes are not set here, default
                values are used.

        Returns:
            iris.cube.Cube:
                containing the weighted blend across the chosen coord.
        Raises:
            TypeError : If the first argument not a cube.
            CoordinateNotFoundError : If coordinate to be collapsed not found
                                      in cube.
            CoordinateNotFoundError : If coordinate to be collapsed not found
                                      in provided weights cube.
            ValueError : If coordinate to be collapsed is not a dimension.
        """
        if not isinstance(cube, iris.cube.Cube):
            msg = ("The first argument must be an instance of iris.cube.Cube "
                   "but is {}.".format(type(cube)))
            raise TypeError(msg)

        if not cube.coords(self.blend_coord):
            msg = "Coordinate to be collapsed not found in cube."
            raise CoordinateNotFoundError(msg)

        blend_coord_dims = cube.coord_dims(self.blend_coord)
        if not blend_coord_dims:
            raise ValueError("Blending coordinate {} has no associated "
                             "dimension".format(self.blend_coord))

        # Ensure input cube and weights cube are ordered equivalently along
        # blending coordinate.
        cube = sort_coord_in_cube(cube, self.blend_coord)
        if weights is not None:
            if not weights.coords(self.blend_coord):
                msg = "Coordinate to be collapsed not found in weights cube."
                raise CoordinateNotFoundError(msg)
            weights = sort_coord_in_cube(weights, self.blend_coord)

        # Check that the time coordinate is single valued if required.
        self.check_compatible_time_points(cube)

        # Check to see if the data is percentile data
        perc_coord = self.check_percentile_coord(cube)

        # Establish metadata changes to be made after blending
        self.cycletime_point = (
            self._get_cycletime_point(cube, cycletime) if self.blend_coord
            in ["forecast_reference_time", "model_id"] else None)
        self._set_coords_to_remove(cube)

        # Do blending and update metadata
        if perc_coord:
            result = self.percentile_weighted_mean(cube, weights, perc_coord)
        else:
            result = self.weighted_mean(cube, weights)
        self._update_blended_metadata(result, attributes_dict)

        # Check that the coordinate dimensions match a single unblended
        # slice of the input cube.
        result = check_cube_coordinates(
            next(cube.slices_over(self.blend_coord)), result)

        return result
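At its core, weighted_mean is a weighted collapse over the blend coordinate. A minimal sketch with plain iris; the plugin above adds sorting, validation and metadata handling on top, and the names here are illustrative.

import numpy as np
import iris.analysis
from iris.cube import Cube
from iris.coords import DimCoord

model_id = DimCoord([0, 1], long_name="model_id", units="1")
cube = Cube(
    np.array([1.0, 3.0]),
    long_name="rainfall_rate",
    units="mm h-1",
    dim_coords_and_dims=[(model_id, 0)],
)
weights = np.array([0.75, 0.25])
blended = cube.collapsed("model_id", iris.analysis.MEAN, weights=weights)
print(float(blended.data))  # 0.75 * 1.0 + 0.25 * 3.0 = 1.5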
Code example #13
    def run(self, cube, radius, mask_cube=None):
        """
        Call the methods required to apply a square neighbourhood
        method to a cube.

        The steps undertaken are:

        1. Set up cubes by determining if the arrays are masked.
        2. Pad the input array with a halo and then calculate the neighbourhood
           of the haloed array.
        3. Remove the halo from the neighbourhooded array and deal with a mask,
           if required.

        Args:
            cube (iris.cube.Cube):
                Cube containing the array to which the square neighbourhood
                will be applied.
            radius (float):
                Radius in metres for use in specifying the number of
                grid cells used to create a square neighbourhood.

        Keyword Args:
            mask_cube (iris.cube.Cube):
                Cube containing the array to be used as a mask.

        Returns:
            neighbourhood_averaged_cube (iris.cube.Cube):
                Cube containing the smoothed field after the square
                neighbourhood method has been applied.
        """
        # If the data is masked, the mask will be processed as well as the
        # original_data * mask array.
        original_attributes = cube.attributes
        original_methods = cube.cell_methods
        grid_cells_x, grid_cells_y = (
            convert_distance_into_number_of_grid_cells(
                cube, radius, MAX_RADIUS_IN_GRID_CELLS))
        result_slices = iris.cube.CubeList()
        for cube_slice in cube.slices(
            [cube.coord(axis='y'), cube.coord(axis='x')]):
            (cube_slice, mask,
             nan_array) = (self.set_up_cubes_to_be_neighbourhooded(
                 cube_slice, mask_cube))
            neighbourhood_averaged_cube = (
                self._pad_and_calculate_neighbourhood(cube_slice, mask,
                                                      grid_cells_x,
                                                      grid_cells_y))
            neighbourhood_averaged_cube = (self._remove_padding_and_mask(
                neighbourhood_averaged_cube, cube_slice, mask, grid_cells_x,
                grid_cells_y))
            neighbourhood_averaged_cube.data[nan_array.astype(bool)] = np.nan
            result_slices.append(neighbourhood_averaged_cube)

        neighbourhood_averaged_cube = result_slices.merge_cube()

        neighbourhood_averaged_cube.cell_methods = original_methods
        neighbourhood_averaged_cube.attributes = original_attributes

        neighbourhood_averaged_cube = check_cube_coordinates(
            cube, neighbourhood_averaged_cube)
        return neighbourhood_averaged_cube
Code example #14
    def _modify_first_guess(
        self,
        cube: Cube,
        first_guess_lightning_cube: Cube,
        lightning_rate_cube: Cube,
        prob_precip_cube: Cube,
        prob_vii_cube: Optional[Cube] = None,
    ) -> Cube:
        """
        Modify first-guess lightning probability with nowcast data.

        Args:
            cube:
                Provides the meta-data for the Nowcast lightning probability
                output cube.
            first_guess_lightning_cube:
                First-guess lightning probability.
                Must have same x & y dimensions as cube.
                Time dimension should overlap that of cube (closest slice in
                time is used with a maximum time mismatch of 2 hours).
                This is included to allow this cube to come from a different
                modelling system, such as the UM.
            lightning_rate_cube:
                Nowcast lightning rate.
                Must have same dimensions as cube.
            prob_precip_cube:
                Nowcast precipitation probability
                (threshold > 0.5, 7, 35 mm hr-1).
                Must have same other dimensions as cube.
            prob_vii_cube:
                Radar-derived vertically integrated ice content (VII).
                Must have same x and y dimensions as cube.
                Time should be a scalar coordinate.
                Must have a threshold coordinate with points matching
                self.vii_thresholds.
                Can be <No cube> or None or anything that evaluates to False.

        Returns:
            Output cube containing Nowcast lightning probability.

        Raises:
            iris.exceptions.ConstraintMismatchError:
                If lightning_rate_cube or first_guess_lightning_cube do not
                contain the expected times.
        """
        new_cube_list = iris.cube.CubeList([])
        # Loop over required forecast validity times
        for cube_slice in cube.slices_over("time"):
            this_time = iris_time_to_datetime(
                cube_slice.coord("time").copy())[0]
            lightning_rate_slice = lightning_rate_cube.extract(
                iris.Constraint(time=this_time))
            err_string = "No matching {} cube for {}"
            if not isinstance(lightning_rate_slice, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("lightning", this_time))
            first_guess_slice = extract_nearest_time_point(
                first_guess_lightning_cube,
                this_time,
                allowed_dt_difference=7201)
            first_guess_slice = cube_slice.copy(data=first_guess_slice.data)
            first_guess_slice.coord("forecast_period").convert_units("minutes")
            fcmins = first_guess_slice.coord("forecast_period").points[0]

            # Increase prob(lightning) to Risk 2 (pl_dict[2]) when
            #   lightning nearby (lrt_lev2)
            # (and leave unchanged when condition is not met):
            first_guess_slice.data = np.where(
                (lightning_rate_slice.data >= self.lrt_lev2)
                & (first_guess_slice.data < self.pl_dict[2]),
                self.pl_dict[2],
                first_guess_slice.data,
            )

            # Increase prob(lightning) to Risk 1 (pl_dict[1]) when within
            #   lightning storm (lrt_lev1):
            # (and leave unchanged when condition is not met):
            lratethresh = self.lrt_lev1(fcmins)
            first_guess_slice.data = np.where(
                (lightning_rate_slice.data >= lratethresh)
                & (first_guess_slice.data < self.pl_dict[1]),
                self.pl_dict[1],
                first_guess_slice.data,
            )

            new_cube_list.append(first_guess_slice)

        new_prob_lightning_cube = new_cube_list.merge_cube()
        new_prob_lightning_cube = check_cube_coordinates(
            cube, new_prob_lightning_cube)

        # Apply precipitation adjustments.
        new_prob_lightning_cube = self.apply_precip(new_prob_lightning_cube,
                                                    prob_precip_cube)

        # If we have VII data, increase prob(lightning) accordingly.
        if prob_vii_cube:
            new_prob_lightning_cube = self.apply_ice(new_prob_lightning_cube,
                                                     prob_vii_cube)
        return new_prob_lightning_cube
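The repeated np.where pattern above raises a probability to a floor value where a trigger condition is met and leaves it unchanged otherwise. In isolation, with made-up values standing in for self.pl_dict and self.lrt_lev2:

import numpy as np

prob_lightning = np.array([0.1, 0.4, 0.8])
lightning_rate = np.array([0.9, 0.1, 0.9])
risk_floor = 0.625                    # e.g. self.pl_dict[2]
rate_thresh = 0.5                     # e.g. self.lrt_lev2

prob_lightning = np.where(
    (lightning_rate >= rate_thresh) & (prob_lightning < risk_floor),
    risk_floor,
    prob_lightning,
)
print(prob_lightning)  # [0.625 0.4 0.8]: only the first point is escalated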
Code example #15
    def apply_precip(self, prob_lightning_cube: Cube,
                     prob_precip_cube: Cube) -> Cube:
        """
        Modify Nowcast of lightning probability with precipitation rate
        probabilities at thresholds of 0.5, 7 and 35 mm/h.

        Args:
            prob_lightning_cube:
                First-guess lightning probability.

            prob_precip_cube:
                Nowcast precipitation probability
                (threshold > 0.5, 7., 35. mm hr-1).
                Units of the threshold coord are modified in-place to mm hr-1.

        Returns:
            Output cube containing updated nowcast lightning probability.
            This cube will have the same dimensions and meta-data as
            prob_lightning_cube.

        Raises:
            iris.exceptions.ConstraintMismatchError:
                If prob_precip_cube does not contain the expected thresholds.
        """
        new_cube_list = iris.cube.CubeList([])
        # ensure prob-precip threshold coord is in the expected units
        precip_threshold_coord = find_threshold_coordinate(prob_precip_cube)
        precip_threshold_coord.convert_units("mm hr-1")
        # extract precipitation probabilities at required thresholds
        for cube_slice in prob_lightning_cube.slices_over("time"):
            this_time = iris_time_to_datetime(
                cube_slice.coord("time").copy())[0]
            this_precip = prob_precip_cube.extract(
                iris.Constraint(time=this_time)
                & iris.Constraint(coord_values={
                    precip_threshold_coord:
                    lambda t: isclose(t.point, 0.5)
                }))
            high_precip = prob_precip_cube.extract(
                iris.Constraint(time=this_time)
                & iris.Constraint(coord_values={
                    precip_threshold_coord:
                    lambda t: isclose(t.point, 7.0)
                }))
            torr_precip = prob_precip_cube.extract(
                iris.Constraint(time=this_time)
                & iris.Constraint(coord_values={
                    precip_threshold_coord:
                    lambda t: isclose(t.point, 35.0)
                }))
            err_string = "No matching {} cube for {}"
            if not isinstance(this_precip, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("any precip", this_time))
            if not isinstance(high_precip, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("high precip", this_time))
            if not isinstance(torr_precip, iris.cube.Cube):
                raise ConstraintMismatchError(
                    err_string.format("intense precip", this_time))
            # Increase prob(lightning) to Risk 2 (pl_dict[2]) when
            #   prob(precip > 7mm/hr) > phighthresh
            cube_slice.data = np.where(
                (high_precip.data >= self.phighthresh)
                & (cube_slice.data < self.pl_dict[2]),
                self.pl_dict[2],
                cube_slice.data,
            )
            # Increase prob(lightning) to Risk 1 (pl_dict[1]) when
            #   prob(precip > 35mm/hr) > ptorrthresh
            cube_slice.data = np.where(
                (torr_precip.data >= self.ptorrthresh)
                & (cube_slice.data < self.pl_dict[1]),
                self.pl_dict[1],
                cube_slice.data,
            )

            # Decrease prob(lightning) where prob(precip > 0.5 mm hr-1) is low.
            cube_slice.data = apply_double_scaling(this_precip, cube_slice,
                                                   self.precipthr,
                                                   self.ltngthr)

            new_cube_list.append(cube_slice)

        new_cube = new_cube_list.merge_cube()
        new_cube = check_cube_coordinates(prob_lightning_cube, new_cube)
        return new_cube
Code example #16
    @staticmethod
    def process(cube_ens_wdir):
        """Create a cube containing the wind direction averaged over the
        ensemble realizations.

        Args:
            cube_ens_wdir (iris.cube.Cube):
                Cube containing wind direction from multiple ensemble
                realizations.

        Returns:
            cube_mean_wdir (iris.cube.Cube):
                Cube containing the wind direction averaged from the
                ensemble realizations.
            cube_r_vals (np.ndarray):
                3D array - Radius taken from average complex wind direction
                angle.
            cube_confidence_measure (np.ndarray):
                3D array - The average distance from mean normalised - used
                as a confidence value.

        Raises:
            TypeError: If cube_ens_wdir is not a cube.
        """

        # Any point where the r-value is below the threshold is regarded as
        # containing ambiguous data.
        r_thresh = 0.01

        if not isinstance(cube_ens_wdir, iris.cube.Cube):
            msg = "Wind direction input is not a cube, but {}"
            raise TypeError(msg.format(type(cube_ens_wdir)))

        try:
            cube_ens_wdir.convert_units("degrees")
        except ValueError as err:
            msg = "Input cube cannot be converted to degrees: {}".format(err)
            raise ValueError(msg)

        # Force input cube to float32.
        enforce_float32_precision(cube_ens_wdir)

        # Creates cubelists to hold data.
        wdir_cube_list = iris.cube.CubeList()
        r_vals_cube_list = iris.cube.CubeList()
        confidence_measure_cube_list = iris.cube.CubeList()

        y_coord_name = cube_ens_wdir.coord(axis="y").name()
        x_coord_name = cube_ens_wdir.coord(axis="x").name()
        for slice_ens_wdir in cube_ens_wdir.slices(["realization",
                                                    y_coord_name,
                                                    x_coord_name]):
            # Extract wind direction data.
            wind_dir_deg = slice_ens_wdir.data
            realization_axis = slice_ens_wdir.coord_dims("realization")[0]

            # Copy the input cube and remove the realization dimension to
            # create cubes for storing results.
            slice_mean_wdir = next(slice_ens_wdir.slices_over("realization"))
            slice_mean_wdir.remove_coord("realization")

            # Convert wind direction from degrees to complex numbers.
            wind_dir_complex = WindDirection.deg_to_complex(wind_dir_deg)

            # Find the complex average -  which actually signifies a point
            # between all of the data points in POLAR coordinates.
            # NOT the average DEGREE ANGLE.
            wind_dir_complex_mean = np.mean(wind_dir_complex,
                                            axis=realization_axis)

            # Convert complex average values to degrees to produce average
            # wind direction.
            wind_dir_deg_mean = WindDirection.complex_to_deg(
                wind_dir_complex_mean)

            # Find radius values for wind direction average.
            r_vals = WindDirection.find_r_values(wind_dir_complex_mean)

            # Calculate the confidence measure based on the difference
            # between the complex average and the individual ensemble
            # realizations.
            # TODO: This will still need some further investigation.
            #       This will be the subject of another ticket.
            confidence_measure = WindDirection.calc_confidence_measure(
                wind_dir_complex, wind_dir_deg_mean, r_vals, r_thresh,
                realization_axis)

            # Find any meaningless averages and substitute them with
            # the wind direction taken from the first ensemble realization.
            wind_dir_deg_mean = WindDirection.wind_dir_decider(
                wind_dir_deg, wind_dir_deg_mean, r_vals, r_thresh)

            # Save data into cubes (create new cubes for r and
            # confidence measure data).
            slice_mean_wdir.data = wind_dir_deg_mean
            slice_r_vals = slice_mean_wdir.copy(data=r_vals)
            slice_confidence_measure = (
                slice_mean_wdir.copy(data=confidence_measure))
            # Append to cubelists.
            wdir_cube_list.append(slice_mean_wdir)
            r_vals_cube_list.append(slice_r_vals)
            confidence_measure_cube_list.append(slice_confidence_measure)

        # Combine cubelists into cube.
        cube_mean_wdir = wdir_cube_list.merge_cube()
        cube_r_vals = r_vals_cube_list.merge_cube()
        cube_confidence_measure = confidence_measure_cube_list.merge_cube()

        # Check that the dimensionality of coordinates of the output cube
        # matches the input cube.
        first_slice = next(cube_ens_wdir.slices_over(["realization"]))
        cube_mean_wdir = check_cube_coordinates(first_slice, cube_mean_wdir)

        # Change cube identifiers.
        cube_mean_wdir.add_cell_method(CellMethod("mean",
                                                  coords="realization"))
        cube_r_vals.long_name = "radius_of_complex_average_wind_from_direction"
        cube_r_vals.units = None
        cube_confidence_measure.long_name = (
            "confidence_measure_of_wind_from_direction")
        cube_confidence_measure.units = None

        return cube_mean_wdir, cube_r_vals, cube_confidence_measure
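The complex-average idea is easiest to see at a single point: directions become unit vectors, the vector mean is taken, and its angle and magnitude give the mean direction and the r value. A standalone sketch, not the WindDirection helpers themselves:

import numpy as np

angles_deg = np.array([350.0, 10.0])           # realizations straddling north
z = np.exp(1j * np.deg2rad(angles_deg))        # degrees -> unit complex numbers
z_mean = z.mean()

mean_direction = np.rad2deg(np.angle(z_mean)) % 360.0
r_value = np.abs(z_mean)
print(mean_direction)  # 0.0, where a naive degree average would give 180.0
print(r_value)         # ~0.98: the realizations agree closely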
Code example #17
File: test_cube_checker.py  Project: zfan001/improver
 def test_coord_promotion_and_reordering(self):
     """Test case in which a scalar coordinate are promoted but the order
     must be corrected to match the progenitor cube."""
     self.cube.transpose(new_order=[1, 0, 2])
     result = check_cube_coordinates(self.cube, self.squeezed_cube)
     self.assertEqual(result.dim_coords, self.cube.dim_coords)
Code example #18
File: test_cube_checker.py  Project: zfan001/improver
 def test_basic_transpose(self):
     """Test when we only want to transpose the new_cube."""
     new_cube = self.cube.copy()
     new_cube.transpose([2, 1, 0])
     result = check_cube_coordinates(self.cube, new_cube)
     self.assertEqual(result.dim_coords, self.cube.dim_coords)
Code example #19
File: nbhood.py  Project: zfan001/improver
    def process(self, cube: Cube, mask_cube: Optional[Cube] = None) -> Cube:
        """
        Supply neighbourhood processing method, in order to smooth the
        input cube.

        Args:
            cube:
                Cube to apply a neighbourhood processing method to, in order to
                generate a smoother field.
            mask_cube:
                Cube containing the array to be used as a mask.

        Returns:
            Cube after applying a neighbourhood processing method, so that
            the resulting field is smoothed.
        """
        if not getattr(self.neighbourhood_method, "run", None) or not callable(
                self.neighbourhood_method.run):
            msg = ("{} is not valid as a neighbourhood_method. "
                   "Please choose a valid neighbourhood_method with a "
                   "run method.".format(self.neighbourhood_method))
            raise ValueError(msg)

        # Check if a dimensional realization coordinate exists. If so, the
        # cube is sliced, so that it becomes a scalar coordinate.
        try:
            cube.coord("realization", dim_coords=True)
        except iris.exceptions.CoordinateNotFoundError:
            slices_over_realization = [cube]
        else:
            slices_over_realization = cube.slices_over("realization")

        if np.isnan(cube.data).any():
            raise ValueError("Error: NaN detected in input cube data")

        cubes_real = []
        for cube_realization in slices_over_realization:
            if self.lead_times is None:
                cube_new = self.neighbourhood_method.run(cube_realization,
                                                         self.radii,
                                                         mask_cube=mask_cube)
            else:
                # Interpolate to find the radius at each required lead time.
                fp_coord = forecast_period_coord(cube_realization)
                fp_coord.convert_units("hours")
                required_radii = self._find_radii(
                    cube_lead_times=fp_coord.points)

                cubes_time = iris.cube.CubeList([])
                # Find the number of grid cells required for creating the
                # neighbourhood, and then apply the neighbourhood
                # processing method to smooth the field.
                for cube_slice, radius in zip(
                        cube_realization.slices_over("time"), required_radii):
                    cube_slice = self.neighbourhood_method.run(
                        cube_slice, radius, mask_cube=mask_cube)
                    cubes_time.append(cube_slice)
                cube_new = MergeCubes()(cubes_time)

            cubes_real.append(cube_new)

        if len(cubes_real) > 1:
            combined_cube = MergeCubes()(cubes_real,
                                         slice_over_realization=True)
        else:
            combined_cube = cubes_real[0]

        # Promote dimensional coordinates that used to be present.
        exception_coordinates = find_dimension_coordinate_mismatch(
            cube, combined_cube, two_way_mismatch=False)
        combined_cube = check_cube_coordinates(
            cube, combined_cube, exception_coordinates=exception_coordinates)

        return combined_cube
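Per the comment above, _find_radii interpolates the configured radii to the forecast lead times. A guess at the idea with np.interp, using made-up numbers:

import numpy as np

configured_lead_times = np.array([1.0, 3.0, 6.0])         # hours
configured_radii = np.array([10000.0, 20000.0, 30000.0])  # metres

cube_lead_times = np.array([2.0, 4.5])
required_radii = np.interp(cube_lead_times, configured_lead_times, configured_radii)
print(required_radii)  # [15000. 25000.]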
Code example #20
    @staticmethod
    def rank_ecc(post_processed_forecast_percentiles,
                 raw_forecast_realizations,
                 random_ordering=False,
                 random_seed=None):
        """
        Function to apply Ensemble Copula Coupling. This ranks the
        post-processed forecast realizations based on a ranking determined from
        the raw forecast realizations.

        Args:
            post_processed_forecast_percentiles (iris.cube.Cube):
                Cube for post-processed percentiles. The percentiles are
                assumed to be in ascending order.
            raw_forecast_realizations (iris.cube.Cube):
                Cube containing the raw (not post-processed) forecasts.
                The probabilistic dimension is assumed to be the zeroth
                dimension.
            random_ordering (bool):
                If random_ordering is True, the post-processed forecasts are
                reordered randomly, rather than using the ordering of the
                raw ensemble.
            random_seed (int or None):
                If random_seed is an integer, the integer value is used for
                the random seed.
                If random_seed is None, no random seed is set, so the random
                values generated are not reproducible.

        Returns:
            iris.cube.Cube:
                Cube for post-processed realizations where at a particular grid
                point, the ranking of the values within the ensemble matches
                the ranking from the raw ensemble.

        """
        results = iris.cube.CubeList([])
        for rawfc, calfc in zip(
                raw_forecast_realizations.slices_over("time"),
                post_processed_forecast_percentiles.slices_over("time")):
            if random_seed is not None:
                random_seed = int(random_seed)
            random_state = np.random.RandomState(random_seed)
            random_data = random_state.rand(*rawfc.data.shape)
            if random_ordering:
                # Returns the indices that would sort the array.
                # As these indices are from a random dataset, only an argsort
                # is used.
                ranking = np.argsort(random_data, axis=0)
            else:
                # Lexsort returns the indices sorted firstly by the
                # primary key, the raw forecast data (unless random_ordering
                # is enabled), and secondly by the secondary key, an array of
                # random data, in order to split tied values randomly.
                sorting_index = np.lexsort((random_data, rawfc.data), axis=0)
                # Returns the indices that would sort the array.
                ranking = np.argsort(sorting_index, axis=0)
            # Index the post-processed forecast data using the ranking array.
            # np.choose allows indexing of a 3d array using a 3d array.
            calfc.data = np.choose(ranking, calfc.data)
            results.append(calfc)
        # Ensure we haven't lost any dimensional coordinates with only one
        # value in.
        results = results.merge_cube()
        results = check_cube_coordinates(post_processed_forecast_percentiles,
                                         results)
        return results
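The reordering core of rank_ecc, reduced to one grid point: lexsort ranks the raw ensemble (random data breaks ties), and np.choose rearranges the ascending calibrated values into that ranking. A self-contained sketch:

import numpy as np

rng = np.random.RandomState(0)
raw = np.array([2.0, 0.5, 1.0])          # raw ensemble members
calibrated = np.array([0.1, 0.2, 0.3])   # post-processed values, ascending

sorting_index = np.lexsort((rng.rand(raw.size), raw), axis=0)
ranking = np.argsort(sorting_index, axis=0)
print(np.choose(ranking, calibrated))
# [0.3 0.1 0.2]: the largest raw member receives the largest calibrated value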
Code example #21
    def run(self, cube, radius, mask_cube=None):
        """
        Method to apply a circular kernel to the data within the input cube in
        order to derive percentiles over the kernel.

        Args:
            cube (iris.cube.Cube):
                Cube containing array to apply processing to.
            radius (float):
                Radius in metres for use in specifying the number of
                grid cells used to create a circular neighbourhood.
            mask_cube (iris.cube.Cube or None):
                Cube containing the array to be used as a mask.

        Returns:
            iris.cube.Cube:
                Cube containing the percentile fields.
                Has percentile as an added dimension.

        """
        if mask_cube is not None:
            msg = ("The use of a mask cube with a circular kernel is not "
                   "yet implemented.")
            raise NotImplementedError(msg)

        # Check that the cube has an equal area grid.
        check_if_grid_is_equal_area(cube)
        # Take data array and identify X and Y axes indices
        grid_cells_x = convert_distance_into_number_of_grid_cells(
            cube, radius, max_distance_in_grid_cells=MAX_RADIUS_IN_GRID_CELLS)
        ranges_tuple = (grid_cells_x, grid_cells_x)
        ranges_xy = np.array(ranges_tuple)
        kernel = circular_kernel(ranges_xy, ranges_tuple, weighted_mode=False)
        # Loop over each 2D slice to reduce memory demand and derive
        # percentiles on the kernel. Will return an extra dimension.
        pctcubelist = iris.cube.CubeList()
        for slice_2d in cube.slices(['projection_y_coordinate',
                                     'projection_x_coordinate']):
            pctcubelist.append(
                self.pad_and_unpad_cube(slice_2d, kernel))
        result = pctcubelist.merge_cube()
        exception_coordinates = (
            find_dimension_coordinate_mismatch(
                cube, result, two_way_mismatch=False))
        result = (
            check_cube_coordinates(
                cube, result, exception_coordinates=exception_coordinates))

        # Rearrange the cube so that the coordinate order is:
        # realization, percentile, then any other coordinates.
        required_order = []
        if result.coords("realization"):
            if result.coords("realization", dimensions=[]):
                result = iris.util.new_axis(result, "realization")
            required_order.append(result.coord_dims("realization")[0])
        if result.coords("percentile"):
            required_order.append(
                result.coord_dims("percentile")[0])
        other_coords = []
        for coord in result.dim_coords:
            if coord.name() not in ["realization",
                                    "percentile"]:
                other_coords.append(result.coord_dims(coord.name())[0])
        required_order.extend(other_coords)
        result.transpose(required_order)

        return result
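For intuition, a circular kernel of this kind can be sketched in plain NumPy. The helper below is a hypothetical stand-in for the library's circular_kernel (which additionally supports a weighted mode); it simply marks the grid cells whose centres fall within the radius:

import numpy as np

def circular_kernel_sketch(grid_cells):
    """Hypothetical sketch: a (2n+1) x (2n+1) array of ones inside a
    circle of radius n grid cells and zeros outside."""
    n = grid_cells
    y, x = np.ogrid[-n:n + 1, -n:n + 1]
    return (x ** 2 + y ** 2 <= n ** 2).astype(float)

print(circular_kernel_sketch(2))
# [[0. 0. 1. 0. 0.]
#  [0. 1. 1. 1. 0.]
#  [1. 1. 1. 1. 1.]
#  [0. 1. 1. 1. 0.]
#  [0. 0. 1. 0. 0.]]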
Code example #22
    def process(
        self,
        cube,
        smoothing_coefficients_x=None,
        smoothing_coefficients_y=None,
        mask_cube=None,
    ):
        """
        Set up the smoothing_coefficient parameters and run the recursive
        filter. The steps undertaken are:

        1. Split the input cube into slices determined by the co-ordinates in
           the x and y directions.
        2. Construct an array of filter parameters (smoothing_coefficients_x
           and smoothing_coefficients_y) for each cube slice that are used to
           weight the recursive filter in the x- and y-directions.
        3. Pad each cube slice with a square-neighbourhood halo and apply
           the recursive filter for the required number of iterations.
        4. Remove the halo from the cube slice and append the recursed cube
           slice to a 'recursed cube'.
        5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.
        6. Modify the 'new cube' so that its scalar dimension co-ordinates are
           consistent with those in the original input cube.
        7. Return the 'new cube' which now contains the recursively filtered
           values for the original input cube.

        Args:
            cube (iris.cube.Cube):
                Cube containing the input data to which the recursive filter
                will be applied.
            smoothing_coefficients_x (iris.cube.Cube or None):
                Cube containing array of smoothing_coefficient values that will
                be used when applying the recursive filter along the x-axis.
            smoothing_coefficients_y (iris.cube.Cube or None):
                Cube containing array of smoothing_coefficient values that will
                be used when applying the recursive filter along the y-axis.
            mask_cube (iris.cube.Cube or None):
                Cube containing an external mask to apply to the cube before
                applying the recursive filter.

        Returns:
            iris.cube.Cube:
                Cube containing the smoothed field after the recursive filter
                method has been applied.

        Raises:
            ValueError: If any smoothing_coefficient cube value is over 0.5
        """
        for smoothing_coefficient in (
                smoothing_coefficients_x,
                smoothing_coefficients_y,
        ):
            if (smoothing_coefficient is not None
                    and (smoothing_coefficient.data > 0.5).any()):
                raise ValueError(
                    "All smoothing_coefficient values must be less than or "
                    "equal to 0.5. A large smoothing_coefficient value leads "
                    "to poor conservation of probabilities")

        cube_format = next(
            cube.slices([cube.coord(axis="y"),
                         cube.coord(axis="x")]))
        smoothing_coefficients_x = self._set_smoothing_coefficients(
            cube_format, self.smoothing_coefficient_x,
            smoothing_coefficients_x)
        smoothing_coefficients_y = self._set_smoothing_coefficients(
            cube_format, self.smoothing_coefficient_y,
            smoothing_coefficients_y)

        recursed_cube = iris.cube.CubeList()
        for output in cube.slices([cube.coord(axis="y"),
                                   cube.coord(axis="x")]):

            # Set up the cube and mask for processing. If no mask is
            # provided, a mask of ones is created; the data are set to
            # 0.0 wherever the mask is 0.0 or the data are NaN.
            (
                output,
                mask,
                nan_array,
            ) = SquareNeighbourhood().set_up_cubes_to_be_neighbourhooded(
                output, mask_cube)
            mask = mask.data.squeeze()

            padded_cube = pad_cube_with_halo(output,
                                             2 * self.edge_width,
                                             2 * self.edge_width,
                                             pad_method="symmetric")

            new_cube = self._run_recursion(
                padded_cube,
                smoothing_coefficients_x,
                smoothing_coefficients_y,
                self.iterations,
            )
            new_cube = remove_halo_from_cube(new_cube, 2 * self.edge_width,
                                             2 * self.edge_width)
            if self.re_mask:
                new_cube.data[nan_array.astype(bool)] = np.nan
                new_cube.data = np.ma.masked_array(new_cube.data,
                                                   mask=np.logical_not(mask))

            recursed_cube.append(new_cube)

        new_cube = recursed_cube.merge_cube()
        new_cube = check_cube_coordinates(cube, new_cube)

        return new_cube
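The halo handling around the recursion can be shown with plain NumPy: np.pad with mode="symmetric" mirrors the data at the edges, matching the pad_method="symmetric" used above, and slicing removes the halo again. This is a sketch; the library's pad_cube_with_halo and remove_halo_from_cube also manage the cube's coordinates.

import numpy as np

edge_width = 1
halo = 2 * edge_width  # the plugin pads by twice the edge width

data = np.array([[1.0, 2.0],
                 [3.0, 4.0]])

# Pad with a mirrored halo so the filter has context at the edges.
padded = np.pad(data, halo, mode="symmetric")

# ... the recursive filter would run over `padded` here ...

# Strip the halo to recover the original grid shape.
unpadded = padded[halo:-halo, halo:-halo]
assert unpadded.shape == data.shape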
Code example #23
File: recursive_filter.py  Project: pnijhara/improver
    def process(
        self,
        cube,
        smoothing_coefficients_x,
        smoothing_coefficients_y,
        mask_cube=None,
    ):
        """
        Set up the smoothing_coefficient parameters and run the recursive
        filter. Smoothing coefficients can be generated using
        :func:`~improver.utilities.ancillary_creation.OrographicSmoothingCoefficients`
        and :func:`~improver.cli.generate_orographic_smoothing_coefficients`.
        The steps undertaken are:

        1. Split the input cube into slices determined by the co-ordinates in
           the x and y directions.
        2. Construct an array of filter parameters (smoothing_coefficients_x
           and smoothing_coefficients_y) for each cube slice that are used to
           weight the recursive filter in the x- and y-directions.
        3. Pad each cube slice with a square-neighbourhood halo and apply
           the recursive filter for the required number of iterations.
        4. Remove the halo from the cube slice and append the recursed cube
           slice to a 'recursed cube'.
        5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.
        6. Modify the 'new cube' so that its scalar dimension co-ordinates are
           consistent with those in the original input cube.
        7. Return the 'new cube' which now contains the recursively filtered
           values for the original input cube.

        The smoothing_coefficient determines how much of a filtered cell's
        new value comes from the current value at that cell and how much
        comes from the adjacent cell preceding it in the direction in which
        filtering is being applied. A larger smoothing_coefficient means a
        greater proportion of a cell's new value comes from its neighbouring
        cell.

        Args:
            cube (iris.cube.Cube):
                Cube containing the input data to which the recursive filter
                will be applied.
            smoothing_coefficients_x (iris.cube.Cube):
                Cube containing array of smoothing_coefficient values that will
                be used when applying the recursive filter along the x-axis.
            smoothing_coefficients_y (iris.cube.Cube):
                Cube containing array of smoothing_coefficient values that will
                be used when applying the recursive filter along the y-axis.
            mask_cube (iris.cube.Cube or None):
                Cube containing an external mask to apply to the cube before
                applying the recursive filter.

        Returns:
            iris.cube.Cube:
                Cube containing the smoothed field after the recursive filter
                method has been applied.

        Raises:
            ValueError: If any smoothing_coefficient cube value is over 0.5
        """
        for smoothing_coefficient in (
                smoothing_coefficients_x,
                smoothing_coefficients_y,
        ):
            if (smoothing_coefficient.data > 0.5).any():
                raise ValueError(
                    "All smoothing_coefficient values must be less than or "
                    "equal to 0.5. A large smoothing_coefficient value leads "
                    "to poor conservation of probabilities")
        cube_format = next(
            cube.slices([cube.coord(axis="y"),
                         cube.coord(axis="x")]))
        self._validate_smoothing_coefficients(cube_format,
                                              smoothing_coefficients_x)
        smoothing_coefficients_x = self._set_smoothing_coefficients(
            smoothing_coefficients_x)
        self._validate_smoothing_coefficients(cube_format,
                                              smoothing_coefficients_y)
        smoothing_coefficients_y = self._set_smoothing_coefficients(
            smoothing_coefficients_y)

        recursed_cube = iris.cube.CubeList()
        for output in cube.slices([cube.coord(axis="y"),
                                   cube.coord(axis="x")]):

            # Set up the cube and mask for processing. If no mask is
            # provided, a mask of ones is created; the data are set to
            # 0.0 wherever the mask is 0.0 or the data are NaN.
            output, mask, nan_array = self.set_up_cubes(output, mask_cube)
            mask = mask.data.squeeze()

            padded_cube = pad_cube_with_halo(output,
                                             2 * self.edge_width,
                                             2 * self.edge_width,
                                             pad_method="symmetric")

            new_cube = self._run_recursion(
                padded_cube,
                smoothing_coefficients_x,
                smoothing_coefficients_y,
                self.iterations,
            )
            new_cube = remove_halo_from_cube(new_cube, 2 * self.edge_width,
                                             2 * self.edge_width)
            if self.re_mask:
                new_cube.data[nan_array] = np.nan
                new_cube.data = np.ma.masked_array(new_cube.data,
                                                   mask=np.logical_not(mask),
                                                   copy=False)

            recursed_cube.append(new_cube)

        new_cube = recursed_cube.merge_cube()
        new_cube = check_cube_coordinates(cube, new_cube)

        return new_cube
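The smoothing-coefficient description above corresponds to a first-order recursive filter. A minimal 1-D sketch (hypothetical, not the library's implementation) makes the blending explicit:

import numpy as np

def recursive_filter_1d(data, smoothing_coefficient):
    """One forward pass: each output value blends the current input
    with the previously filtered value."""
    alpha = smoothing_coefficient
    out = np.array(data, dtype=float)
    for i in range(1, out.size):
        out[i] = (1.0 - alpha) * out[i] + alpha * out[i - 1]
    return out

spike = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
print(recursive_filter_1d(spike, 0.4))
# [0.    0.    0.6   0.24  0.096]

The library applies such passes along both the x and y axes, repeated for the configured number of iterations; the larger the coefficient, the further the spike is smeared.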
Code example #24
    def process(self, cube: Cube) -> Cube:
        """
        Produces the vicinity processed data. The input data is sliced to
        yield y-x slices to which the maximum_within_vicinity method is applied.
        The different vicinity radii (if multiple) are looped over and a
        coordinate recording the radius used is added to each resulting cube.
        A single cube is returned with the leading coordinates of the input
        cube preserved. If a single vicinity radius is provided, the returned
        cube will include a new scalar radius_of_vicinity coordinate. If
        multiple radii are provided, this coordinate will be a dimension
        coordinate following any probabilistic / realization coordinates.

        Args:
            cube:
                Thresholded cube.

        Returns:
            Cube containing the occurrences within a vicinity for each radius,
            calculated for each yx slice, which have been merged to yield a
            single cube.

        Raises:
            ValueError: Cube and land mask have differing spatial coordinates.
        """
        if self.land_mask_cube and not spatial_coords_match(
            [cube, self.land_mask_cube]):
            raise ValueError(
                "The supplied cube and land mask do not have the same "
                "spatial coordinates"
            )

        if not self.native_grid_point_radius:
            grid_point_radii = [
                distance_to_number_of_grid_cells(cube, radius)
                for radius in self.radii
            ]
        else:
            grid_point_radii = self.radii

        radii_cubes = CubeList()

        # List of non-spatial dimensions to restore as leading on the output.
        leading_dimensions = [
            crd.name() for crd in cube.coords(dim_coords=True)
            if not crd.coord_system
        ]

        for radius, grid_point_radius in zip(self.radii, grid_point_radii):
            max_cubes = CubeList([])
            for cube_slice in cube.slices(
                [cube.coord(axis="y"),
                 cube.coord(axis="x")]):
                max_cubes.append(
                    self.maximum_within_vicinity(cube_slice,
                                                 grid_point_radius))
            result_cube = max_cubes.merge_cube()

            # Put dimensions back if they were there before.
            result_cube = check_cube_coordinates(cube, result_cube)

            # Add a coordinate recording the vicinity radius applied to the data.
            self._add_vicinity_coordinate(result_cube, radius)

            radii_cubes.append(result_cube)

        # Merge cubes produced for each vicinity radius.
        result_cube = radii_cubes.merge_cube()

        # Enforce order of leading dimensions on the output to match the input.
        enforce_coordinate_ordering(result_cube, leading_dimensions)

        if is_probability(result_cube):
            result_cube.rename(in_vicinity_name_format(result_cube.name()))
        else:
            result_cube.rename(f"{result_cube.name()}_in_vicinity")

        return result_cube
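The per-slice maximum_within_vicinity step is, conceptually, a maximum filter over a neighbourhood. A hedged sketch with scipy.ndimage follows; the square footprint is an assumption made for illustration, as the plugin's vicinity shape may differ:

import numpy as np
from scipy.ndimage import maximum_filter

grid_point_radius = 1
occurrences = np.array([[0, 0, 0, 0],
                        [0, 1, 0, 0],
                        [0, 0, 0, 0],
                        [0, 0, 0, 1]])

# Each cell takes the maximum over a (2r + 1) x (2r + 1) neighbourhood,
# spreading each occurrence into its vicinity.
in_vicinity = maximum_filter(occurrences, size=2 * grid_point_radius + 1)
print(in_vicinity)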
Code example #25
    def process(self, cube_ens_wdir):
        """Create a cube containing the wind direction averaged over the
        ensemble realizations.

        Args:
            cube_ens_wdir (iris.cube.Cube):
                Cube containing wind direction from multiple ensemble
                realizations.

        Returns:
            (tuple): tuple containing:
                cube_mean_wdir (iris.cube.Cube):
                    Cube containing the wind direction averaged over the
                    ensemble realizations.
                cube_r_vals (iris.cube.Cube):
                    Cube of the radius of the average complex wind
                    direction angle.
                cube_confidence_measure (iris.cube.Cube):
                    Cube of the average distance from the mean, normalised
                    for use as a confidence measure.

        Raises:
            TypeError: If cube_ens_wdir is not a cube.

        """

        if not isinstance(cube_ens_wdir, iris.cube.Cube):
            msg = "Wind direction input is not a cube, but {}"
            raise TypeError(msg.format(type(cube_ens_wdir)))

        try:
            cube_ens_wdir.convert_units("degrees")
        except ValueError as err:
            msg = "Input cube cannot be converted to degrees: {}".format(err)
            raise ValueError(msg)

        self.n_realizations = len(cube_ens_wdir.coord("realization").points)
        y_coord_name = cube_ens_wdir.coord(axis="y").name()
        x_coord_name = cube_ens_wdir.coord(axis="x").name()
        for wdir_slice in cube_ens_wdir.slices(
            ["realization", y_coord_name, x_coord_name]):
            self._reset()
            # Extract wind direction data.
            self.wdir_complex = self.deg_to_complex(wdir_slice.data)
            (self.realization_axis, ) = wdir_slice.coord_dims("realization")

            # Copy the input cube and remove the realization dimension to
            # create cubes for storing results.
            self.wdir_slice_mean = next(wdir_slice.slices_over("realization"))
            self.wdir_slice_mean.remove_coord("realization")

            # Derive average wind direction.
            self.calc_wind_dir_mean()

            # Find radius values for wind direction average.
            self.find_r_values()

            # Calculate the confidence measure based on the difference
            # between the complex average and the individual ensemble
            # realizations.
            self.calc_confidence_measure()

            # Find any meaningless averages and substitute the wind
            # direction taken from the first ensemble realization.
            # The mask is True where r values are below the threshold.
            where_low_r = self.r_vals_slice.data < self.r_thresh
            # If any point in the array has a poor r value, trigger the
            # decider function.
            if where_low_r.any():
                self.wind_dir_decider(where_low_r, wdir_slice)

            # Append to cubelists.
            self.wdir_cube_list.append(self.wdir_slice_mean)
            self.r_vals_cube_list.append(self.r_vals_slice)
            self.confidence_measure_cube_list.append(self.confidence_slice)

        # Combine cubelists into cube.
        cube_mean_wdir = self.wdir_cube_list.merge_cube()
        cube_r_vals = self.r_vals_cube_list.merge_cube()
        cube_confidence_measure = (
            self.confidence_measure_cube_list.merge_cube())

        # Check that the dimensionality of coordinates of the output cube
        # matches the input cube.
        first_slice = next(cube_ens_wdir.slices_over(["realization"]))
        cube_mean_wdir = check_cube_coordinates(first_slice, cube_mean_wdir)

        # Change cube identifiers.
        cube_mean_wdir.add_cell_method(CellMethod("mean",
                                                  coords="realization"))
        cube_r_vals.long_name = "radius_of_complex_average_wind_from_direction"
        cube_r_vals.units = None
        cube_confidence_measure.long_name = "confidence_measure_of_wind_from_direction"
        cube_confidence_measure.units = None

        return cube_mean_wdir, cube_r_vals, cube_confidence_measure
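The complex-average trick used by this plugin can be demonstrated in isolation. In the sketch below (made-up data, and a simplified angle convention compared with the plugin's deg_to_complex), directions are mapped to unit vectors, averaged, and mapped back; this makes 350 and 10 degrees average to roughly 0 degrees rather than 180, and the magnitude r of the mean vector measures how well the realizations agree:

import numpy as np

directions_deg = np.array([350.0, 10.0, 20.0])

# Map each direction onto the unit circle and average the vectors.
mean_vector = np.mean(np.exp(1j * np.deg2rad(directions_deg)))

mean_direction = np.rad2deg(np.angle(mean_vector)) % 360
r_value = np.abs(mean_vector)

print(mean_direction)  # ~6.7 degrees
print(r_value)         # ~0.98: realizations agree closely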
Code example #26
File: nbhood.py  Project: hrance/improver
    def process(self, cube):
        """
        Apply the neighbourhood processing method in order to smooth the
        input cube.

        Parameters
        ----------
        cube : iris.cube.Cube
            Cube to apply a neighbourhood processing method to, in order to
            generate a smoother field.

        Returns
        -------
        cube : iris.cube.Cube
            Cube after applying a neighbourhood processing method, so that the
            resulting field is smoothed.

        """
        if (not getattr(self.neighbourhood_method, "run", None)
                or not callable(self.neighbourhood_method.run)):
            msg = ("{} is not valid as a neighbourhood_method. "
                   "Please choose a valid neighbourhood_method with a "
                   "run method.".format(self.neighbourhood_method))
            raise ValueError(msg)

        # Check if the realization coordinate exists. If there are multiple
        # values for the realization, then an exception is raised. Otherwise,
        # the cube is sliced, so that the realization becomes a scalar
        # coordinate.
        try:
            realiz_coord = cube.coord('realization')
        except iris.exceptions.CoordinateNotFoundError:
            if 'source_realizations' in cube.attributes:
                num_ens = len(cube.attributes['source_realizations'])
            else:
                num_ens = 1.0
            slices_over_realization = [cube]
        else:
            num_ens = len(realiz_coord.points)
            slices_over_realization = cube.slices_over("realization")
            if 'source_realizations' in cube.attributes:
                msg = ("Realizations and attribute source_realizations "
                       "should not both be set in input cube")
                raise ValueError(msg)

        if np.isnan(cube.data).any():
            raise ValueError("Error: NaN detected in input cube data")

        cubelist = iris.cube.CubeList([])
        for cube_realization in slices_over_realization:
            if self.lead_times is None:
                radius = self._find_radii(num_ens)
                cube_new = self.neighbourhood_method.run(
                    cube_realization, radius)
            else:
                cube_lead_times = find_required_lead_times(cube_realization)
                # Interpolate to find the radius at each required lead time.
                required_radii = self._find_radii(
                    num_ens, cube_lead_times=cube_lead_times)

                cubes = iris.cube.CubeList([])
                # Find the number of grid cells required for creating the
                # neighbourhood, and then apply the neighbourhood
                # processing method to smooth the field.
                for cube_slice, radius in zip(
                        cube_realization.slices_over("time"), required_radii):
                    cube_slice = self.neighbourhood_method.run(
                        cube_slice, radius)
                    cube_slice = iris.util.new_axis(cube_slice, "time")
                    cubes.append(cube_slice)
                cube_new = concatenate_cubes(cubes,
                                             coords_to_slice_over=["time"])
            if cube_new.coords("realization", dim_coords=False):
                cube_new = iris.util.new_axis(cube_new, "realization")
            cubelist.append(cube_new)
        combined_cube = cubelist.concatenate_cube()
        # Promote dimensional coordinates that have been demoted to scalars.
        exception_coordinates = (find_dimension_coordinate_mismatch(
            cube, combined_cube, two_way_mismatch=False))
        combined_cube = check_cube_coordinates(
            cube, combined_cube, exception_coordinates=exception_coordinates)
        return combined_cube
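The lead-time-dependent radius lookup hidden inside _find_radii above can be sketched with np.interp; the lead-time/radius pairs here are hypothetical configuration values:

import numpy as np

# Hypothetical configuration: radii defined at a few lead times (hours).
config_lead_times = np.array([0, 6, 12])
config_radii = np.array([8000.0, 10000.0, 14000.0])  # metres

# Lead times present in the cube being processed.
cube_lead_times = np.array([0, 3, 6, 9, 12])

required_radii = np.interp(cube_lead_times, config_lead_times, config_radii)
print(required_radii)  # [ 8000.  9000. 10000. 12000. 14000.]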
Code example #27
    def test_basic(self):
        """Test returns iris.cube.Cube."""
        cube = set_up_cube()
        result = check_cube_coordinates(cube, cube)
        self.assertIsInstance(result, Cube)
Code example #28
    def process(self, cube, alphas_x=None, alphas_y=None, mask_cube=None):
        """
        Set up the alpha parameters and run the recursive filter.

        The steps undertaken are:

        1. Split the input cube into slices determined by the co-ordinates in
           the x and y directions.
        2. Construct an array of filter parameters (alphas_x and alphas_y) for
           each cube slice that are used to weight the recursive filter in
           the x- and y-directions.
        3. Pad each cube slice with a square-neighbourhood halo and apply
           the recursive filter for the required number of iterations.
        4. Remove the halo from the cube slice and append the recursed cube
           slice to a 'recursed cube'.
        5. Merge all the cube slices in the 'recursed cube' into a 'new cube'.
        6. Modify the 'new cube' so that its scalar dimension co-ordinates are
           consistent with those in the original input cube.
        7. Return the 'new cube' which now contains the recursively filtered
           values for the original input cube.

        Args:
            cube (iris.cube.Cube):
                Cube containing the input data to which the recursive filter
                will be applied.

        Keyword Args:
            alphas_x (iris.cube.Cube or None):
                Cube containing array of alpha values that will be used when
                applying the recursive filter along the x-axis.
            alphas_y (iris.cube.Cube or None):
                Cube containing array of alpha values that will be used when
                applying the recursive filter along the y-axis.
            mask_cube (iris.cube.Cube or None):
                Cube containing an external mask to apply to the cube before
                applying the recursive filter.

        Returns:
            new_cube (iris.cube.Cube):
                Cube containing the smoothed field after the recursive filter
                method has been applied.
        """
        cube_format = next(
            cube.slices([cube.coord(axis='y'),
                         cube.coord(axis='x')]))
        alphas_x = self.set_alphas(cube_format, self.alpha_x, alphas_x)
        alphas_y = self.set_alphas(cube_format, self.alpha_y, alphas_y)

        # Extract mask if present on input cube or provided separately.
        try:
            mask, = SquareNeighbourhood._set_up_cubes_to_be_neighbourhooded(
                cube, mask_cube).extract('mask_data')
            mask = mask.data.squeeze()
        except ValueError:
            mask = np.ones(cube_format.data.shape)

        recursed_cube = iris.cube.CubeList()
        for output in cube.slices([cube.coord(axis='y'),
                                   cube.coord(axis='x')]):

            # Use mask to zero masked areas.
            output.data = output.data * mask
            # Zero any remaining NaN values not covered by mask.
            output.data = np.nan_to_num(output.data)

            padded_cube = SquareNeighbourhood().pad_cube_with_halo(
                output, self.edge_width, self.edge_width)
            new_cube = self.run_recursion(padded_cube, alphas_x, alphas_y,
                                          self.iterations)
            new_cube = SquareNeighbourhood().remove_halo_from_cube(
                new_cube, self.edge_width, self.edge_width)
            if self.re_mask:
                new_cube.data = np.ma.masked_array(new_cube.data,
                                                   mask=np.logical_not(mask))
            recursed_cube.append(new_cube)

        new_cube = recursed_cube.merge_cube()
        new_cube = check_cube_coordinates(cube, new_cube)

        return new_cube
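The mask handling in this older variant (zero out masked cells, zero any remaining NaNs, filter, then re-apply the mask) can be reproduced in plain NumPy with made-up data:

import numpy as np

data = np.array([[1.0, np.nan],
                 [3.0, 4.0]])
mask = np.array([[1.0, 1.0],
                 [0.0, 1.0]])  # 0.0 marks masked cells

# Use the mask to zero masked areas, then zero any remaining NaNs.
work = np.nan_to_num(data * mask)

# ... the recursive filter would run over `work` here ...

# Re-apply the mask so masked cells stay masked in the output.
result = np.ma.masked_array(work, mask=np.logical_not(mask))
print(result)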