Example #1
    def get_weights_array(self, cube: Cube,
                          weights: Optional[Cube]) -> ndarray:
        """
        Given a one- or multi-dimensional cube of weights, reshape and
        broadcast these to the shape of the data cube. If no weights are
        provided, an array of weights is returned that equally weights all
        slices across the blending coordinate.

        Args:
            cube:
                Template cube defining the shape to which the weights are
                reshaped; expected to have a leading blend coordinate.
            weights:
                Cube of initial blending weights or None

        Returns:
            An array of weights that matches the template cube shape.
        """
        if weights:
            weights_array = self.shape_weights(cube, weights)
        else:
            (number_of_fields,) = cube.coord(self.blend_coord).shape
            weight = FLOAT_DTYPE(1.0 / number_of_fields)
            weights_array = np.broadcast_to(weight, cube.shape)

        return weights_array
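
A minimal sketch of the equal-weights branch above, assuming only numpy; FLOAT_DTYPE here stands in for the module-level constant (assumed to be np.float32):

import numpy as np

FLOAT_DTYPE = np.float32  # assumption: stands in for the module-level constant

# Equally weight four slices across the leading blend dimension of a
# (4, 3, 3) cube, mirroring the no-weights branch above.
number_of_fields = 4
weight = FLOAT_DTYPE(1.0 / number_of_fields)
weights_array = np.broadcast_to(weight, (4, 3, 3))
print(weights_array[:, 0, 0])  # [0.25 0.25 0.25 0.25]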
Example #2
def make_shower_condition_cube(cube: Cube, in_place: bool = False) -> Cube:
    """
    Modify the input cube's metadata and coordinates to produce a shower
    condition proxy. The input cube is expected to possess a single-valued
    threshold coordinate.

    Args:
        cube:
            A thresholded diagnostic to be used as a proxy for showery conditions.
            The threshold coordinate should contain only one value, which
            denotes the key threshold above which conditions are showery, and
            below which precipitation is more likely to be dynamic.
        in_place:
            If set to True, the cube is modified in place. By default a modified
            copy is returned.

    Returns:
        A shower condition probability cube that is an appropriately renamed
        version of the input with an updated threshold coordinate representing
        the probability of shower conditions occurring.

    Raises:
        CoordinateNotFoundError: Input has no threshold coordinate.
        ValueError: Input cube's threshold coordinate is multi-valued.
    """

    if not in_place:
        cube = cube.copy()

    shower_condition_name = "shower_condition"
    cube.rename(f"probability_of_{shower_condition_name}_above_threshold")
    try:
        shower_threshold = find_threshold_coordinate(cube)
    except CoordinateNotFoundError as err:
        msg = "Input has no threshold coordinate and cannot be used"
        raise CoordinateNotFoundError(msg) from err

    try:
        (_,) = shower_threshold.points
    except ValueError as err:
        msg = ("Expected a single valued threshold coordinate, but threshold "
               f"contains multiple points : {shower_threshold.points}")
        raise ValueError(msg) from err

    cube.coord(shower_threshold).rename(shower_condition_name)
    cube.coord(shower_condition_name).var_name = "threshold"
    cube.coord(shower_condition_name).points = FLOAT_DTYPE(1.0)
    cube.coord(shower_condition_name).units = 1

    return cube
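
A usage sketch; the cube construction is illustrative (diagnostic name, units and grid are assumptions), and make_shower_condition_cube, find_threshold_coordinate and FLOAT_DTYPE are taken from the surrounding module:

import numpy as np
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube

# Illustrative thresholded probability cube with a single-valued threshold
# coordinate (identified here by var_name="threshold").
threshold = AuxCoord(
    np.array([1.0], dtype=np.float32),
    standard_name="lwe_precipitation_rate",
    var_name="threshold",
    units="mm h-1",
)
latitude = DimCoord(np.arange(3, dtype=np.float32), "latitude", units="degrees")
longitude = DimCoord(np.arange(3, dtype=np.float32), "longitude", units="degrees")
prob_cube = Cube(
    np.zeros((3, 3), dtype=np.float32),
    long_name="probability_of_lwe_precipitation_rate_above_threshold",
    units="1",
    dim_coords_and_dims=[(latitude, 0), (longitude, 1)],
    aux_coords_and_dims=[(threshold, None)],
)

shower_cube = make_shower_condition_cube(prob_cube)
# Expected: probability_of_shower_condition_above_threshold
print(shower_cube.name())
# Expected: [1.]
print(shower_cube.coord("shower_condition").points)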
Example #3
    def non_percentile_weights(self, cube, weights):
        """
        Given a one- or multi-dimensional cube of weights, reshape and
        broadcast these so that they can be applied to the data cube. If no
        weights are provided, an array of weights is returned that equally
        weights all slices across the blending coordinate of the cube.

        Args:
            cube (iris.cube.Cube):
                The data cube on which a coordinate is being blended.
            weights (iris.cube.Cube or None):
                Cube of blending weights or None.

        Returns:
            numpy.ndarray:
                An array of weights that matches the cube data shape.
        """
        if weights:
            weights_array = self.shape_weights(cube, weights)
        else:
            (number_of_fields,) = cube.coord(self.blend_coord).shape
            weight = FLOAT_DTYPE(1.0 / number_of_fields)
            weights_array = np.broadcast_to(weight, cube.shape)

        return weights_array
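
shape_weights itself is not shown in this section; the sketch below, using iris.util.broadcast_to_shape, illustrates the kind of broadcast it would need to perform for one-dimensional weights along a leading blend dimension (shapes and values are assumptions):

import numpy as np
from iris.util import broadcast_to_shape

# One weight per slice along a blend dimension of length 3.
weights_1d = np.array([0.5, 0.3, 0.2], dtype=np.float32)
cube_shape = (3, 4, 5)  # (blend, y, x) -- illustrative
weights_array = broadcast_to_shape(weights_1d, cube_shape, (0,))
print(weights_array.shape)     # (3, 4, 5)
print(weights_array[:, 0, 0])  # [0.5 0.3 0.2]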
Example #4
def parse_constraint_list(
    constraints: List[str], units: Optional[List[str]] = None
) -> Tuple[Constraint, Optional[Dict], Optional[float], Optional[Dict]]:
    """
    Simple constraints in a key=value format are passed in as a list of
    strings and converted to key-value pairs before the constraints are
    created.
    More complex constraints are identified by parsing the input strings for
    specific identifiers, and the required constraints are then created.
    The simple key-value pairs and other constraints are merged into a single
    constraint.

    Args:
        constraints:
            List of string constraints with keys and values split by "=",
            e.g. ["kw1=val1", "kw2 = val2", "kw3=val3"], where the values
            may include ranges, e.g. [0:20], or ranges with a step value,
            e.g. [0:20:3].
        units:
            List of units (as strings) corresponding to each coordinate in the
            list of constraints.  One or more entries may be None; units may
            only be associated with coordinate constraints.

    Returns:
        - A combination of all the constraints that were supplied.
        - A dictionary of unit keys and values
        - A list containing the min and max values for a longitude constraint
        - A dictionary mapping coordinate names to step values, i.e. a step
          of 2 will skip every other point
    """

    if units is None:
        list_units = len(constraints) * [None]
        units_dict = None
    else:
        if len(units) != len(constraints):
            msg = "units list must match constraints"
            raise ValueError(msg)
        list_units = units
        units_dict = {}

    simple_constraints_dict = {}
    complex_constraints = []
    longitude_constraint = None
    thinning_values = {}
    for constraint_pair, unit_val in zip(constraints, list_units):
        key, value = constraint_pair.split("=", 1)
        key = key.strip(" ")
        value = value.strip(" ")

        if ":" in value:
            range_dict = parse_range_string_to_dict(value)

            # longitude is a circular coordinate, so needs to be treated in a
            # different way to a normal constraint
            if key == "longitude":
                longitude_constraint = [
                    FLOAT_DTYPE(range_dict[k]) for k in ["min", "max"]
                ]
            else:
                complex_constraints.append(
                    create_sorted_lambda_constraint(
                        key, [range_dict["min"], range_dict["max"]]
                    )
                )
            if range_dict.get("step", None):
                thinning_values[key] = int(range_dict["step"])
        else:
            try:
                typed_value = literal_eval(value)
            except ValueError:
                simple_constraints_dict[key] = value
            else:
                simple_constraints_dict[key] = create_constraint(typed_value)

        if unit_val is not None and unit_val.capitalize() != "None":
            units_dict[key] = unit_val.strip(" ")

    if simple_constraints_dict:
        simple_constraints = Constraint(**simple_constraints_dict)
    else:
        simple_constraints = None

    constraints = simple_constraints
    for constr in complex_constraints:
        constraints = constraints & constr

    return constraints, units_dict, longitude_constraint, thinning_values
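
An illustrative call (not a verified run): the input strings follow the docstring's format, the helper functions referenced above are assumed to be available from the surrounding module, and the commented outputs are inferred from the documented return values:

constraints, units_dict, lon_range, thinning = parse_constraint_list(
    ["name=air_temperature", "realization=[0:3]", "longitude=[-10:10:2]"],
    units=[None, None, "degrees"],
)
# constraints: Constraint(name="air_temperature") combined with a sorted
#     lambda constraint selecting realizations in the range 0 to 3
# units_dict: {"longitude": "degrees"}
# lon_range: [-10.0, 10.0]
# thinning: {"longitude": 2}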
Example #5
    def percentile_weights(self, cube, weights, perc_coord):
        """
        Given a one- or multi-dimensional cube of weights, reshape and
        broadcast these so that they can be applied to the data cube. If no
        weights are provided, an array of weights is returned that equally
        weights all slices across the blending coordinate of the cube.

        For percentiles, the dimensionality of the weights cube is checked
        against the data cube with the percentile coordinate excluded, since
        weights are unlikely ever to be provided for percentiles (e.g. we do
        not want to weight different percentiles differently across the
        blending coordinate). The weights are reshaped and broadcast to match
        the data shape excluding the percentile dimension, and are then
        broadcast to the full data shape at the end.

        Args:
            cube (iris.cube.Cube):
                The data cube on which a coordinate is being blended.
            weights (iris.cube.Cube or None):
                Cube of blending weights or None.
            perc_coord (iris.coords.Coord):
                Percentile coordinate

        Returns:
            numpy.ndarray:
                An array of weights that matches the cube data shape.
        """
        # Percentile blending preserves the percentile dimension, but we will
        # not want to vary weights by percentile. If all the other dimensions
        # match for the cube and weights we can assume that a suitable 3D
        # weights cube has been provided and use it directly. To this end we
        # need to compare the shape of the cube excluding the percentile dim.
        non_perc_crds = [
            crd.name()
            for crd in cube.coords(dim_coords=True)
            if crd.name() != perc_coord.name()
        ]
        non_perc_slice = next(cube.slices(non_perc_crds))

        # The weights need to be broadcast to match the percentile cube shape,
        # which means broadcasting across the percentile dimension.
        crd_dims = [cube.coord_dims(crd)[0] for crd in non_perc_crds]

        if weights:
            weights_array = self.shape_weights(non_perc_slice, weights)
            weights_array = iris.util.broadcast_to_shape(
                weights_array, cube.shape, tuple(crd_dims)
            )
        else:
            (number_of_fields,) = cube.coord(self.blend_coord).shape
            weight = FLOAT_DTYPE(1.0 / number_of_fields)
            weights_array = np.broadcast_to(weight, cube.shape)

        (blend_dim,) = cube.coord_dims(self.blend_coord)
        (perc_dim,) = cube.coord_dims(perc_coord)

        # The percentile aggregator performs some coordinate reordering on
        # the data. We don't have sufficient information in the aggregator
        # to modify the weight order correctly, so we do it in advance.
        weights_array = np.moveaxis(weights_array, (blend_dim, perc_dim), (0, 1))

        # Check the weights add up to 1 across the blending dimension.
        self.check_weights(weights_array, 0)

        return weights_array
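
A plain-numpy sketch of the final reordering step; the shape and dimension order below are assumptions (percentile leading, blend second):

import numpy as np

# Toy weights of shape (percentile, blend, y, x), i.e. perc_dim=0, blend_dim=1,
# with three equally weighted fields across the blend dimension.
weights = np.full((5, 3, 4, 4), 1.0 / 3, dtype=np.float32)
reordered = np.moveaxis(weights, (1, 0), (0, 1))
print(reordered.shape)                 # (3, 5, 4, 4): blend first, percentile second
print(reordered.sum(axis=0)[0, 0, 0])  # ~1.0: weights sum to one across blend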