Example #1
    def __init__(self, cube):
        """
        Represents the cube metadata and associated coordinate metadata that
        allows suitable cubes for concatenation to be identified.

        Args:

        * cube:
            The :class:`iris.cube.Cube` source-cube.

        """
        self.aux_coords_and_dims = []
        self.aux_metadata = []
        self.dim_coords = cube.dim_coords
        self.dim_metadata = []
        self.ndim = cube.ndim
        self.scalar_coords = []
        self.cell_measures_and_dims = cube._cell_measures_and_dims
        self.dim_mapping = []

        # Determine whether there are any anonymous cube dimensions.
        covered = set(cube.coord_dims(coord)[0] for coord in self.dim_coords)
        self.anonymous = covered != set(range(self.ndim))

        self.defn = cube.metadata
        self.data_type = cube.dtype
        self.fill_value = cube.fill_value

        #
        # Collate the dimension coordinate metadata.
        #
        for ind, coord in enumerate(self.dim_coords):
            dims = cube.coord_dims(coord)
            metadata = _CoordMetaData(coord, dims)
            self.dim_metadata.append(metadata)
            self.dim_mapping.append(dims[0])

        #
        # Collate the auxiliary coordinate metadata and scalar coordinates.
        #
        axes = dict(T=0, Z=1, Y=2, X=3)

        # Coordinate sort function - by guessed coordinate axis, then
        # by coordinate definition, then by dimensions, in ascending order.
        def key_func(coord):
            return (axes.get(guess_coord_axis(coord), len(axes) + 1),
                    coord._as_defn(),
                    cube.coord_dims(coord))

        for coord in sorted(cube.aux_coords, key=key_func):
            dims = cube.coord_dims(coord)
            if dims:
                metadata = _CoordMetaData(coord, dims)
                self.aux_metadata.append(metadata)
                coord_and_dims = _CoordAndDims(coord, tuple(dims))
                self.aux_coords_and_dims.append(coord_and_dims)
            else:
                self.scalar_coords.append(coord)
Example #2
    def __init__(self, cube):
        """
        Represents the cube metadata and associated coordinate metadata that
        allows suitable cubes for concatenation to be identified.

        Args:

        * cube:
            The :class:`iris.cube.Cube` source-cube.

        """
        self.aux_coords_and_dims = []
        self.aux_metadata = []
        self.dim_coords = cube.dim_coords
        self.dim_metadata = []
        self.mdi = None
        self.ndim = cube.ndim
        self.scalar_coords = []

        # Determine whether there are any anonymous cube dimensions.
        covered = set(cube.coord_dims(coord)[0] for coord in self.dim_coords)
        self.anonymous = covered != set(range(self.ndim))

        self.defn = cube.metadata
        self.data_type = cube.data.dtype

        if ma.isMaskedArray(cube.data):
            # Only set when we're dealing with a masked payload.
            self.mdi = cube.data.fill_value

        #
        # Collate the dimension coordinate metadata.
        #
        for coord in self.dim_coords:
            metadata = _CoordMetaData(coord, cube.coord_dims(coord))
            self.dim_metadata.append(metadata)

        #
        # Collate the auxiliary coordinate metadata and scalar coordinates.
        #
        axes = dict(T=0, Z=1, Y=2, X=3)
        # Coordinate sort function - by guessed coordinate axis, then
        # by coordinate definition, then by dimensions, in ascending order.
        key_func = lambda coord: (axes.get(guess_coord_axis(coord),
                                           len(axes) + 1),
                                  coord._as_defn(),
                                  cube.coord_dims(coord))

        for coord in sorted(cube.aux_coords, key=key_func):
            dims = cube.coord_dims(coord)
            if dims:
                metadata = _CoordMetaData(coord, dims)
                self.aux_metadata.append(metadata)
                coord_and_dims = _CoordAndDims(coord, tuple(dims))
                self.aux_coords_and_dims.append(coord_and_dims)
            else:
                self.scalar_coords.append(coord)
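
A minimal stand-alone sketch of the aux-coord ordering used by the sort key in the two constructors above: coordinates are ranked by their guessed axis (T, Z, Y, X), with unknown axes sorting last. Pure Python, with made-up (name, axis) pairs for illustration only.

# Hypothetical (name, guessed_axis) pairs; only the priority lookup mirrors the code above.
axes = dict(T=0, Z=1, Y=2, X=3)
coords = [('longitude', 'X'), ('time', 'T'), ('realization', None), ('height', 'Z')]
ordered = sorted(coords, key=lambda c: axes.get(c[1], len(axes) + 1))
print([name for name, _ in ordered])
# ['time', 'height', 'longitude', 'realization']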
Example #3
def _create_cf_cell_methods(cube, dimension_names):
    """Create CF-netCDF string representation of a cube cell methods."""
    cell_methods = []

    # Identify the collection of coordinates that represent CF-netCDF coordinate variables.
    cf_coordinates = cube.dim_coords
    
    for cm in cube.cell_methods:
        names = ''

        for name in cm.coord_names:
            coord = cube.coords(name)

            if coord:
                coord = coord[0]
                if coord in cf_coordinates:
                    name = dimension_names[cube.coord_dims(coord)[0]]

            names += '%s: ' % name
        
        interval = ' '.join(['interval: %s' % interval for interval in cm.intervals or []])
        comment = ' '.join(['comment: %s' % comment for comment in cm.comments or []])
        extra = ' '.join([interval, comment]).strip()
        
        if extra:
            extra = ' (%s)' % extra
            
        cell_methods.append(names + cm.method + extra)
            
    return ' '.join(cell_methods)
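
To illustrate the string this helper produces, here is a hedged, iris-free sketch using a stand-in CellMethod tuple (the field names mirror, but are not taken from, the real class):

from collections import namedtuple

CellMethod = namedtuple('CellMethod', 'method coord_names intervals comments')
cm = CellMethod('mean', ('time',), ('1 hour',), ())

names = ''.join('%s: ' % name for name in cm.coord_names)
interval = ' '.join('interval: %s' % i for i in cm.intervals or [])
comment = ' '.join('comment: %s' % c for c in cm.comments or [])
extra = ' '.join([interval, comment]).strip()
print(names + cm.method + (' (%s)' % extra if extra else ''))
# time: mean (interval: 1 hour)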
Example #4
def _coord_netcdf_variable_name(cube, coord):
    """
    Returns a CF-netCDF variable name for the given coordinate.

    Args:

    * cube (:class:`iris.cube.Cube`):
        The cube that contains the given coordinate.

    * coord (:class:`iris.coords.Coord`):
        An instance of a coordinate for which a CF-netCDF variable
        name is required.

    Returns:
        A CF-netCDF variable name as a string.

    """
    if coord.var_name is not None:
        cf_name = coord.var_name
    else:
        name = coord.standard_name or coord.long_name
        if not name or set(name).intersection(string.whitespace):
            # Auto-generate name based on associated dimensions.
            name = ''
            for dim in cube.coord_dims(coord):
                name += 'dim{}'.format(dim)
            # Handle scalar coordinate (dims == ()).
            if not name:
                name = 'unknown_scalar'
        # Convert to lower case and replace whitespace by underscores.
        cf_name = '_'.join(name.lower().split())

    return cf_name
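
The fallback naming rules above can be exercised without iris. A minimal sketch; the helper name sanitise is made up for illustration:

import string

def sanitise(name, dims):
    # Mirror of the branch above: empty names, or names containing any
    # whitespace, fall back to a dimension-based (or scalar) placeholder.
    if not name or set(name).intersection(string.whitespace):
        name = ''.join('dim{}'.format(d) for d in dims) or 'unknown_scalar'
    return '_'.join(name.lower().split())

print(sanitise('Air_Temperature', (0,)))    # air_temperature
print(sanitise('air temperature', (0, 1)))  # dim0dim1
print(sanitise(None, ()))                   # unknown_scalar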
Example #5
def _get_plot_defn(cube, mode, ndims=2):
    """
    Return data and plot-axis coords given a cube & a mode of either
    POINT_MODE or BOUND_MODE.

    """
    if cube.ndim != ndims:
        msg = 'Cube must be %s-dimensional. Got %s dimensions.'
        raise ValueError(msg % (ndims, cube.ndim))

    # Start by taking the DimCoords from each dimension.
    coords = [None] * ndims
    for dim_coord in cube.dim_coords:
        dim = cube.coord_dims(dim_coord)[0]
        coords[dim] = dim_coord

    # When appropriate, restrict to 1D with bounds.
    if mode == iris.coords.BOUND_MODE:
        coords = list(map(_valid_bound_coord, coords))

    def guess_axis(coord):
        axis = None
        if coord is not None:
            axis = iris.util.guess_coord_axis(coord)
        return axis

    # Allow DimCoords in aux_coords to fill in for missing dim_coords.
    for dim, coord in enumerate(coords):
        if coord is None:
            aux_coords = cube.coords(dimensions=dim)
            aux_coords = [coord for coord in aux_coords
                          if isinstance(coord, iris.coords.DimCoord)]
            if aux_coords:
                key_func = lambda coord: coord._as_defn()
                aux_coords.sort(key=key_func)
                coords[dim] = aux_coords[0]

    if mode == iris.coords.POINT_MODE:
        # Allow multi-dimensional aux_coords to override the dim_coords
        # along the Z axis. This results in a preference for using the
        # derived altitude over model_level_number or level_height.
        # Limit to Z axis to avoid preferring latitude over grid_latitude etc.
        axes = list(map(guess_axis, coords))
        axis = 'Z'
        if axis in axes:
            for coord in cube.coords(dim_coords=False):
                if max(coord.shape) > 1 and \
                        iris.util.guess_coord_axis(coord) == axis:
                    coords[axes.index(axis)] = coord

    # Re-order the coordinates to achieve the preferred
    # horizontal/vertical associations.
    def sort_key(coord):
        order = {'X': 2, 'T': 1, 'Y': -1, 'Z': -2}
        axis = guess_axis(coord)
        return (order.get(axis, 0), coord and coord.name())
    sorted_coords = sorted(coords, key=sort_key)

    transpose = (sorted_coords != coords)
    return PlotDefn(sorted_coords, transpose)
Example #6
def _dereference_args(factory, reference_targets, regrid_cache, cube):
    """Converts all the arguments for a factory into concrete coordinates."""
    args = []
    for arg in factory.args:
        if isinstance(arg, Reference):
            if arg.name in reference_targets:
                src = reference_targets[arg.name].as_cube()
                # If necessary, regrid the reference cube to
                # match the grid of this cube.
                src = _ensure_aligned(regrid_cache, src, cube)
                if src is not None:
                    new_coord = iris.coords.AuxCoord(src.data,
                                                     src.standard_name,
                                                     src.long_name,
                                                     src.var_name,
                                                     src.units,
                                                     attributes=src.attributes)
                    dims = [cube.coord_dims(src_coord)[0]
                                for src_coord in src.dim_coords]
                    cube.add_aux_coord(new_coord, dims)
                    args.append(new_coord)
                else:
                    raise _ReferenceError('Unable to regrid reference for'
                                          ' {!r}'.format(arg.name))
            else:
                raise _ReferenceError("The file(s) {{filenames}} don't contain"
                                      " field(s) for {!r}.".format(arg.name))
        else:
            # If it wasn't a Reference, then arg is a dictionary
            # of keyword arguments for cube.coord(...).
            args.append(cube.coord(**arg))
    return args
Example #7
    def test_contrived_differential1(self):
        # testing :
        # F = ( cos(lat) cos(lon) )
        # dF/dLon = - sin(lon) cos(lat)     (and to simplify /cos(lat) )
        cube = build_cube(numpy.empty((30, 60)), spherical=True)

        x = cube.coord('longitude')
        y = cube.coord('latitude')
        y_dim = cube.coord_dims(y)[0]

        cos_x_pts = numpy.cos(numpy.radians(x.points)).reshape(1, x.shape[0])
        cos_y_pts = numpy.cos(numpy.radians(y.points)).reshape(y.shape[0], 1)
    
        cube.data = cos_y_pts * cos_x_pts
    
        lon_coord = x.unit_converted('radians')
        lat_coord = y.unit_converted('radians')
        cos_lat_coord = iris.coords.AuxCoord.from_coord(lat_coord)
        cos_lat_coord.points = numpy.cos(lat_coord.points)
        cos_lat_coord.units = '1'
        cos_lat_coord.rename('cos({})'.format(lat_coord.name()))
        
        temp = iris.analysis.calculus.differentiate(cube, lon_coord)
        df_dlon = iris.analysis.maths.divide(temp, cos_lat_coord, y_dim)

        x = df_dlon.coord('longitude')
        y = df_dlon.coord('latitude')
        
        sin_x_pts = numpy.sin(numpy.radians(x.points)).reshape(1, x.shape[0])
        y_ones = numpy.ones((y.shape[0] , 1))
        
        data = - sin_x_pts * y_ones
        result = df_dlon.copy(data=data)
        
        numpy.testing.assert_array_almost_equal(result.data, df_dlon.data, decimal=3)
Example #8
def cube_delta(cube, coord):
    """
    Given a cube calculate the difference between each value in the
    given coord's direction.


    Args:

    * coord
        either a Coord instance or the unique name of a coordinate in the cube.
        If a Coord instance is provided, it does not necessarily have to
        exist in the cube.

    Example usage::

        change_in_temperature_wrt_pressure = \
cube_delta(temperature_cube, 'pressure')

    .. note:: Missing data support not yet implemented.

    """
    # handle the case where a user passes a coordinate name
    if isinstance(coord, six.string_types):
        coord = cube.coord(coord)

    if coord.ndim != 1:
        raise iris.exceptions.CoordinateMultiDimError(coord)

    # Try and get a coord dim
    delta_dims = cube.coord_dims(coord.name())
    if (coord.shape[0] == 1 and not getattr(coord, "circular", False)) or not delta_dims:
        raise ValueError("Cannot calculate delta over {!r} as it has " "length of 1.".format(coord.name()))
    delta_dim = delta_dims[0]

    # Calculate the actual delta, taking into account whether the given
    # coordinate is circular.
    delta_cube_data = delta(cube.data, delta_dim, circular=getattr(coord, "circular", False))

    # If the coord/dim is circular there is no change in cube shape
    if getattr(coord, "circular", False):
        delta_cube = cube.copy(data=delta_cube_data)
    else:
        # Subset the cube to the appropriate new shape by knocking off
        # the last row of the delta dimension.
        subset_slice = [slice(None, None)] * cube.ndim
        subset_slice[delta_dim] = slice(None, -1)
        delta_cube = cube[tuple(subset_slice)]
        delta_cube.data = delta_cube_data

    # Replace the delta_dim coords with midpoints
    # (no shape change if circular).
    for cube_coord in cube.coords(dimensions=delta_dim):
        delta_cube.replace_coord(_construct_midpoint_coord(cube_coord, circular=getattr(coord, "circular", False)))

    delta_cube.rename("change_in_{}_wrt_{}".format(delta_cube.name(), coord.name()))

    return delta_cube
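
For intuition, a numpy-only sketch of the delta step: a plain forward difference shortens the axis by one, while a circular difference wraps the first value back on and keeps the original length (the real helper also applies the coordinate's modulus, which is omitted here):

import numpy as np

data = np.array([1.0, 4.0, 9.0, 16.0])
plain = data[1:] - data[:-1]             # shape (3,)
wrapped = np.append(data, data[0])       # pretend the coord is circular
circular = wrapped[1:] - wrapped[:-1]    # shape (4,)
print(plain)     # [3. 5. 7.]
print(circular)  # [  3.   5.   7. -15.]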
Example #9
def differentiate(cube, coord_to_differentiate):
    r"""
    Calculate the differential of a given cube with respect to the coord_to_differentiate.
    
    Args:

    * coord_to_differentiate:
        Either a Coord instance or the unique name of a coordinate which exists in the cube.
        If a Coord instance is provided, it does not necessarily have to exist on the cube.

    Example usage::
    
        u_wind_acceleration = differentiate(u_wind_cube, 'forecast_time')

    The algorithm used is equivalent to:
    
    .. math::
    
        d_i = \frac{v_{i+1}-v_i}{c_{i+1}-c_i}
    
    Where ``d`` is the differential, ``v`` is the data value, ``c`` is the coordinate value and ``i`` is the index in the differential
    direction. Hence, in a normal situation if a cube has a shape (x: n; y: m) differentiating with respect to x will result in a cube
    of shape (x: n-1; y: m) and differentiating with respect to y will result in (x: n; y: m-1). If the coordinate to differentiate is
    :attr:`circular <iris.coords.DimCoord.circular>` then the resultant shape will be the same as the input cube. 
    

    .. note:: Difference method used is the same as :func:`cube_delta` and therefore has the same limitations.
    
    .. note:: Spherical differentiation does not occur in this routine.

    """
    # Get the delta cube in the required differential direction. Don't add this to the resultant
    # cube's history as we will do that ourself.
    # This operation results in a copy of the original cube.
    delta_cube = cube_delta(cube, coord_to_differentiate, update_history=False)
    
    if isinstance(coord_to_differentiate, basestring):
        coord = cube.coord(coord_to_differentiate)
    else:
        coord = coord_to_differentiate
    
    delta_coord = _construct_delta_coord(coord)
    delta_dim = cube.coord_dims(coord)[0]

    # calculate delta_cube / delta_coord to give the differential. Don't update the history, as we will
    # do this ourself.
    delta_cube = iris.analysis.maths.divide(delta_cube, delta_coord, delta_dim,
                                            update_history=False)

    # Update the history of the new cube
    delta_cube.add_history('differential of %s wrt to %s' % (cube.name(), coord.name()) )
    
    # Update the standard name
    delta_cube.rename(('derivative_of_%s_wrt_%s' % (cube.name(), coord.name())) )
    return delta_cube
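
A numpy-only illustration of the formula in the docstring, along with the mid-point values that the replacement coordinate typically takes:

import numpy as np

c = np.array([0.0, 1.0, 2.0, 4.0])     # coordinate values
v = np.array([0.0, 1.0, 4.0, 16.0])    # data values
d = np.diff(v) / np.diff(c)            # d_i = (v[i+1] - v[i]) / (c[i+1] - c[i])
mid = (c[:-1] + c[1:]) / 2.0           # mid-point coordinate for the result
print(d)    # [1. 3. 6.]
print(mid)  # [0.5 1.5 3. ]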
Example #10
File: plot.py Project: bblay/iris
def _get_plot_defn_custom_coords_picked(cube, coords, mode, ndims=2):
    def as_coord(coord):
        if isinstance(coord, basestring):
            coord = cube.coord(name=coord)
        else:
            coord = cube.coord(coord=coord)
        return coord

    coords = map(as_coord, coords)

    # Check that we were given the right number of coordinates
    if len(coords) != ndims:
        coord_names = ", ".join([coord.name() for coord in coords])
        raise ValueError(
            "The list of coordinates given (%s) should have the"
            " same length (%s) as the dimensionality of the"
            " required plot (%s)" % (coord_names, len(coords), ndims)
        )

    # Check which dimensions are spanned by each coordinate.
    get_span = lambda coord: set(cube.coord_dims(coord))
    spans = map(get_span, coords)
    for span, coord in zip(spans, coords):
        if not span:
            msg = "The coordinate {!r} doesn't span a data dimension."
            raise ValueError(msg.format(coord.name()))
        if mode == iris.coords.BOUND_MODE and len(span) != 1:
            raise ValueError(
                "The coordinate {!r} is multi-dimensional and"
                " cannot be used in a cell-based plot.".format(coord.name())
            )

    # Check the combination of coordinates spans enough (ndims) data
    # dimensions.
    total_span = set().union(*spans)
    if len(total_span) != ndims:
        coord_names = ", ".join([coord.name() for coord in coords])
        raise ValueError("The given coordinates ({}) don't span the {} data" " dimensions.".format(coord_names, ndims))

    # If we have 2-dimensional data, and one or more 1-dimensional
    # coordinates, check if we need to transpose.
    transpose = False
    if ndims == 2 and min(map(len, spans)) == 1:
        for i, span in enumerate(spans):
            if len(span) == 1:
                if list(span)[0] == i:
                    transpose = True
                    break

    # Note the use of `reversed` to convert from the X-then-Y
    # convention of the end-user API to the V-then-U convention used by
    # the plotting routines.
    plot_coords = list(reversed(coords))
    return PlotDefn(plot_coords, transpose)
Example #11
def _broadcast_cube_coord_data(cube, other, operation_name, dim=None):
    # What dimension are we processing?
    data_dimension = None
    if dim is not None:
        # Ensure the given dim matches the coord
        if other in cube.coords() and cube.coord_dims(other) != [dim]:
            raise ValueError("dim provided does not match dim found for coord")
        data_dimension = dim
    else:
        # Try and get a coord dim
        if other.shape != (1,):
            try:
                coord_dims = cube.coord_dims(other)
                data_dimension = coord_dims[0] if coord_dims else None
            except iris.exceptions.CoordinateNotFoundError:
                raise ValueError("Could not determine dimension for %s. "
                                 "Use %s(cube, coord, dim=dim)"
                                 % (operation_name, operation_name))

    if other.ndim != 1:
        raise iris.exceptions.CoordinateMultiDimError(other)

    if other.has_bounds():
        warnings.warn('Using {!r} with a bounded coordinate is not well '
                      'defined; ignoring bounds.'.format(operation_name))

    points = other.points

    # If the `data_dimension` is defined then shape the provided points for
    # proper array broadcasting
    if data_dimension is not None:
        points_shape = [1] * cube.ndim
        points_shape[data_dimension] = -1
        points = points.reshape(points_shape)

    return points
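
The reshape at the end is the standard numpy broadcasting trick; a small self-contained sketch:

import numpy as np

data = np.zeros((2, 3, 4))
points = np.array([10.0, 20.0, 30.0])   # pretend coordinate on dimension 1
shape = [1] * data.ndim
shape[1] = -1                           # data_dimension = 1
print(points.reshape(shape).shape)             # (1, 3, 1)
print((data + points.reshape(shape)).shape)    # (2, 3, 4)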
Example #12
    def _create_cf_cell_methods(self, cube, dimension_names):
        """
        Create CF-netCDF string representation of a cube cell methods.

        Args:

        * cube (:class:`iris.cube.Cube`) or cubelist
          (:class:`iris.cube.CubeList`):
            A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or list of
            cubes to be saved to a netCDF file.
        * dimension_names (list):
            Names associated with the dimensions of the cube.

        Returns:
            CF-netCDF string representation of a cube cell methods.

        """
        cell_methods = []

        # Identify the collection of coordinates that represent CF-netCDF
        # coordinate variables.
        cf_coordinates = cube.dim_coords

        for cm in cube.cell_methods:
            names = ''

            for name in cm.coord_names:
                coord = cube.coords(name)

                if coord:
                    coord = coord[0]
                    if coord in cf_coordinates:
                        name = dimension_names[cube.coord_dims(coord)[0]]

                names += '%s: ' % name

            interval = ' '.join(['interval: %s' % interval for interval in
                                 cm.intervals or []])
            comment = ' '.join(['comment: %s' % comment for comment in
                                cm.comments or []])
            extra = ' '.join([interval, comment]).strip()

            if extra:
                extra = ' (%s)' % extra

            cell_methods.append(names + cm.method + extra)

        return ' '.join(cell_methods)
Example #14
File: _linear.py Project: ckmo/iris
 def construct_new_coord(coord):
     dims = cube.coord_dims(coord)
     if coord in self._src_coords:
         index = self._src_coords.index(coord)
         new_points = sample_points[index]
         new_coord = construct_new_coord_given_points(coord, new_points)
         # isinstance not possible here as a dimension coordinate can be
         # mapped to the aux coordinates of a cube.
         if coord in cube.aux_coords:
             dims = [self._interp_dims[index]]
     else:
         if set(dims).intersection(set(self._interp_dims)):
             # Interpolate the coordinate payload.
             new_coord = self._resample_coord(sample_points, coord,
                                              dims)
         else:
             new_coord = coord.copy()
     return new_coord, dims
Example #15
 def construct_new_coord(coord):
     dims = cube.coord_dims(coord)
     try:
         index = self._src_coords.index(coord)
         new_points = sample_points[index]
         new_coord = construct_new_coord_given_points(coord, new_points)
         # isinstance not possible here as a dimension coordinate can be
         # mapped to the aux coordinates of a cube.
         if coord in cube.aux_coords:
             dims = [self._interp_dims[index]]
     except ValueError:
         if set(dims).intersection(set(self._interp_dims)):
             # Interpolate the coordinate payload.
             new_coord = self._resample_coord(sample_points, coord,
                                              dims)
         else:
             new_coord = coord.copy()
     return new_coord, dims
Example #16
 def _equivalent_nn_indices(cube, sample_points,
                            require_single_point=False):
     indices = [slice(None) for _ in cube.shape]
     for coord_spec, point in sample_points:
         coord = cube.coord(coord_spec)
         dim, = cube.coord_dims(coord)  # expect only 1d --> single dim !
         dim_index = coord.nearest_neighbour_index(point)
         if require_single_point:
             # Mimic error behaviour of the original "data-value" function:
             # Any dim already addressed must get the same index.
             if indices[dim] != slice(None) and indices[dim] != dim_index:
                 raise ValueError('indices over-specified')
         indices[dim] = dim_index
     if require_single_point:
         # Mimic error behaviour of the original "data-value" function:
         # All dims must have an index.
         if any(index == slice(None) for index in indices):
             raise ValueError('result expected to be a single point')
     return tuple(indices)
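
How the resulting index tuple behaves against a plain array, as a quick sketch: dimensions left as slice(None) are kept whole, dimensions given an integer collapse to a single point.

import numpy as np

data = np.arange(24).reshape(2, 3, 4)
indices = [slice(None)] * data.ndim
indices[1] = 2                        # e.g. a nearest-neighbour index on dim 1
print(data[tuple(indices)].shape)     # (2, 4)
indices[0], indices[2] = 1, 3         # fully specified -> a single value
print(data[tuple(indices)])           # 23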
Example #18
def _compute_anomalies(cube, reference, period, seasons):
    cube_coord = _get_period_coord(cube, period, seasons)
    ref_coord = _get_period_coord(reference, period, seasons)

    data = cube.core_data()
    cube_time = cube.coord('time')
    ref = {}
    for ref_slice in reference.slices_over(ref_coord):
        ref[ref_slice.coord(ref_coord).points[0]] = ref_slice.core_data()

    cube_coord_dim = cube.coord_dims(cube_coord)[0]
    slicer = [slice(None)] * len(data.shape)
    new_data = []
    for i in range(cube_time.shape[0]):
        slicer[cube_coord_dim] = i
        new_data.append(data[tuple(slicer)] - ref[cube_coord.points[i]])
    data = da.stack(new_data, axis=cube_coord_dim)
    cube = cube.copy(data)
    cube.remove_coord(cube_coord)
    return cube
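
A simplified, numpy-only sketch of the slice-and-stack pattern above (2-D instead of dask arrays, and a single mean reference instead of per-period references):

import numpy as np

data = np.arange(12, dtype=float).reshape(4, 3)   # pretend (time, space)
ref = data.mean(axis=0)                           # reference per spatial point
pieces = [data[i, :] - ref for i in range(data.shape[0])]
anomalies = np.stack(pieces, axis=0)
print(anomalies.shape)          # (4, 3)
print(anomalies.mean(axis=0))   # [0. 0. 0.]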
Example #19
def get_time_weights(cube):
    """Compute the weighting of the time axis.

    Parameters
    ----------
    cube: iris.cube.Cube
        input cube.

    Returns
    -------
    numpy.array
        Array of time weights for averaging.
    """
    time = cube.coord('time')
    time_weights = time.bounds[..., 1] - time.bounds[..., 0]
    time_weights = time_weights.squeeze()
    if time_weights.shape == ():
        time_weights = da.broadcast_to(time_weights, cube.shape)
    else:
        time_weights = iris.util.broadcast_to_shape(time_weights, cube.shape,
                                                    cube.coord_dims('time'))
    return time_weights
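
A numpy-only sketch of the weighting: each time cell is weighted by the width of its bounds, then broadcast along the remaining data dimensions (np.broadcast_to stands in for iris.util.broadcast_to_shape here):

import numpy as np

bounds = np.array([[0.0, 24.0], [24.0, 48.0], [48.0, 120.0]])   # (time, 2)
weights = bounds[..., 1] - bounds[..., 0]                       # [24. 24. 72.]
data_shape = (3, 5)                                             # (time, space)
weights = np.broadcast_to(weights[:, np.newaxis], data_shape)
print(weights.shape)    # (3, 5)
print(weights[:, 0])    # [24. 24. 72.]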
Example #20
    def test_contrived_differential1(self):
        # testing :
        # F = ( cos(lat) cos(lon) )
        # dF/dLon = - sin(lon) cos(lat)     (and to simplify /cos(lat) )
        cube = build_cube(np.empty((30, 60)), spherical=True)

        x = cube.coord('longitude')
        y = cube.coord('latitude')
        y_dim = cube.coord_dims(y)[0]

        cos_x_pts = np.cos(np.radians(x.points)).reshape(1, x.shape[0])
        cos_y_pts = np.cos(np.radians(y.points)).reshape(y.shape[0], 1)

        cube.data = cos_y_pts * cos_x_pts

        lon_coord = x.copy()
        lon_coord.convert_units('radians')
        lat_coord = y.copy()
        lat_coord.convert_units('radians')
        cos_lat_coord = iris.coords.AuxCoord.from_coord(lat_coord)
        cos_lat_coord.points = np.cos(lat_coord.points)
        cos_lat_coord.units = '1'
        cos_lat_coord.rename('cos({})'.format(lat_coord.name()))

        temp = iris.analysis.calculus.differentiate(cube, lon_coord)
        df_dlon = iris.analysis.maths.divide(temp, cos_lat_coord, y_dim)

        x = df_dlon.coord('longitude')
        y = df_dlon.coord('latitude')

        sin_x_pts = np.sin(np.radians(x.points)).reshape(1, x.shape[0])
        y_ones = np.ones((y.shape[0], 1))

        data = -sin_x_pts * y_ones
        result = df_dlon.copy(data=data)

        np.testing.assert_array_almost_equal(result.data,
                                             df_dlon.data,
                                             decimal=3)
Example #21
def _dereference_args(factory, reference_targets, regrid_cache, cube):
    """Converts all the arguments for a factory into concrete coordinates."""
    args = []
    for arg in factory.args:
        if isinstance(arg, Reference):
            if arg.name in reference_targets:
                src = reference_targets[arg.name].as_cube()
                # If necessary, regrid the reference cube to
                # match the grid of this cube.
                src = _ensure_aligned(regrid_cache, src, cube)
                if src is not None:
                    new_coord = iris.coords.AuxCoord(
                        src.data,
                        src.standard_name,
                        src.long_name,
                        src.var_name,
                        src.units,
                        attributes=src.attributes,
                    )
                    dims = [
                        cube.coord_dims(src_coord)[0]
                        for src_coord in src.dim_coords
                    ]
                    cube.add_aux_coord(new_coord, dims)
                    args.append(new_coord)
                else:
                    raise _ReferenceError("Unable to regrid reference for"
                                          " {!r}".format(arg.name))
            else:
                raise _ReferenceError("The source data contains no "
                                      "field(s) for {!r}.".format(arg.name))
        else:
            # If it wasn't a Reference, then arg is a dictionary
            # of keyword arguments for cube.coord(...).
            args.append(cube.coord(**arg))
    return args
Example #22
def _multiply_divide_common(operation_function,
                            operation_symbol,
                            operation_noun,
                            cube,
                            other,
                            dim=None,
                            in_place=False):
    """
    Function which shares common code between multiplication and division of cubes.

    operation_function   - function which does the operation (e.g. numpy.divide)
    operation_symbol     - the textual symbol of the operation (e.g. '/')
    operation_noun       - the noun of the operation (e.g. 'division')
    operation_past_tense - the past tense of the operation (e.g. 'divided')

    .. seealso:: For information on the dim keyword argument see :func:`multiply`.

    """
    if not isinstance(cube, iris.cube.Cube):
        raise TypeError(
            'The "cube" argument must be an instance of iris.Cube.')

    if isinstance(other, (int, float)):
        other = np.array(other)

    other_unit = None

    if isinstance(other, np.ndarray):
        _assert_compatible(cube, other)

        if in_place:
            new_cube = cube
            new_cube.data = operation_function(cube.data, other)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, other))

        other_unit = '1'
    elif isinstance(other, iris.coords.Coord):
        # Deal with cube multiplication/division by coordinate

        # What dimension are we processing?
        data_dimension = None
        if dim is not None:
            # Ensure the given dim matches the coord
            if other in cube.coords() and cube.coord_dims(other) != [dim]:
                raise ValueError(
                    "dim provided does not match dim found for coord")
            data_dimension = dim
        else:
            # Try and get a coord dim
            if other.shape != (1, ):
                try:
                    coord_dims = cube.coord_dims(other)
                    data_dimension = coord_dims[0] if coord_dims else None
                except iris.exceptions.CoordinateNotFoundError:
                    raise ValueError(
                        "Could not determine dimension for mul/div. Use mul(coord, dim=dim)"
                    )

        if other.ndim != 1:
            raise iris.exceptions.CoordinateMultiDimError(other)

        if other.has_bounds():
            warnings.warn(
                '%s by a bounded coordinate not well defined, ignoring bounds.'
                % operation_noun)

        points = other.points

        # If the axis is defined then shape the provided points so that we can do the
        # division (this is needed as there is no "axis" keyword to numpy's divide/multiply)
        if data_dimension is not None:
            points_shape = [1] * cube.ndim
            points_shape[data_dimension] = -1
            points = points.reshape(points_shape)

        if in_place:
            new_cube = cube
            new_cube.data = operation_function(cube.data, points)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, points))

        other_unit = other.units
    elif isinstance(other, iris.cube.Cube):
        # Deal with cube multiplication/division by cube

        if in_place:
            new_cube = cube
            new_cube.data = operation_function(cube.data, other.data)
        else:
            new_cube = cube.copy(
                data=operation_function(cube.data, other.data))

        other_unit = other.units
    else:
        return NotImplemented

    # Update the units
    if operation_function == np.multiply:
        new_cube.units = cube.units * other_unit
    elif operation_function == np.divide:
        new_cube.units = cube.units / other_unit

    iris.analysis.clear_phenomenon_identity(new_cube)

    return new_cube
Example #23
def linear(cube, sample_points, extrapolation_mode='linear'):
    """
    Return a cube of the linearly interpolated points given the desired
    sample points.
    
    Given a list of tuple pairs mapping coordinates to their desired
    values, return a cube with linearly interpolated values. If more
    than one coordinate is specified, the linear interpolation will be
    carried out in sequence, thus providing n-linear interpolation
    (bi-linear, tri-linear, etc.).
    
    .. note::
        By definition, linear interpolation requires all coordinates to
        be 1-dimensional.
    
    Args:
    
    * cube
        The cube to be interpolated.
        
    * sample_points
        List of one or more tuple pairs mapping coordinate to desired
        points to interpolate. Points may be a scalar or a numpy array
        of values.
    
    Kwargs:
    
    * extrapolation_mode - string - one of 'linear', 'nan' or 'error'
    
        * If 'linear' the point will be calculated by extending the
          gradient of closest two points.
        * If 'nan' the extrapolation point will be put as a NAN.
        * If 'error' a value error will be raised notifying of the
          attempted extrapolation.
    
    .. note::
        If the source cube's data, or any of its resampled coordinates,
        have an integer data type they will be promoted to a floating
        point data type in the result.
     
    """
    if not isinstance(cube, iris.cube.Cube):
        raise ValueError('Expecting a cube instance, got %s' % type(cube))

    if isinstance(sample_points, dict):
        warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
        sample_points = sample_points.items()

    # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
    if sample_points and not (isinstance(sample_points[0], collections.Container) and not isinstance(sample_points[0], basestring)):
        raise TypeError('Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.' % type(sample_points[0]))
    
    points = []
    for (coord, values) in sample_points:
        if isinstance(coord, basestring):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord=coord)
        points.append((coord, values))
    sample_points = points

    if len(sample_points) == 0:
        raise ValueError('Expecting a non-empty list of coord value pairs, got %r.' % sample_points)

    if cube.data.dtype.kind == 'i':
        raise ValueError("Cannot linearly interpolate a cube which has integer type data. Consider casting the "
                         "cube's data to floating points in order to continue.")

    bounds_error = (extrapolation_mode == 'error')

    # Handle an over-specified points_dict or a specification which does not describe a data dimension
    data_dimensions_requested = []
    for coord, values in sample_points:
        if coord.ndim > 1:
            raise ValueError('Cannot linearly interpolate over {!r} as it is'
                             ' multi-dimensional.'.format(coord.name()))
        data_dim = cube.coord_dims(coord)
        if not data_dim:
            raise ValueError('Requested a point over a coordinate which does'
                             ' not describe a dimension: {!r}.'.format(
                                 coord.name()))
        else:
            data_dim = data_dim[0]
        if data_dim in data_dimensions_requested:
            raise ValueError('Requested a point which over specifies a'
                             ' dimension: {!r}. '.format(coord.name()))
        data_dimensions_requested.append(data_dim)

    # Iterate over all of the requested keys in the given points_dict calling this routine repeatedly.
    if len(sample_points) > 1:
        result = cube
        for coord, cells in sample_points:
            result = linear(result, [(coord, cells)], extrapolation_mode=extrapolation_mode)
        return result
    
    else:
        # Now we must be down to a single sample coordinate and its
        # values.
        src_coord, requested_points = sample_points[0]

        # 1) Define the interpolation characteristics.

        # Get the sample dimension (which we have already tested is not None)
        sample_dim = cube.coord_dims(src_coord)[0]

        # Construct source data & source coordinate values suitable for
        # SciPy's interp1d.
        if getattr(src_coord, 'circular', False):
            coord_slice_in_cube = [slice(None, None)] * cube.data.ndim
            coord_slice_in_cube[sample_dim] = slice(0, 1)
            modulus = numpy.array(src_coord.units.modulus or 0,
                                  dtype=src_coord.dtype)
            src_points = numpy.append(src_coord.points,
                                  src_coord.points[0] + modulus)
            data = numpy.append(cube.data,
                                cube.data[tuple(coord_slice_in_cube)],
                                axis=sample_dim)
        else:
            src_points = src_coord.points
            data = cube.data
        
        if len(src_points) <= 1:
            raise ValueError('Cannot linearly interpolate a coordinate {!r}'
                             ' with one point.'.format(src_coord.name()))
        
        monotonic, direction = iris.util.monotonic(src_points,
                                                   return_direction=True)
        if not monotonic:
            raise ValueError('Unable to linearly interpolate this cube as the'
                             ' coordinate {!r} is not monotonic'.format(
                                src_coord.name()))
        # SciPy's interp1d requires monotonic increasing coord values.
        if direction == -1:
            src_points = iris.util.reverse(src_points, axes=0)
            data = iris.util.reverse(data, axes=sample_dim)

        # Wrap it all up in a function which makes the right kind of
        # interpolator/extrapolator.
        # NB. This uses a closure to capture the values of src_points,
        # bounds_error, and extrapolation_mode.
        def interpolate(fx, new_x, **kwargs):
            # SciPy's interp1d needs float values, so if we're given
            # integer values, convert them to the smallest possible
            # float dtype that can accurately preserve the values.
            if fx.dtype.kind == 'i':
                fx = fx.astype(numpy.promote_types(fx.dtype, numpy.float16))
            x = src_points.astype(fx.dtype)
            interpolator = interp1d(x, fx, kind='linear',
                                    bounds_error=bounds_error, **kwargs)
            if extrapolation_mode == 'linear':
                interpolator = Linear1dExtrapolator(interpolator)
            new_fx = interpolator(numpy.array(new_x, dtype=fx.dtype))
            return new_fx

        # 2) Interpolate the data and produce our new Cube.
        data = interpolate(data, requested_points, axis=sample_dim, copy=False)
        new_cube = iris.cube.Cube(data)
        new_cube.metadata = cube.metadata

        # If requested_points is an array scalar then `new_cube` will
        # have one less dimension than `cube`. (The `sample_dim`
        # dimension will vanish.) In which case we build a mapping from
        # `cube` dimensions to `new_cube` dimensions.
        dim_mapping = None
        if new_cube.ndim != cube.ndim:
            dim_mapping = {i: i for i in range(sample_dim)}
            dim_mapping[sample_dim] = None
            for i in range(sample_dim + 1, cube.ndim):
                dim_mapping[i] = i - 1

        # 2) Copy/interpolate the coordinates.
        for dim_coord in cube.dim_coords:
            dims = cube.coord_dims(dim_coord)
            if sample_dim in dims:
                new_coord = _resample_coord(dim_coord, src_coord, direction,
                                            requested_points, interpolate)
            else:
                new_coord = dim_coord.copy()
            if dim_mapping:
                dims = [dim_mapping[dim] for dim in dims
                            if dim_mapping[dim] is not None]
            if isinstance(new_coord, iris.coords.DimCoord) and dims:
                new_cube.add_dim_coord(new_coord, dims)
            else:
                new_cube.add_aux_coord(new_coord, dims)

        for coord in cube.aux_coords:
            dims = cube.coord_dims(coord)
            if sample_dim in dims:
                new_coord = _resample_coord(coord, src_coord, direction,
                                            requested_points, interpolate)
            else:
                new_coord = coord.copy()
            if dim_mapping:
                dims = [dim_mapping[dim] for dim in dims
                            if dim_mapping[dim] is not None]
            new_cube.add_aux_coord(new_coord, dims)

        return new_cube
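
The core of the single-coordinate branch is SciPy's interp1d applied along the sample dimension; a small hedged sketch of that call in isolation:

import numpy as np
from scipy.interpolate import interp1d

x = np.array([0.0, 1.0, 2.0, 3.0])            # monotonic source coordinate
data = np.array([[0.0, 1.0, 2.0, 3.0],
                 [0.0, 2.0, 4.0, 6.0]])        # interpolate along axis 1
f = interp1d(x, data, kind='linear', axis=1, bounds_error=True)
print(f(np.array([0.5, 2.5])))
# [[0.5 2.5]
#  [1.  5. ]]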
Example #24
def _get_xy_coords(cube):
    """
    Return the x and y coordinates from a cube.

    This function will preferentially return a pair of dimension
    coordinates (if there are more than one potential x or y dimension
    coordinates a ValueError will be raised). If the cube does not have
    a pair of x and y dimension coordinates it will return 1D auxiliary
    coordinates (including scalars). If there is not one and only one set
    of x and y auxiliary coordinates a ValueError will be raised.

    Having identified the x and y coordinates, the function checks that they
    have equal coordinate systems and that they do not occupy the same
    dimension on the cube.

    Args:

    * cube:
        An instance of :class:`iris.cube.Cube`.

    Returns:
        A tuple containing the cube's x and y coordinates.

    """
    # Look for a suitable dimension coords first.
    x_coords = cube.coords(axis='x', dim_coords=True)
    if not x_coords:
        # If there is no x coord in dim_coords look for scalars or
        # monotonic coords in aux_coords.
        x_coords = [
            coord for coord in cube.coords(axis='x', dim_coords=False)
            if coord.ndim == 1 and coord.is_monotonic()
        ]
    if len(x_coords) != 1:
        raise ValueError('Cube {!r} must contain a single 1D x '
                         'coordinate.'.format(cube.name()))
    x_coord = x_coords[0]

    # Look for a suitable dimension coords first.
    y_coords = cube.coords(axis='y', dim_coords=True)
    if not y_coords:
        # If there is no y coord in dim_coords look for scalars or
        # monotonic coords in aux_coords.
        y_coords = [
            coord for coord in cube.coords(axis='y', dim_coords=False)
            if coord.ndim == 1 and coord.is_monotonic()
        ]
    if len(y_coords) != 1:
        raise ValueError('Cube {!r} must contain a single 1D y '
                         'coordinate.'.format(cube.name()))
    y_coord = y_coords[0]

    if x_coord.coord_system != y_coord.coord_system:
        raise ValueError("The cube's x ({!r}) and y ({!r}) "
                         "coordinates must have the same coordinate "
                         "system.".format(x_coord.name(), y_coord.name()))

    # The x and y coordinates must describe different dimensions
    # or be scalar coords.
    x_dims = cube.coord_dims(x_coord)
    x_dim = None
    if x_dims:
        x_dim = x_dims[0]

    y_dims = cube.coord_dims(y_coord)
    y_dim = None
    if y_dims:
        y_dim = y_dims[0]

    if x_dim is not None and y_dim == x_dim:
        raise ValueError("The cube's x and y coords must not describe the "
                         "same data dimension.")

    return x_coord, y_coord
Example #25
def _multiply_divide_common(operation_function, operation_symbol, operation_noun,
                            cube, other, dim=None, update_history=True):
    """
    Function which shares common code between multiplication and division of cubes.

    operation_function   - function which does the operation (e.g. numpy.divide)
    operation_symbol     - the textual symbol of the operation (e.g. '/')
    operation_noun       - the noun of the operation (e.g. 'division')
    operation_past_tense - the past tense of the operation (e.g. 'divided')

    .. seealso:: For information on the dim keyword argument see :func:`multiply`.

    """
    if not isinstance(cube, iris.cube.Cube):
        raise TypeError('The "cube" argument must be an instance of iris.Cube.')

    if isinstance(other, (int, float)):
        other = np.array(other)

    other_unit = None
    history = None

    if isinstance(other, np.ndarray):
        _assert_compatible(cube, other)

        copy_cube = cube.copy(data=operation_function(cube.data, other))

        if update_history:
            if other.ndim == 0:
                history = '%s %s %s' % (cube.name(), operation_symbol, other)
            else:
                history = '%s %s array' % (cube.name(), operation_symbol)

        other_unit = '1'
    elif isinstance(other, iris.coords.Coord):
        # Deal with cube multiplication/division by coordinate

        # What dimension are we processing?
        data_dimension = None
        if dim is not None:
            # Ensure the given dim matches the coord
            if other in cube.coords() and cube.coord_dims(other) != [dim]:
                raise ValueError("dim provided does not match dim found for coord")
            data_dimension = dim
        else:
            # Try and get a coord dim
            if other.shape != (1,):
                try:
                    coord_dims = cube.coord_dims(other)
                    data_dimension = coord_dims[0] if coord_dims else None
                except iris.exceptions.CoordinateNotFoundError:
                    raise ValueError("Could not determine dimension for mul/div. Use mul(coord, dim=dim)")

        if other.ndim != 1:
            raise iris.exceptions.CoordinateMultiDimError(other)

        if other.has_bounds():
            warnings.warn('%s by a bounded coordinate not well defined, ignoring bounds.' % operation_noun)

        points = other.points

        # If the axis is defined then shape the provided points so that we can do the
        # division (this is needed as there is no "axis" keyword to numpy's divide/multiply)
        if data_dimension is not None:
            points_shape = [1] * cube.data.ndim
            points_shape[data_dimension] = -1
            points = points.reshape(points_shape)

        copy_cube = cube.copy(data=operation_function(cube.data, points))

        if update_history:
            history = '%s %s %s' % (cube.name(), operation_symbol, other.name())

        other_unit = other.units
    elif isinstance(other, iris.cube.Cube):
        # Deal with cube multiplication/division by cube
        copy_cube = cube.copy(data=operation_function(cube.data, other.data))

        if update_history:
            history = '%s %s %s' % (cube.name() or 'unknown', operation_symbol,
                                    other.name() or 'unknown')

        other_unit = other.units
    else:
        return NotImplemented

    # Update the units
    if operation_function == np.multiply:
        copy_cube.units = cube.units * other_unit
    elif operation_function == np.divide:
        copy_cube.units = cube.units / other_unit

    iris.analysis.clear_phenomenon_identity(copy_cube)

    if history is not None:
        copy_cube.add_history(history)

    return copy_cube
Example #26
 def get_span(coord):
     if isinstance(coord, int):
         span = set([coord])
     else:
         span = set(cube.coord_dims(coord))
     return span
Example #27
def differentiate(cube, coord_to_differentiate):
    r"""
    Calculate the differential of a given cube with respect to the
    coord_to_differentiate.

    Args:

    * coord_to_differentiate:
        Either a Coord instance or the unique name of a coordinate which
        exists in the cube.
        If a Coord instance is provided, it does not necessarily have to
        exist on the cube.

    Example usage::

        u_wind_acceleration = differentiate(u_wind_cube, 'forecast_time')

    The algorithm used is equivalent to:

    .. math::

        d_i = \frac{v_{i+1}-v_i}{c_{i+1}-c_i}

    Where ``d`` is the differential, ``v`` is the data value, ``c`` is
    the coordinate value and ``i`` is the index in the differential
    direction. Hence, in a normal situation if a cube has a shape
    (x: n; y: m) differentiating with respect to x will result in a cube
    of shape (x: n-1; y: m) and differentiating with respect to y will
    result in (x: n; y: m-1). If the coordinate to differentiate is
    :attr:`circular <iris.coords.DimCoord.circular>` then the resultant
    shape will be the same as the input cube.

    In the returned cube the `coord_to_differentiate` object is
    redefined such that the output coordinate values are set to the
    averages of the original coordinate values (i.e. the mid-points).
    Similarly, the output lower bounds values are set to the averages of
    the original lower bounds values and the output upper bounds values
    are set to the averages of the original upper bounds values. In more
    formal terms:

    * `C[i] = (c[i] + c[i+1]) / 2`
    * `B[i, 0] = (b[i, 0] + b[i+1, 0]) / 2`
    * `B[i, 1] = (b[i, 1] + b[i+1, 1]) / 2`

    where `c` and `b` represent the input coordinate values and bounds,
    and `C` and `B` the output coordinate values and bounds.

    .. note:: Difference method used is the same as :func:`cube_delta`
    and therefore has the same limitations.

    .. note:: Spherical differentiation does not occur in this routine.

    """
    # Get the delta cube in the required differential direction.
    # This operation results in a copy of the original cube.
    delta_cube = cube_delta(cube, coord_to_differentiate)

    if isinstance(coord_to_differentiate, basestring):
        coord = cube.coord(coord_to_differentiate)
    else:
        coord = coord_to_differentiate

    delta_coord = _construct_delta_coord(coord)
    delta_dim = cube.coord_dims(coord.name())[0]

    # calculate delta_cube / delta_coord to give the differential.
    delta_cube = iris.analysis.maths.divide(delta_cube, delta_coord, delta_dim)

    # Update the standard name
    delta_cube.rename('derivative_of_{}_wrt_{}'.format(cube.name(),
                                                       coord.name()))
    return delta_cube
Example #28
def _get_plot_defn(cube, mode, ndims=2):
    """
    Return data and plot-axis coords given a cube & a mode of either
    POINT_MODE or BOUND_MODE.

    """
    if cube.ndim != ndims:
        msg = 'Cube must be %s-dimensional. Got %s dimensions.'
        raise ValueError(msg % (ndims, cube.ndim))

    # Start by taking the DimCoords from each dimension.
    coords = [None] * ndims
    for dim_coord in cube.dim_coords:
        dim = cube.coord_dims(dim_coord)[0]
        coords[dim] = dim_coord

    # When appropriate, restrict to 1D with bounds.
    if mode == iris.coords.BOUND_MODE:
        coords = list(map(_valid_bound_coord, coords))

    def guess_axis(coord):
        axis = None
        if coord is not None:
            axis = iris.util.guess_coord_axis(coord)
        return axis

    # Allow DimCoords in aux_coords to fill in for missing dim_coords.
    for dim, coord in enumerate(coords):
        if coord is None:
            aux_coords = cube.coords(dimensions=dim)
            aux_coords = [
                coord for coord in aux_coords
                if isinstance(coord, iris.coords.DimCoord)
            ]
            if aux_coords:
                aux_coords.sort(key=lambda coord: coord._as_defn())
                coords[dim] = aux_coords[0]

    if mode == iris.coords.POINT_MODE:
        # Allow multi-dimensional aux_coords to override the dim_coords
        # along the Z axis. This results in a preference for using the
        # derived altitude over model_level_number or level_height.
        # Limit to Z axis to avoid preferring latitude over grid_latitude etc.
        axes = list(map(guess_axis, coords))
        axis = 'Z'
        if axis in axes:
            for coord in cube.coords(dim_coords=False):
                if max(coord.shape) > 1 and \
                        iris.util.guess_coord_axis(coord) == axis:
                    coords[axes.index(axis)] = coord

    # Re-order the coordinates to achieve the preferred
    # horizontal/vertical associations.
    def sort_key(coord):
        order = {'X': 2, 'T': 1, 'Y': -1, 'Z': -2}
        axis = guess_axis(coord)
        return (order.get(axis, 0), coord and coord.name())

    sorted_coords = sorted(coords, key=sort_key)

    transpose = (sorted_coords != coords)
    return PlotDefn(sorted_coords, transpose)
Example #29
def linear(cube, sample_points, extrapolation_mode='linear'):
    """
    Return a cube of the linearly interpolated points given the desired
    sample points.

    Given a list of tuple pairs mapping coordinates (or coordinate names)
    to their desired values, return a cube with linearly interpolated values.
    If more than one coordinate is specified, the linear interpolation will be
    carried out in sequence, thus providing n-linear interpolation
    (bi-linear, tri-linear, etc.).

    If the input cube's data is masked, the result cube will have a data
    mask interpolated to the new sample points.

    .. testsetup::

        import numpy as np

    For example:

        >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
        >>> sample_points = [('latitude', np.linspace(-90, 90, 10)),
        ...                  ('longitude', np.linspace(-180, 180, 20))]
        >>> iris.analysis.interpolate.linear(cube, sample_points)
        <iris 'Cube' of air_temperature / (K) (latitude: 10; longitude: 20)>

    .. note::

        By definition, linear interpolation requires all coordinates to
        be 1-dimensional.

    .. note::

        If a specified coordinate is single valued its value will be
        extrapolated to the desired sample points by assuming a gradient of
        zero.

    Args:

    * cube
        The cube to be interpolated.

    * sample_points
        List of one or more tuple pairs mapping coordinate to desired
        points to interpolate. Points may be a scalar or a numpy array
        of values.  Multi-dimensional coordinates are not supported.

    Kwargs:

    * extrapolation_mode - string - one of 'linear', 'nan' or 'error'

        * If 'linear' the point will be calculated by extending the
          gradient of the closest two points.
        * If 'nan' any extrapolated points will be set to NaN.
        * If 'error' a ValueError will be raised, notifying of the
          attempted extrapolation.

    .. note::

        If the source cube's data, or any of its resampled coordinates,
        have an integer data type they will be promoted to a floating
        point data type in the result.

    """
    if not isinstance(cube, iris.cube.Cube):
        raise ValueError('Expecting a cube instance, got %s' % type(cube))

    if isinstance(sample_points, dict):
        warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
        sample_points = sample_points.items()

    # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
    if sample_points and not (isinstance(sample_points[0], collections.Container) and not isinstance(sample_points[0], basestring)):
        raise TypeError('Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.' % type(sample_points[0]))

    points = []
    for (coord, values) in sample_points:
        if isinstance(coord, basestring):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord=coord)
        points.append((coord, values))
    sample_points = points

    if len(sample_points) == 0:
        raise ValueError('Expecting a non-empty list of coord value pairs, got %r.' % sample_points)

    if cube.data.dtype.kind == 'i':
        raise ValueError("Cannot linearly interpolate a cube which has integer type data. Consider casting the "
                         "cube's data to floating points in order to continue.")

    bounds_error = (extrapolation_mode == 'error')

    # Handle an over-specified sample_points list, or a specification which does not describe a data dimension
    data_dimensions_requested = []
    for coord, values in sample_points:
        if coord.ndim > 1:
            raise ValueError('Cannot linearly interpolate over {!r} as it is'
                             ' multi-dimensional.'.format(coord.name()))
        data_dim = cube.coord_dims(coord)
        if not data_dim:
            raise ValueError('Requested a point over a coordinate which does'
                             ' not describe a dimension: {!r}.'.format(
                                 coord.name()))
        else:
            data_dim = data_dim[0]
        if data_dim in data_dimensions_requested:
            raise ValueError('Requested a point which over specifies a'
                             ' dimension: {!r}. '.format(coord.name()))
        data_dimensions_requested.append(data_dim)

    # Iterate over all of the requested sample coordinates, calling this routine once per coordinate.
    if len(sample_points) > 1:
        result = cube
        for coord, cells in sample_points:
            result = linear(result, [(coord, cells)], extrapolation_mode=extrapolation_mode)
        return result

    else:
        # Now we must be down to a single sample coordinate and its
        # values.
        src_coord, requested_points = sample_points[0]
        sample_values = np.array(requested_points)

        # 1) Define the interpolation characteristics.

        # Get the sample dimension (which we have already tested is not None)
        sample_dim = cube.coord_dims(src_coord)[0]

        # Construct source data & source coordinate values suitable for
        # SciPy's interp1d.
        if getattr(src_coord, 'circular', False):
            coord_slice_in_cube = [slice(None, None)] * cube.ndim
            coord_slice_in_cube[sample_dim] = slice(0, 1)
            modulus = np.array(src_coord.units.modulus or 0,
                               dtype=src_coord.dtype)
            src_points = np.append(src_coord.points,
                                   src_coord.points[0] + modulus)

            # TODO: Restore this code after resolution of the following issue:
            # https://github.com/numpy/numpy/issues/478
#            data = np.append(cube.data,
#                             cube.data[tuple(coord_slice_in_cube)],
#                             axis=sample_dim)
            # This is the alternative, temporary workaround.
            # It doesn't use append on an nD mask.
            if (not isinstance(cube.data, ma.MaskedArray) or
                not isinstance(cube.data.mask, np.ndarray) or
                len(cube.data.mask.shape) == 0):
                data = np.append(cube.data,
                                 cube.data[tuple(coord_slice_in_cube)],
                                 axis=sample_dim)
            else:
                new_data = np.append(cube.data.data,
                                     cube.data.data[tuple(coord_slice_in_cube)],
                                     axis=sample_dim)
                new_mask = np.append(cube.data.mask,
                                     cube.data.mask[tuple(coord_slice_in_cube)],
                                     axis=sample_dim)
                data = ma.array(new_data, mask=new_mask)
        else:
            src_points = src_coord.points
            data = cube.data

        # Map all the requested values into the range of the source
        # data (centred on the source data to allow extrapolation
        # where required).
        src_axis = iris.util.guess_coord_axis(src_coord)
        if src_axis == 'X' and src_coord.units.modulus:
            modulus = src_coord.units.modulus
            offset = (src_points.max() + src_points.min() - modulus) * 0.5
            sample_values = ((sample_values - offset) % modulus) + offset

        if len(src_points) == 1:
            if extrapolation_mode == 'error' and \
                    np.any(sample_values != src_points):
                raise ValueError('Attempting to extrapolate from a single '
                                 'point with extrapolation mode set '
                                 'to {!r}.'.format(extrapolation_mode))
            direction = 0

            def interpolate(fx, new_x, axis=None, **kwargs):
                # All kwargs other than axis are ignored.
                if axis is None:
                    axis = -1
                new_x = np.array(new_x)
                new_shape = list(fx.shape)
                new_shape[axis] = new_x.size
                fx = np.broadcast_arrays(fx, np.empty(new_shape))[0].copy()
                if extrapolation_mode == 'nan':
                    indices = [slice(None)] * fx.ndim
                    indices[axis] = new_x != src_points
                    fx[tuple(indices)] = np.nan
                # If new_x is a scalar, then remove the dimension from fx.
                if not new_x.shape:
                    del new_shape[axis]
                    fx.shape = new_shape
                return fx
        else:
            monotonic, direction = iris.util.monotonic(src_points,
                                                       return_direction=True)
            if not monotonic:
                raise ValueError('Unable to linearly interpolate this '
                                 'cube as the coordinate {!r} is not '
                                 'monotonic'.format(src_coord.name()))

            # SciPy's interp1d requires monotonic increasing coord values.
            if direction == -1:
                src_points = iris.util.reverse(src_points, axes=0)
                data = iris.util.reverse(data, axes=sample_dim)

            # Wrap it all up in a function which makes the right kind of
            # interpolator/extrapolator.
            # NB. This uses a closure to capture the values of src_points,
            # bounds_error, and extrapolation_mode.
            def interpolate(fx, new_x, **kwargs):
                # SciPy's interp1d needs float values, so if we're given
                # integer values, convert them to the smallest possible
                # float dtype that can accurately preserve the values.
                if fx.dtype.kind == 'i':
                    fx = fx.astype(np.promote_types(fx.dtype, np.float16))
                x = src_points.astype(fx.dtype)
                interpolator = interp1d(x, fx, kind='linear',
                                        bounds_error=bounds_error, **kwargs)
                if extrapolation_mode == 'linear':
                    interpolator = Linear1dExtrapolator(interpolator)
                new_fx = interpolator(np.array(new_x, dtype=fx.dtype))
                return new_fx

        # 2) Interpolate the data and produce our new Cube.
        if isinstance(data, ma.MaskedArray):
            # interpolate data, ignoring the mask
            new_data = interpolate(data.data, sample_values, axis=sample_dim,
                                   copy=False)
            # Mask out any results which contain a non-zero contribution
            # from a masked value when interpolated from mask cast as 1,0.
            mask_dataset = ma.getmaskarray(data).astype(float)
            new_mask = interpolate(mask_dataset, sample_values,
                                   axis=sample_dim, copy=False) > 0
            # create new_data masked array
            new_data = ma.MaskedArray(new_data, mask=new_mask)
        else:
            new_data = interpolate(data, sample_values, axis=sample_dim,
                                   copy=False)
        new_cube = iris.cube.Cube(new_data)
        new_cube.metadata = cube.metadata

        # If requested_points is an array scalar then `new_cube` will
        # have one less dimension than `cube`. (The `sample_dim`
        # dimension will vanish.) In which case we build a mapping from
        # `cube` dimensions to `new_cube` dimensions.
        dim_mapping = None
        if new_cube.ndim != cube.ndim:
            dim_mapping = {i: i for i in range(sample_dim)}
            dim_mapping[sample_dim] = None
            for i in range(sample_dim + 1, cube.ndim):
                dim_mapping[i] = i - 1

        # 3) Copy/interpolate the coordinates.
        for dim_coord in cube.dim_coords:
            dims = cube.coord_dims(dim_coord)
            if sample_dim in dims:
                new_coord = _resample_coord(dim_coord, src_coord, direction,
                                            requested_points, interpolate)
            else:
                new_coord = dim_coord.copy()
            if dim_mapping:
                dims = [dim_mapping[dim] for dim in dims
                            if dim_mapping[dim] is not None]
            if isinstance(new_coord, iris.coords.DimCoord) and dims:
                new_cube.add_dim_coord(new_coord, dims)
            else:
                new_cube.add_aux_coord(new_coord, dims)

        for coord in cube.aux_coords:
            dims = cube.coord_dims(coord)
            if sample_dim in dims:
                new_coord = _resample_coord(coord, src_coord, direction,
                                            requested_points, interpolate)
            else:
                new_coord = coord.copy()
            if dim_mapping:
                dims = [dim_mapping[dim] for dim in dims
                            if dim_mapping[dim] is not None]
            new_cube.add_aux_coord(new_coord, dims)

        return new_cube
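A short sketch of the scalar-point behaviour handled by the dim_mapping branch above; it assumes the function is iris.analysis.interpolate.linear and that a scalar sample value removes the interpolated dimension from the result:

import iris
import iris.analysis.interpolate as interp

cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))   # (latitude, longitude)
# A scalar latitude collapses that dimension, leaving a 1-d cube over longitude.
column = interp.linear(cube, [('latitude', 0.0)])
print(cube.shape, '->', column.shape)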
Example #30
0
def nearest_neighbour_indices(cube, sample_points):
    """
    Returns the indices to select the data value(s) closest to the given coordinate point values.

    The sample_points mapping does not have to include coordinate values corresponding to all data
    dimensions. Any unspecified dimensions will default to a full slice.

    For example:

        >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
        >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0), ('longitude', 10)])
        (slice(None, None, None), 9, 12)
        >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0)])
        (slice(None, None, None), 9, slice(None, None, None))

    Args:

    * cube:
        An :class:`iris.cube.Cube`.
    * sample_points
        A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.

    Returns:
        The tuple of indices which will select the point in the cube closest to the supplied coordinate values.

    .. note::

        Nearest neighbour interpolation of multidimensional coordinates is not
        yet supported.

    """
    if isinstance(sample_points, dict):
        warnings.warn(
            'Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.'
        )
        sample_points = sample_points.items()

    if sample_points:
        try:
            coord, values = sample_points[0]
        except ValueError:
            raise ValueError(
                'Sample points must be a list of (coordinate, value) pairs. Got %r.'
                % sample_points)

    points = []
    for coord, values in sample_points:
        if isinstance(coord, basestring):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord)
        points.append((coord, values))
    sample_points = points

    # Build up a list of indices to span the cube.
    indices = [slice(None, None)] * cube.ndim

    # Build up a dictionary which maps the cube's data dimensions to a list (which will later
    # be populated by coordinates in the sample points list)
    dim_to_coord_map = {}
    for i in range(cube.ndim):
        dim_to_coord_map[i] = []

    # Iterate over all of the specifications provided by sample_points
    for coord, point in sample_points:
        data_dim = cube.coord_dims(coord)

        # If no data dimension then we don't need to make any modifications to indices.
        if not data_dim:
            continue
        elif len(data_dim) > 1:
            raise iris.exceptions.CoordinateMultiDimError(
                "Nearest neighbour interpolation of multidimensional "
                "coordinates is not supported.")
        data_dim = data_dim[0]

        dim_to_coord_map[data_dim].append(coord)

        # Calculate the nearest neighbour index.
        min_index = coord.nearest_neighbour_index(point)

        if getattr(coord, 'circular', False):
            warnings.warn(
                "Nearest neighbour on a circular coordinate may not be picking the nearest point.",
                DeprecationWarning)

        # If an index has already been chosen for this dimension then check that the index from this
        # coordinate agrees with it; otherwise we have a contradictory specification.
        if indices[data_dim] != slice(None,
                                      None) and min_index != indices[data_dim]:
            raise ValueError(
                'The coordinates provided (%s) over specify dimension %s.' %
                (', '.join(
                    [coord.name()
                     for coord in dim_to_coord_map[data_dim]]), data_dim))

        indices[data_dim] = min_index

    return tuple(indices)
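A brief follow-on to the doctest in the docstring, assuming this is iris.analysis.interpolate.nearest_neighbour_indices: the returned tuple can be used directly as an index into the cube or its data:

import iris
import iris.analysis.interpolate as interp

cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
indices = interp.nearest_neighbour_indices(
    cube, [('latitude', 0), ('longitude', 10)])
nearest_cube = cube[indices]        # scalar cube at the nearest grid point
nearest_value = cube.data[indices]
print(indices, float(nearest_value))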
Example #31
0
def linear(cube, sample_points, extrapolation_mode='linear'):
    """
    Return a cube of the linearly interpolated points given the desired
    sample points.

    Given a list of tuple pairs mapping coordinates (or coordinate names)
    to their desired values, return a cube with linearly interpolated values.
    If more than one coordinate is specified, the linear interpolation will be
    carried out in sequence, thus providing n-linear interpolation
    (bi-linear, tri-linear, etc.).

    If the input cube's data is masked, the result cube will have a data
    mask interpolated to the new sample points.

    .. testsetup::

        import numpy as np

    For example:

        >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
        >>> sample_points = [('latitude', np.linspace(-90, 90, 10)),
        ...                  ('longitude', np.linspace(-180, 180, 20))]
        >>> iris.analysis.interpolate.linear(cube, sample_points)
        <iris 'Cube' of air_temperature / (K) (latitude: 10; longitude: 20)>

    .. note::

        By definition, linear interpolation requires all coordinates to
        be 1-dimensional.

    .. note::

        If a specified coordinate is single valued its value will be
        extrapolated to the desired sample points by assuming a gradient of
        zero.

    Args:

    * cube
        The cube to be interpolated.

    * sample_points
        List of one or more tuple pairs mapping coordinate to desired
        points to interpolate. Points may be a scalar or a numpy array
        of values.  Multi-dimensional coordinates are not supported.

    Kwargs:

    * extrapolation_mode - string - one of 'linear', 'nan' or 'error'

        * If 'linear' the point will be calculated by extending the
          gradient of the closest two points.
        * If 'nan' any extrapolated points will be set to NaN.
        * If 'error' a ValueError will be raised, notifying of the
          attempted extrapolation.

    .. note::

        If the source cube's data, or any of its resampled coordinates,
        have an integer data type they will be promoted to a floating
        point data type in the result.

    """
    if not isinstance(cube, iris.cube.Cube):
        raise ValueError('Expecting a cube instance, got %s' % type(cube))

    if isinstance(sample_points, dict):
        warnings.warn(
            'Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.'
        )
        sample_points = sample_points.items()

    # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
    if sample_points and not (
            isinstance(sample_points[0], collections.Container)
            and not isinstance(sample_points[0], basestring)):
        raise TypeError(
            'Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.'
            % type(sample_points[0]))

    points = []
    for (coord, values) in sample_points:
        if isinstance(coord, basestring):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord)
        points.append((coord, values))
    sample_points = points

    if len(sample_points) == 0:
        raise ValueError(
            'Expecting a non-empty list of coord value pairs, got %r.' %
            sample_points)

    if cube.data.dtype.kind == 'i':
        raise ValueError(
            "Cannot linearly interpolate a cube which has integer type data. Consider casting the "
            "cube's data to floating points in order to continue.")

    bounds_error = (extrapolation_mode == 'error')

    # Handle an over-specified sample_points list, or a specification which does not describe a data dimension
    data_dimensions_requested = []
    for coord, values in sample_points:
        if coord.ndim > 1:
            raise ValueError('Cannot linearly interpolate over {!r} as it is'
                             ' multi-dimensional.'.format(coord.name()))
        data_dim = cube.coord_dims(coord)
        if not data_dim:
            raise ValueError('Requested a point over a coordinate which does'
                             ' not describe a dimension: {!r}.'.format(
                                 coord.name()))
        else:
            data_dim = data_dim[0]
        if data_dim in data_dimensions_requested:
            raise ValueError('Requested a point which over specifies a'
                             ' dimension: {!r}. '.format(coord.name()))
        data_dimensions_requested.append(data_dim)

    # Iterate over all of the requested sample coordinates, calling this routine once per coordinate.
    if len(sample_points) > 1:
        result = cube
        for coord, cells in sample_points:
            result = linear(result, [(coord, cells)],
                            extrapolation_mode=extrapolation_mode)
        return result

    else:
        # Now we must be down to a single sample coordinate and its
        # values.
        src_coord, requested_points = sample_points[0]
        sample_values = np.array(requested_points)

        # 1) Define the interpolation characteristics.

        # Get the sample dimension (which we have already tested is not None)
        sample_dim = cube.coord_dims(src_coord)[0]

        # Construct source data & source coordinate values suitable for
        # SciPy's interp1d.
        if getattr(src_coord, 'circular', False):
            src_points, data = _extend_circular_coord_and_data(
                src_coord, cube.data, sample_dim)
        else:
            src_points = src_coord.points
            data = cube.data

        # Map all the requested values into the range of the source
        # data (centred on the source data to allow extrapolation
        # where required).
        src_axis = iris.util.guess_coord_axis(src_coord)
        if src_axis == 'X' and src_coord.units.modulus:
            modulus = src_coord.units.modulus
            offset = (src_points.max() + src_points.min() - modulus) * 0.5
            sample_values = ((sample_values - offset) % modulus) + offset

        if len(src_points) == 1:
            if extrapolation_mode == 'error' and \
                    np.any(sample_values != src_points):
                raise ValueError('Attempting to extrapolate from a single '
                                 'point with extrapolation mode set '
                                 'to {!r}.'.format(extrapolation_mode))
            direction = 0

            def interpolate(fx, new_x, axis=None, **kwargs):
                # All kwargs other than axis are ignored.
                if axis is None:
                    axis = -1
                new_x = np.array(new_x)
                new_shape = list(fx.shape)
                new_shape[axis] = new_x.size
                fx = np.broadcast_arrays(fx, np.empty(new_shape))[0].copy()
                if extrapolation_mode == 'nan':
                    indices = [slice(None)] * fx.ndim
                    indices[axis] = new_x != src_points
                    fx[tuple(indices)] = np.nan
                # If new_x is a scalar, then remove the dimension from fx.
                if not new_x.shape:
                    del new_shape[axis]
                    fx.shape = new_shape
                return fx
        else:
            monotonic, direction = iris.util.monotonic(src_points,
                                                       return_direction=True)
            if not monotonic:
                raise ValueError('Unable to linearly interpolate this '
                                 'cube as the coordinate {!r} is not '
                                 'monotonic'.format(src_coord.name()))

            # SciPy's interp1d requires monotonic increasing coord values.
            if direction == -1:
                src_points = iris.util.reverse(src_points, axes=0)
                data = iris.util.reverse(data, axes=sample_dim)

            # Wrap it all up in a function which makes the right kind of
            # interpolator/extrapolator.
            # NB. This uses a closure to capture the values of src_points,
            # bounds_error, and extrapolation_mode.
            def interpolate(fx, new_x, **kwargs):
                # SciPy's interp1d needs float values, so if we're given
                # integer values, convert them to the smallest possible
                # float dtype that can accurately preserve the values.
                if fx.dtype.kind == 'i':
                    fx = fx.astype(np.promote_types(fx.dtype, np.float16))
                x = src_points.astype(fx.dtype)
                interpolator = interp1d(x,
                                        fx,
                                        kind='linear',
                                        bounds_error=bounds_error,
                                        **kwargs)
                if extrapolation_mode == 'linear':
                    interpolator = Linear1dExtrapolator(interpolator)
                new_fx = interpolator(np.array(new_x, dtype=fx.dtype))
                return new_fx

        # 2) Interpolate the data and produce our new Cube.
        if isinstance(data, ma.MaskedArray):
            # interpolate data, ignoring the mask
            new_data = interpolate(data.data,
                                   sample_values,
                                   axis=sample_dim,
                                   copy=False)
            # Mask out any results which contain a non-zero contribution
            # from a masked value when interpolated from mask cast as 1,0.
            mask_dataset = ma.getmaskarray(data).astype(float)
            new_mask = interpolate(
                mask_dataset, sample_values, axis=sample_dim, copy=False) > 0
            # create new_data masked array
            new_data = ma.MaskedArray(new_data, mask=new_mask)
        else:
            new_data = interpolate(data,
                                   sample_values,
                                   axis=sample_dim,
                                   copy=False)
        new_cube = iris.cube.Cube(new_data)
        new_cube.metadata = cube.metadata

        # If requested_points is an array scalar then `new_cube` will
        # have one less dimension than `cube`. (The `sample_dim`
        # dimension will vanish.) In which case we build a mapping from
        # `cube` dimensions to `new_cube` dimensions.
        dim_mapping = None
        if new_cube.ndim != cube.ndim:
            dim_mapping = {i: i for i in range(sample_dim)}
            dim_mapping[sample_dim] = None
            for i in range(sample_dim + 1, cube.ndim):
                dim_mapping[i] = i - 1

        # 3) Copy/interpolate the coordinates.
        for dim_coord in cube.dim_coords:
            dims = cube.coord_dims(dim_coord)
            if sample_dim in dims:
                new_coord = _resample_coord(dim_coord, src_coord, direction,
                                            requested_points, interpolate)
            else:
                new_coord = dim_coord.copy()
            if dim_mapping:
                dims = [
                    dim_mapping[dim] for dim in dims
                    if dim_mapping[dim] is not None
                ]
            if isinstance(new_coord, iris.coords.DimCoord) and dims:
                new_cube.add_dim_coord(new_coord, dims)
            else:
                new_cube.add_aux_coord(new_coord, dims)

        for coord in cube.aux_coords:
            dims = cube.coord_dims(coord)
            if sample_dim in dims:
                new_coord = _resample_coord(coord, src_coord, direction,
                                            requested_points, interpolate)
            else:
                new_coord = coord.copy()
            if dim_mapping:
                dims = [
                    dim_mapping[dim] for dim in dims
                    if dim_mapping[dim] is not None
                ]
            new_cube.add_aux_coord(new_coord, dims)

        return new_cube
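A sketch of the three extrapolation_mode options listed in the docstring, again assuming the public name iris.analysis.interpolate.linear; -100 degrees latitude lies outside the grid, so each mode reacts differently:

import numpy as np
import iris
import iris.analysis.interpolate as interp

cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
points = [('latitude', np.array([-100.0, 0.0]))]

extended = interp.linear(cube, points)                              # gradient extended
with_nans = interp.linear(cube, points, extrapolation_mode='nan')   # NaN outside grid
try:
    interp.linear(cube, points, extrapolation_mode='error')
except ValueError as err:
    print('extrapolation rejected:', err)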
Example #32
0
def _nearest_neighbour_indices_ndcoords(cube, sample_point, cache=None):
    """
    See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.

    This function is adapted for points sampling a multi-dimensional coord,
    and can currently only do nearest neighbour interpolation.

    Because this function can be slow for multidimensional coordinates,
    a 'cache' dictionary can be provided by the calling code.

    """

    # Developer notes:
    # A "sample space cube" is made which only has the coords and dims we are sampling on.
    # We get the nearest neighbour using this sample space cube.

    if isinstance(sample_point, dict):
        warnings.warn(
            'Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.'
        )
        sample_point = sample_point.items()

    if sample_point:
        try:
            coord, value = sample_point[0]
        except ValueError:
            raise ValueError(
                'Sample points must be a list of (coordinate, value) pairs. Got %r.'
                % sample_point)

    # Convert names to coords in sample_point
    point = []
    ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
    for coord, value in sample_point:
        if isinstance(coord, basestring):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord)
        if id(coord) not in ok_coord_ids:
            msg = ('Invalid sample coordinate {!r}: derived coordinates are'
                   ' not allowed.'.format(coord.name()))
            raise ValueError(msg)
        point.append((coord, value))

    # Reformat sample_point for use in _cartesian_sample_points(), below.
    sample_point = np.array([[value] for coord, value in point])
    sample_point_coords = [coord for coord, value in point]
    sample_point_coord_names = [coord.name() for coord, value in point]

    # Which dims are we sampling?
    sample_dims = set()
    for coord in sample_point_coords:
        for dim in cube.coord_dims(coord):
            sample_dims.add(dim)
    sample_dims = sorted(list(sample_dims))

    # Extract a sub cube that lives in just the sampling space.
    sample_space_slice = [0] * cube.ndim
    for sample_dim in sample_dims:
        sample_space_slice[sample_dim] = slice(None, None)
    sample_space_slice = tuple(sample_space_slice)
    sample_space_cube = cube[sample_space_slice]

    # ...with just the sampling coords.
    for coord in sample_space_cube.coords():
        if not coord.name() in sample_point_coord_names:
            sample_space_cube.remove_coord(coord)

    # Order the sample point coords according to the sample space cube coords
    sample_space_coord_names = [
        coord.name() for coord in sample_space_cube.coords()
    ]
    new_order = [
        sample_space_coord_names.index(name)
        for name in sample_point_coord_names
    ]
    sample_point = np.array([sample_point[i] for i in new_order])
    sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]

    # Convert the sample point to cartesian coords.
    # If there is no latlon within the coordinate there will be no change.
    # Otherwise, geographic latlon is replaced with cartesian xyz.
    cartesian_sample_point = _cartesian_sample_points(
        sample_point, sample_point_coord_names)[0]

    sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
    sample_space_coords_and_dims = [(coord,
                                     sample_space_cube.coord_dims(coord))
                                    for coord in sample_space_coords]

    if cache is not None and cube in cache:
        kdtree = cache[cube]
    else:
        # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
        sample_space_data_positions = np.empty(
            (len(sample_space_coords_and_dims), sample_space_cube.data.size),
            dtype=float)
        for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
            for c, (coord,
                    coord_dims) in enumerate(sample_space_coords_and_dims):
                # Index of this datum along this coordinate (could be nD).
                keys = tuple(ndi[ind]
                             for ind in coord_dims) if coord_dims else slice(
                                 None, None)
                # Position of this datum along this coordinate.
                sample_space_data_positions[c][d] = coord.points[keys]

        # Convert to cartesian coordinates. Flatten for kdtree compatibility.
        cartesian_space_data_coords = _cartesian_sample_points(
            sample_space_data_positions, sample_point_coord_names)

        # Get the nearest datum index to the sample point. This is the goal of the function.
        kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)

    cartesian_distance, datum_index = kdtree.query(cartesian_sample_point)
    sample_space_ndi = np.unravel_index(datum_index,
                                        sample_space_cube.data.shape)

    # Turn sample_space_ndi into a main cube slice.
    # Map sample cube to main cube dims and leave the rest as a full slice.
    main_cube_slice = [slice(None, None)] * cube.ndim
    for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
        # Find the coord in the main cube
        main_coord = cube.coord(sample_coord.name())
        main_coord_dims = cube.coord_dims(main_coord)
        # Mark the nearest data index/indices with respect to this coord
        for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
            main_cube_slice[main_i] = sample_space_ndi[sample_i]

    # Update cache
    if cache is not None:
        cache[cube] = kdtree

    return tuple(main_cube_slice)
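A minimal sketch of the cache keyword described in the docstring (this is a private helper, so the call below is purely illustrative): the kd-tree built on the first call is reused on the second.

import iris

cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
cache = {}
sample = [('latitude', 0.0), ('longitude', 10.0)]

first = _nearest_neighbour_indices_ndcoords(cube, sample, cache=cache)
# `cube` is now a key in `cache`, so the kd-tree is not rebuilt here.
second = _nearest_neighbour_indices_ndcoords(cube, sample, cache=cache)
assert first == second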
Example #33
0
    def __init__(self, cube):
        """
        Represents the cube metadata and associated coordinate metadata that
        allows suitable cubes for concatenation to be identified.

        Args:

        * cube:
            The :class:`iris.cube.Cube` source-cube.

        """
        self.aux_coords_and_dims = []
        self.aux_metadata = []
        self.dim_coords = cube.dim_coords
        self.dim_metadata = []
        self.ndim = cube.ndim
        self.scalar_coords = []
        self.cell_measures_and_dims = []
        self.cm_metadata = []
        self.ancillary_variables_and_dims = []
        self.av_metadata = []
        self.dim_mapping = []

        # Determine whether there are any anonymous cube dimensions.
        covered = set(cube.coord_dims(coord)[0] for coord in self.dim_coords)
        self.anonymous = covered != set(range(self.ndim))

        self.defn = cube.metadata
        self.data_type = cube.dtype

        #
        # Collate the dimension coordinate metadata.
        #
        for ind, coord in enumerate(self.dim_coords):
            dims = cube.coord_dims(coord)
            metadata = _CoordMetaData(coord, dims)
            self.dim_metadata.append(metadata)
            self.dim_mapping.append(dims[0])

        #
        # Collate the auxiliary coordinate metadata and scalar coordinates.
        #
        axes = dict(T=0, Z=1, Y=2, X=3)

        # Coordinate sort function - by guessed coordinate axis, then
        # by coordinate name, then by dimensions, in ascending order.
        def key_func(coord):
            return (
                axes.get(guess_coord_axis(coord),
                         len(axes) + 1),
                coord.name(),
                cube.coord_dims(coord),
            )

        for coord in sorted(cube.aux_coords, key=key_func):
            dims = cube.coord_dims(coord)
            if dims:
                metadata = _CoordMetaData(coord, dims)
                self.aux_metadata.append(metadata)
                coord_and_dims = _CoordAndDims(coord, tuple(dims))
                self.aux_coords_and_dims.append(coord_and_dims)
            else:
                self.scalar_coords.append(coord)

        def meta_key_func(dm):
            return (dm.metadata, dm.cube_dims(cube))

        for cm in sorted(cube.cell_measures(), key=meta_key_func):
            dims = cube.cell_measure_dims(cm)
            metadata = _OtherMetaData(cm, dims)
            self.cm_metadata.append(metadata)
            cm_and_dims = _CoordAndDims(cm, tuple(dims))
            self.cell_measures_and_dims.append(cm_and_dims)

        for av in sorted(cube.ancillary_variables(), key=meta_key_func):
            dims = cube.ancillary_variable_dims(av)
            metadata = _OtherMetaData(av, dims)
            self.av_metadata.append(metadata)
            av_and_dims = _CoordAndDims(av, tuple(dims))
            self.ancillary_variables_and_dims.append(av_and_dims)
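A standalone illustration of the axis-based ordering that key_func applies above: coordinates are ranked T, Z, Y, X, with anything unrecognised sorting last.

import iris.coords
from iris.util import guess_coord_axis

axes = dict(T=0, Z=1, Y=2, X=3)
coords = [
    iris.coords.AuxCoord([0.0], standard_name='longitude', units='degrees'),
    iris.coords.AuxCoord([0.0], standard_name='time',
                         units='hours since 1970-01-01'),
    iris.coords.AuxCoord([0.0], standard_name='latitude', units='degrees'),
]
ordered = sorted(coords,
                 key=lambda coord: axes.get(guess_coord_axis(coord),
                                            len(axes) + 1))
print([coord.name() for coord in ordered])   # ['time', 'latitude', 'longitude']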
Example #34
0
    def _extract_coord_payload(self, cube):
        """
        Extract all relevant coordinate data and metadata from the cube.

        In particular, for each scalar coordinate determine its definition,
        its cell (point and bound) value and all other scalar coordinate metadata
        that allows us to fully reconstruct that scalar coordinate. Note that all
        scalar data is sorted in order of the scalar coordinate definition.

        The coordinate payload of the cube also includes any associated vector
        coordinates that describe that cube, and descriptions of any auxiliary
        coordinate factories.

        """
        scalar_defns = []
        scalar_values = []
        scalar_metadata = []
        vector_dim_coords_and_dims = []
        vector_aux_coords_and_dims = []

        cube_aux_coords = cube.aux_coords
        coords = cube.dim_coords + cube_aux_coords
        
        # Coordinate hint ordering dictionary - from most preferred to least.
        # Copes with duplicate hint entries, where the most preferred entry wins.
        hint_dict = {name: i for i, name in zip(range(len(self._hints), 0, -1), self._hints[::-1])}
        # Coordinate axis ordering dictionary.
        axis_dict = {'T': 0, 'Z': 1, 'Y': 2, 'X': 3}
        # Coordinate sort function - by coordinate hint, then by guessed coordinate axis, then
        # by coordinate definition, in ascending order.
        key_func = lambda coord: (hint_dict.get(coord.name(), len(hint_dict) + 1),
                                  axis_dict.get(iris.util.guess_coord_axis(coord), len(axis_dict) + 1),
                                  coord._as_defn())

        # Order the coordinates by hints, axis, and definition.
        for coord in sorted(coords, key=key_func):
            if not cube.coord_dims(coord) and coord.shape == (1,):
                # Extract the scalar coordinate data and metadata.
                scalar_defns.append(coord._as_defn())
                scalar_values.append(coord.cell(0))
                points_dtype = coord.points.dtype
                bounds_dtype = coord.bounds.dtype if coord.bounds is not None else None
                kwargs = {'circular': coord.circular} if isinstance(coord, iris.coords.DimCoord) else {}
                scalar_metadata.append(_CoordMetaData(points_dtype, bounds_dtype, kwargs))
            else:
                # Extract the vector coordinate and metadata.
                if coord in cube_aux_coords:
                    vector_aux_coords_and_dims.append(_CoordAndDims(coord, tuple(cube.coord_dims(coord))))
                else:
                    vector_dim_coords_and_dims.append(_CoordAndDims(coord, tuple(cube.coord_dims(coord))))
 
        factory_defns = []
        for factory in sorted(cube.aux_factories, key=lambda factory: factory._as_defn()):
            dependency_defns = []
            dependencies = factory.dependencies
            for key in sorted(dependencies):
                coord = dependencies[key]
                dependency_defns.append((key, coord._as_defn()))
            factory_defn = _FactoryDefn(type(factory), dependency_defns)
            factory_defns.append(factory_defn)

        scalar = _ScalarCoordPayload(scalar_defns, scalar_values, scalar_metadata)
        vector = _VectorCoordPayload(vector_dim_coords_and_dims, vector_aux_coords_and_dims)

        return _CoordPayload(scalar, vector, factory_defns)
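A tiny standalone sketch of the hint_dict trick used above: each hint name gets a rank, earlier (more preferred) hints get smaller ranks, and a duplicated name keeps its most preferred rank (the hints tuple here is hypothetical).

hints = ('time', 'height', 'latitude', 'time')   # preference order, with a duplicate
hint_dict = {name: i
             for i, name in zip(range(len(hints), 0, -1), hints[::-1])}
print(hint_dict)   # {'time': 1, 'latitude': 3, 'height': 2}
# Sorting candidate names by this rank puts 'time' first and unknown names last.
names = ['latitude', 'model_level_number', 'time', 'height']
print(sorted(names, key=lambda name: hint_dict.get(name, len(hint_dict) + 1)))
# ['time', 'height', 'latitude', 'model_level_number']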
Example #35
0
def key_func(coord):
    return (
        axes.get(guess_coord_axis(coord), len(axes) + 1),
        coord._as_defn(),
        cube.coord_dims(coord),
    )
Example #36
0
def _add_subtract_common(operation_function,
                         operation_symbol,
                         operation_noun,
                         operation_past_tense,
                         cube,
                         other,
                         dim=None,
                         ignore=True,
                         in_place=False):
    """
    Function which shares common code between addition and subtraction of cubes.

    operation_function   - function which does the operation (e.g. numpy.subtract)
    operation_symbol     - the textual symbol of the operation (e.g. '-')
    operation_noun       - the noun of the operation (e.g. 'subtraction')
    operation_past_tense - the past tense of the operation (e.g. 'subtracted')

    """
    if not isinstance(cube, iris.cube.Cube):
        raise TypeError(
            'The "cube" argument must be an instance of iris.Cube.')

    if isinstance(other, (int, float)):
        # Promote the scalar to a numpy array; a bare number implicitly takes the cube's units.
        other = np.array(other)

    # Check that the units of the cube and the other item match; if the other item has no units, skip this check.
    if cube.units != getattr(other, 'units', cube.units):
        raise iris.exceptions.NotYetImplementedError('Differing units (%s & %s) %s not implemented' % \
                                                     (cube.units, other.units, operation_noun))

    if isinstance(other, np.ndarray):
        _assert_compatible(cube, other)

        if in_place:
            new_cube = cube
            operation_function(new_cube.data, other, new_cube.data)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, other))
    elif isinstance(other, iris.coords.Coord):
        # Deal with cube addition/subtraction by coordinate

        # What dimension are we processing?
        data_dimension = None
        if dim is not None:
            # Ensure the given dim matches the coord
            if other in cube.coords() and cube.coord_dims(other) != [dim]:
                raise ValueError(
                    "dim provided does not match dim found for coord")
            data_dimension = dim
        else:
            # Try and get a coord dim
            if other.shape != (1, ):
                try:
                    coord_dims = cube.coord_dims(other)
                    data_dimension = coord_dims[0] if coord_dims else None
                except iris.exceptions.CoordinateNotFoundError:
                    raise ValueError(
                        "Could not determine dimension for add/sub. Use add(coord, dim=dim)"
                    )

        if other.ndim != 1:
            raise iris.exceptions.CoordinateMultiDimError(other)

        if other.has_bounds():
            warnings.warn(
                '%s by a bounded coordinate not well defined, ignoring bounds.'
                % operation_noun)

        points = other.points

        if data_dimension is not None:
            points_shape = [1] * cube.ndim
            points_shape[data_dimension] = -1
            points = points.reshape(points_shape)

        if in_place:
            new_cube = cube
            operation_function(new_cube.data, points, new_cube.data)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, points))
    elif isinstance(other, iris.cube.Cube):
        # Deal with cube addition/subtraction by cube

        # get a coordinate comparison of this cube and the cube to do the operation with
        coord_comp = iris.analysis.coord_comparison(cube, other)

        if coord_comp['transposable']:
            # User does not need to transpose their cubes if numpy
            # array broadcasting will make the dimensions match
            broadcast_padding = cube.ndim - other.ndim
            coord_dims_equal = True
            for coord_group in coord_comp['transposable']:
                cube_coord, other_coord = coord_group.coords
                cube_coord_dims = cube.coord_dims(coord=cube_coord)
                other_coord_dims = other.coord_dims(coord=other_coord)
                other_coord_dims_broadcasted = tuple(
                    [dim + broadcast_padding for dim in other_coord_dims])
                if cube_coord_dims != other_coord_dims_broadcasted:
                    coord_dims_equal = False

            if not coord_dims_equal:
                raise ValueError('Cubes cannot be %s, differing axes. '
                                 'cube.transpose() may be required to '
                                 're-order the axes.' % operation_past_tense)

        # provide a deprecation warning if the ignore keyword has been set
        if ignore is not True:
            warnings.warn(
                'The "ignore" keyword has been deprecated in add/subtract. This functionality is now automatic. '
                'The provided value to "ignore" has been ignored, and has been automatically calculated.'
            )

        bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] +
                          coord_comp['resamplable'])
        if bad_coord_grps:
            raise ValueError(
                'This operation cannot be performed as there are differing coordinates (%s) remaining '
                'which cannot be ignored.' %
                ', '.join({coord_grp.name()
                           for coord_grp in bad_coord_grps}))

        if in_place:
            new_cube = cube
            operation_function(new_cube.data, other.data, new_cube.data)
        else:
            new_cube = cube.copy(
                data=operation_function(cube.data, other.data))

        # If a coordinate is to be ignored - remove it
        # Use a list (not a lazy filter object) so that the truthiness test and
        # the removal loop below both see the same contents under Python 3.
        ignore = list(filter(
            None, [coord_grp[0] for coord_grp in coord_comp['ignorable']]))
        if not ignore:
            ignore_string = ''
        else:
            ignore_string = ' (ignoring %s)' % ', '.join(
                [coord.name() for coord in ignore])
        for coord in ignore:
            new_cube.remove_coord(coord)

    else:
        return NotImplemented

    iris.analysis.clear_phenomenon_identity(new_cube)

    return new_cube
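A hedged usage sketch: this private helper backs the public arithmetic routines, so the illustration below goes through iris.analysis.maths (assuming the usual Iris public API delegates here).

import iris
import iris.analysis.maths as maths

cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
# Scalar path: the number is promoted to a numpy array and broadcast.
warmer = maths.add(cube, 5.0)
# Cube-by-cube path: coordinates are compared and the data arrays combined.
anomaly = maths.subtract(cube, cube)
# The phenomenon identity of each result is cleared, as in the code above.
print(warmer.name(), anomaly.name())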
Example #37
0
def _nearest_neighbour_indices_ndcoords(cube, sample_point, cache=None):
    """
    See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.

    This function is adapted for points sampling a multi-dimensional coord,
    and can currently only do nearest neighbour interpolation.

    Because this function can be slow for multidimensional coordinates,
    a 'cache' dictionary can be provided by the calling code.

    """

    # Developer notes:
    # A "sample space cube" is made which only has the coords and dims we are sampling on.
    # We get the nearest neighbour using this sample space cube.

    if isinstance(sample_point, dict):
        warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
        sample_point = list(sample_point.items())

    if sample_point:
        try:
            coord, value = sample_point[0]
        except ValueError:
            raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_point)

    # Convert names to coords in sample_point
    point = []
    ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
    for coord, value in sample_point:
        if isinstance(coord, six.string_types):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord)
        if id(coord) not in ok_coord_ids:
            msg = ('Invalid sample coordinate {!r}: derived coordinates are'
                   ' not allowed.'.format(coord.name()))
            raise ValueError(msg)
        point.append((coord, value))

    # Reformat sample_point for use in _cartesian_sample_points(), below.
    sample_point = np.array([[value] for coord, value in point])
    sample_point_coords = [coord for coord, value in point]
    sample_point_coord_names = [coord.name() for coord, value in point]

    # Which dims are we sampling?
    sample_dims = set()
    for coord in sample_point_coords:
        for dim in cube.coord_dims(coord):
            sample_dims.add(dim)
    sample_dims = sorted(list(sample_dims))

    # Extract a sub cube that lives in just the sampling space.
    sample_space_slice = [0] * cube.ndim
    for sample_dim in sample_dims:
        sample_space_slice[sample_dim] = slice(None, None)
    sample_space_slice = tuple(sample_space_slice)
    sample_space_cube = cube[sample_space_slice]

    # ...with just the sampling coords.
    for coord in sample_space_cube.coords():
        if not coord.name() in sample_point_coord_names:
            sample_space_cube.remove_coord(coord)

    # Order the sample point coords according to the sample space cube coords
    sample_space_coord_names = [coord.name() for coord in sample_space_cube.coords()]
    new_order = [sample_space_coord_names.index(name) for name in sample_point_coord_names]
    sample_point = np.array([sample_point[i] for i in new_order])
    sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]

    # Convert the sample point to cartesian coords.
    # If there is no latlon within the coordinate there will be no change.
    # Otherwise, geographic latlon is replaced with cartesian xyz.
    cartesian_sample_point = _cartesian_sample_points(sample_point, sample_point_coord_names)[0]

    sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
    sample_space_coords_and_dims = [(coord, sample_space_cube.coord_dims(coord)) for coord in sample_space_coords]

    if cache is not None and cube in cache:
        kdtree = cache[cube]
    else:
        # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
        sample_space_data_positions = np.empty((len(sample_space_coords_and_dims), sample_space_cube.data.size), dtype=float)
        for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
            for c, (coord, coord_dims) in enumerate(sample_space_coords_and_dims):
                # Index of this datum along this coordinate (could be nD).
                keys = tuple(ndi[ind] for ind in coord_dims) if coord_dims else slice(None, None)
                # Position of this datum along this coordinate.
                sample_space_data_positions[c][d] = coord.points[keys]

        # Convert to cartesian coordinates. Flatten for kdtree compatibility.
        cartesian_space_data_coords = _cartesian_sample_points(sample_space_data_positions, sample_point_coord_names)

        # Get the nearest datum index to the sample point. This is the goal of the function.
        kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)

    cartesian_distance, datum_index = kdtree.query(cartesian_sample_point)
    sample_space_ndi = np.unravel_index(datum_index, sample_space_cube.data.shape)

    # Turn sample_space_ndi into a main cube slice.
    # Map sample cube to main cube dims and leave the rest as a full slice.
    main_cube_slice = [slice(None, None)] * cube.ndim
    for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
        # Find the coord in the main cube
        main_coord = cube.coord(sample_coord.name())
        main_coord_dims = cube.coord_dims(main_coord)
        # Mark the nearest data index/indices with respect to this coord
        for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
            main_cube_slice[main_i] = sample_space_ndi[sample_i]


    # Update cache
    if cache is not None:
        cache[cube] = kdtree

    return tuple(main_cube_slice)
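A tiny standalone illustration of the np.unravel_index step used above: the kd-tree returns a flat datum index, which is converted back into an n-dimensional position within the sample-space cube.

import numpy as np

flat_index = 7
sample_space_shape = (3, 4)          # e.g. a 3 x 4 sample-space cube
print(np.unravel_index(flat_index, sample_space_shape))   # (1, 3)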
Example #38
0
def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None):
    """
    See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.

    'sample_points' is of the form [[coord-or-coord-name, point-value(s)]*].
    The lengths of all the point-values sequences must be equal.

    This function is adapted for points sampling a multi-dimensional coord,
    and can currently only do nearest neighbour interpolation.

    Because this function can be slow for multidimensional coordinates,
    a 'cache' dictionary can be provided by the calling code.

    """

    # Developer notes:
    # A "sample space cube" is made which only has the coords and dims we are sampling on.
    # We get the nearest neighbour using this sample space cube.

    if isinstance(sample_points, dict):
        msg = ('Providing a dictionary to specify points is deprecated. '
               'Please provide a list of (coordinate, values) pairs.')
        warn_deprecated(msg)
        sample_points = list(sample_points.items())

    if sample_points:
        try:
            coord, value = sample_points[0]
        except ValueError:
            raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_points)

    # Convert names to coords in sample_point
    # Reformat sample point values for use in _cartesian_sample_points(), below.
    coord_values = []
    sample_point_coords = []
    sample_point_coord_names = []
    ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
    for coord, value in sample_points:
        coord = cube.coord(coord)
        if id(coord) not in ok_coord_ids:
            msg = ('Invalid sample coordinate {!r}: derived coordinates are'
                   ' not allowed.'.format(coord.name()))
            raise ValueError(msg)
        sample_point_coords.append(coord)
        sample_point_coord_names.append(coord.name())
        value = np.array(value, ndmin=1)
        coord_values.append(value)

    coord_point_lens = np.array([len(value) for value in coord_values])
    if not np.all(coord_point_lens == coord_point_lens[0]):
        msg = 'All coordinates must have the same number of sample points.'
        raise ValueError(msg)

    coord_values = np.array(coord_values)

    # Which dims are we sampling?
    sample_dims = set()
    for coord in sample_point_coords:
        for dim in cube.coord_dims(coord):
            sample_dims.add(dim)
    sample_dims = sorted(list(sample_dims))

    # Extract a sub cube that lives in just the sampling space.
    sample_space_slice = [0] * cube.ndim
    for sample_dim in sample_dims:
        sample_space_slice[sample_dim] = slice(None, None)
    sample_space_slice = tuple(sample_space_slice)
    sample_space_cube = cube[sample_space_slice]

    # ...with just the sampling coords.
    for coord in sample_space_cube.coords():
        if not coord.name() in sample_point_coord_names:
            sample_space_cube.remove_coord(coord)

    # Order the sample point coords according to the sample space cube coords
    sample_space_coord_names = [coord.name() for coord in sample_space_cube.coords()]
    new_order = [sample_space_coord_names.index(name) for name in sample_point_coord_names]
    coord_values = np.array([coord_values[i] for i in new_order])
    sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]

    sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
    sample_space_coords_and_dims = [(coord, sample_space_cube.coord_dims(coord)) for coord in sample_space_coords]

    if cache is not None and cube in cache:
        kdtree = cache[cube]
    else:
        # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
        sample_space_data_positions = np.empty((len(sample_space_coords_and_dims), sample_space_cube.data.size), dtype=float)
        for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
            for c, (coord, coord_dims) in enumerate(sample_space_coords_and_dims):
                # Index of this datum along this coordinate (could be nD).
                keys = tuple(ndi[ind] for ind in coord_dims) if coord_dims else slice(None, None)
                # Position of this datum along this coordinate.
                sample_space_data_positions[c][d] = coord.points[keys]

        # Convert to cartesian coordinates. Flatten for kdtree compatibility.
        cartesian_space_data_coords = _cartesian_sample_points(sample_space_data_positions, sample_point_coord_names)

        # Create a kdtree for the nearest-distance lookup to these 3d points.
        kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)
        # This can find the nearest datum point to any given target point,
        # which is the goal of this function.

    # Update cache
    if cache is not None:
        cache[cube] = kdtree

    # Convert the sample points to cartesian (3d) coords.
    # If there is no latlon within the coordinate there will be no change.
    # Otherwise, geographic latlon is replaced with cartesian xyz.
    cartesian_sample_points = _cartesian_sample_points(
        coord_values, sample_point_coord_names)

    # Use kdtree to get the nearest sourcepoint index for each target point.
    _, datum_index_lists = kdtree.query(cartesian_sample_points)

    # Convert flat indices back into multidimensional sample-space indices.
    sample_space_dimension_indices = np.unravel_index(
        datum_index_lists, sample_space_cube.data.shape)
    # Convert this from "pointwise list of index arrays for each dimension",
    # to "list of cube indices for each point".
    sample_space_ndis = np.array(sample_space_dimension_indices).transpose()

    # For the returned result, we must convert these indices into the source
    # (sample-space) cube, to equivalent indices into the target 'cube'.

    # Make a result array: (cube.ndim * <index>), per sample point.
    n_points = coord_values.shape[-1]
    main_cube_slices = np.empty((n_points, cube.ndim), dtype=object)
    # Initialise so all unused indices are ":".
    main_cube_slices[:] = slice(None)

    # Move result indices according to the source (sample) and target (cube)
    # dimension mappings.
    for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
        # Find the coord in the main cube
        main_coord = cube.coord(sample_coord.name())
        main_coord_dims = cube.coord_dims(main_coord)
        # Fill nearest-point data indices for each coord dimension.
        for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
            main_cube_slices[:, main_i] = sample_space_ndis[:, sample_i]

    # Return as a list of **tuples** : required for correct indexing usage.
    result = [tuple(inds) for inds in main_cube_slices]
    return result
Example #39
0
def _add_subtract_common(operation_function, operation_symbol, operation_noun, operation_past_tense,
                         cube, other, dim=None, ignore=True, update_history=True, in_place=False):
    """
    Function which shares common code between addition and subtraction of cubes.

    operation_function   - function which does the operation (e.g. numpy.subtract)
    operation_symbol     - the textual symbol of the operation (e.g. '-')
    operation_noun       - the noun of the operation (e.g. 'subtraction')
    operation_past_tense - the past tense of the operation (e.g. 'subtracted')

    """
    if not isinstance(cube, iris.cube.Cube):
        raise TypeError('The "cube" argument must be an instance of iris.Cube.')

    if isinstance(other, (int, float)):
        # Promote the scalar to a 0-d numpy array so it is handled by the
        # array branch below.
        other = np.array(other)

    # Check that the units of the cube and the other item are the same, or if the other does not have a unit, skip this test
    if cube.units != getattr(other, 'units', cube.units):
        raise iris.exceptions.NotYetImplementedError('Differing units (%s & %s) %s not implemented' % \
                                                     (cube.units, other.units, operation_noun))

    history = None

    if isinstance(other, np.ndarray):
        _assert_compatible(cube, other)

        if in_place:
            new_cube = cube
            operation_function(new_cube.data, other, new_cube.data)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, other))

        if update_history:
            if other.ndim == 0:
                history = '%s %s %s' % (cube.name(), operation_symbol, other)
            else:
                history = '%s %s array' % (cube.name(), operation_symbol)
    elif isinstance(other, iris.coords.Coord):
        # Deal with cube addition/subtraction by coordinate

        # What dimension are we processing?
        data_dimension = None
        if dim is not None:
            # Ensure the given dim matches the coord
            if other in cube.coords() and cube.coord_dims(other) != [dim]:
                raise ValueError("dim provided does not match dim found for coord")
            data_dimension = dim
        else:
            # Try and get a coord dim
            if other.shape != (1,):
                try:
                    coord_dims = cube.coord_dims(other)
                    data_dimension = coord_dims[0] if coord_dims else None
                except iris.exceptions.CoordinateNotFoundError:
                    raise ValueError("Could not determine dimension for add/sub. Use add(coord, dim=dim)")

        if other.ndim != 1:
            raise iris.exceptions.CoordinateMultiDimError(other)

        if other.has_bounds():
            warnings.warn('%s by a bounded coordinate not well defined, ignoring bounds.' % operation_noun)

        points = other.points

        if data_dimension is not None:
            points_shape = [1] * cube.data.ndim
            points_shape[data_dimension] = -1
            points = points.reshape(points_shape)

        if in_place:
            new_cube = cube
            operation_function(new_cube.data, points, new_cube.data)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, points))

        if update_history:
            history = '%s %s %s (coordinate)' % (cube.name(), operation_symbol, other.name())
    elif isinstance(other, iris.cube.Cube):
        # Deal with cube addition/subtraction by cube

        # get a coordinate comparison of this cube and the cube to do the operation with
        coord_comp = iris.analysis.coord_comparison(cube, other)

        if coord_comp['transposable']:
            raise ValueError('Cubes cannot be %s, differing axes. '
                                 'cube.transpose() may be required to re-order the axes.' % operation_past_tense)

        # provide a deprecation warning if the ignore keyword has been set
        if ignore is not True:
            warnings.warn('The "ignore" keyword has been deprecated in add/subtract. This functionality is now automatic. '
                          'The provided value to "ignore" has been ignored, and has been automatically calculated.')

        bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] + coord_comp['resamplable'])
        if bad_coord_grps:
            raise ValueError('This operation cannot be performed as there are differing coordinates (%s) remaining '
                             'which cannot be ignored.' % ', '.join({coord_grp.name() for coord_grp in bad_coord_grps}))

        if in_place:
            new_cube = cube
            operation_function(new_cube.data, other.data, new_cube.data)
        else:
            new_cube = cube.copy(data=operation_function(cube.data, other.data))

        # If a coordinate is to be ignored - remove it
        ignore = list(filter(None, [coord_grp[0] for coord_grp in coord_comp['ignorable']]))
        if not ignore:
            ignore_string = ''
        else:
            ignore_string = ' (ignoring %s)' % ', '.join([coord.name() for coord in ignore])
        for coord in ignore:
            new_cube.remove_coord(coord)

        if update_history:
            history = '%s %s %s%s' % (cube.name() or 'unknown', operation_symbol,
                                      other.name() or 'unknown', ignore_string)

    else:
        return NotImplemented

    iris.analysis.clear_phenomenon_identity(new_cube)

    if history is not None:
        new_cube.add_history(history)

    return new_cube
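
A minimal usage sketch (not part of the source) showing how the scalar and cube branches above might be exercised; it assumes the surrounding iris 1.x module namespace, so `_add_subtract_common` and its private helpers are in scope, and the cube itself is hypothetical.

import numpy as np
import iris.cube

cube = iris.cube.Cube(np.arange(6.0).reshape(2, 3), units='K')

# Scalar branch: the float is promoted to a 0-d numpy array internally.
diff = _add_subtract_common(np.subtract, '-', 'subtraction', 'subtracted',
                            cube, 2.0, update_history=False)

# Cube branch, in place: cube.data is overwritten with the sum.
total = _add_subtract_common(np.add, '+', 'addition', 'added',
                             cube, cube.copy(), in_place=True,
                             update_history=False)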
Example #40
0
def nearest_neighbour_indices(cube, sample_points):
    """
    Returns the indices to select the data value(s) closest to the given coordinate point values.

    The sample_points mapping does not have to include coordinate values corresponding to all data
    dimensions. Any dimensions unspecified will default to a full slice.

    For example:

        >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
        >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0), ('longitude', 10)])
        (slice(None, None, None), 9, 12)
        >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0)])
        (slice(None, None, None), 9, slice(None, None, None))

    Args:

    * cube:
        An :class:`iris.cube.Cube`.
    * sample_points
        A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.

    Returns:
        The tuple of indices which will select the point in the cube closest to the supplied coordinate values.

    .. note::

        Nearest neighbour interpolation of multidimensional coordinates is not
        yet supported.

    .. deprecated:: 1.10

        The module :mod:`iris.analysis.interpolate` is deprecated.
        Please replace usage of
        :func:`iris.analysis.interpolate.nearest_neighbour_indices`
        with :meth:`iris.coords.Coord.nearest_neighbour_index`.

    """
    if isinstance(sample_points, dict):
        msg = ('Providing a dictionary to specify points is deprecated. '
               'Please provide a list of (coordinate, values) pairs.')
        warn_deprecated(msg)
        sample_points = list(sample_points.items())

    if sample_points:
        try:
            coord, values = sample_points[0]
        except ValueError:
            raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_points)

    points = []
    for coord, values in sample_points:
        coord = cube.coord(coord)
        points.append((coord, values))
    sample_points = points

    # Build up a list of indices to span the cube.
    indices = [slice(None, None)] * cube.ndim
    
    # Build up a dictionary which maps the cube's data dimensions to a list (which will later
    # be populated by coordinates in the sample points list)
    dim_to_coord_map = {}
    for i in range(cube.ndim):
        dim_to_coord_map[i] = []

    # Iterate over all of the specifications provided by sample_points
    for coord, point in sample_points:
        data_dim = cube.coord_dims(coord)

        # If no data dimension then we don't need to make any modifications to indices.
        if not data_dim:
            continue
        elif len(data_dim) > 1:
            raise iris.exceptions.CoordinateMultiDimError("Nearest neighbour interpolation of multidimensional "
                                                          "coordinates is not supported.")
        data_dim = data_dim[0]

        dim_to_coord_map[data_dim].append(coord)

        # Calculate the nearest neighbour.
        min_index = coord.nearest_neighbour_index(point)

        if getattr(coord, 'circular', False):
            warnings.warn("Nearest neighbour on a circular coordinate may not be picking the nearest point.", DeprecationWarning)

        # If the dimension has already been interpolated then assert that the index from this coordinate
        # agrees with the index already calculated, otherwise we have a contradicting specification
        if indices[data_dim] != slice(None, None) and min_index != indices[data_dim]:
            raise ValueError('The coordinates provided (%s) over specify dimension %s.' %
                                        (', '.join([coord.name() for coord in dim_to_coord_map[data_dim]]), data_dim))

        indices[data_dim] = min_index

    return tuple(indices)
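
The index tuple returned above can be used directly to index the cube; a short sketch (the sample file is the same one referenced in the docstring, and the module is the deprecated one this example comes from):

import iris
import iris.analysis.interpolate as iintrp

cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
indices = iintrp.nearest_neighbour_indices(cube,
                                           [('latitude', 0), ('longitude', 10)])
# Indexing with the tuple keeps any unspecified dimensions as full slices.
nearest_series = cube[indices]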
Example #41
0
def _create_cf_variable(dataset, cube, dimension_names, coord, factory_defn):
    """
    Create the associated CF-netCDF variable in the netCDF dataset for the 
    given coordinate. If required, also create the CF-netCDF bounds variable
    and associated dimension. 
    
    Args:

    * dataset (:class:`netCDF4.Dataset`):
        The CF-netCDF data file being created.
    * cube (:class:`iris.cube.Cube`):
        The associated cube being saved to CF-netCDF file.
    * dimension_names:
        List of string names for each dimension of the cube.
    * coord (:class:`iris.coords.Coord`):
        The coordinate to be saved to CF-netCDF file.
    * factory_defn (:class:`_FactoryDefn`):
        An optional description of the AuxCoordFactory relevant to this
        cube.

    Returns:
        The string name of the associated CF-netCDF variable saved.
    
    """
    cf_name = coord.name()

    # Derive the data dimension names for the coordinate.
    cf_dimensions = [dimension_names[dim] for dim in cube.coord_dims(coord)]

    if np.issubdtype(coord.points.dtype, np.str):
        string_dimension_depth = coord.points.dtype.itemsize
        string_dimension_name = 'string%d' % string_dimension_depth

        # Determine whether to create the string length dimension.
        if string_dimension_name not in dataset.dimensions:
            dataset.createDimension(string_dimension_name, string_dimension_depth)

        # Add the string length dimension to dimension names.
        cf_dimensions.append(string_dimension_name)

        # Create the label coordinate variable.
        cf_var = dataset.createVariable(cf_name, '|S1', cf_dimensions)

        # Add the payload to the label coordinate variable.
        if len(cf_dimensions) == 1:
            cf_var[:] = list('%- *s' % (string_dimension_depth, coord.points[0]))
        else:
            for index in np.ndindex(coord.points.shape):
                index_slice = tuple(list(index) + [slice(None, None)])
                cf_var[index_slice] = list('%- *s' % (string_dimension_depth, coord.points[index]))
    else:
        # Identify the collection of coordinates that represent CF-netCDF coordinate variables.
        cf_coordinates = cube.dim_coords

        if coord in cf_coordinates:
            # By definition of a CF-netCDF coordinate variable this coordinate must be 1-D
            # and the name of the CF-netCDF variable must be the same as its dimension name.
            cf_name = cf_dimensions[0]

        # Create the CF-netCDF variable.
        cf_var = dataset.createVariable(cf_name, coord.points.dtype, cf_dimensions)

        # Add the axis attribute for spatio-temporal CF-netCDF coordinates.
        if coord in cf_coordinates:
            axis = iris.util.guess_coord_axis(coord)
            if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
                cf_var.axis = axis.upper()

        # Add the data to the CF-netCDF variable.
        cf_var[:] = coord.points

        # Create the associated CF-netCDF bounds variable.
        _create_bounds(dataset, coord, cf_var, cf_name)

    # Deal with CF-netCDF units and standard name.
    standard_name, long_name, units = _cf_coord_identity(coord)

    # If this coordinate should describe a dimensionless vertical
    # coordinate, then override `standard_name`, `long_name`, and `axis`,
    # and also set the `formula_terms` attribute.
    if factory_defn:
        dependencies = cube.aux_factories[0].dependencies
        if coord is dependencies[factory_defn.primary]:
            standard_name = factory_defn.std_name
            cf_var.axis = 'Z'

            fmt = factory_defn.formula_terms_format
            names = {key: coord.name() for key, coord in
                            dependencies.iteritems()}
            formula_terms = fmt.format(**names)
            cf_var.formula_terms = formula_terms

    if units != 'unknown':
        cf_var.units = units

    if standard_name is not None:
        cf_var.standard_name = standard_name

    if long_name is not None:
        cf_var.long_name = long_name

    # Add the CF-netCDF calendar attribute.
    if coord.units.calendar:
        cf_var.calendar = coord.units.calendar

    # Add any other custom coordinate attributes.
    for name in sorted(coord.attributes):
        value = coord.attributes[name]

        if name == 'STASH':
            # Adopting provisional Metadata Conventions for representing MO Scientific Data encoded in NetCDF Format.
            name = 'ukmo__um_stash_source'
            value = str(value)

        # Don't clobber existing attributes.
        if not hasattr(cf_var, name):
            setattr(cf_var, name, value)

    return cf_name
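
The label-coordinate branch above leans on old-style '%- *s' formatting to left-justify each string to the fixed character-dimension length before splitting it into single characters for the '|S1' variable. A standalone illustration:

# Illustration only: pad a label to the string dimension length, then split it.
string_dimension_depth = 8            # e.g. coord.points.dtype.itemsize
chars = list('%- *s' % (string_dimension_depth, 'DJF'))
print(chars)   # ['D', 'J', 'F', ' ', ' ', ' ', ' ', ' ']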
Example #42
0
def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None):
    """
    See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.

    'sample_points' is of the form [[coord-or-coord-name, point-value(s)]*].
    The lengths of all the point-values sequences must be equal.

    This function is adapted for points sampling a multi-dimensional coord,
    and can currently only do nearest neighbour interpolation.

    Because this function can be slow for multidimensional coordinates,
    a 'cache' dictionary can be provided by the calling code.

    .. Note::

        If the points are longitudes/latitudes, these are handled correctly as
        points on the sphere, but the values must be in 'degrees'.

    """

    # Developer notes:
    # A "sample space cube" is made which only has the coords and dims we are sampling on.
    # We get the nearest neighbour using this sample space cube.

    if isinstance(sample_points, dict):
        msg = ('Providing a dictionary to specify points is deprecated. '
               'Please provide a list of (coordinate, values) pairs.')
        warn_deprecated(msg)
        sample_points = list(sample_points.items())

    if sample_points:
        try:
            coord, value = sample_points[0]
        except ValueError:
            raise ValueError(
                'Sample points must be a list of (coordinate, value) pairs. Got %r.'
                % sample_points)

    # Convert names to coords in sample_point
    # Reformat sample point values for use in _cartesian_sample_points(), below.
    coord_values = []
    sample_point_coords = []
    sample_point_coord_names = []
    ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
    for coord, value in sample_points:
        coord = cube.coord(coord)
        if id(coord) not in ok_coord_ids:
            msg = ('Invalid sample coordinate {!r}: derived coordinates are'
                   ' not allowed.'.format(coord.name()))
            raise ValueError(msg)
        sample_point_coords.append(coord)
        sample_point_coord_names.append(coord.name())
        value = np.array(value, ndmin=1)
        coord_values.append(value)

    coord_point_lens = np.array([len(value) for value in coord_values])
    if not np.all(coord_point_lens == coord_point_lens[0]):
        msg = 'All coordinates must have the same number of sample points.'
        raise ValueError(msg)

    coord_values = np.array(coord_values)

    # Which dims are we sampling?
    sample_dims = set()
    for coord in sample_point_coords:
        for dim in cube.coord_dims(coord):
            sample_dims.add(dim)
    sample_dims = sorted(list(sample_dims))

    # Extract a sub cube that lives in just the sampling space.
    sample_space_slice = [0] * cube.ndim
    for sample_dim in sample_dims:
        sample_space_slice[sample_dim] = slice(None, None)
    sample_space_slice = tuple(sample_space_slice)
    sample_space_cube = cube[sample_space_slice]

    #...with just the sampling coords
    for coord in sample_space_cube.coords():
        if not coord.name() in sample_point_coord_names:
            sample_space_cube.remove_coord(coord)

    # Order the sample point coords according to the sample space cube coords
    sample_space_coord_names = [
        coord.name() for coord in sample_space_cube.coords()
    ]
    new_order = [
        sample_space_coord_names.index(name)
        for name in sample_point_coord_names
    ]
    coord_values = np.array([coord_values[i] for i in new_order])
    sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]

    sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
    sample_space_coords_and_dims = [(coord,
                                     sample_space_cube.coord_dims(coord))
                                    for coord in sample_space_coords]

    if cache is not None and cube in cache:
        kdtree = cache[cube]
    else:
        # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
        sample_space_data_positions = np.empty(
            (len(sample_space_coords_and_dims), sample_space_cube.data.size),
            dtype=float)
        for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
            for c, (coord,
                    coord_dims) in enumerate(sample_space_coords_and_dims):
                # Index of this datum along this coordinate (could be nD).
                keys = tuple(ndi[ind]
                             for ind in coord_dims) if coord_dims else slice(
                                 None, None)
                # Position of this datum along this coordinate.
                sample_space_data_positions[c][d] = coord.points[keys]

        # Convert to cartesian coordinates. Flatten for kdtree compatibility.
        cartesian_space_data_coords = _cartesian_sample_points(
            sample_space_data_positions, sample_point_coord_names)

        # Create a kdtree for the nearest-distance lookup to these 3d points.
        kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)
        # This can find the nearest datum point to any given target point,
        # which is the goal of this function.

    # Update cache
    if cache is not None:
        cache[cube] = kdtree

    # Convert the sample points to cartesian (3d) coords.
    # If there is no latlon within the coordinate there will be no change.
    # Otherwise, geographic latlon is replaced with cartesian xyz.
    cartesian_sample_points = _cartesian_sample_points(
        coord_values, sample_point_coord_names)

    # Use kdtree to get the nearest sourcepoint index for each target point.
    _, datum_index_lists = kdtree.query(cartesian_sample_points)

    # Convert flat indices back into multidimensional sample-space indices.
    sample_space_dimension_indices = np.unravel_index(
        datum_index_lists, sample_space_cube.data.shape)
    # Convert this from "pointwise list of index arrays for each dimension",
    # to "list of cube indices for each point".
    sample_space_ndis = np.array(sample_space_dimension_indices).transpose()

    # For the returned result, we must convert these indices into the source
    # (sample-space) cube, to equivalent indices into the target 'cube'.

    # Make a result array: (cube.ndim * <index>), per sample point.
    n_points = coord_values.shape[-1]
    main_cube_slices = np.empty((n_points, cube.ndim), dtype=object)
    # Initialise so all unused indices are ":".
    main_cube_slices[:] = slice(None)

    # Move result indices according to the source (sample) and target (cube)
    # dimension mappings.
    for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
        # Find the coord in the main cube
        main_coord = cube.coord(sample_coord.name())
        main_coord_dims = cube.coord_dims(main_coord)
        # Fill nearest-point data indices for each coord dimension.
        for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
            main_cube_slices[:, main_i] = sample_space_ndis[:, sample_i]

    # Return as a list of **tuples** : required for correct indexing usage.
    result = [tuple(inds) for inds in main_cube_slices]
    return result
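
A hedged usage sketch (not from the source): a small cube with hypothetical 2-D latitude/longitude auxiliary coordinates, queried for two sample points. The function returns one index tuple per point, and the optional cache dict lets the kd-tree be reused on later calls with the same cube.

import numpy as np
import iris.coords
import iris.cube

data = np.zeros((3, 4))
lat_pts = np.linspace(-10.0, 10.0, 12).reshape(3, 4)
lon_pts = np.linspace(100.0, 140.0, 12).reshape(3, 4)
cube = iris.cube.Cube(data)
cube.add_aux_coord(iris.coords.AuxCoord(lat_pts, standard_name='latitude',
                                        units='degrees'), (0, 1))
cube.add_aux_coord(iris.coords.AuxCoord(lon_pts, standard_name='longitude',
                                        units='degrees'), (0, 1))

cache = {}
indices = _nearest_neighbour_indices_ndcoords(
    cube, [('latitude', [0.0, 5.0]), ('longitude', [120.0, 130.0])],
    cache=cache)
# One index tuple per sample point; each selects the nearest grid cell.
nearest = [cube[index] for index in indices]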
Example #43
0
    def _create_cf_variable(self, cube, dimension_names, coord, factory_defn):
        """
        Create the associated CF-netCDF variable in the netCDF dataset for the
        given coordinate. If required, also create the CF-netCDF bounds
        variable and associated dimension.

        Args:

        * dataset (:class:`netCDF4.Dataset`):
            The CF-netCDF data file being created.
        * cube (:class:`iris.cube.Cube`):
            The associated cube being saved to CF-netCDF file.
        * dimension_names (list):
            Names for each dimension of the cube.
        * coord (:class:`iris.coords.Coord`):
            The coordinate to be saved to CF-netCDF file.
        * factory_defn (:class:`_FactoryDefn`):
            An optional description of the AuxCoordFactory relevant to this
            cube.

        Returns:
            The string name of the associated CF-netCDF variable saved.

        """
        cf_name = self._get_coord_variable_name(cube, coord)
        while cf_name in self._dataset.variables:
            cf_name = self._increment_name(cf_name)

        # Derive the data dimension names for the coordinate.
        cf_dimensions = [dimension_names[dim] for dim in
                         cube.coord_dims(coord)]

        if np.issubdtype(coord.points.dtype, np.str):
            string_dimension_depth = coord.points.dtype.itemsize
            string_dimension_name = 'string%d' % string_dimension_depth

            # Determine whether to create the string length dimension.
            if string_dimension_name not in self._dataset.dimensions:
                self._dataset.createDimension(string_dimension_name,
                                              string_dimension_depth)

            # Add the string length dimension to dimension names.
            cf_dimensions.append(string_dimension_name)

            # Create the label coordinate variable.
            cf_var = self._dataset.createVariable(cf_name, '|S1',
                                                  cf_dimensions)

            # Add the payload to the label coordinate variable.
            if len(cf_dimensions) == 1:
                cf_var[:] = list('%- *s' % (string_dimension_depth,
                                            coord.points[0]))
            else:
                for index in np.ndindex(coord.points.shape):
                    index_slice = tuple(list(index) + [slice(None, None)])
                    cf_var[index_slice] = list('%- *s' %
                                               (string_dimension_depth,
                                                coord.points[index]))
        else:
            # Identify the collection of coordinates that represent CF-netCDF
            # coordinate variables.
            cf_coordinates = cube.dim_coords

            if coord in cf_coordinates:
                # By definition of a CF-netCDF coordinate variable this
                # coordinate must be 1-D and the name of the CF-netCDF variable
                # must be the same as its dimension name.
                cf_name = cf_dimensions[0]

            # Create the CF-netCDF variable.
            cf_var = self._dataset.createVariable(cf_name, coord.points.dtype,
                                                  cf_dimensions)

            # Add the axis attribute for spatio-temporal CF-netCDF coordinates.
            if coord in cf_coordinates:
                axis = iris.util.guess_coord_axis(coord)
                if axis is not None and axis.lower() in SPATIO_TEMPORAL_AXES:
                    cf_var.axis = axis.upper()

            # Add the data to the CF-netCDF variable.
            cf_var[:] = coord.points

            # Create the associated CF-netCDF bounds variable.
            self._create_cf_bounds(coord, cf_var, cf_name)

        # Deal with CF-netCDF units and standard name.
        standard_name, long_name, units = self._cf_coord_identity(coord)

        # If this coordinate should describe a dimensionless vertical
        # coordinate, then override `standard_name`, `long_name`, and `axis`,
        # and also set the `formula_terms` attribute.
        if factory_defn:
            dependencies = cube.aux_factories[0].dependencies
            if coord is dependencies[factory_defn.primary]:
                standard_name = factory_defn.std_name
                cf_var.axis = 'Z'

                fmt = factory_defn.formula_terms_format
                names = {key: coord.name() for key, coord in
                         dependencies.iteritems()}
                formula_terms = fmt.format(**names)
                cf_var.formula_terms = formula_terms

        if units != 'unknown':
            cf_var.units = units

        if standard_name is not None:
            cf_var.standard_name = standard_name

        if long_name is not None:
            cf_var.long_name = long_name

        # Add the CF-netCDF calendar attribute.
        if coord.units.calendar:
            cf_var.calendar = coord.units.calendar

        # Add any other custom coordinate attributes.
        for name in sorted(coord.attributes):
            value = coord.attributes[name]

            if name == 'STASH':
                # Adopting provisional Metadata Conventions for representing MO
                # Scientific Data encoded in NetCDF Format.
                name = 'ukmo__um_stash_source'
                value = str(value)

            # Don't clobber existing attributes.
            if not hasattr(cf_var, name):
                setattr(cf_var, name, value)

        return cf_name
Example #44
0
def differentiate(cube, coord_to_differentiate):
    r"""
    Calculate the differential of a given cube with respect to the
    coord_to_differentiate.

    Args:

    * coord_to_differentiate:
        Either a Coord instance or the unique name of a coordinate which
        exists in the cube.
        If a Coord instance is provided, it does not necessarily have to
        exist on the cube.

    Example usage::

        u_wind_acceleration = differentiate(u_wind_cube, 'forecast_time')

    The algorithm used is equivalent to:

    .. math::

        d_i = \frac{v_{i+1}-v_i}{c_{i+1}-c_i}

    Where ``d`` is the differential, ``v`` is the data value, ``c`` is
    the coordinate value and ``i`` is the index in the differential
    direction. Hence, in a normal situation if a cube has a shape
    (x: n; y: m) differentiating with respect to x will result in a cube
    of shape (x: n-1; y: m) and differentiating with respect to y will
    result in (x: n; y: m-1). If the coordinate to differentiate is
    :attr:`circular <iris.coords.DimCoord.circular>` then the resultant
    shape will be the same as the input cube.

    In the returned cube the `coord_to_differentiate` object is
    redefined such that the output coordinate values are set to the
    averages of the original coordinate values (i.e. the mid-points).
    Similarly, the output lower bounds values are set to the averages of
    the original lower bounds values and the output upper bounds values
    are set to the averages of the original upper bounds values. In more
    formal terms:

    * `C[i] = (c[i] + c[i+1]) / 2`
    * `B[i, 0] = (b[i, 0] + b[i+1, 0]) / 2`
    * `B[i, 1] = (b[i, 1] + b[i+1, 1]) / 2`

    where `c` and `b` represent the input coordinate values and bounds,
    and `C` and `B` the output coordinate values and bounds.

    .. note:: Difference method used is the same as :func:`cube_delta`
        and therefore has the same limitations.

    .. note:: Spherical differentiation does not occur in this routine.

    """
    # Get the delta cube in the required differential direction.
    # This operation results in a copy of the original cube.
    delta_cube = cube_delta(cube, coord_to_differentiate)

    if isinstance(coord_to_differentiate, str):
        coord = cube.coord(coord_to_differentiate)
    else:
        coord = coord_to_differentiate

    delta_coord = _construct_delta_coord(coord)
    delta_dim = cube.coord_dims(coord.name())[0]

    # calculate delta_cube / delta_coord to give the differential.
    delta_cube = iris.analysis.maths.divide(delta_cube, delta_coord, delta_dim)

    # Update the standard name
    delta_cube.rename('derivative_of_{}_wrt_{}'.format(cube.name(),
                                                       coord.name()))
    return delta_cube
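
A numpy-only check (not from the source) of the documented formulas: the forward difference d_i and the midpoint output coordinate C_i.

import numpy as np

c = np.array([0.0, 1.0, 3.0, 6.0])    # coordinate values
v = np.array([0.0, 2.0, 6.0, 12.0])   # data values

d = np.diff(v) / np.diff(c)           # d_i = (v[i+1] - v[i]) / (c[i+1] - c[i])
C = (c[:-1] + c[1:]) / 2.0            # C[i] = (c[i] + c[i+1]) / 2
print(d)   # [2. 2. 2.]
print(C)   # [0.5 2.  4.5]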
Example #45
0
def _get_xy_coords(cube):
    """
    Return the x and y coordinates from a cube.

    This function will preferentially return a pair of dimension
    coordinates (if there is more than one potential x or y dimension
    coordinate, a ValueError will be raised). If the cube does not have
    a pair of x and y dimension coordinates it will return 1D auxiliary
    coordinates (including scalars). If there is not one and only one set
    of x and y auxiliary coordinates, a ValueError will be raised.

    Having identified the x and y coordinates, the function checks that they
    have equal coordinate systems and that they do not occupy the same
    dimension on the cube.

    Args:

    * cube:
        An instance of :class:`iris.cube.Cube`.

    Returns:
        A tuple containing the cube's x and y coordinates.

    """
    # Look for a suitable dimension coord first.
    x_coords = cube.coords(axis='x', dim_coords=True)
    if not x_coords:
        # If there is no x coord in dim_coords look for scalars or
        # monotonic coords in aux_coords.
        x_coords = [coord for coord in cube.coords(axis='x', dim_coords=False)
                    if coord.ndim == 1 and coord.is_monotonic()]
    if len(x_coords) != 1:
        raise ValueError('Cube {!r} must contain a single 1D x '
                         'coordinate.'.format(cube.name()))
    x_coord = x_coords[0]

    # Look for a suitable dimension coord first.
    y_coords = cube.coords(axis='y', dim_coords=True)
    if not y_coords:
        # If there is no y coord in dim_coords look for scalars or
        # monotonic coords in aux_coords.
        y_coords = [coord for coord in cube.coords(axis='y', dim_coords=False)
                    if coord.ndim == 1 and coord.is_monotonic()]
    if len(y_coords) != 1:
        raise ValueError('Cube {!r} must contain a single 1D y '
                         'coordinate.'.format(cube.name()))
    y_coord = y_coords[0]

    if x_coord.coord_system != y_coord.coord_system:
        raise ValueError("The cube's x ({!r}) and y ({!r}) "
                         "coordinates must have the same coordinate "
                         "system.".format(x_coord.name(), y_coord.name()))

    # The x and y coordinates must describe different dimensions
    # or be scalar coords.
    x_dims = cube.coord_dims(x_coord)
    x_dim = None
    if x_dims:
        x_dim = x_dims[0]

    y_dims = cube.coord_dims(y_coord)
    y_dim = None
    if y_dims:
        y_dim = y_dims[0]

    if x_dim is not None and y_dim == x_dim:
        raise ValueError("The cube's x and y coords must not describe the "
                         "same data dimension.")

    return x_coord, y_coord
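
A hypothetical sketch of a cube that satisfies the checks above: 1-D latitude and longitude dimension coordinates with matching (unset) coordinate systems on different data dimensions.

import numpy as np
import iris.coords
import iris.cube

lat = iris.coords.DimCoord(np.linspace(-45.0, 45.0, 3),
                           standard_name='latitude', units='degrees')
lon = iris.coords.DimCoord(np.linspace(0.0, 90.0, 4),
                           standard_name='longitude', units='degrees')
cube = iris.cube.Cube(np.zeros((3, 4)),
                      dim_coords_and_dims=[(lat, 0), (lon, 1)])

x_coord, y_coord = _get_xy_coords(cube)   # (longitude, latitude)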
Example #46
0
def _add_subtract_common(operation_function, operation_noun,
                         operation_past_tense, cube, other, dim=None,
                         ignore=True, in_place=False):
    """
    Function which shares common code between addition and subtraction of cubes.

    operation_function   - function which does the operation (e.g. numpy.subtract)
    operation_noun       - the noun of the operation (e.g. 'subtraction')
    operation_past_tense - the past tense of the operation (e.g. 'subtracted')

    """
    _assert_is_cube(cube)
    _assert_matching_units(cube, other, operation_noun)

    if isinstance(other, iris.cube.Cube):
        # get a coordinate comparison of this cube and the cube to do the
        # operation with
        coord_comp = iris.analysis.coord_comparison(cube, other)

        if coord_comp['transposable']:
            # User does not need to transpose their cubes if numpy
            # array broadcasting will make the dimensions match
            broadcast_padding = cube.ndim - other.ndim
            coord_dims_equal = True
            for coord_group in coord_comp['transposable']:
                cube_coord, other_coord = coord_group.coords
                cube_coord_dims = cube.coord_dims(coord=cube_coord)
                other_coord_dims = other.coord_dims(coord=other_coord)
                other_coord_dims_broadcasted = tuple(
                    [dim + broadcast_padding for dim in other_coord_dims])
                if cube_coord_dims != other_coord_dims_broadcasted:
                    coord_dims_equal = False

            if not coord_dims_equal:
                raise ValueError('Cubes cannot be %s, differing axes. '
                                 'cube.transpose() may be required to '
                                 're-order the axes.' % operation_past_tense)

        # provide a deprecation warning if the ignore keyword has been set
        if ignore is not True:
            warnings.warn('The "ignore" keyword has been deprecated in '
                          'add/subtract. This functionality is now automatic. '
                          'The provided value to "ignore" has been ignored, '
                          'and has been automatically calculated.')

        bad_coord_grps = (coord_comp['ungroupable_and_dimensioned']
                          + coord_comp['resamplable'])
        if bad_coord_grps:
            raise ValueError('This operation cannot be performed as there are '
                             'differing coordinates (%s) remaining '
                             'which cannot be ignored.'
                             % ', '.join({coord_grp.name() for coord_grp
                                          in bad_coord_grps}))
    else:
        coord_comp = None

    new_cube = _binary_op_common(operation_function, operation_noun, cube,
                                 other, cube.units, dim, in_place)

    if coord_comp:
        # If a coordinate is to be ignored - remove it
        ignore = filter(None, [coord_grp[0] for coord_grp
                               in coord_comp['ignorable']])
        for coord in ignore:
            new_cube.remove_coord(coord)

    return new_cube
Example #47
0
    def _extract_coord_payload(self, cube):
        """
        Extract all relevant coordinate data and metadata from the cube.

        In particular, for each scalar coordinate determine its definition,
        its cell (point and bound) value and all other scalar coordinate metadata
        that allows us to fully reconstruct that scalar coordinate. Note that all
        scalar data is sorted in order of the scalar coordinate definition.

        The coordinate payload of the cube also includes any associated vector
        coordinates that describe that cube, and descriptions of any auxiliary
        coordinate factories.

        """
        scalar_defns = []
        scalar_values = []
        scalar_metadata = []
        vector_dim_coords_and_dims = []
        vector_aux_coords_and_dims = []

        cube_aux_coords = cube.aux_coords
        coords = cube.dim_coords + cube_aux_coords
        
        # Coordinate hint ordering dictionary - from most preferred to least.
        # Copes with duplicate hint entries, where the most preferred is king.
        hint_dict = {name: i for i, name in zip(range(len(self._hints), 0, -1), self._hints[::-1])}
        # Coordinate axis ordering dictionary.
        axis_dict = {'T': 0, 'Z': 1, 'Y': 2, 'X': 3}
        # Coordinate sort function.
        key_func = lambda coord: (not np.issubdtype(coord.points.dtype,
                                                    np.number),
                                  not isinstance(coord, iris.coords.DimCoord),
                                  hint_dict.get(coord.name(),
                                                len(hint_dict) + 1),
                                  axis_dict.get(iris.util.guess_coord_axis(coord),
                                                len(axis_dict) + 1),
                                  coord._as_defn())

        # Order the coordinates by hints, axis, and definition.
        for coord in sorted(coords, key=key_func):
            if not cube.coord_dims(coord) and coord.shape == (1,):
                # Extract the scalar coordinate data and metadata.
                scalar_defns.append(coord._as_defn())
                scalar_values.append(coord.cell(0))
                points_dtype = coord.points.dtype
                bounds_dtype = coord.bounds.dtype if coord.bounds is not None else None
                kwargs = {'circular': coord.circular} if isinstance(coord, iris.coords.DimCoord) else {}
                scalar_metadata.append(_CoordMetaData(points_dtype, bounds_dtype, kwargs))
            else:
                # Extract the vector coordinate and metadata.
                if coord in cube_aux_coords:
                    vector_aux_coords_and_dims.append(_CoordAndDims(coord, tuple(cube.coord_dims(coord))))
                else:
                    vector_dim_coords_and_dims.append(_CoordAndDims(coord, tuple(cube.coord_dims(coord))))
 
        factory_defns = []
        for factory in sorted(cube.aux_factories, key=lambda factory: factory._as_defn()):
            dependency_defns = []
            dependencies = factory.dependencies
            for key in sorted(dependencies):
                coord = dependencies[key]
                dependency_defns.append((key, coord._as_defn()))
            factory_defn = _FactoryDefn(type(factory), dependency_defns)
            factory_defns.append(factory_defn)

        scalar = _ScalarCoordPayload(scalar_defns, scalar_values, scalar_metadata)
        vector = _VectorCoordPayload(vector_dim_coords_and_dims, vector_aux_coords_and_dims)

        return _CoordPayload(scalar, vector, factory_defns)
Example #48
0
def key_func(coord):
    return (axes.get(guess_coord_axis(coord), len(axes) + 1),
            coord._as_defn(),
            cube.coord_dims(coord))
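
A standalone illustration (not from the source) of the ordering the first element of this key produces, with the axis guesses stubbed out as a plain dict:

axes = dict(T=0, Z=1, Y=2, X=3)
guessed = {'time': 'T', 'latitude': 'Y', 'longitude': 'X', 'realization': None}

names = ['longitude', 'realization', 'latitude', 'time']
ordered = sorted(names, key=lambda name: axes.get(guessed[name], len(axes) + 1))
print(ordered)   # ['time', 'latitude', 'longitude', 'realization']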
Example #49
0
def cube_delta(cube, coord):
    """
    Given a cube, calculate the difference between each pair of adjacent
    values along the given coord's direction.


    Args:

    * coord
        either a Coord instance or the unique name of a coordinate in the cube.
        If a Coord instance is provided, it does not necessarily have to
        exist in the cube.

    Example usage::

        change_in_temperature_wrt_pressure = \
cube_delta(temperature_cube, 'pressure')

    .. note:: Missing data support not yet implemented.

    """
    # handle the case where a user passes a coordinate name
    if isinstance(coord, str):
        coord = cube.coord(coord)

    if coord.ndim != 1:
        raise iris.exceptions.CoordinateMultiDimError(coord)

    # Try and get a coord dim
    delta_dims = cube.coord_dims(coord.name())
    if ((coord.shape[0] == 1 and not getattr(coord, 'circular', False))
            or not delta_dims):
        raise ValueError('Cannot calculate delta over {!r} as it has '
                         'length of 1.'.format(coord.name()))
    delta_dim = delta_dims[0]

    # Calculate the actual delta, taking into account whether the given
    # coordinate is circular.
    delta_cube_data = delta(cube.data,
                            delta_dim,
                            circular=getattr(coord, 'circular', False))

    # If the coord/dim is circular there is no change in cube shape
    if getattr(coord, 'circular', False):
        delta_cube = cube.copy(data=delta_cube_data)
    else:
        # Subset the cube to the appropriate new shape by knocking off
        # the last row of the delta dimension.
        subset_slice = [slice(None, None)] * cube.ndim
        subset_slice[delta_dim] = slice(None, -1)
        delta_cube = cube[tuple(subset_slice)]
        delta_cube.data = delta_cube_data

    # Replace the delta_dim coords with midpoints
    # (no shape change if circular).
    for cube_coord in cube.coords(dimensions=delta_dim):
        delta_cube.replace_coord(
            _construct_midpoint_coord(cube_coord,
                                      circular=getattr(coord, 'circular',
                                                       False)))

    delta_cube.rename('change_in_{}_wrt_{}'.format(delta_cube.name(),
                                                   coord.name()))

    return delta_cube
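
A rough numpy sketch (not from the source) of the two shape behaviours described above; the circular wrap-around shown here is only schematic, since the real delta helper also deals with the coordinate itself.

import numpy as np

data = np.array([1.0, 3.0, 6.0, 10.0])

# Non-circular: one element shorter along the delta dimension, like np.diff.
non_circular = np.diff(data)             # [2. 3. 4.]

# Circular (schematic): difference wraps back to the first element,
# so the original length is preserved.
circular = np.roll(data, -1) - data      # [ 2.  3.  4. -9.]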
Example #50
0
def anomalies(cube,
              period,
              reference=None,
              standardize=False,
              seasons=('DJF', 'MAM', 'JJA', 'SON')):
    """Compute anomalies using a mean with the specified granularity.

    Computes anomalies based on daily, monthly, seasonal or yearly means for
    the full available period.

    Parameters
    ----------
    cube: iris.cube.Cube
        input cube.

    period: str
        Period to compute the statistic over.
        Available periods: 'full', 'season', 'seasonal', 'monthly', 'month',
        'mon', 'daily', 'day'

    reference: dict, optional, default: None
        Period of time to use as a reference, as needed for the
        'extract_time' preprocessor function. If None, all available data
        is used as a reference.

    standardize: bool, optional
        If True standardized anomalies are calculated

    seasons: list or tuple of str, optional
        Seasons to use if needed. Defaults to ('DJF', 'MAM', 'JJA', 'SON')

    Returns
    -------
    iris.cube.Cube
        Anomalies cube
    """
    if reference is None:
        reference_cube = cube
    else:
        reference_cube = extract_time(cube, **reference)
    reference = climate_statistics(reference_cube,
                                   period=period,
                                   seasons=seasons)
    if period in ['full']:
        metadata = copy.deepcopy(cube.metadata)
        cube = cube - reference
        cube.metadata = metadata
        if standardize:
            cube_stddev = climate_statistics(cube,
                                             operator='std_dev',
                                             period=period,
                                             seasons=seasons)
            cube = cube / cube_stddev
            cube.units = '1'
        return cube

    cube = _compute_anomalies(cube, reference, period, seasons)

    # standardize the results if requested
    if standardize:
        cube_stddev = climate_statistics(cube,
                                         operator='std_dev',
                                         period=period)
        tdim = cube.coord_dims('time')[0]
        reps = cube.shape[tdim] / cube_stddev.shape[tdim]
        if not reps % 1 == 0:
            raise ValueError(
                "Cannot safely apply preprocessor to this dataset, "
                "since the full time period of this dataset is not "
                f"a multiple of the period '{period}'")
        cube.data = cube.core_data() / da.concatenate(
            [cube_stddev.core_data() for _ in range(int(reps))], axis=tdim)
        cube.units = '1'
    return cube
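
A small numpy sketch (hypothetical shapes, not from the source) of the standardisation step above: the standard-deviation array has one value per period, so it is repeated along the time dimension before the division.

import numpy as np

anom = np.arange(12, dtype=float)         # stand-in for cube.core_data(), 12 time steps
stddev = np.array([1.0, 2.0, 4.0, 8.0])   # stand-in for cube_stddev.core_data(), 4 periods

reps = anom.shape[0] / stddev.shape[0]    # 3.0 -- must be a whole number
assert reps % 1 == 0
standardized = anom / np.concatenate([stddev] * int(reps))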
Example #51
0
def linear(cube, sample_points, extrapolation_mode='linear'):
    """
    Return a cube of the linearly interpolated points given the desired
    sample points.
    
    Given a list of tuple pairs mapping coordinates to their desired
    values, return a cube with linearly interpolated values. If more
    than one coordinate is specified, the linear interpolation will be
    carried out in sequence, thus providing n-linear interpolation
    (bi-linear, tri-linear, etc.).
    
    .. note::
        By definition, linear interpolation requires all coordinates to
        be 1-dimensional.
    
    Args:
    
    * cube
        The cube to be interpolated.
        
    * sample_points
        List of one or more tuple pairs mapping coordinate to desired
        points to interpolate. Points may be a scalar or a numpy array
        of values.
    
    Kwargs:
    
    * extrapolation_mode - string - one of 'linear', 'nan' or 'error'
    
        * If 'linear' the point will be calculated by extending the
          gradient of the closest two points.
        * If 'nan' the extrapolated point will be set to NaN.
        * If 'error' a ValueError will be raised notifying of the
          attempted extrapolation.
    
    .. note::
        The datatype of the resultant cube's data and coordinates will be
        updated to the data type of the incoming cube.
     
    """
    if not isinstance(cube, iris.cube.Cube):
        raise ValueError('Expecting a cube instance, got %s' % type(cube))

    if isinstance(sample_points, dict):
        warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
        sample_points = sample_points.items()

    # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
    if sample_points and not (isinstance(sample_points[0], collections.Container) and not isinstance(sample_points[0], basestring)):
        raise TypeError('Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.' % type(sample_points[0]))
    
    points = []
    for (coord, values) in sample_points:
        if isinstance(coord, basestring):
            coord = cube.coord(coord)
        else:
            coord = cube.coord(coord=coord)
        points.append((coord, values))
    sample_points = points

    if len(sample_points) == 0:
        raise ValueError('Expecting a non-empty list of coord value pairs, got %r.' % sample_points)

    if cube.data.dtype.kind == 'i':
        raise ValueError("Cannot linearly interpolate a cube which has integer type data. Consider casting the "
                         "cube's data to floating points in order to continue.")

    bounds_error = (extrapolation_mode == 'error')

    # Handle an over-specified points_dict or a specification which does not describe a data dimension
    data_dimensions_requested = []
    for coord, values in sample_points:
        if coord.ndim > 1:
            raise ValueError('Cannot linearly interpolate over %s as it is multi-dimensional.' % coord.name())
        data_dim = cube.coord_dims(coord)
        if not data_dim:
            raise ValueError('Requested a point over a coordinate which does not describe a dimension (%s).' % coord.name())
        else:
            data_dim = data_dim[0]
        if data_dim in data_dimensions_requested:
            raise ValueError('Requested a point which over specifies a dimension: (%s). ' % coord.name())
        data_dimensions_requested.append(data_dim)

    # Iterate over all of the requested keys in the given points_dict calling this routine repeatedly.
    if len(sample_points) > 1:
        result = cube
        for coord, cells in sample_points:
            result = linear(result, [(coord, cells)], extrapolation_mode=extrapolation_mode)
        return result
    
    else:
        # take the single coordinate name and associated cells from the dictionary
        coord, requested_points = sample_points[0]
        
        requested_points = numpy.array(requested_points, dtype=cube.data.dtype)
        
        # build up indices so that we can quickly subset the original cube to be of the desired size
        new_cube_slices = [slice(None, None)] * cube.data.ndim
        # get this coordinate's index position (which we have already tested is not None)
        data_dim = cube.coord_dims(coord)[0]
        
        if requested_points.ndim > 0:
            # we want the interested dimension to be of len(requested_points)
            new_cube_slices[data_dim] = tuple([0] * len(requested_points))
        else:
            new_cube_slices[data_dim] = 0
        
        # Subset the original cube to get an appropriately sized cube.
        # NB. This operation will convert any DimCoords on the dimension
        # being sliced into AuxCoords. This removes the value of their
        # `circular` flags, and there's nowhere left to put it.
        new_cube = cube[tuple(new_cube_slices)]

        # now that we have got a cube at the desired location, get the data.
        if getattr(coord, 'circular', False):
            coord_slice_in_cube = [slice(None, None)] * cube.data.ndim
            coord_slice_in_cube[data_dim] = slice(0, 1)
            points = numpy.append(coord.points, coord.points[0] + numpy.array(coord.units.modulus or 0, dtype=coord.dtype))
            data = numpy.append(cube.data, cube.data[tuple(coord_slice_in_cube)], axis=data_dim)
        else:
            points = coord.points
            data = cube.data
        
        if len(points) <= 1:
            raise ValueError('Cannot linearly interpolate a coordinate (%s) with one point.' % coord.name())
        
        monotonic, direction = iris.util.monotonic(points, return_direction=True)
        if not monotonic:
            raise ValueError('Unable to linearly interpolate this cube as the coordinate "%s" is not monotonic' % coord.name())
        
        # if the coord is monotonic decreasing, then we need to flip it as SciPy's interp1d is expecting monotonic increasing.
        if direction == -1:
            points = iris.util.reverse(points, axes=0)
            data = iris.util.reverse(data, axes=data_dim)
        
        # limit the datatype of the outcoming points to be the datatype of the cube's data
        # (otherwise, interp1d will up-cast an incoming pair. i.e. (int32, float32) -> float64)
        if points.dtype.num < data.dtype.num:
            points = points.astype(data.dtype)
        
        # Now that we have subsetted the original cube, we must update all coordinates on the data dimension.
        for shared_dim_coord in cube.coords(contains_dimension=data_dim):
            if shared_dim_coord.ndim != 1:
                raise iris.exceptions.NotYetImplementedError('Linear interpolation of multi-dimensional coordinates.')
            
            new_coord = new_cube.coord(coord=shared_dim_coord)
            new_coord.bounds = None
            
            if shared_dim_coord._as_defn() != coord._as_defn():
                shared_coord_points = shared_dim_coord.points
                if getattr(coord, 'circular', False):
                    mod_val = numpy.array(shared_dim_coord.units.modulus or 0, dtype=shared_coord_points.dtype)
                    shared_coord_points = numpy.append(shared_coord_points, shared_coord_points[0] + mod_val)
                
                # If the coordinate which we were interpolating over was monotonic decreasing,
                # we need to flip this coordinate's values
                if direction == -1:
                    shared_coord_points = iris.util.reverse(shared_coord_points, axes=0)
                
                coord_points = points
                
                if shared_coord_points.dtype.num < data.dtype.num:
                    shared_coord_points = shared_coord_points.astype(data.dtype)
                
                interpolator = interpolate.interp1d(coord_points, shared_coord_points,
                                                    kind='linear', bounds_error=bounds_error)
                
                if extrapolation_mode == 'linear':
                    interpolator = iris.util.Linear1dExtrapolator(interpolator)
                
                new_coord.points = interpolator(requested_points)
            else:
                new_coord.points = requested_points
                    
        # now we can go ahead and interpolate the data
        interpolator = interpolate.interp1d(points, data, axis=data_dim,
                                            kind='linear', copy=False,
                                            bounds_error=bounds_error)
        
        if extrapolation_mode == 'linear':
            interpolator = iris.util.Linear1dExtrapolator(interpolator)
        
        new_cube.data = interpolator(requested_points)
        
        return new_cube
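
The heart of the single-coordinate branch is a 1-D scipy interpolator applied along the coordinate's data dimension. A stripped-down, scipy-only sketch (not the iris implementation); bounds_error=False with a NaN fill roughly corresponds to the 'nan' extrapolation mode:

import numpy as np
from scipy import interpolate

points = np.array([0.0, 10.0, 20.0, 30.0])   # coordinate points (monotonic increasing)
data = np.arange(12.0).reshape(4, 3)         # data with the coordinate on axis 0

interpolator = interpolate.interp1d(points, data, axis=0, kind='linear',
                                    bounds_error=False, fill_value=np.nan)
print(interpolator([5.0, 25.0]))             # rows halfway between neighbouring rows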
Example #52
0
def fix_bpch2coards(cube, field, filename):
    """
    An Iris load callback for properly loading the NetCDF files
    created by BPCH2COARDS (GAMAP v2-17+).

    """
    global _coordcache2

    # units
    units = field.units
    try:
        cube.units = units
    except ValueError:
        # Try to get equivalent units compatible with udunits.
        # Store original unit as cube attribute
        conform_units = ctm2cf.get_cfcompliant_units(units)
        try:
            cube.units = conform_units
        except ValueError:
            warnings.warn("Invalid udunits2 '{0}'".format(units))
    cube.attributes["ctm_units"] = units

    # A hack to keep the cube's long_name but show its var_name in the cube summary.
    iris.std_names.STD_NAMES[cube.var_name] = {'canonical_units': cube.units}
    cube.standard_name = cube.var_name

    # attributes
    # TODO: don't remove all attributes
    cube.attributes.clear()

    # Longitude coordinate (not strictly monotonic): degrees -> degrees_east.
    try:
        lon = cube.coord('longitude')
        lon_dim = cube.coord_dims(lon)[0]
        cache_key = 'longitude', filename

        if _coordcache2.get(cache_key) is None:
            west_ind = np.nonzero(lon.points >= 180.)
            # Convert longitudes in [180, 360) to the range [-180, 0).
            lon.points[west_ind] = -1. * (360. - lon.points[west_ind])
            lon.units = 'degrees_east'
            _coordcache2[cache_key] = iris.coords.DimCoord.from_coord(lon)

        cube.remove_coord(lon)
        cube.add_dim_coord(_coordcache2[cache_key], lon_dim)
    except iris.exceptions.CoordinateNotFoundError:
        pass

    # levels coordinate
    # 'sigma_level' is deprecated in the CF standard (not supported by UDUNITS).
    try:
        lev = cube.coord('Eta Centers')
        lev_dim = cube.coord_dims(lev)[0]
        lev_std_name = 'atmosphere_hybrid_sigma_pressure_coordinate'
        cache_key = lev_std_name, filename

        if _coordcache2.get(cache_key) is None:
            lev.standard_name = lev_std_name
            lev.units = iris.unit.Unit('1')
            d = nc.Dataset(filename)
            elev = d.variables['edge'][:]
            lev.bounds = np.column_stack((elev[:-1], elev[1:]))
            _coordcache2[cache_key] = iris.coords.DimCoord.from_coord(lev)

        cube.remove_coord(lev)
        cube.add_dim_coord(_coordcache2[cache_key], lev_dim)
    except iris.exceptions.CoordinateNotFoundError:
        pass

    # time: dimension -> scalar coordinate (+ add bounds)
    try:
        time_coord = cube.coord('time')
        time_dim = cube.coord_dims(time_coord)[0]

        with iris.FUTURE.context(cell_datetime_objects=True):
            tstart = time_coord.cell(0).point
        delta_t = time_coord.attributes.pop('delta_t')
        tend = tstart + timeutil.strp_relativedelta(delta_t)
        time_coord.bounds = [timeutil.time2tau(tstart),
                             timeutil.time2tau(tend)]
        if cube.shape[time_dim] == 1:
            slices_dims = [d for d in range(cube.ndim) if d != time_dim]
            return next(cube.slices(slices_dims))
    except iris.exceptions.CoordinateNotFoundError:
        pass
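
A minimal usage sketch (assumed, not shown in the source): Iris load callbacks with the (cube, field, filename) signature are passed through the callback keyword of iris.load; the file name here is hypothetical.

import iris

# Apply the fix-up callback to every cube loaded from the (hypothetical) file.
cubes = iris.load('bpch2coards_output.nc', callback=fix_bpch2coards)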
Example #53
0
def get_span(coord):
    if isinstance(coord, int):
        span = set([coord])
    else:
        span = set(cube.coord_dims(coord))
    return span
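
A brief illustration of the helper's behaviour (assuming a cube bound in the enclosing scope, as the closure over cube implies): an integer is taken as a dimension index directly, while a coordinate is resolved to the data dimensions it spans.

# Both calls return a set of dimension indices, e.g. {0}; 'time' is illustrative.
span_from_index = get_span(0)
span_from_coord = get_span(cube.coord('time'))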
Example #54
0
def _add_subtract_common(operation_function,
                         operation_noun,
                         operation_past_tense,
                         cube,
                         other,
                         dim=None,
                         ignore=True,
                         in_place=False):
    """
    Function which shares common code between addition and subtraction of cubes.

    operation_function   - function which does the operation (e.g. numpy.subtract)
    operation_noun       - the noun of the operation (e.g. 'subtraction')
    operation_past_tense - the past tense of the operation (e.g. 'subtracted')

    """
    _assert_is_cube(cube)
    _assert_matching_units(cube, other, operation_noun)

    if isinstance(other, iris.cube.Cube):
        # get a coordinate comparison of this cube and the cube to do the
        # operation with
        coord_comp = iris.analysis.coord_comparison(cube, other)

        if coord_comp['transposable']:
            # User does not need to transpose their cubes if numpy
            # array broadcasting will make the dimensions match
            broadcast_padding = cube.ndim - other.ndim
            coord_dims_equal = True
            for coord_group in coord_comp['transposable']:
                cube_coord, other_coord = coord_group.coords
                cube_coord_dims = cube.coord_dims(cube_coord)
                other_coord_dims = other.coord_dims(other_coord)
                other_coord_dims_broadcasted = tuple(
                    [dim + broadcast_padding for dim in other_coord_dims])
                if cube_coord_dims != other_coord_dims_broadcasted:
                    coord_dims_equal = False

            if not coord_dims_equal:
                raise ValueError('Cubes cannot be %s, differing axes. '
                                 'cube.transpose() may be required to '
                                 're-order the axes.' % operation_past_tense)

        # provide a deprecation warning if the ignore keyword has been set
        if ignore is not True:
            warnings.warn('The "ignore" keyword has been deprecated in '
                          'add/subtract. This functionality is now automatic. '
                          'The provided value to "ignore" has been ignored, '
                          'and has been automatically calculated.')

        bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] +
                          coord_comp['resamplable'])
        if bad_coord_grps:
            raise ValueError(
                'This operation cannot be performed as there are '
                'differing coordinates (%s) remaining '
                'which cannot be ignored.' %
                ', '.join({coord_grp.name()
                           for coord_grp in bad_coord_grps}))
    else:
        coord_comp = None

    new_cube = _binary_op_common(operation_function, operation_noun, cube,
                                 other, cube.units, dim, in_place)

    if coord_comp:
        # If a coordinate is to be ignored - remove it
        ignore = filter(
            None, [coord_grp[0] for coord_grp in coord_comp['ignorable']])
        for coord in ignore:
            new_cube.remove_coord(coord)

    return new_cube
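
A minimal usage sketch (an assumption about the public wrappers): this private helper underlies cube addition and subtraction, which are normally reached via iris.analysis.maths; the cubes named below are hypothetical, unit-compatible iris.cube.Cube objects.

import iris.analysis.maths as maths

total = maths.add(cube_a, cube_b)
difference = maths.subtract(cube_a, cube_b, in_place=False)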