Example #1
def hypsometric_interpolation(voided_ddem: Union[np.ndarray,
                                                 np.ma.masked_array],
                              ref_dem: Union[np.ndarray, np.ma.masked_array],
                              mask: np.ndarray) -> np.ma.masked_array:
    """
    Interpolate a dDEM using hypsometric interpolation within the given mask.

    Using `ref_dem`, elevation bins of constant height (hard-coded to 50 m for now) are created.
    Gaps in `voided_ddem`, within the provided `mask`, are filled with the median dDEM value within that bin.

    :param voided_ddem: A dDEM with voids (either an array with nans or a masked array).
    :param ref_dem: The reference DEM in the dDEM comparison.
    :param mask: A mask to delineate the area that will be interpolated (True means hypsometric will be used).
    """
    # Get ddem array with invalid pixels converted to NaN and mask of invalid pixels
    ddem, ddem_mask = spatial_tools.get_array_and_mask(voided_ddem)

    # Get ref_dem array with invalid pixels converted to NaN and mask of invalid pixels
    dem, dem_mask = spatial_tools.get_array_and_mask(ref_dem)

    # Make sure the mask does not have e.g. the shape (1, height, width)
    mask = mask.squeeze()

    # A mask of inlier values: the intersection of the mask and the valid-data masks of both rasters.
    inlier_mask = mask & (~ddem_mask & ~dem_mask)
    if np.count_nonzero(inlier_mask) == 0:
        warnings.warn("No valid data found within mask, returning copy",
                      UserWarning)
        return np.copy(ddem)

    # Estimate the elevation dependent gradient.
    gradient = xdem.volume.hypsometric_binning(ddem[inlier_mask],
                                               dem[inlier_mask])

    # Interpolate possible missing elevation bins in 1D - no extrapolation done here
    interpolated_gradient = xdem.volume.interpolate_hypsometric_bins(gradient)

    gradient_model = scipy.interpolate.interp1d(
        interpolated_gradient.index.mid,
        interpolated_gradient["value"].values,
        fill_value="extrapolate")

    # Create an idealized dDEM using the relationship between elevation and dDEM
    idealized_ddem = np.zeros_like(dem)
    idealized_ddem[mask] = gradient_model(dem[mask])

    # Replace ddem gaps with idealized hypsometric ddem, but only within mask
    corrected_ddem = np.where(ddem_mask & mask, idealized_ddem, ddem)

    output = np.ma.masked_array(corrected_ddem,
                                mask=~np.isfinite(corrected_ddem))

    assert output is not None

    return output
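
A minimal usage sketch for the function above, assuming xdem and its spatial_tools module are importable; the synthetic ramp DEM, the noise, and the void placement are illustrative assumptions, not part of the original example:

import numpy as np

# Reference DEM rising linearly from 0 to 500 m, row by row.
ref_dem = np.repeat(np.linspace(0, 500, 100), 100).reshape(100, 100)
# dDEM with an elevation-dependent trend, some noise, and a square void.
rng = np.random.default_rng(42)
ddem = -0.01 * ref_dem + rng.normal(scale=0.5, size=ref_dem.shape)
ddem[40:60, 40:60] = np.nan
# Interpolate everywhere (mask of all True).
mask = np.ones(ddem.shape, dtype=bool)

filled = hypsometric_interpolation(ddem, ref_dem, mask)
# The void is now filled with the median dDEM of each 50 m elevation bin.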
Example #2
def norm_regional_hypsometric_interpolation(
        voided_ddem: Union[np.ndarray, np.ma.masked_array],
        ref_dem: Union[np.ndarray, np.ma.masked_array],
        glacier_index_map: np.ndarray,
        min_coverage: float = 0.1,
        regional_signal: Optional[pd.DataFrame] = None,
        verbose: bool = False,
        min_elevation_range: float = 0.33,
        idealized_ddem: bool = False) -> np.ndarray:
    """
    Interpolate missing values by scaling the normalized regional hypsometric signal to each glacier separately.

    Only missing values are interpolated. The rest of the glacier's values are fixed.

    :param voided_ddem: The voided dDEM to fill NaNs in.
    :param ref_dem: A void-free reference DEM.
    :param glacier_index_map: An array of glacier indices with the same shape as the previous inputs.
    :param min_coverage: The minimum fractional coverage of a glacier to interpolate. Defaults to 10%.
    :param regional_signal: A regional signal that has already been estimated. If None, one will be estimated.
    :param verbose: Show progress bars.
    :param min_elevation_range: The minimum allowed min/max bin range to scale a signal from.\
            Default: 1/3 of the elevation range needs to be present.
    :param idealized_ddem: Replace observed glacier values with the hypsometric signal. Good for error assessments.

    :raises AssertionError: If `ref_dem` has voids.

    :returns: A dDEM where glaciers that fulfil the min_coverage criterion are interpolated.
    """
    # Extract the array and nan parts of the inputs.
    ddem_arr, ddem_nans = spatial_tools.get_array_and_mask(voided_ddem)
    ref_arr, ref_nans = spatial_tools.get_array_and_mask(ref_dem)

    # The reference DEM should be void free
    assert np.count_nonzero(ref_nans) == 0, "Reference DEM has voids"

    # If the regional signal was not given as an argument, find it from the dDEM.
    if regional_signal is None:
        regional_signal = get_regional_hypsometric_signal(
            ddem=ddem_arr,
            ref_dem=ref_arr,
            glacier_index_map=glacier_index_map,
            verbose=verbose)

    # The unique indices are the unique glaciers.
    unique_indices = np.unique(glacier_index_map)

    # Make a copy of the dDEM which will be filled iteratively.
    ddem_filled = ddem_arr.copy()
    # Loop over all glaciers and fill the dDEM accordingly.
    for i in tqdm(unique_indices,
                  desc="Interpolating dDEM",
                  disable=(not verbose)):
        if i == 0:  # i==0 is assumed to mean stable ground.
            continue
        # Create a mask representing a particular glacier.
        glacier_values = (glacier_index_map == i)

        # The inlier mask is where that particular glacier is and where nans don't exist.
        inlier_mask = glacier_values & ~ddem_nans

        # If the fractional coverage is smaller than the given threshold, skip the glacier.
        if (np.count_nonzero(inlier_mask) /
                np.count_nonzero(glacier_values)) < min_coverage:
            continue

        # Extract only the finite difference and elevation values that correspond to the glacier.
        differences = ddem_arr[inlier_mask]
        elevations = ref_arr[inlier_mask]

        # Get the reference elevation min and max
        elev_min = ref_arr[glacier_values].min()
        elev_max = ref_arr[glacier_values].max()

        # Copy the signal
        signal = regional_signal["w_mean"].copy()
        # Scale the signal elevation midpoints to the glacier elevation range.
        midpoints = signal.index.mid
        midpoints *= elev_max - elev_min
        midpoints += elev_min
        step = midpoints[1] - midpoints[0]
        # Create an interval structure from the midpoints and the step size.
        signal.index = pd.IntervalIndex.from_arrays(left=midpoints - step / 2,
                                                    right=midpoints + step / 2)

        # Find the hypsometric bins of the glacier.
        hypsometric_bins = hypsometric_binning(
            ddem=differences,
            ref_dem=elevations,
            # These breaks generate the same steps as the signal.
            bins=np.r_[[signal.index.left[0]], signal.index.right],
            kind="custom")
        bin_stds = hypsometric_binning(ddem=differences,
                                       ref_dem=elevations,
                                       bins=np.r_[[signal.index.left[0]],
                                                  signal.index.right],
                                       kind="custom",
                                       aggregation_function=np.nanstd)
        # Check which of the bins were non-empty.
        non_empty_bins = np.isfinite(hypsometric_bins["value"])

        non_empty_range = np.sum(non_empty_bins[non_empty_bins].index.length)
        full_range = np.sum(hypsometric_bins.index.length)

        if (non_empty_range / full_range) < min_elevation_range:
            continue

        # A theoretical minimum of 2 bins is needed for the curve fit.
        if np.count_nonzero(non_empty_bins) < 2:
            continue

        # The weights are the standard error of each bin: std / sqrt(count).
        bin_weights = bin_stds["value"].values[non_empty_bins] / \
            np.sqrt(hypsometric_bins["count"].values[non_empty_bins])
        bin_weights[bin_weights == 0.0] = 1e-8  # Avoid divide-by-zero problems.

        # Fit linear coefficients to scale the regional signal to the hypsometric bins properly.
        # The bin standard errors are used as sigma, to properly down-weight poorly constrained bins.
        with warnings.catch_warnings():
            # curve_fit will sometimes say "can't estimate covariance". This is okay.
            warnings.filterwarnings("ignore", message="covariance")
            coeffs = scipy.optimize.curve_fit(
                f=lambda x, a, b: a * x + b,  # Estimate a linear function "f(x) = ax + b".
                xdata=signal.values[non_empty_bins],  # The xdata is the normalized regional signal.
                ydata=hypsometric_bins["value"].values[non_empty_bins],  # The ydata is the actual values.
                p0=[1, 0],  # The initial guess of a and b (doesn't matter too much).
                sigma=bin_weights,
            )[0]

        # Create a linear model from the elevations and the scaled regional signal.
        model = scipy.interpolate.interp1d(signal.index.mid,
                                           np.poly1d(coeffs)(signal.values),
                                           bounds_error=False,
                                           fill_value="extrapolate")

        # Find which values to fill using the model (all nans within the glacier extent)
        if not idealized_ddem:
            values_to_fill = glacier_values & ddem_nans
        # If it should be idealized, replace all glacier values with the model
        else:
            values_to_fill = glacier_values
        # Fill the nans using the scaled regional signal.
        ddem_filled[values_to_fill] = model(ref_arr[values_to_fill])

    return ddem_filled
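
A minimal usage sketch (illustrative assumptions: the same synthetic ramp DEM as above and a hand-drawn two-glacier index map):

import numpy as np

ref_dem = np.repeat(np.linspace(0, 500, 100), 100).reshape(100, 100)
rng = np.random.default_rng(42)
ddem = -0.01 * ref_dem + rng.normal(scale=0.5, size=ref_dem.shape)
ddem[20:40, 20:40] = np.nan

# 0 is assumed to be stable ground; 1 and 2 mark two glaciers.
glacier_index_map = np.zeros(ddem.shape, dtype=int)
glacier_index_map[10:50, 10:50] = 1
glacier_index_map[60:90, 60:90] = 2

filled = norm_regional_hypsometric_interpolation(ddem, ref_dem, glacier_index_map)
# Voids on glacier 1 are now filled with its scaled version of the regional signal.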
Example #3
def get_regional_hypsometric_signal(
        ddem: Union[np.ndarray, np.ma.masked_array],
        ref_dem: Union[np.ndarray, np.ma.masked_array],
        glacier_index_map: np.ndarray,
        n_bins: int = 20,
        verbose: bool = False,
        min_coverage: float = 0.05) -> pd.DataFrame:
    """
    Get the normalized regional hypsometric elevation change signal, read "the general shape of it".

    :param ddem: The dDEM to analyse.
    :param ref_dem: A void-free reference DEM.
    :param glacier_index_map: An array of glacier indices with the same shape as the previous inputs.
    :param n_bins: The number of elevation bins to subdivide each glacier into.
    :param verbose: Show progress bar.
    :param min_coverage: The minimum fractional coverage of a glacier to be included in the signal.

    :returns: A DataFrame of bin statistics, scaled by elevation and elevation change.
    """
    # Extract the array and mask representations of the arrays.
    ddem_arr, ddem_mask = spatial_tools.get_array_and_mask(ddem.squeeze())
    ref_arr, ref_mask = spatial_tools.get_array_and_mask(ref_dem.squeeze())

    # The reference DEM should be void free
    assert np.count_nonzero(ref_mask) == 0, "Reference DEM has voids"

    # The unique indices are the unique glaciers.
    unique_indices = np.unique(glacier_index_map)

    # Create empty (ddem) value and (pixel) count arrays which will be filled iteratively.
    values = np.full((n_bins, unique_indices.shape[0]), np.nan, dtype=float)
    counts = np.full((n_bins, unique_indices.shape[0]), np.nan, dtype=float)

    # Start a counter of glaciers that are actually processed.
    count = 0
    # Loop over each unique glacier.
    for i in tqdm(unique_indices,
                  desc="Finding regional signal",
                  disable=(not verbose)):
        # If i == 0, it's assumed to be periglacial.
        if i == 0:
            continue
        # Create a mask representing a particular glacier.
        glacier_values = (glacier_index_map == i)

        # Stop if the "glacier" is tiny. It might be a cropped glacier outline for example.
        if np.count_nonzero(glacier_values) < 10:
            continue

        # The inlier mask is where that particular glacier is and where nans don't exist.
        inlier_mask = glacier_values & ~ddem_mask

        # Skip if the coverage is below the threshold
        if (np.count_nonzero(inlier_mask) /
                np.count_nonzero(glacier_values)) < min_coverage:
            continue

        # Extract only the difference and elevation values that correspond to the glacier.
        differences = ddem_arr[inlier_mask]
        elevations = ref_arr[inlier_mask]

        # Run the hypsometric binning.
        try:
            bins = hypsometric_binning(differences,
                                       elevations,
                                       bins=n_bins,
                                       kind="count")
        except ValueError:  # ValueError: zero-size array to reduction operation minimum which has no identity on "zbins=" call
            continue

        # Min-max scale by elevation.
        bins.index = (bins.index.mid - bins.index.left.min()) / (
            bins.index.right.max() - bins.index.left.min())

        # Scale by difference.
        bins["value"] = (bins["value"] - np.nanmin(bins["value"])) / \
            (np.nanmax(bins["value"]) - np.nanmin(bins["value"]))

        # Assign the values and counts to the output array.
        values[:, count] = bins["value"]
        counts[:, count] = bins["count"]

        count += 1

    output = pd.DataFrame(
        data={
            "w_mean": np.nansum(values * counts, axis=1) / np.nansum(counts, axis=1),
            "median": np.nanmedian(values, axis=1),
            "std": np.nanstd(values, axis=1),
            "sigma-1-lower": np.nanpercentile(values, 16, axis=1),
            "sigma-1-upper": np.nanpercentile(values, 84, axis=1),
            "sigma-2-lower": np.nanpercentile(values, 2.5, axis=1),
            "sigma-2-upper": np.nanpercentile(values, 97.5, axis=1),
            "count": np.nansum(counts, axis=1).astype(int),
        },
        index=pd.IntervalIndex.from_breaks(
            np.linspace(0, 1, n_bins + 1, dtype="float64")),
    )

    return output
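
A minimal sketch of inspecting the returned signal, reusing the synthetic ddem/ref_dem/glacier_index_map arrays from the sketch above (again an illustration, not part of xdem):

import matplotlib.pyplot as plt

signal = get_regional_hypsometric_signal(ddem, ref_dem, glacier_index_map)

# Normalized elevation (0 = glacier bottom, 1 = top) vs. normalized elevation change.
plt.fill_betweenx(signal.index.mid, signal["sigma-1-lower"], signal["sigma-1-upper"], alpha=0.3)
plt.plot(signal["w_mean"], signal.index.mid)
plt.xlabel("Normalized dDEM")
plt.ylabel("Normalized elevation")
plt.show()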
Example #4
def local_hypsometric_interpolation(voided_ddem: Union[np.ndarray,
                                                       np.ma.masked_array],
                                    ref_dem: Union[np.ndarray,
                                                   np.ma.masked_array],
                                    mask: np.ndarray,
                                    min_coverage: float = 0.2,
                                    count_threshold: Optional[int] = 1,
                                    nodata: Union[float, int] = -9999,
                                    plot: bool = False) -> np.ma.masked_array:
    """
    Interpolate a dDEM using local hypsometric interpolation.
    The algorithm loops over each feature encoded in the mask.

    The dDEM is assumed to have been created as "voided_ddem = reference_dem - other_dem".

    :param voided_ddem: A dDEM with voids (either an array with nans or a masked array).
    :param ref_dem: The reference DEM in the dDEM comparison.
    :param mask: A raster of the same shape as voided_ddem and ref_dem, containing a different non-zero pixel value for \
each geometry on which to loop.
    :param min_coverage: Optional. The minimum coverage fraction to be considered for interpolation.
    :param count_threshold: Optional. A pixel count threshold to exclude during the hypsometric curve fit.
    :param nodata: Optional. No data value to be used for the output masked_array.
    :param plot: Set to True to display intermediate plots.

    :returns: A dDEM with gaps filled by applying a hypsometric interpolation for each geometry in mask, \
for areas fulfilling the min_coverage criterion.
    """
    # Remove any unnecessary dimension
    orig_shape = voided_ddem.shape
    voided_ddem = voided_ddem.squeeze()
    ref_dem = ref_dem.squeeze()
    mask = mask.squeeze()

    # Check that all arrays have same dimensions
    assert voided_ddem.shape == ref_dem.shape == mask.shape

    # Get ddem array with invalid pixels converted to NaN and mask of invalid pixels
    ddem, ddem_mask = spatial_tools.get_array_and_mask(voided_ddem)

    # Get ref_dem array with invalid pixels converted to NaN and mask of invalid pixels
    dem, dem_mask = spatial_tools.get_array_and_mask(ref_dem)

    # A mask of inlier values: the intersection of the mask and the valid-data masks of both rasters.
    inlier_mask = (mask != 0) & (~ddem_mask & ~dem_mask)
    if np.count_nonzero(inlier_mask) == 0:
        warnings.warn("No valid data found within mask, returning copy",
                      UserWarning)
        return np.copy(ddem)

    if plot:
        plt.matshow(inlier_mask)
        plt.title("inlier mask")
        plt.show()

    # List of indexes to loop on
    geometry_index = np.unique(mask[mask != 0])
    print("Found {:d} geometries".format(len(geometry_index)))

    # Get fraction of valid pixels for each geometry
    coverage = np.zeros(len(geometry_index))
    for k, index in enumerate(geometry_index):
        local_inlier_mask = inlier_mask & (mask == index)
        total_pixels = np.count_nonzero((mask == index))
        valid_pixels = np.count_nonzero(local_inlier_mask)
        coverage[k] = valid_pixels / float(total_pixels)

    # Filter geometries with too little coverage
    valid_geometry_index = geometry_index[coverage >= min_coverage]
    print("Found {:d} geometries with sufficient coverage".format(
        len(valid_geometry_index)))

    idealized_ddem = nodata * np.ones_like(dem)

    for k, index in enumerate(valid_geometry_index):

        # Mask of valid pixel within geometry
        local_mask = (mask == index)
        local_inlier_mask = inlier_mask & (local_mask)

        # Estimate the elevation dependent gradient
        gradient = xdem.volume.hypsometric_binning(ddem[local_mask],
                                                   dem[local_mask])

        # Remove bins with too low a count
        filt_gradient = gradient.copy()
        if count_threshold > 1:
            bins_under_threshold = filt_gradient["count"] < count_threshold
            filt_gradient.loc[bins_under_threshold, "value"] = np.nan

        # Interpolate missing elevation bins
        interpolated_gradient = xdem.volume.interpolate_hypsometric_bins(
            filt_gradient)

        # At least 2 points needed for interp1d, if not skip feature
        nvalues = len(interpolated_gradient['value'].values)
        if nvalues < 2:
            warnings.warn(
                "Not enough valid bins for feature with index {:d} -> skipping interpolation"
                .format(index), UserWarning)
            continue

        # Create a model for 1D interpolation of the gradient
        gradient_model = scipy.interpolate.interp1d(
            interpolated_gradient.index.mid,
            interpolated_gradient['value'].values,
            fill_value="extrapolate")

        if plot:
            local_ddem = np.where(local_inlier_mask, ddem, np.nan)
            vmax = max(np.abs(np.nanpercentile(local_ddem, [2, 98])))
            rowmin, rowmax, colmin, colmax = spatial_tools.get_valid_extent(
                mask == index)

            fig = plt.figure(figsize=(12, 8))
            plt.subplot(121)
            plt.imshow((mask == index)[rowmin:rowmax, colmin:colmax],
                       cmap='Greys',
                       vmin=0,
                       vmax=2,
                       interpolation='none')

            plt.imshow(local_ddem[rowmin:rowmax, colmin:colmax],
                       cmap='RdYlBu',
                       vmin=-vmax,
                       vmax=vmax,
                       interpolation='none')
            plt.colorbar()
            plt.title("ddem for geometry # {:d}".format(index))

            plt.subplot(122)
            plt.plot(gradient["value"], gradient.index.mid, label='raw')
            plt.plot(interpolated_gradient["value"],
                     gradient.index.mid,
                     label='interpolated',
                     ls='--')
            plt.xlabel('ddem')
            plt.ylabel('Elevation')
            plt.legend()
            plt.title("Average ddem per elevation bin")
            plt.tight_layout()
            plt.show()

        # Create an idealized dDEM (only considering the dH gradient)
        idealized_ddem[mask == index] = gradient_model(dem[mask == index])

    # Measure the difference between the original dDEM and the idealized dDEM
    assert ddem.shape == idealized_ddem.shape
    ddem_difference = ddem.astype("float32") - idealized_ddem.astype("float32")
    ddem_difference[idealized_ddem == nodata] = np.nan

    # Spatially interpolate the difference between these two products.
    interpolated_ddem_diff = linear_interpolation(
        np.where(ddem_mask, np.nan, ddem_difference))
    interpolated_ddem_diff[np.isnan(interpolated_ddem_diff)] = 0

    # Correct the idealized dDEM with the difference to the original dDEM.
    corrected_ddem = idealized_ddem + interpolated_ddem_diff

    # Set Nans to nodata
    corrected_ddem[~np.isfinite(corrected_ddem)] = nodata

    output = np.ma.masked_array(
        corrected_ddem,
        mask=(corrected_ddem == nodata)
    ).reshape(orig_shape)

    assert output is not None

    return output
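
A minimal usage sketch, reusing the synthetic arrays from the earlier examples with the glacier index map standing in as the geometry mask (illustrative; assumes spatial_tools and the module-level linear_interpolation helper are importable):

import numpy as np

ref_dem = np.repeat(np.linspace(0, 500, 100), 100).reshape(100, 100)
rng = np.random.default_rng(42)
ddem = -0.01 * ref_dem + rng.normal(scale=0.5, size=ref_dem.shape)
ddem[20:40, 20:40] = np.nan

# One non-zero index per geometry; 0 means outside all geometries.
mask = np.zeros(ddem.shape, dtype=int)
mask[10:50, 10:50] = 1
mask[60:90, 60:90] = 2

filled = local_hypsometric_interpolation(ddem, ref_dem, mask, plot=False)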
Example #5
def hypsometric_binning(
        ddem: np.ndarray,
        ref_dem: np.ndarray,
        bins: Union[float, np.ndarray] = 50.0,
        kind: str = "fixed",
        aggregation_function: Callable = np.median) -> pd.DataFrame:
    """
    Separate the dDEM into discrete elevation bins.
    The elevation bins will be calculated based on all valid ref_dem pixels.
    ddem may contain NaN/masked values over the same area; they will be excluded before the aggregation.

    It is assumed that the dDEM is calculated as 'ref_dem - dem' (not 'dem - ref_dem').

    :param ddem: The dDEM as a 2D or 1D array.
    :param ref_dem: The reference DEM as a 2D or 1D array.
    :param bins: The bin size, count, or array, depending on the binning method ('kind').
    :param kind: The kind of binning to do. Choices: ['fixed', 'count', 'quantile', 'custom'].
    :param aggregation_function: The function to aggregate the elevation values within a bin. Defaults to the median.

    :returns: A Pandas DataFrame with elevation bins and dDEM statistics.
    """
    assert ddem.shape == ref_dem.shape

    # Convert ddem mask into NaN
    ddem, _ = spatial_tools.get_array_and_mask(ddem)

    # Extract only the valid values, i.e. valid in ref_dem
    valid_mask = ~spatial_tools.get_mask(ref_dem)
    ddem = np.array(ddem[valid_mask])
    ref_dem = np.array(ref_dem.squeeze()[valid_mask])

    if isinstance(bins, np.ndarray):
        zbins = bins
    elif kind == "fixed":
        zbins = np.arange(ref_dem.min(),
                          ref_dem.max() + bins + 1e-6,
                          step=bins)  # +1e-6 in case min=max (1 point)
    elif kind == "count":
        # Make bins between mean_dem.min() and a little bit above mean_dem.max().
        # The bin count has to be bins + 1 because zbins[0] will be a "below min value" bin, which will be irrelevant.
        zbins = np.linspace(ref_dem.min(),
                            ref_dem.max() + 1e-6 / bins,
                            num=int(bins + 1))
    elif kind == "quantile":
        # Make the percentile steps. The bins + 1 is explained above.
        steps = np.linspace(0, 100, num=int(bins) + 1)
        zbins = np.fromiter((np.percentile(ref_dem, step) for step in steps),
                            dtype=float)
        # The uppermost bin needs to be a tiny amount larger than the highest value to include it.
        zbins[-1] += 1e-6
    elif kind == "custom":
        zbins = bins  # type: ignore
    else:
        raise ValueError(
            f"Invalid bin kind: {kind}. Choices: ['fixed', 'count', 'quantile', 'custom']"
        )

    # Generate bins and get bin indices from the mean DEM
    indices = np.digitize(ref_dem, bins=zbins)

    # Calculate statistics for each bin.
    # If no values exist, all stats should be NaN (except count, which should be 0).
    values = np.full(shape=zbins.shape[0] - 1, fill_value=np.nan, dtype=ddem.dtype)
    counts = np.zeros_like(values, dtype=int)
    for i in np.arange(indices.min(), indices.max() + 1):
        values_in_bin = ddem[indices == i]

        # Remove possible Nans
        values_in_bin = values_in_bin[np.isfinite(values_in_bin)]

        # Skip if no values are in the bin.
        if values_in_bin.shape[0] == 0:
            continue

        try:
            values[i - 1] = aggregation_function(values_in_bin)
            counts[i - 1] = values_in_bin.shape[0]
        except IndexError as exception:
            # If custom bins were added, i may exceed the bin range, which will be silently ignored.
            if kind == "custom" and "out of bounds" in str(exception):
                continue
            raise exception

    # Collect the results in a dataframe
    output = pd.DataFrame(index=pd.IntervalIndex.from_breaks(zbins),
                          data=np.vstack([values, counts]).T,
                          columns=["value", "count"])

    return output
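
A minimal sketch of hypsometric_binning() on synthetic 1D arrays (the values are illustrative):

import numpy as np

elevations = np.linspace(100, 400, 1000)   # Reference elevations.
changes = -2.0 + elevations / 200.0        # An elevation-dependent change signal.

bins = hypsometric_binning(changes, elevations, bins=50.0, kind="fixed")
print(bins)  # 50 m IntervalIndex with "value" (median dH) and "count" columns.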
Example #6
def get_terrain_attribute(
    dem: np.ndarray | np.ma.masked_array | RasterType,
    attribute: str | list[str],
    resolution: tuple[float, float] | float | None = None,
    degrees: bool = True,
    hillshade_altitude: float = 45.0,
    hillshade_azimuth: float = 315.0,
    hillshade_z_factor: float = 1.0,
    fill_method: str = "median",
    edge_method: str = "nearest",
) -> np.ndarray | list[np.ndarray] | Raster | list[Raster]:
    """
    Derive one or multiple terrain attributes from a DEM.

    Attributes:
        * 'slope': The slope in degrees or radians (degs: 0=flat, 90=vertical).
        * 'aspect': The slope aspect in degrees or radians (degs: 0=N, 90=E, 180=S, 270=W).
        * 'hillshade': The shaded slope in relation to its aspect.
        * 'curvature': The second derivative of elevation (the rate of slope change per pixel), multiplied by 100.
        * 'planform_curvature': The curvature perpendicular to the direction of the slope.
        * 'profile_curvature': The curvature parallel to the direction of the slope.
        * 'surface_fit': A quadric surface fit for each individual pixel. For more info, see get_quadric_coefficients()

    :param dem: The DEM to analyze.
    :param attribute: The terrain attribute(s) to calculate.
    :param resolution: The X/Y or (X, Y) resolution of the DEM.
    :param degrees: Convert radians to degrees?
    :param hillshade_altitude: The shading altitude in degrees (0-90°). 90° is straight from above.
    :param hillshade_azimuth: The shading azimuth in degrees (0-360°) going clockwise, starting from north.
    :param hillshade_z_factor: Vertical exaggeration factor.
    :param fill_method: See the 'get_quadric_coefficients()' docstring for information.
    :param edge_method: see the 'get_quadric_coefficients()' docstring for information.

    :raises ValueError: If the inputs are poorly formatted or are invalid.

    :examples:
        >>> dem = np.repeat(np.arange(3), 3).reshape(3, 3)
        >>> dem
        array([[0, 0, 0],
               [1, 1, 1],
               [2, 2, 2]])
        >>> slope, aspect = get_terrain_attribute(dem, ["slope", "aspect"], resolution=1)
        >>> slope  # Note the flattening edge effect; see 'get_quadric_coefficients()' for more.
        array([[26.56505118, 26.56505118, 26.56505118],
               [45.        , 45.        , 45.        ],
               [26.56505118, 26.56505118, 26.56505118]])
        >>> aspect
        array([[0., 0., 0.],
               [0., 0., 0.],
               [0., 0., 0.]])

    :returns: One or multiple arrays of the requested attribute(s)
    """
    if isinstance(dem, gu.Raster):
        if resolution is None:
            resolution = dem.res

    if resolution is None:
        raise ValueError("'resolution' must be provided as an argument.")
    # Validate and format the inputs
    if isinstance(attribute, str):
        attribute = [attribute]

    choices = [
        "slope", "aspect", "hillshade", "curvature", "planform_curvature",
        "profile_curvature", "surface_fit"
    ]
    for attr in attribute:
        if attr not in choices:
            raise ValueError(
                f"Attribute '{attr}' is not supported. Choices: {choices}")

    if not isinstance(resolution, Sized):
        resolution = (float(resolution), float(resolution))

    if (hillshade_azimuth < 0.0) or (hillshade_azimuth > 360.0):
        raise ValueError(
            f"Azimuth must be a value between 0 and 360 degrees (given value: {hillshade_azimuth})"
        )
    if (hillshade_altitude < 0.0) or (hillshade_altitude > 90):
        raise ValueError(
            f"Altitude must be a value between 0 and 90 degrees (given value: {hillshade_altitude})"
        )
    if (hillshade_z_factor < 0.0) or not np.isfinite(hillshade_z_factor):
        raise ValueError(
            f"z_factor must be a non-negative finite value (given value: {hillshade_z_factor})"
        )

    dem_arr = spatial_tools.get_array_and_mask(dem)[0]

    # Initialize the terrain_attributes dictionary, which will be filled with the requested values.
    terrain_attributes: dict[str, np.ndarray] = {}

    # These require the get_quadric_coefficients() function, which requires the same X/Y resolution.
    attributes_requiring_surface_fit = [
        attr for attr in attribute if attr in [
            "curvature", "planform_curvature", "profile_curvature", "slope",
            "hillshade", "aspect"
        ]
    ]

    # Check which products should be made
    make_aspect = any(attr in attribute for attr in ["aspect", "hillshade"])
    make_slope = any(attr in attribute for attr in [
        "slope", "hillshade", "planform_curvature", "aspect",
        "profile_curvature"
    ])
    make_hillshade = "hillshade" in attribute
    make_surface_fit = len(attributes_requiring_surface_fit) > 0
    make_curvature = "curvature" in attribute
    make_planform_curvature = "planform_curvature" in attribute
    make_profile_curvature = "profile_curvature" in attribute

    if make_surface_fit:
        if resolution[0] != resolution[1]:
            raise ValueError(
                f"Quadric surface fit requires the same X and Y resolution ({resolution} was given). "
                f"This was required by: {attributes_requiring_surface_fit}")
        terrain_attributes["surface_fit"] = get_quadric_coefficients(
            dem=dem_arr,
            resolution=resolution[0],
            fill_method=fill_method,
            edge_method=edge_method)

    if make_slope:
        # SLOPE = ARCTAN((G²+H²)**(1/2))
        terrain_attributes["slope"] = np.arctan(
            (terrain_attributes["surface_fit"][6, :, :]**2 +
             terrain_attributes["surface_fit"][7, :, :]**2)**0.5)

    if make_aspect:
        # ASPECT = ARCTAN(-H/-G)  # This did not work
        # ASPECT = (ARCTAN2(-G, H) + 0.5PI) % 2PI  did work.
        terrain_attributes["aspect"] = (
            np.arctan2(-terrain_attributes["surface_fit"][6, :, :],
                       terrain_attributes["surface_fit"][7, :, :]) +
            np.pi / 2) % (2 * np.pi)

    if make_hillshade:
        # If a different z-factor was given, calculate a slope map with exaggerated gradients.
        if hillshade_z_factor != 1.0:
            slopemap = np.arctan(
                np.tan(terrain_attributes["slope"]) * hillshade_z_factor)
        else:
            slopemap = terrain_attributes["slope"]

        azimuth_rad = np.deg2rad(360 - hillshade_azimuth)
        altitude_rad = np.deg2rad(hillshade_altitude)
        terrain_attributes["hillshade"] = np.clip(
            255 * (np.sin(altitude_rad) * np.cos(slopemap) +
                   np.cos(altitude_rad) * np.sin(slopemap) *
                   np.sin(azimuth_rad - terrain_attributes["aspect"])),
            0,
            255,
        ).astype("float32")

    if make_curvature:
        # Curvature is the second derivative of the surface fit equation. See the ArcGIS documentation.
        # (URL in get_quadric_coefficients() docstring)
        # Curvature = -2(D + E) * 100
        terrain_attributes["curvature"] = (
            -2 * (terrain_attributes["surface_fit"][3, :, :] +
                  terrain_attributes["surface_fit"][4, :, :]) * 100)

    if make_planform_curvature:
        # PLANC = 2(DH² + EG² - FGH)/(G²+H²) * 100
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "invalid value encountered in true_divide")
            terrain_attributes["planform_curvature"] = (
                2 * (terrain_attributes["surface_fit"][3, :, :] *
                     terrain_attributes["surface_fit"][7, :, :]**2 +
                     terrain_attributes["surface_fit"][4, :, :] *
                     terrain_attributes["surface_fit"][6, :, :]**2 -
                     terrain_attributes["surface_fit"][5, :, :] *
                     terrain_attributes["surface_fit"][6, :, :] *
                     terrain_attributes["surface_fit"][7, :, :]) /
                (terrain_attributes["surface_fit"][6, :, :]**2 +
                 terrain_attributes["surface_fit"][7, :, :]**2) * 100)

        # Completely flat surfaces trigger the warning above. These need to be set to zero
        terrain_attributes["planform_curvature"][terrain_attributes["slope"] ==
                                                 0.0] = 0.0

    if make_profile_curvature:
        # PROFC = -2(DH² + EG² + FGH)/(G²+H²) * 100
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore", "invalid value encountered in true_divide")
            terrain_attributes["profile_curvature"] = (
                -2 * (terrain_attributes["surface_fit"][3, :, :] *
                      terrain_attributes["surface_fit"][7, :, :]**2 +
                      terrain_attributes["surface_fit"][4, :, :] *
                      terrain_attributes["surface_fit"][6, :, :]**2 +
                      terrain_attributes["surface_fit"][5, :, :] *
                      terrain_attributes["surface_fit"][6, :, :] *
                      terrain_attributes["surface_fit"][7, :, :]) /
                (terrain_attributes["surface_fit"][6, :, :]**2 +
                 terrain_attributes["surface_fit"][7, :, :]**2) * 100)

        # Completely flat surfaces trigger the warning above. These need to be set to zero
        terrain_attributes["profile_curvature"][terrain_attributes["slope"] ==
                                                0.0] = 0.0

    # Convert the unit if wanted.
    if degrees:
        for attr in ["slope", "aspect"]:
            if attr not in terrain_attributes:
                continue
            terrain_attributes[attr] = np.rad2deg(terrain_attributes[attr])

    output_attributes = [
        terrain_attributes[key].reshape(dem.shape) for key in attribute
    ]

    if isinstance(dem, gu.Raster):
        output_attributes = [
            gu.Raster.from_array(attr,
                                 transform=dem.transform,
                                 crs=dem.crs,
                                 nodata=None) for attr in output_attributes
        ]

    return output_attributes if len(output_attributes) > 1 else output_attributes[0]
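
A minimal sketch deriving a hillshade from the docstring's toy DEM (the azimuth and altitude shown are the function's defaults):

import numpy as np

dem = np.repeat(np.arange(3), 3).reshape(3, 3).astype("float32")
hillshade = get_terrain_attribute(dem, "hillshade", resolution=1.0,
                                  hillshade_azimuth=315.0, hillshade_altitude=45.0)
print(hillshade.shape)  # (3, 3); values are clipped to the 0-255 range.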
Example #7
def get_quadric_coefficients(dem: np.ndarray,
                             resolution: float,
                             fill_method: str = "none",
                             edge_method: str = "none") -> np.ndarray:
    """
    Return the 9 coefficients of a quadric surface fit to every pixel in the raster.

    Mostly inspired by: https://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/how-curvature-works.htm

    The function that is solved is:
    Z = Ax²y² + Bx²y + Cxy² + Dx² + Ey² + Fxy + Gx + Hy + I

    Where Z is the elevation, x is the left-right (east-west) distance and y is the top-bottom (north-south) distance.
    Each pixel's fit can be accessed by coefficients[:, row, col], returning an array of shape (9,).
    The 9 coefficients correspond to A-I in the equation above.

    Fill methods
        If the 3x3 matrix to fit the quadric function on has NaNs, these need to be handled:
        * 'median': NaNs are filled with the median value of the matrix.
        * 'mean': NaNs are filled with the mean value of the matrix.
        * 'none': If NaNs are encountered, skip the entire cell (default for GDAL and SAGA).

    Edge methods
        Each iteration requires a 3x3 matrix, so special edge cases have to be made.
        * 'nearest': Pixels outside the range are filled using the closest pixel value.
        * 'wrap': The array is wrapped so pixels near the right edge will be sampled from the left, etc.
        * 'none': Edges will not be analyzed, leaving a 1 pixel edge of NaNs.

    Quirks:
        * Edges are naively treated by filling in the closest value, so that a 3x3 matrix can always be calculated.\
                Results may therefore be slightly off at the edges.
        * NaNs and infs are filled with the median of the finite values in the matrix, possibly affecting the fit.
        * The X and Y resolutions need to be the same. It does not work if they differ.

    :param dem: The 2D DEM to be analyzed (3D DEMs of shape (1, row, col) are not supported)
    :param resolution: The X/Y resolution of the DEM.
    :param fill_method: Fill method to use for NaNs in the 3x3 matrix.
    :param edge_method: The method to use near the array edge.

    :raises ValueError: If the inputs are poorly formatted.
    :raises RuntimeError: If unexpected backend errors occurred.

    :examples:
        >>> dem = np.array([[1, 1, 1],
        ...                 [1, 2, 1],
        ...                 [1, 1, 1]], dtype="float32")
        >>> coeffs = get_quadric_coefficients(dem, resolution=1.0)
        >>> coeffs.shape
        (9, 3, 3)
        >>> coeffs[:, 1, 1]
        array([ 1.,  0.,  0., -1., -1.,  0.,  0.,  0.,  2.])

    :returns: An array of coefficients for each pixel of shape (9, row, col).
    """
    # This function only formats and validates the inputs. For the true functionality, see _get_quadric_coefficients()
    dem_arr = spatial_tools.get_array_and_mask(dem)[0]

    if len(dem_arr.shape) != 2:
        raise ValueError(
            f"Invalid input array shape: {dem.shape}, parsed into {dem_arr.shape}. "
            "Expected 2D array or 3D array of shape (1, row, col)")

    if any(dim < 3 for dim in dem_arr.shape):
        raise ValueError(
            f"DEM (shape: {dem.shape}) is too small. Smallest supported shape is (3, 3)"
        )

    # In other tools, the resolution is accepted as a tuple. Here, it must be a single number, so sanity check it.
    if isinstance(resolution, Sized):
        raise ValueError("Resolution must be the same for X and Y directions")

    allowed_fill_methods = ["median", "mean", "none"]
    allowed_edge_methods = ["nearest", "wrap", "none"]
    for value, name, allowed in zip(
        [fill_method, edge_method], ["fill", "edge"],
        (allowed_fill_methods, allowed_edge_methods)):
        if value.lower() not in allowed:
            raise ValueError(
                f"Invalid {name} method: '{value}'. Choices: {allowed}")

    # Try to run the numba JIT code. It should never fail at this point, so if it does, it should be reported!
    try:
        coeffs = _get_quadric_coefficients(dem_arr,
                                           resolution,
                                           fill_method=fill_method.lower(),
                                           edge_method=edge_method.lower())
    except Exception as exception:
        raise RuntimeError(
            "Unhandled numba exception. Please raise an issue describing what happened."
        ) from exception

    return coeffs
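
For reference, a minimal pure-NumPy sketch of the closed-form fit for a single 3x3 window. The formulas follow the ArcGIS curvature page linked above (Z1-Z9 are the window values in row-major order, L is the cell resolution); this is an illustration of the equation, not the numba implementation itself. It reproduces the centre-pixel coefficients from the docstring example:

import numpy as np

def quadric_coefficients_3x3(window: np.ndarray, L: float) -> np.ndarray:
    """Solve Z = Ax²y² + Bx²y + Cxy² + Dx² + Ey² + Fxy + Gx + Hy + I for one 3x3 window."""
    Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, Z9 = window.ravel()
    A = ((Z1 + Z3 + Z7 + Z9) / 4 - (Z2 + Z4 + Z6 + Z8) / 2 + Z5) / L**4
    B = ((Z1 + Z3 - Z7 - Z9) / 4 - (Z2 - Z8) / 2) / L**3
    C = ((-Z1 + Z3 - Z7 + Z9) / 4 + (Z4 - Z6) / 2) / L**3
    D = ((Z4 + Z6) / 2 - Z5) / L**2
    E = ((Z2 + Z8) / 2 - Z5) / L**2
    F = (-Z1 + Z3 + Z7 - Z9) / (4 * L**2)
    G = (-Z4 + Z6) / (2 * L)
    H = (Z2 - Z8) / (2 * L)
    I = Z5
    return np.array([A, B, C, D, E, F, G, H, I])

dem = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]], dtype="float32")
print(quadric_coefficients_3x3(dem, L=1.0))
# [ 1.  0.  0. -1. -1.  0.  0.  0.  2.]  -- matches coeffs[:, 1, 1] in the docstring example.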