Example #1
0
def mode_filter(in_raster, width=5, iterations=1, circular=True):
    """Apply an iterated mode (majority) filter to a raster.

    Parameters
    ----------
    in_raster : ndarray
        Input raster to filter.
    width : int, optional
        Width of the filter kernel (default 5).
    iterations : int, optional
        Number of filter passes to apply (default 1).
    circular : bool, optional
        Use a circular kernel rather than a square one (default True).

    Returns
    -------
    ndarray
        The majority-filtered raster.
    """
    kernel = create_kernel(
        width,
        circular=circular,
        holed=False,
        normalise=False,
        weighted_edges=False,
        weighted_distance=False,
    )

    # A single loop handles both the one-pass and multi-pass cases; the
    # original if/else duplicated the first majority() call and carried
    # commented-out dead code.
    result = majority(in_raster, kernel)
    for _ in range(iterations - 1):
        result = majority(result, kernel)
    return result
 def test_majority(self):
     """rank.majority must match the argmax of the windowed histogram."""
     image = data.camera()
     footprint = np.ones((3, 3), dtype=np.uint8)
     hist = rank.windowed_histogram(image, footprint)
     reference = hist.argmax(-1).astype(np.uint8)
     assert_equal(reference, rank.majority(image, footprint))
 def majority(self, image, radius):
     """Run a majority filter over a square window of side 2 * radius + 1."""
     footprint = square(2 * radius + 1)
     return majority(image.astype('uint8'), footprint)
Example #4
0
        # raster 2D shape (the raster is not 3D anymore as it loses the
        # depth and has only one band).
        arr = src.read()
        prediction = clf.predict(reshape_as_image(arr).reshape(-1, src.count))
        prediction = np.reshape(prediction, src.shape).astype(np.uint8)

        # Create a mask of NoData values in the original raster and
        # change all predictions in the mask to 0 (i.e. unburned).
        mask = np.all((arr == L8_NODATA_VALUE), axis=0)
        prediction[mask] = 0

        # Apply a majority filter using a rolling window to remove
        # the salt-and-pepper noise on the prediction raster. Check
        # https://en.wikipedia.org/wiki/Salt-and-pepper_noise for
        # a description of this phenomenon.
        prediction = majority(prediction, square(FILTER_NEIGHBOURS))

        # Vectorize contiguous areas of burned pixels (i.e. pixels
        # whose value is 1).
        features = shapes(prediction, mask=(prediction == 1), transform=src.transform)

        for feature in features:

            # Create a Shapely geometry of the polygon and compute
            # its area in a planar spatial reference to get a result
            # in meters rather than degrees.
            geom = shape(feature[0])
            transformed_geom = transform(
                functools.partial(
                    pyproj.transform,
                    pyproj.Proj("EPSG:4326"),
Example #5
0
 def test_majority(self):
     """Per-pixel histogram argmax should equal rank.majority output."""
     frame = data.camera()
     kernel = np.ones((3, 3), dtype=np.uint8)
     want = rank.windowed_histogram(frame, kernel).argmax(-1)
     want = want.astype(np.uint8)
     assert_equal(want, rank.majority(frame, kernel))
Example #6
0
def major(img, radius=5):
    """Apply a majority filter to ``img`` with a disk-shaped footprint.

    Parameters
    ----------
    img : ndarray
        Input image.
    radius : int, optional
        Radius of the disk footprint. Defaults to 5, preserving the
        original hard-coded behavior.

    Returns
    -------
    ndarray
        The majority-filtered image.
    """
    return majority(img, disk(radius))
Example #7
0
    def postfilter(self) -> None:
        """Tidy the output bec zones by applying several filters:
        - majority
        - noise
        - area closing (fill in 0 areas created by noise filter)
        - majority (again) to tidy edge effects created by area_closing()
        - noise (again) to remove any noise created by 2nd majority

        Reads rasters/tables from ``self.data`` (``becinit``, ``slope``,
        ``ruleimg``, ``becmaster``, ``rulepolys``) plus ``self.config``,
        and writes intermediate rasters back into ``self.data``; the final
        vectorized result is stored in ``self.data["becvalue_polys"]``.
        """
        # shortcuts
        config = self.config
        data = self.data

        # before performing the majority filter, group high elevation
        # labels across rule polygons (alpine, parkland, woodland)
        data["becinit_grouped"] = data["becinit"].copy()

        # define new becvalues for aggregated high elevation labels
        # generate these dynamically based on current max value because using
        # arbitrary large values decreases performance of scikit-img rank
        # filters (majority)
        if len(self.high_elevation_types) >= 1:

            max_value = data["becmaster"]["becvalue"].max()
            high_elevation_aggregates = {
                "alpine": max_value + 1,
                "parkland": max_value + 2,
                "woodland": max_value + 3,
            }
            for key in high_elevation_aggregates:
                if key in self.high_elevation_types:
                    for becvalue in self.high_elevation_dissolves[key]:
                        data["becinit_grouped"] = np.where(
                            data["becinit_grouped"] == becvalue,
                            high_elevation_aggregates[key],
                            data["becinit_grouped"],
                        )

        # ----------------------------------------------------------------
        # majority filter
        # ----------------------------------------------------------------
        LOG.info("Running majority filter")
        # filter window size is chosen per cell: filtersize_low where slope
        # is below the steep-slope threshold, filtersize_steep otherwise
        data["majority"] = np.where(
            data["slope"] <
            config["majority_filter_steep_slope_threshold_percent"],
            majority(
                data["becinit_grouped"],
                morphology.rectangle(nrows=self.filtersize_low,
                                     ncols=self.filtersize_low),
            ),
            majority(
                data["becinit_grouped"],
                morphology.rectangle(nrows=self.filtersize_steep,
                                     ncols=self.filtersize_steep),
            ),
        )

        # to ungroup the high elevation values while retaining the result of
        # the majority filter, loop through the rule polygons and re-assign
        # the becvalues
        data["postmajority"] = data["majority"].copy()

        # NOTE: high_elevation_aggregates is only bound inside the
        # `if len(self.high_elevation_types) >= 1` branch above, but this
        # loop body only executes in that same case, so no NameError occurs.
        for zone in self.high_elevation_types:
            for lookup in [
                    r for r in self.high_elevation_merges if r["type"] == zone
            ]:
                data["postmajority"][
                    (data["ruleimg"] == lookup["rule"])
                    & (data["majority"] == high_elevation_aggregates[zone]
                       )] = lookup["becvalue"]

        # ----------------------------------------------------------------
        # Basic noise filter
        # Remove holes < the noise_removal_threshold within each zone
        # ----------------------------------------------------------------
        LOG.info("Running noise removal filter")

        # convert noise_removal_threshold value from ha to n cells
        noise_threshold = int((config["noise_removal_threshold_ha"] * 10000) /
                              (config["cell_size_metres"]**2))

        # initialize the output raster for noise filter
        data["noise"] = np.zeros(shape=self.shape, dtype="uint16")

        # process each non zero becvalues
        for becvalue in [v for v in self.beclabel_lookup if v != 0]:

            # extract given becvalue as a binary (1/0) raster
            X = np.where(data["postmajority"] == becvalue, 1, 0)

            # fill holes, remove small objects
            Y = morphology.remove_small_holes(
                X, noise_threshold, connectivity=config["cell_connectivity"])
            Z = morphology.remove_small_objects(
                Y, noise_threshold, connectivity=config["cell_connectivity"])

            # insert values into output
            data["noise"] = np.where(Z != 0, becvalue, data["noise"])

        # ----------------------------------------------------------------
        # Fill holes introduced by noise filter
        #
        # The noise filter removes small holes / objects surrounded by
        # contiguous zones.
        # When a small area is bordered by more than 1 becvalue, it does not
        # get filled and leaves a hole.
        # Fill these holes using the distance transform (as done with
        # expansion of rule polys). Restrict the expansion to within the rule
        # polys only, otherwise the results bleed to the edges of the extent
        # (note that this removes need for area closing, edges are filled too)
        # ----------------------------------------------------------------
        a = np.where(data["noise"] == 0, 1, 0)
        # c holds, for every cell, the row/col indices of the nearest
        # non-hole cell; b (the distances) is unused
        b, c = ndimage.distance_transform_edt(a, return_indices=True)
        data["noise_fill"] = np.where(
            (data["noise"] == 0) & (data["ruleimg"] != 0),
            data["noise"][c[0], c[1]],
            data["noise"],
        )

        # ----------------------------------------------------------------
        # High elevation noise removal
        # Process alpine / parkland / woodland / high elevation labels
        # and merge them with the label below if not of sufficient size
        # ----------------------------------------------------------------
        # initialize output image
        data["highelev"] = data["noise_fill"].copy()

        # convert high_elevation_removal_threshold value from ha to n cells
        high_elevation_removal_threshold = int(
            (self.config["high_elevation_removal_threshold_ha"] * 10000) /
            (self.config["cell_size_metres"]**2))

        # remove high elevation noise only if high elevation types are present
        if len(self.high_elevation_types) >= 1:

            # Because we are finding noise by aggregating and finding holes,
            # iterate through all but the lowest high elevation type.
            dissolve_types = list(self.high_elevation_dissolves.keys())
            for i, highelev_type in enumerate(dissolve_types[:-1]):
                LOG.info(
                    "Running high_elevation_removal_threshold on {}".format(
                        highelev_type))

                # Extract area of interest
                # eg, Find and aggregate all parkland values - holes within the
                # created patches can be assumed to be alpine, so we can fill
                # holes < area threshold

                # find all becvalues of zone below zone of interest
                # (all parkland becvalues if we are eliminating alpine)
                to_agg = self.high_elevation_dissolves[dissolve_types[i + 1]]

                # aggregate the areas, creating a boolean array
                X = np.isin(data["highelev"], to_agg)

                # remove small holes (below our threshold) within the boolean array
                Y = morphology.remove_small_holes(
                    X,
                    high_elevation_removal_threshold,
                    connectivity=config["cell_connectivity"],
                )

                # find the difference
                # (just fill the holes, don't write the entire zones)
                Z = np.where((X == 0) & (Y == 1), 1, 0)

                # note that for QA, we could add  X/Y/Z arrays to the data dict
                # something like this, - they'll get written to temp
                # data[highelev_type+"_X"] = X
                # data[highelev_type+"_Y"] = Y

                # remove the small areas in the output image by looping through
                # the merges for the given type, this iterates through the
                # rule polygons.
                for merge in [
                        m for m in self.high_elevation_merges
                        if m["type"] == highelev_type
                ]:
                    data["highelev"] = np.where(
                        (Z == 1) & (data["ruleimg"] == merge["rule"]),
                        merge["becvalue_target"],
                        data["highelev"],
                    )

        # ----------------------------------------------------------------
        # Convert to poly
        # ----------------------------------------------------------------
        fc = FeatureCollection([
            Feature(geometry=s, properties={"becvalue": v})
            for i, (s, v) in enumerate(
                shapes(
                    data["highelev"],
                    transform=self.transform,
                    connectivity=(config["cell_connectivity"] * 4),
                ))
        ])
        data["becvalue_polys"] = gpd.GeoDataFrame.from_features(fc)

        # add beclabel column to output polygons
        data["becvalue_polys"]["BGC_LABEL"] = data["becvalue_polys"][
            "becvalue"].map(self.beclabel_lookup)

        # set crs
        data["becvalue_polys"].crs = "EPSG:3005"

        # clip to aggregated rule polygons
        # (buffer the dissolved rules out and in to ensure no small holes
        # are created by dissolve due to precision errors)
        data["rulepolys"]["rules"] = 1
        X = data["rulepolys"].dissolve(by="rules").buffer(0.01).buffer(-0.01)
        Y = gpd.GeoDataFrame(X).rename(columns={
            0: "geometry"
        }).set_geometry("geometry")
        data["becvalue_polys"] = gpd.overlay(data["becvalue_polys"],
                                             Y,
                                             how="intersection")

        # add area_ha column
        data["becvalue_polys"]["AREA_HA"] = (
            data["becvalue_polys"]["geometry"].area / 10000)

        # round to 1 decimal place
        data["becvalue_polys"].AREA_HA = data["becvalue_polys"].AREA_HA.round(
            1)

        # remove rulepoly fields
        data["becvalue_polys"] = data["becvalue_polys"][[
            "BGC_LABEL", "AREA_HA", "becvalue", "geometry"
        ]]

        self.data = data