Example #1
0
    def setUp(self):
        """Create fixture landscapes at two resolutions plus a concrete
        ``MultiLandscape`` subclass to exercise the abstract base class."""
        from pylandstats.multilandscape import MultiLandscape

        self.landscapes = [
            pls.Landscape(
                np.load('tests/input_data/ls100_06.npy', allow_pickle=True),
                res=(100, 100)),
            pls.Landscape(
                np.load('tests/input_data/ls250_06.npy', allow_pickle=True),
                res=(250, 250))
        ]
        self.landscape_fps = [
            'tests/input_data/ls100_06.tif', 'tests/input_data/ls250_06.tif'
        ]
        self.attribute_name = 'resolution'
        self.attribute_values = [100, 250]
        self.inexistent_class_val = 999

        # use this class just for testing purposes
        class InstantiableMultiLandscape(MultiLandscape):
            def __init__(self, *args, **kwargs):
                # zero-argument super() is the idiomatic Python 3 form of
                # the legacy super(InstantiableMultiLandscape, self)
                super().__init__(*args, **kwargs)

        self.InstantiableMultiLandscape = InstantiableMultiLandscape
Example #2
0
    def test_metrics_warnings(self):
        """Metrics that cannot be meaningfully computed must emit warnings."""

        # class-level metrics
        # euclidean nearest neighbor returns nan (and raises an informative
        # warning) when a class does not have at least two patches; build a
        # landscape with a background of class 1 and a single patch of class 2
        landscape_arr = np.ones((4, 4))
        landscape_arr[1:-1, 1:-1] = 2
        single_patch_ls = pls.Landscape(landscape_arr, res=(1, 1))

        # both the class-level computations (classes 1 and 2) and the
        # landscape-level one (`class_val` of `None`) must raise at least one
        # warning (the exact number of warnings may differ between Python 2
        # and 3)
        for class_val in (1, 2, None):
            with warnings.catch_warnings(record=True) as caught:
                single_patch_ls.euclidean_nearest_neighbor(class_val)
                self.assertGreater(len(caught), 0)

        # landscape-level metrics
        # some landscape-level metrics require at least two classes
        single_class_ls = pls.Landscape(np.ones((4, 4)), res=(1, 1))
        for metric_method in ('contagion', 'shannon_diversity_index'):
            with warnings.catch_warnings(record=True) as caught:
                getattr(single_class_ls, metric_method)()
                self.assertGreater(len(caught), 0)
Example #3
0
 def test_io(self):
     """A landscape instantiated from a raster must expose its resolution."""
     landscape = pls.Landscape('tests/input_data/ls250_06.tif')
     # the raster resolutions are not exactly 250 (they lie within
     # [249, 251]), hence the generous deltas below
     self.assertAlmostEqual(landscape.cell_width, 250, delta=1)
     self.assertAlmostEqual(landscape.cell_height, 250, delta=1)
     self.assertAlmostEqual(landscape.cell_area, 250 * 250, delta=250)
Example #4
0
def parse_raster():
    """Compute landscape-level metrics for every split census-tract raster.

    Steps taken to create the data:
        1. Raster representation of land uses was created using
           gdal_rasterize in QGIS using a cell size of 30 map units (feet).
        2. Tract table was split into individual shapefiles based on geoid
           using the Split vector layer tool in QGIS.
        3. Split census tract shapefiles were used to split the raster
           created in step 1 into individual rasters using the gdal_wrap
           tool within a shell script.

    TODO:
        - automate rasterization using gdal_rasterize with postgresql layer

    Returns:
        pd.DataFrame: one row per tract with the computed landscape-level
            metrics and the tract's ``geoid10``.
    """
    import pylandstats as pls

    raster_dir = "/home/natron/temp/split_raster"

    land_metrics = [
        "number_of_patches", "patch_density", "largest_patch_index",
        "total_edge", "edge_density", "landscape_shape_index", "contagion",
        "shannon_diversity_index"
    ]
    all_geoids = []
    # sort the listing so the resulting row order is deterministic across
    # runs and filesystems (os.listdir order is arbitrary)
    for img in sorted(os.listdir(raster_dir)):
        # filenames look like "<8-char prefix><geoid>.<3-char ext>"
        # NOTE(review): prefix length assumed from the slice — confirm
        geoid = img[8:-4]
        land = pls.Landscape(os.path.join(raster_dir, img))
        land_stats = land.compute_landscape_metrics_df()
        # single-row dataframe -> plain dict of metric name -> value
        ls_dict = land_stats[land_metrics].iloc[0].to_dict()
        ls_dict["geoid10"] = geoid
        all_geoids.append(ls_dict)
    df = pd.DataFrame(all_geoids)
    add_features(df, "geoid10")
    return df
Example #5
0
 def setUp(self):
     """Load mask/landscape fixtures plus the CRS data for buffer analysis."""
     self.masks_arr = np.load('tests/input_data/masks_arr.npy')
     landscape_arr = np.load('tests/input_data/ls250_06.npy')
     self.landscape = pls.Landscape(landscape_arr, res=(250, 250))
     self.landscape_fp = 'tests/input_data/ls250_06.tif'
     self.landscape_transform = affine.Affine(
         249.96431809611167, 0.0, 4037084.1862939927, 0.0,
         -250.7188576750866, 2631436.6068059015)
     self.landscape_crs = {'init': 'epsg:3035'}
     # fixtures for the buffer analysis
     self.geom = geometry.Point(6.6327025, 46.5218269)
     self.geom_crs = {'init': 'epsg:4326'}
     self.buffer_dists = [10000, 15000, 20000]
Example #6
0
    def test_io(self):
        """Raster-backed landscapes expose resolution and an affine transform."""
        raster_ls = pls.Landscape('tests/input_data/ls250_06.tif')
        # the raster resolutions are not exactly 250 (they lie within
        # [249, 251]), hence the generous deltas below
        self.assertAlmostEqual(raster_ls.cell_width, 250, delta=1)
        self.assertAlmostEqual(raster_ls.cell_height, 250, delta=1)
        self.assertAlmostEqual(raster_ls.cell_area, 250 * 250, delta=250)

        # a landscape instantiated from an ndarray (without providing the
        # `transform` argument) has a `None` transform, whereas one
        # instantiated from a raster path gets its transform from the file
        self.assertIsNone(self.ls.transform)
        self.assertIsNotNone(raster_ls.transform)
Example #7
0
def createMetric(args):
    """Compute landscape-level metrics for a raster and append them to a CSV.

    Args:
        args: sequence ``(file, metrics, csvfile, start_time)`` where
            ``file`` is the raster path, ``metrics`` the metric names passed
            to ``compute_landscape_metrics_df``, ``csvfile`` the CSV path
            appended to (no header written) and ``start_time`` a timestamp
            tagged onto every row.
    """
    # tuple unpacking instead of index-by-index access: clearer, and fails
    # fast with ValueError if the packed arguments have the wrong arity
    file, metrics, csvfile, start_time = args
    ls = pls.Landscape(file)
    landscape_metrics_df = ls.compute_landscape_metrics_df(metrics=metrics)
    # provenance/timing columns so appended rows can be traced back
    landscape_metrics_df['image'] = file
    landscape_metrics_df['start_time'] = start_time
    landscape_metrics_df['write_time'] = time.time()

    landscape_metrics_df.to_csv(csvfile, mode='a', header=False)
    print('----------------------------------------')
    print('createMetric', file, metrics, csvfile)
    print('----------------------------------------')
Example #8
0
    def setUp(self):
        """Prepare landscape fixtures, raster CRS info and a temp directory."""
        self.masks_arr = np.load('tests/input_data/masks_arr.npy',
                                 allow_pickle=True)
        landscape_arr = np.load('tests/input_data/ls250_06.npy',
                                allow_pickle=True)
        self.landscape = pls.Landscape(landscape_arr, res=(250, 250))
        self.landscape_fp = 'tests/input_data/ls250_06.tif'
        # read the transform and CRS straight from the raster file
        with rio.open(self.landscape_fp) as src:
            self.landscape_transform = src.transform
            self.landscape_crs = src.crs
        # fixtures for the buffer analysis
        self.geom = geometry.Point(6.6327025, 46.5218269)
        self.buffer_dists = [10000, 15000, 20000]

        self.tmp_dir = path.join('tests/tmp')
        os.mkdir(self.tmp_dir)
Example #9
0
    def test_plot_landscape(self):
        """`plot_landscape` returns a matplotlib axis; for transform-backed
        landscapes the plot bounds match the transform's offsets."""
        # landscape without an affine transform (instantiated from an
        # ndarray without a non-None `transform` argument)
        ax = self.ls.plot_landscape()
        # the returned axis must be a matplotlib axes instance
        self.assertIsInstance(ax, plt.Axes)

        # landscape with an affine transform (e.g., instantiated from a
        # raster file)
        raster_ls = pls.Landscape('tests/input_data/ls250_06.tif')
        raster_ax = raster_ls.plot_landscape()
        self.assertIsInstance(raster_ax, plt.Axes)
        # the plot bounds must correspond to the transform's offsets
        self.assertAlmostEqual(raster_ax.get_xlim()[0],
                               raster_ls.transform.xoff)
        self.assertAlmostEqual(raster_ax.get_ylim()[1],
                               raster_ls.transform.yoff)
 def compute_metrics(row, metrics):
     """Compute the requested pylandstats metrics for the scenario in `row`.

     Selects the LULC slice matching the row's scenario coordinates,
     reclassifies it into `HIGH_TREE_CLASS_VAL` vs. `OTHER_CLASS_VAL` and
     returns a series with one value per metric in `metrics`.
     """
     # select the LULC array for this row's scenario coordinates
     lulc_arr = scenario_lulc_da.sel({
         scenario_dim: row[scenario_dim]
         for scenario_dim in scenario_dims
     }).values
     # reclassify: nodata cells stay nodata, every valid cell starts as
     # the "other" class...
     landscape_arr = np.full_like(lulc_arr, nodata)
     landscape_arr[lulc_arr != nodata] = OTHER_CLASS_VAL
     # ...and the land-use codes whose 'shade' value reaches the threshold
     # are assigned the high-tree class
     high_tree_lucodes = biophysical_df[
         biophysical_df['shade'] >= shade_threshold]['lucode']
     landscape_arr[np.isin(lulc_arr, high_tree_lucodes)] = HIGH_TREE_CLASS_VAL
     ls = pls.Landscape(landscape_arr, (res, res), nodata)
     return pd.Series({
         metric: getattr(ls, metric)(HIGH_TREE_CLASS_VAL)
         for metric in metrics
     })
Example #11
0
 def setUp(self):
     """Build the reference landscape fixture from the serialized array."""
     self.ls = pls.Landscape(np.load('tests/input_data/ls250_06.npy'),
                             res=(250, 250))
Example #12
0
 def test_transonic(self):
     """The adjacency computation must yield a pandas DataFrame."""
     landscape = pls.Landscape(
         np.load('tests/input_data/ls250_06.npy', allow_pickle=True),
         res=(250, 250))
     self.assertIsInstance(landscape._adjacency_df, pd.DataFrame)
def _analyse_fragmentation(
    landcover: Union[os.PathLike, xr.DataArray],
    rois: Optional[gpd.GeoDataFrame] = None,
    target_crs: Optional[Union[str, pyproj.CRS]] = None,
    target_x_res: float = 300,
    target_y_res: float = 300,
    no_data: int = 0,
    rois_index_col: str = "name",
    **kwargs
) -> pd.DataFrame:
    """
    Compute pylandstats class fragmentation metrics for ROIs on a landcover map.

    For a list of all computable metrics, see:
    https://pylandstats.readthedocs.io/en/latest/landscape.html

    Args:
        landcover (Union[os.PathLike, xr.DataArray]): The landcover data to use.
            A plain ``str`` path is accepted as well.
        rois (Optional[gpd.GeoDataFrame], optional): A geopandas dataframe which
            contains the list of the geometries for which a class fragmentation analysis
            should be performed. Defaults to None.
        target_crs (Optional[Union[str, pyproj.CRS]], optional): The coordinate
            reference system to use for the class metric computation. For interpretable
            results a CRS with units of meter (e.g. UTM) should be used.
            Defaults to None.
        target_x_res (float, optional): The target pixel resolution along the
            x-direction in the target coordinate reference system. If the CRS has units
            of meter, this corresponds to meters per pixel. Up/downsampling is
            performed via nearest-neighbor sampling with rasterio. Defaults to 300.
        target_y_res (float, optional): The target pixel resolution along the
            y-direction in the target coordinate reference system. If the CRS has units
            of meter, this corresponds to meters per pixel. Up/downsampling is
            performed via nearest-neighbor sampling with rasterio. Defaults to 300.
        no_data (int, optional): The no-data value for the landcover data.
            Defaults to 0.
        rois_index_col (str, optional): Name of the attribute that will distinguish
            region of interest in `rois`. Defaults to "name".
        **kwargs: Keyword arguments of the `compute_class_metrics_df` of pylandstats

    Raises:
        TypeError: If `landcover` is neither a path nor an `xr.DataArray`.

    Returns:
        pd.DataFrame: The pandas dataframe containing the computed metrics for each
            landcover class in `landcover` and region of interest given in `rois`
    """

    # 1 Load the data (plain strings are valid paths even though they do not
    # implement os.PathLike, so accept them explicitly)
    if isinstance(landcover, xr.DataArray):
        data_original = copy(landcover)
    elif isinstance(landcover, (str, os.PathLike)):
        data_original = rxr.open_rasterio(pathlib.Path(landcover))
    else:
        # previously an unsupported type left `data_original` unbound and
        # surfaced later as a confusing NameError
        raise TypeError(
            "landcover must be a path or an xarray.DataArray, got "
            f"{type(landcover).__name__}"
        )

    # 2 Reproject to relevant CRS and resolution
    data_reprojected = data_original.rio.reproject(
        target_crs, resolution=(target_x_res, target_y_res)
    )

    # 3 Calculate final resolution: n cell-center coordinates span n - 1
    # cells, so divide the coordinate span by len - 1 (guarded against
    # single-pixel axes), not by len
    x_data, y_data = (data_reprojected.x.data, data_reprojected.y.data)
    x_res = abs(x_data[-1] - x_data[0]) / max(len(x_data) - 1, 1)
    y_res = abs(y_data[-1] - y_data[0]) / max(len(y_data) - 1, 1)
    # Free up memory before the metric computation
    del data_original

    # 4 Perform pylandstats analysis on clipped, reprojected region
    # Convert to pylandstats landscape
    data_landscape = pls.Landscape(
        data_reprojected.data.squeeze(),
        res=(x_res, y_res),
        nodata=no_data,
        transform=data_reprojected.rio.transform(),
    )

    # Perform zonal analysis of the rois
    if rois is None:
        zonal_analyser = data_landscape
    else:
        zonal_analyser = pls.ZonalAnalysis(
            data_landscape,
            landscape_crs=target_crs,
            masks=rois,
            masks_index_col=rois_index_col,
        )
    return zonal_analyser.compute_class_metrics_df(**kwargs)