Example #1
    def test_case_j(self):
        """
        Origin = (600, -400)
                  +-----------+
                  |           |
         O+----+  |           |
          |    |  |           |
          |    |  |           |
          +----+  |           |
                  |           |
                  +-----------+
        """
        # indices based on full array
        ul = (600, -400)
        ur = (ul[0], ul[1] + self.subs_shape[1])
        lr = (ul[0] + self.subs_shape[0], ul[1] + self.subs_shape[1])
        ll = (ul[0] + self.subs_shape[0], ul[1])

        # real world coords (note reversing (y, x) to (x, y))
        ul_xy_map = ul[::-1] * self.geobox.transform
        ur_xy_map = ur[::-1] * self.geobox.transform
        lr_xy_map = lr[::-1] * self.geobox.transform
        ll_xy_map = ll[::-1] * self.geobox.transform

        # read subset
        with self.assertRaises(IndexError):
            read_subset(self.ds, ul_xy_map, ur_xy_map, lr_xy_map, ll_xy_map)
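
The `(y, x)[::-1] * transform` lines above hinge on the affine transform mapping (column, row) indices to map coordinates. A minimal sketch of that idiom, assuming the `affine` package and a hypothetical 25 m geotransform (neither is part of the test fixture):

from affine import Affine

# hypothetical geotransform: 25 m pixels, UL corner at (600000, 7400000)
transform = Affine(25.0, 0.0, 600000.0, 0.0, -25.0, 7400000.0)

ul = (600, -400)                   # (row, col) index into the full array
ul_xy_map = transform * ul[::-1]   # reverse to (col, row) before applying

print(ul_xy_map)                   # (590000.0, 7385000.0)

`transform * (col, row)` is the package's canonical spelling; the tests' `(row, col)[::-1] * transform` form appears to rely on its right-multiplication support.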
Example #2
    def test_case_i(self):
        """
        Origin = (1100, 1400); `O`

        +-----------+
        |           |
        |           |
        |           |
        |           |
        |       O+-----+
        |        |     |
        +--------+--+  |
                 |     |
                 |     |
                 +-----+
        """
        # indices based on full array
        ul = (1100, 1400)
        ur = (ul[0], ul[1] + self.subs_shape[1])
        lr = (ul[0] + self.subs_shape[0], ul[1] + self.subs_shape[1])
        ll = (ul[0] + self.subs_shape[0], ul[1])

        # real world coords (note reversing (y, x) to (x, y))
        ul_xy_map = ul[::-1] * self.geobox.transform
        ur_xy_map = ur[::-1] * self.geobox.transform
        lr_xy_map = lr[::-1] * self.geobox.transform
        ll_xy_map = ll[::-1] * self.geobox.transform

        # read subset
        data, gb = read_subset(self.ds, ul_xy_map, ur_xy_map, lr_xy_map,
                               ll_xy_map)

        # the request extends past the image; only the overlap holds data
        count = 100 * 100
        self.assertEqual(data.sum(), count)
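
The expected `count` falls out of window clipping: the requested subset extends past the image's right and bottom edges, so only the overlapping block contributes pixels. A self-contained sketch of that arithmetic with hypothetical shapes (the suite's real fixture sizes are assumptions here):

# hypothetical dimensions, chosen so the overlap is 100 x 100
img_shape = (1200, 1500)    # full image (rows, cols)
subs_shape = (300, 400)     # requested subset (rows, cols)
ul = (1100, 1400)           # requested UL corner (row, col)

# clip the requested window to the image bounds
rows = max(0, min(ul[0] + subs_shape[0], img_shape[0]) - max(ul[0], 0))
cols = max(0, min(ul[1] + subs_shape[1], img_shape[1]) - max(ul[1], 0))
print(rows * cols)          # 10000, i.e. 100 * 100 valid pixels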
Example #3
    def test_correct_subset(self):
        """
        Test that the subset is what we expect.
        Read a 10 by 10 starting at the UL corner.
        """
        img, geobox = ut.create_test_image()

        cols, rows = geobox.get_shape_xy()

        # Temporarily write the image to disk
        temp_dir = tempfile.mkdtemp()
        fname = os.path.join(temp_dir, 'test_image')
        write_img(img, fname, geobox=geobox)

        # Create a box that reads a 10 by 10 block from the UL corner
        UL = geobox.convert_coordinates((0, 0))
        UR = geobox.convert_coordinates((9, 0))
        LR = geobox.convert_coordinates((9, 9))
        LL = geobox.convert_coordinates((0, 9))

        kwds = {
            'fname': fname,
            'ul_xy': UL,
            'ur_xy': UR,
            'lr_xy': LR,
            'll_xy': LL
        }

        subs, geobox = read_subset(**kwds)

        base = img[0:10, 0:10]

        # element-wise comparison; a sum of differences could cancel to zero
        self.assertTrue(numpy.array_equal(base, subs))

        # Cleanup
        shutil.rmtree(temp_dir)
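
One caveat in the test above: if an assertion fails before the last line, `shutil.rmtree` never runs and the temporary directory leaks. `tempfile.TemporaryDirectory` closes that gap; a minimal sketch, with numpy save/load standing in for the module's write_img/read_subset helpers:

import os
import tempfile

import numpy

img = numpy.arange(100).reshape(10, 10)

# the context manager removes the directory even if an assertion fails
with tempfile.TemporaryDirectory() as temp_dir:
    fname = os.path.join(temp_dir, 'test_image.npy')
    numpy.save(fname, img)
    subs = numpy.load(fname)[0:10, 0:10]
    assert numpy.array_equal(img[0:10, 0:10], subs)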
Example #4
def get_dsm(
    acquisition,
    pathname,
    buffer_distance=8000,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
):
    """
    Given an acquisition and a national Digital Surface Model,
    extract a subset from the DSM based on the acquisition extents
    plus x & y margins. The subset is then smoothed with a 3x3
    Gaussian filter.
    A square margin is applied to the extents.

    :param acquisition:
        An instance of an acquisition object.

    :param pathname:
        A string pathname of the DSM with a ':' separating the
        filename from the internal HDF5 dataset name.

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory HDF5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset name will be as follows:

        * DatasetName.DSM_SMOOTHED

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object that is either in-memory using the
        `core` driver, or on disk.
    """
    # Use the acquisition to set up the geobox
    geobox = acquisition.gridded_geo_box()
    shape = geobox.get_shape_yx()

    # buffered image extents/margins
    margins = pixel_buffer(acquisition, buffer_distance)

    # Get the dimensions and geobox of the new image
    dem_cols = shape[1] + margins.left + margins.right
    dem_rows = shape[0] + margins.top + margins.bottom
    dem_shape = (dem_rows, dem_cols)
    dem_origin = geobox.convert_coordinates(
        (0 - margins.left, 0 - margins.top))
    dem_geobox = GriddedGeoBox(
        dem_shape,
        origin=dem_origin,
        pixelsize=geobox.pixelsize,
        crs=geobox.crs.ExportToWkt(),
    )

    # split the DSM filename, dataset name, and load
    fname, dname = pathname.split(":")
    with h5py.File(fname, "r") as dsm_fid:
        dsm_ds = dsm_fid[dname]
        dsm_geobox = GriddedGeoBox.from_dataset(dsm_ds)

        # calculate full border extents into CRS of DSM
        extents = dem_geobox.project_extents(dsm_geobox.crs)
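        # extents -> (xmin, ymin, xmax, ymax); corners below run clockwise from UL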
        ul_xy = (extents[0], extents[3])
        ur_xy = (extents[2], extents[3])
        lr_xy = (extents[2], extents[1])
        ll_xy = (extents[0], extents[1])

        # load the subset and corresponding geobox
        subs, subs_geobox = read_subset(dsm_ds,
                                        ul_xy,
                                        ur_xy,
                                        lr_xy,
                                        ll_xy,
                                        edge_buffer=1)

        # ancillary metadata tracking
        metadata = current_h5_metadata(dsm_fid, dataset_path=dname)

    # Reproject the DSM subset onto the buffered acquisition grid
    dsm_data = reproject_array_to_array(subs,
                                        subs_geobox,
                                        dem_geobox,
                                        resampling=Resampling.bilinear)

    # free memory
    subs = None

    # Output the reprojected result
    # Initialise the output files
    if out_group is None:
        fid = h5py.File("dsm-subset.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    if acquisition.tile_size[0] == 1:
        filter_opts["chunks"] = (1, dem_cols)
    else:
        # TODO: rework the tiling regime for larger dsm
        # for non single row based tiles, we won't have ideal
        # matching reads for tiled processing between the acquisition
        # and the DEM
        filter_opts["chunks"] = acquisition.tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()

    group = fid.create_group(GroupName.ELEVATION_GROUP.value)

    param_grp = group.create_group("PARAMETERS")
    param_grp.attrs["left_buffer"] = margins.left
    param_grp.attrs["right_buffer"] = margins.right
    param_grp.attrs["top_buffer"] = margins.top
    param_grp.attrs["bottom_buffer"] = margins.bottom

    # dataset attributes
    attrs = {
        "crs_wkt": geobox.crs.ExportToWkt(),
        "geotransform": dem_geobox.transform.to_gdal(),
    }

    # Smooth the DSM
    dsm_data = filter_dsm(dsm_data)
    dname = DatasetName.DSM_SMOOTHED.value
    out_sm_dset = group.create_dataset(dname, data=dsm_data, **kwargs)
    desc = "A subset of a Digital Surface Model smoothed with a gaussian " "kernel."
    attrs["description"] = desc
    attrs["id"] = numpy.array([metadata["id"]], VLEN_STRING)
    attach_image_attributes(out_sm_dset, attrs)

    if out_group is None:
        return fid
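
A note on the `out_group is None` branch above: with `driver="core"` and `backing_store=False`, h5py builds the whole file in RAM and never writes it to disk; the filename is just a label. A standalone sketch of that pattern:

import h5py
import numpy

# the file lives entirely in memory; no "in-memory.h5" appears on disk
fid = h5py.File("in-memory.h5", "w", driver="core", backing_store=False)
fid.create_dataset("example", data=numpy.zeros((10, 10)))
print(list(fid))   # ['example']
fid.close()        # releases the memory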
Example #5
def load_brdf_tile(src_poly, src_crs, fid, dataset_name, fid_mask):
    """
    Summarize BRDF data from a single tile.
    """
    ds = fid[dataset_name]

    def segmentize_src_poly(length_scale):
        src_poly_geom = ogr.CreateGeometryFromWkt(src_poly.wkt)
        src_poly_geom.Segmentize(length_scale)
        return wkt.loads(src_poly_geom.ExportToWkt())

    ds_height, ds_width = ds.shape

    dst_geotransform = rasterio.transform.Affine.from_gdal(
        *ds.attrs['geotransform'])
    dst_crs = CRS.from_wkt(ds.attrs['crs_wkt'])

    # assumes the length scales are the same (m)
    dst_poly = ops.transform(
        coord_transformer(src_crs, dst_crs),
        segmentize_src_poly(np.sqrt(np.abs(dst_geotransform.determinant))))

    bound_poly = ops.transform(lambda x, y: dst_geotransform * (x, y),
                               box(0., 0., ds_width, ds_height, ccw=False))
    if not bound_poly.intersects(dst_poly):
        return BrdfTileSummary.empty()

    ocean_poly = ops.transform(lambda x, y: fid_mask.transform * (x, y),
                               box(0., 0., fid_mask.width, fid_mask.height))
    if not ocean_poly.intersects(dst_poly):
        return BrdfTileSummary.empty()

    # read the ocean mask file for the corresponding tile window
    # land=1, ocean=0
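    # the exterior ring repeats its first coord to close; keep the four corners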
    bound_poly_coords = list(bound_poly.exterior.coords)[:4]
    ocean_mask, _ = read_subset(fid_mask, *bound_poly_coords)
    ocean_mask = ocean_mask.astype(bool)

    # inside=1, outside=0
    roi_mask = rasterize([(dst_poly, 1)],
                         fill=0,
                         out_shape=(ds_height, ds_width),
                         transform=dst_geotransform)
    roi_mask = roi_mask.astype(bool)

    # ocean_mask and roi_mask must have the same shape
    if ocean_mask.shape != roi_mask.shape:
        raise ValueError('ocean mask and ROI mask do not have the same shape')
    if roi_mask.shape != ds.shape:
        raise ValueError(
            'BRDF dataset and ROI mask do not have the same shape')

    roi_mask = roi_mask & ocean_mask

    def layer_sum(param):
        layer = ds[param][:, :]
        common_mask = roi_mask & (layer != ds.attrs['_FillValue'])
        layer = layer.astype('float32')
        layer[~common_mask] = np.nan
        layer = ds.attrs['scale_factor'] * (layer - ds.attrs['add_offset'])
        return {'sum': np.nansum(layer), 'count': np.sum(common_mask)}

    return BrdfTileSummary(
        {param: layer_sum(param.value)
         for param in BrdfModelParameters}, [current_h5_metadata(fid)['id']])
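
For reference, the masking plus scale/offset arithmetic inside `layer_sum` can be exercised standalone. A sketch with a hypothetical fill value, scale factor and offset (the real values come from the dataset's HDF5 attributes):

import numpy as np

layer = np.array([[100, 32767], [200, 300]], dtype='int16')
fill_value, scale_factor, add_offset = 32767, 0.001, 0.0
roi_mask = np.array([[True, True], [True, False]])

# mask out fill pixels and pixels outside the region of interest
common_mask = roi_mask & (layer != fill_value)
values = layer.astype('float32')
values[~common_mask] = np.nan
values = scale_factor * (values - add_offset)

print(np.nansum(values), np.sum(common_mask))  # ~0.3 and 2 -> mean 0.15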