Code example #1
def create_dataset(
    group: h5py.Group,
    band_name: str,
    shape: Iterable[int],
    attrs: Dict,
    dtype=np.int16,
    chunks: Iterable[int] = (240, 240),
    compression: H5CompressionFilter = H5CompressionFilter.LZF,
    filter_opts: Optional[Dict] = None,
):
    """ creates dataset and attaches attributes for h5 object. """

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    if "chunks" not in filter_opts:
        filter_opts["chunks"] = chunks

    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    ds = group.create_dataset(band_name, shape=shape, dtype=dtype, **kwargs)

    attach_image_attributes(ds, attrs)

    return ds
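A minimal usage sketch for the helper above, assuming attach_image_attributes and H5CompressionFilter are importable from wagl.hdf5 (the import path is an assumption) and using an in-memory HDF5 file so nothing is written to disk:

import h5py
import numpy as np
# assumed import path for the helpers used by create_dataset above:
# from wagl.hdf5 import H5CompressionFilter, attach_image_attributes

with h5py.File("example.h5", "w", driver="core", backing_store=False) as fid:
    grp = fid.create_group("REFLECTANCE")
    ds = create_dataset(
        grp,
        "BAND-3",
        shape=(2400, 2400),
        attrs={"no_data_value": -999},
        chunks=(240, 240),
    )
    ds[0:240, 0:240] = np.zeros((240, 240), dtype=np.int16)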
Code example #2
    def test_attach_image_attributes(self):
        """
        Test the attach_image_attributes function.
        """
        attrs = {'CLASS': 'IMAGE',
                 'IMAGE_VERSION': '1.2',
                 'DISPLAY_ORIGIN': 'UL'}

        fname = 'test_attach_image_attributes.h5'
        with h5py.File(fname, 'w', **self.memory_kwargs) as fid:
            dset = fid.create_dataset('data', data=self.image_data)
            hdf5.attach_image_attributes(dset, attrs)
            test = {k: v for k, v in dset.attrs.items()}
            self.assertDictEqual(test, attrs)
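self.memory_kwargs is not shown in the snippet; given the in-memory HDF5 pattern used in the later examples, it is presumably something like the following (an assumption, not taken from the actual test fixture):

# Assumed fixture value: an HDF5 'core' (in-memory) file that is never
# flushed to disk, so the test leaves no file behind.
memory_kwargs = {"driver": "core", "backing_store": False}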
Code example #3
def _convert_4d(rds, fid, dataset_name, compression, filter_opts):
    """
    Private routine for converting the GRIB file's atmospheric data,
    stored as multiples of 37 layers, to HDF5.
    For a month's worth of data, the dimensions become:
        * (day, atmospheric level, y, x)
    """
    attrs = {
        "geotransform": rds.transform.to_gdal(),
        "crs_wkt": rds.crs.wkt,
        "history": "Converted to HDF5",
    }

    # band groups of 37, nrows to process (ytile)
    band_groups = range(1, rds.count + 1, 37)
    ytile = filter_opts["chunks"][2]
    dims = (len(band_groups), 37, rds.height, rds.width)
    tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)

    # dataset creation options
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    kwargs["shape"] = dims
    kwargs["dtype"] = rds.dtypes[0]

    dataset = fid.create_dataset(dataset_name, **kwargs)
    attach_image_attributes(dataset, attrs)

    # add dimension labels, but should we also include dimension scales?
    dataset.dims[0].label = "Day"
    dataset.dims[1].label = "Atmospheric Level"
    dataset.dims[2].label = "Y"
    dataset.dims[3].label = "X"

    # process by spatial tile containing 37 atmospheric layers for 1 day
    for i, bg in enumerate(band_groups):
        bands = list(range(bg, bg + 37))
        for tile in tiles:
            idx = (
                i,  # day index; the read below returns a (37, rows, cols) block
                slice(None),
                slice(tile[0][0], tile[0][1]),
                slice(tile[1][0], tile[1][1]),
            )
            dataset[idx] = rds.read(bands, window=tile)

    # metadata
    metadata = metadata_dataframe(rds)
    write_dataframe(metadata, "METADATA", fid, compression)
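The routine above relies on generate_tiles(samples, lines, xtile, ytile) yielding ((ystart, yend), (xstart, xend)) windows, which is how the tiles are sliced throughout these examples. A simplified stand-in with that behaviour (illustrative only, not wagl's implementation) could be:

def generate_tiles_sketch(samples, lines, xtile, ytile):
    """Yield ((ystart, yend), (xstart, xend)) windows over a lines x samples grid."""
    for ystart in range(0, lines, ytile):
        yend = min(ystart + ytile, lines)
        for xstart in range(0, samples, xtile):
            xend = min(xstart + xtile, samples)
            yield ((ystart, yend), (xstart, xend))

# e.g. full-width strips of 100 rows over a 300 x 500 (rows x cols) grid
strips = list(generate_tiles_sketch(samples=500, lines=300, xtile=500, ytile=100))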
Code example #4
    def test_attach_image_attributes(self):
        """
        Test the attach_image_attributes function.
        """
        attrs = {
            "CLASS": "IMAGE",
            "IMAGE_VERSION": "1.2",
            "DISPLAY_ORIGIN": "UL"
        }

        fname = "test_attach_image_attributes.h5"
        with h5py.File(fname, "w", **self.memory_kwargs) as fid:
            dset = fid.create_dataset("data", data=self.image_data)
            hdf5.attach_image_attributes(dset, attrs)
            test = {k: v for k, v in dset.attrs.items()}
            self.assertDictEqual(test, attrs)
Code example #5
def _convert_3d(rds, fid, dataset_name, compression, filter_opts):
    """
    Private routine for converting the 37 layer atmospheric data
    in the GRIB file to HDF5.
    """
    # basic metadata to attach to the dataset
    attrs = {
        'geotransform': rds.transform.to_gdal(),
        'crs_wkt': rds.crs.wkt,
        'history': 'Converted to HDF5'
    }

    # bands list, nrows to process (ytile)
    bands = list(range(1, rds.count + 1))
    ytile = filter_opts['chunks'][1]
    dims = (rds.count, rds.height, rds.width)

    # dataset creation options
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    kwargs['shape'] = dims
    kwargs['dtype'] = rds.dtypes[0]

    dataset = fid.create_dataset(dataset_name, **kwargs)
    attach_image_attributes(dataset, attrs)

    # add dimension labels, but should we also include dimension scales?
    dataset.dims[0].label = 'Atmospheric Level'
    dataset.dims[1].label = 'Y'
    dataset.dims[2].label = 'X'

    # process by tile
    for tile in generate_tiles(rds.width, rds.height, rds.width, ytile):
        idx = (
            slice(None),
            slice(tile[0][0], tile[0][1]),
            slice(tile[1][0], tile[1][1])
        )
        dataset[idx] = rds.read(bands, window=tile)

    # metadata
    metadata = metadata_dataframe(rds)
    write_dataframe(metadata, 'METADATA', fid, compression)
Code example #6
def mndwi(wagl_h5_file, granule, out_fname):
    """
    Computes the mndwi for a given granule in a wagl h5 file.

    Parameters
    ----------
    wagl_h5_file : str
        wagl-water-atcor generated h5 file

    granule : str
        Group path of the granule within the h5 file

    out_fname : str
        Output filename of the h5 file
    """

    # specify the reflectance products to use in generating mndwi
    products = ["LMBADJ"]

    # specify the resampling approach for the SWIR band
    resample_approach = Resampling.bilinear

    h5_fid = h5py.File(out_fname, "w")

    # find the granule index in the wagl_h5_file
    fid = h5py.File(wagl_h5_file, "r")
    granule_fid = fid[granule]
    paths = find(granule_fid, "IMAGE")

    # get platform name
    md = yaml.load(fid[granule + "/METADATA/CURRENT"][()],
                   Loader=yaml.FullLoader)
    platform_id = md["source_datasets"]["platform_id"]

    # store mndwi-based products into a group
    mndwi_grp = h5_fid.create_group("mndwi")

    for i, prod in enumerate(products):

        # search the h5 groups & get paths to the green and swir bands
        green_path, swir_path = get_mndwi_bands(granule, platform_id, prod,
                                                paths)

        green_ds = granule_fid[green_path]
        chunks = green_ds.chunks
        nRows, nCols = green_ds.shape
        geobox = GriddedGeoBox.from_dataset(green_ds)
        nodata = green_ds.attrs["no_data_value"]

        # create output h5 attributes
        desc = "MNDWI derived with {0} and {1} ({2} reflectances)".format(
            psplit(green_path)[-1],
            psplit(swir_path)[-1],
            prod,
        )

        attrs = {
            "crs_wkt": geobox.crs.ExportToWkt(),
            "geotransform": geobox.transform.to_gdal(),
            "no_data_value": nodata,
            "granule": granule,
            "description": desc,
            "platform": platform_id,
            "spatial_resolution": abs(geobox.transform.a),
        }

        if platform_id.startswith("SENTINEL_2"):
            # we need to upscale the swir band
            swir_ds = granule_fid[swir_path]
            swir_im = reproject_array_to_array(
                src_img=swir_ds[:],
                src_geobox=GriddedGeoBox.from_dataset(swir_ds),
                dst_geobox=geobox,
                src_nodata=swir_ds.attrs["no_data_value"],
                dst_nodata=nodata,
                resampling=resample_approach,
            )
            attrs["SWIR_resampling_method"] = resample_approach.name

        else:
            swir_im = granule_fid[swir_path][:]

        # ------------------------- #
        #  Compute mndwi via tiles  #
        #   and save tiles to h5    #
        # ------------------------- #
        tiles = generate_tiles(samples=nCols,
                               lines=nRows,
                               xtile=chunks[1],
                               ytile=chunks[0])

        # create mndwi dataset
        mndwi_ds = mndwi_grp.create_dataset(
            f"mndwi_image_{prod}",
            shape=(nRows, nCols),
            dtype="float32",
            compression="lzf",
            chunks=chunks,
            shuffle=True,
        )

        for tile in tiles:
            green_tile = green_ds[tile]
            swir_tile = swir_im[tile]
            mndwi_tile = compute_mndwi(green_tile, swir_tile)

            # perform masking
            mask = ((green_tile == nodata)
                    | (swir_tile == nodata)
                    | (~np.isfinite(mndwi_tile)))
            mndwi_tile[mask] = nodata

            mndwi_ds[tile] = mndwi_tile

        # add attrs to dataset
        attach_image_attributes(mndwi_ds, attrs)

    fid.close()
    h5_fid.close()
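compute_mndwi is not shown here; the standard MNDWI (Xu, 2006) is (green - SWIR) / (green + SWIR), so a NumPy sketch of what it presumably does follows. Division by zero yields non-finite values, which the loop above already removes via the ~np.isfinite mask.

import numpy as np

def compute_mndwi_sketch(green, swir):
    """Modified NDWI: (green - swir) / (green + swir), computed in float32."""
    green = green.astype(np.float32)
    swir = swir.astype(np.float32)
    with np.errstate(divide="ignore", invalid="ignore"):
        return (green - swir) / (green + swir)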
Code example #7
def calculate_angles(
    acquisition,
    lon_lat_group,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
    tle_path=None,
    trackpoints=12,
):
    """
    Calculate the satellite view, satellite azimuth, solar zenith,
    solar azimuth, and relative azimuth angle grids, as well as the
    time grid. All grids are output as float32 ENVI files.
    A wrapper routine for the ``angle_all`` Fortran module built via
    ``F2Py``.

    :param acquisition:
        An instance of an `Acquisition` object.

    :param lon_lat_group:
        The root HDF5 `Group` that contains the longitude and
        latitude datasets.
        The dataset pathnames are given by:

        * DatasetName.LON
        * DatasetName.LAT

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be as follows:

        * DatasetName.SATELLITE_VIEW
        * DatasetName.SATELLITE_AZIMUTH
        * DatasetName.SOLAR_ZENITH
        * DatasetName.SOLAR_AZIMUTH
        * DatasetName.RELATIVE_AZIMUTH
        * DatasetName.TIME
        * DatasetName.CENTRELINE
        * DatasetName.BOXLINE
        * DatasetName.SPHEROID
        * DatasetName.ORBITAL_ELEMENTS
        * DatasetName.SATELLITE_MODEL
        * DatasetName.SATELLITE_TRACK

    :param trackpoints:
        Number of trackpoints to use when calculating solar angles.
        Default is 12.

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param tle_path:
        A `str` to the directory containing the Two Line Element data.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    century = calculate_julian_century(acquisition.acquisition_datetime)
    geobox = acquisition.gridded_geo_box()

    # longitude and latitude datasets
    longitude = lon_lat_group[DatasetName.LON.value]
    latitude = lon_lat_group[DatasetName.LAT.value]

    # Determine approximate pixel size
    lat_data = latitude[0:2, 0]
    psy = abs(lat_data[1] - lat_data[0])
    lon_data = longitude[0, 0:2]
    psx = abs(lon_data[1] - lon_data[0])

    # Min and Max lat extents
    # This method should handle northern and southern hemispheres
    # TODO: Put in a conditional over the 1 degree buffer
    min_lat = (min(
        min(geobox.ul_lonlat[1], geobox.ur_lonlat[1]),
        min(geobox.ll_lonlat[1], geobox.lr_lonlat[1]),
    ) - 1)
    max_lat = (max(
        max(geobox.ul_lonlat[1], geobox.ur_lonlat[1]),
        max(geobox.ll_lonlat[1], geobox.lr_lonlat[1]),
    ) + 1)

    # Get the lat/lon of the scene centre
    # check if we have a file with GPS satellite track points
    # which can be used for cases of image granules/tiles, eg Sentinel-2A
    if acquisition.gps_file:
        points = acquisition.read_gps_file()
        subs = points[(points.latitude >= min_lat)
                      & (points.latitude <= max_lat)]
        idx = subs.shape[0] // 2 - 1
        centre_xy = (subs.iloc[idx].longitude, subs.iloc[idx].latitude)
    else:
        centre_xy = geobox.centre_lonlat

    # Get the earth spheroidal parameters
    spheroid = setup_spheroid(geobox.crs.ExportToWkt())

    # Get the satellite orbital elements
    orbital_elements = setup_orbital_elements(acquisition, tle_path)

    # Get the satellite model parameters

    smodel = setup_smodel(centre_xy[0], centre_xy[1], spheroid[0],
                          orbital_elements[0], psx, psy)

    # Get the times and satellite track information
    track = setup_times(
        min_lat,
        max_lat,
        spheroid[0],
        orbital_elements[0],
        smodel[0],
        psx,
        psy,
        trackpoints,
    )

    # Initialise the output files
    if out_group is None:
        fid = h5py.File("satellite-solar-angles.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.SAT_SOL_GROUP.value not in fid:
        fid.create_group(GroupName.SAT_SOL_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()
    filter_opts["chunks"] = acquisition.tile_size

    grp = fid[GroupName.SAT_SOL_GROUP.value]

    # store the parameter settings used with the satellite and solar angles
    # function
    params = {
        "dimensions": (acquisition.lines, acquisition.samples),
        "lines": acquisition.lines,
        "samples": acquisition.samples,
        "century": century,
        "decimal_hour": acquisition.decimal_hour(),
        "acquisition_datetime": acquisition.acquisition_datetime,
        "centre_longitude_latitude": centre_xy,
        "minimum_latiude": min_lat,
        "maximum_latiude": max_lat,
        "latitude_buffer": 1.0,
        "max_view_angle": acquisition.maximum_view_angle,
    }
    _store_parameter_settings(grp, spheroid[1], orbital_elements[1], smodel[1],
                              track[1], params)

    out_dtype = "float32"
    no_data = np.nan
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    kwargs["shape"] = (acquisition.lines, acquisition.samples)
    kwargs["fillvalue"] = no_data
    kwargs["dtype"] = out_dtype

    sat_v_ds = grp.create_dataset(DatasetName.SATELLITE_VIEW.value, **kwargs)
    sat_az_ds = grp.create_dataset(DatasetName.SATELLITE_AZIMUTH.value,
                                   **kwargs)
    sol_z_ds = grp.create_dataset(DatasetName.SOLAR_ZENITH.value, **kwargs)
    sol_az_ds = grp.create_dataset(DatasetName.SOLAR_AZIMUTH.value, **kwargs)
    rel_az_ds = grp.create_dataset(DatasetName.RELATIVE_AZIMUTH.value,
                                   **kwargs)
    time_ds = grp.create_dataset(DatasetName.TIME.value, **kwargs)

    # base attributes for image datasets
    attrs = {
        "crs_wkt": geobox.crs.ExportToWkt(),
        "geotransform": geobox.transform.to_gdal(),
        "no_data_value": no_data,
    }
    attach_image_attributes(sat_v_ds, attrs)
    attach_image_attributes(sat_az_ds, attrs)
    attach_image_attributes(sol_z_ds, attrs)
    attach_image_attributes(sol_az_ds, attrs)
    attach_image_attributes(rel_az_ds, attrs)
    attach_image_attributes(time_ds, attrs)

    attrs = {
        "description": "Contains the satellite viewing angle in degrees.",
        "units": "degrees",
        "alias": "satellite-view",
    }
    attach_attributes(sat_v_ds, attrs)

    attrs = {
        "description": "Contains the satellite azimuth angle in degrees.",
        "units": "degrees",
        "alias": "satellite-azimuth",
    }
    attach_attributes(sat_az_ds, attrs)

    attrs = {
        "description": "Contains the solar zenith angle in degrees.",
        "units": "degrees",
        "alias": "solar-zenith",
    }
    attach_attributes(sol_z_ds, attrs)

    attrs = {
        "description": "Contains the solar azimuth angle in degrees.",
        "units": "degrees",
        "alias": "solar-azimuth",
    }
    attach_attributes(sol_az_ds, attrs)

    attrs = {
        "description": "Contains the relative azimuth angle in degrees.",
        "units": "degrees",
        "alias": "relative-azimuth",
    }
    attach_attributes(rel_az_ds, attrs)

    attrs = {
        "description": "Contains the time from apogee in seconds.",
        "units": "seconds",
        "alias": "timedelta",
    }
    attach_attributes(time_ds, attrs)

    # Initialise centre line variables
    x_cent = np.zeros((acquisition.lines), dtype=out_dtype)
    n_cent = np.zeros((acquisition.lines), dtype=out_dtype)

    for tile in acquisition.tiles():
        idx = (slice(tile[0][0], tile[0][1]), slice(tile[1][0], tile[1][1]))

        # read the lon and lat tile
        lon_data = longitude[idx]
        lat_data = latitude[idx]

        # may not be processing full row wise (all columns)
        dims = lon_data.shape
        col_offset = idx[1].start

        view = np.full(dims, no_data, dtype=out_dtype)
        azi = np.full(dims, no_data, dtype=out_dtype)
        asol = np.full(dims, no_data, dtype=out_dtype)
        soazi = np.full(dims, no_data, dtype=out_dtype)
        rela_angle = np.full(dims, no_data, dtype=out_dtype)
        time = np.full(dims, no_data, dtype=out_dtype)
        # loop each row within each tile (which itself could be a single row)
        for i in range(lon_data.shape[0]):
            row_id = idx[0].start + i + 1  # FORTRAN 1 based index

            stat = angle(
                dims[1],
                acquisition.lines,
                row_id,
                col_offset,
                lat_data[i],
                lon_data[i],
                spheroid[0],
                orbital_elements[0],
                acquisition.decimal_hour(),
                century,
                trackpoints,
                smodel[0],
                track[0],
                view[i],
                azi[i],
                asol[i],
                soazi[i],
                rela_angle[i],
                time[i],
                x_cent,
                n_cent,
            )
            # x_cent[idx[0]], n_cent[idx[0]])

            if stat != 0:
                msg = ("Error in calculating angles at row: {}.\n"
                       "No interval found in track!")
                raise RuntimeError(msg.format(row_id - 1))

        # output to disk
        sat_v_ds[idx] = view
        sat_az_ds[idx] = azi
        sol_z_ds[idx] = asol
        sol_az_ds[idx] = soazi
        rel_az_ds[idx] = rela_angle
        time_ds[idx] = time

    # outputs
    # TODO: rework create_boxline so that it reads tiled data effectively
    create_centreline_dataset(geobox, x_cent, n_cent, grp)
    create_boxline(
        acquisition,
        sat_v_ds[:],
        grp[DatasetName.CENTRELINE.value],
        grp,
        acquisition.maximum_view_angle,
    )

    if out_group is None:
        return fid
Code example #8
def incident_angles(
    satellite_solar_group,
    slope_aspect_group,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
):
    """
    Calculates the incident angle and the azimuthal incident angle.

    :param satellite_solar_group:
        The root HDF5 `Group` that contains the solar zenith and
        solar azimuth datasets specified by the pathnames given by:

        * DatasetName.SOLAR_ZENITH
        * DatasetName.SOLAR_AZIMUTH

    :param slope_aspect_group:
        The root HDF5 `Group` that contains the slope and aspect
        datasets specified by the pathnames given by:

        * DatasetName.SLOPE
        * DatasetName.ASPECT

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be as follows:

        * DatasetName.INCIDENT
        * DatasetName.AZIMUTHAL_INCIDENT

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # dataset arrays
    dname = DatasetName.SOLAR_ZENITH.value
    solar_zenith_dataset = satellite_solar_group[dname]
    dname = DatasetName.SOLAR_AZIMUTH.value
    solar_azimuth_dataset = satellite_solar_group[dname]
    slope_dataset = slope_aspect_group[DatasetName.SLOPE.value]
    aspect_dataset = slope_aspect_group[DatasetName.ASPECT.value]

    geobox = GriddedGeoBox.from_dataset(solar_zenith_dataset)
    shape = geobox.get_shape_yx()
    rows, cols = shape
    crs = geobox.crs.ExportToWkt()

    # Initialise the output files
    if out_group is None:
        fid = h5py.File("incident-angles.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.INCIDENT_GROUP.value not in fid:
        fid.create_group(GroupName.INCIDENT_GROUP.value)

    if filter_opts is None:
        filter_opts = {}

    grp = fid[GroupName.INCIDENT_GROUP.value]
    tile_size = solar_zenith_dataset.chunks
    filter_opts["chunks"] = tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    no_data = numpy.nan
    kwargs["shape"] = shape
    kwargs["fillvalue"] = no_data
    kwargs["dtype"] = "float32"

    # output datasets
    dataset_name = DatasetName.INCIDENT.value
    incident_dset = grp.create_dataset(dataset_name, **kwargs)
    dataset_name = DatasetName.AZIMUTHAL_INCIDENT.value
    azi_inc_dset = grp.create_dataset(dataset_name, **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        "crs_wkt": crs,
        "geotransform": geobox.transform.to_gdal(),
        "no_data_value": no_data,
    }
    desc = "Contains the incident angles in degrees."
    attrs["description"] = desc
    attrs["alias"] = "incident"
    attach_image_attributes(incident_dset, attrs)

    desc = "Contains the azimuthal incident angles in degrees."
    attrs["description"] = desc
    attrs["alias"] = "azimuthal-incident"
    attach_image_attributes(azi_inc_dset, attrs)

    # process by tile
    for tile in generate_tiles(cols, rows, tile_size[1], tile_size[0]):
        # Row and column start and end locations
        ystart = tile[0][0]
        xstart = tile[1][0]
        yend = tile[0][1]
        xend = tile[1][1]
        idx = (slice(ystart, yend), slice(xstart, xend))

        # Tile size
        ysize = yend - ystart
        xsize = xend - xstart

        # Read the data for the current tile
        # Convert to required datatype and transpose
        sol_zen = as_array(solar_zenith_dataset[idx],
                           dtype=numpy.float32,
                           transpose=True)
        sol_azi = as_array(solar_azimuth_dataset[idx],
                           dtype=numpy.float32,
                           transpose=True)
        slope = as_array(slope_dataset[idx],
                         dtype=numpy.float32,
                         transpose=True)
        aspect = as_array(aspect_dataset[idx],
                          dtype=numpy.float32,
                          transpose=True)

        # Initialise the work arrays
        incident = numpy.zeros((ysize, xsize), dtype="float32")
        azi_incident = numpy.zeros((ysize, xsize), dtype="float32")

        # Process the current tile
        incident_angle(
            xsize,
            ysize,
            sol_zen,
            sol_azi,
            slope,
            aspect,
            incident.transpose(),
            azi_incident.transpose(),
        )

        # Write the current tile to disk
        incident_dset[idx] = incident
        azi_inc_dset[idx] = azi_incident

    if out_group is None:
        return fid
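incident_angle is an F2Py-wrapped Fortran routine and is not shown. The standard relation for the incidence angle on a tilted surface is cos(theta_i) = cos(theta_s)*cos(slope) + sin(theta_s)*sin(slope)*cos(phi_s - aspect), with theta_s/phi_s the solar zenith/azimuth. A NumPy sketch under the assumption that the wrapped routine implements this relation:

import numpy as np

def incident_angle_sketch(sol_zen, sol_azi, slope, aspect):
    """Incidence angle in degrees on a sloped surface (all inputs in degrees).
    A sketch of the assumed relation, not the wrapped Fortran routine."""
    zen = np.radians(sol_zen)
    slp = np.radians(slope)
    rel = np.radians(sol_azi - aspect)
    cos_inc = np.cos(zen) * np.cos(slp) + np.sin(zen) * np.sin(slp) * np.cos(rel)
    return np.degrees(np.arccos(np.clip(cos_inc, -1.0, 1.0)))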
Code example #9
def relative_azimuth_slope(
    incident_angles_group,
    exiting_angles_group,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
):
    """
    Calculates the relative azimuth angle on the slope surface.

    :param incident_angles_group:
        The root HDF5 `Group` that contains the azimuthal incident
        angle dataset specified by the pathname given by:

        * DatasetName.AZIMUTHAL_INCIDENT

    :param exiting_angles_group:
        The root HDF5 `Group` that contains the azimuthal exiting
        angle dataset specified by the pathname given by:

        * DatasetName.AZIMUTHAL_EXITING

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be as follows:

        * DatasetName.RELATIVE_SLOPE

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # dataset arrays
    dname = DatasetName.AZIMUTHAL_INCIDENT.value
    azimuth_incident_dataset = incident_angles_group[dname]
    dname = DatasetName.AZIMUTHAL_EXITING.value
    azimuth_exiting_dataset = exiting_angles_group[dname]

    geobox = GriddedGeoBox.from_dataset(azimuth_incident_dataset)
    shape = geobox.get_shape_yx()
    rows, cols = shape
    crs = geobox.crs.ExportToWkt()

    # Initialise the output files
    if out_group is None:
        fid = h5py.File("relative-azimuth-angles.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.REL_SLP_GROUP.value not in fid:
        fid.create_group(GroupName.REL_SLP_GROUP.value)

    if filter_opts is None:
        filter_opts = {}

    grp = fid[GroupName.REL_SLP_GROUP.value]
    tile_size = azimuth_incident_dataset.chunks
    filter_opts["chunks"] = tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    no_data = numpy.nan
    kwargs["shape"] = shape
    kwargs["fillvalue"] = no_data
    kwargs["dtype"] = "float32"

    # output datasets
    out_dset = grp.create_dataset(DatasetName.RELATIVE_SLOPE.value, **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        "crs_wkt": crs,
        "geotransform": geobox.transform.to_gdal(),
        "no_data_value": no_data,
    }
    desc = "Contains the relative azimuth angles on the slope surface in " "degrees."
    attrs["description"] = desc
    attrs["alias"] = "relative-slope"
    attach_image_attributes(out_dset, attrs)

    # process by tile
    for tile in generate_tiles(cols, rows, tile_size[1], tile_size[0]):
        # Row and column start and end locations
        ystart, yend = tile[0]
        xstart, xend = tile[1]
        idx = (slice(ystart, yend), slice(xstart, xend))

        # Read the data for the current tile
        azi_inc = azimuth_incident_dataset[idx]
        azi_exi = azimuth_exiting_dataset[idx]

        # Process the tile
        rel_azi = azi_inc - azi_exi
        rel_azi[rel_azi <= -180.0] += 360.0
        rel_azi[rel_azi > 180.0] -= 360.0

        # Write the current tile to disk
        out_dset[idx] = rel_azi

    if out_group is None:
        return fid
Code example #10
File: dsm.py  Project: ASVincent/wagl
def get_dsm(acquisition, national_dsm, buffer_distance=8000, out_group=None,
            compression=H5CompressionFilter.LZF, filter_opts=None):
    """
    Given an acquisition and a national Digital Surface Model,
    extract a subset from the DSM based on the acquisition extents
    plus x & y margins. The subset is then smoothed with a 3x3
    gaussian filter.
    A square margin is applied to the extents.

    :param acquisition:
        An instance of an acquisition object.

    :param national_dsm:
        A string containing the full filepath name to an image on
        disk containing national digital surface model.

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset name will be as follows:

        * DatasetName.DSM_SMOOTHED

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # Use the 1st acquisition to setup the geobox
    geobox = acquisition.gridded_geo_box()
    shape = geobox.get_shape_yx()

    # buffered image extents/margins
    margins = pixel_buffer(acquisition, buffer_distance)

    # Get the dimensions and geobox of the new image
    dem_cols = shape[1] + margins.left + margins.right
    dem_rows = shape[0] + margins.top + margins.bottom
    dem_shape = (dem_rows, dem_cols)
    dem_origin = geobox.convert_coordinates((0 - margins.left,
                                             0 - margins.top))
    dem_geobox = GriddedGeoBox(dem_shape, origin=dem_origin,
                               pixelsize=geobox.pixelsize,
                               crs=geobox.crs.ExportToWkt())

    # Retrieve the DSM data
    dsm_data = reproject_file_to_array(national_dsm, dst_geobox=dem_geobox,
                                       resampling=Resampling.bilinear)

    # Output the reprojected result
    # Initialise the output files
    if out_group is None:
        fid = h5py.File('dsm-subset.h5', 'w', driver='core', backing_store=False)
    else:
        fid = out_group

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    if acquisition.tile_size[0] == 1:
        filter_opts['chunks'] = (1, dem_cols)
    else:
        # TODO: rework the tiling regime for larger dsm
        # for non single row based tiles, we won't have ideal
        # matching reads for tiled processing between the acquisition
        # and the DEM
        filter_opts['chunks'] = acquisition.tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()

    group = fid.create_group(GroupName.ELEVATION_GROUP.value)

    param_grp = group.create_group('PARAMETERS')
    param_grp.attrs['left_buffer'] = margins.left
    param_grp.attrs['right_buffer'] = margins.right
    param_grp.attrs['top_buffer'] = margins.top
    param_grp.attrs['bottom_buffer'] = margins.bottom

    # dataset attributes
    attrs = {'crs_wkt': geobox.crs.ExportToWkt(),
             'geotransform': dem_geobox.transform.to_gdal()}

    # Smooth the DSM
    dsm_data = filter_dsm(dsm_data)
    dname = DatasetName.DSM_SMOOTHED.value
    out_sm_dset = group.create_dataset(dname, data=dsm_data, **kwargs)
    desc = ("A subset of a Digital Surface Model smoothed with a gaussian "
            "kernel.")
    attrs['description'] = desc
    attach_image_attributes(out_sm_dset, attrs)

    if out_group is None:
        return fid
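pixel_buffer is not shown; since the docstring states that a square margin derived from buffer_distance is applied, a hypothetical equivalent (the name, signature and rounding are assumptions) might be:

import math
from collections import namedtuple

Margins = namedtuple("Margins", ["left", "right", "top", "bottom"])

def pixel_buffer_sketch(x_pixel_size, y_pixel_size, buffer_distance=8000):
    """Hypothetical stand-in: pixels needed to cover buffer_distance on each edge."""
    nx = int(math.ceil(buffer_distance / abs(x_pixel_size)))
    ny = int(math.ceil(buffer_distance / abs(y_pixel_size)))
    return Margins(left=nx, right=nx, top=ny, bottom=ny)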
Code example #11
def get_dsm(
    acquisition,
    pathname,
    buffer_distance=8000,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
):
    """
    Given an acquisition and a national Digital Surface Model,
    extract a subset from the DSM based on the acquisition extents
    plus x & y margins. The subset is then smoothed with a 3x3
    gaussian filter.
    A square margin is applied to the extents.

    :param acquisition:
        An instance of an acquisition object.

    :param pathname:
        A string pathname of the DSM with a ':' to separate the
        filename from the HDF5 dataset name.

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset name will be as follows:

        * DatasetName.DSM_SMOOTHED

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # Use the 1st acquisition to setup the geobox
    geobox = acquisition.gridded_geo_box()
    shape = geobox.get_shape_yx()

    # buffered image extents/margins
    margins = pixel_buffer(acquisition, buffer_distance)

    # Get the dimensions and geobox of the new image
    dem_cols = shape[1] + margins.left + margins.right
    dem_rows = shape[0] + margins.top + margins.bottom
    dem_shape = (dem_rows, dem_cols)
    dem_origin = geobox.convert_coordinates(
        (0 - margins.left, 0 - margins.top))
    dem_geobox = GriddedGeoBox(
        dem_shape,
        origin=dem_origin,
        pixelsize=geobox.pixelsize,
        crs=geobox.crs.ExportToWkt(),
    )

    # split the DSM filename, dataset name, and load
    fname, dname = pathname.split(":")
    with h5py.File(fname, "r") as dsm_fid:
        dsm_ds = dsm_fid[dname]
        dsm_geobox = GriddedGeoBox.from_dataset(dsm_ds)

        # calculate full border extents into CRS of DSM
        extents = dem_geobox.project_extents(dsm_geobox.crs)
        ul_xy = (extents[0], extents[3])
        ur_xy = (extents[2], extents[3])
        lr_xy = (extents[2], extents[1])
        ll_xy = (extents[0], extents[1])

        # load the subset and corresponding geobox
        subs, subs_geobox = read_subset(dsm_ds,
                                        ul_xy,
                                        ur_xy,
                                        lr_xy,
                                        ll_xy,
                                        edge_buffer=1)

        # ancillary metadata tracking
        metadata = current_h5_metadata(dsm_fid, dataset_path=dname)

    # Retrieve the DSM data
    dsm_data = reproject_array_to_array(subs,
                                        subs_geobox,
                                        dem_geobox,
                                        resampling=Resampling.bilinear)

    # free memory
    subs = None

    # Output the reprojected result
    # Initialise the output files
    if out_group is None:
        fid = h5py.File("dsm-subset.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    if acquisition.tile_size[0] == 1:
        filter_opts["chunks"] = (1, dem_cols)
    else:
        # TODO: rework the tiling regime for larger dsm
        # for non single row based tiles, we won't have ideal
        # matching reads for tiled processing between the acquisition
        # and the DEM
        filter_opts["chunks"] = acquisition.tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()

    group = fid.create_group(GroupName.ELEVATION_GROUP.value)

    param_grp = group.create_group("PARAMETERS")
    param_grp.attrs["left_buffer"] = margins.left
    param_grp.attrs["right_buffer"] = margins.right
    param_grp.attrs["top_buffer"] = margins.top
    param_grp.attrs["bottom_buffer"] = margins.bottom

    # dataset attributes
    attrs = {
        "crs_wkt": geobox.crs.ExportToWkt(),
        "geotransform": dem_geobox.transform.to_gdal(),
    }

    # Smooth the DSM
    dsm_data = filter_dsm(dsm_data)
    dname = DatasetName.DSM_SMOOTHED.value
    out_sm_dset = group.create_dataset(dname, data=dsm_data, **kwargs)
    desc = "A subset of a Digital Surface Model smoothed with a gaussian " "kernel."
    attrs["description"] = desc
    attrs["id"] = numpy.array([metadata["id"]], VLEN_STRING)
    attach_image_attributes(out_sm_dset, attrs)

    if out_group is None:
        return fid
Code example #12
def convert_file(fname,
                 out_fname,
                 group_name='/',
                 dataset_name='dataset',
                 compression=H5CompressionFilter.LZF,
                 filter_opts=None,
                 attrs=None):
    """
    Convert generic single band image file to HDF5.
    Processes in a tiled fashion to minimise memory use.
    Will process all columns by n (default 256) rows at a time,
    where n can be specified via command line using:
    --filter-opts '{"chunks": (n, xsize)}'

    :param fname:
        A str containing the raster filename.

    :param out_fname:
        A str containing the output filename for the HDF5 file.

    :param dataset_name:
        A str containing the dataset name to use in the HDF5 file.

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param attrs:
        A dict containing any attribute information to be attached
        to the HDF5 Dataset.

    :return:
        None. Content is written directly to disk.
    """
    # opening as `append` mode allows us to add additional datasets
    with h5py.File(out_fname, 'a') as fid:
        with rasterio.open(fname) as ds:

            # create empty or copy the user supplied filter options
            if not filter_opts:
                filter_opts = dict()
            else:
                filter_opts = filter_opts.copy()

            # use sds native chunks if none are provided
            if 'chunks' not in filter_opts:
                filter_opts['chunks'] = (256, 256)

            # read all cols for n rows (ytile), as the GA's DEM is BSQ interleaved
            ytile = filter_opts['chunks'][0]

            # dataset attributes
            if attrs:
                attrs = attrs.copy()
            else:
                attrs = {}

            attrs['geotransform'] = ds.transform.to_gdal()
            attrs['crs_wkt'] = ds.crs.wkt

            # dataset creation options
            kwargs = compression.config(
                **filter_opts).dataset_compression_kwargs()
            kwargs['shape'] = (ds.height, ds.width)
            kwargs['dtype'] = ds.dtypes[0]

            dataset_name = ppjoin(group_name, dataset_name)
            dataset = fid.create_dataset(dataset_name, **kwargs)
            attach_image_attributes(dataset, attrs)

            # process each tile
            for tile in generate_tiles(ds.width, ds.height, ds.width, ytile):
                idx = (slice(tile[0][0],
                             tile[0][1]), slice(tile[1][0], tile[1][1]))
                data = ds.read(1, window=tile)
                dataset[idx] = data
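A minimal call sketch for the converter above; the filter_opts dict mirrors the documented command-line form --filter-opts '{"chunks": (n, xsize)}', and the file names and chunk width here are purely illustrative:

convert_file(
    'elevation.tif',
    'elevation.h5',
    group_name='/ELEVATION',
    dataset_name='dsm',
    filter_opts={'chunks': (256, 10980)},  # illustrative raster width
    attrs={'description': 'Example DSM conversion'},
)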
Code example #13
def combine_shadow_masks(self_shadow_group,
                         cast_shadow_sun_group,
                         cast_shadow_satellite_group,
                         out_group=None,
                         compression=H5CompressionFilter.LZF,
                         filter_opts=None):
    """
    A convenience function for combining the shadow masks into a single
    boolean array.

    :param self_shadow_group:
        The root HDF5 `Group` that contains the self shadow
        dataset specified by the pathname given by:

        * DatasetName.SELF_SHADOW

    :param cast_shadow_sun_group:
        The root HDF5 `Group` that contains the cast shadow
        (solar direction) dataset specified by the pathname
        given by:

        * DatasetName.CAST_SHADOW_FMT

    :param cast_shadow_satellite_group:
        The root HDF5 `Group` that contains the cast shadow
        (satellite direction) dataset specified by the pathname
        given by:

        * DatasetName.CAST_SHADOW_FMT

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by the format string detailed
        by:

        * DatasetName.COMBINED_SHADOW

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # access the datasets
    dname_fmt = DatasetName.CAST_SHADOW_FMT.value
    self_shad = self_shadow_group[DatasetName.SELF_SHADOW.value]
    cast_sun = cast_shadow_sun_group[dname_fmt.format(source='SUN')]
    dname = dname_fmt.format(source='SATELLITE')
    cast_sat = cast_shadow_satellite_group[dname]
    geobox = GriddedGeoBox.from_dataset(self_shad)

    # Initialise the output files
    if out_group is None:
        fid = h5py.File('combined-shadow.h5', 'w',
                        driver='core',
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.SHADOW_GROUP.value not in fid:
        fid.create_group(GroupName.SHADOW_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    grp = fid[GroupName.SHADOW_GROUP.value]
    tile_size = cast_sun.chunks
    filter_opts['chunks'] = tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    cols, rows = geobox.get_shape_xy()
    kwargs['shape'] = (rows, cols)
    kwargs['dtype'] = 'bool'

    # output dataset
    out_dset = grp.create_dataset(DatasetName.COMBINED_SHADOW.value, **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        'crs_wkt': geobox.crs.ExportToWkt(),
        'geotransform': geobox.transform.to_gdal()
    }
    desc = ("Combined shadow masks: 1. self shadow, "
            "2. cast shadow (solar direction), "
            "3. cast shadow (satellite direction).")
    attrs['description'] = desc
    attrs['mask_values'] = "False = Shadow; True = Non Shadow"
    attrs['alias'] = 'terrain-shadow'
    attach_image_attributes(out_dset, attrs)

    # process by tile
    for tile in generate_tiles(cols, rows, tile_size[1], tile_size[0]):
        # Row and column start locations
        ystart, yend = tile[0]
        xstart, xend = tile[1]
        idx = (slice(ystart, yend), slice(xstart, xend))

        out_dset[idx] = (self_shad[idx] & cast_sun[idx] & cast_sat[idx])

    if out_group is None:
        return fid
Code example #14
def self_shadow(incident_angles_group,
                exiting_angles_group,
                out_group=None,
                compression=H5CompressionFilter.LZF,
                filter_opts=None):
    """
    Computes the self shadow mask.

    :param incident_angles_group:
        The root HDF5 `Group` that contains the incident
        angle dataset specified by the pathname given by:

        * DatasetName.INCIDENT

    :param exiting_angles_group:
        The root HDF5 `Group` that contains the exiting
        angle dataset specified by the pathname given by:

        * DatasetName.EXITING

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.
        The dataset name will be given by:

        * DatasetName.SELF_SHADOW

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    incident_angle = incident_angles_group[DatasetName.INCIDENT.value]
    exiting_angle = exiting_angles_group[DatasetName.EXITING.value]
    geobox = GriddedGeoBox.from_dataset(incident_angle)

    # Initialise the output file
    if out_group is None:
        fid = h5py.File('self-shadow.h5', 'w', driver='core', backing_store=False)
    else:
        fid = out_group

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    if GroupName.SHADOW_GROUP.value not in fid:
        fid.create_group(GroupName.SHADOW_GROUP.value)

    grp = fid[GroupName.SHADOW_GROUP.value]

    tile_size = exiting_angle.chunks
    filter_opts['chunks'] = tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    cols, rows = geobox.get_shape_xy()
    kwargs['shape'] = (rows, cols)
    kwargs['dtype'] = 'bool'

    # output dataset
    dataset_name = DatasetName.SELF_SHADOW.value
    out_dset = grp.create_dataset(dataset_name, **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        'crs_wkt': geobox.crs.ExportToWkt(),
        'geotransform': geobox.transform.to_gdal()
    }
    desc = "Self shadow mask derived using the incident and exiting angles."
    attrs['description'] = desc
    attrs['alias'] = 'self-shadow'
    attach_image_attributes(out_dset, attrs)

    # process by tile
    for tile in generate_tiles(cols, rows, tile_size[1], tile_size[0]):
        # Row and column start locations
        ystart, yend = tile[0]
        xstart, xend = tile[1]
        idx = (slice(ystart, yend), slice(xstart, xend))

        # Read the data for the current tile
        inc = numpy.radians(incident_angle[idx])
        exi = numpy.radians(exiting_angle[idx])

        # Process the tile
        mask = numpy.ones(inc.shape, dtype='uint8')
        mask[numpy.cos(inc) <= 0.0] = 0
        mask[numpy.cos(exi) <= 0.0] = 0

        # Write the current tile to disk
        out_dset[idx] = mask

    if out_group is None:
        return fid
Code example #15
def calculate_cast_shadow(acquisition,
                          dsm_group,
                          satellite_solar_group,
                          buffer_distance,
                          out_group=None,
                          compression=H5CompressionFilter.LZF,
                          filter_opts=None,
                          solar_source=True):
    """
    This code is an interface to the fortran code
    cast_shadow_main.f90 written by Fuqin (and modified to
    work with F2py).

    The following was taken from the top of the Fortran program:
    "cast_shadow_main.f90":

    Creates a shadow mask for a standard Landsat scene
    the program was originally written by DLB Jupp in Oct. 2010
    for a small sub_matrix and was modified by Fuqin Li in Oct.
    2010 so that the program can be used for large landsat scene.

    Basically, a sub-matrix A is embedded in a larger DEM image
    and the borders must be large enough to find the shaded pixels.
    If we assume the solar azimuth and zenith angles change very
    little within the sub-matrix A, then the Landsat scene can be
    divided into several sub_matrix.
    For the Australian region, with 0.00025 degree resolution, the
    sub-matrix A is set to 500x500

    we also need to set extra DEM lines/columns to run the Landsat
    scene. This will change with elevation
    difference within the scene and solar zenith angle. For
    Australian region and Landsat scene with 0.00025 degree
    resolution, the maximum extra lines are set to 250 pixels/lines
    for each direction. This figure should be sufficient for everywhere
    and anytime in Australia. Thus the DEM image will be larger than
    landsat image for 500 lines x 500 columns

    :param acquisition:
        An instance of an acquisition object.

    :param dsm_group:
        The root HDF5 `Group` that contains the Digital Surface Model
        data.
        The dataset pathnames are given by:

        * DatasetName.DSM_SMOOTHED

        The dataset must have the same dimensions as `acquisition`
        plus a margin of widths specified by margin.

    :param satellite_solar_group:
        The root HDF5 `Group` that contains the satellite and solar
        datasets specified by the pathnames given by:

        * DatasetName.SOLAR_ZENITH
        * DatasetName.SOLAR_AZIMUTH
        * DatasetName.SATELLITE_VIEW
        * DatasetName.SATELLITE_AZIMUTH

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by the format string detailed
        by:

        * DatasetName.CAST_SHADOW_FMT

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param solar_source:
        A `bool` indicating whether or not the source for the line
        of sight comes from the sun (True; Default), or False
        indicating the satellite.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.

    :warning:
        The Fortran code cannot be compiled with ``-O3`` as it
        produces incorrect results if it is.
    """
    # Setup the geobox
    geobox = acquisition.gridded_geo_box()
    x_res, y_res = geobox.pixelsize
    x_origin, y_origin = geobox.origin

    # Are we in UTM or geographics?
    is_utm = not geobox.crs.IsGeographic()

    # Retrieve the spheroid parameters
    # (used in calculating pixel size in metres per lat/lon)
    spheroid, _ = setup_spheroid(geobox.crs.ExportToWkt())

    # Define Top, Bottom, Left, Right pixel buffer margins
    margins = pixel_buffer(acquisition, buffer_distance)

    if solar_source:
        zenith_name = DatasetName.SOLAR_ZENITH.value
        azimuth_name = DatasetName.SOLAR_AZIMUTH.value
    else:
        zenith_name = DatasetName.SATELLITE_VIEW.value
        azimuth_name = DatasetName.SATELLITE_AZIMUTH.value

    zenith_angle = satellite_solar_group[zenith_name][:]
    azimuth_angle = satellite_solar_group[azimuth_name][:]
    elevation = dsm_group[DatasetName.DSM_SMOOTHED.value][:]

    # block height and width of the window/submatrix used in the cast
    # shadow algorithm
    block_width = margins.left + margins.right
    block_height = margins.top + margins.bottom

    # Compute the cast shadow mask
    ierr, mask = cast_shadow_main(elevation, zenith_angle, azimuth_angle,
                                  x_res, y_res, spheroid, y_origin, x_origin,
                                  margins.left, margins.right, margins.top,
                                  margins.bottom, block_height, block_width,
                                  is_utm)

    if ierr:
        raise CastShadowError(ierr)

    source_dir = 'SUN' if solar_source else 'SATELLITE'

    # Initialise the output file
    if out_group is None:
        fid = h5py.File('cast-shadow-{}.h5'.format(source_dir), 'w',
                        driver='core',
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.SHADOW_GROUP.value not in fid:
        fid.create_group(GroupName.SHADOW_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    grp = fid[GroupName.SHADOW_GROUP.value]
    tile_size = satellite_solar_group[zenith_name].chunks
    filter_opts['chunks'] = tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    kwargs['dtype'] = 'bool'

    dname_fmt = DatasetName.CAST_SHADOW_FMT.value
    out_dset = grp.create_dataset(dname_fmt.format(source=source_dir),
                                  data=mask,
                                  **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        'crs_wkt': geobox.crs.ExportToWkt(),
        'geotransform': geobox.transform.to_gdal()
    }
    desc = ("The cast shadow mask determined using the {} "
            "as the source direction.").format(source_dir)
    attrs['description'] = desc
    attrs['alias'] = 'cast-shadow-{}'.format(source_dir).lower()
    attach_image_attributes(out_dset, attrs)

    if out_group is None:
        return fid
Code example #16
def slope_aspect_arrays(acquisition, dsm_group, buffer_distance,
                        out_group=None, compression=H5CompressionFilter.LZF,
                        filter_opts=None):
    """
    Calculates slope and aspect.

    :param acquisition:
        An instance of an acquisition object.

    :param dsm_group:
        The root HDF5 `Group` that contains the Digital Surface Model
        data.
        The dataset pathname is given by:

        * DatasetName.DSM_SMOOTHED

        The dataset must have the same dimensions as `acquisition`
        plus a margin of widths specified by margin.

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by the format string detailed
        by:

        * DatasetName.SLOPE
        * DatasetName.ASPECT

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """

    # Setup the geobox
    geobox = acquisition.gridded_geo_box()

    # Retrieve the spheroid parameters
    # (used in calculating pixel size in metres per lat/lon)
    spheroid, _ = setup_spheroid(geobox.crs.ExportToWkt())

    # Are we in projected or geographic space
    is_utm = not geobox.crs.IsGeographic()

    # Define Top, Bottom, Left, Right pixel margins
    margins = pixel_buffer(acquisition, buffer_distance)

    # Get the x and y pixel sizes
    _, y_origin = geobox.origin
    x_res, y_res = geobox.pixelsize

    # Get acquisition dimensions and add 1 pixel top, bottom, left & right
    cols, rows = geobox.get_shape_xy()
    ncol = cols + 2
    nrow = rows + 2

    # elevation dataset
    elevation = dsm_group[DatasetName.DSM_SMOOTHED.value]
    ele_rows, ele_cols = elevation.shape

    # TODO: check that the index is correct
    # Define the index to read the DEM subset
    ystart, ystop = (margins.top - 1, ele_rows - (margins.bottom - 1))
    xstart, xstop = (margins.left - 1, ele_cols - (margins.right - 1))
    idx = (slice(ystart, ystop), slice(xstart, xstop))

    subset = as_array(elevation[idx], dtype=numpy.float32, transpose=True)

    # Define an array of latitudes
    # This will be ignored if is_utm == True
    alat = numpy.array([y_origin - i * y_res for i in range(-1, nrow - 1)],
                       dtype=numpy.float64)  # yes, I did mean float64.

    # Output the reprojected result
    # Initialise the output files
    if out_group is None:
        fid = h5py.File('slope-aspect.h5', 'w', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.SLP_ASP_GROUP.value not in fid:
        fid.create_group(GroupName.SLP_ASP_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()
    filter_opts['chunks'] = acquisition.tile_size

    group = fid[GroupName.SLP_ASP_GROUP.value]

    # metadata for calculation
    param_group = group.create_group('PARAMETERS')
    param_group.attrs['dsm_index'] = ((ystart, ystop), (xstart, xstop))
    param_group.attrs['pixel_buffer'] = '1 pixel'

    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    no_data = -999
    kwargs['fillvalue'] = no_data

    # Define the output arrays. These will be transposed upon input
    slope = numpy.zeros((rows, cols), dtype='float32')
    aspect = numpy.zeros((rows, cols), dtype='float32')

    slope_aspect(ncol, nrow, cols, rows, x_res, y_res, spheroid, alat, is_utm,
                 subset, slope.transpose(), aspect.transpose())

    # output datasets
    dname = DatasetName.SLOPE.value
    slope_dset = group.create_dataset(dname, data=slope, **kwargs)
    dname = DatasetName.ASPECT.value
    aspect_dset = group.create_dataset(dname, data=aspect, **kwargs)

    # attach some attributes to the image datasets
    attrs = {'crs_wkt': geobox.crs.ExportToWkt(),
             'geotransform': geobox.transform.to_gdal(),
             'no_data_value': no_data}
    desc = "The slope derived from the input elevation model."
    attrs['description'] = desc
    attach_image_attributes(slope_dset, attrs)

    desc = "The aspect derived from the input elevation model."
    attrs['description'] = desc
    attach_image_attributes(aspect_dset, attrs)

    if out_group is None:
        return fid
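
A small worked check of the DSM subset arithmetic used above, with made-up margin widths and image sizes (purely illustrative); the index keeps exactly one buffer pixel on each side of the acquisition grid:

from collections import namedtuple

Margins = namedtuple("Margins", "top bottom left right")

rows, cols = 4000, 3000                 # acquisition grid (illustrative)
margins = Margins(250, 250, 250, 250)   # pixel buffer (illustrative)

# the smoothed DSM covers the acquisition plus the margins
ele_rows = rows + margins.top + margins.bottom
ele_cols = cols + margins.left + margins.right

# subset index, as computed in slope_aspect_arrays
ystart, ystop = margins.top - 1, ele_rows - (margins.bottom - 1)
xstart, xstop = margins.left - 1, ele_cols - (margins.right - 1)

# the subset is the acquisition grid plus one pixel on every side
assert (ystop - ystart, xstop - xstart) == (rows + 2, cols + 2)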
Code example #17
0
File: temperature.py Project: ASVincent/wagl
def surface_brightness_temperature(acquisition, interpolation_group,
                                   out_group=None,
                                   compression=H5CompressionFilter.LZF,
                                   filter_opts=None):
    """
    Convert Thermal acquisition to Surface Brightness Temperature.

    T[Kelvin] = k2 / ln( 1 + (k1 / I[0]) )

    where T is the surface brightness temperature (the surface temperature
    if the surface is assumed to be an ideal black body i.e. unit emissivity),
    k1 & k2 are calibration constants specific to the platform/sensor/band,
    and I[0] is the surface radiance (the integrated band radiance, in
    Watts per square metre per steradian per thousand nanometres).

    I = t I[0] + d

    where I is the radiance at the sensor, t is the transmittance (through
    the atmosphere), and d is radiance from the atmosphere itself.

    :param acquisition:
        An instance of an acquisition object.

    :param interpolation_group:
        The root HDF5 `Group` that contains the interpolated
        atmospheric coefficients.
        The dataset pathnames are given by the following string format:

        * DatasetName.INTERPOLATION_FMT

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by the format string detailed
        by:

        * DatasetName.TEMPERATURE_FMT

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.

    :notes:
        This function used to accept `NumPy`-like datasets as inputs,
        but as this functionality was never used, it was simpler to
        pass the H5 Group object through, which in most cases
        reduced the number of parameters being passed around,
        thereby simplifying the overall workflow and making it
        consistent with other functions within the workflow.
    """
    acq = acquisition
    geobox = acq.gridded_geo_box()
    bn = acq.band_name

    # retrieve the upwelling radiation and transmittance datasets
    dname_fmt = DatasetName.INTERPOLATION_FMT.value
    dname = dname_fmt.format(coefficient=AC.PATH_UP.value, band_name=bn)
    upwelling_radiation = interpolation_group[dname]
    dname = dname_fmt.format(coefficient=AC.TRANSMITTANCE_UP.value, band_name=bn)
    transmittance = interpolation_group[dname]

    # Initialise the output file
    if out_group is None:
        fid = h5py.File('surface-temperature.h5', 'w', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.STANDARD_GROUP.value not in fid:
        fid.create_group(GroupName.STANDARD_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()
    filter_opts['chunks'] = acq.tile_size

    group = fid[GroupName.STANDARD_GROUP.value]
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    kwargs['shape'] = (acq.lines, acq.samples)
    kwargs['fillvalue'] = NO_DATA_VALUE
    kwargs['dtype'] = 'float32'

    # attach some attributes to the image datasets
    attrs = {'crs_wkt': geobox.crs.ExportToWkt(),
             'geotransform': geobox.transform.to_gdal(),
             'no_data_value': kwargs['fillvalue'],
             'platform_id': acq.platform_id,
             'sensor_id': acq.sensor_id,
             'band_id': acq.band_id,
             'band_name': bn,
             'alias': acq.alias}

    name_fmt = DatasetName.TEMPERATURE_FMT.value
    dataset_name = name_fmt.format(product=ArdProducts.SBT.value,
                                   band_name=acq.band_name)
    out_dset = group.create_dataset(dataset_name, **kwargs)

    desc = "Surface Brightness Temperature in Kelvin."
    attrs['description'] = desc
    attach_image_attributes(out_dset, attrs)

    # pylint: disable=unused-variable
    # constants
    k1 = acq.K1
    k2 = acq.K2

    # process each tile
    for tile in acq.tiles():
        idx = (slice(tile[0][0], tile[0][1]), slice(tile[1][0], tile[1][1]))

        radiance = acq.radiance_data(window=tile, out_no_data=NO_DATA_VALUE)
        path_up = upwelling_radiation[idx]
        trans = transmittance[idx]
        mask = ~numpy.isfinite(trans)
        expr = "(radiance - path_up) / trans"
        corrected_radiance = numexpr.evaluate(expr)
        mask |= corrected_radiance <= 0
        expr = "k2 / log(k1 / corrected_radiance + 1)"
        brightness_temp = numexpr.evaluate(expr)
        brightness_temp[mask] = kwargs['fillvalue']

        out_dset[idx] = brightness_temp
    acq.close()  # If dataset is cached; clear it

    if out_group is None:
        return fid
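
The conversion documented in the docstring above can be sketched with plain NumPy. The k1/k2, path radiance and transmittance values below are illustrative placeholders, not real calibration constants or atmospheric terms:

import numpy as np

k1, k2 = 600.0, 1300.0      # illustrative calibration constants
path_up = 0.5               # illustrative upwelling path radiance
trans = 0.85                # illustrative transmittance
fill = -999.0

radiance_at_sensor = np.array([7.5, 8.1, np.nan, 0.2])

# invert I = t * I[0] + d to recover the surface radiance I[0]
surface_radiance = (radiance_at_sensor - path_up) / trans

# T[Kelvin] = k2 / ln(1 + k1 / I[0]); mask non-physical radiances
with np.errstate(invalid="ignore", divide="ignore"):
    temperature = k2 / np.log(1.0 + k1 / surface_radiance)

bad = ~np.isfinite(temperature) | (surface_radiance <= 0)
temperature[bad] = fill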
Code example #18
0
def convert_file(
    fname: Path,
    out_h5: h5py.Group,
    out_dataset_path: str = "SWFO-DSM",
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
    attrs=None,
):
    """
    Convert generic single band image file to HDF5.
    Processes in a tiled fashion to minimise memory use.
    Will process all columns by n (default 256) rows at a time,
    where n can be specified via command line using:
    --filter-opts '{"chunks": (n, xsize)}'

    :param fname:
        A str containing the raster filename.

    :param out_h5:
        A writeable h5py `Group` (or `File`) in which to create the
        output dataset.

    :param out_dataset_path:
        A str containing the dataset name to use within the HDF5 file.

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param attrs:
        A dict containing any attribute information to be attached
        to the HDF5 Dataset.

    :return:
        None. Content is written directly to disk.
    """
    with rasterio.open(str(fname), "r") as ds:

        # create empty or copy the user supplied filter options
        if not filter_opts:
            filter_opts = dict()
        else:
            filter_opts = filter_opts.copy()

        # use sds native chunks if none are provided
        if "chunks" not in filter_opts:
            filter_opts["chunks"] = (min(256, ds.height), min(256, ds.width))

        # read all cols for n rows (ytile), as the GA's DEM is BSQ interleaved
        ytile = filter_opts["chunks"][0]

        # dataset attributes
        if attrs:
            attrs = attrs.copy()
        else:
            attrs = {}
        attrs["geotransform"] = ds.transform.to_gdal()
        attrs["crs_wkt"] = ds.crs.wkt

        # dataset creation options
        kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
        kwargs["shape"] = (ds.height, ds.width)
        kwargs["dtype"] = ds.dtypes[0]

        dataset = out_h5.create_dataset(out_dataset_path, **kwargs)
        attach_image_attributes(dataset, attrs)

        # process each tile
        for tile in generate_tiles(ds.width, ds.height, ds.width, ytile):
            idx = (slice(tile[0][0], tile[0][1]),
                   slice(tile[1][0], tile[1][1]))
            data = ds.read(1, window=tile)
            dataset[idx] = data

        assert ds.count == 1  # checksum call assumes single band image
        metadata = {
            "id": str(
                generate_fallback_uuid(
                    PRODUCT_HREF,
                    path=str(fname.stem),
                    checksum=ds.checksum(1),
                )
            )
        }

    return [metadata], [out_dataset_path]
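
convert_file (and several other examples in this listing) iterates over generate_tiles(width, height, xtile, ytile) and indexes each tile as ((ystart, ystop), (xstart, xstop)). That interface is only inferred from how the tiles are used here; a hypothetical stand-in with the same behaviour could look like this:

def row_block_tiles(width, height, xtile, ytile):
    """Yield ((ystart, ystop), (xstart, xstop)) windows covering an image.

    Hypothetical stand-in for generate_tiles, based purely on how its
    output is indexed in the surrounding examples; the real helper may
    differ in signature or ordering.
    """
    for ystart in range(0, height, ytile):
        ystop = min(ystart + ytile, height)
        for xstart in range(0, width, xtile):
            xstop = min(xstart + xtile, width)
            yield (ystart, ystop), (xstart, xstop)


# all columns by 256 rows at a time, as convert_file does by default
tiles = list(row_block_tiles(width=1024, height=1000, xtile=1024, ytile=256))
assert tiles[0] == ((0, 256), (0, 1024))
assert tiles[-1] == ((768, 1000), (0, 1024))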
Code example #19
0
File: mcd43a1.py Project: sixy6e/swfo
def convert_vrt(
    fname,
    out_h5: h5py.Group,
    dataset_name="dataset",
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
    attrs=None,
):
    """
    Convert the VRT mosaic to HDF5.
    """
    with rasterio.open(fname) as rds:
        # set default chunks and set dimensions
        if rds.count == 3:
            chunks = (3, 256, 256)
            dims = (3, rds.height, rds.width)
        else:
            chunks = (256, 256)
            dims = (rds.height, rds.width)

        # create empty or copy the user supplied filter options
        if not filter_opts:
            filter_opts = dict()
        else:
            filter_opts = filter_opts.copy()

        # use the default chunks if none are provided
        if "chunks" not in filter_opts:
            filter_opts["chunks"] = chunks

        # modify to have 3D chunks if we have a multiband vrt
        if rds.count == 3 and len(filter_opts["chunks"]) != 3:
            # copy the users original 2D chunk and insert the third
            chunks = list(filter_opts["chunks"])
            chunks.insert(0, 3)
            filter_opts["chunks"] = chunks

        # dataset attributes
        if attrs:
            attrs = attrs.copy()
        else:
            attrs = {}

        attrs["geotransform"] = rds.transform.to_gdal()
        attrs["crs_wkt"] = rds.crs.wkt
        attrs["nodata"] = rds.nodata

        # dataset creation options
        kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
        kwargs["shape"] = dims
        kwargs["dtype"] = rds.dtypes[0]

        dataset = out_h5.create_dataset(dataset_name, **kwargs)
        attach_image_attributes(dataset, attrs)

        # tiled processing (all cols by chunked rows)
        ytile = filter_opts["chunks"][1] if rds.count == 3 else filter_opts["chunks"][0]
        tiles = generate_tiles(rds.width, rds.height, rds.width, ytile)

        for tile in tiles:
            # numpy index
            if rds.count == 3:
                idx = (
                    slice(None),
                    slice(tile[0][0], tile[0][1]),
                    slice(tile[1][0], tile[1][1]),
                )
            else:
                idx = (slice(tile[0][0], tile[0][1]), slice(tile[1][0], tile[1][1]))

            # ensure single band rds is read as 2D not 3D
            data = rds.read(window=tile) if rds.count == 3 else rds.read(1, window=tile)

            # write
            dataset[idx] = data
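
A hedged usage sketch for convert_vrt. The file names, dataset name and chunk sizes below are placeholders; whether the source VRT has one or three bands determines whether the chunks are expanded to 3D:

import h5py

vrt_fname = "mcd43a1-mosaic.vrt"      # placeholder path to a VRT mosaic

with h5py.File("mcd43a1-mosaic.h5", "w") as out_h5:
    convert_vrt(
        vrt_fname,
        out_h5,
        dataset_name="BRDF-PARAMETERS",      # placeholder dataset name
        filter_opts={"chunks": (512, 512)},  # promoted to (3, 512, 512) for 3-band VRTs
    )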
Code example #20
0
File: reflectance.py Project: sixy6e/wagl
def calculate_reflectance(
    acquisition,
    interpolation_group,
    satellite_solar_group,
    slope_aspect_group,
    relative_slope_group,
    incident_angles_group,
    exiting_angles_group,
    shadow_masks_group,
    ancillary_group,
    rori,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
    normalized_solar_zenith=45.0,
    esun=None,
):
    """
    Calculates Lambertian, BRDF corrected and BRDF + terrain
    illumination corrected surface reflectance.

    :param acquisition:
        An instance of an acquisition object.

    :param interpolation_group:
        The root HDF5 `Group` that contains the interpolated
        atmospheric coefficients.
        The dataset pathnames are given by:

        * DatasetName.INTERPOLATION_FMT

    :param satellite_solar_group:
        The root HDF5 `Group` that contains the solar zenith and
        solar azimuth datasets specified by the pathnames given by:

        * DatasetName.SOLAR_ZENITH
        * DatasetName.SOLAR_AZIMUTH
        * DatasetName.SATELLITE_VIEW
        * DatasetName.SATELLITE_AZIMUTH
        * DatasetName.RELATIVE_AZIMUTH

    :param slope_aspect_group:
        The root HDF5 `Group` that contains the slope and aspect
        datasets specified by the pathnames given by:

        * DatasetName.SLOPE
        * DatasetName.ASPECT

    :param relative_slope_group:
        The root HDF5 `Group` that contains the relative slope dataset
        specified by the pathname given by:

        * DatasetName.RELATIVE_SLOPE

    :param incident_angles_group:
        The root HDF5 `Group` that contains the incident
        angle dataset specified by the pathname given by:

        * DatasetName.INCIDENT

    :param exiting_angles_group:
        The root HDF5 `Group` that contains the exiting
        angle dataset specified by the pathname given by:

        * DatasetName.EXITING

    :param shadow_masks_group:
        The root HDF5 `Group` that contains the combined shadow
        masks; self shadow, cast shadow (solar),
        cast shadow (satellite), dataset specified by the pathname
        given by:

        * DatasetName.COMBINED_SHADOW

    :param ancillary_group:
        The root HDF5 `Group` that contains the Isotropic (iso),
        RossThick (vol), and LiSparseR (geo) BRDF scalar parameters.
        The dataset pathnames are given by:

        * DatasetName.BRDF_FMT

    :param rori:
        Threshold for terrain correction. Fuqin to document.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by the format string detailed
        by:

        * DatasetName.REFLECTANCE_FMT

        The reflectance products are:

        * lambertian
        * nbar (BRDF corrected reflectance)
        * nbart (BRDF + terrain illumination corrected reflectance)

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param normalized_solar_zenith:
        A float value type to normalize reflectance to a particular angle.

    :param esun:
        A float value type. The solar irradiance normal to the
        atmosphere, in units of W/sq cm/sr/nm.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    geobox = acquisition.gridded_geo_box()
    bn = acquisition.band_name

    dname_fmt = DatasetName.INTERPOLATION_FMT.value
    fv_dataset = interpolation_group[dname_fmt.format(coefficient=AC.FV.value,
                                                      band_name=bn)]
    fs_dataset = interpolation_group[dname_fmt.format(coefficient=AC.FS.value,
                                                      band_name=bn)]
    b_dataset = interpolation_group[dname_fmt.format(coefficient=AC.B.value,
                                                     band_name=bn)]
    s_dataset = interpolation_group[dname_fmt.format(coefficient=AC.S.value,
                                                     band_name=bn)]
    a_dataset = interpolation_group[dname_fmt.format(coefficient=AC.A.value,
                                                     band_name=bn)]
    dir_dataset = interpolation_group[dname_fmt.format(
        coefficient=AC.DIR.value, band_name=bn)]
    dif_dataset = interpolation_group[dname_fmt.format(
        coefficient=AC.DIF.value, band_name=bn)]
    ts_dataset = interpolation_group[dname_fmt.format(coefficient=AC.TS.value,
                                                      band_name=bn)]
    solar_zenith_dset = satellite_solar_group[DatasetName.SOLAR_ZENITH.value]
    solar_azimuth_dset = satellite_solar_group[DatasetName.SOLAR_AZIMUTH.value]
    satellite_v_dset = satellite_solar_group[DatasetName.SATELLITE_VIEW.value]
    relative_a_dset = satellite_solar_group[DatasetName.RELATIVE_AZIMUTH.value]
    slope_dataset = slope_aspect_group[DatasetName.SLOPE.value]
    aspect_dataset = slope_aspect_group[DatasetName.ASPECT.value]
    relative_s_dset = relative_slope_group[DatasetName.RELATIVE_SLOPE.value]
    incident_angle_dataset = incident_angles_group[DatasetName.INCIDENT.value]
    exiting_angle_dataset = exiting_angles_group[DatasetName.EXITING.value]
    shadow_dataset = shadow_masks_group[DatasetName.COMBINED_SHADOW.value]

    dname_fmt = DatasetName.BRDF_FMT.value
    dname = dname_fmt.format(band_name=bn,
                             parameter=BrdfDirectionalParameters.ALPHA_1.value)
    brdf_alpha1 = ancillary_group[dname][()]

    dname = dname_fmt.format(band_name=bn,
                             parameter=BrdfDirectionalParameters.ALPHA_2.value)
    brdf_alpha2 = ancillary_group[dname][()]

    # Initialise the output file
    if out_group is None:
        fid = h5py.File("surface-reflectance.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.STANDARD_GROUP.value not in fid:
        fid.create_group(GroupName.STANDARD_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()
    filter_opts["chunks"] = acquisition.tile_size

    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    grp = fid[GroupName.STANDARD_GROUP.value]
    kwargs["shape"] = (acquisition.lines, acquisition.samples)
    kwargs["fillvalue"] = NO_DATA_VALUE
    kwargs["dtype"] = "int16"

    # create the datasets
    dname_fmt = DatasetName.REFLECTANCE_FMT.value
    dname = dname_fmt.format(product=AP.LAMBERTIAN.value, band_name=bn)
    lmbrt_dset = grp.create_dataset(dname, **kwargs)

    dname = dname_fmt.format(product=AP.NBAR.value, band_name=bn)
    nbar_dset = grp.create_dataset(dname, **kwargs)

    dname = dname_fmt.format(product=AP.NBART.value, band_name=bn)
    nbart_dset = grp.create_dataset(dname, **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        "crs_wkt": geobox.crs.ExportToWkt(),
        "geotransform": geobox.transform.to_gdal(),
        "no_data_value": kwargs["fillvalue"],
        "rori_threshold_setting": rori,
        "platform_id": acquisition.platform_id,
        "sensor_id": acquisition.sensor_id,
        "band_id": acquisition.band_id,
        "band_name": bn,
        "alias": acquisition.alias,
    }

    desc = "Contains the lambertian reflectance data scaled by 10000."
    attrs["description"] = desc
    attach_image_attributes(lmbrt_dset, attrs)

    desc = "Contains the brdf corrected reflectance data scaled by 10000."
    attrs["description"] = desc
    attach_image_attributes(nbar_dset, attrs)

    desc = "Contains the brdf and terrain corrected reflectance data scaled " "by 10000."
    attrs["description"] = desc
    attach_image_attributes(nbart_dset, attrs)

    # process by tile
    for tile in acquisition.tiles():
        # tile indices
        idx = (slice(tile[0][0], tile[0][1]), slice(tile[1][0], tile[1][1]))

        # define some static arguments
        acq_args = {"window": tile, "out_no_data": NO_DATA_VALUE, "esun": esun}
        f32_args = {"dtype": numpy.float32, "transpose": True}

        # Read the data corresponding to the current tile for all dataset
        # Convert the datatype if required and transpose
        band_data = as_array(acquisition.radiance_data(**acq_args), **f32_args)

        shadow = as_array(shadow_dataset[idx], numpy.int8, transpose=True)
        solar_zenith = as_array(solar_zenith_dset[idx], **f32_args)
        solar_azimuth = as_array(solar_azimuth_dset[idx], **f32_args)
        satellite_view = as_array(satellite_v_dset[idx], **f32_args)
        relative_angle = as_array(relative_a_dset[idx], **f32_args)
        slope = as_array(slope_dataset[idx], **f32_args)
        aspect = as_array(aspect_dataset[idx], **f32_args)
        incident_angle = as_array(incident_angle_dataset[idx], **f32_args)
        exiting_angle = as_array(exiting_angle_dataset[idx], **f32_args)
        relative_slope = as_array(relative_s_dset[idx], **f32_args)
        a_mod = as_array(a_dataset[idx], **f32_args)
        b_mod = as_array(b_dataset[idx], **f32_args)
        s_mod = as_array(s_dataset[idx], **f32_args)
        fs = as_array(fs_dataset[idx], **f32_args)
        fv = as_array(fv_dataset[idx], **f32_args)
        ts = as_array(ts_dataset[idx], **f32_args)
        direct = as_array(dir_dataset[idx], **f32_args)
        diffuse = as_array(dif_dataset[idx], **f32_args)

        # Allocate the output arrays
        xsize, ysize = band_data.shape  # band_data has been transposed
        ref_lm = numpy.zeros((ysize, xsize), dtype="int16")
        ref_brdf = numpy.zeros((ysize, xsize), dtype="int16")
        ref_terrain = numpy.zeros((ysize, xsize), dtype="int16")

        # Allocate the work arrays (single row of data)
        ref_lm_work = numpy.zeros(xsize, dtype="float32")
        ref_brdf_work = numpy.zeros(xsize, dtype="float32")
        ref_terrain_work = numpy.zeros(xsize, dtype="float32")

        # Run terrain correction
        reflectance(
            xsize,
            ysize,
            rori,
            brdf_alpha1,
            brdf_alpha2,
            acquisition.reflectance_adjustment,
            kwargs["fillvalue"],
            band_data,
            shadow,
            solar_zenith,
            solar_azimuth,
            satellite_view,
            relative_angle,
            slope,
            aspect,
            incident_angle,
            exiting_angle,
            relative_slope,
            a_mod,
            b_mod,
            s_mod,
            fs,
            fv,
            ts,
            direct,
            diffuse,
            ref_lm_work,
            ref_brdf_work,
            ref_terrain_work,
            ref_lm.transpose(),
            ref_brdf.transpose(),
            ref_terrain.transpose(),
            normalized_solar_zenith,
        )

        # Write the current tile to disk
        lmbrt_dset[idx] = ref_lm
        nbar_dset[idx] = ref_brdf
        nbart_dset[idx] = ref_terrain

    # close any still opened files, arrays etc associated with the acquisition
    acquisition.close()

    if out_group is None:
        return fid
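
The three reflectance products above are stored as int16 scaled by 10000, with the dataset fill value marking invalid pixels. A small sketch of that packing and unpacking convention; the -999 fill value matches the no-data value used elsewhere in this listing, and the reflectance values are illustrative:

import numpy as np

NO_DATA = -999
reflectance = np.array([0.0432, 0.1275, np.nan])    # illustrative floats

# pack: scale by 10000, round, then fill the invalid pixels
scaled = np.round(reflectance * 10000)
scaled[~np.isfinite(scaled)] = NO_DATA
scaled = scaled.astype("int16")                      # [432, 1275, -999]

# unpack: mask the fill value, then divide by 10000
unpacked = np.where(scaled == NO_DATA, np.nan, scaled / 10000.0)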
Code example #21
0
def create_lat_grid(
    acquisition,
    out_fname=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
    depth=7,
):
    """Create latitude grid.

    :param acquisition:
        An instance of an `Acquisition` object.

    :param out_fname:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver.
        Otherwise it should be a string containing the full file path
        name to a writeable location on disk in which to save the HDF5
        file.

        The dataset path names will be as follows:

        * constants.DatasetName.LAT.value

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # Initialise the output files
    if out_fname is None:
        fid = h5py.File("latitude.h5", "w", driver="core", backing_store=False)
    else:
        fid = h5py.File(out_fname, "w")

    geobox = acquisition.gridded_geo_box()

    # define some base attributes for the image datasets
    attrs = {
        "crs_wkt": geobox.crs.ExportToWkt(),
        "geotransform": geobox.transform.to_gdal(),
        "description": LAT_DESC,
    }

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    filter_opts["chunks"] = acquisition.tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()

    lat_grid = create_grid(geobox, get_lat_coordinate, depth)

    grp = fid.create_group(GroupName.LON_LAT_GROUP.value)
    dset = grp.create_dataset(DatasetName.LAT.value, data=lat_grid, **kwargs)
    attach_image_attributes(dset, attrs)

    return fid
Code example #22
0
def create_lon_lat_grids(
    acquisition,
    out_group=None,
    compression=H5CompressionFilter.LZF,
    filter_opts=None,
    depth=7,
):
    """
    Creates 2 by 2D NumPy arrays containing longitude and latitude
    co-ordinates for each array element.

    :param acquisition:
        An instance of an `Acquisition` object.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be given by:

        * constants.DatasetName.LON.value
        * constants.DatasetName.LAT.value

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    geobox = acquisition.gridded_geo_box()
    # Define the lon and lat transform functions
    lon_func = partial(get_lon_coordinate, geobox=geobox, centre=True)
    lat_func = partial(get_lat_coordinate, geobox=geobox, centre=True)

    # Get some basic info about the image
    shape = geobox.get_shape_yx()

    # Initialise the array to contain the result
    result = numpy.zeros(shape, dtype="float64")
    interpolate_grid(result, lon_func, depth=depth, origin=(0, 0), shape=shape)

    # Initialise the output files
    if out_group is None:
        fid = h5py.File("longitude-latitude.h5",
                        "w",
                        driver="core",
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.LON_LAT_GROUP.value not in fid:
        fid.create_group(GroupName.LON_LAT_GROUP.value)

    grp = fid[GroupName.LON_LAT_GROUP.value]

    # define some base attributes for the image datasets
    attrs = {
        "crs_wkt": geobox.crs.ExportToWkt(),
        "geotransform": geobox.transform.to_gdal(),
        "description": LON_DESC,
    }

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    filter_opts["chunks"] = acquisition.tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    lon_dset = grp.create_dataset(DatasetName.LON.value, data=result, **kwargs)
    attach_image_attributes(lon_dset, attrs)

    result = numpy.zeros(shape, dtype="float64")
    interpolate_grid(result, lat_func, depth=depth, origin=(0, 0), shape=shape)

    attrs["description"] = LAT_DESC
    lat_dset = grp.create_dataset(DatasetName.LAT.value, data=result, **kwargs)
    attach_image_attributes(lat_dset, attrs)

    return fid
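
create_lon_lat_grids fills the coordinate arrays by interpolating the lon/lat functions rather than evaluating them at every pixel. For a geographic CRS with an axis-aligned, pixel-centre geotransform, the same grids can be computed directly as a rough cross-check; the transform values and shape below are placeholders:

import numpy as np

# GDAL geotransform: (origin_x, pixel_w, row_rot, origin_y, col_rot, pixel_h)
gt = (148.0, 0.00025, 0.0, -35.0, 0.0, -0.00025)     # placeholder values
rows, cols = 4, 5                                    # placeholder shape

# pixel-centre offsets
j = np.arange(cols) + 0.5
i = np.arange(rows) + 0.5

lon = gt[0] + j * gt[1]      # constant down each column
lat = gt[3] + i * gt[5]      # constant along each row

lon_grid, lat_grid = np.meshgrid(lon, lat)           # (rows, cols) float64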
Code example #23
0
def exiting_angles(satellite_solar_group,
                   slope_aspect_group,
                   out_group=None,
                   compression=H5CompressionFilter.LZF,
                   filter_opts=None):
    """
    Calculates the exiting angle and the azimuthal exiting angle.

    :param satellite_solar_group:
        The root HDF5 `Group` that contains the satellite view and
        satellite azimuth datasets specified by the pathnames given by:

        * DatasetName.SATELLITE_VIEW
        * DatasetName.SATELLITE_AZIMUTH
        
    :param slope_aspect_group:
        The root HDF5 `Group` that contains the slope and aspect
        datasets specified by the pathnames given by:

        * DatasetName.SLOPE
        * DatasetName.ASPECT

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The dataset names will be as follows:

        * DatasetName.EXITING
        * DatasetName.AZIMUTHAL_EXITING

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF 

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    # dataset arrays
    dname = DatasetName.SATELLITE_VIEW.value
    satellite_view_dataset = satellite_solar_group[dname]
    dname = DatasetName.SATELLITE_AZIMUTH.value
    satellite_azimuth_dataset = satellite_solar_group[dname]
    slope_dataset = slope_aspect_group[DatasetName.SLOPE.value]
    aspect_dataset = slope_aspect_group[DatasetName.ASPECT.value]

    geobox = GriddedGeoBox.from_dataset(satellite_view_dataset)
    shape = geobox.get_shape_yx()
    rows, cols = shape
    crs = geobox.crs.ExportToWkt()

    # Initialise the output files
    if out_group is None:
        fid = h5py.File('exiting-angles.h5', 'w',
                        driver='core',
                        backing_store=False)
    else:
        fid = out_group

    if GroupName.EXITING_GROUP.value not in fid:
        fid.create_group(GroupName.EXITING_GROUP.value)

    if filter_opts is None:
        filter_opts = {}
    else:
        filter_opts = filter_opts.copy()

    grp = fid[GroupName.EXITING_GROUP.value]
    tile_size = satellite_view_dataset.chunks
    filter_opts['chunks'] = tile_size
    kwargs = compression.config(**filter_opts).dataset_compression_kwargs()
    no_data = -999
    kwargs['shape'] = shape
    kwargs['fillvalue'] = no_data
    kwargs['dtype'] = 'float32'

    # output datasets
    dataset_name = DatasetName.EXITING.value
    exiting_dset = grp.create_dataset(dataset_name, **kwargs)
    dataset_name = DatasetName.AZIMUTHAL_EXITING.value
    azi_exit_dset = grp.create_dataset(dataset_name, **kwargs)

    # attach some attributes to the image datasets
    attrs = {
        'crs_wkt': crs,
        'geotransform': geobox.transform.to_gdal(),
        'no_data_value': no_data
    }
    desc = "Contains the exiting angles in degrees."
    attrs['description'] = desc
    attrs['alias'] = 'exiting'
    attach_image_attributes(exiting_dset, attrs)

    desc = "Contains the azimuthal exiting angles in degrees."
    attrs['description'] = desc
    attrs['alias'] = 'azimuthal-exiting'
    attach_image_attributes(azi_exit_dset, attrs)

    # process by tile
    for tile in generate_tiles(cols, rows, tile_size[1], tile_size[0]):
        # Row and column start and end locations
        ystart = tile[0][0]
        xstart = tile[1][0]
        yend = tile[0][1]
        xend = tile[1][1]
        idx = (slice(ystart, yend), slice(xstart, xend))

        # Tile size
        ysize = yend - ystart
        xsize = xend - xstart

        # Read the data for the current tile
        # Convert to required datatype and transpose
        sat_view = as_array(satellite_view_dataset[idx],
                            dtype=numpy.float32,
                            transpose=True)
        sat_azi = as_array(satellite_azimuth_dataset[idx],
                           dtype=numpy.float32,
                           transpose=True)
        slope = as_array(slope_dataset[idx],
                         dtype=numpy.float32,
                         transpose=True)
        aspect = as_array(aspect_dataset[idx],
                          dtype=numpy.float32,
                          transpose=True)

        # Initialise the work arrays
        exiting = numpy.zeros((ysize, xsize), dtype='float32')
        azi_exiting = numpy.zeros((ysize, xsize), dtype='float32')

        # Process the current tile
        exiting_angle(xsize, ysize, sat_view, sat_azi, slope, aspect,
                      exiting.transpose(), azi_exiting.transpose())

        # Write the current tile to disk
        exiting_dset[idx] = exiting
        azi_exit_dset[idx] = azi_exiting

    if out_group is None:
        return fid
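
A hedged usage sketch for exiting_angles. The file names are placeholders, and it assumes the satellite view/azimuth datasets sit at the root of their file while the slope/aspect datasets live under the group written by slope_aspect_arrays; adjust the group paths to wherever the earlier steps actually wrote them:

import h5py

with h5py.File("satellite-solar.h5", "r") as sat_sol, \
        h5py.File("slope-aspect.h5", "r") as slp_asp, \
        h5py.File("exiting-angles.h5", "w") as out:
    exiting_angles(sat_sol,
                   slp_asp[GroupName.SLP_ASP_GROUP.value],
                   out_group=out)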