Example #1
def test_gdal_cog_compareWeb(runner):
    """Test GDAL COG."""
    with runner.isolated_filesystem():
        profile = cog_profiles.get("jpeg")
        profile["blockxsize"] = 256
        profile["blockysize"] = 256

        # rio cogeo GDAL COG
        cog_translate(
            raster_path_rgba,
            "gdalcogeo.tif",
            profile.copy(),
            quiet=True,
            use_cog_driver=True,
            web_optimized=True,
        )

        # pure COG
        copy(
            raster_path_rgba,
            "cog.tif",
            driver="COG",
            blocksize=256,
            compress="JPEG",
            TILING_SCHEME="GoogleMapsCompatible",
        )

        with rasterio.open("gdalcogeo.tif") as gdalcogeo, rasterio.open(
            "cog.tif"
        ) as cog:
            assert cog.meta == gdalcogeo.meta
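
Either output can be sanity-checked with rio-cogeo's validator. A minimal sketch, assuming rio-cogeo is installed; older versions of cog_validate return a bool while newer ones return a (valid, errors, warnings) tuple.

from rio_cogeo.cogeo import cog_validate

result = cog_validate("gdalcogeo.tif")
# Handle both return conventions: bool (older rio-cogeo) or
# (is_valid, errors, warnings) tuple (newer rio-cogeo).
is_valid = result[0] if isinstance(result, tuple) else result
assert is_valid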
Example #2
def convert_to_cog(source_file, destination_file, resampling=rasterio.enums.Resampling.gauss, blocksize=256,
                   overview_blocksize=256, creation_options=None):
    """Convert source file to a Cloud Optimized GeoTiff new file.

    :param source_file: path to the original raster
    :param destination_file: path to the new raster
    :param resampling: the Resampling method used when building overviews, default Resampling.gauss
    :param blocksize: the block size of the output, default 256
    :param overview_blocksize: the block size of the overviews, default 256
    :param creation_options: dict, options that can override the source raster profile;
                          note that tiled=True and the blocksize cannot be overridden
    """

    with rasterio.open(source_file) as src:
        # creation_options overrides the source profile
        source_profile = src.profile
    creation_options = _creation_options_for_cog(creation_options, source_profile, blocksize)

    with rasterio.Env(GDAL_TIFF_INTERNAL_MASK=True, GDAL_TIFF_OVR_BLOCKSIZE=overview_blocksize):
        with TemporaryDirectory() as temp_dir:
            temp_file = os.path.join(temp_dir, 'temp.tif')
            rasterio_sh.copy(source_file, temp_file, **creation_options)
            with rasterio.open(temp_file, 'r+') as dest:
                factors = _calc_overviews_factors(dest)
                dest.build_overviews(factors, resampling=resampling)
                dest.update_tags(ns='rio_overview', resampling=resampling.name)

                telluric_tags = _get_telluric_tags(source_file)
                if telluric_tags:
                    dest.update_tags(**telluric_tags)

            rasterio_sh.copy(temp_file, destination_file,
                             COPY_SRC_OVERVIEWS=True, **creation_options)
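
A minimal usage sketch for convert_to_cog, assuming the helpers it references (_creation_options_for_cog, _calc_overviews_factors, _get_telluric_tags) are importable from the same module; the paths are placeholders.

import rasterio.enums

convert_to_cog(
    "input.tif",                                   # placeholder source raster
    "output_cog.tif",                              # placeholder destination
    resampling=rasterio.enums.Resampling.average,  # override the gauss default
    blocksize=512,
    creation_options={"compress": "DEFLATE"},      # merged over the source profile
)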
Example #3
    def compress_image(self, image_path, save_directory, save=True):
        if not os.path.exists(save_directory):
            os.makedirs(save_directory)
        print("Compressing image ", self.IND)
        self.IND += 1
        meta = ""
        name = self.get_file_name(image_path)
        band = []
        if not os.path.exists(save_directory + "/" + name):
            dst_crs = rst.crs.CRS.from_epsg(
                4326)  # WGS 84 geographic coordinate system
            dst_width = self.max_width
            dst_height = self.max_height

            xres = (self.maxRight - self.minLeft) / dst_width
            yres = (self.maxTop - self.minBottom) / dst_height
            dst_transform = affine.Affine(xres, 0.0, self.minLeft, 0.0, -yres,
                                          self.maxTop)
            vrt_options = {
                'resampling': rst.enums.Resampling.cubic,
                'crs': dst_crs,
                'transform': dst_transform,
                'height': dst_height,
                'width': dst_width
            }
            if save:
                with rst.open(image_path) as raster:
                    with WarpedVRT(raster, **vrt_options) as vrt:
                        rio_shutil.copy(vrt,
                                        save_directory + "/" + name,
                                        driver='GTiff')

        return name, band, meta
Example #4
def write_cog(layout, location, data, filename_template):
    """
    Write a GeoPySpark (``gps``) tile out as a Cloud-Optimized GeoTIFF

    Uses GDAL to write a COG out to disk, utilizing the given layout and location
    to generate the GeoTIFF header.

    Args:
        layout (``gps.LayoutDefinition``): The layout for the source layer
        location (``gps.SpatialKey`` or ``gps.SpaceTimeKey``): The cell identifier
            in the layer corresponding to the image data
        data (``gps.Tile``): The image data
        filename_template (str): A pattern giving the destination for the target
            image.  Contains two '{}' tokens which will be ``.format``ed with the
            column and row of the ``location``, in that order.  May be an S3 uri or
            local path.
    """
    bands, w, h = data.cells.shape
    nodata = data.no_data_value
    dtype = data.cells.dtype
    cw, ch = cell_size(layout)
    ex = extent_for_cell(layout, location)
    overview_level = int(log(w) / log(2) - 8)

    with rstr.io.MemoryFile() as memfile:
        with memfile.open(driver='GTiff',
                          count=bands,
                          width=w,
                          height=h,
                          transform=Affine(cw, 0.0, ex.xmin, 0.0, -ch,
                                           ex.ymax),
                          crs=rstr.crs.CRS.from_epsg(4326),
                          nodata=nodata,
                          dtype=dtype,
                          compress='lzw',
                          tiled=True) as mem:
            windows = list(mem.block_windows(1))
            for _, w in windows:
                segment = data.cells[:, w.row_off:(w.row_off + w.height),
                                     w.col_off:(w.col_off + w.width)]
                mem.write(segment, window=w)
                mask_value = np.all(segment != nodata, axis=0).astype(
                    np.uint8) * 255
                mem.write_mask(mask_value, window=w)

            overviews = [2**j for j in range(1, overview_level + 1)]
            mem.build_overviews(overviews, rwarp.Resampling.nearest)
            mem.update_tags(ns='rio_overview',
                            resampling=rwarp.Resampling.nearest.value)

            uri = urlparse.urlparse(filename_template)
            if uri.scheme == 's3':
                boto3.resource('s3').Bucket(uri.netloc).upload_fileobj(
                    memfile,
                    uri.path.format(location.col, location.row)[1:])
            else:
                rshutil.copy(mem,
                             filename_template.format(location.col,
                                                      location.row),
                             copy_src_overviews=True)
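
The filename_template contract (two '{}' tokens filled with column and row, optionally inside an S3 URI) can be illustrated with the standard library alone; the template and key values below are made up.

from urllib import parse as urlparse

filename_template = "s3://my-bucket/tiles/{}/{}.tif"   # made-up template
col, row = 42, 17                                      # made-up SpatialKey

uri = urlparse.urlparse(filename_template)
if uri.scheme == "s3":
    # Mirrors the S3 branch in write_cog: bucket name plus formatted key.
    key = uri.path.format(col, row)[1:]
    print(uri.netloc, key)                  # my-bucket tiles/42/17.tif
else:
    print(filename_template.format(col, row))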
Example #5
def to_vrt(data,
           filename,
           resampling=None,
           nodata=None,
           init_dest_nodata=True,
           warp_mem_limit=128):
    """
    Writes an ``xarray.DataArray`` to a VRT file

    Args:
        data (DataArray): The ``xarray.DataArray`` to write.
        filename (str): The output file name to write to.
        resampling (Optional[object]): The resampling algorithm for ``rasterio.vrt.WarpedVRT``. Default is 'nearest'.
        nodata (Optional[float or int]): The 'no data' value for ``rasterio.vrt.WarpedVRT``.
        init_dest_nodata (Optional[bool]): Whether or not to initialize output to ``nodata`` for ``rasterio.vrt.WarpedVRT``.
        warp_mem_limit (Optional[int]): The GDAL memory limit for ``rasterio.vrt.WarpedVRT``.

    Example:
        >>> import geowombat as gw
        >>> from rasterio.enums import Resampling
        >>>
        >>> with gw.config.update(ref_crs=102033):
        >>>
        >>>     with gw.open('image.tif') as ds:
        >>>
        >>>         gw.to_vrt(ds,
        >>>                   'image.vrt',
        >>>                   resampling=Resampling.cubic,
        >>>                   warp_mem_limit=256)
    """

    if not resampling:
        resampling = Resampling.nearest

    # Open the input file on disk
    with rio.open(data.filename) as src:

        with WarpedVRT(
                src,
                src_crs=src.crs,  # the original CRS
                crs=data.crs,  # the transformed CRS
                src_transform=src.transform,  # the original transform
                transform=data.transform,  # the new transform
                dtype=data.gw.dtype,
                resampling=resampling,
                nodata=nodata,
                init_dest_nodata=init_dest_nodata,
                warp_mem_limit=warp_mem_limit) as vrt:

            rio_shutil.copy(vrt, filename, driver='VRT')
Example #6
def tileserver_optimized_raster(src, dest):
    """ This method converts a raster to a tileserver optimized raster.
        The method will reproject the raster to align to the xyz system, in resolution and projection
        It will also create overviews
        And finally it will arragne the raster in a cog way.
        You could take the dest file upload it to a web server that supports ranges and user GeoRaster.get_tile
        on it,
        You are geranteed that you will get as minimal data as possible
    """
    src_raster = tl.GeoRaster2.open(src)
    bounding_box = src_raster.footprint().get_shape(
        tl.constants.WGS84_CRS).bounds
    tile = mercantile.bounding_tile(*bounding_box)
    dest_resolution = mercator_upper_zoom_level(src_raster)
    bounds = tl.GeoVector.from_xyz(tile.x, tile.y, tile.z).get_bounds(
        tl.constants.WEB_MERCATOR_CRS)
    create_options = {
        "tiled": "YES",
        "blocksize": 256,
        "compress": "DEFLATE",
        "photometric": "MINISBLACK"
    }
    with TemporaryDirectory() as temp_dir:
        temp_file = os.path.join(temp_dir, 'temp.tif')

        warp(src,
             temp_file,
             dst_crs=tl.constants.WEB_MERCATOR_CRS,
             resolution=dest_resolution,
             dst_bounds=bounds,
             create_options=create_options)

        with rasterio.Env(GDAL_TIFF_INTERNAL_MASK=True,
                          GDAL_TIFF_OVR_BLOCKSIZE=256):
            resampling = rasterio.enums.Resampling.gauss
            with rasterio.open(temp_file, 'r+') as tmp_raster:
                factors = _calc_overviews_factors(tmp_raster)
                tmp_raster.build_overviews(factors, resampling=resampling)
                tmp_raster.update_tags(ns='rio_overview',
                                       resampling=resampling.name)
                telluric_tags = _get_telluric_tags(src)
                if telluric_tags:
                    tmp_raster.update_tags(**telluric_tags)

            rasterio_sh.copy(temp_file,
                             dest,
                             COPY_SRC_OVERVIEWS=True,
                             tiled=True,
                             compress='DEFLATE',
                             photometric='MINISBLACK')
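
A usage sketch, assuming this function sits next to the telluric-based helpers it calls (warp, mercator_upper_zoom_level, _calc_overviews_factors, _get_telluric_tags); paths are placeholders.

# Placeholder paths; the output is XYZ-aligned, carries overviews and is laid
# out as a COG, so a tile server only has to read the byte ranges it needs.
tileserver_optimized_raster("input.tif", "tileserver_optimized.tif")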
Example #7
    def __init__(self):
        if not os.path.exists("Reduced"):
            os.makedirs("Reduced")
        reduction_size = float(sys.argv[2])
        dst_crs = rst.crs.CRS.from_epsg(4326)
        for image_path in self.image_locations(sys.argv[1]):
            name = self.get_file_name(image_path)
            print(name)
            if not os.path.exists("Reduced/" + name):
                with rst.open(image_path) as src:
                    band = src.read(1)
                    meta = src.profile

                    top = 0
                    left = 0
                    bottom = band.shape[0]
                    right = band.shape[1]

                    print("top: ", top)
                    print("bottom: ", bottom)
                    print("left: ", left)
                    print("right: ", right)

                    dst_width = int((right - left) * reduction_size)
                    dst_height = int((bottom - top) * reduction_size)

                    print("previous width: ", band.shape[1])
                    print("dst_width: ", dst_width)
                    print("previous height: ", band.shape[0])
                    print("dst_height: ", dst_height)

                    leftCoord, topCoord = src.xy(top, left)
                    rightCoord, bottomCoord = src.xy(bottom, right)

                    xres = (rightCoord - leftCoord) / dst_width
                    yres = (topCoord - bottomCoord) / dst_height
                    dst_transform = affine.Affine(xres, 0.0, leftCoord, 0.0,
                                                  -yres, topCoord)
                    vrt_options = {
                        'resampling': rst.enums.Resampling.cubic,
                        'crs': dst_crs,
                        'transform': dst_transform,
                        'height': dst_height,
                        'width': dst_width
                    }
                    with WarpedVRT(src, **vrt_options) as vrt:
                        rio_shutil.copy(vrt, "Reduced/" + name, driver='GTiff')
Example #8
    def normalize_to_std_grid(self, inputs, resamplemethod='nearest'):
        """
        Uses a rasterio virtual raster (WarpedVRT) to standardize grids of different CRS, resolution and boundaries, based on a shapefile geometry feature.
        :param inputs: a list of (daily) raster input files for the water balance.
        :param resamplemethod: resampling method name; only 'nearest' is supported at this time.
        :return: list of numpy arrays
        """
        outputs = []
        npy_outputs = []
        if resamplemethod == 'nearest':
            rs = Resampling.nearest
        else:
            print('only nearest neighbor resampling is supported at this time')
            sys.exit(0)

        for i, warpfile in enumerate(inputs):
            # print('warpfile', warpfile)
            with rasterio.open(warpfile) as src:
                # TODO - make the default configurable.
                #                 if src.crs == None:
                #                     src.crs = CRS.from_epsg(4326)
                # create the virtual raster based on the standard rasterio attributes from the sample tiff and shapefile feature.
                with WarpedVRT(src,
                               resampling=rs,
                               crs=self.crs,
                               transform=self.transform,
                               height=self.rows,
                               width=self.cols) as vrt:
                    data = vrt.read()
                    # print(type(vrt))
                    # save the file as an enumerated tiff. reopen outside this loop with the outputs list
                    outwarp = os.path.join(self.temp_folder,
                                           'temp_{}.tif'.format(i))
                    rio_shutil.copy(vrt, outwarp, driver='GTiff')
                    outputs.append(outwarp)

        # output each virtual file as a temporary .tif file in a temp folder somewhere in the outputs directory.
        # for each file in the temp directory read in the raster as a numpy array and return the list of numpy arrays
        # from this method for use in the rest of the code.
        for ow in outputs:
            with rasterio.open(ow, 'r') as src:
                arr = src.read(1)
                npy_outputs.append(arr)

        return npy_outputs
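
normalize_to_std_grid assumes the instance already carries a target grid (self.crs, self.transform, self.rows, self.cols) and a self.temp_folder. A minimal sketch of deriving that grid from a reference raster; the class name and path are hypothetical.

import rasterio

class StandardGrid:
    """Hypothetical holder for the target grid used by normalize_to_std_grid."""

    def __init__(self, reference_tif, temp_folder):
        with rasterio.open(reference_tif) as ref:
            self.crs = ref.crs              # target CRS for every WarpedVRT
            self.transform = ref.transform  # target affine transform
            self.rows = ref.height
            self.cols = ref.width
        self.temp_folder = temp_folder      # where the temp_{}.tif files go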
Example #9
def test_crs_should_be_set(path_rgb_byte_tif, tmpdir, complex):

    """When ``dst_height``, ``dst_width``, and ``dst_transform`` are set
    :py:class:`rasterio.warp.WarpedVRT` calls ``GDALCreateWarpedVRT()``,
    which requires the caller to then set a projection with
    ``GDALSetProjection()``.

    Permalink to ``GDALCreateWarpedVRT()`` call:

        https://github.com/mapbox/rasterio/blob/1f759e5f67628f163ea2550d8926b91545245712/rasterio/_warp.pyx#L753

    """

    vrt_path = str(tmpdir.join('test_crs_should_be_set.vrt'))

    with rasterio.open(path_rgb_byte_tif) as src:

        dst_crs = 'EPSG:4326'
        dst_height = dst_width = 10
        dst_bounds = transform_bounds(src.crs, dst_crs, *src.bounds)

        # Destination transform
        left, bottom, right, top = dst_bounds
        xres = (right - left) / dst_width
        yres = (top - bottom) / dst_height
        dst_transform = affine.Affine(
            xres, 0.0, left, 0.0, -yres, top)

        # The 'complex' test case hits the affected code path
        vrt_options = {'dst_crs': dst_crs}
        if complex:
            vrt_options.update(
                dst_crs=dst_crs,
                dst_height=dst_height,
                dst_width=dst_width,
                dst_transform=dst_transform,
                resampling=Resampling.nearest)

        with WarpedVRT(src, **vrt_options) as vrt:
            rio_shutil.copy(vrt, vrt_path, driver='VRT')
        with rasterio.open(vrt_path) as src:
            assert src.crs
Example #10
def test_crs_should_be_set(path_rgb_byte_tif, tmpdir, complex):

    """When ``dst_height``, ``dst_width``, and ``dst_transform`` are set
    :py:class:`rasterio.warp.WarpedVRT` calls ``GDALCreateWarpedVRT()``,
    which requires the caller to then set a projection with
    ``GDALSetProjection()``.

    Permalink to ``GDALCreateWarpedVRT()`` call:

        https://github.com/mapbox/rasterio/blob/1f759e5f67628f163ea2550d8926b91545245712/rasterio/_warp.pyx#L753

    """

    vrt_path = str(tmpdir.join("test_crs_should_be_set.vrt"))

    with rasterio.open(path_rgb_byte_tif) as src:

        dst_crs = "EPSG:4326"
        dst_height = dst_width = 10
        dst_bounds = transform_bounds(src.crs, dst_crs, *src.bounds)

        # Destination transform
        left, bottom, right, top = dst_bounds
        xres = (right - left) / dst_width
        yres = (top - bottom) / dst_height
        dst_transform = affine.Affine(xres, 0.0, left, 0.0, -yres, top)

        # The 'complex' test case hits the affected code path
        vrt_options = {"dst_crs": dst_crs}
        if complex:
            vrt_options.update(
                dst_crs=dst_crs,
                dst_height=dst_height,
                dst_width=dst_width,
                dst_transform=dst_transform,
                resampling=Resampling.nearest,
            )

        with WarpedVRT(src, **vrt_options) as vrt:
            rio_shutil.copy(vrt, vrt_path, driver="VRT")
        with rasterio.open(vrt_path) as src:
            assert src.crs
Example #11
def test_validate_optimized(tmpdir):
    from terracotta import cog

    outfile = str(tmpdir / 'raster.tif')
    raster_data = (1000 * np.random.rand(512, 512)).astype(np.uint16)

    profile = BASE_PROFILE.copy()
    profile.update(height=raster_data.shape[0],
                   width=raster_data.shape[1],
                   tiled=True,
                   blockxsize=256,
                   blockysize=256)

    with MemoryFile() as mf, mf.open(**profile) as dst:
        dst.write(raster_data, 1)

        overviews = [2**j for j in range(1, 4)]
        dst.build_overviews(overviews, Resampling.nearest)

        copy(dst, outfile, copy_src_overviews=True, **profile)

    assert cog.validate(outfile)
Example #12
def test_gdal_cog_compare(runner):
    """Test GDAL COG."""
    with runner.isolated_filesystem():
        profile = cog_profiles.get("jpeg")
        profile["blockxsize"] = 256
        profile["blockysize"] = 256

        # rio cogeo GDAL COG
        cog_translate(
            raster_path_rgba,
            "gdalcogeo.tif",
            profile.copy(),
            quiet=True,
            use_cog_driver=True,
        )

        # pure COG
        copy(raster_path_rgba, "cog.tif", driver="COG", blocksize=256, compress="JPEG")

        # rio cogeo cog
        cog_translate(
            raster_path_rgba,
            "riocogeo.tif",
            profile.copy(),
            indexes=(
                1,
                2,
                3,
            ),
            add_mask=True,
            quiet=True,
        )

        with rasterio.open("riocogeo.tif") as riocogeo, rasterio.open(
            "gdalcogeo.tif"
        ) as gdalcogeo, rasterio.open("cog.tif") as cog:
            assert cog.profile == gdalcogeo.profile == riocogeo.profile
            assert cog.overviews(1) == gdalcogeo.overviews(1) == riocogeo.overviews(1)
Example #13
def cog_translate(
    src_path,
    dst_path,
    dst_kwargs,
    indexes=None,
    overview_level=5,
    overview_resampling=None,
    config=None,
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    src_path : str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.
    dst_path : str or Path-like object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        output dataset creation options.
    indexes : tuple, int, optional
        Raster band indexes to copy.
    overview_level : int, optional (default: 5)
        COGEO overview (decimation) level
    overview_resampling : str, one of [average, nearest, mode]
        Resampling algorithm for overviews
    config : dict
        Rasterio Env options.

    """
    config = config or {}

    nodata_mask = None
    src = gdal.Open(src_path, gdal.GA_ReadOnly)
    band = src.GetRasterBand(1)
    nodata = band.GetNoDataValue()

    # Update nodata mask only if nodata is a negative integer value
    if band.DataType == gdal.GDT_Byte and nodata and nodata < 0:
        nodata_mask = 255

    with rasterio.Env(**config):
        with rasterio.open(src_path) as src:

            indexes = indexes if indexes else src.indexes
            meta = src.meta
            meta["count"] = len(indexes)
            meta.pop("alpha", None)

            meta.update(**dst_kwargs)
            meta.pop("compress", None)
            meta.pop("photometric", None)
            if nodata_mask is not None:
                meta['nodata'] = nodata
                meta['dtype'] = 'int16'
            meta['stats'] = True

            with MemoryFile() as memfile:
                with memfile.open(**meta) as mem:
                    wind = list(mem.block_windows(1))
                    for ij, w in wind:
                        matrix = src.read(window=w, indexes=indexes)
                        if nodata_mask is not None:
                            matrix = numpy.array(matrix, dtype='int16')
                            matrix[matrix == nodata_mask] = nodata

                        mem.write(matrix, window=w)

                    if overview_resampling is not None:
                        overviews = [
                            2**j for j in range(1, overview_level + 1)
                        ]

                        mem.build_overviews(overviews,
                                            Resampling[overview_resampling])
                        mem.update_tags(
                            OVR_RESAMPLING_ALG=Resampling[overview_resampling].
                            name.upper())

                    try:
                        copy(mem, dst_path, **dst_kwargs)
                        LOG.info(
                            f"Created a cloud optimized GeoTIFF file, {dst_path}"
                        )
                    except Exception:
                        LOG.exception(
                            f"Error while creating a cloud optimized GeoTIFF file, {dst_path}"
                        )
                        raise
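
A usage sketch for this GDAL-assisted variant; the creation options below are illustrative values, not a required profile.

dst_profile = {
    "driver": "GTiff",
    "tiled": True,
    "blockxsize": 256,
    "blockysize": 256,
    "compress": "DEFLATE",
}
cog_translate("input.tif", "output_cog.tif", dst_profile,
              overview_level=5, overview_resampling="average")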
Example #14
def cog_translate(  # noqa: C901
    source: Union[str, pathlib.PurePath, DatasetReader, DatasetWriter,
                  WarpedVRT],
    dst_path: Union[str, pathlib.PurePath],
    dst_kwargs: Dict,
    indexes: Optional[Sequence[int]] = None,
    nodata: Optional[Union[str, int, float]] = None,
    dtype: Optional[str] = None,
    add_mask: bool = False,
    overview_level: Optional[int] = None,
    overview_resampling: str = "nearest",
    web_optimized: bool = False,
    tms: morecantile.TileMatrixSet = morecantile.tms.get("WebMercatorQuad"),
    zoom_level_strategy: str = "auto",
    aligned_levels: Optional[int] = None,
    resampling: str = "nearest",
    in_memory: Optional[bool] = None,
    config: Optional[Dict] = None,
    allow_intermediate_compression: bool = False,
    forward_band_tags: bool = False,
    quiet: bool = False,
    temporary_compression: str = "DEFLATE",
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    source : str, PathLike object or rasterio.io.DatasetReader
        A dataset path, URL or rasterio.io.DatasetReader object.
        Will be opened in "r" mode.
    dst_path : str or PathLike object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        Output dataset creation options.
    indexes : tuple or int, optional
        Raster band indexes to copy.
    nodata : int, optional
        Overwrite nodata masking values for input dataset.
    dtype: str, optional
        Overwrite output data type. Default will be the input data type.
    add_mask : bool, optional
        Force output dataset creation with a mask.
    overview_level : int, optional (default: None)
        COGEO overview (decimation) level. By default, inferred from data size.
    overview_resampling : str, optional (default: "nearest")
        Resampling algorithm for overviews
    web_optimized: bool, optional (default: False)
        Create web-optimized cogeo.
    tms: morecantile.TileMatrixSet, optional (default: "WebMercatorQuad")
        TileMatrixSet to use for reprojection, resolution and alignment.
    zoom_level_strategy: str, optional (default: auto)
        Strategy to determine zoom level (same as in GDAL 3.2).
        LOWER will select the zoom level immediately below the theoretical computed non-integral zoom level, leading to subsampling.
        On the contrary, UPPER will select the immediately above zoom level, leading to oversampling.
        Defaults to AUTO which selects the closest zoom level.
        ref: https://gdal.org/drivers/raster/cog.html#raster-cog
    aligned_levels: int, optional.
        Number of overview levels for which GeoTIFF tiles and the tiles defined in the tiling scheme match.
        Default is to use the maximum overview levels.
    resampling : str, optional (default: "nearest")
        Resampling algorithm.
    in_memory: bool, optional
        Force processing raster in memory (default: process in memory if small)
    config : dict
        Rasterio Env options.
    allow_intermediate_compression: bool, optional (default: False)
        Allow intermediate file compression to reduce memory/disk footprint.
        Note: This could reduce the speed of the process.
        Ref: https://github.com/cogeotiff/rio-cogeo/issues/103
    forward_band_tags:  bool, optional
        Forward band tags to output bands.
        Ref: https://github.com/cogeotiff/rio-cogeo/issues/19
    quiet: bool, optional (default: False)
        Suppress processing-step messages.
    temporary_compression: str, optional
        Compression used for the intermediate file, default is deflate.

    """
    if isinstance(indexes, int):
        indexes = (indexes, )

    config = config or {}
    with rasterio.Env(**config):
        with ExitStack() as ctx:
            if isinstance(source, (DatasetReader, DatasetWriter, WarpedVRT)):
                src_dst = source
            else:
                src_dst = ctx.enter_context(rasterio.open(source))

            meta = src_dst.meta
            indexes = indexes if indexes else src_dst.indexes
            nodata = nodata if nodata is not None else src_dst.nodata
            dtype = dtype if dtype else src_dst.dtypes[0]
            alpha = utils.has_alpha_band(src_dst)
            mask = utils.has_mask_band(src_dst)

            if not add_mask and (
                (nodata is not None or alpha)
                    and dst_kwargs.get("compress") in ["JPEG", "jpeg"]):
                warnings.warn(
                    "Using lossy compression with Nodata or Alpha band "
                    "can results in unwanted artefacts.",
                    LossyCompression,
                )

            tilesize = min(int(dst_kwargs["blockxsize"]),
                           int(dst_kwargs["blockysize"]))

            if src_dst.width < tilesize or src_dst.height < tilesize:
                tilesize = 2**int(
                    math.log(min(src_dst.width, src_dst.height), 2))
                if tilesize < 64:
                    warnings.warn(
                        "Raster has dimension < 64px. Output COG cannot be tiled"
                        " and overviews cannot be added.",
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs.pop("blockxsize", None)
                    dst_kwargs.pop("blockysize", None)
                    dst_kwargs.pop("tiled")
                    overview_level = 0

                else:
                    warnings.warn(
                        "Block Size are bigger than raster sizes. "
                        "Setting blocksize to {}".format(tilesize),
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs["blockxsize"] = tilesize
                    dst_kwargs["blockysize"] = tilesize

            vrt_params = {
                "add_alpha": True,
                "dtype": dtype,
                "width": src_dst.width,
                "height": src_dst.height,
            }

            if nodata is not None:
                vrt_params.update(
                    dict(nodata=nodata, add_alpha=False, src_nodata=nodata))

            if alpha:
                vrt_params.update(dict(add_alpha=False))

            if web_optimized:
                params = utils.get_web_optimized_params(
                    src_dst,
                    tilesize=tilesize,
                    warp_resampling=resampling,
                    zoom_level_strategy=zoom_level_strategy,
                    aligned_levels=aligned_levels,
                    tms=tms,
                )
                vrt_params.update(**params)

            with WarpedVRT(src_dst, **vrt_params) as vrt_dst:
                meta = vrt_dst.meta
                meta["count"] = len(indexes)

                if add_mask:
                    meta.pop("nodata", None)
                    meta.pop("alpha", None)

                if (dst_kwargs.get("photometric", "").upper() == "YCBCR"
                        and meta["count"] == 1):
                    warnings.warn(
                        "PHOTOMETRIC=YCBCR not supported on a 1-band raster"
                        " and has been set to 'MINISBLACK'")
                    dst_kwargs["photometric"] = "MINISBLACK"

                meta.update(**dst_kwargs)
                meta.pop("compress", None)
                meta.pop("photometric", None)

                if allow_intermediate_compression:
                    meta["compress"] = temporary_compression

                if in_memory is None:
                    in_memory = vrt_dst.width * vrt_dst.height < IN_MEMORY_THRESHOLD

                if in_memory:
                    tmpfile = ctx.enter_context(MemoryFile())
                    tmp_dst = ctx.enter_context(tmpfile.open(**meta))
                else:
                    tmpfile = ctx.enter_context(TemporaryRasterFile(dst_path))
                    tmp_dst = ctx.enter_context(
                        rasterio.open(tmpfile.name, "w", **meta))

                # Transfer color interpretation
                if len(indexes) == 1 and (vrt_dst.colorinterp[indexes[0] - 1]
                                          is not ColorInterp.palette):
                    tmp_dst.colorinterp = [ColorInterp.gray]
                else:
                    tmp_dst.colorinterp = [
                        vrt_dst.colorinterp[b - 1] for b in indexes
                    ]

                if tmp_dst.colorinterp[0] is ColorInterp.palette:
                    try:
                        tmp_dst.write_colormap(1, vrt_dst.colormap(1))
                    except ValueError:
                        warnings.warn(
                            "Dataset has `Palette` color interpretation"
                            " but is missing colormap information")

                wind = list(tmp_dst.block_windows(1))

                if not quiet:
                    click.echo("Reading input: {}".format(source), err=True)
                fout = os.devnull if quiet else sys.stderr
                with click.progressbar(
                        wind, file=fout,
                        show_percent=True) as windows:  # type: ignore
                    for _, w in windows:
                        matrix = vrt_dst.read(window=w, indexes=indexes)
                        tmp_dst.write(matrix, window=w)

                        if add_mask or mask:
                            # Cast mask to uint8 to fix rasterio 1.1.2 error (ref #115)
                            mask_value = vrt_dst.dataset_mask(
                                window=w).astype("uint8")
                            tmp_dst.write_mask(mask_value, window=w)

                if overview_level is None:
                    overview_level = get_maximum_overview_level(
                        vrt_dst.width, vrt_dst.height, minsize=tilesize)

                if not quiet and overview_level:
                    click.echo("Adding overviews...", err=True)

                overviews = [2**j for j in range(1, overview_level + 1)]
                tmp_dst.build_overviews(overviews,
                                        ResamplingEnums[overview_resampling])

                if not quiet:
                    click.echo("Updating dataset tags...", err=True)

                for i, b in enumerate(indexes):
                    tmp_dst.set_band_description(i + 1,
                                                 src_dst.descriptions[b - 1])
                    if forward_band_tags:
                        tmp_dst.update_tags(i + 1, **src_dst.tags(b))

                tags = src_dst.tags()
                tags.update(
                    dict(
                        OVR_RESAMPLING_ALG=ResamplingEnums[overview_resampling]
                        .name.upper()))
                tmp_dst.update_tags(**tags)
                tmp_dst._set_all_scales(
                    [vrt_dst.scales[b - 1] for b in indexes])
                tmp_dst._set_all_offsets(
                    [vrt_dst.offsets[b - 1] for b in indexes])

                if not quiet:
                    click.echo("Writing output to: {}".format(dst_path),
                               err=True)
                copy(tmp_dst, dst_path, copy_src_overviews=True, **dst_kwargs)
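
This function is essentially rio-cogeo's cog_translate; in day-to-day use it is usually driven through the public API together with cog_profiles. A short sketch with placeholder paths:

from rio_cogeo.cogeo import cog_translate
from rio_cogeo.profiles import cog_profiles

profile = cog_profiles.get("deflate")   # tiled GTiff profile with DEFLATE compression
cog_translate("input.tif", "output_cog.tif", profile, in_memory=False, quiet=True)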
Example #15
def optimize_rasters(raster_files: Sequence[Sequence[Path]],
                     output_folder: Path,
                     overwrite: bool = False,
                     resampling_method: str = 'average',
                     reproject: bool = False,
                     in_memory: bool = None,
                     compression: str = 'auto',
                     quiet: bool = False) -> None:
    """Optimize a collection of raster files for use with Terracotta.

    First argument is a list of input files or glob patterns.

    Example:

        $ terracotta optimize-rasters rasters/*.tif -o cloud-optimized/

    Note that only the first band of each raster will be used.
    """
    raster_files_flat = sorted(set(itertools.chain.from_iterable(raster_files)))

    if not raster_files_flat:
        click.echo('No files given')
        return

    rs_method = RESAMPLING_METHODS[resampling_method]

    if compression == 'auto':
        compression = _prefered_compression_method()

    total_pixels = 0
    for f in raster_files_flat:
        if not f.is_file():
            raise click.BadParameter(f'Input raster {f!s} is not a file')

        with rasterio.open(str(f), 'r') as src:
            if src.count > 1 and not quiet:
                click.echo(
                    f'Warning: raster file {f!s} has more than one band. '
                    'Only the first one will be used.', err=True
                )
            total_pixels += src.height * src.width

    output_folder.mkdir(exist_ok=True)

    if not quiet:
        # insert newline for nicer progress bar style
        click.echo('')

    sub_pbar_args = dict(
        disable=quiet,
        leave=False,
        bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt}'
    )

    with contextlib.ExitStack() as outer_env:
        pbar = outer_env.enter_context(tqdm.tqdm(
            total=total_pixels, smoothing=0, disable=quiet,
            bar_format='{l_bar}{bar}| [{elapsed}<{remaining}{postfix}]',
            desc='Optimizing rasters'
        ))
        outer_env.enter_context(rasterio.Env(**GDAL_CONFIG))

        for input_file in raster_files_flat:
            if len(input_file.name) > 30:
                short_name = input_file.name[:13] + '...' + input_file.name[-13:]
            else:
                short_name = input_file.name

            pbar.set_postfix(file=short_name)

            output_file = output_folder / input_file.with_suffix('.tif').name

            if not overwrite and output_file.is_file():
                raise click.BadParameter(
                    f'Output file {output_file!s} exists (use --overwrite to ignore)'
                )

            with contextlib.ExitStack() as es, warnings.catch_warnings():
                warnings.filterwarnings('ignore', message='invalid value encountered.*')

                src = es.enter_context(rasterio.open(str(input_file)))

                if reproject:
                    vrt = es.enter_context(_get_vrt(src, rs_method=rs_method))
                else:
                    vrt = src

                profile = vrt.profile.copy()
                profile.update(COG_PROFILE)

                if in_memory is None:
                    in_memory = vrt.width * vrt.height < IN_MEMORY_THRESHOLD

                if in_memory:
                    memfile = es.enter_context(MemoryFile())
                    dst = es.enter_context(memfile.open(**profile))
                else:
                    tempraster = es.enter_context(TemporaryRasterFile(basedir=output_folder))
                    dst = es.enter_context(rasterio.open(tempraster, 'w', **profile))

                # iterate over blocks
                windows = list(dst.block_windows(1))

                for _, w in tqdm.tqdm(windows, desc='Reading', **sub_pbar_args):
                    block_data = vrt.read(window=w, indexes=[1])
                    dst.write(block_data, window=w)
                    block_mask = vrt.dataset_mask(window=w).astype('uint8')
                    dst.write_mask(block_mask, window=w)

                # add overviews
                if not in_memory:
                    # work around bug mapbox/rasterio#1497
                    dst.close()
                    dst = es.enter_context(rasterio.open(tempraster, 'r+'))

                max_overview_level = math.ceil(math.log2(max(
                    dst.height // profile['blockysize'],
                    dst.width // profile['blockxsize']
                )))

                overviews = [2 ** j for j in range(1, max_overview_level + 1)]
                with tqdm.tqdm(desc='Creating overviews', total=1, **sub_pbar_args):
                    dst.build_overviews(overviews, rs_method)

                dst.update_tags(ns='rio_overview', resampling=rs_method.value)

                # copy to destination (this is necessary to push overviews to start of file)
                with tqdm.tqdm(desc='Compressing', total=1, **sub_pbar_args):
                    copy(
                        dst, str(output_file), copy_src_overviews=True,
                        compress=compression, **COG_PROFILE
                    )

            pbar.update(dst.height * dst.width)
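
The number of overviews built above follows directly from the block-size arithmetic near the end of the loop; a small worked example with made-up dimensions:

import math

height, width = 8000, 10000          # made-up raster dimensions
blockysize = blockxsize = 256

max_overview_level = math.ceil(math.log2(max(
    height // blockysize,            # 31
    width // blockxsize              # 39
)))
overviews = [2 ** j for j in range(1, max_overview_level + 1)]
print(max_overview_level, overviews)  # 6 [2, 4, 8, 16, 32, 64]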
Example #16
def cog_translate(
    src_path,
    dst_path,
    dst_kwargs,
    indexes=None,
    nodata=None,
    alpha=None,
    overview_level=6,
    overview_resampling="nearest",
    config=None,
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    src_path : str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.
    dst_path : str or Path-like object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        output dataset creation options.
    indexes : tuple, int, optional
        Raster band indexes to copy.
    nodata : int, optional
        nodata value for mask creation.
    alpha : int, optional
        alpha band index for mask creation.
    overview_level : int, optional (default: 6)
        COGEO overview (decimation) level
    overview_resampling : str, optional (default: "nearest")
        Resampling algorithm for overviews
    config : dict
        Rasterio Env options.

    """
    config = config or {}

    with rasterio.Env(**config):
        with rasterio.open(src_path) as src:

            indexes = indexes if indexes else src.indexes
            meta = src.meta
            meta["count"] = len(indexes)
            meta.pop("nodata", None)
            meta.pop("alpha", None)

            meta.update(**dst_kwargs)
            meta.pop("compress", None)
            meta.pop("photometric", None)

            with MemoryFile() as memfile:
                with memfile.open(**meta) as mem:
                    wind = list(mem.block_windows(1))
                    with click.progressbar(wind,
                                           length=len(wind),
                                           file=sys.stderr,
                                           show_percent=True) as windows:
                        for ij, w in windows:
                            matrix = src.read(window=w, indexes=indexes)
                            mem.write(matrix, window=w)

                            if nodata is not None:
                                mask_value = (
                                    numpy.all(matrix != nodata, axis=0).astype(
                                        numpy.uint8) * 255)
                            elif alpha is not None:
                                mask_value = src.read(alpha, window=w)
                            else:
                                mask_value = src.dataset_mask(window=w)
                            mem.write_mask(mask_value, window=w)

                    overviews = [2**j for j in range(1, overview_level + 1)]

                    mem.build_overviews(overviews,
                                        Resampling[overview_resampling])
                    mem.update_tags(
                        ns="rio_overview",
                        resampling=Resampling[overview_resampling].value,
                    )

                    copy(mem, dst_path, copy_src_overviews=True, **dst_kwargs)
Example #17
def cog_translate(
    src_path,
    dst_path,
    dst_kwargs,
    indexes=None,
    nodata=None,
    add_mask=None,
    overview_level=None,
    overview_resampling="nearest",
    web_optimized=False,
    latitude_adjustment=True,
    resampling="nearest",
    in_memory=None,
    config=None,
    quiet=False,
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    src_path : str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.
    dst_path : str or Path-like object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        Output dataset creation options.
    indexes : tuple, int, optional
        Raster band indexes to copy.
    nodata : int, optional
        Overwrite nodata masking values for input dataset.
    add_mask : bool, optional
        Force output dataset creation with a mask.
    overview_level : int, optional (default: None)
        COGEO overview (decimation) level. By default, inferred from the data size.
    overview_resampling : str, optional (default: "nearest")
        Resampling algorithm for overviews
    web_optimized: bool, optional (default: False)
        Create web-optimized cogeo.
    latitude_adjustment: bool, optional (default: True)
        Use mercator meters for zoom calculation or ensure max zoom equality.
    resampling : str, optional (default: "nearest")
        Resampling algorithm.
    in_memory: bool, optional
        Force processing raster in memory (default: process in memory if small)
    config : dict
        Rasterio Env options.
    quiet: bool, optional (default: False)
        Suppress processing-step messages.

    """
    config = config or {}

    with rasterio.Env(**config):
        with rasterio.open(src_path) as src_dst:
            meta = src_dst.meta
            indexes = indexes if indexes else src_dst.indexes
            nodata = nodata if nodata is not None else src_dst.nodata
            alpha = has_alpha_band(src_dst)
            mask = has_mask_band(src_dst)

            if not add_mask and (
                (nodata is not None or alpha)
                    and dst_kwargs.get("compress") in ["JPEG", "jpeg"]):
                warnings.warn(
                    "Using lossy compression with Nodata or Alpha band "
                    "can results in unwanted artefacts.",
                    LossyCompression,
                )

            tilesize = min(int(dst_kwargs["blockxsize"]),
                           int(dst_kwargs["blockysize"]))

            if src_dst.width < tilesize or src_dst.height < tilesize:
                tilesize = 2**int(
                    math.log(min(src_dst.width, src_dst.height), 2))
                if tilesize < 64:
                    warnings.warn(
                        "Raster has dimension < 64px. Output COG cannot be tiled"
                        " and overviews cannot be added.",
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs.pop("blockxsize", None)
                    dst_kwargs.pop("blockysize", None)
                    dst_kwargs.pop("tiled")
                    overview_level = 0

                else:
                    warnings.warn(
                        "Block Size are bigger than raster sizes. "
                        "Setting blocksize to {}".format(tilesize),
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs["blockxsize"] = tilesize
                    dst_kwargs["blockysize"] = tilesize

            vrt_params = dict(add_alpha=True)

            if nodata is not None:
                vrt_params.update(
                    dict(nodata=nodata, add_alpha=False, src_nodata=nodata))

            if alpha:
                vrt_params.update(dict(add_alpha=False))

            if web_optimized:
                bounds = list(
                    transform_bounds(*[src_dst.crs, "epsg:4326"] +
                                     list(src_dst.bounds),
                                     densify_pts=21))
                center = [(bounds[0] + bounds[2]) / 2,
                          (bounds[1] + bounds[3]) / 2]

                lat = 0 if latitude_adjustment else center[1]
                max_zoom = get_max_zoom(src_dst, lat=lat, tilesize=tilesize)

                extrema = tile_extrema(bounds, max_zoom)
                w, n = mercantile.xy(*mercantile.ul(
                    extrema["x"]["min"], extrema["y"]["min"], max_zoom))
                vrt_res = _meters_per_pixel(max_zoom, 0, tilesize=tilesize)
                vrt_transform = Affine(vrt_res, 0, w, 0, -vrt_res, n)

                vrt_width = (extrema["x"]["max"] -
                             extrema["x"]["min"]) * tilesize
                vrt_height = (extrema["y"]["max"] -
                              extrema["y"]["min"]) * tilesize

                vrt_params.update(
                    dict(
                        crs="epsg:3857",
                        transform=vrt_transform,
                        width=vrt_width,
                        height=vrt_height,
                        resampling=ResamplingEnums[resampling],
                    ))

            with WarpedVRT(src_dst, **vrt_params) as vrt_dst:
                meta = vrt_dst.meta
                meta["count"] = len(indexes)

                if add_mask:
                    meta.pop("nodata", None)
                    meta.pop("alpha", None)

                meta.update(**dst_kwargs)
                meta.pop("compress", None)
                meta.pop("photometric", None)

                if in_memory is None:
                    in_memory = vrt_dst.width * vrt_dst.height < IN_MEMORY_THRESHOLD

                with ExitStack() as ctx:
                    if in_memory:
                        tmpfile = ctx.enter_context(MemoryFile())
                        tmp_dst = ctx.enter_context(tmpfile.open(**meta))
                    else:
                        tmpfile = ctx.enter_context(
                            TemporaryRasterFile(dst_path))
                        tmp_dst = ctx.enter_context(
                            rasterio.open(tmpfile.name, "w", **meta))

                    wind = list(tmp_dst.block_windows(1))

                    if not quiet:
                        click.echo("Reading input: {}".format(src_path),
                                   err=True)
                    fout = os.devnull if quiet else sys.stderr
                    with click.progressbar(wind,
                                           length=len(wind),
                                           file=fout,
                                           show_percent=True) as windows:
                        for ij, w in windows:
                            matrix = vrt_dst.read(window=w, indexes=indexes)
                            tmp_dst.write(matrix, window=w)

                            if add_mask or mask:
                                mask_value = vrt_dst.dataset_mask(window=w)
                                tmp_dst.write_mask(mask_value, window=w)

                    if overview_level is None:
                        overview_level = get_maximum_overview_level(
                            vrt_dst, tilesize)

                    if not quiet and overview_level:
                        click.echo("Adding overviews...", err=True)

                    overviews = [2**j for j in range(1, overview_level + 1)]
                    tmp_dst.build_overviews(
                        overviews, ResamplingEnums[overview_resampling])

                    if not quiet:
                        click.echo("Updating dataset tags...", err=True)

                    for i, b in enumerate(indexes):
                        tmp_dst.set_band_description(
                            i + 1, src_dst.descriptions[b - 1])

                    tags = src_dst.tags()
                    tags.update(
                        dict(OVR_RESAMPLING_ALG=ResamplingEnums[
                            overview_resampling].name.upper()))
                    tmp_dst.update_tags(**tags)

                    if not quiet:
                        click.echo("Writing output to: {}".format(dst_path),
                                   err=True)
                    copy(tmp_dst,
                         dst_path,
                         copy_src_overviews=True,
                         **dst_kwargs)
Example #18
    def region_of_interest(self, image_path, band_num=4):
        dst_crs = rst.crs.CRS.from_epsg(4326)
        if not os.path.exists("IntRegImgs"):
            os.makedirs("IntRegImgs")
        name = self.get_file_name(image_path)
        if not os.path.exists("IntRegImgs/" + name):
            try:
                with rst.open(image_path) as src:
                    # Write bands
                    band = src.read(band_num)
                    meta = src.profile
                    meta['count'] = 1
                    meta['dtype'] = 'uint8'
                    max_val = np.max(band)
                    band = band / max_val
                    band = band * 255
                    band = band.astype(np.uint8)
                    if not os.path.exists("Band4"):
                        os.makedirs("Band4")
                    raster = []
                    if not os.path.exists("Band4/" + name):
                        raster = rst.open("Band4" + '/' + name, 'w', **meta)
                        raster.write(band, 1)
                        no_black_pix = np.where(band != 0)
                        top = np.amin(no_black_pix[0])
                        left = np.amin(no_black_pix[1])
                        bottom = np.amax(no_black_pix[0])
                        right = np.amax(no_black_pix[1])

                        dst_width = right - left
                        dst_height = bottom - top

                        leftCoord, topCoord = src.xy(top, left)
                        rightCoord, bottomCoord = src.xy(bottom, right)

                        if self.index == 0:
                            self.minBottom = bottomCoord
                            self.minLeft = leftCoord
                            self.maxTop = topCoord
                            self.maxRight = rightCoord
                            self.max_width = dst_width
                            self.max_height = dst_height
                            self.index += 1

                        else:
                            if bottomCoord < self.minBottom:
                                self.minBottom = bottomCoord
                            if leftCoord < self.minLeft:
                                self.minLeft = leftCoord
                            if rightCoord > self.maxRight:
                                self.maxRight = rightCoord
                            if topCoord > self.maxTop:
                                self.maxTop = topCoord
                            if dst_width > self.max_width:
                                self.max_width = dst_width
                            if dst_height > self.max_height:
                                self.max_height = dst_height

                        xres = meta['transform'][0]
                        yres = -meta['transform'][4]
                        dst_transform = affine.Affine(xres, 0.0, leftCoord,
                                                      0.0, -yres, topCoord)
                        vrt_options = {
                            'resampling': rst.enums.Resampling.cubic,
                            'crs': dst_crs,
                            'transform': dst_transform,
                            'height': dst_height,
                            'width': dst_width
                        }
                        with WarpedVRT(raster, **vrt_options) as vrt:
                            rio_shutil.copy(vrt,
                                            "IntRegImgs/" + name,
                                            driver='GTiff')
                            raster.close()
                    else:
                        raster = rst.open("Band4/" + name)
                        test_band = raster.read(1)
                        no_black_pix = np.where(band != 0)
                        top = np.amin(no_black_pix[0])
                        left = np.amin(no_black_pix[1])
                        bottom = np.amax(no_black_pix[0])
                        right = np.amax(no_black_pix[1])

                        dst_width = right - left
                        dst_height = bottom - top

                        leftCoord, topCoord = src.xy(top, left, offset='ul')
                        rightCoord, bottomCoord = src.xy(bottom,
                                                         right,
                                                         offset='lr')

                        if self.index == 0:
                            self.minBottom = bottomCoord
                            self.minLeft = leftCoord
                            self.maxTop = topCoord
                            self.maxRight = rightCoord
                            self.max_width = dst_width
                            self.max_height = dst_height
                            self.index += 1

                        else:
                            if bottomCoord < self.minBottom:
                                self.minBottom = bottomCoord
                            if leftCoord < self.minLeft:
                                self.minLeft = leftCoord
                            if rightCoord > self.maxRight:
                                self.maxRight = rightCoord
                            if topCoord > self.maxTop:
                                self.maxTop = topCoord
                            if dst_width > self.max_width:
                                self.max_width = dst_width
                            if dst_height > self.max_height:
                                self.max_height = dst_height

                        xres = meta['transform'][0]
                        yres = -meta['transform'][4]
                        dst_transform = affine.Affine(xres, 0.0, leftCoord,
                                                      0.0, -yres, topCoord)
                        vrt_options = {
                            'resampling': rst.enums.Resampling.cubic,
                            'crs': dst_crs,
                            'transform': dst_transform,
                            'height': dst_height,
                            'width': dst_width
                        }
                        with WarpedVRT(raster, **vrt_options) as vrt:
                            rio_shutil.copy(vrt,
                                            "IntRegImgs/" + name,
                                            driver='GTiff')
                    raster.close()
            except Exception:
                print("Could not open the file: ", name)
        else:
            raster = rst.open("IntRegImgs/" + name)
            test_band = raster.read(1)
            leftCoord, topCoord = raster.xy(0, 0)
            rightCoord, bottomCoord = raster.xy(test_band.shape[0] - 1,
                                                test_band.shape[1] - 1)
            # Pixel dimensions of this image, needed for the size comparisons below.
            dst_height, dst_width = test_band.shape

            if self.index == 0:
                self.minBottom = bottomCoord
                self.minLeft = leftCoord
                self.maxTop = topCoord
                self.maxRight = rightCoord
                self.max_width = dst_width
                self.max_height = dst_height
                self.index += 1

            else:
                if bottomCoord < self.minBottom:
                    self.minBottom = bottomCoord
                if leftCoord < self.minLeft:
                    self.minLeft = leftCoord
                if rightCoord > self.maxRight:
                    self.maxRight = rightCoord
                if topCoord > self.maxTop:
                    self.maxTop = topCoord
                if dst_width > self.max_width:
                    self.max_width = dst_width
                if dst_height > self.max_height:
                    self.max_height = dst_height
def create_overview_cogs(
    mosaic_path: str,
    output_profile: Dict,
    prefix: str = "mosaic_ovr",
    max_overview_level: int = 6,
    method: str = "first",
    config: Dict = None,
    threads=1,
    in_memory: bool = True,
) -> None:
    """
    Create Low resolution mosaic image from a mosaicJSON.

    The output will be a web-optimized COG with bounds matching the mosaicJSON bounds and
    with its resolution matching zoom level (mosaic minzoom - 1).

    Parameters
    ----------
    mosaic_path : str, required
        Mosaic definition path.
    output_profile : dict, required
        Rasterio creation options for the output COG (must include blockxsize/blockysize).
    prefix : str
        Output file prefix (default is 'mosaic_ovr').
    max_overview_level : int
        Maximum overview level used when splitting a large extent into multiple files (default is 6).
    method: str, optional
        pixel_selection method name (default is 'first').
    config : dict
        Rasterio Env options.
    threads: int, optional
        maximum number of threads to use (default is 1).
    in_memory: bool, optional
        Force COG creation in memory (default is True).

    """
    pixel_method = PIXSEL_METHODS[method]

    with MosaicBackend(mosaic_path) as mosaic:
        base_zoom = mosaic.metadata["minzoom"] - 1
        mosaic_quadkey_zoom = mosaic.quadkey_zoom
        bounds = mosaic.metadata["bounds"]
        mosaic_quadkeys = set(mosaic._quadkeys)

        # Select a random quadkey/asset and get dataset info
        tile = mercantile.quadkey_to_tile(random.sample(mosaic_quadkeys, 1)[0])
        assets = mosaic.assets_for_tile(*tile)
        info = _get_info(assets[0])

        extrema = tile_extrema(bounds, base_zoom)
        tilesize = 256
        resolution = _meters_per_pixel(base_zoom, 0, tilesize=tilesize)

        # Create multiples files if coverage is too big
        extremas = _split_extrema(extrema, max_ovr=max_overview_level)
        for ix, extrema in enumerate(extremas):
            click.echo(f"Part {1 + ix}/{len(extremas)}", err=True)
            output_path = f"{prefix}_{ix}.tif"

            blocks = list(_get_blocks(extrema, tilesize))
            random.shuffle(blocks)

            width = (extrema["x"]["max"] - extrema["x"]["min"]) * tilesize
            height = (extrema["y"]["max"] - extrema["y"]["min"]) * tilesize
            w, n = mercantile.xy(*mercantile.ul(
                extrema["x"]["min"], extrema["y"]["min"], base_zoom))

            params = dict(
                driver="GTiff",
                dtype=info["dtype"],
                count=len(info["band_descriptions"]),
                width=width,
                height=height,
                crs="epsg:3857",
                transform=Affine(resolution, 0, w, 0, -resolution, n),
                nodata=info["nodata_value"],
            )
            params.update(**output_profile)

            config = config or {}
            with rasterio.Env(**config):
                with ExitStack() as ctx:
                    if in_memory:
                        tmpfile = ctx.enter_context(MemoryFile())
                        tmp_dst = ctx.enter_context(tmpfile.open(**params))
                    else:
                        tmpfile = ctx.enter_context(
                            TemporaryRasterFile(output_path))
                        tmp_dst = ctx.enter_context(
                            rasterio.open(tmpfile.name, "w", **params))

                    def _get_tile(wind):
                        idx, window = wind
                        x = extrema["x"]["min"] + idx[1]
                        y = extrema["y"]["min"] + idx[0]
                        t = mercantile.Tile(x, y, base_zoom)

                        kds = set(find_quadkeys(t, mosaic_quadkey_zoom))
                        if not mosaic_quadkeys.intersection(kds):
                            return window, None, None

                        try:
                            (tile, mask), _ = mosaic.tile(
                                t.x,
                                t.y,
                                t.z,
                                tilesize=tilesize,
                                pixel_selection=pixel_method(),
                            )
                        except NoAssetFoundError:
                            return window, None, None

                        return window, tile, mask

                    with futures.ThreadPoolExecutor(
                            max_workers=threads) as executor:
                        future_work = [
                            executor.submit(_get_tile, item) for item in blocks
                        ]
                        with click.progressbar(
                                futures.as_completed(future_work),
                                length=len(future_work),
                                show_percent=True,
                                label="Loading tiles",
                        ) as future:
                            for res in future:
                                pass

                    for f in _filter_futures(future_work):
                        window, tile, mask = f
                        if tile is None:
                            continue

                        tmp_dst.write(tile, window=window)
                        if info["nodata_type"] == "Mask":
                            tmp_dst.write_mask(mask.astype("uint8"),
                                               window=window)

                    min_tile_size = tilesize = min(
                        int(output_profile["blockxsize"]),
                        int(output_profile["blockysize"]),
                    )
                    overview_level = get_maximum_overview_level(
                        tmp_dst.width, tmp_dst.height, minsize=min_tile_size)
                    overviews = [2**j for j in range(1, overview_level + 1)]
                    tmp_dst.build_overviews(overviews)
                    copy(tmp_dst,
                         output_path,
                         copy_src_overviews=True,
                         **params)
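A minimal invocation sketch for the helper above, assuming a local mosaic.json; the profile values and GDAL config options are illustrative only, but blockxsize/blockysize must be present because the overview level is derived from them:

output_profile = {
    "driver": "GTiff",
    "tiled": True,
    "blockxsize": 256,
    "blockysize": 256,
    "compress": "DEFLATE",
}

create_overview_cogs(
    "mosaic.json",
    output_profile,
    prefix="mosaic_ovr",
    method="first",
    config={"GDAL_NUM_THREADS": "ALL_CPUS", "GDAL_TIFF_OVR_BLOCKSIZE": "128"},
    threads=4,
    in_memory=False,
)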
Beispiel #20
0
    def close(self, exc_type=None, exc_value=None, exc_traceback=None):
        """Build overviews and write file."""
        try:
            # only in case no Exception was raised
            if not exc_type:
                # build overviews
                if self.overviews and self.dst is not None:
                    logger.debug(
                        "build overviews using %s resampling and levels %s",
                        self.overviews_resampling, self.overviews_levels)
                    self.dst.build_overviews(
                        self.overviews_levels,
                        Resampling[self.overviews_resampling])
                    self.dst.update_tags(ns='rio_overview',
                                         resampling=self.overviews_resampling)
                # write
                if self.cog:
                    if path_is_remote(self.path):
                        # remote COG: copy to tempfile and upload to destination
                        logger.debug("upload to %s", self.path)
                        # TODO: this writes a MemoryFile to disk and then uploads the file.
                        # That is inefficient, but it stays until we find a way to copy
                        # from one MemoryFile to another the rasterio way (rasterio needs
                        # to rearrange the data so the overviews sit at the beginning of
                        # the GTiff in order to be a valid COG).
                        with NamedTemporaryFile() as tmp_dst:
                            copy(self.dst,
                                 tmp_dst.name,
                                 copy_src_overviews=True,
                                 **self._profile)
                            self._bucket_resource.upload_file(
                                Filename=tmp_dst.name,
                                Key="/".join(self.path.split("/")[3:]),
                            )
                    else:
                        # local COG: copy to destination
                        logger.debug("write to %s", self.path)
                        copy(self.dst,
                             self.path,
                             copy_src_overviews=True,
                             **self._profile)
                else:
                    if path_is_remote(self.path):
                        # remote GTiff: upload memfile or tempfile to destination
                        logger.debug("upload to %s", self.path)
                        if self.in_memory:
                            self._bucket_resource.put_object(
                                Body=self._memfile,
                                Key="/".join(self.path.split("/")[3:]),
                            )
                        else:
                            self._bucket_resource.upload_file(
                                Filename=self._tempfile.name,
                                Key="/".join(self.path.split("/")[3:]),
                            )
                    else:
                        # local GTiff: already written, do nothing
                        pass

        finally:
            self._ctx.close()
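The remote-COG branch above copies the finished dataset to a temporary file and uploads it; a standalone sketch of that pattern, assuming boto3 is available (the function name and the bucket/key handling are illustrative, not the original writer's API):

import boto3
from tempfile import NamedTemporaryFile
from rasterio.shutil import copy


def upload_cog(src_dataset, profile, bucket_name, key):
    """Copy an open dataset (overviews already built) to a temporary GeoTIFF
    and upload it to S3. Bucket/key handling is a placeholder."""
    bucket = boto3.resource("s3").Bucket(bucket_name)
    with NamedTemporaryFile(suffix=".tif") as tmp:
        # copy_src_overviews=True keeps the overviews built earlier.
        copy(src_dataset, tmp.name, copy_src_overviews=True, **profile)
        bucket.upload_file(Filename=tmp.name, Key=key)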
Beispiel #21
0
def cog_translate(
    src_path,
    dst_path,
    dst_kwargs,
    indexes=None,
    nodata=None,
    add_mask=None,
    overview_level=None,
    overview_resampling="nearest",
    config=None,
    quiet=False,
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    src_path : str or PathLike object
        A dataset path or URL. Will be opened in "r" mode.
    dst_path : str or Path-like object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        output dataset creation options.
    indexes : tuple, int, optional
        Raster band indexes to copy.
    nodata : int, optional
        Overwrite nodata masking values for input dataset.
    add_mask : bool, optional
        Force output dataset creation with a mask.
    overview_level : int, optional (default: 6)
        COGEO overview (decimation) level
    overview_resampling : str, optional (default: "nearest")
        Resampling algorithm for overviews
    config : dict
        Rasterio Env options.
    quiet: bool, optional (default: False)
        Suppress processing-step output.

    """
    config = config or {}

    if overview_level is None:
        overview_level = get_maximum_overview_level(
            src_path,
            min(int(dst_kwargs["blockxsize"]), int(dst_kwargs["blockysize"])))

    with rasterio.Env(**config):
        with rasterio.open(src_path) as src_dst:
            meta = src_dst.meta
            indexes = indexes if indexes else src_dst.indexes
            nodata = nodata if nodata is not None else src_dst.nodata
            alpha = has_alpha_band(src_dst)
            mask = has_mask_band(src_dst)

            if not add_mask and (
                (nodata is not None or alpha)
                    and dst_kwargs.get("compress") in ["JPEG", "jpeg"]):
                warnings.warn(
                    "Using lossy compression with Nodata or Alpha band "
                    "can results in unwanted artefacts.",
                    LossyCompression,
                )

            vrt_params = dict(add_alpha=True)

            if nodata is not None:
                vrt_params.update(
                    dict(nodata=nodata, add_alpha=False, src_nodata=nodata))

            if alpha:
                vrt_params.update(dict(add_alpha=False))

            with WarpedVRT(src_dst, **vrt_params) as vrt_dst:
                meta = vrt_dst.meta
                meta["count"] = len(indexes)

                if add_mask:
                    meta.pop("nodata", None)
                    meta.pop("alpha", None)

                meta.update(**dst_kwargs)
                meta.pop("compress", None)
                meta.pop("photometric", None)

                with MemoryFile() as memfile:
                    with memfile.open(**meta) as mem:
                        wind = list(mem.block_windows(1))

                        if not quiet:
                            click.echo("Reading input: {}".format(src_path),
                                       err=True)
                        fout = os.devnull if quiet else sys.stderr
                        with click.progressbar(wind,
                                               length=len(wind),
                                               file=fout,
                                               show_percent=True) as windows:
                            for ij, w in windows:
                                matrix = vrt_dst.read(window=w,
                                                      indexes=indexes)
                                mem.write(matrix, window=w)

                                if add_mask or mask:
                                    mask_value = vrt_dst.dataset_mask(window=w)
                                    mem.write_mask(mask_value, window=w)

                        if not quiet:
                            click.echo("Adding overviews...", err=True)
                        overviews = [
                            2**j for j in range(1, overview_level + 1)
                        ]
                        mem.build_overviews(overviews,
                                            Resampling[overview_resampling])

                        if not quiet:
                            click.echo("Updating dataset tags...", err=True)

                        for i, b in enumerate(indexes):
                            mem.set_band_description(
                                i + 1, src_dst.descriptions[b - 1])

                        tags = src_dst.tags()
                        tags.update(
                            dict(OVR_RESAMPLING_ALG=Resampling[
                                overview_resampling].name.upper()))
                        mem.update_tags(**tags)

                        if not quiet:
                            click.echo(
                                "Writing output to: {}".format(dst_path),
                                err=True)
                        copy(mem,
                             dst_path,
                             copy_src_overviews=True,
                             **dst_kwargs)
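A usage sketch for this version of cog_translate, assuming a local input file; the deflate profile below is illustrative, and blockxsize/blockysize are required because the default overview level is computed from them:

dst_profile = {
    "driver": "GTiff",
    "interleave": "pixel",
    "tiled": True,
    "blockxsize": 512,
    "blockysize": 512,
    "compress": "DEFLATE",
}

cog_translate(
    "input.tif",
    "output_cog.tif",
    dst_profile,
    overview_resampling="bilinear",
    config={"GDAL_TIFF_OVR_BLOCKSIZE": "128"},
)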
Beispiel #22
0
    def save_file(self, path=None):
        if path is None:
            path = self.path
        shutil.copy(self.writable_file, path, driver='GTiff')
        return True
Beispiel #23
0
    def save_file_as_georeferenced(self, path=None):
        if path is None:
            path = self.path
        shutil.copy(self.file, path, driver='GTiff')
        return True
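The driver='GTiff' keyword shows that shutil here is rasterio's rasterio.shutil rather than the standard library (stdlib shutil.copy takes no driver argument). A minimal standalone equivalent, with paths as placeholders:

import rasterio
from rasterio import shutil as rio_shutil

# Copy an opened dataset to a GeoTIFF at the requested location,
# mirroring what save_file()/save_file_as_georeferenced() do internally.
with rasterio.open("source.tif") as src:
    rio_shutil.copy(src, "output.tif", driver="GTiff")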
Beispiel #24
0
def _optimize_single_raster(input_file: Path, output_folder: Path,
                            reproject: bool, rs_method: Any,
                            in_memory: Union[bool, None], compression: str,
                            quiet: bool, progress_suffix: str) -> None:
    output_file = _output_file(output_folder, input_file)

    if not quiet:
        click.echo(f'\r{input_file.name} ... {progress_suffix}')

    with contextlib.ExitStack() as es, warnings.catch_warnings():
        warnings.filterwarnings('ignore',
                                message='invalid value encountered.*')

        src = es.enter_context(rasterio.open(str(input_file)))

        if reproject:
            vrt = es.enter_context(_get_vrt(src, rs_method=rs_method))
        else:
            vrt = src

        profile = vrt.profile.copy()
        profile.update(COG_PROFILE)

        if in_memory is None:
            in_memory = vrt.width * vrt.height < IN_MEMORY_THRESHOLD

        if in_memory:
            memfile = es.enter_context(MemoryFile())
            dst = es.enter_context(memfile.open(**profile))
        else:
            tempraster = es.enter_context(
                TemporaryRasterFile(basedir=output_folder))
            dst = es.enter_context(rasterio.open(tempraster, 'w', **profile))

        # iterate over blocks
        windows = list(dst.block_windows(1))

        for _, w in windows:
            block_data = vrt.read(window=w, indexes=[1])
            dst.write(block_data, window=w)
            block_mask = vrt.dataset_mask(window=w).astype('uint8')
            dst.write_mask(block_mask, window=w)

        # add overviews
        if not in_memory:
            # work around bug mapbox/rasterio#1497
            dst.close()
            dst = es.enter_context(rasterio.open(tempraster, 'r+'))

        max_overview_level = math.ceil(
            math.log2(
                max(dst.height // profile['blockysize'],
                    dst.width // profile['blockxsize'], 1)))

        if max_overview_level > 0:
            overviews = [2**j for j in range(1, max_overview_level + 1)]
            dst.build_overviews(overviews, rs_method)

            dst.update_tags(ns='rio_overview', resampling=rs_method.value)

        # copy to destination (this is necessary to push overviews to start of file)
        copy(dst,
             str(output_file),
             copy_src_overviews=True,
             compress=compression,
             **COG_PROFILE)
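The overview-level arithmetic above can be checked by hand; a small sketch assuming a 10 000 x 8 000 pixel raster with 256-pixel blocks:

import math

width, height = 10_000, 8_000
blockxsize = blockysize = 256

# max(10000 // 256, 8000 // 256, 1) == max(39, 31, 1) == 39
# ceil(log2(39)) == 6, so the overview factors are 2, 4, 8, 16, 32 and 64.
max_overview_level = math.ceil(
    math.log2(max(height // blockysize, width // blockxsize, 1)))
overviews = [2**j for j in range(1, max_overview_level + 1)]
assert max_overview_level == 6
assert overviews == [2, 4, 8, 16, 32, 64]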
Beispiel #25
0
def to_vrt(data,
           filename,
           overwrite=False,
           resampling=None,
           nodata=None,
           init_dest_nodata=True,
           warp_mem_limit=128):

    """
    Writes an ``xarray.DataArray`` to a VRT file

    Args:
        data (DataArray): The ``xarray.DataArray`` to write.
        filename (str): The output file name to write to.
        overwrite (Optional[bool]): Whether to overwrite an existing VRT file.
        resampling (Optional[object]): The resampling algorithm for ``rasterio.vrt.WarpedVRT``. Default is 'nearest'.
        nodata (Optional[float or int]): The 'no data' value for ``rasterio.vrt.WarpedVRT``.
        init_dest_nodata (Optional[bool]): Whether or not to initialize output to ``nodata`` for ``rasterio.vrt.WarpedVRT``.
        warp_mem_limit (Optional[int]): The GDAL memory limit for ``rasterio.vrt.WarpedVRT``.

    Example:
        >>> import geowombat as gw
        >>> from rasterio.enums import Resampling
        >>>
        >>> # Transform a CRS and save to VRT
        >>> with gw.config.update(ref_crs=102033):
        >>>     with gw.open('image.tif') as src:
        >>>         gw.to_vrt(src,
        >>>                   'output.vrt',
        >>>                   resampling=Resampling.cubic,
        >>>                   warp_mem_limit=256)
        >>>
        >>> # Load multiple files set to a common geographic extent
        >>> bounds = (left, bottom, right, top)
        >>> with gw.config.update(ref_bounds=bounds):
        >>>     with gw.open(['image1.tif', 'image2.tif'], mosaic=True) as src:
        >>>         gw.to_vrt(src, 'output.vrt')
    """

    if Path(filename).is_file():

        if overwrite:
            Path(filename).unlink()
        else:
            logger.warning(f'  The VRT file {filename} already exists.')
            return

    if not resampling:
        resampling = Resampling.nearest

    if isinstance(data.attrs['filename'], str) or isinstance(data.attrs['filename'], Path):

        # Open the input file on disk
        with rio.open(data.attrs['filename']) as src:

            with WarpedVRT(src,
                           src_crs=src.crs,                         # the original CRS
                           crs=data.crs,                            # the transformed CRS
                           src_transform=src.gw.transform,             # the original transform
                           transform=data.gw.transform,                # the new transform
                           dtype=data.dtype,
                           resampling=resampling,
                           nodata=nodata,
                           init_dest_nodata=init_dest_nodata,
                           warp_mem_limit=warp_mem_limit) as vrt:

                rio_shutil.copy(vrt, filename, driver='VRT')

    else:

        if not data.gw.filenames:
            logger.exception('  The data filenames attribute is empty. Use gw.open(..., persist_filenames=True).')
            raise KeyError

        separate = True if data.gw.data_are_separate and data.gw.data_are_stacked else False

        vrt_options = gdal.BuildVRTOptions(outputBounds=data.gw.bounds,
                                           xRes=data.gw.cellx,
                                           yRes=data.gw.celly,
                                           separate=separate,
                                           outputSRS=data.crs)

        ds = gdal.BuildVRT(filename, data.gw.filenames, options=vrt_options)

        ds = None
Beispiel #26
0
import sys
import gc

import numpy as np
import rasterio as rio
from rasterio import shutil  # rasterio's copy(), used below with a driver keyword

fname1 = sys.argv[1]
fname2 = sys.argv[2]
fname3 = sys.argv[3]

#Open up our data files:
a = rio.open(fname1, mode='r', driver='ISIS3')
a1 = a.read()[0]

b = rio.open(fname2, mode='r', driver='ISIS3')
b1 = b.read()[0]
b.close()
#keeping the memory footprint as small as possible by deallocating the input
#objects and running garbage collection
del b

#Create dummy cube to hold merged data
shutil.copy(a, fname3, driver='ISIS3')
a.close()
del a
gc.collect()

#Mask arrays so we don't run into overflow errors with null values
a1 = np.ma.masked_where(a1 < 0, a1)
b1 = np.ma.masked_where(b1 < 0, b1)

#Trying to process the entire array at once tends to exhaust memory,
#so we're going to take a divide-and-conquer approach by processing
#the array in smaller segments.

array_subdivs_x = np.linspace(0, a1.shape[1], num=8, dtype=int)
array_subdivs_y = np.linspace(0, a1.shape[0], num=4, dtype=int)
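A self-contained sketch of how such a subdivision grid is typically consumed, processing one segment at a time to keep peak memory low; np.ma.maximum stands in for whatever merge rule the rest of the script applies, since that part is not shown:

import numpy as np

# Synthetic masked arrays standing in for the two ISIS3 bands above.
a1 = np.ma.masked_less(np.random.randint(-1, 10, (400, 800)), 0)
b1 = np.ma.masked_less(np.random.randint(-1, 10, (400, 800)), 0)

subdivs_x = np.linspace(0, a1.shape[1], num=8, dtype=int)
subdivs_y = np.linspace(0, a1.shape[0], num=4, dtype=int)

for y0, y1 in zip(subdivs_y[:-1], subdivs_y[1:]):
    for x0, x1 in zip(subdivs_x[:-1], subdivs_x[1:]):
        # Merge one block at a time; the placeholder rule keeps the
        # per-pixel maximum while respecting the masks.
        a1[y0:y1, x0:x1] = np.ma.maximum(a1[y0:y1, x0:x1],
                                         b1[y0:y1, x0:x1])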
Beispiel #27
0
def cog_translate(
    source,
    dst_path,
    dst_kwargs,
    indexes=None,
    nodata=None,
    dtype=None,
    add_mask=None,
    overview_level=None,
    overview_resampling="nearest",
    web_optimized=False,
    latitude_adjustment=True,
    resampling="nearest",
    in_memory=None,
    config=None,
    allow_intermediate_compression=False,
    forward_band_tags=False,
    quiet=False,
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    source : str, PathLike object or rasterio.io.DatasetReader
        A dataset path, URL or rasterio.io.DatasetReader object.
        Will be opened in "r" mode.
    dst_path : str or Path-like object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        Output dataset creation options.
    indexes : tuple or int, optional
        Raster band indexes to copy.
    nodata : int, optional
        Overwrite nodata masking values for input dataset.
    dtype: str, optional
        Overwrite output data type. Default will be the input data type.
    add_mask : bool, optional
        Force output dataset creation with a mask.
    overview_level : int, optional (default: 6)
        COGEO overview (decimation) level
    overview_resampling : str, optional (default: "nearest")
        Resampling algorithm for overviews
    web_optimized: bool, optional (default: False)
        Create web-optimized cogeo.
    latitude_adjustment: bool, optional (default: True)
        Use mercator meters for zoom calculation or ensure max zoom equality.
    resampling : str, optional (default: "nearest")
        Resampling algorithm.
    in_memory: bool, optional
        Force processing raster in memory (default: process in memory if small)
    config : dict
        Rasterio Env options.
    allow_intermediate_compression: bool, optional (default: False)
        Allow intermediate file compression to reduce memory/disk footprint.
        Note: This could reduce the speed of the process.
        Ref: https://github.com/cogeotiff/rio-cogeo/issues/103
    forward_band_tags:  bool, optional
        Forward band tags to output bands.
        Ref: https://github.com/cogeotiff/rio-cogeo/issues/19
    quiet: bool, optional (default: False)
        Suppress processing-step output.

    """
    if isinstance(indexes, int):
        indexes = (indexes, )

    config = config or {}
    with rasterio.Env(**config):
        with ExitStack() as ctx:
            if isinstance(source, (DatasetReader, DatasetWriter, WarpedVRT)):
                src_dst = source
            else:
                src_dst = ctx.enter_context(rasterio.open(source))

            meta = src_dst.meta
            indexes = indexes if indexes else src_dst.indexes
            nodata = nodata if nodata is not None else src_dst.nodata
            dtype = dtype if dtype else src_dst.dtypes[0]
            alpha = has_alpha_band(src_dst)
            mask = has_mask_band(src_dst)

            if not add_mask and (
                (nodata is not None or alpha)
                    and dst_kwargs.get("compress") in ["JPEG", "jpeg"]):
                warnings.warn(
                    "Using lossy compression with Nodata or Alpha band "
                    "can results in unwanted artefacts.",
                    LossyCompression,
                )

            tilesize = min(int(dst_kwargs["blockxsize"]),
                           int(dst_kwargs["blockysize"]))

            if src_dst.width < tilesize or src_dst.height < tilesize:
                tilesize = 2**int(
                    math.log(min(src_dst.width, src_dst.height), 2))
                if tilesize < 64:
                    warnings.warn(
                        "Raster has dimension < 64px. Output COG cannot be tiled"
                        " and overviews cannot be added.",
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs.pop("blockxsize", None)
                    dst_kwargs.pop("blockysize", None)
                    dst_kwargs.pop("tiled")
                    overview_level = 0

                else:
                    warnings.warn(
                        "Block Size are bigger than raster sizes. "
                        "Setting blocksize to {}".format(tilesize),
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs["blockxsize"] = tilesize
                    dst_kwargs["blockysize"] = tilesize

            vrt_params = dict(add_alpha=True, dtype=dtype)

            if nodata is not None:
                vrt_params.update(
                    dict(nodata=nodata, add_alpha=False, src_nodata=nodata))

            if alpha:
                vrt_params.update(dict(add_alpha=False))

            if web_optimized:
                bounds = list(
                    transform_bounds(*[src_dst.crs, "epsg:4326"] +
                                     list(src_dst.bounds),
                                     densify_pts=21))
                center = [(bounds[0] + bounds[2]) / 2,
                          (bounds[1] + bounds[3]) / 2]

                lat = 0 if latitude_adjustment else center[1]
                max_zoom = get_max_zoom(src_dst, lat=lat, tilesize=tilesize)

                extrema = tile_extrema(bounds, max_zoom)
                w, n = mercantile.xy(*mercantile.ul(
                    extrema["x"]["min"], extrema["y"]["min"], max_zoom))
                vrt_res = _meters_per_pixel(max_zoom, 0, tilesize=tilesize)
                vrt_transform = Affine(vrt_res, 0, w, 0, -vrt_res, n)

                vrt_width = (extrema["x"]["max"] -
                             extrema["x"]["min"]) * tilesize
                vrt_height = (extrema["y"]["max"] -
                              extrema["y"]["min"]) * tilesize

                vrt_params.update(
                    dict(
                        crs="epsg:3857",
                        transform=vrt_transform,
                        width=vrt_width,
                        height=vrt_height,
                        resampling=ResamplingEnums[resampling],
                    ))

            with WarpedVRT(src_dst, **vrt_params) as vrt_dst:
                meta = vrt_dst.meta
                meta["count"] = len(indexes)

                if add_mask:
                    meta.pop("nodata", None)
                    meta.pop("alpha", None)

                if (dst_kwargs.get("photometric", "").upper() == "YCBCR"
                        and meta["count"] == 1):
                    warnings.warn(
                        "PHOTOMETRIC=YCBCR not supported on a 1-band raster"
                        " and has been set to 'MINISBLACK'")
                    dst_kwargs["photometric"] = "MINISBLACK"

                meta.update(**dst_kwargs)
                if not allow_intermediate_compression:
                    meta.pop("compress", None)
                    meta.pop("photometric", None)

                if in_memory is None:
                    in_memory = vrt_dst.width * vrt_dst.height < IN_MEMORY_THRESHOLD

                if in_memory:
                    tmpfile = ctx.enter_context(MemoryFile())
                    tmp_dst = ctx.enter_context(tmpfile.open(**meta))
                else:
                    tmpfile = ctx.enter_context(TemporaryRasterFile(dst_path))
                    tmp_dst = ctx.enter_context(
                        rasterio.open(tmpfile.name, "w", **meta))

                # Transfer color interpolation
                if len(indexes) == 1 and (vrt_dst.colorinterp[indexes[0] - 1]
                                          is not ColorInterp.palette):
                    tmp_dst.colorinterp = [ColorInterp.gray]
                else:
                    tmp_dst.colorinterp = [
                        vrt_dst.colorinterp[b - 1] for b in indexes
                    ]

                if tmp_dst.colorinterp[0] is ColorInterp.palette:
                    try:
                        tmp_dst.write_colormap(1, vrt_dst.colormap(1))
                    except ValueError:
                        warnings.warn(
                            "Dataset has `Palette` color interpretation"
                            " but is missing colormap information")

                wind = list(tmp_dst.block_windows(1))

                if not quiet:
                    click.echo("Reading input: {}".format(source), err=True)
                fout = os.devnull if quiet else sys.stderr
                with click.progressbar(wind,
                                       length=len(wind),
                                       file=fout,
                                       show_percent=True) as windows:
                    for ij, w in windows:
                        matrix = vrt_dst.read(window=w, indexes=indexes)
                        tmp_dst.write(matrix, window=w)

                        if add_mask or mask:
                            # Cast mask to uint8 to fix rasterio 1.1.2 error (ref #115)
                            mask_value = vrt_dst.dataset_mask(
                                window=w).astype("uint8")
                            tmp_dst.write_mask(mask_value, window=w)

                if overview_level is None:
                    overview_level = get_maximum_overview_level(
                        vrt_dst, tilesize)

                if not quiet and overview_level:
                    click.echo("Adding overviews...", err=True)

                overviews = [2**j for j in range(1, overview_level + 1)]
                tmp_dst.build_overviews(overviews,
                                        ResamplingEnums[overview_resampling])

                if not quiet:
                    click.echo("Updating dataset tags...", err=True)

                for i, b in enumerate(indexes):
                    tmp_dst.set_band_description(i + 1,
                                                 src_dst.descriptions[b - 1])
                    if forward_band_tags:
                        tmp_dst.update_tags(i + 1, **src_dst.tags(b))

                tags = src_dst.tags()
                tags.update(
                    dict(
                        OVR_RESAMPLING_ALG=ResamplingEnums[overview_resampling]
                        .name.upper()))
                tmp_dst.update_tags(**tags)
                tmp_dst._set_all_scales(
                    [vrt_dst.scales[b - 1] for b in indexes])
                tmp_dst._set_all_offsets(
                    [vrt_dst.offsets[b - 1] for b in indexes])

                if not quiet:
                    click.echo("Writing output to: {}".format(dst_path),
                               err=True)
                copy(tmp_dst, dst_path, copy_src_overviews=True, **dst_kwargs)
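A usage sketch for this revision, with a placeholder three-band input; the JPEG/YCbCr profile is illustrative, and add_mask avoids the lossy-compression warning by writing an internal mask instead of relying on nodata:

jpeg_profile = {
    "driver": "GTiff",
    "interleave": "pixel",
    "tiled": True,
    "blockxsize": 256,
    "blockysize": 256,
    "compress": "JPEG",
    "photometric": "YCbCr",
}

cog_translate(
    "scene_rgb.tif",
    "scene_rgb_webcog.tif",
    jpeg_profile,
    indexes=(1, 2, 3),
    add_mask=True,
    web_optimized=True,      # align the output to the WebMercator tiling grid
    overview_resampling="bilinear",
)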
Beispiel #28
0
    def processAlgorithmRasterio(self, parameters, context, feedback):
        """
        Here is where the processing itself takes place.
        """
        import rasterio
        from osgeo import gdal
        from rasterio import shutil as rio_shutil
        from rasterio.vrt import WarpedVRT

        def get_inputfilepath(layer):
            return os.path.realpath(layer.source().split("|layername")[0])

        img_ref = get_inputfilepath(
            self.parameterAsRasterLayer(parameters, self.IMG_REF, context))
        file_in = get_inputfilepath(
            self.parameterAsRasterLayer(parameters, self.INPUT, context))
        output_file = self.parameterAsOutputLayer(parameters, self.OUTPUT,
                                                  context)

        feedback.pushInfo("Co-registration:")
        feedback.pushInfo("\nProcessing file: " + file_in)

        # extract some info
        with rasterio.open(img_ref) as target:
            dst_crs = target.crs
            x_res, y_res = target.res
            vrt_options = {
                'crs': target.crs,
                'transform': target.transform,
                'height': target.height,
                'width': target.width,
                'nodata': target.nodata
            }
        with rasterio.open(file_in) as src:
            src_crs = src.crs

        # ----- reprojection
        if src_crs != dst_crs:
            feedback.pushInfo(
                "--> reprojection is required, to CRS: {}".format(dst_crs))
            # reproject
            reprj_file_tmp = tempfile.NamedTemporaryFile(suffix=".tif",
                                                         delete=True)
            reprj_file = reprj_file_tmp.name
            resample = gdal.GRA_NearestNeighbour
            gdal.Warp(reprj_file,
                      file_in,
                      srcSRS=src_crs,
                      dstSRS=dst_crs,
                      xRes=x_res,
                      yRes=y_res,
                      resampleAlg=resample)
        else:
            reprj_file_tmp = False
            reprj_file = file_in

        # ----- set extent and align pixels based on PU
        feedback.pushInfo("--> set extent and align pixels")
        if target.nodata is not None:
            feedback.pushInfo("--> nodata as: {}".format(target.nodata))

        with rasterio.open(reprj_file) as src:
            with WarpedVRT(src, **vrt_options) as vrt:
                rio_shutil.copy(vrt, output_file, driver='GTiff')

        feedback.pushInfo("--> done\n")

        if reprj_file_tmp:
            reprj_file_tmp.close()

        return {self.OUTPUT: output_file}
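Outside of QGIS, the core of this co-registration step reduces to warping one raster onto the grid of a reference raster; a minimal standalone sketch with file names as placeholders:

import rasterio
from rasterio import shutil as rio_shutil
from rasterio.vrt import WarpedVRT

# The reference raster defines the target CRS, transform, shape and nodata.
with rasterio.open("reference.tif") as ref:
    vrt_options = {
        "crs": ref.crs,
        "transform": ref.transform,
        "height": ref.height,
        "width": ref.width,
        "nodata": ref.nodata,
    }

with rasterio.open("input.tif") as src:
    with WarpedVRT(src, **vrt_options) as vrt:
        # Pixel-aligned copy of the input on the reference grid.
        rio_shutil.copy(vrt, "input_aligned.tif", driver="GTiff")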
Beispiel #29
0
def cog_translate(  # noqa: C901
    source: Union[str, pathlib.PurePath, DatasetReader, DatasetWriter,
                  WarpedVRT],
    dst_path: Union[str, pathlib.PurePath],
    dst_kwargs: Dict,
    indexes: Optional[Sequence[int]] = None,
    nodata: Optional[Union[str, int, float]] = None,
    dtype: Optional[str] = None,
    add_mask: bool = False,
    overview_level: Optional[int] = None,
    overview_resampling: str = "nearest",
    web_optimized: bool = False,
    tms: Optional[morecantile.TileMatrixSet] = None,
    zoom_level_strategy: str = "auto",
    zoom_level: Optional[int] = None,
    aligned_levels: Optional[int] = None,
    resampling: str = "nearest",
    in_memory: Optional[bool] = None,
    config: Optional[Dict] = None,
    allow_intermediate_compression: bool = False,
    forward_band_tags: bool = False,
    quiet: bool = False,
    temporary_compression: str = "DEFLATE",
    colormap: Optional[Dict] = None,
    additional_cog_metadata: Optional[Dict] = None,
    use_cog_driver: bool = False,
):
    """
    Create Cloud Optimized Geotiff.

    Parameters
    ----------
    source : str, PathLike object or rasterio.io.DatasetReader
        A dataset path, URL or rasterio.io.DatasetReader object.
        Will be opened in "r" mode.
    dst_path : str or PathLike object
        An output dataset path or PathLike object.
        Will be opened in "w" mode.
    dst_kwargs: dict
        Output dataset creation options.
    indexes : tuple or int, optional
        Raster band indexes to copy.
    nodata : int, optional
        Overwrite nodata masking values for input dataset.
    dtype: str, optional
        Overwrite output data type. Default will be the input data type.
    add_mask : bool, optional
        Force output dataset creation with a mask.
    overview_level : int, optional (default: None)
        COGEO overview (decimation) level. By default, inferred from data size.
    overview_resampling : str, optional (default: "nearest")
        Resampling algorithm for overviews
    web_optimized: bool, optional (default: False)
        Create web-optimized cogeo.
    tms: morecantile.TileMatrixSet, optional (default: "WebMercatorQuad")
        TileMatrixSet to use for reprojection, resolution and alignment.
    zoom_level_strategy: str, optional (default: auto)
        Strategy to determine zoom level (same as in GDAL 3.2).
        LOWER will select the zoom level immediately below the theoretically computed non-integral zoom level, leading to subsampling.
        On the contrary, UPPER will select the immediately above zoom level, leading to oversampling.
        Defaults to AUTO which selects the closest zoom level.
        ref: https://gdal.org/drivers/raster/cog.html#raster-cog
    zoom_level: int, optional.
        Zoom level number (starting at 0 for coarsest zoom level). If this option is specified, `--zoom-level-strategy` is ignored.
    aligned_levels: int, optional.
        Number of overview levels for which GeoTIFF tiles and tiles defined in the tiling scheme match.
        Default is to use the maximum overview levels. Note: GDAL uses the number of resolution levels instead of overview levels.
    resampling : str, optional (default: "nearest")
        Resampling algorithm.
    in_memory: bool, optional
        Force processing raster in memory (default: process in memory if small)
    config : dict
        Rasterio Env options.
    allow_intermediate_compression: bool, optional (default: False)
        Allow intermediate file compression to reduce memory/disk footprint.
        Note: This could reduce the speed of the process.
        Ref: https://github.com/cogeotiff/rio-cogeo/issues/103
    forward_band_tags:  bool, optional
        Forward band tags to output bands.
        Ref: https://github.com/cogeotiff/rio-cogeo/issues/19
    quiet: bool, optional (default: False)
        Suppress processing-step output.
    temporary_compression: str, optional
        Compression used for the intermediate file, default is deflate.
    colormap: dict, optional
        Overwrite or add a colormap to the output COG.
    additional_cog_metadata: dict, optional
        Additional dataset metadata to add to the COG.
    use_cog_driver: bool, optional (default: False)
        Use GDAL COG driver if set to True. COG driver is available starting with GDAL 3.1.

    """
    tms = tms or morecantile.tms.get("WebMercatorQuad")

    dst_kwargs = dst_kwargs.copy()

    if isinstance(indexes, int):
        indexes = (indexes, )

    config = config or {}
    with rasterio.Env(**config):
        with ExitStack() as ctx:
            if isinstance(source, (DatasetReader, DatasetWriter, WarpedVRT)):
                src_dst = source
            else:
                src_dst = ctx.enter_context(rasterio.open(source))

            meta = src_dst.meta
            indexes = indexes if indexes else src_dst.indexes
            nodata = nodata if nodata is not None else src_dst.nodata
            dtype = dtype if dtype else src_dst.dtypes[0]
            alpha = utils.has_alpha_band(src_dst)
            mask = utils.has_mask_band(src_dst)

            if colormap and len(indexes) > 1:
                raise IncompatibleOptions(
                    "Cannot add a colormap for multiple bands data.")

            if not add_mask and (
                (nodata is not None or alpha)
                    and dst_kwargs.get("compress", "").lower() == "jpeg"):
                warnings.warn(
                    "Nodata/Alpha band will be translated to an internal mask band.",
                )
                add_mask = True
                indexes = (utils.non_alpha_indexes(src_dst)
                           if len(indexes) not in [1, 3] else indexes)

            tilesize = min(int(dst_kwargs["blockxsize"]),
                           int(dst_kwargs["blockysize"]))

            if src_dst.width < tilesize or src_dst.height < tilesize:
                tilesize = 2**int(
                    math.log(min(src_dst.width, src_dst.height), 2))
                if tilesize < 64:
                    warnings.warn(
                        "Raster has dimension < 64px. Output COG cannot be tiled"
                        " and overviews cannot be added.",
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs.pop("blockxsize", None)
                    dst_kwargs.pop("blockysize", None)
                    dst_kwargs.pop("tiled")
                    overview_level = 0

                else:
                    warnings.warn(
                        "Block Size are bigger than raster sizes. "
                        "Setting blocksize to {}".format(tilesize),
                        IncompatibleBlockRasterSize,
                    )
                    dst_kwargs["blockxsize"] = tilesize
                    dst_kwargs["blockysize"] = tilesize

            vrt_params = {
                "add_alpha": True,
                "dtype": dtype,
                "width": src_dst.width,
                "height": src_dst.height,
                "resampling": ResamplingEnums[resampling],
            }

            if nodata is not None:
                vrt_params.update(
                    dict(nodata=nodata, add_alpha=False, src_nodata=nodata))

            if alpha:
                vrt_params.update(dict(add_alpha=False))

            if web_optimized and not use_cog_driver:
                params = utils.get_web_optimized_params(
                    src_dst,
                    zoom_level_strategy=zoom_level_strategy,
                    zoom_level=zoom_level,
                    aligned_levels=aligned_levels,
                    tms=tms,
                )
                vrt_params.update(**params)

            with WarpedVRT(src_dst, **vrt_params) as vrt_dst:
                meta = vrt_dst.meta
                meta["count"] = len(indexes)

                if add_mask:
                    meta.pop("nodata", None)
                    meta.pop("alpha", None)

                if (dst_kwargs.get("photometric", "").upper() == "YCBCR"
                        and meta["count"] == 1):
                    warnings.warn(
                        "PHOTOMETRIC=YCBCR not supported on a 1-band raster"
                        " and has been set to 'MINISBLACK'")
                    dst_kwargs["photometric"] = "MINISBLACK"

                meta.update(**dst_kwargs)
                meta.pop("compress", None)
                meta.pop("photometric", None)

                if allow_intermediate_compression:
                    meta["compress"] = temporary_compression

                if in_memory is None:
                    in_memory = vrt_dst.width * vrt_dst.height < IN_MEMORY_THRESHOLD

                if in_memory:
                    tmpfile = ctx.enter_context(MemoryFile())
                    tmp_dst = ctx.enter_context(tmpfile.open(**meta))
                else:
                    tmpfile = ctx.enter_context(TemporaryRasterFile(dst_path))
                    tmp_dst = ctx.enter_context(
                        rasterio.open(tmpfile.name, "w", **meta))

                # Transfer color interpolation
                if len(indexes) == 1 and (vrt_dst.colorinterp[indexes[0] - 1]
                                          is not ColorInterp.palette):
                    tmp_dst.colorinterp = [ColorInterp.gray]
                else:
                    tmp_dst.colorinterp = [
                        vrt_dst.colorinterp[b - 1] for b in indexes
                    ]

                if colormap:
                    if tmp_dst.colorinterp[0] is not ColorInterp.palette:
                        tmp_dst.colorinterp = [ColorInterp.palette]
                        warnings.warn(
                            "Dataset color interpretation was set to `Palette`"
                        )
                    tmp_dst.write_colormap(1, colormap)

                elif tmp_dst.colorinterp[0] is ColorInterp.palette:
                    try:
                        tmp_dst.write_colormap(1, vrt_dst.colormap(1))
                    except ValueError:
                        warnings.warn(
                            "Dataset has `Palette` color interpretation"
                            " but is missing colormap information")

                wind = list(tmp_dst.block_windows(1))

                if not quiet:
                    click.echo("Reading input: {}".format(source), err=True)

                fout = ctx.enter_context(open(os.devnull,
                                              "w")) if quiet else sys.stderr
                with click.progressbar(
                        wind, file=fout,
                        show_percent=True) as windows:  # type: ignore
                    for _, w in windows:
                        matrix = vrt_dst.read(window=w, indexes=indexes)
                        tmp_dst.write(matrix, window=w)

                        if add_mask or mask:
                            # Cast mask to uint8 to fix rasterio 1.1.2 error (ref #115)
                            mask_value = vrt_dst.dataset_mask(
                                window=w).astype("uint8")
                            tmp_dst.write_mask(mask_value, window=w)

                if overview_level is None:
                    overview_level = get_maximum_overview_level(
                        vrt_dst.width, vrt_dst.height, minsize=tilesize)

                if not quiet and overview_level:
                    click.echo("Adding overviews...", err=True)

                overviews = [2**j for j in range(1, overview_level + 1)]
                tmp_dst.build_overviews(overviews,
                                        ResamplingEnums[overview_resampling])

                if not quiet:
                    click.echo("Updating dataset tags...", err=True)

                for i, b in enumerate(indexes):
                    tmp_dst.set_band_description(i + 1,
                                                 src_dst.descriptions[b - 1])
                    if forward_band_tags:
                        tmp_dst.update_tags(i + 1, **src_dst.tags(b))

                tags = src_dst.tags()
                tags.update(
                    dict(
                        OVR_RESAMPLING_ALG=ResamplingEnums[overview_resampling]
                        .name.upper()))
                if additional_cog_metadata:
                    tags.update(**additional_cog_metadata)

                if web_optimized and not use_cog_driver:
                    default_zoom = tms.zoom_for_res(
                        max(tmp_dst.res),
                        max_z=30,
                        zoom_level_strategy=zoom_level_strategy,
                    )
                    dst_kwargs.update({
                        "@TILING_SCHEME_NAME":
                        tms.identifier,
                        "@TILING_SCHEME_ZOOM_LEVEL":
                        zoom_level if zoom_level is not None else default_zoom,
                    })

                    if aligned_levels:
                        dst_kwargs.update(
                            {"@TILING_SCHEME_ALIGNED_LEVELS": aligned_levels})

                tmp_dst.update_tags(**tags)
                tmp_dst._set_all_scales(
                    [vrt_dst.scales[b - 1] for b in indexes])
                tmp_dst._set_all_offsets(
                    [vrt_dst.offsets[b - 1] for b in indexes])

                if not quiet:
                    click.echo("Writing output to: {}".format(dst_path),
                               err=True)

                if use_cog_driver:
                    if not GDALVersion.runtime().at_least("3.1"):
                        raise Exception(
                            "GDAL 3.1 or above required to use the COG driver."
                        )

                    dst_kwargs["driver"] = "COG"
                    if web_optimized:
                        dst_kwargs["TILING_SCHEME"] = (
                            "GoogleMapsCompatible" if tms.identifier
                            == "WebMercatorQuad" else tms.identifier)

                        if zoom_level is not None:
                            if not GDALVersion.runtime().at_least("3.5"):
                                warnings.warn(
                                    "ZOOM_LEVEL option is only available with GDAL >3.5."
                                )

                            dst_kwargs["ZOOM_LEVEL"] = zoom_level

                        dst_kwargs["ZOOM_LEVEL_STRATEGY"] = zoom_level_strategy

                        if aligned_levels is not None:
                            # GDAL uses Number of resolution (not overviews)
                            # See https://github.com/OSGeo/gdal/issues/5336#issuecomment-1042946603
                            dst_kwargs["aligned_levels"] = aligned_levels + 1

                    if add_mask and dst_kwargs.get("compress", "") != "JPEG":
                        warnings.warn(
                            "With GDAL COG driver, mask band will be translated to an alpha band."
                        )

                    dst_kwargs["overview_resampling"] = overview_resampling
                    dst_kwargs["warp_resampling"] = resampling
                    dst_kwargs["blocksize"] = tilesize
                    dst_kwargs.pop("blockxsize", None)
                    dst_kwargs.pop("blockysize", None)
                    dst_kwargs.pop("tiled", None)
                    dst_kwargs.pop("interleave", None)
                    dst_kwargs.pop("photometric", None)

                    copy(tmp_dst, dst_path, **dst_kwargs)

                else:
                    copy(tmp_dst,
                         dst_path,
                         copy_src_overviews=True,
                         **dst_kwargs)
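A usage sketch for this latest signature, assuming rio-cogeo's cog_profiles helper is importable and using placeholder paths; use_cog_driver requires GDAL 3.1 or later, as checked above:

from rio_cogeo.profiles import cog_profiles

profile = cog_profiles.get("deflate")
profile.update({"blockxsize": 256, "blockysize": 256})

cog_translate(
    "input.tif",
    "output_cog.tif",
    profile,
    web_optimized=True,
    zoom_level_strategy="auto",
    use_cog_driver=True,
    quiet=True,
)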