Пример #1
0
def raster_get_nodata_value(
    raster: Union[List[Union[gdal.Dataset, str]], gdal.Dataset, str],
) -> Union[List[Optional[Number]], Optional[Number]]:
    """Get the nodata value of a raster or a from a list of rasters.

    Args:
        raster (path | raster | list): The raster(s) to retrieve nodata values from.

    Returns:
        Returns the nodata value from a raster or a list of rasters
    """
    type_check(raster, [list, str, gdal.Dataset], "raster")

    raster_paths = get_raster_path(raster, return_list=True)

    collected = []
    for path in raster_paths:
        if not is_raster(path):
            raise ValueError(f"Input raster is invalid: {path}")

        metadata = raster_to_metadata(path)

        if not isinstance(metadata, dict):
            raise Exception("Metadata is in the wrong format.")

        collected.append(metadata["nodata_value"])

    # Mirror the input shape: list in -> list out, single raster -> single value.
    return collected if isinstance(raster, list) else collected[0]
Пример #2
0
def raster_to_metadata(
    raster: Union[List[Union[str, gdal.Dataset]], str, gdal.Dataset],
    create_geometry: bool = False,
) -> Union[Metadata_raster, List[Metadata_raster]]:
    """Reads a raster from a list of rasters, string or a dataset and returns metadata.

    Args:
        raster (list, path | Dataset): The raster(s) to calculate metadata for.

    **kwargs:
        create_geometry (bool): If True, footprints of the raster are calculated,
        including in latlng (wgs84). Requires a reprojection check. Do not use if
        not required and performance is essential. Produces geojsons as well.

    Returns:
        A dictionary containing metadata about the raster, or a list of such
        dictionaries if a list was passed in.

    Raises:
        TypeError: If an input is not a readable raster.
    """
    # Bug fix: the signature and body accept lists, but the original
    # type_check only allowed [str, gdal.Dataset] and rejected list input.
    type_check(raster, [list, str, gdal.Dataset], "raster")
    type_check(create_geometry, [bool], "create_geometry")

    input_list = get_raster_path(raster, return_list=True)
    return_list = []

    for readied_raster in input_list:
        if not is_raster(readied_raster):
            raise TypeError(f"Input: {readied_raster} is not a raster.")

        return_list.append(
            _raster_to_metadata(readied_raster,
                                create_geometry=create_geometry))

    if isinstance(raster, list):
        return return_list

    return return_list[0]
Пример #3
0
def raster_has_nodata_value(
    raster: Union[List[Union[gdal.Dataset, str]], gdal.Dataset, str],
) -> Union[bool, List[bool]]:
    """Check if a raster or a list of rasters contain nodata values.

    Args:
        raster (path | raster | list): The raster(s) to check for nodata values.

    Returns:
        True if input raster has nodata values. If a list is the input, the output
        is a list of booleans indicating if the input raster has nodata values.

    Raises:
        ValueError: If an input is not a readable raster.
    """
    type_check(raster, [list, str, gdal.Dataset], "raster")

    nodata_values = []
    rasters = get_raster_path(raster, return_list=True)

    for internal_raster in rasters:
        if not is_raster(internal_raster):
            raise ValueError(f"Input raster is invalid: {internal_raster}")

        raster_metadata = raster_to_metadata(internal_raster)

        if not isinstance(raster_metadata, dict):
            raise Exception("Metadata is in the wrong format.")

        # Idiomatic: append the boolean expression directly instead of
        # branching to append literal True/False.
        nodata_values.append(raster_metadata["nodata_value"] is not None)

    if isinstance(raster, list):
        return nodata_values

    return nodata_values[0]
Пример #4
0
def open_vector(
        vector: Union[str, ogr.DataSource, gdal.Dataset],
        convert_mem_driver: bool = True,
        writeable: bool = True,
        layer: int = -1,
        where: tuple = (),
) -> ogr.DataSource:
    """Opens a vector to an ogr.DataSource class.

    If a raster is passed in, a datasource containing the raster's footprint
    polygon is returned instead.

    Args:
        vector (path | datasource): A path to a vector or an ogr datasource.

        convert_mem_driver (bool): Converts MEM driver vectors to /vsimem/ geopackage.

        writeable (bool): Should the opened vector be writeable.

        layer (int): Index of a single layer to copy when converting a MEM
        datasource; -1 keeps all layers.

        where (tuple): Currently unused; kept for backwards compatibility.

    Returns:
        An ogr.DataSource.
    """
    type_check(vector, [str, ogr.DataSource, gdal.Dataset], "vector")
    type_check(convert_mem_driver, [bool], "convert_mem_driver")
    type_check(writeable, [bool], "writeable")
    type_check(layer, [int], "layer")

    try:
        opened: Optional[ogr.DataSource] = None
        if is_vector(vector):
            gdal.PushErrorHandler("CPLQuietErrorHandler")

            if isinstance(vector, str):
                opened = ogr.Open(vector, 1) if writeable else ogr.Open(
                    vector, 0)
            elif isinstance(vector, ogr.DataSource):
                opened = vector
            else:
                raise Exception(f"Could not read input vector: {vector}")

            gdal.PopErrorHandler()
        elif is_raster(vector):
            # Build a vector footprint from the raster's geotransform.
            temp_opened: Optional[gdal.Dataset] = None
            if isinstance(vector, str):
                gdal.PushErrorHandler("CPLQuietErrorHandler")

                temp_opened = (gdal.Open(vector, 1)
                               if writeable else gdal.Open(vector, 0))

                gdal.PopErrorHandler()
            elif isinstance(vector, gdal.Dataset):
                temp_opened = vector
            else:
                raise Exception(f"Could not read input vector: {vector}")

            projection: osr.SpatialReference = osr.SpatialReference()
            projection.ImportFromWkt(temp_opened.GetProjection())
            transform: List[Number] = temp_opened.GetGeoTransform()

            width: int = temp_opened.RasterXSize
            height: int = temp_opened.RasterYSize

            x_min: Number = transform[0]
            y_max: Number = transform[3]

            # Include the rotation/skew terms of the geotransform, not just
            # the pixel sizes.
            x_max = x_min + width * transform[1] + height * transform[2]
            y_min = y_max + width * transform[4] + height * transform[5]

            bottom_left = [x_min, y_min]
            top_left = [x_min, y_max]
            top_right = [x_max, y_max]
            bottom_right = [x_max, y_min]

            coord_array = [
                [bottom_left[1], bottom_left[0]],
                [top_left[1], top_left[0]],
                [top_right[1], top_right[0]],
                [bottom_right[1], bottom_right[0]],
                [bottom_left[1], bottom_left[0]],
            ]

            # coord_array holds (lat, lng); WKT expects "x y" => "lng lat".
            wkt_coords = ", ".join(
                f"{coord[1]} {coord[0]}" for coord in coord_array)

            extent_wkt = f"POLYGON (({wkt_coords}))"

            extent_name = f"/vsimem/{uuid4().int}_extent.GPKG"

            extent_driver = ogr.GetDriverByName("GPKG")
            extent_ds = extent_driver.CreateDataSource(extent_name)
            extent_layer = extent_ds.CreateLayer(f"auto_extent_{uuid4().int}",
                                                 projection, ogr.wkbPolygon)

            feature = ogr.Feature(extent_layer.GetLayerDefn())
            extent_geom = ogr.CreateGeometryFromWkt(extent_wkt, projection)
            feature.SetGeometry(extent_geom)
            extent_layer.CreateFeature(feature)
            feature = None

            opened = extent_ds
        else:
            raise Exception(f"Could not read input vector: {vector}")
    except Exception as err:
        # Bug fix: was a bare "except:" that discarded the original error;
        # chain it so the root cause stays visible in the traceback.
        raise Exception(f"Could not read input vector: {vector}") from err

    if opened is None:
        raise Exception(f"Could not read input vector: {vector}")

    driver: ogr.Driver = opened.GetDriver()

    # Bug fix: the None-check must run *before* dereferencing the driver;
    # the original called driver.GetName() first, defeating the guard.
    if driver is None:
        raise Exception("Unable to parse the driver of vector.")

    driver_name: str = driver.GetName()

    if layer != -1:
        layer_count = opened.GetLayerCount()

        if layer > layer_count - 1:
            raise Exception(f"Requested a non-existing layer: {layer}")

        if layer_count > 1:
            driver_name = "Memory"

    if convert_mem_driver and driver_name == "Memory":
        path = opened.GetDescription()
        basename = os.path.basename(path)
        name = os.path.splitext(basename)[0]
        vector_name = f"/vsimem/{name}_{uuid4().int}.gpkg"
        # Bug fix: gdal.GetDriverByName returns a gdal.Driver, which has no
        # CreateDataSource/CopyDataSource; the ogr driver is required here.
        gpkg_driver = ogr.GetDriverByName("GPKG")

        if layer != -1:
            # Bug fix: the original overwrote "opened" before fetching the
            # requested layer, so GetLayerByIndex ran on the new, empty
            # datasource. Fetch the source layer first, then copy it.
            original_layer = opened.GetLayerByIndex(layer)
            converted = gpkg_driver.CreateDataSource(vector_name)
            converted.CopyLayer(original_layer,
                                original_layer.GetDescription(),
                                ["OVERWRITE=YES"])
            opened = converted
        else:
            # ogr.Driver copies datasources via CopyDataSource (CreateCopy
            # does not exist on the vector driver API).
            opened = gpkg_driver.CopyDataSource(opened, vector_name)

    return opened
Пример #5
0
def internal_clip_vector(
    vector: Union[str, ogr.DataSource],
    clip_geom: Union[str, ogr.DataSource, gdal.Dataset],
    out_path: Optional[str] = None,
    process_layer: int = 0,
    process_layer_clip: int = 0,
    to_extent: bool = False,
    target_projection: Optional[Union[str, ogr.DataSource, gdal.Dataset,
                                      osr.SpatialReference, int]] = None,
    preserve_fid: bool = True,
) -> str:
    """Clips a vector to a geometry.

    Args:
        vector (path | datasource): The vector to clip.

        clip_geom (path | datasource | raster): The geometry to clip with.
        A raster input is converted to its extent polygon.

    **kwargs:
        out_path (str): Output path; a /vsimem/ geopackage if None.

        process_layer (int): Layer index of the input vector to process.

        process_layer_clip (int): Layer index of the clip geometry to use.

        to_extent (bool): Clip with the extent of clip_geom rather than its shape.

        target_projection: Optional projection to reproject the result to.

        preserve_fid (bool): Keep the original feature ids in the output.

    Returns:
        The path to the clipped vector.
    """
    type_check(vector, [str, ogr.DataSource], "vector")
    type_check(clip_geom, [ogr.DataSource, gdal.Dataset, str, list, tuple],
               "clip_geom")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(process_layer, [int], "process_layer")
    type_check(process_layer_clip, [int], "process_layer_clip")
    type_check(to_extent, [bool], "to_extent")
    type_check(
        target_projection,
        [str, ogr.DataSource, gdal.Dataset, osr.SpatialReference, int],
        "target_projection",
        allow_none=True,
    )
    type_check(preserve_fid, [bool], "preserve_fid")

    out_format = ".gpkg"
    out_target = f"/vsimem/clipped_{uuid4().int}{out_format}"

    if out_path is not None:
        out_target = out_path
        out_format = path_to_driver_vector(out_path)

    options = []

    geometry_to_clip = None
    if is_vector(clip_geom):
        if to_extent:
            extent = internal_vector_to_metadata(
                clip_geom, create_geometry=True)["extent_datasource"]
            geometry_to_clip = internal_vector_to_memory(extent)
        else:
            geometry_to_clip = open_vector(clip_geom, layer=process_layer_clip)
    elif is_raster(clip_geom):
        # Rasters can only contribute their extent as the clip geometry.
        extent = internal_raster_to_metadata(
            clip_geom, create_geometry=True)["extent_datasource"]
        geometry_to_clip = internal_vector_to_memory(extent)
    else:
        raise ValueError(
            f"Invalid input in clip_geom, unable to parse: {clip_geom}")

    clip_vector_path = internal_vector_to_metadata(geometry_to_clip)["path"]
    options.append(f"-clipsrc {clip_vector_path}")

    if preserve_fid:
        options.append("-preserve_fid")
    else:
        options.append("-unsetFid")

    out_projection = None
    if target_projection is not None:
        out_projection = parse_projection(target_projection, return_wkt=True)
        options.append(f"-t_srs {out_projection}")

    origin = open_vector(vector, layer=process_layer)

    # dst  # src
    success = gdal.VectorTranslate(
        out_target,
        get_vector_path(origin),
        format=out_format,
        options=" ".join(options),
    )

    # Bug fix: gdal.VectorTranslate returns a Dataset on success and None on
    # failure -- not an exit code. The old "success != 0" comparison was True
    # even for None, so failures were silently reported as successes.
    if success is None:
        raise Exception("Error while clipping geometry.")

    return out_target
Пример #6
0
def _warp_raster(
    raster: Union[str, gdal.Dataset],
    out_path: Optional[str] = None,
    projection: Optional[Union[int, str, gdal.Dataset, ogr.DataSource,
                               osr.SpatialReference]] = None,
    clip_geom: Optional[Union[str, ogr.DataSource]] = None,
    target_size: Optional[Union[Tuple[Number], Number]] = None,
    target_in_pixels: bool = False,
    resample_alg: str = "nearest",
    crop_to_geom: bool = True,
    all_touch: bool = True,
    adjust_bbox: bool = True,
    overwrite: bool = True,
    creation_options: Union[list, None] = None,
    src_nodata: Union[str, int, float] = "infer",
    dst_nodata: Union[str, int, float] = "infer",
    layer_to_clip: int = 0,
    prefix: str = "",
    postfix: str = "_resampled",
) -> str:
    """WARNING: INTERNAL. DO NOT USE.

    Warps a single raster: optional reprojection, optional clip to a
    vector/raster geometry, optional resampling. Returns the output path.
    """
    raster_list, path_list = ready_io_raster(raster, out_path, overwrite,
                                             prefix, postfix)

    origin = open_raster(raster_list[0])
    out_name = path_list[0]
    raster_metadata = raster_to_metadata(origin, create_geometry=True)

    # options
    warp_options = []
    if all_touch:
        warp_options.append("CUTLINE_ALL_TOUCHED=TRUE")
    else:
        warp_options.append("CUTLINE_ALL_TOUCHED=FALSE")

    origin_projection: osr.SpatialReference = raster_metadata["projection_osr"]
    origin_extent: ogr.Geometry = raster_metadata["extent_geom_latlng"]

    target_projection = origin_projection
    if projection is not None:
        target_projection = parse_projection(projection)

    # Bug fix: clip_ds and output_bounds were only assigned inside the
    # clip_geom branch but referenced unconditionally at gdal.Warp below,
    # raising NameError whenever clip_geom was None.
    clip_ds = None
    output_bounds = None

    if clip_geom is not None:
        if is_raster(clip_geom):
            # A raster clip geometry contributes its extent polygon.
            opened_raster = open_raster(clip_geom)
            clip_metadata_raster = raster_to_metadata(opened_raster,
                                                      create_geometry=True)
            clip_ds = clip_metadata_raster["extent_datasource"]
            clip_metadata = internal_vector_to_metadata(clip_ds,
                                                        create_geometry=True)
        elif is_vector(clip_geom):
            clip_ds = open_vector(clip_geom)
            clip_metadata = internal_vector_to_metadata(clip_ds,
                                                        create_geometry=True)
        else:
            if file_exists(clip_geom):
                raise ValueError(f"Unable to parse clip geometry: {clip_geom}")
            else:
                raise ValueError(f"Unable to find clip geometry {clip_geom}")

        if layer_to_clip > (clip_metadata["layer_count"] - 1):
            raise ValueError("Requested an unable layer_to_clip.")

        clip_projection = clip_metadata["projection_osr"]
        clip_extent = clip_metadata["extent_geom_latlng"]

        # Fast check: Does the extent of the two inputs overlap?
        if not origin_extent.Intersects(clip_extent):
            raise Exception("Clipping geometry did not intersect raster.")

        # Check if projections match, otherwise reproject target geom.
        if not target_projection.IsSame(clip_projection):
            clip_metadata["extent"] = reproject_extent(
                clip_metadata["extent"],
                clip_projection,
                target_projection,
            )

        # The extent needs to be reprojected to the target.
        # this ensures that adjust_bbox works.
        x_min_og, y_max_og, x_max_og, y_min_og = reproject_extent(
            raster_metadata["extent"],
            origin_projection,
            target_projection,
        )
        # gdal.Warp expects bounds as (x_min, y_min, x_max, y_max).
        output_bounds = (x_min_og, y_min_og, x_max_og, y_max_og)

        if crop_to_geom:

            if adjust_bbox:
                # Snap the clip bbox to the raster's pixel grid.
                output_bounds = align_bbox(
                    raster_metadata["extent"],
                    clip_metadata["extent"],
                    raster_metadata["pixel_width"],
                    raster_metadata["pixel_height"],
                    warp_format=True,
                )

            else:
                x_min_og, y_max_og, x_max_og, y_min_og = clip_metadata[
                    "extent"]
                output_bounds = (
                    x_min_og,
                    y_min_og,
                    x_max_og,
                    y_max_og,
                )  # gdal_warp format

        if clip_metadata["layer_count"] > 1:
            # Cutlines must be single-layer; extract the requested one.
            clip_ds = vector_to_memory(
                clip_ds,
                memory_path=f"clip_geom_{uuid4().int}.gpkg",
                layer_to_extract=layer_to_clip,
            )
        elif not isinstance(clip_ds, str):
            clip_ds = vector_to_memory(
                clip_ds,
                memory_path=f"clip_geom_{uuid4().int}.gpkg",
            )

        if clip_ds is None:
            raise ValueError(f"Unable to parse input clip geom: {clip_geom}")

    x_res, y_res, x_pixels, y_pixels = raster_size_from_list(
        target_size, target_in_pixels)

    out_format = path_to_driver_raster(out_name)
    out_creation_options = default_options(creation_options)

    # nodata
    # NOTE(review): with the default src_nodata="infer" this first branch is
    # always taken (the string is not None), so the dst_nodata "infer" path
    # below looks unreachable by default -- confirm this is intended.
    out_nodata = None
    if src_nodata is not None:
        out_nodata = raster_metadata["nodata_value"]
    else:
        if dst_nodata == "infer":
            out_nodata = gdal_nodata_value_from_type(
                raster_metadata["datatype_gdal_raw"])
        else:
            out_nodata = dst_nodata

    # Removes file if it exists and overwrite is True.
    remove_if_overwrite(out_path, overwrite)

    warped = gdal.Warp(
        out_name,
        origin,
        xRes=x_res,
        yRes=y_res,
        width=x_pixels,
        height=y_pixels,
        cutlineDSName=clip_ds,
        outputBounds=output_bounds,
        format=out_format,
        srcSRS=origin_projection,
        dstSRS=target_projection,
        resampleAlg=translate_resample_method(resample_alg),
        creationOptions=out_creation_options,
        warpOptions=warp_options,
        # NOTE(review): the literal string "infer" is forwarded to srcNodata
        # when the caller leaves the default -- verify gdal.Warp tolerates it.
        srcNodata=src_nodata,
        dstNodata=out_nodata,
        targetAlignedPixels=False,
        cropToCutline=False,
        multithread=True,
    )

    if warped is None:
        raise Exception(f"Error while warping raster: {raster}")

    return out_name
Пример #7
0
def download_s2_tile(
    scihub_username,
    scihub_password,
    onda_username,
    onda_password,
    destination,
    aoi_vector,
    date_start="20200601",
    date_end="20210101",
    clouds=10,
    producttype="S2MSI2A",
    tile=None,
    retry_count=10,
    retry_wait_min=30,
    retry_current=0,
    retry_downloaded=None,
    api_url="http://apihub.copernicus.eu/apihub",
):
    """Downloads Sentinel-2 tiles matching an AOI/tile and date range.

    Queries SciHub for matching products, downloads each from the ONDA
    mirror, and retries (sleeping retry_wait_min minutes between attempts)
    until everything is fetched or retry_count is exhausted.

    Returns:
        A list of paths to the downloaded zip files.
    """
    # Bug fix: the default was a shared mutable list ([]), which accumulates
    # state across calls. Use a None sentinel instead.
    if retry_downloaded is None:
        retry_downloaded = []

    print("Downloading Sentinel-2 tiles")
    try:
        api = SentinelAPI(scihub_username,
                          scihub_password,
                          api_url,
                          timeout=60)
    except Exception as e:
        print(e)
        raise Exception("Error connecting to SciHub")

    if is_vector(aoi_vector):
        geom = internal_vector_to_metadata(aoi_vector, create_geometry=True)
    elif is_raster(aoi_vector):
        geom = raster_to_metadata(aoi_vector, create_geometry=True)
    else:
        # Bug fix: previously fell through with "geom" unassigned, raising
        # NameError on the next line instead of a clear error.
        raise ValueError(f"Invalid AOI input: {aoi_vector}")

    geom_extent = geom["extent_wkt_latlng"]

    download_products = OrderedDict()
    download_ids = []

    date = (date_start, date_end)

    if tile is not None and tile != "":
        kw = {"raw": f"tileid:{tile} OR filename:*_T{tile}_*"}

        try:
            products = api.query(
                date=date,
                platformname="Sentinel-2",
                cloudcoverpercentage=(0, clouds),
                # Bug fix: was hard-coded to "S2MSI2A", silently ignoring
                # the producttype parameter in the tile branch.
                producttype=producttype,
                timeout=60,
                **kw,
            )
        except Exception as e:
            print(e)
            raise Exception("Error connecting to SciHub")
    else:
        try:
            products = api.query(
                geom_extent,
                date=date,
                platformname="Sentinel-2",
                cloudcoverpercentage=(0, clouds),
                producttype=producttype,
            )

        except Exception as e:
            print(e)
            raise Exception("Error connecting to SciHub")

    for product in products:
        dic = products[product]

        # Tile id is embedded in the title, e.g. ..._T32VNJ_... -> 32VNJ.
        product_tile = dic["title"].split("_")[-2][1:]
        if (tile is not None and tile != "") and product_tile != tile:
            continue

        download_products[product] = dic
        download_ids.append(product)

    print(f"Downloading {len(download_products)} tiles")

    downloaded = [] + retry_downloaded
    for img_id in download_ids:
        out_path = destination + download_products[img_id]["filename"] + ".zip"

        if out_path in downloaded:
            continue

        # /footprint url for.
        download_url = (
            f"https://catalogue.onda-dias.eu/dias-catalogue/Products({img_id})/$value"
        )

        try:
            content_size = get_content_size(download_url,
                                            auth=(onda_username,
                                                  onda_password))
        except Exception as e:
            print(f"Failed to get content size for {img_id}")
            print(e)
            continue

        try:
            if content_size > 0:

                # Skip files already fully present on disk.
                if os.path.isfile(
                        out_path) and content_size == os.path.getsize(
                            out_path):
                    downloaded.append(out_path)
                    print(f"Skipping {img_id}")
                else:
                    print(f"Downloading: {img_id}")
                    download(
                        download_url,
                        out_path,
                        auth=HTTPBasicAuth(onda_username, onda_password),
                        verbose=False,
                        skip_if_exists=True,
                    )

                    downloaded.append(out_path)
            else:
                # Zero content size means the product is archived; ordering
                # it triggers restoration for a later retry pass.
                print("Requesting from archive. Not downloaded.")
                order_url = f"https://catalogue.onda-dias.eu/dias-catalogue/Products({img_id})/Ens.Order"
                order(order_url, auth=(onda_username, onda_password))

        except Exception as e:
            print(f"Error downloading {img_id}: {e}")

    if len(downloaded) >= len(download_ids):
        return downloaded
    elif retry_current < retry_count:
        print(
            f"Retrying {retry_current}/{retry_count}. Sleeping for {retry_wait_min} minutes."
        )
        sleep(retry_wait_min * 60)
        # Bug fix: the recursive result was discarded (no return), so the
        # retry path returned None; api_url was also not propagated.
        return download_s2_tile(
            scihub_username,
            scihub_password,
            onda_username,
            onda_password,
            destination,
            aoi_vector,
            date_start=date_start,
            date_end=date_end,
            clouds=clouds,
            producttype=producttype,
            tile=tile,
            retry_count=retry_count,
            retry_wait_min=retry_wait_min,
            retry_current=retry_current + 1,
            retry_downloaded=retry_downloaded + downloaded,
            api_url=api_url,
        )
    else:
        return retry_downloaded + downloaded
Пример #8
0
def rasters_are_aligned(
    rasters: List[Union[str, gdal.Dataset]],
    same_extent: bool = False,
    same_dtype: bool = False,
    same_nodata: bool = False,
    threshold: float = 0.001,
) -> bool:
    """Verifies if a list of rasters are aligned.

    Args:
        rasters (list): A list of raster, either in gdal.Dataset or a string
        refering to the dataset.

    **kwargs:
        same_extent (bool): Should all the rasters have the same extent?

        same_dtype (bool): Should all the rasters have the same data type?

        same_nodata (bool): Should all the rasters have the same nodata value?

        threshold (float): Maximum absolute difference tolerated when comparing
        pixel sizes and origin coordinates.

    Returns:
        True if rasters are aligned and optional parameters are True, False
        otherwise.

    Raises:
        ValueError: If the list is empty or an input is not a valid raster.
    """
    type_check(rasters, [list], "rasters")
    type_check(same_extent, [bool], "same_extent")
    type_check(same_dtype, [bool], "same_dtype")
    type_check(same_nodata, [bool], "same_nodata")

    # Robustness: an empty list previously returned True vacuously.
    if len(rasters) == 0:
        raise ValueError("Input is an empty list of rasters.")

    if len(rasters) == 1:
        if not is_raster(rasters[0]):
            raise ValueError(f"Input raster is invalid. {rasters[0]}")

        return True

    base: Metadata_raster_comp = {}

    # Attributes compared numerically against the threshold; duplicated
    # per-attribute if/print/return blocks collapsed into one loop.
    threshold_attributes = ("pixel_width", "pixel_height", "x_min", "y_max")

    for index, raster in enumerate(rasters):
        meta = _raster_to_metadata(raster)

        if index == 0:
            # The first raster becomes the reference for all comparisons.
            for key in ("name", "projection", "pixel_width", "pixel_height",
                        "x_min", "y_max", "transform", "width", "height",
                        "datatype", "nodata_value"):
                base[key] = meta[key]
            continue

        if meta["projection"] != base["projection"]:
            print(base["name"] + " did not match " + meta["name"] +
                  " projection")
            return False

        for attribute in threshold_attributes:
            if meta[attribute] != base[attribute]:
                if abs(meta[attribute] - base[attribute]) > threshold:
                    print(base["name"] + " did not match " + meta["name"] +
                          " " + attribute)
                    return False

        if same_extent:
            if meta["transform"] != base["transform"]:
                return False
            if meta["height"] != base["height"]:
                return False
            if meta["width"] != base["width"]:
                return False

        if same_dtype:
            if meta["datatype"] != base["datatype"]:
                return False

        if same_nodata:
            if meta["nodata_value"] != base["nodata_value"]:
                return False

    return True
Пример #9
0
def align_rasters(
    rasters: List[Union[str, gdal.Dataset]],
    out_path: Optional[Union[List[str], str]] = None,
    master: Optional[Union[gdal.Dataset, str]] = None,
    postfix: str = "_aligned",
    bounding_box: Union[str, gdal.Dataset, ogr.DataSource, list,
                        tuple] = "intersection",
    resample_alg: str = "nearest",
    target_size: Optional[Union[tuple, list, int, float, str,
                                gdal.Dataset]] = None,
    target_in_pixels: bool = False,
    projection: Optional[Union[int, str, gdal.Dataset, ogr.DataSource,
                               osr.SpatialReference]] = None,
    overwrite: bool = True,
    creation_options: Optional[list] = None,
    src_nodata: Optional[Union[str, int, float]] = "infer",
    dst_nodata: Optional[Union[str, int, float]] = "infer",
    prefix: str = "",
    ram: int = 8000,
    skip_existing: bool = False,
) -> List[str]:
    """Align a list of rasters to a shared projection, pixel size and extent.

    Args:
        rasters (list): The rasters (paths or datasets) to align.

    **kwargs:
        out_path (list | str | None): Output path(s). Inferred if None.
        master (raster | str | None): If supplied, projection, pixel size and
            bounds are all copied from this raster and override inference.
        postfix (str): Appended to the output names.
        bounding_box (str | raster | vector | list | tuple): "intersection" or
            "union" of the input extents, a raster/vector whose extent is used,
            or an explicit (x_min, y_min, x_max, y_max) in the target
            projection.
        resample_alg (str): Resampling algorithm passed to gdal.Warp.
        target_size (tuple | list | number | raster | None): Target pixel
            size, or a raster whose pixel size should be copied.
        target_in_pixels (bool): Interpret target_size as pixel counts.
        projection (int | str | raster | vector | osr | None): Target
            projection. Defaults to the most common input projection.
        overwrite (bool): Overwrite existing output files.
        creation_options (list | None): GDAL creation options.
        src_nodata (str | number | None): Source nodata; "infer" reads it from
            each raster (falling back to a default for the datatype).
        dst_nodata (str | number | None): Destination nodata; "infer" copies
            the resolved source nodata.
        prefix (str): Prepended to the output names.
        ram (int): gdal.Warp memory limit.
        skip_existing (bool): Reuse outputs that already exist on disk.

    Returns:
        A list of paths to the aligned rasters.

    Raises:
        ValueError: If an input cannot be parsed.
        Exception: If target values cannot be prepared, warping fails, or the
            outputs end up misaligned.
    """
    # Avoid the shared mutable default ([]) for creation_options.
    creation_options = [] if creation_options is None else creation_options

    type_check(rasters, [list], "rasters")
    type_check(out_path, [list, str], "out_path", allow_none=True)
    # BUGFIX: previously checked [list, str], rejecting gdal.Dataset masters.
    type_check(master, [gdal.Dataset, str], "master", allow_none=True)
    type_check(bounding_box, [str, gdal.Dataset, ogr.DataSource, list, tuple],
               "bounding_box")
    type_check(resample_alg, [str], "resample_alg")
    type_check(
        target_size,
        [tuple, list, int, float, str, gdal.Dataset],
        "target_size",
        allow_none=True,
    )
    # BUGFIX: target_in_pixels was checked against the projection type list
    # (copy-paste error) and projection itself was never checked.
    type_check(target_in_pixels, [bool], "target_in_pixels")
    type_check(
        projection,
        [int, str, gdal.Dataset, ogr.DataSource, osr.SpatialReference],
        "projection",
        allow_none=True,
    )
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")
    type_check(src_nodata, [str, int, float], "src_nodata", allow_none=True)
    type_check(dst_nodata, [str, int, float], "dst_nodata", allow_none=True)
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")

    raster_list, path_list = ready_io_raster(
        rasters,
        out_path,
        overwrite=overwrite,
        prefix=prefix,
        postfix=postfix,
        uuid=False,
    )

    x_pixels = None
    y_pixels = None
    x_res = None
    y_res = None
    target_projection = None
    target_bounds = None

    reprojected_rasters: List[str] = []

    # Read the metadata for each raster.
    # Catalogue the used projections, to choose the most common one if necessary.
    used_projections: List[str] = []
    metadata: List[dict] = []

    for raster in rasters:
        meta = raster_to_metadata(raster)
        metadata.append(meta)
        used_projections.append(meta["projection"])

    # If there is a master layer, copy information from that layer.
    if master is not None:
        master_metadata = raster_to_metadata(master)

        target_projection = master_metadata["projection_osr"]
        x_min, y_max, x_max, y_min = master_metadata["extent"]

        # Set the target values.
        target_bounds = (x_min, y_min, x_max, y_max)
        x_res = master_metadata["pixel_width"]
        y_res = master_metadata["pixel_height"]
        x_pixels = master_metadata["width"]
        y_pixels = master_metadata["height"]
        target_size = (x_res, y_res)

        target_in_pixels = False

    # We allow overwrite of parameters specifically set.
    # Handle projection
    if projection is not None:
        target_projection = parse_projection(projection)

    # If no projection is specified, either from master or parameters,
    # the most common one among the inputs is chosen.
    elif target_projection is None:

        # Count how often each projection occurs.
        projection_counter: dict = {}
        for proj in used_projections:
            projection_counter[proj] = projection_counter.get(proj, 0) + 1

        # Sort projections by frequency, most common first.
        most_common_projection = sorted(projection_counter,
                                        key=projection_counter.get,
                                        reverse=True)

        target_projection = parse_projection(most_common_projection[0])

    if target_size is not None:

        # If a raster is input, use its pixel size as target values.
        if isinstance(target_size, (gdal.Dataset, str)):
            if isinstance(target_size, str) and not is_raster(target_size):
                raise ValueError(
                    f"Unable to parse the raster used for target_size: {target_size}"
                )

            # Reprojection is necessary to ensure the correct pixel_size
            reprojected_target_size = internal_reproject_raster(
                target_size, target_projection)
            target_size_raster = raster_to_metadata(reprojected_target_size)

            # Set the target values.
            # BUGFIX: previously read "width"/"height" (pixel counts) into
            # the resolution variables instead of the actual pixel sizes.
            x_res = target_size_raster["pixel_width"]
            y_res = target_size_raster["pixel_height"]
        else:
            # If a list, tuple, int or float is passed. Turn them into target values.
            x_res, y_res, x_pixels, y_pixels = raster_size_from_list(
                target_size, target_in_pixels)

    # If nothing has been specified, we will infer the pixel_size based on the median of all input rasters.
    elif x_res is None and y_res is None and x_pixels is None and y_pixels is None:

        # Ready numpy arrays for insertion
        x_res_arr = np.empty(len(raster_list), dtype="float32")
        y_res_arr = np.empty(len(raster_list), dtype="float32")

        for index, raster in enumerate(raster_list):
            # It is necessary to reproject each raster, as pixel height and width might be different after projection.
            reprojected = internal_reproject_raster(raster, target_projection)
            target_size_raster = raster_to_metadata(reprojected)

            # Add the pixel sizes to the numpy arrays
            x_res_arr[index] = target_size_raster["pixel_width"]
            y_res_arr[index] = target_size_raster["pixel_height"]

            # Keep track of the reprojected arrays so we only reproject rasters once.
            reprojected_rasters.append(reprojected)

        # Use the median values of pixel sizes as target values.
        x_res = np.median(x_res_arr)
        y_res = np.median(y_res_arr)

    if target_bounds is None:

        # If a bounding box is supplied, simply use that one. It must be in the target projection.
        if isinstance(bounding_box, (list, tuple)):
            if len(bounding_box) != 4:
                raise ValueError(
                    "bounding_box as a list/tuple must have 4 values.")
            target_bounds = bounding_box

        # If the bounding box is a raster. Take the extent and reproject it to the target projection.
        elif is_raster(bounding_box):
            reprojected_bbox_raster = raster_to_metadata(
                internal_reproject_raster(bounding_box, target_projection))

            x_min, y_max, x_max, y_min = reprojected_bbox_raster["extent"]

            # add to target values.
            target_bounds = (x_min, y_min, x_max, y_max)

        # If the bounding box is a vector. Take the extent and reproject it to the target projection.
        elif is_vector(bounding_box):
            reprojected_bbox_vector = internal_vector_to_metadata(
                internal_reproject_vector(bounding_box, target_projection))

            x_min, y_max, x_max, y_min = reprojected_bbox_vector["extent"]

            # add to target values.
            target_bounds = (x_min, y_min, x_max, y_max)

        # If the bounding box is a string, we either take the union or the intersection of all the
        # bounding boxes of the input rasters.
        elif isinstance(bounding_box, str):
            if bounding_box == "intersection" or bounding_box == "union":
                extents = []

                # If the rasters have not been reprojected, reproject them now.
                if len(reprojected_rasters) != len(raster_list):
                    reprojected_rasters = []

                    for raster in raster_list:
                        raster_metadata = raster_to_metadata(raster)

                        if raster_metadata["projection_osr"].IsSame(
                                target_projection):
                            reprojected_rasters.append(raster)
                        else:
                            reprojected = internal_reproject_raster(
                                raster, target_projection)
                            reprojected_rasters.append(reprojected)

                # Add the extents of the reprojected rasters to the extents list.
                for reprojected_raster in reprojected_rasters:
                    reprojected_raster_metadata = dict(
                        raster_to_metadata(reprojected_raster))
                    extents.append(reprojected_raster_metadata["extent"])

                # Placeholder values
                x_min, y_max, x_max, y_min = extents[0]

                # Loop the extents. Narrowing if intersection, expanding if union.
                for index, extent in enumerate(extents):
                    if index == 0:
                        continue

                    if bounding_box == "intersection":
                        if extent[0] > x_min:
                            x_min = extent[0]
                        if extent[1] < y_max:
                            y_max = extent[1]
                        if extent[2] < x_max:
                            x_max = extent[2]
                        if extent[3] > y_min:
                            y_min = extent[3]

                    elif bounding_box == "union":
                        if extent[0] < x_min:
                            x_min = extent[0]
                        if extent[1] > y_max:
                            y_max = extent[1]
                        if extent[2] > x_max:
                            x_max = extent[2]
                        if extent[3] < y_min:
                            y_min = extent[3]

                # Add to target values.
                target_bounds = (x_min, y_min, x_max, y_max)

            else:
                # BUGFIX: the message previously printed target_bounds, which
                # is always None in this branch; the offending value is
                # bounding_box.
                raise ValueError(
                    f"Unable to parse or infer bounding_box: {bounding_box}")
        else:
            raise ValueError(
                f"Unable to parse or infer bounding_box: {bounding_box}")

    # If the rasters have not been reprojected, we reproject them now.
    # The reprojection is necessary as warp has to be a two step process
    # in order to align the rasters properly. This might not be necessary
    # in a future version of gdal.
    if len(reprojected_rasters) != len(raster_list):
        reprojected_rasters = []

        for raster in raster_list:
            raster_metadata = raster_to_metadata(raster)

            # If the raster is already the correct projection, simply append the raster.
            if raster_metadata["projection_osr"].IsSame(target_projection):
                reprojected_rasters.append(raster)
            else:
                reprojected = internal_reproject_raster(
                    raster, target_projection)
                reprojected_rasters.append(reprojected)

    # If any of the target values are still undefined. Throw an error!
    if target_projection is None or target_bounds is None:
        raise Exception(
            "Error while preparing the target projection or bounds.")

    if x_res is None and y_res is None and x_pixels is None and y_pixels is None:
        raise Exception("Error while preparing the target pixel size.")

    # This is the list of rasters to return. If output is not memory, it's a list of paths.
    return_list: List[str] = []
    for index, raster in enumerate(reprojected_rasters):
        raster_metadata = raster_to_metadata(raster)

        out_name = path_list[index]
        out_format = path_to_driver_raster(out_name)

        if skip_existing and os.path.exists(out_name):
            return_list.append(out_name)
            continue

        # Handle nodata.
        out_src_nodata = None
        out_dst_nodata = None
        if src_nodata == "infer":
            out_src_nodata = raster_metadata["nodata_value"]

            # Fall back to a datatype-appropriate default nodata value.
            if out_src_nodata is None:
                out_src_nodata = gdal_nodata_value_from_type(
                    raster_metadata["datatype_gdal_raw"])

        elif src_nodata is None:
            out_src_nodata = None
        elif not isinstance(src_nodata, str):
            out_src_nodata = src_nodata

        if dst_nodata == "infer":
            out_dst_nodata = out_src_nodata
        elif dst_nodata is False or dst_nodata is None:
            out_dst_nodata = None
        elif src_nodata is None:
            out_dst_nodata = None
        elif not isinstance(dst_nodata, str):
            out_dst_nodata = dst_nodata

        # Removes file if it exists and overwrite is True.
        remove_if_overwrite(out_name, overwrite)

        # Hand over to gdal.Warp to do the heavy lifting!
        warped = gdal.Warp(
            out_name,
            raster,
            xRes=x_res,
            yRes=y_res,
            width=x_pixels,
            height=y_pixels,
            dstSRS=target_projection,
            outputBounds=target_bounds,
            format=out_format,
            resampleAlg=translate_resample_method(resample_alg),
            creationOptions=default_options(creation_options),
            srcNodata=out_src_nodata,
            dstNodata=out_dst_nodata,
            targetAlignedPixels=False,
            cropToCutline=False,
            multithread=True,
            warpMemoryLimit=ram,
        )

        if warped is None:
            raise Exception("Error while warping rasters.")

        return_list.append(out_name)

    if not rasters_are_aligned(return_list, same_extent=True):
        raise Exception("Error while aligning rasters. Output is not aligned")

    return return_list
# Example #10
def _clip_raster(
    raster: Union[str, gdal.Dataset],
    clip_geom: Union[str, ogr.DataSource, gdal.Dataset],
    out_path: Optional[str] = None,
    resample_alg: str = "nearest",
    crop_to_geom: bool = True,
    adjust_bbox: bool = True,
    all_touch: bool = True,
    overwrite: bool = True,
    creation_options: Optional[list] = None,
    dst_nodata: Union[str, int, float] = "infer",
    layer_to_clip: int = 0,
    prefix: str = "",
    postfix: str = "_clipped",
    verbose: int = 1,
    uuid: bool = False,
    ram: int = 8000,
) -> str:
    """OBS: Internal. Single output.

    Clips a raster(s) using a vector geometry or the extents of
    a raster.

    Args:
        raster (path | Dataset): The raster to clip.
        clip_geom (path | DataSource | Dataset): Vector geometry or raster
            whose extent is used as the cutline.

    **kwargs:
        out_path (str | None): Output path. Inferred if None.
        resample_alg (str): Resampling algorithm passed to gdal.Warp.
        crop_to_geom (bool): Crop the output bounds to the clip geometry.
        adjust_bbox (bool): Snap the cropped bounds to the raster's pixel grid.
        all_touch (bool): Include pixels touched (not only covered) by the
            cutline.
        overwrite (bool): Overwrite an existing output file.
        creation_options (list | None): GDAL creation options.
        dst_nodata (str | number): Output nodata; "infer" derives it from the
            source raster.
        layer_to_clip (int): Index of the vector layer used for clipping.
        prefix (str): Prepended to the output name.
        postfix (str): Appended to the output name.
        verbose (int): 0 silences GDAL errors during the warp.
        uuid (bool): Add a uuid to the output name.
        ram (int): gdal.Warp memory limit.

    Returns:
        The path to the clipped raster.

    Raises:
        ValueError: If inputs cannot be parsed.
        Exception: If the geometries do not intersect or the warp fails.
    """
    # Avoid the shared mutable default ([]) for creation_options.
    creation_options = [] if creation_options is None else creation_options

    type_check(raster, [str, gdal.Dataset], "raster")
    type_check(clip_geom, [str, ogr.DataSource, gdal.Dataset], "clip_geom")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(resample_alg, [str], "resample_alg")
    type_check(crop_to_geom, [bool], "crop_to_geom")
    type_check(adjust_bbox, [bool], "adjust_bbox")
    type_check(all_touch, [bool], "all_touch")
    type_check(dst_nodata, [str, int, float], "dst_nodata")
    type_check(layer_to_clip, [int], "layer_to_clip")
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")
    type_check(verbose, [int], "verbose")
    type_check(uuid, [bool], "uuid")

    _, path_list = ready_io_raster(raster,
                                   out_path,
                                   overwrite=overwrite,
                                   prefix=prefix,
                                   postfix=postfix,
                                   uuid=uuid)

    if out_path is not None:
        if "vsimem" not in out_path:
            if not os.path.isdir(os.path.split(os.path.normpath(out_path))[0]):
                raise ValueError(
                    f"out_path folder does not exists: {out_path}")

    # Input is a vector.
    if is_vector(clip_geom):
        clip_ds = open_vector(clip_geom)

        clip_metadata = internal_vector_to_metadata(
            clip_ds, process_layer=layer_to_clip)

        # Isolate the requested layer if the source has several.
        if clip_metadata["layer_count"] > 1:
            clip_ds = internal_vector_to_memory(clip_ds,
                                                layer_to_extract=layer_to_clip)

        if isinstance(clip_ds, ogr.DataSource):
            clip_ds = clip_ds.GetName()

    # Input is a raster (use extent)
    elif is_raster(clip_geom):
        clip_metadata = raster_to_metadata(clip_geom, create_geometry=True)
        clip_metadata["layer_count"] = 1
        clip_ds = clip_metadata["extent_datasource"].GetName()
    else:
        if file_exists(clip_geom):
            raise ValueError(f"Unable to parse clip geometry: {clip_geom}")
        else:
            raise ValueError(f"Unable to locate clip geometry {clip_geom}")

    if layer_to_clip > (clip_metadata["layer_count"] - 1):
        # BUGFIX: message previously read "an unable layer_to_clip".
        raise ValueError("Requested an invalid layer_to_clip.")

    if clip_ds is None:
        raise ValueError(f"Unable to parse input clip geom: {clip_geom}")

    clip_projection = clip_metadata["projection_osr"]

    # options
    warp_options = []
    if all_touch:
        warp_options.append("CUTLINE_ALL_TOUCHED=TRUE")
    else:
        warp_options.append("CUTLINE_ALL_TOUCHED=FALSE")

    origin_layer = open_raster(raster)

    raster_metadata = raster_to_metadata(raster)
    origin_projection = raster_metadata["projection_osr"]
    origin_extent = raster_metadata["extent"]

    # Check if projections match, otherwise reproject target geom.
    if not origin_projection.IsSame(clip_projection):
        clip_metadata["extent"] = reproject_extent(
            clip_metadata["extent"],
            clip_projection,
            origin_projection,
        )

    # BUGFIX: the extent is read AFTER the reprojection above. Previously it
    # was captured before, so the intersection test below compared extents in
    # different projections whenever the inputs did not match.
    clip_extent = clip_metadata["extent"]

    # Fast check: Does the extent of the two inputs overlap?
    if not gdal_bbox_intersects(origin_extent, clip_extent):
        raise Exception("Geometries did not intersect.")

    output_bounds = raster_metadata["extent_gdal_warp"]

    if crop_to_geom:

        if adjust_bbox:
            # Snap the clip bounds to the source raster's pixel grid.
            output_bounds = align_bbox(
                raster_metadata["extent"],
                clip_metadata["extent"],
                raster_metadata["pixel_width"],
                raster_metadata["pixel_height"],
                warp_format=True,
            )

        else:
            output_bounds = clip_metadata["extent_gdal_warp"]

    # formats
    out_name = path_list[0]
    out_format = path_to_driver_raster(out_name)
    out_creation_options = default_options(creation_options)

    # nodata
    src_nodata = raster_metadata["nodata_value"]
    out_nodata = None
    if src_nodata is not None:
        out_nodata = src_nodata
    else:
        if dst_nodata == "infer":
            out_nodata = gdal_nodata_value_from_type(
                raster_metadata["datatype_gdal_raw"])
        elif dst_nodata is None:
            out_nodata = None
        elif isinstance(dst_nodata, (int, float)):
            out_nodata = dst_nodata
        else:
            raise ValueError(f"Unable to parse nodata_value: {dst_nodata}")

    # Removes file if it exists and overwrite is True.
    # BUGFIX: previously targeted out_path, which can be None while gdal.Warp
    # actually writes to out_name.
    remove_if_overwrite(out_name, overwrite)

    if verbose == 0:
        gdal.PushErrorHandler("CPLQuietErrorHandler")

    try:
        clipped = gdal.Warp(
            out_name,
            origin_layer,
            format=out_format,
            resampleAlg=translate_resample_method(resample_alg),
            targetAlignedPixels=False,
            outputBounds=output_bounds,
            xRes=raster_metadata["pixel_width"],
            yRes=raster_metadata["pixel_height"],
            cutlineDSName=clip_ds,
            cropToCutline=
            False,  # GDAL does this incorrectly when targetAlignedPixels is True.
            creationOptions=out_creation_options,
            warpMemoryLimit=ram,
            warpOptions=warp_options,
            srcNodata=raster_metadata["nodata_value"],
            dstNodata=out_nodata,
            multithread=True,
        )
    finally:
        # Ensure the quiet handler is always popped, even if the warp raises.
        if verbose == 0:
            gdal.PopErrorHandler()

    if clipped is None:
        raise Exception("Error while clipping raster.")

    return out_name
# Example #11
def backscatter_step1(
    zip_file,
    out_path,
    gpt_path="~/snap/bin/gpt",
    extent=None,
    tmp_folder=None,
):
    """Run the first SNAP backscatter graph on a Sentinel-1 zip archive.

    Fills the backscatter_step1.xml graph template with the input file,
    output path and area of interest, then executes it with SNAP's gpt tool.

    Args:
        zip_file (str): Path to the Sentinel-1 zip file to process.
        out_path (str): Output path (without extension); gpt appends ".dim".

    **kwargs:
        gpt_path (str): Location of the SNAP graph processing tool.
        extent (vector | raster | str | None): Area of interest. If None, the
            whole world is used.
        tmp_folder (str | None): Folder for the generated graph xml. Falls
            back to the folder of out_path if None.

    Returns:
        The path to the produced .dim file.

    Raises:
        ValueError: If the extent cannot be parsed.
    """
    graph = "backscatter_step1.xml"

    # Get absolute location of graph processing tool
    gpt = find_gpt(gpt_path)

    out_path_ext = out_path + ".dim"
    if os.path.exists(out_path_ext):
        print(f"{out_path_ext} already processed")
        return out_path_ext

    xmlfile = os.path.join(os.path.dirname(__file__), f"./graphs/{graph}")

    # Read the graph template; the context manager guarantees the handle
    # is closed even if reading fails.
    with open(xmlfile, "r") as snap_graph_step1:
        snap_graph_step1_str = snap_graph_step1.read()

    if extent is not None:
        if is_vector(extent):
            metadata = vector_to_metadata(extent, create_geometry=True)
        elif is_raster(extent):
            metadata = raster_to_metadata(extent, create_geometry=True)
        elif isinstance(extent, str):
            metadata = raster_to_metadata(extent, create_geometry=True)
        else:
            raise ValueError("Extent must be a vector, raster or a path to a raster.")

        interest_area = metadata["extent_wkt_latlng"]

    else:
        # No extent supplied: process the whole world.
        interest_area = "POLYGON ((-180.0 -90.0, 180.0 -90.0, 180.0 90.0, -180.0 90.0, -180.0 -90.0))"

    snap_graph_step1_str = snap_graph_step1_str.replace("${extent}", interest_area)
    snap_graph_step1_str = snap_graph_step1_str.replace("${inputfile}", zip_file)
    snap_graph_step1_str = snap_graph_step1_str.replace("${outputfile}", out_path)

    # BUGFIX: previously `tmp_folder + os.path.basename(out_path)` raised a
    # TypeError for the default tmp_folder=None and silently produced a
    # malformed path when tmp_folder lacked a trailing separator.
    graph_folder = tmp_folder if tmp_folder is not None else os.path.dirname(out_path)
    xmlfile = os.path.join(graph_folder, os.path.basename(out_path) + "_graph.xml")

    with open(xmlfile, "w") as graph_file:
        graph_file.write(snap_graph_step1_str)

    command = [
        gpt,
        os.path.abspath(xmlfile),
        f"-q {cpu_count()}",
    ]

    if platform == "linux" or platform == "linux2":
        cmd = " ".join(command)
    else:
        cmd = f'cmd /c {" ".join(command)}'

    os.system(cmd)

    return out_path_ext