Example #1
def _raster_set_datatype(
    raster: Union[str, gdal.Dataset],
    dtype: str,
    out_path: Optional[str],
    overwrite: bool = True,
    creation_options: Union[list, None] = None,
) -> str:
    """OBS: INTERNAL: Single output.

    Changes the datatype of a raster.
    """
    type_check(raster, [str, gdal.Dataset], "raster")
    type_check(dtype, [str], "dtype")
    type_check(out_path, [list, str], "out_path", allow_none=True)
    type_check(creation_options, [list], "creation_options", allow_none=True)

    ref = open_raster(raster)
    metadata = raster_to_metadata(ref)

    path = out_path
    if path is None:
        name = metadata["name"]
        path = f"/vsimem/{name}_{uuid4().int}.tif"

    driver = gdal.GetDriverByName(path_to_driver_raster(path))

    remove_if_overwrite(path, overwrite)

    copy = driver.Create(
        path,
        metadata["height"],
        metadata["width"],
        metadata["band_count"],
        translate_datatypes(dtype),
        default_options(creation_options),
    )

    copy.SetProjection(metadata["projection"])
    copy.SetGeoTransform(metadata["transform"])

    array = raster_to_array(ref)

    for band_idx in range(metadata["band_count"]):
        band = copy.GetRasterBand(band_idx + 1)
        band.WriteArray(array[:, :, band_idx])

        if metadata["nodata_value"] is not None:
            band.SetNoDataValue(metadata["nodata_value"])

    return path
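
A minimal, self-contained sketch of the GDAL pattern the function above wraps (create a new dataset with a different data type and copy the bands over). It uses plain gdal/numpy calls rather than the module's own helpers, and the sizes and values are made up:

import numpy as np
from osgeo import gdal

# Hypothetical 4x3 Byte source raster held in memory.
src = gdal.GetDriverByName("MEM").Create("", 4, 3, 1, gdal.GDT_Byte)
src.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, -1.0))
src.GetRasterBand(1).WriteArray(np.arange(12, dtype="uint8").reshape(3, 4))

# Target with the same shape but GDT_Float32; note that Create takes
# (name, xsize=width, ysize=height, bands, datatype).
dst = gdal.GetDriverByName("MEM").Create(
    "", src.RasterXSize, src.RasterYSize, src.RasterCount, gdal.GDT_Float32
)
dst.SetGeoTransform(src.GetGeoTransform())
dst.SetProjection(src.GetProjection())
dst.GetRasterBand(1).WriteArray(src.GetRasterBand(1).ReadAsArray().astype("float32"))
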
Example #2
def internal_singlepart_to_multipart(
    vector: Union[str, ogr.DataSource],
    out_path: Optional[str] = None,
    overwrite: bool = True,
    add_index: bool = True,
    process_layer: int = -1,
) -> str:
    type_check(vector, [str, ogr.DataSource], "vector")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(overwrite, [bool], "overwrite")
    type_check(add_index, [bool], "add_index")
    type_check(process_layer, [int], "process_layer")

    vector_list, path_list = ready_io_vector(vector, out_path, overwrite=overwrite)
    ref = open_vector(vector_list[0])
    out_name = path_list[0]

    out_format = path_to_driver_vector(out_name)
    driver = ogr.GetDriverByName(out_format)
    overwrite_required(out_name, overwrite)

    metadata = internal_vector_to_metadata(ref)

    remove_if_overwrite(out_name, overwrite)

    destination: ogr.DataSource = driver.CreateDataSource(out_name)

    for index, layer_meta in enumerate(metadata["layers"]):
        if process_layer != -1 and index != process_layer:
            continue

        name = layer_meta["layer_name"]
        geom = layer_meta["column_geom"]

        sql = f"SELECT ST_Collect({geom}) AS geom FROM {name};"

        result = ref.ExecuteSQL(sql, dialect="SQLITE")
        destination.CopyLayer(result, name, ["OVERWRITE=YES"])
        ref.ReleaseResultSet(result)

    if add_index:
        vector_add_index(destination)

    destination.FlushCache()

    return out_name
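
For reference, a self-contained sketch of the ExecuteSQL/CopyLayer pattern used above, built on an in-memory OGR datasource instead of the module's helpers (the ST_Collect aggregation itself additionally requires the 'SQLITE' dialect, as in the function; the layer name and data here are made up):

from osgeo import ogr

# Hypothetical source: three points in an in-memory layer.
drv = ogr.GetDriverByName("Memory")
src = drv.CreateDataSource("src")
layer = src.CreateLayer("pts", geom_type=ogr.wkbPoint)
for x in range(3):
    feature = ogr.Feature(layer.GetLayerDefn())
    feature.SetGeometry(ogr.CreateGeometryFromWkt(f"POINT ({x} 0)"))
    layer.CreateFeature(feature)

# Run a SQL statement and copy the result layer into a second datasource.
result = src.ExecuteSQL("SELECT * FROM pts")
dst = drv.CreateDataSource("dst")
dst.CopyLayer(result, "pts_copy", ["OVERWRITE=YES"])
src.ReleaseResultSet(result)
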
Example #3
def _raster_to_disk(
    raster,
    out_path,
    overwrite=True,
    creation_options=None,
) -> str:
    """WARNING: INTERNAL. DO NOT USE."""
    ref = open_raster(raster)

    driver = gdal.GetDriverByName(path_to_driver_raster(out_path))

    if driver is None:
        raise Exception(
            f"Error while parsing driver from extension: {out_path}")

    remove_if_overwrite(out_path, overwrite)
    driver.CreateCopy(out_path, ref, options=creation_options)

    return out_path
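
A self-contained sketch of the driver.CreateCopy call that _raster_to_disk wraps; the source raster and the /vsimem output path are hypothetical:

from osgeo import gdal

src = gdal.GetDriverByName("MEM").Create("", 2, 2, 1, gdal.GDT_Byte)

driver = gdal.GetDriverByName("GTiff")
copy = driver.CreateCopy("/vsimem/example_copy.tif", src, options=["COMPRESS=LZW"])
copy = None  # dereference to flush and close the output
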
Example #4
def internal_vector_to_disk(
    vector: Union[str, ogr.DataSource],
    out_path: str,
    overwrite: bool = True,
) -> str:
    """OBS: Internal. Single output.

    Copies a vector source to disk.
    """
    type_check(vector, [str, ogr.DataSource], "vector")
    type_check(out_path, [str], "out_path")
    type_check(overwrite, [bool], "overwrite")

    overwrite_required(out_path, overwrite)

    datasource = open_vector(vector)
    metadata = internal_vector_to_metadata(vector)

    if not os.path.isdir(os.path.dirname(os.path.abspath(out_path))):
        raise ValueError(
            f"Output folder does not exist. Please create it first: {out_path}")

    driver = ogr.GetDriverByName(path_to_driver_vector(out_path))

    if driver is None:
        raise Exception(f"Error while parsing driver for: {vector}")

    remove_if_overwrite(out_path, overwrite)

    copy = driver.CreateDataSource(out_path)

    for layer_idx in range(metadata["layer_count"]):
        layer_name = metadata["layers"][layer_idx]["layer_name"]
        copy.CopyLayer(datasource.GetLayer(layer_idx), str(layer_name),
                       ["OVERWRITE=YES"])

    # Flush to disk
    copy = None

    return out_path
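
A self-contained sketch of the per-layer CopyLayer loop used above, writing an in-memory datasource to a (virtual) GeoPackage. The layer name and output path are made up, and a GDAL build with the GPKG driver is assumed:

from osgeo import ogr

src = ogr.GetDriverByName("Memory").CreateDataSource("src")
src.CreateLayer("roads", geom_type=ogr.wkbLineString)

dst = ogr.GetDriverByName("GPKG").CreateDataSource("/vsimem/roads_copy.gpkg")
for layer_idx in range(src.GetLayerCount()):
    layer = src.GetLayer(layer_idx)
    dst.CopyLayer(layer, layer.GetName(), ["OVERWRITE=YES"])

dst = None  # flush to the target
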
Example #5
def raster_mask_values(
    raster: Union[gdal.Dataset, str, list],
    values_to_mask: list,
    out_path: Union[list, str, None] = None,
    include_original_nodata: bool = True,
    dst_nodata: Union[float, int, str, list, None] = "infer",
    in_place: bool = False,
    overwrite: bool = True,
    opened: bool = False,
    prefix: str = "",
    postfix: str = "_nodata_masked",
    creation_options: list = [],
) -> Union[list, gdal.Dataset, str]:
    """Mask a raster with a list of values.

    Args:
        raster (path | raster | list): The raster(s) to retrieve nodata values from.

        values_to_mask (list): The list of values to mask in the raster(s)

    **kwargs:
        include_original_nodata: (bool): If True, the nodata value of the raster(s) will be
        included in the values to mask.

        dst_nodata (float, int, str, None): The target nodata value. If 'infer' the nodata
        value is set based on the input datatype. A list of nodata values can be based matching
        the amount of input rasters. If multiple nodata values should be set, use raster_mask_values.

        out_path (path | list | None): The destination of the changed rasters. If out_paths
        are specified, in_place is automatically set to False. The path can be a folder.

        in_place (bool): Should the rasters be changed in_place or copied?

        prefix (str): Prefix to add the the output if a folder is specified in out_path.

        postfix (str): Postfix to add the the output if a folder is specified in out_path.

    Returns:
        Returns the rasters with nodata removed. If in_place is True a reference to the
        changed orignal is returned, otherwise a copied memory raster or the path to the
        generated raster is outputted.
    """
    type_check(raster, [list, str, gdal.Dataset], "raster")
    type_check(values_to_mask, [list], "values_to_mask")
    type_check(out_path, [list, str], "out_path", allow_none=True)
    type_check(include_original_nodata, [bool], "include_original_nodata")
    type_check(dst_nodata, [float, int, str, list],
               "dst_nodata",
               allow_none=True)
    type_check(in_place, [bool], "in_place")
    type_check(overwrite, [bool], "overwrite")
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")
    type_check(opened, [bool], "opened")
    type_check(creation_options, [list], "creation_options")

    rasters_metadata = []
    internal_in_place = in_place if out_path is None else False
    internal_dst_nodata = None

    for value in values_to_mask:
        if not isinstance(value, (int, float)):
            raise ValueError("Values in values_to_mask must be ints or floats")

    if isinstance(dst_nodata, str) and dst_nodata != "infer":
        raise ValueError(f"Invalid dst_nodata value. {dst_nodata}")

    if isinstance(dst_nodata, list):
        if not isinstance(raster, list) or len(dst_nodata) != len(raster):
            raise ValueError(
                "If dst_nodata is a list, raster must also be a list of equal length."
            )

        for value in dst_nodata:
            if not isinstance(value, (float, int, str, type(None))):
                raise ValueError("Invalid type in dst_nodata list.")

            if isinstance(value, str) and value != "infer":
                raise ValueError(
                    "If dst_nodata is a string it must be 'infer'")

    raster_list, out_names = ready_io_raster(raster, out_path, overwrite,
                                             prefix, postfix)

    output_rasters = []

    for index, internal_raster in enumerate(raster_list):

        raster_metadata = None
        if index >= len(rasters_metadata):
            raster_metadata = raster_to_metadata(internal_raster)
            rasters_metadata.append(raster_metadata)
        else:
            raster_metadata = rasters_metadata[index]

        if dst_nodata == "infer":
            internal_dst_nodata = gdal_nodata_value_from_type(
                raster_metadata["dtype_gdal_raw"])
        elif isinstance(dst_nodata, list):
            internal_dst_nodata = dst_nodata[index]
        else:
            internal_dst_nodata = dst_nodata

        mask_values = list(values_to_mask)
        if include_original_nodata:
            if raster_metadata["nodata_value"] is not None:
                mask_values.append(raster_metadata["nodata_value"])

        arr = raster_to_array(internal_raster, filled=True)

        mask = None
        for mask_idx, mask_value in enumerate(mask_values):
            if mask_idx == 0:
                mask = arr == mask_value
            else:
                mask = mask | (arr == mask_value)

        arr = np.ma.masked_array(arr,
                                 mask=mask,
                                 fill_value=internal_dst_nodata)

        if internal_in_place:
            raster_ref = open_raster(internal_raster)
            filled = arr.filled()

            for band in range(raster_metadata["band_count"]):
                raster_band = raster_ref.GetRasterBand(band + 1)
                raster_band.WriteArray(filled[:, :, band])
                raster_band = None
        else:
            out_name = out_names[index]
            remove_if_overwrite(out_name, overwrite)

            output_rasters.append(
                array_to_raster(arr, internal_raster, out_path=out_name))

    if isinstance(raster, list):
        return output_rasters

    return output_rasters[0]
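
The masking step above can be illustrated with plain numpy; np.isin performs the same OR-reduction over the list of values as the explicit loop in the function (the array, values, and nodata value here are made up):

import numpy as np

arr = np.array([[0.0, 1.0, 2.0], [2.0, 3.0, 0.0]], dtype="float32")
values_to_mask = [0, 2]
dst_nodata = -9999.0

mask = np.isin(arr, values_to_mask)
masked = np.ma.masked_array(arr, mask=mask, fill_value=dst_nodata)

print(masked.filled())  # masked cells are replaced by -9999.0
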
Example #6
def raster_set_nodata(
    raster: Union[List[Union[gdal.Dataset, str]], gdal.Dataset, str],
    dst_nodata: Union[float, int, str, list, None],
    out_path: Union[list, str, None] = None,
    overwrite: bool = True,
    in_place: bool = False,
    prefix: str = "",
    postfix: str = "_nodata_set",
    opened: bool = False,
    creation_options: list = [],
) -> Union[list, gdal.Dataset, str]:
    """Sets all the nodata from a raster or a list of rasters.

    Args:
        raster (path | raster | list): The raster(s) to retrieve nodata values from.

        dst_nodata (float, int, str, None): The target nodata value. If 'infer' the nodata
        value is set based on the input datatype. A list of nodata values can be based matching
        the amount of input rasters. If multiple nodata values should be set, use raster_mask_values.

    **kwargs:
        out_path (path | list | None): The destination of the changed rasters. If out_paths
        are specified, in_place is automatically set to False. The path can be a folder.

        in_place (bool): Should the rasters be changed in_place or copied?

        prefix (str): Prefix to add the the output if a folder is specified in out_path.

        postfix (str): Postfix to add the the output if a folder is specified in out_path.

    Returns:
        Returns the rasters with nodata set. If in_place is True a reference to the
        changed orignal is returned, otherwise a copied memory raster or the path to the
        generated raster is outputted.
    """
    type_check(raster, [list, str, gdal.Dataset], "raster")
    type_check(dst_nodata, [float, int, str, list],
               "dst_nodata",
               allow_none=True)
    type_check(out_path, [list, str], "out_path", allow_none=True)
    type_check(overwrite, [bool], "overwrite")
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")
    type_check(opened, [bool], "opened")
    type_check(creation_options, [list], "creation_options")

    rasters, out_names = ready_io_raster(raster, out_path, overwrite, prefix,
                                         postfix)

    rasters_metadata: List[Metadata_raster] = []
    internal_dst_nodata = None

    if isinstance(dst_nodata, str) and dst_nodata != "infer":
        raise ValueError(f"Invalid dst_nodata value. {dst_nodata}")

    if isinstance(dst_nodata, list):
        if not isinstance(raster, list) or len(dst_nodata) != len(raster):
            raise ValueError(
                "If dst_nodata is a list, raster must also be a list of equal length."
            )

        for value in dst_nodata:
            if not isinstance(value, (float, int, str, type(None))):
                raise ValueError("Invalid type in dst_nodata list.")

            if isinstance(value, str) and value != "infer":
                raise ValueError(
                    "If dst_nodata is a string it must be 'infer'")

    output_rasters = []

    for index, internal_raster in enumerate(rasters):

        raster_metadata = None
        if index >= len(rasters_metadata):
            raster_metadata = raster_to_metadata(internal_raster)

            if not isinstance(raster_metadata, dict):
                raise Exception("Metadata is in the wrong format.")

            rasters_metadata.append(raster_metadata)
        else:
            raster_metadata = rasters_metadata[index]

        if dst_nodata == "infer":
            internal_dst_nodata = gdal_nodata_value_from_type(
                raster_metadata["dtype_gdal_raw"])
        elif isinstance(dst_nodata, list):
            internal_dst_nodata = dst_nodata[index]
        else:
            internal_dst_nodata = dst_nodata

        if in_place:
            raster_ref = open_raster(internal_raster)

            for band in range(raster_metadata["band_count"]):
                raster_band = raster_ref.GetRasterBand(band + 1)
                raster_band.SetNoDataValue(internal_dst_nodata)
                raster_band = None

            output_rasters.append(internal_raster)
        else:
            if out_path is None:
                raster_mem = raster_to_memory(internal_raster)
                raster_mem_ref = raster_to_reference(raster_mem)
            else:
                remove_if_overwrite(out_names[index], overwrite)
                raster_mem = raster_to_disk(internal_raster, out_names[index])
                raster_mem_ref = raster_to_reference(raster_mem)

            for band in range(raster_metadata["band_count"]):
                raster_band = raster_mem_ref.GetRasterBand(band + 1)
                raster_band.SetNoDataValue(internal_dst_nodata)

            output_rasters.append(raster_mem)

    if isinstance(raster, list):
        return output_rasters

    return output_rasters[0]
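
A self-contained sketch of the per-band nodata assignment the function performs, using a small in-memory raster (the nodata value is made up):

from osgeo import gdal

ds = gdal.GetDriverByName("MEM").Create("", 4, 4, 2, gdal.GDT_Float32)

for band_idx in range(ds.RasterCount):
    band = ds.GetRasterBand(band_idx + 1)
    band.SetNoDataValue(-9999.0)
    band = None

print(ds.GetRasterBand(1).GetNoDataValue())  # -9999.0
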
Example #7
def internal_reproject_vector(
    vector: Union[str, ogr.DataSource],
    projection: Union[str, int, ogr.DataSource, gdal.Dataset,
                      osr.SpatialReference],
    out_path: Optional[str] = None,
    copy_if_same: bool = False,
    overwrite: bool = True,
) -> str:
    type_check(vector, [str, ogr.DataSource], "vector")
    type_check(
        projection,
        [str, int, ogr.DataSource, gdal.Dataset, osr.SpatialReference],
        "projection",
    )
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(copy_if_same, [bool], "copy_if_same")
    type_check(overwrite, [bool], "overwrite")

    vector_list, path_list = ready_io_vector(vector,
                                             out_path,
                                             overwrite=overwrite)
    origin = open_vector(vector_list[0])
    metadata = internal_vector_to_metadata(origin)
    out_name = path_list[0]

    origin_projection = metadata["projection_osr"]
    target_projection = parse_projection(projection)

    if not isinstance(target_projection, osr.SpatialReference):
        raise Exception("Error while parsing target projection.")

    if origin_projection.IsSame(target_projection):
        if copy_if_same:
            if out_path is None:
                return internal_vector_to_memory(origin)

            return internal_vector_to_disk(origin, out_name)
        else:
            return get_vector_path(vector)

    # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546
    if int(osgeo.__version__[0]) >= 3:
        origin_projection.SetAxisMappingStrategy(
            osr.OAMS_TRADITIONAL_GIS_ORDER)
        target_projection.SetAxisMappingStrategy(
            osr.OAMS_TRADITIONAL_GIS_ORDER)

    coord_trans = osr.CoordinateTransformation(origin_projection,
                                               target_projection)

    remove_if_overwrite(out_path, overwrite)

    driver = ogr.GetDriverByName(path_to_driver_vector(out_name))
    destination: ogr.DataSource = driver.CreateDataSource(out_name)

    for layer_idx in range(len(metadata["layers"])):
        origin_layer = origin.GetLayerByIndex(layer_idx)
        origin_layer_defn = origin_layer.GetLayerDefn()

        layer_dict = metadata["layers"][layer_idx]
        layer_name = layer_dict["layer_name"]
        layer_geom_type = layer_dict["geom_type_ogr"]

        destination_layer = destination.CreateLayer(layer_name,
                                                    target_projection,
                                                    layer_geom_type)
        destination_layer_defn = destination_layer.GetLayerDefn()

        # Copy field definitions
        origin_layer_defn = origin_layer.GetLayerDefn()
        for i in range(0, origin_layer_defn.GetFieldCount()):
            field_defn = origin_layer_defn.GetFieldDefn(i)
            destination_layer.CreateField(field_defn)

        # Loop through the input features
        for _ in range(origin_layer.GetFeatureCount()):
            feature = origin_layer.GetNextFeature()
            geom = feature.GetGeometryRef()
            geom.Transform(coord_trans)

            new_feature = ogr.Feature(destination_layer_defn)
            new_feature.SetGeometry(geom)

            # Copy field values
            for i in range(0, destination_layer_defn.GetFieldCount()):
                new_feature.SetField(
                    destination_layer_defn.GetFieldDefn(i).GetNameRef(),
                    feature.GetField(i),
                )

            destination_layer.CreateFeature(new_feature)

        destination_layer.ResetReading()
        destination_layer = None

    destination.FlushCache()

    return out_name
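
The core of the feature loop above is an osr.CoordinateTransformation applied to each geometry. A self-contained sketch with a single point; the EPSG codes and coordinate are made up, and SetAxisMappingStrategy requires GDAL 3 or newer:

from osgeo import ogr, osr

src_srs = osr.SpatialReference()
src_srs.ImportFromEPSG(4326)
dst_srs = osr.SpatialReference()
dst_srs.ImportFromEPSG(3857)

# Keep traditional x/y (lon/lat) axis order under GDAL 3+.
src_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
dst_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)

transform = osr.CoordinateTransformation(src_srs, dst_srs)

geom = ogr.CreateGeometryFromWkt("POINT (12.57 55.68)")
geom.Transform(transform)
print(geom.ExportToWkt())
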
Example #8
def internal_reproject_raster(
    raster: Union[str, gdal.Dataset],
    projection: Union[int, str, gdal.Dataset, ogr.DataSource,
                      osr.SpatialReference],
    out_path: Optional[str] = None,
    resample_alg: str = "nearest",
    copy_if_already_correct: bool = True,
    overwrite: bool = True,
    creation_options: list = [],
    dst_nodata: Union[str, int, float] = "infer",
    prefix: str = "",
    postfix: str = "_reprojected",
) -> str:
    """OBS: Internal. Single output.

    Reproject a raster to a target coordinate reference system.
    """
    type_check(raster, [str, gdal.Dataset], "raster")
    type_check(
        projection,
        [int, str, gdal.Dataset, ogr.DataSource, osr.SpatialReference],
        "projection",
    )
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(resample_alg, [str], "resample_alg")
    type_check(copy_if_already_correct, [bool], "copy_if_already_correct")
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")
    type_check(dst_nodata, [str, int, float], "dst_nodata")
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")

    raster_list, path_list = ready_io_raster(raster, out_path, overwrite,
                                             prefix, postfix)
    out_name = path_list[0]
    ref = open_raster(raster_list[0])
    metadata = raster_to_metadata(ref)

    out_creation_options = default_options(creation_options)
    out_format = path_to_driver_raster(out_name)

    original_projection = parse_projection(ref)
    target_projection = parse_projection(projection)

    if not isinstance(original_projection, osr.SpatialReference):
        raise Exception("Error while parsing input projection.")

    if not isinstance(target_projection, osr.SpatialReference):
        raise Exception("Error while parsing target projection.")

    if original_projection.IsSame(target_projection):
        if not copy_if_already_correct:
            return get_raster_path(ref)

    src_nodata = metadata["nodata_value"]
    out_nodata = None
    if src_nodata is not None:
        out_nodata = src_nodata
    else:
        if dst_nodata == "infer":
            out_nodata = gdal_nodata_value_from_type(
                metadata["datatype_gdal_raw"])
        elif isinstance(dst_nodata, str):
            raise TypeError(f"dst_nodata is in a wrong format: {dst_nodata}")
        else:
            out_nodata = dst_nodata

    remove_if_overwrite(out_path, overwrite)

    reprojected = gdal.Warp(
        out_name,
        ref,
        format=out_format,
        srcSRS=original_projection,
        dstSRS=target_projection,
        resampleAlg=translate_resample_method(resample_alg),
        creationOptions=out_creation_options,
        srcNodata=metadata["nodata_value"],
        dstNodata=out_nodata,
        multithread=True,
    )

    if reprojected is None:
        raise Exception(f"Error while reprojecting raster: {raster}")

    return out_name
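
A self-contained sketch of the gdal.Warp reprojection call the function builds up; the source raster, output path, and nodata value are hypothetical:

from osgeo import gdal, osr

srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)

src = gdal.GetDriverByName("MEM").Create("", 10, 10, 1, gdal.GDT_Float32)
src.SetProjection(srs.ExportToWkt())
src.SetGeoTransform((12.0, 0.01, 0.0, 56.0, 0.0, -0.01))

warped = gdal.Warp(
    "/vsimem/reprojected.tif",
    src,
    dstSRS="EPSG:3857",
    resampleAlg="near",
    dstNodata=-9999.0,
    multithread=True,
)

if warped is None:
    raise RuntimeError("gdal.Warp failed")
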
Example #9
def extract_patches(
    raster: Union[List[Union[str, gdal.Dataset]], str, gdal.Dataset],
    out_dir: Optional[str] = None,
    prefix: str = "",
    postfix: str = "_patches",
    size: int = 32,
    offsets: Union[list, None] = [],
    generate_border_patches: bool = True,
    generate_zero_offset: bool = True,
    generate_grid_geom: bool = True,
    clip_geom: Optional[Union[str, ogr.DataSource, gdal.Dataset]] = None,
    clip_layer_index: int = 0,
    verify_output=True,
    verification_samples=100,
    overwrite=True,
    epsilon: float = 1e-9,
    verbose: int = 1,
) -> tuple:
    """Extracts square tiles from a raster.
    Args:
        raster (list of rasters | path | raster): The raster(s) to convert.

    **kwargs:
        out_dir (path | none): Folder to save output. If None, in-memory
        arrays and geometries are outputted.

        prefix (str): A prefix for all outputs.

        postfix (str): A postfix for all outputs.

        size (int): The size of the tiles in pixels.

        offsets (list of tuples): List of offsets to extract. Example:
        offsets=[(16, 16), (16, 0), (0, 16)]. Will offset the initial raster
        and extract from there.

        generate_border_patches (bool): The tiles often do not align with the
        rasters which means borders are trimmed somewhat. If generate_border_patches
        is True, an additional tile is added where needed.

        generate_zero_offset (bool): if True, an offset is inserted at (0, 0)
        if none is present.

        generate_grid_geom (bool): Output a geopackage with the grid of tiles.

        clip_geom (str, raster, vector): Clip the output to the
        intersections with a geometry. Useful if a lot of the target
        area is water or similar.

        epsilon (float): How much for buffer the arange array function. This
        should usually just be left alone.

        verbose (int): If 1 will output messages on progress.

    Returns:
        A tuple with paths to the generated items. (numpy_array, grid_geom)
    """
    type_check(raster, [str, list, gdal.Dataset], "raster")
    type_check(out_dir, [str], "out_dir", allow_none=True)
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")
    type_check(size, [int], "size")
    type_check(offsets, [list], "offsets", allow_none=True)
    type_check(generate_grid_geom, [bool], "generate_grid_geom")
    type_check(
        clip_geom,
        [str, ogr.DataSource, gdal.Dataset],
        "clip_layer_index",
        allow_none=True,
    )
    type_check(clip_layer_index, [int], "clip_layer_index")
    type_check(overwrite, [bool], "overwrite")
    type_check(epsilon, [float], "epsilon")
    type_check(verbose, [int], "verbose")

    in_rasters = to_raster_list(raster)

    if out_dir is not None and not os.path.isdir(out_dir):
        raise ValueError(f"Output directory does not exists: {out_dir}")

    if not rasters_are_aligned(in_rasters):
        raise ValueError(
            "Input rasters must be aligned. Please use the align function.")

    output_geom = None

    metadata = internal_raster_to_metadata(in_rasters[0])

    if verbose == 1:
        print("Generating blocks..")

    # internal offset array. Avoid manipulating the og array.
    if offsets is None:
        offsets = []

    in_offsets = []
    if generate_zero_offset and (0, 0) not in offsets:
        in_offsets.append((0, 0))

    for offset in offsets:
        if offset != (0, 0):
            if not isinstance(offset, (list, tuple)) or len(offset) != 2:
                raise ValueError(
                    f"offset must be a list or tuple of two integers. Recieved: {offset}"
                )
            in_offsets.append((offset[0], offset[1]))

    border_patches_needed_x = True
    border_patches_needed_y = True

    if clip_geom is not None:
        border_patches_needed_x = False
        border_patches_needed_y = False

    shapes = []
    for offset in in_offsets:
        block_shape = shape_to_blockshape(metadata["shape"], (size, size),
                                          offset)

        if block_shape[0] * size == metadata["width"]:
            border_patches_needed_x = False

        if block_shape[1] * size == metadata["height"]:
            border_patches_needed_y = False

        shapes.append(block_shape)

    if generate_border_patches:
        cut_x = (metadata["width"] - in_offsets[0][0]) - (shapes[0][0] * size)
        cut_y = (metadata["height"] - in_offsets[0][1]) - (shapes[0][1] * size)

        if border_patches_needed_x and cut_x > 0:
            shapes[0][0] += 1

        if border_patches_needed_y and cut_y > 0:
            shapes[0][1] += 1

    # calculate the offsets
    all_rows = 0
    offset_rows = []
    for i in range(len(shapes)):
        row = 0

        for j in range(len(shapes[i])):
            if j == 0:
                row = int(shapes[i][j])
            else:
                row *= int(shapes[i][j])

        offset_rows.append(row)
        all_rows += row

    offset_rows_cumsum = np.cumsum(offset_rows)

    if generate_grid_geom is True or clip_geom is not None:

        if verbose == 1:
            print("Calculating grid cells..")

        mask = np.arange(all_rows, dtype="uint64")

        ulx, uly, _lrx, _lry = metadata["extent"]

        pixel_width = abs(metadata["pixel_width"])
        pixel_height = abs(metadata["pixel_height"])

        xres = pixel_width * size
        yres = pixel_height * size

        dx = xres / 2
        dy = yres / 2

        # Ready clip geom outside of loop.
        if clip_geom is not None:
            clip_ref = open_vector(
                internal_reproject_vector(clip_geom,
                                          metadata["projection_osr"]))
            clip_layer = clip_ref.GetLayerByIndex(clip_layer_index)

            meta_clip = internal_vector_to_metadata(clip_ref)
            # geom_clip = meta_clip["layers"][clip_layer_index]["column_geom"]

            clip_extent = meta_clip["extent_ogr"]
            # clip_adjust = [
            #     clip_extent[0] - clip_extent[0] % xres,  # x_min
            #     (clip_extent[1] - clip_extent[1] % xres) + xres,  # x_max
            #     clip_extent[2] - clip_extent[2] % yres,  # y_min
            #     (clip_extent[3] - clip_extent[3] % yres) + yres,  # y_max
            # ]

        coord_grid = np.empty((all_rows, 2), dtype="float64")

        # tiled_extent = [None, None, None, None]

        row_count = 0
        for idx in range(len(in_offsets)):
            x_offset = in_offsets[idx][0]
            y_offset = in_offsets[idx][1]

            x_step = shapes[idx][0]
            y_step = shapes[idx][1]

            x_min = (ulx + dx) + (x_offset * pixel_width)
            x_max = x_min + (x_step * xres)

            y_max = (uly - dy) - (y_offset * pixel_height)
            y_min = y_max - (y_step * yres)

            # if clip_geom is not None:
            #     if clip_adjust[0] > x_min:
            #         x_min = clip_adjust[0] + (x_offset * pixel_width)
            #     if clip_adjust[1] < x_max:
            #         x_max = clip_adjust[1] + (x_offset * pixel_width)
            #     if clip_adjust[2] > y_min:
            #         y_min = clip_adjust[2] - (y_offset * pixel_height)
            #     if clip_adjust[3] < y_max:
            #         y_max = clip_adjust[3] - (y_offset * pixel_height)

            # if idx == 0:
            #     tiled_extent[0] = x_min
            #     tiled_extent[1] = x_max
            #     tiled_extent[2] = y_min
            #     tiled_extent[3] = y_max
            # else:
            #     if x_min < tiled_extent[0]:
            #         tiled_extent[0] = x_min
            #     if x_max > tiled_extent[1]:
            #         tiled_extent[1] = x_max
            #     if y_min < tiled_extent[2]:
            #         tiled_extent[2] = y_min
            #     if y_max > tiled_extent[3]:
            #         tiled_extent[3] = y_max

            # y is flipped so: xmin --> xmax, ymax -- ymin to keep same order as numpy array
            x_patches = round((x_max - x_min) / xres)
            y_patches = round((y_max - y_min) / yres)

            xr = np.arange(x_min, x_max, xres)[0:x_step]
            if xr.shape[0] < x_patches:
                xr = np.arange(x_min, x_max + epsilon, xres)[0:x_step]
            elif xr.shape[0] > x_patches:
                xr = np.arange(x_min, x_max - epsilon, xres)[0:x_step]

            yr = np.arange(y_max, y_min + epsilon, -yres)[0:y_step]
            if yr.shape[0] < y_patches:
                yr = np.arange(y_max, y_min - epsilon, -yres)[0:y_step]
            elif yr.shape[0] > y_patches:
                yr = np.arange(y_max, y_min + epsilon, -yres)[0:y_step]

            if generate_border_patches and idx == 0:

                if border_patches_needed_x:
                    xr[-1] = xr[-1] - (
                        (xr[-1] + dx) - metadata["extent_dict"]["right"])

                if border_patches_needed_y:
                    yr[-1] = yr[-1] - (
                        (yr[-1] - dy) - metadata["extent_dict"]["bottom"])

            oxx, oyy = np.meshgrid(xr, yr)
            oxr = oxx.ravel()
            oyr = oyy.ravel()

            offset_length = oxr.shape[0]

            coord_grid[row_count:row_count + offset_length, 0] = oxr
            coord_grid[row_count:row_count + offset_length, 1] = oyr

            row_count += offset_length

            offset_rows_cumsum[idx] = offset_length

        offset_rows_cumsum = np.cumsum(offset_rows_cumsum)
        coord_grid = coord_grid[:row_count]

        # Output geometry
        driver = ogr.GetDriverByName("GPKG")
        patches_path = f"/vsimem/patches_{uuid4().int}.gpkg"
        patches_ds = driver.CreateDataSource(patches_path)
        patches_layer = patches_ds.CreateLayer("patches_all",
                                               geom_type=ogr.wkbPolygon,
                                               srs=metadata["projection_osr"])
        patches_fdefn = patches_layer.GetLayerDefn()

        og_fid = "og_fid"

        field_defn = ogr.FieldDefn(og_fid, ogr.OFTInteger)
        patches_layer.CreateField(field_defn)

        if clip_geom is not None:
            clip_feature_count = meta_clip["layers"][clip_layer_index][
                "feature_count"]
            spatial_index = rtree.index.Index(interleaved=False)
            for _ in range(clip_feature_count):
                clip_feature = clip_layer.GetNextFeature()
                clip_fid = clip_feature.GetFID()
                clip_feature_geom = clip_feature.GetGeometryRef()
                xmin, xmax, ymin, ymax = clip_feature_geom.GetEnvelope()

                spatial_index.insert(clip_fid, (xmin, xmax, ymin, ymax))

        fids = 0
        mask = []
        for tile_id in range(coord_grid.shape[0]):
            x, y = coord_grid[tile_id]

            if verbose == 1:
                progress(tile_id, coord_grid.shape[0], "Patch generation")

            x_min = x - dx
            x_max = x + dx
            y_min = y - dy
            y_max = y + dy

            tile_intersects = True

            grid_geom = None
            poly_wkt = None

            if clip_geom is not None:
                tile_intersects = False

                if not ogr_bbox_intersects([x_min, x_max, y_min, y_max],
                                           clip_extent):
                    continue

                intersections = list(
                    spatial_index.intersection((x_min, x_max, y_min, y_max)))
                if len(intersections) == 0:
                    continue

                poly_wkt = f"POLYGON (({x_min} {y_max}, {x_max} {y_max}, {x_max} {y_min}, {x_min} {y_min}, {x_min} {y_max}))"
                grid_geom = ogr.CreateGeometryFromWkt(poly_wkt)

                for fid1 in intersections:
                    clip_feature = clip_layer.GetFeature(fid1)
                    clip_feature_geom = clip_feature.GetGeometryRef()

                    if grid_geom.Intersects(clip_feature_geom):
                        tile_intersects = True
                        break

            if tile_intersects:
                ft = ogr.Feature(patches_fdefn)

                if grid_geom is None:
                    poly_wkt = f"POLYGON (({x_min} {y_max}, {x_max} {y_max}, {x_max} {y_min}, {x_min} {y_min}, {x_min} {y_max}))"
                    grid_geom = ogr.CreateGeometryFromWkt(poly_wkt)

                ft_geom = ogr.CreateGeometryFromWkt(poly_wkt)
                ft.SetGeometry(ft_geom)

                ft.SetField(og_fid, int(fids))
                ft.SetFID(int(fids))

                patches_layer.CreateFeature(ft)
                ft = None

                mask.append(tile_id)
                fids += 1

        if verbose == 1:
            progress(coord_grid.shape[0], coord_grid.shape[0],
                     "Patch generation")

        mask = np.array(mask, dtype=int)

        if generate_grid_geom is True:
            if out_dir is None:
                output_geom = patches_ds
            else:
                raster_basename = metadata["basename"]
                geom_name = f"{prefix}{raster_basename}_geom_{str(size)}{postfix}.gpkg"
                output_geom = os.path.join(out_dir, geom_name)

                overwrite_required(output_geom, overwrite)
                remove_if_overwrite(output_geom, overwrite)

                if verbose == 1:
                    print("Writing output geometry..")

                internal_vector_to_disk(patches_ds,
                                        output_geom,
                                        overwrite=overwrite)

    if verbose == 1:
        print("Writing numpy array..")

    output_blocks = []

    for raster in in_rasters:

        base = None
        basename = None
        output_block = None

        if out_dir is not None:
            base = os.path.basename(raster)
            basename = os.path.splitext(base)[0]
            output_block = os.path.join(
                out_dir, f"{prefix}{basename}{postfix}.npy")

        metadata = internal_raster_to_metadata(raster)

        if generate_grid_geom is True or clip_geom is not None:
            output_shape = (row_count, size, size, metadata["band_count"])
        else:
            output_shape = (all_rows, size, size, metadata["band_count"])

        input_datatype = metadata["datatype"]

        output_array = np.empty(output_shape, dtype=input_datatype)

        # if clip_geom is not None:
        #     ref = raster_to_array(raster, filled=True, extent=tiled_extent)
        # else:
        ref = raster_to_array(raster, filled=True)

        for k, offset in enumerate(in_offsets):
            start = 0
            if k > 0:
                start = offset_rows_cumsum[k - 1]

            blocks = None
            if (k == 0 and generate_border_patches
                    and (border_patches_needed_x or border_patches_needed_y)):
                blocks = array_to_blocks(
                    ref,
                    (size, size),
                    offset,
                    border_patches_needed_x,
                    border_patches_needed_y,
                )
            else:
                blocks = array_to_blocks(ref, (size, size), offset)

            output_array[start:offset_rows_cumsum[k]] = blocks

        if generate_grid_geom is False and clip_geom is None:
            if out_dir is None:
                output_blocks.append(output_array)
            else:
                output_blocks.append(output_block)
                np.save(output_block, output_array)
        else:
            if out_dir is None:
                output_blocks.append(output_array[mask])
            else:
                output_blocks.append(output_block)
                np.save(output_block, output_array[mask])

    if verify_output and generate_grid_geom:
        test_extraction(
            in_rasters,
            output_blocks,
            output_geom,
            samples=verification_samples,
            grid_layer_index=0,
            verbose=verbose,
        )

    if len(output_blocks) == 1:
        output_blocks = output_blocks[0]

    return (output_blocks, output_geom)
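
The tiling itself can be sketched with plain numpy reshaping. This shows the idea behind the array_to_blocks helper, not its actual code: a 2D array is cut into non-overlapping size x size tiles, and rows or columns that do not fill a whole tile are trimmed, which is what the border patches above compensate for. The array and tile size are made up:

import numpy as np

size = 2
arr = np.arange(36).reshape(6, 6)

# Trim to a multiple of the tile size, then reshape into tiles.
rows = (arr.shape[0] // size) * size
cols = (arr.shape[1] // size) * size
trimmed = arr[:rows, :cols]

patches = (
    trimmed.reshape(rows // size, size, cols // size, size)
    .swapaxes(1, 2)
    .reshape(-1, size, size)
)

print(patches.shape)  # (9, 2, 2)
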
Example #10
def _warp_raster(
    raster: Union[str, gdal.Dataset],
    out_path: Optional[str] = None,
    projection: Optional[Union[int, str, gdal.Dataset, ogr.DataSource,
                               osr.SpatialReference]] = None,
    clip_geom: Optional[Union[str, ogr.DataSource]] = None,
    target_size: Optional[Union[Tuple[Number], Number]] = None,
    target_in_pixels: bool = False,
    resample_alg: str = "nearest",
    crop_to_geom: bool = True,
    all_touch: bool = True,
    adjust_bbox: bool = True,
    overwrite: bool = True,
    creation_options: Union[list, None] = None,
    src_nodata: Union[str, int, float] = "infer",
    dst_nodata: Union[str, int, float] = "infer",
    layer_to_clip: int = 0,
    prefix: str = "",
    postfix: str = "_resampled",
) -> str:
    """WARNING: INTERNAL. DO NOT USE."""
    raster_list, path_list = ready_io_raster(raster, out_path, overwrite,
                                             prefix, postfix)

    origin = open_raster(raster_list[0])
    out_name = path_list[0]
    raster_metadata = raster_to_metadata(origin, create_geometry=True)

    # options
    warp_options = []
    if all_touch:
        warp_options.append("CUTLINE_ALL_TOUCHED=TRUE")
    else:
        warp_options.append("CUTLINE_ALL_TOUCHED=FALSE")

    origin_projection: osr.SpatialReference = raster_metadata["projection_osr"]
    origin_extent: ogr.Geometry = raster_metadata["extent_geom_latlng"]

    target_projection = origin_projection
    if projection is not None:
        target_projection = parse_projection(projection)

    clip_ds = None
    output_bounds = None

    if clip_geom is not None:
        if is_raster(clip_geom):
            opened_raster = open_raster(clip_geom)
            clip_metadata_raster = raster_to_metadata(opened_raster,
                                                      create_geometry=True)
            clip_ds = clip_metadata_raster["extent_datasource"]
            clip_metadata = internal_vector_to_metadata(clip_ds,
                                                        create_geometry=True)
        elif is_vector(clip_geom):
            clip_ds = open_vector(clip_geom)
            clip_metadata = internal_vector_to_metadata(clip_ds,
                                                        create_geometry=True)
        else:
            if file_exists(clip_geom):
                raise ValueError(f"Unable to parse clip geometry: {clip_geom}")
            else:
                raise ValueError(f"Unable to find clip geometry {clip_geom}")

        if layer_to_clip > (clip_metadata["layer_count"] - 1):
            raise ValueError("Requested an unable layer_to_clip.")

        clip_projection = clip_metadata["projection_osr"]
        clip_extent = clip_metadata["extent_geom_latlng"]

        # Fast check: Does the extent of the two inputs overlap?
        if not origin_extent.Intersects(clip_extent):
            raise Exception("Clipping geometry did not intersect raster.")

        # Check if projections match, otherwise reproject target geom.
        if not target_projection.IsSame(clip_projection):
            clip_metadata["extent"] = reproject_extent(
                clip_metadata["extent"],
                clip_projection,
                target_projection,
            )

        # The extent needs to be reprojected to the target.
        # this ensures that adjust_bbox works.
        x_min_og, y_max_og, x_max_og, y_min_og = reproject_extent(
            raster_metadata["extent"],
            origin_projection,
            target_projection,
        )
        output_bounds = (x_min_og, y_min_og, x_max_og, y_max_og
                         )  # gdal_warp format

        if crop_to_geom:

            if adjust_bbox:
                output_bounds = align_bbox(
                    raster_metadata["extent"],
                    clip_metadata["extent"],
                    raster_metadata["pixel_width"],
                    raster_metadata["pixel_height"],
                    warp_format=True,
                )

            else:
                x_min_og, y_max_og, x_max_og, y_min_og = clip_metadata[
                    "extent"]
                output_bounds = (
                    x_min_og,
                    y_min_og,
                    x_max_og,
                    y_max_og,
                )  # gdal_warp format

        if clip_metadata["layer_count"] > 1:
            clip_ds = vector_to_memory(
                clip_ds,
                memory_path=f"clip_geom_{uuid4().int}.gpkg",
                layer_to_extract=layer_to_clip,
            )
        elif not isinstance(clip_ds, str):
            clip_ds = vector_to_memory(
                clip_ds,
                memory_path=f"clip_geom_{uuid4().int}.gpkg",
            )

        if clip_ds is None:
            raise ValueError(f"Unable to parse input clip geom: {clip_geom}")

    x_res, y_res, x_pixels, y_pixels = raster_size_from_list(
        target_size, target_in_pixels)

    out_format = path_to_driver_raster(out_name)
    out_creation_options = default_options(creation_options)

    # nodata
    src_nodata_value = raster_metadata["nodata_value"]
    if src_nodata != "infer":
        src_nodata_value = src_nodata

    out_nodata = None
    if src_nodata_value is not None:
        out_nodata = src_nodata_value
    elif dst_nodata == "infer":
        out_nodata = gdal_nodata_value_from_type(
            raster_metadata["datatype_gdal_raw"])
    else:
        out_nodata = dst_nodata

    # Removes file if it exists and overwrite is True.
    remove_if_overwrite(out_path, overwrite)

    warped = gdal.Warp(
        out_name,
        origin,
        xRes=x_res,
        yRes=y_res,
        width=x_pixels,
        height=y_pixels,
        cutlineDSName=clip_ds,
        outputBounds=output_bounds,
        format=out_format,
        srcSRS=origin_projection,
        dstSRS=target_projection,
        resampleAlg=translate_resample_method(resample_alg),
        creationOptions=out_creation_options,
        warpOptions=warp_options,
        srcNodata=src_nodata_value,
        dstNodata=out_nodata,
        targetAlignedPixels=False,
        cropToCutline=False,
        multithread=True,
    )

    if warped is None:
        raise Exception(f"Error while warping raster: {raster}")

    return out_name
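
A self-contained sketch of a gdal.Warp call with explicit output bounds and resolution, the combination _warp_raster assembles; the bounds, resolution, and output path are made up:

from osgeo import gdal, osr

srs = osr.SpatialReference()
srs.ImportFromEPSG(3857)

src = gdal.GetDriverByName("MEM").Create("", 100, 100, 1, gdal.GDT_Byte)
src.SetProjection(srs.ExportToWkt())
src.SetGeoTransform((0.0, 10.0, 0.0, 1000.0, 0.0, -10.0))

subset = gdal.Warp(
    "/vsimem/warped_subset.tif",
    src,
    outputBounds=(100.0, 100.0, 600.0, 600.0),  # (x_min, y_min, x_max, y_max)
    xRes=10.0,
    yRes=10.0,
    resampleAlg="near",
)

if subset is None:
    raise RuntimeError("gdal.Warp failed")
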
Example #11
def array_to_raster(
    array: Union[np.ndarray, np.ma.MaskedArray],
    reference: Union[str, gdal.Dataset],
    out_path: Optional[str] = None,
    overwrite: bool = True,
    creation_options: Union[list, None] = None,
) -> str:
    """Turns a numpy array into a GDAL dataset or exported
        as a raster using a reference raster.

    Args:
        array (np.ndarray): The numpy array to convert

        reference (path or Dataset): A reference on which to base
        the geographical extent and projection of the raster.

    **kwargs:
        out_path (path): The location to save the raster. If None is
        supplied an in memory raster is returned. filetype is infered
        from the extension.

        overwrite (bool): Specifies if the file already exists, should
        it be overwritten?

        creation_options (list): A list of options for the GDAL creation. Only
        used if an outpath is specified. Defaults are:
            "TILED=YES"
            "NUM_THREADS=ALL_CPUS"
            "BIGG_TIF=YES"
            "COMPRESS=LZW"

    Returns:
        If an out_path has been specified, it returns the path to the
        newly created raster file.
    """
    type_check(array, [np.ndarray, np.ma.MaskedArray], "array")
    type_check(reference, [str, gdal.Dataset], "reference")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options", allow_none=True)

    # Verify the numpy array
    if (not isinstance(array, (np.ndarray, np.ma.MaskedArray))
            or array.size == 0 or array.ndim < 2 or array.ndim > 3):
        raise ValueError(f"Input array is invalid {array}")

    # Parse the driver
    driver_name = "GTiff" if out_path is None else path_to_driver_raster(
        out_path)
    if driver_name is None:
        raise ValueError(f"Unable to parse filetype from path: {out_path}")

    driver = gdal.GetDriverByName(driver_name)
    if driver is None:
        raise ValueError(
            f"Error while creating driver from extension: {out_path}")

    # How many bands?
    bands = 1
    if array.ndim == 3:
        bands = array.shape[2]

    overwrite_required(out_path, overwrite)

    metadata = raster_to_metadata(reference)
    reference_nodata = metadata["nodata_value"]

    # Handle nodata. GDAL Python throws an error if the conversion is not explicit.
    if reference_nodata is not None:
        reference_nodata = float(reference_nodata)
        if (reference_nodata).is_integer() is True:
            reference_nodata = int(reference_nodata)

    # Handle nodata
    input_nodata = None
    if np.ma.is_masked(array) is True:
        input_nodata = array.get_fill_value(
        )  # type: ignore (because it's a masked array.)

    destination_dtype = numpy_to_gdal_datatype(array.dtype)

    # Weird double issue with GDAL and numpy. Cast to float or int
    if input_nodata is not None:
        input_nodata = float(input_nodata)
        if (input_nodata).is_integer() is True:
            input_nodata = int(input_nodata)

    output_name = None
    if out_path is None:
        output_name = f"/vsimem/array_to_raster_{uuid4().int}.tif"
    else:
        output_name = out_path

    if metadata["width"] != array.shape[1] or metadata[
            "height"] != array.shape[0]:
        print("WARNING: Input array and raster are not of equal size.")

    remove_if_overwrite(out_path, overwrite)

    destination = driver.Create(
        output_name,
        array.shape[1],
        array.shape[0],
        bands,
        destination_dtype,
        default_options(creation_options),
    )

    destination.SetProjection(metadata["projection"])
    destination.SetGeoTransform(metadata["transform"])

    for band_idx in range(bands):
        band = destination.GetRasterBand(band_idx + 1)
        if bands > 1 or array.ndim == 3:
            band.WriteArray(array[:, :, band_idx])
        else:
            band.WriteArray(array)

        if input_nodata is not None:
            band.SetNoDataValue(input_nodata)
        elif reference_nodata is not None:
            band.SetNoDataValue(reference_nodata)

    return output_name
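
The nodata handling above can be sketched with a small masked array: its fill value is what ends up as the band's nodata value. This is a plain gdal/numpy illustration, not the function's own code, and the values are made up:

import numpy as np
from osgeo import gdal

data = np.ma.masked_array(
    np.array([[1.0, 2.0], [3.0, 4.0]], dtype="float32"),
    mask=[[True, False], [False, False]],
    fill_value=-9999.0,
)

ds = gdal.GetDriverByName("MEM").Create("", 2, 2, 1, gdal.GDT_Float32)
band = ds.GetRasterBand(1)
band.WriteArray(data.filled())  # masked cells become the fill value
band.SetNoDataValue(float(data.get_fill_value()))

print(band.GetNoDataValue())  # -9999.0
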
Example #12
def stack_rasters(
    rasters: List[Union[str, gdal.Dataset]],
    out_path: Optional[str] = None,
    overwrite: bool = True,
    dtype: Optional[str] = None,
    creation_options: Union[list, None] = None,
) -> str:
    """Stacks a list of rasters. Must be aligned."""
    type_check(rasters, [list], "rasters")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(overwrite, [bool], "overwrite")
    type_check(dtype, [str], "dtype", allow_none=True)
    type_check(creation_options, [list], "creation_options", allow_none=True)

    if not rasters_are_aligned(rasters, same_extent=True):
        raise ValueError("Rasters are not aligned. Try running align_rasters.")

    overwrite_required(out_path, overwrite)

    # Ensures that all the input rasters are valid.
    raster_list = get_raster_path(rasters, return_list=True)

    if out_path is not None and path_to_ext(out_path) == ".vrt":
        raise ValueError("Please use stack_rasters_vrt to create vrt files.")

    # Parse the driver
    driver_name = "GTiff" if out_path is None else path_to_driver_raster(
        out_path)
    if driver_name is None:
        raise ValueError(f"Unable to parse filetype from path: {out_path}")

    driver = gdal.GetDriverByName(driver_name)
    if driver is None:
        raise ValueError(
            f"Error while creating driver from extension: {out_path}")

    output_name = None
    if out_path is None:
        output_name = f"/vsimem/stacked_rasters_{uuid4().int}.tif"
    else:
        output_name = out_path

    raster_dtype = raster_to_metadata(raster_list[0])["datatype_gdal_raw"]

    datatype = translate_datatypes(
        dtype) if dtype is not None else raster_dtype

    nodata_values: List[Union[int, float, None]] = []
    nodata_missmatch = False
    nodata_value = None
    total_bands = 0
    metadatas = []
    for raster in raster_list:
        metadata = raster_to_metadata(raster)
        metadatas.append(metadata)

        nodata_value = metadata["nodata_value"]
        total_bands += metadata["band_count"]

        if nodata_missmatch is False:
            for ndv in nodata_values:
                if nodata_missmatch:
                    continue

                if metadata["nodata_value"] != ndv:
                    print(
                        "WARNING: NoDataValues of input rasters do not match. Removing nodata."
                    )
                    nodata_missmatch = True

        nodata_values.append(metadata["nodata_value"])

    if nodata_missmatch:
        nodata_value = None

    remove_if_overwrite(out_path, overwrite)

    destination = driver.Create(
        output_name,
        metadatas[0]["width"],
        metadatas[0]["height"],
        total_bands,
        datatype,
        default_options(creation_options),
    )

    destination.SetProjection(metadatas[0]["projection"])
    destination.SetGeoTransform(metadatas[0]["transform"])

    bands_added = 0
    for index, raster in enumerate(raster_list):
        metadata = metadatas[index]
        band_count = metadata["band_count"]

        array = raster_to_array(raster)

        for band_idx in range(band_count):
            dst_band = destination.GetRasterBand(bands_added + 1)
            dst_band.WriteArray(array[:, :, band_idx])

            if nodata_value is not None:
                dst_band.SetNoDataValue(nodata_value)

            bands_added += 1

    return output_name
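
A self-contained sketch of the band-copy loop stack_rasters performs, stacking two aligned single-band in-memory rasters into one two-band raster (the sizes and values are made up):

import numpy as np
from osgeo import gdal

mem = gdal.GetDriverByName("MEM")

sources = []
for value in (1, 2):
    ds = mem.Create("", 3, 3, 1, gdal.GDT_Byte)
    ds.GetRasterBand(1).WriteArray(np.full((3, 3), value, dtype="uint8"))
    sources.append(ds)

stacked = mem.Create("", 3, 3, len(sources), gdal.GDT_Byte)
for index, ds in enumerate(sources):
    stacked.GetRasterBand(index + 1).WriteArray(ds.GetRasterBand(1).ReadAsArray())

print(stacked.RasterCount)  # 2
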
Example #13
def shift_raster(
    raster: Union[gdal.Dataset, str],
    shift: Union[Number, Tuple[Number, Number], List[Number]],
    out_path: Optional[str] = None,
    overwrite: bool = True,
    creation_options: list = [],
) -> Union[gdal.Dataset, str]:
    """Shifts a raster in a given direction.

    Returns:
        An in-memory raster. If an out_path is given the output is a string containing
        the path to the newly created raster.
    """
    type_check(raster, [list, str, gdal.Dataset], "raster")
    type_check(shift, [tuple, list], "shift")
    type_check(out_path, [list, str], "out_path", allow_none=True)
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")

    ref = open_raster(raster)
    metadata = raster_to_metadata(ref)

    x_shift: float = 0.0
    y_shift: float = 0.0
    if isinstance(shift, tuple) or isinstance(shift, list):
        if len(shift) == 1:
            if is_number(shift[0]):
                x_shift = float(shift[0])
                y_shift = float(shift[0])
            else:
                raise ValueError(
                    "shift is not a number or a list/tuple of numbers.")
        elif len(shift) == 2:
            if is_number(shift[0]) and is_number(shift[1]):
                x_shift = float(shift[0])
                y_shift = float(shift[1])
            else:
                raise ValueError(
                    "shift is not a number or a list/tuple of numbers.")
        else:
            raise ValueError("shift is either empty or larger than 2.")
    elif is_number(shift):
        x_shift = float(shift)
        y_shift = float(shift)
    else:
        raise ValueError("shift is invalid.")

    out_name = None
    out_format = None
    out_creation_options = []
    if out_path is None:
        raster_name = metadata["basename"]
        out_name = f"/vsimem/{raster_name}_{uuid4().int}_resampled.tif"
        out_format = "GTiff"
    else:
        out_creation_options = default_options(creation_options)
        out_name = out_path
        out_format = path_to_driver_raster(out_path)

    remove_if_overwrite(out_path, overwrite)

    driver = gdal.GetDriverByName(out_format)

    shifted = driver.Create(
        out_name,  # Location of the saved raster, ignored if driver is memory.
        metadata["width"],  # Dataframe width in pixels (e.g. 1920px).
        metadata["height"],  # Dataframe height in pixels (e.g. 1280px).
        metadata["band_count"],  # The number of bands required.
        metadata["datatype_gdal_raw"],  # Datatype of the destination.
        out_creation_options,
    )

    new_transform = list(metadata["transform"])
    new_transform[0] += x_shift
    new_transform[3] += y_shift

    shifted.SetGeoTransform(new_transform)
    shifted.SetProjection(metadata["projection"])

    src_nodata = metadata["nodata_value"]

    for band in range(metadata["band_count"]):
        origin_raster_band = ref.GetRasterBand(band + 1)
        target_raster_band = shifted.GetRasterBand(band + 1)

        target_raster_band.WriteArray(origin_raster_band.ReadAsArray())
        target_raster_band.SetNoDataValue(src_nodata)

    if out_path is not None:
        shifted = None
        return out_path
    else:
        return shifted
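
Shifting boils down to moving the geotransform origin, which the function does before copying the bands. A self-contained sketch with made-up coordinates and shift values:

from osgeo import gdal

ds = gdal.GetDriverByName("MEM").Create("", 4, 4, 1, gdal.GDT_Byte)
ds.SetGeoTransform((500000.0, 10.0, 0.0, 6200000.0, 0.0, -10.0))

x_shift, y_shift = 25.0, -15.0

transform = list(ds.GetGeoTransform())
transform[0] += x_shift  # x origin
transform[3] += y_shift  # y origin
ds.SetGeoTransform(transform)

print(ds.GetGeoTransform())
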
Example #14
def align_rasters(
    rasters: List[Union[str, gdal.Dataset]],
    out_path: Optional[Union[List[str], str]] = None,
    master: Optional[Union[gdal.Dataset, str]] = None,
    postfix: str = "_aligned",
    bounding_box: Union[str, gdal.Dataset, ogr.DataSource, list,
                        tuple] = "intersection",
    resample_alg: str = "nearest",
    target_size: Optional[Union[tuple, list, int, float, str,
                                gdal.Dataset]] = None,
    target_in_pixels: bool = False,
    projection: Optional[Union[int, str, gdal.Dataset, ogr.DataSource,
                               osr.SpatialReference]] = None,
    overwrite: bool = True,
    creation_options: list = [],
    src_nodata: Optional[Union[str, int, float]] = "infer",
    dst_nodata: Optional[Union[str, int, float]] = "infer",
    prefix: str = "",
    ram=8000,
    skip_existing=False,
) -> List[str]:
    type_check(rasters, [list], "rasters")
    type_check(out_path, [list, str], "out_path", allow_none=True)
    type_check(master, [list, str], "master", allow_none=True)
    type_check(bounding_box, [str, gdal.Dataset, ogr.DataSource, list, tuple],
               "bounding_box")
    type_check(resample_alg, [str], "resample_alg")
    type_check(
        target_size,
        [tuple, list, int, float, str, gdal.Dataset],
        "target_size",
        allow_none=True,
    )
    type_check(target_in_pixels, [bool], "target_in_pixels")
    type_check(
        projection,
        [int, str, gdal.Dataset, ogr.DataSource, osr.SpatialReference],
        "projection",
        allow_none=True,
    )
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")
    type_check(src_nodata, [str, int, float], "src_nodata", allow_none=True)
    type_check(dst_nodata, [str, int, float], "dst_nodata", allow_none=True)
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")

    raster_list, path_list = ready_io_raster(
        rasters,
        out_path,
        overwrite=overwrite,
        prefix=prefix,
        postfix=postfix,
        uuid=False,
    )

    x_pixels = None
    y_pixels = None
    x_res = None
    y_res = None
    target_projection = None
    target_bounds = None

    reprojected_rasters: List[str] = []

    # Read the metadata for each raster.
    # Catalogue the used projections, to choose the most common one if necessary.
    used_projections: List[dict] = []
    metadata: List[dict] = []

    for raster in rasters:
        meta = raster_to_metadata(raster)
        metadata.append(meta)
        used_projections.append(meta["projection"])

    # If there is a master layer, copy information from that layer.
    if master is not None:
        master_metadata = raster_to_metadata(master)

        target_projection = master_metadata["projection_osr"]
        x_min, y_max, x_max, y_min = master_metadata["extent"]

        # Set the target values.
        target_bounds = (x_min, y_min, x_max, y_max)
        x_res = master_metadata["pixel_width"]
        y_res = master_metadata["pixel_height"]
        x_pixels = master_metadata["width"]
        y_pixels = master_metadata["height"]
        target_size = (x_res, y_res)

        target_in_pixels = False

    # Parameters that are explicitly set override the values taken from master.
    # Handle projection
    if projection is not None:
        target_projection = parse_projection(projection)

    # If no projection was specified via master or the projection parameter, the most common input projection is chosen.
    elif target_projection is None:

        # Sort and count the projections
        projection_counter: dict = {}
        for proj in used_projections:
            if proj in projection_counter:
                projection_counter[proj] += 1
            else:
                projection_counter[proj] = 1

        # Choose most common projection
        most_common_projection = sorted(projection_counter,
                                        key=projection_counter.get,
                                        reverse=True)

        target_projection = parse_projection(most_common_projection[0])

    if target_size is not None:

        # If a raster is the input, use its pixel size as the target values.
        if isinstance(target_size, (gdal.Dataset, str)):
            if isinstance(target_size, str) and not is_raster(target_size):
                raise ValueError(
                    f"Unable to parse the raster used for target_size: {target_size}"
                )

            # Reprojection is necessary to ensure the correct pixel_size
            reprojected_target_size = internal_reproject_raster(
                target_size, target_projection)
            target_size_raster = raster_to_metadata(reprojected_target_size)

            # Set the target values.
            x_res = target_size_raster["pixel_width"]
            y_res = target_size_raster["pixel_height"]
        else:
            # If a list, tuple, int, or float is passed, turn it into the target values.
            x_res, y_res, x_pixels, y_pixels = raster_size_from_list(
                target_size, target_in_pixels)

    # If nothing has been specified, we will infer the pixel_size based on the median of all input rasters.
    elif x_res is None and y_res is None and x_pixels is None and y_pixels is None:

        # Ready numpy arrays for insertion
        x_res_arr = np.empty(len(raster_list), dtype="float32")
        y_res_arr = np.empty(len(raster_list), dtype="float32")

        for index, raster in enumerate(raster_list):
            # It is necessary to reproject each raster, as pixel height and width might be different after projection.
            reprojected = internal_reproject_raster(raster, target_projection)
            target_size_raster = raster_to_metadata(reprojected)

            # Add the pixel sizes to the numpy arrays
            x_res_arr[index] = target_size_raster["pixel_width"]
            y_res_arr[index] = target_size_raster["pixel_height"]

            # Keep track of the reprojected rasters so we only reproject each raster once.
            reprojected_rasters.append(reprojected)

        # Use the median values of pixel sizes as target values.
        x_res = np.median(x_res_arr)
        y_res = np.median(y_res_arr)

    if target_bounds is None:

        # If a bounding box is supplied, simply use that one. It must be in the target projection.
        if isinstance(bounding_box, (list, tuple)):
            if len(bounding_box) != 4:
                raise ValueError(
                    "bounding_box as a list/tuple must have 4 values.")
            target_bounds = bounding_box

        # If the bounding box is a raster, take its extent and reproject it to the target projection.
        elif is_raster(bounding_box):
            reprojected_bbox_raster = raster_to_metadata(
                internal_reproject_raster(bounding_box, target_projection))

            x_min, y_max, x_max, y_min = reprojected_bbox_raster["extent"]

            # add to target values.
            target_bounds = (x_min, y_min, x_max, y_max)

        # If the bounding box is a vector, take its extent and reproject it to the target projection.
        elif is_vector(bounding_box):
            reprojected_bbox_vector = internal_vector_to_metadata(
                internal_reproject_vector(bounding_box, target_projection))

            x_min, y_max, x_max, y_min = reprojected_bbox_vector["extent"]

            # add to target values.
            target_bounds = (x_min, y_min, x_max, y_max)

        # If the bounding box is a string, we either take the union or the intersection of all the
        # bounding boxes of the input rasters.
        elif isinstance(bounding_box, str):
            if bounding_box == "intersection" or bounding_box == "union":
                extents = []

                # If the rasters have not been reprojected, reproject them now.
                if len(reprojected_rasters) != len(raster_list):
                    reprojected_rasters = []

                    for raster in raster_list:
                        raster_metadata = raster_to_metadata(raster)

                        if raster_metadata["projection_osr"].IsSame(
                                target_projection):
                            reprojected_rasters.append(raster)
                        else:
                            reprojected = internal_reproject_raster(
                                raster, target_projection)
                            reprojected_rasters.append(reprojected)

                # Add the extents of the reprojected rasters to the extents list.
                for reprojected_raster in reprojected_rasters:
                    reprojected_raster_metadata = dict(
                        raster_to_metadata(reprojected_raster))
                    extents.append(reprojected_raster_metadata["extent"])

                # Placeholder values
                x_min, y_max, x_max, y_min = extents[0]

                # Loop the extents. Narrowing if intersection, expanding if union.
                for index, extent in enumerate(extents):
                    if index == 0:
                        continue

                    if bounding_box == "intersection":
                        if extent[0] > x_min:
                            x_min = extent[0]
                        if extent[1] < y_max:
                            y_max = extent[1]
                        if extent[2] < x_max:
                            x_max = extent[2]
                        if extent[3] > y_min:
                            y_min = extent[3]

                    elif bounding_box == "union":
                        if extent[0] < x_min:
                            x_min = extent[0]
                        if extent[1] > y_max:
                            y_max = extent[1]
                        if extent[2] > x_max:
                            x_max = extent[2]
                        if extent[3] < y_min:
                            y_min = extent[3]

                # Add to target values.
                target_bounds = (x_min, y_min, x_max, y_max)

            else:
                raise ValueError(
                    f"Unable to parse or infer target_bounds from bounding_box: {bounding_box}")
        else:
            raise ValueError(
                f"Unable to parse or infer target_bounds from bounding_box: {bounding_box}")
    """ 
        If the rasters have not been reprojected, we reproject them now.
        The reprojection is necessary as warp has to be a two step process
        in order to align the rasters properly. This might not be necessary
        in a future version of gdal.
    """
    if len(reprojected_rasters) != len(raster_list):
        reprojected_rasters = []

        for raster in raster_list:
            raster_metadata = raster_to_metadata(raster)

            # If the raster is already the correct projection, simply append the raster.
            if raster_metadata["projection_osr"].IsSame(target_projection):
                reprojected_rasters.append(raster)
            else:
                reprojected = internal_reproject_raster(
                    raster, target_projection)
                reprojected_rasters.append(reprojected)

    # If any of the target values are still undefined, raise an error.
    if target_projection is None or target_bounds is None:
        raise Exception(
            "Error while preparing the target projection or bounds.")

    if x_res is None and y_res is None and x_pixels is None and y_pixels is None:
        raise Exception("Error while preparing the target pixel size.")

    # The list of aligned rasters to return. These are paths, either on disk or in /vsimem.
    return_list: List[str] = []
    for index, raster in enumerate(reprojected_rasters):
        raster_metadata = raster_to_metadata(raster)

        out_name = path_list[index]
        out_format = path_to_driver_raster(out_name)

        if skip_existing and os.path.exists(out_name):
            return_list.append(out_name)
            continue

        # Handle nodata.
        out_src_nodata = None
        out_dst_nodata = None
        if src_nodata == "infer":
            out_src_nodata = raster_metadata["nodata_value"]

            if out_src_nodata is None:
                out_src_nodata = gdal_nodata_value_from_type(
                    raster_metadata["datatype_gdal_raw"])

        elif src_nodata is None:
            out_src_nodata = None
        elif not isinstance(src_nodata, str):
            out_src_nodata = src_nodata

        if dst_nodata == "infer":
            out_dst_nodata = out_src_nodata
        elif dst_nodata is False or dst_nodata is None:
            out_dst_nodata = None
        elif src_nodata is None:
            out_dst_nodata = None
        elif not isinstance(dst_nodata, str):
            out_dst_nodata = dst_nodata

        # Removes file if it exists and overwrite is True.
        remove_if_overwrite(out_name, overwrite)

        # Hand over to gdal.Warp to do the heavy lifting!
        warped = gdal.Warp(
            out_name,
            raster,
            xRes=x_res,
            yRes=y_res,
            width=x_pixels,
            height=y_pixels,
            dstSRS=target_projection,
            outputBounds=target_bounds,
            format=out_format,
            resampleAlg=translate_resample_method(resample_alg),
            creationOptions=default_options(creation_options),
            srcNodata=out_src_nodata,
            dstNodata=out_dst_nodata,
            targetAlignedPixels=False,
            cropToCutline=False,
            multithread=True,
            warpMemoryLimit=ram,
        )

        if warped is None:
            raise Exception("Error while warping rasters.")

        return_list.append(out_name)

    if not rasters_are_aligned(return_list, same_extent=True):
        raise Exception("Error while aligning rasters. Output is not aligned")

    return return_list
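A minimal usage sketch for align_rasters. The import path and file names below are assumptions for illustration; point the import at wherever the function is defined in your project:

from raster_align import align_rasters  # hypothetical module name

aligned_paths = align_rasters(
    ["B02.tif", "B03.tif", "B04.tif"],  # placeholder input rasters
    out_path=["B02_aligned.tif", "B03_aligned.tif", "B04_aligned.tif"],
    bounding_box="intersection",  # or "union", a raster/vector, or (x_min, y_min, x_max, y_max)
    resample_alg="nearest",
    src_nodata="infer",
    dst_nodata="infer",
)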
Example #15
0
def internal_multipart_to_singlepart(
    vector: Union[str, ogr.DataSource],
    out_path: Optional[str] = None,
    copy_attributes: bool = False,
    overwrite: bool = True,
    add_index: bool = True,
    process_layer: int = -1,
    verbose: int = 1,
) -> str:
    type_check(vector, [str, ogr.DataSource], "vector")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(overwrite, [bool], "overwrite")
    type_check(add_index, [bool], "add_index")
    type_check(process_layer, [int], "process_layer")
    type_check(verbose, [int], "verbose")

    vector_list, path_list = ready_io_vector(vector, out_path, overwrite=overwrite)

    ref = open_vector(vector_list[0])
    out_name = path_list[0]

    driver = ogr.GetDriverByName(path_to_driver_vector(out_name))

    metadata = internal_vector_to_metadata(ref)

    remove_if_overwrite(out_name, overwrite)

    destination = driver.CreateDataSource(out_name)

    for index, layer_meta in enumerate(metadata["layers"]):
        if process_layer != -1 and index != process_layer:
            continue

        if verbose == 1:
            layer_name = layer_meta["layer_name"]
            print(f"Splitting layer: {layer_name}")

        target_unknown = False

        if layer_meta["geom_type_ogr"] == 4:  # MultiPoint
            target_type = 1  # Point
        elif layer_meta["geom_type_ogr"] == 5:  # MultiLineString
            target_type = 2  # LineString
        elif layer_meta["geom_type_ogr"] == 6:  # MultiPolygon
            target_type = 3  # Polygon
        elif layer_meta["geom_type_ogr"] == 1004:  # MultiPoint (z)
            target_type = 1001  # Point (z)
        elif layer_meta["geom_type_ogr"] == 1005:  # MultiLineString (z)
            target_type = 1002  # LineString (z)
        elif layer_meta["geom_type_ogr"] == 1006:  # MultiPolygon (z)
            target_type = 1003  # Polygon (z)
        elif layer_meta["geom_type_ogr"] == 2004:  # MultiPoint (m)
            target_type = 2001  # Point (m)
        elif layer_meta["geom_type_ogr"] == 2005:  # MultiLineString (m)
            target_type = 2002  # LineString (m)
        elif layer_meta["geom_type_ogr"] == 2006:  # MultiPolygon (m)
            target_type = 2003  # Polygon (m)
        elif layer_meta["geom_type_ogr"] == 3004:  # MultiPoint (zm)
            target_type = 3001  # Point (m)
        elif layer_meta["geom_type_ogr"] == 3005:  # MultiLineString (zm)
            target_type = 3002  # LineString (m)
        elif layer_meta["geom_type_ogr"] == 3006:  # MultiPolygon (zm)
            target_type = 3003  # Polygon (m)
        else:
            target_unknown = True
            target_type = layer_meta["geom_type_ogr"]

        destination_layer = destination.CreateLayer(
            layer_meta["layer_name"], layer_meta["projection_osr"], target_type
        )
        layer_defn = destination_layer.GetLayerDefn()
        field_count = layer_meta["field_count"]

        original_target = ref.GetLayerByIndex(index)
        feature_count = original_target.GetFeatureCount()

        if copy_attributes:
            first_feature = original_target.GetNextFeature()
            original_target.ResetReading()

            if verbose == 1:
                print("Creating attribute fields")

            for field_id in range(field_count):
                field_defn = first_feature.GetFieldDefnRef(field_id)

                fname = field_defn.GetName()
                ftype = field_defn.GetTypeName()
                fwidth = field_defn.GetWidth()
                fprecision = field_defn.GetPrecision()

                if ftype == "String" or ftype == "Date":
                    fielddefn = ogr.FieldDefn(fname, ogr.OFTString)
                    fielddefn.SetWidth(fwidth)
                elif ftype == "Real":
                    fielddefn = ogr.FieldDefn(fname, ogr.OFTReal)
                    fielddefn.SetWidth(fwidth)
                    fielddefn.SetPrecision(fprecision)
                else:
                    fielddefn = ogr.FieldDefn(fname, ogr.OFTInteger)

                destination_layer.CreateField(fielddefn)

        for feature_index in range(feature_count):
            feature = original_target.GetNextFeature()
            geom = feature.GetGeometryRef()

            if target_unknown:
                out_feat = ogr.Feature(layer_defn)
                out_feat.SetGeometry(geom)

                if copy_attributes:
                    for field_id in range(field_count):
                        values = feature.GetField(field_id)
                        out_feat.SetField(field_id, values)

                destination_layer.CreateFeature(out_feat)

            for geom_part in geom:
                out_feat = ogr.Feature(layer_defn)
                out_feat.SetGeometry(geom_part)

                if copy_attributes:
                    for field_id in range(field_count):
                        values = feature.GetField(field_id)
                        out_feat.SetField(field_id, values)

                destination_layer.CreateFeature(out_feat)

            if verbose == 1:
                progress(feature_index, feature_count - 1, "Splitting.")

    if add_index:
        vector_add_index(destination)

    return out_name
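A hypothetical usage sketch: exploding multi-part features into single-part features. The import path and file names are placeholders:

from vector_split import internal_multipart_to_singlepart  # hypothetical module name

single = internal_multipart_to_singlepart(
    "parcels_multi.gpkg",            # placeholder multi-part input
    out_path="parcels_single.gpkg",  # placeholder single-part output
    copy_attributes=True,            # also copy the attribute fields to each part
    verbose=0,
)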
Example #16
0
def internal_resample_raster(
    raster: Union[str, gdal.Dataset],
    target_size: Union[tuple, int, float, str, gdal.Dataset],
    target_in_pixels: bool = False,
    out_path: Optional[str] = None,
    resample_alg: str = "nearest",
    overwrite: bool = True,
    creation_options: list = [],
    dtype=None,
    dst_nodata: Union[str, int, float] = "infer",
    prefix: str = "",
    postfix: str = "_resampled",
    add_uuid: bool = False,
) -> str:
    """OBS: Internal. Single output.

    Reprojects a raster given a target projection. Beware if your input is in
    latitude and longitude, you'll need to specify the target_size in degrees as well.
    """
    type_check(raster, [str, gdal.Dataset], "raster")
    type_check(target_size, [tuple, int, float, str, gdal.Dataset],
               "target_size")
    type_check(target_in_pixels, [bool], "target_in_pixels")
    type_check(out_path, [list, str], "out_path", allow_none=True)
    type_check(resample_alg, [str], "resample_alg")
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")
    type_check(dst_nodata, [str, int, float], "dst_nodata")
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")

    raster_list, path_list = ready_io_raster(
        raster,
        out_path,
        overwrite=overwrite,
        prefix=prefix,
        postfix=postfix,
        uuid=add_uuid,
    )

    ref = open_raster(raster_list[0])
    metadata = raster_to_metadata(ref)
    out_name = path_list[0]

    x_res, y_res, x_pixels, y_pixels = raster_size_from_list(
        target_size, target_in_pixels)

    out_creation_options = default_options(creation_options)
    out_format = path_to_driver_raster(out_name)

    src_nodata = metadata["nodata_value"]
    out_nodata = None
    if src_nodata is not None:
        out_nodata = src_nodata
    else:
        if dst_nodata == "infer":
            out_nodata = gdal_nodata_value_from_type(
                metadata["datatype_gdal_raw"])
        elif isinstance(dst_nodata, str):
            raise TypeError(f"dst_nodata is in a wrong format: {dst_nodata}")
        else:
            out_nodata = dst_nodata

    remove_if_overwrite(out_name, overwrite)

    resampled = gdal.Warp(
        out_name,
        ref,
        width=x_pixels,
        height=y_pixels,
        xRes=x_res,
        yRes=y_res,
        format=out_format,
        outputType=translate_datatypes(dtype),
        resampleAlg=translate_resample_method(resample_alg),
        creationOptions=out_creation_options,
        srcNodata=metadata["nodata_value"],
        dstNodata=out_nodata,
        multithread=True,
    )

    if resampled is None:
        raise Exception(f"Error while resampling raster: {out_name}")

    return out_name
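A usage sketch for internal_resample_raster, showing target_size given in georeferenced units versus pixels. The import path and file names are hypothetical:

from raster_resample import internal_resample_raster  # hypothetical module name

# target_size in georeferenced units: 10 x 10 (metres for a projected raster,
# degrees if the raster is in latitude/longitude).
resampled = internal_resample_raster("dem.tif", target_size=10, out_path="dem_10m.tif")

# target_size in pixels: force a 1024 x 1024 output grid instead.
thumbnail = internal_resample_raster(
    "dem.tif",
    target_size=(1024, 1024),
    target_in_pixels=True,
    out_path="dem_1024px.tif",
)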
Example #17
0
def add_border_to_raster(
    input_raster,
    out_path=None,
    border_size=100,
    border_size_unit="px",
    border_value=0,
    overwrite: bool = True,
    creation_options: list = [],
):
    in_raster = open_raster(input_raster)
    metadata = raster_to_metadata(in_raster)

    # Parse the driver
    driver_name = "GTiff" if out_path is None else path_to_driver_raster(
        out_path)
    if driver_name is None:
        raise ValueError(f"Unable to parse filetype from path: {out_path}")

    driver = gdal.GetDriverByName(driver_name)
    if driver is None:
        raise ValueError(
            f"Error while creating driver from extension: {out_path}")

    output_name = None
    if out_path is None:
        output_name = f"/vsimem/raster_proximity_{uuid4().int}.tif"
    else:
        output_name = out_path

    in_arr = raster_to_array(in_raster)

    if border_size_unit == "px":
        border_size_y = border_size
        border_size_x = border_size
        new_shape = (
            in_arr.shape[0] + (2 * border_size_y),
            in_arr.shape[1] + (2 * border_size_x),
            in_arr.shape[2],
        )
    else:
        border_size_y = round(border_size / metadata["pixel_height"])
        border_size_x = round(border_size / metadata["pixel_width"])
        new_shape = (
            in_arr.shape[0] + (2 * border_size_y),
            in_arr.shape[1] + (2 * border_size_x),
            in_arr.shape[2],
        )

    new_arr = np.full(new_shape, border_value, dtype=in_arr.dtype)
    new_arr[border_size_y:-border_size_y,
            border_size_x:-border_size_x, :] = in_arr

    if isinstance(in_arr, np.ma.MaskedArray):
        mask = np.zeros(new_shape, dtype=bool)
        mask[border_size_y:-border_size_y,
             border_size_x:-border_size_x, :] = in_arr.mask
        new_arr = np.ma.array(new_arr, mask=mask)
        new_arr.fill_value = in_arr.fill_value

    remove_if_overwrite(out_path, overwrite)

    dest_raster = driver.Create(
        output_name,
        new_shape[1],
        new_shape[0],
        metadata["band_count"],
        numpy_to_gdal_datatype(in_arr.dtype),
        default_options(creation_options),
    )

    og_transform = in_raster.GetGeoTransform()

    new_transform = list(og_transform)

    new_transform[0] -= border_size_x * og_transform[1]
    new_transform[3] -= border_size_y * og_transform[5]

    dest_raster.SetGeoTransform(new_transform)
    dest_raster.SetProjection(in_raster.GetProjectionRef())

    for band_num in range(1, metadata["band_count"] + 1):
        dst_band = dest_raster.GetRasterBand(band_num)
        dst_band.WriteArray(new_arr[:, :, band_num - 1])

        if metadata["has_nodata"]:
            dst_band.SetNoDataValue(metadata["nodata_value"])

    return output_name
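The core of the border logic is plain NumPy padding: allocate a larger array filled with the border value and paste the original block into its centre. A self-contained sketch of just that step:

import numpy as np

arr = np.arange(6, dtype="float32").reshape(2, 3, 1)  # (rows, cols, bands)
border_y, border_x, border_value = 2, 2, 0

padded = np.full(
    (arr.shape[0] + 2 * border_y, arr.shape[1] + 2 * border_x, arr.shape[2]),
    border_value,
    dtype=arr.dtype,
)
padded[border_y:-border_y, border_x:-border_x, :] = arr

print(padded.shape)  # (6, 7, 1)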
Example #18
0
def _clip_raster(
    raster: Union[str, gdal.Dataset],
    clip_geom: Union[str, ogr.DataSource, gdal.Dataset],
    out_path: Optional[str] = None,
    resample_alg: str = "nearest",
    crop_to_geom: bool = True,
    adjust_bbox: bool = True,
    all_touch: bool = True,
    overwrite: bool = True,
    creation_options: list = [],
    dst_nodata: Union[str, int, float] = "infer",
    layer_to_clip: int = 0,
    prefix: str = "",
    postfix: str = "_clipped",
    verbose: int = 1,
    uuid: bool = False,
    ram: int = 8000,
) -> str:
    """OBS: Internal. Single output.

    Clips a raster using a vector geometry or the extent of
    another raster.
    """
    type_check(raster, [str, gdal.Dataset], "raster")
    type_check(clip_geom, [str, ogr.DataSource, gdal.Dataset], "clip_geom")
    type_check(out_path, [str], "out_path", allow_none=True)
    type_check(resample_alg, [str], "resample_alg")
    type_check(crop_to_geom, [bool], "crop_to_geom")
    type_check(adjust_bbox, [bool], "adjust_bbox")
    type_check(all_touch, [bool], "all_touch")
    type_check(dst_nodata, [str, int, float], "dst_nodata")
    type_check(layer_to_clip, [int], "layer_to_clip")
    type_check(overwrite, [bool], "overwrite")
    type_check(creation_options, [list], "creation_options")
    type_check(prefix, [str], "prefix")
    type_check(postfix, [str], "postfix")
    type_check(verbose, [int], "verbose")
    type_check(uuid, [bool], "uuid")

    _, path_list = ready_io_raster(raster,
                                   out_path,
                                   overwrite=overwrite,
                                   prefix=prefix,
                                   postfix=postfix,
                                   uuid=uuid)

    if out_path is not None:
        if "vsimem" not in out_path:
            if not os.path.isdir(os.path.split(os.path.normpath(out_path))[0]):
                raise ValueError(
                    f"out_path folder does not exist: {out_path}")

    # Input is a vector.
    if is_vector(clip_geom):
        clip_ds = open_vector(clip_geom)

        clip_metadata = internal_vector_to_metadata(
            clip_ds, process_layer=layer_to_clip)

        if clip_metadata["layer_count"] > 1:
            clip_ds = internal_vector_to_memory(clip_ds,
                                                layer_to_extract=layer_to_clip)

        if isinstance(clip_ds, ogr.DataSource):
            clip_ds = clip_ds.GetName()

    # Input is a raster (use extent)
    elif is_raster(clip_geom):
        clip_metadata = raster_to_metadata(clip_geom, create_geometry=True)
        clip_metadata["layer_count"] = 1
        clip_ds = clip_metadata["extent_datasource"].GetName()
    else:
        if file_exists(clip_geom):
            raise ValueError(f"Unable to parse clip geometry: {clip_geom}")
        else:
            raise ValueError(f"Unable to locate clip geometry {clip_geom}")

    if layer_to_clip > (clip_metadata["layer_count"] - 1):
        raise ValueError("Requested an unable layer_to_clip.")

    if clip_ds is None:
        raise ValueError(f"Unable to parse input clip geom: {clip_geom}")

    clip_projection = clip_metadata["projection_osr"]
    clip_extent = clip_metadata["extent"]

    # options
    warp_options = []
    if all_touch:
        warp_options.append("CUTLINE_ALL_TOUCHED=TRUE")
    else:
        warp_options.append("CUTLINE_ALL_TOUCHED=FALSE")

    origin_layer = open_raster(raster)

    raster_metadata = raster_to_metadata(raster)
    origin_projection = raster_metadata["projection_osr"]
    origin_extent = raster_metadata["extent"]

    # Check if projections match, otherwise reproject target geom.
    if not origin_projection.IsSame(clip_projection):
        clip_metadata["extent"] = reproject_extent(
            clip_metadata["extent"],
            clip_projection,
            origin_projection,
        )

    # Fast check: Does the extent of the two inputs overlap?
    if not gdal_bbox_intersects(origin_extent, clip_extent):
        raise Exception("Geometries did not intersect.")

    output_bounds = raster_metadata["extent_gdal_warp"]

    if crop_to_geom:

        if adjust_bbox:
            output_bounds = align_bbox(
                raster_metadata["extent"],
                clip_metadata["extent"],
                raster_metadata["pixel_width"],
                raster_metadata["pixel_height"],
                warp_format=True,
            )

        else:
            output_bounds = clip_metadata["extent_gdal_warp"]

    # formats
    out_name = path_list[0]
    out_format = path_to_driver_raster(out_name)
    out_creation_options = default_options(creation_options)

    # nodata
    src_nodata = raster_metadata["nodata_value"]
    out_nodata = None
    if src_nodata is not None:
        out_nodata = src_nodata
    else:
        if dst_nodata == "infer":
            out_nodata = gdal_nodata_value_from_type(
                raster_metadata["datatype_gdal_raw"])
        elif dst_nodata is None:
            out_nodata = None
        elif isinstance(dst_nodata, (int, float)):
            out_nodata = dst_nodata
        else:
            raise ValueError(f"Unable to parse nodata_value: {dst_nodata}")

    # Removes file if it exists and overwrite is True.
    remove_if_overwrite(out_name, overwrite)

    if verbose == 0:
        gdal.PushErrorHandler("CPLQuietErrorHandler")

    clipped = gdal.Warp(
        out_name,
        origin_layer,
        format=out_format,
        resampleAlg=translate_resample_method(resample_alg),
        targetAlignedPixels=False,
        outputBounds=output_bounds,
        xRes=raster_metadata["pixel_width"],
        yRes=raster_metadata["pixel_height"],
        cutlineDSName=clip_ds,
        cropToCutline=False,  # GDAL does this incorrectly when targetAlignedPixels is True.
        creationOptions=out_creation_options,
        warpMemoryLimit=ram,
        warpOptions=warp_options,
        srcNodata=raster_metadata["nodata_value"],
        dstNodata=out_nodata,
        multithread=True,
    )

    if verbose == 0:
        gdal.PopErrorHandler()

    if clipped is None:
        raise Exception("Error while clipping raster.")

    return out_name
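A hypothetical usage sketch for _clip_raster: clipping a raster to the polygons of a vector file. The import path and file names are placeholders:

from raster_clip import _clip_raster  # hypothetical module name

clipped = _clip_raster(
    "mosaic.tif",              # placeholder input raster
    "area_of_interest.gpkg",   # placeholder clip geometry (vector or raster)
    out_path="mosaic_aoi.tif",
    crop_to_geom=True,         # shrink the output to the clip geometry's extent
    adjust_bbox=True,          # snap that extent to the input raster's pixel grid
    dst_nodata="infer",
)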